diff (stringlengths 41–2.03M) | msg (stringlengths 1–1.5k, ⌀) | repo (stringlengths 5–40) | sha (stringlengths 40–40) | time (stringlengths 20–20) |
---|---|---|---|---|
mmm a / src / library . js <nl> ppp b / src / library . js <nl> LibraryManager . library = { <nl> return ( f - + Math_floor ( f ) ! = . 5 ) ? + _round ( f ) : + _round ( f / + 2 ) * + 2 ; <nl> } , <nl> <nl> + / / TODO : fround ? <nl> + llvm_nearbyint_f32__asm : true , <nl> + llvm_nearbyint_f32__sig : ' ff ' , <nl> + llvm_nearbyint_f32__deps : [ ' roundf ' ] , <nl> + llvm_nearbyint_f32 : function ( f ) { <nl> + f = + f ; <nl> + return ( f - + Math_floor ( f ) ! = . 5 ) ? + _roundf ( f ) : + _roundf ( f / + 2 ) * + 2 ; <nl> + } , <nl> + <nl> + llvm_nearbyint_f64__asm : true , <nl> + llvm_nearbyint_f64__sig : ' dd ' , <nl> + llvm_nearbyint_f64__deps : [ ' round ' ] , <nl> + llvm_nearbyint_f64 : function ( f ) { <nl> + f = + f ; <nl> + return ( f - + Math_floor ( f ) ! = . 5 ) ? + _round ( f ) : + _round ( f / + 2 ) * + 2 ; <nl> + } , <nl> + <nl> _reallyNegative : function ( x ) { <nl> return x < 0 | | ( x = = = 0 & & ( 1 / x ) = = = - Infinity ) ; <nl> } , <nl> mmm a / tests / core / test_llvm_intrinsics . cpp <nl> ppp b / tests / core / test_llvm_intrinsics . cpp <nl> extern double llvm_log10_f64 ( double x ) ; <nl> extern float llvm_copysign_f32 ( float x , float y ) ; <nl> extern double llvm_copysign_f64 ( double x , double y ) ; <nl> <nl> - extern double llvm_round_f64 ( double x ) ; <nl> extern float llvm_round_f32 ( float x ) ; <nl> + extern double llvm_round_f64 ( double x ) ; <nl> extern float llvm_minnum_f32 ( float x , float y ) ; <nl> extern double llvm_minnum_f64 ( double x , double y ) ; <nl> extern float llvm_maxnum_f32 ( float x , float y ) ; <nl> extern double llvm_maxnum_f64 ( double x , double y ) ; <nl> + extern float llvm_nearbyint_f32 ( float x ) ; <nl> + extern double llvm_nearbyint_f64 ( double x ) ; <nl> } <nl> <nl> int main ( void ) { <nl> int main ( void ) { <nl> printf ( " llvm_round_f32 % . 1f \ n " , llvm_round_f32 ( - 20 . 5 ) ) ; <nl> printf ( " llvm_round_f32 % . 1f \ n " , llvm_round_f32 ( - 20 . 51 ) ) ; <nl> <nl> + printf ( " llvm_nearbyint_f64 % . 1f \ n " , llvm_nearbyint_f64 ( 20 . 50 ) ) ; <nl> + printf ( " llvm_nearbyint_f64 % . 1f \ n " , llvm_nearbyint_f64 ( 20 . 51 ) ) ; <nl> + printf ( " llvm_nearbyint_f64 % . 1f \ n " , llvm_nearbyint_f64 ( 42 ) ) ; <nl> + printf ( " llvm_nearbyint_f64 % . 1f \ n " , llvm_nearbyint_f64 ( - 20 . 49 ) ) ; <nl> + printf ( " llvm_nearbyint_f64 % . 1f \ n " , llvm_nearbyint_f64 ( - 20 . 5 ) ) ; <nl> + printf ( " llvm_nearbyint_f64 % . 1f \ n " , llvm_nearbyint_f64 ( - 20 . 51 ) ) ; <nl> + <nl> + printf ( " llvm_nearbyint_f32 % . 1f \ n " , llvm_nearbyint_f32 ( 20 . 50 ) ) ; <nl> + printf ( " llvm_nearbyint_f32 % . 1f \ n " , llvm_nearbyint_f32 ( 20 . 51 ) ) ; <nl> + printf ( " llvm_nearbyint_f32 % . 1f \ n " , llvm_nearbyint_f32 ( 42 ) ) ; <nl> + printf ( " llvm_nearbyint_f32 % . 1f \ n " , llvm_nearbyint_f32 ( - 20 . 49 ) ) ; <nl> + printf ( " llvm_nearbyint_f32 % . 1f \ n " , llvm_nearbyint_f32 ( - 20 . 5 ) ) ; <nl> + printf ( " llvm_nearbyint_f32 % . 1f \ n " , llvm_nearbyint_f32 ( - 20 . 51 ) ) ; <nl> + <nl> printf ( " llvm_minnum_f32 % . 1f \ n " , llvm_minnum_f32 ( 5 . 7 , 10 . 2 ) ) ; <nl> printf ( " llvm_minnum_f32 % . 1f \ n " , llvm_minnum_f32 ( 8 . 5 , 2 . 3 ) ) ; <nl> printf ( " llvm_minnum_f64 % . 1f \ n " , llvm_minnum_f64 ( 5 . 7 , 10 . 2 ) ) ; <nl> mmm a / tests / core / test_llvm_intrinsics . out <nl> ppp b / tests / core / test_llvm_intrinsics . out <nl> llvm_round_f32 42 . 0 <nl> llvm_round_f32 - 20 . 0 <nl> llvm_round_f32 - 21 . 0 <nl> llvm_round_f32 - 21 . 0 <nl> + llvm_nearbyint_f64 20 . 
0 <nl> + llvm_nearbyint_f64 21 . 0 <nl> + llvm_nearbyint_f64 42 . 0 <nl> + llvm_nearbyint_f64 - 20 . 0 <nl> + llvm_nearbyint_f64 - 20 . 0 <nl> + llvm_nearbyint_f64 - 21 . 0 <nl> + llvm_nearbyint_f32 20 . 0 <nl> + llvm_nearbyint_f32 21 . 0 <nl> + llvm_nearbyint_f32 42 . 0 <nl> + llvm_nearbyint_f32 - 20 . 0 <nl> + llvm_nearbyint_f32 - 20 . 0 <nl> + llvm_nearbyint_f32 - 21 . 0 <nl> llvm_minnum_f32 5 . 7 <nl> llvm_minnum_f32 2 . 3 <nl> llvm_minnum_f64 5 . 7 <nl>
|
llvm.nearbyint.f* support ( )
|
emscripten-core/emscripten
|
272c87f9fb989fd66e759db8d0c0e3a3158e9f27
|
2018-02-27T20:37:53Z
|
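The `llvm_nearbyint_f*` emulation in the diff above handles ties specially: JavaScript's `_round` rounds halfway cases away from zero, so when the fractional part is exactly 0.5 the code rounds `f / 2` (whose fraction is now .25 or .75, no longer a tie) and doubles the result, which lands on the nearest even integer just as `nearbyint` does under the default rounding mode. A minimal C++ sketch of that same idea, assuming the default FE_TONEAREST mode; the helper name is illustrative and not taken from the Emscripten source:

```cpp
#include <cmath>
#include <cstdio>

// Tie case: round f/2 (no longer a tie) and double back -> nearest even,
// matching nearbyint() in the default FE_TONEAREST rounding mode.
static double emulated_nearbyint(double f) {
    if (f - std::floor(f) != 0.5)
        return std::round(f);          // non-tie: ordinary rounding suffices
    return std::round(f / 2.0) * 2.0;  // tie: 20.5 -> 20.0, 21.5 -> 22.0
}

int main() {
    const double xs[] = {20.50, 20.51, 42.0, -20.49, -20.5, -20.51};
    for (double x : xs)
        std::printf("%6.2f -> emulated %5.1f  std::nearbyint %5.1f\n",
                    x, emulated_nearbyint(x), std::nearbyint(x));
    return 0;
}
```

For these inputs the two columns agree, reproducing the expected-output values added to test_llvm_intrinsics.out above.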
mmm a / modules / imgproc / src / smooth . cpp <nl> ppp b / modules / imgproc / src / smooth . cpp <nl> class BilateralFilter_8u_Invoker : <nl> # if CV_SSE3 <nl> if ( haveSSE3 ) <nl> { <nl> + const __m128i izero = _mm_setzero_si128 ( ) ; <nl> const __m128 _b0 = _mm_set1_ps ( static_cast < float > ( b0 ) ) ; <nl> const __m128 _g0 = _mm_set1_ps ( static_cast < float > ( g0 ) ) ; <nl> const __m128 _r0 = _mm_set1_ps ( static_cast < float > ( r0 ) ) ; <nl> class BilateralFilter_8u_Invoker : <nl> <nl> for ( ; k < = maxk - 4 ; k + = 4 ) <nl> { <nl> - const uchar * sptr_k = sptr + j + space_ofs [ k ] ; <nl> - const uchar * sptr_k1 = sptr + j + space_ofs [ k + 1 ] ; <nl> - const uchar * sptr_k2 = sptr + j + space_ofs [ k + 2 ] ; <nl> - const uchar * sptr_k3 = sptr + j + space_ofs [ k + 3 ] ; <nl> + const int * const sptr_k0 = reinterpret_cast < const int * > ( sptr + j + space_ofs [ k ] ) ; <nl> + const int * const sptr_k1 = reinterpret_cast < const int * > ( sptr + j + space_ofs [ k + 1 ] ) ; <nl> + const int * const sptr_k2 = reinterpret_cast < const int * > ( sptr + j + space_ofs [ k + 2 ] ) ; <nl> + const int * const sptr_k3 = reinterpret_cast < const int * > ( sptr + j + space_ofs [ k + 3 ] ) ; <nl> <nl> - __m128 _b = _mm_set_ps ( sptr_k3 [ 0 ] , sptr_k2 [ 0 ] , sptr_k1 [ 0 ] , sptr_k [ 0 ] ) ; <nl> - __m128 _g = _mm_set_ps ( sptr_k3 [ 1 ] , sptr_k2 [ 1 ] , sptr_k1 [ 1 ] , sptr_k [ 1 ] ) ; <nl> - __m128 _r = _mm_set_ps ( sptr_k3 [ 2 ] , sptr_k2 [ 2 ] , sptr_k1 [ 2 ] , sptr_k [ 2 ] ) ; <nl> + __m128 _b = _mm_cvtepi32_ps ( _mm_unpacklo_epi16 ( _mm_unpacklo_epi8 ( _mm_cvtsi32_si128 ( sptr_k0 [ 0 ] ) , izero ) , izero ) ) ; <nl> + __m128 _g = _mm_cvtepi32_ps ( _mm_unpacklo_epi16 ( _mm_unpacklo_epi8 ( _mm_cvtsi32_si128 ( sptr_k1 [ 0 ] ) , izero ) , izero ) ) ; <nl> + __m128 _r = _mm_cvtepi32_ps ( _mm_unpacklo_epi16 ( _mm_unpacklo_epi8 ( _mm_cvtsi32_si128 ( sptr_k2 [ 0 ] ) , izero ) , izero ) ) ; <nl> + __m128 _z = _mm_cvtepi32_ps ( _mm_unpacklo_epi16 ( _mm_unpacklo_epi8 ( _mm_cvtsi32_si128 ( sptr_k3 [ 0 ] ) , izero ) , izero ) ) ; <nl> + <nl> + _MM_TRANSPOSE4_PS ( _b , _g , _r , _z ) ; <nl> <nl> __m128 bt = _mm_andnot_ps ( _signMask , _mm_sub_ps ( _b , _b0 ) ) ; <nl> __m128 gt = _mm_andnot_ps ( _signMask , _mm_sub_ps ( _g , _g0 ) ) ; <nl> class BilateralFilter_32f_Invoker : <nl> # if CV_SSE3 <nl> if ( haveSSE3 ) <nl> { <nl> + __m128 psum = _mm_setzero_ps ( ) ; <nl> const __m128 _val0 = _mm_set1_ps ( sptr [ j ] ) ; <nl> const __m128 _scale_index = _mm_set1_ps ( scale_index ) ; <nl> const __m128 _signMask = _mm_load_ps ( ( const float * ) bufSignMask ) ; <nl> class BilateralFilter_32f_Invoker : <nl> <nl> _sw = _mm_hadd_ps ( _w , _val ) ; <nl> _sw = _mm_hadd_ps ( _sw , _sw ) ; <nl> - _mm_storel_pi ( ( __m64 * ) bufSum32 , _sw ) ; <nl> - <nl> - sum + = bufSum32 [ 1 ] ; <nl> - wsum + = bufSum32 [ 0 ] ; <nl> + psum = _mm_add_ps ( _sw , psum ) ; <nl> } <nl> + _mm_storel_pi ( ( __m64 * ) bufSum32 , psum ) ; <nl> + <nl> + sum = bufSum32 [ 1 ] ; <nl> + wsum = bufSum32 [ 0 ] ; <nl> } <nl> # endif <nl> <nl> class BilateralFilter_32f_Invoker : <nl> } <nl> else <nl> { <nl> - assert ( cn = = 3 ) ; <nl> + CV_Assert ( cn = = 3 ) ; <nl> for ( j = 0 ; j < size . 
width * 3 ; j + = 3 ) <nl> { <nl> float sum_b = 0 , sum_g = 0 , sum_r = 0 , wsum = 0 ; <nl> class BilateralFilter_32f_Invoker : <nl> # if CV_SSE3 <nl> if ( haveSSE3 ) <nl> { <nl> + __m128 sum = _mm_setzero_ps ( ) ; <nl> const __m128 _b0 = _mm_set1_ps ( b0 ) ; <nl> const __m128 _g0 = _mm_set1_ps ( g0 ) ; <nl> const __m128 _r0 = _mm_set1_ps ( r0 ) ; <nl> class BilateralFilter_32f_Invoker : <nl> { <nl> __m128 _sw = _mm_loadu_ps ( space_weight + k ) ; <nl> <nl> - const float * sptr_k = sptr + j + space_ofs [ k ] ; <nl> - const float * sptr_k1 = sptr + j + space_ofs [ k + 1 ] ; <nl> - const float * sptr_k2 = sptr + j + space_ofs [ k + 2 ] ; <nl> - const float * sptr_k3 = sptr + j + space_ofs [ k + 3 ] ; <nl> + const float * const sptr_k0 = sptr + j + space_ofs [ k ] ; <nl> + const float * const sptr_k1 = sptr + j + space_ofs [ k + 1 ] ; <nl> + const float * const sptr_k2 = sptr + j + space_ofs [ k + 2 ] ; <nl> + const float * const sptr_k3 = sptr + j + space_ofs [ k + 3 ] ; <nl> <nl> - __m128 _b = _mm_set_ps ( sptr_k3 [ 0 ] , sptr_k2 [ 0 ] , sptr_k1 [ 0 ] , sptr_k [ 0 ] ) ; <nl> - __m128 _g = _mm_set_ps ( sptr_k3 [ 1 ] , sptr_k2 [ 1 ] , sptr_k1 [ 1 ] , sptr_k [ 1 ] ) ; <nl> - __m128 _r = _mm_set_ps ( sptr_k3 [ 2 ] , sptr_k2 [ 2 ] , sptr_k1 [ 2 ] , sptr_k [ 2 ] ) ; <nl> + __m128 _b = _mm_loadu_ps ( sptr_k0 ) ; <nl> + __m128 _g = _mm_loadu_ps ( sptr_k1 ) ; <nl> + __m128 _r = _mm_loadu_ps ( sptr_k2 ) ; <nl> + __m128 _z = _mm_loadu_ps ( sptr_k3 ) ; <nl> + _MM_TRANSPOSE4_PS ( _b , _g , _r , _z ) ; <nl> <nl> __m128 _bt = _mm_andnot_ps ( _signMask , _mm_sub_ps ( _b , _b0 ) ) ; <nl> __m128 _gt = _mm_andnot_ps ( _signMask , _mm_sub_ps ( _g , _g0 ) ) ; <nl> class BilateralFilter_32f_Invoker : <nl> _g = _mm_hadd_ps ( _g , _r ) ; <nl> <nl> _w = _mm_hadd_ps ( _w , _g ) ; <nl> - _mm_store_ps ( bufSum32 , _w ) ; <nl> - <nl> - wsum + = bufSum32 [ 0 ] ; <nl> - sum_b + = bufSum32 [ 1 ] ; <nl> - sum_g + = bufSum32 [ 2 ] ; <nl> - sum_r + = bufSum32 [ 3 ] ; <nl> + sum = _mm_add_ps ( sum , _w ) ; <nl> } <nl> - <nl> + _mm_store_ps ( bufSum32 , sum ) ; <nl> + wsum = bufSum32 [ 0 ] ; <nl> + sum_b = bufSum32 [ 1 ] ; <nl> + sum_g = bufSum32 [ 2 ] ; <nl> + sum_r = bufSum32 [ 3 ] ; <nl> } <nl> # endif <nl> <nl>
|
Merge pull request from ilya-lavrenov:BilateralFilter
|
opencv/opencv
|
d620ef0d5541cd02cf60a8808d2363142f16705c
|
2013-02-27T15:44:56Z
|
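The OpenCV change above replaces per-lane `_mm_set_ps` gathers with four contiguous `_mm_loadu_ps` loads followed by `_MM_TRANSPOSE4_PS`, so each register ends up holding one colour channel for four pixels, and it accumulates partial results in a vector (`psum`/`sum`) instead of doing a horizontal add plus scalar store on every loop iteration. A small C++ sketch of the load-and-transpose step, using a hypothetical helper name; as in the patch, the fourth float read for each pixel is simply the start of the next pixel:

```cpp
#include <xmmintrin.h>
#include <cstdio>

// Load b/g/r for four 3-channel float pixels: one unaligned 4-float load per
// pixel, then transpose so each register holds a single channel of 4 pixels.
static void load_bgr_x4(const float* p0, const float* p1,
                        const float* p2, const float* p3,
                        __m128& b, __m128& g, __m128& r) {
    __m128 v0 = _mm_loadu_ps(p0);      // b0 g0 r0 (first float of next pixel)
    __m128 v1 = _mm_loadu_ps(p1);      // b1 g1 r1 ...
    __m128 v2 = _mm_loadu_ps(p2);      // b2 g2 r2 ...
    __m128 v3 = _mm_loadu_ps(p3);      // b3 g3 r3 ...
    _MM_TRANSPOSE4_PS(v0, v1, v2, v3); // pixel rows become channel rows
    b = v0; g = v1; r = v2;            // v3 is the don't-care fourth lane
}

int main() {
    // 4 BGR pixels laid out contiguously, plus padding for the over-read.
    float px[16] = {1, 2, 3,  4, 5, 6,  7, 8, 9,  10, 11, 12,  0, 0, 0, 0};
    __m128 b, g, r;
    load_bgr_x4(px + 0, px + 3, px + 6, px + 9, b, g, r);
    float out[4];
    _mm_storeu_ps(out, b);
    std::printf("blue lanes: %.0f %.0f %.0f %.0f\n", out[0], out[1], out[2], out[3]);
    return 0;
}
```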
mmm a / src / ruby / bin / math_pb . rb <nl> ppp b / src / ruby / bin / math_pb . rb <nl> <nl> require ' google / protobuf ' <nl> <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " math . DivArgs " do <nl> - optional : dividend , : int64 , 1 <nl> - optional : divisor , : int64 , 2 <nl> - end <nl> - add_message " math . DivReply " do <nl> - optional : quotient , : int64 , 1 <nl> - optional : remainder , : int64 , 2 <nl> - end <nl> - add_message " math . FibArgs " do <nl> - optional : limit , : int64 , 1 <nl> - end <nl> - add_message " math . Num " do <nl> - optional : num , : int64 , 1 <nl> - end <nl> - add_message " math . FibReply " do <nl> - optional : count , : int64 , 1 <nl> + add_file ( " math . proto " , : syntax = > : proto3 ) do <nl> + add_message " math . DivArgs " do <nl> + optional : dividend , : int64 , 1 <nl> + optional : divisor , : int64 , 2 <nl> + end <nl> + add_message " math . DivReply " do <nl> + optional : quotient , : int64 , 1 <nl> + optional : remainder , : int64 , 2 <nl> + end <nl> + add_message " math . FibArgs " do <nl> + optional : limit , : int64 , 1 <nl> + end <nl> + add_message " math . Num " do <nl> + optional : num , : int64 , 1 <nl> + end <nl> + add_message " math . FibReply " do <nl> + optional : count , : int64 , 1 <nl> + end <nl> end <nl> end <nl> <nl> mmm a / src / ruby / pb / grpc / health / v1 / health_pb . rb <nl> ppp b / src / ruby / pb / grpc / health / v1 / health_pb . rb <nl> <nl> require ' google / protobuf ' <nl> <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . health . v1 . HealthCheckRequest " do <nl> - optional : service , : string , 1 <nl> - end <nl> - add_message " grpc . health . v1 . HealthCheckResponse " do <nl> - optional : status , : enum , 1 , " grpc . health . v1 . HealthCheckResponse . ServingStatus " <nl> - end <nl> - add_enum " grpc . health . v1 . HealthCheckResponse . ServingStatus " do <nl> - value : UNKNOWN , 0 <nl> - value : SERVING , 1 <nl> - value : NOT_SERVING , 2 <nl> + add_file ( " grpc / health / v1 / health . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . health . v1 . HealthCheckRequest " do <nl> + optional : service , : string , 1 <nl> + end <nl> + add_message " grpc . health . v1 . HealthCheckResponse " do <nl> + optional : status , : enum , 1 , " grpc . health . v1 . HealthCheckResponse . ServingStatus " <nl> + end <nl> + add_enum " grpc . health . v1 . HealthCheckResponse . ServingStatus " do <nl> + value : UNKNOWN , 0 <nl> + value : SERVING , 1 <nl> + value : NOT_SERVING , 2 <nl> + value : SERVICE_UNKNOWN , 3 <nl> + end <nl> end <nl> end <nl> <nl> mmm a / src / ruby / pb / grpc / health / v1 / health_services_pb . rb <nl> ppp b / src / ruby / pb / grpc / health / v1 / health_services_pb . rb <nl> class Service <nl> self . unmarshal_class_method = : decode <nl> self . service_name = ' grpc . health . v1 . Health ' <nl> <nl> + # If the requested service is unknown , the call will fail with status <nl> + # NOT_FOUND . <nl> rpc : Check , HealthCheckRequest , HealthCheckResponse <nl> + # Performs a watch for the serving status of the requested service . <nl> + # The server will immediately send back a message indicating the current <nl> + # serving status . It will then subsequently send a new message whenever <nl> + # the service ' s serving status changes . 
<nl> + # <nl> + # If the requested service is unknown when the call is received , the <nl> + # server will send a message setting the serving status to <nl> + # SERVICE_UNKNOWN but will * not * terminate the call . If at some <nl> + # future point , the serving status of the service becomes known , the <nl> + # server will send a new message with the service ' s serving status . <nl> + # <nl> + # If the call terminates with status UNIMPLEMENTED , then clients <nl> + # should assume this method is not supported and should not retry the <nl> + # call . If the call terminates with any other status ( including OK ) , <nl> + # clients should retry the call with appropriate exponential backoff . <nl> + rpc : Watch , HealthCheckRequest , stream ( HealthCheckResponse ) <nl> end <nl> <nl> Stub = Service . rpc_stub_class <nl> mmm a / src / ruby / pb / src / proto / grpc / testing / empty_pb . rb <nl> ppp b / src / ruby / pb / src / proto / grpc / testing / empty_pb . rb <nl> <nl> require ' google / protobuf ' <nl> <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . testing . Empty " do <nl> + add_file ( " src / proto / grpc / testing / empty . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . testing . Empty " do <nl> + end <nl> end <nl> end <nl> <nl> mmm a / src / ruby / pb / src / proto / grpc / testing / messages_pb . rb <nl> ppp b / src / ruby / pb / src / proto / grpc / testing / messages_pb . rb <nl> <nl> require ' google / protobuf ' <nl> <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . testing . BoolValue " do <nl> - optional : value , : bool , 1 <nl> - end <nl> - add_message " grpc . testing . Payload " do <nl> - optional : type , : enum , 1 , " grpc . testing . PayloadType " <nl> - optional : body , : bytes , 2 <nl> - end <nl> - add_message " grpc . testing . EchoStatus " do <nl> - optional : code , : int32 , 1 <nl> - optional : message , : string , 2 <nl> - end <nl> - add_message " grpc . testing . SimpleRequest " do <nl> - optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> - optional : response_size , : int32 , 2 <nl> - optional : payload , : message , 3 , " grpc . testing . Payload " <nl> - optional : fill_username , : bool , 4 <nl> - optional : fill_oauth_scope , : bool , 5 <nl> - optional : response_compressed , : message , 6 , " grpc . testing . BoolValue " <nl> - optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> - optional : expect_compressed , : message , 8 , " grpc . testing . BoolValue " <nl> - end <nl> - add_message " grpc . testing . SimpleResponse " do <nl> - optional : payload , : message , 1 , " grpc . testing . Payload " <nl> - optional : username , : string , 2 <nl> - optional : oauth_scope , : string , 3 <nl> - end <nl> - add_message " grpc . testing . StreamingInputCallRequest " do <nl> - optional : payload , : message , 1 , " grpc . testing . Payload " <nl> - optional : expect_compressed , : message , 2 , " grpc . testing . BoolValue " <nl> - end <nl> - add_message " grpc . testing . StreamingInputCallResponse " do <nl> - optional : aggregated_payload_size , : int32 , 1 <nl> - end <nl> - add_message " grpc . testing . ResponseParameters " do <nl> - optional : size , : int32 , 1 <nl> - optional : interval_us , : int32 , 2 <nl> - optional : compressed , : message , 3 , " grpc . testing . BoolValue " <nl> - end <nl> - add_message " grpc . testing . 
StreamingOutputCallRequest " do <nl> - optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> - repeated : response_parameters , : message , 2 , " grpc . testing . ResponseParameters " <nl> - optional : payload , : message , 3 , " grpc . testing . Payload " <nl> - optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> - end <nl> - add_message " grpc . testing . StreamingOutputCallResponse " do <nl> - optional : payload , : message , 1 , " grpc . testing . Payload " <nl> - end <nl> - add_message " grpc . testing . ReconnectParams " do <nl> - optional : max_reconnect_backoff_ms , : int32 , 1 <nl> - end <nl> - add_message " grpc . testing . ReconnectInfo " do <nl> - optional : passed , : bool , 1 <nl> - repeated : backoff_ms , : int32 , 2 <nl> - end <nl> - add_enum " grpc . testing . PayloadType " do <nl> - value : COMPRESSABLE , 0 <nl> + add_file ( " src / proto / grpc / testing / messages . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . testing . BoolValue " do <nl> + optional : value , : bool , 1 <nl> + end <nl> + add_message " grpc . testing . Payload " do <nl> + optional : type , : enum , 1 , " grpc . testing . PayloadType " <nl> + optional : body , : bytes , 2 <nl> + end <nl> + add_message " grpc . testing . EchoStatus " do <nl> + optional : code , : int32 , 1 <nl> + optional : message , : string , 2 <nl> + end <nl> + add_message " grpc . testing . SimpleRequest " do <nl> + optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> + optional : response_size , : int32 , 2 <nl> + optional : payload , : message , 3 , " grpc . testing . Payload " <nl> + optional : fill_username , : bool , 4 <nl> + optional : fill_oauth_scope , : bool , 5 <nl> + optional : response_compressed , : message , 6 , " grpc . testing . BoolValue " <nl> + optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> + optional : expect_compressed , : message , 8 , " grpc . testing . BoolValue " <nl> + end <nl> + add_message " grpc . testing . SimpleResponse " do <nl> + optional : payload , : message , 1 , " grpc . testing . Payload " <nl> + optional : username , : string , 2 <nl> + optional : oauth_scope , : string , 3 <nl> + end <nl> + add_message " grpc . testing . StreamingInputCallRequest " do <nl> + optional : payload , : message , 1 , " grpc . testing . Payload " <nl> + optional : expect_compressed , : message , 2 , " grpc . testing . BoolValue " <nl> + end <nl> + add_message " grpc . testing . StreamingInputCallResponse " do <nl> + optional : aggregated_payload_size , : int32 , 1 <nl> + end <nl> + add_message " grpc . testing . ResponseParameters " do <nl> + optional : size , : int32 , 1 <nl> + optional : interval_us , : int32 , 2 <nl> + optional : compressed , : message , 3 , " grpc . testing . BoolValue " <nl> + end <nl> + add_message " grpc . testing . StreamingOutputCallRequest " do <nl> + optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> + repeated : response_parameters , : message , 2 , " grpc . testing . ResponseParameters " <nl> + optional : payload , : message , 3 , " grpc . testing . Payload " <nl> + optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> + end <nl> + add_message " grpc . testing . StreamingOutputCallResponse " do <nl> + optional : payload , : message , 1 , " grpc . testing . Payload " <nl> + end <nl> + add_message " grpc . testing . 
ReconnectParams " do <nl> + optional : max_reconnect_backoff_ms , : int32 , 1 <nl> + end <nl> + add_message " grpc . testing . ReconnectInfo " do <nl> + optional : passed , : bool , 1 <nl> + repeated : backoff_ms , : int32 , 2 <nl> + end <nl> + add_enum " grpc . testing . PayloadType " do <nl> + value : COMPRESSABLE , 0 <nl> + end <nl> end <nl> end <nl> <nl> mmm a / src / ruby / pb / src / proto / grpc / testing / test_pb . rb <nl> ppp b / src / ruby / pb / src / proto / grpc / testing / test_pb . rb <nl> <nl> require ' src / proto / grpc / testing / empty_pb ' <nl> require ' src / proto / grpc / testing / messages_pb ' <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> + add_file ( " src / proto / grpc / testing / test . proto " , : syntax = > : proto3 ) do <nl> + end <nl> end <nl> <nl> module Grpc <nl> mmm a / src / ruby / qps / src / proto / grpc / core / stats_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / core / stats_pb . rb <nl> <nl> require ' google / protobuf ' <nl> <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . core . Bucket " do <nl> - optional : start , : double , 1 <nl> - optional : count , : uint64 , 2 <nl> - end <nl> - add_message " grpc . core . Histogram " do <nl> - repeated : buckets , : message , 1 , " grpc . core . Bucket " <nl> - end <nl> - add_message " grpc . core . Metric " do <nl> - optional : name , : string , 1 <nl> - oneof : value do <nl> - optional : count , : uint64 , 10 <nl> - optional : histogram , : message , 11 , " grpc . core . Histogram " <nl> + add_file ( " src / proto / grpc / core / stats . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . core . Bucket " do <nl> + optional : start , : double , 1 <nl> + optional : count , : uint64 , 2 <nl> + end <nl> + add_message " grpc . core . Histogram " do <nl> + repeated : buckets , : message , 1 , " grpc . core . Bucket " <nl> + end <nl> + add_message " grpc . core . Metric " do <nl> + optional : name , : string , 1 <nl> + oneof : value do <nl> + optional : count , : uint64 , 10 <nl> + optional : histogram , : message , 11 , " grpc . core . Histogram " <nl> + end <nl> + end <nl> + add_message " grpc . core . Stats " do <nl> + repeated : metrics , : message , 1 , " grpc . core . Metric " <nl> end <nl> - end <nl> - add_message " grpc . core . Stats " do <nl> - repeated : metrics , : message , 1 , " grpc . core . Metric " <nl> end <nl> end <nl> <nl> mmm a / src / ruby / qps / src / proto / grpc / testing / benchmark_service_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / testing / benchmark_service_pb . rb <nl> <nl> <nl> require ' src / proto / grpc / testing / messages_pb ' <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> + add_file ( " src / proto / grpc / testing / benchmark_service . proto " , : syntax = > : proto3 ) do <nl> + end <nl> end <nl> <nl> module Grpc <nl> mmm a / src / ruby / qps / src / proto / grpc / testing / control_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / testing / control_pb . rb <nl> <nl> require ' src / proto / grpc / testing / payloads_pb ' <nl> require ' src / proto / grpc / testing / stats_pb ' <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . testing . PoissonParams " do <nl> - optional : offered_load , : double , 1 <nl> - end <nl> - add_message " grpc . testing . ClosedLoopParams " do <nl> - end <nl> - add_message " grpc . testing . 
LoadParams " do <nl> - oneof : load do <nl> - optional : closed_loop , : message , 1 , " grpc . testing . ClosedLoopParams " <nl> - optional : poisson , : message , 2 , " grpc . testing . PoissonParams " <nl> + add_file ( " src / proto / grpc / testing / control . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . testing . PoissonParams " do <nl> + optional : offered_load , : double , 1 <nl> end <nl> - end <nl> - add_message " grpc . testing . SecurityParams " do <nl> - optional : use_test_ca , : bool , 1 <nl> - optional : server_host_override , : string , 2 <nl> - optional : cred_type , : string , 3 <nl> - end <nl> - add_message " grpc . testing . ChannelArg " do <nl> - optional : name , : string , 1 <nl> - oneof : value do <nl> - optional : str_value , : string , 2 <nl> - optional : int_value , : int32 , 3 <nl> + add_message " grpc . testing . ClosedLoopParams " do <nl> end <nl> - end <nl> - add_message " grpc . testing . ClientConfig " do <nl> - repeated : server_targets , : string , 1 <nl> - optional : client_type , : enum , 2 , " grpc . testing . ClientType " <nl> - optional : security_params , : message , 3 , " grpc . testing . SecurityParams " <nl> - optional : outstanding_rpcs_per_channel , : int32 , 4 <nl> - optional : client_channels , : int32 , 5 <nl> - optional : async_client_threads , : int32 , 7 <nl> - optional : rpc_type , : enum , 8 , " grpc . testing . RpcType " <nl> - optional : load_params , : message , 10 , " grpc . testing . LoadParams " <nl> - optional : payload_config , : message , 11 , " grpc . testing . PayloadConfig " <nl> - optional : histogram_params , : message , 12 , " grpc . testing . HistogramParams " <nl> - repeated : core_list , : int32 , 13 <nl> - optional : core_limit , : int32 , 14 <nl> - optional : other_client_api , : string , 15 <nl> - repeated : channel_args , : message , 16 , " grpc . testing . ChannelArg " <nl> - optional : threads_per_cq , : int32 , 17 <nl> - optional : messages_per_stream , : int32 , 18 <nl> - optional : use_coalesce_api , : bool , 19 <nl> - end <nl> - add_message " grpc . testing . ClientStatus " do <nl> - optional : stats , : message , 1 , " grpc . testing . ClientStats " <nl> - end <nl> - add_message " grpc . testing . Mark " do <nl> - optional : reset , : bool , 1 <nl> - end <nl> - add_message " grpc . testing . ClientArgs " do <nl> - oneof : argtype do <nl> - optional : setup , : message , 1 , " grpc . testing . ClientConfig " <nl> - optional : mark , : message , 2 , " grpc . testing . Mark " <nl> + add_message " grpc . testing . LoadParams " do <nl> + oneof : load do <nl> + optional : closed_loop , : message , 1 , " grpc . testing . ClosedLoopParams " <nl> + optional : poisson , : message , 2 , " grpc . testing . PoissonParams " <nl> + end <nl> end <nl> - end <nl> - add_message " grpc . testing . ServerConfig " do <nl> - optional : server_type , : enum , 1 , " grpc . testing . ServerType " <nl> - optional : security_params , : message , 2 , " grpc . testing . SecurityParams " <nl> - optional : port , : int32 , 4 <nl> - optional : async_server_threads , : int32 , 7 <nl> - optional : core_limit , : int32 , 8 <nl> - optional : payload_config , : message , 9 , " grpc . testing . PayloadConfig " <nl> - repeated : core_list , : int32 , 10 <nl> - optional : other_server_api , : string , 11 <nl> - optional : threads_per_cq , : int32 , 12 <nl> - optional : resource_quota_size , : int32 , 1001 <nl> - repeated : channel_args , : message , 1002 , " grpc . testing . ChannelArg " <nl> - end <nl> - add_message " grpc . 
testing . ServerArgs " do <nl> - oneof : argtype do <nl> - optional : setup , : message , 1 , " grpc . testing . ServerConfig " <nl> - optional : mark , : message , 2 , " grpc . testing . Mark " <nl> + add_message " grpc . testing . SecurityParams " do <nl> + optional : use_test_ca , : bool , 1 <nl> + optional : server_host_override , : string , 2 <nl> + optional : cred_type , : string , 3 <nl> + end <nl> + add_message " grpc . testing . ChannelArg " do <nl> + optional : name , : string , 1 <nl> + oneof : value do <nl> + optional : str_value , : string , 2 <nl> + optional : int_value , : int32 , 3 <nl> + end <nl> + end <nl> + add_message " grpc . testing . ClientConfig " do <nl> + repeated : server_targets , : string , 1 <nl> + optional : client_type , : enum , 2 , " grpc . testing . ClientType " <nl> + optional : security_params , : message , 3 , " grpc . testing . SecurityParams " <nl> + optional : outstanding_rpcs_per_channel , : int32 , 4 <nl> + optional : client_channels , : int32 , 5 <nl> + optional : async_client_threads , : int32 , 7 <nl> + optional : rpc_type , : enum , 8 , " grpc . testing . RpcType " <nl> + optional : load_params , : message , 10 , " grpc . testing . LoadParams " <nl> + optional : payload_config , : message , 11 , " grpc . testing . PayloadConfig " <nl> + optional : histogram_params , : message , 12 , " grpc . testing . HistogramParams " <nl> + repeated : core_list , : int32 , 13 <nl> + optional : core_limit , : int32 , 14 <nl> + optional : other_client_api , : string , 15 <nl> + repeated : channel_args , : message , 16 , " grpc . testing . ChannelArg " <nl> + optional : threads_per_cq , : int32 , 17 <nl> + optional : messages_per_stream , : int32 , 18 <nl> + optional : use_coalesce_api , : bool , 19 <nl> + optional : median_latency_collection_interval_millis , : int32 , 20 <nl> + end <nl> + add_message " grpc . testing . ClientStatus " do <nl> + optional : stats , : message , 1 , " grpc . testing . ClientStats " <nl> + end <nl> + add_message " grpc . testing . Mark " do <nl> + optional : reset , : bool , 1 <nl> + end <nl> + add_message " grpc . testing . ClientArgs " do <nl> + oneof : argtype do <nl> + optional : setup , : message , 1 , " grpc . testing . ClientConfig " <nl> + optional : mark , : message , 2 , " grpc . testing . Mark " <nl> + end <nl> + end <nl> + add_message " grpc . testing . ServerConfig " do <nl> + optional : server_type , : enum , 1 , " grpc . testing . ServerType " <nl> + optional : security_params , : message , 2 , " grpc . testing . SecurityParams " <nl> + optional : port , : int32 , 4 <nl> + optional : async_server_threads , : int32 , 7 <nl> + optional : core_limit , : int32 , 8 <nl> + optional : payload_config , : message , 9 , " grpc . testing . PayloadConfig " <nl> + repeated : core_list , : int32 , 10 <nl> + optional : other_server_api , : string , 11 <nl> + optional : threads_per_cq , : int32 , 12 <nl> + optional : resource_quota_size , : int32 , 1001 <nl> + repeated : channel_args , : message , 1002 , " grpc . testing . ChannelArg " <nl> + end <nl> + add_message " grpc . testing . ServerArgs " do <nl> + oneof : argtype do <nl> + optional : setup , : message , 1 , " grpc . testing . ServerConfig " <nl> + optional : mark , : message , 2 , " grpc . testing . Mark " <nl> + end <nl> + end <nl> + add_message " grpc . testing . ServerStatus " do <nl> + optional : stats , : message , 1 , " grpc . testing . ServerStats " <nl> + optional : port , : int32 , 2 <nl> + optional : cores , : int32 , 3 <nl> + end <nl> + add_message " grpc . 
testing . CoreRequest " do <nl> + end <nl> + add_message " grpc . testing . CoreResponse " do <nl> + optional : cores , : int32 , 1 <nl> + end <nl> + add_message " grpc . testing . Void " do <nl> + end <nl> + add_message " grpc . testing . Scenario " do <nl> + optional : name , : string , 1 <nl> + optional : client_config , : message , 2 , " grpc . testing . ClientConfig " <nl> + optional : num_clients , : int32 , 3 <nl> + optional : server_config , : message , 4 , " grpc . testing . ServerConfig " <nl> + optional : num_servers , : int32 , 5 <nl> + optional : warmup_seconds , : int32 , 6 <nl> + optional : benchmark_seconds , : int32 , 7 <nl> + optional : spawn_local_worker_count , : int32 , 8 <nl> + end <nl> + add_message " grpc . testing . Scenarios " do <nl> + repeated : scenarios , : message , 1 , " grpc . testing . Scenario " <nl> + end <nl> + add_message " grpc . testing . ScenarioResultSummary " do <nl> + optional : qps , : double , 1 <nl> + optional : qps_per_server_core , : double , 2 <nl> + optional : server_system_time , : double , 3 <nl> + optional : server_user_time , : double , 4 <nl> + optional : client_system_time , : double , 5 <nl> + optional : client_user_time , : double , 6 <nl> + optional : latency_50 , : double , 7 <nl> + optional : latency_90 , : double , 8 <nl> + optional : latency_95 , : double , 9 <nl> + optional : latency_99 , : double , 10 <nl> + optional : latency_999 , : double , 11 <nl> + optional : server_cpu_usage , : double , 12 <nl> + optional : successful_requests_per_second , : double , 13 <nl> + optional : failed_requests_per_second , : double , 14 <nl> + optional : client_polls_per_request , : double , 15 <nl> + optional : server_polls_per_request , : double , 16 <nl> + optional : server_queries_per_cpu_sec , : double , 17 <nl> + optional : client_queries_per_cpu_sec , : double , 18 <nl> + end <nl> + add_message " grpc . testing . ScenarioResult " do <nl> + optional : scenario , : message , 1 , " grpc . testing . Scenario " <nl> + optional : latencies , : message , 2 , " grpc . testing . HistogramData " <nl> + repeated : client_stats , : message , 3 , " grpc . testing . ClientStats " <nl> + repeated : server_stats , : message , 4 , " grpc . testing . ServerStats " <nl> + repeated : server_cores , : int32 , 5 <nl> + optional : summary , : message , 6 , " grpc . testing . ScenarioResultSummary " <nl> + repeated : client_success , : bool , 7 <nl> + repeated : server_success , : bool , 8 <nl> + repeated : request_results , : message , 9 , " grpc . testing . RequestResultCount " <nl> + end <nl> + add_enum " grpc . testing . ClientType " do <nl> + value : SYNC_CLIENT , 0 <nl> + value : ASYNC_CLIENT , 1 <nl> + value : OTHER_CLIENT , 2 <nl> + value : CALLBACK_CLIENT , 3 <nl> + end <nl> + add_enum " grpc . testing . ServerType " do <nl> + value : SYNC_SERVER , 0 <nl> + value : ASYNC_SERVER , 1 <nl> + value : ASYNC_GENERIC_SERVER , 2 <nl> + value : OTHER_SERVER , 3 <nl> + value : CALLBACK_SERVER , 4 <nl> + end <nl> + add_enum " grpc . testing . RpcType " do <nl> + value : UNARY , 0 <nl> + value : STREAMING , 1 <nl> + value : STREAMING_FROM_CLIENT , 2 <nl> + value : STREAMING_FROM_SERVER , 3 <nl> + value : STREAMING_BOTH_WAYS , 4 <nl> end <nl> - end <nl> - add_message " grpc . testing . ServerStatus " do <nl> - optional : stats , : message , 1 , " grpc . testing . ServerStats " <nl> - optional : port , : int32 , 2 <nl> - optional : cores , : int32 , 3 <nl> - end <nl> - add_message " grpc . testing . CoreRequest " do <nl> - end <nl> - add_message " grpc . 
testing . CoreResponse " do <nl> - optional : cores , : int32 , 1 <nl> - end <nl> - add_message " grpc . testing . Void " do <nl> - end <nl> - add_message " grpc . testing . Scenario " do <nl> - optional : name , : string , 1 <nl> - optional : client_config , : message , 2 , " grpc . testing . ClientConfig " <nl> - optional : num_clients , : int32 , 3 <nl> - optional : server_config , : message , 4 , " grpc . testing . ServerConfig " <nl> - optional : num_servers , : int32 , 5 <nl> - optional : warmup_seconds , : int32 , 6 <nl> - optional : benchmark_seconds , : int32 , 7 <nl> - optional : spawn_local_worker_count , : int32 , 8 <nl> - end <nl> - add_message " grpc . testing . Scenarios " do <nl> - repeated : scenarios , : message , 1 , " grpc . testing . Scenario " <nl> - end <nl> - add_message " grpc . testing . ScenarioResultSummary " do <nl> - optional : qps , : double , 1 <nl> - optional : qps_per_server_core , : double , 2 <nl> - optional : server_system_time , : double , 3 <nl> - optional : server_user_time , : double , 4 <nl> - optional : client_system_time , : double , 5 <nl> - optional : client_user_time , : double , 6 <nl> - optional : latency_50 , : double , 7 <nl> - optional : latency_90 , : double , 8 <nl> - optional : latency_95 , : double , 9 <nl> - optional : latency_99 , : double , 10 <nl> - optional : latency_999 , : double , 11 <nl> - optional : server_cpu_usage , : double , 12 <nl> - optional : successful_requests_per_second , : double , 13 <nl> - optional : failed_requests_per_second , : double , 14 <nl> - optional : client_polls_per_request , : double , 15 <nl> - optional : server_polls_per_request , : double , 16 <nl> - optional : server_queries_per_cpu_sec , : double , 17 <nl> - optional : client_queries_per_cpu_sec , : double , 18 <nl> - end <nl> - add_message " grpc . testing . ScenarioResult " do <nl> - optional : scenario , : message , 1 , " grpc . testing . Scenario " <nl> - optional : latencies , : message , 2 , " grpc . testing . HistogramData " <nl> - repeated : client_stats , : message , 3 , " grpc . testing . ClientStats " <nl> - repeated : server_stats , : message , 4 , " grpc . testing . ServerStats " <nl> - repeated : server_cores , : int32 , 5 <nl> - optional : summary , : message , 6 , " grpc . testing . ScenarioResultSummary " <nl> - repeated : client_success , : bool , 7 <nl> - repeated : server_success , : bool , 8 <nl> - repeated : request_results , : message , 9 , " grpc . testing . RequestResultCount " <nl> - end <nl> - add_enum " grpc . testing . ClientType " do <nl> - value : SYNC_CLIENT , 0 <nl> - value : ASYNC_CLIENT , 1 <nl> - value : OTHER_CLIENT , 2 <nl> - end <nl> - add_enum " grpc . testing . ServerType " do <nl> - value : SYNC_SERVER , 0 <nl> - value : ASYNC_SERVER , 1 <nl> - value : ASYNC_GENERIC_SERVER , 2 <nl> - value : OTHER_SERVER , 3 <nl> - end <nl> - add_enum " grpc . testing . RpcType " do <nl> - value : UNARY , 0 <nl> - value : STREAMING , 1 <nl> - value : STREAMING_FROM_CLIENT , 2 <nl> - value : STREAMING_FROM_SERVER , 3 <nl> - value : STREAMING_BOTH_WAYS , 4 <nl> end <nl> end <nl> <nl> mmm a / src / ruby / qps / src / proto / grpc / testing / messages_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / testing / messages_pb . rb <nl> <nl> require ' google / protobuf ' <nl> <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . testing . BoolValue " do <nl> - optional : value , : bool , 1 <nl> - end <nl> - add_message " grpc . testing . 
Payload " do <nl> - optional : type , : enum , 1 , " grpc . testing . PayloadType " <nl> - optional : body , : bytes , 2 <nl> - end <nl> - add_message " grpc . testing . EchoStatus " do <nl> - optional : code , : int32 , 1 <nl> - optional : message , : string , 2 <nl> - end <nl> - add_message " grpc . testing . SimpleRequest " do <nl> - optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> - optional : response_size , : int32 , 2 <nl> - optional : payload , : message , 3 , " grpc . testing . Payload " <nl> - optional : fill_username , : bool , 4 <nl> - optional : fill_oauth_scope , : bool , 5 <nl> - optional : response_compressed , : message , 6 , " grpc . testing . BoolValue " <nl> - optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> - optional : expect_compressed , : message , 8 , " grpc . testing . BoolValue " <nl> - end <nl> - add_message " grpc . testing . SimpleResponse " do <nl> - optional : payload , : message , 1 , " grpc . testing . Payload " <nl> - optional : username , : string , 2 <nl> - optional : oauth_scope , : string , 3 <nl> - end <nl> - add_message " grpc . testing . StreamingInputCallRequest " do <nl> - optional : payload , : message , 1 , " grpc . testing . Payload " <nl> - optional : expect_compressed , : message , 2 , " grpc . testing . BoolValue " <nl> - end <nl> - add_message " grpc . testing . StreamingInputCallResponse " do <nl> - optional : aggregated_payload_size , : int32 , 1 <nl> - end <nl> - add_message " grpc . testing . ResponseParameters " do <nl> - optional : size , : int32 , 1 <nl> - optional : interval_us , : int32 , 2 <nl> - optional : compressed , : message , 3 , " grpc . testing . BoolValue " <nl> - end <nl> - add_message " grpc . testing . StreamingOutputCallRequest " do <nl> - optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> - repeated : response_parameters , : message , 2 , " grpc . testing . ResponseParameters " <nl> - optional : payload , : message , 3 , " grpc . testing . Payload " <nl> - optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> - end <nl> - add_message " grpc . testing . StreamingOutputCallResponse " do <nl> - optional : payload , : message , 1 , " grpc . testing . Payload " <nl> - end <nl> - add_message " grpc . testing . ReconnectParams " do <nl> - optional : max_reconnect_backoff_ms , : int32 , 1 <nl> - end <nl> - add_message " grpc . testing . ReconnectInfo " do <nl> - optional : passed , : bool , 1 <nl> - repeated : backoff_ms , : int32 , 2 <nl> - end <nl> - add_enum " grpc . testing . PayloadType " do <nl> - value : COMPRESSABLE , 0 <nl> + add_file ( " src / proto / grpc / testing / messages . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . testing . BoolValue " do <nl> + optional : value , : bool , 1 <nl> + end <nl> + add_message " grpc . testing . Payload " do <nl> + optional : type , : enum , 1 , " grpc . testing . PayloadType " <nl> + optional : body , : bytes , 2 <nl> + end <nl> + add_message " grpc . testing . EchoStatus " do <nl> + optional : code , : int32 , 1 <nl> + optional : message , : string , 2 <nl> + end <nl> + add_message " grpc . testing . SimpleRequest " do <nl> + optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> + optional : response_size , : int32 , 2 <nl> + optional : payload , : message , 3 , " grpc . testing . 
Payload " <nl> + optional : fill_username , : bool , 4 <nl> + optional : fill_oauth_scope , : bool , 5 <nl> + optional : response_compressed , : message , 6 , " grpc . testing . BoolValue " <nl> + optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> + optional : expect_compressed , : message , 8 , " grpc . testing . BoolValue " <nl> + end <nl> + add_message " grpc . testing . SimpleResponse " do <nl> + optional : payload , : message , 1 , " grpc . testing . Payload " <nl> + optional : username , : string , 2 <nl> + optional : oauth_scope , : string , 3 <nl> + end <nl> + add_message " grpc . testing . StreamingInputCallRequest " do <nl> + optional : payload , : message , 1 , " grpc . testing . Payload " <nl> + optional : expect_compressed , : message , 2 , " grpc . testing . BoolValue " <nl> + end <nl> + add_message " grpc . testing . StreamingInputCallResponse " do <nl> + optional : aggregated_payload_size , : int32 , 1 <nl> + end <nl> + add_message " grpc . testing . ResponseParameters " do <nl> + optional : size , : int32 , 1 <nl> + optional : interval_us , : int32 , 2 <nl> + optional : compressed , : message , 3 , " grpc . testing . BoolValue " <nl> + end <nl> + add_message " grpc . testing . StreamingOutputCallRequest " do <nl> + optional : response_type , : enum , 1 , " grpc . testing . PayloadType " <nl> + repeated : response_parameters , : message , 2 , " grpc . testing . ResponseParameters " <nl> + optional : payload , : message , 3 , " grpc . testing . Payload " <nl> + optional : response_status , : message , 7 , " grpc . testing . EchoStatus " <nl> + end <nl> + add_message " grpc . testing . StreamingOutputCallResponse " do <nl> + optional : payload , : message , 1 , " grpc . testing . Payload " <nl> + end <nl> + add_message " grpc . testing . ReconnectParams " do <nl> + optional : max_reconnect_backoff_ms , : int32 , 1 <nl> + end <nl> + add_message " grpc . testing . ReconnectInfo " do <nl> + optional : passed , : bool , 1 <nl> + repeated : backoff_ms , : int32 , 2 <nl> + end <nl> + add_enum " grpc . testing . PayloadType " do <nl> + value : COMPRESSABLE , 0 <nl> + end <nl> end <nl> end <nl> <nl> mmm a / src / ruby / qps / src / proto / grpc / testing / payloads_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / testing / payloads_pb . rb <nl> <nl> require ' google / protobuf ' <nl> <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . testing . ByteBufferParams " do <nl> - optional : req_size , : int32 , 1 <nl> - optional : resp_size , : int32 , 2 <nl> - end <nl> - add_message " grpc . testing . SimpleProtoParams " do <nl> - optional : req_size , : int32 , 1 <nl> - optional : resp_size , : int32 , 2 <nl> - end <nl> - add_message " grpc . testing . ComplexProtoParams " do <nl> - end <nl> - add_message " grpc . testing . PayloadConfig " do <nl> - oneof : payload do <nl> - optional : bytebuf_params , : message , 1 , " grpc . testing . ByteBufferParams " <nl> - optional : simple_params , : message , 2 , " grpc . testing . SimpleProtoParams " <nl> - optional : complex_params , : message , 3 , " grpc . testing . ComplexProtoParams " <nl> + add_file ( " src / proto / grpc / testing / payloads . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . testing . ByteBufferParams " do <nl> + optional : req_size , : int32 , 1 <nl> + optional : resp_size , : int32 , 2 <nl> + end <nl> + add_message " grpc . testing . 
SimpleProtoParams " do <nl> + optional : req_size , : int32 , 1 <nl> + optional : resp_size , : int32 , 2 <nl> + end <nl> + add_message " grpc . testing . ComplexProtoParams " do <nl> + end <nl> + add_message " grpc . testing . PayloadConfig " do <nl> + oneof : payload do <nl> + optional : bytebuf_params , : message , 1 , " grpc . testing . ByteBufferParams " <nl> + optional : simple_params , : message , 2 , " grpc . testing . SimpleProtoParams " <nl> + optional : complex_params , : message , 3 , " grpc . testing . ComplexProtoParams " <nl> + end <nl> end <nl> end <nl> end <nl> mmm a / src / ruby / qps / src / proto / grpc / testing / report_qps_scenario_service_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / testing / report_qps_scenario_service_pb . rb <nl> <nl> <nl> require ' src / proto / grpc / testing / control_pb ' <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> + add_file ( " src / proto / grpc / testing / report_qps_scenario_service . proto " , : syntax = > : proto3 ) do <nl> + end <nl> end <nl> <nl> module Grpc <nl> mmm a / src / ruby / qps / src / proto / grpc / testing / stats_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / testing / stats_pb . rb <nl> <nl> <nl> require ' src / proto / grpc / core / stats_pb ' <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> - add_message " grpc . testing . ServerStats " do <nl> - optional : time_elapsed , : double , 1 <nl> - optional : time_user , : double , 2 <nl> - optional : time_system , : double , 3 <nl> - optional : total_cpu_time , : uint64 , 4 <nl> - optional : idle_cpu_time , : uint64 , 5 <nl> - optional : cq_poll_count , : uint64 , 6 <nl> - optional : core_stats , : message , 7 , " grpc . core . Stats " <nl> - end <nl> - add_message " grpc . testing . HistogramParams " do <nl> - optional : resolution , : double , 1 <nl> - optional : max_possible , : double , 2 <nl> - end <nl> - add_message " grpc . testing . HistogramData " do <nl> - repeated : bucket , : uint32 , 1 <nl> - optional : min_seen , : double , 2 <nl> - optional : max_seen , : double , 3 <nl> - optional : sum , : double , 4 <nl> - optional : sum_of_squares , : double , 5 <nl> - optional : count , : double , 6 <nl> - end <nl> - add_message " grpc . testing . RequestResultCount " do <nl> - optional : status_code , : int32 , 1 <nl> - optional : count , : int64 , 2 <nl> - end <nl> - add_message " grpc . testing . ClientStats " do <nl> - optional : latencies , : message , 1 , " grpc . testing . HistogramData " <nl> - optional : time_elapsed , : double , 2 <nl> - optional : time_user , : double , 3 <nl> - optional : time_system , : double , 4 <nl> - repeated : request_results , : message , 5 , " grpc . testing . RequestResultCount " <nl> - optional : cq_poll_count , : uint64 , 6 <nl> - optional : core_stats , : message , 7 , " grpc . core . Stats " <nl> + add_file ( " src / proto / grpc / testing / stats . proto " , : syntax = > : proto3 ) do <nl> + add_message " grpc . testing . ServerStats " do <nl> + optional : time_elapsed , : double , 1 <nl> + optional : time_user , : double , 2 <nl> + optional : time_system , : double , 3 <nl> + optional : total_cpu_time , : uint64 , 4 <nl> + optional : idle_cpu_time , : uint64 , 5 <nl> + optional : cq_poll_count , : uint64 , 6 <nl> + optional : core_stats , : message , 7 , " grpc . core . Stats " <nl> + end <nl> + add_message " grpc . testing . 
HistogramParams " do <nl> + optional : resolution , : double , 1 <nl> + optional : max_possible , : double , 2 <nl> + end <nl> + add_message " grpc . testing . HistogramData " do <nl> + repeated : bucket , : uint32 , 1 <nl> + optional : min_seen , : double , 2 <nl> + optional : max_seen , : double , 3 <nl> + optional : sum , : double , 4 <nl> + optional : sum_of_squares , : double , 5 <nl> + optional : count , : double , 6 <nl> + end <nl> + add_message " grpc . testing . RequestResultCount " do <nl> + optional : status_code , : int32 , 1 <nl> + optional : count , : int64 , 2 <nl> + end <nl> + add_message " grpc . testing . ClientStats " do <nl> + optional : latencies , : message , 1 , " grpc . testing . HistogramData " <nl> + optional : time_elapsed , : double , 2 <nl> + optional : time_user , : double , 3 <nl> + optional : time_system , : double , 4 <nl> + repeated : request_results , : message , 5 , " grpc . testing . RequestResultCount " <nl> + optional : cq_poll_count , : uint64 , 6 <nl> + optional : core_stats , : message , 7 , " grpc . core . Stats " <nl> + end <nl> end <nl> end <nl> <nl> mmm a / src / ruby / qps / src / proto / grpc / testing / worker_service_pb . rb <nl> ppp b / src / ruby / qps / src / proto / grpc / testing / worker_service_pb . rb <nl> <nl> <nl> require ' src / proto / grpc / testing / control_pb ' <nl> Google : : Protobuf : : DescriptorPool . generated_pool . build do <nl> + add_file ( " src / proto / grpc / testing / worker_service . proto " , : syntax = > : proto3 ) do <nl> + end <nl> end <nl> <nl> module Grpc <nl>
|
regenerate ruby protos
|
grpc/grpc
|
b667c2f72ffecb1de672f5a7fb837737bf9ee405
|
2019-03-12T09:09:46Z
|
mmm a / include / swift / AST / Decl . h <nl> ppp b / include / swift / AST / Decl . h <nl> class ValueDecl : public Decl { <nl> / / / function requires capturing it . <nl> bool needsCapture ( ) const ; <nl> <nl> + / / / Retrieve the context discriminator for this local value , which <nl> + / / / is the index of this declaration in the sequence of <nl> + / / / discriminated declarations with the same name in the current <nl> + / / / context . Only local functions and variables with getters and <nl> + / / / setters have discriminators . <nl> + unsigned getLocalDiscriminator ( ) const ; <nl> + void setLocalDiscriminator ( unsigned index ) ; <nl> + <nl> / / / Retrieve the declaration that this declaration overrides , if any . <nl> ValueDecl * getOverriddenDecl ( ) const ; <nl> <nl> mmm a / include / swift / Parse / Parser . h <nl> ppp b / include / swift / Parse / Parser . h <nl> class Parser { <nl> unsigned VarPatternDepth = 0 ; <nl> bool GreaterThanIsOperator = true ; <nl> <nl> + typedef llvm : : DenseMap < Identifier , unsigned > LocalDiscriminatorMap ; <nl> + LocalDiscriminatorMap * LocalDiscriminators = nullptr ; <nl> + <nl> DelayedParsingCallbacks * DelayedParseCB = nullptr ; <nl> <nl> bool isDelayedParsingEnabled ( ) const { return DelayedParseCB ! = nullptr ; } <nl> class Parser { <nl> <nl> / / / A RAII object for temporarily changing CurDeclContext . <nl> class ContextChange { <nl> + protected : <nl> Parser & P ; <nl> DeclContext * OldContext ; <nl> + LocalDiscriminatorMap * OldDiscriminators ; <nl> public : <nl> - ContextChange ( Parser & P , DeclContext * DC ) <nl> - : P ( P ) , OldContext ( P . CurDeclContext ) { <nl> + ContextChange ( Parser & P , DeclContext * DC , <nl> + LocalDiscriminatorMap * discriminators = nullptr ) <nl> + : P ( P ) , OldContext ( P . CurDeclContext ) , <nl> + OldDiscriminators ( P . LocalDiscriminators ) { <nl> assert ( DC & & " pushing null context ? " ) ; <nl> P . CurDeclContext = DC ; <nl> + P . LocalDiscriminators = discriminators ; <nl> } <nl> <nl> / / / Prematurely pop the DeclContext installed by the constructor . <nl> / / / Makes the destructor a no - op . <nl> void pop ( ) { <nl> assert ( OldContext & & " already popped context ! " ) ; <nl> - P . CurDeclContext = OldContext ; <nl> + popImpl ( ) ; <nl> OldContext = nullptr ; <nl> } <nl> <nl> ~ ContextChange ( ) { <nl> - if ( OldContext ) P . CurDeclContext = OldContext ; <nl> + if ( OldContext ) popImpl ( ) ; <nl> + } <nl> + <nl> + private : <nl> + void popImpl ( ) { <nl> + P . CurDeclContext = OldContext ; <nl> + P . LocalDiscriminators = OldDiscriminators ; <nl> + } <nl> + } ; <nl> + <nl> + / / / A RAII object for parsing a new function / closure body . <nl> + class ParseFunctionBody { <nl> + LocalDiscriminatorMap LocalDiscriminators ; <nl> + ContextChange CC ; <nl> + public : <nl> + ParseFunctionBody ( Parser & P , DeclContext * DC ) <nl> + : CC ( P , DC , & LocalDiscriminators ) { } <nl> + <nl> + void pop ( ) { <nl> + CC . pop ( ) ; <nl> } <nl> } ; <nl> <nl> class Parser { <nl> bool isAssociatedType , <nl> DeclAttributes & Attributes ) ; <nl> <nl> + void setLocalDiscriminator ( ValueDecl * D ) ; <nl> + <nl> / / / \ brief Add the variables in the given pattern to the current scope , <nl> / / / collecting the variables in the vector \ c Decls and applying <nl> / / / \ c Attributes and \ c Static to each one . <nl> mmm a / lib / AST / ASTContext . cpp <nl> ppp b / lib / AST / ASTContext . cpp <nl> struct ASTContext : : Implementation { <nl> / / / they were imported . 
<nl> llvm : : DenseMap < swift : : Decl * , ClangNode > ClangNodes ; <nl> <nl> + / / / \ brief Map from local declarations to their discriminators . <nl> + / / / Missing entries implicitly have value 0 . <nl> + llvm : : DenseMap < const ValueDecl * , unsigned > LocalDiscriminators ; <nl> + <nl> / / / \ brief Structure that captures data that is segregated into different <nl> / / / arenas . <nl> struct Arena { <nl> void ASTContext : : setClangNode ( Decl * decl , ClangNode node ) { <nl> Impl . ClangNodes [ decl ] = node ; <nl> } <nl> <nl> + unsigned ValueDecl : : getLocalDiscriminator ( ) const { <nl> + assert ( getDeclContext ( ) - > isLocalContext ( ) ) ; <nl> + auto & discriminators = getASTContext ( ) . Impl . LocalDiscriminators ; <nl> + auto it = discriminators . find ( this ) ; <nl> + if ( it = = discriminators . end ( ) ) <nl> + return 0 ; <nl> + return it - > second ; <nl> + } <nl> + <nl> + void ValueDecl : : setLocalDiscriminator ( unsigned index ) { <nl> + assert ( getDeclContext ( ) - > isLocalContext ( ) ) ; <nl> + if ( ! index ) { <nl> + assert ( ! getASTContext ( ) . Impl . LocalDiscriminators . count ( this ) ) ; <nl> + return ; <nl> + } <nl> + getASTContext ( ) . Impl . LocalDiscriminators . insert ( { this , index } ) ; <nl> + } <nl> + <nl> void ASTContext : : recordConformance ( KnownProtocolKind protocolKind , Decl * decl ) { <nl> assert ( isa < NominalTypeDecl > ( decl ) | | isa < ExtensionDecl > ( decl ) ) ; <nl> auto index = static_cast < unsigned > ( protocolKind ) ; <nl> mmm a / lib / Parse / ParseDecl . cpp <nl> ppp b / lib / Parse / ParseDecl . cpp <nl> void Parser : : consumeDecl ( ParserPosition BeginParserPosition , unsigned Flags , <nl> } <nl> } <nl> <nl> + void Parser : : setLocalDiscriminator ( ValueDecl * D ) { <nl> + / / If we ' re not in a local context , this is unnecessary . <nl> + if ( ! LocalDiscriminators ) return ; <nl> + <nl> + Identifier name = D - > getName ( ) ; <nl> + assert ( ! name . empty ( ) & & <nl> + " setting a local discriminator on an anonymous decl ; " <nl> + " maybe the name hasn ' t been set yet ? " ) ; <nl> + unsigned discriminator = ( * LocalDiscriminators ) [ name ] + + ; <nl> + D - > setLocalDiscriminator ( discriminator ) ; <nl> + } <nl> + <nl> / / / \ brief Parse a single syntactic declaration and return a list of decl <nl> / / / ASTs . This can return multiple results for var decls that bind to multiple <nl> / / / values , structs that define a struct decl and a constructor , etc . <nl> bool Parser : : parseGetSet ( bool HasContainerType , Pattern * Indices , <nl> addFunctionParametersToScope ( Get - > getBodyParamPatterns ( ) , Get ) ; <nl> <nl> / / Establish the new context . <nl> - ContextChange CC ( * this , Get ) ; <nl> + ParseFunctionBody CC ( * this , Get ) ; <nl> <nl> SmallVector < ASTNode , 16 > Entries ; <nl> parseBraceItems ( Entries , BraceItemListKind : : Variable ) ; <nl> bool Parser : : parseGetSet ( bool HasContainerType , Pattern * Indices , <nl> addFunctionParametersToScope ( Set - > getBodyParamPatterns ( ) , Set ) ; <nl> <nl> / / Establish the new context . <nl> - ContextChange CC ( * this , Set ) ; <nl> + ParseFunctionBody CC ( * this , Set ) ; <nl> <nl> / / Parse the body . <nl> SmallVector < ASTNode , 16 > Entries ; <nl> void Parser : : parseDeclVarGetSet ( Pattern & pattern , bool HasContainerType , <nl> diagnose ( pattern . 
getLoc ( ) , diag : : getset_missing_type ) ; <nl> TyLoc = TypeLoc : : withoutLoc ( ErrorType : : get ( Context ) ) ; <nl> } <nl> + <nl> + setLocalDiscriminator ( PrimaryVar ) ; <nl> <nl> SourceLoc LBLoc = consumeToken ( tok : : l_brace ) ; <nl> <nl> Parser : : parseDeclFunc ( SourceLoc StaticLoc , unsigned Flags , <nl> <nl> addFunctionParametersToScope ( FD - > getBodyParamPatterns ( ) , FD ) ; <nl> setVarContext ( FD - > getArgParamPatterns ( ) , FD ) ; <nl> + setLocalDiscriminator ( FD ) ; <nl> <nl> / / Now that we have a context , update the generic parameters with that <nl> / / context . <nl> Parser : : parseDeclFunc ( SourceLoc StaticLoc , unsigned Flags , <nl> } <nl> <nl> / / Establish the new context . <nl> - ContextChange CC ( * this , FD ) ; <nl> + ParseFunctionBody CC ( * this , FD ) ; <nl> <nl> / / Check to see if we have a " { " to start a brace statement . <nl> if ( Tok . is ( tok : : l_brace ) ) { <nl> bool Parser : : parseAbstractFunctionBodyDelayed ( AbstractFunctionDecl * AFD ) { <nl> <nl> / / Re - enter the lexical scope . <nl> Scope S ( this , FunctionParserState - > takeScope ( ) ) ; <nl> - ContextChange CC ( * this , AFD ) ; <nl> + ParseFunctionBody CC ( * this , AFD ) ; <nl> <nl> ParserResult < BraceStmt > Body = <nl> parseBraceItemList ( diag : : func_decl_without_brace ) ; <nl> ParserResult < EnumDecl > Parser : : parseDeclEnum ( unsigned Flags , <nl> <nl> EnumDecl * UD = new ( Context ) EnumDecl ( EnumLoc , EnumName , EnumNameLoc , <nl> { } , GenericParams , CurDeclContext ) ; <nl> + setLocalDiscriminator ( UD ) ; <nl> <nl> if ( Attributes . isValid ( ) ) <nl> UD - > getMutableAttrs ( ) = Attributes ; <nl> ParserResult < StructDecl > Parser : : parseDeclStruct ( unsigned Flags , <nl> { } , <nl> GenericParams , <nl> CurDeclContext ) ; <nl> + setLocalDiscriminator ( SD ) ; <nl> <nl> if ( Attributes . isValid ( ) ) <nl> SD - > getMutableAttrs ( ) = Attributes ; <nl> ParserResult < ClassDecl > Parser : : parseDeclClass ( unsigned Flags , <nl> / / Create the class . <nl> ClassDecl * CD = new ( Context ) ClassDecl ( ClassLoc , ClassName , ClassNameLoc , <nl> { } , GenericParams , CurDeclContext ) ; <nl> + setLocalDiscriminator ( CD ) ; <nl> <nl> / / Attach attributes . <nl> if ( Attributes . isValid ( ) ) <nl> parseDeclProtocol ( unsigned Flags , DeclAttributes & Attributes ) { <nl> = new ( Context ) ProtocolDecl ( CurDeclContext , ProtocolLoc , NameLoc , <nl> ProtocolName , <nl> Context . AllocateCopy ( InheritedProtocols ) ) ; <nl> + / / No need to setLocalDiscriminator : protocols can ' t appear in local contexts . <nl> <nl> if ( Attributes . isValid ( ) ) <nl> Proto - > getMutableAttrs ( ) = Attributes ; <nl> ParserStatus Parser : : parseDeclSubscript ( bool HasContainerType , <nl> SubscriptLoc , Indices . get ( ) , ArrowLoc , <nl> ElementTy . get ( ) , DefRange , <nl> Get , Set , CurDeclContext ) ; <nl> + / / No need to setLocalDiscriminator because subscripts cannot <nl> + / / validly appear outside of type decls . <nl> + <nl> if ( Attributes . isValid ( ) ) <nl> Subscript - > getMutableAttrs ( ) = Attributes ; <nl> <nl> Parser : : parseDeclConstructor ( unsigned Flags , DeclAttributes & Attributes ) { <nl> new ( Context ) ConstructorDecl ( Context . getIdentifier ( " init " ) , <nl> ConstructorLoc , ArgPattern , BodyPattern , <nl> SelfDecl , GenericParams , CurDeclContext ) ; <nl> + / / No need to setLocalDiscriminator . 
<nl> <nl> if ( HasSelectorStyleSignature ) <nl> CD - > setHasSelectorStyleSignature ( ) ; <nl> Parser : : parseDeclConstructor ( unsigned Flags , DeclAttributes & Attributes ) { <nl> } <nl> } else { <nl> / / Parse the body . <nl> - ContextChange CC ( * this , CD ) ; <nl> + ParseFunctionBody CC ( * this , CD ) ; <nl> <nl> if ( ! isDelayedParsingEnabled ( ) ) { <nl> ParserResult < BraceStmt > Body = parseBraceItemList ( diag : : invalid_diagnostic ) ; <nl> parseDeclDestructor ( unsigned Flags , DeclAttributes & Attributes ) { <nl> DestructorDecl * DD <nl> = new ( Context ) DestructorDecl ( Context . getIdentifier ( " destructor " ) , <nl> DestructorLoc , SelfDecl , CurDeclContext ) ; <nl> + / / No need to setLocalDiscriminator . <nl> <nl> SelfDecl - > setDeclContext ( DD ) ; <nl> addToScope ( SelfDecl ) ; <nl> <nl> / / Parse the body . <nl> if ( Tok . is ( tok : : l_brace ) ) { <nl> - ContextChange CC ( * this , DD ) ; <nl> + ParseFunctionBody CC ( * this , DD ) ; <nl> if ( ! isDelayedParsingEnabled ( ) ) { <nl> ParserResult < BraceStmt > Body = parseBraceItemList ( diag : : invalid_diagnostic ) ; <nl> <nl>
|
Track a discriminator for named declarations (except physical
|
apple/swift
|
892a90033283dd9b3a48173803d6683b7a6143dd
|
2013-12-05T01:42:09Z
|
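The commit above gives each named local declaration a per-name discriminator: the parser bumps a counter keyed by identifier, and the AST context stores only non-zero values, so a missing entry implicitly means 0. The following Python sketch models that counting scheme only; the class and method names (LocalScope, set_local_discriminator) are illustrative and are not part of the Swift sources.

from collections import defaultdict

class LocalScope:
    # Illustrative model of Parser::setLocalDiscriminator and the
    # ASTContext LocalDiscriminators map from the diff above.
    def __init__(self):
        self.counters = defaultdict(int)   # next discriminator per identifier
        self.discriminators = {}           # decl -> discriminator; zero is never stored

    def set_local_discriminator(self, decl, name):
        index = self.counters[name]
        self.counters[name] += 1
        if index:                          # value 0 stays implicit, mirroring the DenseMap
            self.discriminators[decl] = index

    def get_local_discriminator(self, decl):
        return self.discriminators.get(decl, 0)

scope = LocalScope()
scope.set_local_discriminator("f#1", "f")
scope.set_local_discriminator("f#2", "f")
assert scope.get_local_discriminator("f#1") == 0
assert scope.get_local_discriminator("f#2") == 1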
mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> ERROR ( not_objc_function_async , none , <nl> " ' async ' function cannot be represented in Objective - C " , ( ) ) <nl> NOTE ( not_objc_function_type_async , none , <nl> " ' async ' function types cannot be represented in Objective - C " , ( ) ) <nl> + ERROR ( actor_isolated_objc , none , <nl> + " actor - isolated % 0 % 1 cannot be @ objc " , <nl> + ( DescriptiveDeclKind , DeclName ) ) <nl> NOTE ( protocol_witness_async_conflict , none , <nl> " candidate is % select { not | } 0 ' async ' , but protocol requirement is % select { | not } 0 " , <nl> ( bool ) ) <nl> mmm a / lib / Sema / TypeCheckDeclObjC . cpp <nl> ppp b / lib / Sema / TypeCheckDeclObjC . cpp <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> # include " TypeCheckObjC . h " <nl> # include " TypeChecker . h " <nl> + # include " TypeCheckConcurrency . h " <nl> # include " TypeCheckProtocol . h " <nl> # include " swift / AST / ASTContext . h " <nl> # include " swift / AST / Decl . h " <nl> static bool checkObjCInForeignClassContext ( const ValueDecl * VD , <nl> return true ; <nl> } <nl> <nl> + / / / Actor - isolated declarations cannot be @ objc . <nl> + static bool checkObjCActorIsolation ( const ValueDecl * VD , <nl> + ObjCReason Reason ) { <nl> + / / Check actor isolation . <nl> + bool Diagnose = shouldDiagnoseObjCReason ( Reason , VD - > getASTContext ( ) ) ; <nl> + <nl> + switch ( getActorIsolation ( const_cast < ValueDecl * > ( VD ) ) ) { <nl> + case ActorIsolation : : ActorInstance : <nl> + / / Actor - isolated functions cannot be @ objc . <nl> + if ( Diagnose ) { <nl> + VD - > diagnose ( <nl> + diag : : actor_isolated_objc , VD - > getDescriptiveKind ( ) , VD - > getName ( ) ) ; <nl> + describeObjCReason ( VD , Reason ) ; <nl> + if ( auto FD = dyn_cast < FuncDecl > ( VD ) ) { <nl> + addAsyncNotes ( const_cast < FuncDecl * > ( FD ) ) ; <nl> + } <nl> + } <nl> + return true ; <nl> + <nl> + case ActorIsolation : : ActorPrivileged : <nl> + case ActorIsolation : : Independent : <nl> + case ActorIsolation : : Unspecified : <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> static VersionRange getMinOSVersionForClassStubs ( const llvm : : Triple & target ) { <nl> if ( target . isMacOSX ( ) ) <nl> return VersionRange : : allGTE ( llvm : : VersionTuple ( 10 , 15 , 0 ) ) ; <nl> bool swift : : isRepresentableInObjC ( <nl> return false ; <nl> if ( checkObjCInExtensionContext ( AFD , Diagnose ) ) <nl> return false ; <nl> + if ( checkObjCActorIsolation ( AFD , Reason ) ) <nl> + return false ; <nl> <nl> if ( AFD - > isOperator ( ) ) { <nl> AFD - > diagnose ( ( isa < ProtocolDecl > ( AFD - > getDeclContext ( ) ) <nl> bool swift : : isRepresentableInObjC ( <nl> Type resultType = FD - > mapTypeIntoContext ( FD - > getResultInterfaceType ( ) ) ; <nl> if ( auto tupleType = resultType - > getAs < TupleType > ( ) ) { <nl> for ( const auto & tupleElt : tupleType - > getElements ( ) ) { <nl> - addCompletionHandlerParam ( tupleElt . getType ( ) ) ; <nl> + if ( addCompletionHandlerParam ( tupleElt . getType ( ) ) ) <nl> + return false ; <nl> } <nl> } else { <nl> - addCompletionHandlerParam ( resultType ) ; <nl> + if ( addCompletionHandlerParam ( resultType ) ) <nl> + return false ; <nl> } <nl> <nl> / / For a throwing asynchronous function , an Error ? 
parameter is added <nl> bool swift : : isRepresentableInObjC ( const VarDecl * VD , ObjCReason Reason ) { <nl> <nl> if ( checkObjCInForeignClassContext ( VD , Reason ) ) <nl> return false ; <nl> + if ( checkObjCActorIsolation ( VD , Reason ) ) <nl> + return false ; <nl> <nl> if ( ! Diagnose | | Result ) <nl> return Result ; <nl> bool swift : : isRepresentableInObjC ( const SubscriptDecl * SD , ObjCReason Reason ) { <nl> return false ; <nl> if ( checkObjCWithGenericParams ( SD , Reason ) ) <nl> return false ; <nl> + if ( checkObjCActorIsolation ( SD , Reason ) ) <nl> + return false ; <nl> <nl> / / ObjC doesn ' t support class subscripts . <nl> if ( ! SD - > isInstanceMember ( ) ) { <nl> mmm a / test / attr / attr_objc_async . swift <nl> ppp b / test / attr / attr_objc_async . swift <nl> <nl> / / RUN : not % target - swift - frontend - typecheck - dump - ast - disable - objc - attr - requires - foundation - module % s - swift - version 5 - enable - source - import - I % S / Inputs - enable - experimental - concurrency > % t . ast <nl> / / RUN : % FileCheck - check - prefix CHECK - DUMP % s < % t . ast <nl> / / REQUIRES : objc_interop <nl> + / / REQUIRES : concurrency <nl> import Foundation <nl> <nl> - / / CHECK : class Concurrency <nl> - class Concurrency { <nl> + / / CHECK : class MyClass <nl> + class MyClass { <nl> / / CHECK : @ objc func doBigJob ( ) async - > Int <nl> / / CHECK - DUMP : func_decl { { . * } } doBigJob { { . * } } foreign_async = @ convention ( block ) ( Int ) - > ( ) , completion_handler_param = 0 <nl> @ objc func doBigJob ( ) async - > Int { return 0 } <nl> class Concurrency { <nl> @ objc class func createAsynchronously ( ) async - > Self ? { nil } <nl> / / expected - error @ - 1 { { asynchronous method returning ' Self ' cannot be ' @ objc ' } } <nl> } <nl> + <nl> + / / Actor class exporting Objective - C entry points . <nl> + <nl> + / / CHECK : class MyActor <nl> + actor class MyActor { <nl> + / / CHECK : @ objc func doBigJob ( ) async - > Int <nl> + / / CHECK - DUMP : func_decl { { . * } } doBigJob { { . * } } foreign_async = @ convention ( block ) ( Int ) - > ( ) , completion_handler_param = 0 <nl> + @ objc func doBigJob ( ) async - > Int { return 0 } <nl> + <nl> + / / CHECK : @ objc func doBigJobOrFail ( _ : Int ) async throws - > ( AnyObject , Int ) <nl> + / / CHECK - DUMP : func_decl { { . * } } doBigJobOrFail { { . * } } foreign_async = @ convention ( block ) ( Optional < AnyObject > , Int , Optional < Error > ) - > ( ) , completion_handler_param = 1 , error_param = 2 <nl> + @ objc func doBigJobOrFail ( _ : Int ) async throws - > ( AnyObject , Int ) { return ( self , 0 ) } <nl> + <nl> + / / Actor - isolated entities cannot be exposed to Objective - C . 
<nl> + @ objc func synchronousBad ( ) { } / / expected - error { { actor - isolated instance method ' synchronousBad ( ) ' cannot be @ objc } } <nl> + / / expected - note @ - 1 { { add ' async ' to function ' synchronousBad ( ) ' to make it asynchronous } } <nl> + / / expected - note @ - 2 { { add ' @ asyncHandler ' to function ' synchronousBad ( ) ' to create an implicit asynchronous context } } <nl> + <nl> + @ objc var badProp : AnyObject { self } / / expected - error { { actor - isolated property ' badProp ' cannot be @ objc } } <nl> + @ objc subscript ( index : Int ) - > AnyObject { self } / / expected - error { { actor - isolated subscript ' subscript ( _ : ) ' cannot be @ objc } } <nl> + <nl> + / / CHECK : @ objc @ actorIndependent func synchronousGood ( ) <nl> + @ objc @ actorIndependent func synchronousGood ( ) { } <nl> + } <nl>
|
[Concurrency] Ban actor-isolated operations from being @objc.
|
apple/swift
|
e5c1491c6a77a21c9a6c2465de782d4777e31903
|
2020-09-30T07:02:17Z
|
mmm a / rust / flatbuffers / src / endian_scalar . rs <nl> ppp b / rust / flatbuffers / src / endian_scalar . rs <nl> impl EndianScalar for f32 { <nl> } <nl> # [ cfg ( not ( target_endian = " little " ) ) ] <nl> { <nl> - byte_swap_f32 ( & self ) <nl> + byte_swap_f32 ( self ) <nl> } <nl> } <nl> / / / Convert f32 from little - endian to host endian - ness . <nl> impl EndianScalar for f32 { <nl> } <nl> # [ cfg ( not ( target_endian = " little " ) ) ] <nl> { <nl> - byte_swap_f32 ( & self ) <nl> + byte_swap_f32 ( self ) <nl> } <nl> } <nl> } <nl> impl EndianScalar for f64 { <nl> } <nl> # [ cfg ( not ( target_endian = " little " ) ) ] <nl> { <nl> - byte_swap_f64 ( & self ) <nl> + byte_swap_f64 ( self ) <nl> } <nl> } <nl> / / / Convert f64 from little - endian to host endian - ness . <nl> impl EndianScalar for f64 { <nl> } <nl> # [ cfg ( not ( target_endian = " little " ) ) ] <nl> { <nl> - byte_swap_f64 ( & self ) <nl> + byte_swap_f64 ( self ) <nl> } <nl> } <nl> } <nl> mmm a / rust / flatbuffers / src / vector . rs <nl> ppp b / rust / flatbuffers / src / vector . rs <nl> use std : : mem : : size_of ; <nl> use std : : slice : : from_raw_parts ; <nl> use std : : str : : from_utf8_unchecked ; <nl> <nl> - use endian_scalar : : { EndianScalar , read_scalar } ; <nl> + # [ cfg ( target_endian = " little " ) ] <nl> + use endian_scalar : : EndianScalar ; <nl> + use endian_scalar : : read_scalar ; <nl> use follow : : Follow ; <nl> use primitives : : * ; <nl> <nl> mod le_safe_slice_impls { <nl> impl super : : SafeSliceAccess for f64 { } <nl> } <nl> <nl> + # [ cfg ( target_endian = " little " ) ] <nl> pub use self : : le_safe_slice_impls : : * ; <nl> <nl> pub fn follow_cast_ref < ' a , T : Sized + ' a > ( buf : & ' a [ u8 ] , loc : usize ) - > & ' a T { <nl> impl < ' a > Follow < ' a > for & ' a str { <nl> } <nl> } <nl> <nl> + # [ cfg ( target_endian = " little " ) ] <nl> fn follow_slice_helper < T > ( buf : & [ u8 ] , loc : usize ) - > & [ T ] { <nl> let sz = size_of : : < T > ( ) ; <nl> debug_assert ! ( sz > 0 ) ; <nl> mmm a / tests / RustTest . sh <nl> ppp b / tests / RustTest . sh <nl> set - e <nl> # See the License for the specific language governing permissions and <nl> # limitations under the License . <nl> <nl> + if [ [ " $ 1 " = = " mips - unknown - linux - gnu " ] ] ; then <nl> + TARGET_FLAG = " - - target mips - unknown - linux - gnu " <nl> + export CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER = mips - linux - gnu - gcc <nl> + export CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_RUNNER = " qemu - mips - L / usr / mips - linux - gnu " <nl> + fi <nl> + <nl> cd . / rust_usage_test <nl> - cargo test - - - - quiet <nl> + cargo test $ TARGET_FLAG - - - - quiet <nl> TEST_RESULT = $ ? <nl> if [ [ $ TEST_RESULT = = 0 ] ] ; then <nl> echo " OK : Rust tests passed . " <nl> else <nl> exit 1 <nl> fi <nl> <nl> - cargo run - - bin = alloc_check <nl> + cargo run $ TARGET_FLAG - - bin = alloc_check <nl> TEST_RESULT = $ ? <nl> if [ [ $ TEST_RESULT = = 0 ] ] ; then <nl> echo " OK : Rust heap alloc test passed . " <nl> else <nl> exit 1 <nl> fi <nl> <nl> - cargo bench <nl> + cargo bench $ TARGET_FLAG <nl> new file mode 100644 <nl> index 0000000000 . . f2e93f4e08 <nl> mmm / dev / null <nl> ppp b / tests / docker / languages / Dockerfile . testing . rust . big_endian . 1_30_1 <nl> <nl> + FROM rust : 1 . 30 . 
1 - slim - stretch as base <nl> + RUN apt - qq update - y & & apt - qq install - y \ <nl> + gcc - mips - linux - gnu \ <nl> + libexpat1 \ <nl> + libmagic1 \ <nl> + libmpdec2 \ <nl> + libreadline7 \ <nl> + qemu - user <nl> + RUN rustup target add mips - unknown - linux - gnu <nl> + WORKDIR / code <nl> + ADD . . <nl> + RUN cp flatc_debian_stretch flatc <nl> + WORKDIR / code / tests <nl> + RUN rustc - - version <nl> + RUN . / RustTest . sh mips - unknown - linux - gnu <nl> mmm a / tests / rust_usage_test / tests / integration_test . rs <nl> ppp b / tests / rust_usage_test / tests / integration_test . rs <nl> mod roundtrip_vectors { <nl> <nl> const N : u64 = 20 ; <nl> <nl> - fn prop < T : PartialEq + : : std : : fmt : : Debug + Copy + flatbuffers : : EndianScalar + flatbuffers : : Push > ( xs : Vec < T > ) { <nl> + fn prop < T > ( xs : Vec < T > ) <nl> + where <nl> + T : for < ' a > flatbuffers : : Follow < ' a , Inner = T > <nl> + + flatbuffers : : EndianScalar <nl> + + flatbuffers : : Push <nl> + + : : std : : fmt : : Debug , <nl> + { <nl> use flatbuffers : : Follow ; <nl> <nl> let mut b = flatbuffers : : FlatBufferBuilder : : new ( ) ; <nl> mod roundtrip_vectors { <nl> <nl> let buf = b . finished_data ( ) ; <nl> <nl> - let got = < flatbuffers : : ForwardsUOffset < & [ T ] > > : : follow ( buf , 0 ) ; <nl> - assert_eq ! ( got , & xs [ . . ] ) ; <nl> + let got = < flatbuffers : : ForwardsUOffset < flatbuffers : : Vector < T > > > : : follow ( & buf [ . . ] , 0 ) ; <nl> + let mut result_vec : Vec < T > = Vec : : with_capacity ( got . len ( ) ) ; <nl> + for i in 0 . . got . len ( ) { <nl> + result_vec . push ( got . get ( i ) ) ; <nl> + } <nl> + assert_eq ! ( result_vec , xs ) ; <nl> } <nl> <nl> # [ test ] <nl> mod roundtrip_table { <nl> let tab = < flatbuffers : : ForwardsUOffset < flatbuffers : : Table > > : : follow ( buf , 0 ) ; <nl> <nl> for i in 0 . . xs . len ( ) { <nl> - let v = tab . get : : < flatbuffers : : ForwardsUOffset < & [ u8 ] > > ( fi2fo ( i as flatbuffers : : VOffsetT ) , None ) ; <nl> - assert_eq ! ( v , Some ( & xs [ i ] [ . . ] ) ) ; <nl> + let v = tab . get : : < flatbuffers : : ForwardsUOffset < flatbuffers : : Vector < u8 > > > ( fi2fo ( i as flatbuffers : : VOffsetT ) , None ) ; <nl> + assert ! ( v . is_some ( ) ) ; <nl> + let v2 = v . unwrap ( ) . safe_slice ( ) ; <nl> + assert_eq ! ( v2 , & xs [ i ] [ . . ] ) ; <nl> } <nl> } <nl> prop ( vec ! [ vec ! [ 1 , 2 , 3 ] ] ) ; <nl> mod roundtrip_table { <nl> <nl> const N : u64 = 20 ; <nl> <nl> - fn prop < ' a , T : flatbuffers : : Follow < ' a > + ' a + flatbuffers : : EndianScalar + flatbuffers : : Push + : : std : : fmt : : Debug > ( vecs : Vec < Vec < T > > ) { <nl> + fn prop < T > ( vecs : Vec < Vec < T > > ) <nl> + where <nl> + T : for < ' a > flatbuffers : : Follow < ' a , Inner = T > <nl> + + flatbuffers : : EndianScalar <nl> + + flatbuffers : : Push <nl> + + : : std : : fmt : : Debug , <nl> + { <nl> use flatbuffers : : field_index_to_field_offset as fi2fo ; <nl> use flatbuffers : : Follow ; <nl> <nl> mod roundtrip_table { <nl> let tab = < flatbuffers : : ForwardsUOffset < flatbuffers : : Table > > : : follow ( buf , 0 ) ; <nl> <nl> for i in 0 . . vecs . len ( ) { <nl> - let got = tab . get : : < flatbuffers : : ForwardsUOffset < & [ T ] > > ( fi2fo ( i as flatbuffers : : VOffsetT ) , None ) ; <nl> + let got = tab . get : : < flatbuffers : : ForwardsUOffset < flatbuffers : : Vector < T > > > ( fi2fo ( i as flatbuffers : : VOffsetT ) , None ) ; <nl> assert ! ( got . is_some ( ) ) ; <nl> let got2 = got . 
unwrap ( ) ; <nl> - assert_eq ! ( & vecs [ i ] [ . . ] , got2 ) ; <nl> + let mut got3 : Vec < T > = Vec : : with_capacity ( got2 . len ( ) ) ; <nl> + for i in 0 . . got2 . len ( ) { <nl> + got3 . push ( got2 . get ( i ) ) ; <nl> + } <nl> + assert_eq ! ( vecs [ i ] , got3 ) ; <nl> } <nl> } <nl> <nl> mod follow_impls { <nl> use flatbuffers : : Follow ; <nl> use flatbuffers : : field_index_to_field_offset as fi2fo ; <nl> <nl> + / / Define a test struct to use in a few tests . This replicates the work that the code generator <nl> + / / would normally do when defining a FlatBuffer struct . For reference , compare the following <nl> + / / ` FooStruct ` code with the code generated for the ` Vec3 ` struct in <nl> + / / ` . . / . . / monster_test_generated . rs ` . <nl> + use flatbuffers : : EndianScalar ; <nl> + # [ derive ( Copy , Clone , Debug , PartialEq ) ] <nl> + # [ repr ( C , packed ) ] <nl> + struct FooStruct { <nl> + a : i8 , <nl> + b : u8 , <nl> + c : i16 , <nl> + } <nl> + impl FooStruct { <nl> + fn new ( _a : i8 , _b : u8 , _c : i16 ) - > Self { <nl> + FooStruct { <nl> + a : _a . to_little_endian ( ) , <nl> + b : _b . to_little_endian ( ) , <nl> + c : _c . to_little_endian ( ) , <nl> + } <nl> + } <nl> + } <nl> + impl flatbuffers : : SafeSliceAccess for FooStruct { } <nl> + impl < ' a > flatbuffers : : Follow < ' a > for FooStruct { <nl> + type Inner = & ' a FooStruct ; <nl> + # [ inline ( always ) ] <nl> + fn follow ( buf : & ' a [ u8 ] , loc : usize ) - > Self : : Inner { <nl> + < & ' a FooStruct > : : follow ( buf , loc ) <nl> + } <nl> + } <nl> + impl < ' a > flatbuffers : : Follow < ' a > for & ' a FooStruct { <nl> + type Inner = & ' a FooStruct ; <nl> + # [ inline ( always ) ] <nl> + fn follow ( buf : & ' a [ u8 ] , loc : usize ) - > Self : : Inner { <nl> + flatbuffers : : follow_cast_ref : : < FooStruct > ( buf , loc ) <nl> + } <nl> + } <nl> + <nl> # [ test ] <nl> fn to_u8 ( ) { <nl> let vec : Vec < u8 > = vec ! [ 255 , 3 ] ; <nl> mod follow_impls { <nl> # [ test ] <nl> fn to_byte_slice ( ) { <nl> let vec : Vec < u8 > = vec ! [ 255 , 255 , 255 , 255 , 4 , 0 , 0 , 0 , 1 , 2 , 3 , 4 ] ; <nl> - let off : flatbuffers : : FollowStart < & [ u8 ] > = flatbuffers : : FollowStart : : new ( ) ; <nl> - assert_eq ! ( off . self_follow ( & vec [ . . ] , 4 ) , & [ 1 , 2 , 3 , 4 ] [ . . ] ) ; <nl> + let off : flatbuffers : : FollowStart < flatbuffers : : Vector < u8 > > = flatbuffers : : FollowStart : : new ( ) ; <nl> + assert_eq ! ( off . self_follow ( & vec [ . . ] , 4 ) . safe_slice ( ) , & [ 1 , 2 , 3 , 4 ] [ . . ] ) ; <nl> } <nl> <nl> # [ test ] <nl> mod follow_impls { <nl> # [ test ] <nl> fn to_byte_string_zero_teriminated ( ) { <nl> let vec : Vec < u8 > = vec ! [ 255 , 255 , 255 , 255 , 3 , 0 , 0 , 0 , 1 , 2 , 3 , 0 ] ; <nl> - let off : flatbuffers : : FollowStart < & [ u8 ] > = flatbuffers : : FollowStart : : new ( ) ; <nl> - assert_eq ! ( off . self_follow ( & vec [ . . ] , 4 ) , & [ 1 , 2 , 3 ] [ . . ] ) ; <nl> + let off : flatbuffers : : FollowStart < flatbuffers : : Vector < u8 > > = flatbuffers : : FollowStart : : new ( ) ; <nl> + assert_eq ! ( off . self_follow ( & vec [ . . ] , 4 ) . safe_slice ( ) , & [ 1 , 2 , 3 ] [ . . 
] ) ; <nl> } <nl> <nl> # [ cfg ( target_endian = " little " ) ] <nl> mod follow_impls { <nl> <nl> # [ test ] <nl> fn to_struct ( ) { <nl> - # [ derive ( Copy , Clone , Debug , PartialEq ) ] <nl> - # [ repr ( C , packed ) ] <nl> - struct FooStruct { <nl> - a : i8 , <nl> - b : u8 , <nl> - c : i16 , <nl> - } <nl> - impl < ' a > flatbuffers : : Follow < ' a > for & ' a FooStruct { <nl> - type Inner = & ' a FooStruct ; <nl> - # [ inline ( always ) ] <nl> - fn follow ( buf : & ' a [ u8 ] , loc : usize ) - > Self : : Inner { <nl> - flatbuffers : : follow_cast_ref : : < FooStruct > ( buf , loc ) <nl> - } <nl> - } <nl> - <nl> let vec : Vec < u8 > = vec ! [ 255 , 255 , 255 , 255 , 1 , 2 , 3 , 4 ] ; <nl> let off : flatbuffers : : FollowStart < & FooStruct > = flatbuffers : : FollowStart : : new ( ) ; <nl> - assert_eq ! ( * off . self_follow ( & vec [ . . ] , 4 ) , FooStruct { a : 1 , b : 2 , c : 1027 } ) ; <nl> + assert_eq ! ( * off . self_follow ( & vec [ . . ] , 4 ) , FooStruct : : new ( 1 , 2 , 1027 ) ) ; <nl> } <nl> <nl> # [ test ] <nl> mod follow_impls { <nl> <nl> # [ test ] <nl> fn to_slice_of_struct_elements ( ) { <nl> - # [ derive ( Copy , Clone , Debug , PartialEq ) ] <nl> - # [ repr ( C , packed ) ] <nl> - struct FooStruct { <nl> - a : i8 , <nl> - b : u8 , <nl> - c : i16 , <nl> - } <nl> - impl flatbuffers : : SafeSliceAccess for FooStruct { } <nl> - impl < ' a > flatbuffers : : Follow < ' a > for FooStruct { <nl> - type Inner = & ' a FooStruct ; <nl> - # [ inline ( always ) ] <nl> - fn follow ( buf : & ' a [ u8 ] , loc : usize ) - > Self : : Inner { <nl> - flatbuffers : : follow_cast_ref : : < FooStruct > ( buf , loc ) <nl> - } <nl> - } <nl> - <nl> let buf : Vec < u8 > = vec ! [ 1 , 0 , 0 , 0 , / * struct data * / 1 , 2 , 3 , 4 ] ; <nl> let fs : flatbuffers : : FollowStart < flatbuffers : : Vector < FooStruct > > = flatbuffers : : FollowStart : : new ( ) ; <nl> - assert_eq ! ( fs . self_follow ( & buf [ . . ] , 0 ) . safe_slice ( ) , & vec ! [ FooStruct { a : 1 , b : 2 , c : 1027 } ] [ . . ] ) ; <nl> + assert_eq ! ( fs . self_follow ( & buf [ . . ] , 0 ) . safe_slice ( ) , & vec ! [ FooStruct : : new ( 1 , 2 , 1027 ) ] [ . . ] ) ; <nl> } <nl> <nl> # [ test ] <nl> fn to_vector_of_struct_elements ( ) { <nl> - # [ derive ( Copy , Clone , Debug , PartialEq ) ] <nl> - # [ repr ( C , packed ) ] <nl> - struct FooStruct { <nl> - a : i8 , <nl> - b : u8 , <nl> - c : i16 , <nl> - } <nl> - impl < ' a > flatbuffers : : Follow < ' a > for FooStruct { <nl> - type Inner = & ' a FooStruct ; <nl> - # [ inline ( always ) ] <nl> - fn follow ( buf : & ' a [ u8 ] , loc : usize ) - > Self : : Inner { <nl> - flatbuffers : : follow_cast_ref : : < FooStruct > ( buf , loc ) <nl> - } <nl> - } <nl> - <nl> let buf : Vec < u8 > = vec ! [ 1 , 0 , 0 , 0 , / * struct data * / 1 , 2 , 3 , 4 ] ; <nl> let fs : flatbuffers : : FollowStart < flatbuffers : : Vector < FooStruct > > = flatbuffers : : FollowStart : : new ( ) ; <nl> assert_eq ! ( fs . self_follow ( & buf [ . . ] , 0 ) . len ( ) , 1 ) ; <nl> - assert_eq ! ( fs . self_follow ( & buf [ . . ] , 0 ) . get ( 0 ) , & FooStruct { a : 1 , b : 2 , c : 1027 } ) ; <nl> + assert_eq ! ( fs . self_follow ( & buf [ . . ] , 0 ) . get ( 0 ) , & FooStruct : : new ( 1 , 2 , 1027 ) ) ; <nl> } <nl> <nl> # [ test ] <nl> mod follow_impls { <nl> ] ; <nl> let tab = < flatbuffers : : ForwardsUOffset < flatbuffers : : Table > > : : follow ( & buf [ . . ] , 0 ) ; <nl> assert_eq ! ( tab . 
get : : < flatbuffers : : ForwardsUOffset < & str > > ( fi2fo ( 0 ) , None ) , Some ( " moo " ) ) ; <nl> - assert_eq ! ( tab . get : : < flatbuffers : : ForwardsUOffset < & [ u8 ] > > ( fi2fo ( 0 ) , None ) , Some ( & vec ! [ 109 , 111 , 111 ] [ . . ] ) ) ; <nl> + let byte_vec = tab . get : : < flatbuffers : : ForwardsUOffset < flatbuffers : : Vector < u8 > > > ( fi2fo ( 0 ) , None ) . unwrap ( ) . safe_slice ( ) ; <nl> + assert_eq ! ( byte_vec , & vec ! [ 109 , 111 , 111 ] [ . . ] ) ; <nl> let v = tab . get : : < flatbuffers : : ForwardsUOffset < flatbuffers : : Vector < u8 > > > ( fi2fo ( 0 ) , None ) . unwrap ( ) ; <nl> assert_eq ! ( v . len ( ) , 3 ) ; <nl> assert_eq ! ( v . get ( 0 ) , 109 ) ; <nl> mod follow_impls { <nl> ] ; <nl> let tab = < flatbuffers : : ForwardsUOffset < flatbuffers : : Table > > : : follow ( & buf [ . . ] , 0 ) ; <nl> assert_eq ! ( tab . get : : < flatbuffers : : ForwardsUOffset < & str > > ( fi2fo ( 0 ) , Some ( " abc " ) ) , Some ( " abc " ) ) ; <nl> - assert_eq ! ( tab . get : : < flatbuffers : : ForwardsUOffset < & [ u8 ] > > ( fi2fo ( 0 ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) ; <nl> + # [ cfg ( target_endian = " little " ) ] <nl> + { <nl> + assert_eq ! ( tab . get : : < flatbuffers : : ForwardsUOffset < & [ u8 ] > > ( fi2fo ( 0 ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) ; <nl> + } <nl> <nl> let default_vec_buf : Vec < u8 > = vec ! [ 3 , 0 , 0 , 0 , 70 , 71 , 72 , 0 ] ; <nl> let default_vec = flatbuffers : : Vector : : new ( & default_vec_buf [ . . ] , 0 ) ; <nl> mod follow_impls { <nl> ] ; <nl> let tab = < flatbuffers : : ForwardsUOffset < flatbuffers : : Table > > : : follow ( & buf [ . . ] , 0 ) ; <nl> assert_eq ! ( tab . get : : < flatbuffers : : ForwardsUOffset < & str > > ( fi2fo ( 0 ) , Some ( " abc " ) ) , Some ( " abc " ) ) ; <nl> - assert_eq ! ( tab . get : : < flatbuffers : : ForwardsUOffset < & [ u8 ] > > ( fi2fo ( 0 ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) ; <nl> + # [ cfg ( target_endian = " little " ) ] <nl> + { <nl> + assert_eq ! ( tab . get : : < flatbuffers : : ForwardsUOffset < & [ u8 ] > > ( fi2fo ( 0 ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) , Some ( & vec ! [ 70 , 71 , 72 ] [ . . ] ) ) ; <nl> + } <nl> <nl> let default_vec_buf : Vec < u8 > = vec ! [ 3 , 0 , 0 , 0 , 70 , 71 , 72 , 0 ] ; <nl> let default_vec = flatbuffers : : Vector : : new ( & default_vec_buf [ . . ] , 0 ) ; <nl>
|
Fix Rust crate for big-endian targets ()
|
google/flatbuffers
|
9e82ee25275e07e35f489f6feac543611f81fb83
|
2019-03-08T09:06:25Z
|
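The struct test data in the commit above only stays valid on a big-endian host because FooStruct::new stores each field little-endian: the trailing bytes [1, 2, 3, 4] must decode to FooStruct::new(1, 2, 1027), with the two-byte field c read as little-endian. A quick check of that byte layout, sketched in Python rather than Rust purely to verify the arithmetic:

# FooStruct is #[repr(C, packed)] with fields a: i8, b: u8, c: i16.
buf = bytes([1, 2, 3, 4])            # struct payload used by the tests above
a = buf[0]
b = buf[1]
c = int.from_bytes(buf[2:4], "little", signed=True)
assert (a, b, c) == (1, 2, 1027)     # 3 + 4 * 256 = 1027, hence FooStruct::new(1, 2, 1027)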
mmm a / xbmc / cores / VideoPlayer / DVDCodecs / Audio / DVDAudioCodecFFmpeg . cpp <nl> ppp b / xbmc / cores / VideoPlayer / DVDCodecs / Audio / DVDAudioCodecFFmpeg . cpp <nl> void CDVDAudioCodecFFmpeg : : Dispose ( ) <nl> int CDVDAudioCodecFFmpeg : : Decode ( uint8_t * pData , int iSize , double dts , double pts ) <nl> { <nl> int iBytesUsed ; <nl> - if ( ! m_pCodecContext ) return - 1 ; <nl> + if ( ! m_pCodecContext ) <nl> + return - 1 ; <nl> <nl> AVPacket avpkt ; <nl> av_init_packet ( & avpkt ) ; <nl> mmm a / xbmc / cores / VideoPlayer / VideoPlayerAudio . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoPlayerAudio . cpp <nl> <nl> / / allow audio for slow and fast speeds ( but not rewind / fastforward ) <nl> # define ALLOW_AUDIO ( speed ) ( ( speed ) > 5 * DVD_PLAYSPEED_NORMAL / 10 & & ( speed ) < = 15 * DVD_PLAYSPEED_NORMAL / 10 ) <nl> <nl> - void CPTSInputQueue : : Add ( int64_t bytes , double pts ) <nl> - { <nl> - CSingleLock lock ( m_sync ) ; <nl> - <nl> - m_list . insert ( m_list . begin ( ) , std : : make_pair ( bytes , pts ) ) ; <nl> - } <nl> - <nl> - void CPTSInputQueue : : Flush ( ) <nl> - { <nl> - CSingleLock lock ( m_sync ) ; <nl> - <nl> - m_list . clear ( ) ; <nl> - } <nl> - double CPTSInputQueue : : Get ( int64_t bytes , bool consume ) <nl> - { <nl> - CSingleLock lock ( m_sync ) ; <nl> - <nl> - IT it = m_list . begin ( ) ; <nl> - for ( ; it ! = m_list . end ( ) ; + + it ) <nl> - { <nl> - if ( bytes < = it - > first ) <nl> - { <nl> - double pts = it - > second ; <nl> - if ( consume ) <nl> - { <nl> - it - > second = DVD_NOPTS_VALUE ; <nl> - m_list . erase ( + + it , m_list . end ( ) ) ; <nl> - } <nl> - return pts ; <nl> - } <nl> - bytes - = it - > first ; <nl> - } <nl> - return DVD_NOPTS_VALUE ; <nl> - } <nl> - <nl> - <nl> class CDVDMsgAudioCodecChange : public CDVDMsg <nl> { <nl> public : <nl> void CVideoPlayerAudio : : CloseStream ( bool bWaitForBuffers ) <nl> } <nl> } <nl> <nl> - / / decode one audio frame and returns its uncompressed size <nl> - int CVideoPlayerAudio : : DecodeFrame ( DVDAudioFrame & audioframe ) <nl> + void CVideoPlayerAudio : : OnStartup ( ) <nl> { <nl> - int result = 0 ; <nl> - <nl> - / / make sure the sent frame is clean <nl> - audioframe . nb_frames = 0 ; <nl> - <nl> - while ( ! m_bStop ) <nl> - { <nl> - bool switched = false ; <nl> - / * NOTE : the audio packet can contain several frames * / <nl> - while ( ! m_bStop & & m_decode . size > 0 ) <nl> - { <nl> - if ( ! m_pAudioCodec ) <nl> - return DECODE_FLAG_ERROR ; <nl> - <nl> - / * the packet dts refers to the first audioframe that starts in the packet * / <nl> - double dts = m_ptsInput . Get ( m_decode . size + m_pAudioCodec - > GetBufferSize ( ) , true ) ; <nl> - if ( dts ! = DVD_NOPTS_VALUE ) <nl> - m_audioClock = dts ; <nl> - <nl> - int len = m_pAudioCodec - > Decode ( m_decode . data , m_decode . size , m_decode . dts , m_decode . pts ) ; <nl> - if ( len < 0 | | len > m_decode . size ) <nl> - { <nl> - / * if error , we skip the packet * / <nl> - CLog : : Log ( LOGERROR , " CVideoPlayerAudio : : DecodeFrame - Decode Error . Skipping audio packet ( % d ) " , len ) ; <nl> - m_decode . Release ( ) ; <nl> - m_pAudioCodec - > Reset ( ) ; <nl> - return DECODE_FLAG_ERROR ; <nl> - } <nl> - <nl> - m_audioStats . AddSampleBytes ( len ) ; <nl> - <nl> - m_decode . data + = len ; <nl> - m_decode . size - = len ; <nl> + m_decode . 
Release ( ) ; <nl> <nl> - / / get decoded data and the size of it <nl> - m_pAudioCodec - > GetData ( audioframe ) ; <nl> + # ifdef TARGET_WINDOWS <nl> + CoInitializeEx ( NULL , COINIT_MULTITHREADED ) ; <nl> + # endif <nl> + } <nl> <nl> - if ( audioframe . nb_frames = = 0 ) <nl> - continue ; <nl> + void CVideoPlayerAudio : : UpdatePlayerInfo ( ) <nl> + { <nl> + std : : ostringstream s ; <nl> + s < < " aq : " < < std : : setw ( 2 ) < < std : : min ( 99 , m_messageQueue . GetLevel ( ) ) < < " % " ; <nl> + s < < " , Kb / s : " < < std : : fixed < < std : : setprecision ( 2 ) < < ( double ) GetAudioBitrate ( ) / 1024 . 0 ; <nl> <nl> - audioframe . hasTimestamp = true ; <nl> - if ( audioframe . pts = = DVD_NOPTS_VALUE ) <nl> - { <nl> - audioframe . pts = m_audioClock ; <nl> - if ( dts = = DVD_NOPTS_VALUE ) <nl> - audioframe . hasTimestamp = false ; <nl> - } <nl> - else <nl> - { <nl> - m_audioClock = audioframe . pts ; <nl> - } <nl> + / / print the inverse of the resample ratio , since that makes more sense <nl> + / / if the resample ratio is 0 . 5 , then we ' re playing twice as fast <nl> + if ( m_synctype = = SYNC_RESAMPLE ) <nl> + s < < " , rr : " < < std : : fixed < < std : : setprecision ( 5 ) < < 1 . 0 / m_dvdAudio . GetResampleRatio ( ) ; <nl> <nl> - if ( audioframe . format . m_sampleRate & & m_streaminfo . samplerate ! = ( int ) audioframe . format . m_sampleRate ) <nl> - { <nl> - / / The sample rate has changed or we just got it for the first time <nl> - / / for this stream . See if we should enable / disable passthrough due <nl> - / / to it . <nl> - m_streaminfo . samplerate = audioframe . format . m_sampleRate ; <nl> - if ( ! switched & & SwitchCodecIfNeeded ( ) ) <nl> - { <nl> - / / passthrough has been enabled / disabled , reprocess the packet <nl> - m_decode . data - = len ; <nl> - m_decode . size + = len ; <nl> - switched = true ; <nl> - continue ; <nl> - } <nl> - } <nl> + s < < " , att : " < < std : : fixed < < std : : setprecision ( 1 ) < < log ( GetCurrentAttenuation ( ) ) * 20 . 0f < < " dB " ; <nl> <nl> - / / increase audioclock to after the packet <nl> - m_audioClock + = audioframe . duration ; <nl> + SInfo info ; <nl> + info . info = s . str ( ) ; <nl> + info . pts = m_dvdAudio . GetPlayingPts ( ) ; <nl> + info . passthrough = m_pAudioCodec & & m_pAudioCodec - > NeedPassthrough ( ) ; <nl> <nl> - / / if demux source want ' s us to not display this , continue <nl> - if ( m_decode . msg - > GetPacketDrop ( ) ) <nl> - result | = DECODE_FLAG_DROP ; <nl> + { CSingleLock lock ( m_info_section ) ; <nl> + m_info = info ; <nl> + } <nl> + } <nl> <nl> - return result ; <nl> - } <nl> - / / free the current packet <nl> - m_decode . Release ( ) ; <nl> + void CVideoPlayerAudio : : Process ( ) <nl> + { <nl> + CLog : : Log ( LOGNOTICE , " running thread : CVideoPlayerAudio : : Process ( ) " ) ; <nl> <nl> - if ( m_messageQueue . ReceivedAbortRequest ( ) ) return DECODE_FLAG_ABORT ; <nl> + DVDAudioFrame audioframe ; <nl> + m_audioStats . Start ( ) ; <nl> <nl> + while ( ! m_bStop ) <nl> + { <nl> CDVDMsg * pMsg ; <nl> int timeout = ( int ) ( 1000 * m_dvdAudio . 
GetCacheTime ( ) ) + 100 ; <nl> <nl> int CVideoPlayerAudio : : DecodeFrame ( DVDAudioFrame & audioframe ) <nl> if ( m_syncState = = IDVDStreamPlayer : : SYNC_STARTING | | / * when not started * / <nl> ALLOW_AUDIO ( m_speed ) | | / * when playing normally * / <nl> m_speed < DVD_PLAYSPEED_PAUSE | | / * when rewinding * / <nl> - ( m_speed > DVD_PLAYSPEED_NORMAL & & m_audioClock < m_pClock - > GetClock ( ) ) ) / * when behind clock in ff * / <nl> + ( m_speed > DVD_PLAYSPEED_NORMAL & & m_audioClock < m_pClock - > GetClock ( ) ) ) / * when behind clock in ff * / <nl> priority = 0 ; <nl> <nl> if ( m_syncState = = IDVDStreamPlayer : : SYNC_WAITSYNC ) <nl> int CVideoPlayerAudio : : DecodeFrame ( DVDAudioFrame & audioframe ) <nl> <nl> MsgQueueReturnCode ret = m_messageQueue . Get ( & pMsg , timeout , priority ) ; <nl> <nl> - if ( ret = = MSGQ_TIMEOUT ) <nl> + if ( MSGQ_IS_ERROR ( ret ) ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " Got MSGQ_ABORT or MSGO_IS_ERROR return true " ) ; <nl> + break ; <nl> + } <nl> + else if ( ret = = MSGQ_TIMEOUT ) <nl> { <nl> / / Flush as the audio output may keep looping if we don ' t <nl> if ( ALLOW_AUDIO ( m_speed ) & & ! m_stalled & & m_syncState = = IDVDStreamPlayer : : SYNC_INSYNC ) <nl> int CVideoPlayerAudio : : DecodeFrame ( DVDAudioFrame & audioframe ) <nl> Sleep ( 10 ) ; <nl> continue ; <nl> } <nl> - else if ( MSGQ_IS_ERROR ( ret ) ) <nl> - return DECODE_FLAG_ABORT ; <nl> <nl> - if ( pMsg - > IsType ( CDVDMsg : : DEMUXER_PACKET ) ) <nl> - { <nl> - m_decode . Attach ( ( CDVDMsgDemuxerPacket * ) pMsg ) ; <nl> - m_ptsInput . Add ( m_decode . size , m_decode . dts ) ; <nl> - } <nl> - else if ( pMsg - > IsType ( CDVDMsg : : GENERAL_SYNCHRONIZE ) ) <nl> + / / handle messages <nl> + if ( pMsg - > IsType ( CDVDMsg : : GENERAL_SYNCHRONIZE ) ) <nl> { <nl> if ( ( ( CDVDMsgGeneralSynchronize * ) pMsg ) - > Wait ( 100 , SYNCSOURCE_AUDIO ) ) <nl> CLog : : Log ( LOGDEBUG , " CVideoPlayerAudio - CDVDMsg : : GENERAL_SYNCHRONIZE " ) ; <nl> else <nl> - m_messageQueue . Put ( pMsg - > Acquire ( ) , 1 ) ; / * push back as prio message , to process other prio messages * / <nl> + m_messageQueue . Put ( pMsg - > Acquire ( ) , 1 ) ; / / push back as prio message , to process other prio messages <nl> } <nl> else if ( pMsg - > IsType ( CDVDMsg : : GENERAL_RESYNC ) ) <nl> { / / player asked us to set internal clock <nl> int CVideoPlayerAudio : : DecodeFrame ( DVDAudioFrame & audioframe ) <nl> { <nl> bool sync = static_cast < CDVDMsgBool * > ( pMsg ) - > m_value ; <nl> m_dvdAudio . Flush ( ) ; <nl> - m_ptsInput . 
Flush ( ) ; <nl> m_stalled = true ; <nl> m_audioClock = 0 ; <nl> <nl> int CVideoPlayerAudio : : DecodeFrame ( DVDAudioFrame & audioframe ) <nl> { <nl> m_silence = static_cast < CDVDMsgBool * > ( pMsg ) - > m_value ; <nl> CLog : : Log ( LOGDEBUG , " CVideoPlayerAudio - CDVDMsg : : AUDIO_SILENCE ( % f , % d ) " <nl> - , m_audioClock , m_silence ) ; <nl> + , m_audioClock , m_silence ) ; <nl> } <nl> else if ( pMsg - > IsType ( CDVDMsg : : GENERAL_STREAMCHANGE ) ) <nl> { <nl> int CVideoPlayerAudio : : DecodeFrame ( DVDAudioFrame & audioframe ) <nl> OpenStream ( msg - > m_hints , msg - > m_codec ) ; <nl> msg - > m_codec = NULL ; <nl> } <nl> + else if ( pMsg - > IsType ( CDVDMsg : : DEMUXER_PACKET ) ) <nl> + { <nl> + DemuxPacket * pPacket = ( ( CDVDMsgDemuxerPacket * ) pMsg ) - > GetPacket ( ) ; <nl> + bool bPacketDrop = ( ( CDVDMsgDemuxerPacket * ) pMsg ) - > GetPacketDrop ( ) ; <nl> <nl> - pMsg - > Release ( ) ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> - void CVideoPlayerAudio : : OnStartup ( ) <nl> - { <nl> - m_decode . Release ( ) ; <nl> - <nl> - # ifdef TARGET_WINDOWS <nl> - CoInitializeEx ( NULL , COINIT_MULTITHREADED ) ; <nl> - # endif <nl> - } <nl> - <nl> - void CVideoPlayerAudio : : UpdatePlayerInfo ( ) <nl> - { <nl> - std : : ostringstream s ; <nl> - s < < " aq : " < < std : : setw ( 2 ) < < std : : min ( 99 , m_messageQueue . GetLevel ( ) ) < < " % " ; <nl> - s < < " , Kb / s : " < < std : : fixed < < std : : setprecision ( 2 ) < < ( double ) GetAudioBitrate ( ) / 1024 . 0 ; <nl> - <nl> - / / print the inverse of the resample ratio , since that makes more sense <nl> - / / if the resample ratio is 0 . 5 , then we ' re playing twice as fast <nl> - if ( m_synctype = = SYNC_RESAMPLE ) <nl> - s < < " , rr : " < < std : : fixed < < std : : setprecision ( 5 ) < < 1 . 0 / m_dvdAudio . GetResampleRatio ( ) ; <nl> - <nl> - s < < " , att : " < < std : : fixed < < std : : setprecision ( 1 ) < < log ( GetCurrentAttenuation ( ) ) * 20 . 0f < < " dB " ; <nl> - <nl> - SInfo info ; <nl> - info . info = s . str ( ) ; <nl> - info . pts = m_dvdAudio . GetPlayingPts ( ) ; <nl> - info . passthrough = m_pAudioCodec & & m_pAudioCodec - > NeedPassthrough ( ) ; <nl> + int len = m_pAudioCodec - > Decode ( pPacket - > pData , pPacket - > iSize , pPacket - > dts , pPacket - > pts ) ; <nl> + if ( len < 0 ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CVideoPlayerAudio : : DecodeFrame - Decode Error . Skipping audio packet ( % d ) " , len ) ; <nl> + m_pAudioCodec - > Reset ( ) ; <nl> + pMsg - > Release ( ) ; <nl> + continue ; <nl> + } <nl> <nl> - { CSingleLock lock ( m_info_section ) ; <nl> - m_info = info ; <nl> - } <nl> - } <nl> + UpdatePlayerInfo ( ) ; <nl> <nl> - void CVideoPlayerAudio : : Process ( ) <nl> - { <nl> - CLog : : Log ( LOGNOTICE , " running thread : CVideoPlayerAudio : : Process ( ) " ) ; <nl> + / / loop while no error and decoder produces output <nl> + while ( ! m_bStop ) <nl> + { <nl> + / / get decoded data and the size of it <nl> + m_pAudioCodec - > GetData ( audioframe ) ; <nl> <nl> - DVDAudioFrame audioframe ; <nl> - m_audioStats . Start ( ) ; <nl> + if ( audioframe . nb_frames = = 0 ) <nl> + break ; <nl> <nl> - while ( ! m_bStop ) <nl> - { <nl> - int result = DecodeFrame ( audioframe ) ; <nl> + audioframe . hasTimestamp = true ; <nl> + if ( audioframe . pts = = DVD_NOPTS_VALUE ) <nl> + { <nl> + audioframe . pts = m_audioClock ; <nl> + audioframe . hasTimestamp = false ; <nl> + } <nl> + else <nl> + { <nl> + m_audioClock = audioframe . 
pts ; <nl> + } <nl> <nl> - / / Drop when not playing normally <nl> - if ( ! ALLOW_AUDIO ( m_speed ) & & m_syncState = = IDVDStreamPlayer : : SYNC_INSYNC ) <nl> - { <nl> - result | = DECODE_FLAG_DROP ; <nl> - } <nl> + / / Drop when not playing normally <nl> + if ( ! ALLOW_AUDIO ( m_speed ) & & m_syncState = = IDVDStreamPlayer : : SYNC_INSYNC ) <nl> + { <nl> + break ; <nl> + } <nl> <nl> - UpdatePlayerInfo ( ) ; <nl> + if ( audioframe . format . m_sampleRate & & m_streaminfo . samplerate ! = ( int ) audioframe . format . m_sampleRate ) <nl> + { <nl> + / / The sample rate has changed or we just got it for the first time <nl> + / / for this stream . See if we should enable / disable passthrough due <nl> + / / to it . <nl> + m_streaminfo . samplerate = audioframe . format . m_sampleRate ; <nl> + if ( SwitchCodecIfNeeded ( ) ) <nl> + { <nl> + break ; <nl> + } <nl> + } <nl> <nl> - if ( result & DECODE_FLAG_ERROR ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " CVideoPlayerAudio : : Process - Decode Error " ) ; <nl> - continue ; <nl> - } <nl> + / / demuxer reads metatags that influence channel layout <nl> + if ( m_streaminfo . codec = = AV_CODEC_ID_FLAC & & m_streaminfo . channellayout ) <nl> + audioframe . format . m_channelLayout = CAEUtil : : GetAEChannelLayout ( m_streaminfo . channellayout ) ; <nl> <nl> - if ( result & DECODE_FLAG_ABORT ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " CVideoPlayerAudio : : Process - Abort received , exiting thread " ) ; <nl> - break ; <nl> - } <nl> + / / we have succesfully decoded an audio frame , setup renderer to match <nl> + if ( ! m_dvdAudio . IsValidFormat ( audioframe ) ) <nl> + { <nl> + if ( m_speed ) <nl> + m_dvdAudio . Drain ( ) ; <nl> <nl> - if ( audioframe . nb_frames = = 0 ) <nl> - continue ; <nl> + m_dvdAudio . Destroy ( ) ; <nl> <nl> - / / demuxer reads metatags that influence channel layout <nl> - if ( m_streaminfo . codec = = AV_CODEC_ID_FLAC & & m_streaminfo . channellayout ) <nl> - audioframe . format . m_channelLayout = CAEUtil : : GetAEChannelLayout ( m_streaminfo . channellayout ) ; <nl> - <nl> - / / we have succesfully decoded an audio frame , setup renderer to match <nl> - if ( ! m_dvdAudio . IsValidFormat ( audioframe ) ) <nl> - { <nl> - if ( m_speed ) <nl> - m_dvdAudio . Drain ( ) ; <nl> + if ( ! m_dvdAudio . Create ( audioframe , m_streaminfo . codec , m_setsynctype = = SYNC_RESAMPLE ) ) <nl> + CLog : : Log ( LOGERROR , " % s - failed to create audio renderer " , __FUNCTION__ ) ; <nl> <nl> - m_dvdAudio . Destroy ( ) ; <nl> + if ( m_syncState = = IDVDStreamPlayer : : SYNC_INSYNC ) <nl> + m_dvdAudio . Resume ( ) ; <nl> <nl> - if ( ! m_dvdAudio . Create ( audioframe , m_streaminfo . codec , m_setsynctype = = SYNC_RESAMPLE ) ) <nl> - CLog : : Log ( LOGERROR , " % s - failed to create audio renderer " , __FUNCTION__ ) ; <nl> + m_streaminfo . channels = audioframe . format . m_channelLayout . Count ( ) ; <nl> <nl> - if ( m_syncState = = IDVDStreamPlayer : : SYNC_INSYNC ) <nl> - m_dvdAudio . Resume ( ) ; <nl> + g_dataCacheCore . SignalAudioInfoChange ( ) ; <nl> + } <nl> <nl> - m_streaminfo . channels = audioframe . format . m_channelLayout . Count ( ) ; <nl> + / / Zero out the frame data if we are supposed to silence the audio <nl> + if ( m_silence ) <nl> + { <nl> + int size = audioframe . nb_frames * audioframe . framesize / audioframe . planes ; <nl> + for ( unsigned int i = 0 ; i < audioframe . planes ; i + + ) <nl> + memset ( audioframe . data [ i ] , 0 , size ) ; <nl> + } <nl> <nl> - g_dataCacheCore . 
SignalAudioInfoChange ( ) ; <nl> - } <nl> + SetSyncType ( audioframe . passthrough ) ; <nl> <nl> - / / Zero out the frame data if we are supposed to silence the audio <nl> - if ( m_silence ) <nl> - { <nl> - int size = audioframe . nb_frames * audioframe . framesize / audioframe . planes ; <nl> - for ( unsigned int i = 0 ; i < audioframe . planes ; i + + ) <nl> - memset ( audioframe . data [ i ] , 0 , size ) ; <nl> - } <nl> + if ( ! bPacketDrop ) <nl> + { <nl> + OutputPacket ( audioframe ) ; <nl> + <nl> + / / signal to our parent that we have initialized <nl> + if ( m_syncState = = IDVDStreamPlayer : : SYNC_STARTING ) <nl> + { <nl> + double cachetotal = DVD_SEC_TO_TIME ( m_dvdAudio . GetCacheTotal ( ) ) ; <nl> + double cachetime = m_dvdAudio . GetDelay ( ) ; <nl> + if ( cachetime > = cachetotal * 0 . 5 ) <nl> + { <nl> + m_syncState = IDVDStreamPlayer : : SYNC_WAITSYNC ; <nl> + m_stalled = false ; <nl> + SStartMsg msg ; <nl> + msg . player = VideoPlayer_AUDIO ; <nl> + msg . cachetotal = cachetotal ; <nl> + msg . cachetime = cachetime ; <nl> + msg . timestamp = audioframe . hasTimestamp ? audioframe . pts : DVD_NOPTS_VALUE ; <nl> + m_messageParent . Put ( new CDVDMsgType < SStartMsg > ( CDVDMsg : : PLAYER_STARTED , msg ) ) ; <nl> + } <nl> + } <nl> + } <nl> <nl> - if ( ! ( result & DECODE_FLAG_DROP ) ) <nl> - { <nl> - SetSyncType ( audioframe . passthrough ) ; <nl> + / / guess next pts <nl> + m_audioClock + = audioframe . duration ; <nl> <nl> - OutputPacket ( audioframe ) ; <nl> - } <nl> + int len = m_pAudioCodec - > Decode ( nullptr , 0 , DVD_NOPTS_VALUE , DVD_NOPTS_VALUE ) ; <nl> + if ( len < 0 ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CVideoPlayerAudio : : DecodeFrame - Decode Error . Skipping audio packet ( % d ) " , len ) ; <nl> + m_pAudioCodec - > Reset ( ) ; <nl> + break ; <nl> + } <nl> + } / / while decoder produces output <nl> <nl> - / / signal to our parent that we have initialized <nl> - if ( m_syncState = = IDVDStreamPlayer : : SYNC_STARTING & & ! ( result & DECODE_FLAG_DROP ) ) <nl> - { <nl> - double cachetotal = DVD_SEC_TO_TIME ( m_dvdAudio . GetCacheTotal ( ) ) ; <nl> - double cachetime = m_dvdAudio . GetDelay ( ) ; <nl> - if ( cachetime > = cachetotal * 0 . 5 ) <nl> - { <nl> - m_syncState = IDVDStreamPlayer : : SYNC_WAITSYNC ; <nl> - m_stalled = false ; <nl> - SStartMsg msg ; <nl> - msg . player = VideoPlayer_AUDIO ; <nl> - msg . cachetotal = cachetotal ; <nl> - msg . cachetime = cachetime ; <nl> - msg . timestamp = audioframe . hasTimestamp ? audioframe . pts : DVD_NOPTS_VALUE ; <nl> - m_messageParent . Put ( new CDVDMsgType < SStartMsg > ( CDVDMsg : : PLAYER_STARTED , msg ) ) ; <nl> - } <nl> - } <nl> + } / / demuxer packet <nl> + <nl> + pMsg - > Release ( ) ; <nl> } <nl> } <nl> <nl> mmm a / xbmc / cores / VideoPlayer / VideoPlayerAudio . h <nl> ppp b / xbmc / cores / VideoPlayer / VideoPlayerAudio . 
h <nl> class CVideoPlayer ; <nl> class CDVDAudioCodec ; <nl> class CDVDAudioCodec ; <nl> <nl> - # define DECODE_FLAG_DROP 1 <nl> - # define DECODE_FLAG_RESYNC 2 <nl> - # define DECODE_FLAG_ERROR 4 <nl> - # define DECODE_FLAG_ABORT 8 <nl> - <nl> - class CPTSInputQueue <nl> - { <nl> - private : <nl> - typedef std : : list < std : : pair < int64_t , double > > : : iterator IT ; <nl> - std : : list < std : : pair < int64_t , double > > m_list ; <nl> - CCriticalSection m_sync ; <nl> - public : <nl> - void Add ( int64_t bytes , double pts ) ; <nl> - double Get ( int64_t bytes , bool consume ) ; <nl> - void Flush ( ) ; <nl> - } ; <nl> - <nl> class CVideoPlayerAudio : public CThread , public IDVDStreamPlayerAudio <nl> { <nl> public : <nl> class CVideoPlayerAudio : public CThread , public IDVDStreamPlayerAudio <nl> / / holds stream information for current playing stream <nl> CDVDStreamInfo m_streaminfo ; <nl> <nl> - CPTSInputQueue m_ptsInput ; <nl> - <nl> double GetCurrentPts ( ) { CSingleLock lock ( m_info_section ) ; return m_info . pts ; } <nl> <nl> bool IsStalled ( ) const { return m_stalled ; } <nl> class CVideoPlayerAudio : public CThread , public IDVDStreamPlayerAudio <nl> virtual void OnExit ( ) ; <nl> virtual void Process ( ) ; <nl> <nl> - int DecodeFrame ( DVDAudioFrame & audioframe ) ; <nl> - <nl> void UpdatePlayerInfo ( ) ; <nl> void OpenStream ( CDVDStreamInfo & hints , CDVDAudioCodec * codec ) ; <nl> / / ! Switch codec if needed . Called when the sample rate gotten from the <nl>
|
VideoPlayer: refactor audio player
|
xbmc/xbmc
|
4bcae8a333f5f4e0ea81553235795423cbf6e6ae
|
2016-01-24T11:24:57Z
|
mmm a / website / benchmark / hardware / index . html <nl> ppp b / website / benchmark / hardware / index . html <nl> < h3 class = " my - 3 " > Comments < / h3 > <nl> Results for AMD EPYC 7702 are from < b > Peng Gao < / b > in sina . com . < br / > <nl> Results for Intel NUC are from < b > Alexander Zaitsev < / b > , Altinity . < br / > <nl> Xeon Gold 6230 server is using 4 x SAMSUNG datacenter class SSD in RAID - 10 . < br / > <nl> - Results for Yandex Managed ClickHouse for " cold cache " are biased and should not be compared , because cache was not flushed for every next query . < / p > <nl> + Results for Yandex Managed ClickHouse for " cold cache " are biased and should not be compared , because cache was not flushed for every next query . < / p > <nl> < / div > <nl> < / div > <nl> { % endblock % } <nl>
|
Fix markup
|
ClickHouse/ClickHouse
|
bed8f03132a9f8ec8ada438e134d7e3b483143b9
|
2020-06-19T12:39:01Z
|
mmm a / src / debug - agent . h <nl> ppp b / src / debug - agent . h <nl> class DebuggerAgentSession ; <nl> class DebuggerAgent : public Thread { <nl> public : <nl> explicit DebuggerAgent ( const char * name , int port ) <nl> - : port_ ( port ) , name_ ( StrDup ( name ) ) , <nl> + : name_ ( StrDup ( name ) ) , port_ ( port ) , <nl> server_ ( OS : : CreateSocket ( ) ) , terminate_ ( false ) , <nl> session_access_ ( OS : : CreateMutex ( ) ) , session_ ( NULL ) , <nl> terminate_now_ ( OS : : CreateSemaphore ( 0 ) ) { } <nl>
|
Fixed Linux compiler warning.
|
v8/v8
|
b5d265a3a2977307344b1e40949b58ed06352c64
|
2009-03-23T22:35:01Z
|
mmm a / README . md <nl> ppp b / README . md <nl> SRS always use the most simple architecture to support complex transaction . <nl> * System arch : the system structure and arch . <nl> * Modularity arch : the main modularity of SRS . <nl> * Stream arch : the stream dispatch arch of SRS . <nl> - * RTMP cluster arch : the RTMP origin and edge cluster arch . <nl> - * Multiple processes arch ( by wenjie ) : the multiple process of SRS . <nl> - * CLI arch : the cli arch for SRS , api to manage SRS . <nl> - * Bandwidth specification : the bandwidth test specification of SRS . <nl> <nl> # # # System Architecture <nl> <nl> SRS always use the most simple architecture to support complex transaction . <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl> | SRS ( Simple RTMP Server ) | <nl> + mmmmmmmmmmmmmmm + mmmmmmmmmmmmmmm + mmmmmmmmm - - + mmmmmmmmm - + <nl> - | API / hook | Transcoder | HLS | RTMP | <nl> + | API / hook | Transcoder | HLS / HDS | RTMP / FLV | <nl> | http - parser | FFMPEG / x264 | NGINX / ts | protocol | <nl> + mmmmmmmmmmmmmmm + mmmmmmmmmmmmmmm + mmmmmmmmm - - + mmmmmmmmm - + <nl> | Network ( state - threads ) | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl> - | All Linux ( RHEL , CentOS , Ubuntu , Fedora . . . ) | <nl> + | All Linux / Unix ( RHEL , CentOS , Ubuntu , Fedora . . . ) | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl> < / pre > <nl> <nl> SRS always use the most simple architecture to support complex transaction . <nl> <nl> < pre > <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl> - | Main ( srs / bandwidth / librtmp ) | <nl> + | Main ( srs / ingest - hls / librtmp ) | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl> | App ( Server / Client application ) | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl> - | RTMP ( Protocol stack ) | <nl> + | RTMP / HTTP / RawStream ( Protocol stack ) | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl> | Kernel ( depends on Core , provides error / log ) | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + <nl>
|
refine for 2.0
|
ossrs/srs
|
c19856be5ca93c8b28c67f232ea3834870fe7d5d
|
2015-05-23T05:10:11Z
|
mmm a / xbmc / cores / AudioRenderers / CoreAudioRenderer . cpp <nl> ppp b / xbmc / cores / AudioRenderers / CoreAudioRenderer . cpp <nl> <nl> <nl> # include " CoreAudioRenderer . h " <nl> # include " Application . h " <nl> + # include " Systeminfo . h " <nl> # include " guilib / AudioContext . h " <nl> # include " osx / CocoaInterface . h " <nl> # include " settings / GUISettings . h " <nl>
|
FIXED: Prevent ATV1 users from trying to use any channel layout other than 2.0, since the audio device cannot send more than 2 channels over any interface (no multichannel PCM > 2).
|
xbmc/xbmc
|
79f7fd9d4d9f5e7a716381afba9d350897faa9c6
|
2012-02-18T19:13:16Z
|
mmm a / tensorflow / python / keras / metrics_correctness_test . py <nl> ppp b / tensorflow / python / keras / metrics_correctness_test . py <nl> def setUp ( self ) : <nl> # Batch 2 = [ ( ( 9 - 6 ) ^ 2 * 4 , ( 12 - 8 ) ^ 2 * 5 ) ] = [ 36 , 80 ] <nl> <nl> # Result ( reduction = SUM ) = ( ( 2 + 12 ) + ( 36 + 80 ) ) / 2 = 65 <nl> - # Result ( reduction = SUM_OVER_BATCH_SIZE / AUTO ) = 130 / 4 = 32 . 5 <nl> + # Result ( reduction = SUM_OVER_BATCH_SIZE / AUTO / NONE ) = 130 / 4 = 32 . 5 <nl> <nl> # Loss ` output_2 ` : <nl> # Per - sample weighted losses <nl> def setUp ( self ) : <nl> # Batch 2 = [ ( 9 - 6 ) ^ 2 * 1 . 5 , ( 12 - 8 ) ^ 2 * 0 . 5 ) ] = [ 13 . 5 , 8 ] <nl> <nl> # Result ( reduction = SUM ) = ( ( 3 . 5 + 10 ) + ( 13 . 5 + 8 ) ) / 2 = 17 . 5 <nl> - # Result ( reduction = SUM_OVER_BATCH_SIZE / AUTO ) = 35 / 4 = 8 . 75 <nl> + # Result ( reduction = SUM_OVER_BATCH_SIZE / AUTO / NONE ) = 35 / 4 = 8 . 75 <nl> + <nl> + # When reduction is ' NONE ' loss value that is passed to the optimizer will <nl> + # be vector loss but what is reported is a scalar , which is an average of <nl> + # all the values in all the batch vectors . <nl> <nl> # Total loss = Output_loss_1 + Output_loss_2 <nl> <nl> def setUp ( self ) : <nl> } <nl> <nl> self . expected_fit_result = { <nl> + loss_reduction . ReductionV2 . NONE : <nl> + sum_over_batch_size_fit_result , <nl> loss_reduction . ReductionV2 . SUM : { <nl> ' loss ' : [ 82 . 5 , 82 . 5 ] , <nl> ' output_1_loss ' : [ 65 , 65 ] , <nl> def setUp ( self ) : <nl> <nl> # In the order : ' loss ' , ' output_1_loss ' , ' output_2_loss ' , <nl> self . expected_batch_result = { <nl> + loss_reduction . ReductionV2 . NONE : [ 41 . 25 , 32 . 5 , 8 . 75 ] , <nl> loss_reduction . ReductionV2 . SUM : [ 82 . 5 , 65 , 17 . 5 ] , <nl> loss_reduction . ReductionV2 . AUTO : [ 41 . 25 , 32 . 5 , 8 . 75 ] , <nl> loss_reduction . ReductionV2 . SUM_OVER_BATCH_SIZE : [ 41 . 25 , 32 . 5 , 8 . 75 ] , <nl> mmm a / tensorflow / python / ops / losses / loss_reduction . py <nl> ppp b / tensorflow / python / ops / losses / loss_reduction . py <nl> class ReductionV2 ( object ) : <nl> used with ` tf . distribute . Strategy ` , outside of built - in training loops such <nl> as ` tf . keras ` ` compile ` and ` fit ` , we expect reduction value to be <nl> ` SUM ` or ` NONE ` . Using ` AUTO ` in that case will raise an error . <nl> - * ` NONE ` : Un - reduced weighted losses with the same shape as input . <nl> + * ` NONE ` : Un - reduced weighted losses with the same shape as input . When this <nl> + reduction type used with built - in Keras training loops like <nl> + ` fit ` / ` evaluate ` , the unreduced vector loss is passed to the optimizer but <nl> + the reported loss will be a scalar value . <nl> * ` SUM ` : Scalar sum of weighted losses . <nl> * ` SUM_OVER_BATCH_SIZE ` : Scalar ` SUM ` divided by number of elements in losses . <nl> This reduction type is not supported when used with <nl>
|
Add information about how the NONE reduction type is handled in Keras fit/eval.
|
tensorflow/tensorflow
|
14a3a7d02ace46ed78bba6ef6fce76a404acf8e5
|
2019-05-07T23:26:00Z
|
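The comments in the commit above derive the expected metrics from the per-sample weighted losses: [2, 12, 36, 80] for output_1 and [3.5, 10, 13.5, 8] for output_2, split into two batches of two samples. The sketch below reproduces that arithmetic in plain Python (it does not call the Keras API) to show why SUM reports 65 and 17.5 while AUTO, SUM_OVER_BATCH_SIZE, and the scalar reported for NONE all come out to 32.5 and 8.75:

def reported_loss(per_sample, reduction, batch_size=2):
    # Reproduce the expected values from the test comments above.
    if reduction == "SUM":
        # Sum within each batch, then average over the number of batches.
        batches = [per_sample[i:i + batch_size]
                   for i in range(0, len(per_sample), batch_size)]
        return sum(sum(b) for b in batches) / len(batches)
    # AUTO, SUM_OVER_BATCH_SIZE, and the scalar reported for NONE.
    return sum(per_sample) / len(per_sample)

output_1 = [2, 12, 36, 80]
output_2 = [3.5, 10, 13.5, 8]
assert reported_loss(output_1, "SUM") == 65
assert reported_loss(output_2, "SUM") == 17.5
assert reported_loss(output_1, "NONE") == 32.5
assert reported_loss(output_2, "NONE") == 8.75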
mmm a / src / compiler / code - generator . cc <nl> ppp b / src / compiler / code - generator . cc <nl> CodeGenerator : : CodeGenerator ( Frame * frame , Linkage * linkage , <nl> info_ ( info ) , <nl> labels_ ( zone ( ) - > NewArray < Label > ( code - > InstructionBlockCount ( ) ) ) , <nl> current_block_ ( RpoNumber : : Invalid ( ) ) , <nl> - current_source_position_ ( SourcePosition : : Invalid ( ) ) , <nl> + current_source_position_ ( SourcePosition : : Unknown ( ) ) , <nl> masm_ ( info - > isolate ( ) , NULL , 0 ) , <nl> resolver_ ( this ) , <nl> safepoints_ ( code - > zone ( ) ) , <nl> void CodeGenerator : : AssembleSourcePosition ( Instruction * instr ) { <nl> SourcePosition source_position ; <nl> if ( ! code ( ) - > GetSourcePosition ( instr , & source_position ) ) return ; <nl> if ( source_position = = current_source_position_ ) return ; <nl> - DCHECK ( ! source_position . IsInvalid ( ) ) ; <nl> current_source_position_ = source_position ; <nl> if ( source_position . IsUnknown ( ) ) return ; <nl> int code_pos = source_position . raw ( ) ; <nl> - masm ( ) - > positions_recorder ( ) - > RecordPosition ( source_position . raw ( ) ) ; <nl> + masm ( ) - > positions_recorder ( ) - > RecordPosition ( code_pos ) ; <nl> masm ( ) - > positions_recorder ( ) - > WriteRecordedPositions ( ) ; <nl> if ( FLAG_code_comments ) { <nl> Vector < char > buffer = Vector < char > : : New ( 256 ) ; <nl> mmm a / src / compiler / graph - visualizer . cc <nl> ppp b / src / compiler / graph - visualizer . cc <nl> class JSONGraphNodeWriter { <nl> os_ < < " , \ " rankInputs \ " : [ 0 ] " ; <nl> } <nl> SourcePosition position = positions_ - > GetSourcePosition ( node ) ; <nl> - if ( ! position . IsUnknown ( ) ) { <nl> - DCHECK ( ! position . IsInvalid ( ) ) ; <nl> + if ( position . IsKnown ( ) ) { <nl> os_ < < " , \ " pos \ " : " < < position . raw ( ) ; <nl> } <nl> os_ < < " , \ " opcode \ " : \ " " < < IrOpcode : : Mnemonic ( node - > opcode ( ) ) < < " \ " " ; <nl> void GraphC1Visualizer : : PrintSchedule ( const char * phase , <nl> } <nl> if ( positions ! = NULL ) { <nl> SourcePosition position = positions - > GetSourcePosition ( node ) ; <nl> - if ( ! position . IsUnknown ( ) ) { <nl> - DCHECK ( ! position . IsInvalid ( ) ) ; <nl> + if ( position . IsKnown ( ) ) { <nl> os_ < < " pos : " < < position . raw ( ) ; <nl> } <nl> } <nl> mmm a / src / compiler / instruction - selector . cc <nl> ppp b / src / compiler / instruction - selector . cc <nl> void InstructionSelector : : VisitBlock ( BasicBlock * block ) { <nl> if ( instructions_ . size ( ) = = current_node_end ) continue ; <nl> / / Mark source position on first instruction emitted . <nl> SourcePosition source_position = source_positions_ - > GetSourcePosition ( node ) ; <nl> - if ( source_position . IsUnknown ( ) ) continue ; <nl> - DCHECK ( ! source_position . IsInvalid ( ) ) ; <nl> - if ( source_position_mode_ = = kAllSourcePositions | | <nl> - node - > opcode ( ) = = IrOpcode : : kCall ) { <nl> + if ( source_position . IsKnown ( ) & & <nl> + ( source_position_mode_ = = kAllSourcePositions | | <nl> + node - > opcode ( ) = = IrOpcode : : kCall ) ) { <nl> sequence ( ) - > SetSourcePosition ( instructions_ [ current_node_end ] , <nl> source_position ) ; <nl> } <nl> mmm a / src / compiler / instruction . cc <nl> ppp b / src / compiler / instruction . 
cc <nl> bool InstructionSequence : : GetSourcePosition ( const Instruction * instr , <nl> <nl> void InstructionSequence : : SetSourcePosition ( const Instruction * instr , <nl> SourcePosition value ) { <nl> - DCHECK ( ! value . IsInvalid ( ) ) ; <nl> - DCHECK ( ! value . IsUnknown ( ) ) ; <nl> source_positions_ . insert ( std : : make_pair ( instr , value ) ) ; <nl> } <nl> <nl> mmm a / src / compiler / pipeline . cc <nl> ppp b / src / compiler / pipeline . cc <nl> struct ContextSpecializerPhase { <nl> static const char * phase_name ( ) { return " context specializing " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> JSContextSpecializer spec ( data - > jsgraph ( ) ) ; <nl> GraphReducer graph_reducer ( data - > graph ( ) , temp_zone ) ; <nl> AddReducer ( data , & graph_reducer , & spec ) ; <nl> struct InliningPhase { <nl> static const char * phase_name ( ) { return " inlining " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> GraphReducer graph_reducer ( data - > graph ( ) , temp_zone ) ; <nl> JSInliner inliner ( & graph_reducer , data - > info ( ) - > is_inlining_enabled ( ) <nl> ? JSInliner : : kGeneralInlining <nl> struct OsrDeconstructionPhase { <nl> static const char * phase_name ( ) { return " OSR deconstruction " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> OsrHelper osr_helper ( data - > info ( ) ) ; <nl> osr_helper . Deconstruct ( data - > jsgraph ( ) , data - > common ( ) , temp_zone ) ; <nl> } <nl> struct JSTypeFeedbackPhase { <nl> static const char * phase_name ( ) { return " type feedback specializing " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> Handle < Context > native_context ( data - > info ( ) - > context ( ) - > native_context ( ) ) ; <nl> TypeFeedbackOracle oracle ( data - > isolate ( ) , temp_zone , <nl> data - > info ( ) - > unoptimized_code ( ) , <nl> struct TypedLoweringPhase { <nl> static const char * phase_name ( ) { return " typed lowering " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> LoadElimination load_elimination ; <nl> JSBuiltinReducer builtin_reducer ( data - > jsgraph ( ) ) ; <nl> JSTypedLowering typed_lowering ( data - > jsgraph ( ) , temp_zone ) ; <nl> struct SimplifiedLoweringPhase { <nl> static const char * phase_name ( ) { return " simplified lowering " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> SimplifiedLowering lowering ( data - > jsgraph ( ) , temp_zone , <nl> data - > source_positions ( ) ) ; <nl> lowering . 
LowerAllNodes ( ) ; <nl> struct ControlFlowOptimizationPhase { <nl> static const char * phase_name ( ) { return " control flow optimization " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> ControlFlowOptimizer optimizer ( data - > jsgraph ( ) , temp_zone ) ; <nl> optimizer . Optimize ( ) ; <nl> } <nl> struct ChangeLoweringPhase { <nl> static const char * phase_name ( ) { return " change lowering " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> ValueNumberingReducer vn_reducer ( temp_zone ) ; <nl> SimplifiedOperatorReducer simple_reducer ( data - > jsgraph ( ) ) ; <nl> ChangeLowering lowering ( data - > jsgraph ( ) ) ; <nl> struct ChangeLoweringPhase { <nl> struct EarlyControlReductionPhase { <nl> static const char * phase_name ( ) { return " early control reduction " ; } <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> ControlReducer : : ReduceGraph ( temp_zone , data - > jsgraph ( ) , 0 ) ; <nl> } <nl> } ; <nl> struct EarlyControlReductionPhase { <nl> struct LateControlReductionPhase { <nl> static const char * phase_name ( ) { return " late control reduction " ; } <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> ControlReducer : : ReduceGraph ( temp_zone , data - > jsgraph ( ) , 0 ) ; <nl> } <nl> } ; <nl> struct StressLoopPeelingPhase { <nl> static const char * phase_name ( ) { return " stress loop peeling " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> / / Peel the first outer loop for testing . <nl> / / TODO ( titzer ) : peel all loops ? the N ' th loop ? Innermost loops ? <nl> LoopTree * loop_tree = LoopFinder : : BuildLoopTree ( data - > graph ( ) , temp_zone ) ; <nl> struct GenericLoweringPhase { <nl> static const char * phase_name ( ) { return " generic lowering " ; } <nl> <nl> void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - SourcePositionTable : : Scope pos ( data - > source_positions ( ) , <nl> - SourcePosition : : Unknown ( ) ) ; <nl> JSGenericLowering generic ( data - > info ( ) - > is_typing_enabled ( ) , <nl> data - > jsgraph ( ) ) ; <nl> SelectLowering select ( data - > jsgraph ( ) - > graph ( ) , data - > jsgraph ( ) - > common ( ) ) ; <nl> mmm a / src / compiler / source - position . cc <nl> ppp b / src / compiler / source - position . cc <nl> class SourcePositionTable : : Decorator final : public GraphDecorator { <nl> : source_positions_ ( source_positions ) { } <nl> <nl> void Decorate ( Node * node , bool incomplete ) final { <nl> - DCHECK ( ! source_positions_ - > current_position_ . IsInvalid ( ) ) ; <nl> source_positions_ - > table_ . 
Set ( node , source_positions_ - > current_position_ ) ; <nl> } <nl> <nl> class SourcePositionTable : : Decorator final : public GraphDecorator { <nl> SourcePositionTable : : SourcePositionTable ( Graph * graph ) <nl> : graph_ ( graph ) , <nl> decorator_ ( nullptr ) , <nl> - current_position_ ( SourcePosition : : Invalid ( ) ) , <nl> + current_position_ ( SourcePosition : : Unknown ( ) ) , <nl> table_ ( graph - > zone ( ) ) { } <nl> <nl> <nl> void SourcePositionTable : : Print ( std : : ostream & os ) const { <nl> bool needs_comma = false ; <nl> for ( auto i : table_ ) { <nl> SourcePosition pos = i . second ; <nl> - if ( ! pos . IsUnknown ( ) ) { <nl> + if ( pos . IsKnown ( ) ) { <nl> if ( needs_comma ) { <nl> os < < " , " ; <nl> } <nl> mmm a / src / compiler / source - position . h <nl> ppp b / src / compiler / source - position . h <nl> class SourcePosition final { <nl> <nl> static SourcePosition Unknown ( ) { return SourcePosition ( kUnknownPosition ) ; } <nl> bool IsUnknown ( ) const { return raw ( ) = = kUnknownPosition ; } <nl> - <nl> - static SourcePosition Invalid ( ) { return SourcePosition ( kInvalidPosition ) ; } <nl> - bool IsInvalid ( ) const { return raw ( ) = = kInvalidPosition ; } <nl> + bool IsKnown ( ) const { return raw ( ) ! = kUnknownPosition ; } <nl> <nl> int raw ( ) const { return raw_ ; } <nl> <nl> private : <nl> - static const int kInvalidPosition = - 2 ; <nl> static const int kUnknownPosition = RelocInfo : : kNoPosition ; <nl> - STATIC_ASSERT ( kInvalidPosition ! = kUnknownPosition ) ; <nl> int raw_ ; <nl> } ; <nl> <nl> class SourcePositionTable final { <nl> <nl> private : <nl> void Init ( SourcePosition position ) { <nl> - if ( ! position . IsUnknown ( ) | | prev_position_ . IsInvalid ( ) ) { <nl> - source_positions_ - > current_position_ = position ; <nl> - } <nl> + if ( position . IsKnown ( ) ) source_positions_ - > current_position_ = position ; <nl> } <nl> <nl> SourcePositionTable * const source_positions_ ; <nl>
|
[ turbofan ] Treat uninitialized source positions as unknown .
|
v8/v8
|
8cc7e70098d5ca968585ca4d8cb68f987fafa7a7
|
2015-05-13T16:07:55Z
|
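A minimal stand-alone sketch of the idea in the commit above: a source-position value type that keeps a single "unknown" sentinel and gains an IsKnown() helper, rather than carrying separate invalid and unknown states. The class name, sentinel value and main() below are invented for illustration and are not V8's actual definitions.

```cpp
#include <cassert>
#include <iostream>

// Hypothetical stand-in for RelocInfo::kNoPosition.
constexpr int kNoPosition = -1;

class SourcePos {
 public:
  static SourcePos Unknown() { return SourcePos(kNoPosition); }
  explicit SourcePos(int raw) : raw_(raw) {}
  // One sentinel, two complementary predicates -- no separate "invalid" state.
  bool IsUnknown() const { return raw_ == kNoPosition; }
  bool IsKnown() const { return raw_ != kNoPosition; }
  int raw() const { return raw_; }

 private:
  int raw_;
};

int main() {
  SourcePos unknown = SourcePos::Unknown();
  assert(!unknown.IsKnown());
  SourcePos known(42);
  if (known.IsKnown()) std::cout << "pos: " << known.raw() << "\n";
  return 0;
}
```

Call sites can then test position.IsKnown() instead of the double negation !position.IsUnknown(), which is the shape of most hunks in the diff.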
mmm a / lib / SILOptimizer / SILCombiner / SILCombinerMiscVisitors . cpp <nl> ppp b / lib / SILOptimizer / SILCombiner / SILCombinerMiscVisitors . cpp <nl> <nl> using namespace swift ; <nl> using namespace swift : : PatternMatch ; <nl> <nl> + / / / This flag is used to disable alloc stack optimizations to ease testing of <nl> + / / / other SILCombine optimizations . <nl> + static llvm : : cl : : opt < bool > <nl> + DisableAllocStackOpts ( " sil - combine - disable - alloc - stack - opts " , <nl> + llvm : : cl : : init ( false ) ) ; <nl> + <nl> SILInstruction * <nl> SILCombiner : : visitAllocExistentialBoxInst ( AllocExistentialBoxInst * AEBI ) { <nl> <nl> struct AllocStackAnalyzer : SILInstructionVisitor < AllocStackAnalyzer > { <nl> } / / end anonymous namespace <nl> <nl> SILInstruction * SILCombiner : : visitAllocStackInst ( AllocStackInst * AS ) { <nl> + / / If we are testing SILCombine and we are asked not to eliminate <nl> + / / alloc_stacks , just return . <nl> + if ( DisableAllocStackOpts ) <nl> + return nullptr ; <nl> + <nl> AllocStackAnalyzer Analyzer ( AS ) ; <nl> Analyzer . analyze ( ) ; <nl> <nl> mmm a / lib / SILOptimizer / Utils / Local . cpp <nl> ppp b / lib / SILOptimizer / Utils / Local . cpp <nl> static bool useDoesNotKeepClosureAlive ( const SILInstruction * I ) { <nl> } <nl> } <nl> <nl> + / / * HEY YOU , YES YOU , PLEASE READ * . Even though a textual partial apply is <nl> + / / printed with the convention of the closed over function upon it , all <nl> + / / non - inout arguments to a partial_apply are passed at + 1 . This includes <nl> + / / arguments that will eventually be passed as guaranteed or in_guaranteed to <nl> + / / the closed over function . This is because the partial apply is building up a <nl> + / / boxed aggregate to send off to the closed over function . Of course when you <nl> + / / call the function , the proper conventions will be used . <nl> void swift : : releasePartialApplyCapturedArg ( SILBuilder & Builder , SILLocation Loc , <nl> SILValue Arg , SILParameterInfo PInfo , <nl> InstModCallbacks Callbacks ) { <nl> void swift : : releasePartialApplyCapturedArg ( SILBuilder & Builder , SILLocation Loc , <nl> if ( PInfo . isIndirectMutating ( ) ) <nl> return ; <nl> <nl> - if ( isa < AllocStackInst > ( Arg ) ) { <nl> - return ; <nl> - } <nl> - <nl> / / If we have a trivial type , we do not need to put in any extra releases . <nl> if ( Arg - > getType ( ) . isTrivial ( Builder . getModule ( ) ) ) <nl> return ; <nl> <nl> - / / Otherwise , we need to destroy the argument . <nl> - if ( Arg - > getType ( ) . isObject ( ) ) { <nl> - / / If we have qualified ownership , we should just emit a destroy value . <nl> - if ( Arg - > getFunction ( ) - > hasQualifiedOwnership ( ) ) { <nl> - Callbacks . CreatedNewInst ( Builder . createDestroyValue ( Loc , Arg ) ) ; <nl> - return ; <nl> - } <nl> - <nl> - if ( Arg - > getType ( ) . hasReferenceSemantics ( ) ) { <nl> - auto U = Builder . emitStrongRelease ( Loc , Arg ) ; <nl> - if ( U . isNull ( ) ) <nl> - return ; <nl> + / / Otherwise , we need to destroy the argument . If we have an address , just <nl> + / / emit a destroy_addr . <nl> + if ( Arg - > getType ( ) . isAddress ( ) ) { <nl> + SILInstruction * NewInst = Builder . emitDestroyAddrAndFold ( Loc , Arg ) ; <nl> + Callbacks . CreatedNewInst ( NewInst ) ; <nl> + return ; <nl> + } <nl> <nl> - if ( auto * SRI = U . dyn_cast < StrongRetainInst * > ( ) ) { <nl> - Callbacks . DeleteInst ( SRI ) ; <nl> - return ; <nl> - } <nl> + / / Otherwise , we have an object . 
We emit the most optimized form of release <nl> + / / possible for that value . <nl> <nl> - Callbacks . CreatedNewInst ( U . get < StrongReleaseInst * > ( ) ) ; <nl> - return ; <nl> - } <nl> + / / If we have qualified ownership , we should just emit a destroy value . <nl> + if ( Arg - > getFunction ( ) - > hasQualifiedOwnership ( ) ) { <nl> + Callbacks . CreatedNewInst ( Builder . createDestroyValue ( Loc , Arg ) ) ; <nl> + return ; <nl> + } <nl> <nl> - auto U = Builder . emitReleaseValue ( Loc , Arg ) ; <nl> + if ( Arg - > getType ( ) . hasReferenceSemantics ( ) ) { <nl> + auto U = Builder . emitStrongRelease ( Loc , Arg ) ; <nl> if ( U . isNull ( ) ) <nl> return ; <nl> <nl> - if ( auto * RVI = U . dyn_cast < RetainValueInst * > ( ) ) { <nl> - Callbacks . DeleteInst ( RVI ) ; <nl> + if ( auto * SRI = U . dyn_cast < StrongRetainInst * > ( ) ) { <nl> + Callbacks . DeleteInst ( SRI ) ; <nl> return ; <nl> } <nl> <nl> - Callbacks . CreatedNewInst ( U . get < ReleaseValueInst * > ( ) ) ; <nl> + Callbacks . CreatedNewInst ( U . get < StrongReleaseInst * > ( ) ) ; <nl> + return ; <nl> + } <nl> + <nl> + auto U = Builder . emitReleaseValue ( Loc , Arg ) ; <nl> + if ( U . isNull ( ) ) <nl> + return ; <nl> + <nl> + if ( auto * RVI = U . dyn_cast < RetainValueInst * > ( ) ) { <nl> + Callbacks . DeleteInst ( RVI ) ; <nl> return ; <nl> } <nl> <nl> - SILInstruction * NewInst = Builder . emitDestroyAddrAndFold ( Loc , Arg ) ; <nl> - Callbacks . CreatedNewInst ( NewInst ) ; <nl> + Callbacks . CreatedNewInst ( U . get < ReleaseValueInst * > ( ) ) ; <nl> } <nl> <nl> / / / For each captured argument of PAI , decrement the ref count of the captured <nl> mmm a / test / SILOptimizer / sil_combine . sil <nl> ppp b / test / SILOptimizer / sil_combine . sil <nl> sil [ global_init ] @ global_init_fun : $ @ convention ( thin ) ( ) - > Builtin . RawPointer <nl> <nl> sil @ user : $ @ convention ( thin ) ( @ owned Builtin . NativeObject ) - > ( ) <nl> <nl> + sil @ unknown : $ @ convention ( thin ) ( ) - > ( ) <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / <nl> / / Simple DCE Tests / / <nl> / / / / / / / / / / / / / / / / / / / / / / <nl> sil [ readonly ] @ read_only_in : $ @ convention ( thin ) ( @ in SSS ) - > ( ) <nl> struct MyError : Error { <nl> } <nl> <nl> - sil @ unknown : $ @ convention ( thin ) ( ) - > ( ) <nl> sil [ readonly ] @ readonly_throwing : $ @ convention ( thin ) ( @ owned ZZZ ) - > ( ZZZ , @ error Error ) <nl> <nl> sil [ readonly ] @ readonly_owned : $ @ convention ( thin ) ( @ owned B ) - > @ owned B <nl> bb0 ( % 0 : $ Builtin . Int64 , % 1 : $ Builtin . RawPointer , % 2 : $ Builtin . RawPointer ) : <nl> return % 13 : $ ( ) <nl> } <nl> <nl> - / / Make sure that we handle partial_apply captured arguments correctly . <nl> - sil @ sil_combine_partial_apply_callee : $ @ convention ( thin ) ( @ in Builtin . NativeObject , @ inout Builtin . NativeObject , Builtin . NativeObject , @ owned Builtin . NativeObject , @ guaranteed Builtin . NativeObject ) - > ( ) <nl> - <nl> - / / CHECK - LABEL : sil @ sil_combine_partial_apply_caller : $ @ convention ( thin ) ( @ in Builtin . NativeObject , @ inout Builtin . NativeObject , Builtin . NativeObject , @ owned Builtin . NativeObject , @ guaranteed Builtin . NativeObject ) - > ( ) { <nl> - / / CHECK : bb0 ( [ [ ARG1 : % . * ] ] : $ * Builtin . NativeObject , [ [ ARG2 : % . * ] ] : $ * Builtin . NativeObject , [ [ ARG3 : % . * ] ] : $ Builtin . NativeObject , [ [ ARG4 : % . * ] ] : $ Builtin . NativeObject , [ [ ARG5 : % . * ] ] : $ Builtin . 
NativeObject ) : <nl> - / / CHECK - NEXT : strong_retain [ [ ARG3 ] ] <nl> - / / CHECK - NEXT : strong_retain [ [ ARG4 ] ] <nl> - / / CHECK - NEXT : strong_retain [ [ ARG5 ] ] <nl> - / / CHECK - NEXT : strong_release [ [ ARG3 ] ] <nl> - / / CHECK - NEXT : strong_release [ [ ARG4 ] ] <nl> - / / CHECK - NEXT : strong_release [ [ ARG5 ] ] <nl> - / / CHECK - NEXT : strong_release [ [ ARG4 ] ] <nl> - / / CHECK - NEXT : destroy_addr [ [ ARG1 ] ] <nl> - sil @ sil_combine_partial_apply_caller : $ @ convention ( thin ) ( @ in Builtin . NativeObject , @ inout Builtin . NativeObject , Builtin . NativeObject , @ owned Builtin . NativeObject , @ guaranteed Builtin . NativeObject ) - > ( ) { <nl> - bb0 ( % 0 : $ * Builtin . NativeObject , % 1 : $ * Builtin . NativeObject , % 2 : $ Builtin . NativeObject , % 3 : $ Builtin . NativeObject , % 4 : $ Builtin . NativeObject ) : <nl> - % 100 = function_ref @ sil_combine_partial_apply_callee : $ @ convention ( thin ) ( @ in Builtin . NativeObject , @ inout Builtin . NativeObject , Builtin . NativeObject , @ owned Builtin . NativeObject , @ guaranteed Builtin . NativeObject ) - > ( ) <nl> - % 101 = alloc_stack $ Builtin . NativeObject <nl> - copy_addr % 0 to [ initialization ] % 101 : $ * Builtin . NativeObject <nl> - strong_retain % 2 : $ Builtin . NativeObject <nl> - strong_retain % 3 : $ Builtin . NativeObject <nl> - strong_retain % 4 : $ Builtin . NativeObject <nl> - % 102 = partial_apply % 100 ( % 101 , % 1 , % 2 , % 3 , % 4 ) : $ @ convention ( thin ) ( @ in Builtin . NativeObject , @ inout Builtin . NativeObject , Builtin . NativeObject , @ owned Builtin . NativeObject , @ guaranteed Builtin . NativeObject ) - > ( ) <nl> - strong_release % 102 : $ @ callee_owned ( ) - > ( ) <nl> - strong_release % 3 : $ Builtin . NativeObject <nl> - destroy_addr % 0 : $ * Builtin . NativeObject <nl> - dealloc_stack % 101 : $ * Builtin . NativeObject <nl> - % 9999 = tuple ( ) <nl> - return % 9999 : $ ( ) <nl> - } <nl> - <nl> / / CHECK - LABEL : sil @ cmp_zext_peephole <nl> / / CHECK : bb0 ( [ [ Arg1 : % . * ] ] : $ Builtin . Word , [ [ Arg2 : % . * ] ] : $ Builtin . Word ) : <nl> / / CHECK : [ [ ZA1 : % . * ] ] = builtin " zextOrBitCast_Word_Int64 " ( [ [ Arg1 ] ] : $ Builtin . Word ) : $ Builtin . Int64 <nl> new file mode 100644 <nl> index 000000000000 . . c8d3551c0033 <nl> mmm / dev / null <nl> ppp b / test / SILOptimizer / sil_combine_apply . sil <nl> <nl> + / / RUN : % target - sil - opt - assume - parsing - unqualified - ownership - sil - enable - sil - verify - all % s - sil - combine - verify - skip - unreachable - must - be - last - sil - combine - disable - alloc - stack - opts | % FileCheck % s <nl> + <nl> + import Builtin <nl> + <nl> + / / / / / / / / / / / / / / / / / / <nl> + / / Declarations / / <nl> + / / / / / / / / / / / / / / / / / / <nl> + <nl> + sil @ unknown : $ @ convention ( thin ) ( ) - > ( ) <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / Tests for SILCombinerApply . / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + / / Make sure that we handle partial_apply captured arguments correctly . <nl> + / / <nl> + / / We use custom types here to make it easier to pattern match with FileCheck . <nl> + struct S1 { var x : Builtin . NativeObject } <nl> + struct S2 { var x : Builtin . NativeObject } <nl> + struct S3 { var x : Builtin . NativeObject } <nl> + struct S4 { var x : Builtin . NativeObject } <nl> + struct S5 { var x : Builtin . 
NativeObject } <nl> + struct S6 { var x : Builtin . NativeObject } <nl> + struct S7 { var x : Builtin . NativeObject } <nl> + struct S8 { var x : Builtin . NativeObject } <nl> + sil @ sil_combine_partial_apply_callee : $ @ convention ( thin ) ( @ in S1 , @ in S2 , @ in_guaranteed S3 , @ in_guaranteed S4 , @ inout S5 , S6 , @ owned S7 , @ guaranteed S8 ) - > ( ) <nl> + <nl> + / / * NOTE PLEASE READ * . If this test case looks funny to you , it is b / c partial <nl> + / / apply is funny . Specifically , even though a partial apply has the conventions <nl> + / / of the function on it , arguments to the partial apply ( that will be passed <nl> + / / off to the function ) must / always / be passed in at + 1 . This is because the <nl> + / / partial apply is building up a boxed aggregate to send off to the closed over <nl> + / / function . Of course when you call the function , the proper conventions will <nl> + / / be used . <nl> + / / <nl> + / / CHECK - LABEL : sil @ sil_combine_dead_partial_apply : $ @ convention ( thin ) ( @ in S2 , @ in S4 , @ inout S5 , S6 , @ owned S7 , @ guaranteed S8 ) - > ( ) { <nl> + / / CHECK : bb0 ( [ [ IN_ARG : % . * ] ] : $ * S2 , [ [ INGUARANTEED_ARG : % . * ] ] : $ * S4 , [ [ INOUT_ARG : % . * ] ] : $ * S5 , [ [ UNOWNED_ARG : % . * ] ] : $ S6 , [ [ OWNED_ARG : % . * ] ] : $ S7 , [ [ GUARANTEED_ARG : % . * ] ] : $ S8 ) : <nl> + / / <nl> + / / CHECK : function_ref unknown <nl> + / / CHECK : [ [ UNKNOWN_FUNC : % . * ] ] = function_ref @ unknown <nl> + / / CHECK - NEXT : [ [ IN_ADDRESS : % . * ] ] = alloc_stack $ S1 <nl> + / / CHECK - NEXT : [ [ INGUARANTEED_ADDRESS : % . * ] ] = alloc_stack $ S3 <nl> + / / <nl> + / / CHECK - NEXT : apply [ [ UNKNOWN_FUNC ] ] ( ) <nl> + / / <nl> + / / Then make sure that the destroys are placed after the destroy_value of the <nl> + / / partial_apply ( which is after this apply ) . . . <nl> + / / CHECK - NEXT : apply [ [ UNKNOWN_FUNC ] ] ( ) <nl> + / / <nl> + / / CHECK - NEXT : destroy_addr [ [ IN_ADDRESS ] ] <nl> + / / CHECK - NEXT : destroy_addr [ [ IN_ARG ] ] <nl> + / / CHECK - NEXT : destroy_addr [ [ INGUARANTEED_ADDRESS ] ] <nl> + / / CHECK - NEXT : destroy_addr [ [ INGUARANTEED_ARG ] ] <nl> + / / CHECK - NEXT : release_value [ [ UNOWNED_ARG ] ] <nl> + / / CHECK - NEXT : release_value [ [ OWNED_ARG ] ] <nl> + / / CHECK - NEXT : release_value [ [ GUARANTEED_ARG ] ] <nl> + / / <nl> + / / . . . but before the function epilog . <nl> + / / CHECK - NEXT : apply [ [ UNKNOWN_FUNC ] ] ( ) <nl> + / / CHECK - NEXT : dealloc_stack [ [ INGUARANTEED_ADDRESS ] ] <nl> + / / CHECK - NEXT : dealloc_stack [ [ IN_ADDRESS ] ] <nl> + / / CHECK - NEXT : tuple <nl> + / / CHECK - NEXT : return <nl> + / / CHECK - NEXT : } / / end sil function ' sil_combine_dead_partial_apply ' <nl> + sil @ sil_combine_dead_partial_apply : $ @ convention ( thin ) ( @ in S2 , @ in S4 , @ inout S5 , S6 , @ owned S7 , @ guaranteed S8 ) - > ( ) { <nl> + bb0 ( % 1 : $ * S2 , % 2 : $ * S4 , % 4 : $ * S5 , % 5 : $ S6 , % 6 : $ S7 , % 7 : $ S8 ) : <nl> + % 8 = function_ref @ unknown : $ @ convention ( thin ) ( ) - > ( ) <nl> + % 9 = function_ref @ sil_combine_partial_apply_callee : $ @ convention ( thin ) ( @ in S1 , @ in S2 , @ in_guaranteed S3 , @ in_guaranteed S4 , @ inout S5 , S6 , @ owned S7 , @ guaranteed S8 ) - > ( ) <nl> + <nl> + / / This is for the @ in alloc_stack case . <nl> + % 10 = alloc_stack $ S1 <nl> + / / This is for the @ in_guaranteed alloc_stack case . 
<nl> + % 11 = alloc_stack $ S3 <nl> + <nl> + / / Marker of space in between the alloc_stack and the partial_apply <nl> + apply % 8 ( ) : $ @ convention ( thin ) ( ) - > ( ) <nl> + <nl> + / / Now call the partial apply . We use the " unknown " function call after the <nl> + / / partial apply to ensure that we are truly placing releases at the partial <nl> + / / applies release rather than right afterwards . <nl> + % 102 = partial_apply % 9 ( % 10 , % 1 , % 11 , % 2 , % 4 , % 5 , % 6 , % 7 ) : $ @ convention ( thin ) ( @ in S1 , @ in S2 , @ in_guaranteed S3 , @ in_guaranteed S4 , @ inout S5 , S6 , @ owned S7 , @ guaranteed S8 ) - > ( ) <nl> + <nl> + / / Marker of space in between partial_apply and the release of % 102 . <nl> + apply % 8 ( ) : $ @ convention ( thin ) ( ) - > ( ) <nl> + <nl> + strong_release % 102 : $ @ callee_owned ( ) - > ( ) <nl> + <nl> + apply % 8 ( ) : $ @ convention ( thin ) ( ) - > ( ) <nl> + <nl> + / / Epilog . <nl> + <nl> + / / Cleanup the stack locations . <nl> + dealloc_stack % 11 : $ * S3 <nl> + dealloc_stack % 10 : $ * S1 <nl> + <nl> + % 9999 = tuple ( ) <nl> + return % 9999 : $ ( ) <nl> + } <nl>
|
[ sil - combine ] When deleting a dead partial_apply , insert a destroy for all non - trivial captured values .
|
apple/swift
|
5a2556eecabc51001d3601d066beb6c680a2be96
|
2017-07-07T00:01:43Z
|
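A toy C++ model of the cleanup logic the commit adds when a dead partial_apply is deleted: every non-trivial captured value gets a destroy, chosen by its value category. This is not SIL or the SILCombiner API; it only mirrors the dispatch order visible in the diff (skip inout and trivial values, destroy_addr for addresses, strong_release for reference-semantics objects, release_value otherwise), with invented types.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Invented descriptor for a value captured by the dead closure.
struct CapturedArg {
  std::string name;
  bool isInout = false;          // caller still owns it, nothing to destroy
  bool isTrivial = false;        // trivial types need no cleanup
  bool isAddress = false;        // indirect (@in / @in_guaranteed) arguments
  bool hasRefSemantics = false;  // single-reference object types
};

// Pick the cleanup instruction, mirroring the early-return dispatch in the diff.
std::string CleanupFor(const CapturedArg& a) {
  if (a.isInout || a.isTrivial) return "none";
  if (a.isAddress) return "destroy_addr " + a.name;
  if (a.hasRefSemantics) return "strong_release " + a.name;
  return "release_value " + a.name;
}

int main() {
  std::vector<CapturedArg> args = {
      {"%in_arg", false, false, true, false},
      {"%inout_arg", true, false, true, false},
      {"%class_arg", false, false, false, true},
      {"%struct_arg", false, false, false, false},
  };
  for (const auto& a : args)
    std::cout << a.name << " -> " << CleanupFor(a) << "\n";
  return 0;
}
```

As the diff's own comment stresses, partial_apply captures are passed at +1 regardless of the callee's conventions, which is why even @guaranteed and @in_guaranteed captures need a destroy here.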
mmm a / modules / nonfree / perf / perf_main . cpp <nl> ppp b / modules / nonfree / perf / perf_main . cpp <nl> <nl> static const char * impls [ ] = { <nl> # ifdef HAVE_CUDA <nl> " cuda " , <nl> + # endif <nl> + # ifdef HAVE_OPENCL <nl> + " ocl " , <nl> # endif <nl> " plain " <nl> } ; <nl> similarity index 66 % <nl> rename from modules / nonfree / perf / perf_surf . ocl . cpp <nl> rename to modules / nonfree / perf / perf_surf_ocl . cpp <nl> mmm a / modules / nonfree / perf / perf_surf . ocl . cpp <nl> ppp b / modules / nonfree / perf / perf_surf_ocl . cpp <nl> typedef perf : : TestBaseWithParam < std : : string > OCL_SURF ; <nl> " cv / detectors_descriptors_evaluation / images_datasets / leuven / img1 . png " , \ <nl> " stitching / a3 . png " <nl> <nl> - PERF_TEST_P ( OCL_SURF , DISABLED_with_data_transfer , testing : : Values ( SURF_IMAGES ) ) <nl> + # define OCL_TEST_CYCLE ( ) for ( ; startTimer ( ) , next ( ) ; cv : : ocl : : finish ( ) , stopTimer ( ) ) <nl> + <nl> + PERF_TEST_P ( OCL_SURF , with_data_transfer , testing : : Values ( SURF_IMAGES ) ) <nl> { <nl> string filename = getDataPath ( GetParam ( ) ) ; <nl> - Mat img = imread ( filename , IMREAD_GRAYSCALE ) ; <nl> - ASSERT_FALSE ( img . empty ( ) ) ; <nl> - <nl> - SURF_OCL d_surf ; <nl> - oclMat d_keypoints ; <nl> - oclMat d_descriptors ; <nl> - Mat cpu_kp ; <nl> - Mat cpu_dp ; <nl> + Mat src = imread ( filename , IMREAD_GRAYSCALE ) ; <nl> + ASSERT_FALSE ( src . empty ( ) ) ; <nl> <nl> + Mat cpu_kp , cpu_dp ; <nl> declare . time ( 60 ) ; <nl> <nl> - TEST_CYCLE ( ) <nl> + if ( getSelectedImpl ( ) = = " ocl " ) <nl> { <nl> - oclMat d_src ( img ) ; <nl> + SURF_OCL d_surf ; <nl> + oclMat d_keypoints , d_descriptors ; <nl> <nl> - d_surf ( d_src , oclMat ( ) , d_keypoints , d_descriptors ) ; <nl> + OCL_TEST_CYCLE ( ) <nl> + { <nl> + oclMat d_src ( src ) ; <nl> <nl> - d_keypoints . download ( cpu_kp ) ; <nl> - d_descriptors . download ( cpu_dp ) ; <nl> + d_surf ( d_src , oclMat ( ) , d_keypoints , d_descriptors ) ; <nl> + <nl> + d_keypoints . download ( cpu_kp ) ; <nl> + d_descriptors . download ( cpu_dp ) ; <nl> + } <nl> } <nl> + else if ( getSelectedImpl ( ) = = " plain " ) <nl> + { <nl> + cv : : SURF surf ; <nl> + std : : vector < cv : : KeyPoint > kp ; <nl> <nl> - SANITY_CHECK ( cpu_kp , 1 ) ; <nl> - SANITY_CHECK ( cpu_dp , 1 ) ; <nl> + TEST_CYCLE ( ) surf ( src , Mat ( ) , kp , cpu_dp ) ; <nl> + } <nl> + <nl> + SANITY_CHECK_NOTHING ( ) ; <nl> } <nl> <nl> - PERF_TEST_P ( OCL_SURF , DISABLED_without_data_transfer , testing : : Values ( SURF_IMAGES ) ) <nl> + PERF_TEST_P ( OCL_SURF , without_data_transfer , testing : : Values ( SURF_IMAGES ) ) <nl> { <nl> string filename = getDataPath ( GetParam ( ) ) ; <nl> - Mat img = imread ( filename , IMREAD_GRAYSCALE ) ; <nl> - ASSERT_FALSE ( img . empty ( ) ) ; <nl> - <nl> - SURF_OCL d_surf ; <nl> - oclMat d_keypoints ; <nl> - oclMat d_descriptors ; <nl> - oclMat d_src ( img ) ; <nl> + Mat src = imread ( filename , IMREAD_GRAYSCALE ) ; <nl> + ASSERT_FALSE ( src . empty ( ) ) ; <nl> <nl> + Mat cpu_kp , cpu_dp ; <nl> declare . 
time ( 60 ) ; <nl> <nl> - TEST_CYCLE ( ) d_surf ( d_src , oclMat ( ) , d_keypoints , d_descriptors ) ; <nl> + if ( getSelectedImpl ( ) = = " ocl " ) <nl> + { <nl> + SURF_OCL d_surf ; <nl> + oclMat d_keypoints , d_descriptors , d_src ( src ) ; <nl> + <nl> + OCL_TEST_CYCLE ( ) d_surf ( d_src , oclMat ( ) , d_keypoints , d_descriptors ) ; <nl> + } <nl> + else if ( getSelectedImpl ( ) = = " plain " ) <nl> + { <nl> + cv : : SURF surf ; <nl> + std : : vector < cv : : KeyPoint > kp ; <nl> + <nl> + TEST_CYCLE ( ) surf ( src , Mat ( ) , kp , cpu_dp ) ; <nl> + } <nl> <nl> - Mat cpu_kp ; <nl> - Mat cpu_dp ; <nl> - d_keypoints . download ( cpu_kp ) ; <nl> - d_descriptors . download ( cpu_dp ) ; <nl> - SANITY_CHECK ( cpu_kp , 1 ) ; <nl> - SANITY_CHECK ( cpu_dp , 1 ) ; <nl> + SANITY_CHECK_NOTHING ( ) ; <nl> } <nl> <nl> # endif / / HAVE_OPENCV_OCL <nl> similarity index 100 % <nl> rename from modules / nonfree / src / surf . ocl . cpp <nl> rename to modules / nonfree / src / surf_ocl . cpp <nl> similarity index 100 % <nl> rename from modules / nonfree / test / test_surf . ocl . cpp <nl> rename to modules / nonfree / test / test_surf_ocl . cpp <nl>
|
rewrote perf tests for SURF
|
opencv/opencv
|
ede5b23753f9946ba107237a3eaae006afd82b4f
|
2014-02-02T22:00:29Z
|
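The rewritten tests stop the timer only after cv::ocl::finish(), so queued asynchronous GPU work is included in each measured iteration (that is what the new OCL_TEST_CYCLE macro does). Below is a generic C++ sketch of that measurement pattern; device_finish() and run_kernel_async() are hypothetical stand-ins for the real OpenCL calls, not OpenCV APIs.

```cpp
#include <chrono>
#include <iostream>

// Stand-in for enqueuing asynchronous device work (e.g. a SURF kernel launch).
void run_kernel_async() { /* enqueue work on the device */ }

// Stand-in for cv::ocl::finish(): block until all queued device work completes.
void device_finish() { /* synchronize with the device queue */ }

int main() {
  using clock = std::chrono::steady_clock;
  const int iterations = 10;
  double total_ms = 0.0;

  for (int i = 0; i < iterations; ++i) {
    auto start = clock::now();
    run_kernel_async();
    device_finish();  // make sure async work falls inside the timed region
    auto stop = clock::now();
    total_ms += std::chrono::duration<double, std::milli>(stop - start).count();
  }

  std::cout << "mean iteration: " << total_ms / iterations << " ms\n";
  return 0;
}
```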
mmm a / toolsrc / include / vcpkg_Dependencies . h <nl> ppp b / toolsrc / include / vcpkg_Dependencies . h <nl> <nl> <nl> namespace vcpkg : : Dependencies <nl> { <nl> - enum class request_type <nl> + enum class RequestType <nl> { <nl> UNKNOWN , <nl> USER_REQUESTED , <nl> namespace vcpkg : : Dependencies <nl> struct remove_plan_action <nl> { <nl> remove_plan_action ( ) ; <nl> - remove_plan_action ( const remove_plan_type & plan_type , const request_type & request_type ) ; <nl> + remove_plan_action ( const remove_plan_type & plan_type , const RequestType & request_type ) ; <nl> remove_plan_action ( const remove_plan_action & ) = delete ; <nl> remove_plan_action ( remove_plan_action & & ) = default ; <nl> remove_plan_action & operator = ( const remove_plan_action & ) = delete ; <nl> namespace vcpkg : : Dependencies <nl> <nl> <nl> remove_plan_type plan_type ; <nl> - request_type request_type ; <nl> + RequestType request_type ; <nl> } ; <nl> <nl> struct package_spec_with_remove_plan <nl> mmm a / toolsrc / src / commands_remove . cpp <nl> ppp b / toolsrc / src / commands_remove . cpp <nl> namespace vcpkg : : Commands : : Remove <nl> { <nl> using Dependencies : : package_spec_with_remove_plan ; <nl> using Dependencies : : remove_plan_type ; <nl> - using Dependencies : : request_type ; <nl> - using Update : : outdated_package ; <nl> + using Dependencies : : RequestType ; <nl> + using Update : : OutdatedPackage ; <nl> <nl> static void delete_directory ( const fs : : path & directory ) <nl> { <nl> namespace vcpkg : : Commands : : Remove <nl> System : : println ( " The following packages will be removed : \ n % s " , <nl> Strings : : join ( " \ n " , remove , [ ] ( const package_spec_with_remove_plan * p ) <nl> { <nl> - if ( p - > plan . request_type = = Dependencies : : request_type : : AUTO_SELECTED ) <nl> + if ( p - > plan . request_type = = Dependencies : : RequestType : : AUTO_SELECTED ) <nl> { <nl> return " * " + p - > spec . toString ( ) ; <nl> } <nl> <nl> - if ( p - > plan . request_type = = Dependencies : : request_type : : USER_REQUESTED ) <nl> + if ( p - > plan . request_type = = Dependencies : : RequestType : : USER_REQUESTED ) <nl> { <nl> return " " + p - > spec . toString ( ) ; <nl> } <nl> namespace vcpkg : : Commands : : Remove <nl> <nl> const bool has_non_user_requested_packages = std : : find_if ( remove_plan . cbegin ( ) , remove_plan . cend ( ) , [ ] ( const package_spec_with_remove_plan & package ) - > bool <nl> { <nl> - return package . plan . request_type ! = request_type : : USER_REQUESTED ; <nl> + return package . plan . request_type ! = RequestType : : USER_REQUESTED ; <nl> } ) ! = remove_plan . cend ( ) ; <nl> <nl> if ( has_non_user_requested_packages ) <nl> mmm a / toolsrc / src / vcpkg_Dependencies . cpp <nl> ppp b / toolsrc / src / vcpkg_Dependencies . 
cpp <nl> namespace vcpkg : : Dependencies <nl> { <nl> } <nl> <nl> - remove_plan_action : : remove_plan_action ( ) : plan_type ( remove_plan_type : : UNKNOWN ) , request_type ( request_type : : UNKNOWN ) <nl> + remove_plan_action : : remove_plan_action ( ) : plan_type ( remove_plan_type : : UNKNOWN ) , request_type ( RequestType : : UNKNOWN ) <nl> { <nl> } <nl> <nl> - remove_plan_action : : remove_plan_action ( const remove_plan_type & plan_type , const Dependencies : : request_type & request_type ) : plan_type ( plan_type ) , request_type ( request_type ) <nl> + remove_plan_action : : remove_plan_action ( const remove_plan_type & plan_type , const Dependencies : : RequestType & request_type ) : plan_type ( plan_type ) , request_type ( request_type ) <nl> { <nl> } <nl> <nl> namespace vcpkg : : Dependencies <nl> const StatusParagraphs : : const_iterator it = status_db . find ( spec ) ; <nl> if ( it = = status_db . end ( ) | | ( * it ) - > state = = InstallState : : NOT_INSTALLED ) <nl> { <nl> - was_examined . emplace ( spec , remove_plan_action ( remove_plan_type : : NOT_INSTALLED , request_type : : USER_REQUESTED ) ) ; <nl> + was_examined . emplace ( spec , remove_plan_action ( remove_plan_type : : NOT_INSTALLED , RequestType : : USER_REQUESTED ) ) ; <nl> continue ; <nl> } <nl> <nl> namespace vcpkg : : Dependencies <nl> examine_stack . push_back ( an_installed_package . get ( ) - > package . spec ) ; <nl> } <nl> <nl> - const request_type request_type = specs_as_set . find ( spec ) ! = specs_as_set . end ( ) ? request_type : : USER_REQUESTED : request_type : : AUTO_SELECTED ; <nl> + const RequestType request_type = specs_as_set . find ( spec ) ! = specs_as_set . end ( ) ? RequestType : : USER_REQUESTED : RequestType : : AUTO_SELECTED ; <nl> was_examined . emplace ( spec , remove_plan_action ( remove_plan_type : : REMOVE , request_type ) ) ; <nl> } <nl> <nl>
|
request_type - > RequestType
|
microsoft/vcpkg
|
1d8099fd8cdad4ebbd8297a474bc7c7c0768660e
|
2017-04-04T23:44:44Z
|
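One plausible benefit of the PascalCase rename, sketched below: the plan struct keeps a member literally named request_type, which no longer shares its spelling with the type. The struct here is simplified, not vcpkg's real remove_plan_action.

```cpp
enum class RequestType { UNKNOWN, USER_REQUESTED, AUTO_SELECTED };

struct RemovePlanAction {
  // Member and type can now use the same words without colliding in spelling.
  RequestType request_type = RequestType::UNKNOWN;
};

int main() {
  RemovePlanAction action;
  action.request_type = RequestType::USER_REQUESTED;
  return action.request_type == RequestType::USER_REQUESTED ? 0 : 1;
}
```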
mmm a / googletest / test / BUILD . bazel <nl> ppp b / googletest / test / BUILD . bazel <nl> <nl> - # Copyright 2017 Google Inc . <nl> + # Copyright 2017 Google Inc . <nl> # All Rights Reserved . <nl> # <nl> # <nl> cc_test ( <nl> " / / : gtest " , <nl> ] , <nl> ) <nl> + # Py tests <nl> + <nl> + py_library ( <nl> + name = " gtest_test_utils " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_test_utils . py " ] , <nl> + <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_help_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_help_test_ . cc " ] , <nl> + deps = [ " / / : gtest_main " ] , <nl> + ) <nl> + py_test ( <nl> + name = " gtest_help_test " , <nl> + size = " small " , <nl> + srcs = [ " gtest_help_test . py " ] , <nl> + data = [ " : gtest_help_test_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_output_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_output_test_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_output_test " , <nl> + size = " small " , <nl> + srcs = [ " gtest_output_test . py " ] , <nl> + data = [ <nl> + " gtest_output_test_golden_lin . txt " , <nl> + " : gtest_output_test_ " , <nl> + ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_color_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_color_test_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + py_test ( <nl> + name = " gtest_color_test " , <nl> + size = " small " , <nl> + srcs = [ " gtest_color_test . py " ] , <nl> + data = [ " : gtest_color_test_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_env_var_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_env_var_test_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_env_var_test " , <nl> + size = " small " , <nl> + srcs = [ " gtest_env_var_test . py " ] , <nl> + data = [ " : gtest_env_var_test_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_filter_unittest_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_filter_unittest_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_filter_unittest " , <nl> + size = " small " , <nl> + srcs = [ " gtest_filter_unittest . py " ] , <nl> + data = [ " : gtest_filter_unittest_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_break_on_failure_unittest_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_break_on_failure_unittest_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_break_on_failure_unittest " , <nl> + size = " small " , <nl> + srcs = [ " gtest_break_on_failure_unittest . py " ] , <nl> + data = [ " : gtest_break_on_failure_unittest_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_throw_on_failure_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_throw_on_failure_test_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_throw_on_failure_test " , <nl> + size = " small " , <nl> + srcs = [ " gtest_throw_on_failure_test . 
py " ] , <nl> + data = [ " : gtest_throw_on_failure_test_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_list_tests_unittest_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_list_tests_unittest_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_list_tests_unittest " , <nl> + size = " small " , <nl> + srcs = [ " gtest_list_tests_unittest . py " ] , <nl> + data = [ " : gtest_list_tests_unittest_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_shuffle_test_ " , <nl> + srcs = [ " gtest_shuffle_test_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_shuffle_test " , <nl> + size = " small " , <nl> + srcs = [ " gtest_shuffle_test . py " ] , <nl> + data = [ " : gtest_shuffle_test_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_catch_exceptions_no_ex_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_catch_exceptions_test_ . cc " ] , <nl> + deps = [ " / / : gtest_main " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_catch_exceptions_ex_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_catch_exceptions_test_ . cc " ] , <nl> + copts = [ " - fexceptions " ] , <nl> + deps = [ " / / : gtest_main " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_catch_exceptions_test " , <nl> + size = " small " , <nl> + srcs = [ " gtest_catch_exceptions_test . py " ] , <nl> + data = [ <nl> + " : gtest_catch_exceptions_ex_test_ " , <nl> + " : gtest_catch_exceptions_no_ex_test_ " , <nl> + ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_xml_output_unittest_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_xml_output_unittest_ . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " gtest_no_test_unittest " , <nl> + size = " small " , <nl> + srcs = [ " gtest_no_test_unittest . cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_xml_output_unittest " , <nl> + size = " small " , <nl> + srcs = [ <nl> + " gtest_xml_output_unittest . py " , <nl> + " gtest_xml_test_utils . py " , <nl> + ] , <nl> + data = [ <nl> + # We invoke gtest_no_test_unittest to verify the XML output <nl> + # when the test program contains no test definition . <nl> + " : gtest_no_test_unittest " , <nl> + " : gtest_xml_output_unittest_ " , <nl> + ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_xml_outfile1_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_xml_outfile1_test_ . cc " ] , <nl> + deps = [ " / / : gtest_main " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_xml_outfile2_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_xml_outfile2_test_ . cc " ] , <nl> + deps = [ " / / : gtest_main " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_xml_outfiles_test " , <nl> + size = " small " , <nl> + srcs = [ <nl> + " gtest_xml_outfiles_test . py " , <nl> + " gtest_xml_test_utils . py " , <nl> + ] , <nl> + data = [ <nl> + " : gtest_xml_outfile1_test_ " , <nl> + " : gtest_xml_outfile2_test_ " , <nl> + ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " gtest_uninitialized_test_ " , <nl> + testonly = 1 , <nl> + srcs = [ " gtest_uninitialized_test_ . 
cc " ] , <nl> + deps = [ " / / : gtest " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " gtest_uninitialized_test " , <nl> + size = " medium " , <nl> + srcs = [ " gtest_uninitialized_test . py " ] , <nl> + data = [ " : gtest_uninitialized_test_ " ] , <nl> + deps = [ " : gtest_test_utils " ] , <nl> + ) <nl>
|
Merge pull request from gennadiycivil / master
|
google/googletest
|
051053054df6d2dbedd5f572ae288f2f82a289b7
|
2018-01-18T19:52:37Z
|
mmm a / language / English / strings . xml <nl> ppp b / language / English / strings . xml <nl> <nl> < string id = " 10018 " > Settings - Network < / string > <nl> < string id = " 10019 " > Settings - Appearance < / string > <nl> < string id = " 10020 " > Scripts < / string > <nl> + < string id = " 10021 " > Web Browser < / string > <nl> <nl> < string id = " 10028 " > Videos / Playlist < / string > <nl> < string id = " 10034 " > Settings - Profiles < / string > <nl> diff - - git a / language / Spanish ( Mexico ) / strings . xml b / language / Spanish ( Mexico ) / strings . xml <nl> mmm a / language / Spanish ( Mexico ) / strings . xml <nl> ppp b / language / Spanish ( Mexico ) / strings . xml <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " utf - 8 " standalone = " yes " ? > <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " standalone = " yes " ? > <nl> < ! - - Language file translated with Team XBMC Translator - - > <nl> < ! - - Translator : Josue Jeriel Camargo Cruz - - > <nl> < ! - - Email : josue . camargo @ gmail . com - - > <nl> <nl> < string id = " 10018 " > Configuración - Red < / string > <nl> < string id = " 10019 " > Configuración - Apariencia < / string > <nl> < string id = " 10020 " > Scripts < / string > <nl> + < string id = " 10021 " > Navegador Web < / string > <nl> < string id = " 10028 " > Vídeos / Lista de reproducción < / string > <nl> < string id = " 10034 " > Configuración - & gt ; Perfiles < / string > <nl> < string id = " 10100 " > Cuadro de diálogo Sí / No < / string > <nl> mmm a / language / Spanish / strings . xml <nl> ppp b / language / Spanish / strings . xml <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " utf - 8 " standalone = " yes " ? > <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " standalone = " yes " ? > <nl> < ! - - Language file translated with Team XBMC Translator - - > <nl> < ! - - Translator : A . Soto - - > <nl> < ! - - Revisado por MaDDoGo - - > <nl> <nl> < string id = " 10018 " > Configuración - & gt ; Red < / string > <nl> < string id = " 10019 " > Configuración - & gt ; Apariencia < / string > <nl> < string id = " 10020 " > Scripts < / string > <nl> + < string id = " 10021 " > Navegador Web < / string > <nl> < string id = " 10028 " > Vídeos / Lista de reproducción < / string > <nl> < string id = " 10034 " > Configuración - & gt ; Perfiles < / string > <nl> < string id = " 10100 " > Cuadro de diálogo Sí / No < / string > <nl>
|
Add string for web browser .
|
xbmc/xbmc
|
e6183211053e66553fc01c272eabbc018c0b0720
|
2010-04-02T05:58:23Z
|
mmm a / src / snapshot / snapshot - common . cc <nl> ppp b / src / snapshot / snapshot - common . cc <nl> v8 : : StartupData Snapshot : : CreateSnapshotBlob ( <nl> } <nl> <nl> # ifdef V8_EMBEDDED_BUILTINS <nl> - # ifdef DEBUG <nl> namespace { <nl> bool BuiltinAliasesOffHeapTrampolineRegister ( Isolate * isolate , Code * code ) { <nl> DCHECK ( Builtins : : IsOffHeapSafe ( code - > builtin_index ( ) ) ) ; <nl> bool BuiltinAliasesOffHeapTrampolineRegister ( Isolate * isolate , Code * code ) { <nl> return false ; <nl> } <nl> } / / namespace <nl> - # endif / / DEBUG <nl> <nl> / / static <nl> EmbeddedData EmbeddedData : : FromIsolate ( Isolate * isolate ) { <nl> EmbeddedData EmbeddedData : : FromIsolate ( Isolate * isolate ) { <nl> Code * code = builtins - > builtin ( i ) ; <nl> <nl> if ( Builtins : : IsOffHeapSafe ( i ) ) { <nl> - # ifdef DEBUG <nl> / / Sanity - check that the given builtin is process - independent and does not <nl> / / use the trampoline register in its calling convention . <nl> if ( ! code - > IsProcessIndependent ( ) ) { <nl> EmbeddedData EmbeddedData : : FromIsolate ( Isolate * isolate ) { <nl> fprintf ( stderr , " % s aliases the off - heap trampoline register . \ n " , <nl> Builtins : : name ( i ) ) ; <nl> } <nl> - # endif <nl> <nl> uint32_t length = static_cast < uint32_t > ( code - > instruction_size ( ) ) ; <nl> <nl>
|
[ builtins ] Verify process - independence in release mode
|
v8/v8
|
f8fb4a5c747739ab8b5941cd86e611b2be0e1626
|
2018-03-15T16:58:12Z
|
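The commit removes the #ifdef DEBUG guards so the process-independence and trampoline-register checks also run when building release snapshots. A rough stand-alone sketch of that pattern, an always-on validation pass that reports offenders to stderr instead of relying on DCHECK; the predicate and builtin count below are invented.

```cpp
#include <cstdio>

// Hypothetical predicate; the real check inspects the builtin's relocation info.
bool IsProcessIndependent(int builtin_index) { return builtin_index != 3; }

int main() {
  const int builtin_count = 6;
  bool saw_unembeddable = false;
  for (int i = 0; i < builtin_count; ++i) {
    if (!IsProcessIndependent(i)) {
      // Runs in every build mode, not just debug builds.
      std::fprintf(stderr, "builtin %d is not process-independent\n", i);
      saw_unembeddable = true;
    }
  }
  return saw_unembeddable ? 1 : 0;
}
```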
mmm a / Telegram / SourceFiles / platform / linux / main_window_linux . cpp <nl> ppp b / Telegram / SourceFiles / platform / linux / main_window_linux . cpp <nl> For license and copyright information please follow this link : <nl> # include " platform / linux / linux_gsd_media_keys . h " <nl> # endif / / ! DESKTOP_APP_DISABLE_DBUS_INTEGRATION <nl> # include " base / call_delayed . h " <nl> + # include " ui / widgets / popup_menu . h " <nl> # include " ui / widgets / input_fields . h " <nl> # include " facades . h " <nl> # include " app . h " <nl> For license and copyright information please follow this link : <nl> # include < QtGui / QWindow > <nl> <nl> # ifndef DESKTOP_APP_DISABLE_DBUS_INTEGRATION <nl> + # include < QtCore / QTemporaryFile > <nl> # include < QtDBus / QDBusInterface > <nl> # include < QtDBus / QDBusConnection > <nl> # include < QtDBus / QDBusConnectionInterface > <nl> For license and copyright information please follow this link : <nl> # include < QtDBus / QDBusError > <nl> # include < QtDBus / QDBusMetaType > <nl> <nl> - # include < xcb / xcb . h > <nl> + # include < statusnotifieritem . h > <nl> + # include < dbusmenuexporter . h > <nl> <nl> extern " C " { <nl> # undef signals <nl> mmm a / Telegram / SourceFiles / platform / linux / main_window_linux . h <nl> ppp b / Telegram / SourceFiles / platform / linux / main_window_linux . h <nl> For license and copyright information please follow this link : <nl> <nl> # include " platform / platform_main_window . h " <nl> <nl> - # include " ui / widgets / popup_menu . h " <nl> + namespace Ui { <nl> + class PopupMenu ; <nl> + } / / namespace Ui <nl> <nl> # ifndef DESKTOP_APP_DISABLE_DBUS_INTEGRATION <nl> - # include " statusnotifieritem . h " <nl> - # include < QtCore / QTemporaryFile > <nl> # include < QtDBus / QDBusObjectPath > <nl> - # include < dbusmenuexporter . h > <nl> + <nl> + class QTemporaryFile ; <nl> + class DBusMenuExporter ; <nl> + class StatusNotifierItem ; <nl> <nl> typedef void * gpointer ; <nl> typedef char gchar ; <nl> class MainWindow : public Window : : MainWindow { <nl> # ifndef DESKTOP_APP_DISABLE_DBUS_INTEGRATION <nl> StatusNotifierItem * _sniTrayIcon = nullptr ; <nl> GDBusProxy * _sniDBusProxy = nullptr ; <nl> - std : : unique_ptr < QTemporaryFile > _trayIconFile = nullptr ; <nl> + std : : unique_ptr < QTemporaryFile > _trayIconFile ; <nl> <nl> bool _appMenuSupported = false ; <nl> DBusMenuExporter * _mainMenuExporter = nullptr ; <nl>
|
Use more forward declarations in main_window_linux
|
telegramdesktop/tdesktop
|
e594b75f4c3856880c9dd1bcee499da4b294992a
|
2020-12-28T13:00:05Z
|
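The same technique in a stand-alone C++ sketch: the header only forward-declares the heavy types and stores them behind pointers, while the full definitions (and the corresponding #includes) stay in the implementation file. Names are illustrative, and one file stands in for the header/source pair here.

```cpp
#include <memory>

// --- main_window.h (sketch): forward declarations instead of includes ---
class PopupMenu;
class TrayIconFile;

class MainWindow {
 public:
  MainWindow();
  ~MainWindow();  // must be defined where the pointee types are complete
 private:
  std::unique_ptr<PopupMenu> _menu;
  std::unique_ptr<TrayIconFile> _trayIconFile;
};

// --- main_window.cpp (sketch): only this file needs the full definitions ---
class PopupMenu { /* ... */ };
class TrayIconFile { /* ... */ };

MainWindow::MainWindow() = default;
MainWindow::~MainWindow() = default;

int main() {
  MainWindow window;
  return 0;
}
```

The out-of-line destructor is the detail that makes std::unique_ptr members work with incomplete types, which is why the header declares ~MainWindow() but defines it in the source file.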
mmm a / hphp / tools / benchy / benchy . py <nl> ppp b / hphp / tools / benchy / benchy . py <nl> def main ( ) : <nl> parser = argparse . ArgumentParser ( description = ' Convenience wrapper for ' <nl> ' benchmarking multiple branches . ' ) <nl> parser . add_argument ( ' - - no - build ' , action = ' store_const ' , const = True , <nl> - help = ' Don \ ' t clean and build . ' ) <nl> + default = False , help = ' Don \ ' t clean and build . ' ) <nl> parser . add_argument ( ' - - suite ' , action = ' append ' , type = str , <nl> help = ' Run any suite that matches the provided regex ' ) <nl> parser . add_argument ( ' - - benchmark ' , action = ' append ' , type = str , <nl> def main ( ) : <nl> default = False , help = ' Spit out the results as Remarkup ' ) <nl> parser . add_argument ( ' - - perf ' , action = ' store_const ' , const = True , <nl> default = False , help = ' Run perf for each VM invocation . ' ) <nl> + parser . add_argument ( ' - - re - print ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Re - print previous results without ' <nl> + ' re - building or re - running ' <nl> + ' benchmarks . ' ) <nl> parser . add_argument ( ' - v ' , ' - - verbose ' , type = int , default = 0 , <nl> help = ' Increase verbosity ' ) <nl> args = parser . parse_args ( ) <nl> def main ( ) : <nl> set_verbose_level ( args . verbose ) <nl> inner = args . inner <nl> outer = args . outer <nl> - do_build = args . no_build is None <nl> + should_build = not ( args . no_build or args . re_print ) <nl> + should_run_benchmarks = not args . re_print <nl> run_perf = args . perf <nl> output_mode = ' remarkup ' if args . remarkup else ' terminal ' <nl> <nl> branches = parse_branches ( args . branch ) <nl> <nl> - if do_build : <nl> + if should_build : <nl> build_branches ( branches ) <nl> - run_benchmarks ( included_suites , <nl> - included_benchmarks , <nl> - run_perf , inner , outer , branches ) <nl> + if should_run_benchmarks : <nl> + run_benchmarks ( included_suites , <nl> + included_benchmarks , <nl> + run_perf , inner , outer , branches ) <nl> process_results ( branches , output_mode ) <nl> <nl> <nl>
|
Enable re - printing of results without re - building or re - running
|
facebook/hhvm
|
ee4aade8d7ee1e0e6f6f0c75860665d09668ece0
|
2014-10-30T20:30:24Z
|
mmm a / samples / android / marsSampleChat / app / src / main / java / com / tencent / mars / sample / ConversationActivity . java <nl> ppp b / samples / android / marsSampleChat / app / src / main / java / com / tencent / mars / sample / ConversationActivity . java <nl> public void onPreEncode ( Main . ConversationListRequest req ) { <nl> <nl> @ Override <nl> public void onPostDecode ( Main . ConversationListResponse response ) { <nl> - / / update data list only <nl> - if ( response . list = = null ) { <nl> - Log . i ( TAG , " getconvlist : empty response list " ) ; <nl> - progressBar . setVisibility ( View . VISIBLE ) ; <nl> - return ; <nl> - } <nl> - else if ( response . list . length = = 0 ) { <nl> - Log . i ( TAG , " getconvlist : empty response list " ) ; <nl> - progressBar . setVisibility ( View . VISIBLE ) ; <nl> - return ; <nl> - } <nl> <nl> - for ( Main . Conversation conv : response . list ) { <nl> - dataList . add ( new Conversation ( conv . name , conv . topic , conv . notice ) ) ; <nl> - } <nl> } <nl> <nl> @ Override <nl> public void onTaskEnd ( ) { <nl> <nl> @ Override <nl> public void run ( ) { <nl> + if ( response ! = null ) { <nl> + for ( Main . Conversation conv : response . list ) { <nl> + dataList . add ( new Conversation ( conv . name , conv . topic , conv . notice ) ) ; <nl> + } <nl> + } <nl> + <nl> if ( ! dataList . isEmpty ( ) ) { <nl> progressBar . setVisibility ( View . INVISIBLE ) ; <nl> conversationListAdapter . list . clear ( ) ; <nl>
|
ui operation shouldn ' t be in stn thread , like req2Buf buf2Resp function call
|
Tencent/mars
|
b66da6ec70e2fd1af5a69feddd53510d1702b56d
|
2017-01-05T07:33:22Z
|
mmm a / modules / python / common . cmake <nl> ppp b / modules / python / common . cmake <nl> endforeach ( mp ) <nl> <nl> # module blacklist <nl> ocv_list_filterout ( candidate_deps " ^ opencv_cud ( a | ev ) " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_adas $ " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_face $ " ) <nl> ocv_list_filterout ( candidate_deps " ^ opencv_matlab $ " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_tracking $ " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_optflow $ " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_bgsegm $ " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_xfeatures2d $ " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_ximgproc $ " ) <nl> - ocv_list_filterout ( candidate_deps " ^ opencv_xphoto $ " ) <nl> ocv_list_filterout ( candidate_deps " ^ opencv_ts $ " ) <nl> <nl> ocv_add_module ( $ { MODULE_NAME } BINDINGS OPTIONAL $ { candidate_deps } ) <nl> ocv_list_filterout ( opencv_hdrs " . h $ " ) <nl> ocv_list_filterout ( opencv_hdrs " cuda " ) <nl> ocv_list_filterout ( opencv_hdrs " cudev " ) <nl> ocv_list_filterout ( opencv_hdrs " opencv2 / objdetect / detection_based_tracker . hpp " ) <nl> - ocv_list_filterout ( opencv_hdrs " opencv2 / optim . hpp " ) <nl> <nl> set ( cv2_generated_hdrs <nl> " $ { CMAKE_CURRENT_BINARY_DIR } / pyopencv_generated_include . h " <nl>
|
removing modules from blacklist
|
opencv/opencv
|
40d0f853d6d27dc7e2fe7f9653df3767e64b8669
|
2014-08-19T15:19:11Z
|
mmm a / modules / dnn / src / op_halide . cpp <nl> ppp b / modules / dnn / src / op_halide . cpp <nl> HalideBackendNode : : HalideBackendNode ( const Ptr < HalideBackendNode > & base , <nl> HalideBackendWrapper : : HalideBackendWrapper ( int targetId , const cv : : Mat & m ) <nl> : BackendWrapper ( DNN_BACKEND_HALIDE , targetId ) <nl> { <nl> + managesDevMemory = true ; <nl> buffer = wrapToHalideBuffer ( m ) ; <nl> if ( targetId = = DNN_TARGET_CPU ) <nl> { <nl> HalideBackendWrapper : : HalideBackendWrapper ( const Ptr < BackendWrapper > & base , <nl> const MatShape & shape ) <nl> : BackendWrapper ( DNN_BACKEND_HALIDE , base - > targetId ) <nl> { <nl> + managesDevMemory = false ; <nl> int w , h , c , n ; <nl> getCanonicalSize ( shape , & w , & h , & c , & n ) ; <nl> Halide : : Buffer < float > baseBuffer = halideBuffer ( base ) ; <nl> HalideBackendWrapper : : HalideBackendWrapper ( const Ptr < BackendWrapper > & base , <nl> } <nl> } <nl> <nl> + HalideBackendWrapper : : ~ HalideBackendWrapper ( ) <nl> + { <nl> + if ( buffer . has_device_allocation ( ) & & ! managesDevMemory ) <nl> + { <nl> + buffer . raw_buffer ( ) - > device = 0 ; <nl> + buffer . raw_buffer ( ) - > device_interface = 0 ; <nl> + buffer . set_device_dirty ( false ) ; <nl> + } <nl> + } <nl> + <nl> void HalideBackendWrapper : : copyToHost ( ) <nl> { <nl> CV_Assert ( targetId = = DNN_TARGET_CPU | | buffer . device_dirty ( ) ) ; <nl> mmm a / modules / dnn / src / op_halide . hpp <nl> ppp b / modules / dnn / src / op_halide . hpp <nl> namespace dnn <nl> <nl> HalideBackendWrapper ( const Ptr < BackendWrapper > & base , const MatShape & shape ) ; <nl> <nl> + ~ HalideBackendWrapper ( ) ; <nl> + <nl> virtual void copyToHost ( ) ; <nl> <nl> Halide : : Buffer < float > buffer ; <nl> + <nl> + private : <nl> + bool managesDevMemory ; <nl> } ; <nl> # endif / / HAVE_HALIDE <nl> <nl>
|
Fix Halide buffer behavior in case of OpenCL device memory allocation
|
opencv/opencv
|
4e28c00e7bb5f2e7c49f48ddf3cdb897828ea9a7
|
2017-08-17T10:27:54Z
|
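A toy C++ model of the fix: the wrapper records whether it owns the device allocation, and wrappers created as views over another wrapper's buffer detach in their destructor instead of freeing memory they do not own. Types are invented; the real change sets a managesDevMemory flag on HalideBackendWrapper and clears the Halide buffer's device fields.

```cpp
#include <iostream>

// Invented stand-in for a device allocation shared by several wrappers.
struct DeviceBuffer {
  bool allocated = false;
};

class BufferWrapper {
 public:
  BufferWrapper(DeviceBuffer* buf, bool ownsDevice)
      : buf_(buf), ownsDevice_(ownsDevice) {}

  ~BufferWrapper() {
    if (buf_ == nullptr || !buf_->allocated) return;
    if (ownsDevice_) {
      buf_->allocated = false;  // the owner performs the real device free
      std::cout << "device memory freed\n";
    } else {
      buf_ = nullptr;           // a view just detaches, avoiding a double free
    }
  }

 private:
  DeviceBuffer* buf_;
  bool ownsDevice_;
};

int main() {
  DeviceBuffer buffer{true};
  {
    BufferWrapper owner(&buffer, /*ownsDevice=*/true);
    BufferWrapper view(&buffer, /*ownsDevice=*/false);  // destroyed first, must not free
  }
  return 0;
}
```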
mmm a / depends / packages / qrencode . mk <nl> ppp b / depends / packages / qrencode . mk <nl> $ ( package ) _file_name = $ ( package ) - $ ( $ ( package ) _version ) . tar . bz2 <nl> $ ( package ) _sha256_hash = efe5188b1ddbcbf98763b819b146be6a90481aac30cfc8d858ab78a19cde1fa5 <nl> <nl> define $ ( package ) _set_vars <nl> - $ ( package ) _config_opts = - - disable - shared - without - tools - - disable - sdltest <nl> + $ ( package ) _config_opts = - - disable - shared - - without - tools - - without - tests - - disable - sdltest <nl> + $ ( package ) _config_opts + = - - disable - gprof - - disable - gcov - - disable - mudflap <nl> $ ( package ) _config_opts_linux = - - with - pic <nl> endef <nl> <nl>
|
depends : qrencode : configure flags cleanup
|
bitcoin/bitcoin
|
6a8ada3a4f67affcf0ef7452e206083d7b58b2bc
|
2019-08-14T12:34:28Z
|
mmm a / guilib / Key . h <nl> ppp b / guilib / Key . h <nl> class CKey <nl> <nl> void SetHeld ( unsigned int held ) ; <nl> unsigned int GetHeld ( ) const ; <nl> + <nl> + enum Modifier { <nl> + MODIFIER_CTRL = 0x00010000 , <nl> + MODIFIER_SHIFT = 0x00020000 , <nl> + MODIFIER_ALT = 0x00040000 <nl> + } ; <nl> + <nl> private : <nl> uint32_t m_buttonCode ; <nl> uint8_t m_leftTrigger ; <nl> mmm a / xbmc / Application . cpp <nl> ppp b / xbmc / Application . cpp <nl> bool CApplication : : OnKey ( CKey & key ) <nl> if ( control ) <nl> { <nl> if ( control - > GetControlType ( ) = = CGUIControl : : GUICONTROL_EDIT | | <nl> - ( control - > IsContainer ( ) & & g_Keyboard . GetShift ( ) ) ) <nl> + ( control - > IsContainer ( ) & & g_Keyboard . GetShift ( ) & & ! ( g_Keyboard . GetCtrl ( ) | | g_Keyboard . GetAlt ( ) | | g_Keyboard . GetRAlt ( ) ) ) ) <nl> useKeyboard = true ; <nl> } <nl> } <nl> bool CApplication : : ProcessKeyboard ( ) <nl> else <nl> keyID = KEY_UNICODE ; <nl> / / CLog : : Log ( LOGDEBUG , " Keyboard : time = % i key = % i " , CTimeUtils : : GetFrameTime ( ) , vkey ) ; <nl> + <nl> + / / Check what modifiers are held down and update the key code as appropriate <nl> + if ( g_Keyboard . GetCtrl ( ) ) <nl> + keyID | = CKey : : MODIFIER_CTRL ; <nl> + if ( g_Keyboard . GetShift ( ) ) <nl> + keyID | = CKey : : MODIFIER_SHIFT ; <nl> + if ( g_Keyboard . GetAlt ( ) ) <nl> + keyID | = CKey : : MODIFIER_ALT ; <nl> + <nl> + / / Create a key object with the keypress data and pass it to OnKey to be executed <nl> CKey key ( keyID ) ; <nl> key . SetHeld ( g_Keyboard . KeyHeld ( ) ) ; <nl> return OnKey ( key ) ; <nl> mmm a / xbmc / ButtonTranslator . cpp <nl> ppp b / xbmc / ButtonTranslator . cpp <nl> uint32_t CButtonTranslator : : TranslateKeyboardString ( const char * szButton ) <nl> <nl> uint32_t CButtonTranslator : : TranslateKeyboardButton ( TiXmlElement * pButton ) <nl> { <nl> + uint32_t button_id = 0 ; <nl> const char * szButton = pButton - > Value ( ) ; <nl> <nl> if ( ! szButton ) return 0 ; <nl> uint32_t CButtonTranslator : : TranslateKeyboardButton ( TiXmlElement * pButton ) <nl> { <nl> int id = 0 ; <nl> if ( pButton - > QueryIntAttribute ( " id " , & id ) = = TIXML_SUCCESS ) <nl> - return ( uint32_t ) id ; <nl> + button_id = ( uint32_t ) id ; <nl> else <nl> CLog : : Log ( LOGERROR , " Keyboard Translator : ` key ' button has no id " ) ; <nl> } <nl> else <nl> { <nl> - return TranslateKeyboardString ( szButton ) ; <nl> + button_id = TranslateKeyboardString ( szButton ) ; <nl> } <nl> - return 0 ; <nl> + <nl> + / / Process the ctrl / shift / alt modifiers <nl> + CStdString strMod ; <nl> + if ( pButton - > QueryValueAttribute ( " mod " , & strMod ) = = TIXML_SUCCESS ) <nl> + { <nl> + strMod . ToLower ( ) ; <nl> + <nl> + CStdStringArray modArray ; <nl> + StringUtils : : SplitString ( strMod , " , " , modArray ) ; <nl> + for ( unsigned int i = 0 ; i < modArray . size ( ) ; i + + ) <nl> + { <nl> + CStdString & substr = modArray [ i ] ; <nl> + substr . Trim ( ) ; <nl> + <nl> + if ( substr = = " ctrl " | | substr = = " control " ) <nl> + button_id | = CKey : : MODIFIER_CTRL ; <nl> + else if ( substr = = " shift " ) <nl> + button_id | = CKey : : MODIFIER_SHIFT ; <nl> + else if ( substr = = " alt " ) <nl> + button_id | = CKey : : MODIFIER_ALT ; <nl> + else <nl> + CLog : : Log ( LOGERROR , " Keyboard Translator : Unknown key modifier % s in % s " , substr . c_str ( ) , strMod . 
c_str ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + return button_id ; <nl> } <nl> <nl> void CButtonTranslator : : Clear ( ) <nl> mmm a / xbmc / KeyboardStat . cpp <nl> ppp b / xbmc / KeyboardStat . cpp <nl> struct XBMC_KeyMapping <nl> WCHAR Unicode ; <nl> } ; <nl> <nl> + / / Convert control keypresses e . g . ctrl - A from 0x01 to 0x41 <nl> + static XBMC_KeyMapping g_mapping_ctrlkeys [ ] = <nl> + { { 0x61 , 0x41 , XBMCK_a , XBMCK_a } <nl> + , { 0x62 , 0x42 , XBMCK_b , XBMCK_b } <nl> + , { 0x63 , 0x43 , XBMCK_c , XBMCK_c } <nl> + , { 0x64 , 0x44 , XBMCK_d , XBMCK_d } <nl> + , { 0x65 , 0x45 , XBMCK_e , XBMCK_e } <nl> + , { 0x66 , 0x46 , XBMCK_f , XBMCK_f } <nl> + , { 0x67 , 0x47 , XBMCK_g , XBMCK_g } <nl> + , { 0x68 , 0x48 , XBMCK_h , XBMCK_h } <nl> + , { 0x69 , 0x49 , XBMCK_i , XBMCK_i } <nl> + , { 0x6a , 0x4a , XBMCK_j , XBMCK_j } <nl> + , { 0x6b , 0x4b , XBMCK_k , XBMCK_k } <nl> + , { 0x6c , 0x4c , XBMCK_l , XBMCK_l } <nl> + , { 0x6d , 0x4d , XBMCK_m , XBMCK_m } <nl> + , { 0x6e , 0x4e , XBMCK_n , XBMCK_n } <nl> + , { 0x6f , 0x4f , XBMCK_o , XBMCK_o } <nl> + , { 0x70 , 0x50 , XBMCK_p , XBMCK_p } <nl> + , { 0x71 , 0x51 , XBMCK_q , XBMCK_q } <nl> + , { 0x72 , 0x52 , XBMCK_r , XBMCK_r } <nl> + , { 0x73 , 0x53 , XBMCK_s , XBMCK_s } <nl> + , { 0x74 , 0x54 , XBMCK_t , XBMCK_t } <nl> + , { 0x75 , 0x55 , XBMCK_u , XBMCK_u } <nl> + , { 0x76 , 0x56 , XBMCK_v , XBMCK_v } <nl> + , { 0x77 , 0x57 , XBMCK_w , XBMCK_w } <nl> + , { 0x78 , 0x58 , XBMCK_x , XBMCK_x } <nl> + , { 0x79 , 0x59 , XBMCK_y , XBMCK_y } <nl> + , { 0x7a , 0x5a , XBMCK_z , XBMCK_z } <nl> + } ; <nl> + <nl> / / based on the evdev mapped scancodes in / user / share / X11 / xkb / keycodes <nl> static XBMC_KeyMapping g_mapping_evdev [ ] = <nl> { { 121 , 0xad } / / Volume mute <nl> void CKeyboardStat : : Update ( XBMC_Event & event ) <nl> else if ( event . key . keysym . unicode = = ' " ' ) { m_VKey = 0xee ; m_cAscii = ' " ' ; } <nl> else if ( event . key . keysym . unicode = = ' \ ' ' ) { m_VKey = 0xee ; m_cAscii = ' \ ' ' ; } <nl> <nl> + / / For control key combinations , e . g . ctrl - P , the UNICODE gets set <nl> + / / to 1 for ctrl - A , 2 for ctrl - B etc . This mapping sets the UNICODE <nl> + / / back to ' a ' , ' b ' , etc . <nl> + / / It isn ' t clear to me if this applies to Linux and Mac as well as <nl> + / / Windows . <nl> + if ( m_bCtrl ) <nl> + { <nl> + if ( ! m_VKey & & ! m_cAscii ) <nl> + LookupKeyMapping ( & m_VKey , NULL , & m_wUnicode <nl> + , event . key . keysym . sym <nl> + , g_mapping_ctrlkeys <nl> + , sizeof ( g_mapping_ctrlkeys ) / sizeof ( g_mapping_ctrlkeys [ 0 ] ) ) ; <nl> + } <nl> <nl> / * Check for standard non printable keys * / <nl> if ( ! m_VKey & & ! m_cAscii ) <nl>
|
Updates to support key modifiers ctrl, shift and alt in the keymap. Tested on Windows and briefly on Linux. Modified Key.h, Application.cpp, ButtonTranslator.cpp and KeyboardStat.cpp. John Rennie 18/12/2009.
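The keymap change above folds the ctrl/shift/alt state into the high bits of the button code, so a plain keypress and a modified keypress translate to different ids. A minimal C++ sketch of that encoding follows; the enum values mirror CKey::Modifier from the diff, while ApplyModifiers and the sample key code are illustrative assumptions, not XBMC code.

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors the CKey::Modifier values added in the diff.
enum Modifier : uint32_t {
  MODIFIER_CTRL  = 0x00010000,
  MODIFIER_SHIFT = 0x00020000,
  MODIFIER_ALT   = 0x00040000,
};

// OR the held modifiers into the key id; the low bits stay the raw key code,
// the high bits carry the modifier state used by the button translator.
uint32_t ApplyModifiers(uint32_t keyID, bool ctrl, bool shift, bool alt) {
  if (ctrl)  keyID |= MODIFIER_CTRL;
  if (shift) keyID |= MODIFIER_SHIFT;
  if (alt)   keyID |= MODIFIER_ALT;
  return keyID;
}

int main() {
  uint32_t plainA = 0xF041;  // hypothetical button code for 'A'
  uint32_t ctrlA  = ApplyModifiers(plainA, true, false, false);
  std::printf("plain: 0x%08X  ctrl: 0x%08X\n", plainA, ctrlA);
  return 0;
}
```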
|
xbmc/xbmc
|
19e238789c324ef59c59be42d543cd5b75b0a30f
|
2009-12-18T09:26:00Z
|
mmm a / Tests / UnitTests / MathTests / ConvolutionEngineTests . cpp <nl> ppp b / Tests / UnitTests / MathTests / ConvolutionEngineTests . cpp <nl> std : : vector < std : : tuple < ConvolutionEngineKind , DEVICEID_TYPE , size_t > > GetTestEng <nl> { <nl> std : : vector < std : : tuple < ConvolutionEngineKind , DEVICEID_TYPE , size_t > > res ; <nl> / / Reference engine . The engine does not use temp memory so safe to set it to 0 . <nl> - / / res . push_back ( std : : make_tuple ( ConvolutionEngineKind : : Reference , - 1 , 0 ) ) ; <nl> - / / res . push_back ( std : : make_tuple ( ConvolutionEngineKind : : Reference , 0 , 0 ) ) ; <nl> + res . push_back ( std : : make_tuple ( ConvolutionEngineKind : : Reference , - 1 , 0 ) ) ; <nl> + res . push_back ( std : : make_tuple ( ConvolutionEngineKind : : Reference , 0 , 0 ) ) ; <nl> <nl> / / Gemm engine . Implemented only for CPU for now . Uses temp memory . <nl> res . push_back ( std : : make_tuple ( ConvolutionEngineKind : : Gemm , - 1 , 0 ) ) ; <nl>
|
Enabled some convolution test configs.
|
microsoft/CNTK
|
15b51ee3b8349e0d8ca984dc2dbe9761d639bccc
|
2016-04-21T17:55:12Z
|
mmm a / src / arm / full - codegen - arm . cc <nl> ppp b / src / arm / full - codegen - arm . cc <nl> <nl> - / / Copyright 2010 the V8 project authors . All rights reserved . <nl> + / / Copyright 2011 the V8 project authors . All rights reserved . <nl> / / Redistribution and use in source and binary forms , with or without <nl> / / modification , are permitted provided that the following conditions are <nl> / / met : <nl> void FullCodeGenerator : : EmitStackCheck ( IterationStatement * stmt ) { <nl> __ b ( hs , & ok ) ; <nl> StackCheckStub stub ; <nl> __ CallStub ( & stub ) ; <nl> + / / Record a mapping of this PC offset to the OSR id . This is used to find <nl> + / / the AST id from the unoptimized code in order to use it as a key into <nl> + / / the deoptimization input data found in the optimized code . <nl> + RecordStackCheck ( stmt - > OsrEntryId ( ) ) ; <nl> + <nl> __ bind ( & ok ) ; <nl> PrepareForBailoutForId ( stmt - > EntryId ( ) , NO_REGISTERS ) ; <nl> + / / Record a mapping of the OSR id to this PC . This is used if the OSR <nl> + / / entry becomes the target of a bailout . We don ' t expect it to be , but <nl> + / / we want it to work if it is . <nl> PrepareForBailoutForId ( stmt - > OsrEntryId ( ) , NO_REGISTERS ) ; <nl> - RecordStackCheck ( stmt - > OsrEntryId ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / ia32 / deoptimizer - ia32 . cc <nl> ppp b / src / ia32 / deoptimizer - ia32 . cc <nl> <nl> - / / Copyright 2010 the V8 project authors . All rights reserved . <nl> + / / Copyright 2011 the V8 project authors . All rights reserved . <nl> / / Redistribution and use in source and binary forms , with or without <nl> / / modification , are permitted provided that the following conditions are <nl> / / met : <nl> void Deoptimizer : : DeoptimizeFunction ( JSFunction * function ) { <nl> <nl> void Deoptimizer : : PatchStackCheckCode ( RelocInfo * rinfo , <nl> Code * replacement_code ) { <nl> - / / The stack check code matches the pattern ( on ia32 , for example ) : <nl> + / / The stack check code matches the pattern : <nl> / / <nl> / / cmp esp , < limit > <nl> / / jae ok <nl> / / call < stack guard > <nl> + / / test eax , < loop nesting depth > <nl> / / ok : . . . <nl> / / <nl> - / / We will patch the code to : <nl> + / / We will patch away the branch so the code is : <nl> / / <nl> / / cmp esp , < limit > ; ; Not changed <nl> / / nop <nl> / / nop <nl> / / call < on - stack replacment > <nl> + / / test eax , < loop nesting depth > <nl> / / ok : <nl> Address call_target_address = rinfo - > pc ( ) ; <nl> ASSERT ( * ( call_target_address - 3 ) = = 0x73 & & / / jae <nl> - * ( call_target_address - 2 ) = = 0x05 & & / / offset <nl> + * ( call_target_address - 2 ) = = 0x07 & & / / offset <nl> * ( call_target_address - 1 ) = = 0xe8 ) ; / / call <nl> * ( call_target_address - 3 ) = 0x90 ; / / nop <nl> * ( call_target_address - 2 ) = 0x90 ; / / nop <nl> void Deoptimizer : : PatchStackCheckCode ( RelocInfo * rinfo , <nl> <nl> <nl> void Deoptimizer : : RevertStackCheckCode ( RelocInfo * rinfo , Code * check_code ) { <nl> + / / Replace the nops from patching ( Deoptimizer : : PatchStackCheckCode ) to <nl> + / / restore the conditional branch . 
<nl> Address call_target_address = rinfo - > pc ( ) ; <nl> ASSERT ( * ( call_target_address - 3 ) = = 0x90 & & / / nop <nl> * ( call_target_address - 2 ) = = 0x90 & & / / nop <nl> * ( call_target_address - 1 ) = = 0xe8 ) ; / / call <nl> * ( call_target_address - 3 ) = 0x73 ; / / jae <nl> - * ( call_target_address - 2 ) = 0x05 ; / / offset <nl> + * ( call_target_address - 2 ) = 0x07 ; / / offset <nl> rinfo - > set_target_address ( check_code - > entry ( ) ) ; <nl> } <nl> <nl> mmm a / src / ia32 / full - codegen - ia32 . cc <nl> ppp b / src / ia32 / full - codegen - ia32 . cc <nl> void FullCodeGenerator : : EmitStackCheck ( IterationStatement * stmt ) { <nl> __ j ( above_equal , & ok , taken ) ; <nl> StackCheckStub stub ; <nl> __ CallStub ( & stub ) ; <nl> - __ bind ( & ok ) ; <nl> - PrepareForBailoutForId ( stmt - > EntryId ( ) , NO_REGISTERS ) ; <nl> - PrepareForBailoutForId ( stmt - > OsrEntryId ( ) , NO_REGISTERS ) ; <nl> + / / Record a mapping of this PC offset to the OSR id . This is used to find <nl> + / / the AST id from the unoptimized code in order to use it as a key into <nl> + / / the deoptimization input data found in the optimized code . <nl> RecordStackCheck ( stmt - > OsrEntryId ( ) ) ; <nl> - / / Loop stack checks can be patched to perform on - stack <nl> - / / replacement . In order to decide whether or not to perform OSR we <nl> - / / embed the loop depth in a test instruction after the call so we <nl> - / / can extract it from the OSR builtin . <nl> + <nl> + / / Loop stack checks can be patched to perform on - stack replacement . In <nl> + / / order to decide whether or not to perform OSR we embed the loop depth <nl> + / / in a test instruction after the call so we can extract it from the OSR <nl> + / / builtin . <nl> ASSERT ( loop_depth ( ) > 0 ) ; <nl> __ test ( eax , Immediate ( Min ( loop_depth ( ) , Code : : kMaxLoopNestingMarker ) ) ) ; <nl> + <nl> + __ bind ( & ok ) ; <nl> + PrepareForBailoutForId ( stmt - > EntryId ( ) , NO_REGISTERS ) ; <nl> + / / Record a mapping of the OSR id to this PC . This is used if the OSR <nl> + / / entry becomes the target of a bailout . We don ' t expect it to be , but <nl> + / / we want it to work if it is . <nl> + PrepareForBailoutForId ( stmt - > OsrEntryId ( ) , NO_REGISTERS ) ; <nl> } <nl> <nl> <nl> mmm a / src / x64 / full - codegen - x64 . cc <nl> ppp b / src / x64 / full - codegen - x64 . cc <nl> void FullCodeGenerator : : EmitStackCheck ( IterationStatement * stmt ) { <nl> __ j ( above_equal , & ok ) ; <nl> StackCheckStub stub ; <nl> __ CallStub ( & stub ) ; <nl> + / / Record a mapping of this PC offset to the OSR id . This is used to find <nl> + / / the AST id from the unoptimized code in order to use it as a key into <nl> + / / the deoptimization input data found in the optimized code . <nl> + RecordStackCheck ( stmt - > OsrEntryId ( ) ) ; <nl> + <nl> __ bind ( & ok ) ; <nl> PrepareForBailoutForId ( stmt - > EntryId ( ) , NO_REGISTERS ) ; <nl> + / / Record a mapping of the OSR id to this PC . This is used if the OSR <nl> + / / entry becomes the target of a bailout . We don ' t expect it to be , but <nl> + / / we want it to work if it is . <nl> PrepareForBailoutForId ( stmt - > OsrEntryId ( ) , NO_REGISTERS ) ; <nl> - RecordStackCheck ( stmt - > OsrEntryId ( ) ) ; <nl> } <nl> <nl> <nl>
|
Small change to stack checks in unoptimized code.
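The ia32 hunk above changes the patched branch offset from 0x05 to 0x07 because a test instruction carrying the loop nesting depth now sits between the call and the ok label. The sketch below restates the patch/revert byte manipulation in plain C++ to make the pattern visible; it operates on an ordinary byte buffer rather than real code memory, and the function names are illustrative, not V8's.

```cpp
#include <cassert>
#include <cstdint>

// Turn "jae ok; call <stack guard>" into "nop; nop; call <OSR builtin>" by
// overwriting the two branch bytes that precede the call opcode (0xe8).
void PatchStackCheckBytes(uint8_t* call_target_address) {
  assert(call_target_address[-3] == 0x73 &&   // jae
         call_target_address[-2] == 0x07 &&   // offset: skips the call and the test
         call_target_address[-1] == 0xe8);    // call
  call_target_address[-3] = 0x90;             // nop
  call_target_address[-2] = 0x90;             // nop
}

// Undo the patch so the conditional stack check is taken again.
void RevertStackCheckBytes(uint8_t* call_target_address) {
  assert(call_target_address[-3] == 0x90 &&
         call_target_address[-2] == 0x90 &&
         call_target_address[-1] == 0xe8);
  call_target_address[-3] = 0x73;             // jae
  call_target_address[-2] = 0x07;             // offset
}
```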
|
v8/v8
|
dde853a4ad7b78b47a636a81579e7e6ba0cb21b4
|
2011-01-06T13:48:12Z
|
mmm a / xbmc / cores / VideoRenderers / VideoShaders / WinVideoFilter . cpp <nl> ppp b / xbmc / cores / VideoRenderers / VideoShaders / WinVideoFilter . cpp <nl> void CYUV2RGBShader : : Render ( CRect sourceRect , CRect destRect , <nl> contrast , brightness , flags ) ; <nl> SetShaderParameters ( YUVbuf ) ; <nl> Execute ( nullptr , 4 ) ; <nl> - <nl> - / / we changed view port , so we need to restore our real viewport . <nl> - g_Windowing . RestoreViewPort ( ) ; <nl> - <nl> } <nl> <nl> CYUV2RGBShader : : ~ CYUV2RGBShader ( ) <nl> void CYUV2RGBShader : : SetShaderParameters ( YUVBuffer * YUVbuf ) <nl> m_effect . SetTexture ( " g_VTexture " , YUVbuf - > planes [ 2 ] . texture ) ; <nl> m_effect . SetFloatArray ( " g_StepXY " , m_texSteps , ARRAY_SIZE ( m_texSteps ) ) ; <nl> <nl> - / / we need to set view port to the full size of current render target <nl> - ID3D11RenderTargetView * rtView = nullptr ; <nl> - g_Windowing . Get3D11Context ( ) - > OMGetRenderTargets ( 1 , & rtView , nullptr ) ; <nl> - <nl> - / / get dimention of render target <nl> - ID3D11Resource * rtResource = nullptr ; <nl> - rtView - > GetResource ( & rtResource ) ; <nl> - ID3D11Texture2D * rtTexture = nullptr ; <nl> - HRESULT hr = rtResource - > QueryInterface ( __uuidof ( ID3D11Texture2D ) , reinterpret_cast < void * * > ( & rtTexture ) ) ; <nl> - <nl> - float viewPortWidth = 0 . 0f , viewPortHeight = 0 . 0f ; <nl> - <nl> - if ( S_OK = = hr & & rtTexture ) <nl> - { <nl> - D3D11_TEXTURE2D_DESC rtDescr = { } ; <nl> - rtTexture - > GetDesc ( & rtDescr ) ; <nl> - viewPortWidth = static_cast < float > ( rtDescr . Width ) ; <nl> - viewPortHeight = static_cast < float > ( rtDescr . Height ) ; <nl> - } <nl> - <nl> - SAFE_RELEASE ( rtTexture ) ; <nl> - SAFE_RELEASE ( rtResource ) ; <nl> - SAFE_RELEASE ( rtView ) ; <nl> - <nl> - D3D11_VIEWPORT viewPort = { 0 . 0f , 0 . 0f , viewPortWidth , viewPortHeight , 0 . 0 , 1 . 0f } ; <nl> - g_Windowing . Get3D11Context ( ) - > RSSetViewports ( 1 , & viewPort ) ; <nl> + UINT numPorts = 1 ; <nl> + D3D11_VIEWPORT viewPort ; <nl> + g_Windowing . Get3D11Context ( ) - > RSGetViewports ( & numPorts , & viewPort ) ; <nl> m_effect . SetFloatArray ( " g_viewPort " , & viewPort . Width , 2 ) ; <nl> } <nl> <nl> mmm a / xbmc / cores / VideoRenderers / WinRenderer . cpp <nl> ppp b / xbmc / cores / VideoRenderers / WinRenderer . cpp <nl> void CWinRenderer : : RenderPS ( ) <nl> <nl> void CWinRenderer : : Stage1 ( ) <nl> { <nl> + CD3D11_VIEWPORT viewPort ( 0 . 0f , 0 . 0f , 0 . 0f , 0 . 0f ) ; <nl> ID3D11DeviceContext * pContext = g_Windowing . Get3D11Context ( ) ; <nl> <nl> - / / Store current render target and depth view . <nl> - ID3D11RenderTargetView * oldRT = nullptr ; ID3D11DepthStencilView * oldDS = nullptr ; <nl> - pContext - > OMGetRenderTargets ( 1 , & oldRT , & oldDS ) ; <nl> - <nl> - if ( ! m_bUseHQScaler ) <nl> - { <nl> - / / disable depth <nl> - pContext - > OMSetRenderTargets ( 1 , & oldRT , nullptr ) ; <nl> - / / render video frame <nl> - m_colorShader - > Render ( m_sourceRect , g_graphicsContext . StereoCorrection ( m_destRect ) , <nl> - CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_Contrast , <nl> - CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_Brightness , <nl> - m_iFlags , ( YUVBuffer * ) m_VideoBuffers [ m_iYV12RenderBuffer ] ) ; <nl> - } <nl> - else <nl> + / / store current render target and depth view . 
<nl> + ID3D11RenderTargetView * oldRTView = nullptr ; ID3D11DepthStencilView * oldDSView = nullptr ; <nl> + pContext - > OMGetRenderTargets ( 1 , & oldRTView , & oldDSView ) ; <nl> + / / select destination rectangle <nl> + CRect destRect = m_bUseHQScaler ? m_sourceRect : g_graphicsContext . StereoCorrection ( m_destRect ) ; <nl> + / / select target view <nl> + ID3D11RenderTargetView * pRTView = m_bUseHQScaler ? m_IntermediateTarget . GetRenderTarget ( ) : oldRTView ; <nl> + / / set destination render target <nl> + pContext - > OMSetRenderTargets ( 1 , & pRTView , nullptr ) ; <nl> + / / get rendertarget ' s dimension <nl> + if ( pRTView ) <nl> { <nl> - / / At DX9 setting a new render target will cause the viewport <nl> - / / to be set to the full size of the new render target . <nl> - / / In DX11 we should do this manualy <nl> - CD3D11_VIEWPORT viewPort ( 0 . 0f , 0 . 0f , static_cast < float > ( m_IntermediateTarget . GetWidth ( ) ) , static_cast < float > ( m_IntermediateTarget . GetHeight ( ) ) ) ; <nl> - pContext - > RSSetViewports ( 1 , & viewPort ) ; <nl> - <nl> - ID3D11RenderTargetView * newRT = m_IntermediateTarget . GetRenderTarget ( ) ; <nl> - <nl> - / / this needs to avoid binding m_IntermediateTarget as shader resource and as render target at the same time <nl> - CD3DHelper : : PSClearShaderResources ( pContext ) ; <nl> - / / Switch the render target to the temporary destination <nl> - pContext - > OMSetRenderTargets ( 1 , & newRT , nullptr ) ; <nl> - <nl> - m_colorShader - > Render ( m_sourceRect , m_sourceRect , <nl> - CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_Contrast , <nl> - CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_Brightness , <nl> - m_iFlags , ( YUVBuffer * ) m_VideoBuffers [ m_iYV12RenderBuffer ] ) ; <nl> - / / Restore our view port . <nl> - g_Windowing . RestoreViewPort ( ) ; <nl> - } <nl> + ID3D11Resource * pResource = nullptr ; <nl> + ID3D11Texture2D * pTexture = nullptr ; <nl> <nl> + pRTView - > GetResource ( & pResource ) ; <nl> + if ( SUCCEEDED ( pResource - > QueryInterface ( __uuidof ( ID3D11Texture2D ) , reinterpret_cast < void * * > ( & pTexture ) ) ) ) <nl> + { <nl> + D3D11_TEXTURE2D_DESC desc ; <nl> + pTexture - > GetDesc ( & desc ) ; <nl> + viewPort = CD3D11_VIEWPORT ( 0 . 0f , 0 . 0f , desc . Width , desc . Height ) ; <nl> + } <nl> + SAFE_RELEASE ( pResource ) ; <nl> + SAFE_RELEASE ( pTexture ) ; <nl> + } <nl> + / / reset scissors for HQ scaler <nl> + if ( m_bUseHQScaler ) <nl> + g_Windowing . ResetScissors ( ) ; <nl> + / / reset view port <nl> + pContext - > RSSetViewports ( 1 , & viewPort ) ; <nl> + / / render video frame <nl> + m_colorShader - > Render ( m_sourceRect , destRect , <nl> + CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_Contrast , <nl> + CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_Brightness , <nl> + m_iFlags , ( YUVBuffer * ) m_VideoBuffers [ m_iYV12RenderBuffer ] ) ; <nl> + / / Restore our view port . <nl> + g_Windowing . RestoreViewPort ( ) ; <nl> / / Restore the render target and depth view . <nl> - pContext - > OMSetRenderTargets ( 1 , & oldRT , oldDS ) ; <nl> - SAFE_RELEASE ( oldRT ) ; <nl> - SAFE_RELEASE ( oldDS ) ; <nl> + pContext - > OMSetRenderTargets ( 1 , & oldRTView , oldDSView ) ; <nl> + SAFE_RELEASE ( oldRTView ) ; <nl> + SAFE_RELEASE ( oldDSView ) ; <nl> } <nl> <nl> void CWinRenderer : : Stage2 ( ) <nl>
|
Merge pull request from afedchin/fix_dx_hqscallers
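The WinVideoFilter hunk in this record replaces a hand-rolled lookup of the render target's dimensions with a query of the viewport already bound on the rasterizer stage. A hedged C++ sketch of that query, using the same D3D11 call, is below; the helper name and the assumption that exactly one viewport is bound are mine.

```cpp
#include <d3d11.h>

// Read back the currently bound viewport instead of re-deriving its size from
// the render target's texture description, as the shader change above does.
void GetCurrentViewportSize(ID3D11DeviceContext* context, float& width, float& height)
{
  UINT numViewports = 1;
  D3D11_VIEWPORT viewPort = {};
  context->RSGetViewports(&numViewports, &viewPort);
  width  = viewPort.Width;
  height = viewPort.Height;
}
```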
|
xbmc/xbmc
|
a92c98d3a87b1da0778e979c3fe5092b02a5d6ff
|
2015-10-31T12:26:05Z
|
mmm a / tensorflow / python / keras / distribute / BUILD <nl> ppp b / tensorflow / python / keras / distribute / BUILD <nl> cuda_py_test ( <nl> <nl> py_library ( <nl> name = " multi_worker_test_main_lib " , <nl> - testonly = True , <nl> srcs = [ " multi_worker_test . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : dtypes " , <nl> cuda_py_test ( <nl> name = " multi_worker_callback_test " , <nl> srcs = [ " multi_worker_callback_test . py " ] , <nl> additional_deps = [ <nl> + " : multi_worker_test_main_lib " , <nl> + " / / tensorflow / python : platform " , <nl> " / / tensorflow / python / distribute : collective_all_reduce_strategy " , <nl> " / / tensorflow / python / distribute : combinations " , <nl> - " / / tensorflow / python / keras / distribute : multi_worker_test_main_lib " , <nl> " / / tensorflow / python / distribute : multi_worker_test_base " , <nl> - " / / tensorflow / python : platform " , <nl> " / / tensorflow / python / distribute : distribute_config " , <nl> " / / tensorflow / python / distribute : distribute_coordinator " , <nl> " / / tensorflow / python / keras " , <nl>
|
Fix keras/distribute BUILD file.
|
tensorflow/tensorflow
|
a20bbef4103d847d9944a05a1556f19c7f35e933
|
2019-04-10T00:02:19Z
|
mmm a / docs / release - notes . md <nl> ppp b / docs / release - notes . md <nl> <nl> * Clara no longer assumes first argument ( binary name ) is always present ( # 729 ) <nl> * If it is missing , empty string is used as default . <nl> * Clara no longer reads 1 character past argument string ( # 830 ) <nl> + * Regression in Objective - C bindings ( Matchers ) fixed ( # 854 ) <nl> <nl> <nl> # # # Other notes : <nl>
|
Added Obj-C bindings fix to release notes
|
catchorg/Catch2
|
0692317bc51d6cdf3ccad37e4ad8fe92262dbe45
|
2017-03-15T09:04:09Z
|
mmm a / cocos / renderer / CCTexture2D . cpp <nl> ppp b / cocos / renderer / CCTexture2D . cpp <nl> const char * Texture2D : : getStringForFormat ( ) const <nl> case Texture2D : : PixelFormat : : PVRTC2 : <nl> return " PVRTC2 " ; <nl> <nl> + case Texture2D : : PixelFormat : : PVRTC2A : <nl> + return " PVRTC2A " ; <nl> + <nl> + case Texture2D : : PixelFormat : : PVRTC4A : <nl> + return " PVRTC4A " ; <nl> + <nl> + case Texture2D : : PixelFormat : : ETC : <nl> + return " ETC " ; <nl> + <nl> + case Texture2D : : PixelFormat : : S3TC_DXT1 : <nl> + return " S3TC_DXT1 " ; <nl> + <nl> + case Texture2D : : PixelFormat : : S3TC_DXT3 : <nl> + return " S3TC_DXT3 " ; <nl> + <nl> + case Texture2D : : PixelFormat : : S3TC_DXT5 : <nl> + return " S3TC_DXT5 " ; <nl> + <nl> + case Texture2D : : PixelFormat : : ATC_RGB : <nl> + return " ATC_RGB " ; <nl> + <nl> + case Texture2D : : PixelFormat : : ATC_EXPLICIT_ALPHA : <nl> + return " ATC_EXPLICIT_ALPHA " ; <nl> + <nl> + case Texture2D : : PixelFormat : : ATC_INTERPOLATED_ALPHA : <nl> + return " ATC_INTERPOLATED_ALPHA " ; <nl> + <nl> default : <nl> CCASSERT ( false , " unrecognized pixel format " ) ; <nl> CCLOG ( " stringForFormat : % ld , cannot give useful result " , ( long ) _pixelFormat ) ; <nl>
|
Texture2D::getStringForFormat - added missing formats
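The change simply extends the existing switch so every compressed pixel format has a printable name and only truly unknown values hit the assert in the default branch. A cut-down sketch of the pattern, with a stand-in enum rather than the real Texture2D::PixelFormat, for illustration:

```cpp
#include <cassert>

enum class PixelFormat { PVRTC2A, PVRTC4A, ETC, S3TC_DXT1 };

// Exhaustive name lookup; anything unhandled trips the assert, which is how
// the missing cases in the original were noticed.
const char* getStringForFormat(PixelFormat f) {
  switch (f) {
    case PixelFormat::PVRTC2A:   return "PVRTC2A";
    case PixelFormat::PVRTC4A:   return "PVRTC4A";
    case PixelFormat::ETC:       return "ETC";
    case PixelFormat::S3TC_DXT1: return "S3TC_DXT1";
    default:
      assert(false && "unrecognized pixel format");
      return "unknown";
  }
}
```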
|
cocos2d/cocos2d-x
|
15f0aaa598dbab75078149eba8641db7384d1461
|
2016-02-12T16:04:30Z
|
mmm a / src / library . js <nl> ppp b / src / library . js <nl> LibraryManager . library = { <nl> stdin : 0 , <nl> stdout : 0 , <nl> stderr : 0 , <nl> + _impure_ptr : 0 , <nl> <nl> - $ FS__deps : [ ' $ ERRNO_CODES ' , ' __setErrNo ' , ' stdin ' , ' stdout ' , ' stderr ' ] , <nl> + $ FS__deps : [ ' $ ERRNO_CODES ' , ' __setErrNo ' , ' stdin ' , ' stdout ' , ' stderr ' , ' _impure_ptr ' ] , <nl> $ FS__postset : ' FS . init ( ) ; ' , <nl> $ FS : { <nl> / / The path to the current folder . <nl> LibraryManager . library = { <nl> _stdout = allocate ( [ 2 ] , ' void * ' , ALLOC_STATIC ) ; <nl> _stderr = allocate ( [ 3 ] , ' void * ' , ALLOC_STATIC ) ; <nl> <nl> + / / Newlib initialization <nl> + FS . streams [ _stdin ] = FS . streams [ 1 ] ; <nl> + FS . streams [ _stdout ] = FS . streams [ 2 ] ; <nl> + FS . streams [ _stderr ] = FS . streams [ 3 ] ; <nl> + __impure_ptr = allocate ( 5 , " void * " , ALLOC_STATIC ) ; <nl> + var impure = getValue ( __impure_ptr , " void * " ) ; <nl> + setValue ( impure + { { { QUANTUM_SIZE } } } , _stdin , " void * " ) ; <nl> + setValue ( impure + { { { QUANTUM_SIZE } } } * 2 , _stdout , " void * " ) ; <nl> + setValue ( impure + { { { QUANTUM_SIZE } } } * 3 , _stderr , " void * " ) ; <nl> + <nl> / / Once initialized , permissions start having effect . <nl> FS . ignorePermissions = false ; <nl> } <nl> mmm a / src / settings . js <nl> ppp b / src / settings . js <nl> INCLUDE_FULL_LIBRARY = 0 ; / / Whether to include the whole library rather than ju <nl> / / dynamically loading modules that make use of runtime <nl> / / library functions that are not used in the main module . <nl> <nl> - C_DEFINES = { } ; / / A set of defines , for example generated from your header files . <nl> - / / This lets the emscripten libc see the right values <nl> + / / A set of defines , for example generated from your header files . This <nl> + / / lets the emscripten libc ( library . js ) see the right values . <nl> + / / The default value here has been generated from system / include . If you <nl> + / / modify those files , or use different headers , you will need to override <nl> + / / this . 
<nl> + C_DEFINES = { " S_IWRITE " : " 0000200 " , " _POSIX_CPUTIME " : " 1 " , " O_RDONLY " : " 0 " , " _POSIX_C_SOURCE " : " 2 " , " __FILENAME_MAX__ " : " 255 " , " DEFFILEMODE " : " 0000400 " , " PTHREAD_PRIO_NONE " : " 0 " , " _S_IFMT " : " 0170000 " , " _O_TEXT " : " 131072 " , " _FNOINHERIT " : " 262144 " , " F_WRLCK " : " 2 " , " _POSIX_JOB_CONTROL " : " 1 " , " _FASYNC " : " 64 " , " __BUFSIZ__ " : " 16 " , " S_IRUSR " : " 0000400 " , " F_UNLCK " : " 3 " , " _FDEFER " : " 32 " , " _O_EXCL " : " 2048 " , " AT_SYMLINK_NOFOLLOW " : " 2 " , " S_IFDIR " : " 0040000 " , " _POSIX_THREAD_ATTR_STACKSIZE " : " 200112 " , " _IFDIR " : " 0040000 " , " _POSIX_TIMERS " : " 1 " , " _IFLNK " : " 0120000 " , " FNDELAY " : " 16384 " , " _POSIX2_C_DEV " : " 200112 " , " _FCREAT " : " 512 " , " _POSIX_THREAD_CPUTIME " : " 1 " , " FAPPEND " : " 8 " , " O_CREAT " : " 512 " , " _POSIX_ADVISORY_INFO " : " 200112 " , " _MB_EXTENDED_CHARSETS_WINDOWS " : " 1 " , " _POSIX_V6_LPBIG_OFFBIG " : " - 1 " , " PTHREAD_MUTEX_ERRORCHECK " : " 2 " , " _XBS5_ILP32_OFF32 " : " - 1 " , " _POSIX_THREAD_SPORADIC_SERVER " : " 1 " , " FD_CLOEXEC " : " 1 " , " F_DUPFD_CLOEXEC " : " 14 " , " _XBS5_LP64_OFF64 " : " - 1 " , " _POSIX_SHARED_MEMORY_OBJECTS " : " 200112 " , " _POSIX_MEMORY_PROTECTION " : " 200112 " , " _POSIX_DEVCTL_DIRECTION " : " 1 " , " S_IRGRP " : " 0000040 " , " FDEFER " : " 32 " , " S_IFCHR " : " 0020000 " , " F_SETOWN " : " 6 " , " _POSIX_THREAD_PRIO_PROTECT " : " 1 " , " ___int_least16_t_defined " : " 1 " , " PTHREAD_SCOPE_SYSTEM " : " 1 " , " S_ISGID " : " 0002000 " , " _POSIX_INTERRUPT_CONTROL " : " 1 " , " FEXCL " : " 2048 " , " PTHREAD_SCOPE_PROCESS " : " 0 " , " _S_IFDIR " : " 0040000 " , " F_RSETLK " : " 11 " , " _POSIX_READER_WRITER_LOCKS " : " 200112 " , " F_UNLKSYS " : " 4 " , " __RAND_MAX " : " 2147483647 " , " PTHREAD_CREATE_JOINABLE " : " 1 " , " _S_IFIFO " : " 0010000 " , " FEXLOCK " : " 256 " , " _FNDELAY " : " 16384 " , " _POSIX_SPORADIC_SERVER " : " 1 " , " _MB_EXTENDED_CHARSETS_ISO " : " 1 " , " PTHREAD_STACK_MIN " : " 200 " , " O_APPEND " : " 8 " , " _POSIX_DEVICE_CONTROL " : " 1 " , " _POSIX_V6_ILP32_OFF32 " : " - 1 " , " _POSIX_SPIN_LOCKS " : " 200112 " , " O_NOCTTY " : " 32768 " , " UTIME_NOW " : " - 2 " , " O_ACCMODE " : " 3 " , " _FSHLOCK " : " 128 " , " _POSIX_REGEXP " : " 1 " , " ___int_least32_t_defined " : " 1 " , " _UNIX98_THREAD_MUTEX_ATTRIBUTES " : " 1 " , " _FTRUNC " : " 1024 " , " _POSIX_BARRIERS " : " 200112 " , " _POSIX_FSYNC " : " 200112 " , " F_SETLKW " : " 9 " , " _POSIX_RAW_SOCKETS " : " 200112 " , " O_RDWR " : " 2 " , " FOPEN " : " - 1 " , " F_RGETLK " : " 10 " , " F_DUPFD " : " 0 " , " __LARGE64_FILES " : " 1 " , " _S_IFCHR " : " 0020000 " , " _XOPEN_SHM " : " 1 " , " AT_SYMLINK_FOLLOW " : " 4 " , " _S_IWRITE " : " 0000200 " , " FSYNC " : " 8192 " , " _POSIX_CHOWN_RESTRICTED " : " 1 " , " FNOCTTY " : " 32768 " , " __USE_XOPEN2K " : " 1 " , " PTHREAD_MUTEX_DEFAULT " : " 3 " , " _IFSOCK " : " 0140000 " , " _FNOCTTY " : " 32768 " , " _IFIFO " : " 0010000 " , " PTHREAD_PROCESS_PRIVATE " : " 0 " , " _IFREG " : " 0100000 " , " S_BLKSIZE " : " 1024 " , " _POSIX_VERSION " : " 200112 " , " O_TRUNC " : " 1024 " , " _O_CREAT " : " 512 " , " ___int64_t_defined " : " 1 " , " S_IFREG " : " 0100000 " , " O_TEXT " : " 131072 " , " _POSIX_THREAD_PRIO_INHERIT " : " 1 " , " F_GETLK " : " 7 " , " S_IWOTH " : " 0000002 " , " F_GETFD " : " 1 " , " _XBS5_ILP32_OFFBIG " : " 1 " , " S_IFMT " : " 0170000 " , " _O_RDWR " : " 2 " , " _FREAD " : " 1 " , " F_GETFL " : " 3 " , " _FWRITE " : " 2 " , 
" F_RDLCK " : " 1 " , " S_IRWXO " : " 0000004 " , " SCHED_SPORADIC " : " 4 " , " S_IRWXU " : " 0000400 " , " _O_NOINHERIT " : " 262144 " , " _POSIX_THREAD_SAFE_FUNCTIONS " : " 200112 " , " _IFMT " : " 0170000 " , " _POSIX2_CHAR_TERM " : " 200112 " , " F_SETLK " : " 8 " , " S_IWUSR " : " 0000200 " , " FNBIO " : " 4096 " , " ALLPERMS " : " 0004000 " , " _POSIX_V6_LP64_OFF64 " : " - 1 " , " _POSIX_MEMLOCK_RANGE " : " 200112 " , " PTHREAD_INHERIT_SCHED " : " 1 " , " F_RSETLKW " : " 13 " , " F_SETFD " : " 2 " , " S_IFLNK " : " 0120000 " , " ___int16_t_defined " : " 1 " , " _S_IREAD " : " 0000400 " , " _FNONBLOCK " : " 16384 " , " F_SETFL " : " 4 " , " _POSIX_SHELL " : " 1 " , " FMARK " : " 16 " , " ___int8_t_defined " : " 1 " , " _POSIX2_UPE " : " 200112 " , " MALLOC_ALIGNMENT " : " 16 " , " PTHREAD_MUTEX_RECURSIVE " : " 1 " , " PTHREAD_CREATE_DETACHED " : " 0 " , " _POSIX2_VERSION " : " 200112 " , " FWRITE " : " 2 " , " FREAD " : " 1 " , " O_CLOEXEC " : " 262144 " , " H8300 " : " 1 " , " ITIMER_PROF " : " 2 " , " FNONBIO " : " 16384 " , " _FNBIO " : " 4096 " , " O_WRONLY " : " 1 " , " _POSIX_MONOTONIC_CLOCK " : " 200112 " , " O_NOINHERIT " : " 262144 " , " ACCESSPERMS " : " 0000400 " , " PTHREAD_EXPLICIT_SCHED " : " 2 " , " _POSIX_PRIORITIZED_IO " : " 1 " , " PATH_MAX " : " 4096 " , " F_CNVT " : " 12 " , " _POSIX_THREAD_PROCESS_SHARED " : " 200112 " , " _POSIX2_C_BIND " : " 200112 " , " SCHED_OTHER " : " 0 " , " FCREAT " : " 512 " , " _POSIX_SPAWN " : " 1 " , " S_IFIFO " : " 0010000 " , " _POSIX_MEMLOCK " : " 1 " , " S_ISVTX " : " 0001000 " , " SCHED_FIFO " : " 1 " , " S_IXUSR " : " 0000100 " , " UTIME_OMIT " : " - 1 " , " S_ENFMT " : " 0002000 " , " O_SYNC " : " 8192 " , " S_IRWXG " : " 0000040 " , " PTHREAD_PROCESS_SHARED " : " 1 " , " _IFBLK " : " 0060000 " , " _FEXLOCK " : " 256 " , " _XOPEN_VERSION " : " 600 " , " _POSIX_SYNCHRONIZED_IO " : " 200112 " , " S_IROTH " : " 0000004 " , " _POSIX_MAPPED_FILES " : " 200112 " , " FASYNC " : " 64 " , " ARG_MAX " : " 4096 " , " _POSIX_NO_TRUNC " : " 1 " , " _XOPEN_ENH_I18N " : " 1 " , " AT_EACCESS " : " 1 " , " F_GETOWN " : " 5 " , " PTHREAD_MUTEX_NORMAL " : " 0 " , " _FEXCL " : " 2048 " , " _O_TRUNC " : " 1024 " , " ITIMER_REAL " : " 0 " , " _S_IEXEC " : " 0000100 " , " _POSIX_ASYNCHRONOUS_IO " : " 1 " , " PTHREAD_PRIO_INHERIT " : " 1 " , " FTRUNC " : " 1024 " , " S_IXOTH " : " 0000001 " , " O_NONBLOCK " : " 16384 " , " FSHLOCK " : " 128 " , " ___int32_t_defined " : " 1 " , " _POSIX2_RE_DUP_MAX " : " 255 " , " _O_APPEND " : " 8 " , " _FLOAT_ARG " : " < type ' float ' > " , " _POSIX_MESSAGE_PASSING " : " 200112 " , " S_ISUID " : " 0004000 " , " _READ_WRITE_RETURN_TYPE " : " < type ' int ' > " , " _O_WRONLY " : " 1 " , " _POSIX_THREAD_PRIORITY_SCHEDULING " : " 200112 " , " _S_IFREG " : " 0100000 " , " _O_BINARY " : " 65536 " , " _XOPEN_CRYPT " : " 1 " , " _O_RAW " : " 65536 " , " _FTEXT " : " 131072 " , " _POSIX2_SW_DEV " : " 200112 " , " _POSIX_PRIORITY_SCHEDULING " : " 200112 " , " _LARGEFILE64_SOURCE " : " 1 " , " _POINTER_INT " : " < type ' long ' > " , " _POSIX_SEMAPHORES " : " 200112 " , " _IFCHR " : " 0020000 " , " _FMARK " : " 16 " , " _POSIX_IPV6 " : " 200112 " , " S_IREAD " : " 0000400 " , " _LONG_LONG_TYPE " : " < type ' long ' > " , " S_IFSOCK " : " 0140000 " , " ___int_least8_t_defined " : " 1 " , " _POSIX_V6_ILP32_OFFBIG " : " 1 " , " _FBINARY " : " 65536 " , " _FOPEN " : " - 1 " , " _O_RDONLY " : " 0 " , " O_BINARY " : " 65536 " , " SCHED_RR " : " 2 " , " _FAPPEND " : " 8 " , " ITIMER_VIRTUAL " : " 1 " , " S_IXGRP " : " 0000010 " , " 
AT_REMOVEDIR " : " 8 " , " _XBS5_LPBIG_OFFBIG " : " - 1 " , " O_EXCL " : " 2048 " , " S_IFBLK " : " 0060000 " , " _POSIX_TIMEOUTS " : " 1 " , " _POSIX_THREAD_ATTR_STACKADDR " : " 1 " , " S_IWGRP " : " 0000020 " , " _POSIX_THREADS " : " 200112 " , " S_IEXEC " : " 0000100 " , " _POSIX_REALTIME_SIGNALS " : " 200112 " , " PTHREAD_PRIO_PROTECT " : " 2 " , " AT_FDCWD " : " - 2 " , " _FSYNC " : " 8192 " , " _POSIX_SAVED_IDS " : " 1 " } <nl> <nl> SHOW_LABELS = 0 ; / / Show labels in the generated code <nl> <nl>
|
fix stdin/out/err for newlib; fixes files
|
emscripten-core/emscripten
|
0d423cd35b930cbc70806e90a4583ad70d552a11
|
2011-09-25T19:40:53Z
|
mmm a / lib / ClangImporter / ImportDecl . cpp <nl> ppp b / lib / ClangImporter / ImportDecl . cpp <nl> createValueConstructor ( ClangImporter : : Implementation & Impl , <nl> SourceLoc ( ) , var - > getName ( ) , var - > getType ( ) , structDecl ) ; <nl> param - > setInterfaceType ( var - > getInterfaceType ( ) ) ; <nl> param - > setValidationStarted ( ) ; <nl> + Impl . recordForceUnwrapForDecl ( <nl> + param , var - > getAttrs ( ) . hasAttribute < ImplicitlyUnwrappedOptionalAttr > ( ) ) ; <nl> valueParameters . push_back ( param ) ; <nl> } <nl> <nl>
|
IUO: Fix missed update to clang importer for constructor parameters.
|
apple/swift
|
0544e2a26e1aa96cb59a9dcb8e069427839549b5
|
2018-01-05T22:40:15Z
|
mmm a / doc / classes / TabContainer . xml <nl> ppp b / doc / classes / TabContainer . xml <nl> <nl> If [ code ] true [ / code ] , tabs are visible . If [ code ] false [ / code ] , tabs ' content and titles are hidden . <nl> < / member > <nl> < member name = " use_hidden_tabs_for_min_size " type = " bool " setter = " set_use_hidden_tabs_for_min_size " getter = " get_use_hidden_tabs_for_min_size " default = " false " > <nl> + If [ code ] true [ / code ] , children [ Control ] nodes that are hidden have their minimum size take into account in the total , instead of only the currently visible one . <nl> < / member > <nl> < / members > <nl> < signals > <nl> mmm a / doc / classes / float . xml <nl> ppp b / doc / classes / float . xml <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> < class name = " float " category = " Built - In Types " version = " 3 . 2 " > <nl> < brief_description > <nl> - Float built - in type <nl> + Float built - in type . <nl> < / brief_description > <nl> < description > <nl> Float built - in type . <nl>
|
Merge pull request from YeldhamDev/tabcontainer_hidden_tabs_doc
|
godotengine/godot
|
80ad3c93ef24b5a2a803b2c0d05e5192ce674a48
|
2020-01-03T21:47:47Z
|
mmm a / tests / cases / aliases_fastcomp . ll <nl> ppp b / tests / cases / aliases_fastcomp . ll <nl> target triple = " asmjs - unknown - emscripten " <nl> <nl> @ . str = private unnamed_addr constant [ 18 x i8 ] c " hello , world ! % d \ 0A \ 00 " , align 1 ; [ # uses = 1 type = [ 18 x i8 ] * ] <nl> <nl> - @ othername = alias internal void ( i32 ) * @ doit <nl> - @ othername2 = alias internal void ( i32 ) * @ othername <nl> - @ othername3 = alias internal void ( i32 ) * @ othername2 <nl> + @ othername = internal alias void ( i32 ) * @ doit <nl> + @ othername2 = internal alias void ( i32 ) * @ othername <nl> + @ othername3 = internal alias void ( i32 ) * @ othername2 <nl> <nl> @ value = global i32 17 <nl> @ value2 = alias i32 * @ value <nl> mmm a / tests / cases / callalias . ll <nl> ppp b / tests / cases / callalias . ll <nl> target triple = " i386 - pc - linux - gnu " <nl> <nl> @ . str = private unnamed_addr constant [ 15 x i8 ] c " hello , world ! \ 0A \ 00 " , align 1 ; [ # uses = 1 type = [ 15 x i8 ] * ] <nl> <nl> - @ othername = alias internal void ( ) * @ doit <nl> + @ othername = internal alias void ( ) * @ doit <nl> <nl> define internal void @ doit ( ) unnamed_addr nounwind align 2 { <nl> % call = call i32 ( i8 * , . . . ) * @ printf ( i8 * getelementptr inbounds ( [ 15 x i8 ] * @ . str , i32 0 , i32 0 ) ) ; [ # uses = 0 type = i32 ] <nl> mmm a / tests / cases / callalias2 . ll <nl> ppp b / tests / cases / callalias2 . ll <nl> target triple = " i386 - pc - linux - gnu " <nl> <nl> @ . str = private unnamed_addr constant [ 15 x i8 ] c " hello , world ! \ 0A \ 00 " , align 1 ; [ # uses = 1 type = [ 15 x i8 ] * ] <nl> <nl> - @ othername = alias internal void ( ) * @ doit <nl> - @ othername2 = alias internal void ( ) * @ othername <nl> + @ othername = internal alias void ( ) * @ doit <nl> + @ othername2 = internal alias void ( ) * @ othername <nl> <nl> define internal void @ doit ( ) unnamed_addr nounwind align 2 { <nl> % call = call i32 ( i8 * , . . . ) * @ printf ( i8 * getelementptr inbounds ( [ 15 x i8 ] * @ . str , i32 0 , i32 0 ) ) ; [ # uses = 0 type = i32 ] <nl> mmm a / tests / cases / i24_ce_fastcomp . ll <nl> ppp b / tests / cases / i24_ce_fastcomp . ll <nl> target triple = " asmjs - unknown - emscripten " <nl> @ _ZNSt16bad_array_lengthC1Ev = alias void ( % " class . std : : bad_array_length " * ) * @ _ZNSt16bad_array_lengthC2Ev <nl> @ _ZNSt16bad_array_lengthD1Ev = alias bitcast ( void ( % " class . std : : bad_alloc " * ) * @ _ZNSt9bad_allocD2Ev to void ( % " class . std : : bad_array_length " * ) * ) <nl> @ _ZNSt16bad_array_lengthD2Ev = alias bitcast ( void ( % " class . std : : bad_alloc " * ) * @ _ZNSt9bad_allocD2Ev to void ( % " class . std : : bad_array_length " * ) * ) <nl> - @ __strtof_l = alias weak float ( i8 * , i8 * * , % struct . __locale_struct * ) * @ strtof_l <nl> - @ __strtod_l = alias weak double ( i8 * , i8 * * , % struct . __locale_struct . 0 * ) * @ strtod_l <nl> - @ __strtold_l = alias weak double ( i8 * , i8 * * , % struct . __locale_struct . 1 * ) * @ strtold_l <nl> + @ __strtof_l = weak alias float ( i8 * , i8 * * , % struct . __locale_struct * ) * @ strtof_l <nl> + @ __strtod_l = weak alias double ( i8 * , i8 * * , % struct . __locale_struct . 0 * ) * @ strtod_l <nl> + @ __strtold_l = weak alias double ( i8 * , i8 * * , % struct . __locale_struct . 1 * ) * @ strtold_l <nl> <nl> ; Function Attrs : nounwind <nl> define i32 @ main ( i32 % argc , i8 * * nocapture % argv ) # 0 { <nl> mmm a / tests / cases / legalizer_b_ta2 . 
ll <nl> ppp b / tests / cases / legalizer_b_ta2 . ll <nl> entry : <nl> store i128 % ored , i128 * % bundled , align 4 <nl> call i32 ( i8 * ) * @ puts ( i8 * % buffer ) <nl> <nl> - % ander = bitcast i128 18402271027389267967 to i128 <nl> + % ander = add i128 18402271027389267967 , 0 <nl> % anded = and i128 % loaded , % ander ; variable <nl> store i128 % anded , i128 * % bundled , align 4 <nl> call i32 ( i8 * ) * @ puts ( i8 * % buffer ) <nl> mmm a / tests / cases / phiptrtoint . ll <nl> ppp b / tests / cases / phiptrtoint . ll <nl> target triple = " asmjs - unknown - emscripten " <nl> % " class . test : : Processor " = type { i32 , % " class . test : : StateMachine " } <nl> % " class . test : : StateMachine " = type { { i32 , i32 } } <nl> <nl> - @ _ZN4test9ProcessorC1Ev = alias internal void ( % " class . test : : Processor " * ) * @ _ZN4test9ProcessorC2Ev <nl> - @ _ZN4test9ProcessorD1Ev = alias internal void ( % " class . test : : Processor " * ) * @ _ZN4test9ProcessorD2Ev <nl> + @ _ZN4test9ProcessorC1Ev = internal alias void ( % " class . test : : Processor " * ) * @ _ZN4test9ProcessorC2Ev <nl> + @ _ZN4test9ProcessorD1Ev = internal alias void ( % " class . test : : Processor " * ) * @ _ZN4test9ProcessorD2Ev <nl> <nl> define internal void @ _ZN4test9ProcessorC2Ev ( % " class . test : : Processor " * nocapture % this ) unnamed_addr nounwind align 2 { <nl> % 1 = getelementptr inbounds % " class . test : : Processor " * % this , i32 0 , i32 0 <nl>
|
Update alias notation and other syntax in tests/cases/*.ll for LLVM 3.6
|
emscripten-core/emscripten
|
5aeb793f2494e4834e66702e923edd7173d0fe00
|
2015-03-17T20:10:40Z
|
mmm a / src / FindReplaceDialog . cpp <nl> ppp b / src / FindReplaceDialog . cpp <nl> void FindReplaceDialog : : setExtendedScintilla ( ExtendedScintilla * scintilla ) <nl> ui - > replaceAllButton - > setEnabled ( isWriteable ) ; <nl> <nl> connect ( m_scintilla , SIGNAL ( destroyed ( ) ) , this , SLOT ( hide ( ) ) ) ; <nl> + connect ( ui - > findText , SIGNAL ( editingFinished ( ) ) , this , SLOT ( cancelFind ( ) ) ) ; <nl> } <nl> <nl> bool FindReplaceDialog : : findNext ( ) <nl> void FindReplaceDialog : : show ( ) <nl> <nl> void FindReplaceDialog : : replace ( ) <nl> { <nl> - m_scintilla - > replace ( ui - > replaceWithText - > text ( ) ) ; <nl> + if ( m_scintilla - > hasSelectedText ( ) ) <nl> + m_scintilla - > replace ( ui - > replaceWithText - > text ( ) ) ; <nl> findNext ( ) ; <nl> } <nl> <nl> void FindReplaceDialog : : replaceAll ( ) <nl> <nl> } <nl> <nl> + void FindReplaceDialog : : cancelFind ( ) <nl> + { <nl> + m_scintilla - > findFirst ( QString ( ) , false , false , false , false ) ; <nl> + clearIndicators ( ) ; <nl> + } <nl> void FindReplaceDialog : : help ( ) <nl> { <nl> QWhatsThis : : enterWhatsThisMode ( ) ; <nl> void FindReplaceDialog : : clearIndicators ( ) <nl> void FindReplaceDialog : : close ( ) <nl> { <nl> m_scintilla - > clearSelection ( ) ; <nl> - clearIndicators ( ) ; <nl> + / / Reset any previous find so it does not interfere with the next time <nl> + / / the dialog is open . <nl> + cancelFind ( ) ; <nl> QDialog : : close ( ) ; <nl> } <nl> <nl> mmm a / src / FindReplaceDialog . h <nl> ppp b / src / FindReplaceDialog . h <nl> private slots : <nl> void replace ( ) ; <nl> void findAll ( ) ; <nl> void replaceAll ( ) ; <nl> + void cancelFind ( ) ; <nl> void help ( ) ; <nl> void close ( ) ; <nl> void reject ( ) override ; <nl>
|
Replace button in SQL Editor inserts text at cursor after a previous find
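The fix guards replace() so it only overwrites a match left selected by a previous find, and resets the stored search when the dialog closes. A condensed sketch of the two slots follows, assuming the same QScintilla-style members used in the diff (m_scintilla, ui->replaceWithText); the surrounding class is not redefined here.

```cpp
// Sketch only: FindReplaceDialog and its members come from the diff above.
void FindReplaceDialog::replace()
{
  // A previous findNext() leaves the match selected; without a selection,
  // replace() would just insert the replacement text at the cursor.
  if (m_scintilla->hasSelectedText())
    m_scintilla->replace(ui->replaceWithText->text());
  findNext();
}

void FindReplaceDialog::cancelFind()
{
  // Searching for an empty string clears the stored search target, so a later
  // findNext() cannot silently continue the stale search from last time.
  m_scintilla->findFirst(QString(), false, false, false, false);
  clearIndicators();
}
```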
|
sqlitebrowser/sqlitebrowser
|
12b4fd91a79aec5f051ad64f9e9a9de6ec7edc95
|
2018-11-17T13:51:46Z
|
mmm a / platforms / ios / build_framework . py <nl> ppp b / platforms / ios / build_framework . py <nl> def getXCodeMajor ( ) : <nl> raise Exception ( " Failed to parse Xcode version " ) <nl> <nl> class Builder : <nl> - def __init__ ( self , opencv , contrib , dynamic , bitcodedisabled , exclude , enablenonfree , targets ) : <nl> + def __init__ ( self , opencv , contrib , dynamic , bitcodedisabled , exclude , enablenonfree , targets , debug , debug_info ) : <nl> self . opencv = os . path . abspath ( opencv ) <nl> self . contrib = None <nl> if contrib : <nl> def __init__ ( self , opencv , contrib , dynamic , bitcodedisabled , exclude , enablenon <nl> self . exclude = exclude <nl> self . enablenonfree = enablenonfree <nl> self . targets = targets <nl> + self . debug = debug <nl> + self . debug_info = debug_info <nl> <nl> def getBD ( self , parent , t ) : <nl> <nl> def build ( self , outdir ) : <nl> def getToolchain ( self , arch , target ) : <nl> return None <nl> <nl> + def getConfiguration ( self ) : <nl> + return " Debug " if self . debug else " Release " <nl> + <nl> def getCMakeArgs ( self , arch , target ) : <nl> <nl> args = [ <nl> def getCMakeArgs ( self , arch , target ) : <nl> " - GXcode " , <nl> " - DAPPLE_FRAMEWORK = ON " , <nl> " - DCMAKE_INSTALL_PREFIX = install " , <nl> - " - DCMAKE_BUILD_TYPE = Release " , <nl> + " - DCMAKE_BUILD_TYPE = % s " % self . getConfiguration ( ) , <nl> ] + ( [ <nl> " - DBUILD_SHARED_LIBS = ON " , <nl> " - DCMAKE_MACOSX_BUNDLE = ON " , <nl> " - DCMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED = NO " , <nl> ] if self . dynamic else [ ] ) + ( [ <nl> " - DOPENCV_ENABLE_NONFREE = ON " <nl> - ] if self . enablenonfree else [ ] ) <nl> + ] if self . enablenonfree else [ ] ) + ( [ <nl> + " - DBUILD_WITH_DEBUG_INFO = ON " <nl> + ] if self . debug_info else [ ] ) <nl> <nl> if len ( self . exclude ) > 0 : <nl> args + = [ " - DBUILD_opencv_world = OFF " ] if not self . dynamic else [ ] <nl> def getBuildCommand ( self , archs , target ) : <nl> <nl> buildcmd + = [ <nl> " - sdk " , target . lower ( ) , <nl> - " - configuration " , " Release " , <nl> + " - configuration " , self . getConfiguration ( ) , <nl> " - parallelizeTargets " , <nl> " - jobs " , str ( multiprocessing . cpu_count ( ) ) , <nl> ] + ( [ " - target " , " ALL_BUILD " ] if self . dynamic else [ ] ) <nl> def buildOne ( self , arch , target , builddir , cmakeargs = [ ] ) : <nl> shutil . rmtree ( clean_dir ) <nl> buildcmd = self . getBuildCommand ( arch , target ) <nl> execute ( buildcmd + [ " - target " , " ALL_BUILD " , " build " ] , cwd = builddir ) <nl> - execute ( [ " cmake " , " - P " , " cmake_install . cmake " ] , cwd = builddir ) <nl> + execute ( [ " cmake " , " - DBUILD_TYPE = % s " % self . getConfiguration ( ) , " - P " , " cmake_install . cmake " ] , cwd = builddir ) <nl> <nl> def mergeLibs ( self , builddir ) : <nl> - res = os . path . join ( builddir , " lib " , " Release " , " libopencv_merged . a " ) <nl> + res = os . path . join ( builddir , " lib " , self . getConfiguration ( ) , " libopencv_merged . a " ) <nl> libs = glob . glob ( os . path . join ( builddir , " install " , " lib " , " * . a " ) ) <nl> libs3 = glob . glob ( os . path . join ( builddir , " install " , " share " , " OpenCV " , " 3rdparty " , " lib " , " * . a " ) ) <nl> print ( " Merging libraries : \ n \ t % s " % " \ n \ t " . join ( libs + libs3 ) , file = sys . stderr ) <nl> def makeFramework ( self , outdir , builddirs ) : <nl> shutil . copytree ( os . path . 
join ( builddirs [ 0 ] , " install " , " include " , " opencv2 " ) , os . path . join ( dstdir , " Headers " ) ) <nl> <nl> # make universal static lib <nl> - libs = [ os . path . join ( d , " lib " , " Release " , libname ) for d in builddirs ] <nl> + libs = [ os . path . join ( d , " lib " , self . getConfiguration ( ) , libname ) for d in builddirs ] <nl> lipocmd = [ " lipo " , " - create " ] <nl> lipocmd . extend ( libs ) <nl> lipocmd . extend ( [ " - o " , os . path . join ( dstdir , name ) ] ) <nl> def getCMakeArgs ( self , arch , target ) : <nl> parser . add_argument ( ' - - iphoneos_archs ' , default = ' armv7 , armv7s , arm64 ' , help = ' select iPhoneOS target ARCHS ' ) <nl> parser . add_argument ( ' - - iphonesimulator_archs ' , default = ' i386 , x86_64 ' , help = ' select iPhoneSimulator target ARCHS ' ) <nl> parser . add_argument ( ' - - enable_nonfree ' , default = False , dest = ' enablenonfree ' , action = ' store_true ' , help = ' enable non - free modules ( disabled by default ) ' ) <nl> + parser . add_argument ( ' - - debug ' , default = False , dest = ' debug ' , action = ' store_true ' , help = ' Build " Debug " binaries ( disabled by default ) ' ) <nl> + parser . add_argument ( ' - - debug_info ' , default = False , dest = ' debug_info ' , action = ' store_true ' , help = ' Build with debug information ( useful for Release mode : BUILD_WITH_DEBUG_INFO = ON ) ' ) <nl> args = parser . parse_args ( ) <nl> <nl> os . environ [ ' IPHONEOS_DEPLOYMENT_TARGET ' ] = args . iphoneos_deployment_target <nl> def getCMakeArgs ( self , arch , target ) : <nl> [ <nl> ( iphoneos_archs , " iPhoneOS " ) , <nl> ( iphonesimulator_archs , " iPhoneSimulator " ) , <nl> - ] ) <nl> + ] , args . debug , args . debug_info ) <nl> b . build ( args . out ) <nl> old mode 100644 <nl> new mode 100755 <nl> index 2425fa158ae . . 5897192108f <nl> mmm a / platforms / osx / build_framework . py <nl> ppp b / platforms / osx / build_framework . py <nl> <nl> sys . path . insert ( 0 , os . path . abspath ( os . path . abspath ( os . path . dirname ( __file__ ) ) + ' / . . / ios ' ) ) <nl> from build_framework import Builder <nl> <nl> + MACOSX_DEPLOYMENT_TARGET = ' 10 . 12 ' # default , can be changed via command line options or environment variable <nl> + <nl> class OSXBuilder ( Builder ) : <nl> <nl> def getToolchain ( self , arch , target ) : <nl> def getToolchain ( self , arch , target ) : <nl> def getBuildCommand ( self , archs , target ) : <nl> buildcmd = [ <nl> " xcodebuild " , <nl> - " MACOSX_DEPLOYMENT_TARGET = 10 . 9 " , <nl> + " MACOSX_DEPLOYMENT_TARGET = " + os . environ [ ' MACOSX_DEPLOYMENT_TARGET ' ] , <nl> " ARCHS = % s " % archs [ 0 ] , <nl> " - sdk " , target . lower ( ) , <nl> - " - configuration " , " Release " , <nl> + " - configuration " , " Debug " if self . debug else " Release " , <nl> " - parallelizeTargets " , <nl> " - jobs " , str ( multiprocessing . cpu_count ( ) ) <nl> ] <nl> def getInfoPlist ( self , builddirs ) : <nl> parser . add_argument ( ' - - contrib ' , metavar = ' DIR ' , default = None , help = ' folder with opencv_contrib repository ( default is " None " - build only main framework ) ' ) <nl> parser . add_argument ( ' - - without ' , metavar = ' MODULE ' , default = [ ] , action = ' append ' , help = ' OpenCV modules to exclude from the framework ' ) <nl> parser . add_argument ( ' - - enable_nonfree ' , default = False , dest = ' enablenonfree ' , action = ' store_true ' , help = ' enable non - free modules ( disabled by default ) ' ) <nl> + parser . 
add_argument ( ' - - macosx_deployment_target ' , default = os . environ . get ( ' MACOSX_DEPLOYMENT_TARGET ' , MACOSX_DEPLOYMENT_TARGET ) , help = ' specify MACOSX_DEPLOYMENT_TARGET ' ) <nl> + parser . add_argument ( ' - - debug ' , action = ' store_true ' , help = ' Build " Debug " binaries ( CMAKE_BUILD_TYPE = Debug ) ' ) <nl> + parser . add_argument ( ' - - debug_info ' , action = ' store_true ' , help = ' Build with debug information ( useful for Release mode : BUILD_WITH_DEBUG_INFO = ON ) ' ) <nl> + <nl> args = parser . parse_args ( ) <nl> <nl> + os . environ [ ' MACOSX_DEPLOYMENT_TARGET ' ] = args . macosx_deployment_target <nl> + print ( ' Using MACOSX_DEPLOYMENT_TARGET = ' + os . environ [ ' MACOSX_DEPLOYMENT_TARGET ' ] ) <nl> + <nl> b = OSXBuilder ( args . opencv , args . contrib , False , False , args . without , args . enablenonfree , <nl> [ <nl> ( [ " x86_64 " ] , " MacOSX " ) <nl> - ] ) <nl> + ] , args . debug , args . debug_info ) <nl> b . build ( args . out ) <nl>
|
Merge pull request from komakai:apple-debug-build
|
opencv/opencv
|
96d5cbd0363ed216f97a6cea0700dc9f9839fbc1
|
2019-08-23T15:22:29Z
|
mmm a / src / buffer_cache / alt / page_cache . cc <nl> ppp b / src / buffer_cache / alt / page_cache . cc <nl> current_page_acq_t : : ~ current_page_acq_t ( ) { <nl> guarantee ( access_ = = access_t : : write ) ; <nl> the_txn_ - > remove_acquirer ( this ) ; <nl> } <nl> - if ( current_page_ ! = NULL ) { <nl> + rassert ( current_page_ ! = NULL ) ; <nl> + if ( ! declared_snapshotted_ ) { <nl> + rassert ( ! snapshotted_page_ . has ( ) ) ; <nl> current_page_ - > remove_acquirer ( this ) ; <nl> + } else { <nl> + snapshotted_page_ . reset_page_ptr ( page_cache_ ) ; <nl> + current_page_ - > remove_keepalive ( ) ; <nl> } <nl> - snapshotted_page_ . reset_page_ptr ( page_cache_ ) ; <nl> page_cache_ - > consider_evicting_current_page ( block_id_ ) ; <nl> } <nl> } <nl> void current_page_acq_t : : declare_snapshotted ( ) { <nl> if ( ! declared_snapshotted_ ) { <nl> declared_snapshotted_ = true ; <nl> rassert ( current_page_ ! = NULL ) ; <nl> + current_page_ - > add_keepalive ( ) ; <nl> current_page_ - > pulse_pulsables ( this ) ; <nl> } <nl> } <nl> void current_page_acq_t : : pulse_write_available ( ) { <nl> current_page_t : : current_page_t ( block_id_t block_id ) <nl> : block_id_ ( block_id ) , <nl> is_deleted_ ( false ) , <nl> - last_write_acquirer_ ( NULL ) { <nl> + last_write_acquirer_ ( NULL ) , <nl> + num_keepalives_ ( 0 ) { <nl> / / Increment the block version so that we can distinguish between unassigned <nl> / / current_page_acq_t : : block_version_ values ( which are 0 ) and assigned ones . <nl> rassert ( last_write_acquirer_version_ . debug_value ( ) = = 0 ) ; <nl> current_page_t : : current_page_t ( block_id_t block_id , <nl> : block_id_ ( block_id ) , <nl> page_ ( new page_t ( block_id , block_size , std : : move ( buf ) , page_cache ) ) , <nl> is_deleted_ ( false ) , <nl> - last_write_acquirer_ ( NULL ) { <nl> + last_write_acquirer_ ( NULL ) , <nl> + num_keepalives_ ( 0 ) { <nl> / / Increment the block version so that we can distinguish between unassigned <nl> / / current_page_acq_t : : block_version_ values ( which are 0 ) and assigned ones . <nl> rassert ( last_write_acquirer_version_ . debug_value ( ) = = 0 ) ; <nl> current_page_t : : current_page_t ( block_id_t block_id , <nl> : block_id_ ( block_id ) , <nl> page_ ( new page_t ( block_id , std : : move ( buf ) , token , page_cache ) ) , <nl> is_deleted_ ( false ) , <nl> - last_write_acquirer_ ( NULL ) { <nl> + last_write_acquirer_ ( NULL ) , <nl> + num_keepalives_ ( 0 ) { <nl> / / Increment the block version so that we can distinguish between unassigned <nl> / / current_page_acq_t : : block_version_ values ( which are 0 ) and assigned ones . <nl> rassert ( last_write_acquirer_version_ . debug_value ( ) = = 0 ) ; <nl> current_page_t : : ~ current_page_t ( ) { <nl> <nl> / / An imperfect sanity check . <nl> rassert ( ! page_ . has ( ) ) ; <nl> + rassert ( num_keepalives_ = = 0 ) ; <nl> } <nl> <nl> void current_page_t : : reset ( page_cache_t * page_cache ) { <nl> rassert ( acquirers_ . empty ( ) ) ; <nl> + rassert ( num_keepalives_ = = 0 ) ; <nl> <nl> / / KSI : Does last_write_acquirer_ even need to be NULL ? Could we not just inform <nl> / / it of our impending destruction ? <nl> bool current_page_t : : should_be_evicted ( ) const { <nl> return false ; <nl> } <nl> <nl> + / / A reason : The current_page_t is kept alive for another reason . ( Important . ) <nl> + if ( num_keepalives_ > 0 ) { <nl> + return false ; <nl> + } <nl> + <nl> / / A reason : Its page_t isn ' t evicted , or has other snapshotters or waiters <nl> / / anyway . 
( Getting this wrong can only hurt performance . We want to evict <nl> / / current_page_t ' s with unloaded , otherwise unused page_t ' s . ) <nl> void current_page_t : : pulse_pulsables ( current_page_acq_t * const acq ) { <nl> cur - > snapshotted_page_ . init ( <nl> current_recency , <nl> the_page_for_read_or_deleted ( help ) ) ; <nl> - cur - > current_page_ = NULL ; <nl> acquirers_ . remove ( cur ) ; <nl> } <nl> cur = next ; <nl> void current_page_t : : pulse_pulsables ( current_page_acq_t * const acq ) { <nl> } <nl> } <nl> <nl> + void current_page_t : : add_keepalive ( ) { <nl> + + + num_keepalives_ ; <nl> + } <nl> + <nl> + void current_page_t : : remove_keepalive ( ) { <nl> + guarantee ( num_keepalives_ > 0 ) ; <nl> + - - num_keepalives_ ; <nl> + } <nl> + <nl> void current_page_t : : mark_deleted ( current_page_help_t help ) { <nl> rassert ( ! is_deleted_ ) ; <nl> is_deleted_ = true ; <nl> void page_txn_t : : remove_acquirer ( current_page_acq_t * acq ) { <nl> acq - > declare_readonly ( ) ; <nl> acq - > declare_snapshotted ( ) ; <nl> <nl> - / / Since we snapshotted the lead acquirer , it gets detached . <nl> - rassert ( acq - > current_page_ = = NULL ) ; <nl> / / Steal the snapshotted page_ptr_t . <nl> timestamped_page_ptr_t local = std : : move ( acq - > snapshotted_page_ ) ; <nl> / / It ' s okay to have two dirtied_page_t ' s or touched_page_t ' s for the <nl> mmm a / src / buffer_cache / alt / page_cache . hpp <nl> ppp b / src / buffer_cache / alt / page_cache . hpp <nl> class current_page_t { <nl> void add_acquirer ( current_page_acq_t * acq ) ; <nl> void remove_acquirer ( current_page_acq_t * acq ) ; <nl> void pulse_pulsables ( current_page_acq_t * acq ) ; <nl> + void add_keepalive ( ) ; <nl> + void remove_keepalive ( ) ; <nl> <nl> page_t * the_page_for_write ( current_page_help_t help , cache_account_t * account ) ; <nl> page_t * the_page_for_read ( current_page_help_t help , cache_account_t * account ) ; <nl> class current_page_t { <nl> / / All list elements have current_page_ ! = NULL , snapshotted_page_ = = NULL . <nl> intrusive_list_t < current_page_acq_t > acquirers_ ; <nl> <nl> + / / Avoids eviction if > 0 . This is used by snapshotted current_page_acq_t ' s <nl> + / / that have a snapshotted version of this block . If the current_page_t <nl> + / / would be evicted that would mess with the block version . <nl> + intptr_t num_keepalives_ ; <nl> + <nl> DISABLE_COPYING ( current_page_t ) ; <nl> } ; <nl> <nl>
|
Keep current_page_ts alive for as long as there are any snapshots. Otherwise we would mess up the block versions.
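The commit prevents eviction of a current_page_t while snapshotted acquirers still depend on its block version, using a simple keepalive count. The stripped-down sketch below shows only that counter and the eviction guard; the member names follow the diff, and everything else is omitted or stands in as an assumption.

```cpp
#include <cassert>

class current_page_t {
public:
  void add_keepalive() { ++num_keepalives_; }

  void remove_keepalive() {
    assert(num_keepalives_ > 0);
    --num_keepalives_;
  }

  bool should_be_evicted() const {
    // A snapshotted acquirer still references this page's block version, so
    // evicting the current_page_t now would let a later reload reset it.
    if (num_keepalives_ > 0)
      return false;
    return other_eviction_checks_pass_;
  }

private:
  long num_keepalives_ = 0;
  bool other_eviction_checks_pass_ = true;  // stand-in for the remaining checks
};
```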
|
rethinkdb/rethinkdb
|
d4c342495eafd775eba1db2ca486cf4ff4e31e9c
|
2014-04-17T23:58:48Z
|
mmm a / Marlin / src / HAL / HAL_STM32 / HAL . h <nl> ppp b / Marlin / src / HAL / HAL_STM32 / HAL . h <nl> <nl> / / Types <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - typedef int8_t pin_t ; <nl> + typedef int16_t pin_t ; <nl> <nl> # define HAL_SERVO_LIB libServo <nl> <nl> mmm a / Marlin / src / core / boards . h <nl> ppp b / Marlin / src / core / boards . h <nl> <nl> # define BOARD_BLACK_STM32F407VE 4106 / / BLACK_STM32F407VE <nl> # define BOARD_BLACK_STM32F407ZE 4107 / / BLACK_STM32F407ZE <nl> # define BOARD_STEVAL 4108 / / STEVAL - 3DP001V1 3D PRINTER BOARD <nl> + # define BOARD_BIGTREE_SKR_PRO_V1_1 4109 / / BigTreeTech SKR Pro v1 . 1 ( STM32F407ZG ) <nl> <nl> / / <nl> / / ARM Cortex M7 <nl> mmm a / Marlin / src / pins / pins . h <nl> ppp b / Marlin / src / pins / pins . h <nl> <nl> # include " pins_BLACK_STM32F407VE . h " / / STM32F4 env : black_stm32f407ve <nl> # elif MB ( STEVAL ) <nl> # include " pins_STEVAL . h " / / STM32F4 env : STM32F4 <nl> + # elif MB ( BIGTREE_SKR_PRO_V1_1 ) <nl> + # include " pins_BIGTREE_SKR_PRO_V1 . 1 . h " / / STM32F4 env : BIGTREE_SKR_PRO <nl> <nl> / / <nl> / / ARM Cortex M7 <nl> new file mode 100644 <nl> index 00000000000 . . dc2cfd33659 <nl> mmm / dev / null <nl> ppp b / Marlin / src / pins / pins_BIGTREE_SKR_PRO_V1 . 1 . h <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2019 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + # pragma once <nl> + <nl> + # ifndef TARGET_STM32F4 <nl> + # error " Oops ! Select an STM32F4 board in ' Tools > Board . ' " <nl> + # endif <nl> + <nl> + # if HOTENDS > 3 | | E_STEPPERS > 3 <nl> + # error " BIGTREE SKR Pro V1 . 1 supports up to 3 hotends / E - steppers . " <nl> + # endif <nl> + <nl> + # define BOARD_NAME " BIGTREE SKR Pro V1 . 
1 " <nl> + <nl> + # define EEPROM_EMULATED_WITH_SRAM <nl> + <nl> + / / <nl> + / / Servos <nl> + / / <nl> + # define SERVO0_PIN PA1 <nl> + <nl> + / / <nl> + / / Limit Switches <nl> + / / <nl> + # define X_MIN_PIN PB10 <nl> + # define X_MAX_PIN PE15 <nl> + # define Y_MIN_PIN PE12 <nl> + # define Y_MAX_PIN PE10 <nl> + # define Z_MIN_PIN PG8 <nl> + # define Z_MAX_PIN PG5 <nl> + <nl> + / / <nl> + / / Z Probe must be this pins <nl> + / / <nl> + # ifndef Z_MIN_PROBE_PIN <nl> + # define Z_MIN_PROBE_PIN PA2 <nl> + # endif <nl> + <nl> + / / <nl> + / / Steppers <nl> + / / <nl> + # define X_STEP_PIN PE9 <nl> + # define X_DIR_PIN PF1 <nl> + # define X_ENABLE_PIN PF2 <nl> + # ifndef X_CS_PIN <nl> + # define X_CS_PIN PA15 <nl> + # endif <nl> + <nl> + # define Y_STEP_PIN PE11 <nl> + # define Y_DIR_PIN PE8 <nl> + # define Y_ENABLE_PIN PD7 <nl> + # ifndef Y_CS_PIN <nl> + # define Y_CS_PIN PB8 <nl> + # endif <nl> + <nl> + # define Z_STEP_PIN PE13 <nl> + # define Z_DIR_PIN PC2 <nl> + # define Z_ENABLE_PIN PC0 <nl> + # ifndef Z_CS_PIN <nl> + # define Z_CS_PIN PB9 <nl> + # endif <nl> + <nl> + # define E0_STEP_PIN PE14 <nl> + # define E0_DIR_PIN PA0 <nl> + # define E0_ENABLE_PIN PC3 <nl> + # ifndef E0_CS_PIN <nl> + # define E0_CS_PIN PB3 <nl> + # endif <nl> + <nl> + # define E1_STEP_PIN PD15 <nl> + # define E1_DIR_PIN PE7 <nl> + # define E1_ENABLE_PIN PA3 <nl> + # ifndef E1_CS_PIN <nl> + # define E1_CS_PIN PG15 <nl> + # endif <nl> + <nl> + # define E2_STEP_PIN PD13 <nl> + # define E2_DIR_PIN PG9 <nl> + # define E2_ENABLE_PIN PF0 <nl> + # ifndef E2_CS_PIN <nl> + # define E2_CS_PIN PG12 <nl> + # endif <nl> + <nl> + / / <nl> + / / Software SPI pins for TMC2130 stepper drivers <nl> + / / <nl> + # if ENABLED ( TMC_USE_SW_SPI ) <nl> + # define TMC_SW_MOSI PC12 <nl> + # define TMC_SW_MISO PC11 <nl> + # define TMC_SW_SCK PC10 <nl> + # endif <nl> + <nl> + # if HAS_DRIVER ( TMC2208 ) <nl> + / * * <nl> + * TMC2208 stepper drivers <nl> + * <nl> + * Hardware serial communication ports . 
<nl> + * If undefined software serial is used according to the pins below <nl> + * / <nl> + / / # define X_HARDWARE_SERIAL Serial <nl> + / / # define X2_HARDWARE_SERIAL Serial1 <nl> + / / # define Y_HARDWARE_SERIAL Serial1 <nl> + / / # define Y2_HARDWARE_SERIAL Serial1 <nl> + / / # define Z_HARDWARE_SERIAL Serial1 <nl> + / / # define Z2_HARDWARE_SERIAL Serial1 <nl> + / / # define E0_HARDWARE_SERIAL Serial1 <nl> + / / # define E1_HARDWARE_SERIAL Serial1 <nl> + / / # define E2_HARDWARE_SERIAL Serial1 <nl> + / / # define E3_HARDWARE_SERIAL Serial1 <nl> + / / # define E4_HARDWARE_SERIAL Serial1 <nl> + <nl> + / / <nl> + / / Software serial <nl> + / / <nl> + # define X_SERIAL_TX_PIN PC13 <nl> + # define X_SERIAL_RX_PIN PE4 <nl> + <nl> + # define Y_SERIAL_TX_PIN PE3 <nl> + # define Y_SERIAL_RX_PIN PE2 <nl> + <nl> + # define Z_SERIAL_TX_PIN PE0 <nl> + # define Z_SERIAL_RX_PIN PE1 <nl> + <nl> + # define E0_SERIAL_TX_PIN PD4 <nl> + # define E0_SERIAL_RX_PIN PD2 <nl> + <nl> + # define E1_SERIAL_TX_PIN PD0 <nl> + # define E1_SERIAL_RX_PIN PD1 <nl> + <nl> + # define Z2_SERIAL_TX_PIN PD6 <nl> + # define Z2_SERIAL_RX_PIN PD5 <nl> + # endif <nl> + <nl> + / / <nl> + / / Temperature Sensors <nl> + / / <nl> + # define TEMP_0_PIN PF3 / / T0 <nl> + # define TEMP_1_PIN PF4 / / T1 <nl> + # define TEMP_2_PIN PF5 / / T2 <nl> + # define TEMP_BED_PIN PF6 / / TB <nl> + <nl> + / / <nl> + / / Heaters / Fans <nl> + / / <nl> + # define HEATER_0_PIN PB1 / / Heater0 <nl> + # define HEATER_1_PIN PD14 / / Heater1 <nl> + # define HEATER_2_PIN PB0 / / Heater1 <nl> + # define HEATER_BED_PIN PD12 / / Hotbed <nl> + # define FAN_PIN PC8 / / Fan0 <nl> + # define FAN1_PIN PE5 / / Fan1 <nl> + # define FAN2_PIN PE6 / / Fan2 <nl> + <nl> + / / <nl> + / / Misc . Functions <nl> + / / <nl> + # define SDSS PB12 <nl> + <nl> + / * <nl> + | _____ _____ <nl> + | NC | · · | GND 5V | · · | GND <nl> + | RESET | · · | PF12 ( SD_DETECT ) ( LCD_D7 ) PG7 | · · | PG6 ( LCD_D6 ) <nl> + | ( MOSI ) PB15 | · · | PF11 ( BTN_EN2 ) ( LCD_D5 ) PG3 | · · | PG2 ( LCD_D4 ) <nl> + | ( SD_SS ) PB12 | · · | PG10 ( BTN_EN1 ) ( LCD_RS ) PD10 | · · | PD11 ( LCD_EN ) <nl> + | ( SCK ) PB13 | · · | PB14 ( MISO ) ( BTN_ENC ) PA8 | · · | PG4 ( BEEPER ) <nl> + |  ̄  ̄  ̄  ̄ <nl> + | EXP2 EXP1 <nl> + * / <nl> + # if HAS_SPI_LCD <nl> + # define BEEPER_PIN PG4 <nl> + # define BTN_ENC PA8 <nl> + <nl> + # if ENABLED ( CR10_STOCKDISPLAY ) <nl> + # define LCD_PINS_RS PG6 <nl> + <nl> + # define BTN_EN1 PD11 <nl> + # define BTN_EN2 PG2 <nl> + <nl> + # define LCD_PINS_ENABLE PG7 <nl> + # define LCD_PINS_D4 PG3 <nl> + <nl> + # else <nl> + <nl> + # define LCD_PINS_RS PD10 <nl> + <nl> + # define BTN_EN1 PG10 <nl> + # define BTN_EN2 PF11 <nl> + # define SD_DETECT_PIN PF12 <nl> + <nl> + # define LCD_SDSS PB12 <nl> + <nl> + # define LCD_PINS_ENABLE PD11 <nl> + # define LCD_PINS_D4 PG2 <nl> + <nl> + # if ENABLED ( ULTIPANEL ) <nl> + # define LCD_PINS_D5 PG3 <nl> + # define LCD_PINS_D6 PG6 <nl> + # define LCD_PINS_D7 PG7 <nl> + # endif <nl> + <nl> + # endif <nl> + <nl> + / / Alter timing for graphical display <nl> + # if HAS_GRAPHICAL_DISPLAY <nl> + # ifndef ST7920_DELAY_1 <nl> + # define ST7920_DELAY_1 DELAY_NS ( 96 ) <nl> + # endif <nl> + # ifndef ST7920_DELAY_2 <nl> + # define ST7920_DELAY_2 DELAY_NS ( 48 ) <nl> + # endif <nl> + # ifndef ST7920_DELAY_3 <nl> + # define ST7920_DELAY_3 DELAY_NS ( 600 ) <nl> + # endif <nl> + # endif <nl> + <nl> + # endif / / HAS_SPI_LCD <nl> new file mode 100644 <nl> index 00000000000 . . 
92e276850c8 <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / boards / BigTree_SKR_Pro . json <nl> <nl> + { <nl> + " build " : { <nl> + " core " : " stm32 " , <nl> + " cpu " : " cortex - m4 " , <nl> + " extra_flags " : " - DSTM32F407xx " , <nl> + " f_cpu " : " 168000000L " , <nl> + " hwids " : [ <nl> + [ <nl> + " 0x1EAF " , <nl> + " 0x0003 " <nl> + ] , <nl> + [ <nl> + " 0x0483 " , <nl> + " 0x3748 " <nl> + ] <nl> + ] , <nl> + " ldscript " : " stm32f407xg . ld " , <nl> + " mcu " : " stm32f407zgt6 " , <nl> + " variant " : " BIGTREE_GENERIC_STM32F407_5X " <nl> + } , <nl> + " debug " : { <nl> + " jlink_device " : " STM32F407ZG " , <nl> + " openocd_target " : " stm32f4x " , <nl> + " svd_path " : " STM32F40x . svd " , <nl> + " tools " : { <nl> + " stlink " : { <nl> + " server " : { <nl> + " arguments " : [ <nl> + " - f " , <nl> + " scripts / interface / stlink . cfg " , <nl> + " - c " , <nl> + " transport select hla_swd " , <nl> + " - f " , <nl> + " scripts / target / stm32f4x . cfg " , <nl> + " - c " , <nl> + " reset_config none " <nl> + ] , <nl> + " executable " : " bin / openocd " , <nl> + " package " : " tool - openocd " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " frameworks " : [ <nl> + " arduino " , <nl> + " stm32cube " <nl> + ] , <nl> + " name " : " STM32F407ZG ( 192k RAM . 1024k Flash ) " , <nl> + " upload " : { <nl> + " disable_flushing " : false , <nl> + " maximum_ram_size " : 196608 , <nl> + " maximum_size " : 1048576 , <nl> + " protocol " : " stlink " , <nl> + " protocols " : [ <nl> + " stlink " , <nl> + " dfu " , <nl> + " jlink " , <nl> + " cmsis - dap " <nl> + ] , <nl> + " require_upload_port " : true , <nl> + " use_1200bps_touch " : false , <nl> + " wait_for_upload_port " : false <nl> + } , <nl> + " url " : " http : / / www . st . com / en / microcontrollers / stm32f407zg . html " , <nl> + " vendor " : " Generic " <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . cc43893d09f <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / scripts / generic_create_variant . py <nl> <nl> + import os , shutil <nl> + from SCons . Script import DefaultEnvironment <nl> + from platformio import util <nl> + <nl> + env = DefaultEnvironment ( ) <nl> + platform = env . PioPlatform ( ) <nl> + board = env . BoardConfig ( ) <nl> + <nl> + FRAMEWORK_DIR = platform . get_package_dir ( " framework - arduinoststm32 " ) <nl> + CMSIS_DIR = os . path . join ( FRAMEWORK_DIR , " CMSIS " , " CMSIS " ) <nl> + assert os . path . isdir ( FRAMEWORK_DIR ) <nl> + assert os . path . isdir ( CMSIS_DIR ) <nl> + assert os . path . isdir ( " buildroot / share / PlatformIO / variants " ) <nl> + <nl> + mcu_type = board . get ( " build . mcu " ) [ : - 2 ] <nl> + variant = board . get ( " build . variant " ) <nl> + series = mcu_type [ : 7 ] . upper ( ) + " xx " <nl> + variant_dir = os . path . join ( FRAMEWORK_DIR , " variants " , variant ) <nl> + <nl> + source_dir = os . path . join ( " buildroot / share / PlatformIO / variants " , variant ) <nl> + assert os . path . isdir ( source_dir ) <nl> + <nl> + if not os . path . isdir ( variant_dir ) : <nl> + os . mkdir ( variant_dir ) <nl> + <nl> + for file_name in os . listdir ( source_dir ) : <nl> + full_file_name = os . path . join ( source_dir , file_name ) <nl> + if os . path . isfile ( full_file_name ) : <nl> + shutil . copy ( full_file_name , variant_dir ) <nl> new file mode 100644 <nl> index 00000000000 . . 
63763e9db7b <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / variants / BIGTREE_GENERIC_STM32F407_5X / PeripheralPins . c <nl> <nl> + / * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Copyright ( c ) 2019 , STMicroelectronics <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are met : <nl> + * <nl> + * 1 . Redistributions of source code must retain the above copyright notice , <nl> + * this list of conditions and the following disclaimer . <nl> + * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * this list of conditions and the following disclaimer in the documentation <nl> + * and / or other materials provided with the distribution . <nl> + * 3 . Neither the name of STMicroelectronics nor the names of its contributors <nl> + * may be used to endorse or promote products derived from this software <nl> + * without specific prior written permission . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " <nl> + * AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE <nl> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE <nl> + * FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL <nl> + * DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR <nl> + * SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER <nl> + * CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , <nl> + * OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Automatically generated from STM32F407Z ( E - G ) Tx . xml <nl> + * / <nl> + # include " Arduino . h " <nl> + # include " PeripheralPins . h " <nl> + <nl> + / * = = = = = <nl> + * Note : Commented lines are alternative possibilities which are not used by default . <nl> + * If you change them , you should know what you ' re doing first . 
<nl> + * = = = = = <nl> + * / <nl> + <nl> + / / * * * ADC * * * <nl> + <nl> + # ifdef HAL_ADC_MODULE_ENABLED <nl> + const PinMap PinMap_ADC [ ] = { <nl> + { PA_0 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 0 , 0 ) } , / / ADC1_IN0 <nl> + / / { PA_0 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 0 , 0 ) } , / / ADC2_IN0 <nl> + / / { PA_0 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 0 , 0 ) } , / / ADC3_IN0 <nl> + { PA_1 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 1 , 0 ) } , / / ADC1_IN1 <nl> + / / { PA_1 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 1 , 0 ) } , / / ADC2_IN1 <nl> + / / { PA_1 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 1 , 0 ) } , / / ADC3_IN1 <nl> + { PA_2 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 2 , 0 ) } , / / ADC1_IN2 <nl> + / / { PA_2 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 2 , 0 ) } , / / ADC2_IN2 <nl> + / / { PA_2 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 2 , 0 ) } , / / ADC3_IN2 <nl> + { PA_3 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 3 , 0 ) } , / / ADC1_IN3 <nl> + / / { PA_3 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 3 , 0 ) } , / / ADC2_IN3 <nl> + / / { PA_3 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 3 , 0 ) } , / / ADC3_IN3 <nl> + { PA_4 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 4 , 0 ) } , / / ADC1_IN4 <nl> + / / { PA_4 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 4 , 0 ) } , / / ADC2_IN4 <nl> + { PA_5 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 5 , 0 ) } , / / ADC1_IN5 <nl> + / / { PA_5 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 5 , 0 ) } , / / ADC2_IN5 <nl> + { PA_6 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 6 , 0 ) } , / / ADC1_IN6 <nl> + / / { PA_6 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 6 , 0 ) } , / / ADC2_IN6 <nl> + { PA_7 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 7 , 0 ) } , / / ADC1_IN7 <nl> + / / { PA_7 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 7 , 0 ) } , / / ADC2_IN7 <nl> + { PB_0 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 8 , 0 ) } , / / ADC1_IN8 <nl> + / / { PB_0 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 8 , 0 ) } , / / ADC2_IN8 <nl> + { PB_1 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 9 , 0 ) } , / / ADC1_IN9 <nl> + / / { PB_1 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 9 , 0 ) } , / / ADC2_IN9 <nl> + { PC_0 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 10 , 0 ) } , / / ADC1_IN10 <nl> + / / { PC_0 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 10 , 0 ) } , / / ADC2_IN10 <nl> + / / { PC_0 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 10 , 0 ) } , / / ADC3_IN10 <nl> + { PC_1 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 11 , 0 ) } , / / ADC1_IN11 <nl> + / / { PC_1 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 11 , 0 ) } , / / ADC2_IN11 <nl> + / / { PC_1 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 11 , 0 ) } , / / ADC3_IN11 <nl> + { PC_2 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 12 , 0 ) } , / / ADC1_IN12 <nl> + / / { PC_2 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 12 , 0 ) } , / / ADC2_IN12 <nl> + / / { PC_2 
, ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 12 , 0 ) } , / / ADC3_IN12 <nl> + { PC_3 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 13 , 0 ) } , / / ADC1_IN13 <nl> + / / { PC_3 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 13 , 0 ) } , / / ADC2_IN13 <nl> + / / { PC_3 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 13 , 0 ) } , / / ADC3_IN13 <nl> + { PC_4 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 14 , 0 ) } , / / ADC1_IN14 <nl> + / / { PC_4 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 14 , 0 ) } , / / ADC2_IN14 <nl> + { PC_5 , ADC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 15 , 0 ) } , / / ADC1_IN15 <nl> + / / { PC_5 , ADC2 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 15 , 0 ) } , / / ADC2_IN15 <nl> + <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio , 24 ADC <nl> + { PF_3 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 9 , 0 ) } , / / ADC3_IN9 <nl> + { PF_4 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 14 , 0 ) } , / / ADC3_IN14 <nl> + { PF_5 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 15 , 0 ) } , / / ADC3_IN15 <nl> + { PF_6 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 4 , 0 ) } , / / ADC3_IN4 <nl> + { PF_7 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 5 , 0 ) } , / / ADC3_IN5 <nl> + { PF_8 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 6 , 0 ) } , / / ADC3_IN6 <nl> + { PF_9 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 7 , 0 ) } , / / ADC3_IN7 <nl> + { PF_10 , ADC3 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 8 , 0 ) } , / / ADC3_IN8 <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + / / * * * DAC * * * <nl> + <nl> + # ifdef HAL_DAC_MODULE_ENABLED <nl> + const PinMap PinMap_DAC [ ] = { <nl> + { PA_4 , DAC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 1 , 0 ) } , / / DAC_OUT1 <nl> + { PA_5 , DAC1 , STM_PIN_DATA_EXT ( STM_MODE_ANALOG , GPIO_NOPULL , 0 , 2 , 0 ) } , / / DAC_OUT2 <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + / / * * * I2C * * * <nl> + <nl> + # ifdef HAL_I2C_MODULE_ENABLED <nl> + const PinMap PinMap_I2C_SDA [ ] = { <nl> + { PB_7 , I2C1 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C1 ) } , <nl> + { PB_9 , I2C1 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C1 ) } , <nl> + { PB_11 , I2C2 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C2 ) } , <nl> + { PC_9 , I2C3 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C3 ) } , <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PF_0 , I2C2 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C2 ) } , <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_I2C_MODULE_ENABLED <nl> + const PinMap PinMap_I2C_SCL [ ] = { <nl> + { PA_8 , I2C3 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C3 ) } , <nl> + { PB_6 , I2C1 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C1 ) } , <nl> + { PB_8 , I2C1 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C1 ) } , <nl> + { PB_10 , I2C2 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C2 ) } , <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PF_1 , I2C2 , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_NOPULL , GPIO_AF4_I2C2 ) } , <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> 
+ <nl> + / / * * * PWM * * * <nl> + <nl> + # ifdef HAL_TIM_MODULE_ENABLED <nl> + const PinMap PinMap_PWM [ ] = { <nl> + { PA_0 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 1 , 0 ) } , / / TIM2_CH1 <nl> + / / { PA_0 , TIM5 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM5 , 1 , 0 ) } , / / TIM5_CH1 <nl> + { PA_1 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 2 , 0 ) } , / / TIM2_CH2 <nl> + / / { PA_1 , TIM5 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM5 , 2 , 0 ) } , / / TIM5_CH2 <nl> + { PA_2 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 3 , 0 ) } , / / TIM2_CH3 <nl> + / / { PA_2 , TIM5 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM5 , 3 , 0 ) } , / / TIM5_CH3 <nl> + / / { PA_2 , TIM9 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM9 , 1 , 0 ) } , / / TIM9_CH1 <nl> + { PA_3 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 4 , 0 ) } , / / TIM2_CH4 <nl> + / / { PA_3 , TIM5 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM5 , 4 , 0 ) } , / / TIM5_CH4 <nl> + / / { PA_3 , TIM9 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM9 , 2 , 0 ) } , / / TIM9_CH2 <nl> + { PA_5 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 1 , 0 ) } , / / TIM2_CH1 <nl> + / / { PA_5 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 1 , 1 ) } , / / TIM8_CH1N <nl> + { PA_6 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 1 , 0 ) } , / / TIM3_CH1 <nl> + / / { PA_6 , TIM13 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF9_TIM13 , 1 , 0 ) } , / / TIM13_CH1 <nl> + / / { PA_7 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 1 , 1 ) } , / / TIM1_CH1N <nl> + { PA_7 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 2 , 0 ) } , / / TIM3_CH2 <nl> + / / { PA_7 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 1 , 1 ) } , / / TIM8_CH1N <nl> + / / { PA_7 , TIM14 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF9_TIM14 , 1 , 0 ) } , / / TIM14_CH1 <nl> + { PA_8 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 1 , 0 ) } , / / TIM1_CH1 <nl> + { PA_9 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 2 , 0 ) } , / / TIM1_CH2 <nl> + { PA_10 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 3 , 0 ) } , / / TIM1_CH3 <nl> + { PA_11 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 4 , 0 ) } , / / TIM1_CH4 <nl> + / / { PA_15 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 1 , 0 ) } , / / TIM2_CH1 <nl> + / / { PB_0 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 2 , 1 ) } , / / TIM1_CH2N <nl> + { PB_0 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 3 , 0 ) } , / / TIM3_CH3 <nl> + / / { PB_0 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 2 , 1 ) } , / / TIM8_CH2N <nl> + / / { PB_1 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 3 , 1 ) } , / / TIM1_CH3N <nl> + { PB_1 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 4 , 0 ) } , / / TIM3_CH4 <nl> + / / { PB_1 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 3 , 1 ) } , / / TIM8_CH3N <nl> + / / { PB_3 , TIM2 , STM_PIN_DATA_EXT ( 
STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 2 , 0 ) } , / / TIM2_CH2 <nl> + { PB_4 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 1 , 0 ) } , / / TIM3_CH1 <nl> + { PB_5 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 2 , 0 ) } , / / TIM3_CH2 <nl> + / / { PB_6 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 1 , 0 ) } , / / TIM4_CH1 <nl> + / / { PB_7 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 2 , 0 ) } , / / TIM4_CH2 <nl> + / / { PB_8 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 3 , 0 ) } , / / TIM4_CH3 <nl> + { PB_8 , TIM10 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM10 , 1 , 0 ) } , / / TIM10_CH1 <nl> + / / { PB_9 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 4 , 0 ) } , / / TIM4_CH4 <nl> + { PB_9 , TIM11 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM11 , 1 , 0 ) } , / / TIM11_CH1 <nl> + { PB_10 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 3 , 0 ) } , / / TIM2_CH3 <nl> + { PB_11 , TIM2 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM2 , 4 , 0 ) } , / / TIM2_CH4 <nl> + { PB_13 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 1 , 1 ) } , / / TIM1_CH1N <nl> + { PB_14 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 2 , 1 ) } , / / TIM1_CH2N <nl> + { PB_14 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 2 , 1 ) } , / / TIM8_CH2N <nl> + { PB_14 , TIM12 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF9_TIM12 , 1 , 0 ) } , / / TIM12_CH1 <nl> + { PB_15 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 3 , 1 ) } , / / TIM1_CH3N <nl> + { PB_15 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 3 , 1 ) } , / / TIM8_CH3N <nl> + { PB_15 , TIM12 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF9_TIM12 , 2 , 0 ) } , / / TIM12_CH2 <nl> + { PC_6 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 1 , 0 ) } , / / TIM3_CH1 <nl> + { PC_6 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 1 , 0 ) } , / / TIM8_CH1 <nl> + { PC_7 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 2 , 0 ) } , / / TIM3_CH2 <nl> + { PC_7 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 2 , 0 ) } , / / TIM8_CH2 <nl> + { PC_8 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 3 , 0 ) } , / / TIM3_CH3 <nl> + { PC_8 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 3 , 0 ) } , / / TIM8_CH3 <nl> + / / { PC_9 , TIM3 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM3 , 4 , 0 ) } , / / TIM3_CH4 <nl> + / / { PC_9 , TIM8 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM8 , 4 , 0 ) } , / / TIM8_CH4 <nl> + { PD_12 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 1 , 0 ) } , / / TIM4_CH1 <nl> + { PD_13 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 2 , 0 ) } , / / TIM4_CH2 <nl> + { PD_14 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 3 , 0 ) } , / / TIM4_CH3 <nl> + { PD_15 , TIM4 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF2_TIM4 , 4 , 0 ) } , / / TIM4_CH4 <nl> + { PE_5 , TIM9 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM9 , 1 , 0 ) } , / / TIM9_CH1 <nl> + { 
PE_6 , TIM9 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM9 , 2 , 0 ) } , / / TIM9_CH2 <nl> + { PE_8 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 1 , 1 ) } , / / TIM1_CH1N <nl> + { PE_9 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 1 , 0 ) } , / / TIM1_CH1 <nl> + { PE_10 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 2 , 1 ) } , / / TIM1_CH2N <nl> + { PE_11 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 2 , 0 ) } , / / TIM1_CH2 <nl> + { PE_12 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 3 , 1 ) } , / / TIM1_CH3N <nl> + { PE_13 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 3 , 0 ) } , / / TIM1_CH3 <nl> + { PE_14 , TIM1 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF1_TIM1 , 4 , 0 ) } , / / TIM1_CH4 <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PF_6 , TIM10 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM10 , 1 , 0 ) } , / / TIM10_CH1 <nl> + { PF_7 , TIM11 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF3_TIM11 , 1 , 0 ) } , / / TIM11_CH1 <nl> + { PF_8 , TIM13 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF9_TIM13 , 1 , 0 ) } , / / TIM13_CH1 <nl> + { PF_9 , TIM14 , STM_PIN_DATA_EXT ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF9_TIM14 , 1 , 0 ) } , / / TIM14_CH1 <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + / / * * * SERIAL * * * <nl> + <nl> + # ifdef HAL_UART_MODULE_ENABLED <nl> + const PinMap PinMap_UART_TX [ ] = { <nl> + { PA_0 , UART4 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_UART4 ) } , <nl> + { PA_2 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PA_9 , USART1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART1 ) } , <nl> + { PB_6 , USART1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART1 ) } , <nl> + { PB_10 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + { PC_6 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + { PC_10 , UART4 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_UART4 ) } , <nl> + { PC_10 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + { PC_12 , UART5 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_UART5 ) } , <nl> + { PD_5 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PD_8 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PG_14 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_UART_MODULE_ENABLED <nl> + const PinMap PinMap_UART_RX [ ] = { <nl> + { PA_1 , UART4 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_UART4 ) } , <nl> + { PA_3 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PA_10 , USART1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART1 ) } , <nl> + { PB_7 , USART1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART1 ) } , <nl> + { PB_11 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + { PC_7 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + { PC_11 , UART4 , 
STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_UART4 ) } , <nl> + { PC_11 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + { PD_2 , UART5 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_UART5 ) } , <nl> + { PD_6 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PD_9 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PG_9 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_UART_MODULE_ENABLED <nl> + const PinMap PinMap_UART_RTS [ ] = { <nl> + { PA_1 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PA_12 , USART1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART1 ) } , <nl> + { PB_14 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + { PD_4 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PD_12 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PG_8 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + { PG_12 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_UART_MODULE_ENABLED <nl> + const PinMap PinMap_UART_CTS [ ] = { <nl> + { PA_0 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PA_11 , USART1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART1 ) } , <nl> + { PB_13 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + { PD_3 , USART2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART2 ) } , <nl> + { PD_11 , USART3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF7_USART3 ) } , <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PG_13 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + { PG_15 , USART6 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF8_USART6 ) } , <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + / / * * * SPI * * * <nl> + <nl> + # ifdef HAL_SPI_MODULE_ENABLED <nl> + const PinMap PinMap_SPI_MOSI [ ] = { <nl> + { PA_7 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PB_5 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PB_5 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { PB_15 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI2 ) } , <nl> + { PC_3 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI2 ) } , <nl> + { PC_12 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_SPI_MODULE_ENABLED <nl> + const PinMap PinMap_SPI_MISO [ ] = { <nl> + { PA_6 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PB_4 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PB_4 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { PB_14 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , 
GPIO_AF5_SPI2 ) } , <nl> + { PC_2 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI2 ) } , <nl> + { PC_11 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_SPI_MODULE_ENABLED <nl> + const PinMap PinMap_SPI_SCLK [ ] = { <nl> + { PA_5 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PB_3 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PB_3 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { PB_10 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI2 ) } , <nl> + { PB_13 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI2 ) } , <nl> + { PC_10 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_SPI_MODULE_ENABLED <nl> + const PinMap PinMap_SPI_SSEL [ ] = { <nl> + { PA_4 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PA_4 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { PA_15 , SPI1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI1 ) } , <nl> + { PA_15 , SPI3 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF6_SPI3 ) } , <nl> + { PB_9 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI2 ) } , <nl> + { PB_12 , SPI2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF5_SPI2 ) } , <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + / / * * * CAN * * * <nl> + <nl> + # ifdef HAL_CAN_MODULE_ENABLED <nl> + const PinMap PinMap_CAN_RD [ ] = { <nl> + { PA_11 , CAN1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN1 ) } , <nl> + { PB_5 , CAN2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN2 ) } , <nl> + { PB_8 , CAN1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN1 ) } , <nl> + { PB_12 , CAN2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN2 ) } , <nl> + { PD_0 , CAN1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN1 ) } , <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_CAN_MODULE_ENABLED <nl> + const PinMap PinMap_CAN_TD [ ] = { <nl> + { PA_12 , CAN1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN1 ) } , <nl> + { PB_6 , CAN2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN2 ) } , <nl> + { PB_9 , CAN1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN1 ) } , <nl> + { PB_13 , CAN2 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN2 ) } , <nl> + { PD_1 , CAN1 , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_NOPULL , GPIO_AF9_CAN1 ) } , <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + / / * * * ETHERNET * * * <nl> + <nl> + # ifdef HAL_ETH_MODULE_ENABLED <nl> + const PinMap PinMap_Ethernet [ ] = { <nl> + { PA_0 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_CRS <nl> + { PA_1 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_REF_CLK | ETH_RX_CLK <nl> + { PA_2 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_MDIO <nl> + { PA_3 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_COL <nl> + { PA_7 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_CRS_DV | ETH_RX_DV <nl> + { PB_0 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / 
ETH_RXD2 <nl> + { PB_1 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_RXD3 <nl> + { PB_5 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_PPS_OUT <nl> + { PB_8 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TXD3 <nl> + { PB_10 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_RX_ER <nl> + { PB_11 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TX_EN <nl> + { PB_12 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TXD0 <nl> + { PB_13 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TXD1 <nl> + { PC_1 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_MDC <nl> + { PC_2 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TXD2 <nl> + { PC_3 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TX_CLK <nl> + { PC_4 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_RXD0 <nl> + { PC_5 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_RXD1 <nl> + { PE_2 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TXD3 <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + { PG_8 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_PPS_OUT <nl> + { PG_11 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TX_EN <nl> + { PG_13 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TXD0 <nl> + { PG_14 , ETH , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF11_ETH ) } , / / ETH_TXD1 <nl> + # endif <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + / / * * * No QUADSPI * * * <nl> + <nl> + / / * * * USB * * * <nl> + <nl> + # ifdef HAL_PCD_MODULE_ENABLED <nl> + const PinMap PinMap_USB_OTG_FS [ ] = { <nl> + / / { PA_8 , USB_OTG_FS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_FS ) } , / / USB_OTG_FS_SOF <nl> + / / { PA_9 , USB_OTG_FS , STM_PIN_DATA ( STM_MODE_INPUT , GPIO_NOPULL , GPIO_AF_NONE ) } , / / USB_OTG_FS_VBUS <nl> + / / { PA_10 , USB_OTG_FS , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_PULLUP , GPIO_AF10_OTG_FS ) } , / / USB_OTG_FS_ID <nl> + { PA_11 , USB_OTG_FS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_FS ) } , / / USB_OTG_FS_DM <nl> + { PA_12 , USB_OTG_FS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_FS ) } , / / USB_OTG_FS_DP <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef HAL_PCD_MODULE_ENABLED <nl> + const PinMap PinMap_USB_OTG_HS [ ] = { <nl> + # ifdef USE_USB_HS_IN_FS <nl> + { PA_4 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF12_OTG_HS_FS ) } , / / USB_OTG_HS_SOF <nl> + { PB_12 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_OD , GPIO_PULLUP , GPIO_AF12_OTG_HS_FS ) } , / / USB_OTG_HS_ID <nl> + { PB_13 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_INPUT , GPIO_NOPULL , GPIO_AF_NONE ) } , / / USB_OTG_HS_VBUS <nl> + { PB_14 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF12_OTG_HS_FS ) } , / / USB_OTG_HS_DM <nl> + { PB_15 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF12_OTG_HS_FS ) } , / / USB_OTG_HS_DP <nl> + # else <nl> + { PA_3 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D0 <nl> + { PA_5 , 
USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_CK <nl> + { PB_0 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D1 <nl> + { PB_1 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D2 <nl> + { PB_5 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D7 <nl> + { PB_10 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D3 <nl> + { PB_11 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D4 <nl> + { PB_12 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D5 <nl> + { PB_13 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_D6 <nl> + { PC_0 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_STP <nl> + { PC_2 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_DIR <nl> + { PC_3 , USB_OTG_HS , STM_PIN_DATA ( STM_MODE_AF_PP , GPIO_PULLUP , GPIO_AF10_OTG_HS ) } , / / USB_OTG_HS_ULPI_NXT <nl> + # endif / * USE_USB_HS_IN_FS * / <nl> + { NC , NP , 0 } <nl> + } ; <nl> + # endif <nl> new file mode 100644 <nl> index 00000000000 . . 24248859373 <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / variants / BIGTREE_GENERIC_STM32F407_5X / PinNamesVar . h <nl> <nl> + / * SYS_WKUP * / <nl> + # ifdef PWR_WAKEUP_PIN1 <nl> + SYS_WKUP1 = PA_0 , <nl> + # endif <nl> + # ifdef PWR_WAKEUP_PIN2 <nl> + SYS_WKUP2 = NC , <nl> + # endif <nl> + # ifdef PWR_WAKEUP_PIN3 <nl> + SYS_WKUP3 = NC , <nl> + # endif <nl> + # ifdef PWR_WAKEUP_PIN4 <nl> + SYS_WKUP4 = NC , <nl> + # endif <nl> + # ifdef PWR_WAKEUP_PIN5 <nl> + SYS_WKUP5 = NC , <nl> + # endif <nl> + # ifdef PWR_WAKEUP_PIN6 <nl> + SYS_WKUP6 = NC , <nl> + # endif <nl> + # ifdef PWR_WAKEUP_PIN7 <nl> + SYS_WKUP7 = NC , <nl> + # endif <nl> + # ifdef PWR_WAKEUP_PIN8 <nl> + SYS_WKUP8 = NC , <nl> + # endif <nl> + / * USB * / <nl> + # ifdef USBCON <nl> + USB_OTG_FS_SOF = PA_8 , <nl> + USB_OTG_FS_VBUS = PA_9 , <nl> + USB_OTG_FS_ID = PA_10 , <nl> + USB_OTG_FS_DM = PA_11 , <nl> + USB_OTG_FS_DP = PA_12 , <nl> + USB_OTG_HS_ULPI_D0 = PA_3 , <nl> + USB_OTG_HS_SOF = PA_4 , <nl> + USB_OTG_HS_ULPI_CK = PA_5 , <nl> + USB_OTG_HS_ULPI_D1 = PB_0 , <nl> + USB_OTG_HS_ULPI_D2 = PB_1 , <nl> + USB_OTG_HS_ULPI_D7 = PB_5 , <nl> + USB_OTG_HS_ULPI_D3 = PB_10 , <nl> + USB_OTG_HS_ULPI_D4 = PB_11 , <nl> + USB_OTG_HS_ID = PB_12 , <nl> + USB_OTG_HS_ULPI_D5 = PB_12 , <nl> + USB_OTG_HS_ULPI_D6 = PB_13 , <nl> + USB_OTG_HS_VBUS = PB_13 , <nl> + USB_OTG_HS_DM = PB_14 , <nl> + USB_OTG_HS_DP = PB_15 , <nl> + USB_OTG_HS_ULPI_STP = PC_0 , <nl> + USB_OTG_HS_ULPI_DIR = PC_2 , <nl> + USB_OTG_HS_ULPI_NXT = PC_3 , <nl> + # endif <nl> new file mode 100644 <nl> index 00000000000 . . a6052deb864 <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / variants / BIGTREE_GENERIC_STM32F407_5X / ldscript . ld <nl> <nl> + / * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * * <nl> + <nl> + * * File : LinkerScript . 
ld <nl> + * * <nl> + * * Abstract : Linker script for STM32F407ZGTx Device with <nl> + * * 1024KByte FLASH , 128KByte RAM <nl> + * * <nl> + * * Set heap size , stack size and stack location according <nl> + * * to application requirements . <nl> + * * <nl> + * * Set memory bank area and size if external memory is used . <nl> + * * <nl> + * * Target : STMicroelectronics STM32 <nl> + * * <nl> + * * <nl> + * * Distribution : The file is distributed as is , without any warranty <nl> + * * of any kind . <nl> + * * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * * @ attention <nl> + * * <nl> + * * < h2 > < center > & copy ; COPYRIGHT ( c ) 2014 Ac6 < / center > < / h2 > <nl> + * * <nl> + * * Redistribution and use in source and binary forms , with or without modification , <nl> + * * are permitted provided that the following conditions are met : <nl> + * * 1 . Redistributions of source code must retain the above copyright notice , <nl> + * * this list of conditions and the following disclaimer . <nl> + * * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * * this list of conditions and the following disclaimer in the documentation <nl> + * * and / or other materials provided with the distribution . <nl> + * * 3 . Neither the name of Ac6 nor the names of its contributors <nl> + * * may be used to endorse or promote products derived from this software <nl> + * * without specific prior written permission . <nl> + * * <nl> + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " <nl> + * * AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE <nl> + * * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * * DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE <nl> + * * FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL <nl> + * * DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR <nl> + * * SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER <nl> + * * CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , <nl> + * * OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + * * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * / <nl> + <nl> + / * Entry Point * / <nl> + ENTRY ( Reset_Handler ) <nl> + <nl> + / * Highest address of the user mode stack * / <nl> + _estack = 0x20020000 ; / * end of RAM * / <nl> + / * Generate a link error if heap and stack don ' t fit into RAM * / <nl> + _Min_Heap_Size = 0x200 ; ; / * required amount of heap * / <nl> + _Min_Stack_Size = 0x400 ; ; / * required amount of stack * / <nl> + <nl> + / * Specify the memory areas * / <nl> + MEMORY <nl> + { <nl> + FLASH ( rx ) : ORIGIN = 0x8008000 , LENGTH = 1024K <nl> + RAM ( xrw ) : ORIGIN = 0x20000000 , LENGTH = 128K <nl> + CCMRAM ( rw ) : ORIGIN = 0x10000000 , LENGTH = 64K <nl> + } <nl> + <nl> + / * Define output sections * / <nl> + SECTIONS <nl> + { <nl> + / * The startup code goes first into FLASH * / <nl> + . isr_vector : <nl> + { <nl> + . = ALIGN ( 4 ) ; <nl> + KEEP ( * ( . isr_vector ) ) / * Startup code * / <nl> + . 
= ALIGN ( 4 ) ; <nl> + } > FLASH <nl> + <nl> + / * The program code and other data goes into FLASH * / <nl> + . text ALIGN ( 4 ) : <nl> + { <nl> + . = ALIGN ( 4 ) ; <nl> + * ( . text ) / * . text sections ( code ) * / <nl> + * ( . text * ) / * . text * sections ( code ) * / <nl> + * ( . glue_7 ) / * glue arm to thumb code * / <nl> + * ( . glue_7t ) / * glue thumb to arm code * / <nl> + * ( . eh_frame ) <nl> + <nl> + KEEP ( * ( . init ) ) <nl> + KEEP ( * ( . fini ) ) <nl> + <nl> + . = ALIGN ( 4 ) ; <nl> + _etext = . ; / * define a global symbols at end of code * / <nl> + } > FLASH <nl> + <nl> + / * Constant data goes into FLASH * / <nl> + . rodata ALIGN ( 4 ) : <nl> + { <nl> + . = ALIGN ( 4 ) ; <nl> + * ( . rodata ) / * . rodata sections ( constants , strings , etc . ) * / <nl> + * ( . rodata * ) / * . rodata * sections ( constants , strings , etc . ) * / <nl> + . = ALIGN ( 4 ) ; <nl> + } > FLASH <nl> + <nl> + . ARM . extab : { * ( . ARM . extab * . gnu . linkonce . armextab . * ) } > FLASH <nl> + . ARM : { <nl> + __exidx_start = . ; <nl> + * ( . ARM . exidx * ) <nl> + __exidx_end = . ; <nl> + } > FLASH <nl> + <nl> + . preinit_array : <nl> + { <nl> + PROVIDE_HIDDEN ( __preinit_array_start = . ) ; <nl> + KEEP ( * ( . preinit_array * ) ) <nl> + PROVIDE_HIDDEN ( __preinit_array_end = . ) ; <nl> + } > FLASH <nl> + . init_array : <nl> + { <nl> + PROVIDE_HIDDEN ( __init_array_start = . ) ; <nl> + KEEP ( * ( SORT ( . init_array . * ) ) ) <nl> + KEEP ( * ( . init_array * ) ) <nl> + PROVIDE_HIDDEN ( __init_array_end = . ) ; <nl> + } > FLASH <nl> + . fini_array : <nl> + { <nl> + PROVIDE_HIDDEN ( __fini_array_start = . ) ; <nl> + KEEP ( * ( SORT ( . fini_array . * ) ) ) <nl> + KEEP ( * ( . fini_array * ) ) <nl> + PROVIDE_HIDDEN ( __fini_array_end = . ) ; <nl> + } > FLASH <nl> + <nl> + / * used by the startup to initialize data * / <nl> + _sidata = LOADADDR ( . data ) ; <nl> + <nl> + / * Initialized data sections goes into RAM , load LMA copy after code * / <nl> + . data : <nl> + { <nl> + . = ALIGN ( 4 ) ; <nl> + _sdata = . ; / * create a global symbol at data start * / <nl> + * ( . data ) / * . data sections * / <nl> + * ( . data * ) / * . data * sections * / <nl> + <nl> + . = ALIGN ( 4 ) ; <nl> + _edata = . ; / * define a global symbol at data end * / <nl> + } > RAM AT > FLASH <nl> + <nl> + _siccmram = LOADADDR ( . ccmram ) ; <nl> + <nl> + / * CCM - RAM section <nl> + * <nl> + * IMPORTANT NOTE ! <nl> + * If initialized variables will be placed in this section , <nl> + * the startup code needs to be modified to copy the init - values . <nl> + * / <nl> + . ccmram : <nl> + { <nl> + . = ALIGN ( 4 ) ; <nl> + _sccmram = . ; / * create a global symbol at ccmram start * / <nl> + * ( . ccmram ) <nl> + * ( . ccmram * ) <nl> + <nl> + . = ALIGN ( 4 ) ; <nl> + _eccmram = . ; / * create a global symbol at ccmram end * / <nl> + } > CCMRAM AT > FLASH <nl> + <nl> + <nl> + / * Uninitialized data section * / <nl> + . = ALIGN ( 4 ) ; <nl> + . bss : <nl> + { <nl> + / * This is used by the startup in order to initialize the . bss secion * / <nl> + _sbss = . ; / * define a global symbol at bss start * / <nl> + __bss_start__ = _sbss ; <nl> + * ( . bss ) <nl> + * ( . bss * ) <nl> + * ( COMMON ) <nl> + <nl> + . = ALIGN ( 4 ) ; <nl> + _ebss = . ; / * define a global symbol at bss end * / <nl> + __bss_end__ = _ebss ; <nl> + } > RAM <nl> + <nl> + / * User_heap_stack section , used to check that there is enough RAM left * / <nl> + . _user_heap_stack : <nl> + { <nl> + . = ALIGN ( 4 ) ; <nl> + PROVIDE ( end = . 
) ; <nl> + PROVIDE ( _end = . ) ; <nl> + . = . + _Min_Heap_Size ; <nl> + . = . + _Min_Stack_Size ; <nl> + . = ALIGN ( 4 ) ; <nl> + } > RAM <nl> + <nl> + <nl> + <nl> + / * Remove information from the standard libraries * / <nl> + / DISCARD / : <nl> + { <nl> + libc . a ( * ) <nl> + libm . a ( * ) <nl> + libgcc . a ( * ) <nl> + } <nl> + <nl> + . ARM . attributes 0 : { * ( . ARM . attributes ) } <nl> + } <nl> + <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 2e111dff89d <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / variants / BIGTREE_GENERIC_STM32F407_5X / stm32f4xx_hal_conf . h <nl> <nl> + / * * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * @ file stm32f4xx_hal_conf . h <nl> + * @ brief HAL configuration file . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * @ attention <nl> + * <nl> + * < h2 > < center > & copy ; Copyright ( c ) 2017 STMicroelectronics . <nl> + * All rights reserved . < / center > < / h2 > <nl> + * <nl> + * This software component is licensed by ST under BSD 3 - Clause license , <nl> + * the " License " ; You may not use this file except in compliance with the <nl> + * License . You may obtain a copy of the License at : <nl> + * opensource . org / licenses / BSD - 3 - Clause <nl> + * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * / <nl> + <nl> + / * Define to prevent recursive inclusion mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - * / <nl> + # ifndef __STM32F4xx_HAL_CONF_H <nl> + # define __STM32F4xx_HAL_CONF_H <nl> + <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif <nl> + <nl> + / * Exported types mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm * / <nl> + / * Exported constants mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - * / <nl> + <nl> + / * # # # # # # # # # # # # # # # # # # # # # # # # # # Module Selection # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # * / <nl> + / * * <nl> + * @ brief This is the list of modules to be used in the HAL driver <nl> + * / <nl> + # define HAL_MODULE_ENABLED <nl> + # define HAL_ADC_MODULE_ENABLED <nl> + / * # define HAL_CAN_MODULE_ENABLED * / <nl> + / * # define HAL_CAN_LEGACY_MODULE_ENABLED * / <nl> + # define HAL_CRC_MODULE_ENABLED <nl> + / * # define HAL_CEC_MODULE_ENABLED * / <nl> + / * # define HAL_CRYP_MODULE_ENABLED * / <nl> + # define HAL_DAC_MODULE_ENABLED <nl> + / * # define HAL_DCMI_MODULE_ENABLED * / <nl> + # define HAL_DMA_MODULE_ENABLED <nl> + / * # define HAL_DMA2D_MODULE_ENABLED * / <nl> + / * # define HAL_ETH_MODULE_ENABLED * / <nl> + # define HAL_FLASH_MODULE_ENABLED <nl> + / * # define HAL_NAND_MODULE_ENABLED * / <nl> + / * # define HAL_NOR_MODULE_ENABLED * / <nl> + / * # define HAL_PCCARD_MODULE_ENABLED * / <nl> + / * # define HAL_SRAM_MODULE_ENABLED * / <nl> + / * # define HAL_SDRAM_MODULE_ENABLED * / <nl> + / * # define HAL_HASH_MODULE_ENABLED * / <nl> + # define HAL_GPIO_MODULE_ENABLED <nl> + / * # define HAL_EXTI_MODULE_ENABLED * / <nl> + # define HAL_I2C_MODULE_ENABLED <nl> + / * # define HAL_SMBUS_MODULE_ENABLED * / <nl> + / * # define HAL_I2S_MODULE_ENABLED * / <nl> + / * # define HAL_IWDG_MODULE_ENABLED * / <nl> + / * # define HAL_LTDC_MODULE_ENABLED * / <nl> + / 
* # define HAL_DSI_MODULE_ENABLED * / <nl> + # define HAL_PWR_MODULE_ENABLED <nl> + / * # define HAL_QSPI_MODULE_ENABLED * / <nl> + # define HAL_RCC_MODULE_ENABLED <nl> + / * # define HAL_RNG_MODULE_ENABLED * / <nl> + # define HAL_RTC_MODULE_ENABLED <nl> + / * # define HAL_SAI_MODULE_ENABLED * / <nl> + # define HAL_SD_MODULE_ENABLED <nl> + # define HAL_SPI_MODULE_ENABLED <nl> + # define HAL_TIM_MODULE_ENABLED <nl> + / * # define HAL_UART_MODULE_ENABLED * / <nl> + / * # define HAL_USART_MODULE_ENABLED * / <nl> + / * # define HAL_IRDA_MODULE_ENABLED * / <nl> + / * # define HAL_SMARTCARD_MODULE_ENABLED * / <nl> + / * # define HAL_WWDG_MODULE_ENABLED * / <nl> + # define HAL_CORTEX_MODULE_ENABLED <nl> + # define HAL_PCD_MODULE_ENABLED <nl> + / * # define HAL_HCD_MODULE_ENABLED * / <nl> + / * # define HAL_FMPI2C_MODULE_ENABLED * / <nl> + / * # define HAL_SPDIFRX_MODULE_ENABLED * / <nl> + / * # define HAL_DFSDM_MODULE_ENABLED * / <nl> + / * # define HAL_LPTIM_MODULE_ENABLED * / <nl> + / * # define HAL_MMC_MODULE_ENABLED * / <nl> + <nl> + / * # # # # # # # # # # # # # # # # # # # # # # # # # # HSE / HSI Values adaptation # # # # # # # # # # # # # # # # # # # # # * / <nl> + / * * <nl> + * @ brief Adjust the value of External High Speed oscillator ( HSE ) used in your application . <nl> + * This value is used by the RCC HAL module to compute the system frequency <nl> + * ( when HSE is used as system clock source , directly or through the PLL ) . <nl> + * / <nl> + # if ! defined ( HSE_VALUE ) <nl> + # define HSE_VALUE ( ( uint32_t ) 8000000U ) / * ! < Value of the External oscillator in Hz * / <nl> + # endif / * HSE_VALUE * / <nl> + <nl> + # if ! defined ( HSE_STARTUP_TIMEOUT ) <nl> + # define HSE_STARTUP_TIMEOUT ( ( uint32_t ) 100U ) / * ! < Time out for HSE start up , in ms * / <nl> + # endif / * HSE_STARTUP_TIMEOUT * / <nl> + <nl> + / * * <nl> + * @ brief Internal High Speed oscillator ( HSI ) value . <nl> + * This value is used by the RCC HAL module to compute the system frequency <nl> + * ( when HSI is used as system clock source , directly or through the PLL ) . <nl> + * / <nl> + # if ! defined ( HSI_VALUE ) <nl> + # define HSI_VALUE ( ( uint32_t ) 16000000U ) / * ! < Value of the Internal oscillator in Hz * / <nl> + # endif / * HSI_VALUE * / <nl> + <nl> + / * * <nl> + * @ brief Internal Low Speed oscillator ( LSI ) value . <nl> + * / <nl> + # if ! defined ( LSI_VALUE ) <nl> + # define LSI_VALUE ( ( uint32_t ) 32000U ) / * ! < LSI Typical Value in Hz * / <nl> + # endif / * LSI_VALUE * / / * ! < Value of the Internal Low Speed oscillator in Hz <nl> + The real value may vary depending on the variations <nl> + in voltage and temperature . * / <nl> + / * * <nl> + * @ brief External Low Speed oscillator ( LSE ) value . <nl> + * / <nl> + # if ! defined ( LSE_VALUE ) <nl> + # define LSE_VALUE ( ( uint32_t ) 32768U ) / * ! < Value of the External Low Speed oscillator in Hz * / <nl> + # endif / * LSE_VALUE * / <nl> + <nl> + # if ! defined ( LSE_STARTUP_TIMEOUT ) <nl> + # define LSE_STARTUP_TIMEOUT ( ( uint32_t ) 5000U ) / * ! < Time out for LSE start up , in ms * / <nl> + # endif / * LSE_STARTUP_TIMEOUT * / <nl> + <nl> + / * * <nl> + * @ brief External clock source for I2S peripheral <nl> + * This value is used by the I2S HAL module to compute the I2S clock source <nl> + * frequency , this source is inserted directly through I2S_CKIN pad . <nl> + * / <nl> + # if ! defined ( EXTERNAL_CLOCK_VALUE ) <nl> + # define EXTERNAL_CLOCK_VALUE ( ( uint32_t ) 12288000U ) / * ! 
< Value of the External audio frequency in Hz * / <nl> + # endif / * EXTERNAL_CLOCK_VALUE * / <nl> + <nl> + / * Tip : To avoid modifying this file each time you need to use different HSE , <nl> + = = = you can define the HSE value in your toolchain compiler preprocessor . * / <nl> + <nl> + / * # # # # # # # # # # # # # # # # # # # # # # # # # # # System Configuration # # # # # # # # # # # # # # # # # # # # # # # # # * / <nl> + / * * <nl> + * @ brief This is the HAL system configuration section <nl> + * / <nl> + # define VDD_VALUE ( ( uint32_t ) 3300U ) / * ! < Value of VDD in mv * / <nl> + # define TICK_INT_PRIORITY ( ( uint32_t ) 0U ) / * ! < tick interrupt priority * / <nl> + # define USE_RTOS 0U <nl> + # define PREFETCH_ENABLE 1U <nl> + # define INSTRUCTION_CACHE_ENABLE 1U <nl> + # define DATA_CACHE_ENABLE 1U <nl> + <nl> + # define USE_HAL_ADC_REGISTER_CALLBACKS 0U / * ADC register callback disabled * / <nl> + # define USE_HAL_CAN_REGISTER_CALLBACKS 0U / * CAN register callback disabled * / <nl> + # define USE_HAL_CEC_REGISTER_CALLBACKS 0U / * CEC register callback disabled * / <nl> + # define USE_HAL_CRYP_REGISTER_CALLBACKS 0U / * CRYP register callback disabled * / <nl> + # define USE_HAL_DAC_REGISTER_CALLBACKS 0U / * DAC register callback disabled * / <nl> + # define USE_HAL_DCMI_REGISTER_CALLBACKS 0U / * DCMI register callback disabled * / <nl> + # define USE_HAL_DFSDM_REGISTER_CALLBACKS 0U / * DFSDM register callback disabled * / <nl> + # define USE_HAL_DMA2D_REGISTER_CALLBACKS 0U / * DMA2D register callback disabled * / <nl> + # define USE_HAL_DSI_REGISTER_CALLBACKS 0U / * DSI register callback disabled * / <nl> + # define USE_HAL_ETH_REGISTER_CALLBACKS 0U / * ETH register callback disabled * / <nl> + # define USE_HAL_HASH_REGISTER_CALLBACKS 0U / * HASH register callback disabled * / <nl> + # define USE_HAL_HCD_REGISTER_CALLBACKS 0U / * HCD register callback disabled * / <nl> + # define USE_HAL_I2C_REGISTER_CALLBACKS 0U / * I2C register callback disabled * / <nl> + # define USE_HAL_FMPI2C_REGISTER_CALLBACKS 0U / * FMPI2C register callback disabled * / <nl> + # define USE_HAL_I2S_REGISTER_CALLBACKS 0U / * I2S register callback disabled * / <nl> + # define USE_HAL_IRDA_REGISTER_CALLBACKS 0U / * IRDA register callback disabled * / <nl> + # define USE_HAL_LPTIM_REGISTER_CALLBACKS 0U / * LPTIM register callback disabled * / <nl> + # define USE_HAL_LTDC_REGISTER_CALLBACKS 0U / * LTDC register callback disabled * / <nl> + # define USE_HAL_MMC_REGISTER_CALLBACKS 0U / * MMC register callback disabled * / <nl> + # define USE_HAL_NAND_REGISTER_CALLBACKS 0U / * NAND register callback disabled * / <nl> + # define USE_HAL_NOR_REGISTER_CALLBACKS 0U / * NOR register callback disabled * / <nl> + # define USE_HAL_PCCARD_REGISTER_CALLBACKS 0U / * PCCARD register callback disabled * / <nl> + # define USE_HAL_PCD_REGISTER_CALLBACKS 0U / * PCD register callback disabled * / <nl> + # define USE_HAL_QSPI_REGISTER_CALLBACKS 0U / * QSPI register callback disabled * / <nl> + # define USE_HAL_RNG_REGISTER_CALLBACKS 0U / * RNG register callback disabled * / <nl> + # define USE_HAL_RTC_REGISTER_CALLBACKS 0U / * RTC register callback disabled * / <nl> + # define USE_HAL_SAI_REGISTER_CALLBACKS 0U / * SAI register callback disabled * / <nl> + # define USE_HAL_SD_REGISTER_CALLBACKS 0U / * SD register callback disabled * / <nl> + # define USE_HAL_SMARTCARD_REGISTER_CALLBACKS 0U / * SMARTCARD register callback disabled * / <nl> + # define USE_HAL_SDRAM_REGISTER_CALLBACKS 0U / * SDRAM register callback disabled * / 
<nl> + # define USE_HAL_SRAM_REGISTER_CALLBACKS 0U / * SRAM register callback disabled * / <nl> + # define USE_HAL_SPDIFRX_REGISTER_CALLBACKS 0U / * SPDIFRX register callback disabled * / <nl> + # define USE_HAL_SMBUS_REGISTER_CALLBACKS 0U / * SMBUS register callback disabled * / <nl> + # define USE_HAL_SPI_REGISTER_CALLBACKS 0U / * SPI register callback disabled * / <nl> + # define USE_HAL_TIM_REGISTER_CALLBACKS 0U / * TIM register callback disabled * / <nl> + # define USE_HAL_UART_REGISTER_CALLBACKS 0U / * UART register callback disabled * / <nl> + # define USE_HAL_USART_REGISTER_CALLBACKS 0U / * USART register callback disabled * / <nl> + # define USE_HAL_WWDG_REGISTER_CALLBACKS 0U / * WWDG register callback disabled * / <nl> + <nl> + / * # # # # # # # # # # # # # # # # # # # # # # # # # # Assert Selection # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # * / <nl> + / * * <nl> + * @ brief Uncomment the line below to expanse the " assert_param " macro in the <nl> + * HAL drivers code <nl> + * / <nl> + / * # define USE_FULL_ASSERT 1U * / <nl> + <nl> + / * # # # # # # # # # # # # # # # # # # Ethernet peripheral configuration # # # # # # # # # # # # # # # # # # # # # * / <nl> + <nl> + / * Section 1 : Ethernet peripheral configuration * / <nl> + <nl> + / * MAC ADDRESS : MAC_ADDR0 : MAC_ADDR1 : MAC_ADDR2 : MAC_ADDR3 : MAC_ADDR4 : MAC_ADDR5 * / <nl> + # define MAC_ADDR0 2U <nl> + # define MAC_ADDR1 0U <nl> + # define MAC_ADDR2 0U <nl> + # define MAC_ADDR3 0U <nl> + # define MAC_ADDR4 0U <nl> + # define MAC_ADDR5 0U <nl> + <nl> + / * Definition of the Ethernet driver buffers size and count * / <nl> + # define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE / * buffer size for receive * / <nl> + # define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE / * buffer size for transmit * / <nl> + # define ETH_RXBUFNB ( ( uint32_t ) 4U ) / * 4 Rx buffers of size ETH_RX_BUF_SIZE * / <nl> + # define ETH_TXBUFNB ( ( uint32_t ) 4U ) / * 4 Tx buffers of size ETH_TX_BUF_SIZE * / <nl> + <nl> + / * Section 2 : PHY configuration section * / <nl> + <nl> + / * DP83848_PHY_ADDRESS Address * / <nl> + # define DP83848_PHY_ADDRESS 0x01U <nl> + / * PHY Reset delay these values are based on a 1 ms Systick interrupt * / <nl> + # define PHY_RESET_DELAY ( ( uint32_t ) 0x000000FFU ) <nl> + / * PHY Configuration delay * / <nl> + # define PHY_CONFIG_DELAY ( ( uint32_t ) 0x00000FFFU ) <nl> + <nl> + # define PHY_READ_TO ( ( uint32_t ) 0x0000FFFFU ) <nl> + # define PHY_WRITE_TO ( ( uint32_t ) 0x0000FFFFU ) <nl> + <nl> + / * Section 3 : Common PHY Registers * / <nl> + <nl> + # define PHY_BCR ( ( uint16_t ) 0x0000U ) / * ! < Transceiver Basic Control Register * / <nl> + # define PHY_BSR ( ( uint16_t ) 0x0001U ) / * ! < Transceiver Basic Status Register * / <nl> + <nl> + # define PHY_RESET ( ( uint16_t ) 0x8000U ) / * ! < PHY Reset * / <nl> + # define PHY_LOOPBACK ( ( uint16_t ) 0x4000U ) / * ! < Select loop - back mode * / <nl> + # define PHY_FULLDUPLEX_100M ( ( uint16_t ) 0x2100U ) / * ! < Set the full - duplex mode at 100 Mb / s * / <nl> + # define PHY_HALFDUPLEX_100M ( ( uint16_t ) 0x2000U ) / * ! < Set the half - duplex mode at 100 Mb / s * / <nl> + # define PHY_FULLDUPLEX_10M ( ( uint16_t ) 0x0100U ) / * ! < Set the full - duplex mode at 10 Mb / s * / <nl> + # define PHY_HALFDUPLEX_10M ( ( uint16_t ) 0x0000U ) / * ! < Set the half - duplex mode at 10 Mb / s * / <nl> + # define PHY_AUTONEGOTIATION ( ( uint16_t ) 0x1000U ) / * ! 
< Enable auto - negotiation function * / <nl> + # define PHY_RESTART_AUTONEGOTIATION ( ( uint16_t ) 0x0200U ) / * ! < Restart auto - negotiation function * / <nl> + # define PHY_POWERDOWN ( ( uint16_t ) 0x0800U ) / * ! < Select the power down mode * / <nl> + # define PHY_ISOLATE ( ( uint16_t ) 0x0400U ) / * ! < Isolate PHY from MII * / <nl> + <nl> + # define PHY_AUTONEGO_COMPLETE ( ( uint16_t ) 0x0020U ) / * ! < Auto - Negotiation process completed * / <nl> + # define PHY_LINKED_STATUS ( ( uint16_t ) 0x0004U ) / * ! < Valid link established * / <nl> + # define PHY_JABBER_DETECTION ( ( uint16_t ) 0x0002U ) / * ! < Jabber condition detected * / <nl> + <nl> + / * Section 4 : Extended PHY Registers * / <nl> + # define PHY_SR ( ( uint16_t ) 0x10U ) / * ! < PHY status register Offset * / <nl> + <nl> + # define PHY_SPEED_STATUS ( ( uint16_t ) 0x0002U ) / * ! < PHY Speed mask * / <nl> + # define PHY_DUPLEX_STATUS ( ( uint16_t ) 0x0004U ) / * ! < PHY Duplex mask * / <nl> + <nl> + / * # # # # # # # # # # # # # # # # # # SPI peripheral configuration # # # # # # # # # # # # # # # # # # # # # # # # # # * / <nl> + <nl> + / * CRC FEATURE : Use to activate CRC feature inside HAL SPI Driver <nl> + * Activated : CRC code is present inside driver <nl> + * Deactivated : CRC code cleaned from driver <nl> + * / <nl> + <nl> + # define USE_SPI_CRC 0U <nl> + <nl> + / * Includes mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm * / <nl> + / * * <nl> + * @ brief Include module ' s header file <nl> + * / <nl> + <nl> + # ifdef HAL_RCC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_rcc . h " <nl> + # endif / * HAL_RCC_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_GPIO_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_gpio . h " <nl> + # endif / * HAL_GPIO_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_EXTI_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_exti . h " <nl> + # endif / * HAL_EXTI_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_DMA_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_dma . h " <nl> + # endif / * HAL_DMA_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_CORTEX_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_cortex . h " <nl> + # endif / * HAL_CORTEX_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_ADC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_adc . h " <nl> + # endif / * HAL_ADC_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_CAN_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_can . h " <nl> + # endif / * HAL_CAN_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_CAN_LEGACY_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_can_legacy . h " <nl> + # endif / * HAL_CAN_LEGACY_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_CRC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_crc . h " <nl> + # endif / * HAL_CRC_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_CRYP_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_cryp . h " <nl> + # endif / * HAL_CRYP_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_DMA2D_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_dma2d . h " <nl> + # endif / * HAL_DMA2D_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_DAC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_dac . h " <nl> + # endif / * HAL_DAC_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_DCMI_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_dcmi . h " <nl> + # endif / * HAL_DCMI_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_ETH_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_eth . h " <nl> + # endif / * HAL_ETH_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_FLASH_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_flash . 
h " <nl> + # endif / * HAL_FLASH_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SRAM_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_sram . h " <nl> + # endif / * HAL_SRAM_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_NOR_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_nor . h " <nl> + # endif / * HAL_NOR_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_NAND_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_nand . h " <nl> + # endif / * HAL_NAND_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_PCCARD_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_pccard . h " <nl> + # endif / * HAL_PCCARD_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SDRAM_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_sdram . h " <nl> + # endif / * HAL_SDRAM_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_HASH_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_hash . h " <nl> + # endif / * HAL_HASH_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_I2C_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_i2c . h " <nl> + # endif / * HAL_I2C_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SMBUS_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_smbus . h " <nl> + # endif / * HAL_SMBUS_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_I2S_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_i2s . h " <nl> + # endif / * HAL_I2S_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_IWDG_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_iwdg . h " <nl> + # endif / * HAL_IWDG_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_LTDC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_ltdc . h " <nl> + # endif / * HAL_LTDC_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_PWR_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_pwr . h " <nl> + # endif / * HAL_PWR_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_RNG_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_rng . h " <nl> + # endif / * HAL_RNG_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_RTC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_rtc . h " <nl> + # endif / * HAL_RTC_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SAI_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_sai . h " <nl> + # endif / * HAL_SAI_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SD_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_sd . h " <nl> + # endif / * HAL_SD_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SPI_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_spi . h " <nl> + # endif / * HAL_SPI_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_TIM_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_tim . h " <nl> + # endif / * HAL_TIM_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_UART_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_uart . h " <nl> + # endif / * HAL_UART_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_USART_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_usart . h " <nl> + # endif / * HAL_USART_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_IRDA_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_irda . h " <nl> + # endif / * HAL_IRDA_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SMARTCARD_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_smartcard . h " <nl> + # endif / * HAL_SMARTCARD_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_WWDG_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_wwdg . h " <nl> + # endif / * HAL_WWDG_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_PCD_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_pcd . h " <nl> + # endif / * HAL_PCD_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_HCD_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_hcd . h " <nl> + # endif / * HAL_HCD_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_DSI_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_dsi . 
h " <nl> + # endif / * HAL_DSI_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_QSPI_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_qspi . h " <nl> + # endif / * HAL_QSPI_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_CEC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_cec . h " <nl> + # endif / * HAL_CEC_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_FMPI2C_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_fmpi2c . h " <nl> + # endif / * HAL_FMPI2C_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_SPDIFRX_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_spdifrx . h " <nl> + # endif / * HAL_SPDIFRX_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_DFSDM_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_dfsdm . h " <nl> + # endif / * HAL_DFSDM_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_LPTIM_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_lptim . h " <nl> + # endif / * HAL_LPTIM_MODULE_ENABLED * / <nl> + <nl> + # ifdef HAL_MMC_MODULE_ENABLED <nl> + # include " stm32f4xx_hal_mmc . h " <nl> + # endif / * HAL_MMC_MODULE_ENABLED * / <nl> + <nl> + / * Exported macro mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm * / <nl> + # ifdef USE_FULL_ASSERT <nl> + / * * <nl> + * @ brief The assert_param macro is used for function ' s parameters check . <nl> + * @ param expr : If expr is false , it calls assert_failed function <nl> + * which reports the name of the source file and the source <nl> + * line number of the call that failed . <nl> + * If expr is true , it returns no value . <nl> + * @ retval None <nl> + * / <nl> + # define assert_param ( expr ) ( ( expr ) ? ( void ) 0U : assert_failed ( ( uint8_t * ) __FILE__ , __LINE__ ) ) <nl> + / * Exported functions mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - * / <nl> + void assert_failed ( uint8_t * file , uint32_t line ) ; <nl> + # else <nl> + # define assert_param ( expr ) ( ( void ) 0U ) <nl> + # endif / * USE_FULL_ASSERT * / <nl> + <nl> + # ifdef __cplusplus <nl> + } <nl> + # endif <nl> + <nl> + # endif / * __STM32F4xx_HAL_CONF_H * / <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * ( C ) COPYRIGHT STMicroelectronics * * * * * END OF FILE * * * * / <nl> new file mode 100644 <nl> index 00000000000 . . bd335702545 <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / variants / BIGTREE_GENERIC_STM32F407_5X / variant . cpp <nl> <nl> + / * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Copyright ( c ) 2017 , STMicroelectronics <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are met : <nl> + * <nl> + * 1 . Redistributions of source code must retain the above copyright notice , <nl> + * this list of conditions and the following disclaimer . <nl> + * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * this list of conditions and the following disclaimer in the documentation <nl> + * and / or other materials provided with the distribution . <nl> + * 3 . Neither the name of STMicroelectronics nor the names of its contributors <nl> + * may be used to endorse or promote products derived from this software <nl> + * without specific prior written permission . 
<nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " <nl> + * AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE <nl> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE <nl> + * FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL <nl> + * DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR <nl> + * SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER <nl> + * CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , <nl> + * OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * / <nl> + <nl> + # include " variant . h " <nl> + <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif <nl> + <nl> + / / Pin number <nl> + / / This array allows to wrap Arduino pin number ( Dx or x ) <nl> + / / to STM32 PinName ( PX_n ) <nl> + const PinName digitalPin [ ] = { <nl> + # if STM32F4X_PIN_NUM > = 64 / / 64 pins mcu , 51 gpio <nl> + PC_13 , / / D0 <nl> + PC_14 , / / D1 - OSC32_IN <nl> + PC_15 , / / D2 - OSC32_OUT <nl> + PH_0 , / / D3 - OSC_IN <nl> + PH_1 , / / D4 - OSC_OUT <nl> + PC_0 , / / D5 - 1 : 2 : ADC123_IN10 <nl> + PC_1 , / / D6 - 1 : 2 : ADC123_IN11 <nl> + PC_2 , / / D7 - 1 : SPI2_MISO 2 : ADC123_IN12 <nl> + PC_3 , / / D8 - 1 : SPI2_MOSI 2 : ADC123_IN13 <nl> + PA_0 , / / D9 - 1 : UART4_TX / TIM5_CH1 2 : ADC123_IN0 <nl> + PA_1 , / / D10 - 1 : UART4_RX / TIM5_CH2 / TIM2_CH2 2 : ADC123_IN1 <nl> + PA_2 , / / D11 - 1 : USART2_TX / TIM5_CH3 / TIM9_CH1 / TIM2_CH3 2 : ADC123_IN2 <nl> + PA_3 , / / D12 - 1 : USART2_RX / TIM5_CH4 / TIM9_CH2 / TIM2_CH4 2 : ADC123_IN3 <nl> + PA_4 , / / D13 - NOT FT 1 : SPI1_NSS / SPI3_NSS / USART2_CK 2 : ADC12_IN4 / DAC_OUT1 <nl> + PA_5 , / / D14 - NOT FT 1 : SPI1_SCK 2 : ADC12_IN5 / DAC_OUT2 <nl> + PA_6 , / / D15 - 1 : SPI1_MISO / TIM13_CH1 / TIM3_CH1 2 : ADC12_IN6 <nl> + PA_7 , / / D16 - 1 : SPI1_MOSI / TIM14_CH1 / TIM3_CH2 2 : ADC12_IN7 <nl> + PC_4 , / / D17 - 1 : 2 : ADC12_IN14 <nl> + PC_5 , / / D18 - 1 : 2 : ADC12_IN15 <nl> + PB_0 , / / D19 - 1 : TIM3_CH3 2 : ADC12_IN8 <nl> + PB_1 , / / D20 - 1 : TIM3_CH4 2 : ADC12_IN9 <nl> + PB_2 , / / D21 - BOOT1 <nl> + PB_10 , / / D22 - 1 : SPI2_SCK / I2C2_SCL / USART3_TX / TIM2_CH3 <nl> + PB_11 , / / D23 - 1 : I2C2_SDA / USART3_RX / TIM2_CH4 <nl> + PB_12 , / / D24 - 1 : SPI2_NSS / OTG_HS_ID <nl> + PB_13 , / / D25 - 1 : SPI2_SCK 2 : OTG_HS_VBUS <nl> + PB_14 , / / D26 - 1 : SPI2_MISO / TIM12_CH1 / OTG_HS_DM <nl> + PB_15 , / / D27 - SPI2_MOSI / TIM12_CH2 / OTG_HS_DP <nl> + PC_6 , / / D28 - 1 : TIM8_CH1 / SDIO_D6 / USART6_TX / TIM3_CH1 <nl> + PC_7 , / / D29 - 1 : TIM8_CH2 / SDIO_D7 / USART6_RX / TIM3_CH2 <nl> + PC_8 , / / D30 - 1 : TIM8_CH3 / SDIO_D0 / TIM3_CH3 <nl> + PC_9 , / / D31 - 1 : TIM8_CH4 / SDIO_D1 / TIM3_CH4 <nl> + PA_8 , / / D32 - 1 : TIM1_CH1 / I2C3_SCL / OTG_FS_SOF <nl> + PA_9 , / / D33 - 1 : USART1_TX / TIM1_CH2 2 : OTG_FS_VBUS <nl> + PA_10 , / / 34 - 1 : USART1_RX / TIM1_CH3 / OTG_FS_ID <nl> + PA_11 , / / D35 - 1 : TIM1_CH4 / OTG_FS_DM <nl> + PA_12 , / / D36 - 1 : OTG_FS_DP <nl> + PA_13 , / / D37 - 0 : JTMS - SWDIO <nl> + PA_14 , / / D38 - 0 : JTCK - 
SWCLK <nl> + PA_15 , / / D39 - 0 : JTDI 1 : SPI3_NSS / SPI1_NSS <nl> + PC_10 , / / D40 - 1 : UART4_TX / SPI3_SCK / SDIO_D2 / USART3_TX <nl> + PC_11 , / / D41 - 1 : UART4_RX / SPI3_MISO / SDIO_D3 / USART3_RX <nl> + PC_12 , / / D42 - 1 : UART5_TX / SPI3_MOSI / SDIO_CK <nl> + PD_2 , / / D43 - 1 : UART5_RX / SDIO_CMD <nl> + PB_3 , / / D44 - 0 : JTDO 1 : SPI3_SCK / TIM2_CH2 / SPI1_SCK <nl> + PB_4 , / / D45 - 0 : NJTRST 1 : SPI3_MISO / TIM3_CH1 / SPI1_MISO <nl> + PB_5 , / / D46 - 1 : TIM3_CH2 / SPI1_MOSI / SPI3_MOSI <nl> + PB_6 , / / D47 - 1 : I2C1_SCL / TIM4_CH1 / USART1_TX <nl> + PB_7 , / / D48 - 1 : I2C1_SDA / TIM4_CH2 / USART1_RX <nl> + PB_8 , / / D49 - 1 : I2C1_SCL / TIM4_CH3 / SDIO_D4 / TIM10_CH1 <nl> + PB_9 , / / D50 - 1 : I2C1_SDA / TIM4_CH4 / SDIO_D5 / TIM11_CH1 / SPI2_NSS <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 100 / / 100 pins mcu , 82 gpio <nl> + PE_2 , / / D51 - 1 : FSMC_A23 <nl> + PE_3 , / / D52 - 1 : FSMC_A19 <nl> + PE_4 , / / D53 - 1 : FSMC_A20 <nl> + PE_5 , / / D54 - 1 : FSMC_A21 <nl> + PE_6 , / / D55 - 1 : FSMC_A22 <nl> + PE_7 , / / D56 - 1 : FSMC_D4 <nl> + PE_8 , / / D57 - 1 : FSMC_D5 <nl> + PE_9 , / / D58 - 1 : FSMC_D6 / TIM1_CH1 <nl> + PE_10 , / / D59 - 1 : FSMC_D7 <nl> + PE_11 , / / D60 - 1 : FSMC_D8 / TIM1_CH2 <nl> + PE_12 , / / D61 - 1 : FSMC_D9 <nl> + PE_13 , / / D62 - 1 : FSMC_D10 / TIM1_CH3 <nl> + PE_14 , / / D63 - 1 : FSMC_D11 / TIM1_CH4 <nl> + PE_15 , / / D64 - 1 : FSMC_D12 <nl> + PD_8 , / / D65 - 1 : FSMC_D13 / USART3_TX <nl> + PD_9 , / / D66 - 1 : FSMC_D14 / USART3_RX <nl> + PD_10 , / / D67 - 1 : FSMC_D15 <nl> + PD_11 , / / D68 - 1 : FSMC_A16 <nl> + PD_12 , / / D69 - 1 : FSMC_A17 / TIM4_CH1 <nl> + PD_13 , / / D70 - 1 : FSMC_A18 / TIM4_CH2 <nl> + PD_14 , / / D71 - 1 : FSMC_D0 / TIM4_CH3 <nl> + PD_15 , / / D72 - 1 : FSMC_D1 / TIM4_CH4 <nl> + PD_0 , / / D73 - 1 : FSMC_D2 <nl> + PD_1 , / / D74 - 1 : FSMC_D3 <nl> + PD_3 , / / D75 - 1 : FSMC_CLK <nl> + PD_4 , / / D76 - 1 : FSMC_NOE <nl> + PD_5 , / / D77 - 1 : USART2_TX <nl> + PD_6 , / / D78 - 1 : USART2_RX <nl> + PD_7 , / / D79 <nl> + PE_0 , / / D80 <nl> + PE_1 , / / D81 <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + PF_0 , / / D82 - 1 : FSMC_A0 / I2C2_SDA <nl> + PF_1 , / / D83 - 1 : FSMC_A1 / I2C2_SCL <nl> + PF_2 , / / D84 - 1 : FSMC_A2 <nl> + PF_3 , / / D85 - 1 : FSMC_A3 2 : ADC3_IN9 <nl> + PF_4 , / / D86 - 1 : FSMC_A4 2 : ADC3_IN14 <nl> + PF_5 , / / D87 - 1 : FSMC_A5 2 : ADC3_IN15 <nl> + PF_6 , / / D88 - 1 : TIM10_CH1 2 : ADC3_IN4 <nl> + PF_7 , / / D89 - 1 : TIM11_CH1 2 : ADC3_IN5 <nl> + PF_8 , / / D90 - 1 : TIM13_CH1 2 : ADC3_IN6 <nl> + PF_9 , / / D91 - 1 : TIM14_CH1 2 : ADC3_IN7 <nl> + PF_10 , / / D92 - 2 : ADC3_IN8 <nl> + PF_11 , / / D93 <nl> + PF_12 , / / D94 - 1 : FSMC_A6 <nl> + PF_13 , / / D95 - 1 : FSMC_A7 <nl> + PF_14 , / / D96 - 1 : FSMC_A8 <nl> + PF_15 , / / D97 - 1 : FSMC_A9 <nl> + PG_0 , / / D98 - 1 : FSMC_A10 <nl> + PG_1 , / / D99 - 1 : FSMC_A11 <nl> + PG_2 , / / D100 - 1 : FSMC_A12 <nl> + PG_3 , / / D101 - 1 : FSMC_A13 <nl> + PG_4 , / / D102 - 1 : FSMC_A14 <nl> + PG_5 , / / D103 - 1 : FSMC_A15 <nl> + PG_6 , / / D104 <nl> + PG_7 , / / D105 <nl> + PG_8 , / / D106 <nl> + PG_9 , / / D107 - 1 : USART6_RX <nl> + PG_10 , / / D108 - 1 : FSMC_NE3 <nl> + PG_11 , / / D109 <nl> + PG_12 , / / D110 - 1 : FSMC_NE4 <nl> + PG_13 , / / D111 - 1 : FSMC_A24 <nl> + PG_14 , / / D112 - 1 : FSMC_A25 / USART6_TX <nl> + PG_15 , / / D113 <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 176 / / 176 pins mcu , 140 gpio <nl> + PI_8 , / / D114 <nl> + PI_9 , / / D115 <nl> + PI_10 , / / 
D116 <nl> + PI_11 , / / D117 <nl> + PH_2 , / / D118 <nl> + PH_3 , / / D119 <nl> + PH_4 , / / D120 - 1 : I2C2_SCL <nl> + PH_5 , / / D121 - 1 : I2C2_SDA <nl> + PH_6 , / / D122 - 1 : TIM12_CH1 <nl> + PH_7 , / / D123 - 1 : I2C3_SCL <nl> + PH_8 , / / D124 - 1 : I2C3_SDA <nl> + PH_9 , / / D125 - 1 : TIM12_CH2 <nl> + PH_10 , / / D126 - 1 : TIM5_CH1 <nl> + PH_11 , / / D127 - 1 : TIM5_CH2 <nl> + PH_12 , / / D128 - 1 : TIM5_CH3 <nl> + PH_13 , / / D129 <nl> + PH_14 , / / D130 <nl> + PH_15 , / / D131 <nl> + PI_0 , / / D132 - 1 : TIM5_CH4 / SPI2_NSS <nl> + PI_1 , / / D133 - 1 : SPI2_SCK <nl> + PI_2 , / / D134 - 1 : TIM8_CH4 / SPI2_MISO <nl> + PI_3 , / / D135 - 1 : SPI2_MOS <nl> + PI_4 , / / D136 <nl> + PI_5 , / / D137 - 1 : TIM8_CH1 <nl> + PI_6 , / / D138 - 1 : TIM8_CH2 <nl> + PI_7 , / / D139 - 1 : TIM8_CH3 <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 64 / / 64 pins mcu , 51 gpio , 16 ADC <nl> + PA_0 , / / D140 / A0 = D9 - 1 : UART4_TX / TIM5_CH1 2 : ADC123_IN0 <nl> + PA_1 , / / D141 / A1 = D10 - 1 : UART4_RX / TIM5_CH2 / TIM2_CH2 2 : ADC123_IN1 <nl> + PA_2 , / / D142 / A2 = D11 - 1 : USART2_TX / TIM5_CH3 / TIM9_CH1 / TIM2_CH3 2 : ADC123_IN2 <nl> + PA_3 , / / D143 / A3 = D12 - 1 : USART2_RX / TIM5_CH4 / TIM9_CH2 / TIM2_CH4 2 : ADC123_IN3 <nl> + PA_4 , / / D144 / A4 = D13 - NOT FT 1 : SPI1_NSS / SPI3_NSS / USART2_CK 2 : ADC12_IN4 / DAC_OUT1 <nl> + PA_5 , / / D145 / A5 = D14 - NOT FT 1 : SPI1_SCK 2 : ADC12_IN5 / DAC_OUT2 <nl> + PA_6 , / / D146 / A6 = D15 - 1 : SPI1_MISO / TIM13_CH1 / TIM3_CH1 2 : ADC12_IN6 <nl> + PA_7 , / / D147 / A7 = D16 - 1 : SPI1_MOSI / TIM14_CH1 / TIM3_CH2 2 : ADC12_IN7 <nl> + PB_0 , / / D148 / A8 = D19 - 1 : TIM3_CH3 2 : ADC12_IN8 <nl> + PB_1 , / / D149 / A9 = D20 - 1 : TIM3_CH4 2 : ADC12_IN9 <nl> + PC_0 , / / D150 / A10 = D5 - 1 : 2 : ADC123_IN10 <nl> + PC_1 , / / D151 / A11 = D6 - 1 : 2 : ADC123_IN11 <nl> + PC_2 , / / D152 / A12 = D7 - 1 : SPI2_MISO 2 : ADC123_IN12 <nl> + PC_3 , / / D153 / A13 = D8 - 1 : SPI2_MOSI 2 : ADC123_IN13 <nl> + PC_4 , / / D154 / A14 = D17 - 1 : 2 : ADC12_IN14 <nl> + PC_5 , / / D155 / A15 = D18 - 1 : 2 : ADC12_IN15 <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio , 24 ADC <nl> + PF_3 , / / D156 / A16 = D85 - 1 : FSMC_A3 2 : ADC3_IN9 <nl> + PF_4 , / / D157 / A17 = D86 - 1 : FSMC_A4 2 : ADC3_IN14 <nl> + PF_5 , / / D158 / A18 = D87 - 1 : FSMC_A5 2 : ADC3_IN15 <nl> + PF_6 , / / D159 / A19 = D88 - 1 : TIM10_CH1 2 : ADC3_IN4 <nl> + PF_7 , / / D160 / A20 = D89 - 1 : TIM11_CH1 2 : ADC3_IN5 <nl> + PF_8 , / / D161 / A21 = D90 - 1 : TIM13_CH1 2 : ADC3_IN6 <nl> + PF_9 , / / D162 / A22 = D91 - 1 ; TIM14_CH1 2 : ADC3_IN7 <nl> + PF_10 , / / D163 / A23 = D92 - 2 : ADC3_IN8 <nl> + # endif <nl> + } ; <nl> + <nl> + # ifdef __cplusplus <nl> + } <nl> + # endif <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif <nl> + <nl> + / * * <nl> + * @ brief System Clock Configuration <nl> + * @ param None <nl> + * @ retval None <nl> + * / <nl> + WEAK void SystemClock_Config ( void ) <nl> + { <nl> + <nl> + RCC_OscInitTypeDef RCC_OscInitStruct ; <nl> + RCC_ClkInitTypeDef RCC_ClkInitStruct ; <nl> + <nl> + / * * Configure the main internal regulator output voltage <nl> + * / <nl> + __HAL_RCC_PWR_CLK_ENABLE ( ) ; <nl> + <nl> + __HAL_PWR_VOLTAGESCALING_CONFIG ( PWR_REGULATOR_VOLTAGE_SCALE1 ) ; <nl> + <nl> + / * * Initializes the CPU , AHB and APB busses clocks <nl> + * / <nl> + RCC_OscInitStruct . 
OscillatorType = RCC_OSCILLATORTYPE_HSE ; <nl> + RCC_OscInitStruct . HSEState = RCC_HSE_ON ; <nl> + RCC_OscInitStruct . PLL . PLLState = RCC_PLL_ON ; <nl> + RCC_OscInitStruct . PLL . PLLSource = RCC_PLLSOURCE_HSE ; <nl> + RCC_OscInitStruct . PLL . PLLM = 8 ; <nl> + RCC_OscInitStruct . PLL . PLLN = 336 ; <nl> + RCC_OscInitStruct . PLL . PLLP = RCC_PLLP_DIV2 ; <nl> + RCC_OscInitStruct . PLL . PLLQ = 7 ; <nl> + if ( HAL_RCC_OscConfig ( & RCC_OscInitStruct ) ! = HAL_OK ) { <nl> + _Error_Handler ( __FILE__ , __LINE__ ) ; <nl> + } <nl> + <nl> + / * * Initializes the CPU , AHB and APB busses clocks <nl> + * / <nl> + RCC_ClkInitStruct . ClockType = RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_SYSCLK <nl> + | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2 ; <nl> + RCC_ClkInitStruct . SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK ; <nl> + RCC_ClkInitStruct . AHBCLKDivider = RCC_SYSCLK_DIV1 ; <nl> + RCC_ClkInitStruct . APB1CLKDivider = RCC_HCLK_DIV4 ; <nl> + RCC_ClkInitStruct . APB2CLKDivider = RCC_HCLK_DIV2 ; <nl> + <nl> + if ( HAL_RCC_ClockConfig ( & RCC_ClkInitStruct , FLASH_LATENCY_5 ) ! = HAL_OK ) { <nl> + _Error_Handler ( __FILE__ , __LINE__ ) ; <nl> + } <nl> + <nl> + / * * Configure the Systick interrupt time <nl> + * / <nl> + HAL_SYSTICK_Config ( HAL_RCC_GetHCLKFreq ( ) / 1000 ) ; <nl> + <nl> + / * * Configure the Systick <nl> + * / <nl> + HAL_SYSTICK_CLKSourceConfig ( SYSTICK_CLKSOURCE_HCLK ) ; <nl> + <nl> + / * SysTick_IRQn interrupt configuration * / <nl> + HAL_NVIC_SetPriority ( SysTick_IRQn , 0 , 0 ) ; <nl> + } <nl> + <nl> + # ifdef __cplusplus <nl> + } <nl> + # endif <nl> new file mode 100644 <nl> index 00000000000 . . d5d34f152d8 <nl> mmm / dev / null <nl> ppp b / buildroot / share / PlatformIO / variants / BIGTREE_GENERIC_STM32F407_5X / variant . h <nl> <nl> + / * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Copyright ( c ) 2017 , STMicroelectronics <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are met : <nl> + * <nl> + * 1 . Redistributions of source code must retain the above copyright notice , <nl> + * this list of conditions and the following disclaimer . <nl> + * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * this list of conditions and the following disclaimer in the documentation <nl> + * and / or other materials provided with the distribution . <nl> + * 3 . Neither the name of STMicroelectronics nor the names of its contributors <nl> + * may be used to endorse or promote products derived from this software <nl> + * without specific prior written permission . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " <nl> + * AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE <nl> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * DISCLAIMED . 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE <nl> + * FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL <nl> + * DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR <nl> + * SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER <nl> + * CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , <nl> + * OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * / <nl> + <nl> + # ifndef _VARIANT_ARDUINO_STM32_ <nl> + # define _VARIANT_ARDUINO_STM32_ <nl> + <nl> + / * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + * Headers <nl> + * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - * / <nl> + # include " PeripheralPins . h " <nl> + <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif / / __cplusplus <nl> + <nl> + / * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + * Pins <nl> + * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - * / <nl> + extern const PinName digitalPin [ ] ; <nl> + <nl> + # ifdef STM32F405RX <nl> + # define STM32F4X_PIN_NUM 64 / / 64 pins mcu , 51 gpio <nl> + # define STM32F4X_GPIO_NUM 51 <nl> + # define STM32F4X_ADC_NUM 16 <nl> + # elif defined STM32F407_5VX <nl> + # define STM32F4X_PIN_NUM 100 / / 100 pins mcu , 82 gpio <nl> + # define STM32F4X_GPIO_NUM 82 <nl> + # define STM32F4X_ADC_NUM 16 <nl> + # elif defined STM32F407_5ZX <nl> + # define STM32F4X_PIN_NUM 144 / / 144 pins mcu , 114 gpio <nl> + # define STM32F4X_GPIO_NUM 114 <nl> + # define STM32F4X_ADC_NUM 24 <nl> + # elif defined STM32F407IX <nl> + # define STM32F4X_PIN_NUM 176 / / 176 pins mcu , 140 gpio <nl> + # define STM32F4X_GPIO_NUM 140 <nl> + # define STM32F4X_ADC_NUM 24 <nl> + # else <nl> + # error " no match MCU defined " <nl> + # endif <nl> + <nl> + # if STM32F4X_PIN_NUM > = 64 / / 64 pins mcu , 51 gpio <nl> + # define PC13 0 <nl> + # define PC14 1 / / OSC32_IN <nl> + # define PC15 2 / / OSC32_OUT <nl> + # define PH0 3 / / OSC_IN <nl> + # define PH1 4 / / OSC_OUT <nl> + # define PC0 5 / / 1 : 2 : ADC123_IN10 <nl> + # define PC1 6 / / 1 : 2 : ADC123_IN11 <nl> + # define PC2 7 / / 1 : SPI2_MISO 2 : ADC123_IN12 <nl> + # define PC3 8 / / 1 : SPI2_MOSI 2 : ADC123_IN13 <nl> + # define PA0 9 / / 1 : UART4_TX / TIM5_CH1 2 : ADC123_IN0 <nl> + # define PA1 10 / / 1 : UART4_RX / TIM5_CH2 / TIM2_CH2 2 : ADC123_IN1 <nl> + # define PA2 11 / / 1 : USART2_TX / TIM5_CH3 / TIM9_CH1 / TIM2_CH3 2 : ADC123_IN2 <nl> + # define PA3 12 / / 1 : USART2_RX / TIM5_CH4 / TIM9_CH2 / TIM2_CH4 2 : ADC123_IN3 <nl> + # define PA4 13 / / NOT FT 1 : SPI1_NSS / SPI3_NSS / USART2_CK 2 : ADC12_IN4 / DAC_OUT1 <nl> + # define PA5 14 / / NOT FT 1 : SPI1_SCK 2 : ADC12_IN5 / DAC_OUT2 <nl> + # define PA6 15 / / 1 : SPI1_MISO / TIM13_CH1 / TIM3_CH1 2 : ADC12_IN6 <nl> + # define PA7 16 / / 1 : SPI1_MOSI / TIM14_CH1 / TIM3_CH2 2 : ADC12_IN7 <nl> + # define PC4 17 / / 1 : 2 : ADC12_IN14 <nl> + # define PC5 18 / / 1 : 2 : ADC12_IN15 <nl> + # define PB0 19 / / 1 : TIM3_CH3 2 : ADC12_IN8 <nl> + # define PB1 20 / / 1 : TIM3_CH4 2 : ADC12_IN9 <nl> + # define PB2 21 / / BOOT1 <nl> + # define PB10 22 / / 1 : SPI2_SCK / I2C2_SCL / 
USART3_TX / TIM2_CH3 <nl> + # define PB11 23 / / 1 : I2C2_SDA / USART3_RX / TIM2_CH4 <nl> + # define PB12 24 / / 1 : SPI2_NSS / OTG_HS_ID <nl> + # define PB13 25 / / 1 : SPI2_SCK 2 : OTG_HS_VBUS <nl> + # define PB14 26 / / 1 : SPI2_MISO / TIM12_CH1 / OTG_HS_DM <nl> + # define PB15 27 / / SPI2_MOSI / TIM12_CH2 / OTG_HS_DP <nl> + # define PC6 28 / / 1 : TIM8_CH1 / SDIO_D6 / USART6_TX / TIM3_CH1 <nl> + # define PC7 29 / / 1 : TIM8_CH2 / SDIO_D7 / USART6_RX / TIM3_CH2 <nl> + # define PC8 30 / / 1 : TIM8_CH3 / SDIO_D0 / TIM3_CH3 <nl> + # define PC9 31 / / 1 : TIM8_CH4 / SDIO_D1 / TIM3_CH4 <nl> + # define PA8 32 / / 1 : TIM1_CH1 / I2C3_SCL / OTG_FS_SOF <nl> + # define PA9 33 / / 1 : USART1_TX / TIM1_CH2 2 : OTG_FS_VBUS <nl> + # define PA10 34 / / 1 : USART1_RX / TIM1_CH3 / OTG_FS_ID <nl> + # define PA11 35 / / 1 : TIM1_CH4 / OTG_FS_DM <nl> + # define PA12 36 / / 1 : OTG_FS_DP <nl> + # define PA13 37 / / 0 : JTMS - SWDIO <nl> + # define PA14 38 / / 0 : JTCK - SWCLK <nl> + # define PA15 39 / / 0 : JTDI 1 : SPI3_NSS / SPI1_NSS <nl> + # define PC10 40 / / 1 : UART4_TX / SPI3_SCK / SDIO_D2 / USART3_TX <nl> + # define PC11 41 / / 1 : UART4_RX / SPI3_MISO / SDIO_D3 / USART3_RX <nl> + # define PC12 42 / / 1 : UART5_TX / SPI3_MOSI / SDIO_CK <nl> + # define PD2 43 / / 1 : UART5_RX / SDIO_CMD <nl> + # define PB3 44 / / 0 : JTDO 1 : SPI3_SCK / TIM2_CH2 / SPI1_SCK <nl> + # define PB4 45 / / 0 : NJTRST 1 : SPI3_MISO / TIM3_CH1 / SPI1_MISO <nl> + # define PB5 46 / / 1 : TIM3_CH2 / SPI1_MOSI / SPI3_MOSI <nl> + # define PB6 47 / / 1 : I2C1_SCL / TIM4_CH1 / USART1_TX <nl> + # define PB7 48 / / 1 : I2C1_SDA / TIM4_CH2 / USART1_RX <nl> + # define PB8 49 / / 1 : I2C1_SCL / TIM4_CH3 / SDIO_D4 / TIM10_CH1 <nl> + # define PB9 50 / / 1 : I2C1_SDA / TIM4_CH4 / SDIO_D5 / TIM11_CH1 / SPI2_NSS <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 100 / / 100 pins mcu , 82 gpio <nl> + # define PE2 51 / / 1 : FSMC_A23 <nl> + # define PE3 52 / / 1 : FSMC_A19 <nl> + # define PE4 53 / / 1 : FSMC_A20 <nl> + # define PE5 54 / / 1 : FSMC_A21 <nl> + # define PE6 55 / / 1 : FSMC_A22 <nl> + # define PE7 56 / / 1 : FSMC_D4 <nl> + # define PE8 57 / / 1 : FSMC_D5 <nl> + # define PE9 58 / / 1 : FSMC_D6 / TIM1_CH1 <nl> + # define PE10 59 / / 1 : FSMC_D7 <nl> + # define PE11 60 / / 1 : FSMC_D8 / TIM1_CH2 <nl> + # define PE12 61 / / 1 : FSMC_D9 <nl> + # define PE13 62 / / 1 : FSMC_D10 / TIM1_CH3 <nl> + # define PE14 63 / / 1 : FSMC_D11 / TIM1_CH4 <nl> + # define PE15 64 / / 1 : FSMC_D12 <nl> + # define PD8 65 / / 1 : FSMC_D13 / USART3_TX <nl> + # define PD9 66 / / 1 : FSMC_D14 / USART3_RX <nl> + # define PD10 67 / / 1 : FSMC_D15 <nl> + # define PD11 68 / / 1 : FSMC_A16 <nl> + # define PD12 69 / / 1 : FSMC_A17 / TIM4_CH1 <nl> + # define PD13 70 / / 1 : FSMC_A18 / TIM4_CH2 <nl> + # define PD14 71 / / 1 : FSMC_D0 / TIM4_CH3 <nl> + # define PD15 72 / / 1 : FSMC_D1 / TIM4_CH4 <nl> + # define PD0 73 / / 1 : FSMC_D2 <nl> + # define PD1 74 / / 1 : FSMC_D3 <nl> + # define PD3 75 / / 1 : FSMC_CLK <nl> + # define PD4 76 / / 1 : FSMC_NOE <nl> + # define PD5 77 / / 1 : USART2_TX <nl> + # define PD6 78 / / 1 : USART2_RX <nl> + # define PD7 79 <nl> + # define PE0 80 <nl> + # define PE1 81 <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 144 / / 144 pins mcu , 114 gpio <nl> + # define PF0 82 / / 1 : FSMC_A0 / I2C2_SDA <nl> + # define PF1 83 / / 1 : FSMC_A1 / I2C2_SCL <nl> + # define PF2 84 / / 1 : FSMC_A2 <nl> + # define PF3 85 / / 1 : FSMC_A3 2 : ADC3_IN9 <nl> + # define PF4 86 / / 1 : FSMC_A4 2 : ADC3_IN14 <nl> + # define PF5 87 / / 1 : FSMC_A5 2 : ADC3_IN15 
<nl> + # define PF6 88 / / 1 : TIM10_CH1 2 : ADC3_IN4 <nl> + # define PF7 89 / / 1 : TIM11_CH1 2 : ADC3_IN5 <nl> + # define PF8 90 / / 1 : TIM13_CH1 2 : ADC3_IN6 <nl> + # define PF9 91 / / 1 ; TIM14_CH1 2 : ADC3_IN7 <nl> + # define PF10 92 / / 2 : ADC3_IN8 <nl> + # define PF11 93 <nl> + # define PF12 94 / / 1 : FSMC_A6 <nl> + # define PF13 95 / / 1 : FSMC_A7 <nl> + # define PF14 96 / / 1 : FSMC_A8 <nl> + # define PF15 97 / / 1 : FSMC_A9 <nl> + # define PG0 98 / / 1 : FSMC_A10 <nl> + # define PG1 99 / / 1 : FSMC_A11 <nl> + # define PG2 100 / / 1 : FSMC_A12 <nl> + # define PG3 101 / / 1 : FSMC_A13 <nl> + # define PG4 102 / / 1 : FSMC_A14 <nl> + # define PG5 103 / / 1 : FSMC_A15 <nl> + # define PG6 104 <nl> + # define PG7 105 <nl> + # define PG8 106 <nl> + # define PG9 107 / / 1 : USART6_RX <nl> + # define PG10 108 / / 1 : FSMC_NE3 <nl> + # define PG11 109 <nl> + # define PG12 110 / / 1 : FSMC_NE4 <nl> + # define PG13 111 / / 1 : FSMC_A24 <nl> + # define PG14 112 / / 1 : FSMC_A25 / USART6_TX <nl> + # define PG15 113 <nl> + # endif <nl> + # if STM32F4X_PIN_NUM > = 176 / / 176 pins mcu , 140 gpio <nl> + # define PI8 114 <nl> + # define PI9 115 <nl> + # define PI10 116 <nl> + # define PI11 117 <nl> + # define PH2 118 <nl> + # define PH3 119 <nl> + # define PH4 120 / / 1 : I2C2_SCL <nl> + # define PH5 121 / / 1 : I2C2_SDA <nl> + # define PH6 122 / / 1 : TIM12_CH1 <nl> + # define PH7 123 / / 1 : I2C3_SCL <nl> + # define PH8 124 / / 1 : I2C3_SDA <nl> + # define PH9 125 / / 1 : TIM12_CH2 <nl> + # define PH10 126 / / 1 : TIM5_CH1 <nl> + # define PH11 127 / / 1 : TIM5_CH2 <nl> + # define PH12 128 / / 1 : TIM5_CH3 <nl> + # define PH13 129 <nl> + # define PH14 130 <nl> + # define PH15 131 <nl> + # define PI0 132 / / 1 : TIM5_CH4 / SPI2_NSS <nl> + # define PI1 133 / / 1 : SPI2_SCK <nl> + # define PI2 134 / / 1 : TIM8_CH4 / SPI2_MISO <nl> + # define PI3 135 / / 1 : SPI2_MOS <nl> + # define PI4 136 <nl> + # define PI5 137 / / 1 : TIM8_CH1 <nl> + # define PI6 138 / / 1 : TIM8_CH2 <nl> + # define PI7 139 / / 1 : TIM8_CH3 <nl> + # endif <nl> + <nl> + <nl> + / / This must be a literal <nl> + # define NUM_DIGITAL_PINS ( STM32F4X_GPIO_NUM + STM32F4X_ADC_NUM ) <nl> + / / This must be a literal with a value less than or equal to MAX_ANALOG_INPUTS <nl> + # define NUM_ANALOG_INPUTS ( STM32F4X_ADC_NUM ) <nl> + # define NUM_ANALOG_FIRST ( STM32F4X_GPIO_NUM ) <nl> + <nl> + / / Below ADC , DAC and PWM definitions already done in the core <nl> + / / Could be redefined here if needed <nl> + / / ADC resolution is 12bits <nl> + / / # define ADC_RESOLUTION 12 <nl> + / / # define DACC_RESOLUTION 12 <nl> + <nl> + / / PWM resolution <nl> + # define PWM_RESOLUTION 8 <nl> + # define PWM_FREQUENCY 20000 <nl> + # define PWM_MAX_DUTY_CYCLE 255 <nl> + <nl> + / / Below SPI and I2C definitions already done in the core <nl> + / / Could be redefined here if differs from the default one <nl> + / / SPI Definitions <nl> + # define PIN_SPI_MOSI PB15 <nl> + # define PIN_SPI_MISO PB14 <nl> + # define PIN_SPI_SCK PB13 <nl> + # define PIN_SPI_SS PB12 <nl> + <nl> + / / I2C Definitions <nl> + # define PIN_WIRE_SDA PB7 <nl> + # define PIN_WIRE_SCL PB6 <nl> + <nl> + / / Timer Definitions <nl> + / / Do not use timer used by PWM pins when possible . See PinMap_PWM in PeripheralPins . 
c <nl> + # define TIMER_TONE TIM6 <nl> + <nl> + / / Do not use basic timer : OC is required <nl> + # define TIMER_SERVO TIM2 / / TODO : advanced - control timers don ' t work <nl> + <nl> + / / UART Definitions <nl> + / / Define here Serial instance number to map on Serial generic name <nl> + # define SERIAL_UART_INSTANCE 1 / / ex : 2 for Serial2 ( USART2 ) <nl> + / / DEBUG_UART could be redefined to print on another instance than ' Serial ' <nl> + / / # define DEBUG_UART ( ( USART_TypeDef * ) U ( S ) ARTX ) / / ex : USART3 <nl> + / / DEBUG_UART baudrate , default : 9600 if not defined <nl> + / / # define DEBUG_UART_BAUDRATE x <nl> + / / DEBUG_UART Tx pin name , default : the first one found in PinMap_UART_TX for DEBUG_UART <nl> + / / # define DEBUG_PINNAME_TX PX_n / / PinName used for TX <nl> + <nl> + / / Default pin used for ' Serial ' instance ( ex : ST - Link ) <nl> + / / Mandatory for Firmata <nl> + # define PIN_SERIAL_RX PA10 <nl> + # define PIN_SERIAL_TX PA9 <nl> + <nl> + # ifdef __cplusplus <nl> + } / / extern " C " <nl> + # endif <nl> + / * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + * Arduino objects - C + + only <nl> + * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - * / <nl> + <nl> + # ifdef __cplusplus <nl> + / / These serial port names are intended to allow libraries and architecture - neutral <nl> + / / sketches to automatically default to the correct port name for a particular type <nl> + / / of use . For example , a GPS module would normally connect to SERIAL_PORT_HARDWARE_OPEN , <nl> + / / the first hardware serial port whose RX / TX pins are not dedicated to another use . <nl> + / / <nl> + / / SERIAL_PORT_MONITOR Port which normally prints to the Arduino Serial Monitor <nl> + / / <nl> + / / SERIAL_PORT_USBVIRTUAL Port which is USB virtual serial <nl> + / / <nl> + / / SERIAL_PORT_LINUXBRIDGE Port which connects to a Linux system via Bridge library <nl> + / / <nl> + / / SERIAL_PORT_HARDWARE Hardware serial port , physical RX & TX pins . <nl> + / / <nl> + / / SERIAL_PORT_HARDWARE_OPEN Hardware serial ports which are open for use . Their RX & TX <nl> + / / pins are NOT connected to anything by default . <nl> + # define SERIAL_PORT_MONITOR Serial <nl> + # define SERIAL_PORT_HARDWARE Serial1 <nl> + # endif <nl> + <nl> + # endif / * _VARIANT_ARDUINO_STM32_ * / <nl> mmm a / platformio . ini <nl> ppp b / platformio . ini <nl> lib_ignore = Adafruit NeoPixel , c1921b4 , TMCStepper , TMC26XStepper , SailfishLCD , <nl> src_filter = $ { common . default_src_filter } + < src / HAL / HAL_STM32 > <nl> monitor_speed = 250000 <nl> <nl> + # <nl> + # BIGTREE_SKR_PRO ( STM32F407ZGT6 ARM Cortex - M4 ) <nl> + # <nl> + [ env : BIGTREE_SKR_PRO ] <nl> + platform = ststm32 <nl> + framework = arduino <nl> + board = BigTree_SKR_Pro <nl> + extra_scripts = pre : buildroot / share / PlatformIO / scripts / generic_create_variant . py <nl> + build_flags = $ { common . build_flags } <nl> + - DUSBCON - DUSBD_USE_CDC - DUSBD_VID = 0x0483 - DUSB_PRODUCT = \ " STM32F407ZG \ " <nl> + - DTARGET_STM32F4 - DSTM32F407_5ZX - DVECT_TAB_OFFSET = 0x8000 <nl> + lib_deps = $ { common . lib_deps } <nl> + lib_ignore = Adafruit NeoPixel , c1921b4 , TMC26XStepper , SailfishLCD , SailfishRGB_LED , SlowSoftI2CMaster <nl> + src_filter = $ { common . default_src_filter } + < src / HAL / HAL_STM32 > <nl> + monitor_speed = 250000 <nl> + upload_protocol = cmsis - dap <nl> + <nl> # <nl> # Teensy 3 . 5 / 3 . 6 ( ARM Cortex - M4 ) <nl> # <nl>
|
BigTree SKR Pro V1 . 1 board support ( )
|
MarlinFirmware/Marlin
|
439e28783bc24b371fa2cd5e73a647b7b63f1b4e
|
2019-07-07T02:52:17Z
|
new file mode 100644 <nl> index 00000000000 . . 21a04983e09 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / client / ide_service / clientIdeDaemon . ml <nl> <nl> + ( * <nl> + * Copyright ( c ) 2019 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + open Core_kernel <nl> + <nl> + type message = <nl> + | Message : ' a ClientIdeMessage . t - > message <nl> + type message_queue = message Lwt_message_queue . t <nl> + <nl> + type state = <nl> + | Initializing <nl> + | Failed_to_initialize of string <nl> + | Initialized of { <nl> + saved_state_info : Saved_state_loader . Naming_table_saved_state_info . t ; <nl> + hhi_root : Path . t ; <nl> + server_env : ServerEnv . env ; <nl> + changed_files_to_process : Path . Set . t ; <nl> + } <nl> + type t = { <nl> + message_queue : message_queue ; <nl> + state : state ; <nl> + } <nl> + <nl> + let log s = <nl> + Hh_logger . log ( " [ ide - daemon ] " ^ ^ s ) <nl> + <nl> + let set_up_hh_logger_for_client_ide_service ~ ( root : Path . t ) : unit = <nl> + ( * Log to a file on disk . Note that calls to ` Hh_logger ` will always write to <nl> + ` stderr ` ; this is in addition to that . * ) <nl> + let client_ide_log_fn = ( ServerFiles . client_ide_log root ) in <nl> + begin try <nl> + Sys . rename client_ide_log_fn ( client_ide_log_fn ^ " . old " ) <nl> + with _e - > <nl> + ( ) <nl> + end ; <nl> + Hh_logger . set_log client_ide_log_fn ( Out_channel . create <nl> + client_ide_log_fn <nl> + ~ append : true <nl> + ) ; <nl> + EventLogger . init EventLogger . Event_logger_fake 0 . 0 ; <nl> + log " Starting client IDE service at % s " client_ide_log_fn <nl> + <nl> + let load_naming_table_from_saved_state_info <nl> + ( server_env : ServerEnv . env ) <nl> + ( saved_state_info : Saved_state_loader . Naming_table_saved_state_info . t ) <nl> + : ServerEnv . env Lwt . t = <nl> + let path = <nl> + Saved_state_loader . Naming_table_saved_state_info . ( Path . to_string <nl> + saved_state_info . naming_table_path ) in <nl> + let naming_table = Naming_table . load_from_sqlite <nl> + ~ update_reverse_entries : false <nl> + path <nl> + in <nl> + log " Loaded naming table from SQLite database at % s " path ; <nl> + let server_env = { server_env with ServerEnv . naming_table } in <nl> + Lwt . return server_env <nl> + <nl> + let load_saved_state <nl> + ( env : ServerEnv . env ) <nl> + ~ ( root : Path . t ) <nl> + ~ ( hhi_root : Path . t ) <nl> + ~ ( naming_table_saved_state_path : Path . t option ) <nl> + : state Lwt . t = <nl> + log " [ saved - state ] Starting load in root % s " ( Path . to_string root ) ; <nl> + let % lwt result = <nl> + try % lwt <nl> + let % lwt result = match naming_table_saved_state_path with <nl> + | Some naming_table_saved_state_path - > <nl> + ( * Assume that there are no changed files on disk if we ' re getting <nl> + passed the path to the saved - state directly , and that the saved - state <nl> + corresponds to the current state of the world . * ) <nl> + let changed_files = [ ] in <nl> + Lwt . return_ok ( { Saved_state_loader . Naming_table_saved_state_info . <nl> + naming_table_path = naming_table_saved_state_path ; <nl> + } , changed_files ) <nl> + | None - > <nl> + let % lwt result = State_loader_lwt . load <nl> + ~ repo : root <nl> + ~ saved_state_type : Saved_state_loader . Naming_table in <nl> + Lwt . 
return result <nl> + in <nl> + let % lwt new_state = match result with <nl> + | Ok ( saved_state_info , changed_files ) - > <nl> + log " [ saved - state ] Naming table path : % s " <nl> + Saved_state_loader . Naming_table_saved_state_info . ( Path . to_string <nl> + saved_state_info . naming_table_path ) ; <nl> + <nl> + let % lwt server_env = load_naming_table_from_saved_state_info <nl> + env saved_state_info in <nl> + log " [ saved - state ] Load succeeded " ; <nl> + <nl> + Lwt . return ( Initialized { <nl> + saved_state_info ; <nl> + hhi_root ; <nl> + server_env ; <nl> + changed_files_to_process = Path . Set . of_list changed_files ; <nl> + } ) <nl> + | Error load_error - > <nl> + let message = Saved_state_loader . load_error_to_string load_error in <nl> + log " [ saved - state ] % s " message ; <nl> + Lwt . return ( Failed_to_initialize message ) <nl> + in <nl> + Lwt . return new_state <nl> + with e - > <nl> + let stack = Printexc . get_backtrace ( ) in <nl> + Hh_logger . exc e <nl> + ~ prefix : " Uncaught exception in client IDE services " <nl> + ~ stack ; <nl> + Lwt . return ( Failed_to_initialize ( Printf . sprintf <nl> + " Uncaught exception in client IDE services : % s " stack ) ) <nl> + in <nl> + Lwt . return result <nl> + <nl> + let initialize <nl> + ( { ClientIdeMessage . Initialize_from_saved_state . <nl> + root ; <nl> + naming_table_saved_state_path ; <nl> + } : ClientIdeMessage . Initialize_from_saved_state . t ) = <nl> + set_up_hh_logger_for_client_ide_service ~ root ; <nl> + <nl> + Relative_path . set_path_prefix Relative_path . Root root ; <nl> + let hhi_root = Hhi . get_hhi_root ( ) in <nl> + log " Extracted hhi files to directory % s " ( Path . to_string hhi_root ) ; <nl> + Relative_path . set_path_prefix Relative_path . Hhi hhi_root ; <nl> + Relative_path . set_path_prefix Relative_path . Tmp ( Path . make " / tmp " ) ; <nl> + <nl> + let server_args = ServerArgs . default_options ~ root : ( Path . to_string root ) in <nl> + let ( server_config , server_local_config ) = ServerConfig . load <nl> + ServerConfig . filename server_args in <nl> + <nl> + ( * NOTE : We don ' t want to depend on shared memory in the long - term , since <nl> + we ' re only running one process and don ' t need to share memory with anyone . To <nl> + remove the shared memory usage here requires refactoring our heaps to never <nl> + write to shared memory . * ) <nl> + let _ : SharedMem . handle = SharedMem . init <nl> + ~ num_workers : 0 <nl> + ( ServerConfig . sharedmem_config server_config ) <nl> + in <nl> + let bytes_per_word = Sys . word_size / 8 in <nl> + let words_per_mb = 1_000_000 / bytes_per_word in <nl> + let max_size_in_words = 250 * words_per_mb in <nl> + Provider_config . set_local_memory_backend ~ max_size_in_words ; <nl> + <nl> + let genv = ServerEnvBuild . make_genv <nl> + server_args <nl> + server_config <nl> + server_local_config <nl> + [ ] ( * no workers * ) <nl> + None ( * no lru_workers * ) <nl> + in <nl> + let server_env = ServerEnvBuild . make_env genv . ServerEnv . config in <nl> + GlobalParserOptions . set server_env . ServerEnv . popt ; <nl> + GlobalNamingOptions . set server_env . ServerEnv . tcopt ; <nl> + <nl> + ( * Use server_config to modify server_env with the correct symbol index * ) <nl> + let namespace_map = GlobalOptions . po_auto_namespace_map server_env . ServerEnv . tcopt in <nl> + let sienv = SymbolIndex . initialize <nl> + ~ globalrev_opt : None <nl> + ~ namespace_map <nl> + ~ provider_name : server_local_config . ServerLocalConfig . 
symbolindex_search_provider <nl> + ~ quiet : server_local_config . ServerLocalConfig . symbolindex_quiet <nl> + ~ savedstate_file_opt : server_local_config . ServerLocalConfig . symbolindex_file <nl> + ~ workers : None in <nl> + let sienv = { <nl> + sienv with <nl> + SearchUtils . sie_log_timings = true ; <nl> + } in <nl> + let server_env = { <nl> + server_env with <nl> + ServerEnv . local_symbol_table = ref sienv ; <nl> + } in <nl> + <nl> + let % lwt new_state = load_saved_state server_env <nl> + ~ root ~ hhi_root ~ naming_table_saved_state_path in <nl> + log " Serverless IDE has completed initialization " ; <nl> + Lwt . return new_state <nl> + <nl> + let shutdown ( state : state ) : unit Lwt . t = <nl> + match state with <nl> + | Initializing <nl> + | Failed_to_initialize _ - > <nl> + log " No cleanup to be done " ; <nl> + Lwt . return_unit <nl> + | Initialized { hhi_root ; _ } - > <nl> + let hhi_root = Path . to_string hhi_root in <nl> + log " Removing hhi directory % s . . . " hhi_root ; <nl> + Sys_utils . rm_dir_tree hhi_root ; <nl> + Lwt . return_unit <nl> + <nl> + let make_context_from_document_location <nl> + ( server_env : ServerEnv . env ) <nl> + ( document_location : ClientIdeMessage . document_location ) <nl> + : ( ServerIdeContext . t * ServerIdeContext . entry ) = <nl> + let ( file_path , file_input ) = match document_location with <nl> + | { ClientIdeMessage . file_contents = None ; file_path ; _ } - > <nl> + let file_input = <nl> + ServerCommandTypes . FileName ( Path . to_string file_path ) in <nl> + ( file_path , file_input ) <nl> + | { ClientIdeMessage . file_contents = Some file_contents ; file_path ; _ } - > <nl> + let file_input = ServerCommandTypes . FileContent file_contents in <nl> + ( file_path , file_input ) <nl> + in <nl> + let file_path = file_path <nl> + | > Path . to_string <nl> + | > Relative_path . create_detect_prefix in <nl> + ServerIdeContext . update <nl> + ~ tcopt : server_env . ServerEnv . tcopt <nl> + ~ ctx : ServerIdeContext . empty <nl> + ~ path : file_path <nl> + ~ file_input <nl> + <nl> + module Handle_message_result = struct <nl> + type ' a t = <nl> + | Notification <nl> + | Response of ' a <nl> + | Error of string <nl> + end <nl> + <nl> + let handle_message : type a . <nl> + state - > <nl> + a ClientIdeMessage . t - > <nl> + ( state * a Handle_message_result . t ) Lwt . t = <nl> + fun state message - > <nl> + let open ClientIdeMessage in <nl> + match ( state , message ) with <nl> + | ( state , Shutdown ( ) ) - > <nl> + let % lwt ( ) = shutdown state in <nl> + Lwt . return ( state , Handle_message_result . Response ( ) ) <nl> + <nl> + | ( ( Failed_to_initialize _ | Initializing ) , File_changed _ ) - > <nl> + ( * Should not happen . * ) <nl> + Lwt . return ( state , Handle_message_result . Error ( <nl> + " IDE services could not process file change because " ^ <nl> + " it failed to initialize or was still initializing . The caller " ^ <nl> + " should have waited for the IDE services to become ready before " ^ <nl> + " sending file - change notifications . " <nl> + ) ) <nl> + | ( Initialized ( { <nl> + changed_files_to_process ; <nl> + _ <nl> + } as state ) , File_changed path ) - > <nl> + let changed_files_to_process = Path . Set . add changed_files_to_process path in <nl> + let state = Initialized { state with changed_files_to_process } in <nl> + Lwt . return ( state , Handle_message_result . Notification ) <nl> + <nl> + | ( Initializing , Initialize_from_saved_state param ) - > <nl> + let % lwt new_state = initialize param in <nl> + Lwt . 
return ( new_state , Handle_message_result . Response ( ) ) <nl> + | ( Initialized _ , Initialize_from_saved_state _ ) - > <nl> + Lwt . return ( state , Handle_message_result . Error <nl> + " Tried to initialize when already initialized " ) <nl> + <nl> + | ( Initializing , _ ) - > <nl> + Lwt . return ( state , Handle_message_result . Error <nl> + " IDE services have not yet been initialized " ) <nl> + | ( Failed_to_initialize error_message , _ ) - > <nl> + Lwt . return ( state , Handle_message_result . Error ( Printf . sprintf <nl> + " IDE services failed to initialize : % s " error_message ) ) <nl> + <nl> + | ( Initialized { server_env ; _ } , Hover document_location ) - > <nl> + let ( ctx , entry ) = <nl> + make_context_from_document_location server_env document_location in <nl> + let result = ServerIdeContext . with_context ~ ctx ~ f : ( fun ( ) - > <nl> + ServerHover . go_ctx <nl> + ~ ctx <nl> + ~ entry <nl> + ~ line : document_location . ClientIdeMessage . line <nl> + ~ column : document_location . ClientIdeMessage . column <nl> + ) in <nl> + Hh_logger . log " hover result length is % d " ( List . length result ) ; <nl> + Lwt . return ( state , Handle_message_result . Response result ) <nl> + <nl> + ( * Autocomplete * ) <nl> + | ( Initialized { server_env ; _ } , <nl> + Completion { ClientIdeMessage . Completion . <nl> + document_location = { ClientIdeMessage . <nl> + file_path ; <nl> + file_contents ; <nl> + line ; <nl> + column ; <nl> + } ; <nl> + is_manually_invoked ; <nl> + } ) - > <nl> + let path = file_path <nl> + | > Path . to_string <nl> + | > Relative_path . create_detect_prefix in <nl> + let file_content = match file_contents with <nl> + | Some file_contents - > <nl> + file_contents <nl> + | None - > <nl> + file_path <nl> + | > Path . to_string <nl> + | > Sys_utils . cat_no_fail <nl> + in <nl> + let sienv = ! ( server_env . ServerEnv . local_symbol_table ) in <nl> + let matches = ServerAutoComplete . auto_complete_at_position_ctx <nl> + ~ line <nl> + ~ column <nl> + ~ file_content <nl> + ~ path <nl> + ~ tcopt : server_env . ServerEnv . tcopt <nl> + ~ sienv <nl> + ~ is_manually_invoked <nl> + in <nl> + let result = { AutocompleteTypes . <nl> + completions = matches . Utils . With_complete_flag . value ; <nl> + char_at_pos = ' ' ; <nl> + is_complete = matches . Utils . With_complete_flag . is_complete ; <nl> + } in <nl> + Lwt . return ( state , Handle_message_result . Response result ) <nl> + <nl> + ( * Autocomplete docblock resolve * ) <nl> + | ( Initialized { server_env ; _ } , Completion_resolve param ) - > <nl> + let open ClientIdeMessage . Completion_resolve in <nl> + let start_time = Unix . gettimeofday ( ) in <nl> + let result = ServerDocblockAt . go_docblock_for_symbol <nl> + ~ env : server_env <nl> + ~ symbol : param . symbol <nl> + ~ kind : param . kind <nl> + in <nl> + let sienv = ! ( server_env . ServerEnv . local_symbol_table ) in <nl> + if sienv . SearchUtils . sie_log_timings then begin <nl> + let _t : float = <nl> + Hh_logger . log_duration ( Printf . sprintf " [ docblock ] Search for [ % s ] [ % s ] " <nl> + param . symbol ( SearchUtils . show_si_kind param . kind ) ) start_time in <nl> + ( ) <nl> + end ; <nl> + Lwt . return ( state , Handle_message_result . Response result ) <nl> + <nl> + ( * Document highlighting * ) <nl> + | ( Initialized { server_env ; _ } , Document_highlight document_location ) - > <nl> + let ( ctx , entry ) = <nl> + make_context_from_document_location server_env document_location in <nl> + let results = ServerIdeContext . 
with_context ~ ctx ~ f : ( fun ( ) - > <nl> + ServerHighlightRefs . go_ctx <nl> + ~ entry <nl> + ~ line : document_location . line <nl> + ~ column : document_location . column <nl> + ~ tcopt : server_env . ServerEnv . tcopt <nl> + ) in <nl> + Lwt . return ( state , Handle_message_result . Response results ) <nl> + <nl> + | ( Initialized { server_env ; _ } , Definition document_location ) - > <nl> + let ( ctx , entry ) = <nl> + make_context_from_document_location server_env document_location in <nl> + let result = ServerIdeContext . with_context ~ ctx ~ f : ( fun ( ) - > <nl> + ServerGoToDefinition . go_ctx <nl> + ~ entry <nl> + ~ line : document_location . ClientIdeMessage . line <nl> + ~ column : document_location . ClientIdeMessage . column <nl> + ) in <nl> + Lwt . return ( state , Handle_message_result . Response result ) <nl> + <nl> + ( * Type Definition * ) <nl> + | ( Initialized { server_env ; _ } , Type_definition document_location ) - > <nl> + let ( ctx , entry ) = <nl> + make_context_from_document_location server_env document_location in <nl> + let result = ServerIdeContext . with_context ~ ctx ~ f : ( fun ( ) - > <nl> + ServerTypeDefinition . go_ctx <nl> + ~ entry <nl> + ~ line : document_location . ClientIdeMessage . line <nl> + ~ column : document_location . ClientIdeMessage . column <nl> + ) in <nl> + Lwt . return ( state , Handle_message_result . Response result ) <nl> + <nl> + let write_message <nl> + ~ ( out_fd : Lwt_unix . file_descr ) <nl> + ~ ( message : ClientIdeMessage . message_from_daemon ) <nl> + : unit Lwt . t = <nl> + let % lwt _ : int = <nl> + Marshal_tools_lwt . to_fd_with_preamble out_fd message in <nl> + Lwt . return_unit <nl> + <nl> + let serve <nl> + ( type a ) <nl> + ~ ( in_fd : Lwt_unix . file_descr ) <nl> + ~ ( out_fd : Lwt_unix . file_descr ) <nl> + : unit Lwt . t = <nl> + let rec pump_message_queue ( message_queue : message_queue ) : unit Lwt . t = <nl> + try % lwt <nl> + let % lwt ( message : a ClientIdeMessage . t ) = <nl> + Marshal_tools_lwt . from_fd_with_preamble in_fd in <nl> + let is_queue_open = <nl> + Lwt_message_queue . push message_queue ( Message message ) in <nl> + match message with <nl> + | ClientIdeMessage . Shutdown ( ) - > <nl> + Lwt . return_unit <nl> + | _ when not is_queue_open - > <nl> + Lwt . return_unit <nl> + | _ - > <nl> + pump_message_queue message_queue <nl> + with e - > <nl> + let e = Exception . wrap e in <nl> + Lwt_message_queue . close message_queue ; <nl> + Exception . reraise e <nl> + in <nl> + <nl> + let rec handle_messages ( t : t ) : unit Lwt . t = <nl> + match t with <nl> + | { message_queue ; <nl> + state = Initialized ( { server_env ; changed_files_to_process ; _ } <nl> + as state ) ; } <nl> + when ( Lwt_message_queue . is_empty message_queue ) <nl> + & & not ( Lwt_unix . readable in_fd ) <nl> + & & not ( Path . Set . is_empty changed_files_to_process ) - > <nl> + ( * Process the next file change , but only if we have no new events to <nl> + handle . To ensure correctness , we would have to actually process all file <nl> + change events * before * we processed any other IDE queries . However , we ' re <nl> + trying to maximize availability , even if occasionally we give stale <nl> + results . We can revisit this trade - off later if we decide that the stale <nl> + results are baffling users . * ) <nl> + let next_file = Path . Set . choose changed_files_to_process in <nl> + let changed_files_to_process = <nl> + Path . Set . 
remove changed_files_to_process next_file in <nl> + let % lwt server_env = <nl> + ClientIdeIncremental . process_changed_file server_env next_file in <nl> + let % lwt ( ) = <nl> + if Path . Set . is_empty changed_files_to_process <nl> + then <nl> + let message = ClientIdeMessage . ( Notification Done_processing ) in <nl> + let % lwt ( ) = write_message ~ out_fd ~ message in <nl> + Lwt . return_unit <nl> + else <nl> + Lwt . return_unit <nl> + in <nl> + let state = Initialized { <nl> + state with <nl> + server_env ; <nl> + changed_files_to_process ; <nl> + } in <nl> + handle_messages { t with state } <nl> + <nl> + | t - > <nl> + let % lwt message = Lwt_message_queue . pop t . message_queue in <nl> + match message with <nl> + | None - > <nl> + Lwt . return_unit <nl> + | Some ( Message message ) - > <nl> + let % lwt state = <nl> + try % lwt <nl> + let % lwt ( state , response ) = handle_message t . state message in <nl> + match response with <nl> + | Handle_message_result . Notification - > <nl> + ( * No response needed for notifications . * ) <nl> + Lwt . return state <nl> + | Handle_message_result . Response response - > <nl> + let response = ClientIdeMessage . Response ( Ok response ) in <nl> + let % lwt ( ) = write_message ~ out_fd ~ message : response in <nl> + Lwt . return state <nl> + | Handle_message_result . Error message - > <nl> + let response = ClientIdeMessage . Response ( Error message ) in <nl> + let % lwt ( ) = write_message ~ out_fd ~ message : response in <nl> + Lwt . return state <nl> + with e - > <nl> + let stack = Printexc . get_backtrace ( ) in <nl> + Hh_logger . exc ~ prefix : " [ ide - daemon ] exception : " ~ stack e ; <nl> + Lwt . return t . state <nl> + in <nl> + handle_messages { t with state } <nl> + in <nl> + <nl> + try % lwt <nl> + let message_queue = Lwt_message_queue . create ( ) in <nl> + let % lwt ( ) = handle_messages { <nl> + message_queue ; <nl> + state = Initializing ; <nl> + } <nl> + and ( ) = pump_message_queue message_queue in <nl> + Lwt . return_unit <nl> + with e - > <nl> + let e = Exception . wrap e in <nl> + log <nl> + " Exception occurred while handling RPC call : % s " <nl> + ( Exception . to_string e ) ; <nl> + Lwt . return_unit <nl> + <nl> + let daemon_main ( ) ( channels : ( ' a , ' b ) Daemon . channel_pair ) : unit = <nl> + Printexc . record_backtrace true ; <nl> + let ( ic , oc ) = channels in <nl> + let in_fd = Lwt_unix . of_unix_file_descr ( Daemon . descr_of_in_channel ic ) in <nl> + let out_fd = Lwt_unix . of_unix_file_descr ( Daemon . descr_of_out_channel oc ) in <nl> + Lwt_main . run ( serve ~ in_fd ~ out_fd ) <nl> + <nl> + let daemon_entry_point : ( unit , unit , unit ) Daemon . entry = <nl> + Daemon . register_entry_point " ClientIdeService " daemon_main <nl> new file mode 100644 <nl> index 00000000000 . . fc1b72b8ab0 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / client / ide_service / clientIdeDaemon . mli <nl> <nl> + ( * <nl> + * Copyright ( c ) 2019 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + val daemon_entry_point : ( unit , unit , unit ) Daemon . entry <nl> new file mode 100644 <nl> index 00000000000 . . b6523fcee87 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / client / ide_service / clientIdeIncremental . ml <nl> <nl> + ( * <nl> + * Copyright ( c ) 2019 , Facebook , Inc . <nl> + * All rights reserved . 
<nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + open Core_kernel <nl> + <nl> + let log s = <nl> + Hh_logger . log ( " [ ide - incremental ] " ^ ^ s ) <nl> + ; ; <nl> + <nl> + let strip_positions symbols = <nl> + List . fold symbols ~ init : SSet . empty ~ f : ( fun acc ( _ , x ) - > <nl> + SSet . add x acc <nl> + ) <nl> + ; ; <nl> + <nl> + ( * Print old and new symbols in a file after a change * ) <nl> + let log_file_info_change <nl> + ~ ( old_file_info : FileInfo . t option ) <nl> + ~ ( new_file_info : FileInfo . t option ) <nl> + ~ ( start_time : float ) <nl> + ~ ( path : Relative_path . t ) <nl> + : unit = <nl> + let end_time = Unix . gettimeofday ( ) in <nl> + let open FileInfo in <nl> + let list_symbols_in_file_info file_info = <nl> + let symbol_list_to_string symbols = <nl> + let num_symbols = List . length symbols in <nl> + let max_num_symbols_to_show = 5 in <nl> + match symbols with <nl> + | [ ] - > <nl> + " < none > " <nl> + | symbols when num_symbols < = max_num_symbols_to_show - > <nl> + symbols <nl> + | > strip_positions <nl> + | > SSet . elements <nl> + | > String . concat ~ sep : " , " <nl> + | symbols - > <nl> + let num_remaining_symbols = num_symbols - max_num_symbols_to_show in <nl> + let symbols = List . take symbols max_num_symbols_to_show in <nl> + Printf . sprintf " % s ( + % d more . . . ) " <nl> + ( symbols <nl> + | > strip_positions <nl> + | > SSet . elements <nl> + | > String . concat ~ sep : " , " ) <nl> + num_remaining_symbols ; <nl> + in <nl> + match file_info with <nl> + | Some file_info - > <nl> + Printf . sprintf " funs : % s , classes : % s , typedefs : % s , consts : % s " <nl> + ( symbol_list_to_string file_info . funs ) <nl> + ( symbol_list_to_string file_info . classes ) <nl> + ( symbol_list_to_string file_info . typedefs ) <nl> + ( symbol_list_to_string file_info . consts ) <nl> + | None - > <nl> + " < file absent > " <nl> + in <nl> + <nl> + let verb = <nl> + match ( old_file_info , new_file_info ) with <nl> + | ( Some _ , Some _ ) - > " updated " <nl> + | ( Some _ , None ) - > " deleted " <nl> + | ( None , Some _ ) - > " added " <nl> + | ( None , None ) - > <nl> + ( * May or may not indicate a bug in either the language client or the <nl> + language server . <nl> + <nl> + - Could happen if the language client sends spurious notifications . <nl> + - Could happen if the editor writes files in a certain way , such as if <nl> + they delete the file before moving a new one into place . <nl> + - Could happen if the language server was not able to read the file , <nl> + despite it existing on disk ( e . g . due to permissions ) . In this case , <nl> + we would fail to generate its [ FileInfo . t ] and assume that it was <nl> + deleted . This is correct from a certain point of view . <nl> + - Could happen due to a benign race condition where we process <nl> + file - change notifications more slowly than they happen . If a file is <nl> + quickly created , then deleted before we process the create event , <nl> + we ' ll think it was deleted twice . This is the correct way to handle <nl> + the race condition . <nl> + * ) <nl> + " spuriously updated " <nl> + in <nl> + log " File changed ( % . 3fs ) % s % s : old : % s vs . new : % s " <nl> + ( end_time - . start_time ) <nl> + ( Relative_path . 
to_absolute path ) <nl> + verb <nl> + ( list_symbols_in_file_info old_file_info ) <nl> + ( list_symbols_in_file_info new_file_info ) <nl> + ; ; <nl> + <nl> + ( * <nl> + * This fetches the new names out of the modified file <nl> + * Result : ( old * new ) <nl> + * ) <nl> + let compute_fileinfo_for_path <nl> + ( env : ServerEnv . env ) <nl> + ( path : Relative_path . t ) <nl> + : ( FileInfo . t option * FileInfo . t option ) Lwt . t = <nl> + <nl> + let start_time = Unix . gettimeofday ( ) in <nl> + let naming_table = env . ServerEnv . naming_table in <nl> + let old_file_info = Naming_table . get_file_info naming_table path in <nl> + <nl> + ( * Fetch file contents * ) <nl> + let % lwt contents = Lwt_utils . read_all ( Relative_path . to_absolute path ) in <nl> + let contents = Result . ok contents in <nl> + <nl> + let new_file_info = <nl> + match contents with <nl> + | None - > None <nl> + ( * The file couldn ' t be read from disk . Assume it ' s been deleted or is <nl> + otherwise inaccessible . We ' ve already deleted the entries from the naming <nl> + table and reverse naming table , so there ' s nothing left to do here . * ) <nl> + | Some contents - > <nl> + ( * We don ' t want our symbols to be mangled for export . Mangling would <nl> + * convert : xhp : myclass to __xhp_myclass , which would fail name lookup * ) <nl> + Facts_parser . mangle_xhp_mode : = false ; <nl> + let facts = Facts_parser . from_text <nl> + ~ php5_compat_mode : false <nl> + ~ hhvm_compat_mode : true <nl> + ~ filename : path <nl> + ~ text : contents in <nl> + let ( funs , classes , typedefs , consts ) = match facts with <nl> + | None - > <nl> + ( * File failed to parse or was not a Hack file . * ) <nl> + ( [ ] , [ ] , [ ] , [ ] ) <nl> + | Some facts - > <nl> + let to_ids name_type names = List . map names ~ f : ( fun name - > <nl> + let fixed_name = Utils . add_ns name in <nl> + let pos = FileInfo . File ( name_type , path ) in <nl> + ( pos , fixed_name ) ) in <nl> + let funs = facts . Facts . functions | > to_ids FileInfo . Fun in <nl> + <nl> + ( * Classes and typedefs are both stored under ` types ` . There ' s also a <nl> + ` typeAliases ` field which only stores typedefs that we could use if we <nl> + wanted , but we write out the pattern - matches here for <nl> + exhaustivity - checking . * ) <nl> + let classes = facts . Facts . types <nl> + | > Facts . InvSMap . filter ( fun _k v - > <nl> + let open Facts in <nl> + match v . kind with <nl> + | TKClass <nl> + | TKInterface <nl> + | TKEnum <nl> + | TKTrait <nl> + | TKUnknown <nl> + | TKMixed - > <nl> + true <nl> + | TKTypeAlias - > <nl> + false <nl> + ) <nl> + | > Facts . InvSMap . keys <nl> + | > to_ids FileInfo . Class in <nl> + let typedefs = facts . Facts . types <nl> + | > Facts . InvSMap . filter ( fun _k v - > <nl> + let open Facts in <nl> + match v . kind with <nl> + | TKTypeAlias - > <nl> + true <nl> + | TKClass <nl> + | TKInterface <nl> + | TKEnum <nl> + | TKTrait <nl> + | TKUnknown <nl> + | TKMixed - > <nl> + false <nl> + ) <nl> + | > Facts . InvSMap . keys <nl> + | > to_ids FileInfo . Typedef in <nl> + <nl> + let consts = facts . Facts . constants | > to_ids FileInfo . Const in <nl> + ( funs , classes , typedefs , consts ) <nl> + in <nl> + <nl> + let fi_mode = <nl> + Full_fidelity_parser . parse_mode <nl> + ~ rust : ( ParserOptions . rust env . ServerEnv . popt ) <nl> + ( Full_fidelity_source_text . make <nl> + path <nl> + contents ) <nl> + | > Option . value <nl> + ( * TODO : is this a reasonable default ? * ) <nl> + ~ default : FileInfo . 
Mstrict <nl> + in <nl> + Some { FileInfo . <nl> + file_mode = Some fi_mode ; <nl> + funs ; <nl> + classes ; <nl> + typedefs ; <nl> + consts ; <nl> + hash = None ; <nl> + comments = None ; <nl> + } <nl> + in <nl> + <nl> + log_file_info_change <nl> + ~ old_file_info <nl> + ~ new_file_info <nl> + ~ start_time <nl> + ~ path ; <nl> + Lwt . return ( old_file_info , new_file_info ) <nl> + ; ; <nl> + <nl> + let update_naming_table <nl> + ~ ( env : ServerEnv . env ) <nl> + ~ ( path : Relative_path . t ) <nl> + ~ ( old_file_info : FileInfo . t option ) <nl> + ~ ( new_file_info : FileInfo . t option ) <nl> + : ServerEnv . env = <nl> + let naming_table = env . ServerEnv . naming_table in <nl> + <nl> + ( * Remove the old entries from the forward and reverse naming tables . * ) <nl> + let naming_table = <nl> + match old_file_info with <nl> + | None - > naming_table <nl> + | Some old_file_info - > <nl> + <nl> + ( * Update reverse naming table * ) <nl> + let open FileInfo in <nl> + NamingGlobal . remove_decls <nl> + ~ funs : ( strip_positions old_file_info . funs ) <nl> + ~ classes : ( strip_positions old_file_info . classes ) <nl> + ~ typedefs : ( strip_positions old_file_info . typedefs ) <nl> + ~ consts : ( strip_positions old_file_info . consts ) ; <nl> + <nl> + ( * Update and return the forward naming table * ) <nl> + Naming_table . remove naming_table path <nl> + in <nl> + <nl> + ( * Update forward naming table and reverse naming table with the new <nl> + declarations . * ) <nl> + let naming_table = <nl> + match new_file_info with <nl> + | None - > naming_table <nl> + | Some new_file_info - > <nl> + <nl> + ( * Update reverse naming table * ) <nl> + let open FileInfo in <nl> + <nl> + ( * TODO : when we start typechecking files , we ' ll have to keep track of <nl> + which files have naming errors , so that we can re - typecheck them <nl> + - Also note that [ _fast ] means that this function call ignores errors * ) <nl> + NamingGlobal . ndecl_file_fast <nl> + path <nl> + ~ funs : ( strip_positions new_file_info . funs ) <nl> + ~ classes : ( strip_positions new_file_info . classes ) <nl> + ~ typedefs : ( strip_positions new_file_info . typedefs ) <nl> + ~ consts : ( strip_positions new_file_info . consts ) ; <nl> + <nl> + ( * Update and return the forward naming table * ) <nl> + Naming_table . update <nl> + naming_table <nl> + path <nl> + new_file_info <nl> + in <nl> + <nl> + { env with ServerEnv . naming_table } <nl> + ; ; <nl> + <nl> + let process_changed_file <nl> + ( env : ServerEnv . env ) <nl> + ( path : Path . t ) <nl> + : ServerEnv . env Lwt . t = <nl> + let path = Path . to_string path in <nl> + match Relative_path . strip_root_if_possible path with <nl> + | None - > <nl> + log " Ignored change to file % s , as it is not within our repo root " path ; <nl> + Lwt . return env <nl> + | Some path - > <nl> + let path = Relative_path . from_root path in <nl> + if not ( FindUtils . path_filter path ) <nl> + then Lwt . return env <nl> + else <nl> + let % lwt ( old_file_info , new_file_info ) = compute_fileinfo_for_path env path in <nl> + let env = update_naming_table ~ env ~ path ~ old_file_info ~ new_file_info in <nl> + Lwt . return env <nl> + ; ; <nl> new file mode 100644 <nl> index 00000000000 . . 6934fc11249 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / client / ide_service / clientIdeIncremental . mli <nl> <nl> + ( * <nl> + * Copyright ( c ) 2019 , Facebook , Inc . <nl> + * All rights reserved . 
<nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + val process_changed_file : ServerEnv . env - > Path . t - > ServerEnv . env Lwt . t <nl> + ( * * Update the forward and reverse naming tables by parsing the file at the <nl> + given path and installing their declarations . If the file could not be read , <nl> + it ' s assumed to be deleted . <nl> + <nl> + Returns an updated [ ServerEnv . env ] , but also <nl> + modifies the global naming table state in [ NamingGlobal ] . * ) <nl> similarity index 100 % <nl> rename from hphp / hack / src / client / clientIdeMessage . ml <nl> rename to hphp / hack / src / client / ide_service / clientIdeMessage . ml <nl> new file mode 100644 <nl> index 00000000000 . . 35f7dd9ab42 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / client / ide_service / clientIdeService . ml <nl> <nl> + ( * <nl> + * Copyright ( c ) 2019 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + open Core_kernel <nl> + <nl> + type state = <nl> + | Uninitialized of { <nl> + wait_for_initialization : bool ; <nl> + } <nl> + | Failed_to_initialize of string <nl> + | Initialized <nl> + <nl> + type message_wrapper = <nl> + | Message_wrapper : ' a ClientIdeMessage . t - > message_wrapper <nl> + ( * * Existential type wrapper for ` ClientIdeMessage . t ` s , so that we can put <nl> + them in a queue without the typechecker trying to infer a concrete type for <nl> + ` ' a ` based on its first use . * ) <nl> + <nl> + type message_queue = message_wrapper Lwt_message_queue . t <nl> + <nl> + type response_wrapper = <nl> + | Response_wrapper : ( ' a , string ) result - > response_wrapper <nl> + ( * * Similar to [ Message_wrapper ] above . * ) <nl> + <nl> + type response_emitter = response_wrapper Lwt_message_queue . t <nl> + <nl> + type notification_emitter = ClientIdeMessage . notification Lwt_message_queue . t <nl> + <nl> + type t = { <nl> + mutable state : state ; <nl> + <nl> + state_changed_cv : unit Lwt_condition . t ; <nl> + ( * * Used to notify tasks when the state changes , so that they can wait for the <nl> + IDE service to be initialized . * ) <nl> + <nl> + daemon_handle : ( unit , unit ) Daemon . handle ; <nl> + ( * * The handle to the daemon process backing the IDE service . <nl> + <nl> + Note that ` ( unit , unit ) ` here refers to the input and output types of the <nl> + IDE service . However , we don ' t use the Daemon API ' s method of <nl> + producing / consuming these messages and instead do it with Lwt , so these <nl> + type parameters are not used . * ) <nl> + <nl> + in_fd : Lwt_unix . file_descr ; <nl> + out_fd : Lwt_unix . file_descr ; <nl> + <nl> + messages_to_send : message_queue ; <nl> + ( * * The queue of messages that we have yet to send to the daemon . * ) <nl> + <nl> + response_emitter : response_emitter ; <nl> + ( * * The queue of responses that we received from RPC calls to the daemon . We <nl> + assume that we receive the responses in the same order that we sent their <nl> + requests . * ) <nl> + <nl> + notification_emitter : notification_emitter ; <nl> + ( * * The queue of notifications that the daemon emitted . Notifications can be <nl> + emitted at any time , not just in response to an RPC call . 
* ) <nl> + } <nl> + <nl> + let set_state ( t : t ) ( new_state : state ) : unit = <nl> + t . state < - new_state ; <nl> + Lwt_condition . broadcast t . state_changed_cv ( ) <nl> + <nl> + let log s = <nl> + Hh_logger . log ( " [ ide - service ] " ^ ^ s ) <nl> + <nl> + let do_rpc <nl> + ~ ( message : ' a ClientIdeMessage . t ) <nl> + ~ ( messages_to_send : message_queue ) <nl> + ~ ( response_emitter : response_emitter ) <nl> + : ( ' a , string ) Lwt_result . t = <nl> + try % lwt <nl> + let success = <nl> + Lwt_message_queue . push messages_to_send ( Message_wrapper message ) in <nl> + ( if not success <nl> + then failwith " Could not send message ( queue was closed ) " ) ; <nl> + <nl> + let % lwt ( response : response_wrapper option ) = <nl> + Lwt_message_queue . pop response_emitter in <nl> + match response with <nl> + | None - > <nl> + failwith " Could not read response : queue was closed " <nl> + | Some ( Response_wrapper response ) - > <nl> + ( * We don ' t carry around the tag at runtime that tells us what type of <nl> + message the response was for . We ' re relying here on the invariant that <nl> + responses are provided in the order that requests are sent , and that we ' re <nl> + not sending any requests in parallel . This looks unsafe , but it ' s not <nl> + adding additional un - safety . The ` Response ` message here came from <nl> + ` Marshal_tools_lwt . from_fd_with_preamble ` , which is inherently unsafe <nl> + ( returns ` ' a ` ) , and we ' ve just happened to pass around that ` ' a ` rather <nl> + than coercing its type immediately . * ) <nl> + let response = Result . map ~ f : Obj . magic response in <nl> + Lwt . return response <nl> + with e - > <nl> + let stack = Printexc . get_backtrace ( ) in <nl> + let exn_message = Exn . to_string e in <nl> + let message = Printf . sprintf <nl> + " Exception occurred while handling RPC call : % s \ nStack : % s " <nl> + exn_message <nl> + stack <nl> + in <nl> + Lwt . return_error message <nl> + <nl> + let make ( ) : t = <nl> + let daemon_handle = Daemon . spawn <nl> + ~ channel_mode : ` pipe <nl> + ( Unix . stdin , Unix . stdout , Unix . stderr ) <nl> + ClientIdeDaemon . daemon_entry_point <nl> + ( ) <nl> + in <nl> + let ( ic , oc ) = daemon_handle . Daemon . channels in <nl> + let in_fd = Lwt_unix . of_unix_file_descr ( Daemon . descr_of_in_channel ic ) in <nl> + let out_fd = Lwt_unix . of_unix_file_descr ( Daemon . descr_of_out_channel oc ) in <nl> + { <nl> + state = Uninitialized { <nl> + wait_for_initialization = false ; <nl> + } ; <nl> + state_changed_cv = Lwt_condition . create ( ) ; <nl> + <nl> + daemon_handle ; <nl> + in_fd ; <nl> + out_fd ; <nl> + <nl> + messages_to_send = Lwt_message_queue . create ( ) ; <nl> + response_emitter = Lwt_message_queue . create ( ) ; <nl> + notification_emitter = Lwt_message_queue . create ( ) ; <nl> + } <nl> + <nl> + let rec wait_for_initialization ( t : t ) : unit Lwt . t = <nl> + match t . state with <nl> + | Uninitialized _ <nl> + | Failed_to_initialize _ - > <nl> + let % lwt ( ) = Lwt_condition . wait t . state_changed_cv in <nl> + wait_for_initialization t <nl> + | Initialized - > <nl> + Lwt . return_unit <nl> + <nl> + let initialize_from_saved_state <nl> + ( t : t ) <nl> + ~ ( root : Path . t ) <nl> + ~ ( naming_table_saved_state_path : Path . t option ) <nl> + ~ ( wait_for_initialization : bool ) <nl> + : ( unit , string ) Lwt_result . t = <nl> + set_state t ( Uninitialized { wait_for_initialization } ) ; <nl> + <nl> + let param = { ClientIdeMessage . Initialize_from_saved_state . 
<nl> + root ; <nl> + naming_table_saved_state_path ; <nl> + } in <nl> + ( * Do not use ` do_rpc ` here , as that depends on a running event loop in <nl> + ` serve ` . But ` serve ` should only be called once the IDE service is <nl> + initialized , after this function has completed . * ) <nl> + let % lwt ( _ : int ) = Marshal_tools_lwt . to_fd_with_preamble <nl> + t . out_fd ( ClientIdeMessage . Initialize_from_saved_state param ) in <nl> + let % lwt ( response : ClientIdeMessage . message_from_daemon ) = <nl> + Marshal_tools_lwt . from_fd_with_preamble t . in_fd in <nl> + <nl> + match response with <nl> + | ClientIdeMessage . Response ( <nl> + Ok _ ( * expected to be ` Ok ( ) ` , but not statically - checkable * ) <nl> + ) - > <nl> + log " Initialized IDE service process ( log file at % s ) " <nl> + ( ServerFiles . client_ide_log root ) ; <nl> + set_state t Initialized ; <nl> + Lwt . return_ok ( ) <nl> + | ClientIdeMessage . Notification _ - > <nl> + let error_message = <nl> + " Failed to initialize IDE service process " <nl> + ^ " because we received a notification before the initialization response " <nl> + in <nl> + log " % s " error_message ; <nl> + set_state t ( Failed_to_initialize error_message ) ; <nl> + Lwt . return_error error_message <nl> + | ClientIdeMessage . Response ( Error error_message ) - > <nl> + log " Failed to initialize IDE service process : % s " <nl> + error_message ; <nl> + set_state t ( Failed_to_initialize error_message ) ; <nl> + Lwt . return_error error_message <nl> + <nl> + let rec serve ( t : t ) : unit Lwt . t = <nl> + let send_queued_up_messages ~ out_fd messages_to_send : bool Lwt . t = <nl> + let % lwt next_message = Lwt_message_queue . pop messages_to_send in <nl> + match next_message with <nl> + | None - > <nl> + Lwt . return false <nl> + | Some ( Message_wrapper next_message ) - > <nl> + let % lwt ( _ : int ) = <nl> + Marshal_tools_lwt . to_fd_with_preamble out_fd next_message in <nl> + Lwt . return true <nl> + in <nl> + <nl> + let emit_messages_from_daemon <nl> + ~ in_fd <nl> + response_emitter <nl> + notification_emitter <nl> + : bool Lwt . t = <nl> + let % lwt ( message : ClientIdeMessage . message_from_daemon ) = <nl> + Marshal_tools_lwt . from_fd_with_preamble in_fd in <nl> + match message with <nl> + | ClientIdeMessage . Notification notification - > <nl> + Lwt . return ( Lwt_message_queue . push notification_emitter notification ) <nl> + | ClientIdeMessage . Response response - > <nl> + Lwt . return ( Lwt_message_queue . push <nl> + response_emitter ( Response_wrapper response ) ) <nl> + in <nl> + <nl> + let % lwt should_continue = <nl> + try % lwt <nl> + ( * We mutate the queues in ` t ` , which is why we don ' t return a new ` t ` here . * ) <nl> + let % lwt should_continue = Lwt . pick [ <nl> + send_queued_up_messages <nl> + ~ out_fd : t . out_fd <nl> + t . messages_to_send ; <nl> + emit_messages_from_daemon <nl> + ~ in_fd : t . in_fd <nl> + t . response_emitter <nl> + t . notification_emitter ; <nl> + ] in <nl> + Lwt . return should_continue <nl> + with e - > <nl> + let e = Exception . wrap e in <nl> + log <nl> + " Exception occurred in ClientIdeService . serve : % s " <nl> + ( Exception . to_string e ) ; <nl> + Lwt . return false <nl> + in <nl> + if should_continue then <nl> + serve t <nl> + else ( <nl> + log " Shutting down " ; <nl> + Lwt . return_unit <nl> + ) <nl> + <nl> + let destroy ( t : t ) : unit Lwt . 
t = <nl> + let { <nl> + daemon_handle ; <nl> + messages_to_send ; <nl> + response_emitter ; <nl> + notification_emitter ; <nl> + _ <nl> + } = t in <nl> + let % lwt ( ) = match t . state with <nl> + | Uninitialized _ <nl> + | Failed_to_initialize _ - > <nl> + Lwt . return_unit <nl> + | Initialized - > <nl> + let % lwt ( ) = Lwt . pick [ <nl> + ( <nl> + let % lwt ( _result : ( unit , string ) result ) = <nl> + do_rpc <nl> + ~ message : ( ClientIdeMessage . Shutdown ( ) ) <nl> + ~ messages_to_send <nl> + ~ response_emitter <nl> + in <nl> + Daemon . kill daemon_handle ; <nl> + Lwt . return_unit <nl> + ) ; <nl> + ( <nl> + let % lwt ( ) = Lwt_unix . sleep 5 . 0 in <nl> + Daemon . kill daemon_handle ; <nl> + Lwt . return_unit <nl> + ) ; <nl> + ] in <nl> + Lwt . return_unit <nl> + in <nl> + Lwt_message_queue . close messages_to_send ; <nl> + Lwt_message_queue . close notification_emitter ; <nl> + Lwt_message_queue . close response_emitter ; <nl> + Lwt . return_unit <nl> + <nl> + let push_message ( t : t ) ( message : message_wrapper ) : unit = <nl> + match t . state with <nl> + | Uninitialized _ <nl> + | Initialized - > <nl> + let _success : bool = Lwt_message_queue . push t . messages_to_send message in <nl> + ( ) <nl> + | Failed_to_initialize _ - > <nl> + ( * This is a terminal state . Don ' t waste memory queueing up messages that <nl> + can never be sent . * ) <nl> + ( ) <nl> + <nl> + let notify_file_changed ( t : t ) ( path : Path . t ) : unit = <nl> + push_message t ( Message_wrapper ( ClientIdeMessage . File_changed path ) ) <nl> + <nl> + ( * Simplified function for handling initialization cases * ) <nl> + let rpc ( t : t ) ( rpc_message : ' a ClientIdeMessage . t ) <nl> + : ( ' a , string ) Lwt_result . t = <nl> + let { messages_to_send ; response_emitter ; _ } = t in <nl> + match t . state with <nl> + | Uninitialized { wait_for_initialization = false } - > <nl> + Lwt . return_error " IDE service has not yet been initialized " <nl> + | Failed_to_initialize error_message - > <nl> + Lwt . return_error ( Printf . sprintf <nl> + " IDE service failed to initialize : % s " error_message ) <nl> + | Uninitialized { wait_for_initialization = true } - > <nl> + let % lwt ( ) = wait_for_initialization t in <nl> + let % lwt result = do_rpc <nl> + ~ message : rpc_message <nl> + ~ messages_to_send <nl> + ~ response_emitter <nl> + in <nl> + Lwt . return result <nl> + | Initialized - > <nl> + let % lwt result = do_rpc <nl> + ~ message : rpc_message <nl> + ~ messages_to_send <nl> + ~ response_emitter <nl> + in <nl> + Lwt . return result <nl> + <nl> + let get_notifications ( t : t ) : notification_emitter = <nl> + t . notification_emitter <nl> new file mode 100644 <nl> index 00000000000 . . 8e45b9eb93e <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / client / ide_service / clientIdeService . mli <nl> <nl> + ( * <nl> + * Copyright ( c ) 2019 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + type t <nl> + ( * * Provides IDE services in the client , without an instance of hh_server <nl> + running . <nl> + <nl> + Basic approach : we load the naming table to give us just enough symbol <nl> + information to provide IDE services for just the files you ' re looking at . 
When <nl> + we need to look up declarations to service an IDE query , we parse and typecheck <nl> + the files containing those declarations on - demand , then answer your IDE query . <nl> + * ) <nl> + <nl> + val make : unit - > t <nl> + ( * * Create an uninitialized IDE service . All queries made to this service will <nl> + fail immediately , unless otherwise requested in the initialization procedure . * ) <nl> + <nl> + val initialize_from_saved_state : <nl> + t - > <nl> + root : Path . t - > <nl> + naming_table_saved_state_path : Path . t option - > <nl> + wait_for_initialization : bool - > <nl> + ( unit , string ) Lwt_result . t <nl> + ( * * Request that the IDE service initialize from the saved state . Queries made <nl> + to the service will fail until it is done initializing , unless <nl> + [ wait_for_initialization ] is [ true ] , in which case queries made to the service <nl> + will block until the initializing is complete . * ) <nl> + <nl> + val serve : t - > unit Lwt . t <nl> + ( * * Pump the message loop for the IDE service . Exits once the IDE service has <nl> + been [ destroy ] ed . * ) <nl> + <nl> + val destroy : t - > unit Lwt . t <nl> + ( * * Clean up any resources held by the IDE service ( such as the message loop and <nl> + background processes ) . * ) <nl> + <nl> + val notify_file_changed : t - > Path . t - > unit <nl> + ( * * The caller is expected to call this function to notify the IDE service <nl> + whenever a Hack file changes on disk , so that it can update its indexes <nl> + appropriately . * ) <nl> + <nl> + val rpc : t - > ' response ClientIdeMessage . t - > ( ' response , string ) Lwt_result . t <nl> + ( * * Make an RPC call to the IDE service . * ) <nl> + <nl> + val get_notifications : t - > ClientIdeMessage . notification Lwt_message_queue . t <nl> + ( * * Get a handle to the stream of notifications sent by the IDE service . These <nl> + notifications may be sent even during RPC requests , and so should be processed <nl> + asynchronously . * ) <nl> new file mode 100644 <nl> index 00000000000 . . 38dd4dd3610 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / client / ide_service / dune <nl> <nl> + ( library <nl> + ( name client_ide_service ) <nl> + ( wrapped false ) <nl> + ( modules <nl> + clientIdeDaemon <nl> + clientIdeIncremental <nl> + clientIdeService ) <nl> + ( libraries <nl> + client_ide_message <nl> + lwt <nl> + server <nl> + server_env <nl> + state_loader <nl> + sys_utils ) <nl> + ( preprocess <nl> + ( pps lwt_ppx ) ) ) <nl> + <nl> + ( library <nl> + ( name client_ide_message ) <nl> + ( wrapped false ) <nl> + ( modules clientIdeMessage ) <nl> + ( libraries <nl> + facts <nl> + lwt_utils <nl> + server_command_types <nl> + sys_utils ) <nl> + ( preprocess <nl> + ( pps lwt_ppx ) ) ) <nl> deleted file mode 100644 <nl> index 08b6b8958df . . 00000000000 <nl> mmm a / hphp / hack / src / stubs / clientIdeService . ml <nl> ppp / dev / null <nl> <nl> - type t = unit <nl> - let make ( ) = ( ) <nl> - let initialize_from_saved_state ( ) <nl> - ~ root : _ ~ naming_table_saved_state_path : _ ~ wait_for_initialization : _ = <nl> - Lwt . return_error " Serverless IDE not available in open - source build " <nl> - let serve ( ) = Lwt . return_unit <nl> - let destroy ( ) = Lwt . return_unit <nl> - let notify_file_changed ( ) _ = ( ) <nl> - let rpc ( ) _ = <nl> - Lwt . 
return_error " Serverless IDE not available in open - source build " <nl> - let get_notifications ( ) = <nl> - failwith " Serverless IDE not available in open - source build " <nl> mmm a / hphp / hack / src / stubs / dune <nl> ppp b / hphp / hack / src / stubs / dune <nl> <nl> ( modules <nl> buildMain ) ) <nl> <nl> - ( library <nl> - ( name client_ide_service ) <nl> - ( wrapped false ) <nl> - ( modules <nl> - clientIdeService ) <nl> - ( libraries <nl> - lwt ) ) <nl> - <nl> ( library <nl> ( name debug_stubs ) <nl> ( wrapped false ) <nl> <nl> state_loader <nl> saved_state_loader <nl> state_loader_futures <nl> + state_loader_lwt <nl> xdb ) <nl> ( libraries <nl> hg <nl> new file mode 100644 <nl> index 00000000000 . . 1f6794abf10 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / stubs / state_loader_lwt . ml <nl> <nl> + ( * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + let load ~ repo : _ ~ saved_state_type : _ = failwith " Not implemented " <nl> mmm a / hphp / hack / src / utils / dune <nl> ppp b / hphp / hack / src / utils / dune <nl> <nl> ( libraries <nl> sys_utils ) ) <nl> <nl> + ( library <nl> + ( name temp_file_lwt ) <nl> + ( wrapped false ) <nl> + ( modules <nl> + tempfile_lwt ) <nl> + ( preprocess ( pps lwt_ppx ppx_deriving . std ppx_deriving . enum ) ) ; See T41851208 <nl> + ( libraries <nl> + lwt_utils <nl> + sys_utils ) ) <nl> + <nl> ( library <nl> ( name mutable_accumulator ) <nl> ( wrapped false ) <nl>
|
Open - source serverless IDE
|
facebook/hhvm
|
09fb55de3f6e91a22cf623e12a0ca73473eef0de
|
2019-08-08T20:27:00Z
|
mmm a / lib / SILGen / Cleanup . h <nl> ppp b / lib / SILGen / Cleanup . h <nl> class LLVM_LIBRARY_VISIBILITY CleanupManager { <nl> friend class CleanupStateRestorationScope ; <nl> friend class SharedBorrowFormalEvaluation ; <nl> friend class FormalEvaluationScope ; <nl> + friend class PostponedCleanup ; <nl> <nl> public : <nl> CleanupManager ( SILGenFunction & SGF ) <nl> mmm a / lib / SILGen / SILGenApply . cpp <nl> ppp b / lib / SILGen / SILGenApply . cpp <nl> RValue SILGenFunction : : emitApply ( ResultPlanPtr & & resultPlan , <nl> ArrayRef < ManagedValue > args , <nl> const CalleeTypeInfo & calleeTypeInfo , <nl> ApplyOptions options , SGFContext evalContext , <nl> - PostponedCleanup & & postponedCleanup ) { <nl> + PostponedCleanup & postponedCleanup ) { <nl> auto substFnType = calleeTypeInfo . substFnType ; <nl> auto substResultType = calleeTypeInfo . substResultType ; <nl> <nl> RValue SILGenFunction : : emitMonomorphicApply ( SILLocation loc , <nl> ArgumentScope argScope ( * this , loc ) ; <nl> PostponedCleanup postpone ( * this ) ; <nl> return emitApply ( std : : move ( resultPlan ) , std : : move ( argScope ) , loc , fn , { } , <nl> - args , calleeTypeInfo , options , evalContext , <nl> - std : : move ( postpone ) ) ; <nl> + args , calleeTypeInfo , options , evalContext , postpone ) ; <nl> } <nl> <nl> / / / Count the number of SILParameterInfos that are needed in order to <nl> CallEmission : : applyNormalCall ( SGFContext C ) { <nl> firstLevelResult . value = SGF . emitApply ( <nl> std : : move ( resultPlan ) , std : : move ( argScope ) , uncurriedLoc . getValue ( ) , mv , <nl> callee . getSubstitutions ( ) , uncurriedArgs , calleeTypeInfo , options , <nl> - uncurriedContext , std : : move ( postpone ) ) ; <nl> + uncurriedContext , postpone ) ; <nl> firstLevelResult . foreignSelf = calleeTypeInfo . foreignSelf ; <nl> return firstLevelResult ; <nl> } <nl> RValue CallEmission : : applyRemainingCallSites ( RValue & & result , <nl> <nl> result = SGF . emitApply ( std : : move ( resultPtr ) , std : : move ( argScope ) , loc , <nl> functionMV , { } , siteArgs , calleeTypeInfo , <nl> - ApplyOptions : : None , context , std : : move ( postpone ) ) ; <nl> + ApplyOptions : : None , context , postpone ) ; <nl> } <nl> <nl> return std : : move ( result ) ; <nl> SILGenFunction : : emitApplyOfLibraryIntrinsic ( SILLocation loc , <nl> PostponedCleanup postpone ( * this ) ; <nl> return emitApply ( std : : move ( resultPlan ) , std : : move ( argScope ) , loc , mv , subs , <nl> finalArgs , calleeTypeInfo , ApplyOptions : : None , ctx , <nl> - std : : move ( postpone ) ) ; <nl> + postpone ) ; <nl> } <nl> <nl> static StringRef <nl> mmm a / lib / SILGen / SILGenBridging . cpp <nl> ppp b / lib / SILGen / SILGenBridging . cpp <nl> emitBridgeObjectiveCToNative ( SILGenFunction & SGF , <nl> ResultPlanBuilder : : computeResultPlan ( SGF , calleeTypeInfo , loc , context ) ; <nl> ArgumentScope argScope ( SGF , loc ) ; <nl> PostponedCleanup postpone ( SGF ) ; <nl> - RValue result = SGF . emitApply ( <nl> - std : : move ( resultPlan ) , std : : move ( argScope ) , loc , <nl> - ManagedValue : : forUnmanaged ( witnessRef ) , subs , <nl> - { objcValue , ManagedValue : : forUnmanaged ( metatypeValue ) } , calleeTypeInfo , <nl> - ApplyOptions : : None , context , std : : move ( postpone ) ) ; <nl> + RValue result = <nl> + SGF . 
emitApply ( std : : move ( resultPlan ) , std : : move ( argScope ) , loc , <nl> + ManagedValue : : forUnmanaged ( witnessRef ) , subs , <nl> + { objcValue , ManagedValue : : forUnmanaged ( metatypeValue ) } , <nl> + calleeTypeInfo , ApplyOptions : : None , context , postpone ) ; <nl> return std : : move ( result ) . getAsSingleValue ( SGF , loc ) ; <nl> } <nl> <nl> void SILGenFunction : : emitForeignToNativeThunk ( SILDeclRef thunk ) { <nl> ManagedValue resultMV = <nl> emitApply ( std : : move ( resultPlan ) , std : : move ( argScope ) , fd , <nl> ManagedValue : : forUnmanaged ( fn ) , subs , args , calleeTypeInfo , <nl> - ApplyOptions : : None , context , std : : move ( postpone ) ) <nl> + ApplyOptions : : None , context , postpone ) <nl> . getAsSingleValue ( * this , fd ) ; <nl> <nl> if ( indirectResult ) { <nl> mmm a / lib / SILGen / SILGenDecl . cpp <nl> ppp b / lib / SILGen / SILGenDecl . cpp <nl> CleanupHandle SILGenFunction : : enterDestroyCleanup ( SILValue valueOrAddr ) { <nl> } <nl> <nl> PostponedCleanup : : PostponedCleanup ( SILGenFunction & sgf , bool recursive ) <nl> - : SGF ( sgf ) , previouslyActiveCleanup ( sgf . CurrentlyActivePostponedCleanup ) , <nl> + : depth ( sgf . Cleanups . innermostScope ) , SGF ( sgf ) , <nl> + previouslyActiveCleanup ( sgf . CurrentlyActivePostponedCleanup ) , <nl> active ( true ) , applyRecursively ( recursive ) { <nl> SGF . CurrentlyActivePostponedCleanup = this ; <nl> } <nl> + <nl> PostponedCleanup : : PostponedCleanup ( SILGenFunction & sgf ) <nl> - : SGF ( sgf ) , previouslyActiveCleanup ( sgf . CurrentlyActivePostponedCleanup ) , <nl> + : depth ( sgf . Cleanups . innermostScope ) , SGF ( sgf ) , <nl> + previouslyActiveCleanup ( sgf . CurrentlyActivePostponedCleanup ) , <nl> active ( true ) , <nl> applyRecursively ( previouslyActiveCleanup <nl> ? previouslyActiveCleanup - > applyRecursively <nl> PostponedCleanup : : ~ PostponedCleanup ( ) { <nl> } <nl> <nl> void PostponedCleanup : : end ( ) { <nl> + if ( previouslyActiveCleanup & & applyRecursively & & <nl> + previouslyActiveCleanup - > applyRecursively ) { <nl> + previouslyActiveCleanup - > deferredCleanups . append ( deferredCleanups . begin ( ) , <nl> + deferredCleanups . end ( ) ) ; <nl> + } <nl> + <nl> SGF . CurrentlyActivePostponedCleanup = previouslyActiveCleanup ; <nl> active = false ; <nl> } <nl> mmm a / lib / SILGen / SILGenExpr . cpp <nl> ppp b / lib / SILGen / SILGenExpr . cpp <nl> void SILGenFunction : : emitExprInto ( Expr * E , Initialization * I , <nl> / / Handle the special case of copying an lvalue . <nl> if ( auto load = dyn_cast < LoadExpr > ( E ) ) { <nl> FormalEvaluationScope writeback ( * this ) ; <nl> + PostponedCleanup postpone ( * this ) ; <nl> auto lv = emitLValue ( load - > getSubExpr ( ) , AccessKind : : Read ) ; <nl> emitCopyLValueInto ( E , std : : move ( lv ) , I ) ; <nl> return ; <nl> emitRValueForDecl ( SILLocation loc , ConcreteDeclRef declRef , Type ncRefType , <nl> <nl> / / Any writebacks for this access are tightly scoped . <nl> FormalEvaluationScope scope ( * this ) ; <nl> + PostponedCleanup postpone ( * this ) ; <nl> <nl> / / If this is a decl that we have an lvalue for , produce and return it . <nl> ValueDecl * decl = declRef . getDecl ( ) ; <nl> RValue RValueEmitter : : visitStringLiteralExpr ( StringLiteralExpr * E , <nl> RValue RValueEmitter : : visitLoadExpr ( LoadExpr * E , SGFContext C ) { <nl> / / Any writebacks here are tightly scoped . 
<nl> FormalEvaluationScope writeback ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> LValue lv = SGF . emitLValue ( E - > getSubExpr ( ) , AccessKind : : Read ) ; <nl> / / We can ' t load at immediate + 0 from the lvalue without deeper analysis , <nl> / / since the access will be immediately ended and might invalidate the value <nl> RValue RValueEmitter : : visitMemberRefExpr ( MemberRefExpr * E , SGFContext C ) { <nl> <nl> / / Any writebacks for this access are tightly scoped . <nl> FormalEvaluationScope scope ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> <nl> LValue lv = SGF . emitLValue ( E , AccessKind : : Read ) ; <nl> / / We can ' t load at + 0 without further analysis , since the formal access into <nl> visitDotSyntaxBaseIgnoredExpr ( DotSyntaxBaseIgnoredExpr * E , SGFContext C ) { <nl> RValue RValueEmitter : : visitSubscriptExpr ( SubscriptExpr * E , SGFContext C ) { <nl> / / Any writebacks for this access are tightly scoped . <nl> FormalEvaluationScope scope ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> <nl> LValue lv = SGF . emitLValue ( E , AccessKind : : Read ) ; <nl> / / We can ' t load at + 0 without further analysis , since the formal access into <nl> SILGenFunction : : emitApplyOfDefaultArgGenerator ( SILLocation loc , <nl> ArgumentScope argScope ( * this , loc ) ; <nl> PostponedCleanup postpone ( * this ) ; <nl> return emitApply ( std : : move ( resultPtr ) , std : : move ( argScope ) , loc , fnRef , subs , <nl> - { } , calleeTypeInfo , ApplyOptions : : None , C , <nl> - std : : move ( postpone ) ) ; <nl> + { } , calleeTypeInfo , ApplyOptions : : None , C , postpone ) ; <nl> } <nl> <nl> RValue SILGenFunction : : emitApplyOfStoredPropertyInitializer ( <nl> RValue SILGenFunction : : emitApplyOfStoredPropertyInitializer ( <nl> ArgumentScope argScope ( * this , loc ) ; <nl> PostponedCleanup postpone ( * this ) ; <nl> return emitApply ( std : : move ( resultPlan ) , std : : move ( argScope ) , loc , fnRef , subs , <nl> - { } , calleeTypeInfo , ApplyOptions : : None , C , <nl> - std : : move ( postpone ) ) ; <nl> + { } , calleeTypeInfo , ApplyOptions : : None , C , postpone ) ; <nl> } <nl> <nl> static void emitTupleShuffleExprInto ( RValueEmitter & emitter , <nl> static SILValue emitMetatypeOfDelegatingInitExclusivelyBorrowedSelf ( <nl> <nl> Scope S ( SGF , loc ) ; <nl> Optional < FormalEvaluationScope > FES ; <nl> + <nl> / / If we have not exclusively borrowed self , we need to do so now . <nl> if ( SGF . SelfInitDelegationState = = SILGenFunction : : WillExclusiveBorrowSelf ) { <nl> / / We need to use a full scope here to ensure that any underlying <nl> getOrCreateKeyPathEqualsAndHash ( SILGenFunction & SGF , <nl> loc , ManagedValue : : forUnmanaged ( equalsWitness ) , <nl> equatableSub , { lhsArg , rhsArg , metatyValue } , <nl> equalsInfo , ApplyOptions : : None , SGFContext ( ) , <nl> - std : : move ( postpone ) ) <nl> + postpone ) <nl> . getUnmanagedSingleValue ( subSGF , loc ) ; <nl> } <nl> <nl> getOrCreateKeyPathEqualsAndHash ( SILGenFunction & SGF , <nl> . emitApply ( std : : move ( hashResultPlan ) , std : : move ( argScope ) , loc , <nl> ManagedValue : : forUnmanaged ( hashWitness ) , hashableSub , <nl> { arg } , hashInfo , ApplyOptions : : None , SGFContext ( ) , <nl> - std : : move ( postpone ) ) <nl> + postpone ) <nl> . 
getUnmanagedSingleValue ( subSGF , loc ) ; <nl> } <nl> } <nl> computeNewSelfForRebindSelfInConstructorExpr ( SILGenFunction & SGF , <nl> / / Get newSelf , forward the cleanup for newSelf and clean everything else <nl> / / up . <nl> FormalEvaluationScope Scope ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> ManagedValue newSelfWithCleanup = <nl> SGF . emitRValueAsSingleValue ( E - > getSubExpr ( ) ) ; <nl> <nl> static void emitSimpleAssignment ( SILGenFunction & SGF , SILLocation loc , <nl> if ( dest - > getType ( ) - > isEqual ( srcLoad - > getSubExpr ( ) - > getType ( ) ) ) { <nl> assert ( ! dest - > getType ( ) - > is < TupleType > ( ) ) ; <nl> FormalEvaluationScope writeback ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> auto destLV = SGF . emitLValue ( dest , AccessKind : : Write ) ; <nl> auto srcLV = SGF . emitLValue ( srcLoad - > getSubExpr ( ) , AccessKind : : Read ) ; <nl> SGF . emitAssignLValueToLValue ( loc , std : : move ( srcLV ) , std : : move ( destLV ) ) ; <nl> static void emitSimpleAssignment ( SILGenFunction & SGF , SILLocation loc , <nl> } <nl> <nl> FormalEvaluationScope writeback ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> LValue destLV = SGF . emitLValue ( dest , AccessKind : : Write ) ; <nl> SGF . emitAssignToLValue ( loc , src , std : : move ( destLV ) ) ; <nl> return ; <nl> } <nl> <nl> FormalEvaluationScope writeback ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> <nl> / / Produce a flattened queue of LValues . <nl> SmallVector < Optional < LValue > , 4 > destLVs ; <nl> ManagedValue SILGenFunction : : emitLValueToPointer ( SILLocation loc , LValue & & lv , <nl> RValue RValueEmitter : : visitArrayToPointerExpr ( ArrayToPointerExpr * E , <nl> SGFContext C ) { <nl> FormalEvaluationScope writeback ( SGF ) ; <nl> + PostponedCleanup postpone ( SGF ) ; <nl> <nl> auto subExpr = E - > getSubExpr ( ) ; <nl> auto accessInfo = SGF . getArrayAccessInfo ( E - > getType ( ) , <nl> void SILGenFunction : : emitIgnoredExpr ( Expr * E ) { <nl> if ( E - > getType ( ) - > hasLValueType ( ) ) { <nl> / / Emit the l - value , but don ' t perform an access . <nl> FormalEvaluationScope scope ( * this ) ; <nl> + PostponedCleanup postpone ( * this ) ; <nl> emitLValue ( E , AccessKind : : Read ) ; <nl> return ; <nl> } <nl> void SILGenFunction : : emitIgnoredExpr ( Expr * E ) { <nl> / / ( which could materialize a potentially expensive value with cleanups ) . <nl> if ( auto * LE = dyn_cast < LoadExpr > ( E ) ) { <nl> FormalEvaluationScope scope ( * this ) ; <nl> + PostponedCleanup postpone ( * this ) ; <nl> LValue lv = emitLValue ( LE - > getSubExpr ( ) , AccessKind : : Read ) ; <nl> / / If the lvalue is purely physical , then it won ' t have any side effects , <nl> / / and we don ' t need to drill into it . <nl> mmm a / lib / SILGen / SILGenFunction . h <nl> ppp b / lib / SILGen / SILGenFunction . 
h <nl> class LLVM_LIBRARY_VISIBILITY SILGenFunction <nl> SILLocation loc , ManagedValue fn , SubstitutionList subs , <nl> ArrayRef < ManagedValue > args , <nl> const CalleeTypeInfo & calleeTypeInfo , ApplyOptions options , <nl> - SGFContext evalContext , PostponedCleanup & & cleanup ) ; <nl> + SGFContext evalContext , PostponedCleanup & cleanup ) ; <nl> <nl> RValue emitApplyOfDefaultArgGenerator ( SILLocation loc , <nl> ConcreteDeclRef defaultArgsOwner , <nl> class PostponedCleanup { <nl> friend Scope ; <nl> <nl> SmallVector < std : : pair < CleanupHandle , SILValue > , 16 > deferredCleanups ; <nl> + CleanupsDepth depth ; <nl> SILGenFunction & SGF ; <nl> PostponedCleanup * previouslyActiveCleanup ; <nl> bool active ; <nl> class PostponedCleanup { <nl> PostponedCleanup ( SILGenFunction & SGF , bool applyRecursively ) ; <nl> ~ PostponedCleanup ( ) ; <nl> <nl> - PostponedCleanup ( PostponedCleanup & & other ) <nl> - : deferredCleanups ( std : : move ( other . deferredCleanups ) ) , SGF ( other . SGF ) , <nl> - previouslyActiveCleanup ( other . previouslyActiveCleanup ) , <nl> - active ( other . active ) , applyRecursively ( other . applyRecursively ) { <nl> - other . active = false ; <nl> - } <nl> - <nl> void end ( ) ; <nl> <nl> PostponedCleanup ( ) = delete ; <nl> PostponedCleanup ( const PostponedCleanup & ) = delete ; <nl> PostponedCleanup & operator = ( const PostponedCleanup & ) = delete ; <nl> PostponedCleanup & operator = ( PostponedCleanup & & other ) = delete ; <nl> + PostponedCleanup ( PostponedCleanup & & ) = delete ; <nl> } ; <nl> <nl> } / / end namespace Lowering <nl> mmm a / lib / SILGen / Scope . cpp <nl> ppp b / lib / SILGen / Scope . cpp <nl> void Scope : : popImpl ( ) { <nl> / / Propagate the cleanup to the current parent scope . <nl> for ( auto valueToCleanup : cleanupsToPropagateToOuterScope ) { <nl> auto handle = cleanups . SGF . enterDestroyCleanup ( valueToCleanup ) ; <nl> - / / Reapply postponement in parent scope . <nl> - if ( currentlyActivePostponedCleanup - > applyRecursively & & <nl> - currentlyActivePostponedCleanup - > previouslyActiveCleanup ) { <nl> - currentlyActivePostponedCleanup - > previouslyActiveCleanup - > deferredCleanups <nl> - . push_back ( std : : make_pair ( handle , valueToCleanup ) ) ; <nl> + / / Propagate cleanup to parent Scope . <nl> + if ( currentlyActivePostponedCleanup - > depth . getDepth ( ) < depth . getDepth ( ) ) { <nl> + currentlyActivePostponedCleanup - > deferredCleanups . push_back ( <nl> + std : : make_pair ( handle , valueToCleanup ) ) ; <nl> } <nl> } <nl> } <nl>
|
Fix PostponedCleanup and use it in more places .
|
apple/swift
|
025a8b909a47033b5a60d53c3edf8b32b140ce77
|
2018-02-13T12:19:59Z
|
mmm a / tests / c_api_test / test . py <nl> ppp b / tests / c_api_test / test . py <nl> def c_str ( string ) : <nl> def test_load_from_file ( filename , reference ) : <nl> ref = None <nl> if reference ! = None : <nl> - ref = ctypes . byref ( reference ) <nl> + ref = reference <nl> handle = ctypes . c_void_p ( ) <nl> LIB . LGBM_DatasetCreateFromFile ( c_str ( filename ) , <nl> c_str ( ' max_bin = 15 ' ) , <nl> def test_load_from_csr ( filename , reference ) : <nl> handle = ctypes . c_void_p ( ) <nl> ref = None <nl> if reference ! = None : <nl> - ref = ctypes . byref ( reference ) <nl> + ref = reference <nl> <nl> LIB . LGBM_DatasetCreateFromCSR ( c_array ( ctypes . c_int , csr . indptr ) , <nl> dtype_int32 , <nl> def test_load_from_csc ( filename , reference ) : <nl> handle = ctypes . c_void_p ( ) <nl> ref = None <nl> if reference ! = None : <nl> - ref = ctypes . byref ( reference ) <nl> + ref = reference <nl> <nl> LIB . LGBM_DatasetCreateFromCSC ( c_array ( ctypes . c_int , csr . indptr ) , <nl> dtype_int32 , <nl> def test_load_from_mat ( filename , reference ) : <nl> handle = ctypes . c_void_p ( ) <nl> ref = None <nl> if reference ! = None : <nl> - ref = ctypes . byref ( reference ) <nl> + ref = reference <nl> <nl> LIB . LGBM_DatasetCreateFromMat ( data . ctypes . data_as ( ctypes . POINTER ( ctypes . c_void_p ) ) , <nl> dtype_float64 , <nl>
|
fix test c_api
|
microsoft/LightGBM
|
1cb3aa4ed369ad34ca05e8f0bc38d27b81007880
|
2016-12-23T06:35:05Z
|
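The ctypes fix in the record above turns on the calling convention of `LGBM_DatasetCreateFromFile`: the `reference` argument is the dataset handle itself (a `c_void_p`) passed by value, and only the output handle slot is passed by pointer, so wrapping the reference in `ctypes.byref` hands the C side a pointer-to-pointer instead of the handle. A minimal sketch of that convention follows; the C signature in the comment and the `lib_lightgbm.so` library name are assumptions for illustration, not taken from this record.

```python
import ctypes

# Assumed C signature (illustrative, not taken from this record):
#   int LGBM_DatasetCreateFromFile(const char* filename,
#                                  const char* parameters,
#                                  const DatasetHandle reference,  /* handle by value */
#                                  DatasetHandle* out);            /* output by pointer */
LIB = ctypes.cdll.LoadLibrary("lib_lightgbm.so")  # hypothetical library path


def create_dataset(filename, reference=None):
    """Create a dataset, optionally binning it against an existing reference dataset."""
    handle = ctypes.c_void_p()
    ret = LIB.LGBM_DatasetCreateFromFile(
        ctypes.c_char_p(filename.encode("utf-8")),
        ctypes.c_char_p(b"max_bin=15"),
        reference,             # c_void_p handle (or None for NULL), passed as-is
        ctypes.byref(handle),  # only the output handle needs byref
    )
    if ret != 0:
        raise RuntimeError("LGBM_DatasetCreateFromFile failed")
    return handle
```

Calling this twice and passing the first returned handle as `reference` for the second call mirrors what the test in the diff exercises: the second dataset reuses the first one's bin mappers, which only works when the raw handle, not a pointer to it, reaches the C API.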
mmm a / templates / lua - template - runtime / frameworks / runtime - src / Classes / runtime / Runtime . cpp <nl> ppp b / templates / lua - template - runtime / frameworks / runtime - src / Classes / runtime / Runtime . cpp <nl> void startScript ( string strDebugArg ) <nl> engine - > executeScriptFile ( ConfigParser : : getInstance ( ) - > getEntryFile ( ) . c_str ( ) ) ; <nl> } <nl> <nl> - static bool resetLuaModule ( string fileName ) <nl> + static void resetLuaModule ( const string & fileName ) <nl> { <nl> if ( fileName . empty ( ) ) <nl> { <nl> - CCLOG ( " fileName is null " ) ; <nl> - return false ; <nl> + return ; <nl> } <nl> auto engine = LuaEngine : : getInstance ( ) ; <nl> LuaStack * luaStack = engine - > getLuaStack ( ) ; <nl> static bool resetLuaModule ( string fileName ) <nl> tableKey = replaceAll ( tableKey , " \ \ " , " / " ) ; <nl> tableKey . append ( " . lua " ) ; <nl> found = fileName . rfind ( tableKey ) ; <nl> - if ( 0 = = found | | ( found ! = std : : string : : npos & & fileName . at ( found - 1 ) = = ' / ' ) ) <nl> + if ( 0 = = found | | ( found ! = std : : string : : npos & & fileName . at ( found - 1 ) = = ' / ' ) ) <nl> { <nl> lua_pushstring ( stack , key . c_str ( ) ) ; <nl> lua_pushnil ( stack ) ; <nl> static bool resetLuaModule ( string fileName ) <nl> lua_pop ( stack , 1 ) ; <nl> } <nl> lua_pop ( stack , 2 ) ; <nl> - return true ; <nl> } <nl> + <nl> bool reloadScript ( const string & file ) <nl> { <nl> auto director = Director : : getInstance ( ) ; <nl> bool reloadScript ( const string & file ) <nl> } <nl> FileUtils : : getInstance ( ) - > purgeCachedEntries ( ) ; <nl> string modulefile = file ; <nl> - if ( ! resetLuaModule ( modulefile ) ) <nl> + <nl> + if ( ! modulefile . empty ( ) ) <nl> + { <nl> + resetLuaModule ( modulefile ) ; <nl> + } <nl> + else <nl> { <nl> modulefile = ConfigParser : : getInstance ( ) - > getEntryFile ( ) . c_str ( ) ; <nl> } <nl> + <nl> auto engine = LuaEngine : : getInstance ( ) ; <nl> LuaStack * luaStack = engine - > getLuaStack ( ) ; <nl> std : : string require = " require \ ' " + modulefile + " \ ' " ; <nl>
|
Merge pull request from cocoscodeide / v3
|
cocos2d/cocos2d-x
|
33fa2fab8434731c3925d7124848c4108eaa834b
|
2014-10-08T09:46:56Z
|
mmm a / tensorflow / cc / ops / cc_op_gen . cc <nl> ppp b / tensorflow / cc / ops / cc_op_gen . cc <nl> string ToGuard ( const std : : string & path ) { <nl> void WriteCCOps ( const OpList & ops , const std : : string & dot_h_fname , <nl> const std : : string & dot_cc_fname ) { <nl> Env * env = Env : : Default ( ) ; <nl> - WritableFile * h = nullptr ; <nl> - WritableFile * cc = nullptr ; <nl> + std : : unique_ptr < WritableFile > h ; <nl> + std : : unique_ptr < WritableFile > cc ; <nl> TF_CHECK_OK ( env - > NewWritableFile ( dot_h_fname , & h ) ) ; <nl> TF_CHECK_OK ( env - > NewWritableFile ( dot_cc_fname , & cc ) ) ; <nl> <nl> namespace ops { <nl> TF_CHECK_OK ( s ) ; <nl> <nl> for ( const auto & op_def : ops . op ( ) ) { <nl> - WriteCCOp ( op_def , h , cc ) ; <nl> + WriteCCOp ( op_def , h . get ( ) , cc . get ( ) ) ; <nl> } <nl> <nl> / / . h Footer <nl> mmm a / tensorflow / contrib / ffmpeg / decode_audio_op . cc <nl> ppp b / tensorflow / contrib / ffmpeg / decode_audio_op . cc <nl> const char * kValidFileFormats [ ] = { " mp3 " , " ogg " , " wav " } ; <nl> / / Writes binary data to a file . <nl> Status WriteFile ( const string & filename , tensorflow : : StringPiece contents ) { <nl> Env & env = * Env : : Default ( ) ; <nl> - WritableFile * file = nullptr ; <nl> + std : : unique_ptr < WritableFile > file ; <nl> TF_RETURN_IF_ERROR ( env . NewWritableFile ( filename , & file ) ) ; <nl> - std : : unique_ptr < WritableFile > file_deleter ( file ) ; <nl> TF_RETURN_IF_ERROR ( file - > Append ( contents ) ) ; <nl> TF_RETURN_IF_ERROR ( file - > Close ( ) ) ; <nl> return Status : : OK ( ) ; <nl> mmm a / tensorflow / core / kernels / immutable_constant_op . cc <nl> ppp b / tensorflow / core / kernels / immutable_constant_op . cc <nl> ImmutableConstantOp : : ReadOnlyMemoryRegionAllocator : : <nl> <nl> Status ImmutableConstantOp : : ReadOnlyMemoryRegionAllocator : : InitWithMemoryRegion ( <nl> const string & name , Env * env ) { <nl> - ReadOnlyMemoryRegion * region_ptr ; <nl> - const auto status = env - > NewReadOnlyMemoryRegionFromFile ( name , & region_ptr ) ; <nl> + const auto status = <nl> + env - > NewReadOnlyMemoryRegionFromFile ( name , & memory_region_ ) ; <nl> if ( ! status . ok ( ) ) { <nl> return status ; <nl> } <nl> - memory_region_ . reset ( region_ptr ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / kernels / immutable_constant_op_test . cc <nl> ppp b / tensorflow / core / kernels / immutable_constant_op_test . cc <nl> class TestFileSystem : public NullFileSystem { <nl> public : <nl> ~ TestFileSystem ( ) override = default ; <nl> Status NewReadOnlyMemoryRegionFromFile ( <nl> - const string & fname , ReadOnlyMemoryRegion * * result ) override { <nl> + const string & fname , <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > * result ) override { <nl> float val = 0 ; <nl> / / For the tests create in - memory regions with float values equal to the <nl> / / first letter of the region name . 
<nl> class TestFileSystem : public NullFileSystem { <nl> <nl> auto region = new TestReadOnlyMemoryRegion ( kTestTensorSizeBytes ) ; <nl> std : : fill_n ( region - > GetWritableDataStart ( ) , kTestTensorSize , val ) ; <nl> - * result = region ; <nl> + result - > reset ( region ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> } ; <nl> TEST ( ImmutableConstantOpTest , ExecutionError ) { <nl> Status CreateTempFile ( Env * env , float value , uint64 size , string * filename ) { <nl> const string dir = testing : : TmpDir ( ) ; <nl> * filename = io : : JoinPath ( dir , strings : : StrCat ( " file_ " , value ) ) ; <nl> - WritableFile * file ; <nl> + std : : unique_ptr < WritableFile > file ; <nl> TF_RETURN_IF_ERROR ( env - > NewWritableFile ( * filename , & file ) ) ; <nl> - std : : unique_ptr < WritableFile > file_unique_ptr ( file ) ; <nl> for ( uint64 i = 0 ; i < size ; + + i ) { <nl> StringPiece sp ; <nl> sp . set ( & value , sizeof ( value ) ) ; <nl> mmm a / tensorflow / core / platform / cloud / gcs_file_system . cc <nl> ppp b / tensorflow / core / platform / cloud / gcs_file_system . cc <nl> GcsFileSystem : : GcsFileSystem ( <nl> : auth_provider_ ( std : : move ( auth_provider ) ) , <nl> http_request_factory_ ( std : : move ( http_request_factory ) ) { } <nl> <nl> - Status GcsFileSystem : : NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) { <nl> + Status GcsFileSystem : : NewRandomAccessFile ( <nl> + const string & fname , std : : unique_ptr < RandomAccessFile > * result ) { <nl> string bucket , object ; <nl> TF_RETURN_IF_ERROR ( ParseGcsPath ( fname , & bucket , & object ) ) ; <nl> - * result = new GcsRandomAccessFile ( bucket , object , auth_provider_ . get ( ) , <nl> - http_request_factory_ . get ( ) ) ; <nl> + result - > reset ( new GcsRandomAccessFile ( bucket , object , auth_provider_ . get ( ) , <nl> + http_request_factory_ . get ( ) ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status GcsFileSystem : : NewWritableFile ( const string & fname , <nl> - WritableFile * * result ) { <nl> + std : : unique_ptr < WritableFile > * result ) { <nl> string bucket , object ; <nl> TF_RETURN_IF_ERROR ( ParseGcsPath ( fname , & bucket , & object ) ) ; <nl> - * result = new GcsWritableFile ( bucket , object , auth_provider_ . get ( ) , <nl> - http_request_factory_ . get ( ) ) ; <nl> + result - > reset ( new GcsWritableFile ( bucket , object , auth_provider_ . get ( ) , <nl> + http_request_factory_ . get ( ) ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> / / Reads the file from GCS in chunks and stores it in a tmp file , <nl> / / which is then passed to GcsWritableFile . <nl> Status GcsFileSystem : : NewAppendableFile ( const string & fname , <nl> - WritableFile * * result ) { <nl> - RandomAccessFile * reader_ptr ; <nl> - TF_RETURN_IF_ERROR ( NewRandomAccessFile ( fname , & reader_ptr ) ) ; <nl> - std : : unique_ptr < RandomAccessFile > reader ( reader_ptr ) ; <nl> + std : : unique_ptr < WritableFile > * result ) { <nl> + std : : unique_ptr < RandomAccessFile > reader ; <nl> + TF_RETURN_IF_ERROR ( NewRandomAccessFile ( fname , & reader ) ) ; <nl> std : : unique_ptr < char [ ] > buffer ( new char [ kBufferSize ] ) ; <nl> Status status ; <nl> uint64 offset = 0 ; <nl> Status GcsFileSystem : : NewAppendableFile ( const string & fname , <nl> / / Create a writable file and pass the old content to it . 
<nl> string bucket , object ; <nl> TF_RETURN_IF_ERROR ( ParseGcsPath ( fname , & bucket , & object ) ) ; <nl> - * result = <nl> - new GcsWritableFile ( bucket , object , auth_provider_ . get ( ) , <nl> - old_content_filename , http_request_factory_ . get ( ) ) ; <nl> + result - > reset ( new GcsWritableFile ( bucket , object , auth_provider_ . get ( ) , <nl> + old_content_filename , <nl> + http_request_factory_ . get ( ) ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status GcsFileSystem : : NewReadOnlyMemoryRegionFromFile ( <nl> - const string & fname , ReadOnlyMemoryRegion * * result ) { <nl> + const string & fname , std : : unique_ptr < ReadOnlyMemoryRegion > * result ) { <nl> uint64 size ; <nl> TF_RETURN_IF_ERROR ( GetFileSize ( fname , & size ) ) ; <nl> std : : unique_ptr < char [ ] > data ( new char [ size ] ) ; <nl> <nl> - RandomAccessFile * file ; <nl> + std : : unique_ptr < RandomAccessFile > file ; <nl> TF_RETURN_IF_ERROR ( NewRandomAccessFile ( fname , & file ) ) ; <nl> - std : : unique_ptr < RandomAccessFile > file_ptr ( file ) ; <nl> <nl> StringPiece piece ; <nl> TF_RETURN_IF_ERROR ( file - > Read ( 0 , size , & piece , data . get ( ) ) ) ; <nl> <nl> - * result = new GcsReadOnlyMemoryRegion ( std : : move ( data ) , size ) ; <nl> + result - > reset ( new GcsReadOnlyMemoryRegion ( std : : move ( data ) , size ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / platform / cloud / gcs_file_system . h <nl> ppp b / tensorflow / core / platform / cloud / gcs_file_system . h <nl> class GcsFileSystem : public FileSystem { <nl> GcsFileSystem ( std : : unique_ptr < AuthProvider > auth_provider , <nl> std : : unique_ptr < HttpRequest : : Factory > http_request_factory ) ; <nl> <nl> - Status NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) override ; <nl> + Status NewRandomAccessFile ( <nl> + const string & filename , <nl> + std : : unique_ptr < RandomAccessFile > * result ) override ; <nl> <nl> - Status NewWritableFile ( const string & fname , WritableFile * * result ) override ; <nl> + Status NewWritableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> <nl> - Status NewAppendableFile ( const string & fname , WritableFile * * result ) override ; <nl> + Status NewAppendableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> <nl> Status NewReadOnlyMemoryRegionFromFile ( <nl> - const string & fname , ReadOnlyMemoryRegion * * result ) override ; <nl> + const string & filename , <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > * result ) override ; <nl> <nl> bool FileExists ( const string & fname ) override ; <nl> <nl> mmm a / tensorflow / core / platform / cloud / gcs_file_system_test . cc <nl> ppp b / tensorflow / core / platform / cloud / gcs_file_system_test . cc <nl> TEST ( GcsFileSystemTest , NewRandomAccessFile ) { <nl> std : : unique_ptr < HttpRequest : : Factory > ( <nl> new FakeHttpRequestFactory ( & requests ) ) ) ; <nl> <nl> - RandomAccessFile * file_ptr ; <nl> - TF_EXPECT_OK ( <nl> - fs . NewRandomAccessFile ( " gs : / / bucket / random_access . txt " , & file_ptr ) ) ; <nl> - std : : unique_ptr < RandomAccessFile > file ( file_ptr ) ; <nl> + std : : unique_ptr < RandomAccessFile > file ; <nl> + TF_EXPECT_OK ( fs . NewRandomAccessFile ( " gs : / / bucket / random_access . 
txt " , & file ) ) ; <nl> <nl> char scratch [ 6 ] ; <nl> StringPiece result ; <nl> TEST ( GcsFileSystemTest , NewWritableFile ) { <nl> std : : unique_ptr < HttpRequest : : Factory > ( <nl> new FakeHttpRequestFactory ( & requests ) ) ) ; <nl> <nl> - WritableFile * file_ptr ; <nl> - TF_EXPECT_OK ( fs . NewWritableFile ( " gs : / / bucket / path / writeable . txt " , & file_ptr ) ) ; <nl> - std : : unique_ptr < WritableFile > file ( file_ptr ) ; <nl> + std : : unique_ptr < WritableFile > file ; <nl> + TF_EXPECT_OK ( fs . NewWritableFile ( " gs : / / bucket / path / writeable . txt " , & file ) ) ; <nl> <nl> TF_EXPECT_OK ( file - > Append ( " content1 , " ) ) ; <nl> TF_EXPECT_OK ( file - > Append ( " content2 " ) ) ; <nl> TEST ( GcsFileSystemTest , NewAppendableFile ) { <nl> std : : unique_ptr < HttpRequest : : Factory > ( <nl> new FakeHttpRequestFactory ( & requests ) ) ) ; <nl> <nl> - WritableFile * file_ptr ; <nl> - TF_EXPECT_OK ( <nl> - fs . NewAppendableFile ( " gs : / / bucket / path / appendable . txt " , & file_ptr ) ) ; <nl> - std : : unique_ptr < WritableFile > file ( file_ptr ) ; <nl> + std : : unique_ptr < WritableFile > file ; <nl> + TF_EXPECT_OK ( fs . NewAppendableFile ( " gs : / / bucket / path / appendable . txt " , & file ) ) ; <nl> <nl> TF_EXPECT_OK ( file - > Append ( " content2 " ) ) ; <nl> TF_EXPECT_OK ( file - > Close ( ) ) ; <nl> TEST ( GcsFileSystemTest , NewReadOnlyMemoryRegionFromFile ) { <nl> std : : unique_ptr < HttpRequest : : Factory > ( <nl> new FakeHttpRequestFactory ( & requests ) ) ) ; <nl> <nl> - ReadOnlyMemoryRegion * region_ptr ; <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > region ; <nl> TF_EXPECT_OK ( fs . NewReadOnlyMemoryRegionFromFile ( <nl> - " gs : / / bucket / path / random_access . txt " , & region_ptr ) ) ; <nl> - std : : unique_ptr < ReadOnlyMemoryRegion > region ( region_ptr ) ; <nl> + " gs : / / bucket / path / random_access . txt " , & region ) ) ; <nl> <nl> EXPECT_EQ ( content , StringPiece ( reinterpret_cast < const char * > ( region - > data ( ) ) , <nl> region - > length ( ) ) ) ; <nl> mmm a / tensorflow / core / platform / env . cc <nl> ppp b / tensorflow / core / platform / env . 
cc <nl> Status Env : : RegisterFileSystem ( const string & scheme , <nl> } <nl> <nl> Status Env : : NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) { <nl> + std : : unique_ptr < RandomAccessFile > * result ) { <nl> FileSystem * fs ; <nl> TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> return fs - > NewRandomAccessFile ( fname , result ) ; <nl> } <nl> <nl> - Status Env : : NewWritableFile ( const string & fname , WritableFile * * result ) { <nl> + Status Env : : NewReadOnlyMemoryRegionFromFile ( <nl> + const string & fname , std : : unique_ptr < ReadOnlyMemoryRegion > * result ) { <nl> + FileSystem * fs ; <nl> + TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> + return fs - > NewReadOnlyMemoryRegionFromFile ( fname , result ) ; <nl> + } <nl> + <nl> + Status Env : : NewWritableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) { <nl> FileSystem * fs ; <nl> TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> return fs - > NewWritableFile ( fname , result ) ; <nl> } <nl> <nl> - Status Env : : NewAppendableFile ( const string & fname , WritableFile * * result ) { <nl> + Status Env : : NewAppendableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) { <nl> FileSystem * fs ; <nl> TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> return fs - > NewAppendableFile ( fname , result ) ; <nl> } <nl> <nl> + / / / Deprecated versions of factories . <nl> + <nl> + Status Env : : NewRandomAccessFile ( const string & fname , <nl> + RandomAccessFile * * result ) { <nl> + FileSystem * fs ; <nl> + TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> + std : : unique_ptr < RandomAccessFile > r ; <nl> + TF_RETURN_IF_ERROR ( fs - > NewRandomAccessFile ( fname , & r ) ) ; <nl> + * result = r . release ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status Env : : NewAppendableFile ( const string & fname , WritableFile * * result ) { <nl> + FileSystem * fs ; <nl> + TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> + <nl> + std : : unique_ptr < WritableFile > w ; <nl> + TF_RETURN_IF_ERROR ( fs - > NewAppendableFile ( fname , & w ) ) ; <nl> + * result = w . release ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status Env : : NewWritableFile ( const string & fname , WritableFile * * result ) { <nl> + FileSystem * fs ; <nl> + TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> + std : : unique_ptr < WritableFile > w ; <nl> + TF_RETURN_IF_ERROR ( fs - > NewWritableFile ( fname , & w ) ) ; <nl> + * result = w . release ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status Env : : NewReadOnlyMemoryRegionFromFile ( const string & fname , <nl> ReadOnlyMemoryRegion * * result ) { <nl> FileSystem * fs ; <nl> TF_RETURN_IF_ERROR ( GetFileSystemForFile ( fname , & fs ) ) ; <nl> - return fs - > NewReadOnlyMemoryRegionFromFile ( fname , result ) ; <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > r ; <nl> + TF_RETURN_IF_ERROR ( fs - > NewReadOnlyMemoryRegionFromFile ( fname , & r ) ) ; <nl> + * result = r . release ( ) ; <nl> + return Status : : OK ( ) ; <nl> } <nl> <nl> bool Env : : FileExists ( const string & fname ) { <nl> mmm a / tensorflow / core / platform / env . h <nl> ppp b / tensorflow / core / platform / env . 
h <nl> class Env { <nl> / / / The ownership of the returned RandomAccessFile is passed to the caller <nl> / / / and the object should be deleted when is not used . The file object <nl> / / / shouldn ' t live longer than the Env object . <nl> + Status NewRandomAccessFile ( const string & fname , <nl> + std : : unique_ptr < RandomAccessFile > * result ) ; <nl> + / / NOTE : To be removed , replace with unique_ptr interface above . <nl> Status NewRandomAccessFile ( const string & fname , RandomAccessFile * * result ) ; <nl> <nl> / / / \ brief Creates an object that writes to a new file with the specified <nl> class Env { <nl> / / / The ownership of the returned WritableFile is passed to the caller <nl> / / / and the object should be deleted when is not used . The file object <nl> / / / shouldn ' t live longer than the Env object . <nl> + Status NewWritableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) ; <nl> + / / NOTE : To be removed , replace with unique_ptr interface above . <nl> Status NewWritableFile ( const string & fname , WritableFile * * result ) ; <nl> <nl> / / / \ brief Creates an object that either appends to an existing file , or <nl> class Env { <nl> / / / The ownership of the returned WritableFile is passed to the caller <nl> / / / and the object should be deleted when is not used . The file object <nl> / / / shouldn ' t live longer than the Env object . <nl> + Status NewAppendableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) ; <nl> + / / NOTE : To be removed , replace with unique_ptr interface above . <nl> Status NewAppendableFile ( const string & fname , WritableFile * * result ) ; <nl> <nl> / / / \ brief Creates a readonly region of memory with the file context . <nl> class Env { <nl> / / / The ownership of the returned ReadOnlyMemoryRegion is passed to the caller <nl> / / / and the object should be deleted when is not used . The memory region <nl> / / / object shouldn ' t live longer than the Env object . <nl> + Status NewReadOnlyMemoryRegionFromFile ( <nl> + const string & fname , std : : unique_ptr < ReadOnlyMemoryRegion > * result ) ; <nl> + / / NOTE : To be removed , replace with unique_ptr interface above . <nl> Status NewReadOnlyMemoryRegionFromFile ( const string & fname , <nl> ReadOnlyMemoryRegion * * result ) ; <nl> <nl> mmm a / tensorflow / core / platform / env_test . cc <nl> ppp b / tensorflow / core / platform / env_test . cc <nl> TEST ( EnvTest , FileToReadonlyMemoryRegion ) { <nl> const string input = CreateTestFile ( env , filename , length ) ; <nl> <nl> / / Create the region . <nl> - ReadOnlyMemoryRegion * region ; <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > region ; <nl> TF_CHECK_OK ( env - > NewReadOnlyMemoryRegionFromFile ( filename , & region ) ) ; <nl> - std : : unique_ptr < ReadOnlyMemoryRegion > region_uptr ( region ) ; <nl> ASSERT_NE ( region , nullptr ) ; <nl> EXPECT_EQ ( length , region - > length ( ) ) ; <nl> EXPECT_EQ ( input , string ( reinterpret_cast < const char * > ( region - > data ( ) ) , <nl> mmm a / tensorflow / core / platform / file_system . h <nl> ppp b / tensorflow / core / platform / file_system . h <nl> class FileSystem { <nl> <nl> / / / The following functions are the implementations used by the corresponding <nl> / / / functions in the Env class . 
<nl> - virtual Status NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) = 0 ; <nl> + virtual Status NewRandomAccessFile ( <nl> + const string & fname , std : : unique_ptr < RandomAccessFile > * result ) = 0 ; <nl> <nl> virtual Status NewWritableFile ( const string & fname , <nl> - WritableFile * * result ) = 0 ; <nl> + std : : unique_ptr < WritableFile > * result ) = 0 ; <nl> <nl> virtual Status NewAppendableFile ( const string & fname , <nl> - WritableFile * * result ) = 0 ; <nl> + std : : unique_ptr < WritableFile > * result ) = 0 ; <nl> <nl> virtual Status NewReadOnlyMemoryRegionFromFile ( <nl> - const string & fname , ReadOnlyMemoryRegion * * result ) = 0 ; <nl> + const string & fname , std : : unique_ptr < ReadOnlyMemoryRegion > * result ) = 0 ; <nl> <nl> virtual bool FileExists ( const string & fname ) = 0 ; <nl> <nl> class NullFileSystem : public FileSystem { <nl> <nl> ~ NullFileSystem ( ) override = default ; <nl> <nl> - Status NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) override { <nl> + Status NewRandomAccessFile ( <nl> + const string & fname , std : : unique_ptr < RandomAccessFile > * result ) override { <nl> return errors : : Unimplemented ( " NewRandomAccessFile unimplemented " ) ; <nl> } <nl> <nl> - Status NewWritableFile ( const string & fname , WritableFile * * result ) override { <nl> + Status NewWritableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override { <nl> return errors : : Unimplemented ( " NewWritableFile unimplemented " ) ; <nl> } <nl> <nl> Status NewAppendableFile ( const string & fname , <nl> - WritableFile * * result ) override { <nl> + std : : unique_ptr < WritableFile > * result ) override { <nl> return errors : : Unimplemented ( " NewAppendableFile unimplemented " ) ; <nl> } <nl> <nl> Status NewReadOnlyMemoryRegionFromFile ( <nl> - const string & fname , ReadOnlyMemoryRegion * * result ) override { <nl> + const string & fname , <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > * result ) override { <nl> return errors : : Unimplemented ( <nl> " NewReadOnlyMemoryRegionFromFile unimplemented " ) ; <nl> } <nl> mmm a / tensorflow / core / platform / posix / posix_file_system . cc <nl> ppp b / tensorflow / core / platform / posix / posix_file_system . cc <nl> class PosixReadOnlyMemoryRegion : public ReadOnlyMemoryRegion { <nl> <nl> } / / namespace <nl> <nl> - Status PosixFileSystem : : NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) { <nl> + Status PosixFileSystem : : NewRandomAccessFile ( <nl> + const string & fname , std : : unique_ptr < RandomAccessFile > * result ) { <nl> string translated_fname = TranslateName ( fname ) ; <nl> - * result = NULL ; <nl> Status s ; <nl> int fd = open ( translated_fname . c_str ( ) , O_RDONLY ) ; <nl> if ( fd < 0 ) { <nl> s = IOError ( fname , errno ) ; <nl> } else { <nl> - * result = new PosixRandomAccessFile ( translated_fname , fd ) ; <nl> + result - > reset ( new PosixRandomAccessFile ( translated_fname , fd ) ) ; <nl> } <nl> return s ; <nl> } <nl> <nl> Status PosixFileSystem : : NewWritableFile ( const string & fname , <nl> - WritableFile * * result ) { <nl> + std : : unique_ptr < WritableFile > * result ) { <nl> string translated_fname = TranslateName ( fname ) ; <nl> Status s ; <nl> FILE * f = fopen ( translated_fname . 
c_str ( ) , " w " ) ; <nl> if ( f = = NULL ) { <nl> - * result = NULL ; <nl> s = IOError ( fname , errno ) ; <nl> } else { <nl> - * result = new PosixWritableFile ( translated_fname , f ) ; <nl> + result - > reset ( new PosixWritableFile ( translated_fname , f ) ) ; <nl> } <nl> return s ; <nl> } <nl> <nl> - Status PosixFileSystem : : NewAppendableFile ( const string & fname , <nl> - WritableFile * * result ) { <nl> + Status PosixFileSystem : : NewAppendableFile ( <nl> + const string & fname , std : : unique_ptr < WritableFile > * result ) { <nl> string translated_fname = TranslateName ( fname ) ; <nl> Status s ; <nl> FILE * f = fopen ( translated_fname . c_str ( ) , " a " ) ; <nl> if ( f = = NULL ) { <nl> - * result = NULL ; <nl> s = IOError ( fname , errno ) ; <nl> } else { <nl> - * result = new PosixWritableFile ( translated_fname , f ) ; <nl> + result - > reset ( new PosixWritableFile ( translated_fname , f ) ) ; <nl> } <nl> return s ; <nl> } <nl> <nl> Status PosixFileSystem : : NewReadOnlyMemoryRegionFromFile ( <nl> - const string & fname , ReadOnlyMemoryRegion * * result ) { <nl> + const string & fname , std : : unique_ptr < ReadOnlyMemoryRegion > * result ) { <nl> string translated_fname = TranslateName ( fname ) ; <nl> - * result = nullptr ; <nl> Status s = Status : : OK ( ) ; <nl> int fd = open ( translated_fname . c_str ( ) , O_RDONLY ) ; <nl> if ( fd < 0 ) { <nl> Status PosixFileSystem : : NewReadOnlyMemoryRegionFromFile ( <nl> if ( address = = MAP_FAILED ) { <nl> s = IOError ( fname , errno ) ; <nl> } else { <nl> - * result = new PosixReadOnlyMemoryRegion ( address , st . st_size ) ; <nl> + result - > reset ( new PosixReadOnlyMemoryRegion ( address , st . st_size ) ) ; <nl> } <nl> close ( fd ) ; <nl> } <nl> mmm a / tensorflow / core / platform / posix / posix_file_system . h <nl> ppp b / tensorflow / core / platform / posix / posix_file_system . h <nl> class PosixFileSystem : public FileSystem { <nl> <nl> ~ PosixFileSystem ( ) { } <nl> <nl> - Status NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) override ; <nl> + Status NewRandomAccessFile ( <nl> + const string & filename , <nl> + std : : unique_ptr < RandomAccessFile > * result ) override ; <nl> <nl> - Status NewWritableFile ( const string & fname , WritableFile * * result ) override ; <nl> + Status NewWritableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> <nl> - Status NewAppendableFile ( const string & fname , WritableFile * * result ) override ; <nl> + Status NewAppendableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> <nl> Status NewReadOnlyMemoryRegionFromFile ( <nl> - const string & fname , ReadOnlyMemoryRegion * * result ) override ; <nl> + const string & filename , <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > * result ) override ; <nl> <nl> bool FileExists ( const string & fname ) override ; <nl> <nl> mmm a / tensorflow / core / util / events_writer . cc <nl> ppp b / tensorflow / core / util / events_writer . cc <nl> bool EventsWriter : : Init ( ) { <nl> static_cast < long long > ( time_in_seconds ) , port : : Hostname ( ) . c_str ( ) ) ; <nl> port : : AdjustFilenameForLogging ( & filename_ ) ; <nl> <nl> - WritableFile * file ; <nl> - Status s = env_ - > NewWritableFile ( filename_ , & file ) ; <nl> + Status s = env_ - > NewWritableFile ( filename_ , & recordio_file_ ) ; <nl> if ( ! s . 
ok ( ) ) { <nl> LOG ( ERROR ) < < " Could not open events file : " < < filename_ < < " : " < < s ; <nl> return false ; <nl> } <nl> - recordio_file_ . reset ( file ) ; <nl> recordio_writer_ . reset ( new io : : RecordWriter ( recordio_file_ . get ( ) ) ) ; <nl> if ( recordio_writer_ . get ( ) = = NULL ) { <nl> LOG ( ERROR ) < < " Could not create record writer " ; <nl> mmm a / tensorflow / core / util / memmapped_file_system . cc <nl> ppp b / tensorflow / core / util / memmapped_file_system . cc <nl> bool MemmappedFileSystem : : FileExists ( const string & fname ) { <nl> return dir_element ! = directory_ . end ( ) ; <nl> } <nl> <nl> - Status MemmappedFileSystem : : NewRandomAccessFile ( const string & filename , <nl> - RandomAccessFile * * result ) { <nl> + Status MemmappedFileSystem : : NewRandomAccessFile ( <nl> + const string & filename , std : : unique_ptr < RandomAccessFile > * result ) { <nl> if ( ! mapped_memory_ ) { <nl> return errors : : FailedPrecondition ( " MemmappedEnv is not initialized " ) ; <nl> } <nl> Status MemmappedFileSystem : : NewRandomAccessFile ( const string & filename , <nl> if ( dir_element = = directory_ . end ( ) ) { <nl> return errors : : NotFound ( " Region " , filename , " is not found " ) ; <nl> } <nl> - * result = new RandomAccessFileFromMemmapped ( <nl> + result - > reset ( new RandomAccessFileFromMemmapped ( <nl> GetMemoryWithOffset ( dir_element - > second . offset ) , <nl> - dir_element - > second . length ) ; <nl> + dir_element - > second . length ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status MemmappedFileSystem : : NewReadOnlyMemoryRegionFromFile ( <nl> - const string & filename , ReadOnlyMemoryRegion * * result ) { <nl> + const string & filename , std : : unique_ptr < ReadOnlyMemoryRegion > * result ) { <nl> if ( ! mapped_memory_ ) { <nl> return errors : : FailedPrecondition ( " MemmappedEnv is not initialized " ) ; <nl> } <nl> Status MemmappedFileSystem : : NewReadOnlyMemoryRegionFromFile ( <nl> if ( dir_element = = directory_ . end ( ) ) { <nl> return errors : : NotFound ( " Region " , filename , " is not found " ) ; <nl> } <nl> - * result = new ReadOnlyMemoryRegionFromMemmapped ( <nl> + result - > reset ( new ReadOnlyMemoryRegionFromMemmapped ( <nl> GetMemoryWithOffset ( dir_element - > second . offset ) , <nl> - dir_element - > second . length ) ; <nl> + dir_element - > second . length ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status MemmappedFileSystem : : GetFileSize ( const string & filename , uint64 * size ) { <nl> } <nl> <nl> Status MemmappedFileSystem : : NewWritableFile ( const string & filename , <nl> - WritableFile * * wf ) { <nl> + std : : unique_ptr < WritableFile > * wf ) { <nl> return errors : : Unimplemented ( " memmapped format doesn ' t support writing " ) ; <nl> } <nl> <nl> - Status MemmappedFileSystem : : NewAppendableFile ( const string & filename , <nl> - WritableFile * * result ) { <nl> + Status MemmappedFileSystem : : NewAppendableFile ( <nl> + const string & filename , std : : unique_ptr < WritableFile > * result ) { <nl> return errors : : Unimplemented ( " memmapped format doesn ' t support writing " ) ; <nl> } <nl> <nl> constexpr char MemmappedFileSystem : : kMemmappedPackageDefaultGraphDef [ ] ; <nl> <nl> Status MemmappedFileSystem : : InitializeFromFile ( Env * env , <nl> const string & filename ) { <nl> - ReadOnlyMemoryRegion * region ; <nl> - TF_RETURN_IF_ERROR ( env - > NewReadOnlyMemoryRegionFromFile ( filename , & region ) ) ; <nl> - mapped_memory_ . 
reset ( region ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + env - > NewReadOnlyMemoryRegionFromFile ( filename , & mapped_memory_ ) ) ; <nl> directory_ . clear ( ) ; <nl> if ( mapped_memory_ - > length ( ) < = sizeof ( uint64 ) ) { <nl> return errors : : DataLoss ( " Corrupted memmapped model file : " , filename , <nl> mmm a / tensorflow / core / util / memmapped_file_system . h <nl> ppp b / tensorflow / core / util / memmapped_file_system . h <nl> class MemmappedFileSystem : public FileSystem { <nl> MemmappedFileSystem ( ) ; <nl> ~ MemmappedFileSystem ( ) override = default ; <nl> bool FileExists ( const string & fname ) override ; <nl> - Status NewRandomAccessFile ( const string & filename , <nl> - RandomAccessFile * * result ) override ; <nl> + Status NewRandomAccessFile ( <nl> + const string & filename , <nl> + std : : unique_ptr < RandomAccessFile > * result ) override ; <nl> Status NewReadOnlyMemoryRegionFromFile ( <nl> - const string & filename , ReadOnlyMemoryRegion * * result ) override ; <nl> + const string & filename , <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > * result ) override ; <nl> <nl> / / All these functions return Unimplemented error , the memmapped storage is <nl> / / read only . <nl> - Status NewWritableFile ( const string & fname , WritableFile * * result ) override ; <nl> - Status NewAppendableFile ( const string & fname , WritableFile * * result ) override ; <nl> + Status NewWritableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> + Status NewAppendableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> Status GetChildren ( const string & dir , std : : vector < string > * r ) override ; <nl> Status DeleteFile ( const string & f ) override ; <nl> Status CreateDir ( const string & d ) override ; <nl> mmm a / tensorflow / core / util / memmapped_file_system_test . cc <nl> ppp b / tensorflow / core / util / memmapped_file_system_test . cc <nl> TEST ( MemmappedFileSystemTest , SimpleTest ) { <nl> ReadBinaryProto ( & memmapped_env , kProtoFileName , & test_graph_def ) ) ; <nl> EXPECT_EQ ( kTestGraphDefVersion , test_graph_def . version ( ) ) ; <nl> / / Check that we can correctly get a tensor memory . <nl> - ReadOnlyMemoryRegion * memory_region ; <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > memory_region ; <nl> TF_ASSERT_OK ( memmapped_env . NewReadOnlyMemoryRegionFromFile ( kTensor2FileName , <nl> & memory_region ) ) ; <nl> - std : : unique_ptr < ReadOnlyMemoryRegion > mem_region_ptr ( memory_region ) ; <nl> + <nl> / / The memory region can be bigger but not less than Tensor size . <nl> ASSERT_GE ( memory_region - > length ( ) , test_tensor . TotalBytes ( ) ) ; <nl> EXPECT_EQ ( test_tensor . tensor_data ( ) , <nl> TEST ( MemmappedFileSystemTest , SimpleTest ) { <nl> <nl> TEST ( MemmappedFileSystemTest , NotInitalized ) { <nl> MemmappedEnv memmapped_env ( Env : : Default ( ) ) ; <nl> - ReadOnlyMemoryRegion * memory_region ; <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > memory_region ; <nl> EXPECT_EQ ( <nl> error : : FAILED_PRECONDITION , <nl> memmapped_env <nl> TEST ( MemmappedFileSystemTest , ProxyToDefault ) { <nl> const string dir = testing : : TmpDir ( ) ; <nl> const string filename = io : : JoinPath ( dir , " test_file " ) ; <nl> / / Check that we can create write and read ordinary file . <nl> - WritableFile * writable_file ; <nl> + std : : unique_ptr < WritableFile > writable_file ; <nl> TF_ASSERT_OK ( memmapped_env . 
NewAppendableFile ( filename , & writable_file ) ) ; <nl> - std : : unique_ptr < WritableFile > writable_file_ptr ( writable_file ) ; <nl> const string test_string = " bla - bla - bla " ; <nl> TF_ASSERT_OK ( writable_file - > Append ( test_string ) ) ; <nl> TF_ASSERT_OK ( writable_file - > Close ( ) ) ; <nl> uint64 file_length = 0 ; <nl> TF_EXPECT_OK ( memmapped_env . GetFileSize ( filename , & file_length ) ) ; <nl> EXPECT_EQ ( test_string . length ( ) , file_length ) ; <nl> - RandomAccessFile * random_access_file ; <nl> + std : : unique_ptr < RandomAccessFile > random_access_file ; <nl> TF_ASSERT_OK ( <nl> memmapped_env . NewRandomAccessFile ( filename , & random_access_file ) ) ; <nl> - delete random_access_file ; <nl> } <nl> <nl> } / / namespace <nl> mmm a / tensorflow / core / util / memmapped_file_system_writer . cc <nl> ppp b / tensorflow / core / util / memmapped_file_system_writer . cc <nl> namespace tensorflow { <nl> <nl> Status MemmappedFileSystemWriter : : InitializeToFile ( Env * env , <nl> const string & filename ) { <nl> - WritableFile * writable_file ; <nl> - auto status = env - > NewWritableFile ( filename , & writable_file ) ; <nl> + auto status = env - > NewWritableFile ( filename , & output_file_ ) ; <nl> if ( status . ok ( ) ) { <nl> - output_file_ . reset ( writable_file ) ; <nl> output_file_offset_ = 0 ; <nl> } <nl> return status ; <nl> mmm a / tensorflow / core / util / reporter . cc <nl> ppp b / tensorflow / core / util / reporter . cc <nl> Status TestReporter : : Initialize ( ) { <nl> return errors : : InvalidArgument ( " Cannot create TestReporter , file exists : " , <nl> mangled_fname ) ; <nl> } <nl> - WritableFile * log_file ; <nl> - TF_RETURN_IF_ERROR ( env - > NewWritableFile ( mangled_fname , & log_file ) ) ; <nl> - log_file_ . reset ( log_file ) ; <nl> - TF_RETURN_IF_ERROR ( log_file - > Flush ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( env - > NewWritableFile ( mangled_fname , & log_file_ ) ) ; <nl> + TF_RETURN_IF_ERROR ( log_file_ - > Flush ( ) ) ; <nl> <nl> benchmark_entry_ . set_name ( test_name_ ) ; <nl> closed_ = false ; <nl> mmm a / tensorflow / core / util / tensor_slice_writer . cc <nl> ppp b / tensorflow / core / util / tensor_slice_writer . cc <nl> class TableBuilder : public TensorSliceWriter : : Builder { <nl> Status CreateTableTensorSliceBuilder ( const string & name , <nl> TensorSliceWriter : : Builder * * builder ) { <nl> * builder = nullptr ; <nl> - WritableFile * f ; <nl> + std : : unique_ptr < WritableFile > f ; <nl> Status s = Env : : Default ( ) - > NewWritableFile ( name , & f ) ; <nl> if ( s . ok ( ) ) { <nl> - * builder = new TableBuilder ( name , f ) ; <nl> + * builder = new TableBuilder ( name , f . release ( ) ) ; <nl> return Status : : OK ( ) ; <nl> } else { <nl> return s ; <nl> mmm a / tensorflow / python / framework / test_file_system . cc <nl> ppp b / tensorflow / python / framework / test_file_system . cc <nl> class TestRandomAccessFile : public RandomAccessFile { <nl> <nl> class TestFileSystem : public NullFileSystem { <nl> public : <nl> - Status NewRandomAccessFile ( const string & fname , <nl> - RandomAccessFile * * result ) override { <nl> - * result = new TestRandomAccessFile ; <nl> + Status NewRandomAccessFile ( <nl> + const string & fname , std : : unique_ptr < RandomAccessFile > * result ) override { <nl> + result - > reset ( new TestRandomAccessFile ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> / / Always return size of 10 <nl> mmm a / tensorflow / python / lib / io / py_record_writer . 
cc <nl> ppp b / tensorflow / python / lib / io / py_record_writer . cc <nl> PyRecordWriter : : PyRecordWriter ( ) { } <nl> <nl> PyRecordWriter * PyRecordWriter : : New ( const string & filename , <nl> const string & compression_type_string ) { <nl> - WritableFile * file ; <nl> + std : : unique_ptr < WritableFile > file ; <nl> Status s = Env : : Default ( ) - > NewWritableFile ( filename , & file ) ; <nl> if ( ! s . ok ( ) ) { <nl> return nullptr ; <nl> } <nl> PyRecordWriter * writer = new PyRecordWriter ; <nl> - writer - > file_ = file ; <nl> + writer - > file_ = file . release ( ) ; <nl> <nl> RecordWriterOptions options ; <nl> if ( compression_type_string = = " ZLIB " ) { <nl>
|
TensorFlow : minor changes to functions in the Env interface .
|
tensorflow/tensorflow
|
209c006578483460e98c114a69d4b9ed6b95efed
|
2016-06-17T02:16:41Z
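The TensorFlow record above changes the Env / FileSystem factory methods to take std::unique_ptr out-parameters and keeps deprecated raw-pointer overloads that delegate to the new versions and hand ownership back with release(). Below is a minimal sketch of that migration pattern; Env, WritableFile and Status here are simplified stand-ins, not the real TensorFlow classes.

```cpp
#include <memory>
#include <string>

// Simplified stand-ins for the TensorFlow types; names are illustrative only.
struct Status {
    bool ok() const { return ok_; }
    bool ok_ = true;
};

struct WritableFile {
    explicit WritableFile(std::string name) : name_(std::move(name)) {}
    Status Append(const std::string& /*data*/) { return {}; }
    Status Close() { return {}; }
    std::string name_;
};

class Env {
 public:
    // New-style factory: ownership is spelled out in the signature.
    Status NewWritableFile(const std::string& fname,
                           std::unique_ptr<WritableFile>* result) {
        result->reset(new WritableFile(fname));
        return {};
    }

    // Deprecated raw-pointer overload kept for old call sites: it delegates to
    // the unique_ptr version and hands ownership back via release().
    Status NewWritableFile(const std::string& fname, WritableFile** result) {
        std::unique_ptr<WritableFile> w;
        Status s = NewWritableFile(fname, &w);
        if (s.ok()) *result = w.release();
        return s;
    }
};

int main() {
    Env env;
    std::unique_ptr<WritableFile> file;
    if (env.NewWritableFile("/tmp/example.txt", &file).ok()) {
        file->Append("hello");
        file->Close();
    }  // the unique_ptr frees the file automatically
}
```

The point is visible in the call sites elsewhere in the diff: callers declare a std::unique_ptr, pass its address, and drop the manual wrap-or-delete bookkeeping, while older callers keep compiling through the thin deprecated shim.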
|
mmm a / POSIX / golang / golang - bridge . cpp <nl> ppp b / POSIX / golang / golang - bridge . cpp <nl> <nl> <nl> # ifndef CGO <nl> <nl> - # include " MMKVPredef . h " <nl> + # include " MMKVPredef . h " <nl> <nl> # include " MMKV . h " <nl> # include < stdint . h > <nl> MMKV_EXPORT void * decodeBytes ( void * handle , const char * oKey , uint64_t * lengthPt <nl> <nl> # ifndef MMKV_DISABLE_CRYPT <nl> <nl> - MMKV_EXPORT bool reKey ( void * handle , char * oKey , uint64_t length ) { <nl> + MMKV_EXPORT bool reKey ( void * handle , char * oKey , uint32_t length ) { <nl> MMKV * kv = static_cast < MMKV * > ( handle ) ; <nl> if ( kv ) { <nl> if ( oKey & & length > 0 ) { <nl> MMKV_EXPORT bool reKey ( void * handle , char * oKey , uint64_t length ) { <nl> return false ; <nl> } <nl> <nl> - MMKV_EXPORT void * cryptKey ( void * handle , uint64_t * lengthPtr ) { <nl> + MMKV_EXPORT void * cryptKey ( void * handle , uint32_t * lengthPtr ) { <nl> MMKV * kv = static_cast < MMKV * > ( handle ) ; <nl> if ( kv & & lengthPtr ) { <nl> auto cryptKey = kv - > cryptKey ( ) ; <nl> MMKV_EXPORT void checkReSetCryptKey ( void * handle , char * oKey , uint64_t length ) { <nl> <nl> # endif / / MMKV_DISABLE_CRYPT <nl> <nl> - MMKV_EXPORT uint32_t valueSize ( void * handle , char * oKey , bool actualSize ) { <nl> - MMKV * kv = static_cast < MMKV * > ( handle ) ; <nl> - if ( kv & & oKey ) { <nl> - string key ( oKey ) ; <nl> - auto ret = kv - > getValueSize ( key , actualSize ) ; <nl> - return static_cast < uint32_t > ( ret ) ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> - MMKV_EXPORT int32_t writeValueToNB ( void * handle , char * oKey , void * pointer , uint32_t size ) { <nl> - MMKV * kv = static_cast < MMKV * > ( handle ) ; <nl> - if ( kv & & oKey ) { <nl> - string key ( oKey ) ; <nl> - return kv - > writeValueToBuffer ( key , pointer , size ) ; <nl> - } <nl> - return - 1 ; <nl> - } <nl> - <nl> MMKV_EXPORT uint64_t allKeys ( void * handle , char * * * keyArrayPtr , uint32_t * * sizeArrayPtr ) { <nl> MMKV * kv = static_cast < MMKV * > ( handle ) ; <nl> if ( kv ) { <nl> mmm a / POSIX / golang / golang - bridge . h <nl> ppp b / POSIX / golang / golang - bridge . h <nl> <nl> * / <nl> <nl> # ifdef __cplusplus <nl> - # include < stdint > <nl> + # include < stdint > <nl> extern " C " { <nl> # else <nl> - # include < stdint . h > <nl> - # include < stdbool . h > <nl> + # include < stdbool . h > <nl> + # include < stdint . 
h > <nl> # endif <nl> <nl> void mmkvInitialize ( const char * rootDir , int32_t logLevel ) ; <nl> bool encodeDouble ( void * handle , const char * oKey , double value ) ; <nl> double decodeDouble ( void * handle , const char * oKey , double defaultValue ) ; <nl> bool encodeBytes ( void * handle , const char * oKey , void * oValue , uint64_t length ) ; <nl> void * decodeBytes ( void * handle , const char * oKey , uint64_t * lengthPtr ) ; <nl> - bool reKey ( void * handle , char * oKey , uint64_t length ) ; <nl> - void * cryptKey ( void * handle , uint64_t * lengthPtr ) ; <nl> + bool reKey ( void * handle , char * oKey , uint32_t length ) ; <nl> + void * cryptKey ( void * handle , uint32_t * lengthPtr ) ; <nl> void checkReSetCryptKey ( void * handle , char * oKey , uint64_t length ) ; <nl> - uint32_t valueSize ( void * handle , char * oKey , bool actualSize ) ; <nl> - int32_t writeValueToNB ( void * handle , char * oKey , void * pointer , uint32_t size ) ; <nl> uint64_t allKeys ( void * handle , char * * * keyArrayPtr , uint32_t * * sizeArrayPtr ) ; <nl> bool containsKey ( void * handle , char * oKey ) ; <nl> uint64_t count ( void * handle ) ; <nl> const char * version ( ) ; <nl> void trim ( void * handle ) ; <nl> void mmkvClose ( void * handle ) ; <nl> <nl> - <nl> # ifdef __cplusplus <nl> } <nl> # endif <nl> mmm a / POSIX / golang / mmkv . go <nl> ppp b / POSIX / golang / mmkv . go <nl> package mmkv <nl> <nl> / / # cgo CXXFLAGS : - D CGO - D FORCE_POSIX - I $ { SRCDIR } / . . / . . / Core - std = c + + 17 <nl> / / # cgo LDFLAGS : - L . - lmmkv - L . / Core - lcore - lz - lpthread <nl> - / / # include " golang - bridge . h " <nl> - / / # include < stdlib . h > <nl> + / * <nl> + # include " golang - bridge . h " <nl> + # include < stdlib . h > <nl> + <nl> + typedef void * voidptr_t ; <nl> + <nl> + static void setStringArray ( char * * array , char * str , size_t n ) { <nl> + array [ n ] = str ; <nl> + } <nl> + <nl> + static char * getStringArray ( char * * array , size_t n ) { <nl> + return array [ n ] ; <nl> + } <nl> + <nl> + static void setSizeArray ( uint32_t * array , uint32_t value , size_t n ) { <nl> + array [ n ] = value ; <nl> + } <nl> + <nl> + static uint32_t getSizeArray ( uint32_t * array , size_t n ) { <nl> + return array [ n ] ; <nl> + } <nl> + <nl> + static void freeStringArray ( char * * a , size_t size ) { <nl> + size_t i ; <nl> + for ( i = 0 ; i < size ; i + + ) { <nl> + free ( a [ i ] ) ; <nl> + } <nl> + } <nl> + * / <nl> import " C " <nl> - / / import " fmt " <nl> import " unsafe " <nl> <nl> const ( <nl> type MMKV interface { <nl> GetFloat64WithDefault ( key string , defaultValue float64 ) float64 <nl> GetString ( key string ) string <nl> GetBytes ( key string ) [ ] byte <nl> - / * <nl> + <nl> RemoveKey ( key string ) <nl> RemoveKeys ( keys [ ] string ) <nl> ClearAll ( ) <nl> <nl> Count ( ) uint64 <nl> - AllKeys ( ) [ ] string * / <nl> + AllKeys ( ) [ ] string <nl> Contains ( key string ) bool <nl> TotalSize ( ) uint64 <nl> ActualSize ( ) uint64 <nl> - / * <nl> + <nl> MMAP_ID ( ) string <nl> <nl> Sync ( sync bool ) <nl> + ClearMemoryCache ( ) <nl> Trim ( ) <nl> Close ( ) <nl> <nl> ReKey ( newKey string ) bool <nl> CryptKey ( ) string <nl> - * / <nl> } <nl> <nl> / / Hello returns a greeting for the named person . <nl> func PageSize ( ) int32 { <nl> return int32 ( C . pageSize ( ) ) <nl> } <nl> <nl> + func DefaultMMKV ( ) MMKV { <nl> + mmkv : = ctorMMKV ( C . 
getDefaultMMKV ( MMKV_SINGLE_PROCESS , nil ) ) <nl> + return MMKV ( mmkv ) <nl> + } <nl> + <nl> + func DefaultMMKVWithMode ( mode int ) MMKV { <nl> + mmkv : = ctorMMKV ( C . getDefaultMMKV ( C . int ( mode ) , nil ) ) <nl> + return MMKV ( mmkv ) <nl> + } <nl> + <nl> + func DefaultMMKVWithModeAndCryptKey ( mode int , cryptKey string ) MMKV { <nl> + cCryptKey : = C . CString ( cryptKey ) <nl> + mmkv : = ctorMMKV ( C . getDefaultMMKV ( MMKV_SINGLE_PROCESS , cCryptKey ) ) <nl> + C . free ( unsafe . Pointer ( cCryptKey ) ) <nl> + return MMKV ( mmkv ) <nl> + } <nl> + <nl> func MMKVWithID ( mmapID string ) MMKV { <nl> cmmapID : = C . CString ( mmapID ) <nl> mmkv : = ctorMMKV ( C . getMMKVWithID ( cmmapID , MMKV_SINGLE_PROCESS , nil , nil ) ) <nl> func MMKVWithID ( mmapID string ) MMKV { <nl> return MMKV ( mmkv ) <nl> } <nl> <nl> + func MMKVWithIDAndMode ( mmapID string , mode int ) MMKV { <nl> + cmmapID : = C . CString ( mmapID ) <nl> + mmkv : = ctorMMKV ( C . getMMKVWithID ( cmmapID , C . int ( mode ) , nil , nil ) ) <nl> + C . free ( unsafe . Pointer ( cmmapID ) ) <nl> + <nl> + return MMKV ( mmkv ) <nl> + } <nl> + <nl> + func MMKVWithIDAndModeAndCryptKey ( mmapID string , mode int , cryptKey string ) MMKV { <nl> + cmmapID : = C . CString ( mmapID ) <nl> + cCryptKey : = C . CString ( cryptKey ) <nl> + <nl> + mmkv : = ctorMMKV ( C . getMMKVWithID ( cmmapID , C . int ( mode ) , cCryptKey , nil ) ) <nl> + <nl> + C . free ( unsafe . Pointer ( cmmapID ) ) <nl> + C . free ( unsafe . Pointer ( cCryptKey ) ) <nl> + <nl> + return MMKV ( mmkv ) <nl> + } <nl> + <nl> / / TODO : use _GoString_ to avoid string copying <nl> func ( kv ctorMMKV ) SetBool ( value bool , key string ) bool { <nl> cKey : = C . CString ( key ) <nl> func ( kv ctorMMKV ) GetBytes ( key string ) [ ] byte { <nl> return value <nl> } <nl> <nl> + func ( kv ctorMMKV ) RemoveKey ( key string ) { <nl> + cKey : = C . CString ( key ) <nl> + C . removeValueForKey ( unsafe . Pointer ( kv ) , cKey ) ; <nl> + C . free ( unsafe . Pointer ( cKey ) ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) RemoveKeys ( keys [ ] string ) { <nl> + keyArray : = ( * * C . char ) ( C . calloc ( C . size_t ( len ( keys ) ) , C . sizeof_voidptr_t ) ) <nl> + sizeArray : = ( * C . uint32_t ) ( C . calloc ( C . size_t ( len ( keys ) ) , C . sizeof_voidptr_t ) ) <nl> + <nl> + for index , key : = range keys { <nl> + C . setStringArray ( keyArray , C . CString ( key ) , C . size_t ( index ) ) <nl> + C . setSizeArray ( sizeArray , C . uint32_t ( len ( key ) ) , C . size_t ( index ) ) <nl> + } <nl> + C . removeValuesForKeys ( unsafe . Pointer ( kv ) , keyArray , sizeArray , C . uint64_t ( len ( keys ) ) ) <nl> + <nl> + C . freeStringArray ( keyArray , C . size_t ( len ( keys ) ) ) <nl> + C . free ( unsafe . Pointer ( keyArray ) ) <nl> + C . free ( unsafe . Pointer ( sizeArray ) ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) Count ( ) uint64 { <nl> + return uint64 ( C . count ( unsafe . Pointer ( kv ) ) ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) AllKeys ( ) [ ] string { <nl> + var keyArray * * C . char <nl> + var sizeArray * C . uint32_t <nl> + <nl> + cCount : = C . allKeys ( unsafe . Pointer ( kv ) , & keyArray , & sizeArray ) <nl> + count : = uint64 ( cCount ) <nl> + if count = = 0 { <nl> + return [ ] string { } <nl> + } <nl> + <nl> + result : = make ( [ ] string , count ) <nl> + for index : = uint64 ( 0 ) ; index < count ; index + + { <nl> + cStr : = C . getStringArray ( keyArray , C . size_t ( index ) ) <nl> + cLen : = C . getSizeArray ( sizeArray , C . 
size_t ( index ) ) <nl> + result [ index ] = C . GoStringN ( cStr , C . int ( cLen ) ) <nl> + } <nl> + <nl> + C . freeStringArray ( keyArray , C . size_t ( cCount ) ) <nl> + C . free ( unsafe . Pointer ( keyArray ) ) <nl> + C . free ( unsafe . Pointer ( sizeArray ) ) <nl> + return result <nl> + } <nl> + <nl> func ( kv ctorMMKV ) Contains ( key string ) bool { <nl> cKey : = C . CString ( key ) <nl> ret : = C . containsKey ( unsafe . Pointer ( kv ) , cKey ) ; <nl> func ( kv ctorMMKV ) Contains ( key string ) bool { <nl> return bool ( ret ) <nl> } <nl> <nl> + func ( kv ctorMMKV ) ClearAll ( ) { <nl> + C . clearAll ( unsafe . Pointer ( kv ) ) <nl> + } <nl> + <nl> func ( kv ctorMMKV ) TotalSize ( ) uint64 { <nl> return uint64 ( C . totalSize ( unsafe . Pointer ( kv ) ) ) <nl> } <nl> func ( kv ctorMMKV ) TotalSize ( ) uint64 { <nl> func ( kv ctorMMKV ) ActualSize ( ) uint64 { <nl> return uint64 ( C . actualSize ( unsafe . Pointer ( kv ) ) ) <nl> } <nl> + <nl> + func ( kv ctorMMKV ) MMAP_ID ( ) string { <nl> + cStr : = C . mmapID ( unsafe . Pointer ( kv ) ) <nl> + return C . GoString ( cStr ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) Sync ( sync bool ) { <nl> + C . mmkvSync ( unsafe . Pointer ( kv ) , C . bool ( sync ) ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) ClearMemoryCache ( ) { <nl> + C . clearMemoryCache ( unsafe . Pointer ( kv ) ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) Trim ( ) { <nl> + C . trim ( unsafe . Pointer ( kv ) ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) Close ( ) { <nl> + C . mmkvClose ( unsafe . Pointer ( kv ) ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) ReKey ( newKey string ) bool { <nl> + cKey : = C . CString ( newKey ) <nl> + ret : = C . reKey ( unsafe . Pointer ( kv ) , cKey , C . uint32_t ( len ( newKey ) ) ) <nl> + C . free ( unsafe . Pointer ( cKey ) ) <nl> + return bool ( ret ) <nl> + } <nl> + <nl> + func ( kv ctorMMKV ) CryptKey ( ) string { <nl> + var cLen C . uint32_t <nl> + cStr : = C . cryptKey ( unsafe . Pointer ( kv ) , & cLen ) <nl> + if cStr = = nil | | cLen = = 0 { <nl> + return " " <nl> + } <nl> + result : = C . GoStringN ( ( * C . char ) ( cStr ) , C . int ( cLen ) ) <nl> + C . free ( unsafe . Pointer ( cStr ) ) <nl> + return result <nl> + } <nl>
|
golang almost done
|
Tencent/MMKV
|
24d34704781c1a93507cbba9fa7de75824f51bfa
|
2020-12-11T13:26:35Z
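The MMKV record wires the C++ store into Go through a C bridge: each exported function takes the store as an opaque void* handle, and the Go file gains static C helpers so cgo can read and write C string and size arrays. The sketch below shows only the handle-based bridge shape; KvStore and the kv* function names are invented for illustration and are not MMKV's API.

```cpp
#include <cstdint>
#include <map>
#include <string>

// Hypothetical C++ store standing in for MMKV.
class KvStore {
 public:
    void set(const std::string& k, const std::string& v) { data_[k] = v; }
    bool contains(const std::string& k) const { return data_.count(k) != 0; }
    uint64_t count() const { return data_.size(); }
 private:
    std::map<std::string, std::string> data_;
};

// extern "C" bridge: everything crosses the boundary as plain C types and the
// store travels as an opaque handle, mirroring the golang-bridge layout.
extern "C" {

void* kvOpen() { return new KvStore(); }

void kvClose(void* handle) { delete static_cast<KvStore*>(handle); }

bool kvSet(void* handle, const char* key, const char* value) {
    auto* kv = static_cast<KvStore*>(handle);
    if (!kv || !key || !value) return false;
    kv->set(key, value);
    return true;
}

bool kvContains(void* handle, const char* key) {
    auto* kv = static_cast<KvStore*>(handle);
    return kv && key && kv->contains(key);
}

uint64_t kvCount(void* handle) {
    auto* kv = static_cast<KvStore*>(handle);
    return kv ? kv->count() : 0;
}

}  // extern "C"
```

On the Go side such functions are reached through cgo with unsafe.Pointer, the same way the diff's wrappers call C.containsKey(unsafe.Pointer(kv), cKey) and friends.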
|
mmm a / src / deoptimizer . cc <nl> ppp b / src / deoptimizer . cc <nl> class TranslatedState : : CapturedObjectMaterializer { <nl> int field_count ) <nl> : state_ ( state ) , frame_index_ ( frame_index ) , field_count_ ( field_count ) { } <nl> <nl> + / / Ensure the properties never contain mutable heap numbers . This is necessary <nl> + / / because the deoptimizer generalizes all maps to tagged representation <nl> + / / fields ( so mutable heap numbers are not allowed ) . <nl> + static void EnsurePropertiesGeneralized ( Handle < Object > properties_or_hash ) { <nl> + if ( properties_or_hash - > IsPropertyArray ( ) ) { <nl> + Handle < PropertyArray > properties = <nl> + Handle < PropertyArray > : : cast ( properties_or_hash ) ; <nl> + int length = properties - > length ( ) ; <nl> + for ( int i = 0 ; i < length ; i + + ) { <nl> + if ( properties - > get ( i ) - > IsMutableHeapNumber ( ) ) { <nl> + Handle < HeapObject > box ( HeapObject : : cast ( properties - > get ( i ) ) ) ; <nl> + box - > set_map ( properties - > GetIsolate ( ) - > heap ( ) - > heap_number_map ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> Handle < Object > FieldAt ( int * value_index ) { <nl> CHECK_GT ( field_count_ , 0 ) ; <nl> - - field_count_ ; <nl> - return state_ - > MaterializeAt ( frame_index_ , value_index ) ; <nl> + Handle < Object > object = state_ - > MaterializeAt ( frame_index_ , value_index ) ; <nl> + / / This is a big hammer to make sure that the materialized objects do not <nl> + / / have property arrays with mutable heap numbers ( mutable heap numbers are <nl> + / / bad because we generalize maps for all materialized objects ) . <nl> + EnsurePropertiesGeneralized ( object ) ; <nl> + return object ; <nl> } <nl> <nl> ~ CapturedObjectMaterializer ( ) { CHECK_EQ ( 0 , field_count_ ) ; } <nl> new file mode 100644 <nl> index 00000000000 . . 98a38c05e59 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / regress / regress - 776309 . js <nl> <nl> + / / Copyright 2017 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - allow - natives - syntax <nl> + <nl> + function C ( ) { } <nl> + <nl> + function f ( b ) { <nl> + var o = new C ( ) ; <nl> + / / Create out - of - object properties only on one branch so that escape <nl> + / / analysis does not analyze the property array away . <nl> + if ( b ) o . t = 1 . 1 ; <nl> + % _DeoptimizeNow ( ) ; <nl> + return o . t ; <nl> + } <nl> + <nl> + / / Finish slack tracking for C . <nl> + for ( var i = 0 ; i < 1000 ; i + + ) new C ( ) ; <nl> + <nl> + f ( true ) ; <nl> + f ( true ) ; <nl> + f ( false ) ; <nl> + <nl> + % OptimizeFunctionOnNextCall ( f ) ; <nl> + <nl> + assertEquals ( 1 . 1 , f ( true ) ) ; <nl>
|
[ deoptimizer ] Make sure property arrays don ' t contain mutable heap numbers .
|
v8/v8
|
9eb92da6185868969c14e92c14cee21c4daee928
|
2017-11-09T12:02:47Z
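The V8 patch states its constraint in the added comment: after deoptimization every materialized field is treated as a plain tagged value, so any mutable heap number left in a property array has to be re-mapped to the ordinary heap-number map before the object is handed back. Stripped of V8's object model, the normalization step amounts to the small pass below; Slot and its flag are invented for the sketch.

```cpp
#include <vector>

// Invented types for the sketch: a tagged slot is either a plain number or a
// mutable box that optimized code was allowed to write through in place.
struct Slot {
    bool is_mutable_box;
    double value;
};

// Mirrors the shape of EnsurePropertiesGeneralized in the diff: after
// materialization no slot may keep the mutable flavour, because later code
// treats every field as an ordinary tagged value.
void GeneralizeProperties(std::vector<Slot>& properties) {
    for (Slot& slot : properties) {
        if (slot.is_mutable_box) {
            slot.is_mutable_box = false;  // "re-map" the box to the plain kind
        }
    }
}

int main() {
    std::vector<Slot> props = {{false, 1.0}, {true, 1.1}};
    GeneralizeProperties(props);
    // Both slots are now plain; the second keeps its value 1.1 but loses the
    // writable box, which is what the deoptimizer needs before handing the
    // object back to unoptimized code.
}
```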
|
mmm a / filament / include / filament / Material . h <nl> ppp b / filament / include / filament / Material . h <nl> class UTILS_PUBLIC Material : public FilamentAPI { <nl> struct BuilderDetails ; <nl> <nl> public : <nl> - using Variable = filament : : Variable ; <nl> using BlendingMode = filament : : BlendingMode ; <nl> using Shading = filament : : Shading ; <nl> using Interpolation = filament : : Interpolation ; <nl> mmm a / libs / filabridge / include / filament / MaterialEnums . h <nl> ppp b / libs / filabridge / include / filament / MaterialEnums . h <nl> enum class VertexDomain : uint8_t { <nl> / / when adding more entries , make sure to update VERTEX_DOMAIN_COUNT <nl> } ; <nl> <nl> + static constexpr size_t POST_PROCESS_STAGES_COUNT = 4 ; <nl> + enum class PostProcessStage : uint8_t { <nl> + TONE_MAPPING_OPAQUE , / / Tone mapping post - process <nl> + TONE_MAPPING_TRANSLUCENT , / / Tone mapping post - process <nl> + ANTI_ALIASING_OPAQUE , / / Anti - aliasing stage <nl> + ANTI_ALIASING_TRANSLUCENT , / / Anti - aliasing stage <nl> + } ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> static constexpr size_t MATERIAL_VARIABLES_COUNT = 4 ; <nl> enum class Variable : uint8_t { <nl> CUSTOM0 , <nl> enum class Variable : uint8_t { <nl> / / when adding more variables , make sure to update MATERIAL_VARIABLES_COUNT <nl> } ; <nl> <nl> - <nl> static constexpr size_t MATERIAL_PROPERTIES_COUNT = 16 ; <nl> enum class Property : uint8_t { <nl> BASE_COLOR , / / float4 , all shading models <nl> enum class Property : uint8_t { <nl> / / when adding new Properties , make sure to update MATERIAL_PROPERTIES_COUNT <nl> } ; <nl> <nl> - static constexpr size_t POST_PROCESS_STAGES_COUNT = 4 ; <nl> - enum class PostProcessStage : uint8_t { <nl> - TONE_MAPPING_OPAQUE , / / Tone mapping post - process <nl> - TONE_MAPPING_TRANSLUCENT , / / Tone mapping post - process <nl> - ANTI_ALIASING_OPAQUE , / / Anti - aliasing stage <nl> - ANTI_ALIASING_TRANSLUCENT , / / Anti - aliasing stage <nl> - } ; <nl> <nl> } / / namespace filament <nl> <nl>
|
Minor MaterialEnum . h cleanup - - first step .
|
google/filament
|
bf93468c3f515a0a9f5964f141e1e6d5f0dab754
|
2019-03-01T16:43:01Z
|
mmm a / plugins / trace_api_plugin / store_provider . cpp <nl> ppp b / plugins / trace_api_plugin / store_provider . cpp <nl> namespace eosio : : trace_api { <nl> / / Only process compression if its configured AND there is a range of irreversible blocks which would not also <nl> / / be deleted <nl> if ( _minimum_uncompressed_irreversible_history_blocks & & <nl> - ( ! _minimum_irreversible_history_blocks | | * _minimum_uncompressed_irreversible_history_blocks < _minimum_irreversible_history_blocks ) ) <nl> + ( ! _minimum_irreversible_history_blocks | | * _minimum_uncompressed_irreversible_history_blocks < * _minimum_irreversible_history_blocks ) ) <nl> { <nl> process_irreversible_slice_range ( lib , * _minimum_uncompressed_irreversible_history_blocks , _last_compressed_slice , [ this , & log ] ( uint32_t slice_to_compress ) { <nl> fc : : cfile trace ; <nl> mmm a / plugins / trace_api_plugin / trace_api_plugin . cpp <nl> ppp b / plugins / trace_api_plugin / trace_api_plugin . cpp <nl> struct trace_api_common_impl { <nl> cfg_options ( " trace - minimum - irreversible - history - blocks " , boost : : program_options : : value < int32_t > ( ) - > default_value ( - 1 ) , <nl> " Number of blocks to ensure are kept past LIB for retrieval before \ " slice \ " files can be automatically removed . \ n " <nl> " A value of - 1 indicates that automatic removal of \ " slice \ " files will be turned off . " ) ; <nl> - cfg_options ( " trace - minimum - irreversible - uncompressed - history - blocks " , boost : : program_options : : value < int32_t > ( ) - > default_value ( - 1 ) , <nl> + cfg_options ( " trace - minimum - uncompressed - irreversible - history - blocks " , boost : : program_options : : value < int32_t > ( ) - > default_value ( - 1 ) , <nl> " Number of blocks to ensure are uncompressed past LIB . Compressed \ " slice \ " files are still accessible but may carry a performance loss on retrieval \ n " <nl> " A value of - 1 indicates that automatic compression of \ " slice \ " files will be turned off . " ) ; <nl> cfg_options ( " trace - compression - seek - points " , boost : : program_options : : value < uint32_t > ( ) - > default_value ( 512 ) , <nl> struct trace_api_common_impl { <nl> minimum_irreversible_history_blocks = blocks ; <nl> } <nl> <nl> - const int32_t uncompressed_blocks = options . at ( " trace - minimum - irreversible - uncompressed - history - blocks " ) . as < int32_t > ( ) ; <nl> + const int32_t uncompressed_blocks = options . at ( " trace - minimum - uncompressed - irreversiblehistory - blocks " ) . as < int32_t > ( ) ; <nl> EOS_ASSERT ( uncompressed_blocks > = - 1 , chain : : plugin_config_exception , <nl> - " \ " trace - minimum - irreversible - uncompressed - history - blocks \ " must be greater to or equal to - 1 . " ) ; <nl> + " \ " trace - minimum - uncompressed - irreversible - history - blocks \ " must be greater to or equal to - 1 . " ) ; <nl> <nl> if ( uncompressed_blocks > manual_slice_file_value ) { <nl> minimum_uncompressed_irreversible_history_blocks = uncompressed_blocks ; <nl>
|
rename option and fix bad compare
|
EOSIO/eos
|
cdf5e87ec08572f77a3ab47a9e2dec8d9f6b75a1
|
2020-03-17T21:18:09Z
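The EOSIO record fixes a missing dereference: the compression threshold was being compared against the optional wrapper itself instead of its stored value. With an optional type that converts implicitly to bool (fc::optional-style), that compiles and silently compares against 0 or 1. The sketch below reproduces the trap with a hand-rolled MaybeU32; it is not fc::optional, whose exact conversion rules may differ.

```cpp
#include <cstdint>
#include <iostream>

// Deliberately simplified optional-like wrapper (not fc::optional). The
// implicit operator bool is the trap: it lets a raw value be compared against
// the wrapper itself without a compile error.
struct MaybeU32 {
    bool set;
    uint32_t value;
    operator bool() const { return set; }         // "does it hold a value?"
    uint32_t operator*() const { return value; }  // the stored value
};

int main() {
    MaybeU32 keep_uncompressed{true, 100};
    MaybeU32 keep_total{true, 200};

    // Bad compare: the right-hand side converts to bool, so this is 100 < 1,
    // which is false no matter what was configured.
    bool wrong = *keep_uncompressed < keep_total;

    // Fixed compare, as in the patch: 100 < 200, which actually depends on
    // the configured values.
    bool right = *keep_uncompressed < *keep_total;

    std::cout << wrong << " " << right << "\n";  // prints "0 1"
}
```

The fix in the diff is exactly that one-character change: dereference the optional before comparing (the surrounding guard already ensures it holds a value).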
|
mmm a / modules / calib3d / src / stereosgbm . cpp <nl> ppp b / modules / calib3d / src / stereosgbm . cpp <nl> static void calcPixelCostBT ( const Mat & img1 , const Mat & img2 , int y , <nl> int D = maxD - minD , width1 = maxX1 - minX1 , width2 = maxX2 - minX2 ; <nl> const PixType * row1 = img1 . ptr < PixType > ( y ) , * row2 = img2 . ptr < PixType > ( y ) ; <nl> PixType * prow1 = buffer + width2 * 2 , * prow2 = prow1 + width * cn * 2 ; <nl> + # if CV_SIMD128 <nl> + bool useSIMD = checkHardwareSupport ( CV_CPU_SSE2 ) | | checkHardwareSupport ( CV_CPU_NEON ) ; <nl> + # endif <nl> <nl> tab + = tabOfs ; <nl> <nl> static void calcPixelCostBT ( const Mat & img1 , const Mat & img2 , int y , <nl> buffer - = minX2 ; <nl> cost - = minX1 * D + minD ; / / simplify the cost indices inside the loop <nl> <nl> - # if 1 <nl> for ( c = 0 ; c < cn * 2 ; c + + , prow1 + = width , prow2 + = width ) <nl> { <nl> int diff_scale = c < cn ? 0 : 2 ; <nl> static void calcPixelCostBT ( const Mat & img1 , const Mat & img2 , int y , <nl> int u1 = std : : max ( ul , ur ) ; u1 = std : : max ( u1 , u ) ; <nl> <nl> # if CV_SIMD128 <nl> - v_uint8x16 _u = v_setall_u8 ( ( uchar ) u ) , _u0 = v_setall_u8 ( ( uchar ) u0 ) ; <nl> - v_uint8x16 _u1 = v_setall_u8 ( ( uchar ) u1 ) ; <nl> - <nl> - for ( int d = minD ; d < maxD ; d + = 16 ) <nl> - { <nl> - v_uint8x16 _v = v_load ( prow2 + width - x - 1 + d ) ; <nl> - v_uint8x16 _v0 = v_load ( buffer + width - x - 1 + d ) ; <nl> - v_uint8x16 _v1 = v_load ( buffer + width - x - 1 + d + width2 ) ; <nl> - v_uint8x16 c0 = v_max ( _u - _v1 , _v0 - _u ) ; <nl> - v_uint8x16 c1 = v_max ( _v - _u1 , _u0 - _v ) ; <nl> - v_uint8x16 diff = v_min ( c0 , c1 ) ; <nl> - <nl> - v_int16x8 _c0 = v_load_aligned ( cost + x * D + d ) ; <nl> - v_int16x8 _c1 = v_load_aligned ( cost + x * D + d + 8 ) ; <nl> - <nl> - v_uint16x8 diff1 , diff2 ; <nl> - v_expand ( diff , diff1 , diff2 ) ; <nl> - v_store_aligned ( cost + x * D + d , _c0 + v_reinterpret_as_s16 ( diff1 > > diff_scale ) ) ; <nl> - v_store_aligned ( cost + x * D + d + 8 , _c1 + v_reinterpret_as_s16 ( diff2 > > diff_scale ) ) ; <nl> - } <nl> - # else <nl> - for ( int d = minD ; d < maxD ; d + + ) <nl> - { <nl> - int v = prow2 [ width - x - 1 + d ] ; <nl> - int v0 = buffer [ width - x - 1 + d ] ; <nl> - int v1 = buffer [ width - x - 1 + d + width2 ] ; <nl> - int c0 = std : : max ( 0 , u - v1 ) ; c0 = std : : max ( c0 , v0 - u ) ; <nl> - int c1 = std : : max ( 0 , v - u1 ) ; c1 = std : : max ( c1 , u0 - v ) ; <nl> - <nl> - cost [ x * D + d ] = ( CostType ) ( cost [ x * D + d ] + ( std : : min ( c0 , c1 ) > > diff_scale ) ) ; <nl> - } <nl> - # endif <nl> - } <nl> - } <nl> - # else <nl> - for ( c = 0 ; c < cn * 2 ; c + + , prow1 + = width , prow2 + = width ) <nl> - { <nl> - for ( x = minX1 ; x < maxX1 ; x + + ) <nl> - { <nl> - int u = prow1 [ x ] ; <nl> - # if CV_SSE2 <nl> if ( useSIMD ) <nl> { <nl> - __m128i _u = _mm_set1_epi8 ( u ) , z = _mm_setzero_si128 ( ) ; <nl> + v_uint8x16 _u = v_setall_u8 ( ( uchar ) u ) , _u0 = v_setall_u8 ( ( uchar ) u0 ) ; <nl> + v_uint8x16 _u1 = v_setall_u8 ( ( uchar ) u1 ) ; <nl> <nl> for ( int d = minD ; d < maxD ; d + = 16 ) <nl> { <nl> - __m128i _v = _mm_loadu_si128 ( ( const __m128i * ) ( prow2 + width - 1 - x + d ) ) ; <nl> - __m128i diff = _mm_adds_epu8 ( _mm_subs_epu8 ( _u , _v ) , _mm_subs_epu8 ( _v , _u ) ) ; <nl> - __m128i c0 = _mm_load_si128 ( ( __m128i * ) ( cost + x * D + d ) ) ; <nl> - __m128i c1 = _mm_load_si128 ( ( __m128i * ) ( cost + x * D + d + 8 ) ) ; <nl> - <nl> - _mm_store_si128 ( ( __m128i * ) ( cost 
+ x * D + d ) , _mm_adds_epi16 ( c0 , _mm_unpacklo_epi8 ( diff , z ) ) ) ; <nl> - _mm_store_si128 ( ( __m128i * ) ( cost + x * D + d + 8 ) , _mm_adds_epi16 ( c1 , _mm_unpackhi_epi8 ( diff , z ) ) ) ; <nl> + v_uint8x16 _v = v_load ( prow2 + width - x - 1 + d ) ; <nl> + v_uint8x16 _v0 = v_load ( buffer + width - x - 1 + d ) ; <nl> + v_uint8x16 _v1 = v_load ( buffer + width - x - 1 + d + width2 ) ; <nl> + v_uint8x16 c0 = v_max ( _u - _v1 , _v0 - _u ) ; <nl> + v_uint8x16 c1 = v_max ( _v - _u1 , _u0 - _v ) ; <nl> + v_uint8x16 diff = v_min ( c0 , c1 ) ; <nl> + <nl> + v_int16x8 _c0 = v_load_aligned ( cost + x * D + d ) ; <nl> + v_int16x8 _c1 = v_load_aligned ( cost + x * D + d + 8 ) ; <nl> + <nl> + v_uint16x8 diff1 , diff2 ; <nl> + v_expand ( diff , diff1 , diff2 ) ; <nl> + v_store_aligned ( cost + x * D + d , _c0 + v_reinterpret_as_s16 ( diff1 > > diff_scale ) ) ; <nl> + v_store_aligned ( cost + x * D + d + 8 , _c1 + v_reinterpret_as_s16 ( diff2 > > diff_scale ) ) ; <nl> } <nl> } <nl> else <nl> static void calcPixelCostBT ( const Mat & img1 , const Mat & img2 , int y , <nl> { <nl> for ( int d = minD ; d < maxD ; d + + ) <nl> { <nl> - int v = prow2 [ width - 1 - x + d ] ; <nl> - cost [ x * D + d ] = ( CostType ) ( cost [ x * D + d ] + ( CostType ) std : : abs ( u - v ) ) ; <nl> + int v = prow2 [ width - x - 1 + d ] ; <nl> + int v0 = buffer [ width - x - 1 + d ] ; <nl> + int v1 = buffer [ width - x - 1 + d + width2 ] ; <nl> + int c0 = std : : max ( 0 , u - v1 ) ; c0 = std : : max ( c0 , v0 - u ) ; <nl> + int c1 = std : : max ( 0 , v - u1 ) ; c1 = std : : max ( c1 , u0 - v ) ; <nl> + <nl> + cost [ x * D + d ] = ( CostType ) ( cost [ x * D + d ] + ( std : : min ( c0 , c1 ) > > diff_scale ) ) ; <nl> } <nl> } <nl> } <nl> } <nl> - # endif <nl> } <nl> <nl> <nl> static void computeDisparitySGBM ( const Mat & img1 , const Mat & img2 , <nl> Mat & disp1 , const StereoSGBMParams & params , <nl> Mat & buffer ) <nl> { <nl> - # if CV_SSE2 <nl> + # if CV_SIMD128 <nl> + / / maxDisparity is supposed to multiple of 16 , so we can forget doing else <nl> static const uchar LSBTab [ ] = <nl> { <nl> 0 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 3 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 4 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 3 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , <nl> static void computeDisparitySGBM ( const Mat & img1 , const Mat & img2 , <nl> 6 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 3 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 4 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 3 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , <nl> 5 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 3 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 4 , 0 , 1 , 0 , 2 , 0 , 1 , 0 , 3 , 0 , 1 , 0 , 2 , 0 , 1 , 0 <nl> } ; <nl> + static const v_uint16x8 v_LSB = v_uint16x8 ( 0x1 , 0x2 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 ) ; <nl> <nl> - volatile bool useSIMD = checkHardwareSupport ( CV_CPU_SSE2 ) ; <nl> + bool useSIMD = checkHardwareSupport ( CV_CPU_SSE2 ) | | checkHardwareSupport ( CV_CPU_NEON ) ; <nl> # endif <nl> <nl> const int ALIGN = 16 ; <nl> static void computeDisparitySGBM ( const Mat & img1 , const Mat & img2 , <nl> const CostType * pixAdd = pixDiff + std : : min ( x + SW2 * D , ( width1 - 1 ) * D ) ; <nl> const CostType * pixSub = pixDiff + std : : max ( x - ( SW2 + 1 ) * D , 0 ) ; <nl> <nl> - # if CV_SSE2 <nl> + # if CV_SIMD128 <nl> if ( useSIMD ) <nl> { <nl> for ( d = 0 ; d < D ; d + = 8 ) <nl> { <nl> - __m128i hv = _mm_load_si128 ( ( const __m128i * ) ( hsumAdd + x - D + d ) ) ; <nl> - __m128i Cx = _mm_load_si128 ( ( __m128i * ) ( Cprev + x + d ) ) ; <nl> - hv = _mm_adds_epi16 ( _mm_subs_epi16 ( hv , <nl> - _mm_load_si128 ( ( const __m128i * ) ( pixSub + d ) 
) ) , <nl> - _mm_load_si128 ( ( const __m128i * ) ( pixAdd + d ) ) ) ; <nl> - Cx = _mm_adds_epi16 ( _mm_subs_epi16 ( Cx , <nl> - _mm_load_si128 ( ( const __m128i * ) ( hsumSub + x + d ) ) ) , <nl> - hv ) ; <nl> - _mm_store_si128 ( ( __m128i * ) ( hsumAdd + x + d ) , hv ) ; <nl> - _mm_store_si128 ( ( __m128i * ) ( C + x + d ) , Cx ) ; <nl> + v_int16x8 hv = v_load ( hsumAdd + x - D + d ) ; <nl> + v_int16x8 Cx = v_load ( Cprev + x + d ) ; <nl> + v_int16x8 psub = v_load ( pixSub + d ) ; <nl> + v_int16x8 padd = v_load ( pixAdd + d ) ; <nl> + hv = ( hv - psub + padd ) ; <nl> + psub = v_load ( hsumSub + x + d ) ; <nl> + Cx = Cx - psub + hv ; <nl> + v_store ( hsumAdd + x + d , hv ) ; <nl> + v_store ( C + x + d , Cx ) ; <nl> } <nl> } <nl> else <nl> static void computeDisparitySGBM ( const Mat & img1 , const Mat & img2 , <nl> const CostType * Cp = C + x * D ; <nl> CostType * Sp = S + x * D ; <nl> <nl> - # if CV_SSE2 <nl> + # if CV_SIMD128 <nl> if ( useSIMD ) <nl> { <nl> - __m128i _P1 = _mm_set1_epi16 ( ( short ) P1 ) ; <nl> + v_int16x8 _P1 = v_setall_s16 ( ( short ) P1 ) ; <nl> <nl> - __m128i _delta0 = _mm_set1_epi16 ( ( short ) delta0 ) ; <nl> - __m128i _delta1 = _mm_set1_epi16 ( ( short ) delta1 ) ; <nl> - __m128i _delta2 = _mm_set1_epi16 ( ( short ) delta2 ) ; <nl> - __m128i _delta3 = _mm_set1_epi16 ( ( short ) delta3 ) ; <nl> - __m128i _minL0 = _mm_set1_epi16 ( ( short ) MAX_COST ) ; <nl> + v_int16x8 _delta0 = v_setall_s16 ( ( short ) delta0 ) ; <nl> + v_int16x8 _delta1 = v_setall_s16 ( ( short ) delta1 ) ; <nl> + v_int16x8 _delta2 = v_setall_s16 ( ( short ) delta2 ) ; <nl> + v_int16x8 _delta3 = v_setall_s16 ( ( short ) delta3 ) ; <nl> + v_int16x8 _minL0 = v_setall_s16 ( ( short ) MAX_COST ) ; <nl> <nl> for ( d = 0 ; d < D ; d + = 8 ) <nl> { <nl> - __m128i Cpd = _mm_load_si128 ( ( const __m128i * ) ( Cp + d ) ) ; <nl> - __m128i L0 , L1 , L2 , L3 ; <nl> + v_int16x8 Cpd = v_load ( Cp + d ) ; <nl> + v_int16x8 L0 , L1 , L2 , L3 ; <nl> <nl> - L0 = _mm_load_si128 ( ( const __m128i * ) ( Lr_p0 + d ) ) ; <nl> - L1 = _mm_load_si128 ( ( const __m128i * ) ( Lr_p1 + d ) ) ; <nl> - L2 = _mm_load_si128 ( ( const __m128i * ) ( Lr_p2 + d ) ) ; <nl> - L3 = _mm_load_si128 ( ( const __m128i * ) ( Lr_p3 + d ) ) ; <nl> + L0 = v_load ( Lr_p0 + d ) ; <nl> + L1 = v_load ( Lr_p1 + d ) ; <nl> + L2 = v_load ( Lr_p2 + d ) ; <nl> + L3 = v_load ( Lr_p3 + d ) ; <nl> <nl> - L0 = _mm_min_epi16 ( L0 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p0 + d - 1 ) ) , _P1 ) ) ; <nl> - L0 = _mm_min_epi16 ( L0 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p0 + d + 1 ) ) , _P1 ) ) ; <nl> + L0 = v_min ( L0 , ( v_load ( Lr_p0 + d - 1 ) + _P1 ) ) ; <nl> + L0 = v_min ( L0 , ( v_load ( Lr_p0 + d + 1 ) + _P1 ) ) ; <nl> <nl> - L1 = _mm_min_epi16 ( L1 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p1 + d - 1 ) ) , _P1 ) ) ; <nl> - L1 = _mm_min_epi16 ( L1 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p1 + d + 1 ) ) , _P1 ) ) ; <nl> + L1 = v_min ( L1 , ( v_load ( Lr_p1 + d - 1 ) + _P1 ) ) ; <nl> + L1 = v_min ( L1 , ( v_load ( Lr_p1 + d + 1 ) + _P1 ) ) ; <nl> <nl> - L2 = _mm_min_epi16 ( L2 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p2 + d - 1 ) ) , _P1 ) ) ; <nl> - L2 = _mm_min_epi16 ( L2 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p2 + d + 1 ) ) , _P1 ) ) ; <nl> + L2 = v_min ( L2 , ( v_load ( Lr_p2 + d - 1 ) + _P1 ) ) ; <nl> + L2 = v_min ( L2 , ( v_load ( Lr_p2 + d + 1 ) + _P1 ) ) ; <nl> <nl> - L3 = _mm_min_epi16 ( L3 , _mm_adds_epi16 ( 
_mm_loadu_si128 ( ( const __m128i * ) ( Lr_p3 + d - 1 ) ) , _P1 ) ) ; <nl> - L3 = _mm_min_epi16 ( L3 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p3 + d + 1 ) ) , _P1 ) ) ; <nl> + L3 = v_min ( L3 , ( v_load ( Lr_p3 + d - 1 ) + _P1 ) ) ; <nl> + L3 = v_min ( L3 , ( v_load ( Lr_p3 + d + 1 ) + _P1 ) ) ; <nl> <nl> - L0 = _mm_min_epi16 ( L0 , _delta0 ) ; <nl> - L0 = _mm_adds_epi16 ( _mm_subs_epi16 ( L0 , _delta0 ) , Cpd ) ; <nl> + L0 = v_min ( L0 , _delta0 ) ; <nl> + L0 = ( ( L0 - _delta0 ) + Cpd ) ; <nl> <nl> - L1 = _mm_min_epi16 ( L1 , _delta1 ) ; <nl> - L1 = _mm_adds_epi16 ( _mm_subs_epi16 ( L1 , _delta1 ) , Cpd ) ; <nl> + L1 = v_min ( L1 , _delta1 ) ; <nl> + L1 = ( ( L1 - _delta1 ) + Cpd ) ; <nl> <nl> - L2 = _mm_min_epi16 ( L2 , _delta2 ) ; <nl> - L2 = _mm_adds_epi16 ( _mm_subs_epi16 ( L2 , _delta2 ) , Cpd ) ; <nl> + L2 = v_min ( L2 , _delta2 ) ; <nl> + L2 = ( ( L2 - _delta2 ) + Cpd ) ; <nl> <nl> - L3 = _mm_min_epi16 ( L3 , _delta3 ) ; <nl> - L3 = _mm_adds_epi16 ( _mm_subs_epi16 ( L3 , _delta3 ) , Cpd ) ; <nl> + L3 = v_min ( L3 , _delta3 ) ; <nl> + L3 = ( ( L3 - _delta3 ) + Cpd ) ; <nl> <nl> - _mm_store_si128 ( ( __m128i * ) ( Lr_p + d ) , L0 ) ; <nl> - _mm_store_si128 ( ( __m128i * ) ( Lr_p + d + D2 ) , L1 ) ; <nl> - _mm_store_si128 ( ( __m128i * ) ( Lr_p + d + D2 * 2 ) , L2 ) ; <nl> - _mm_store_si128 ( ( __m128i * ) ( Lr_p + d + D2 * 3 ) , L3 ) ; <nl> + v_store ( Lr_p + d , L0 ) ; <nl> + v_store ( Lr_p + d + D2 , L1 ) ; <nl> + v_store ( Lr_p + d + D2 * 2 , L2 ) ; <nl> + v_store ( Lr_p + d + D2 * 3 , L3 ) ; <nl> <nl> - __m128i t0 = _mm_min_epi16 ( _mm_unpacklo_epi16 ( L0 , L2 ) , _mm_unpackhi_epi16 ( L0 , L2 ) ) ; <nl> - __m128i t1 = _mm_min_epi16 ( _mm_unpacklo_epi16 ( L1 , L3 ) , _mm_unpackhi_epi16 ( L1 , L3 ) ) ; <nl> - t0 = _mm_min_epi16 ( _mm_unpacklo_epi16 ( t0 , t1 ) , _mm_unpackhi_epi16 ( t0 , t1 ) ) ; <nl> - _minL0 = _mm_min_epi16 ( _minL0 , t0 ) ; <nl> + / / Get minimum from in L0 - L3 <nl> + v_int16x8 t02L , t02H , t13L , t13H , t0123L , t0123H ; <nl> + v_zip ( L0 , L2 , t02L , t02H ) ; / / L0 [ 0 ] L2 [ 0 ] L0 [ 1 ] L2 [ 1 ] . . . <nl> + v_zip ( L1 , L3 , t13L , t13H ) ; / / L1 [ 0 ] L3 [ 0 ] L1 [ 1 ] L3 [ 1 ] . . . <nl> + v_int16x8 t02 = v_min ( t02L , t02H ) ; / / L0 [ i ] L2 [ i ] L0 [ i ] L2 [ i ] . . . <nl> + v_int16x8 t13 = v_min ( t13L , t13H ) ; / / L1 [ i ] L3 [ i ] L1 [ i ] L3 [ i ] . . . <nl> + v_zip ( t02 , t13 , t0123L , t0123H ) ; / / L0 [ i ] L1 [ i ] L2 [ i ] L3 [ i ] . . . 
<nl> + v_int16x8 t0 = v_min ( t0123L , t0123H ) ; <nl> + _minL0 = v_min ( _minL0 , t0 ) ; <nl> <nl> - __m128i Sval = _mm_load_si128 ( ( const __m128i * ) ( Sp + d ) ) ; <nl> + v_int16x8 Sval = v_load ( Sp + d ) ; <nl> <nl> - L0 = _mm_adds_epi16 ( L0 , L1 ) ; <nl> - L2 = _mm_adds_epi16 ( L2 , L3 ) ; <nl> - Sval = _mm_adds_epi16 ( Sval , L0 ) ; <nl> - Sval = _mm_adds_epi16 ( Sval , L2 ) ; <nl> + L0 = L0 + L1 ; <nl> + L2 = L2 + L3 ; <nl> + Sval = Sval + L0 ; <nl> + Sval = Sval + L2 ; <nl> <nl> - _mm_store_si128 ( ( __m128i * ) ( Sp + d ) , Sval ) ; <nl> + v_store ( Sp + d , Sval ) ; <nl> } <nl> <nl> - _minL0 = _mm_min_epi16 ( _minL0 , _mm_srli_si128 ( _minL0 , 8 ) ) ; <nl> - _mm_storel_epi64 ( ( __m128i * ) & minLr [ 0 ] [ xm ] , _minL0 ) ; <nl> + v_int32x4 minL , minH ; <nl> + v_expand ( _minL0 , minL , minH ) ; <nl> + v_pack_store ( & minLr [ 0 ] [ xm ] , v_min ( minL , minH ) ) ; <nl> } <nl> else <nl> # endif <nl> static void computeDisparitySGBM ( const Mat & img1 , const Mat & img2 , <nl> <nl> const CostType * Cp = C + x * D ; <nl> <nl> - # if CV_SSE2 <nl> + # if CV_SIMD128 <nl> if ( useSIMD ) <nl> { <nl> - __m128i _P1 = _mm_set1_epi16 ( ( short ) P1 ) ; <nl> - __m128i _delta0 = _mm_set1_epi16 ( ( short ) delta0 ) ; <nl> + v_int16x8 _P1 = v_setall_s16 ( ( short ) P1 ) ; <nl> + v_int16x8 _delta0 = v_setall_s16 ( ( short ) delta0 ) ; <nl> <nl> - __m128i _minL0 = _mm_set1_epi16 ( ( short ) minL0 ) ; <nl> - __m128i _minS = _mm_set1_epi16 ( MAX_COST ) , _bestDisp = _mm_set1_epi16 ( - 1 ) ; <nl> - __m128i _d8 = _mm_setr_epi16 ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) , _8 = _mm_set1_epi16 ( 8 ) ; <nl> + v_int16x8 _minL0 = v_setall_s16 ( ( short ) minL0 ) ; <nl> + v_int16x8 _minS = v_setall_s16 ( MAX_COST ) , _bestDisp = v_setall_s16 ( - 1 ) ; <nl> + v_int16x8 _d8 = v_int16x8 ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) , _8 = v_setall_s16 ( 8 ) ; <nl> <nl> for ( d = 0 ; d < D ; d + = 8 ) <nl> { <nl> - __m128i Cpd = _mm_load_si128 ( ( const __m128i * ) ( Cp + d ) ) , L0 ; <nl> - <nl> - L0 = _mm_load_si128 ( ( const __m128i * ) ( Lr_p0 + d ) ) ; <nl> - L0 = _mm_min_epi16 ( L0 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p0 + d - 1 ) ) , _P1 ) ) ; <nl> - L0 = _mm_min_epi16 ( L0 , _mm_adds_epi16 ( _mm_loadu_si128 ( ( const __m128i * ) ( Lr_p0 + d + 1 ) ) , _P1 ) ) ; <nl> - L0 = _mm_min_epi16 ( L0 , _delta0 ) ; <nl> - L0 = _mm_adds_epi16 ( _mm_subs_epi16 ( L0 , _delta0 ) , Cpd ) ; <nl> - <nl> - _mm_store_si128 ( ( __m128i * ) ( Lr_p + d ) , L0 ) ; <nl> - _minL0 = _mm_min_epi16 ( _minL0 , L0 ) ; <nl> - L0 = _mm_adds_epi16 ( L0 , * ( __m128i * ) ( Sp + d ) ) ; <nl> - _mm_store_si128 ( ( __m128i * ) ( Sp + d ) , L0 ) ; <nl> - <nl> - __m128i mask = _mm_cmpgt_epi16 ( _minS , L0 ) ; <nl> - _minS = _mm_min_epi16 ( _minS , L0 ) ; <nl> - _bestDisp = _mm_xor_si128 ( _bestDisp , _mm_and_si128 ( _mm_xor_si128 ( _bestDisp , _d8 ) , mask ) ) ; <nl> - _d8 = _mm_adds_epi16 ( _d8 , _8 ) ; <nl> + v_int16x8 Cpd = v_load ( Cp + d ) ; <nl> + v_int16x8 L0 = v_load ( Lr_p0 + d ) ; <nl> + <nl> + L0 = v_min ( L0 , v_load ( Lr_p0 + d - 1 ) + _P1 ) ; <nl> + L0 = v_min ( L0 , v_load ( Lr_p0 + d + 1 ) + _P1 ) ; <nl> + L0 = v_min ( L0 , _delta0 ) ; <nl> + L0 = L0 - _delta0 + Cpd ; <nl> + <nl> + v_store ( Lr_p + d , L0 ) ; <nl> + _minL0 = v_min ( _minL0 , L0 ) ; <nl> + L0 = L0 + v_load ( Sp + d ) ; <nl> + v_store ( Sp + d , L0 ) ; <nl> + <nl> + v_int16x8 mask = _minS > L0 ; <nl> + _minS = v_min ( _minS , L0 ) ; <nl> + _bestDisp = _bestDisp ^ ( ( _bestDisp ^ _d8 ) & mask ) ; <nl> + _d8 + = _8 ; <nl> } <nl> + short bestDispBuf [ 8 ] 
; <nl> + v_store ( bestDispBuf , _bestDisp ) ; <nl> <nl> - short CV_DECL_ALIGNED ( 16 ) bestDispBuf [ 8 ] ; <nl> - _mm_store_si128 ( ( __m128i * ) bestDispBuf , _bestDisp ) ; <nl> + v_int32x4 min32L , min32H ; <nl> + v_expand ( _minL0 , min32L , min32H ) ; <nl> + minLr [ 0 ] [ xm ] = ( CostType ) std : : min ( v_reduce_min ( min32L ) , v_reduce_min ( min32H ) ) ; <nl> <nl> - _minL0 = _mm_min_epi16 ( _minL0 , _mm_srli_si128 ( _minL0 , 8 ) ) ; <nl> - _minL0 = _mm_min_epi16 ( _minL0 , _mm_srli_si128 ( _minL0 , 4 ) ) ; <nl> - _minL0 = _mm_min_epi16 ( _minL0 , _mm_srli_si128 ( _minL0 , 2 ) ) ; <nl> + v_expand ( _minS , min32L , min32H ) ; <nl> + minS = std : : min ( v_reduce_min ( min32L ) , v_reduce_min ( min32H ) ) ; <nl> <nl> - __m128i qS = _mm_min_epi16 ( _minS , _mm_srli_si128 ( _minS , 8 ) ) ; <nl> - qS = _mm_min_epi16 ( qS , _mm_srli_si128 ( qS , 4 ) ) ; <nl> - qS = _mm_min_epi16 ( qS , _mm_srli_si128 ( qS , 2 ) ) ; <nl> + v_int16x8 ss = v_setall_s16 ( ( short ) minS ) ; <nl> + v_uint16x8 minMask = v_reinterpret_as_u16 ( ss = = _minS ) ; <nl> + v_uint16x8 minBit = minMask & v_LSB ; <nl> <nl> - minLr [ 0 ] [ xm ] = ( CostType ) _mm_cvtsi128_si32 ( _minL0 ) ; <nl> - minS = ( CostType ) _mm_cvtsi128_si32 ( qS ) ; <nl> - <nl> - qS = _mm_shuffle_epi32 ( _mm_unpacklo_epi16 ( qS , qS ) , 0 ) ; <nl> - qS = _mm_cmpeq_epi16 ( _minS , qS ) ; <nl> - int idx = _mm_movemask_epi8 ( _mm_packs_epi16 ( qS , qS ) ) & 255 ; <nl> + v_uint32x4 minBitL , minBitH ; <nl> + v_expand ( minBit , minBitL , minBitH ) ; <nl> <nl> + int idx = v_reduce_sum ( minBitL ) + v_reduce_sum ( minBitH ) ; <nl> bestDisp = bestDispBuf [ LSBTab [ idx ] ] ; <nl> } <nl> else <nl> static void computeDisparitySGBM ( const Mat & img1 , const Mat & img2 , <nl> } <nl> else <nl> { <nl> - # if CV_SSE2 <nl> + # if CV_SIMD128 <nl> if ( useSIMD ) <nl> { <nl> - __m128i _minS = _mm_set1_epi16 ( MAX_COST ) , _bestDisp = _mm_set1_epi16 ( - 1 ) ; <nl> - __m128i _d8 = _mm_setr_epi16 ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) , _8 = _mm_set1_epi16 ( 8 ) ; <nl> - <nl> - for ( d = 0 ; d < D ; d + = 8 ) <nl> - { <nl> - __m128i L0 = _mm_load_si128 ( ( const __m128i * ) ( Sp + d ) ) ; <nl> - __m128i mask = _mm_cmplt_epi16 ( L0 , _minS ) ; <nl> - _minS = _mm_min_epi16 ( L0 , _minS ) ; <nl> - _bestDisp = _mm_xor_si128 ( _bestDisp , _mm_and_si128 ( _mm_xor_si128 ( _bestDisp , _d8 ) , mask ) ) ; <nl> - _d8 = _mm_adds_epi16 ( _d8 , _8 ) ; <nl> - } <nl> - short CV_DECL_ALIGNED ( 16 ) bestDispBuf [ 8 ] ; <nl> - _mm_store_si128 ( ( __m128i * ) bestDispBuf , _bestDisp ) ; <nl> - short CV_DECL_ALIGNED ( 16 ) minSBuf [ 8 ] ; <nl> - _mm_store_si128 ( ( __m128i * ) minSBuf , _minS ) ; <nl> - <nl> - for ( int i = 0 ; i < 8 ; i + + ) <nl> - { <nl> - int Sval = minSBuf [ i ] ; <nl> - if ( Sval < = minS ) <nl> - { <nl> - if ( ( Sval < minS ) | | ( bestDispBuf [ i ] < bestDisp ) ) <nl> - { <nl> - bestDisp = bestDispBuf [ i ] ; <nl> - } <nl> - minS = Sval ; <nl> - } <nl> - } <nl> + v_int16x8 _minS = v_setall_s16 ( MAX_COST ) , _bestDisp = v_setall_s16 ( - 1 ) ; <nl> + v_int16x8 _d8 = v_int16x8 ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) , _8 = v_setall_s16 ( 8 ) ; <nl> + <nl> + for ( d = 0 ; d < D ; d + = 8 ) <nl> + { <nl> + v_int16x8 L0 = v_load ( Sp + d ) ; <nl> + v_int16x8 mask = L0 < _minS ; <nl> + _minS = v_min ( L0 , _minS ) ; <nl> + _bestDisp = _bestDisp ^ ( ( _bestDisp ^ _d8 ) & mask ) ; <nl> + _d8 = _d8 + _8 ; <nl> + } <nl> + v_int32x4 _d0 , _d1 ; <nl> + v_expand ( _minS , _d0 , _d1 ) ; <nl> + minS = ( int ) std : : min ( v_reduce_min ( _d0 ) , v_reduce_min ( _d1 ) ) ; <nl> + 
v_int16x8 v_mask = v_setall_s16 ( ( short ) minS ) = = _minS ; <nl> + <nl> + _bestDisp = ( _bestDisp & v_mask ) | ( v_setall_s16 ( SHRT_MAX ) & ~ v_mask ) ; <nl> + v_expand ( _bestDisp , _d0 , _d1 ) ; <nl> + bestDisp = ( int ) std : : min ( v_reduce_min ( _d0 ) , v_reduce_min ( _d1 ) ) ; <nl> } <nl> else <nl> # endif <nl> { <nl> - for ( d = 0 ; d < D ; d + + ) <nl> - { <nl> - int Sval = Sp [ d ] ; <nl> - if ( Sval < minS ) <nl> - { <nl> - minS = Sval ; <nl> - bestDisp = d ; <nl> - } <nl> - } <nl> + for ( d = 0 ; d < D ; d + + ) <nl> + { <nl> + int Sval = Sp [ d ] ; <nl> + if ( Sval < minS ) <nl> + { <nl> + minS = Sval ; <nl> + bestDisp = d ; <nl> + } <nl> + } <nl> } <nl> } <nl> <nl> struct SGBM3WayMainLoop : public ParallelLoopBody <nl> int costBufSize , hsumBufNRows ; <nl> int TAB_OFS , ftzero ; <nl> <nl> + # if CV_SIMD128 <nl> + bool useSIMD ; <nl> + # endif <nl> + <nl> PixType * clipTab ; <nl> <nl> SGBM3WayMainLoop ( Mat * _buffers , const Mat & _img1 , const Mat & _img2 , Mat * _dst_disp , const StereoSGBMParams & params , PixType * _clipTab , int _nstripes , int _stripe_overlap ) ; <nl> buffers ( _buffers ) , img1 ( & _img1 ) , img2 ( & _img2 ) , dst_disp ( _dst_disp ) , clipTab ( _cli <nl> hsumBufNRows = SH2 * 2 + 2 ; <nl> TAB_OFS = 256 * 4 ; <nl> ftzero = std : : max ( params . preFilterCap , 15 ) | 1 ; <nl> + <nl> + # if CV_SIMD128 <nl> + useSIMD = checkHardwareSupport ( CV_CPU_SSE2 ) | | checkHardwareSupport ( CV_CPU_NEON ) ; <nl> + # endif <nl> } <nl> <nl> void getBufferPointers ( Mat & buffer , int width , int width1 , int D , int num_ch , int SH2 , int P2 , <nl> void SGBM3WayMainLoop : : getRawMatchingCost ( CostType * C , / / target cost - volume row <nl> const CostType * pixSub = pixDiff + std : : max ( x - ( SW2 + 1 ) * D , 0 ) ; <nl> <nl> # if CV_SIMD128 <nl> - v_int16x8 hv_reg ; <nl> - for ( d = 0 ; d < D ; d + = 8 ) <nl> + if ( useSIMD ) <nl> { <nl> - hv_reg = v_load_aligned ( hsumAdd + x - D + d ) + ( v_load_aligned ( pixAdd + d ) - v_load_aligned ( pixSub + d ) ) ; <nl> - v_store_aligned ( hsumAdd + x + d , hv_reg ) ; <nl> - v_store_aligned ( C + x + d , v_load_aligned ( C + x + d ) + ( hv_reg - v_load_aligned ( hsumSub + x + d ) ) ) ; <nl> + v_int16x8 hv_reg ; <nl> + for ( d = 0 ; d < D ; d + = 8 ) <nl> + { <nl> + hv_reg = v_load_aligned ( hsumAdd + x - D + d ) + ( v_load_aligned ( pixAdd + d ) - v_load_aligned ( pixSub + d ) ) ; <nl> + v_store_aligned ( hsumAdd + x + d , hv_reg ) ; <nl> + v_store_aligned ( C + x + d , v_load_aligned ( C + x + d ) + ( hv_reg - v_load_aligned ( hsumSub + x + d ) ) ) ; <nl> + } <nl> } <nl> - # else <nl> - for ( d = 0 ; d < D ; d + + ) <nl> + else <nl> + # endif <nl> { <nl> - int hv = hsumAdd [ x + d ] = ( CostType ) ( hsumAdd [ x - D + d ] + pixAdd [ d ] - pixSub [ d ] ) ; <nl> - C [ x + d ] = ( CostType ) ( C [ x + d ] + hv - hsumSub [ x + d ] ) ; <nl> + for ( d = 0 ; d < D ; d + + ) <nl> + { <nl> + int hv = hsumAdd [ x + d ] = ( CostType ) ( hsumAdd [ x - D + d ] + pixAdd [ d ] - pixSub [ d ] ) ; <nl> + C [ x + d ] = ( CostType ) ( C [ x + d ] + hv - hsumSub [ x + d ] ) ; <nl> + } <nl> } <nl> - # endif <nl> } <nl> } <nl> else <nl> void SGBM3WayMainLoop : : getRawMatchingCost ( CostType * C , / / target cost - volume row <nl> <nl> # if CV_SIMD128 <nl> / / define some additional reduce operations : <nl> - inline short min ( const v_int16x8 & a ) <nl> + inline short min_pos ( const v_int16x8 & val , const v_int16x8 & pos , const short min_val ) <nl> { <nl> - short CV_DECL_ALIGNED ( 16 ) buf [ 8 ] ; <nl> - v_store_aligned ( buf , a ) ; 
<nl> - short s0 = std : : min ( buf [ 0 ] , buf [ 1 ] ) ; <nl> - short s1 = std : : min ( buf [ 2 ] , buf [ 3 ] ) ; <nl> - short s2 = std : : min ( buf [ 4 ] , buf [ 5 ] ) ; <nl> - short s3 = std : : min ( buf [ 6 ] , buf [ 7 ] ) ; <nl> - return std : : min ( std : : min ( s0 , s1 ) , std : : min ( s2 , s3 ) ) ; <nl> - } <nl> + v_int16x8 v_min = v_setall_s16 ( min_val ) ; <nl> + v_int16x8 v_mask = v_min = = val ; <nl> + v_int16x8 v_pos = ( pos & v_mask ) | ( v_setall_s16 ( SHRT_MAX ) & ~ v_mask ) ; <nl> <nl> - inline short min_pos ( const v_int16x8 & val , const v_int16x8 & pos ) <nl> - { <nl> - short CV_DECL_ALIGNED ( 16 ) val_buf [ 8 ] ; <nl> - v_store_aligned ( val_buf , val ) ; <nl> - short CV_DECL_ALIGNED ( 16 ) pos_buf [ 8 ] ; <nl> - v_store_aligned ( pos_buf , pos ) ; <nl> - short res_pos = 0 ; <nl> - short min_val = SHRT_MAX ; <nl> - if ( val_buf [ 0 ] < min_val ) { min_val = val_buf [ 0 ] ; res_pos = pos_buf [ 0 ] ; } <nl> - if ( val_buf [ 1 ] < min_val ) { min_val = val_buf [ 1 ] ; res_pos = pos_buf [ 1 ] ; } <nl> - if ( val_buf [ 2 ] < min_val ) { min_val = val_buf [ 2 ] ; res_pos = pos_buf [ 2 ] ; } <nl> - if ( val_buf [ 3 ] < min_val ) { min_val = val_buf [ 3 ] ; res_pos = pos_buf [ 3 ] ; } <nl> - if ( val_buf [ 4 ] < min_val ) { min_val = val_buf [ 4 ] ; res_pos = pos_buf [ 4 ] ; } <nl> - if ( val_buf [ 5 ] < min_val ) { min_val = val_buf [ 5 ] ; res_pos = pos_buf [ 5 ] ; } <nl> - if ( val_buf [ 6 ] < min_val ) { min_val = val_buf [ 6 ] ; res_pos = pos_buf [ 6 ] ; } <nl> - if ( val_buf [ 7 ] < min_val ) { min_val = val_buf [ 7 ] ; res_pos = pos_buf [ 7 ] ; } <nl> - return res_pos ; <nl> + return v_reduce_min ( v_pos ) ; <nl> } <nl> # endif <nl> <nl> inline void accumulateCostsLeftTop ( CostType * leftBuf , CostType * leftBuf_prev , Co <nl> CostType & leftMinCost , CostType & topMinCost , int D , int P1 , int P2 ) <nl> { <nl> # if CV_SIMD128 <nl> - v_int16x8 P1_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( P1 ) ) ; <nl> + if ( checkHardwareSupport ( CV_CPU_SSE2 ) | | checkHardwareSupport ( CV_CPU_NEON ) ) <nl> + { <nl> + v_int16x8 P1_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( P1 ) ) ; <nl> <nl> - v_int16x8 leftMinCostP2_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( leftMinCost + P2 ) ) ; <nl> - v_int16x8 leftMinCost_new_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> - v_int16x8 src0_leftBuf = v_setall_s16 ( SHRT_MAX ) ; <nl> - v_int16x8 src1_leftBuf = v_load_aligned ( leftBuf_prev ) ; <nl> + v_int16x8 leftMinCostP2_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( leftMinCost + P2 ) ) ; <nl> + v_int16x8 leftMinCost_new_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> + v_int16x8 src0_leftBuf = v_setall_s16 ( SHRT_MAX ) ; <nl> + v_int16x8 src1_leftBuf = v_load_aligned ( leftBuf_prev ) ; <nl> <nl> - v_int16x8 topMinCostP2_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( topMinCost + P2 ) ) ; <nl> - v_int16x8 topMinCost_new_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> - v_int16x8 src0_topBuf = v_setall_s16 ( SHRT_MAX ) ; <nl> - v_int16x8 src1_topBuf = v_load_aligned ( topBuf ) ; <nl> + v_int16x8 topMinCostP2_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( topMinCost + P2 ) ) ; <nl> + v_int16x8 topMinCost_new_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> + v_int16x8 src0_topBuf = v_setall_s16 ( SHRT_MAX ) ; <nl> + v_int16x8 src1_topBuf = v_load_aligned ( topBuf ) ; <nl> <nl> - v_int16x8 src2 ; <nl> - v_int16x8 src_shifted_left , src_shifted_right ; <nl> - v_int16x8 res ; <nl> + v_int16x8 src2 ; <nl> + v_int16x8 src_shifted_left , src_shifted_right ; 
<nl> + v_int16x8 res ; <nl> <nl> - for ( int i = 0 ; i < D - 8 ; i + = 8 ) <nl> - { <nl> - / / process leftBuf : <nl> - / / lookahead load : <nl> - src2 = v_load_aligned ( leftBuf_prev + i + 8 ) ; <nl> + for ( int i = 0 ; i < D - 8 ; i + = 8 ) <nl> + { <nl> + / / process leftBuf : <nl> + / / lookahead load : <nl> + src2 = v_load_aligned ( leftBuf_prev + i + 8 ) ; <nl> + <nl> + / / get shifted versions of the current block and add P1 : <nl> + src_shifted_left = v_extract < 7 > ( src0_leftBuf , src1_leftBuf ) + P1_reg ; <nl> + src_shifted_right = v_extract < 1 > ( src1_leftBuf , src2 ) + P1_reg ; <nl> + <nl> + / / process and save current block : <nl> + res = v_load_aligned ( costs + i ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_leftBuf , leftMinCostP2_reg ) ) - leftMinCostP2_reg ) ; <nl> + leftMinCost_new_reg = v_min ( leftMinCost_new_reg , res ) ; <nl> + v_store_aligned ( leftBuf + i , res ) ; <nl> + <nl> + / / update src buffers : <nl> + src0_leftBuf = src1_leftBuf ; <nl> + src1_leftBuf = src2 ; <nl> + <nl> + / / process topBuf : <nl> + / / lookahead load : <nl> + src2 = v_load_aligned ( topBuf + i + 8 ) ; <nl> + <nl> + / / get shifted versions of the current block and add P1 : <nl> + src_shifted_left = v_extract < 7 > ( src0_topBuf , src1_topBuf ) + P1_reg ; <nl> + src_shifted_right = v_extract < 1 > ( src1_topBuf , src2 ) + P1_reg ; <nl> + <nl> + / / process and save current block : <nl> + res = v_load_aligned ( costs + i ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_topBuf , topMinCostP2_reg ) ) - topMinCostP2_reg ) ; <nl> + topMinCost_new_reg = v_min ( topMinCost_new_reg , res ) ; <nl> + v_store_aligned ( topBuf + i , res ) ; <nl> + <nl> + / / update src buffers : <nl> + src0_topBuf = src1_topBuf ; <nl> + src1_topBuf = src2 ; <nl> + } <nl> <nl> - / / get shifted versions of the current block and add P1 : <nl> + / / a bit different processing for the last cycle of the loop : <nl> + / / process leftBuf : <nl> + src2 = v_setall_s16 ( SHRT_MAX ) ; <nl> src_shifted_left = v_extract < 7 > ( src0_leftBuf , src1_leftBuf ) + P1_reg ; <nl> src_shifted_right = v_extract < 1 > ( src1_leftBuf , src2 ) + P1_reg ; <nl> <nl> - / / process and save current block : <nl> - res = v_load_aligned ( costs + i ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_leftBuf , leftMinCostP2_reg ) ) - leftMinCostP2_reg ) ; <nl> - leftMinCost_new_reg = v_min ( leftMinCost_new_reg , res ) ; <nl> - v_store_aligned ( leftBuf + i , res ) ; <nl> - <nl> - / / update src buffers : <nl> - src0_leftBuf = src1_leftBuf ; <nl> - src1_leftBuf = src2 ; <nl> + res = v_load_aligned ( costs + D - 8 ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_leftBuf , leftMinCostP2_reg ) ) - leftMinCostP2_reg ) ; <nl> + leftMinCost = v_reduce_min ( v_min ( leftMinCost_new_reg , res ) ) ; <nl> + v_store_aligned ( leftBuf + D - 8 , res ) ; <nl> <nl> / / process topBuf : <nl> - / / lookahead load : <nl> - src2 = v_load_aligned ( topBuf + i + 8 ) ; <nl> - <nl> - / / get shifted versions of the current block and add P1 : <nl> + src2 = v_setall_s16 ( SHRT_MAX ) ; <nl> src_shifted_left = v_extract < 7 > ( src0_topBuf , src1_topBuf ) + P1_reg ; <nl> src_shifted_right = v_extract < 1 > ( src1_topBuf , src2 ) + P1_reg ; <nl> <nl> - / / process and save current block : <nl> - res = v_load_aligned ( costs + i ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_topBuf , topMinCostP2_reg ) ) - topMinCostP2_reg ) ; <nl> - 
topMinCost_new_reg = v_min ( topMinCost_new_reg , res ) ; <nl> - v_store_aligned ( topBuf + i , res ) ; <nl> - <nl> - / / update src buffers : <nl> - src0_topBuf = src1_topBuf ; <nl> - src1_topBuf = src2 ; <nl> + res = v_load_aligned ( costs + D - 8 ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_topBuf , topMinCostP2_reg ) ) - topMinCostP2_reg ) ; <nl> + topMinCost = v_reduce_min ( v_min ( topMinCost_new_reg , res ) ) ; <nl> + v_store_aligned ( topBuf + D - 8 , res ) ; <nl> } <nl> - <nl> - / / a bit different processing for the last cycle of the loop : <nl> - / / process leftBuf : <nl> - src2 = v_setall_s16 ( SHRT_MAX ) ; <nl> - src_shifted_left = v_extract < 7 > ( src0_leftBuf , src1_leftBuf ) + P1_reg ; <nl> - src_shifted_right = v_extract < 1 > ( src1_leftBuf , src2 ) + P1_reg ; <nl> - <nl> - res = v_load_aligned ( costs + D - 8 ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_leftBuf , leftMinCostP2_reg ) ) - leftMinCostP2_reg ) ; <nl> - leftMinCost = min ( v_min ( leftMinCost_new_reg , res ) ) ; <nl> - v_store_aligned ( leftBuf + D - 8 , res ) ; <nl> - <nl> - / / process topBuf : <nl> - src2 = v_setall_s16 ( SHRT_MAX ) ; <nl> - src_shifted_left = v_extract < 7 > ( src0_topBuf , src1_topBuf ) + P1_reg ; <nl> - src_shifted_right = v_extract < 1 > ( src1_topBuf , src2 ) + P1_reg ; <nl> - <nl> - res = v_load_aligned ( costs + D - 8 ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_topBuf , topMinCostP2_reg ) ) - topMinCostP2_reg ) ; <nl> - topMinCost = min ( v_min ( topMinCost_new_reg , res ) ) ; <nl> - v_store_aligned ( topBuf + D - 8 , res ) ; <nl> - # else <nl> - CostType leftMinCost_new = SHRT_MAX ; <nl> - CostType topMinCost_new = SHRT_MAX ; <nl> - int leftMinCost_P2 = leftMinCost + P2 ; <nl> - int topMinCost_P2 = topMinCost + P2 ; <nl> - CostType leftBuf_prev_i_minus_1 = SHRT_MAX ; <nl> - CostType topBuf_i_minus_1 = SHRT_MAX ; <nl> - CostType tmp ; <nl> - <nl> - for ( int i = 0 ; i < D - 1 ; i + + ) <nl> + else <nl> + # endif <nl> { <nl> - leftBuf [ i ] = cv : : saturate_cast < CostType > ( costs [ i ] + std : : min ( std : : min ( leftBuf_prev_i_minus_1 + P1 , leftBuf_prev [ i + 1 ] + P1 ) , std : : min ( ( int ) leftBuf_prev [ i ] , leftMinCost_P2 ) ) - leftMinCost_P2 ) ; <nl> - leftBuf_prev_i_minus_1 = leftBuf_prev [ i ] ; <nl> - leftMinCost_new = std : : min ( leftMinCost_new , leftBuf [ i ] ) ; <nl> - <nl> - tmp = topBuf [ i ] ; <nl> - topBuf [ i ] = cv : : saturate_cast < CostType > ( costs [ i ] + std : : min ( std : : min ( topBuf_i_minus_1 + P1 , topBuf [ i + 1 ] + P1 ) , std : : min ( ( int ) topBuf [ i ] , topMinCost_P2 ) ) - topMinCost_P2 ) ; <nl> - topBuf_i_minus_1 = tmp ; <nl> - topMinCost_new = std : : min ( topMinCost_new , topBuf [ i ] ) ; <nl> - } <nl> + CostType leftMinCost_new = SHRT_MAX ; <nl> + CostType topMinCost_new = SHRT_MAX ; <nl> + int leftMinCost_P2 = leftMinCost + P2 ; <nl> + int topMinCost_P2 = topMinCost + P2 ; <nl> + CostType leftBuf_prev_i_minus_1 = SHRT_MAX ; <nl> + CostType topBuf_i_minus_1 = SHRT_MAX ; <nl> + CostType tmp ; <nl> + <nl> + for ( int i = 0 ; i < D - 1 ; i + + ) <nl> + { <nl> + leftBuf [ i ] = cv : : saturate_cast < CostType > ( costs [ i ] + std : : min ( std : : min ( leftBuf_prev_i_minus_1 + P1 , leftBuf_prev [ i + 1 ] + P1 ) , std : : min ( ( int ) leftBuf_prev [ i ] , leftMinCost_P2 ) ) - leftMinCost_P2 ) ; <nl> + leftBuf_prev_i_minus_1 = leftBuf_prev [ i ] ; <nl> + leftMinCost_new = std : : min ( leftMinCost_new , leftBuf [ i ] ) ; <nl> + <nl> + 
tmp = topBuf [ i ] ; <nl> + topBuf [ i ] = cv : : saturate_cast < CostType > ( costs [ i ] + std : : min ( std : : min ( topBuf_i_minus_1 + P1 , topBuf [ i + 1 ] + P1 ) , std : : min ( ( int ) topBuf [ i ] , topMinCost_P2 ) ) - topMinCost_P2 ) ; <nl> + topBuf_i_minus_1 = tmp ; <nl> + topMinCost_new = std : : min ( topMinCost_new , topBuf [ i ] ) ; <nl> + } <nl> <nl> - leftBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( costs [ D - 1 ] + std : : min ( leftBuf_prev_i_minus_1 + P1 , std : : min ( ( int ) leftBuf_prev [ D - 1 ] , leftMinCost_P2 ) ) - leftMinCost_P2 ) ; <nl> - leftMinCost = std : : min ( leftMinCost_new , leftBuf [ D - 1 ] ) ; <nl> + leftBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( costs [ D - 1 ] + std : : min ( leftBuf_prev_i_minus_1 + P1 , std : : min ( ( int ) leftBuf_prev [ D - 1 ] , leftMinCost_P2 ) ) - leftMinCost_P2 ) ; <nl> + leftMinCost = std : : min ( leftMinCost_new , leftBuf [ D - 1 ] ) ; <nl> <nl> - topBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( costs [ D - 1 ] + std : : min ( topBuf_i_minus_1 + P1 , std : : min ( ( int ) topBuf [ D - 1 ] , topMinCost_P2 ) ) - topMinCost_P2 ) ; <nl> - topMinCost = std : : min ( topMinCost_new , topBuf [ D - 1 ] ) ; <nl> - # endif <nl> + topBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( costs [ D - 1 ] + std : : min ( topBuf_i_minus_1 + P1 , std : : min ( ( int ) topBuf [ D - 1 ] , topMinCost_P2 ) ) - topMinCost_P2 ) ; <nl> + topMinCost = std : : min ( topMinCost_new , topBuf [ D - 1 ] ) ; <nl> + } <nl> } <nl> <nl> / / performing in - place SGM cost accumulation from right to left ( the result is stored in rightBuf ) and <nl> inline void accumulateCostsRight ( CostType * rightBuf , CostType * topBuf , CostType * <nl> CostType & rightMinCost , int D , int P1 , int P2 , int & optimal_disp , CostType & min_cost ) <nl> { <nl> # if CV_SIMD128 <nl> - v_int16x8 P1_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( P1 ) ) ; <nl> + if ( checkHardwareSupport ( CV_CPU_SSE2 ) | | checkHardwareSupport ( CV_CPU_NEON ) ) <nl> + { <nl> + v_int16x8 P1_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( P1 ) ) ; <nl> <nl> - v_int16x8 rightMinCostP2_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( rightMinCost + P2 ) ) ; <nl> - v_int16x8 rightMinCost_new_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> - v_int16x8 src0_rightBuf = v_setall_s16 ( SHRT_MAX ) ; <nl> - v_int16x8 src1_rightBuf = v_load ( rightBuf ) ; <nl> + v_int16x8 rightMinCostP2_reg = v_setall_s16 ( cv : : saturate_cast < CostType > ( rightMinCost + P2 ) ) ; <nl> + v_int16x8 rightMinCost_new_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> + v_int16x8 src0_rightBuf = v_setall_s16 ( SHRT_MAX ) ; <nl> + v_int16x8 src1_rightBuf = v_load ( rightBuf ) ; <nl> <nl> - v_int16x8 src2 ; <nl> - v_int16x8 src_shifted_left , src_shifted_right ; <nl> - v_int16x8 res ; <nl> + v_int16x8 src2 ; <nl> + v_int16x8 src_shifted_left , src_shifted_right ; <nl> + v_int16x8 res ; <nl> <nl> - v_int16x8 min_sum_cost_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> - v_int16x8 min_sum_pos_reg = v_setall_s16 ( 0 ) ; <nl> - v_int16x8 loop_idx ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) ; <nl> - v_int16x8 eight_reg = v_setall_s16 ( 8 ) ; <nl> + v_int16x8 min_sum_cost_reg = v_setall_s16 ( SHRT_MAX ) ; <nl> + v_int16x8 min_sum_pos_reg = v_setall_s16 ( 0 ) ; <nl> + v_int16x8 loop_idx ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) ; <nl> + v_int16x8 eight_reg = v_setall_s16 ( 8 ) ; <nl> <nl> - for ( int i = 0 ; i < D - 8 ; i + = 8 ) <nl> - { <nl> - / / lookahead load : <nl> - src2 = v_load_aligned ( rightBuf + i + 8 ) ; <nl> + for 
( int i = 0 ; i < D - 8 ; i + = 8 ) <nl> + { <nl> + / / lookahead load : <nl> + src2 = v_load_aligned ( rightBuf + i + 8 ) ; <nl> + <nl> + / / get shifted versions of the current block and add P1 : <nl> + src_shifted_left = v_extract < 7 > ( src0_rightBuf , src1_rightBuf ) + P1_reg ; <nl> + src_shifted_right = v_extract < 1 > ( src1_rightBuf , src2 ) + P1_reg ; <nl> + <nl> + / / process and save current block : <nl> + res = v_load_aligned ( costs + i ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_rightBuf , rightMinCostP2_reg ) ) - rightMinCostP2_reg ) ; <nl> + rightMinCost_new_reg = v_min ( rightMinCost_new_reg , res ) ; <nl> + v_store_aligned ( rightBuf + i , res ) ; <nl> + <nl> + / / compute and save total cost : <nl> + res = res + v_load_aligned ( leftBuf + i ) + v_load_aligned ( topBuf + i ) ; <nl> + v_store_aligned ( leftBuf + i , res ) ; <nl> + <nl> + / / track disparity value with the minimum cost : <nl> + min_sum_cost_reg = v_min ( min_sum_cost_reg , res ) ; <nl> + min_sum_pos_reg = min_sum_pos_reg + ( ( min_sum_cost_reg = = res ) & ( loop_idx - min_sum_pos_reg ) ) ; <nl> + loop_idx = loop_idx + eight_reg ; <nl> + <nl> + / / update src : <nl> + src0_rightBuf = src1_rightBuf ; <nl> + src1_rightBuf = src2 ; <nl> + } <nl> <nl> - / / get shifted versions of the current block and add P1 : <nl> + / / a bit different processing for the last cycle of the loop : <nl> + src2 = v_setall_s16 ( SHRT_MAX ) ; <nl> src_shifted_left = v_extract < 7 > ( src0_rightBuf , src1_rightBuf ) + P1_reg ; <nl> src_shifted_right = v_extract < 1 > ( src1_rightBuf , src2 ) + P1_reg ; <nl> <nl> - / / process and save current block : <nl> - res = v_load_aligned ( costs + i ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_rightBuf , rightMinCostP2_reg ) ) - rightMinCostP2_reg ) ; <nl> - rightMinCost_new_reg = v_min ( rightMinCost_new_reg , res ) ; <nl> - v_store_aligned ( rightBuf + i , res ) ; <nl> + res = v_load_aligned ( costs + D - 8 ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_rightBuf , rightMinCostP2_reg ) ) - rightMinCostP2_reg ) ; <nl> + rightMinCost = v_reduce_min ( v_min ( rightMinCost_new_reg , res ) ) ; <nl> + v_store_aligned ( rightBuf + D - 8 , res ) ; <nl> <nl> - / / compute and save total cost : <nl> - res = res + v_load_aligned ( leftBuf + i ) + v_load_aligned ( topBuf + i ) ; <nl> - v_store_aligned ( leftBuf + i , res ) ; <nl> + res = res + v_load_aligned ( leftBuf + D - 8 ) + v_load_aligned ( topBuf + D - 8 ) ; <nl> + v_store_aligned ( leftBuf + D - 8 , res ) ; <nl> <nl> - / / track disparity value with the minimum cost : <nl> min_sum_cost_reg = v_min ( min_sum_cost_reg , res ) ; <nl> + min_cost = v_reduce_min ( min_sum_cost_reg ) ; <nl> min_sum_pos_reg = min_sum_pos_reg + ( ( min_sum_cost_reg = = res ) & ( loop_idx - min_sum_pos_reg ) ) ; <nl> - loop_idx = loop_idx + eight_reg ; <nl> - <nl> - / / update src : <nl> - src0_rightBuf = src1_rightBuf ; <nl> - src1_rightBuf = src2 ; <nl> + optimal_disp = min_pos ( min_sum_cost_reg , min_sum_pos_reg , min_cost ) ; <nl> } <nl> - <nl> - / / a bit different processing for the last cycle of the loop : <nl> - src2 = v_setall_s16 ( SHRT_MAX ) ; <nl> - src_shifted_left = v_extract < 7 > ( src0_rightBuf , src1_rightBuf ) + P1_reg ; <nl> - src_shifted_right = v_extract < 1 > ( src1_rightBuf , src2 ) + P1_reg ; <nl> - <nl> - res = v_load_aligned ( costs + D - 8 ) + ( v_min ( v_min ( src_shifted_left , src_shifted_right ) , v_min ( src1_rightBuf , rightMinCostP2_reg ) ) - 
rightMinCostP2_reg ) ; <nl> - rightMinCost = min ( v_min ( rightMinCost_new_reg , res ) ) ; <nl> - v_store_aligned ( rightBuf + D - 8 , res ) ; <nl> - <nl> - res = res + v_load_aligned ( leftBuf + D - 8 ) + v_load_aligned ( topBuf + D - 8 ) ; <nl> - v_store_aligned ( leftBuf + D - 8 , res ) ; <nl> - <nl> - min_sum_cost_reg = v_min ( min_sum_cost_reg , res ) ; <nl> - min_cost = min ( min_sum_cost_reg ) ; <nl> - min_sum_pos_reg = min_sum_pos_reg + ( ( min_sum_cost_reg = = res ) & ( loop_idx - min_sum_pos_reg ) ) ; <nl> - optimal_disp = min_pos ( min_sum_cost_reg , min_sum_pos_reg ) ; <nl> - # else <nl> - CostType rightMinCost_new = SHRT_MAX ; <nl> - int rightMinCost_P2 = rightMinCost + P2 ; <nl> - CostType rightBuf_i_minus_1 = SHRT_MAX ; <nl> - CostType tmp ; <nl> - min_cost = SHRT_MAX ; <nl> - <nl> - for ( int i = 0 ; i < D - 1 ; i + + ) <nl> + else <nl> + # endif <nl> { <nl> - tmp = rightBuf [ i ] ; <nl> - rightBuf [ i ] = cv : : saturate_cast < CostType > ( costs [ i ] + std : : min ( std : : min ( rightBuf_i_minus_1 + P1 , rightBuf [ i + 1 ] + P1 ) , std : : min ( ( int ) rightBuf [ i ] , rightMinCost_P2 ) ) - rightMinCost_P2 ) ; <nl> - rightBuf_i_minus_1 = tmp ; <nl> - rightMinCost_new = std : : min ( rightMinCost_new , rightBuf [ i ] ) ; <nl> - leftBuf [ i ] = cv : : saturate_cast < CostType > ( ( int ) leftBuf [ i ] + rightBuf [ i ] + topBuf [ i ] ) ; <nl> - if ( leftBuf [ i ] < min_cost ) <nl> + CostType rightMinCost_new = SHRT_MAX ; <nl> + int rightMinCost_P2 = rightMinCost + P2 ; <nl> + CostType rightBuf_i_minus_1 = SHRT_MAX ; <nl> + CostType tmp ; <nl> + min_cost = SHRT_MAX ; <nl> + <nl> + for ( int i = 0 ; i < D - 1 ; i + + ) <nl> { <nl> - optimal_disp = i ; <nl> - min_cost = leftBuf [ i ] ; <nl> + tmp = rightBuf [ i ] ; <nl> + rightBuf [ i ] = cv : : saturate_cast < CostType > ( costs [ i ] + std : : min ( std : : min ( rightBuf_i_minus_1 + P1 , rightBuf [ i + 1 ] + P1 ) , std : : min ( ( int ) rightBuf [ i ] , rightMinCost_P2 ) ) - rightMinCost_P2 ) ; <nl> + rightBuf_i_minus_1 = tmp ; <nl> + rightMinCost_new = std : : min ( rightMinCost_new , rightBuf [ i ] ) ; <nl> + leftBuf [ i ] = cv : : saturate_cast < CostType > ( ( int ) leftBuf [ i ] + rightBuf [ i ] + topBuf [ i ] ) ; <nl> + if ( leftBuf [ i ] < min_cost ) <nl> + { <nl> + optimal_disp = i ; <nl> + min_cost = leftBuf [ i ] ; <nl> + } <nl> } <nl> - } <nl> <nl> - rightBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( costs [ D - 1 ] + std : : min ( rightBuf_i_minus_1 + P1 , std : : min ( ( int ) rightBuf [ D - 1 ] , rightMinCost_P2 ) ) - rightMinCost_P2 ) ; <nl> - rightMinCost = std : : min ( rightMinCost_new , rightBuf [ D - 1 ] ) ; <nl> - leftBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( ( int ) leftBuf [ D - 1 ] + rightBuf [ D - 1 ] + topBuf [ D - 1 ] ) ; <nl> - if ( leftBuf [ D - 1 ] < min_cost ) <nl> - { <nl> - optimal_disp = D - 1 ; <nl> - min_cost = leftBuf [ D - 1 ] ; <nl> + rightBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( costs [ D - 1 ] + std : : min ( rightBuf_i_minus_1 + P1 , std : : min ( ( int ) rightBuf [ D - 1 ] , rightMinCost_P2 ) ) - rightMinCost_P2 ) ; <nl> + rightMinCost = std : : min ( rightMinCost_new , rightBuf [ D - 1 ] ) ; <nl> + leftBuf [ D - 1 ] = cv : : saturate_cast < CostType > ( ( int ) leftBuf [ D - 1 ] + rightBuf [ D - 1 ] + topBuf [ D - 1 ] ) ; <nl> + if ( leftBuf [ D - 1 ] < min_cost ) <nl> + { <nl> + optimal_disp = D - 1 ; <nl> + min_cost = leftBuf [ D - 1 ] ; <nl> + } <nl> } <nl> - # endif <nl> } <nl> <nl> void SGBM3WayMainLoop : : operator ( ) ( const Range & range ) 
const <nl> void SGBM3WayMainLoop : : operator ( ) ( const Range & range ) const <nl> if ( uniquenessRatio > 0 ) <nl> { <nl> # if CV_SIMD128 <nl> - horPassCostVolume + = x ; <nl> - int thresh = ( 100 * min_cost ) / ( 100 - uniquenessRatio ) ; <nl> - v_int16x8 thresh_reg = v_setall_s16 ( ( short ) ( thresh + 1 ) ) ; <nl> - v_int16x8 d1 = v_setall_s16 ( ( short ) ( best_d - 1 ) ) ; <nl> - v_int16x8 d2 = v_setall_s16 ( ( short ) ( best_d + 1 ) ) ; <nl> - v_int16x8 eight_reg = v_setall_s16 ( 8 ) ; <nl> - v_int16x8 cur_d ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) ; <nl> - v_int16x8 mask , cost1 , cost2 ; <nl> - <nl> - for ( d = 0 ; d < D ; d + = 16 ) <nl> + if ( useSIMD ) <nl> { <nl> - cost1 = v_load_aligned ( horPassCostVolume + d ) ; <nl> - cost2 = v_load_aligned ( horPassCostVolume + d + 8 ) ; <nl> + horPassCostVolume + = x ; <nl> + int thresh = ( 100 * min_cost ) / ( 100 - uniquenessRatio ) ; <nl> + v_int16x8 thresh_reg = v_setall_s16 ( ( short ) ( thresh + 1 ) ) ; <nl> + v_int16x8 d1 = v_setall_s16 ( ( short ) ( best_d - 1 ) ) ; <nl> + v_int16x8 d2 = v_setall_s16 ( ( short ) ( best_d + 1 ) ) ; <nl> + v_int16x8 eight_reg = v_setall_s16 ( 8 ) ; <nl> + v_int16x8 cur_d ( 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ) ; <nl> + v_int16x8 mask , cost1 , cost2 ; <nl> + <nl> + for ( d = 0 ; d < D ; d + = 16 ) <nl> + { <nl> + cost1 = v_load_aligned ( horPassCostVolume + d ) ; <nl> + cost2 = v_load_aligned ( horPassCostVolume + d + 8 ) ; <nl> <nl> - mask = cost1 < thresh_reg ; <nl> - mask = mask & ( ( cur_d < d1 ) | ( cur_d > d2 ) ) ; <nl> - if ( v_signmask ( mask ) ) <nl> - break ; <nl> + mask = cost1 < thresh_reg ; <nl> + mask = mask & ( ( cur_d < d1 ) | ( cur_d > d2 ) ) ; <nl> + if ( v_signmask ( mask ) ) <nl> + break ; <nl> <nl> - cur_d = cur_d + eight_reg ; <nl> + cur_d = cur_d + eight_reg ; <nl> <nl> - mask = cost2 < thresh_reg ; <nl> - mask = mask & ( ( cur_d < d1 ) | ( cur_d > d2 ) ) ; <nl> - if ( v_signmask ( mask ) ) <nl> - break ; <nl> + mask = cost2 < thresh_reg ; <nl> + mask = mask & ( ( cur_d < d1 ) | ( cur_d > d2 ) ) ; <nl> + if ( v_signmask ( mask ) ) <nl> + break ; <nl> <nl> - cur_d = cur_d + eight_reg ; <nl> + cur_d = cur_d + eight_reg ; <nl> + } <nl> + horPassCostVolume - = x ; <nl> } <nl> - horPassCostVolume - = x ; <nl> - # else <nl> - for ( d = 0 ; d < D ; d + + ) <nl> + else <nl> + # endif <nl> { <nl> - if ( horPassCostVolume [ x + d ] * ( 100 - uniquenessRatio ) < min_cost * 100 & & std : : abs ( d - best_d ) > 1 ) <nl> - break ; <nl> + for ( d = 0 ; d < D ; d + + ) <nl> + { <nl> + if ( horPassCostVolume [ x + d ] * ( 100 - uniquenessRatio ) < min_cost * 100 & & std : : abs ( d - best_d ) > 1 ) <nl> + break ; <nl> + } <nl> } <nl> - # endif <nl> if ( d < D ) <nl> continue ; <nl> } <nl> mmm a / modules / core / include / opencv2 / core / hal / intrin_neon . hpp <nl> ppp b / modules / core / include / opencv2 / core / hal / intrin_neon . hpp <nl> inline void v_store_f16 ( short * ptr , v_float16x4 & a ) <nl> { vst1_f16 ( ptr , a . 
val ) ; } <nl> # endif <nl> <nl> - # define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( _Tpvec , scalartype , func , scalar_func ) \ <nl> + # define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8 ( _Tpvec , _Tpnvec , scalartype , func , vectorfunc , suffix ) \ <nl> inline scalartype v_reduce_ # # func ( const _Tpvec & a ) \ <nl> { \ <nl> - scalartype CV_DECL_ALIGNED ( 16 ) buf [ 4 ] ; \ <nl> - v_store_aligned ( buf , a ) ; \ <nl> - scalartype s0 = scalar_func ( buf [ 0 ] , buf [ 1 ] ) ; \ <nl> - scalartype s1 = scalar_func ( buf [ 2 ] , buf [ 3 ] ) ; \ <nl> - return scalar_func ( s0 , s1 ) ; \ <nl> + _Tpnvec # # _t a0 = vp # # vectorfunc # # _ # # suffix ( vget_low_ # # suffix ( a . val ) , vget_high_ # # suffix ( a . val ) ) ; \ <nl> + a0 = vp # # vectorfunc # # _ # # suffix ( a0 , a0 ) ; \ <nl> + return ( scalartype ) vget_lane_ # # suffix ( vp # # vectorfunc # # _ # # suffix ( a0 , a0 ) , 0 ) ; \ <nl> } <nl> <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_uint32x4 , unsigned , sum , OPENCV_HAL_ADD ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_uint32x4 , unsigned , max , std : : max ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_uint32x4 , unsigned , min , std : : min ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_int32x4 , int , sum , OPENCV_HAL_ADD ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_int32x4 , int , max , std : : max ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_int32x4 , int , min , std : : min ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_float32x4 , float , sum , OPENCV_HAL_ADD ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_float32x4 , float , max , std : : max ) <nl> - OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_float32x4 , float , min , std : : min ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_8 ( v_uint16x8 , uint16x4 , unsigned short , sum , add , u16 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_8 ( v_uint16x8 , uint16x4 , unsigned short , max , max , u16 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_8 ( v_uint16x8 , uint16x4 , unsigned short , min , min , u16 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_8 ( v_int16x8 , int16x4 , short , sum , add , s16 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_8 ( v_int16x8 , int16x4 , short , max , max , s16 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_8 ( v_int16x8 , int16x4 , short , min , min , s16 ) <nl> + <nl> + # define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( _Tpvec , _Tpnvec , scalartype , func , vectorfunc , suffix ) \ <nl> + inline scalartype v_reduce_ # # func ( const _Tpvec & a ) \ <nl> + { \ <nl> + _Tpnvec # # _t a0 = vp # # vectorfunc # # _ # # suffix ( vget_low_ # # suffix ( a . val ) , vget_high_ # # suffix ( a . val ) ) ; \ <nl> + return ( scalartype ) vget_lane_ # # suffix ( vp # # vectorfunc # # _ # # suffix ( a0 , vget_high_ # # suffix ( a . 
val ) ) , 0 ) ; \ <nl> + } <nl> + <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_uint32x4 , uint32x2 , unsigned , sum , add , u32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_uint32x4 , uint32x2 , unsigned , max , max , u32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_uint32x4 , uint32x2 , unsigned , min , min , u32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_int32x4 , int32x2 , int , sum , add , s32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_int32x4 , int32x2 , int , max , max , s32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_int32x4 , int32x2 , int , min , min , s32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_float32x4 , float32x2 , float , sum , add , f32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_float32x4 , float32x2 , float , max , max , f32 ) <nl> + OPENCV_HAL_IMPL_NEON_REDUCE_OP_4 ( v_float32x4 , float32x2 , float , min , min , f32 ) <nl> <nl> inline int v_signmask ( const v_uint8x16 & a ) <nl> { <nl> mmm a / modules / core / include / opencv2 / core / hal / intrin_sse . hpp <nl> ppp b / modules / core / include / opencv2 / core / hal / intrin_sse . hpp <nl> inline void v_store_f16 ( short * ptr , v_float16x4 & a ) <nl> { _mm_storel_epi64 ( ( __m128i * ) ptr , a . val ) ; } <nl> # endif <nl> <nl> + # define OPENCV_HAL_IMPL_SSE_REDUCE_OP_8 ( _Tpvec , scalartype , func , suffix , sbit ) \ <nl> + inline scalartype v_reduce_ # # func ( const v_ # # _Tpvec & a ) \ <nl> + { \ <nl> + __m128i val = a . val ; \ <nl> + val = _mm_ # # func # # _ # # suffix ( val , _mm_srli_si128 ( val , 8 ) ) ; \ <nl> + val = _mm_ # # func # # _ # # suffix ( val , _mm_srli_si128 ( val , 4 ) ) ; \ <nl> + val = _mm_ # # func # # _ # # suffix ( val , _mm_srli_si128 ( val , 2 ) ) ; \ <nl> + return ( scalartype ) _mm_cvtsi128_si32 ( val ) ; \ <nl> + } \ <nl> + inline unsigned scalartype v_reduce_ # # func ( const v_u # # _Tpvec & a ) \ <nl> + { \ <nl> + __m128i val = a . val ; \ <nl> + __m128i smask = _mm_set1_epi16 ( sbit ) ; \ <nl> + val = _mm_xor_si128 ( val , smask ) ; \ <nl> + val = _mm_ # # func # # _ # # suffix ( val , _mm_srli_si128 ( val , 8 ) ) ; \ <nl> + val = _mm_ # # func # # _ # # suffix ( val , _mm_srli_si128 ( val , 4 ) ) ; \ <nl> + val = _mm_ # # func # # _ # # suffix ( val , _mm_srli_si128 ( val , 2 ) ) ; \ <nl> + return ( unsigned scalartype ) ( _mm_cvtsi128_si32 ( val ) ^ sbit ) ; \ <nl> + } <nl> + # define OPENCV_HAL_IMPL_SSE_REDUCE_OP_8_SUM ( _Tpvec , scalartype , suffix ) \ <nl> + inline scalartype v_reduce_sum ( const v_ # # _Tpvec & a ) \ <nl> + { \ <nl> + __m128i val = a . val ; \ <nl> + val = _mm_adds_epi # # suffix ( val , _mm_srli_si128 ( val , 8 ) ) ; \ <nl> + val = _mm_adds_epi # # suffix ( val , _mm_srli_si128 ( val , 4 ) ) ; \ <nl> + val = _mm_adds_epi # # suffix ( val , _mm_srli_si128 ( val , 2 ) ) ; \ <nl> + return ( scalartype ) _mm_cvtsi128_si32 ( val ) ; \ <nl> + } \ <nl> + inline unsigned scalartype v_reduce_sum ( const v_u # # _Tpvec & a ) \ <nl> + { \ <nl> + __m128i val = a . 
val ; \ <nl> + val = _mm_adds_epu # # suffix ( val , _mm_srli_si128 ( val , 8 ) ) ; \ <nl> + val = _mm_adds_epu # # suffix ( val , _mm_srli_si128 ( val , 4 ) ) ; \ <nl> + val = _mm_adds_epu # # suffix ( val , _mm_srli_si128 ( val , 2 ) ) ; \ <nl> + return ( unsigned scalartype ) _mm_cvtsi128_si32 ( val ) ; \ <nl> + } <nl> + OPENCV_HAL_IMPL_SSE_REDUCE_OP_8 ( int16x8 , short , max , epi16 , ( short ) - 32768 ) <nl> + OPENCV_HAL_IMPL_SSE_REDUCE_OP_8 ( int16x8 , short , min , epi16 , ( short ) - 32768 ) <nl> + OPENCV_HAL_IMPL_SSE_REDUCE_OP_8_SUM ( int16x8 , short , 16 ) <nl> + <nl> # define OPENCV_HAL_IMPL_SSE_REDUCE_OP_4 ( _Tpvec , scalartype , func , scalar_func ) \ <nl> inline scalartype v_reduce_ # # func ( const _Tpvec & a ) \ <nl> { \ <nl> mmm a / modules / core / test / test_intrin . cpp <nl> ppp b / modules / core / test / test_intrin . cpp <nl> template < typename R > struct TheTest <nl> R a = dataA ; <nl> EXPECT_EQ ( ( LaneType ) 1 , v_reduce_min ( a ) ) ; <nl> EXPECT_EQ ( ( LaneType ) R : : nlanes , v_reduce_max ( a ) ) ; <nl> - EXPECT_EQ ( ( LaneType ) ( 1 + R : : nlanes ) * 2 , v_reduce_sum ( a ) ) ; <nl> + EXPECT_EQ ( ( LaneType ) ( ( 1 + R : : nlanes ) * R : : nlanes / 2 ) , v_reduce_sum ( a ) ) ; <nl> return * this ; <nl> } <nl> <nl> TEST ( hal_intrin , uint16x8 ) { <nl> . test_logic ( ) <nl> . test_min_max ( ) <nl> . test_absdiff ( ) <nl> + . test_reduce ( ) <nl> . test_mask ( ) <nl> . test_pack < 1 > ( ) . test_pack < 2 > ( ) . test_pack < 7 > ( ) . test_pack < 16 > ( ) <nl> . test_pack_u < 1 > ( ) . test_pack_u < 2 > ( ) . test_pack_u < 7 > ( ) . test_pack_u < 16 > ( ) <nl> TEST ( hal_intrin , int16x8 ) { <nl> . test_min_max ( ) <nl> . test_absdiff ( ) <nl> . test_abs ( ) <nl> + . test_reduce ( ) <nl> . test_mask ( ) <nl> . test_pack < 1 > ( ) . test_pack < 2 > ( ) . test_pack < 7 > ( ) . test_pack < 16 > ( ) <nl> . test_unpack ( ) <nl>
|
Merge pull request from tomoaki0705:featureUniversalStereoSgbm
|
opencv/opencv
|
ecb8fb964d2dfe9ade00edc11d4119f997360bab
|
2016-10-28T15:34:11Z
|
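The OpenCV record above converts the SGBM stereo-matching inner loops from raw SSE2 intrinsics (`_mm_adds_epi16`, `_mm_min_epi16`, `_mm_store_si128`, ...) to OpenCV's universal intrinsics (`v_int16x8`, `v_load`/`v_store`, `v_min`), guarded by `CV_SIMD128` plus a runtime `checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON)` check, and adds 8-lane 16-bit `v_reduce_min`/`v_reduce_max`/`v_reduce_sum` to the SSE and NEON HAL headers. The sketch below is not OpenCV source; it is a minimal illustration of that pattern, assuming an OpenCV 3.x-era `opencv2/core/hal/intrin.hpp` in which the arithmetic operators on `v_int16x8` are overloaded and saturating, as used in the diff.

```cpp
// Minimal sketch (not OpenCV source) of the universal-intrinsics pattern above.
// Assumption: OpenCV 3.x-era <opencv2/core/hal/intrin.hpp> with overloaded,
// saturating 16-bit operators, matching the code in the diff.
#include <opencv2/core/hal/intrin.hpp>
#include <climits>
#include <algorithm>
#include <cstdio>

using namespace cv;

// Update a cost row acc[d] += add[d] - sub[d] (like the hsum/C update in
// calcPixelCostBT) and return the minimum cost, 8 signed 16-bit lanes at a
// time. D is assumed to be a multiple of 8, as in the SGBM code.
static short update_costs(short* acc, const short* add, const short* sub, int D)
{
    short best = SHRT_MAX;
#if CV_SIMD128
    v_int16x8 vbest = v_setall_s16(SHRT_MAX);
    for (int d = 0; d < D; d += 8)
    {
        v_int16x8 a = v_load(acc + d);
        a = a + v_load(add + d) - v_load(sub + d); // saturating, like _mm_adds_epi16/_mm_subs_epi16
        v_store(acc + d, a);
        vbest = v_min(vbest, a);                   // was _mm_min_epi16
    }
    best = v_reduce_min(vbest);                    // 8-lane 16-bit reduction, added by this commit
#else
    for (int d = 0; d < D; d++)
    {
        acc[d] = (short)(acc[d] + add[d] - sub[d]);
        best = std::min(best, acc[d]);
    }
#endif
    return best;
}

int main()
{
    short acc[16] = {0}, add[16], sub[16] = {0};
    for (int i = 0; i < 16; i++) add[i] = (short)(i + 1);
    std::printf("min cost = %d\n", (int)update_costs(acc, add, sub, 16));
    return 0;
}
```

The same source compiles to SSE2 on x86 and NEON on ARM, which is the point of the featureUniversalStereoSgbm change; the scalar branch mirrors the fallback path the diff keeps for builds without `CV_SIMD128`.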
mmm a / Marlin / src / gcode / control / M17_M18_M84 . cpp <nl> ppp b / Marlin / src / gcode / control / M17_M18_M84 . cpp <nl> void GcodeSuite : : M18_M84 ( ) { <nl> stepper_inactive_time = parser . value_millis_from_seconds ( ) ; <nl> } <nl> else { <nl> - bool all_axis = ! ( ( parser . seen ( ' X ' ) ) | | ( parser . seen ( ' Y ' ) ) | | ( parser . seen ( ' Z ' ) ) | | ( parser . seen ( ' E ' ) ) ) ; <nl> + bool all_axis = ! ( parser . seen ( ' X ' ) | | parser . seen ( ' Y ' ) | | parser . seen ( ' Z ' ) | | parser . seen ( ' E ' ) ) ; <nl> if ( all_axis ) { <nl> stepper . finish_and_disable ( ) ; <nl> } <nl>
|
Update M17_M18_M84.cpp
|
MarlinFirmware/Marlin
|
3dd04736c03936a1d6de6032452c763a8fec6c7a
|
2017-12-31T06:26:45Z
|
mmm a / xbmc / AutoSwitch . cpp <nl> ppp b / xbmc / AutoSwitch . cpp <nl> <nl> # define METHOD_BYFILECOUNT 3 <nl> # define METHOD_BYFOLDERTHUMBS 4 <nl> <nl> - CAutoSwitch : : CAutoSwitch ( void ) <nl> - { } <nl> + CAutoSwitch : : CAutoSwitch ( void ) = default ; <nl> <nl> - CAutoSwitch : : ~ CAutoSwitch ( void ) <nl> - { } <nl> + CAutoSwitch : : ~ CAutoSwitch ( void ) = default ; <nl> <nl> / / / \ brief Generic function to add a layer of transparency to the calling window <nl> / / / \ param vecItems Vector of FileItems passed from the calling window <nl> mmm a / xbmc / Autorun . cpp <nl> ppp b / xbmc / Autorun . cpp <nl> CAutorun : : CAutorun ( ) <nl> m_bEnable = true ; <nl> } <nl> <nl> - CAutorun : : ~ CAutorun ( ) <nl> - { } <nl> + CAutorun : : ~ CAutorun ( ) = default ; <nl> <nl> void CAutorun : : ExecuteAutorun ( const std : : string & path , bool bypassSettings , bool ignoreplaying , bool startFromBeginning ) <nl> { <nl> mmm a / xbmc / BackgroundInfoLoader . h <nl> ppp b / xbmc / BackgroundInfoLoader . h <nl> class CFileItemList ; <nl> class IBackgroundLoaderObserver <nl> { <nl> public : <nl> - virtual ~ IBackgroundLoaderObserver ( ) { } <nl> + virtual ~ IBackgroundLoaderObserver ( ) = default ; <nl> virtual void OnItemLoaded ( CFileItem * pItem ) = 0 ; <nl> } ; <nl> <nl> mmm a / xbmc / CueDocument . cpp <nl> ppp b / xbmc / CueDocument . cpp <nl> class CueReader <nl> public : <nl> virtual bool ready ( ) const = 0 ; <nl> virtual bool ReadLine ( std : : string & line ) = 0 ; <nl> - virtual ~ CueReader ( ) { } <nl> + virtual ~ CueReader ( ) = default ; <nl> private : <nl> std : : string m_sourcePath ; <nl> } ; <nl> CCueDocument : : CCueDocument ( ) <nl> { <nl> } <nl> <nl> - CCueDocument : : ~ CCueDocument ( ) <nl> - { } <nl> + CCueDocument : : ~ CCueDocument ( ) = default ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / Function : ParseFile ( ) <nl> mmm a / xbmc / DatabaseManager . cpp <nl> ppp b / xbmc / DatabaseManager . cpp <nl> CDatabaseManager : : CDatabaseManager ( ) : m_bIsUpgrading ( false ) <nl> { <nl> } <nl> <nl> - CDatabaseManager : : ~ CDatabaseManager ( ) <nl> - { <nl> - } <nl> + CDatabaseManager : : ~ CDatabaseManager ( ) = default ; <nl> <nl> void CDatabaseManager : : Initialize ( bool addonsOnly ) <nl> { <nl> mmm a / xbmc / DbUrl . cpp <nl> ppp b / xbmc / DbUrl . cpp <nl> CDbUrl : : CDbUrl ( ) <nl> Reset ( ) ; <nl> } <nl> <nl> - CDbUrl : : ~ CDbUrl ( ) <nl> - { } <nl> + CDbUrl : : ~ CDbUrl ( ) = default ; <nl> <nl> void CDbUrl : : Reset ( ) <nl> { <nl> mmm a / xbmc / GUIInfoManager . cpp <nl> ppp b / xbmc / GUIInfoManager . cpp <nl> class CSetCurrentItemJob : public CJob <nl> CFileItemPtr m_itemCurrentFile ; <nl> public : <nl> CSetCurrentItemJob ( const CFileItemPtr item ) : m_itemCurrentFile ( item ) { } <nl> - ~ CSetCurrentItemJob ( void ) override { } <nl> + ~ CSetCurrentItemJob ( void ) override = default ; <nl> <nl> bool DoWork ( void ) override <nl> { <nl> mmm a / xbmc / GUILargeTextureManager . cpp <nl> ppp b / xbmc / GUILargeTextureManager . cpp <nl> void CGUILargeTextureManager : : CLargeTexture : : SetTexture ( CBaseTexture * texture ) <nl> m_texture . 
Set ( texture , texture - > GetWidth ( ) , texture - > GetHeight ( ) ) ; <nl> } <nl> <nl> - CGUILargeTextureManager : : CGUILargeTextureManager ( ) <nl> - { <nl> - } <nl> + CGUILargeTextureManager : : CGUILargeTextureManager ( ) = default ; <nl> <nl> - CGUILargeTextureManager : : ~ CGUILargeTextureManager ( ) <nl> - { <nl> - } <nl> + CGUILargeTextureManager : : ~ CGUILargeTextureManager ( ) = default ; <nl> <nl> void CGUILargeTextureManager : : CleanupUnusedImages ( bool immediately ) <nl> { <nl> mmm a / xbmc / GUIPassword . cpp <nl> ppp b / xbmc / GUIPassword . cpp <nl> CGUIPassword : : CGUIPassword ( void ) <nl> iMasterLockRetriesLeft = - 1 ; <nl> bMasterUser = false ; <nl> } <nl> - CGUIPassword : : ~ CGUIPassword ( void ) <nl> - { } <nl> + CGUIPassword : : ~ CGUIPassword ( void ) = default ; <nl> <nl> bool CGUIPassword : : IsItemUnlocked ( CFileItem * pItem , const std : : string & strType ) <nl> { <nl> mmm a / xbmc / IFileItemListModifier . h <nl> ppp b / xbmc / IFileItemListModifier . h <nl> class CFileItemList ; <nl> class IFileItemListModifier <nl> { <nl> public : <nl> - IFileItemListModifier ( ) { } <nl> - virtual ~ IFileItemListModifier ( ) { } <nl> + IFileItemListModifier ( ) = default ; <nl> + virtual ~ IFileItemListModifier ( ) = default ; <nl> <nl> virtual bool CanModify ( const CFileItemList & items ) const = 0 ; <nl> virtual bool Modify ( CFileItemList & items ) const = 0 ; <nl> mmm a / xbmc / IProgressCallback . h <nl> ppp b / xbmc / IProgressCallback . h <nl> <nl> class IProgressCallback <nl> { <nl> public : <nl> - virtual ~ IProgressCallback ( ) { } <nl> + virtual ~ IProgressCallback ( ) = default ; <nl> virtual void SetProgressMax ( int max ) = 0 ; <nl> virtual void SetProgressAdvance ( int nSteps = 1 ) = 0 ; <nl> virtual bool Abort ( ) = 0 ; <nl> mmm a / xbmc / InfoScanner . cpp <nl> ppp b / xbmc / InfoScanner . cpp <nl> <nl> # include " utils / log . h " <nl> # include " utils / URIUtils . h " <nl> <nl> - CInfoScanner : : ~ CInfoScanner ( ) { } <nl> + CInfoScanner : : ~ CInfoScanner ( ) = default ; <nl> <nl> bool CInfoScanner : : HasNoMedia ( const std : : string & strDirectory ) const <nl> { <nl> mmm a / xbmc / LangInfo . cpp <nl> ppp b / xbmc / LangInfo . cpp <nl> CLangInfo : : CRegion : : CRegion ( ) <nl> SetDefaults ( ) ; <nl> } <nl> <nl> - CLangInfo : : CRegion : : ~ CRegion ( ) <nl> - { <nl> - <nl> - } <nl> + CLangInfo : : CRegion : : ~ CRegion ( ) = default ; <nl> <nl> void CLangInfo : : CRegion : : SetDefaults ( ) <nl> { <nl> CLangInfo : : CLangInfo ( ) <nl> m_speedUnit = m_defaultRegion . m_speedUnit ; <nl> } <nl> <nl> - CLangInfo : : ~ CLangInfo ( ) <nl> - { <nl> - } <nl> + CLangInfo : : ~ CLangInfo ( ) = default ; <nl> <nl> void CLangInfo : : OnSettingChanged ( std : : shared_ptr < const CSetting > setting ) <nl> { <nl> mmm a / xbmc / MediaSource . h <nl> ppp b / xbmc / MediaSource . h <nl> class CMediaSource <nl> SOURCE_TYPE_REMOVABLE = 6 <nl> } ; <nl> CMediaSource ( ) { m_iDriveType = SOURCE_TYPE_UNKNOWN ; m_iLockMode = LOCK_MODE_EVERYONE ; m_iBadPwdCount = 0 ; m_iHasLock = 0 ; m_ignore = false ; m_allowSharing = true ; } ; <nl> - virtual ~ CMediaSource ( ) { } ; <nl> + virtual ~ CMediaSource ( ) = default ; <nl> <nl> bool operator = = ( const CMediaSource & right ) const ; <nl> <nl> mmm a / xbmc / PartyModeManager . cpp <nl> ppp b / xbmc / PartyModeManager . 
cpp <nl> CPartyModeManager : : CPartyModeManager ( void ) <nl> ClearState ( ) ; <nl> } <nl> <nl> - CPartyModeManager : : ~ CPartyModeManager ( void ) <nl> - { <nl> - } <nl> + CPartyModeManager : : ~ CPartyModeManager ( void ) = default ; <nl> <nl> bool CPartyModeManager : : Enable ( PartyModeContext context / * = PARTYMODECONTEXT_MUSIC * / , const std : : string & strXspPath / * = " " * / ) <nl> { <nl> mmm a / xbmc / PasswordManager . h <nl> ppp b / xbmc / PasswordManager . h <nl> class CPasswordManager <nl> CPasswordManager ( ) ; <nl> CPasswordManager ( const CPasswordManager & ) ; <nl> CPasswordManager const & operator = ( CPasswordManager const & ) ; <nl> - ~ CPasswordManager ( ) { } ; <nl> + ~ CPasswordManager ( ) = default ; <nl> <nl> void Load ( ) ; <nl> void Save ( ) const ; <nl> mmm a / xbmc / SectionLoader . cpp <nl> ppp b / xbmc / SectionLoader . cpp <nl> <nl> / / Define this to get loggin on all calls to load / unload sections / dlls <nl> / / # define LOGALL <nl> <nl> - CSectionLoader : : CSectionLoader ( void ) <nl> - { } <nl> + CSectionLoader : : CSectionLoader ( void ) = default ; <nl> <nl> CSectionLoader : : ~ CSectionLoader ( void ) <nl> { <nl> mmm a / xbmc / ServiceManager . cpp <nl> ppp b / xbmc / ServiceManager . cpp <nl> CServiceManager : : CServiceManager ( ) : <nl> { <nl> } <nl> <nl> - CServiceManager : : ~ CServiceManager ( ) <nl> - { <nl> - } <nl> + CServiceManager : : ~ CServiceManager ( ) = default ; <nl> <nl> bool CServiceManager : : Init1 ( ) <nl> { <nl> mmm a / xbmc / TextureCache . cpp <nl> ppp b / xbmc / TextureCache . cpp <nl> CTextureCache : : CTextureCache ( ) : CJobQueue ( false , 1 , CJob : : PRIORITY_LOW_PAUSABLE <nl> { <nl> } <nl> <nl> - CTextureCache : : ~ CTextureCache ( ) <nl> - { <nl> - } <nl> + CTextureCache : : ~ CTextureCache ( ) = default ; <nl> <nl> void CTextureCache : : Initialize ( ) <nl> { <nl> mmm a / xbmc / TextureCacheJob . cpp <nl> ppp b / xbmc / TextureCacheJob . cpp <nl> CTextureCacheJob : : CTextureCacheJob ( const std : : string & url , const std : : string & ol <nl> { <nl> } <nl> <nl> - CTextureCacheJob : : ~ CTextureCacheJob ( ) <nl> - { <nl> - } <nl> + CTextureCacheJob : : ~ CTextureCacheJob ( ) = default ; <nl> <nl> bool CTextureCacheJob : : operator = = ( const CJob * job ) const <nl> { <nl> mmm a / xbmc / TextureDatabase . cpp <nl> ppp b / xbmc / TextureDatabase . cpp <nl> std : : string CTextureUtils : : UnwrapImageURL ( const std : : string & image ) <nl> return image ; <nl> } <nl> <nl> - CTextureDatabase : : CTextureDatabase ( ) <nl> - { <nl> - } <nl> + CTextureDatabase : : CTextureDatabase ( ) = default ; <nl> <nl> - CTextureDatabase : : ~ CTextureDatabase ( ) <nl> - { <nl> - } <nl> + CTextureDatabase : : ~ CTextureDatabase ( ) = default ; <nl> <nl> bool CTextureDatabase : : Open ( ) <nl> { <nl> mmm a / xbmc / TextureDatabase . h <nl> ppp b / xbmc / TextureDatabase . h <nl> class CVariant ; <nl> class CTextureRule : public CDatabaseQueryRule <nl> { <nl> public : <nl> - CTextureRule ( ) { } ; <nl> - ~ CTextureRule ( ) override { } ; <nl> + CTextureRule ( ) = default ; <nl> + ~ CTextureRule ( ) override = default ; <nl> <nl> static void GetAvailableFields ( std : : vector < std : : string > & fieldList ) ; <nl> protected : <nl> mmm a / xbmc / ThumbLoader . cpp <nl> ppp b / xbmc / ThumbLoader . 
cpp <nl> void CThumbLoader : : SetCachedImage ( const CFileItem & item , const std : : string & type <nl> } <nl> } <nl> <nl> - CProgramThumbLoader : : CProgramThumbLoader ( ) <nl> - { <nl> - } <nl> + CProgramThumbLoader : : CProgramThumbLoader ( ) = default ; <nl> <nl> - CProgramThumbLoader : : ~ CProgramThumbLoader ( ) <nl> - { <nl> - } <nl> + CProgramThumbLoader : : ~ CProgramThumbLoader ( ) = default ; <nl> <nl> bool CProgramThumbLoader : : LoadItem ( CFileItem * pItem ) <nl> { <nl> mmm a / xbmc / ThumbnailCache . cpp <nl> ppp b / xbmc / ThumbnailCache . cpp <nl> CThumbnailCache * CThumbnailCache : : m_pCacheInstance = NULL ; <nl> <nl> CCriticalSection CThumbnailCache : : m_cs ; <nl> <nl> - CThumbnailCache : : ~ CThumbnailCache ( ) <nl> - { } <nl> + CThumbnailCache : : ~ CThumbnailCache ( ) = default ; <nl> <nl> - CThumbnailCache : : CThumbnailCache ( ) <nl> - { <nl> - } <nl> + CThumbnailCache : : CThumbnailCache ( ) = default ; <nl> <nl> CThumbnailCache * CThumbnailCache : : GetThumbnailCache ( ) <nl> { <nl> mmm a / xbmc / URL . cpp <nl> ppp b / xbmc / URL . cpp <nl> <nl> <nl> using namespace ADDON ; <nl> <nl> - CURL : : ~ CURL ( ) <nl> - { <nl> - } <nl> + CURL : : ~ CURL ( ) = default ; <nl> <nl> void CURL : : Reset ( ) <nl> { <nl> mmm a / xbmc / Util . cpp <nl> ppp b / xbmc / Util . cpp <nl> std : : string GetHomePath ( const std : : string & strTarget , std : : string strPath ) <nl> } <nl> # endif <nl> } <nl> - CUtil : : CUtil ( void ) <nl> - { <nl> - } <nl> + CUtil : : CUtil ( void ) = default ; <nl> <nl> - CUtil : : ~ CUtil ( void ) <nl> - { } <nl> + CUtil : : ~ CUtil ( void ) = default ; <nl> <nl> std : : string CUtil : : GetTitleFromPath ( const std : : string & strFileNameAndPath , bool bIsFolder / * = false * / ) <nl> { <nl> mmm a / xbmc / XBApplicationEx . cpp <nl> ppp b / xbmc / XBApplicationEx . cpp <nl> CXBApplicationEx : : CXBApplicationEx ( ) <nl> m_renderGUI = false ; <nl> } <nl> <nl> - CXBApplicationEx : : ~ CXBApplicationEx ( ) <nl> - { <nl> - } <nl> + CXBApplicationEx : : ~ CXBApplicationEx ( ) = default ; <nl> <nl> / * Destroy the app * / <nl> VOID CXBApplicationEx : : Destroy ( ) <nl> mmm a / xbmc / XBDateTime . h <nl> ppp b / xbmc / XBDateTime . h <nl> class CDateTime : public IArchivable <nl> CDateTime ( const time_t & time ) ; <nl> CDateTime ( const tm & time ) ; <nl> CDateTime ( int year , int month , int day , int hour , int minute , int second ) ; <nl> - ~ CDateTime ( ) override { } <nl> + ~ CDateTime ( ) override = default ; <nl> <nl> static CDateTime GetCurrentDateTime ( ) ; <nl> static CDateTime GetUTCDateTime ( ) ; <nl>
|
[modernize][xbmc] Prefer 'default' for declarations
|
xbmc/xbmc
|
6298432b00b9b7f0a0129ede91200670f3fb27fa
|
2017-07-06T10:13:21Z
|
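A minimal C++ sketch of the modernization applied in the xbmc commit above: empty, user-written constructor and destructor bodies are replaced with `= default`, letting the compiler generate them. The class and member names here are invented for illustration and do not come from the xbmc sources.

```cpp
#include <string>

// Before the change, classes carried empty hand-written special members:
//   CExampleManager::CExampleManager() {}
//   CExampleManager::~CExampleManager() {}
// After the change, they are explicitly defaulted instead.
class CExampleManager
{
public:
  CExampleManager() = default;                       // compiler-generated default constructor
  ~CExampleManager() = default;                      // compiler-generated destructor
  CExampleManager(const CExampleManager&) = default; // defaulted copy stays available

private:
  std::string m_name;
};

int main()
{
  CExampleManager manager;       // uses the defaulted constructor
  CExampleManager copy(manager); // uses the defaulted copy constructor
  (void)copy;
  return 0;
}
```

Defaulting instead of writing an empty body keeps the declaration honest about doing nothing special and lets the compiler treat the member as trivial where the rest of the class permits it.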
mmm a / src / compiler / expression / simple_function_call . cpp <nl> ppp b / src / compiler / expression / simple_function_call . cpp <nl> TypePtr SimpleFunctionCall : : inferAndCheck ( AnalysisResultPtr ar , TypePtr type , <nl> ( clsThis - > getName ( ) ! = m_className & & <nl> ! clsThis - > derivesFrom ( ar , m_className , true , false ) ) | | <nl> funcThis - > isStatic ( ) ) { <nl> - / / set the method static to avoid " unknown method " runtime exception <nl> - if ( Option : : StaticMethodAutoFix & & ! func - > containsThis ( ) ) { <nl> - func - > setStaticMethodAutoFixed ( ) ; <nl> - } else { <nl> - func - > setDynamic ( ) ; <nl> - } <nl> + func - > setDynamic ( ) ; <nl> if ( ar - > isFirstPass ( ) ) { <nl> ar - > getCodeError ( ) - > record ( self , CodeError : : MissingObjectContext , <nl> self ) ; <nl> mmm a / src / runtime / base / frame_injection . cpp <nl> ppp b / src / runtime / base / frame_injection . cpp <nl> Object & FrameInjection : : getThisForArrow ( ) { <nl> String FrameInjection : : GetStaticClassName ( ThreadInfo * info ) { <nl> if ( ! info ) info = ThreadInfo : : s_threadInfo . get ( ) ; <nl> for ( FrameInjection * t = info - > m_top ; t ; t = t - > m_prev ) { <nl> + if ( t = = info - > m_top & & ! t - > m_object . isNull ( ) & & t - > m_object - > o_getId ( ) ) { <nl> + return t - > m_object - > o_getClassName ( ) ; <nl> + } <nl> if ( ! t - > m_staticClass . empty ( ) ) { <nl> return t - > m_staticClass ; <nl> } <nl> - if ( ! t - > m_object . isNull ( ) ) { <nl> - return t - > m_object - > o_getClassName ( ) ; <nl> - } <nl> if ( t ! = info - > m_top & & ! t - > m_callingObject . isNull ( ) ) { <nl> return t - > m_callingObject - > o_getClassName ( ) ; <nl> } <nl> mmm a / src / runtime / base / object_data . cpp <nl> ppp b / src / runtime / base / object_data . cpp <nl> ObjectData : : ~ ObjectData ( ) { <nl> o_properties - > release ( ) ; <nl> } <nl> int * pmax = os_max_id . get ( ) ; <nl> - if ( o_id = = * pmax ) { <nl> + if ( o_id & & o_id = = * pmax ) { <nl> - - ( * pmax ) ; <nl> } <nl> } <nl> Variant ObjectData : : os_invoke ( const char * c , const char * s , <nl> CArrRef params , int64 hash , <nl> bool fatal / * = true * / ) { <nl> Object obj = create_object ( c , Array : : Create ( ) , false ) ; <nl> - obj . get ( ) - > o_id = 0 ; / / for isset ( $ this ) to tell whether this is a fake obj <nl> + int * pmax = os_max_id . get ( ) ; <nl> + int & id = obj . get ( ) - > o_id ; <nl> + if ( id = = * pmax ) - - ( * pmax ) ; <nl> + id = 0 ; / / for isset ( $ this ) to tell whether this is a fake obj <nl> return obj - > o_invoke ( s , params , hash , fatal ) ; <nl> } <nl> <nl> mmm a / src / test / test_code_run . cpp <nl> ppp b / src / test / test_code_run . cpp <nl> bool TestCodeRun : : TestLateStaticBinding ( ) { <nl> <nl> MVCRO ( " < ? 
php \ n " <nl> " class X { \ n " <nl> - " static function foo ( ) { echo \ " X : : foo \ n \ " ; } \ n " <nl> - " function bar ( ) { $ this : : foo ( ) ; } \ n " <nl> + " static function foo ( ) { echo \ " X : : foo \ \ n \ " ; } \ n " <nl> + " function bar ( ) { static : : foo ( ) ; } \ n " <nl> " } \ n " <nl> " class Y extends X { \ n " <nl> - " static function foo ( ) { echo \ " Y : : foo \ n \ " ; } \ n " <nl> + " static function foo ( ) { echo \ " Y : : foo \ \ n \ " ; } \ n " <nl> " function baz ( ) { X : : bar ( ) ; } \ n " <nl> " } \ n " <nl> " $ y = new Y ; \ n " <nl> - " $ y - > baz ( ) ; \ n " , <nl> + " $ y - > baz ( ) ; \ n " <nl> + " Y : : baz ( ) ; \ n " , <nl> <nl> - " Y : : foo \ n " <nl> - ) ; <nl> + " Y : : foo \ nX : : foo \ n " ) ; <nl> <nl> return true ; <nl> } <nl>
|
removing StaticMethodAutoFix to fix a late static binding bug
|
facebook/hhvm
|
91f38b4d541b8777ce88a086104d1112fa686c24
|
2010-07-07T00:01:16Z
|
mmm a / include / rapidjson / error / en . h <nl> ppp b / include / rapidjson / error / en . h <nl> inline const RAPIDJSON_ERROR_CHARTYPE * GetParseError_En ( ParseErrorCode parseErro <nl> case kParseErrorNumberMissFraction : return RAPIDJSON_ERROR_STRING ( " Miss fraction part in number . " ) ; <nl> case kParseErrorNumberMissExponent : return RAPIDJSON_ERROR_STRING ( " Miss exponent in number . " ) ; <nl> <nl> - case kParseErrorUnspecificSyntaxError : return RAPIDJSON_ERROR_STRING ( " Unspecific syntax error . " ) ; <nl> + case kParseErrorUnspecificSyntaxError : return RAPIDJSON_ERROR_STRING ( " Unspecific syntax error . " ) ; <nl> <nl> default : <nl> return RAPIDJSON_ERROR_STRING ( " Unknown error . " ) ; <nl> mmm a / include / rapidjson / reader . h <nl> ppp b / include / rapidjson / reader . h <nl> enum ParseErrorCode { <nl> kParseErrorNumberMissFraction , / / ! < Miss fraction part in number . <nl> kParseErrorNumberMissExponent , / / ! < Miss exponent in number . <nl> <nl> - kParseErrorUnspecificSyntaxError / / ! < General syntax error . <nl> + kParseErrorUnspecificSyntaxError / / ! < Unspecific syntax error . <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / test / unittest / readertest . cpp <nl> ppp b / test / unittest / readertest . cpp <nl> template < typename Encoding = UTF8 < > > <nl> struct IterativeParsingReaderHandler { <nl> typedef typename Encoding : : Ch Ch ; <nl> <nl> - IterativeParsingReaderHandler ( ) : <nl> - IsNullTriggered ( false ) , <nl> - IsBoolTriggered ( false ) , <nl> - IsIntTriggered ( false ) , <nl> - IsUintTriggered ( false ) , <nl> - IsInt64Triggered ( false ) , <nl> - IsUint64Triggered ( false ) , <nl> - IsDoubleTriggered ( false ) , <nl> - IsStringTriggered ( false ) , <nl> - IsStartObjectTriggered ( false ) , <nl> - IsEndObjectTriggered ( false ) , <nl> - MemberCount ( 0 ) , <nl> - IsStartArrayTriggered ( false ) , <nl> - ElementCount ( 0 ) { <nl> + IterativeParsingReaderHandler ( ) { <nl> + Reset ( ) ; <nl> + } <nl> + <nl> + void Reset ( ) { <nl> + IsNullTriggered = false ; <nl> + IsBoolTriggered = false ; <nl> + IsIntTriggered = false ; <nl> + IsUintTriggered = false ; <nl> + IsInt64Triggered = false ; <nl> + IsUint64Triggered = false ; <nl> + IsDoubleTriggered = false ; <nl> + IsStringTriggered = false ; <nl> + IsStartObjectTriggered = false ; <nl> + IsEndObjectTriggered = false ; <nl> + MemberCount = 0 ; <nl> + IsStartArrayTriggered = false ; <nl> + ElementCount = 0 ; <nl> } <nl> <nl> bool IsNullTriggered ; <nl> TEST ( Reader , IterativeParsing_StateTransition_ObjectInitial ) { <nl> / / ObjectInitial - > ObjectFinish - > Finish <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " { } " , 1 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_EQ ( Reader : : IterativeParsingObjectInitialState , state ) ; <nl> Reader : : IterativeParsingState d = reader . Transit < kParseIterativeFlag > ( <nl> TEST ( Reader , IterativeParsing_StateTransition_ObjectInitial ) { <nl> / / ObjectInitial - > MemberKey <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " { \ " key \ " : 1 } " , 1 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_EQ ( Reader : : IterativeParsingObjectInitialState , state ) ; <nl> Reader : : IterativeParsingState d = reader . 
Transit < kParseIterativeFlag > ( <nl> TEST ( Reader , IterativeParsing_StateTransition_MemberValue ) { <nl> / / MemberValue - > ObjectFinish <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " { \ " k \ " : 123 } " , 9 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingMemberValueState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_MemberValue ) { <nl> / / MemberValue - > MemberDelimiter <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " { \ " k \ " : 1 , \ " e \ " : 2 } " , 7 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingMemberValueState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_MemberValue ) { <nl> TEST ( Reader , IterativeParsing_StateTransition_MemberDelimiter ) { <nl> / / MemberDelimiter - > MemberKey <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " { \ " k \ " : 1 , \ " e \ " : 2 } " , 9 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingMemberDelimiterState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_ArrayInitial ) { <nl> / / ArrayInitial - > ArrayFinish - > Finish <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " [ ] " , 1 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingArrayInitialState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_ArrayInitial ) { <nl> / / ArrayInitial - > Element <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " [ 1 ] " , 1 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingArrayInitialState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_Element ) { <nl> / / Element - > ArrayFinish - > Finish <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " [ 1 ] " , 2 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingElementState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_Element ) { <nl> / / Element - > ElementDelimiter <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " [ 1 , 2 ] " , 2 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingElementState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_ElementDelimiter ) { <nl> / / ElementDelimiter - > ArrayInitial <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " [ 1 , [ 1 ] ] " , 4 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingElementDelimiterState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_ElementDelimiter ) { <nl> / / ElementDelimiter - > ObjectInitial <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " [ 1 , [ 1 ] ] " , 4 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingElementDelimiterState , state ) ; <nl> TEST ( Reader , IterativeParsing_StateTransition_ElementDelimiter ) { <nl> / / ElementDelimiter - > Element <nl> { <nl> ITERATIVE_PARSING_PREPARE_STATE_UNTIL ( " [ 1 , 2 ] " , 4 ) ; <nl> + handler . Reset ( ) ; <nl> <nl> EXPECT_FALSE ( reader . 
HasParseError ( ) ) ; <nl> EXPECT_EQ ( Reader : : IterativeParsingElementDelimiterState , state ) ; <nl>
|
Revise unit tests: reset the handler before the transition we are going to test.
|
Tencent/rapidjson
|
3038a7855e5a61b28133e2d9b72cecde95f2fb5b
|
2014-07-11T08:03:38Z
|
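A small, self-contained C++ sketch of the test-handler pattern adopted in the rapidjson commit above: rather than initializing every flag in the constructor's initializer list, the handler gets a Reset() method, the constructor calls it, and each test calls it again immediately before the transition it wants to observe, so setup parsing does not leave stale flags behind. The handler and test below are illustrative stand-ins, not the actual rapidjson unit test.

```cpp
#include <cassert>

// Illustrative handler: records which callbacks fired during parsing.
struct RecordingHandler {
    RecordingHandler() { Reset(); }

    // Clear all recorded state; called from the constructor and again by a
    // test right before the single transition it actually wants to check.
    void Reset() {
        startObjectTriggered = false;
        endObjectTriggered = false;
        memberCount = 0;
    }

    bool StartObject() { startObjectTriggered = true; return true; }
    bool EndObject(int members) { endObjectTriggered = true; memberCount = members; return true; }

    bool startObjectTriggered;
    bool endObjectTriggered;
    int memberCount;
};

int main() {
    RecordingHandler handler;

    // Drive the "parser" up to the state under test (setup phase, simplified).
    handler.StartObject();

    handler.Reset();       // discard side effects of the setup phase
    handler.EndObject(0);  // the one transition we want to verify

    assert(!handler.startObjectTriggered); // setup noise was cleared
    assert(handler.endObjectTriggered);    // only the tested transition was recorded
    return 0;
}
```

Resetting right before the assertion isolates the observation to the single state transition under test.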
mmm a / lib / IRGen / GenProto . cpp <nl> ppp b / lib / IRGen / GenProto . cpp <nl> static llvm : : Value * emitWitnessTableAccessorCall ( <nl> <nl> / / Emit the source metadata if we haven ' t yet . <nl> if ( ! * srcMetadataCache ) { <nl> - * srcMetadataCache = IGF . emitTypeMetadataRef ( <nl> + * srcMetadataCache = IGF . emitAbstractTypeMetadataRef ( <nl> conformance - > getType ( ) - > getCanonicalType ( ) ) ; <nl> } <nl> <nl> mmm a / test / IRGen / witness_method . sil <nl> ppp b / test / IRGen / witness_method . sil <nl> struct SyncUp < Deliverable > : Synergy { <nl> <nl> / / CHECK - LABEL : define { { ( dllexport ) ? } } { { ( protected ) ? } } swiftcc void @ testGenericWitnessMethod ( % swift . opaque * noalias nocapture sret , % T14witness_method6SyncUpV * noalias nocapture , % swift . type * % T ) <nl> / / CHECK : entry : <nl> - / / CHECK : [ [ TMP : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s14witness_method6SyncUpVMa " ( [ [ INT ] ] 0 , % swift . type * % T ) <nl> + / / CHECK : [ [ TMP : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s14witness_method6SyncUpVMa " ( [ [ INT ] ] 255 , % swift . type * % T ) <nl> / / CHECK : [ [ METADATA : % . * ] ] = extractvalue % swift . metadata_response [ [ TMP ] ] , 0 <nl> / / CHECK : [ [ WTABLE : % . * ] ] = call i8 * * @ swift_getWitnessTable ( <nl> / / CHECK : [ [ WITNESS_ADDR : % . * ] ] = getelementptr inbounds i8 * , i8 * * [ [ WTABLE ] ] , i32 2 <nl> / / CHECK : [ [ WITNESS_FN : % . * ] ] = load i8 * , i8 * * [ [ WITNESS_ADDR ] ] <nl> / / CHECK : [ [ WITNESS : % . * ] ] = bitcast i8 * [ [ WITNESS_FN ] ] to void ( % swift . opaque * , % swift . opaque * , % swift . type * , i8 * * ) * <nl> + / / CHECK : [ [ TMP : % . * ] ] = call swiftcc % swift . metadata_response @ swift_checkMetadataState ( i64 0 , % swift . type * [ [ METADATA ] ] ) <nl> + / / CHECK : [ [ METADATA : % . * ] ] = extractvalue % swift . metadata_response [ [ TMP ] ] , 0 <nl> / / CHECK : [ [ ARG : % . * ] ] = bitcast % T14witness_method6SyncUpV * % 1 to % swift . opaque * <nl> / / CHECK : call swiftcc void [ [ WITNESS ] ] ( % swift . opaque * noalias nocapture sret % 0 , % swift . opaque * noalias nocapture swiftself [ [ ARG ] ] , % swift . type * [ [ METADATA ] ] , i8 * * [ [ WTABLE ] ] ) <nl> / / CHECK : ret void <nl> mmm a / test / Inputs / conditional_conformance_basic_conformances . swift <nl> ppp b / test / Inputs / conditional_conformance_basic_conformances . swift <nl> public func single_concrete ( ) { <nl> / / CHECK - NEXT : br i1 [ [ IS_NULL ] ] , label % cacheIsNull , label % cont <nl> <nl> / / CHECK : cacheIsNull : <nl> - / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s42conditional_conformance_basic_conformances6SingleVyAA4IsP2VGMa " ( i64 0 ) <nl> + / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s42conditional_conformance_basic_conformances6SingleVyAA4IsP2VGMa " ( i64 255 ) <nl> / / CHECK - NEXT : [ [ Single_TYPE : % . * ] ] = extractvalue % swift . metadata_response [ [ T0 ] ] , 0 <nl> <nl> - / / CHECK - NEXT : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> + / / CHECK : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> / / CHECK - NEXT : [ [ A_P2_PTR : % . 
* ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 0 <nl> / / CHECK - NEXT : store i8 * * getelementptr inbounds ( [ 1 x i8 * ] , [ 1 x i8 * ] * @ " $ s42conditional_conformance_basic_conformances4IsP2VAA0F0AAWP " , i32 0 , i32 0 ) , i8 * * * [ [ A_P2_PTR ] ] , align 8 <nl> <nl> public func double_concrete_concrete ( ) { <nl> / / CHECK - NEXT : br i1 [ [ IS_NULL ] ] , label % cacheIsNull , label % cont <nl> <nl> / / CHECK : cacheIsNull : <nl> - / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s42conditional_conformance_basic_conformances6DoubleVyAA4IsP2VAA0F2P3VGMa " ( i64 0 ) <nl> + / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s42conditional_conformance_basic_conformances6DoubleVyAA4IsP2VAA0F2P3VGMa " ( i64 255 ) <nl> / / CHECK - NEXT : [ [ Double_TYPE : % . * ] ] = extractvalue % swift . metadata_response [ [ T0 ] ] , 0 <nl> <nl> - / / CHECK - NEXT : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 2 x i8 * * ] , [ 2 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> + / / CHECK : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 2 x i8 * * ] , [ 2 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> / / CHECK - NEXT : [ [ B_P2_PTR : % . * ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 0 <nl> / / CHECK - NEXT : store i8 * * getelementptr inbounds ( [ 1 x i8 * ] , [ 1 x i8 * ] * @ " $ s42conditional_conformance_basic_conformances4IsP2VAA0F0AAWP " , i32 0 , i32 0 ) , i8 * * * [ [ B_P2_PTR ] ] , align 8 <nl> / / CHECK - NEXT : [ [ C_P3_PTR : % . * ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 1 <nl> mmm a / test / Inputs / conditional_conformance_subclass . swift <nl> ppp b / test / Inputs / conditional_conformance_subclass . swift <nl> public func subclassgeneric_concrete ( ) { <nl> / / CHECK - NEXT : br i1 [ [ IS_NULL ] ] , label % cacheIsNull , label % cont <nl> <nl> / / CHECK : cacheIsNull : <nl> - / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s32conditional_conformance_subclass15SubclassGenericCyAA4IsP2VGMa " ( i64 0 ) <nl> + / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s32conditional_conformance_subclass15SubclassGenericCyAA4IsP2VGMa " ( i64 255 ) <nl> / / CHECK - NEXT : [ [ SubclassGeneric_TYPE : % . * ] ] = extractvalue % swift . metadata_response [ [ T0 ] ] , 0 <nl> - / / CHECK - NEXT : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> + / / CHECK : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> / / CHECK - NEXT : [ [ A_P2_PTR : % . * ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 0 <nl> / / CHECK - NEXT : store i8 * * getelementptr inbounds ( [ 1 x i8 * ] , [ 1 x i8 * ] * @ " $ s32conditional_conformance_subclass4IsP2VAA0E0AAWP " , i32 0 , i32 0 ) , i8 * * * [ [ A_P2_PTR ] ] , align 8 <nl> / / CHECK - NEXT : [ [ Base_P1 : % . * ] ] = call i8 * * @ swift_getWitnessTable <nl> public func subclassconcrete ( ) { <nl> / / CHECK - NEXT : br i1 [ [ IS_NULL ] ] , label % cacheIsNull , label % cont <nl> <nl> / / CHECK : cacheIsNull : <nl> - / / CHECK - NEXT : [ [ T0 : % . 
* ] ] = call swiftcc % swift . metadata_response @ " $ s32conditional_conformance_subclass16SubclassConcreteCMa " ( i64 0 ) <nl> + / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s32conditional_conformance_subclass16SubclassConcreteCMa " ( i64 255 ) <nl> / / CHECK - NEXT : [ [ SubclassConcrete_TYPE : % . * ] ] = extractvalue % swift . metadata_response [ [ T0 ] ] , 0 <nl> - / / CHECK - NEXT : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> + / / CHECK : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> / / CHECK - NEXT : [ [ A_P2_PTR : % . * ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 0 <nl> / / CHECK - NEXT : store i8 * * getelementptr inbounds ( [ 1 x i8 * ] , [ 1 x i8 * ] * @ " $ s32conditional_conformance_subclass4IsP2VAA0E0AAWP " , i32 0 , i32 0 ) , i8 * * * [ [ A_P2_PTR ] ] , align 8 <nl> / / CHECK - NEXT : [ [ Base_P1 : % . * ] ] = call i8 * * @ swift_getWitnessTable <nl> public func subclassgenericconcrete ( ) { <nl> / / CHECK - NEXT : br i1 [ [ IS_NULL ] ] , label % cacheIsNull , label % cont <nl> <nl> / / CHECK : cacheIsNull : <nl> - / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s32conditional_conformance_subclass23SubclassGenericConcreteCMa " ( i64 0 ) <nl> + / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s32conditional_conformance_subclass23SubclassGenericConcreteCMa " ( i64 255 ) <nl> / / CHECK - NEXT : [ [ SubclassGenericConcrete_TYPE : % . * ] ] = extractvalue % swift . metadata_response [ [ T0 ] ] , 0 <nl> - / / CHECK - NEXT : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> + / / CHECK : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 1 x i8 * * ] , [ 1 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> / / CHECK - NEXT : [ [ A_P2_PTR : % . * ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 0 <nl> / / CHECK - NEXT : store i8 * * getelementptr inbounds ( [ 1 x i8 * ] , [ 1 x i8 * ] * @ " $ s32conditional_conformance_subclass4IsP2VAA0E0AAWP " , i32 0 , i32 0 ) , i8 * * * [ [ A_P2_PTR ] ] , align 8 <nl> / / CHECK - NEXT : [ [ Base_P1 : % . * ] ] = call i8 * * @ swift_getWitnessTable <nl> mmm a / test / Inputs / conditional_conformance_with_assoc . swift <nl> ppp b / test / Inputs / conditional_conformance_with_assoc . swift <nl> public func concrete_concrete ( ) { <nl> / / CHECK - NEXT : br i1 [ [ IS_NULL ] ] , label % cacheIsNull , label % cont <nl> <nl> / / CHECK : cacheIsNull : <nl> - / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s34conditional_conformance_with_assoc6DoubleVyAA8IsAlsoP2VAA0F2P3VGMa " ( i64 0 ) <nl> + / / CHECK - NEXT : [ [ T0 : % . * ] ] = call swiftcc % swift . metadata_response @ " $ s34conditional_conformance_with_assoc6DoubleVyAA8IsAlsoP2VAA0F2P3VGMa " ( i64 255 ) <nl> / / CHECK - NEXT : [ [ Double_TYPE : % . * ] ] = extractvalue % swift . metadata_response [ [ T0 ] ] , 0 <nl> - / / CHECK - NEXT : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 3 x i8 * * ] , [ 3 x i8 * * ] * % conditional . requirement . 
buffer , i32 0 , i32 0 <nl> + / / CHECK : [ [ CONDITIONAL_REQUIREMENTS : % . * ] ] = getelementptr inbounds [ 3 x i8 * * ] , [ 3 x i8 * * ] * % conditional . requirement . buffer , i32 0 , i32 0 <nl> / / CHECK - NEXT : [ [ C_P3_PTR : % . * ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 0 <nl> / / CHECK - NEXT : store i8 * * getelementptr inbounds ( [ 2 x i8 * ] , [ 2 x i8 * ] * @ " $ s34conditional_conformance_with_assoc4IsP3VAA0F0AAWP " , i32 0 , i32 0 ) , i8 * * * [ [ C_P3_PTR ] ] , align 8 <nl> / / CHECK - NEXT : [ [ B_AT2_P2_PTR : % . * ] ] = getelementptr inbounds i8 * * , i8 * * * [ [ CONDITIONAL_REQUIREMENTS ] ] , i32 1 <nl> new file mode 100644 <nl> index 000000000000 . . 05bd44924a08 <nl> mmm / dev / null <nl> ppp b / test / Runtime / lazy_witness_table_cycle . swift <nl> <nl> + / / RUN : % target - run - simple - swift <nl> + / / REQUIRES : executable_test <nl> + / / REQUIRES : objc_interop <nl> + <nl> + / / SR - 5958 <nl> + import Foundation <nl> + <nl> + public struct Property : Equatable , Hashable , Codable { <nl> + public var value : PropertyValue < Property > <nl> + } <nl> + <nl> + public enum PropertyValue < P > : Equatable , Hashable where P : Equatable & Hashable { <nl> + case invalid <nl> + case date ( date : Date ? ) <nl> + } <nl> + <nl> + extension PropertyValue : Codable where P : Codable { <nl> + public func encode ( to encoder : Encoder ) throws { } <nl> + public init ( from decoder : Decoder ) throws { self = . invalid } <nl> + } <nl> + <nl> + extension String : Error { } <nl> + <nl> + let encoder = JSONEncoder ( ) <nl> + let json = try ! encoder . encode ( <nl> + Property ( value : . invalid ) <nl> + ) <nl> + <nl> + let decoder = JSONDecoder ( ) <nl> + let result = try ! decoder . decode ( Property . self , from : json ) <nl> + print ( result ) <nl>
|
[IRGen] Metadata for the conforming type in a witness table access need not be complete.
|
apple/swift
|
229ddf570f2286a766b76e89cde0c52785280abd
|
2019-02-06T06:02:49Z
|
mmm a / src / layer / padding . cpp <nl> ppp b / src / layer / padding . cpp <nl> int Padding : : load_param ( const ParamDict & pd ) <nl> return 0 ; <nl> } <nl> <nl> + template < typename T > <nl> + static void copy_make_border_image ( const Mat & src , Mat & dst , int top , int left , int type , T v ) <nl> + { <nl> + int w = dst . w ; <nl> + int h = dst . h ; <nl> + <nl> + const T * ptr = src ; <nl> + T * outptr = dst ; <nl> + <nl> + if ( type = = 0 ) <nl> + { <nl> + int y = 0 ; <nl> + / / fill top <nl> + for ( ; y < top ; y + + ) <nl> + { <nl> + int x = 0 ; <nl> + for ( ; x < w ; x + + ) <nl> + { <nl> + outptr [ x ] = v ; <nl> + } <nl> + outptr + = w ; <nl> + } <nl> + / / fill center <nl> + for ( ; y < ( top + src . h ) ; y + + ) <nl> + { <nl> + int x = 0 ; <nl> + for ( ; x < left ; x + + ) <nl> + { <nl> + outptr [ x ] = v ; <nl> + } <nl> + if ( src . w < 12 ) <nl> + { <nl> + for ( ; x < ( left + src . w ) ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ x - left ] ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + memcpy ( outptr + left , ptr , src . w * sizeof ( T ) ) ; <nl> + x + = src . w ; <nl> + } <nl> + for ( ; x < w ; x + + ) <nl> + { <nl> + outptr [ x ] = v ; <nl> + } <nl> + ptr + = src . w ; <nl> + outptr + = w ; <nl> + } <nl> + / / fill bottom <nl> + for ( ; y < h ; y + + ) <nl> + { <nl> + int x = 0 ; <nl> + for ( ; x < w ; x + + ) <nl> + { <nl> + outptr [ x ] = v ; <nl> + } <nl> + outptr + = w ; <nl> + } <nl> + } <nl> + else if ( type = = 1 ) <nl> + { <nl> + int y = 0 ; <nl> + / / fill top <nl> + for ( ; y < top ; y + + ) <nl> + { <nl> + int x = 0 ; <nl> + for ( ; x < left ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ 0 ] ; <nl> + } <nl> + if ( src . w < 12 ) <nl> + { <nl> + for ( ; x < ( left + src . w ) ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ x - left ] ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + memcpy ( outptr + left , ptr , src . w * sizeof ( T ) ) ; <nl> + x + = src . w ; <nl> + } <nl> + for ( ; x < w ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ src . w - 1 ] ; <nl> + } <nl> + outptr + = w ; <nl> + } <nl> + / / fill center <nl> + for ( ; y < ( top + src . h ) ; y + + ) <nl> + { <nl> + int x = 0 ; <nl> + for ( ; x < left ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ 0 ] ; <nl> + } <nl> + if ( src . w < 12 ) <nl> + { <nl> + for ( ; x < ( left + src . w ) ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ x - left ] ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + memcpy ( outptr + left , ptr , src . w * sizeof ( T ) ) ; <nl> + x + = src . w ; <nl> + } <nl> + for ( ; x < w ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ src . w - 1 ] ; <nl> + } <nl> + ptr + = src . w ; <nl> + outptr + = w ; <nl> + } <nl> + / / fill bottom <nl> + ptr - = src . w ; <nl> + for ( ; y < h ; y + + ) <nl> + { <nl> + int x = 0 ; <nl> + for ( ; x < left ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ 0 ] ; <nl> + } <nl> + if ( src . w < 12 ) <nl> + { <nl> + for ( ; x < ( left + src . w ) ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ x - left ] ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + memcpy ( outptr + left , ptr , src . w * sizeof ( T ) ) ; <nl> + x + = src . w ; <nl> + } <nl> + for ( ; x < w ; x + + ) <nl> + { <nl> + outptr [ x ] = ptr [ src . w - 1 ] ; <nl> + } <nl> + outptr + = w ; <nl> + } <nl> + } <nl> + } <nl> + <nl> int Padding : : forward ( const Mat & bottom_blob , Mat & top_blob , const Option & opt ) const <nl> { <nl> - copy_make_border ( bottom_blob , top_blob , top , bottom , left , right , type , value , opt . blob_allocator , opt . 
num_threads ) ; <nl> + if ( top = = 0 & & bottom = = 0 & & left = = 0 & & right = = 0 ) <nl> + { <nl> + top_blob = bottom_blob ; <nl> + return 0 ; <nl> + } <nl> + <nl> + int w = bottom_blob . w ; <nl> + int h = bottom_blob . h ; <nl> + int channels = bottom_blob . c ; <nl> + int dims = bottom_blob . dims ; <nl> + size_t elemsize = bottom_blob . elemsize ; <nl> + <nl> + int outw = w + left + right ; <nl> + <nl> + if ( dims = = 1 ) <nl> + { <nl> + top_blob . create ( outw , elemsize , opt . blob_allocator ) ; <nl> + if ( top_blob . empty ( ) ) <nl> + return - 100 ; <nl> + <nl> + if ( elemsize = = 1 ) <nl> + copy_make_border_image < signed char > ( bottom_blob , top_blob , 0 , left , type , value ) ; <nl> + else if ( elemsize = = 4 ) <nl> + copy_make_border_image < float > ( bottom_blob , top_blob , 0 , left , type , value ) ; <nl> + <nl> + return 0 ; <nl> + } <nl> + <nl> + int outh = h + top + bottom ; <nl> + <nl> + if ( dims = = 2 ) <nl> + { <nl> + top_blob . create ( outw , outh , elemsize , opt . blob_allocator ) ; <nl> + if ( top_blob . empty ( ) ) <nl> + return - 100 ; <nl> + <nl> + if ( elemsize = = 1 ) <nl> + copy_make_border_image < signed char > ( bottom_blob , top_blob , top , left , type , value ) ; <nl> + else if ( elemsize = = 4 ) <nl> + copy_make_border_image < float > ( bottom_blob , top_blob , top , left , type , value ) ; <nl> + <nl> + return 0 ; <nl> + } <nl> + <nl> + if ( dims = = 3 ) <nl> + { <nl> + top_blob . create ( outw , outh , channels , elemsize , opt . blob_allocator ) ; <nl> + if ( top_blob . empty ( ) ) <nl> + return - 100 ; <nl> + <nl> + # pragma omp parallel for num_threads ( opt . num_threads ) <nl> + for ( int q = 0 ; q < channels ; q + + ) <nl> + { <nl> + const Mat m = bottom_blob . channel ( q ) ; <nl> + Mat borderm = top_blob . channel ( q ) ; <nl> + <nl> + if ( elemsize = = 1 ) <nl> + copy_make_border_image < signed char > ( m , borderm , top , left , type , value ) ; <nl> + else if ( elemsize = = 4 ) <nl> + copy_make_border_image < float > ( m , borderm , top , left , type , value ) ; <nl> + } <nl> <nl> - if ( top_blob . empty ( ) ) <nl> - return - 100 ; <nl> + return 0 ; <nl> + } <nl> <nl> return 0 ; <nl> } <nl> mmm a / src / layer / padding . h <nl> ppp b / src / layer / padding . h <nl> class Padding : public Layer <nl> int bottom ; <nl> int left ; <nl> int right ; <nl> - int type ; <nl> + int type ; / / 0 = BORDER_CONSTANT 1 = BORDER_REPLICATE <nl> float value ; <nl> } ; <nl> <nl> mmm a / src / mat . cpp <nl> ppp b / src / mat . cpp <nl> <nl> <nl> # include " cpu . h " <nl> <nl> + # include " layer_type . h " <nl> + # include " layer . h " <nl> + <nl> namespace ncnn { <nl> <nl> void Mat : : substract_mean_normalize ( const float * mean_vals , const float * norm_vals ) <nl> Mat Mat : : from_float16 ( const unsigned short * data , int size ) <nl> return m ; <nl> } <nl> <nl> - static void copy_make_border_image ( const Mat & src , Mat & dst , int top , int left , int type , float v ) <nl> - { <nl> - int w = dst . w ; <nl> - int h = dst . h ; <nl> - <nl> - const float * ptr = src ; / / . data ; <nl> - float * outptr = dst ; / / . data ; <nl> - <nl> - if ( type = = BORDER_CONSTANT ) <nl> - { <nl> - int y = 0 ; <nl> - / / fill top <nl> - for ( ; y < top ; y + + ) <nl> - { <nl> - int x = 0 ; <nl> - for ( ; x < w ; x + + ) <nl> - { <nl> - outptr [ x ] = v ; <nl> - } <nl> - outptr + = w ; <nl> - } <nl> - / / fill center <nl> - for ( ; y < ( top + src . 
h ) ; y + + ) <nl> - { <nl> - int x = 0 ; <nl> - for ( ; x < left ; x + + ) <nl> - { <nl> - outptr [ x ] = v ; <nl> - } <nl> - if ( src . w < 12 ) <nl> - { <nl> - for ( ; x < ( left + src . w ) ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ x - left ] ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - memcpy ( outptr + left , ptr , src . w * sizeof ( float ) ) ; <nl> - x + = src . w ; <nl> - } <nl> - for ( ; x < w ; x + + ) <nl> - { <nl> - outptr [ x ] = v ; <nl> - } <nl> - ptr + = src . w ; <nl> - outptr + = w ; <nl> - } <nl> - / / fill bottom <nl> - for ( ; y < h ; y + + ) <nl> - { <nl> - int x = 0 ; <nl> - for ( ; x < w ; x + + ) <nl> - { <nl> - outptr [ x ] = v ; <nl> - } <nl> - outptr + = w ; <nl> - } <nl> - } <nl> - else if ( type = = BORDER_REPLICATE ) <nl> - { <nl> - int y = 0 ; <nl> - / / fill top <nl> - for ( ; y < top ; y + + ) <nl> - { <nl> - int x = 0 ; <nl> - for ( ; x < left ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ 0 ] ; <nl> - } <nl> - if ( src . w < 12 ) <nl> - { <nl> - for ( ; x < ( left + src . w ) ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ x - left ] ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - memcpy ( outptr + left , ptr , src . w * sizeof ( float ) ) ; <nl> - x + = src . w ; <nl> - } <nl> - for ( ; x < w ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ src . w - 1 ] ; <nl> - } <nl> - outptr + = w ; <nl> - } <nl> - / / fill center <nl> - for ( ; y < ( top + src . h ) ; y + + ) <nl> - { <nl> - int x = 0 ; <nl> - for ( ; x < left ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ 0 ] ; <nl> - } <nl> - if ( src . w < 12 ) <nl> - { <nl> - for ( ; x < ( left + src . w ) ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ x - left ] ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - memcpy ( outptr + left , ptr , src . w * sizeof ( float ) ) ; <nl> - x + = src . w ; <nl> - } <nl> - for ( ; x < w ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ src . w - 1 ] ; <nl> - } <nl> - ptr + = src . w ; <nl> - outptr + = w ; <nl> - } <nl> - / / fill bottom <nl> - ptr - = src . w ; <nl> - for ( ; y < h ; y + + ) <nl> - { <nl> - int x = 0 ; <nl> - for ( ; x < left ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ 0 ] ; <nl> - } <nl> - if ( src . w < 12 ) <nl> - { <nl> - for ( ; x < ( left + src . w ) ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ x - left ] ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - memcpy ( outptr + left , ptr , src . w * sizeof ( float ) ) ; <nl> - x + = src . w ; <nl> - } <nl> - for ( ; x < w ; x + + ) <nl> - { <nl> - outptr [ x ] = ptr [ src . w - 1 ] ; <nl> - } <nl> - outptr + = w ; <nl> - } <nl> - } <nl> - } <nl> - <nl> void copy_make_border ( const Mat & src , Mat & dst , int top , int bottom , int left , int right , int type , float v , Allocator * allocator , int num_threads ) <nl> { <nl> - int w = src . w + left + right ; <nl> - int h = src . h + top + bottom ; <nl> - size_t elemsize = src . elemsize ; <nl> + ncnn : : Layer * padding = ncnn : : create_layer ( ncnn : : LayerType : : Padding ) ; <nl> <nl> - if ( w = = src . w & & h = = src . h ) <nl> - { <nl> - dst = src ; <nl> - return ; <nl> - } <nl> + ncnn : : ParamDict pd ; <nl> + pd . set ( 0 , top ) ; <nl> + pd . set ( 1 , bottom ) ; <nl> + pd . set ( 2 , left ) ; <nl> + pd . set ( 3 , right ) ; <nl> + pd . set ( 4 , type ) ; <nl> + pd . set ( 5 , v ) ; <nl> <nl> - if ( src . dims = = 2 ) <nl> - { <nl> - dst . create ( w , h , elemsize , allocator ) ; <nl> - if ( dst . 
empty ( ) ) <nl> - return ; <nl> + padding - > load_param ( pd ) ; <nl> <nl> - copy_make_border_image ( src , dst , top , left , type , v ) ; <nl> - } <nl> - else if ( src . dims = = 3 ) <nl> - { <nl> - int channels = src . c ; <nl> + ncnn : : Option opt = ncnn : : get_default_option ( ) ; <nl> + opt . num_threads = num_threads ; <nl> + opt . blob_allocator = allocator ; <nl> <nl> - dst . create ( w , h , channels , elemsize , allocator ) ; <nl> - if ( dst . empty ( ) ) <nl> - return ; <nl> - <nl> - / / unroll image channel <nl> - # pragma omp parallel for num_threads ( num_threads ) <nl> - for ( int q = 0 ; q < channels ; q + + ) <nl> - { <nl> - const Mat m = src . channel ( q ) ; <nl> - Mat borderm = dst . channel ( q ) ; <nl> + padding - > forward ( src , dst , opt ) ; <nl> <nl> - copy_make_border_image ( m , borderm , top , left , type , v ) ; <nl> - } <nl> - } <nl> + delete padding ; <nl> } <nl> <nl> static void copy_cut_border_image ( const Mat & src , Mat & dst , int top , int left ) <nl>
|
Padding is elemsize-aware; copy_make_border is now a Padding wrapper
|
Tencent/ncnn
|
bf1c58be46597f1d9f79734c4786d41063f31f0e
|
2018-08-20T10:40:52Z
|
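A rough, self-contained C++ sketch of the idea behind the ncnn commit above: the border-fill routine is templated on the element type, so one implementation serves both 1-byte (signed char) and 4-byte (float) data, and the caller selects the instantiation from the runtime element size. The Image struct below is an invented stand-in, not the ncnn Mat API, and only the constant-border case (type 0) is shown.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Minimal stand-in for a single-channel image; this is NOT the ncnn Mat type.
template <typename T>
struct Image {
    int w = 0, h = 0;
    std::vector<T> data;
    Image(int w_, int h_) : w(w_), h(h_), data(static_cast<std::size_t>(w_) * h_) {}
};

// Constant-border padding (type 0 above), templated on the element type.
template <typename T>
Image<T> pad_constant(const Image<T>& src, int top, int bottom, int left, int right, T value)
{
    Image<T> dst(src.w + left + right, src.h + top + bottom);
    std::fill(dst.data.begin(), dst.data.end(), value);          // borders take the constant
    for (int y = 0; y < src.h; ++y)
    {
        const T* in = src.data.data() + static_cast<std::size_t>(y) * src.w;
        T* out = dst.data.data() + static_cast<std::size_t>(y + top) * dst.w + left;
        std::memcpy(out, in, src.w * sizeof(T));                 // copy one interior row
    }
    return dst;
}

int main()
{
    Image<float> f(4, 3);
    Image<std::int8_t> q(4, 3);
    // The same template handles 4-byte float and 1-byte int8 data.
    Image<float> fp = pad_constant(f, 1, 1, 2, 2, 0.0f);
    Image<std::int8_t> qp = pad_constant(q, 1, 1, 2, 2, static_cast<std::int8_t>(0));
    return (fp.w == 8 && fp.h == 5 && qp.w == 8 && qp.h == 5) ? 0 : 1;
}
```

A wrapper in the spirit of copy_make_border would then dispatch on the runtime elemsize (1 vs 4 bytes) to the matching instantiation, which is what the Padding layer's forward pass does in the commit above.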
mmm a / modules / imgproc / doc / feature_detection . rst <nl> ppp b / modules / imgproc / doc / feature_detection . rst <nl> http : / / en . wikipedia . org / wiki / Canny_edge_detector <nl> <nl> * An example on using the canny edge detector can be found at opencv_source_code / samples / cpp / edge . cpp <nl> <nl> - * ( Python ) An example on using the canny edge detector can be found at opencv_source_code / samples / cpp / edge . py <nl> + * ( Python ) An example on using the canny edge detector can be found at opencv_source_code / samples / python / edge . py <nl> <nl> cornerEigenValsAndVecs <nl> mmmmmmmmmmmmmmmmmmmmm - <nl>
|
Merge pull request from dpen2000:patch-2
|
opencv/opencv
|
648facccd687bb1414a12f421ad651ec6b3cf419
|
2013-12-26T13:47:09Z
|
mmm a / tensorflow / python / __init__ . py <nl> ppp b / tensorflow / python / __init__ . py <nl> <nl> <nl> # Export protos <nl> # pylint : disable = undefined - variable <nl> - tf_export ( ' AttrValue ' ) ( AttrValue ) <nl> - tf_export ( ' ConfigProto ' ) ( ConfigProto ) <nl> + tf_export ( v1 = [ ' AttrValue ' ] ) ( AttrValue ) <nl> + tf_export ( v1 = [ ' ConfigProto ' ] ) ( ConfigProto ) <nl> tf_export ( ' Event ' , ' summary . Event ' ) ( Event ) <nl> - tf_export ( ' GPUOptions ' ) ( GPUOptions ) <nl> - tf_export ( ' GraphDef ' ) ( GraphDef ) <nl> - tf_export ( ' GraphOptions ' ) ( GraphOptions ) <nl> - tf_export ( ' HistogramProto ' ) ( HistogramProto ) <nl> - tf_export ( ' LogMessage ' ) ( LogMessage ) <nl> - tf_export ( ' MetaGraphDef ' ) ( MetaGraphDef ) <nl> - tf_export ( ' NameAttrList ' ) ( NameAttrList ) <nl> - tf_export ( ' NodeDef ' ) ( NodeDef ) <nl> - tf_export ( ' OptimizerOptions ' ) ( OptimizerOptions ) <nl> - tf_export ( ' RunMetadata ' ) ( RunMetadata ) <nl> - tf_export ( ' RunOptions ' ) ( RunOptions ) <nl> + tf_export ( v1 = [ ' GPUOptions ' ] ) ( GPUOptions ) <nl> + tf_export ( v1 = [ ' GraphDef ' ] ) ( GraphDef ) <nl> + tf_export ( v1 = [ ' GraphOptions ' ] ) ( GraphOptions ) <nl> + tf_export ( v1 = [ ' HistogramProto ' ] ) ( HistogramProto ) <nl> + tf_export ( v1 = [ ' LogMessage ' ] ) ( LogMessage ) <nl> + tf_export ( v1 = [ ' MetaGraphDef ' ] ) ( MetaGraphDef ) <nl> + tf_export ( v1 = [ ' NameAttrList ' ] ) ( NameAttrList ) <nl> + tf_export ( v1 = [ ' NodeDef ' ] ) ( NodeDef ) <nl> + tf_export ( v1 = [ ' OptimizerOptions ' ] ) ( OptimizerOptions ) <nl> + tf_export ( v1 = [ ' RunMetadata ' ] ) ( RunMetadata ) <nl> + tf_export ( v1 = [ ' RunOptions ' ] ) ( RunOptions ) <nl> tf_export ( ' SessionLog ' , ' summary . SessionLog ' ) ( SessionLog ) <nl> tf_export ( ' Summary ' , ' summary . Summary ' ) ( Summary ) <nl> tf_export ( ' summary . SummaryDescription ' ) ( SummaryDescription ) <nl> deleted file mode 100644 <nl> index f1dffd5952850 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - attr - value . - list - value . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . AttrValue . ListValue " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " ListValue " <nl> - field { <nl> - name : " s " <nl> - number : 2 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_BYTES <nl> - } <nl> - field { <nl> - name : " i " <nl> - number : 3 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_INT64 <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " f " <nl> - number : 4 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_FLOAT <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " b " <nl> - number : 5 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_BOOL <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " type " <nl> - number : 6 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_ENUM <nl> - type_name : " . tensorflow . DataType " <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " shape " <nl> - number : 7 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . TensorShapeProto " <nl> - } <nl> - field { <nl> - name : " tensor " <nl> - number : 8 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . 
TensorProto " <nl> - } <nl> - field { <nl> - name : " func " <nl> - number : 9 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . NameAttrList " <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 6ccd64f428c3b . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - attr - value . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . AttrValue " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " AttrValue " <nl> - field { <nl> - name : " s " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BYTES <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " i " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " f " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_FLOAT <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " b " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " type " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_ENUM <nl> - type_name : " . tensorflow . DataType " <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " shape " <nl> - number : 7 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . TensorShapeProto " <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " tensor " <nl> - number : 8 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . TensorProto " <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " list " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . AttrValue . ListValue " <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " func " <nl> - number : 10 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . NameAttrList " <nl> - oneof_index : 0 <nl> - } <nl> - field { <nl> - name : " placeholder " <nl> - number : 9 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - oneof_index : 0 <nl> - } <nl> - nested_type { <nl> - name : " ListValue " <nl> - field { <nl> - name : " s " <nl> - number : 2 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_BYTES <nl> - } <nl> - field { <nl> - name : " i " <nl> - number : 3 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_INT64 <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " f " <nl> - number : 4 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_FLOAT <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " b " <nl> - number : 5 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_BOOL <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " type " <nl> - number : 6 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_ENUM <nl> - type_name : " . tensorflow . DataType " <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " shape " <nl> - number : 7 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . TensorShapeProto " <nl> - } <nl> - field { <nl> - name : " tensor " <nl> - number : 8 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . 
TensorProto " <nl> - } <nl> - field { <nl> - name : " func " <nl> - number : 9 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . NameAttrList " <nl> - } <nl> - } <nl> - oneof_decl { <nl> - name : " value " <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index d9b142682899b . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - config - proto . - device - count - entry . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . ConfigProto . DeviceCountEntry " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " DeviceCountEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index caa72fe5a61aa . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - config - proto . - experimental . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . ConfigProto . Experimental " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " Experimental " <nl> - field { <nl> - name : " collective_group_leader " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " executor_type " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " recv_buf_max_chunk " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " use_numa_affinity " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - reserved_range { <nl> - start : 2 <nl> - end : 3 <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index b505d813509c2 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - config - proto . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . ConfigProto " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " ConfigProto " <nl> - field { <nl> - name : " device_count " <nl> - number : 1 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . ConfigProto . DeviceCountEntry " <nl> - } <nl> - field { <nl> - name : " intra_op_parallelism_threads " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " inter_op_parallelism_threads " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " use_per_session_threads " <nl> - number : 9 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " session_inter_op_thread_pool " <nl> - number : 12 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . ThreadPoolOptionProto " <nl> - } <nl> - field { <nl> - name : " placement_period " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " device_filters " <nl> - number : 4 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " gpu_options " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . 
GPUOptions " <nl> - } <nl> - field { <nl> - name : " allow_soft_placement " <nl> - number : 7 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " log_device_placement " <nl> - number : 8 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " graph_options " <nl> - number : 10 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . GraphOptions " <nl> - } <nl> - field { <nl> - name : " operation_timeout_in_ms " <nl> - number : 11 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " rpc_options " <nl> - number : 13 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . RPCOptions " <nl> - } <nl> - field { <nl> - name : " cluster_def " <nl> - number : 14 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . ClusterDef " <nl> - } <nl> - field { <nl> - name : " isolate_session_state " <nl> - number : 15 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " experimental " <nl> - number : 16 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . ConfigProto . Experimental " <nl> - } <nl> - nested_type { <nl> - name : " DeviceCountEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - nested_type { <nl> - name : " Experimental " <nl> - field { <nl> - name : " collective_group_leader " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " executor_type " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " recv_buf_max_chunk " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " use_numa_affinity " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - reserved_range { <nl> - start : 2 <nl> - end : 3 <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index a2cc07483a4e1 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - g - p - u - options . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . 
GPUOptions " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " GPUOptions " <nl> - field { <nl> - name : " per_process_gpu_memory_fraction " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_DOUBLE <nl> - } <nl> - field { <nl> - name : " allow_growth " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " allocator_type " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " deferred_deletion_bytes " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " visible_device_list " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " polling_active_delay_usecs " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " polling_inactive_delay_msecs " <nl> - number : 7 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " force_gpu_compatible " <nl> - number : 8 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " experimental " <nl> - number : 9 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . GPUOptions . Experimental " <nl> - } <nl> - nested_type { <nl> - name : " Experimental " <nl> - field { <nl> - name : " virtual_devices " <nl> - number : 1 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . GPUOptions . Experimental . VirtualDevices " <nl> - } <nl> - field { <nl> - name : " use_unified_memory " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " num_dev_to_dev_copy_streams " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " collective_ring_order " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - nested_type { <nl> - name : " VirtualDevices " <nl> - field { <nl> - name : " memory_limit_mb " <nl> - number : 1 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_FLOAT <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 19eccff03d247 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - graph - def . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . GraphDef " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " GraphDef " <nl> - field { <nl> - name : " node " <nl> - number : 1 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . NodeDef " <nl> - } <nl> - field { <nl> - name : " versions " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . VersionDef " <nl> - } <nl> - field { <nl> - name : " version " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - options { <nl> - deprecated : true <nl> - } <nl> - } <nl> - field { <nl> - name : " library " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . FunctionDefLibrary " <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index a9f99bc171cc3 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - graph - options . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . 
GraphOptions " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " GraphOptions " <nl> - field { <nl> - name : " enable_recv_scheduling " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " optimizer_options " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . OptimizerOptions " <nl> - } <nl> - field { <nl> - name : " build_cost_model " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " build_cost_model_after " <nl> - number : 9 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " infer_shapes " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " place_pruned_graph " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " enable_bfloat16_sendrecv " <nl> - number : 7 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " timeline_step " <nl> - number : 8 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " rewrite_options " <nl> - number : 10 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . RewriterConfig " <nl> - } <nl> - reserved_range { <nl> - start : 1 <nl> - end : 2 <nl> - } <nl> - reserved_name : " skip_common_subexpression_elimination " <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index d4402f330b8a2 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - histogram - proto . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . HistogramProto " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " HistogramProto " <nl> - field { <nl> - name : " min " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_DOUBLE <nl> - } <nl> - field { <nl> - name : " max " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_DOUBLE <nl> - } <nl> - field { <nl> - name : " num " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_DOUBLE <nl> - } <nl> - field { <nl> - name : " sum " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_DOUBLE <nl> - } <nl> - field { <nl> - name : " sum_squares " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_DOUBLE <nl> - } <nl> - field { <nl> - name : " bucket_limit " <nl> - number : 6 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_DOUBLE <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - field { <nl> - name : " bucket " <nl> - number : 7 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_DOUBLE <nl> - options { <nl> - packed : true <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 5023aa96bf3b4 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - log - message . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . LogMessage " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " LogMessage " <nl> - field { <nl> - name : " level " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_ENUM <nl> - type_name : " . tensorflow . LogMessage . 
Level " <nl> - } <nl> - field { <nl> - name : " message " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - enum_type { <nl> - name : " Level " <nl> - value { <nl> - name : " UNKNOWN " <nl> - number : 0 <nl> - } <nl> - value { <nl> - name : " DEBUGGING " <nl> - number : 10 <nl> - } <nl> - value { <nl> - name : " INFO " <nl> - number : 20 <nl> - } <nl> - value { <nl> - name : " WARN " <nl> - number : 30 <nl> - } <nl> - value { <nl> - name : " ERROR " <nl> - number : 40 <nl> - } <nl> - value { <nl> - name : " FATAL " <nl> - number : 50 <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 0ba09bec4b3fa . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - meta - graph - def . - collection - def - entry . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . MetaGraphDef . CollectionDefEntry " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " CollectionDefEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . CollectionDef " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 41c62a407b857 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - meta - graph - def . - meta - info - def . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . MetaGraphDef . MetaInfoDef " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " MetaInfoDef " <nl> - field { <nl> - name : " meta_graph_version " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " stripped_op_list " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . OpList " <nl> - } <nl> - field { <nl> - name : " any_info " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . google . protobuf . Any " <nl> - } <nl> - field { <nl> - name : " tags " <nl> - number : 4 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " tensorflow_version " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " tensorflow_git_version " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " stripped_default_attrs " <nl> - number : 7 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 73dc414a779de . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - meta - graph - def . - signature - def - entry . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . MetaGraphDef . SignatureDefEntry " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " SignatureDefEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . SignatureDef " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index d71c2358c93e9 . . 
0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - meta - graph - def . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . MetaGraphDef " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " MetaGraphDef " <nl> - field { <nl> - name : " meta_info_def " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . MetaGraphDef . MetaInfoDef " <nl> - } <nl> - field { <nl> - name : " graph_def " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . GraphDef " <nl> - } <nl> - field { <nl> - name : " saver_def " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . SaverDef " <nl> - } <nl> - field { <nl> - name : " collection_def " <nl> - number : 4 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . MetaGraphDef . CollectionDefEntry " <nl> - } <nl> - field { <nl> - name : " signature_def " <nl> - number : 5 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . MetaGraphDef . SignatureDefEntry " <nl> - } <nl> - field { <nl> - name : " asset_file_def " <nl> - number : 6 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . AssetFileDef " <nl> - } <nl> - nested_type { <nl> - name : " MetaInfoDef " <nl> - field { <nl> - name : " meta_graph_version " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " stripped_op_list " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . OpList " <nl> - } <nl> - field { <nl> - name : " any_info " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . google . protobuf . Any " <nl> - } <nl> - field { <nl> - name : " tags " <nl> - number : 4 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " tensorflow_version " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " tensorflow_git_version " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " stripped_default_attrs " <nl> - number : 7 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - } <nl> - nested_type { <nl> - name : " CollectionDefEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . CollectionDef " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - nested_type { <nl> - name : " SignatureDefEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . SignatureDef " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index b119b20877219 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - name - attr - list . - attr - entry . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . 
NameAttrList . AttrEntry " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " AttrEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . AttrValue " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index fcdb411ffce9b . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - name - attr - list . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . NameAttrList " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " NameAttrList " <nl> - field { <nl> - name : " name " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " attr " <nl> - number : 2 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . NameAttrList . AttrEntry " <nl> - } <nl> - nested_type { <nl> - name : " AttrEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . AttrValue " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 622e4c3d0f60c . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - node - def . - attr - entry . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . NodeDef . AttrEntry " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " AttrEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . AttrValue " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 646fa8abb9b22 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - node - def . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . NodeDef " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " NodeDef " <nl> - field { <nl> - name : " name " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " op " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " input " <nl> - number : 3 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " device " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " attr " <nl> - number : 5 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . NodeDef . AttrEntry " <nl> - } <nl> - nested_type { <nl> - name : " AttrEntry " <nl> - field { <nl> - name : " key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_STRING <nl> - } <nl> - field { <nl> - name : " value " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . 
AttrValue " <nl> - } <nl> - options { <nl> - map_entry : true <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 3ccf9d459b133 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - optimizer - options . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . OptimizerOptions " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " OptimizerOptions " <nl> - field { <nl> - name : " do_common_subexpression_elimination " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " do_constant_folding " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " max_folded_constant_in_bytes " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " do_function_inlining " <nl> - number : 4 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " opt_level " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_ENUM <nl> - type_name : " . tensorflow . OptimizerOptions . Level " <nl> - } <nl> - field { <nl> - name : " global_jit_level " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_ENUM <nl> - type_name : " . tensorflow . OptimizerOptions . GlobalJitLevel " <nl> - } <nl> - enum_type { <nl> - name : " Level " <nl> - value { <nl> - name : " L1 " <nl> - number : 0 <nl> - } <nl> - value { <nl> - name : " L0 " <nl> - number : - 1 <nl> - } <nl> - } <nl> - enum_type { <nl> - name : " GlobalJitLevel " <nl> - value { <nl> - name : " DEFAULT " <nl> - number : 0 <nl> - } <nl> - value { <nl> - name : " OFF " <nl> - number : - 1 <nl> - } <nl> - value { <nl> - name : " ON_1 " <nl> - number : 1 <nl> - } <nl> - value { <nl> - name : " ON_2 " <nl> - number : 2 <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 1287940326c01 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - run - metadata . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . RunMetadata " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " RunMetadata " <nl> - field { <nl> - name : " step_stats " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . StepStats " <nl> - } <nl> - field { <nl> - name : " cost_graph " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . CostGraphDef " <nl> - } <nl> - field { <nl> - name : " partition_graphs " <nl> - number : 3 <nl> - label : LABEL_REPEATED <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . GraphDef " <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 47b5b56faf63e . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - run - options . - experimental . pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . RunOptions . Experimental " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " Experimental " <nl> - field { <nl> - name : " collective_graph_key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " use_run_handler_pool " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index c0c2e7b9f8d71 . . 0000000000000 <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . - run - options . 
pbtxt <nl> ppp / dev / null <nl> <nl> - path : " tensorflow . RunOptions " <nl> - tf_proto { <nl> - descriptor { <nl> - name : " RunOptions " <nl> - field { <nl> - name : " trace_level " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_ENUM <nl> - type_name : " . tensorflow . RunOptions . TraceLevel " <nl> - } <nl> - field { <nl> - name : " timeout_in_ms " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " inter_op_thread_pool " <nl> - number : 3 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT32 <nl> - } <nl> - field { <nl> - name : " output_partition_graphs " <nl> - number : 5 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " debug_options " <nl> - number : 6 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . DebugOptions " <nl> - } <nl> - field { <nl> - name : " report_tensor_allocations_upon_oom " <nl> - number : 7 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - field { <nl> - name : " experimental " <nl> - number : 8 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_MESSAGE <nl> - type_name : " . tensorflow . RunOptions . Experimental " <nl> - } <nl> - nested_type { <nl> - name : " Experimental " <nl> - field { <nl> - name : " collective_graph_key " <nl> - number : 1 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_INT64 <nl> - } <nl> - field { <nl> - name : " use_run_handler_pool " <nl> - number : 2 <nl> - label : LABEL_OPTIONAL <nl> - type : TYPE_BOOL <nl> - } <nl> - } <nl> - enum_type { <nl> - name : " TraceLevel " <nl> - value { <nl> - name : " NO_TRACE " <nl> - number : 0 <nl> - } <nl> - value { <nl> - name : " SOFTWARE_TRACE " <nl> - number : 1 <nl> - } <nl> - value { <nl> - name : " HARDWARE_TRACE " <nl> - number : 2 <nl> - } <nl> - value { <nl> - name : " FULL_TRACE " <nl> - number : 3 <nl> - } <nl> - } <nl> - reserved_range { <nl> - start : 4 <nl> - end : 5 <nl> - } <nl> - } <nl> - } <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . pbtxt <nl> tf_module { <nl> name : " AggregationMethod " <nl> mtype : " < type \ ' type \ ' > " <nl> } <nl> - member { <nl> - name : " AttrValue " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> - member { <nl> - name : " ConfigProto " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> member { <nl> name : " DType " <nl> mtype : " < type \ ' type \ ' > " <nl> tf_module { <nl> name : " FIFOQueue " <nl> mtype : " < type \ ' type \ ' > " <nl> } <nl> - member { <nl> - name : " GPUOptions " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> member { <nl> name : " GradientTape " <nl> mtype : " < type \ ' type \ ' > " <nl> tf_module { <nl> name : " Graph " <nl> mtype : " < type \ ' type \ ' > " <nl> } <nl> - member { <nl> - name : " GraphDef " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> - member { <nl> - name : " GraphOptions " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> - member { <nl> - name : " HistogramProto " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . 
GeneratedProtocolMessageType \ ' > " <nl> - } <nl> member { <nl> name : " IndexedSlices " <nl> mtype : " < type \ ' type \ ' > " <nl> } <nl> - member { <nl> - name : " LogMessage " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> - member { <nl> - name : " MetaGraphDef " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> - member { <nl> - name : " NameAttrList " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> - member { <nl> - name : " NodeDef " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> member { <nl> name : " Operation " <nl> mtype : " < type \ ' type \ ' > " <nl> } <nl> - member { <nl> - name : " OptimizerOptions " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> member { <nl> name : " RegisterGradient " <nl> mtype : " < type \ ' type \ ' > " <nl> } <nl> - member { <nl> - name : " RunMetadata " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> - member { <nl> - name : " RunOptions " <nl> - mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> - } <nl> member { <nl> name : " SessionLog " <nl> mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> mmm a / tensorflow / tools / compatibility / renames_v2 . py <nl> ppp b / tensorflow / tools / compatibility / renames_v2 . py <nl> <nl> <nl> renames = { <nl> ' tf . AUTO_REUSE ' : ' tf . compat . v1 . AUTO_REUSE ' , <nl> + ' tf . AttrValue ' : ' tf . compat . v1 . AttrValue ' , <nl> ' tf . COMPILER_VERSION ' : ' tf . version . COMPILER_VERSION ' , <nl> ' tf . CXX11_ABI_FLAG ' : ' tf . sysconfig . CXX11_ABI_FLAG ' , <nl> ' tf . ConditionalAccumulator ' : ' tf . compat . v1 . ConditionalAccumulator ' , <nl> ' tf . ConditionalAccumulatorBase ' : ' tf . compat . v1 . ConditionalAccumulatorBase ' , <nl> + ' tf . ConfigProto ' : ' tf . compat . v1 . ConfigProto ' , <nl> ' tf . DeviceSpec ' : ' tf . compat . v1 . DeviceSpec ' , <nl> ' tf . Dimension ' : ' tf . compat . v1 . Dimension ' , <nl> ' tf . FixedLenFeature ' : ' tf . io . FixedLenFeature ' , <nl> ' tf . FixedLenSequenceFeature ' : ' tf . io . FixedLenSequenceFeature ' , <nl> ' tf . FixedLengthRecordReader ' : ' tf . compat . v1 . FixedLengthRecordReader ' , <nl> ' tf . GIT_VERSION ' : ' tf . version . GIT_VERSION ' , <nl> + ' tf . GPUOptions ' : ' tf . compat . v1 . GPUOptions ' , <nl> ' tf . GRAPH_DEF_VERSION ' : ' tf . version . GRAPH_DEF_VERSION ' , <nl> ' tf . GRAPH_DEF_VERSION_MIN_CONSUMER ' : ' tf . version . GRAPH_DEF_VERSION_MIN_CONSUMER ' , <nl> ' tf . GRAPH_DEF_VERSION_MIN_PRODUCER ' : ' tf . version . GRAPH_DEF_VERSION_MIN_PRODUCER ' , <nl> + ' tf . GraphDef ' : ' tf . compat . v1 . GraphDef ' , <nl> ' tf . GraphKeys ' : ' tf . compat . v1 . GraphKeys ' , <nl> + ' tf . GraphOptions ' : ' tf . compat . v1 . GraphOptions ' , <nl> + ' tf . HistogramProto ' : ' tf . compat . v1 . HistogramProto ' , <nl> ' tf . IdentityReader ' : ' tf . compat . v1 . IdentityReader ' , <nl> ' tf . InteractiveSession ' : ' tf . compat . v1 . InteractiveSession ' , <nl> ' tf . LMDBReader ' : ' tf . compat . v1 . LMDBReader ' , <nl> + ' tf . LogMessage ' : ' tf . compat . 
v1 . LogMessage ' , <nl> ' tf . MONOLITHIC_BUILD ' : ' tf . sysconfig . MONOLITHIC_BUILD ' , <nl> + ' tf . MetaGraphDef ' : ' tf . compat . v1 . MetaGraphDef ' , <nl> + ' tf . NameAttrList ' : ' tf . compat . v1 . NameAttrList ' , <nl> ' tf . NoGradient ' : ' tf . no_gradient ' , <nl> + ' tf . NodeDef ' : ' tf . compat . v1 . NodeDef ' , <nl> ' tf . NotDifferentiable ' : ' tf . no_gradient ' , <nl> ' tf . OpError ' : ' tf . errors . OpError ' , <nl> + ' tf . OptimizerOptions ' : ' tf . compat . v1 . OptimizerOptions ' , <nl> ' tf . PaddingFIFOQueue ' : ' tf . io . PaddingFIFOQueue ' , <nl> ' tf . Print ' : ' tf . compat . v1 . Print ' , <nl> ' tf . PriorityQueue ' : ' tf . io . PriorityQueue ' , <nl> <nl> ' tf . QueueBase ' : ' tf . io . QueueBase ' , <nl> ' tf . RandomShuffleQueue ' : ' tf . io . RandomShuffleQueue ' , <nl> ' tf . ReaderBase ' : ' tf . compat . v1 . ReaderBase ' , <nl> + ' tf . RunMetadata ' : ' tf . compat . v1 . RunMetadata ' , <nl> + ' tf . RunOptions ' : ' tf . compat . v1 . RunOptions ' , <nl> ' tf . Session ' : ' tf . compat . v1 . Session ' , <nl> ' tf . SparseConditionalAccumulator ' : ' tf . sparse . SparseConditionalAccumulator ' , <nl> ' tf . SparseFeature ' : ' tf . io . SparseFeature ' , <nl> mmm a / tensorflow / tools / compatibility / tf_upgrade_v2_test . py <nl> ppp b / tensorflow / tools / compatibility / tf_upgrade_v2_test . py <nl> def testAllAPI ( self ) : <nl> <nl> # Converts all symbols in the v1 namespace to the v2 namespace , raising <nl> # an error if the target of the conversion is not in the v2 namespace . <nl> + # Please regenerate the renames file or edit any manual renames if this <nl> + # test fails . <nl> def conversion_visitor ( unused_path , unused_parent , children ) : <nl> for child in children : <nl> _ , attr = tf_decorator . unwrap ( child [ 1 ] ) <nl>
|
Remove some class symbols from tf 2 . 0 .
|
tensorflow/tensorflow
|
11a1fc8cddc4e04dee1cd0155992fdd1bba6b111
|
2018-12-05T21:42:31Z
|
mmm a / src / core / CMakeLists . txt <nl> ppp b / src / core / CMakeLists . txt <nl> add_library ( core STATIC <nl> hle / kernel / kernel . h <nl> hle / kernel / memory / address_space_info . cpp <nl> hle / kernel / memory / address_space_info . h <nl> + hle / kernel / memory / memory_block . h <nl> hle / kernel / memory / memory_types . h <nl> hle / kernel / memory / slab_heap . h <nl> hle / kernel / mutex . cpp <nl> new file mode 100644 <nl> index 00000000000 . . 1bb31405abe <nl> mmm / dev / null <nl> ppp b / src / core / hle / kernel / memory / memory_block . h <nl> <nl> + / / Copyright 2020 yuzu Emulator Project <nl> + / / Licensed under GPLv2 or any later version <nl> + / / Refer to the license . txt file included . <nl> + <nl> + # pragma once <nl> + <nl> + # include " common / alignment . h " <nl> + # include " common / assert . h " <nl> + # include " common / common_types . h " <nl> + # include " core / hle / kernel / memory / memory_types . h " <nl> + # include " core / hle / kernel / svc_types . h " <nl> + <nl> + namespace Kernel : : Memory { <nl> + <nl> + enum class MemoryState : u32 { <nl> + None = 0 , <nl> + Mask = 0xFFFFFFFF , / / TODO ( bunnei ) : This should probable be 0xFF <nl> + All = ~ None , <nl> + <nl> + FlagCanReprotect = ( 1 < < 8 ) , <nl> + FlagCanDebug = ( 1 < < 9 ) , <nl> + FlagCanUseIpc = ( 1 < < 10 ) , <nl> + FlagCanUseNonDeviceIpc = ( 1 < < 11 ) , <nl> + FlagCanUseNonSecureIpc = ( 1 < < 12 ) , <nl> + FlagMapped = ( 1 < < 13 ) , <nl> + FlagCode = ( 1 < < 14 ) , <nl> + FlagCanAlias = ( 1 < < 15 ) , <nl> + FlagCanCodeAlias = ( 1 < < 16 ) , <nl> + FlagCanTransfer = ( 1 < < 17 ) , <nl> + FlagCanQueryPhysical = ( 1 < < 18 ) , <nl> + FlagCanDeviceMap = ( 1 < < 19 ) , <nl> + FlagCanAlignedDeviceMap = ( 1 < < 20 ) , <nl> + FlagCanIpcUserBuffer = ( 1 < < 21 ) , <nl> + FlagReferenceCounted = ( 1 < < 22 ) , <nl> + FlagCanMapProcess = ( 1 < < 23 ) , <nl> + FlagCanChangeAttribute = ( 1 < < 24 ) , <nl> + FlagCanCodeMemory = ( 1 < < 25 ) , <nl> + <nl> + FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | <nl> + FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | <nl> + FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | <nl> + FlagReferenceCounted | FlagCanChangeAttribute , <nl> + <nl> + FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | <nl> + FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | <nl> + FlagCanAlignedDeviceMap | FlagReferenceCounted , <nl> + <nl> + FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap , <nl> + <nl> + Free = static_cast < u32 > ( Svc : : MemoryState : : Free ) , <nl> + Io = static_cast < u32 > ( Svc : : MemoryState : : Io ) | FlagMapped , <nl> + Static = static_cast < u32 > ( Svc : : MemoryState : : Static ) | FlagMapped | FlagCanQueryPhysical , <nl> + Code = static_cast < u32 > ( Svc : : MemoryState : : Code ) | FlagsCode | FlagCanMapProcess , <nl> + CodeData = static_cast < u32 > ( Svc : : MemoryState : : CodeData ) | FlagsData | FlagCanMapProcess | <nl> + FlagCanCodeMemory , <nl> + Shared = static_cast < u32 > ( Svc : : MemoryState : : Shared ) | FlagMapped | FlagReferenceCounted , <nl> + Normal = static_cast < u32 > ( Svc : : MemoryState : : Normal ) | FlagsData | FlagCanCodeMemory , <nl> + <nl> + AliasCode = static_cast < u32 > ( Svc : : MemoryState : : AliasCode ) | FlagsCode | FlagCanMapProcess | <nl> + FlagCanCodeAlias , <nl> + AliasCodeData = static_cast < u32 > ( Svc : 
: MemoryState : : AliasCodeData ) | FlagsData | <nl> + FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory , <nl> + <nl> + Ipc = static_cast < u32 > ( Svc : : MemoryState : : Ipc ) | FlagsMisc | FlagCanAlignedDeviceMap | <nl> + FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc , <nl> + <nl> + Stack = static_cast < u32 > ( Svc : : MemoryState : : Stack ) | FlagsMisc | FlagCanAlignedDeviceMap | <nl> + FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc , <nl> + <nl> + ThreadLocal = <nl> + static_cast < u32 > ( Svc : : MemoryState : : ThreadLocal ) | FlagMapped | FlagReferenceCounted , <nl> + <nl> + Transfered = static_cast < u32 > ( Svc : : MemoryState : : Transfered ) | FlagsMisc | <nl> + FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | <nl> + FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc , <nl> + <nl> + SharedTransfered = static_cast < u32 > ( Svc : : MemoryState : : SharedTransfered ) | FlagsMisc | <nl> + FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc , <nl> + <nl> + SharedCode = static_cast < u32 > ( Svc : : MemoryState : : SharedCode ) | FlagMapped | <nl> + FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc , <nl> + <nl> + Inaccessible = static_cast < u32 > ( Svc : : MemoryState : : Inaccessible ) , <nl> + <nl> + NonSecureIpc = static_cast < u32 > ( Svc : : MemoryState : : NonSecureIpc ) | FlagsMisc | <nl> + FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc , <nl> + <nl> + NonDeviceIpc = <nl> + static_cast < u32 > ( Svc : : MemoryState : : NonDeviceIpc ) | FlagsMisc | FlagCanUseNonDeviceIpc , <nl> + <nl> + Kernel = static_cast < u32 > ( Svc : : MemoryState : : Kernel ) | FlagMapped , <nl> + <nl> + GeneratedCode = static_cast < u32 > ( Svc : : MemoryState : : GeneratedCode ) | FlagMapped | <nl> + FlagReferenceCounted | FlagCanDebug , <nl> + CodeOut = static_cast < u32 > ( Svc : : MemoryState : : CodeOut ) | FlagMapped | FlagReferenceCounted , <nl> + } ; <nl> + DECLARE_ENUM_FLAG_OPERATORS ( MemoryState ) ; <nl> + <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Free ) = = 0x00000000 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Io ) = = 0x00002001 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Static ) = = 0x00042002 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Code ) = = 0x00DC7E03 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : CodeData ) = = 0x03FEBD04 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Normal ) = = 0x037EBD05 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Shared ) = = 0x00402006 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : AliasCode ) = = 0x00DD7E08 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : AliasCodeData ) = = 0x03FFBD09 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Ipc ) = = 0x005C3C0A ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Stack ) = = 0x005C3C0B ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : ThreadLocal ) = = 0x0040200C ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Transfered ) = = 0x015C3C0D ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : SharedTransfered ) = = 0x005C380E ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : SharedCode ) = = 0x0040380F ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Inaccessible ) = = 0x00000010 ) ; <nl> + static_assert ( 
static_cast < u32 > ( MemoryState : : NonSecureIpc ) = = 0x005C3811 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : NonDeviceIpc ) = = 0x004C2812 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : Kernel ) = = 0x00002013 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : GeneratedCode ) = = 0x00402214 ) ; <nl> + static_assert ( static_cast < u32 > ( MemoryState : : CodeOut ) = = 0x00402015 ) ; <nl> + <nl> + enum class MemoryPermission : u8 { <nl> + None = 0 , <nl> + Mask = static_cast < u8 > ( ~ None ) , <nl> + <nl> + Read = 1 < < 0 , <nl> + Write = 1 < < 1 , <nl> + Execute = 1 < < 2 , <nl> + <nl> + ReadAndWrite = Read | Write , <nl> + ReadAndExecute = Read | Execute , <nl> + <nl> + UserMask = static_cast < u8 > ( Svc : : MemoryPermission : : Read | Svc : : MemoryPermission : : Write | <nl> + Svc : : MemoryPermission : : Execute ) , <nl> + } ; <nl> + DECLARE_ENUM_FLAG_OPERATORS ( MemoryPermission ) ; <nl> + <nl> + enum class MemoryAttribute : u8 { <nl> + None = 0x00 , <nl> + Mask = 0x7F , <nl> + All = Mask , <nl> + DontCareMask = 0x80 , <nl> + <nl> + Locked = static_cast < u8 > ( Svc : : MemoryAttribute : : Locked ) , <nl> + IpcLocked = static_cast < u8 > ( Svc : : MemoryAttribute : : IpcLocked ) , <nl> + DeviceShared = static_cast < u8 > ( Svc : : MemoryAttribute : : DeviceShared ) , <nl> + Uncached = static_cast < u8 > ( Svc : : MemoryAttribute : : Uncached ) , <nl> + <nl> + IpcAndDeviceMapped = IpcLocked | DeviceShared , <nl> + LockedAndIpcLocked = Locked | IpcLocked , <nl> + DeviceSharedAndUncached = DeviceShared | Uncached <nl> + } ; <nl> + DECLARE_ENUM_FLAG_OPERATORS ( MemoryAttribute ) ; <nl> + <nl> + static_assert ( ( static_cast < u8 > ( MemoryAttribute : : Mask ) & <nl> + static_cast < u8 > ( MemoryAttribute : : DontCareMask ) ) = = 0 ) ; <nl> + <nl> + struct MemoryInfo { <nl> + VAddr addr { } ; <nl> + std : : size_t size { } ; <nl> + MemoryState state { } ; <nl> + MemoryPermission perm { } ; <nl> + MemoryAttribute attribute { } ; <nl> + MemoryPermission original_perm { } ; <nl> + u16 ipc_lock_count { } ; <nl> + u16 device_use_count { } ; <nl> + <nl> + constexpr Svc : : MemoryInfo GetSvcMemoryInfo ( ) const { <nl> + return { <nl> + addr , <nl> + size , <nl> + static_cast < Svc : : MemoryState > ( state & MemoryState : : Mask ) , <nl> + static_cast < Svc : : MemoryAttribute > ( attribute & MemoryAttribute : : Mask ) , <nl> + static_cast < Svc : : MemoryPermission > ( perm & MemoryPermission : : UserMask ) , <nl> + ipc_lock_count , <nl> + device_use_count , <nl> + } ; <nl> + } <nl> + <nl> + constexpr VAddr GetAddress ( ) const { <nl> + return addr ; <nl> + } <nl> + constexpr std : : size_t GetSize ( ) const { <nl> + return size ; <nl> + } <nl> + constexpr std : : size_t GetNumPages ( ) const { <nl> + return GetSize ( ) / PageSize ; <nl> + } <nl> + constexpr VAddr GetEndAddress ( ) const { <nl> + return GetAddress ( ) + GetSize ( ) ; <nl> + } <nl> + constexpr VAddr GetLastAddress ( ) const { <nl> + return GetEndAddress ( ) - 1 ; <nl> + } <nl> + } ; <nl> + <nl> + class MemoryBlock final { <nl> + friend class MemoryBlockManager ; <nl> + <nl> + private : <nl> + VAddr addr { } ; <nl> + std : : size_t num_pages { } ; <nl> + MemoryState state { MemoryState : : None } ; <nl> + u16 ipc_lock_count { } ; <nl> + u16 device_use_count { } ; <nl> + MemoryPermission perm { MemoryPermission : : None } ; <nl> + MemoryPermission original_perm { MemoryPermission : : None } ; <nl> + MemoryAttribute attribute { MemoryAttribute : : None } ; <nl> + <nl> + 
public : <nl> + static constexpr int Compare ( const MemoryBlock & lhs , const MemoryBlock & rhs ) { <nl> + if ( lhs . GetAddress ( ) < rhs . GetAddress ( ) ) { <nl> + return - 1 ; <nl> + } else if ( lhs . GetAddress ( ) < = rhs . GetLastAddress ( ) ) { <nl> + return 0 ; <nl> + } else { <nl> + return 1 ; <nl> + } <nl> + } <nl> + <nl> + public : <nl> + constexpr MemoryBlock ( ) = default ; <nl> + constexpr MemoryBlock ( VAddr addr , std : : size_t num_pages , MemoryState state , <nl> + MemoryPermission perm , MemoryAttribute attribute ) <nl> + : addr { addr } , num_pages ( num_pages ) , state { state } , perm { perm } , attribute { attribute } { } <nl> + <nl> + constexpr VAddr GetAddress ( ) const { <nl> + return addr ; <nl> + } <nl> + <nl> + constexpr std : : size_t GetNumPages ( ) const { <nl> + return num_pages ; <nl> + } <nl> + <nl> + constexpr std : : size_t GetSize ( ) const { <nl> + return GetNumPages ( ) * PageSize ; <nl> + } <nl> + <nl> + constexpr VAddr GetEndAddress ( ) const { <nl> + return GetAddress ( ) + GetSize ( ) ; <nl> + } <nl> + <nl> + constexpr VAddr GetLastAddress ( ) const { <nl> + return GetEndAddress ( ) - 1 ; <nl> + } <nl> + <nl> + constexpr MemoryInfo GetMemoryInfo ( ) const { <nl> + return { <nl> + GetAddress ( ) , GetSize ( ) , state , perm , <nl> + attribute , original_perm , ipc_lock_count , device_use_count , <nl> + } ; <nl> + } <nl> + <nl> + private : <nl> + constexpr bool HasProperties ( MemoryState s , MemoryPermission p , MemoryAttribute a ) const { <nl> + constexpr MemoryAttribute AttributeIgnoreMask { MemoryAttribute : : DontCareMask | <nl> + MemoryAttribute : : IpcLocked | <nl> + MemoryAttribute : : DeviceShared } ; <nl> + return state = = s & & perm = = p & & <nl> + ( attribute | AttributeIgnoreMask ) = = ( a | AttributeIgnoreMask ) ; <nl> + } <nl> + <nl> + constexpr bool HasSameProperties ( const MemoryBlock & rhs ) const { <nl> + return state = = rhs . state & & perm = = rhs . perm & & original_perm = = rhs . original_perm & & <nl> + attribute = = rhs . attribute & & ipc_lock_count = = rhs . ipc_lock_count & & <nl> + device_use_count = = rhs . device_use_count ; <nl> + } <nl> + <nl> + constexpr bool Contains ( VAddr start ) const { <nl> + return GetAddress ( ) < = start & & start < = GetEndAddress ( ) ; <nl> + } <nl> + <nl> + constexpr void Add ( std : : size_t count ) { <nl> + ASSERT ( count > 0 ) ; <nl> + ASSERT ( GetAddress ( ) + count * PageSize - 1 < GetEndAddress ( ) + count * PageSize - 1 ) ; <nl> + <nl> + num_pages + = count ; <nl> + } <nl> + <nl> + constexpr void Update ( MemoryState new_state , MemoryPermission new_perm , <nl> + MemoryAttribute new_attribute ) { <nl> + ASSERT ( original_perm = = MemoryPermission : : None ) ; <nl> + ASSERT ( ( attribute & MemoryAttribute : : IpcLocked ) = = MemoryAttribute : : None ) ; <nl> + <nl> + state = new_state ; <nl> + perm = new_perm ; <nl> + <nl> + / / TODO ( bunnei ) : Is this right ? <nl> + attribute = static_cast < MemoryAttribute > ( <nl> + new_attribute / * | ( attribute & ( MemoryAttribute : : IpcLocked | MemoryAttribute : : DeviceShared ) ) * / ) ; <nl> + } <nl> + <nl> + constexpr MemoryBlock Split ( VAddr split_addr ) { <nl> + ASSERT ( GetAddress ( ) < split_addr ) ; <nl> + ASSERT ( Contains ( split_addr ) ) ; <nl> + ASSERT ( Common : : IsAligned ( split_addr , PageSize ) ) ; <nl> + <nl> + MemoryBlock block ; <nl> + block . addr = addr ; <nl> + block . num_pages = ( split_addr - GetAddress ( ) ) / PageSize ; <nl> + block . state = state ; <nl> + block . 
ipc_lock_count = ipc_lock_count ; <nl> + block . device_use_count = device_use_count ; <nl> + block . perm = perm ; <nl> + block . original_perm = original_perm ; <nl> + block . attribute = attribute ; <nl> + <nl> + addr = split_addr ; <nl> + num_pages - = block . num_pages ; <nl> + <nl> + return block ; <nl> + } <nl> + } ; <nl> + static_assert ( std : : is_trivially_destructible < MemoryBlock > : : value ) ; <nl> + <nl> + } / / namespace Kernel : : Memory <nl>
|
kernel : memory : Add MemoryBlock class , for managing memory blocks and their state .
|
yuzu-emu/yuzu
|
c2f4dcb1e3ddca062a51cad11e6314196d9d16bc
|
2020-04-17T04:59:29Z
|
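A minimal standalone sketch of the page-splitting bookkeeping that the MemoryBlock::Split routine in the yuzu diff above performs. It is not the Kernel::Memory API: Block, PageSize, and the 0x8000000 base address are illustrative stand-ins, and the state/permission/attribute fields are deliberately omitted so only the address arithmetic remains.

// Simplified model of Split: carve the leading pages off a contiguous block,
// return them as a new block, and keep the tail in place.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

namespace sketch {

constexpr std::size_t PageSize = 0x1000; // 4 KiB pages, as assumed by the diff

struct Block {
    std::uintptr_t addr;
    std::size_t num_pages;

    std::uintptr_t EndAddress() const { return addr + num_pages * PageSize; }

    // Split off the leading part [addr, split_addr); *this keeps the tail.
    Block Split(std::uintptr_t split_addr) {
        assert(addr < split_addr && split_addr < EndAddress());
        assert(split_addr % PageSize == 0);

        Block head{addr, (split_addr - addr) / PageSize};
        addr = split_addr;
        num_pages -= head.num_pages;
        return head;
    }
};

} // namespace sketch

int main() {
    sketch::Block block{0x8000000, 16};          // 16 pages starting at 0x8000000
    sketch::Block head = block.Split(0x8004000); // first 4 pages split off
    std::printf("head: %zu pages at %#zx\n", head.num_pages,
                static_cast<std::size_t>(head.addr));
    std::printf("tail: %zu pages at %#zx\n", block.num_pages,
                static_cast<std::size_t>(block.addr));
    return 0;
}

Returning the leading half while the original object becomes the tail presumably lets a block manager insert the returned block directly in front of the existing one without re-sorting, though that manager is not part of this commit.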
mmm a / src / btree / btree_store . cc <nl> ppp b / src / btree / btree_store . cc <nl> <nl> # include " serializer / config . hpp " <nl> # include " containers / archive / vector_stream . hpp " <nl> # include " concurrency / wait_any . hpp " <nl> + # include " query_measure . hpp " <nl> <nl> template < class protocol_t > <nl> btree_store_t < protocol_t > : : btree_store_t ( serializer_t * serializer , <nl> void btree_store_t < protocol_t > : : write ( <nl> object_buffer_t < fifo_enforcer_sink_t : : exit_write_t > * token , <nl> signal_t * interruptor ) <nl> THROWS_ONLY ( interrupted_exc_t ) { <nl> + TICKVAR ( bw_A ) ; <nl> assert_thread ( ) ; <nl> <nl> - scoped_ptr_t < transaction_t > txn ; <nl> - scoped_ptr_t < real_superblock_t > superblock ; <nl> - const int expected_change_count = 2 ; / / FIXME : this is incorrect , but will do for now <nl> - acquire_superblock_for_write ( rwi_write , timestamp . to_repli_timestamp ( ) , expected_change_count , token , & txn , & superblock , interruptor ) ; <nl> + DTICKVAR ( bw_B ) ; <nl> + DTICKVAR ( bw_C ) ; <nl> + DTICKVAR ( bw_D ) ; <nl> + DTICKVAR ( bw_E ) ; <nl> + { <nl> + scoped_ptr_t < transaction_t > txn ; <nl> + { <nl> + scoped_ptr_t < real_superblock_t > superblock ; <nl> + const int expected_change_count = 2 ; / / FIXME : this is incorrect , but will do for now <nl> + acquire_superblock_for_write ( rwi_write , timestamp . to_repli_timestamp ( ) , expected_change_count , token , & txn , & superblock , interruptor ) ; <nl> <nl> - check_and_update_metainfo ( DEBUG_ONLY ( metainfo_checker , ) new_metainfo , txn . get ( ) , superblock . get ( ) ) ; <nl> + ATICKVAR ( bw_B ) ; <nl> + check_and_update_metainfo ( DEBUG_ONLY ( metainfo_checker , ) new_metainfo , txn . get ( ) , superblock . get ( ) ) ; <nl> <nl> - protocol_write ( write , response , timestamp , btree . get ( ) , txn . get ( ) , superblock . get ( ) ) ; <nl> + ATICKVAR ( bw_C ) ; <nl> + protocol_write ( write , response , timestamp , btree . get ( ) , txn . get ( ) , superblock . get ( ) ) ; <nl> + <nl> + ATICKVAR ( bw_D ) ; <nl> + } <nl> + ATICKVAR ( bw_E ) ; <nl> + } <nl> + TICKVAR ( bw_F ) ; <nl> + logRQM ( " btree_store write ( % p ) bw_A % ld B % ld C % ld D % ld E % ld F \ n " , coro_t : : self ( ) , <nl> + bw_B - bw_A , bw_C - bw_B , bw_D - bw_C , bw_E - bw_D , bw_F - bw_E ) ; <nl> } <nl> <nl> / / TODO : Figure out wtf does the backfill filtering , figure out wtf constricts delete range operations to hit only a certain hash - interval , figure out what filters keys . <nl> mmm a / src / clustering / immediate_consistency / branch / broadcaster . cc <nl> ppp b / src / clustering / immediate_consistency / branch / broadcaster . cc <nl> void listener_writeread ( <nl> signal_t * interruptor ) <nl> THROWS_ONLY ( interrupted_exc_t ) <nl> { <nl> + TICKVAR ( lw_A ) ; <nl> cond_t resp_cond ; <nl> mailbox_t < void ( typename protocol_t : : write_response_t ) > resp_mailbox ( <nl> mailbox_manager , <nl> boost : : bind ( & store_listener_response < typename protocol_t : : write_response_t > , response , _1 , & resp_cond ) , <nl> mailbox_callback_mode_inline ) ; <nl> <nl> + TICKVAR ( lw_B ) ; <nl> send ( mailbox_manager , writeread_mailbox , <nl> w , ts , order_token , token , resp_mailbox . 
get_address ( ) ) ; <nl> <nl> + TICKVAR ( lw_C ) ; <nl> wait_interruptible ( & resp_cond , interruptor ) ; <nl> + <nl> + TICKVAR ( lw_D ) ; <nl> + / / logRQM ( " listener_writeread lw_A % ld B % ld C % ld D \ n " , <nl> + / / lw_B - lw_A , lw_C - lw_B , lw_D - lw_C ) ; <nl> } <nl> <nl> template < class protocol_t > <nl> void broadcaster_t < protocol_t > : : background_writeread ( dispatchee_t * mirror , auto_ <nl> write_ref . get ( ) - > write , & resp , write_ref . get ( ) - > timestamp , order_token , token , <nl> mirror_lock . get_drain_signal ( ) ) ; <nl> TICKVAR ( bwr_B ) ; <nl> - / / logRQM ( " background_writeread % ld \ n " , bwr_B - bwr_A ) ; <nl> <nl> if ( write_ref . get ( ) - > callback ) { <nl> write_ref . get ( ) - > callback - > on_response ( mirror - > get_peer ( ) , resp ) ; <nl> } <nl> + <nl> + TICKVAR ( bwr_C ) ; <nl> + / / logRQM ( " background_writeread A % ld B % ld C \ n " , bwr_B - bwr_A , bwr_C - bwr_B ) ; <nl> } catch ( interrupted_exc_t ) { <nl> return ; <nl> } <nl> mmm a / src / clustering / immediate_consistency / query / master . cc <nl> ppp b / src / clustering / immediate_consistency / query / master . cc <nl> void master_t < protocol_t > : : client_t : : perform_request ( <nl> broadcaster ' s write queue is gone . * / <nl> wait_interruptible ( & write_callback . done_cond , & parent - > shutdown_cond ) ; <nl> TICKVAR ( mc_H ) ; <nl> - logRQM ( " master client perform_request ( write ) mc_A % ld B % ld C % ld D % ld E % ld F % ld G % ld H \ n " , <nl> - mc_B - mc_A , mc_C - mc_B , mc_D - mc_C , mc_E - mc_D , mc_F - mc_E , mc_G - mc_F , mc_H - mc_G ) ; <nl> + / / logRQM ( " master client perform_request ( write ) mc_A % ld B % ld C % ld D % ld E % ld F % ld G % ld H \ n " , <nl> + / / mc_B - mc_A , mc_C - mc_B , mc_D - mc_C , mc_E - mc_D , mc_F - mc_E , mc_G - mc_F , mc_H - mc_G ) ; <nl> <nl> } else { <nl> unreachable ( ) ; <nl> mmm a / src / protocol_api . hpp <nl> ppp b / src / protocol_api . hpp <nl> <nl> # include " containers / binary_blob . hpp " <nl> # include " containers / scoped . hpp " <nl> # include " containers / object_buffer . hpp " <nl> + # include " query_measure . hpp " <nl> # include " rpc / serialize_macros . hpp " <nl> # include " timestamps . hpp " <nl> <nl> class store_subview_t : public store_view_t < protocol_t > <nl> object_buffer_t < fifo_enforcer_sink_t : : exit_write_t > * token , <nl> signal_t * interruptor ) <nl> THROWS_ONLY ( interrupted_exc_t ) { <nl> + TICKVAR ( svw_A ) ; <nl> home_thread_mixin_t : : assert_thread ( ) ; <nl> rassert ( region_is_superset ( get_region ( ) , metainfo_checker . get_domain ( ) ) ) ; <nl> rassert ( region_is_superset ( get_region ( ) , new_metainfo . get_domain ( ) ) ) ; <nl> <nl> - return store_view - > write ( DEBUG_ONLY ( metainfo_checker , ) new_metainfo , write , response , timestamp , order_token , token , interruptor ) ; <nl> + TICKVAR ( svw_B ) ; <nl> + store_view - > write ( DEBUG_ONLY ( metainfo_checker , ) new_metainfo , write , response , timestamp , order_token , token , interruptor ) ; <nl> + <nl> + TICKVAR ( svw_C ) ; <nl> + logRQM ( " subview write ( % p ) svw_A % ld B % ld C \ n " , coro_t : : self ( ) , <nl> + svw_B - svw_A , svw_C - svw_B ) ; <nl> } <nl> <nl> / / TODO : Make this take protocol_t : : progress_t again ( or maybe a <nl> mmm a / src / rdb_protocol / protocol . cc <nl> ppp b / src / rdb_protocol / protocol . cc <nl> <nl> # include " concurrency / wait_any . hpp " <nl> # include " containers / archive / vector_stream . hpp " <nl> # include " protob / protob . 
hpp " <nl> + # include " query_measure . hpp " <nl> # include " rdb_protocol / btree . hpp " <nl> # include " rdb_protocol / protocol . hpp " <nl> # include " rdb_protocol / query_language . hpp " <nl> void store_t : : protocol_write ( const write_t & write , <nl> btree_slice_t * btree , <nl> transaction_t * txn , <nl> superblock_t * superblock ) { <nl> + TICKVAR ( pw_A ) ; <nl> write_visitor_t v ( btree , txn , superblock , timestamp . to_repli_timestamp ( ) , ctx , response ) ; <nl> + TICKVAR ( pw_B ) ; <nl> boost : : apply_visitor ( v , write . write ) ; <nl> + TICKVAR ( pw_C ) ; <nl> + logRQM ( " protocol_write A % ld B % ld C \ n " , pw_B - pw_A , pw_C - pw_B ) ; <nl> } <nl> <nl> namespace { <nl> mmm a / src / rdb_protocol / query_language . cc <nl> ppp b / src / rdb_protocol / query_language . cc <nl> void execute_write_query ( WriteQuery * w , runtime_environment_t * env , Response * re <nl> } <nl> res - > add_response ( " { " + res_list + " } " ) ; <nl> TICKVAR ( wt_G ) ; <nl> - logRQM ( " execute_write_query UPDATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> + / / logRQM ( " execute_write_query UPDATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> } break ; <nl> case WriteQuery : : MUTATE : { <nl> view_t view = eval_term_as_view ( w - > mutable_mutate ( ) - > mutable_view ( ) , env , scopes , backtrace . with ( " view " ) ) ; <nl> void execute_write_query ( WriteQuery * w , runtime_environment_t * env , Response * re <nl> } <nl> res - > add_response ( " { " + res_list + " } " ) ; <nl> TICKVAR ( wt_E ) ; <nl> - logRQM ( " execute_write_query MUTATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D ) ; <nl> + / / logRQM ( " execute_write_query MUTATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D ) ; <nl> <nl> } break ; <nl> case WriteQuery : : DELETE : { <nl> void execute_write_query ( WriteQuery * w , runtime_environment_t * env , Response * re <nl> TICKVAR ( wt_D ) ; <nl> <nl> res - > add_response ( strprintf ( " { \ " deleted \ " : % d } " , deleted ) ) ; <nl> - logRQM ( " execute_write_query DELETE wt_A % ld wt_B % ld wt_C % ld wt_D \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C ) ; <nl> + / / logRQM ( " execute_write_query DELETE wt_A % ld wt_B % ld wt_C % ld wt_D \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C ) ; <nl> } break ; <nl> case WriteQuery : : INSERT : { <nl> std : : string pk = get_primary_key ( w - > mutable_insert ( ) - > mutable_table_ref ( ) , env , backtrace ) ; <nl> void execute_write_query ( WriteQuery * w , runtime_environment_t * env , Response * re <nl> } <nl> TICKVAR ( wt_D ) ; <nl> res - > add_response ( lhs . 
Print ( ) ) ; <nl> - logRQM ( " execute_write_query FOREACH wt_A % ld wt_B % ld wt_C % ld wt_D \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C ) ; <nl> + / / logRQM ( " execute_write_query FOREACH wt_A % ld wt_B % ld wt_C % ld wt_D \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C ) ; <nl> <nl> } break ; <nl> case WriteQuery : : POINTUPDATE : { <nl> void execute_write_query ( WriteQuery * w , runtime_environment_t * env , Response * re <nl> res - > add_response ( strprintf ( " { \ " updated \ " : % d , \ " skipped \ " : % d , \ " errors \ " : % d } " , <nl> mres = = point_modify : : MODIFIED , mres = = point_modify : : SKIPPED , 0 ) ) ; <nl> TICKVAR ( wt_G ) ; <nl> - logRQM ( " execute_write_query POINTUPDATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> + / / logRQM ( " execute_write_query POINTUPDATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> <nl> } break ; <nl> case WriteQuery : : POINTDELETE : { / / TODO : enforce primary key <nl> void execute_write_query ( WriteQuery * w , runtime_environment_t * env , Response * re <nl> <nl> res - > add_response ( strprintf ( " { \ " deleted \ " : % d } " , deleted ) ) ; <nl> TICKVAR ( wt_G ) ; <nl> - logRQM ( " execute_write_query POINTDELETE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> + / / logRQM ( " execute_write_query POINTDELETE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> <nl> } break ; <nl> case WriteQuery : : POINTMUTATE : { <nl> void execute_write_query ( WriteQuery * w , runtime_environment_t * env , Response * re <nl> res - > add_response ( strprintf ( " { \ " modified \ " : % d , \ " inserted \ " : % d , \ " deleted \ " : % d , \ " errors \ " : % d } " , <nl> mres = = point_modify : : MODIFIED , mres = = point_modify : : INSERTED , mres = = point_modify : : DELETED , 0 ) ) ; <nl> TICKVAR ( wt_G ) ; <nl> - logRQM ( " execute_write_query POINTMUTATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> + / / logRQM ( " execute_write_query POINTMUTATE wt_A % ld wt_B % ld wt_C % ld wt_D % ld wt_E % ld wt_F % ld wt_G \ n " , wt_B - wt_A , wt_C - wt_B , wt_D - wt_C , wt_E - wt_D , wt_F - wt_E , wt_G - wt_F ) ; <nl> } break ; <nl> default : <nl> unreachable ( ) ; <nl>
|
Littered the codebase with more ugly query_measure measurement lines .
|
rethinkdb/rethinkdb
|
305281a83ee9f1431a8915799bae5fcd0163892c
|
2012-10-15T11:55:40Z
|
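The rethinkdb row above is pure timing instrumentation: TICKVAR-style macros capture a monotonic tick counter at labelled points and logRQM prints the deltas between consecutive points. Those macros live in the repo's query_measure.hpp and are not reproduced here; the sketch below only mimics the pattern with std::chrono, and the bw_A/bw_B/bw_C labels are borrowed for illustration.

// Generic illustration of the tick-point pattern: timestamp each labelled
// point, then log the gaps so slow phases stand out in the output.
#include <chrono>
#include <cstdio>

static long long ticks_ns() {
    using namespace std::chrono;
    return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
}

void instrumented_write() {
    const long long bw_A = ticks_ns(); // entry
    /* ... acquire superblock ... */
    const long long bw_B = ticks_ns(); // superblock acquired
    /* ... update metainfo, perform protocol write ... */
    const long long bw_C = ticks_ns(); // write finished
    std::printf("btree_store write bw_A %lld B %lld C\n", bw_B - bw_A, bw_C - bw_B);
}

int main() {
    instrumented_write();
    return 0;
}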
new file mode 100644 <nl> index 000000000000 . . 8b844b166855 <nl> mmm / dev / null <nl> ppp b / utils / protocol_graph . py <nl> <nl> + # = = = mmm protocol_graph . py mmmmmmmmmmmmmmmmmmmmmmmm - * - coding : utf - 8 - * mmm - = = = # <nl> + # <nl> + # This source file is part of the Swift . org open source project <nl> + # <nl> + # Copyright ( c ) 2014 - 2015 Apple Inc . and the Swift project authors <nl> + # Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + # <nl> + # See http : / / swift . org / LICENSE . txt for license information <nl> + # See http : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + # <nl> + # = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = # <nl> + # <nl> + # Create a graph of the protocol refinement relationships , associated <nl> + # types , operator requirements , and defaulted generic operators . <nl> + # <nl> + # run as follows to view the Nth - largest connected component in a web browser : <nl> + # <nl> + # N = 0 & & rm - f / tmp / protocols . dot & & \ <nl> + # python protocol_graph . py stdlib . swift > / tmp / p0 . dot & & \ <nl> + # ( ccomps - zX # $ N - o / tmp / protocols . dot / tmp / p0 . dot | | true ) \ <nl> + # & & dot - Tsvg / tmp / protocols . dot > / tmp / protocols . svg \ <nl> + # & & open / tmp / protocols . svg <nl> + # = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = # <nl> + <nl> + import re <nl> + import sys <nl> + import os <nl> + import cgi <nl> + <nl> + # Open ' stdlib . swift ' in this directory if no path specified . <nl> + args = list ( sys . argv ) + [ os . path . join ( os . path . dirname ( __file__ ) , ' stdlib . swift ' ) ] <nl> + <nl> + reFlags = re . MULTILINE | re . VERBOSE <nl> + <nl> + # Pattern to recognize stdlib identifiers ( FIXME : doesn ' t handle Unicode ) . <nl> + identifier = ' [ A - Za - z_ ] [ A - Za - z0 - 9_ ] * ' <nl> + <nl> + # Pattern to recognize a ( possibly - generic ) operator decl . <nl> + operator = r ' ' ' <nl> + ( ? : ( ? : prefix | postfix ) . * ) ? func \ s * <nl> + ( ? = \ S ) [ ^ A - Za - z_ ] # non - space , non - identifier : begins an operator name <nl> + ( ? : ( ? = \ S ) [ ^ ( ) { ] ) * # rest of operator name <nl> + \ s * <nl> + ( < [ ^ > { ] + > ) ? # generic parameter list <nl> + \ s * <nl> + \ ( [ ^ ) ] * \ ) # function parameter list <nl> + ' ' ' <nl> + <nl> + # substitute local variables into the string <nl> + def interpolate ( string ) : <nl> + import inspect <nl> + frame = inspect . currentframe ( ) <nl> + return string % frame . f_back . f_locals <nl> + <nl> + # Given the bodyText of a protocol definition , return a list of <nl> + # associated type and operator requirements . <nl> + def bodyLines ( bodyText ) : <nl> + return [ <nl> + cgi . escape ( b . group ( 0 ) ) for b in <nl> + re . finditer ( <nl> + r ' ( typealias \ s * ' + identifier + r ' ( \ s * [ : , ] \ s * ' + identifier + ' ) | ' + operator + ' . * ) ' , <nl> + bodyText , reFlags ) <nl> + ] <nl> + <nl> + # Mapping from protocol to associated type / operator requirements <nl> + body = { } <nl> + <nl> + # Mapping from a parent protocol to set of children . <nl> + graph = { } <nl> + <nl> + # Mapping from protocol to generic operators taking instances as arguments <nl> + genericOperators = { } <nl> + <nl> + comments = r ' / / . * | / [ * ] ( . | \ n ) * ? 
[ * ] / ' # FIXME : doesn ' t respect strings or comment nesting ) <nl> + <nl> + # read source , stripping all comments <nl> + sourceSansComments = re . sub ( comments , ' ' , open ( args [ 1 ] ) . read ( ) , flags = reFlags ) <nl> + <nl> + genericParameterConstraint = interpolate ( r ' ( % ( identifier ) s ) \ s * : \ s * ( % ( identifier ) s ) ' ) <nl> + <nl> + def parseGenericOperator ( m ) : <nl> + genericParams = m . group ( 5 ) <nl> + genericOperator = cgi . escape ( m . group ( 0 ) . strip ( ) ) <nl> + functionParamStart = m . end ( 5 ) - m . start ( 0 ) <nl> + functionParams = genericOperator [ functionParamStart : ] <nl> + for m2 in re . finditer ( genericParameterConstraint , genericParams , reFlags ) : <nl> + typeParameter = m2 . group ( 1 ) <nl> + protocol = m2 . group ( 2 ) <nl> + <nl> + # we ' re only interested if we can find a function parameter of that type <nl> + if not re . search ( r ' : \ s * % s \ s * [ , ) ] ' % typeParameter , functionParams ) : continue <nl> + <nl> + # Make some replacements in the signature to limit the graph size <nl> + letterTau = ' & # x3c4 ; ' <nl> + letterPi = ' & # x3c0 ; ' <nl> + abbreviatedSignature = re . sub ( <nl> + r ' \ b % s \ b ' % protocol , letterPi , <nl> + re . sub ( r ' \ b % s \ b ' % typeParameter , letterTau , genericOperator ) ) <nl> + <nl> + genericOperators . setdefault ( protocol , set ( ) ) . add ( abbreviatedSignature ) <nl> + <nl> + def parseProtocol ( m ) : <nl> + child = m . group ( 1 ) <nl> + # skip irrelevant protocols <nl> + if re . match ( r ' _Builtin . * Convertible ' , child ) : return <nl> + graph . setdefault ( child , set ( ) ) <nl> + body [ child ] = bodyLines ( m . group ( 3 ) ) <nl> + if m . group ( 2 ) : <nl> + for parent in m . group ( 2 ) . strip ( ) . split ( " , " ) : <nl> + if re . match ( r ' _Builtin . * Convertible ' , parent ) : return <nl> + graph . setdefault ( parent . strip ( ) , set ( ) ) . add ( child ) <nl> + <nl> + protocolsAndOperators = interpolate ( r ' ' ' <nl> + \ bprotocol \ s + ( % ( identifier ) s ) \ s * <nl> + ( ? : : \ s * ( [ ^ { ] + ) ) ? # refinements <nl> + { ( [ ^ { } \ n ] * ( . * \ n ) * ? ) } # body <nl> + | <nl> + % ( operator ) s [ ^ { ] * ( ? = { ) # operator definition up to the open brace <nl> + ' ' ' ) <nl> + <nl> + # Main parsing loop <nl> + for m in re . finditer ( protocolsAndOperators , sourceSansComments , reFlags ) : <nl> + if m . group ( 1 ) : parseProtocol ( m ) <nl> + elif m . group ( 5 ) : parseGenericOperator ( m ) <nl> + # otherwise we matched some non - generic operator <nl> + <nl> + # Find clusters of protocols that have the same name when underscores <nl> + # are stripped <nl> + clusterBuilder = { } # map from potential cluster name to nodes in the cluster <nl> + for n in graph : <nl> + clusterBuilder . setdefault ( n . translate ( None , ' _ ' ) , set ( ) ) . add ( n ) <nl> + <nl> + # Grab the clusters with more than one member . <nl> + clusters = dict ( ( c , nodes ) for ( c , nodes ) in clusterBuilder . items ( ) if len ( nodes ) > 1 ) <nl> + <nl> + # A set of all intra - cluster edges <nl> + clusterEdges = set ( <nl> + ( s , t ) for ( c , elements ) in clusters . items ( ) <nl> + for s in elements <nl> + for t in graph [ s ] if t in elements ) <nl> + <nl> + print ' digraph ProtocolHierarchies { ' <nl> + print ' mclimit = 100 ; ranksep = 1 . 
5 ; ' # ; packmode = " array1 " <nl> + print ' edge [ dir = " back " ] ; ' <nl> + print ' node [ shape = box , fontname = Helvetica , fontsize = 10 ] ; ' <nl> + <nl> + for c in sorted ( clusters ) : <nl> + print ' subgraph " cluster_ % s " { ' % c <nl> + for ( s , t ) in sorted ( clusterEdges ) : <nl> + if s in clusters [ c ] : <nl> + print ' % s - > % s [ weight = 100 ] ; ' % ( s , t ) <nl> + print ' } ' <nl> + <nl> + for node in sorted ( graph . keys ( ) ) : <nl> + requirements = body . get ( node , [ ] ) <nl> + generics = sorted ( genericOperators . get ( node , set ( ) ) ) <nl> + style = ' solid ' if node . startswith ( ' _ ' ) else ' bold ' <nl> + divider = ' < HR / > \ n ' if len ( requirements ) ! = 0 and len ( generics ) ! = 0 else ' ' <nl> + <nl> + label = node if len ( requirements + generics ) = = 0 else ( <nl> + ' \ n < TABLE BORDER = " 0 " > \ n < TR > < TD > \ n % s \ n < / TD > < / TR > < HR / > \ n % s % s % s < / TABLE > \ n ' % ( <nl> + node , <nl> + ' \ n ' . join ( ' < TR > < TD > % s < / TD > < / TR > ' % r for r in requirements ) , <nl> + divider , <nl> + ' \ n ' . join ( ' < TR > < TD > % s < / TD > < / TR > ' % g for g in generics ) ) ) <nl> + <nl> + print interpolate ( ' % ( node ) s [ style = % ( style ) s , label = < % ( label ) s > ] ' ) <nl> + <nl> + for ( parent , children ) in sorted ( graph . items ( ) ) : <nl> + print ' % s - > { ' % parent , <nl> + print ' ; ' . join ( <nl> + sorted ( child for child in children if not ( parent , child ) in clusterEdges ) ) , <nl> + print ' } ' <nl> + <nl> + print ' } ' <nl>
|
protocol graph dumping
|
apple/swift
|
82849e5d60aca59b691bf0491072a8445dff7532
|
2015-04-06T15:40:44Z
|
mmm a / cocos2dx / base_nodes / CCNode . cpp <nl> ppp b / cocos2dx / base_nodes / CCNode . cpp <nl> CCNode : : CCNode ( void ) <nl> , m_uOrderOfArrival ( 0 ) <nl> , m_eGLServerState ( ccGLServerState ( 0 ) ) <nl> , m_bReorderChildDirty ( false ) <nl> + , m_sAdditionalTransform ( CCAffineTransformMakeIdentity ( ) ) <nl> + , m_bAdditionalTransformDirty ( false ) <nl> { <nl> / / set default scheduler and actionManager <nl> CCDirector * director = CCDirector : : sharedDirector ( ) ; <nl> CCAffineTransform CCNode : : nodeToParentTransform ( void ) <nl> m_sTransform = CCAffineTransformTranslate ( m_sTransform , - m_obAnchorPointInPoints . x , - m_obAnchorPointInPoints . y ) ; <nl> } <nl> } <nl> + <nl> + if ( m_bAdditionalTransformDirty ) <nl> + { <nl> + m_sTransform = CCAffineTransformConcat ( m_sTransform , m_sAdditionalTransform ) ; <nl> + m_bAdditionalTransformDirty = false ; <nl> + } <nl> <nl> m_bTransformDirty = false ; <nl> } <nl> CCAffineTransform CCNode : : nodeToParentTransform ( void ) <nl> return m_sTransform ; <nl> } <nl> <nl> + void CCNode : : setAdditionalTransform ( const CCAffineTransform & additionalTransform ) <nl> + { <nl> + m_sAdditionalTransform = additionalTransform ; <nl> + m_bTransformDirty = true ; <nl> + m_bAdditionalTransformDirty = true ; <nl> + } <nl> + <nl> CCAffineTransform CCNode : : parentToNodeTransform ( void ) <nl> { <nl> if ( m_bInverseDirty ) { <nl> mmm a / cocos2dx / base_nodes / CCNode . h <nl> ppp b / cocos2dx / base_nodes / CCNode . h <nl> class CC_DLL CCNode : public CCObject <nl> * / <nl> CCPoint convertTouchToNodeSpaceAR ( CCTouch * touch ) ; <nl> <nl> + / * * <nl> + * Sets the additional transform . <nl> + * <nl> + * @ note The additional transform will be concatenated at the end of nodeToParentTransform . <nl> + * It could be used to simulate ` parent - child ` relationship between two nodes ( e . g . one is in BatchNode , another isn ' t ) . <nl> + * @ code <nl> + / / create a batchNode <nl> + CCSpriteBatchNode * batch = CCSpriteBatchNode : : create ( " Icon - 114 . png " ) ; <nl> + this - > addChild ( batch ) ; <nl> + <nl> + / / create two sprites , spriteA will be added to batchNode , they are using different textures . <nl> + CCSprite * spriteA = CCSprite : : createWithTexture ( batch - > getTexture ( ) ) ; <nl> + CCSprite * spriteB = CCSprite : : create ( " Icon - 72 . png " ) ; <nl> + <nl> + batch - > addChild ( spriteA ) ; <nl> + <nl> + / / We can ' t make spriteB as spriteA ' s child since they use different textures . So just add it to layer . <nl> + / / But we want to simulate ` parent - child ` relationship for these two node . <nl> + this - > addChild ( spriteB ) ; <nl> + <nl> + / / position <nl> + spriteA - > setPosition ( ccp ( 200 , 200 ) ) ; <nl> + <nl> + / / Gets the spriteA ' s transform . <nl> + CCAffineTransform t = spriteA - > nodeToParentTransform ( ) ; <nl> + <nl> + / / Sets the additional transform to spriteB , spriteB ' s postion will based on its pseudo parent i . e . spriteA . <nl> + spriteB - > setAdditionalTransform ( t ) ; <nl> + <nl> + / / scale <nl> + spriteA - > setScale ( 2 ) ; <nl> + <nl> + / / Gets the spriteA ' s transform . <nl> + t = spriteA - > nodeToParentTransform ( ) ; <nl> + <nl> + / / Sets the additional transform to spriteB , spriteB ' s scale will based on its pseudo parent i . e . spriteA . <nl> + spriteB - > setAdditionalTransform ( t ) ; <nl> + <nl> + / / rotation <nl> + spriteA - > setRotation ( 20 ) ; <nl> + <nl> + / / Gets the spriteA ' s transform . 
<nl> + t = spriteA - > nodeToParentTransform ( ) ; <nl> + <nl> + / / Sets the additional transform to spriteB , spriteB ' s rotation will based on its pseudo parent i . e . spriteA . <nl> + spriteB - > setAdditionalTransform ( t ) ; <nl> + * @ endcode <nl> + * / <nl> + void setAdditionalTransform ( const CCAffineTransform & additionalTransform ) ; <nl> / / / @ } end of Coordinate Converters <nl> <nl> private : <nl> class CC_DLL CCNode : public CCObject <nl> <nl> CCSize m_obContentSize ; / / / < untransformed size of the node <nl> <nl> + <nl> + CCAffineTransform m_sAdditionalTransform ; / / / < transform <nl> CCAffineTransform m_sTransform ; / / / < transform <nl> CCAffineTransform m_sInverse ; / / / < transform <nl> <nl> class CC_DLL CCNode : public CCObject <nl> <nl> bool m_bTransformDirty ; / / / < transform dirty flag <nl> bool m_bInverseDirty ; / / / < transform dirty flag <nl> - <nl> + bool m_bAdditionalTransformDirty ; / / / < The flag to check whether the additional transform is dirty <nl> bool m_bVisible ; / / / < is this node visible <nl> <nl> bool m_bIgnoreAnchorPointForPosition ; / / / < true if the Anchor Point will be ( 0 , 0 ) when you position the CCNode , false otherwise . <nl>
|
Merge pull request from dumganhar / jianghua - patch
|
cocos2d/cocos2d-x
|
1d8a49ad9069684572e459145bcaee9830148ef3
|
2013-02-22T08:18:02Z
|
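The cocos2d-x patch above concatenates an extra affine transform onto a node's own node-to-parent transform so one node can track another that is not its real parent. Below is a minimal C++ sketch of that concatenation, using a hypothetical stand-alone `Affine` type rather than the cocos2d-x `CCAffineTransform` API, to show why spriteB ends up placed as if spriteA were its parent.

```cpp
#include <cstdio>

// Hypothetical 2D affine transform in the row-vector convention used by
// CGAffineTransform / CCAffineTransform: p' = [x y 1] * M.
struct Affine {
    double a, b, c, d, tx, ty;
};

// Apply t1 first, then t2 (matrix product t1 * t2).
Affine concat(const Affine& t1, const Affine& t2) {
    return {
        t1.a * t2.a + t1.b * t2.c,
        t1.a * t2.b + t1.b * t2.d,
        t1.c * t2.a + t1.d * t2.c,
        t1.c * t2.b + t1.d * t2.d,
        t1.tx * t2.a + t1.ty * t2.c + t2.tx,
        t1.tx * t2.b + t1.ty * t2.d + t2.ty,
    };
}

void apply(const Affine& t, double x, double y, double* ox, double* oy) {
    *ox = t.a * x + t.c * y + t.tx;
    *oy = t.b * x + t.d * y + t.ty;
}

int main() {
    Affine identity   = {1, 0, 0, 1, 0, 0};
    Affine spriteA    = {2, 0, 0, 2, 200, 200};  // scale 2, translate (200, 200)
    Affine spriteBOwn = identity;                // spriteB has no transform of its own

    // Mirror of setAdditionalTransform: spriteB's node-to-parent transform is
    // its own transform concatenated with spriteA's, so B follows A.
    Affine spriteBFinal = concat(spriteBOwn, spriteA);

    double x, y;
    apply(spriteBFinal, 10, 10, &x, &y);
    std::printf("spriteB local (10, 10) -> parent (%.1f, %.1f)\n", x, y);  // (220, 220)
    return 0;
}
```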
mmm a / stdlib / public / SwiftShims / LibcShims . h <nl> ppp b / stdlib / public / SwiftShims / LibcShims . h <nl> double _swift_stdlib_squareRoot ( double _self ) { <nl> <nl> / / TLS - thread local storage <nl> <nl> - / / FIXME : multi - platform <nl> + # if defined ( __linux__ ) <nl> + typedef unsigned int __swift_pthread_key_t ; <nl> + # else <nl> typedef unsigned long __swift_pthread_key_t ; <nl> + # endif <nl> <nl> SWIFT_RUNTIME_STDLIB_INTERFACE <nl> int _swift_stdlib_pthread_key_create ( <nl>
|
[ stdlib ] Linux definition of pthread_key_t
|
apple/swift
|
b9da4e478ab08fc471bafaf529eee9b006e59186
|
2017-05-17T03:29:21Z
|
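The shim change above picks the integer width that the platform's `pthread_key_t` actually has instead of hard-coding `unsigned long`. A hedged sketch of the same portability pattern in plain C++ follows; `shim_pthread_key_t` and `shim_key_create` are illustrative names, not the Swift runtime's.

```cpp
#include <pthread.h>
#include <cstdio>

// Mirror the shim's idea: pick the integer width the platform's pthread_key_t
// really has instead of assuming one width everywhere.
#if defined(__linux__)
typedef unsigned int shim_pthread_key_t;   // glibc declares pthread_key_t as unsigned int
#else
typedef unsigned long shim_pthread_key_t;  // Darwin declares it as unsigned long
#endif

static int shim_key_create(shim_pthread_key_t* key, void (*dtor)(void*)) {
    pthread_key_t raw;
    int rc = pthread_key_create(&raw, dtor);
    if (rc == 0) {
        *key = static_cast<shim_pthread_key_t>(raw);
    }
    return rc;
}

int main() {
    shim_pthread_key_t key = 0;
    if (shim_key_create(&key, nullptr) != 0) {
        std::puts("pthread_key_create failed");
        return 1;
    }
    pthread_setspecific(static_cast<pthread_key_t>(key), "hello");
    std::printf("TLS value: %s\n",
                static_cast<const char*>(pthread_getspecific(static_cast<pthread_key_t>(key))));
    return 0;
}
```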
mmm a / docs / _static / logo . svg <nl> ppp b / docs / _static / logo . svg <nl> @ @ - 1 + 1 @ @ <nl> - / Users / blinkov / ClickHouse / website / logo . svg <nl> \ No newline at end of file <nl> + . . / . . / website / logo . svg <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 0decfc367b1 <nl> mmm / dev / null <nl> ppp b / docs / _templates / layout . html <nl> <nl> + { % - extends " basic / layout . html " % } <nl> + <nl> + { % - block extrahead % } <nl> + { { super ( ) } } <nl> + < link rel = " stylesheet " href = " { { pathto ( ' _static / custom . css ' , 1 ) } } " type = " text / css " / > <nl> + { % if theme_touch_icon % } <nl> + < link rel = " apple - touch - icon " href = " { { pathto ( ' _static / ' ~ theme_touch_icon , 1 ) } } " / > <nl> + { % endif % } <nl> + { % if theme_canonical_url % } <nl> + < link rel = " canonical " href = " { { theme_canonical_url } } { { pagename } } . html " / > <nl> + { % endif % } <nl> + < meta name = " viewport " content = " width = device - width , initial - scale = 0 . 9 , maximum - scale = 0 . 9 " / > <nl> + { % endblock % } <nl> + <nl> + { # Disable base theme ' s top + bottom related navs ; we have our own in sidebar # } <nl> + { % - block relbar1 % } { % endblock % } <nl> + { % - block relbar2 % } { % endblock % } <nl> + <nl> + { # Nav should appear before content , not after # } <nl> + { % - block content % } <nl> + { % - if theme_fixed_sidebar | lower = = ' true ' % } <nl> + < div class = " document " > <nl> + { { sidebar ( ) } } <nl> + { % - block document % } <nl> + < div class = " documentwrapper " > <nl> + { % - if render_sidebar % } <nl> + < div class = " bodywrapper " > <nl> + { % - endif % } <nl> + < div class = " body " role = " main " > <nl> + { % block body % } { % endblock % } <nl> + < / div > <nl> + { % - if render_sidebar % } <nl> + < / div > <nl> + { % - endif % } <nl> + < / div > <nl> + { % - endblock % } <nl> + < div class = " clearer " > < / div > <nl> + < / div > <nl> + { % - else % } <nl> + { { super ( ) } } <nl> + { % - endif % } <nl> + { % - endblock % } <nl> + <nl> + { % - block footer % } <nl> + < div class = " footer " > <nl> + { % if show_copyright % } & copy ; { { copyright } } . { % endif % } <nl> + { % if theme_show_powered_by | lower = = ' true ' % } <nl> + { % if show_copyright % } | { % endif % } <nl> + Powered by < a href = " http : / / sphinx - doc . org / " > Sphinx { { sphinx_version } } < / a > <nl> + & amp ; < a href = " https : / / github . com / bitprophet / alabaster " > Alabaster { { alabaster_version } } < / a > <nl> + { % endif % } <nl> + { % - if show_source and has_source and sourcename % } <nl> + { % if show_copyright or theme_show_powered_by % } | { % endif % } <nl> + < a href = " { { pathto ( ' _sources / ' + sourcename , true ) | e } } " <nl> + rel = " nofollow " > { { _ ( ' Page source ' ) } } < / a > <nl> + { % - endif % } <nl> + < / div > <nl> + <nl> + { % if theme_github_banner | lower ! = ' false ' % } <nl> + < a href = " https : / / github . com / { { theme_github_user } } / { { theme_github_repo } } " class = " github " > <nl> + < img style = " position : absolute ; top : 0 ; right : 0 ; border : 0 ; " src = " { { pathto ( ' _static / ' ~ theme_github_banner , 1 ) if theme_github_banner | lower ! = ' true ' else ' https : / / s3 . amazonaws . com / github / ribbons / forkme_right_darkblue_121621 . png ' } } " alt = " Fork me on GitHub " class = " github " / > <nl> + < / a > <nl> + { % endif % } <nl> + <nl> + < ! - - Yandex . 
Metrika counter - - > <nl> + < script type = " text / javascript " > <nl> + ( function ( d , w , c ) { <nl> + ( w [ c ] = w [ c ] | | [ ] ) . push ( function ( ) { <nl> + try { <nl> + w . yaCounter18343495 = new Ya . Metrika2 ( { <nl> + id : 18343495 , <nl> + clickmap : true , <nl> + trackLinks : true , <nl> + accurateTrackBounce : true , <nl> + webvisor : true <nl> + } ) ; <nl> + } catch ( e ) { } <nl> + } ) ; <nl> + <nl> + var n = d . getElementsByTagName ( " script " ) [ 0 ] , <nl> + s = d . createElement ( " script " ) , <nl> + f = function ( ) { n . parentNode . insertBefore ( s , n ) ; } ; <nl> + s . type = " text / javascript " ; <nl> + s . async = true ; <nl> + s . src = " https : / / mc . yandex . ru / metrika / tag . js " ; <nl> + <nl> + if ( w . opera = = " [ object Opera ] " ) { <nl> + d . addEventListener ( " DOMContentLoaded " , f , false ) ; <nl> + } else { f ( ) ; } <nl> + } ) ( document , window , " yandex_metrika_callbacks2 " ) ; <nl> + < / script > <nl> + < noscript > <nl> + < div > < img src = " https : / / mc . yandex . ru / watch / 18343495 " style = " position : absolute ; left : - 9999px ; " alt = " " / > < / div > <nl> + < / noscript > <nl> + < ! - - / Yandex . Metrika counter - - > <nl> + <nl> + { % - endblock % } <nl>
|
lost files
|
ClickHouse/ClickHouse
|
5e572d1f43dffaf79b317bbb5a2fdf39600adfaf
|
2017-05-31T16:55:09Z
|
mmm a / source / common / common / thread . cc <nl> ppp b / source / common / common / thread . cc <nl> <nl> # include " assert . h " <nl> # include " thread . h " <nl> <nl> + # include < sys / syscall . h > <nl> + <nl> namespace Thread { <nl> <nl> Thread : : Thread ( std : : function < void ( ) > thread_routine ) : thread_routine_ ( thread_routine ) { <nl> Thread : : Thread ( std : : function < void ( ) > thread_routine ) : thread_routine_ ( thread_ro <nl> UNREFERENCED_PARAMETER ( rc ) ; <nl> } <nl> <nl> + int32_t Thread : : currentThreadId ( ) { return syscall ( SYS_gettid ) ; } <nl> + <nl> void Thread : : join ( ) { <nl> int rc = pthread_join ( thread_id_ , nullptr ) ; <nl> RELEASE_ASSERT ( rc = = 0 ) ; <nl> mmm a / source / common / common / thread . h <nl> ppp b / source / common / common / thread . h <nl> class Thread { <nl> public : <nl> Thread ( std : : function < void ( ) > thread_routine ) ; <nl> <nl> + / * * <nl> + * Get current thread id . <nl> + * / <nl> + static int32_t currentThreadId ( ) ; <nl> + <nl> / * * <nl> * Join on thread exit . <nl> * / <nl> class ConditionalInitializer { <nl> } ; <nl> <nl> / * * <nl> - * Impementation of BasicLockable <nl> + * Implementation of BasicLockable <nl> * / <nl> class MutexBasicLockable : public BasicLockable { <nl> public : <nl> mmm a / source / common / common / utility . h <nl> ppp b / source / common / common / utility . h <nl> <nl> <nl> # include " envoy / common / time . h " <nl> <nl> - # include < sys / syscall . h > <nl> - <nl> / * * <nl> * Utility class for formatting dates given a strftime style format string . <nl> * / <nl> class StringUtil { <nl> static const std : : string & valueOrDefault ( const std : : string & input , <nl> const std : : string & default_value ) ; <nl> } ; <nl> - <nl> - / * * <nl> - * Utility class for thread specific operations . <nl> - * / <nl> - class ThreadUtil { <nl> - public : <nl> - static int32_t currentThreadId ( ) { return syscall ( SYS_gettid ) ; } <nl> - } ; <nl> mmm a / source / common / runtime / runtime_impl . h <nl> ppp b / source / common / runtime / runtime_impl . h <nl> <nl> <nl> # include " common / common / empty_string . h " <nl> # include " common / common / logger . h " <nl> - # include " common / common / utility . h " <nl> + # include " common / common / thread . h " <nl> <nl> # include < dirent . h > <nl> <nl> class RandomGeneratorImpl : public RandomGenerator { <nl> static std : : ranlux48 & threadLocalGenerator ( ) { <nl> std : : chrono : : nanoseconds now = std : : chrono : : duration_cast < std : : chrono : : nanoseconds > ( <nl> std : : chrono : : system_clock : : now ( ) . time_since_epoch ( ) ) ; <nl> - static thread_local std : : ranlux48 generator ( now . count ( ) ^ ThreadUtil : : currentThreadId ( ) ) ; <nl> + static thread_local std : : ranlux48 generator ( now . count ( ) ^ Thread : : Thread : : currentThreadId ( ) ) ; <nl> <nl> return generator ; <nl> } <nl>
|
Refactor thread utils ( )
|
envoyproxy/envoy
|
eb21f2ec26e1e89151995b608c57ea7eea45b9ca
|
2016-10-22T04:27:25Z
|
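The Envoy refactor above moves `currentThreadId()` from a utility class onto `Thread::Thread` while keeping the per-thread `std::ranlux48` seeded with `time ^ tid`. The sketch below reproduces that seeding pattern; it is Linux-only because it goes through `syscall(SYS_gettid)` just as the patch does, and the function names are illustrative rather than Envoy's.

```cpp
#include <sys/syscall.h>
#include <unistd.h>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <random>

// Linux-specific: obtain the kernel thread id via syscall(), as the patch does.
static int32_t currentThreadId() { return static_cast<int32_t>(syscall(SYS_gettid)); }

// One generator per thread, seeded so two threads started in the same
// nanosecond still diverge because their thread ids differ.
static std::ranlux48& threadLocalGenerator() {
    const auto now = std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::system_clock::now().time_since_epoch());
    static thread_local std::ranlux48 generator(now.count() ^ currentThreadId());
    return generator;
}

int main() {
    std::uniform_int_distribution<int> dist(0, 99);
    std::printf("tid=%d sample=%d\n", currentThreadId(), dist(threadLocalGenerator()));
    return 0;
}
```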
mmm a / aten / src / ATen / native / Unique . cpp <nl> ppp b / aten / src / ATen / native / Unique . cpp <nl> std : : tuple < Tensor , Tensor > _unique_dim_cpu_template ( <nl> } / / namespace <nl> <nl> std : : tuple < Tensor , Tensor > <nl> - _unique_cpu ( const Tensor & self , const bool sorted , const bool return_inverse , optional < int64_t > dim ) { <nl> - if ( dim ) { <nl> - return AT_DISPATCH_ALL_TYPES ( self . type ( ) , " unique " , [ & ] { <nl> - / / The current implementation using ` dim ` always sorts due to unhashable tensors <nl> - return _unique_dim_cpu_template < scalar_t > ( self , dim . value ( ) , return_inverse ) ; <nl> - } ) ; <nl> - } <nl> + _unique_cpu ( const Tensor & self , const bool sorted , const bool return_inverse ) { <nl> return AT_DISPATCH_ALL_TYPES ( self . type ( ) , " unique " , [ & ] { <nl> return _unique_cpu_template < scalar_t > ( self , sorted , return_inverse ) ; <nl> } ) ; <nl> } <nl> <nl> - std : : tuple < Tensor , Tensor > unique_dim ( const Tensor & self , int64_t dim , const bool sorted , const bool return_inverse ) { <nl> - return at : : unique ( self , sorted , return_inverse , dim ) ; <nl> - } <nl> - <nl> - std : : tuple < Tensor , Tensor > _unique ( const Tensor & self , const bool sorted , const bool return_inverse ) { <nl> - return at : : unique ( self , sorted , return_inverse ) ; <nl> - } <nl> - <nl> - std : : tuple < Tensor , Tensor > _unique_dim ( const Tensor & self , int64_t dim , const bool sorted , const bool return_inverse ) { <nl> - return at : : unique ( self , sorted , return_inverse , dim ) ; <nl> + std : : tuple < Tensor , Tensor > <nl> + _unique_dim_cpu ( const Tensor & self , const int64_t dim , const bool sorted , const bool return_inverse ) { <nl> + return AT_DISPATCH_ALL_TYPES ( self . type ( ) , " unique_dim " , [ & ] { <nl> + / / The current implementation using ` dim ` always sorts due to unhashable tensors <nl> + return _unique_dim_cpu_template < scalar_t > ( self , dim , return_inverse ) ; <nl> + } ) ; <nl> } <nl> <nl> } / / namespace native <nl> mmm a / aten / src / ATen / native / cuda / Unique . cu <nl> ppp b / aten / src / ATen / native / cuda / Unique . cu <nl> template < typename scalar_t > <nl> } / / namespace <nl> <nl> std : : tuple < Tensor , Tensor > <nl> - _unique_cuda ( const Tensor & self , const bool sorted , const bool return_inverse , optional < int64_t > dim ) { <nl> - if ( dim ) { <nl> - return AT_DISPATCH_ALL_TYPES ( self . type ( ) , " unique " , [ & ] { <nl> - return _unique_dim_cuda_template < scalar_t > ( self , dim . value ( ) , return_inverse ) ; <nl> - } ) ; <nl> - } <nl> + _unique_cuda ( const Tensor & self , const bool sorted , const bool return_inverse ) { <nl> return AT_DISPATCH_ALL_TYPES ( self . type ( ) , " unique " , [ & ] { <nl> / / The current CUDA implementation of unique always sort due to the <nl> / / lack of hashtable implementation in thrust <nl> _unique_cuda ( const Tensor & self , const bool sorted , const bool return_inverse , o <nl> } ) ; <nl> } <nl> <nl> + std : : tuple < Tensor , Tensor > <nl> + _unique_dim_cuda ( const Tensor & self , const int64_t dim , const bool sorted , const bool return_inverse ) { <nl> + return AT_DISPATCH_ALL_TYPES ( self . type ( ) , " unique_dim " , [ & ] { <nl> + return _unique_dim_cuda_template < scalar_t > ( self , dim , return_inverse ) ; <nl> + } ) ; <nl> + } <nl> + <nl> } / / namespace native <nl> } / / namespace at <nl> mmm a / aten / src / ATen / native / native_functions . 
yaml <nl> ppp b / aten / src / ATen / native / native_functions . yaml <nl> <nl> matches_jit_signature : True <nl> variants : method <nl> <nl> - - func : unique ( Tensor self , bool sorted = true , bool return_inverse = false , int64_t ? dim = None ) - > ( Tensor , Tensor ) <nl> + - func : _unique ( Tensor self , bool sorted = true , bool return_inverse = false ) - > ( Tensor , Tensor ) <nl> variants : function <nl> dispatch : <nl> CPU : _unique_cpu <nl> CUDA : _unique_cuda <nl> <nl> - # unique_dim is not exposed to python , special cased in gen_python_functions . py <nl> - - func : unique_dim ( Tensor self , int64_t dim , bool sorted = true , bool return_inverse = false ) - > ( Tensor , Tensor ) <nl> - variants : function <nl> - <nl> - # FIXME : for back compatibility reason , _unique and _unique_dim is still there <nl> - # it just calls unique . These two functions should be deleted in the future <nl> - - func : _unique ( Tensor self , bool sorted = true , bool return_inverse = false ) - > ( Tensor , Tensor ) <nl> - variants : function <nl> - <nl> - func : _unique_dim ( Tensor self , int64_t dim , bool sorted = true , bool return_inverse = false ) - > ( Tensor , Tensor ) <nl> variants : function <nl> + dispatch : <nl> + CPU : _unique_dim_cpu <nl> + CUDA : _unique_dim_cuda <nl> <nl> - func : _unsafe_view ( Tensor self , IntList size ) - > Tensor <nl> <nl> mmm a / aten / src / ATen / native / sparse / SparseTensor . cpp <nl> ppp b / aten / src / ATen / native / sparse / SparseTensor . cpp <nl> SparseTensor dense_to_sparse ( const Tensor & self ) { <nl> SparseTensor dense_to_sparse ( const Tensor & self , int64_t sparse_dim ) { <nl> int64_t dims = self . dim ( ) ; <nl> AT_CHECK ( sparse_dim > 0 , " sparse_dim must be > 0 " ) ; <nl> - AT_CHECK ( sparse_dim < = dims , <nl> + AT_CHECK ( sparse_dim < = dims , <nl> " sparse_dim must be less than or equal to self . dim ( ) " ) ; <nl> at : : TensorOptions sparse_options = self . options ( ) . layout ( kSparse ) ; <nl> std : : vector < int64_t > sizes = self . sizes ( ) . vec ( ) ; <nl> SparseTensor dense_to_sparse ( const Tensor & self , int64_t sparse_dim ) { <nl> indices = nz . clone ( ) ; <nl> } else { <nl> Tensor i = nz . narrow ( 0 , 0 , sparse_dim ) ; <nl> - std : : tie ( indices , std : : ignore ) = at : : unique_dim ( i , 1 ) ; <nl> + std : : tie ( indices , std : : ignore ) = _unique_dim ( i , 1 ) ; <nl> indices = indices . contiguous ( ) ; / / many sparse CUDA kernels require contiguity , see issue # 12633 <nl> } <nl> <nl> deleted file mode 100644 <nl> index caec02c75356 . . 000000000000 <nl> mmm a / test / onnx / expect / TestOperators . test_unique . expect <nl> ppp / dev / null <nl> <nl> - ir_version : 3 <nl> - producer_name : " pytorch " <nl> - producer_version : " 0 . 
4 " <nl> - graph { <nl> - node { <nl> - input : " x " <nl> - output : " 1 " <nl> - output : " 2 " <nl> - op_type : " ATen " <nl> - attribute { <nl> - name : " operator " <nl> - s : " unique " <nl> - type : STRING <nl> - } <nl> - attribute { <nl> - name : " return_inverse " <nl> - i : 0 <nl> - type : INT <nl> - } <nl> - attribute { <nl> - name : " sorted " <nl> - i : 1 <nl> - type : INT <nl> - } <nl> - } <nl> - name : " torch - jit - export " <nl> - input { <nl> - name : " x " <nl> - type { <nl> - tensor_type { <nl> - elem_type : 1 <nl> - shape { <nl> - dim { <nl> - dim_value : 10 <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - output { <nl> - name : " 1 " <nl> - type { <nl> - tensor_type { <nl> - elem_type : 1 <nl> - shape { <nl> - dim { <nl> - dim_value : 10 <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - opset_import { <nl> - version : 9 <nl> - } <nl> deleted file mode 100644 <nl> index e78170994a22 . . 000000000000 <nl> mmm a / test / onnx / expect / TestOperators . test_unique_dim . expect <nl> ppp / dev / null <nl> <nl> - ir_version : 3 <nl> - producer_name : " pytorch " <nl> - producer_version : " 0 . 4 " <nl> - graph { <nl> - node { <nl> - input : " x " <nl> - output : " 1 " <nl> - output : " 2 " <nl> - op_type : " ATen " <nl> - attribute { <nl> - name : " dim " <nl> - i : 1 <nl> - type : INT <nl> - } <nl> - attribute { <nl> - name : " operator " <nl> - s : " unique " <nl> - type : STRING <nl> - } <nl> - attribute { <nl> - name : " return_inverse " <nl> - i : 0 <nl> - type : INT <nl> - } <nl> - attribute { <nl> - name : " sorted " <nl> - i : 1 <nl> - type : INT <nl> - } <nl> - } <nl> - name : " torch - jit - export " <nl> - input { <nl> - name : " x " <nl> - type { <nl> - tensor_type { <nl> - elem_type : 1 <nl> - shape { <nl> - dim { <nl> - dim_value : 10 <nl> - } <nl> - dim { <nl> - dim_value : 10 <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - output { <nl> - name : " 1 " <nl> - type { <nl> - tensor_type { <nl> - elem_type : 1 <nl> - shape { <nl> - dim { <nl> - dim_value : 10 <nl> - } <nl> - dim { <nl> - dim_value : 10 <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - opset_import { <nl> - version : 9 <nl> - } <nl> mmm a / test / onnx / test_operators . py <nl> ppp b / test / onnx / test_operators . py <nl> def test_clip_max ( self ) : <nl> x = torch . randn ( 1 , 2 , 3 , 4 , requires_grad = True ) <nl> self . assertONNX ( lambda x : x . clamp ( max = 0 . 1 ) , x ) <nl> <nl> - def test_unique ( self ) : <nl> - x = torch . randn ( 10 , requires_grad = True ) <nl> - self . assertONNX ( lambda x : torch . unique ( x ) , x ) <nl> - <nl> - def test_unique_dim ( self ) : <nl> - x = torch . randn ( 10 , 10 , requires_grad = True ) <nl> - self . assertONNX ( lambda x : torch . unique ( x , dim = 1 ) , x ) <nl> - <nl> def test_hardtanh ( self ) : <nl> x = torch . randn ( 3 , 4 , requires_grad = True ) <nl> self . assertONNX ( lambda x : torch . nn . Hardtanh ( - 0 . 5 , 0 . 5 ) ( x ) , x ) <nl> mmm a / test / test_jit . py <nl> ppp b / test / test_jit . py <nl> def test_script_clamp_min ( x ) : <nl> self . checkScript ( test_script_clamp_min_none , input , optimize = True ) <nl> self . checkScript ( test_script_clamp_min , input , optimize = True ) <nl> <nl> - def test_script_unique_none ( self ) : <nl> - def test_unique_inverse ( a ) : <nl> - b , c = torch . unique ( a , return_inverse = True ) <nl> - return b + 1 <nl> - <nl> - def test_unique_inverse_nonedim ( a ) : <nl> - b , c = torch . 
unique ( a , return_inverse = True , dim = None ) <nl> - return b + 1 <nl> - <nl> - def test_unique_noinverse ( a ) : <nl> - b = torch . unique ( a ) <nl> - return b + 1 <nl> - <nl> - def test_unique_noinverse_nonedim ( a ) : <nl> - b = torch . unique ( a , dim = None ) <nl> - return b + 1 <nl> - <nl> - a = torch . rand ( 5 , 6 , 7 ) <nl> - <nl> - self . checkTrace ( test_unique_inverse , [ a ] , inputs_require_grads = False ) <nl> - self . checkTrace ( test_unique_inverse_nonedim , [ a ] , inputs_require_grads = False ) <nl> - self . checkTrace ( test_unique_noinverse , [ a ] , inputs_require_grads = False ) <nl> - self . checkTrace ( test_unique_noinverse_nonedim , [ a ] , inputs_require_grads = False ) <nl> - self . checkScript ( test_unique_inverse , [ a ] ) <nl> - self . checkScript ( test_unique_inverse_nonedim , [ a ] ) <nl> - # TODO : scripting unique when return_inverse = False is not supported yet <nl> - # self . checkScript ( test_unique_noinverse , [ a ] ) <nl> - # self . checkScript ( test_unique_noinverse_nonedim , [ a ] ) <nl> - <nl> def test_script_bool_constant ( self ) : <nl> script = ' ' ' <nl> def test_script_bool_constant ( ) : <nl> mmm a / tools / autograd / derivatives . yaml <nl> ppp b / tools / autograd / derivatives . yaml <nl> <nl> - name : uniform_ ( Tensor self , double from , double to , Generator generator ) <nl> self : zeros_like ( grad ) <nl> <nl> - - name : unique ( Tensor self , bool sorted , bool return_inverse , int64_t ? dim ) <nl> - self : not_implemented ( " unique " ) <nl> + - name : _unique ( Tensor self , bool sorted , bool return_inverse ) <nl> + self : not_implemented ( " _unique " ) <nl> <nl> - name : _unsafe_view ( Tensor self , IntList size ) <nl> self : grad . reshape ( self . sizes ( ) ) <nl> mmm a / tools / autograd / gen_python_functions . py <nl> ppp b / tools / autograd / gen_python_functions . py <nl> <nl> ' _th_ . * ' , ' _thnn_ . * ' , <nl> ' arange . * ' , ' range . * ' , ' _gesv . * ' , ' _getri . * ' , ' _inverse . * ' , <nl> ' _potrs . * ' , ' _cholesky . * ' , <nl> - ' slice ' , ' randint ( _out ) ? ' , ' unique_dim ' , ' _unique ' , ' _unique_dim ' , <nl> + ' slice ' , ' randint ( _out ) ? ' , <nl> ' item ' , ' _local_scalar_dense ' , <nl> ' max_pool1d ' , ' max_pool2d ' , ' max_pool3d ' , ' linear ' , ' to ' , <nl> ' copy_sparse_to_sparse_ ' , <nl> mmm a / torch / functional . py <nl> ppp b / torch / functional . py <nl> def unique ( input , sorted = True , return_inverse = False , dim = None ) : <nl> [ 1 , 2 ] ] ) <nl> <nl> " " " <nl> - output , inverse_indices = torch . _C . _VariableFunctions . unique ( <nl> - input , sorted = sorted , return_inverse = return_inverse , dim = dim ) <nl> + if dim is not None : <nl> + output , inverse_indices = torch . _unique_dim ( <nl> + input , <nl> + dim , <nl> + sorted = sorted , <nl> + return_inverse = return_inverse <nl> + ) <nl> + else : <nl> + output , inverse_indices = torch . _unique ( <nl> + input , <nl> + sorted = sorted , <nl> + return_inverse = return_inverse , <nl> + ) <nl> if return_inverse : <nl> return output , inverse_indices <nl> else : <nl> mmm a / torch / onnx / symbolic . py <nl> ppp b / torch / onnx / symbolic . py <nl> def conv_tbc ( g , input , weight , bias , pad ) : <nl> return g . op ( " ATen " , input , weight , bias , operator_s = " conv_tbc " , pad_i = pad ) <nl> <nl> <nl> - def unique ( g , input , sorted , return_inverse , dim ) : <nl> - sorted = _parse_arg ( sorted , ' i ' ) <nl> - return_inverse = _parse_arg ( return_inverse , ' i ' ) <nl> - if dim . 
node ( ) . kind ( ) = = " prim : : None " : <nl> - return g . op ( " ATen " , input , operator_s = " unique " , sorted_i = sorted , <nl> - return_inverse_i = return_inverse , outputs = 2 ) <nl> - else : <nl> - dim = _parse_arg ( dim , ' i ' ) <nl> - return g . op ( " ATen " , input , operator_s = " unique " , sorted_i = sorted , <nl> - return_inverse_i = return_inverse , dim_i = dim , outputs = 2 ) <nl> + @ parse_args ( ' v ' , ' i ' , ' i ' ) <nl> + def _unique ( g , input , sorted , return_inverse ) : <nl> + return g . op ( " ATen " , input , operator_s = " _unique " , sorted_i = sorted , <nl> + return_inverse_i = return_inverse , outputs = 2 ) <nl> <nl> <nl> # Metaprogram symbolics for each ATen native specialized cast operator . <nl> mmm a / torch / tensor . py <nl> ppp b / torch / tensor . py <nl> def unique ( self , sorted = True , return_inverse = False , dim = None ) : <nl> <nl> See : func : ` torch . unique ` <nl> " " " <nl> - return torch . unique ( self , sorted , return_inverse , dim ) <nl> + if dim is not None : <nl> + output , inverse_indices = torch . _unique_dim ( <nl> + self , <nl> + sorted = sorted , <nl> + return_inverse = return_inverse , <nl> + dim = dim <nl> + ) <nl> + else : <nl> + output , inverse_indices = torch . _unique ( <nl> + self , <nl> + sorted = sorted , <nl> + return_inverse = return_inverse <nl> + ) <nl> + if return_inverse : <nl> + return output , inverse_indices <nl> + else : <nl> + return output <nl> <nl> def __rsub__ ( self , other ) : <nl> return _C . _VariableFunctions . rsub ( self , other ) <nl>
|
Revert D13540278 : [ pytorch ] [ PR ] Unhide unique from C + + , make unique partially scriptable
|
pytorch/pytorch
|
c6503a42059c580f36809f96577c5a2c9a4e67d8
|
2019-01-22T20:22:40Z
|
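The revert above splits the single `unique` entry point, which took an `optional<int64_t> dim`, back into separate `_unique` and `_unique_dim` native functions and moves the `dim is None` branch up into the Python wrappers. A small, self-contained C++17 sketch of the pre-revert dispatch shape follows; the two functions are illustrative stubs, not ATen kernels.

```cpp
#include <cstdio>
#include <optional>

// Stand-ins for the two specialized kernels the revert reinstates.
static void uniqueFlat(bool sorted, bool returnInverse) {
    std::printf("unique over the flattened tensor (sorted=%d, return_inverse=%d)\n",
                sorted, returnInverse);
}

static void uniqueDim(long dim, bool sorted, bool returnInverse) {
    std::printf("unique along dim=%ld (sorted=%d, return_inverse=%d)\n",
                dim, sorted, returnInverse);
}

// The pre-revert design: one entry point that branches on an optional dim.
// The revert moves this branch back up into the Python wrapper and keeps the
// two native functions separate.
static void unique(std::optional<long> dim, bool sorted, bool returnInverse) {
    if (dim.has_value()) {
        uniqueDim(*dim, sorted, returnInverse);
    } else {
        uniqueFlat(sorted, returnInverse);
    }
}

int main() {
    unique(std::nullopt, /*sorted=*/true, /*returnInverse=*/false);
    unique(1, /*sorted=*/true, /*returnInverse=*/true);
    return 0;
}
```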
mmm a / lib / SILPasses / DefiniteInitialization . cpp <nl> ppp b / lib / SILPasses / DefiniteInitialization . cpp <nl> bool ElementPromotion : : processNonTrivialRelease ( SILInstruction * Inst ) { <nl> return true ; <nl> } <nl> <nl> - <nl> - / / If the element type of the memory object is a class value <nl> - <nl> - <nl> / / Okay , the release is conditionally live . We have to force it up the CFG to <nl> / / a place where we have unconditional liveness , and if the memory object is a <nl> / / tuple , we have to do so for each element individually . <nl> <nl> - / / / TODO : We could make this more powerful to directly support these <nl> - / / / cases , at least when the value doesn ' t escape . <nl> - / / / <nl> - / / / When this gets fixed , the code in the ~ ElementUseCollector ( ) method <nl> - / / / can be removed . <nl> - / / / <nl> <nl> + <nl> + <nl> / / This is a release of an uninitialized value . Emit a diagnostic . <nl> diagnoseInitError ( MemoryUse ( Inst , UseKind : : Load , 0 , 0 ) , <nl> diag : : variable_destroyed_before_initialized ) ; <nl> mmm a / tools / swift / Helpers . cpp <nl> ppp b / tools / swift / Helpers . cpp <nl> bool swift : : runSILDiagnosticPasses ( SILModule & Module ) { <nl> <nl> performSILMandatoryInlining ( & Module ) ; <nl> <nl> + performSILCapturePromotion ( & Module ) ; <nl> performSILAllocBoxToStackPromotion ( & Module ) ; <nl> performInOutDeshadowing ( & Module ) ; <nl> performSILDefiniteInitialization ( & Module ) ; <nl>
|
Revert " Revert r10613 , the debug info problems are significant . "
|
apple/swift
|
5c812906cae14dae671d53d7c0693a065c21fdb7
|
2013-11-21T00:50:51Z
|
mmm a / src / mongo / SConscript <nl> ppp b / src / mongo / SConscript <nl> env . StaticLibrary ( " coredb " , [ <nl> " db / projection . cpp " , <nl> " db / querypattern . cpp " , <nl> " db / queryutil . cpp " , <nl> + " db / stats / timer_stats . cpp " , <nl> " db / stats / top . cpp " , <nl> " s / shardconnection . cpp " , <nl> ] , <nl> mmm a / src / mongo / db / dbcommands . cpp <nl> ppp b / src / mongo / db / dbcommands . cpp <nl> <nl> <nl> # include < time . h > <nl> <nl> + # include " mongo / base / counter . h " <nl> # include " mongo / base / init . h " <nl> # include " mongo / base / status . h " <nl> # include " mongo / bson / util / builder . h " <nl> <nl> # include " mongo / db / background . h " <nl> # include " mongo / db / btreecursor . h " <nl> # include " mongo / db / commands . h " <nl> + # include " mongo / db / commands / server_status . h " <nl> # include " mongo / db / db . h " <nl> # include " mongo / db / dur_stats . h " <nl> # include " mongo / db / index_update . h " <nl> <nl> # include " mongo / db / repl . h " <nl> # include " mongo / db / repl_block . h " <nl> # include " mongo / db / replutil . h " <nl> + # include " mongo / db / stats / timer_stats . h " <nl> # include " mongo / s / d_writeback . h " <nl> # include " mongo / s / stale_exception . h " / / for SendStaleConfigException <nl> # include " mongo / scripting / engine . h " <nl> namespace mongo { <nl> * / <nl> BSONObj * getLastErrorDefault = 0 ; <nl> <nl> + static TimerStats gleWtimeStats ; <nl> + static ServerStatusMetricField < TimerStats > displayGleLatency ( " getLastError . wtime " , & gleWtimeStats ) ; <nl> + <nl> + static Counter64 gleWtimeouts ; <nl> + static ServerStatusMetricField < Counter64 > gleWtimeoutsDisplay ( " getLastError . wtimeouts " , & gleWtimeouts ) ; <nl> + <nl> class CmdGetLastError : public Command { <nl> public : <nl> CmdGetLastError ( ) : Command ( " getLastError " , false , " getlasterror " ) { } <nl> namespace mongo { <nl> } <nl> <nl> int timeout = cmdObj [ " wtimeout " ] . numberInt ( ) ; <nl> - Timer t ; <nl> + TimerHolder timer ( & gleWtimeStats ) ; <nl> <nl> long long passes = 0 ; <nl> char buf [ 32 ] ; <nl> namespace mongo { <nl> } <nl> <nl> <nl> - if ( timeout > 0 & & t . millis ( ) > = timeout ) { <nl> + if ( timeout > 0 & & timer . millis ( ) > = timeout ) { <nl> + gleWtimeouts . increment ( ) ; <nl> result . append ( " wtimeout " , true ) ; <nl> errmsg = " timed out waiting for slaves " ; <nl> - result . append ( " waited " , t . millis ( ) ) ; <nl> + result . append ( " waited " , timer . millis ( ) ) ; <nl> result . append ( " replicatedTo " , getHostsReplicatedTo ( op ) ) ; <nl> result . append ( " err " , " timeout " ) ; <nl> return true ; <nl> namespace mongo { <nl> } <nl> <nl> result . append ( " replicatedTo " , getHostsReplicatedTo ( op ) ) ; <nl> - result . appendNumber ( " wtime " , t . millis ( ) ) ; <nl> + int myMillis = timer . recordMillis ( ) ; <nl> + result . appendNumber ( " wtime " , myMillis ) ; <nl> } <nl> <nl> result . appendNull ( " err " ) ; <nl> return true ; <nl> } <nl> + <nl> } cmdGetLastError ; <nl> <nl> class CmdGetPrevError : public Command { <nl> mmm a / src / mongo / s / client_info . cpp <nl> ppp b / src / mongo / s / client_info . cpp <nl> <nl> # include " server . h " <nl> # include " . . / util / scopeguard . h " <nl> # include " . . / db / commands . h " <nl> + # include " . . / db / commands / server_status . h " <nl> # include " . . / db / dbmessage . h " <nl> # include " . . / db / stats / counters . 
h " <nl> + # include " . . / db / stats / timer_stats . h " <nl> <nl> # include " . . / client / connpool . h " <nl> <nl> namespace mongo { <nl> _prev = temp ; <nl> } <nl> <nl> + static TimerStats gleWtimeStats ; <nl> + static ServerStatusMetricField < TimerStats > displayGleLatency ( " getLastError . wtime " , & gleWtimeStats ) ; <nl> + <nl> bool ClientInfo : : getLastError ( const string & dbName , <nl> const BSONObj & options , <nl> BSONObjBuilder & result , <nl> namespace mongo { <nl> bool fromWriteBackListener ) <nl> { <nl> <nl> + scoped_ptr < TimerHolder > gleTimerHolder ; <nl> + if ( ! fromWriteBackListener ) { <nl> + bool doTiming = false ; <nl> + const BSONElement & e = options [ " w " ] ; <nl> + if ( e . isNumber ( ) ) { <nl> + doTiming = e . numberInt ( ) > 1 ; <nl> + } <nl> + else if ( e . type ( ) = = String ) { <nl> + doTiming = true ; <nl> + } <nl> + if ( doTiming ) { <nl> + gleTimerHolder . reset ( new TimerHolder ( & gleWtimeStats ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> set < string > * shards = getPrev ( ) ; <nl> <nl> if ( shards - > size ( ) = = 0 ) { <nl>
|
SERVER - 8119 : add a timer for how long getLastError wtime takes
|
mongodb/mongo
|
0f1d143bf508de6e5735e1d984f5edb772f8ae4c
|
2013-01-09T17:42:09Z
|
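The MongoDB change above wraps the getLastError wait in a `TimerHolder` so the elapsed time feeds a `TimerStats` server-status metric instead of a bare `Timer`. Below is a hedged re-implementation of that RAII idea, not the actual `mongo::TimerStats` classes: the holder measures from construction and records into the stats exactly once, either explicitly or on destruction.

```cpp
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

// Illustrative stand-in for the stats the patch adds: how many timed sections
// ran and how many milliseconds they took in total.
struct TimerStats {
    std::atomic<long long> num{0};
    std::atomic<long long> totalMillis{0};
};

class TimerHolder {
public:
    explicit TimerHolder(TimerStats* stats)
        : _stats(stats), _start(std::chrono::steady_clock::now()) {}

    ~TimerHolder() {
        if (!_recorded) recordMillis();  // record on scope exit if not done already
    }

    long long millis() const {
        return std::chrono::duration_cast<std::chrono::milliseconds>(
                   std::chrono::steady_clock::now() - _start).count();
    }

    long long recordMillis() {
        _recorded = true;
        const long long ms = millis();
        _stats->num.fetch_add(1);
        _stats->totalMillis.fetch_add(ms);
        return ms;
    }

private:
    TimerStats* _stats;
    std::chrono::steady_clock::time_point _start;
    bool _recorded = false;
};

static TimerStats gleWtimeStats;

int main() {
    {
        TimerHolder timer(&gleWtimeStats);  // times the "wait for slaves" section
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
    }                                       // destructor records roughly 20 ms
    std::printf("count=%lld totalMillis=%lld\n",
                gleWtimeStats.num.load(), gleWtimeStats.totalMillis.load());
    return 0;
}
```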
mmm a / src / mongo / db / concurrency / lock_manager . cpp <nl> ppp b / src / mongo / db / concurrency / lock_manager . cpp <nl> void LockManager : : _dumpBucket ( const LockBucket * bucket ) const { <nl> < < " Thread = " < < threadId . str ( ) < < " ; " <nl> < < " ConvertMode = " < < modeName ( iter - > convertMode ) < < " ; " <nl> < < " EnqueueAtFront = " < < iter - > enqueueAtFront < < " ; " <nl> - < < " CompatibleFirst = " < < iter - > compatibleFirst < < " ; " < < ' \ n ' ; <nl> + < < " CompatibleFirst = " < < iter - > compatibleFirst < < " ; " <nl> + < < " DebugInfo = " < < iter - > locker - > getDebugInfo ( ) < < ' \ n ' ; <nl> } <nl> <nl> sb < < " PENDING : \ n " ; <nl> void LockManager : : _dumpBucket ( const LockBucket * bucket ) const { <nl> < < " Thread = " < < threadId . str ( ) < < " ; " <nl> < < " ConvertMode = " < < modeName ( iter - > convertMode ) < < " ; " <nl> < < " EnqueueAtFront = " < < iter - > enqueueAtFront < < " ; " <nl> - < < " CompatibleFirst = " < < iter - > compatibleFirst < < " ; " < < ' \ n ' ; <nl> + < < " CompatibleFirst = " < < iter - > compatibleFirst < < " ; " <nl> + < < " DebugInfo = " < < iter - > locker - > getDebugInfo ( ) < < ' \ n ' ; <nl> } <nl> <nl> sb < < " mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - \ n " ; <nl> mmm a / src / mongo / db / concurrency / locker . h <nl> ppp b / src / mongo / db / concurrency / locker . h <nl> class Locker { <nl> return _numResourcesToUnlockAtEndUnitOfWork ; <nl> } <nl> <nl> + std : : string getDebugInfo ( ) const { <nl> + return _debugInfo ; <nl> + } <nl> + <nl> + void setDebugInfo ( const std : : string & info ) { <nl> + _debugInfo = info ; <nl> + } <nl> + <nl> protected : <nl> Locker ( ) { } <nl> <nl> class Locker { <nl> private : <nl> bool _shouldConflictWithSecondaryBatchApplication = true ; <nl> bool _shouldAcquireTicket = true ; <nl> + std : : string _debugInfo ; / / Extra info about this locker for debugging purpose <nl> } ; <nl> <nl> / * * <nl> mmm a / src / mongo / db / transaction_participant . cpp <nl> ppp b / src / mongo / db / transaction_participant . cpp <nl> TransactionParticipant : : OplogSlotReserver : : OplogSlotReserver ( OperationContext * o <nl> opCtx - > lockState ( ) - > setShouldConflictWithSecondaryBatchApplication ( <nl> _locker - > shouldConflictWithSecondaryBatchApplication ( ) ) ; <nl> _locker - > unsetThreadId ( ) ; <nl> + if ( opCtx - > getLogicalSessionId ( ) ) { <nl> + _locker - > setDebugInfo ( " lsid : " + opCtx - > getLogicalSessionId ( ) - > toBSON ( ) . toString ( ) ) ; <nl> + } <nl> <nl> / / OplogSlotReserver is only used by primary , so always set max transaction lock timeout . <nl> invariant ( opCtx - > writesAreReplicated ( ) ) ; <nl> TransactionParticipant : : TxnResources : : TxnResources ( OperationContext * opCtx , Stas <nl> _locker - > releaseTicket ( ) ; <nl> } <nl> _locker - > unsetThreadId ( ) ; <nl> + if ( opCtx - > getLogicalSessionId ( ) ) { <nl> + _locker - > setDebugInfo ( " lsid : " + opCtx - > getLogicalSessionId ( ) - > toBSON ( ) . toString ( ) ) ; <nl> + } <nl> <nl> / / On secondaries , we yield the locks for transactions . <nl> if ( stashStyle = = StashStyle : : kSecondary ) { <nl>
|
SERVER - 37406 Save logical session id in the locker when stashing the transaction
|
mongodb/mongo
|
b1830f1ad949059fd7d70cc9490e0216fd30b438
|
2019-01-22T16:19:55Z
|
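The patch above attaches a free-form debug string (the logical session id) to each `Locker` and prints it in lock-bucket dumps. As a rough illustration of why that helps, here is a toy C++ model of a lock dump that carries such a per-request `debugInfo` field; the structures and ids are invented for the example and are not MongoDB's lock manager.

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Toy model: every lock request remembers a short, human-readable string
// (here the logical session id) so a hang or deadlock dump can say *who*
// is holding or waiting on the resource.
struct LockRequest {
    std::string mode;
    std::string debugInfo;  // e.g. "lsid: { id: UUID(...) }"
};

static void dumpBucket(const std::vector<LockRequest>& granted,
                       const std::vector<LockRequest>& pending) {
    std::printf("GRANTED:\n");
    for (const auto& r : granted)
        std::printf("  Mode=%s; DebugInfo=%s\n", r.mode.c_str(), r.debugInfo.c_str());
    std::printf("PENDING:\n");
    for (const auto& r : pending)
        std::printf("  Mode=%s; DebugInfo=%s\n", r.mode.c_str(), r.debugInfo.c_str());
}

int main() {
    std::vector<LockRequest> granted = {{"IX", "lsid: 7c1a..."}};
    std::vector<LockRequest> pending = {{"X", "lsid: 91f0..."}};
    dumpBucket(granted, pending);
    return 0;
}
```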
mmm a / cocos / 2d / cocos2d_wp8_headers . props <nl> ppp b / cocos / 2d / cocos2d_wp8_headers . props <nl> <nl> < PropertyGroup / > <nl> < ItemDefinitionGroup > <nl> < ClCompile > <nl> - < AdditionalIncludeDirectories > $ ( EngineRoot ) cocos \ platform \ wp8 ; $ ( EngineRoot ) cocos \ platform \ winrt ; $ ( EngineRoot ) \ external \ winrt - specific \ angle \ include ; $ ( EngineRoot ) \ external \ curl \ include \ wp8 ; $ ( EngineRoot ) \ external \ winrt - specific ; $ ( EngineRoot ) cocos \ audio \ include ; $ ( EngineRoot ) cocos ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) external ; $ ( EngineRoot ) cocos \ editor - support ; $ ( EngineRoot ) ; $ ( EngineRoot ) external \ ConvertUTF ; $ ( GeneratedFilesDir ) < / AdditionalIncludeDirectories > <nl> + < AdditionalIncludeDirectories > $ ( EngineRoot ) cocos \ platform \ wp8 ; $ ( EngineRoot ) cocos \ platform \ winrt ; $ ( EngineRoot ) \ external \ winrt - specific \ angle \ include ; $ ( EngineRoot ) \ external \ curl \ include \ wp8 ; $ ( EngineRoot ) \ external \ winrt - specific ; $ ( EngineRoot ) cocos \ audio \ include ; $ ( EngineRoot ) cocos ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) external ; $ ( EngineRoot ) cocos \ editor - support ; $ ( EngineRoot ) ; $ ( EngineRoot ) external \ ConvertUTF ; $ ( EngineRoot ) external \ wp8 - specific \ zlib \ include ; $ ( GeneratedFilesDir ) < / AdditionalIncludeDirectories > <nl> < PreprocessorDefinitions > _VARIADIC_MAX = 10 ; NOMINMAX ; GL_GLEXT_PROTOTYPES ; _CRT_SECURE_NO_WARNINGS ; _SCL_SECURE_NO_WARNINGS ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < CompileAsWinRT > true < / CompileAsWinRT > <nl> < MultiProcessorCompilation > true < / MultiProcessorCompilation > <nl> mmm a / cocos / editor - support / cocostudio / proj . wp8 / libCocosStudio . vcxproj <nl> ppp b / cocos / editor - support / cocostudio / proj . wp8 / libCocosStudio . vcxproj <nl> <nl> < PrecompiledHeader > Use < / PrecompiledHeader > <nl> < PrecompiledHeaderFile > pch . h < / PrecompiledHeaderFile > <nl> < AdditionalUsingDirectories > $ ( WindowsSDK_MetadataPath ) ; $ ( AdditionalUsingDirectories ) < / AdditionalUsingDirectories > <nl> - < AdditionalIncludeDirectories > $ ( EngineRoot ) ; $ ( EngineRoot ) cocos ; $ ( EngineRoot ) cocos \ audio \ include ; $ ( EngineRoot ) cocos \ editor - support ; $ ( EngineRoot ) external ; $ ( EngineRoot ) external \ tinyxml2 ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) external \ wp8 - specific \ zlib \ include ; $ ( EngineRoot ) extensions ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> + < AdditionalIncludeDirectories > $ ( EngineRoot ) ; $ ( EngineRoot ) cocos ; $ ( EngineRoot ) cocos \ audio \ include ; $ ( EngineRoot ) cocos \ editor - support ; $ ( EngineRoot ) external ; $ ( EngineRoot ) external \ tinyxml2 ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) extensions ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> < PreprocessorDefinitions > WP8 ; _DEBUG ; _LIB ; COCOS2DXWIN32_EXPORTS ; GL_GLEXT_PROTOTYPES ; COCOS2D_DEBUG = 1 ; _CRT_SECURE_NO_WARNINGS ; _SCL_SECURE_NO_WARNINGS ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < DisableSpecificWarnings > 4267 ; 4251 ; 4244 ; % ( DisableSpecificWarnings ) < / DisableSpecificWarnings > <nl> < AdditionalOptions > / Zm200 % ( AdditionalOptions ) < / AdditionalOptions > <nl>
|
fixed missing zlib . h header path
|
cocos2d/cocos2d-x
|
fa62f4273f332a4ba60f6c0561b8b9c529fbb5f8
|
2014-07-07T17:14:57Z
|
mmm a / src / userspace / server / server . xcodeproj / project . pbxproj <nl> ppp b / src / userspace / server / server . xcodeproj / project . pbxproj <nl> <nl> / * End PBXCopyFilesBuildPhase section * / <nl> <nl> / * Begin PBXFileReference section * / <nl> + 341C8E851D4CE6C00018D32C / * grabber_client . hpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . h ; name = grabber_client . hpp ; path = include / grabber_client . hpp ; sourceTree = " < group > " ; } ; <nl> + 341C8E871D4CF4AE0018D32C / * local_datagram_server . hpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . h ; name = local_datagram_server . hpp ; path = . . / . . / share / local_datagram_server . hpp ; sourceTree = " < group > " ; } ; <nl> + 341C8E881D4CF6BD0018D32C / * grabber_observer . hpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . h ; name = grabber_observer . hpp ; path = include / grabber_observer . hpp ; sourceTree = " < group > " ; } ; <nl> 344149111D212A1600C8ECFC / * server * / = { isa = PBXFileReference ; explicitFileType = " compiled . mach - o . executable " ; includeInIndex = 0 ; path = server ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> 344149141D212A1600C8ECFC / * main . cpp * / = { isa = PBXFileReference ; lastKnownFileType = sourcecode . cpp . cpp ; path = main . cpp ; sourceTree = " < group > " ; } ; <nl> 3446AA901D49A63E00337B36 / * io_hid_post_event_wrapper . hpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . h ; name = io_hid_post_event_wrapper . hpp ; path = include / io_hid_post_event_wrapper . hpp ; sourceTree = " < group > " ; } ; <nl> <nl> 3495C6801D3CE652007CB9E7 / * share * / = { <nl> isa = PBXGroup ; <nl> children = ( <nl> + 341C8E871D4CF4AE0018D32C / * local_datagram_server . hpp * / , <nl> 3446AA911D49A66600337B36 / * user_client . hpp * / , <nl> ) ; <nl> name = share ; <nl> <nl> 34E450271D2AB18400C5AC13 / * include * / = { <nl> isa = PBXGroup ; <nl> children = ( <nl> + 341C8E851D4CE6C00018D32C / * grabber_client . hpp * / , <nl> + 341C8E881D4CF6BD0018D32C / * grabber_observer . hpp * / , <nl> 3446AA901D49A63E00337B36 / * io_hid_post_event_wrapper . hpp * / , <nl> ) ; <nl> name = include ; <nl> <nl> isa = XCBuildConfiguration ; <nl> buildSettings = { <nl> ALWAYS_SEARCH_USER_PATHS = NO ; <nl> - CLANG_CXX_LANGUAGE_STANDARD = " gnu + + 0x " ; <nl> + CLANG_CXX_LANGUAGE_STANDARD = " c + + 14 " ; <nl> CLANG_CXX_LIBRARY = " libc + + " ; <nl> CLANG_ENABLE_MODULES = YES ; <nl> CLANG_ENABLE_OBJC_ARC = YES ; <nl> <nl> GCC_WARN_UNUSED_LABEL = YES ; <nl> GCC_WARN_UNUSED_VALUE = YES ; <nl> GCC_WARN_UNUSED_VARIABLE = YES ; <nl> + HEADER_SEARCH_PATHS = . . / vendor ; <nl> MACOSX_DEPLOYMENT_TARGET = 10 . 9 ; <nl> RUN_CLANG_STATIC_ANALYZER = YES ; <nl> SDKROOT = macosx ; <nl> new file mode 100644 <nl> index 000000000 . . 280e02f74 <nl> mmm / dev / null <nl> ppp b / src / userspace / server / server / include / grabber_client . 
hpp <nl> <nl> + # pragma once <nl> + <nl> + class grabber_client final { <nl> + public : <nl> + void open ( void ) { <nl> + io_service_ = std : : make_unique < asio : : io_service > ( ) ; <nl> + asio : : local : : datagram_protocol : : endpoint ep ( " / tmp / karabiner_grabber " ) ; <nl> + socket_ = std : : make_unique < asio : : local : : datagram_protocol : : socket > ( * io_service_ ) ; <nl> + <nl> + char request [ 4 ] ; <nl> + request [ 0 ] = 0xde ; <nl> + request [ 1 ] = 0xad ; <nl> + request [ 2 ] = 0xbe ; <nl> + request [ 3 ] = 0xef ; <nl> + <nl> + socket_ - > open ( ) ; <nl> + socket_ - > send_to ( asio : : buffer ( request , sizeof ( request ) ) , ep ) ; <nl> + std : : cout < < " sent " < < std : : endl ; <nl> + } <nl> + <nl> + void stop ( void ) { <nl> + } <nl> + <nl> + private : <nl> + std : : unique_ptr < asio : : io_service > io_service_ ; <nl> + std : : unique_ptr < asio : : local : : datagram_protocol : : socket > socket_ ; <nl> + } ; <nl> new file mode 100644 <nl> index 000000000 . . b88a5fffa <nl> mmm / dev / null <nl> ppp b / src / userspace / server / server / include / grabber_observer . hpp <nl> <nl> + # pragma once <nl> + <nl> + # include " local_datagram_server . hpp " <nl> + <nl> + class grabber_observer final { <nl> + public : <nl> + std : : thread start ( void ) { <nl> + const char * path = " / tmp / karabiner_observer " ; <nl> + unlink ( path ) ; <nl> + server_ = std : : make_unique < local_datagram_server > ( path ) ; <nl> + <nl> + chmod ( path , 0600 ) ; <nl> + <nl> + return std : : thread ( [ this ] { this - > worker ( ) ; } ) ; <nl> + } <nl> + <nl> + void stop ( void ) { <nl> + } <nl> + <nl> + void worker ( void ) { <nl> + if ( ! server_ ) { <nl> + return ; <nl> + } <nl> + <nl> + for ( ; ; ) <nl> + { <nl> + asio : : local : : datagram_protocol : : endpoint sender_endpoint ; <nl> + size_t length = server_ - > get_socket ( ) . receive_from ( asio : : buffer ( buffer_ ) , sender_endpoint ) ; <nl> + std : : cout < < length < < std : : endl ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + enum { <nl> + buffer_length = 8 * 1024 , <nl> + } ; <nl> + std : : array < uint8_t , buffer_length > buffer_ ; <nl> + std : : unique_ptr < local_datagram_server > server_ ; <nl> + } ; <nl> mmm a / src / userspace / server / server / main . cpp <nl> ppp b / src / userspace / server / server / main . cpp <nl> <nl> # include < IOKit / IOKitLib . h > <nl> # include < IOKit / hidsystem / IOHIDShared . h > <nl> # include < IOKit / hidsystem / ev_keymap . h > <nl> + <nl> # include < iostream > <nl> + # include < memory > <nl> + # include < thread > <nl> + <nl> + / / asio headers <nl> + # define ASIO_STANDALONE <nl> + # include < asio . hpp > <nl> <nl> + # include " grabber_client . hpp " <nl> + # include " grabber_observer . hpp " <nl> # include " io_hid_post_event_wrapper . hpp " <nl> <nl> int main ( int argc , const char * argv [ ] ) { <nl> + grabber_client client ; <nl> + client . open ( ) ; <nl> + <nl> io_hid_post_event_wrapper wrapper ; <nl> wrapper . start ( ) ; <nl> <nl>
|
add grabber_client , grabber_observer
|
pqrs-org/Karabiner-Elements
|
f8fb10a2e3055c29f471f50eb6a92a8bd232afdc
|
2016-07-30T15:04:05Z
|
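The Karabiner commit above talks to `/tmp/karabiner_grabber` and listens on `/tmp/karabiner_observer` using standalone asio's `local::datagram_protocol`. The sketch below shows the same Unix-domain datagram pattern with plain POSIX sockets instead of asio; the socket path is made up for the example and error handling is omitted for brevity.

```cpp
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

// Observer side: bind a datagram socket to a filesystem path and wait for
// packets, which is what local_datagram_server wraps via asio.
static int makeServer(const char* path) {
    ::unlink(path);  // remove any stale socket file, as the observer does
    int fd = ::socket(AF_UNIX, SOCK_DGRAM, 0);
    sockaddr_un addr{};
    addr.sun_family = AF_UNIX;
    std::strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
    ::bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
    return fd;
}

// Client side: an unbound datagram socket that sends one packet to the
// server's path, mirroring grabber_client::open().
static void sendTo(const char* path, const void* data, size_t len) {
    int fd = ::socket(AF_UNIX, SOCK_DGRAM, 0);
    sockaddr_un addr{};
    addr.sun_family = AF_UNIX;
    std::strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
    ::sendto(fd, data, len, 0, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
    ::close(fd);
}

int main() {
    const char* path = "/tmp/example_local_datagram";  // illustrative path
    int server = makeServer(path);

    const unsigned char request[] = {0xde, 0xad, 0xbe, 0xef};
    sendTo(path, request, sizeof(request));

    unsigned char buffer[16];
    ssize_t n = ::recvfrom(server, buffer, sizeof(buffer), 0, nullptr, nullptr);
    std::printf("received %zd bytes, first byte 0x%02x\n", n, buffer[0]);

    ::close(server);
    ::unlink(path);
    return 0;
}
```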
mmm a / imconfig . h <nl> ppp b / imconfig . h <nl> <nl> / / # define IMGUI_DISABLE_WIN32_DEFAULT_CLIPBOARD_FUNCS <nl> / / # define IMGUI_DISABLE_WIN32_DEFAULT_IME_FUNCS <nl> <nl> - / / mmm - Don ' t implement help and test window functionality ( ShowUserGuide ( ) / ShowStyleEditor ( ) / ShowTestWindow ( ) methods will be empty ) <nl> + / / mmm - Don ' t implement test window functionality ( ShowTestWindow ( ) / ShowStyleEditor ( ) / ShowUserGuide ( ) methods will be empty ) <nl> + / / mmm - It is very strongly recommended to NOT disable the test windows . Please read the comment at the top of imgui_demo . cpp to learn why . <nl> / / # define IMGUI_DISABLE_TEST_WINDOWS <nl> <nl> / / mmm - Don ' t define obsolete functions names <nl> mmm a / imgui_demo . cpp <nl> ppp b / imgui_demo . cpp <nl> <nl> / / ( demo code ) <nl> <nl> / / Message to the person tempted to delete this file when integrating ImGui into their code base : <nl> - / / Do NOT remove this file from your project ! It is useful reference code that you and other users will want to refer to . <nl> + / / Don ' t do it ! Do NOT remove this file from your project ! It is useful reference code that you and other users will want to refer to . <nl> / / Everything in this file will be stripped out by the linker if you don ' t call ImGui : : ShowTestWindow ( ) . <nl> - / / During development , you can call ImGui : : ShowTestWindow ( ) in your code to learn about various features of ImGui . <nl> - / / Removing this file from your project is hindering your access to documentation , likely leading you to poorer usage of the library . <nl> + / / During development , you can call ImGui : : ShowTestWindow ( ) in your code to learn about various features of ImGui . Have it wired in a debug menu ! <nl> + / / Removing this file from your project is hindering access to documentation for everyone in your team , likely leading you to poorer usage of the library . <nl> + <nl> + / / Note that you can # define IMGUI_DISABLE_TEST_WINDOWS in imconfig . h for the same effect . <nl> + / / If you want to link core ImGui in your public builds but not those test windows , # define IMGUI_DISABLE_TEST_WINDOWS in imconfig . h and those functions will be empty . <nl> + / / For any other case , if you have ImGui available you probably want this to be available for reference and execution . <nl> + <nl> + / / Thank you , <nl> + / / - Your beloved friend , imgui_demo . cpp ( that you won ' t delete ) <nl> <nl> # if defined ( _MSC_VER ) & & ! defined ( _CRT_SECURE_NO_WARNINGS ) <nl> # define _CRT_SECURE_NO_WARNINGS <nl>
|
Comments about IMGUI_DISABLE_TEST_WINDOWS ( , )
|
ocornut/imgui
|
100d30a0a1b058a8ef8350d5242c30e5ca0fa5e8
|
2017-07-20T15:12:58Z
|
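The imgui change above leans on `IMGUI_DISABLE_TEST_WINDOWS`: the demo functions always exist, but defining the macro compiles their bodies away so callers keep linking. A minimal stand-alone C++ sketch of that compile-out pattern, with a made-up `DEMO_DISABLE_TEST_WINDOWS` macro rather than imgui's actual one:

```cpp
#include <cstdio>

// The functions always exist, so user code that calls them keeps compiling,
// but a config macro can strip the body at build time.
// #define DEMO_DISABLE_TEST_WINDOWS   // would normally live in a config header

#ifndef DEMO_DISABLE_TEST_WINDOWS
void ShowTestWindow(bool* open) {
    std::printf("demo window is %s\n", (open && *open) ? "open" : "closed");
}
#else
void ShowTestWindow(bool*) {}  // compiled out: same signature, empty body
#endif

int main() {
    bool open = true;
    ShowTestWindow(&open);  // safe to call either way; no #ifdefs at the call site
    return 0;
}
```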
mmm a / test / test_jit . py <nl> ppp b / test / test_jit . py <nl> def forward ( self , thing ) : <nl> return thing - self . i <nl> <nl> class M ( torch . nn . Module ) : <nl> - __constants__ = [ ' mods ' ] <nl> - <nl> def __init__ ( self ) : <nl> super ( M , self ) . __init__ ( ) <nl> self . mods = nn . ModuleList ( [ Sub ( i ) for i in range ( 10 ) ] ) <nl> def forward ( self , v ) : <nl> x = torch . tensor ( 1 ) <nl> self . checkModule ( M ( ) , ( x , ) ) <nl> <nl> + class MForward ( torch . nn . Module ) : <nl> + def __init__ ( self ) : <nl> + super ( MForward , self ) . __init__ ( ) <nl> + self . mods = nn . ModuleList ( [ Sub ( i ) for i in range ( 10 ) ] ) <nl> + <nl> + def forward ( self , v ) : <nl> + v = self . mods [ 4 ] ( v ) <nl> + v = self . mods [ - 1 ] ( v ) <nl> + v = self . mods [ - 9 ] ( v ) <nl> + return v <nl> + <nl> + self . checkModule ( MForward ( ) , ( torch . tensor ( 1 ) , ) ) <nl> + <nl> class M2 ( M ) : <nl> def __init__ ( self ) : <nl> super ( M2 , self ) . __init__ ( ) <nl> mmm a / torch / csrc / jit / frontend / ir_emitter . cpp <nl> ppp b / torch / csrc / jit / frontend / ir_emitter . cpp <nl> struct to_ir { <nl> auto apply = Apply ( tree ) ; <nl> return emitApplyExpr ( apply , n_binders , type_hint ) ; <nl> } break ; <nl> + case TK_SUBSCRIPT : { <nl> + return emitSubscript ( Subscript ( tree ) ) ; <nl> + } break ; <nl> default : <nl> return std : : make_shared < SimpleValue > ( emitSimpleExpr ( tree , type_hint ) ) ; <nl> } <nl> struct to_ir { <nl> case TK_NONE : { <nl> return graph - > insertConstant ( IValue ( ) , tree - > range ( ) ) ; <nl> } break ; <nl> - case TK_SUBSCRIPT : { <nl> - return emitSubscript ( Subscript ( tree ) ) ; <nl> - } break ; <nl> case TK_IF_EXPR : { <nl> return emitTernaryIf ( TernaryIf ( tree ) ) ; <nl> } break ; <nl> struct to_ir { <nl> - > output ( ) ; <nl> } <nl> <nl> - Value * emitSubscript ( const Subscript & subscript ) { <nl> + std : : shared_ptr < SugaredValue > emitSubscript ( const Subscript & subscript ) { <nl> const SugaredValuePtr sv = emitSugaredExpr ( subscript . value ( ) , 1 ) ; <nl> const List < Expr > & subscript_exprs = subscript . subscript_exprs ( ) ; <nl> const SourceRange & range = subscript . range ( ) ; <nl> const SourceRange & val_range = subscript . value ( ) . range ( ) ; <nl> if ( subscript_exprs . size ( ) ! = 1 ) { <nl> - return emitMultidimSlicing ( <nl> - range , sv - > asValue ( val_range , method ) , subscript_exprs ) ; <nl> + return std : : make_shared < SimpleValue > ( emitMultidimSlicing ( <nl> + range , sv - > asValue ( val_range , method ) , subscript_exprs ) ) ; <nl> } <nl> if ( subscript_exprs [ 0 ] . kind ( ) = = TK_SLICE_EXPR ) { <nl> - return emitBasicSlice ( <nl> - range , sv - > asValue ( val_range , method ) , subscript_exprs ) ; <nl> + return std : : make_shared < SimpleValue > ( emitBasicSlice ( <nl> + range , sv - > asValue ( val_range , method ) , subscript_exprs ) ) ; <nl> } else { <nl> / / Desugars gather syntactic sugar foo [ i ] <nl> Value * idx = emitExpr ( subscript_exprs [ 0 ] ) ; <nl> struct to_ir { <nl> AT_ASSERT ( subscript_exprs . 
size ( ) = = 1 ) ; <nl> <nl> if ( val - > type ( ) - > cast < TupleType > ( ) ) { <nl> - return emitTupleIndex ( range , sv - > asValue ( val_range , method ) , idx ) ; <nl> + return std : : make_shared < SimpleValue > ( <nl> + emitTupleIndex ( range , sv - > asValue ( val_range , method ) , idx ) ) ; <nl> } else if ( val - > type ( ) - > isSubtypeOf ( TensorType : : get ( ) ) ) { <nl> - return emitMultidimSlicing ( range , val , subscript_exprs ) ; <nl> + return std : : make_shared < SimpleValue > ( <nl> + emitMultidimSlicing ( range , val , subscript_exprs ) ) ; <nl> } else { <nl> - return sv - > getitem ( range , method , idx ) - > asValue ( range , method ) ; <nl> + return sv - > getitem ( range , method , idx ) ; <nl> } <nl> } <nl> } <nl> mmm a / torch / csrc / jit / python / python_sugared_value . cpp <nl> ppp b / torch / csrc / jit / python / python_sugared_value . cpp <nl> SugaredValuePtr ModuleValue : : getitem ( <nl> return getSugaredModuleDict ( loc , m ) - > getModules ( ) - > getitem ( loc , m , idx ) ; <nl> } <nl> throw ErrorReport ( loc ) <nl> - < < " Only ModuleLists , Sequentials , and ModuleDict Modules are subscriptable " ; <nl> + < < " Only ModuleList , Sequential , and ModuleDict modules are subscriptable " ; <nl> } <nl> <nl> void checkInterface ( <nl>
|
[ jit ] Make ` ModuleList ` s a sugared value ( )
|
pytorch/pytorch
|
2c0f3536b6441ad23accb9398806b43d9bb2c443
|
2020-03-09T22:36:46Z
|
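The TorchScript change above makes subscripting produce a sugared value, which is what lets the new test index a `ModuleList` with `mods[4]`, `mods[-1]` and `mods[-9]` inside `forward`. The C++ sketch below only illustrates the Python-style negative-index arithmetic involved, over a plain vector of callables rather than the compiler's `SugaredValue` machinery.

```cpp
#include <cstdio>
#include <functional>
#include <stdexcept>
#include <vector>

// Python-style subscripting: -1 is the last element, -size the first.
static size_t normalizeIndex(long idx, size_t size) {
    long n = idx < 0 ? idx + static_cast<long>(size) : idx;
    if (n < 0 || n >= static_cast<long>(size)) throw std::out_of_range("index out of range");
    return static_cast<size_t>(n);
}

int main() {
    // Ten stand-ins for Sub(0) .. Sub(9): each subtracts its index from the input.
    std::vector<std::function<int(int)>> mods;
    for (int i = 0; i < 10; ++i) {
        mods.push_back([i](int v) { return v - i; });
    }

    int v = 1;
    v = mods[normalizeIndex(4, mods.size())](v);   // Sub(4)
    v = mods[normalizeIndex(-1, mods.size())](v);  // Sub(9)
    v = mods[normalizeIndex(-9, mods.size())](v);  // Sub(1)
    std::printf("result: %d\n", v);                // 1 - 4 - 9 - 1 = -13
    return 0;
}
```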
mmm a / docs / NativeComponentsWindows . md <nl> ppp b / docs / NativeComponentsWindows . md <nl> <nl> + mmm <nl> + id : native - components - windows <nl> + title : Native UI Components <nl> + layout : docs <nl> + category : Guides ( Windows ) <nl> + permalink : docs / native - components - windows . html <nl> + next : running - on - device - windows <nl> + mmm <nl> + <nl> + There are tons of native UI widgets out there ready to be used in the latest apps - some of them are part of the platform , others are available as third - party libraries , and still more might be in use in your very own portfolio . React Native has several of the most critical platform components already wrapped , like ` ScrollView ` and ` TextInput ` , but not all of them , and certainly not ones you might have written yourself for a previous app . Fortunately , it ' s quite easy to wrap up these existing components for seamless integration with your React Native application . <nl> + <nl> + Like the native module guide , this too is a more advanced guide that assumes you are somewhat familiar with the Universal Windows SDK programming . This guide will show you how to build a native UI component , walking you through the implementation of a subset of the existing ` ImageView ` component available in the core React Native library . <nl> + <nl> + # # ImageView example <nl> + <nl> + For this example we are going to walk through the implementation requirements to allow the use of ImageViews in JavaScript . <nl> + <nl> + Native views are created and manipulated by extending ` ViewManager ` or more commonly ` SimpleViewManager ` . A ` SimpleViewManager ` is convenient in this case because it applies common properties such as background color , opacity , and Flexbox layout . <nl> + <nl> + These subclasses are essentially singletons - only one instance of each is created by the bridge . They vend native views to the ` NativeViewHierarchyManager ` , which delegates back to them to set and update the properties of the views as necessary . The ` ViewManagers ` are also typically the delegates for the views , sending events back to JavaScript via the bridge . <nl> + <nl> + Vending a view is simple : <nl> + <nl> + 1 . Create the ViewManager subclass . <nl> + 2 . Implement the ` CreateViewInstance ` method <nl> + 3 . Expose view property setters using the ` [ ReactProp ] ` ( or ` [ ReactPropGroup ] ` ) attribute <nl> + 4 . Register the manager in ` CreateViewManagers ` of the applications package . <nl> + 5 . Implement the JavaScript module <nl> + <nl> + # # 1 . Create the ` ViewManager ` subclass <nl> + <nl> + In this example we create view manager class ` ReactImageManager ` that extends ` SimpleViewManager ` of type ` ReactImageView ` . ` ReactImageView ` is the type of object managed by the manager , this will be the custom native view . Name returned by ` getName ` is used to reference the native view type from JavaScript . <nl> + <nl> + ` ` ` csharp <nl> + . . . <nl> + <nl> + public class ReactImageManager : SimpleViewManager < Border > <nl> + { <nl> + <nl> + public override string Name <nl> + { <nl> + get <nl> + { <nl> + return " RCTImageView " ; <nl> + } <nl> + } <nl> + } <nl> + ` ` ` <nl> + <nl> + # # 2 . Implement method ` CreateViewInstance ` <nl> + <nl> + Views are created in the ` CreateViewInstance ` method , the view should initialize itself in its default state , any properties will be set via a follow up call to ` UpdateView . 
` <nl> + <nl> + ` ` ` csharp <nl> + protected override Border CreateViewInstance ( ThemedReactContext reactContext ) <nl> + { <nl> + return new Border <nl> + { <nl> + Background = new ImageBrush ( ) <nl> + } ; <nl> + } <nl> + ` ` ` <nl> + <nl> + # # 3 . Expose view property setters using the ` [ ReactProp ] ` ( or ` [ ReactPropGroup ] ` ) attribute <nl> + <nl> + Properties that are to be reflected in JavaScript needs to be exposed as setter method annotated with ` [ ReactProp ] ` ( or ` [ ReactPropGroup ] ` ) . Setter method should take view to be updated ( of the current view type ) as a first argument and property value as a second argument . Setter should be declared as a ` void ` method and should be ` public ` . Property type sent to JS is determined automatically based on the type of value argument of the setter . This project uses [ Newtonsoft Json . NET ] ( http : / / www . newtonsoft . com / json ) to provide interoperability with JavaScript types . The parameter types of these attributes may be of any type that can be deserialized using Json . NET . Be aware that the use of composite types such as arrays , generics , and user - defined classes may not be supported out - of - the - box with [ . NET Native ] ( https : / / msdn . microsoft . com / en - us / library / dn584397 . aspx ) pre - compilation , and you may need add information to the [ runtime directives ( rd . xml ) ] ( https : / / msdn . microsoft . com / en - us / library / dn600639 . aspx ) file , or just manually deconstruct the JSON by using [ ` JArray ` ] . <nl> + <nl> + Attribute ` [ ReactProp ] ` has one obligatory argument ` Name ` of type ` String ` . Name assigned to the ` [ ReactProp ] ` attribute linked to the setter method is used to reference the property on JS side . <nl> + <nl> + Except from ` Name ` , ` [ ReactProp ] ` attribute may take following optional arguments : ` DefaultBoolean ` , ` DefaultInt32 ` , ` DefaultDouble ` . Those arguments should be of the corresponding primitive type ( accordingly ` boolean ` , ` int ` , ` double ` ) and the value provided will be passed to the setter method in case when the property that the setter is referencing has been removed from the component . Note that " default " values are only provided for primitive types , in case when setter is of some complex type , ` null ` will be provided as a default value in case when corresponding property gets removed . <nl> + <nl> + Setter declaration requirements for methods annotated with ` [ ReactPropGroup ] ` are different than for ` [ ReactProp ] ` , please refer to the ` [ ReactPropGroup ] ` attribute class docs for more information about it . <nl> + <nl> + * * IMPORTANT ! * * in ReactJS updating the property value will result in setter method call . Note that one of the ways we can update component is by removing properties that has been set before . In that case setter method will be called as well to notify view manager that property has changed . In that case " default " value will be provided ( for primitive types " default " can value can be specified using ` DefaultBoolean ` , ` DefaultDouble ` , etc . arguments of ` [ ReactProp ] ` attribute , for complex types setter will be called with value set to ` null ` ) . <nl> + <nl> + ` ` ` csharp <nl> + [ ReactProp ( " borderRadius " ) ] <nl> + public void SetBorderRadius ( Border view , double radius ) <nl> + { <nl> + view . 
CornerRadius = new CornerRadius(radius); <nl> + } <nl> + <nl>
+ [ReactProp("borderColor", CustomType = "Color")] <nl> + public void SetBorderColor(Border view, uint? color) <nl> + { <nl> + view.BorderBrush = color.HasValue <nl> + ? new SolidColorBrush(ColorHelpers.Parse(color.Value)) <nl> + : null; <nl> + } <nl> + <nl>
+ [ReactProp("src")] <nl> + public void SetSource(Border view, string source) <nl> + { <nl> + var imageBrush = (ImageBrush)view.Background; <nl> + imageBrush.ImageSource = new BitmapImage(new Uri(source)); <nl> + <nl> + view.GetReactContext() <nl> + .GetNativeModule<UIManagerModule>() <nl> + .EventDispatcher <nl> + .DispatchEvent( <nl> + new ReactImageLoadEvent( <nl> + view.GetTag(), <nl> + ReactImageLoadEvent.OnLoadStart)); <nl> + } <nl> + ``` <nl> + <nl>
+ ## 4. Register the `ViewManager` <nl> + <nl>
+ The final native-side step is to register the ViewManager with the application. This happens in a similar way to [Native Modules](docs/native-modules-windows.html), via the application package's `CreateViewManagers` member function. <nl> + <nl>
+ ```csharp <nl> + public IReadOnlyList<IViewManager> CreateViewManagers( <nl> + ReactContext reactContext) <nl> + { <nl> + return new List<IViewManager> <nl> + { <nl> + new ReactImageManager() <nl> + }; <nl> + } <nl> + ``` <nl> + <nl>
+ ## 5. Implement the JavaScript module <nl> + <nl>
+ The very final step is to create the JavaScript module that defines the interface layer between .NET and JavaScript for the users of your new view. Much of the effort is handled by internal React code in .NET and JavaScript, and all that is left for you is to describe the `propTypes`. <nl> + <nl>
+ ```js <nl> + // ImageView.js <nl> + <nl> + import { PropTypes } from 'react'; <nl> + import { requireNativeComponent, View } from 'react-native'; <nl> + <nl> + var iface = { <nl> + name: 'ImageView', <nl> + propTypes: { <nl> + src: PropTypes.string, <nl> + borderRadius: PropTypes.number, <nl> + resizeMode: PropTypes.oneOf(['cover', 'contain', 'stretch']), <nl> + ...View.propTypes // include the default view properties <nl> + }, <nl> + }; <nl> + <nl> + module.exports = requireNativeComponent('RCTImageView', iface); <nl> + ``` <nl> + <nl>
+ `requireNativeComponent` commonly takes two parameters: the first is the name of the native view and the second is an object that describes the component interface. The component interface should declare a friendly `name` for use in debug messages and must declare the `propTypes` reflected by the native view. The `propTypes` are used for checking the validity of a user's use of the native view. Note that if you need your JavaScript component to do more than just specify a name and propTypes, such as custom event handling, you can wrap the native component in a normal React component. In that case, you want to pass the wrapper component instead of `iface` to `requireNativeComponent`. This is illustrated in the `ReactTextBox` example below. <nl> + <nl>
+ # Events <nl> + <nl>
+ So now we know how to expose native view components that we can control easily from JS, but how do we deal with events from the user, like pinch-zooms or panning?
When a native event occurs, the native code should issue an event to the JavaScript representation of the view, and the two views are linked with the value returned from the `GetTag()` method. <nl> + <nl>
+ ```csharp <nl> + class ReactTextChangedEvent : Event <nl> + { <nl> + ... <nl> + <nl> + public override string EventName <nl> + { <nl> + get <nl> + { <nl> + return "topChange"; <nl> + } <nl> + } <nl> + <nl> + public override void Dispatch(RCTEventEmitter rctEventEmitter) <nl> + { <nl> + var contentSize = new JObject <nl> + { <nl> + { "width", _contentWidth }, <nl> + { "height", _contentHeight }, <nl> + }; <nl> + <nl> + var eventData = new JObject <nl> + { <nl> + { "text", _text }, <nl> + { "contentSize", contentSize }, <nl> + { "eventCount", _eventCount }, <nl> + { "target", ViewTag }, <nl> + }; <nl> + <nl> + rctEventEmitter.receiveEvent(ViewTag, EventName, eventData); <nl> + } <nl> + } <nl> + <nl> + class ReactTextBox : TextBox, ILayoutManager <nl> + { <nl> + public ReactTextBox() <nl> + { <nl> + LayoutUpdated += OnLayoutUpdated; <nl> + } <nl> + <nl> + ... <nl> + private void OnLayoutUpdated(object sender, object e) <nl> + { <nl> + var width = ActualWidth; <nl> + var height = ActualHeight; <nl> + if (width != _lastWidth || height != _lastHeight) <nl> + { <nl> + _lastWidth = width; <nl> + _lastHeight = height; <nl> + <nl> + this.GetReactContext() <nl> + .GetNativeModule<UIManagerModule>() <nl> + .EventDispatcher <nl> + .DispatchEvent( <nl> + new ReactTextChangedEvent( <nl> + this.GetTag(), <nl> + Text, <nl> + width, <nl> + height, <nl> + IncrementEventCount())); <nl> + } <nl> + } <nl> + } <nl> + ``` <nl> + <nl>
+ The event name `topChange` maps to the `onChange` callback prop in JavaScript (the mappings are in `UIManagerModule.Constants.cs`). This callback is invoked with the raw event, which we typically process in the wrapper component to make a simpler API: <nl> + <nl>
+ ```js <nl> + // MyCustomView.js <nl> + <nl> + class MyCustomView extends React.Component { <nl> + constructor(props) { <nl> + super(props); <nl> + this._onChange = this._onChange.bind(this); <nl> + } <nl> + _onChange(event: Event) { <nl> + if (!this.props.onChangeMessage) { <nl> + return; <nl> + } <nl> + this.props.onChangeMessage(event.nativeEvent.message); <nl> + } <nl> + render() { <nl> + return <RCTMyCustomView {...this.props} onChange={this._onChange} />; <nl> + } <nl> + } <nl> + MyCustomView.propTypes = { <nl> + /** <nl> + * Callback that is called when a change event is received from the native view. <nl> + */ <nl> + onChangeMessage: React.PropTypes.func, <nl> + ... <nl> + }; <nl> + <nl> + var RCTMyCustomView = requireNativeComponent('RCTMyCustomView', MyCustomView, { <nl> + nativeOnly: { onChange: true } <nl> + }); <nl> + ``` <nl> + <nl>
+ Note the use of `nativeOnly` above. Sometimes you'll have some special properties that you need to expose for the native component, but don't actually want them as part of the API for the associated React component. For example, `Switch` has a custom `onChange` handler for the raw native event, and exposes an `onValueChange` handler property that is invoked with just the boolean value rather than the raw event (similar to `onChangeMessage` in the example above).
Since you don't want these native-only properties to be part of the API, you also don't want to put them in `propTypes`; but if you leave them out entirely, you'll get an error. The solution is simply to call them out via the `nativeOnly` option. <nl> \ No newline at end of file <nl>
|
docs(Components) - Add Native Components Documentation ()
|
microsoft/react-native-windows
|
e24eb549486737402ff96709a3b8526675e4b76a
|
2016-06-07T16:54:24Z
|
mmm a / xbmc / interfaces / json - rpc / FileItemHandler . cpp <nl> ppp b / xbmc / interfaces / json - rpc / FileItemHandler . cpp <nl> bool CFileItemHandler : : FillFileItemList ( const CVariant & parameterObject , CFileIt <nl> CFileOperations : : FillFileItemList ( parameterObject , list ) ; <nl> <nl> CStdString file = parameterObject [ " file " ] . asString ( ) ; <nl> - if ( ! file . empty ( ) & & ! CDirectory : : Exists ( file ) & & ( URIUtils : : IsURL ( file ) | | CFile : : Exists ( file ) ) ) <nl> + if ( ! file . empty ( ) & & ( URIUtils : : IsURL ( file ) | | ( CFile : : Exists ( file ) & & ! CDirectory : : Exists ( file ) ) ) ) <nl> { <nl> bool added = false ; <nl> for ( int index = 0 ; index < list . Size ( ) ; index + + ) <nl>
|
fixes: fix XBMC.Play to play plugin-related media paths through "file" parameter (thanks xblitz)
|
xbmc/xbmc
|
0f336335f0090579d6fad196c3b66d293f126ead
|
2011-06-12T09:50:20Z
|
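The XBMC change above only reorders a boolean expression, so the intent is easy to miss. Below is a small illustration (not XBMC code) of why the reordering matters; it assumes, as the commit message implies, that a plugin:// media path is reported as a URL by `URIUtils::IsURL()`, is not a plain file for `CFile::Exists()`, and can be reported as a directory by `CDirectory::Exists()`.

```cpp
#include <cassert>

// Old predicate: the !is_dir guard applied to URLs too, so a plugin://
// path that also looked like a directory was filtered out.
static bool old_accepts(bool is_url, bool is_file, bool is_dir) {
    return !is_dir && (is_url || is_file);
}

// New predicate: the directory check only guards the local-file branch,
// so URLs (including plugin:// paths) are always accepted.
static bool new_accepts(bool is_url, bool is_file, bool is_dir) {
    return is_url || (is_file && !is_dir);
}

int main() {
    // Hypothetical plugin:// path: a URL, not a plain file, seen as a directory.
    assert(!old_accepts(true, false, true));   // previously skipped
    assert(new_accepts(true, false, true));    // now played via the "file" parameter
    // Plain local directories are still rejected either way.
    assert(!old_accepts(false, false, true));
    assert(!new_accepts(false, false, true));
    return 0;
}
```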
mmm a / test / common / common / base64_test . cc <nl> ppp b / test / common / common / base64_test . cc <nl> <nl> # include " common / common / base64 . h " <nl> <nl> TEST ( Base64 , EmptyBufferEncode ) { <nl> - Buffer : : OwnedImpl buffer ; <nl> - EXPECT_EQ ( " " , Base64 : : encode ( buffer , 0 ) ) ; <nl> + { <nl> + Buffer : : OwnedImpl buffer ; <nl> + EXPECT_EQ ( " " , Base64 : : encode ( buffer , 0 ) ) ; <nl> + } <nl> + <nl> + { <nl> + Buffer : : OwnedImpl buffer ; <nl> + buffer . add ( " \ 0 \ 0 " ) ; <nl> + EXPECT_EQ ( " " , Base64 : : encode ( buffer , 2 ) ) ; <nl> + } <nl> } <nl> <nl> TEST ( Base64 , SingleSliceBufferEncode ) { <nl>
|
add base64 0 buffer test ()
|
envoyproxy/envoy
|
05478a157206151c5330fe62cbced9f6d7bae543
|
2017-01-20T18:16:09Z
|
mmm a / tensorflow / compiler / mlir / tensorflow / transforms / batchmatmul_to_einsum . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / batchmatmul_to_einsum . cc <nl> limitations under the License . <nl> # include " mlir / Analysis / LoopAnalysis . h " / / from @ llvm - project <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / OpImplementation . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LLVM . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / collection_ops_util . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / collection_ops_util . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LLVM . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / decode_attributes_hook . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / decode_attributes_hook . cc <nl> limitations under the License . <nl> # include " llvm / ADT / ArrayRef . h " <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Dialect . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_ops . h " <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / decompose_resource_ops . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / decompose_resource_ops . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / compiler / mlir / tensorflow / transforms / decompose_resource_ops . h " <nl> <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_ops . h " <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_types . 
h " <nl> <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / einsum . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / einsum . cc <nl> limitations under the License . <nl> # include " mlir / Analysis / LoopAnalysis . h " / / from @ llvm - project <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / OpImplementation . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LLVM . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / einsum . h <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / einsum . h <nl> limitations under the License . <nl> # include " llvm / ADT / ArrayRef . h " <nl> # include " llvm / Support / Casting . h " <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . h " / / from @ llvm - project <nl> # include " mlir / IR / Matchers . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_ops . h " <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / fold_broadcast . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / fold_broadcast . cc <nl> limitations under the License . <nl> # include " llvm / Support / Casting . h " <nl> # include " mlir / Dialect / Traits . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> # include " mlir / Transforms / GreedyPatternRewriteDriver . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / fold_switch . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / fold_switch . cc <nl> limitations under the License . <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Block . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . 
h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / OperationSupport . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / functional_control_flow_to_regions . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / functional_control_flow_to_regions . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> # include " mlir / IR / Verifier . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / legalize_hlo . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / legalize_hlo . cc <nl> limitations under the License . <nl> # include " llvm / Support / raw_ostream . h " <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . h " / / from @ llvm - project <nl> # include " mlir / IR / Matchers . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LLVM . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / lower_tf . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / lower_tf . cc <nl> limitations under the License . <nl> # include " llvm / ADT / ArrayRef . h " <nl> # include " llvm / ADT / SmallVector . h " <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Diagnostics . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . h " / / from @ llvm - project <nl> # include " mlir / IR / Matchers . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeRange . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . 
h " / / from @ llvm - project <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_ops . h " <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / optimize_global_tensors . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / optimize_global_tensors . cc <nl> limitations under the License . <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / SymbolTable . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / Interfaces / CallInterfaces . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / promote_resources_to_args . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / promote_resources_to_args . cc <nl> limitations under the License . <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / readonly_references_to_resources . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / readonly_references_to_resources . cc <nl> limitations under the License . <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / resource_op_lifting . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / resource_op_lifting . cc <nl> limitations under the License . <nl> # include " mlir / IR / BlockAndValueMapping . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Diagnostics . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / Region . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / SymbolTable . 
h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / rewrite_tpu_embedding_ops . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / rewrite_tpu_embedding_ops . cc <nl> limitations under the License . <nl> <nl> # include " llvm / ADT / SmallVector . h " <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Pass / PassRegistry . h " / / from @ llvm - project <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_ops . h " <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / shape_inference . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / shape_inference . cc <nl> limitations under the License . <nl> # include " mlir / IR / Block . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Diagnostics . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / OperationSupport . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / SymbolTable . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> # include " mlir / Interfaces / CallInterfaces . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / shape_inference_pass . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / shape_inference_pass . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LLVM . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / stack_ops_decomposition . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / stack_ops_decomposition . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . 
h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / SymbolTable . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tensor_array_ops_decomposition . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tensor_array_ops_decomposition . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / SymbolTable . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tensor_device_copy_conversion . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tensor_device_copy_conversion . cc <nl> limitations under the License . <nl> # include " mlir / Pass / PassManager . h " <nl> # include " mlir / Transforms / DialectConversion . h " <nl> # include " mlir / Transforms / Passes . h " <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / OperationSupport . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / Pass / PassOptions . h " / / from @ llvm - project <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_ops . h " <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tensor_list_ops_decomposition . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tensor_list_ops_decomposition . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tf_data_optimization . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tf_data_optimization . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / compiler / mlir / tensorflow / transforms / tf_data_optimization . h " <nl> <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . 
h " / / from @ llvm - project <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_ops . h " <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_types . h " <nl> <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tpu_dynamic_layout_pass . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tpu_dynamic_layout_pass . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / OperationSupport . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tpu_extract_outside_compilation . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tpu_extract_outside_compilation . cc <nl> limitations under the License . <nl> # include " llvm / Support / FormatVariadic . h " <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / OperationSupport . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeRange . h " / / from @ llvm - project <nl> # include " mlir / IR / Visitors . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tpu_rewrite_pass . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tpu_rewrite_pass . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Pass / PassRegistry . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tpu_space_to_depth_pass . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tpu_space_to_depth_pass . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . 
h " / / from @ llvm - project <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Location . h " / / from @ llvm - project <nl> # include " mlir / IR / MLIRContext . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / OperationSupport . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / TypeUtilities . h " / / from @ llvm - project <nl> # include " mlir / IR / Types . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tpu_update_embedding_enqueue_op_inputs . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tpu_update_embedding_enqueue_op_inputs . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Block . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> # include " mlir / IR / Visitors . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / unroll_batch_matmul . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / unroll_batch_matmul . cc <nl> limitations under the License . <nl> # include " mlir / Analysis / LoopAnalysis . h " / / from @ llvm - project <nl> # include " mlir / Dialect / StandardOps / IR / Ops . h " / / from @ llvm - project <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> + # include " mlir / IR / BuiltinTypes . h " / / from @ llvm - project <nl> # include " mlir / IR / OpImplementation . h " / / from @ llvm - project <nl> # include " mlir / IR / PatternMatch . h " / / from @ llvm - project <nl> - # include " mlir / IR / StandardTypes . h " / / from @ llvm - project <nl> # include " mlir / Pass / Pass . h " / / from @ llvm - project <nl> # include " mlir / Support / LLVM . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl>
|
[mlir][NFC] Replace usages of mlir/IR/StandardTypes.h with mlir/IR/BuiltinTypes.h
|
tensorflow/tensorflow
|
110199cd2b22e117fcb4940d2566cf7d66d98059
|
2020-12-12T02:01:03Z
|
mmm a / test / cpp / codegen / BUILD <nl> ppp b / test / cpp / codegen / BUILD <nl> grpc_cc_test ( <nl> <nl> grpc_cc_binary ( <nl> name = " golden_file_test " , <nl> + testonly = True , <nl> srcs = [ " golden_file_test . cc " ] , <nl> deps = [ <nl> " / / : grpc + + " , <nl>
|
Merge pull request from apolcyn/mark_testonly
|
grpc/grpc
|
2f041aed40fc2d97cbdd0a875b9a87b12711e142
|
2017-12-20T05:13:53Z
|
mmm a / src / clustering / administration / issues / pinnings_shards_mismatch . cc <nl> ppp b / src / clustering / administration / issues / pinnings_shards_mismatch . cc <nl> std : : string pinnings_shards_mismatch_issue_t < protocol_t > : : get_description ( ) cons <nl> " Primary pinnings : \ n % s \ n " <nl> " Secondary pinnings : \ n % s \ n " , <nl> uuid_to_str ( offending_namespace ) . c_str ( ) , <nl> - scoped_cJSON_t ( render_as_json ( & _shards , 0 ) ) . Print ( ) . c_str ( ) , <nl> - scoped_cJSON_t ( render_as_json ( & _primary_pinnings , 0 ) ) . Print ( ) . c_str ( ) , <nl> - scoped_cJSON_t ( render_as_json ( & _secondary_pinnings , 0 ) ) . Print ( ) . c_str ( ) ) ; <nl> + scoped_cJSON_t ( render_as_json ( & _shards , 0 ) ) . PrintUnformatted ( ) . c_str ( ) , <nl> + scoped_cJSON_t ( render_as_json ( & _primary_pinnings , 0 ) ) . PrintUnformatted ( ) . c_str ( ) , <nl> + scoped_cJSON_t ( render_as_json ( & _secondary_pinnings , 0 ) ) . PrintUnformatted ( ) . c_str ( ) ) ; <nl> } <nl> <nl> template < class protocol_t > <nl> mmm a / src / memcached / protocol_json_adapter . tcc <nl> ppp b / src / memcached / protocol_json_adapter . tcc <nl> std : : string render_region_as_string ( key_range_t * target , const ctx_t & c ) { <nl> res . AddItemToArray ( cJSON_CreateNull ( ) ) ; <nl> } <nl> <nl> - return res . Print ( ) ; <nl> + return res . PrintUnformatted ( ) ; <nl> } <nl> <nl> template < class ctx_t > <nl>
|
don't pretty-print key ranges
|
rethinkdb/rethinkdb
|
cb3dbaaf57436a06a77fda99efa010637bd69b4d
|
2012-07-28T00:07:52Z
|
mmm a / src / corruptionCheck . js <nl> ppp b / src / corruptionCheck . js <nl> var CorruptionChecker = { <nl> CorruptionChecker . realFree ( allocation ) ; <nl> } , <nl> canary : function ( x ) { <nl> - return ( x + ( x < < 3 ) + ( x & 75 ) - ( x & 47 ) ) & 255 ; <nl> + return ( x & 127 ) + 10 ; <nl> } , <nl> fillBuffer : function ( allocation , size ) { <nl> for ( var x = allocation ; x < allocation + size ; x + + ) { <nl>
|
simplify corruption canary
|
emscripten-core/emscripten
|
f9728ef5b36fce1639af4ba6e9ac1e68321053fd
|
2013-02-20T00:52:27Z
|
mmm a / config . m4 <nl> ppp b / config . m4 <nl> if test " $ PHP_SWOOLE " ! = " no " ; then <nl> PHP_ADD_LIBRARY ( rt , 1 , SWOOLE_SHARED_LIBADD ) <nl> fi <nl> <nl> - <nl> PHP_ADD_LIBRARY ( pthread , 1 , SWOOLE_SHARED_LIBADD ) <nl> <nl> if test " $ PHP_OPENSSL " = " yes " ; then <nl> if test " $ PHP_SWOOLE " ! = " no " ; then <nl> src / protocol / SSL . c \ <nl> src / protocol / Http . c \ <nl> , $ ext_shared ) <nl> - <nl> + <nl> PHP_ADD_INCLUDE ( [ $ ext_srcdir / include ] ) <nl> PHP_ADD_BUILD_DIR ( $ ext_builddir / src / core ) <nl> PHP_ADD_BUILD_DIR ( $ ext_builddir / src / memory ) <nl> if test " $ PHP_SWOOLE " ! = " no " ; then <nl> PHP_ADD_BUILD_DIR ( $ ext_builddir / src / os ) <nl> PHP_ADD_BUILD_DIR ( $ ext_builddir / src / network ) <nl> PHP_ADD_BUILD_DIR ( $ ext_builddir / src / protocol ) <nl> + PHP_ADD_BUILD_DIR ( $ ext_builddir / thirdparty ) <nl> fi <nl> <nl>
|
Update config . m4 , added thirdparty BUILD_DIR .
|
swoole/swoole-src
|
20c117e83ef8782c2e91b1f18e1d9bc53681bb99
|
2014-10-29T01:50:56Z
|
mmm a / root . c <nl> ppp b / root . c <nl> static bool apply_ignore_vcs_configuration ( w_root_t * root , char * * errmsg ) <nl> } <nl> <nl> for ( i = 0 ; i < json_array_size ( ignores ) ; i + + ) { <nl> - const char * ignore = json_string_value ( json_array_get ( ignores , i ) ) ; <nl> + const json_t * jignore = json_array_get ( ignores , i ) ; <nl> <nl> - if ( ! ignore ) { <nl> + if ( ! json_is_string ( jignore ) ) { <nl> ignore_result ( asprintf ( errmsg , <nl> " ignore_vcs must be an array of strings " ) ) ; <nl> json_decref ( ignores ) ; <nl> return false ; <nl> } <nl> <nl> - name = w_string_new ( ignore ) ; <nl> + name = json_to_w_string ( jignore ) ; <nl> fullname = w_string_path_cat ( root - > root_path , name ) ; <nl> <nl> / / if we are completely ignoring this dir , we have nothing more to <nl> / / do here <nl> if ( w_ht_get ( root - > ignore . ignore_dirs , w_ht_ptr_val ( fullname ) ) ) { <nl> w_string_delref ( fullname ) ; <nl> - w_string_delref ( name ) ; <nl> continue ; <nl> } <nl> <nl> static bool apply_ignore_vcs_configuration ( w_root_t * root , char * * errmsg ) <nl> / / root / { . hg , . git , . svn } <nl> root - > query_cookie_dir = w_string_path_cat ( root - > root_path , name ) ; <nl> } <nl> - w_string_delref ( name ) ; <nl> w_string_delref ( fullname ) ; <nl> } <nl> <nl> static void apply_ignore_configuration ( w_root_t * root ) <nl> } <nl> <nl> for ( i = 0 ; i < json_array_size ( ignores ) ; i + + ) { <nl> - const char * ignore = json_string_value ( json_array_get ( ignores , i ) ) ; <nl> + const json_t * jignore = json_array_get ( ignores , i ) ; <nl> <nl> - if ( ! ignore ) { <nl> + if ( ! json_is_string ( jignore ) ) { <nl> w_log ( W_LOG_ERR , " ignore_dirs must be an array of strings \ n " ) ; <nl> continue ; <nl> } <nl> <nl> - name = w_string_new ( ignore ) ; <nl> + name = json_to_w_string ( jignore ) ; <nl> fullname = w_string_path_cat ( root - > root_path , name ) ; <nl> w_ignore_addstr ( & root - > ignore , fullname , false ) ; <nl> w_log ( W_LOG_DBG , " ignoring % . 
* s recursively \ n " , <nl> fullname - > len , fullname - > buf ) ; <nl> w_string_delref ( fullname ) ; <nl> - w_string_delref ( name ) ; <nl> } <nl> } <nl> <nl> static w_root_t * w_root_new ( const char * path , char * * errmsg ) <nl> root - > case_sensitive = is_case_sensitive_filesystem ( path ) ; <nl> <nl> w_pending_coll_init ( & root - > pending ) ; <nl> - root - > root_path = w_string_new ( path ) ; <nl> + root - > root_path = w_string_new_typed ( path , W_STRING_BYTE ) ; <nl> root - > commands = w_ht_new ( 2 , & trigger_hash_funcs ) ; <nl> root - > query_cookies = w_ht_new ( 2 , & w_ht_string_funcs ) ; <nl> w_ignore_init ( & root - > ignore ) ; <nl> static void stat_path ( w_root_t * root , <nl> w_log ( W_LOG_ERR , <nl> " readlink ( % s ) errno = % d tlen = % d \ n " , path , errno , ( int ) tlen ) ; <nl> } else { <nl> - file - > symlink_target = w_string_new_len ( link_target_path , tlen ) ; <nl> + file - > symlink_target = w_string_new_len_typed ( link_target_path , tlen , <nl> + W_STRING_BYTE ) ; <nl> } <nl> } <nl> # endif <nl> static void crawler ( w_root_t * root , struct watchman_pending_collection * coll , <nl> } <nl> <nl> / / Queue it up for analysis if the file is newly existing <nl> - name = w_string_new ( dirent - > d_name ) ; <nl> + name = w_string_new_typed ( dirent - > d_name , W_STRING_BYTE ) ; <nl> if ( dir - > files ) { <nl> file = w_ht_val_ptr ( w_ht_get ( dir - > files , w_ht_ptr_val ( name ) ) ) ; <nl> } else { <nl> static bool vcs_file_exists ( w_root_t * root , <nl> w_string_t * dir_name ; <nl> w_string_t * rel_dir_name ; <nl> <nl> - rel_dir_name = w_string_new ( dname ) ; <nl> + rel_dir_name = w_string_new_typed ( dname , W_STRING_BYTE ) ; <nl> dir_name = w_string_path_cat ( root - > root_path , rel_dir_name ) ; <nl> w_string_delref ( rel_dir_name ) ; <nl> <nl> static bool vcs_file_exists ( w_root_t * root , <nl> return false ; <nl> } <nl> <nl> - file_name = w_string_new ( fname ) ; <nl> + file_name = w_string_new_typed ( fname , W_STRING_BYTE ) ; <nl> file = w_ht_val_ptr ( w_ht_get ( dir - > files , w_ht_ptr_val ( file_name ) ) ) ; <nl> w_string_delref ( file_name ) ; <nl> <nl> static inline bool is_slash ( char c ) { <nl> char * w_find_enclosing_root ( const char * filename , char * * relpath ) { <nl> w_ht_iter_t i ; <nl> w_root_t * root = NULL ; <nl> - w_string_t * name = w_string_new ( filename ) ; <nl> + w_string_t * name = w_string_new_typed ( filename , W_STRING_BYTE ) ; <nl> char * prefix = NULL ; <nl> <nl> pthread_mutex_lock ( & root_lock ) ; <nl> static w_root_t * root_resolve ( const char * filename , bool auto_watch , <nl> watch_path = ( char * ) filename ; <nl> } <nl> <nl> - root_str = w_string_new ( watch_path ) ; <nl> + root_str = w_string_new_typed ( watch_path , W_STRING_BYTE ) ; <nl> pthread_mutex_lock ( & root_lock ) ; <nl> / / This will addref if it returns root <nl> if ( w_ht_lookup ( watched_roots , w_ht_ptr_val ( root_str ) , & root_val , true ) ) { <nl>
|
Fixes root.c to use the new string functions.
|
facebook/watchman
|
f38a679e09d6c4bbbb0fcdad2ccd53a71c4a2f78
|
2016-07-20T21:19:30Z
|
mmm a / tensorflow / core / framework / BUILD <nl> ppp b / tensorflow / core / framework / BUILD <nl> tf_cc_tests ( <nl> srcs = [ <nl> " allocator_test . cc " , <nl> " attr_value_util_test . cc " , <nl> + " batch_util_test . cc " , <nl> " bfloat16_test . cc " , <nl> " cancellation_test . cc " , <nl> " common_shape_fns_test . cc " , <nl> new file mode 100644 <nl> index 0000000000000 . . 4e98371bda70c <nl> mmm / dev / null <nl> ppp b / tensorflow / core / framework / batch_util_test . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / framework / tensor . h " <nl> + # include " tensorflow / core / framework / tensor_testutil . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace { <nl> + <nl> + TEST ( CopyContiguousSlicesTest , CompatibleShape ) { <nl> + Tensor src ( DT_FLOAT , { 7 , 1 , 2 } ) ; <nl> + Tensor dst ( DT_FLOAT , { 9 , 2 , 1 } ) ; <nl> + auto s = batch_util : : CopyContiguousSlices ( <nl> + src , / * src_offset = * / 2 , / * dst_offset = * / 0 , / * num_slices = * / 5 , & dst ) ; <nl> + ASSERT_EQ ( error : : OK , s . code ( ) ) ; <nl> + } <nl> + <nl> + TEST ( CopyContiguousSlicesTest , SourceOffsetOutOfRange ) { <nl> + Tensor src ( DT_FLOAT , { 7 , 1 , 2 } ) ; <nl> + Tensor dst ( DT_FLOAT , { 9 , 2 , 1 } ) ; <nl> + auto s = batch_util : : CopyContiguousSlices ( <nl> + src , / * src_offset = * / 7 , / * dst_offset = * / 0 , / * num_slices = * / 5 , & dst ) ; <nl> + ASSERT_EQ ( error : : FAILED_PRECONDITION , s . code ( ) ) ; <nl> + } <nl> + <nl> + TEST ( CopyContiguousSlicesTest , DstOffsetOutOfRange ) { <nl> + Tensor src ( DT_FLOAT , { 7 , 1 , 2 } ) ; <nl> + Tensor dst ( DT_FLOAT , { 9 , 2 , 1 } ) ; <nl> + auto s = batch_util : : CopyContiguousSlices ( <nl> + src , / * src_offset = * / 0 , / * dst_offset = * / 0 , / * num_slices = * / 8 , & dst ) ; <nl> + ASSERT_EQ ( error : : FAILED_PRECONDITION , s . code ( ) ) ; <nl> + } <nl> + <nl> + TEST ( CopyContiguousSlicesTest , CheckDstWithExpectedValues ) { <nl> + auto src = test : : AsTensor < float > ( { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } , <nl> + TensorShape ( { 5 , 2 } ) ) ; <nl> + Tensor dst ( DT_FLOAT , { 9 , 2 , 1 } ) ; <nl> + auto s = batch_util : : CopyContiguousSlices ( <nl> + src , / * src_offset = * / 1 , / * dst_offset = * / 5 , / * num_slices = * / 3 , & dst ) ; <nl> + ASSERT_EQ ( error : : OK , s . code ( ) ) ; <nl> + test : : ExpectTensorEqual < float > ( <nl> + test : : AsTensor < float > ( { 2 , 3 , 4 , 5 , 6 , 7 } , TensorShape ( { 3 , 2 , 1 } ) ) , <nl> + dst . 
Slice ( 5 , 8 ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace tensorflow <nl> mmm a / tensorflow / core / framework / tensor . h <nl> ppp b / tensorflow / core / framework / tensor . h <nl> namespace batch_util { <nl> Status CopyElementToSlice ( Tensor element , Tensor * parent , int64 index ) ; <nl> Status CopySliceToElement ( const Tensor & parent , Tensor * element , int64 index ) ; <nl> Status MaybeMoveSliceToElement ( Tensor * parent , Tensor * element , int64 index ) ; <nl> + Status CopyContiguousSlices ( const Tensor & src , int64 src_offset , <nl> + int64 dst_offset , int64 num_slices , Tensor * dst ) ; <nl> } / / namespace batch_util <nl> <nl> / / / @ ingroup core <nl> class Tensor { <nl> friend Status batch_util : : MaybeMoveSliceToElement ( <nl> Tensor * parent , Tensor * element , <nl> int64 index ) ; / / For access to base < T > ( ) . <nl> + friend Status batch_util : : CopyContiguousSlices ( <nl> + const Tensor & src , int64 src_offset , int64 dst_offset , int64 num_slices , <nl> + Tensor * dst ) ; / / For access to base < T > ( ) . <nl> <nl> bool CanUseDMA ( ) const ; <nl> <nl> mmm a / tensorflow / core / util / batch_util . cc <nl> ppp b / tensorflow / core / util / batch_util . cc <nl> Status CopySliceToElement ( const Tensor & parent , Tensor * element , int64 index ) { <nl> } <nl> } <nl> <nl> + Status CopyContiguousSlices ( const Tensor & src , int64 src_offset , <nl> + int64 dst_offset , int64 num_slices , Tensor * dst ) { <nl> + if ( src . dtype ( ) ! = dst - > dtype ( ) ) { <nl> + return errors : : FailedPrecondition ( <nl> + " CopyContiguousSlices cannot perform copy : src and dst have different " <nl> + " dtypes . Source dtype : " , <nl> + src . dtype ( ) , " dstination dtype : " , dst - > dtype ( ) , " . " ) ; <nl> + } <nl> + if ( src . dims ( ) < 1 ) { <nl> + return errors : : FailedPrecondition ( <nl> + " CopyContiguousSlices cannot perform copy : src has to be a tensor with " <nl> + " rank > = 1 . Source shape : " , <nl> + src . shape ( ) . DebugString ( ) ) ; <nl> + } <nl> + <nl> + if ( dst - > dims ( ) < 1 ) { <nl> + return errors : : FailedPrecondition ( <nl> + " CopyContiguousSlices cannot perform copy : dst has to be a tensor " <nl> + " with rank > = 1 . Dest shape : " , <nl> + dst - > shape ( ) . DebugString ( ) ) ; <nl> + } <nl> + <nl> + const int64 src_dim0 = src . dim_size ( 0 ) ; <nl> + const int64 dst_dim0 = dst - > dim_size ( 0 ) ; <nl> + int64 src_chip_size = 1 ; <nl> + int64 dst_chip_size = 1 ; <nl> + for ( int i = 1 ; i < src . dims ( ) ; + + i ) { <nl> + src_chip_size * = src . dim_size ( i ) ; <nl> + } <nl> + for ( int i = 1 ; i < dst - > dims ( ) ; + + i ) { <nl> + dst_chip_size * = dst - > dim_size ( i ) ; <nl> + } <nl> + <nl> + if ( src_chip_size ! = dst_chip_size ) { <nl> + return errors : : FailedPrecondition ( <nl> + " CopyContiguousSlices cannot perform copy : source and dst shapes are " <nl> + " not compatible . Source shape : " , <nl> + src . shape ( ) . DebugString ( ) , " , dst shape : " , dst - > shape ( ) . DebugString ( ) ) ; <nl> + } <nl> + <nl> + if ( src_chip_size = = 0 & & dst_chip_size = = 0 ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + if ( src_offset < 0 | | src_offset + num_slices > src_dim0 | | dst_offset < 0 | | <nl> + dst_offset + num_slices > dst_dim0 ) { <nl> + return errors : : FailedPrecondition ( <nl> + " CopyContiguousSlices cannot perform copy : index out of range . 
" <nl> + " src_offset : " , <nl> + src_offset , " , num_slices : " , num_slices , " , src_dim0 : " , src_dim0 , <nl> + " , dst_offset : " , dst_offset , " , dst_dim0 : " , dst_dim0 , " . " ) ; <nl> + } <nl> + <nl> + # define HANDLE_TYPE ( T ) \ <nl> + case DataTypeToEnum < T > : : value : { \ <nl> + const T * src_p = src . base < T > ( ) + ( src_chip_size * src_offset ) ; \ <nl> + T * dst_p = dst - > base < T > ( ) + ( dst_chip_size * dst_offset ) ; \ <nl> + HandleSliceToElement < T > ( src_p , dst_p , src_chip_size * num_slices ) ; \ <nl> + return Status : : OK ( ) ; \ <nl> + } <nl> + <nl> + switch ( src . dtype ( ) ) { <nl> + TF_CALL_ALL_TYPES ( HANDLE_TYPE ) ; <nl> + TF_CALL_QUANTIZED_TYPES ( HANDLE_TYPE ) ; <nl> + TF_CALL_uint32 ( HANDLE_TYPE ) ; <nl> + TF_CALL_uint64 ( HANDLE_TYPE ) ; <nl> + # undef HANDLE_TYPE <nl> + default : <nl> + return errors : : Unimplemented ( " CopyContiguousSlices unhandled data type : " , <nl> + src . dtype ( ) ) ; <nl> + } <nl> + } <nl> + <nl> / / Copies the index ^ th slice of parent ( in the 0th dimension ) into element . <nl> / / <nl> / / NOTE ( mrry ) : The implementation may be able to optimize the copy to a move . <nl> mmm a / tensorflow / core / util / batch_util . h <nl> ppp b / tensorflow / core / util / batch_util . h <nl> Status CopyElementToSlice ( Tensor element , Tensor * parent , int64 index ) ; <nl> / / Copies the index ^ th slice of parent ( in the 0th dimension ) into element . <nl> Status CopySliceToElement ( const Tensor & parent , Tensor * element , int64 index ) ; <nl> <nl> + / / Copies ' num_slices ' contiguous slices from ' src ' tensor starting from index <nl> + / / ' src_offset ' into target tensor ' dst ' , and places them into slices <nl> + / / starting from ' dst_offset ' . <nl> + / / <nl> + / / This function requires ' src ' and ' dst ' to have compatible shapes . That is it <nl> + / / requires cum_prod ( src . shape [ 1 : ] = = cum_prod ( dst - > shape [ 1 : ] ) . For example if <nl> + / / source is of shape [ x , 2 , 1 ] and dst is a tensor of shape [ y , 1 , 2 ] , this <nl> + / / function can still proceed successfully . <nl> + Status CopyContiguousSlices ( const Tensor & src , int64 src_offset , <nl> + int64 dst_offset , int64 num_slices , Tensor * dst ) ; <nl> + <nl> / / Copies the index ^ th slice of parent ( in the 0th dimension ) into element . <nl> / / <nl> / / NOTE ( mrry ) : The implementation may be able to optimize the copy to a move . <nl>
|
add a tensorflow::batch_util::CopyContiguousSlices utility function for
|
tensorflow/tensorflow
|
a67ee929f5aa2e16478d10e3287a248f34078cb5
|
2020-05-27T17:26:55Z
|
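The comment added to batch_util.h above states the shape-compatibility rule with a dropped parenthesis; the intended condition is `cum_prod(src.shape[1:]) == cum_prod(dst->shape[1:])`, i.e. the two tensors may have different trailing dimensions as long as each slice holds the same number of elements. Below is a condensed usage sketch, distilled from the new tests rather than taken from the commit itself.

```cpp
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/util/batch_util.h"

namespace tensorflow {

// Copy rows 1..3 of a [5, 2] float tensor into rows 5..7 of a [9, 2, 1]
// tensor. The shapes are compatible: every slice holds 2 elements.
Status CopyThreeSlices(const Tensor& src, Tensor* dst) {
  return batch_util::CopyContiguousSlices(
      src, /*src_offset=*/1, /*dst_offset=*/5, /*num_slices=*/3, dst);
}

}  // namespace tensorflow
```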
mmm a / docker / test / performance - comparison / compare . sh <nl> ppp b / docker / test / performance - comparison / compare . sh <nl> function restart <nl> <nl> set - m # Spawn servers in their own process groups <nl> <nl> - numactl - - cpunodebind = 0 - - localalloc \ <nl> + numactl - - cpunodebind = 0 - - membind = 0 \ <nl> left / clickhouse - server - - config - file = left / config / config . xml \ <nl> - - - - path left / db - - user_files_path left / db / user_files \ <nl> & > > left - server - log . log & <nl> function restart <nl> kill - 0 $ left_pid <nl> disown $ left_pid <nl> <nl> - numactl - - cpunodebind = 0 - - localalloc \ <nl> + numactl - - cpunodebind = 0 - - membind = 0 \ <nl> right / clickhouse - server - - config - file = right / config / config . xml \ <nl> - - - - path right / db - - user_files_path right / db / user_files \ <nl> & > > right - server - log . log & <nl>
|
Update compare.sh
|
ClickHouse/ClickHouse
|
15cd448afaf98c57f5300674e451eebc5b1f427e
|
2020-07-23T14:59:32Z
|
mmm a / tensorflow / contrib / tensorrt / convert / convert_graph . cc <nl> ppp b / tensorflow / contrib / tensorrt / convert / convert_graph . cc <nl> std : : pair < int , tensorflow : : Allocator * > GetDeviceAndAllocator ( <nl> / / If device is not set , use the first found GPU device for the conversion . <nl> for ( int tf_gpu_id_value = 0 ; tf_gpu_id_value < 100 ; + + tf_gpu_id_value ) { <nl> TfGpuId tf_gpu_id ( tf_gpu_id_value ) ; <nl> - CudaGpuId cuda_gpu_id ; <nl> - Status s = GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + Status s = GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ; <nl> if ( s . ok ( ) ) { <nl> VLOG ( 1 ) < < " Found TF GPU " < < tf_gpu_id . value ( ) < < " at cuda device " <nl> - < < cuda_gpu_id . value ( ) ; <nl> - cuda_device_id = cuda_gpu_id . value ( ) ; <nl> + < < platform_gpu_id . value ( ) ; <nl> + cuda_device_id = platform_gpu_id . value ( ) ; <nl> GPUOptions gpu_options ; <nl> / / If the TF to Cuda gpu id mapping exist , the device and corresponding <nl> / / allocator must have been initialized already , so the <nl> mmm a / tensorflow / contrib / tensorrt / kernels / trt_engine_op . cc <nl> ppp b / tensorflow / contrib / tensorrt / kernels / trt_engine_op . cc <nl> tensorflow : : Status TRTEngineOp : : AllocateCalibrationResources ( <nl> new TRTInt8Calibrator ( device_buffers_ , batch_size , name ( ) ) ) ; <nl> const string label ( name ( ) ) ; <nl> auto segment_graph = & segment_graph_ ; <nl> - const int cuda_gpu_id = ctx - > device ( ) - > tensorflow_gpu_device_info ( ) - > gpu_id ; <nl> - if ( cuda_gpu_id < 0 ) { <nl> + const int platform_gpu_id = <nl> + ctx - > device ( ) - > tensorflow_gpu_device_info ( ) - > gpu_id ; <nl> + if ( platform_gpu_id < 0 ) { <nl> LOG ( ERROR ) < < " Can ' t get gpu_device_info from context - > device ( ) " ; <nl> return tensorflow : : errors : : InvalidArgument ( <nl> " Context - > device doesn ' t contain device info ! " ) ; <nl> } <nl> const int64 workspace_size_bytes = workspace_size_ ; <nl> cres - > thr_ . reset ( new std : : thread ( [ cres , label , segment_graph , shapes , <nl> - cuda_gpu_id , workspace_size_bytes ] ( ) { <nl> - VLOG ( 0 ) < < " Starting calibration thread on device " < < cuda_gpu_id <nl> + platform_gpu_id , workspace_size_bytes ] ( ) { <nl> + VLOG ( 0 ) < < " Starting calibration thread on device " < < platform_gpu_id <nl> < < " , Calibration Resource @ " < < cres ; <nl> - auto err = cudaSetDevice ( cuda_gpu_id ) ; <nl> + auto err = cudaSetDevice ( platform_gpu_id ) ; <nl> if ( err ! = cudaSuccess ) { <nl> / / TODO ( aaroey ) : should return error here . <nl> - LOG ( ERROR ) < < " Couldn ' t set cuda device to " < < cuda_gpu_id <nl> + LOG ( ERROR ) < < " Couldn ' t set cuda device to " < < platform_gpu_id <nl> < < " in calibration thread " ; <nl> } <nl> / / ConvertGraphDefToEngine ( ) will try to build the engine . This thread <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . h <nl> namespace tensorflow { <nl> / / Suballocator for GPU memory . <nl> class GPUMemAllocator : public SubAllocator { <nl> public : <nl> - / / ' cuda_gpu_id ' refers to the ID of the GPU device within <nl> + / / ' platform_gpu_id ' refers to the ID of the GPU device within <nl> / / the process and must reference a valid ID in the process . <nl> / / Note : stream_exec cannot be null . 
<nl> - explicit GPUMemAllocator ( se : : StreamExecutor * stream_exec , CudaGpuId gpu_id , <nl> - bool use_unified_memory , <nl> + explicit GPUMemAllocator ( se : : StreamExecutor * stream_exec , <nl> + PlatformGpuId gpu_id , bool use_unified_memory , <nl> const std : : vector < Visitor > & alloc_visitors , <nl> const std : : vector < Visitor > & free_visitors ) <nl> : SubAllocator ( alloc_visitors , free_visitors ) , <nl> class GPUMemAllocator : public SubAllocator { <nl> <nl> private : <nl> se : : StreamExecutor * stream_exec_ ; / / not owned , non - null <nl> - const CudaGpuId gpu_id_ ; <nl> + const PlatformGpuId gpu_id_ ; <nl> const bool use_unified_memory_ = false ; <nl> <nl> TF_DISALLOW_COPY_AND_ASSIGN ( GPUMemAllocator ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator_test . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator_test . cc <nl> static void CheckStats ( Allocator * a , int64 num_allocs , int64 bytes_in_use , <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , NoDups ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> CheckStats ( & a , 0 , 0 , 0 , 0 ) ; <nl> <nl> TEST ( GPUBFCAllocatorTest , NoDups ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocations ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> / / Allocate 256 raw pointers of sizes between 100 bytes and about <nl> / / a meg <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocations ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , ExerciseCoalescing ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> CheckStats ( & a , 0 , 0 , 0 , 0 ) ; <nl> <nl> TEST ( GPUBFCAllocatorTest , ExerciseCoalescing ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , AllocateZeroBufSize ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . 
ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> float * ptr = a . Allocate < float > ( 0 ) ; <nl> EXPECT_EQ ( nullptr , ptr ) ; <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , TracksSizes ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> EXPECT_EQ ( true , a . TracksAllocationSizes ( ) ) ; <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , AllocatedVsRequested ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> float * t1 = a . Allocate < float > ( 1 ) ; <nl> EXPECT_EQ ( 4 , a . RequestedSize ( t1 ) ) ; <nl> TEST ( GPUBFCAllocatorTest , AllocatedVsRequested ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , TestCustomMemoryLimit ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> / / Configure a 1MiB byte limit <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 20 , " GPU_0_bfc " ) ; <nl> <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocationsWithGrowth ) { <nl> options . set_allow_growth ( true ) ; <nl> <nl> / / Max of 2GiB , but starts out small . <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1LL < < 31 , " GPU_0_bfc " ) ; <nl> <nl> / / Allocate 10 raw pointers of sizes between 100 bytes and about <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocationsWithGrowth ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , DISABLED_AllocatorReceivesZeroMemory ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . 
ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1UL < < 60 , " GPU_0_bfc " ) ; <nl> sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator b ( sub_allocator , 1UL < < 60 , " GPU_0_bfc " ) ; <nl> void * amem = a . AllocateRaw ( 1 , 1 ) ; <nl> void * bmem = b . AllocateRaw ( 1 , 1 < < 30 ) ; <nl> TEST ( GPUBFCAllocatorTest , DISABLED_AllocatorReceivesZeroMemory ) { <nl> } <nl> <nl> static void BM_Allocation ( int iters ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1uLL < < 33 , " GPU_0_bfc " ) ; <nl> / / Exercise a few different allocation sizes <nl> std : : vector < size_t > sizes = { 256 , 4096 , 16384 , 524288 , <nl> static void BM_Allocation ( int iters ) { <nl> BENCHMARK ( BM_Allocation ) ; <nl> <nl> static void BM_AllocationThreaded ( int iters , int num_threads ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1uLL < < 33 , " GPU_0_bfc " ) ; <nl> thread : : ThreadPool pool ( Env : : Default ( ) , " test " , num_threads ) ; <nl> std : : atomic_int_fast32_t count ( iters ) ; <nl> BENCHMARK ( BM_AllocationThreaded ) - > Arg ( 1 ) - > Arg ( 4 ) - > Arg ( 16 ) ; <nl> / / A more complex benchmark that defers deallocation of an object for <nl> / / " delay " allocations . <nl> static void BM_AllocationDelayed ( int iters , int delay ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> / / Exercise a few different allocation sizes <nl> std : : vector < int > sizes = { 256 , 4096 , 16384 , 4096 , 512 , 1024 , 1024 } ; <nl> class GPUBFCAllocatorPrivateMethodsTest : public : : testing : : Test { <nl> / / only methods inside this class can access private members of BFCAllocator . 
<nl> <nl> void TestBinDebugInfo ( ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 < < 30 , " GPU_0_bfc " ) ; <nl> <nl> std : : vector < void * > initial_ptrs ; <nl> class GPUBFCAllocatorPrivateMethodsTest : public : : testing : : Test { <nl> } <nl> <nl> void TestLog2FloorNonZeroSlow ( ) { <nl> - CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUBFCAllocator a ( sub_allocator , 1 / * total_memory * / , " GPU_0_bfc " ) ; <nl> EXPECT_EQ ( - 1 , a . Log2FloorNonZeroSlow ( 0 ) ) ; <nl> EXPECT_EQ ( 0 , a . Log2FloorNonZeroSlow ( 1 ) ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . cc <nl> limitations under the License . <nl> namespace tensorflow { <nl> <nl> GPUcudaMallocAllocator : : GPUcudaMallocAllocator ( Allocator * allocator , <nl> - CudaGpuId cuda_gpu_id ) <nl> + PlatformGpuId platform_gpu_id ) <nl> : base_allocator_ ( allocator ) { <nl> - stream_exec_ = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + stream_exec_ = <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> } <nl> <nl> GPUcudaMallocAllocator : : ~ GPUcudaMallocAllocator ( ) { delete base_allocator_ ; } <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . h <nl> namespace tensorflow { <nl> / / allocated memory . <nl> class GPUcudaMallocAllocator : public Allocator { <nl> public : <nl> - explicit GPUcudaMallocAllocator ( Allocator * allocator , CudaGpuId cuda_gpu_id ) ; <nl> + explicit GPUcudaMallocAllocator ( Allocator * allocator , <nl> + PlatformGpuId platform_gpu_id ) ; <nl> ~ GPUcudaMallocAllocator ( ) override ; <nl> string Name ( ) override { return " gpu_debug " ; } <nl> void * AllocateRaw ( size_t alignment , size_t num_bytes ) override ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . cc <nl> void InitMask ( se : : StreamExecutor * exec , void * ptr , int64 * mask ) { <nl> / / GPUDebugAllocator <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> GPUDebugAllocator : : GPUDebugAllocator ( Allocator * allocator , <nl> - CudaGpuId cuda_gpu_id ) <nl> + PlatformGpuId platform_gpu_id ) <nl> : base_allocator_ ( allocator ) { <nl> - stream_exec_ = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + stream_exec_ = <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . 
ValueOrDie ( ) ; <nl> } <nl> <nl> GPUDebugAllocator : : ~ GPUDebugAllocator ( ) { delete base_allocator_ ; } <nl> bool GPUDebugAllocator : : CheckFooter ( void * ptr ) { <nl> / / GPUNanResetAllocator <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> GPUNanResetAllocator : : GPUNanResetAllocator ( Allocator * allocator , <nl> - CudaGpuId cuda_gpu_id ) <nl> + PlatformGpuId platform_gpu_id ) <nl> : base_allocator_ ( allocator ) { <nl> - stream_exec_ = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + stream_exec_ = <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> } <nl> <nl> GPUNanResetAllocator : : ~ GPUNanResetAllocator ( ) { delete base_allocator_ ; } <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . h <nl> namespace tensorflow { <nl> / / allocated memory . <nl> class GPUDebugAllocator : public Allocator { <nl> public : <nl> - explicit GPUDebugAllocator ( Allocator * allocator , CudaGpuId cuda_gpu_id ) ; <nl> + explicit GPUDebugAllocator ( Allocator * allocator , <nl> + PlatformGpuId platform_gpu_id ) ; <nl> ~ GPUDebugAllocator ( ) override ; <nl> string Name ( ) override { return " gpu_debug " ; } <nl> void * AllocateRaw ( size_t alignment , size_t num_bytes ) override ; <nl> class GPUDebugAllocator : public Allocator { <nl> / / user forgets to initialize the memory . <nl> class GPUNanResetAllocator : public Allocator { <nl> public : <nl> - explicit GPUNanResetAllocator ( Allocator * allocator , CudaGpuId cuda_gpu_id ) ; <nl> + explicit GPUNanResetAllocator ( Allocator * allocator , <nl> + PlatformGpuId platform_gpu_id ) ; <nl> ~ GPUNanResetAllocator ( ) override ; <nl> string Name ( ) override { return " gpu_nan_reset " ; } <nl> void * AllocateRaw ( size_t alignment , size_t num_bytes ) override ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_debug_allocator_test . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_debug_allocator_test . cc <nl> namespace tensorflow { <nl> namespace { <nl> <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_None ) { <nl> - const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + const PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUDebugAllocator a ( new GPUBFCAllocator ( sub_allocator , 1 < < 30 , " " ) , <nl> - cuda_gpu_id ) ; <nl> - auto stream_exec = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + platform_gpu_id ) ; <nl> + auto stream_exec = <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> for ( int s : { 8 } ) { <nl> std : : vector < int64 > cpu_array ( s ) ; <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_Header ) { <nl> for ( int s : { 8 , 211 } ) { <nl> EXPECT_DEATH ( <nl> { <nl> - const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + const PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . 
ValueOrDie ( ) , <nl> - cuda_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUDebugAllocator a ( new GPUBFCAllocator ( sub_allocator , 1 < < 30 , " " ) , <nl> - cuda_gpu_id ) ; <nl> + platform_gpu_id ) ; <nl> auto stream_exec = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < int64 > cpu_array ( s ) ; <nl> memset ( & cpu_array [ 0 ] , 0 , cpu_array . size ( ) * sizeof ( int64 ) ) ; <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_Footer ) { <nl> for ( int s : { 8 , 22 } ) { <nl> EXPECT_DEATH ( <nl> { <nl> - const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + const PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , <nl> - cuda_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUDebugAllocator a ( new GPUBFCAllocator ( sub_allocator , 1 < < 30 , " " ) , <nl> - cuda_gpu_id ) ; <nl> + platform_gpu_id ) ; <nl> auto stream_exec = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < int64 > cpu_array ( s ) ; <nl> memset ( & cpu_array [ 0 ] , 0 , cpu_array . size ( ) * sizeof ( int64 ) ) ; <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_Footer ) { <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , ResetToNan ) { <nl> - const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + const PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUNanResetAllocator a ( new GPUBFCAllocator ( sub_allocator , 1 < < 30 , " " ) , <nl> - cuda_gpu_id ) ; <nl> - auto stream_exec = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + platform_gpu_id ) ; <nl> + auto stream_exec = <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < float > cpu_array ( 1024 ) ; <nl> std : : vector < float > cpu_array_result ( 1024 ) ; <nl> TEST ( GPUDebugAllocatorTest , ResetToNan ) { <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , ResetToNanWithHeaderFooter ) { <nl> - const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + const PlatformGpuId platform_gpu_id ( 0 ) ; <nl> / / NaN reset must be the outer - most allocator . <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . 
ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUNanResetAllocator a ( <nl> new GPUDebugAllocator ( new GPUBFCAllocator ( sub_allocator , 1 < < 30 , " " ) , <nl> - cuda_gpu_id ) , <nl> - cuda_gpu_id ) ; <nl> - auto stream_exec = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + platform_gpu_id ) , <nl> + platform_gpu_id ) ; <nl> + auto stream_exec = <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < float > cpu_array ( 1024 ) ; <nl> std : : vector < float > cpu_array_result ( 1024 ) ; <nl> TEST ( GPUDebugAllocatorTest , ResetToNanWithHeaderFooter ) { <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , TracksSizes ) { <nl> - const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + const PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUDebugAllocator a ( new GPUBFCAllocator ( sub_allocator , 1 < < 30 , " " ) , <nl> - cuda_gpu_id ) ; <nl> + platform_gpu_id ) ; <nl> EXPECT_EQ ( true , a . TracksAllocationSizes ( ) ) ; <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , AllocatedVsRequested ) { <nl> - const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + const PlatformGpuId platform_gpu_id ( 0 ) ; <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> - false / * use_unified_memory * / , { } , { } ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , false / * use_unified_memory * / , { } , { } ) ; <nl> GPUNanResetAllocator a ( <nl> new GPUDebugAllocator ( new GPUBFCAllocator ( sub_allocator , 1 < < 30 , " " ) , <nl> - cuda_gpu_id ) , <nl> - cuda_gpu_id ) ; <nl> + platform_gpu_id ) , <nl> + platform_gpu_id ) ; <nl> float * t1 = a . Allocate < float > ( 1 ) ; <nl> EXPECT_EQ ( 4 , a . RequestedSize ( t1 ) ) ; <nl> EXPECT_EQ ( 256 , a . AllocatedSize ( t1 ) ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device . cc <nl> class EigenCudaStreamDevice : public : : Eigen : : StreamInterface { <nl> reinterpret_cast < unsigned int * > ( scratch + Eigen : : kCudaScratchSize ) ; <nl> stream_ = cuda_stream ; <nl> allocator_ = alloc ; <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_CHECK_OK ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ) ; <nl> - device_prop_ = & Eigen : : m_deviceProperties [ cuda_gpu_id . value ( ) ] ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_CHECK_OK ( GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ) ; <nl> + device_prop_ = & Eigen : : m_deviceProperties [ platform_gpu_id . value ( ) ] ; <nl> } <nl> <nl> const cudaStream_t & stream ( ) const override { return * stream_ ; } <nl> Status BaseGPUDevice : : Init ( const SessionOptions & options ) { <nl> gpu_device_info_ - > stream = streams_ [ 0 ] - > compute ; <nl> gpu_device_info_ - > default_context = device_contexts_ [ 0 ] ; <nl> gpu_device_info_ - > event_mgr = em_ . 
get ( ) ; <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_RETURN_IF_ERROR ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id_ , & cuda_gpu_id ) ) ; <nl> - gpu_device_info_ - > gpu_id = cuda_gpu_id . value ( ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_RETURN_IF_ERROR ( <nl> + GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id_ , & platform_gpu_id ) ) ; <nl> + gpu_device_info_ - > gpu_id = platform_gpu_id . value ( ) ; <nl> set_tensorflow_gpu_device_info ( gpu_device_info_ ) ; <nl> <nl> / / Whether and how the GPU device uses its own threadpool . <nl> class ConcretePerOpGpuDevice : public PerOpGpuDevice { <nl> Eigen : : GpuDevice device_ ; <nl> } ; <nl> <nl> - / / Parse ' visible_device_list ' into a list of CUDA GPU ids . <nl> + / / Parse ' visible_device_list ' into a list of platform GPU ids . <nl> Status ParseVisibleDeviceList ( const string & visible_device_list , <nl> - std : : vector < CudaGpuId > * visible_gpu_order ) { <nl> + std : : vector < PlatformGpuId > * visible_gpu_order ) { <nl> visible_gpu_order - > clear ( ) ; <nl> se : : Platform * gpu_manager = GPUMachineManager ( ) ; <nl> <nl> Status ParseVisibleDeviceList ( const string & visible_device_list , <nl> } else { <nl> const std : : vector < string > order_str = <nl> str_util : : Split ( visible_device_list , ' , ' ) ; <nl> - for ( const string & cuda_gpu_id_str : order_str ) { <nl> - int32 cuda_gpu_id ; <nl> - if ( ! strings : : safe_strto32 ( cuda_gpu_id_str , & cuda_gpu_id ) ) { <nl> + for ( const string & platform_gpu_id_str : order_str ) { <nl> + int32 platform_gpu_id ; <nl> + if ( ! strings : : safe_strto32 ( platform_gpu_id_str , & platform_gpu_id ) ) { <nl> return errors : : InvalidArgument ( <nl> " Could not parse entry in ' visible_device_list ' : ' " , <nl> - cuda_gpu_id_str , " ' . visible_device_list = " , visible_device_list ) ; <nl> + platform_gpu_id_str , " ' . visible_device_list = " , <nl> + visible_device_list ) ; <nl> } <nl> - if ( cuda_gpu_id < 0 | | cuda_gpu_id > = gpu_manager - > VisibleDeviceCount ( ) ) { <nl> + if ( platform_gpu_id < 0 | | <nl> + platform_gpu_id > = gpu_manager - > VisibleDeviceCount ( ) ) { <nl> return errors : : InvalidArgument ( <nl> - " ' visible_device_list ' listed an invalid GPU id ' " , cuda_gpu_id , <nl> + " ' visible_device_list ' listed an invalid GPU id ' " , platform_gpu_id , <nl> " ' but visible device count is " , <nl> gpu_manager - > VisibleDeviceCount ( ) ) ; <nl> } <nl> - visible_gpu_order - > push_back ( CudaGpuId ( cuda_gpu_id ) ) ; <nl> + visible_gpu_order - > push_back ( PlatformGpuId ( platform_gpu_id ) ) ; <nl> } <nl> } <nl> <nl> / / Validate no repeats . <nl> - std : : set < CudaGpuId > visible_device_set ( visible_gpu_order - > begin ( ) , <nl> - visible_gpu_order - > end ( ) ) ; <nl> + std : : set < PlatformGpuId > visible_device_set ( visible_gpu_order - > begin ( ) , <nl> + visible_gpu_order - > end ( ) ) ; <nl> if ( visible_device_set . size ( ) ! 
= visible_gpu_order - > size ( ) ) { <nl> return errors : : InvalidArgument ( <nl> " visible_device_list contained a duplicate entry : " , <nl> Status ParseVisibleDeviceList ( const string & visible_device_list , <nl> <nl> Status VerifyVirtualDeviceSettings ( <nl> const size_t num_gpus_to_use , const GPUOptions & gpu_options , <nl> - const std : : vector < CudaGpuId > & visible_gpu_order , <nl> - const std : : vector < CudaGpuId > & valid_cuda_gpu_ids ) { <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order , <nl> + const std : : vector < PlatformGpuId > & valid_platform_gpu_ids ) { <nl> const auto & virtual_devices = gpu_options . experimental ( ) . virtual_devices ( ) ; <nl> CHECK ( ! virtual_devices . empty ( ) ) ; <nl> if ( gpu_options . per_process_gpu_memory_fraction ( ) > 0 ) { <nl> Status VerifyVirtualDeviceSettings ( <nl> " # GPUs in visible_device_list : " , visible_gpu_order . size ( ) , <nl> " virtual_devices . size ( ) : " , virtual_devices . size ( ) ) ; <nl> } <nl> - if ( valid_cuda_gpu_ids . size ( ) ! = virtual_devices . size ( ) ) { <nl> + if ( valid_platform_gpu_ids . size ( ) ! = virtual_devices . size ( ) ) { <nl> return errors : : Unknown ( <nl> " The number of valid GPUs doesn ' t match the number of elements in " <nl> " the virtual_devices list . " , <nl> - " # valid GPUs : " , valid_cuda_gpu_ids . size ( ) , <nl> + " # valid GPUs : " , valid_platform_gpu_ids . size ( ) , <nl> " virtual_devices . size ( ) : " , virtual_devices . size ( ) ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> int64 MinSystemMemory ( int64 available_memory ) { <nl> } <nl> <nl> / / Get the memory limit for the virtual device being created on GPU with <nl> - / / ' cuda_gpu_id ' , when that virtual device is the only virtual device being <nl> + / / ' platform_gpu_id ' , when that virtual device is the only virtual device being <nl> / / created on that GPU . <nl> Status SingleVirtualDeviceMemoryLimit ( const GPUOptions & gpu_options , <nl> - CudaGpuId cuda_gpu_id , <nl> + PlatformGpuId platform_gpu_id , <nl> int64 * memory_limit ) { <nl> int64 total_memory = 0 ; <nl> int64 available_memory = 0 ; <nl> se : : StreamExecutor * se = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> if ( ! se - > DeviceMemoryUsage ( & available_memory , & total_memory ) ) { <nl> return errors : : Unknown ( " Failed to query available memory for GPU " , <nl> - cuda_gpu_id . value ( ) ) ; <nl> + platform_gpu_id . value ( ) ) ; <nl> } <nl> <nl> int64 allocated_memory = 0 ; <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> num_gpus_to_use = iter - > second ; <nl> } <nl> const auto & gpu_options = options . config . gpu_options ( ) ; <nl> - std : : vector < CudaGpuId > visible_gpu_order ; <nl> - std : : vector < CudaGpuId > valid_cuda_gpu_ids ; <nl> + std : : vector < PlatformGpuId > visible_gpu_order ; <nl> + std : : vector < PlatformGpuId > valid_platform_gpu_ids ; <nl> / / If we aren ' t going to use any GPUs , don ' t initialize them . <nl> / / We don ' t want to call ParseVisibleDeviceList if num_gpus_to_use is 0 , <nl> / / because it treats an empty gpu_options . visible_device_list as ' all GPUs are <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> TF_RETURN_IF_ERROR ( ParseVisibleDeviceList ( gpu_options . 
visible_device_list ( ) , <nl> & visible_gpu_order ) ) ; <nl> TF_RETURN_IF_ERROR ( <nl> - GetValidDeviceIds ( visible_gpu_order , & valid_cuda_gpu_ids ) ) ; <nl> + GetValidDeviceIds ( visible_gpu_order , & valid_platform_gpu_ids ) ) ; <nl> } <nl> - if ( num_gpus_to_use > valid_cuda_gpu_ids . size ( ) ) { <nl> - num_gpus_to_use = valid_cuda_gpu_ids . size ( ) ; <nl> + if ( num_gpus_to_use > valid_platform_gpu_ids . size ( ) ) { <nl> + num_gpus_to_use = valid_platform_gpu_ids . size ( ) ; <nl> } <nl> - if ( ! valid_cuda_gpu_ids . empty ( ) ) { <nl> + if ( ! valid_platform_gpu_ids . empty ( ) ) { <nl> / / Save the original device . <nl> int original_device = 0 ; <nl> cudaError_t err = cudaGetDevice ( & original_device ) ; <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> } <nl> / / Force to implicitly initialize CUDA runtime on each valid GPU before <nl> / / CreateGPUDevice ( ) . <nl> - for ( CudaGpuId cuda_gpu_id : valid_cuda_gpu_ids ) { <nl> - err = cudaSetDevice ( cuda_gpu_id . value ( ) ) ; <nl> + for ( PlatformGpuId platform_gpu_id : valid_platform_gpu_ids ) { <nl> + err = cudaSetDevice ( platform_gpu_id . value ( ) ) ; <nl> if ( err ! = cudaSuccess ) { <nl> - return errors : : Internal ( " cudaSetDevice ( ) on GPU : " , cuda_gpu_id . value ( ) , <nl> - " failed . Status : " , cudaGetErrorString ( err ) ) ; <nl> + return errors : : Internal ( " cudaSetDevice ( ) on GPU : " , <nl> + platform_gpu_id . value ( ) , " failed . Status : " , <nl> + cudaGetErrorString ( err ) ) ; <nl> } <nl> err = cudaFree ( nullptr ) ; <nl> if ( err ! = cudaSuccess ) { <nl> - return errors : : Internal ( <nl> - " CUDA runtime implicit initialization on GPU : " , cuda_gpu_id . value ( ) , <nl> - " failed . Status : " , cudaGetErrorString ( err ) ) ; <nl> + return errors : : Internal ( " CUDA runtime implicit initialization on GPU : " , <nl> + platform_gpu_id . value ( ) , " failed . Status : " , <nl> + cudaGetErrorString ( err ) ) ; <nl> } <nl> } <nl> / / Reset to the original device . <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> LOG ( INFO ) < < line_buf ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> line_buf = strings : : StrCat ( visible_gpu_order [ i ] . value ( ) , " : " ) ; <nl> - CudaGpuId cuda_id_i = visible_gpu_order [ i ] ; <nl> + PlatformGpuId gpu_id_i = visible_gpu_order [ i ] ; <nl> for ( int j = 0 ; j < visible_gpu_order . size ( ) ; + + j ) { <nl> - CudaGpuId cuda_id_j = visible_gpu_order [ j ] ; <nl> - if ( im . directed_links . find ( { cuda_id_i , cuda_id_j } ) ! = <nl> + PlatformGpuId gpu_id_j = visible_gpu_order [ j ] ; <nl> + if ( im . directed_links . find ( { gpu_id_i , gpu_id_j } ) ! = <nl> im . directed_links . end ( ) ) { <nl> line_buf . append ( " Y " ) ; <nl> } else { <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> <nl> const auto & virtual_devices = gpu_options . experimental ( ) . virtual_devices ( ) ; <nl> if ( ! virtual_devices . empty ( ) ) { <nl> - TF_RETURN_IF_ERROR ( VerifyVirtualDeviceSettings ( <nl> - num_gpus_to_use , gpu_options , visible_gpu_order , valid_cuda_gpu_ids ) ) ; <nl> + TF_RETURN_IF_ERROR ( VerifyVirtualDeviceSettings ( num_gpus_to_use , gpu_options , <nl> + visible_gpu_order , <nl> + valid_platform_gpu_ids ) ) ; <nl> / / We ' ve verified that num_gpus_to_use > = virtual_devices . size ( ) . <nl> num_gpus_to_use = virtual_devices . size ( ) ; <nl> CHECK ( gpu_options . visible_device_list ( ) . 
empty ( ) | | <nl> - valid_cuda_gpu_ids = = visible_gpu_order ) ; <nl> + valid_platform_gpu_ids = = visible_gpu_order ) ; <nl> } <nl> int next_tf_gpu_id = 0 ; <nl> std : : vector < int64 > memory_limit_bytes ; <nl> for ( int i = 0 ; i < num_gpus_to_use ; + + i ) { <nl> - const CudaGpuId cuda_gpu_id = valid_cuda_gpu_ids [ i ] ; <nl> + const PlatformGpuId platform_gpu_id = valid_platform_gpu_ids [ i ] ; <nl> if ( virtual_devices . empty ( ) | | <nl> virtual_devices . Get ( i ) . memory_limit_mb_size ( ) = = 0 ) { <nl> int64 single_virtual_device_memory_limit = 0 ; <nl> TF_RETURN_IF_ERROR ( SingleVirtualDeviceMemoryLimit ( <nl> - gpu_options , cuda_gpu_id , & single_virtual_device_memory_limit ) ) ; <nl> + gpu_options , platform_gpu_id , & single_virtual_device_memory_limit ) ) ; <nl> memory_limit_bytes . push_back ( single_virtual_device_memory_limit ) ; <nl> } else { <nl> const auto & memory_limit_mb = virtual_devices . Get ( i ) . memory_limit_mb ( ) ; <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> TfGpuId tf_gpu_id ( next_tf_gpu_id ) ; <nl> + + next_tf_gpu_id ; <nl> TF_RETURN_IF_ERROR ( <nl> - GpuIdManager : : InsertTfCudaGpuIdPair ( tf_gpu_id , cuda_gpu_id ) ) ; <nl> + GpuIdManager : : InsertTfPlatformGpuIdPair ( tf_gpu_id , platform_gpu_id ) ) ; <nl> } <nl> } <nl> const int num_tf_gpus = next_tf_gpu_id ; <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - static string GetShortDeviceDescription ( CudaGpuId cuda_gpu_id , <nl> + static string GetShortDeviceDescription ( PlatformGpuId platform_gpu_id , <nl> const se : : DeviceDescription & desc ) { <nl> int cc_major ; <nl> int cc_minor ; <nl> static string GetShortDeviceDescription ( CudaGpuId cuda_gpu_id , <nl> cc_minor = 0 ; <nl> } <nl> / / LINT . IfChange <nl> - return strings : : StrCat ( " device : " , cuda_gpu_id . value ( ) , <nl> - " , name : " , desc . name ( ) , <nl> - " , pci bus id : " , desc . pci_bus_id ( ) , <nl> + return strings : : StrCat ( " device : " , platform_gpu_id . value ( ) , " , name : " , <nl> + desc . name ( ) , " , pci bus id : " , desc . pci_bus_id ( ) , <nl> " , compute capability : " , cc_major , " . " , cc_minor ) ; <nl> / / LINT . ThenChange ( / / tensorflow / python / platform / test . py ) <nl> } <nl> Status BaseGPUDeviceFactory : : CreateGPUDevice ( const SessionOptions & options , <nl> const string device_name = <nl> strings : : StrCat ( name_prefix , " / device : GPU : " , tf_gpu_id . value ( ) ) ; <nl> GpuIdUtil : : CheckValidTfGpuId ( tf_gpu_id ) ; <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_RETURN_IF_ERROR ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_RETURN_IF_ERROR ( <nl> + GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ) ; <nl> int numa_node = dev_locality . numa_node ( ) ; <nl> <nl> se : : StreamExecutor * se = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> const se : : DeviceDescription & desc = se - > GetDeviceDescription ( ) ; <nl> GPUProcessState * process_state = GPUProcessState : : singleton ( ) ; <nl> Allocator * gpu_allocator = process_state - > GetGPUAllocator ( <nl> Status BaseGPUDeviceFactory : : CreateGPUDevice ( const SessionOptions & options , <nl> / / TODO ( laigd ) : report error if memory_limit doesn ' t match stats . bytes_limit . 
<nl> BaseGPUDevice * gpu_device = CreateGPUDevice ( <nl> options , device_name , static_cast < Bytes > ( stats . bytes_limit ) , dev_locality , <nl> - tf_gpu_id , GetShortDeviceDescription ( cuda_gpu_id , desc ) , gpu_allocator , <nl> - ProcessState : : singleton ( ) - > GetCPUAllocator ( numa_node ) ) ; <nl> + tf_gpu_id , GetShortDeviceDescription ( platform_gpu_id , desc ) , <nl> + gpu_allocator , ProcessState : : singleton ( ) - > GetCPUAllocator ( numa_node ) ) ; <nl> LOG ( INFO ) < < " Created TensorFlow device ( " < < device_name < < " with " <nl> < < ( stats . bytes_limit > > 20 ) < < " MB memory ) - > physical GPU ( " <nl> - < < GetShortDeviceDescription ( cuda_gpu_id , desc ) < < " ) " ; <nl> + < < GetShortDeviceDescription ( platform_gpu_id , desc ) < < " ) " ; <nl> TF_RETURN_IF_ERROR ( gpu_device - > Init ( options ) ) ; <nl> devices - > push_back ( gpu_device ) ; <nl> <nl> Status BaseGPUDeviceFactory : : CreateGPUDevice ( const SessionOptions & options , <nl> } <nl> <nl> namespace { <nl> - std : : unique_ptr < std : : map < std : : pair < CudaGpuId , CudaGpuId > , bool > > <nl> + std : : unique_ptr < std : : map < std : : pair < PlatformGpuId , PlatformGpuId > , bool > > <nl> GetPeerAccessMap ( se : : Platform * platform , <nl> - const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> - std : : unique_ptr < std : : map < std : : pair < CudaGpuId , CudaGpuId > , bool > > map ( <nl> - new std : : map < std : : pair < CudaGpuId , CudaGpuId > , bool > ) ; <nl> - for ( CudaGpuId cuda_gpu_i : visible_gpu_order ) { <nl> - for ( CudaGpuId cuda_gpu_j : visible_gpu_order ) { <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order ) { <nl> + std : : unique_ptr < std : : map < std : : pair < PlatformGpuId , PlatformGpuId > , bool > > map ( <nl> + new std : : map < std : : pair < PlatformGpuId , PlatformGpuId > , bool > ) ; <nl> + for ( PlatformGpuId platform_gpu_i : visible_gpu_order ) { <nl> + for ( PlatformGpuId platform_gpu_j : visible_gpu_order ) { <nl> se : : StreamExecutor * from = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( platform , cuda_gpu_i ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform , platform_gpu_i ) <nl> + . ValueOrDie ( ) ; <nl> se : : StreamExecutor * to = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( platform , cuda_gpu_j ) . ValueOrDie ( ) ; <nl> - ( * map ) [ { cuda_gpu_i , cuda_gpu_j } ] = from - > CanEnablePeerAccessTo ( to ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform , platform_gpu_j ) <nl> + . ValueOrDie ( ) ; <nl> + ( * map ) [ { platform_gpu_i , platform_gpu_j } ] = <nl> + from - > CanEnablePeerAccessTo ( to ) ; <nl> } <nl> } <nl> <nl> GetPeerAccessMap ( se : : Platform * platform , <nl> } / / namespace <nl> <nl> Status BaseGPUDeviceFactory : : GetInterconnectMaps ( <nl> - const std : : vector < CudaGpuId > & visible_gpu_order , se : : Platform * gpu_manager , <nl> - std : : vector < InterconnectMap > * maps ) { <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order , <nl> + se : : Platform * gpu_manager , std : : vector < InterconnectMap > * maps ) { <nl> / / The default interconnect map is obtained from the StreamExecutor . <nl> auto access_map = GetPeerAccessMap ( gpu_manager , visible_gpu_order ) ; <nl> maps - > resize ( 1 ) ; <nl> InterconnectMap & imap = maps - > at ( 0 ) ; <nl> imap . name = " StreamExecutor " ; <nl> imap . 
strength = InterconnectMap : : kStreamExecutorStrength ; <nl> - for ( CudaGpuId cuda_id_i : visible_gpu_order ) { <nl> - for ( CudaGpuId cuda_id_j : visible_gpu_order ) { <nl> - if ( cuda_id_i = = cuda_id_j ) continue ; <nl> - if ( ( * access_map ) [ { cuda_id_i , cuda_id_j } ] ) { <nl> - imap . directed_links . insert ( { cuda_id_i , cuda_id_j } ) ; <nl> + for ( PlatformGpuId gpu_id_i : visible_gpu_order ) { <nl> + for ( PlatformGpuId gpu_id_j : visible_gpu_order ) { <nl> + if ( gpu_id_i = = gpu_id_j ) continue ; <nl> + if ( ( * access_map ) [ { gpu_id_i , gpu_id_j } ] ) { <nl> + imap . directed_links . insert ( { gpu_id_i , gpu_id_j } ) ; <nl> } <nl> } <nl> } <nl> Status BaseGPUDeviceFactory : : GetDeviceLocalities ( <nl> all_tf_gpu_ids . push_back ( TfGpuId ( i ) ) ; <nl> } <nl> for ( TfGpuId tf_gpu_id : all_tf_gpu_ids ) { <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_RETURN_IF_ERROR ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_RETURN_IF_ERROR ( <nl> + GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ) ; <nl> / / Get GPU bus_id from its reported NUMA affinity . Because GPUs are <nl> / / virtualized in some environments , we can ' t just use the GPU id . <nl> / / NUMA locales are indexed from 0 , buses are indexed from 1 . <nl> se : : StreamExecutor * se = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) ; <nl> const se : : DeviceDescription & desc = se - > GetDeviceDescription ( ) ; <nl> int numa_node = desc . numa_node ( ) ; <nl> if ( numa_node < 0 ) { <nl> Status BaseGPUDeviceFactory : : GetDeviceLocalities ( <nl> / / may run into trouble later with data transfer operations . The <nl> / / trouble may manifest as slower than expected performance , or <nl> / / outright failures . <nl> - LOG ( INFO ) < < " Could not identify NUMA node of CUDA gpu id " < < cuda_gpu_id <nl> + LOG ( INFO ) < < " Could not identify NUMA node of platform GPU id " <nl> + < < platform_gpu_id <nl> < < " , defaulting to 0 . Your kernel may not have been built " <nl> < < " with NUMA support . " ; <nl> numa_node = 0 ; <nl> Status BaseGPUDeviceFactory : : GetDeviceLocalities ( <nl> LocalLinks * links = dev_locality . mutable_links ( ) ; <nl> for ( const InterconnectMap & imap : interconnects ) { <nl> for ( TfGpuId tf_gpu_dst : all_tf_gpu_ids ) { <nl> - CudaGpuId cuda_gpu_dst ; <nl> + PlatformGpuId platform_gpu_dst ; <nl> TF_RETURN_IF_ERROR ( <nl> - GpuIdManager : : TfToCudaGpuId ( tf_gpu_dst , & cuda_gpu_dst ) ) ; <nl> - if ( imap . directed_links . find ( { cuda_gpu_id , cuda_gpu_dst } ) ! = <nl> + GpuIdManager : : TfToPlatformGpuId ( tf_gpu_dst , & platform_gpu_dst ) ) ; <nl> + if ( imap . directed_links . find ( { platform_gpu_id , platform_gpu_dst } ) ! = <nl> imap . directed_links . end ( ) ) { <nl> InterconnectLink * ilink = links - > add_link ( ) ; <nl> ilink - > set_device_id ( tf_gpu_dst . value ( ) ) ; <nl> Status BaseGPUDeviceFactory : : GetDeviceLocalities ( <nl> / / add high strength links to the others . 
<nl> for ( TfGpuId tf_gpu_dst : all_tf_gpu_ids ) { <nl> if ( tf_gpu_id = = tf_gpu_dst ) continue ; <nl> - CudaGpuId cuda_gpu_dst ; <nl> + PlatformGpuId platform_gpu_dst ; <nl> TF_RETURN_IF_ERROR ( <nl> - GpuIdManager : : TfToCudaGpuId ( tf_gpu_dst , & cuda_gpu_dst ) ) ; <nl> - if ( cuda_gpu_id = = cuda_gpu_dst ) { <nl> + GpuIdManager : : TfToPlatformGpuId ( tf_gpu_dst , & platform_gpu_dst ) ) ; <nl> + if ( platform_gpu_id = = platform_gpu_dst ) { <nl> InterconnectLink * ilink = links - > add_link ( ) ; <nl> ilink - > set_device_id ( tf_gpu_dst . value ( ) ) ; <nl> ilink - > set_type ( " SAME_DEVICE " ) ; <nl> Status BaseGPUDeviceFactory : : GetDeviceLocalities ( <nl> } <nl> <nl> ( * localities ) [ tf_gpu_id ] = dev_locality ; <nl> - VLOG ( 1 ) < < " GPUDevice CudaGpuId " < < cuda_gpu_id < < " TfGpuId " < < tf_gpu_id <nl> - < < " on bus " < < dev_locality . bus_id ( ) < < " numa : " < < numa_node <nl> - < < " pci : " < < desc . pci_bus_id ( ) <nl> + VLOG ( 1 ) < < " GPUDevice PlatformGpuId " < < platform_gpu_id < < " TfGpuId " <nl> + < < tf_gpu_id < < " on bus " < < dev_locality . bus_id ( ) <nl> + < < " numa : " < < numa_node < < " pci : " < < desc . pci_bus_id ( ) <nl> < < " DeviceLocality : " < < dev_locality . DebugString ( ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> Status BaseGPUDeviceFactory : : GetDeviceLocalities ( <nl> <nl> static int GetDefaultMinGPUMultiprocessorCount ( <nl> se : : Platform * gpu_manager , <nl> - const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order ) { <nl> static const int kDefaultMinGPUMultiprocessorCount = 8 ; <nl> <nl> / / Find the highest multi - processor count across all visible GPUs . <nl> int max_count = - 1 ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> auto exec_status = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( gpu_manager , visible_gpu_order [ i ] ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( gpu_manager , visible_gpu_order [ i ] ) ; <nl> if ( ! exec_status . ok ( ) ) { <nl> continue ; <nl> } <nl> static int GetDefaultMinGPUMultiprocessorCount ( <nl> <nl> static int GetMinGPUMultiprocessorCount ( <nl> se : : Platform * gpu_manager , <nl> - const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order ) { <nl> const char * tf_min_gpu_core_count = getenv ( " TF_MIN_GPU_MULTIPROCESSOR_COUNT " ) ; <nl> <nl> if ( tf_min_gpu_core_count = = nullptr | | <nl> std : : vector < CudaVersion > GetSupportedCudaComputeCapabilities ( ) { <nl> } <nl> <nl> Status EnablePeerAccess ( se : : Platform * platform , <nl> - const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order ) { <nl> int possible_peer_count = 0 ; <nl> int enabled_peer_count = 0 ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - const CudaGpuId cuda_gpu_i = visible_gpu_order [ i ] ; <nl> + const PlatformGpuId platform_gpu_i = visible_gpu_order [ i ] ; <nl> for ( int j = 0 ; j < visible_gpu_order . size ( ) ; + + j ) { <nl> - const CudaGpuId cuda_gpu_j = visible_gpu_order [ j ] ; <nl> + const PlatformGpuId platform_gpu_j = visible_gpu_order [ j ] ; <nl> / / We have already validated that ExecutorForDevice ( ) calls return OK . <nl> se : : StreamExecutor * from = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( platform , cuda_gpu_i ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform , platform_gpu_i ) <nl> + . 
ValueOrDie ( ) ; <nl> se : : StreamExecutor * to = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( platform , cuda_gpu_j ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform , platform_gpu_j ) <nl> + . ValueOrDie ( ) ; <nl> <nl> if ( from - > CanEnablePeerAccessTo ( to ) ) { <nl> + + possible_peer_count ; <nl> Status EnablePeerAccess ( se : : Platform * platform , <nl> if ( ! status . ok ( ) ) { <nl> LOG ( WARNING ) <nl> < < " Unable to enable peer access between device ordinals " <nl> - < < cuda_gpu_i < < " and " < < cuda_gpu_j < < " , status : " < < status ; <nl> + < < platform_gpu_i < < " and " < < platform_gpu_j <nl> + < < " , status : " < < status ; <nl> } else { <nl> + + enabled_peer_count ; <nl> } <nl> Status EnablePeerAccess ( se : : Platform * platform , <nl> } / / namespace <nl> <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> - const std : : vector < CudaGpuId > & visible_gpu_order , <nl> - std : : vector < CudaGpuId > * ids ) { <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order , <nl> + std : : vector < PlatformGpuId > * ids ) { <nl> se : : Platform * gpu_manager = GPUMachineManager ( ) ; <nl> bool new_gpu_found = false ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - const CudaGpuId cuda_gpu_id = visible_gpu_order [ i ] ; <nl> + const PlatformGpuId visible_gpu_id = visible_gpu_order [ i ] ; <nl> <nl> - / / Only perform this once per visible cuda gpu id . <nl> - if ( visible_gpu_initialized_ [ cuda_gpu_id . value ( ) ] ) { <nl> + / / Only perform this once per visible platform gpu id . <nl> + if ( visible_gpu_initialized_ [ visible_gpu_id . value ( ) ] ) { <nl> continue ; <nl> } <nl> <nl> - visible_gpu_initialized_ [ cuda_gpu_id . value ( ) ] = true ; <nl> + visible_gpu_initialized_ [ visible_gpu_id . value ( ) ] = true ; <nl> new_gpu_found = true ; <nl> <nl> - auto executor = GpuIdUtil : : ExecutorForCudaGpuId ( gpu_manager , cuda_gpu_id ) ; <nl> + auto executor = <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( gpu_manager , visible_gpu_id ) ; <nl> if ( ! executor . ok ( ) ) { <nl> return executor . status ( ) ; <nl> } <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> <nl> / / Filter out devices that don ' t have the right capability or power . <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - const CudaGpuId visible_gpu_id = visible_gpu_order [ i ] ; <nl> + const PlatformGpuId visible_gpu_id = visible_gpu_order [ i ] ; <nl> auto exec_status = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( gpu_manager , visible_gpu_id ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( gpu_manager , visible_gpu_id ) ; <nl> if ( ! exec_status . ok ( ) ) { <nl> LOG ( INFO ) < < " Ignoring visible gpu device " < < visible_gpu_id <nl> < < " whose executor is in invalid state : " <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> if ( ! ids - > empty ( ) ) { <nl> std : : vector < int > raw_ids ( ids - > size ( ) ) ; <nl> std : : transform ( ids - > begin ( ) , ids - > end ( ) , raw_ids . begin ( ) , <nl> - [ ] ( CudaGpuId id ) - > int { return id . value ( ) ; } ) ; <nl> + [ ] ( PlatformGpuId id ) - > int { return id . value ( ) ; } ) ; <nl> LOG ( INFO ) < < " Adding visible gpu devices : " <nl> < < str_util : : Join ( raw_ids , " , " ) ; <nl> } <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device . 
h <nl> class BaseGPUDevice : public LocalDevice { <nl> DeviceContext * dc , <nl> Allocator * allocator ) override ; <nl> <nl> - / / Returns the CUDA GPU id of this device within the native driver system ; <nl> + / / Returns the platform GPU id of this device within the native driver system ; <nl> / / e . g . , for CUDA this is the ordinal of the GPU within the system . <nl> int gpu_id ( ) const { <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_CHECK_OK ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id_ , & cuda_gpu_id ) ) ; <nl> - return cuda_gpu_id . value ( ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_CHECK_OK ( GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id_ , & platform_gpu_id ) ) ; <nl> + return platform_gpu_id . value ( ) ; <nl> } <nl> <nl> / / The executor that provides control for the device ; e . g . , for CUDA this <nl> class BaseGPUDeviceFactory : public DeviceFactory { <nl> int32 strength ; <nl> static const int kSameDeviceStrength ; <nl> static const int kStreamExecutorStrength ; <nl> - std : : set < std : : pair < CudaGpuId , CudaGpuId > > directed_links ; <nl> + std : : set < std : : pair < PlatformGpuId , PlatformGpuId > > directed_links ; <nl> } ; <nl> <nl> protected : <nl> / / Populates * maps with interconnect maps for all local direct access <nl> / / pathways between GPUs . <nl> virtual Status GetInterconnectMaps ( <nl> - const std : : vector < CudaGpuId > & visible_gpu_order , <nl> + const std : : vector < PlatformGpuId > & visible_gpu_order , <nl> se : : Platform * gpu_manager , std : : vector < InterconnectMap > * maps ) ; <nl> <nl> struct TfGpuIdHash { <nl> class BaseGPUDeviceFactory : public DeviceFactory { <nl> Allocator * gpu_allocator , <nl> Allocator * cpu_allocator ) = 0 ; <nl> <nl> - / / Returns into ' ids ' the list of valid CUDA GPU ids , in the order that <nl> + / / Returns into ' ids ' the list of valid platform GPU ids , in the order that <nl> / / they should map to TF GPU ids " / device : GPU : 0 " , " / device : GPU : 1 " , etc , <nl> / / based upon ' visible_gpu_order ' which was generated by parsing <nl> / / GPUOptions : : visible_device_list which is a comma - separated list of CUDA GPU <nl> / / ids . <nl> - Status GetValidDeviceIds ( const std : : vector < CudaGpuId > & visible_gpu_order , <nl> - std : : vector < CudaGpuId > * ids ) ; <nl> + Status GetValidDeviceIds ( const std : : vector < PlatformGpuId > & visible_gpu_order , <nl> + std : : vector < PlatformGpuId > * ids ) ; <nl> <nl> - / / visible_gpu_initialized_ [ cuda_gpu_id ] is true if visible GPU cuda_gpu_id <nl> - / / has been initialized by the process . <nl> + / / visible_gpu_initialized_ [ platform_gpu_id ] is true if visible GPU <nl> + / / platform_gpu_id has been initialized by the process . <nl> std : : unordered_map < int , bool > visible_gpu_initialized_ ; <nl> } ; <nl> <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device_test . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device_test . cc <nl> namespace tensorflow { <nl> namespace { <nl> const char * kDeviceNamePrefix = " / job : localhost / replica : 0 / task : 0 " ; <nl> <nl> - int64 GetTotalGPUMemory ( CudaGpuId gpu_id ) { <nl> + int64 GetTotalGPUMemory ( PlatformGpuId gpu_id ) { <nl> se : : StreamExecutor * se = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( GPUMachineManager ( ) , gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( GPUMachineManager ( ) , gpu_id ) <nl> + . 
ValueOrDie ( ) ; <nl> <nl> int64 total_memory , available_memory ; <nl> CHECK ( se - > DeviceMemoryUsage ( & available_memory , & total_memory ) ) ; <nl> return total_memory ; <nl> } <nl> <nl> - Status GetComputeCapability ( CudaGpuId gpu_id , int * cc_major , int * cc_minor ) { <nl> + Status GetComputeCapability ( PlatformGpuId gpu_id , int * cc_major , <nl> + int * cc_minor ) { <nl> se : : StreamExecutor * se = <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( GPUMachineManager ( ) , gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( GPUMachineManager ( ) , gpu_id ) <nl> + . ValueOrDie ( ) ; <nl> if ( ! se - > GetDeviceDescription ( ) . cuda_compute_capability ( cc_major , cc_minor ) ) { <nl> * cc_major = 0 ; <nl> * cc_minor = 0 ; <nl> TEST_F ( GPUDeviceTest , MultipleVirtualDevices ) { <nl> / / error . <nl> TEST_F ( GPUDeviceTest , UnifiedMemoryUnavailableOnPrePascalGpus ) { <nl> int cc_major , cc_minor ; <nl> - TF_ASSERT_OK ( GetComputeCapability ( CudaGpuId ( 0 ) , & cc_major , & cc_minor ) ) ; <nl> + TF_ASSERT_OK ( GetComputeCapability ( PlatformGpuId ( 0 ) , & cc_major , & cc_minor ) ) ; <nl> / / Exit early while running on Pascal or later GPUs . <nl> if ( cc_major > = 6 ) { <nl> return ; <nl> TEST_F ( GPUDeviceTest , UnifiedMemoryUnavailableOnPrePascalGpus ) { <nl> / / more memory than what is available on the device . <nl> TEST_F ( GPUDeviceTest , UnifiedMemoryAllocation ) { <nl> static constexpr double kGpuMemoryFraction = 1 . 2 ; <nl> - static constexpr CudaGpuId kCudaGpuId ( 0 ) ; <nl> + static constexpr PlatformGpuId kPlatformGpuId ( 0 ) ; <nl> <nl> int cc_major , cc_minor ; <nl> - TF_ASSERT_OK ( GetComputeCapability ( kCudaGpuId , & cc_major , & cc_minor ) ) ; <nl> + TF_ASSERT_OK ( GetComputeCapability ( kPlatformGpuId , & cc_major , & cc_minor ) ) ; <nl> / / Exit early if running on pre - Pascal GPUs . <nl> if ( cc_major < 6 ) { <nl> LOG ( INFO ) <nl> TEST_F ( GPUDeviceTest , UnifiedMemoryAllocation ) { <nl> ASSERT_EQ ( 1 , devices . size ( ) ) ; <nl> <nl> int64 memory_limit = devices [ 0 ] - > attributes ( ) . memory_limit ( ) ; <nl> - ASSERT_EQ ( memory_limit , static_cast < int64 > ( GetTotalGPUMemory ( kCudaGpuId ) * <nl> + ASSERT_EQ ( memory_limit , static_cast < int64 > ( GetTotalGPUMemory ( kPlatformGpuId ) * <nl> kGpuMemoryFraction ) ) ; <nl> <nl> AllocatorAttributes allocator_attributes = AllocatorAttributes ( ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_id . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id . h <nl> namespace tensorflow { <nl> / / physical machine , it can be filtered by CUDA environment variable <nl> / / CUDA_VISIBLE_DEVICES . Note that this id is not visible to Tensorflow , but <nl> / / result after filtering by CUDA_VISIBLE_DEVICES is visible to TF and is <nl> - / / called CUDA GPU id as below . See <nl> + / / called platform GPU id as below . See <nl> / / http : / / docs . nvidia . com / cuda / cuda - c - programming - guide / index . html # env - vars <nl> / / for more details . <nl> - / / - CUDA GPU id ( also called * visible * GPU id in <nl> + / / - * platform * GPU id ( also called * visible * GPU id in <nl> / / third_party / tensorflow / core / protobuf / config . proto ) : this is the id that is <nl> / / visible to Tensorflow after filtering by CUDA_VISIBLE_DEVICES , and is <nl> / / generated by the CUDA GPU driver . 
It starts from 0 and is used for CUDA API <nl> namespace tensorflow { <nl> / / field of the device name " / device : GPU : < id > " , and is also the identifier of <nl> / / a BaseGPUDevice . Note that the configuration allows us to create multiple <nl> / / BaseGPUDevice per GPU hardware in order to use multi CUDA streams on the <nl> - / / hardware , so the mapping between TF GPU id and CUDA GPU id is not a 1 : 1 <nl> + / / hardware , so the mapping between TF GPU id and platform GPU id is not a 1 : 1 <nl> / / mapping , see the example below . <nl> / / <nl> / / For example , assuming that in the machine we have GPU device with index 0 , 1 , <nl> / / 2 and 3 ( physical GPU id ) . Setting " CUDA_VISIBLE_DEVICES = 1 , 2 , 3 " will create <nl> - / / the following mapping between CUDA GPU id and physical GPU id : <nl> + / / the following mapping between platform GPU id and physical GPU id : <nl> / / <nl> - / / CUDA GPU id - > physical GPU id <nl> + / / platform GPU id - > physical GPU id <nl> / / 0 - > 1 <nl> / / 1 - > 2 <nl> / / 2 - > 3 <nl> namespace tensorflow { <nl> / / <nl> / / Assuming we configure the Session to create one BaseGPUDevice per GPU <nl> / / hardware , then setting GPUOptions : : visible_device_list to " 2 , 0 " will create <nl> - / / the following mappting between TF GPU id and CUDA GPU id : <nl> + / / the following mappting between TF GPU id and platform GPU id : <nl> / / <nl> - / / TF GPU id - > CUDA GPU ID <nl> + / / TF GPU id - > platform GPU ID <nl> / / 0 ( i . e . / device : GPU : 0 ) - > 2 <nl> / / 1 ( i . e . / device : GPU : 1 ) - > 0 <nl> / / <nl> - / / Note that CUDA GPU id 1 is filtered out by GPUOptions : : visible_device_list , <nl> - / / so it won ' t be used by the TF process . <nl> + / / Note that platform GPU id 1 is filtered out by <nl> + / / GPUOptions : : visible_device_list , so it won ' t be used by the TF process . <nl> / / <nl> / / On the other hand , if we configure it to create 2 BaseGPUDevice per GPU <nl> / / hardware , then setting GPUOptions : : visible_device_list to " 2 , 0 " will create <nl> - / / the following mappting between TF GPU id and CUDA GPU id : <nl> + / / the following mappting between TF GPU id and platform GPU id : <nl> / / <nl> - / / TF GPU id - > CUDA GPU ID <nl> + / / TF GPU id - > platform GPU ID <nl> / / 0 ( i . e . / device : GPU : 0 ) - > 2 <nl> / / 1 ( i . e . / device : GPU : 1 ) - > 2 <nl> / / 2 ( i . e . / device : GPU : 2 ) - > 0 <nl> / / 3 ( i . e . / device : GPU : 3 ) - > 0 <nl> / / <nl> - / / We create strong - typed integer classes for both TF GPU id and CUDA GPU id to <nl> - / / minimize programming errors and improve code readability . Except for the <nl> + / / We create strong - typed integer classes for both TF GPU id and platform GPU id <nl> + / / to minimize programming errors and improve code readability . Except for the <nl> / / StreamExecutor interface ( as we don ' t change its API ) , whenever we need a <nl> - / / TF GPU id ( or CUDA GPU id ) we should use TfGpuId ( or CudaGpuId ) instead of a <nl> - / / raw integer . <nl> + / / TF GPU id ( or platform GPU id ) we should use TfGpuId ( or PlatformGpuId ) <nl> + / / instead of a raw integer . <nl> TF_LIB_GTL_DEFINE_INT_TYPE ( TfGpuId , int32 ) ; <nl> - TF_LIB_GTL_DEFINE_INT_TYPE ( CudaGpuId , int32 ) ; <nl> + TF_LIB_GTL_DEFINE_INT_TYPE ( PlatformGpuId , int32 ) ; <nl> <nl> } / / namespace tensorflow <nl> <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_id_manager . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id_manager . 
cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> namespace { <nl> - / / Manages the map between TfGpuId and CUDA GPU id . <nl> - class TfToCudaGpuIdMap { <nl> + / / Manages the map between TfGpuId and platform GPU id . <nl> + class TfToPlatformGpuIdMap { <nl> public : <nl> - static TfToCudaGpuIdMap * singleton ( ) { <nl> - static auto * id_map = new TfToCudaGpuIdMap ; <nl> + static TfToPlatformGpuIdMap * singleton ( ) { <nl> + static auto * id_map = new TfToPlatformGpuIdMap ; <nl> return id_map ; <nl> } <nl> <nl> - Status Insert ( TfGpuId tf_gpu_id , CudaGpuId cuda_gpu_id ) LOCKS_EXCLUDED ( mu_ ) { <nl> + Status Insert ( TfGpuId tf_gpu_id , PlatformGpuId platform_gpu_id ) <nl> + LOCKS_EXCLUDED ( mu_ ) { <nl> std : : pair < IdMapType : : iterator , bool > result ; <nl> { <nl> mutex_lock lock ( mu_ ) ; <nl> - result = id_map_ . insert ( { tf_gpu_id . value ( ) , cuda_gpu_id . value ( ) } ) ; <nl> + result = id_map_ . insert ( { tf_gpu_id . value ( ) , platform_gpu_id . value ( ) } ) ; <nl> } <nl> - if ( ! result . second & & cuda_gpu_id . value ( ) ! = result . first - > second ) { <nl> + if ( ! result . second & & platform_gpu_id . value ( ) ! = result . first - > second ) { <nl> return errors : : AlreadyExists ( <nl> " TensorFlow device ( GPU : " , tf_gpu_id . value ( ) , <nl> " ) is being mapped to " <nl> " multiple CUDA devices ( " , <nl> - cuda_gpu_id . value ( ) , " now , and " , result . first - > second , <nl> + platform_gpu_id . value ( ) , " now , and " , result . first - > second , <nl> " previously ) , which is not supported . " <nl> " This may be the result of providing different GPU configurations " <nl> " ( ConfigProto . gpu_options , for example different visible_device_list ) " <nl> class TfToCudaGpuIdMap { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - bool Find ( TfGpuId tf_gpu_id , CudaGpuId * cuda_gpu_id ) const <nl> + bool Find ( TfGpuId tf_gpu_id , PlatformGpuId * platform_gpu_id ) const <nl> LOCKS_EXCLUDED ( mu_ ) { <nl> mutex_lock lock ( mu_ ) ; <nl> auto result = id_map_ . find ( tf_gpu_id . value ( ) ) ; <nl> if ( result = = id_map_ . 
end ( ) ) return false ; <nl> - * cuda_gpu_id = result - > second ; <nl> + * platform_gpu_id = result - > second ; <nl> return true ; <nl> } <nl> <nl> private : <nl> - TfToCudaGpuIdMap ( ) = default ; <nl> + TfToPlatformGpuIdMap ( ) = default ; <nl> <nl> void TestOnlyReset ( ) LOCKS_EXCLUDED ( mu_ ) { <nl> mutex_lock lock ( mu_ ) ; <nl> class TfToCudaGpuIdMap { <nl> IdMapType id_map_ GUARDED_BY ( mu_ ) ; <nl> <nl> friend class : : tensorflow : : GpuIdManager ; <nl> - TF_DISALLOW_COPY_AND_ASSIGN ( TfToCudaGpuIdMap ) ; <nl> + TF_DISALLOW_COPY_AND_ASSIGN ( TfToPlatformGpuIdMap ) ; <nl> } ; <nl> } / / namespace <nl> <nl> - Status GpuIdManager : : InsertTfCudaGpuIdPair ( TfGpuId tf_gpu_id , <nl> - CudaGpuId cuda_gpu_id ) { <nl> - return TfToCudaGpuIdMap : : singleton ( ) - > Insert ( tf_gpu_id , cuda_gpu_id ) ; <nl> + Status GpuIdManager : : InsertTfPlatformGpuIdPair ( TfGpuId tf_gpu_id , <nl> + PlatformGpuId platform_gpu_id ) { <nl> + return TfToPlatformGpuIdMap : : singleton ( ) - > Insert ( tf_gpu_id , platform_gpu_id ) ; <nl> } <nl> <nl> - Status GpuIdManager : : TfToCudaGpuId ( TfGpuId tf_gpu_id , CudaGpuId * cuda_gpu_id ) { <nl> - if ( TfToCudaGpuIdMap : : singleton ( ) - > Find ( tf_gpu_id , cuda_gpu_id ) ) { <nl> + Status GpuIdManager : : TfToPlatformGpuId ( TfGpuId tf_gpu_id , <nl> + PlatformGpuId * platform_gpu_id ) { <nl> + if ( TfToPlatformGpuIdMap : : singleton ( ) - > Find ( tf_gpu_id , platform_gpu_id ) ) { <nl> return Status : : OK ( ) ; <nl> } <nl> return errors : : NotFound ( " TensorFlow device GPU : " , tf_gpu_id . value ( ) , <nl> Status GpuIdManager : : TfToCudaGpuId ( TfGpuId tf_gpu_id , CudaGpuId * cuda_gpu_id ) { <nl> } <nl> <nl> void GpuIdManager : : TestOnlyReset ( ) { <nl> - TfToCudaGpuIdMap : : singleton ( ) - > TestOnlyReset ( ) ; <nl> + TfToPlatformGpuIdMap : : singleton ( ) - > TestOnlyReset ( ) ; <nl> } <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_id_manager . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id_manager . h <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - / / Class that maintains a map from TfGpuId to CudaGpuId , and manages the <nl> + / / Class that maintains a map from TfGpuId to PlatformGpuId , and manages the <nl> / / translation between them . <nl> class GpuIdManager { <nl> public : <nl> - / / Adds a mapping from tf_gpu_id to cuda_gpu_id . <nl> - static Status InsertTfCudaGpuIdPair ( TfGpuId tf_gpu_id , CudaGpuId cuda_gpu_id ) ; <nl> + / / Adds a mapping from tf_gpu_id to platform_gpu_id . <nl> + static Status InsertTfPlatformGpuIdPair ( TfGpuId tf_gpu_id , <nl> + PlatformGpuId platform_gpu_id ) ; <nl> <nl> - / / Gets the cuda_gpu_id associated with tf_gpu_id . Returns OK if found . <nl> - static Status TfToCudaGpuId ( TfGpuId tf_gpu_id , CudaGpuId * cuda_gpu_id ) ; <nl> + / / Gets the platform_gpu_id associated with tf_gpu_id . Returns OK if found . <nl> + static Status TfToPlatformGpuId ( TfGpuId tf_gpu_id , <nl> + PlatformGpuId * platform_gpu_id ) ; <nl> <nl> / / Clears the map . Used in unit tests only . <nl> static void TestOnlyReset ( ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_id_manager_test . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id_manager_test . cc <nl> limitations under the License . 
<nl> namespace tensorflow { <nl> namespace { <nl> <nl> - CudaGpuId TfToCudaGpuId ( TfGpuId tf ) { <nl> - CudaGpuId cuda ; <nl> - TF_CHECK_OK ( GpuIdManager : : TfToCudaGpuId ( tf , & cuda ) ) ; <nl> - return cuda ; <nl> + PlatformGpuId TfToPlatformGpuId ( TfGpuId tf ) { <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_CHECK_OK ( GpuIdManager : : TfToPlatformGpuId ( tf , & platform_gpu_id ) ) ; <nl> + return platform_gpu_id ; <nl> } <nl> <nl> TEST ( GpuIdManagerTest , Basics ) { <nl> TfGpuId key_0 ( 0 ) ; <nl> - CudaGpuId value_0 ( 0 ) ; <nl> - TF_ASSERT_OK ( GpuIdManager : : InsertTfCudaGpuIdPair ( key_0 , value_0 ) ) ; <nl> - EXPECT_EQ ( value_0 , TfToCudaGpuId ( key_0 ) ) ; <nl> + PlatformGpuId value_0 ( 0 ) ; <nl> + TF_ASSERT_OK ( GpuIdManager : : InsertTfPlatformGpuIdPair ( key_0 , value_0 ) ) ; <nl> + EXPECT_EQ ( value_0 , TfToPlatformGpuId ( key_0 ) ) ; <nl> <nl> / / Multiple calls to map the same value is ok . <nl> - TF_ASSERT_OK ( GpuIdManager : : InsertTfCudaGpuIdPair ( key_0 , value_0 ) ) ; <nl> - EXPECT_EQ ( value_0 , TfToCudaGpuId ( key_0 ) ) ; <nl> + TF_ASSERT_OK ( GpuIdManager : : InsertTfPlatformGpuIdPair ( key_0 , value_0 ) ) ; <nl> + EXPECT_EQ ( value_0 , TfToPlatformGpuId ( key_0 ) ) ; <nl> <nl> / / Map a different TfGpuId to a different value . <nl> TfGpuId key_1 ( 3 ) ; <nl> - CudaGpuId value_1 ( 2 ) ; <nl> - TF_ASSERT_OK ( GpuIdManager : : InsertTfCudaGpuIdPair ( key_1 , value_1 ) ) ; <nl> - EXPECT_EQ ( value_1 , TfToCudaGpuId ( key_1 ) ) ; <nl> + PlatformGpuId value_1 ( 2 ) ; <nl> + TF_ASSERT_OK ( GpuIdManager : : InsertTfPlatformGpuIdPair ( key_1 , value_1 ) ) ; <nl> + EXPECT_EQ ( value_1 , TfToPlatformGpuId ( key_1 ) ) ; <nl> <nl> / / Mapping a different TfGpuId to the same value is ok . <nl> TfGpuId key_2 ( 10 ) ; <nl> - TF_ASSERT_OK ( GpuIdManager : : InsertTfCudaGpuIdPair ( key_2 , value_1 ) ) ; <nl> - EXPECT_EQ ( value_1 , TfToCudaGpuId ( key_2 ) ) ; <nl> + TF_ASSERT_OK ( GpuIdManager : : InsertTfPlatformGpuIdPair ( key_2 , value_1 ) ) ; <nl> + EXPECT_EQ ( value_1 , TfToPlatformGpuId ( key_2 ) ) ; <nl> <nl> / / Mapping the same TfGpuId to a different value . <nl> - ASSERT_FALSE ( GpuIdManager : : InsertTfCudaGpuIdPair ( key_2 , value_0 ) . ok ( ) ) ; <nl> + ASSERT_FALSE ( GpuIdManager : : InsertTfPlatformGpuIdPair ( key_2 , value_0 ) . ok ( ) ) ; <nl> <nl> / / Getting a nonexistent mapping . <nl> - ASSERT_FALSE ( GpuIdManager : : TfToCudaGpuId ( TfGpuId ( 100 ) , & value_0 ) . ok ( ) ) ; <nl> + ASSERT_FALSE ( GpuIdManager : : TfToPlatformGpuId ( TfGpuId ( 100 ) , & value_0 ) . ok ( ) ) ; <nl> } <nl> <nl> } / / namespace <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_id_utils . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id_utils . h <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - / / Utility methods for translation between Tensorflow GPU ids and CUDA GPU ids . <nl> + / / Utility methods for translation between Tensorflow GPU ids and platform GPU <nl> + / / ids . <nl> class GpuIdUtil { <nl> public : <nl> / / Convenient methods for getting the associated executor given a TfGpuId or <nl> - / / CudaGpuId . <nl> - static se : : port : : StatusOr < se : : StreamExecutor * > ExecutorForCudaGpuId ( <nl> - se : : Platform * gpu_manager , CudaGpuId cuda_gpu_id ) { <nl> - return gpu_manager - > ExecutorForDevice ( cuda_gpu_id . value ( ) ) ; <nl> + / / PlatformGpuId . 
<nl> + static se : : port : : StatusOr < se : : StreamExecutor * > ExecutorForPlatformGpuId ( <nl> + se : : Platform * gpu_manager , PlatformGpuId platform_gpu_id ) { <nl> + return gpu_manager - > ExecutorForDevice ( platform_gpu_id . value ( ) ) ; <nl> } <nl> - static se : : port : : StatusOr < se : : StreamExecutor * > ExecutorForCudaGpuId ( <nl> - CudaGpuId cuda_gpu_id ) { <nl> - return ExecutorForCudaGpuId ( GPUMachineManager ( ) , cuda_gpu_id ) ; <nl> + static se : : port : : StatusOr < se : : StreamExecutor * > ExecutorForPlatformGpuId ( <nl> + PlatformGpuId platform_gpu_id ) { <nl> + return ExecutorForPlatformGpuId ( GPUMachineManager ( ) , platform_gpu_id ) ; <nl> } <nl> static se : : port : : StatusOr < se : : StreamExecutor * > ExecutorForTfGpuId ( <nl> TfGpuId tf_gpu_id ) { <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_RETURN_IF_ERROR ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ) ; <nl> - return ExecutorForCudaGpuId ( cuda_gpu_id ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_RETURN_IF_ERROR ( <nl> + GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ) ; <nl> + return ExecutorForPlatformGpuId ( platform_gpu_id ) ; <nl> } <nl> <nl> - / / Verify that the cuda_gpu_id associated with a TfGpuId is legitimate . <nl> + / / Verify that the platform_gpu_id associated with a TfGpuId is legitimate . <nl> static void CheckValidTfGpuId ( TfGpuId tf_gpu_id ) { <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_CHECK_OK ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_CHECK_OK ( GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ) ; <nl> const int visible_device_count = GPUMachineManager ( ) - > VisibleDeviceCount ( ) ; <nl> - CHECK_LT ( cuda_gpu_id . value ( ) , visible_device_count ) <nl> - < < " cuda_gpu_id is outside discovered device range . " <nl> - < < " TF GPU id : " < < tf_gpu_id < < " CUDA GPU id : " < < cuda_gpu_id <nl> + CHECK_LT ( platform_gpu_id . value ( ) , visible_device_count ) <nl> + < < " platform_gpu_id is outside discovered device range . " <nl> + < < " TF GPU id : " < < tf_gpu_id <nl> + < < " platform GPU id : " < < platform_gpu_id <nl> < < " visible device count : " < < visible_device_count ; <nl> } <nl> } ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_process_state . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_process_state . cc <nl> Allocator * GPUProcessState : : GetGPUAllocator ( const GPUOptions & options , <nl> return nullptr ; <nl> } <nl> <nl> - CudaGpuId cuda_gpu_id ; <nl> - TF_CHECK_OK ( GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + TF_CHECK_OK ( GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ) ; <nl> int bus_id = BusIdForGPU ( tf_gpu_id ) ; <nl> while ( bus_id > = gpu_visitors_ . size ( ) ) { <nl> gpu_visitors_ . push_back ( { } ) ; <nl> } <nl> GPUMemAllocator * sub_allocator = new GPUMemAllocator ( <nl> - GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) , cuda_gpu_id , <nl> + GpuIdUtil : : ExecutorForPlatformGpuId ( platform_gpu_id ) . ValueOrDie ( ) , <nl> + platform_gpu_id , <nl> ( options . per_process_gpu_memory_fraction ( ) > 1 . 0 | | <nl> options . experimental ( ) . 
use_unified_memory ( ) ) , <nl> gpu_visitors_ [ bus_id ] , { } ) ; <nl> Allocator * GPUProcessState : : GetGPUAllocator ( const GPUOptions & options , <nl> / / If true , checks for memory overwrites by writing <nl> / / distinctive patterns on both ends of allocated memory . <nl> if ( useCudaMemoryGuardAllocator ( ) ) { <nl> - gpu_allocator = new GPUDebugAllocator ( gpu_allocator , cuda_gpu_id ) ; <nl> - gpu_allocator = new GPUNanResetAllocator ( gpu_allocator , cuda_gpu_id ) ; <nl> + gpu_allocator = new GPUDebugAllocator ( gpu_allocator , platform_gpu_id ) ; <nl> + gpu_allocator = new GPUNanResetAllocator ( gpu_allocator , platform_gpu_id ) ; <nl> } else if ( useCudaMallocAllocator ( ) ) { <nl> / / If true , passes all allocation requests through to cudaMalloc <nl> / / useful for doing memory debugging with tools like cuda - memcheck <nl> / / * * WARNING * * probably will not work in a multi - gpu scenario <nl> - gpu_allocator = new GPUcudaMallocAllocator ( gpu_allocator , cuda_gpu_id ) ; <nl> + gpu_allocator = <nl> + new GPUcudaMallocAllocator ( gpu_allocator , platform_gpu_id ) ; <nl> } <nl> <nl> Allocator * recording_allocator = nullptr ; <nl> if ( process_state_ - > ProcessState : : FLAGS_brain_gpu_record_mem_types ) { <nl> ProcessState : : MemDesc md ; <nl> md . loc = ProcessState : : MemDesc : : GPU ; <nl> - md . dev_index = cuda_gpu_id . value ( ) ; <nl> + md . dev_index = platform_gpu_id . value ( ) ; <nl> md . gpu_registered = false ; <nl> md . nic_registered = true ; <nl> recording_allocator = new internal : : RecordingAllocator ( <nl> mmm a / tensorflow / core / grappler / clusters / single_machine . cc <nl> ppp b / tensorflow / core / grappler / clusters / single_machine . cc <nl> Status SingleMachine : : Provision ( ) { <nl> strings : : StrCat ( " Not able to parse GPU device name : " , dev . name ( ) ) ) ; <nl> } <nl> TfGpuId tf_gpu_id ( parsed . id ) ; <nl> - CudaGpuId cuda_gpu_id ; <nl> - Status s = GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + Status s = GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ; <nl> if ( ! s . ok ( ) ) { <nl> return errors : : Unavailable ( " Unknown TF GPU device with id " , <nl> tf_gpu_id . value ( ) , " : " , s . ToString ( ) ) ; <nl> } <nl> - attr = GetLocalGPUInfo ( cuda_gpu_id ) ; <nl> + attr = GetLocalGPUInfo ( platform_gpu_id ) ; <nl> } else if ( dev . device_type ( ) . find ( " XLA " ) = = string : : npos ) { <nl> / / Filter out the fake XLA devices to avoid double counting the actual <nl> / / hardware resources that are available . <nl> mmm a / tensorflow / core / grappler / clusters / utils . cc <nl> ppp b / tensorflow / core / grappler / clusters / utils . cc <nl> DeviceProperties GetLocalCPUInfo ( ) { <nl> return device ; <nl> } <nl> <nl> - DeviceProperties GetLocalGPUInfo ( CudaGpuId cuda_gpu_id ) { <nl> + DeviceProperties GetLocalGPUInfo ( PlatformGpuId platform_gpu_id ) { <nl> DeviceProperties device ; <nl> device . set_type ( " GPU " ) ; <nl> <nl> # if GOOGLE_CUDA <nl> cudaDeviceProp properties ; <nl> - cudaError_t error = cudaGetDeviceProperties ( & properties , cuda_gpu_id . value ( ) ) ; <nl> + cudaError_t error = <nl> + cudaGetDeviceProperties ( & properties , platform_gpu_id . value ( ) ) ; <nl> if ( error ! = cudaSuccess ) { <nl> device . 
set_type ( " UNKNOWN " ) ; <nl> LOG ( ERROR ) < < " Failed to get device properties , error code : " < < error ; <nl> DeviceProperties GetDeviceInfo ( const DeviceNameUtils : : ParsedName & device ) { <nl> } else if ( device . type = = " GPU " ) { <nl> if ( device . has_id ) { <nl> TfGpuId tf_gpu_id ( device . id ) ; <nl> - CudaGpuId cuda_gpu_id ; <nl> - Status s = GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + Status s = GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ; <nl> if ( ! s . ok ( ) ) { <nl> LOG ( ERROR ) < < s ; <nl> return unknown ; <nl> } <nl> - return GetLocalGPUInfo ( cuda_gpu_id ) ; <nl> + return GetLocalGPUInfo ( platform_gpu_id ) ; <nl> } else { <nl> - return GetLocalGPUInfo ( CudaGpuId ( 0 ) ) ; <nl> + return GetLocalGPUInfo ( PlatformGpuId ( 0 ) ) ; <nl> } <nl> } <nl> return unknown ; <nl> mmm a / tensorflow / core / grappler / clusters / utils . h <nl> ppp b / tensorflow / core / grappler / clusters / utils . h <nl> DeviceProperties GetLocalCPUInfo ( ) ; <nl> <nl> / / Returns the DeviceProperties for the specified GPU attached to the server on <nl> / / which grappler is running . <nl> - DeviceProperties GetLocalGPUInfo ( CudaGpuId cuda_gpu_id ) ; <nl> + DeviceProperties GetLocalGPUInfo ( PlatformGpuId platform_gpu_id ) ; <nl> <nl> / / Returns the DeviceProperties of the specified device <nl> DeviceProperties GetDeviceInfo ( const DeviceNameUtils : : ParsedName & device ) ; <nl> mmm a / tensorflow / core / grappler / clusters / utils_test . cc <nl> ppp b / tensorflow / core / grappler / clusters / utils_test . cc <nl> TEST ( UtilsTest , GetLocalGPUInfo ) { <nl> LOG ( INFO ) < < " CUDA is enabled . " ; <nl> DeviceProperties properties ; <nl> <nl> - / / Invalid CUDA GPU ID . <nl> - properties = GetLocalGPUInfo ( CudaGpuId ( 100 ) ) ; <nl> + / / Invalid platform GPU ID . <nl> + properties = GetLocalGPUInfo ( PlatformGpuId ( 100 ) ) ; <nl> EXPECT_EQ ( " UNKNOWN " , properties . type ( ) ) ; <nl> <nl> - / / Succeed when a valid CUDA GPU id was inserted . <nl> - properties = GetLocalGPUInfo ( CudaGpuId ( 0 ) ) ; <nl> + / / Succeed when a valid platform GPU id was inserted . <nl> + properties = GetLocalGPUInfo ( PlatformGpuId ( 0 ) ) ; <nl> EXPECT_EQ ( " GPU " , properties . type ( ) ) ; <nl> EXPECT_EQ ( " NVIDIA " , properties . vendor ( ) ) ; <nl> # else <nl> LOG ( INFO ) < < " CUDA is not enabled . " ; <nl> DeviceProperties properties ; <nl> <nl> - properties = GetLocalGPUInfo ( CudaGpuId ( 0 ) ) ; <nl> + properties = GetLocalGPUInfo ( PlatformGpuId ( 0 ) ) ; <nl> EXPECT_EQ ( " GPU " , properties . type ( ) ) ; <nl> <nl> - properties = GetLocalGPUInfo ( CudaGpuId ( 100 ) ) ; <nl> + properties = GetLocalGPUInfo ( PlatformGpuId ( 100 ) ) ; <nl> EXPECT_EQ ( " GPU " , properties . type ( ) ) ; <nl> # endif <nl> } <nl> TEST ( UtilsTest , GetDeviceInfo ) { <nl> EXPECT_EQ ( " NVIDIA " , properties . vendor ( ) ) ; <nl> # endif <nl> <nl> - / / TF to CUDA GPU id mapping entry doesn ' t exist . <nl> + / / TF to platform GPU id mapping entry doesn ' t exist . <nl> device . has_id = true ; <nl> device . id = 0 ; <nl> properties = GetDeviceInfo ( device ) ; <nl> EXPECT_EQ ( " UNKNOWN " , properties . type ( ) ) ; <nl> <nl> # if GOOGLE_CUDA <nl> - / / Invalid CUDA GPU id . <nl> - GpuIdManager : : InsertTfCudaGpuIdPair ( TfGpuId ( 0 ) , CudaGpuId ( 100 ) ) ; <nl> + / / Invalid platform GPU id . 
<nl> + GpuIdManager : : InsertTfPlatformGpuIdPair ( TfGpuId ( 0 ) , PlatformGpuId ( 100 ) ) ; <nl> properties = GetDeviceInfo ( device ) ; <nl> EXPECT_EQ ( " UNKNOWN " , properties . type ( ) ) ; <nl> <nl> - / / Valid CUDA GPU id . <nl> - GpuIdManager : : InsertTfCudaGpuIdPair ( TfGpuId ( 1 ) , CudaGpuId ( 0 ) ) ; <nl> + / / Valid platform GPU id . <nl> + GpuIdManager : : InsertTfPlatformGpuIdPair ( TfGpuId ( 1 ) , PlatformGpuId ( 0 ) ) ; <nl> device . id = 1 ; <nl> properties = GetDeviceInfo ( device ) ; <nl> EXPECT_EQ ( " GPU " , properties . type ( ) ) ; <nl> mmm a / tensorflow / core / grappler / costs / utils . cc <nl> ppp b / tensorflow / core / grappler / costs / utils . cc <nl> DeviceProperties GetDeviceInfo ( const string & device_str ) { <nl> if ( DeviceNameUtils : : ParseFullName ( device_str , & parsed ) ) { <nl> if ( parsed . type = = " GPU " ) { <nl> TfGpuId tf_gpu_id ( parsed . id ) ; <nl> - CudaGpuId cuda_gpu_id ; <nl> - Status s = GpuIdManager : : TfToCudaGpuId ( tf_gpu_id , & cuda_gpu_id ) ; <nl> + PlatformGpuId platform_gpu_id ; <nl> + Status s = GpuIdManager : : TfToPlatformGpuId ( tf_gpu_id , & platform_gpu_id ) ; <nl> if ( ! s . ok ( ) ) { <nl> / / We are probably running simulation without linking cuda libraries . <nl> - cuda_gpu_id = CudaGpuId ( parsed . id ) ; <nl> + platform_gpu_id = PlatformGpuId ( parsed . id ) ; <nl> } <nl> - return GetLocalGPUInfo ( cuda_gpu_id ) ; <nl> + return GetLocalGPUInfo ( platform_gpu_id ) ; <nl> } else if ( parsed . type = = " CPU " ) { <nl> return GetLocalCPUInfo ( ) ; <nl> } <nl> mmm a / tensorflow / core / protobuf / config . proto <nl> ppp b / tensorflow / core / protobuf / config . proto <nl> message GPUOptions { <nl> / / after the process starts . Users are required to use vendor <nl> / / specific mechanisms ( e . g . , CUDA_VISIBLE_DEVICES ) to control the <nl> / / physical to visible device mapping prior to invoking TensorFlow . <nl> - / / 2 . In the code , the ids in this list are also called " CUDA GPU id " s , <nl> + / / 2 . In the code , the ids in this list are also called " platform GPU id " s , <nl> / / and the ' virtual ' ids of GPU devices ( i . e . the ids in the device <nl> / / name " / device : GPU : < id > " ) are also called " TF GPU id " s . Please <nl> / / refer to third_party / tensorflow / core / common_runtime / gpu / gpu_id . h <nl>
|
Merge pull request from ROCmSoftwarePlatform: upstream-staging-gpu-common-runtime-1
|
tensorflow/tensorflow
|
96e7185cdb399345fb6e4c656d1b3088f848cf5a
|
2018-09-19T17:52:22Z
|
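The TensorFlow change above keeps a process-wide map from TfGpuId to PlatformGpuId that tolerates re-inserting the same pair but rejects remapping a TF id to a different platform id. Below is a minimal standalone C++ sketch of that bookkeeping, assuming simplified int ids; SimpleGpuIdMap and its methods are illustrative names, not the real TensorFlow classes.

// Illustrative sketch only: simplified stand-in for the TfGpuId -> PlatformGpuId map.
#include <cstdio>
#include <mutex>
#include <unordered_map>

class SimpleGpuIdMap {
 public:
  // Returns false if tf_gpu_id is already mapped to a *different* platform id,
  // mirroring the AlreadyExists error path in the diff above.
  bool Insert(int tf_gpu_id, int platform_gpu_id) {
    std::lock_guard<std::mutex> lock(mu_);
    auto result = id_map_.insert({tf_gpu_id, platform_gpu_id});
    return result.second || result.first->second == platform_gpu_id;
  }

  // Returns true and fills *platform_gpu_id if a mapping exists.
  bool Find(int tf_gpu_id, int* platform_gpu_id) const {
    std::lock_guard<std::mutex> lock(mu_);
    auto it = id_map_.find(tf_gpu_id);
    if (it == id_map_.end()) return false;
    *platform_gpu_id = it->second;
    return true;
  }

 private:
  mutable std::mutex mu_;
  std::unordered_map<int, int> id_map_;
};

int main() {
  SimpleGpuIdMap m;
  m.Insert(0, 2);                       // TF GPU 0 -> platform GPU 2
  std::printf("%d\n", m.Insert(0, 2));  // 1: re-inserting the same pair is fine
  std::printf("%d\n", m.Insert(0, 3));  // 0: remapping to a different id is rejected
  int platform_id = -1;
  if (m.Find(0, &platform_id)) std::printf("platform id: %d\n", platform_id);
  return 0;
}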
mmm a / src / runtime / ext / ext_collection . cpp <nl> ppp b / src / runtime / ext / ext_collection . cpp <nl> void c_Vector : : resize ( int64 sz , TypedValue * val ) { <nl> m_size = sz ; <nl> } <nl> <nl> - bool c_Vector : : contains ( int64 key ) { <nl> - return ( ( unsigned long long ) key < ( unsigned long long ) m_size ) ; <nl> - } <nl> - <nl> ObjectData * c_Vector : : clone ( ) { <nl> ObjectData * obj = ObjectData : : clone ( ) ; <nl> c_Vector * vec = static_cast < c_Vector * > ( obj ) ; <nl> Variant c_Vector : : t_get ( CVarRef key ) { <nl> } <nl> <nl> bool c_Vector : : t_contains ( CVarRef key ) { <nl> - if ( ! key . isInteger ( ) ) { <nl> - throwBadKeyType ( ) ; <nl> + if ( key . isInteger ( ) ) { <nl> + return contains ( key . toInt64 ( ) ) ; <nl> } <nl> - return contains ( key . toInt64 ( ) ) ; <nl> + throwBadKeyType ( ) ; <nl> + return false ; <nl> } <nl> <nl> Array c_Vector : : t_toarray ( ) { <nl> bool c_Vector : : OffsetEmpty ( ObjectData * obj , TypedValue * key ) { <nl> return result ? empty ( tvAsCVarRef ( result ) ) : true ; <nl> } <nl> <nl> + bool c_Vector : : OffsetContains ( ObjectData * obj , TypedValue * key ) { <nl> + ASSERT ( key - > m_type ! = KindOfRef ) ; <nl> + c_Vector * vec = static_cast < c_Vector * > ( obj ) ; <nl> + if ( key - > m_type = = KindOfInt64 ) { <nl> + return vec - > contains ( key - > m_data . num ) ; <nl> + } else { <nl> + throwBadKeyType ( ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> void c_Vector : : OffsetAppend ( ObjectData * obj , TypedValue * val ) { <nl> ASSERT ( val - > m_type ! = KindOfRef ) ; <nl> c_Vector * vec = static_cast < c_Vector * > ( obj ) ; <nl> void c_Vector : : OffsetAppend ( ObjectData * obj , TypedValue * val ) { <nl> <nl> void c_Vector : : OffsetUnset ( ObjectData * obj , TypedValue * key ) { <nl> Object e ( SystemLib : : AllocRuntimeExceptionObject ( <nl> - " Vector does not support unset " ) ) ; <nl> + " Cannot unset element of a Vector " ) ) ; <nl> throw e ; <nl> } <nl> <nl> Object c_Map : : t_put ( CVarRef key , CVarRef value ) { <nl> bool c_Map : : t_contains ( CVarRef key ) { <nl> DataType t = key . getType ( ) ; <nl> if ( t = = KindOfInt64 ) { <nl> - Bucket * p = find ( key . toInt64 ( ) ) ; <nl> - return ( p ! = NULL ) ; <nl> + return contains ( key . toInt64 ( ) ) ; <nl> } <nl> if ( IS_STRING_TYPE ( t ) ) { <nl> - StringData * sd = key . getStringData ( ) ; <nl> - Bucket * p = find ( sd - > data ( ) , sd - > size ( ) , sd - > hash ( ) ) ; <nl> - return ( p ! = NULL ) ; <nl> + return contains ( key . getStringData ( ) ) ; <nl> } <nl> + throwBadKeyType ( ) ; <nl> return false ; <nl> } <nl> <nl> Object c_Map : : t_remove ( CVarRef key ) { <nl> remove ( key . toInt64 ( ) ) ; <nl> } else if ( IS_STRING_TYPE ( t ) ) { <nl> remove ( key . getStringData ( ) ) ; <nl> + } else { <nl> + throwBadKeyType ( ) ; <nl> } <nl> return this ; <nl> } <nl> <nl> Object c_Map : : t_discard ( CVarRef key ) { <nl> - DataType t = key . getType ( ) ; <nl> - if ( t = = KindOfInt64 ) { <nl> - remove ( key . toInt64 ( ) ) ; <nl> - } else if ( IS_STRING_TYPE ( t ) ) { <nl> - remove ( key . getStringData ( ) ) ; <nl> - } <nl> - return this ; <nl> + return t_remove ( key ) ; <nl> } <nl> <nl> Array c_Map : : t_toarray ( ) { <nl> bool c_Map : : OffsetEmpty ( ObjectData * obj , TypedValue * key ) { <nl> return result ? empty ( tvAsCVarRef ( result ) ) : true ; <nl> } <nl> <nl> + bool c_Map : : OffsetContains ( ObjectData * obj , TypedValue * key ) { <nl> + ASSERT ( key - > m_type ! 
= KindOfRef ) ; <nl> + c_Map * mp = static_cast < c_Map * > ( obj ) ; <nl> + if ( key - > m_type = = KindOfInt64 ) { <nl> + return mp - > contains ( key - > m_data . num ) ; <nl> + } else if ( IS_STRING_TYPE ( key - > m_type ) ) { <nl> + return mp - > contains ( key - > m_data . pstr ) ; <nl> + } else { <nl> + throwBadKeyType ( ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> void c_Map : : OffsetAppend ( ObjectData * obj , TypedValue * val ) { <nl> Object e ( SystemLib : : AllocRuntimeExceptionObject ( <nl> " [ ] operator not supported for Maps " ) ) ; <nl> Object c_StableMap : : t_put ( CVarRef key , CVarRef value ) { <nl> bool c_StableMap : : t_contains ( CVarRef key ) { <nl> DataType t = key . getType ( ) ; <nl> if ( t = = KindOfInt64 ) { <nl> - Bucket * p = find ( key . toInt64 ( ) ) ; <nl> - return ( p ! = NULL ) ; <nl> + return contains ( key . toInt64 ( ) ) ; <nl> } <nl> if ( IS_STRING_TYPE ( t ) ) { <nl> - StringData * sd = key . getStringData ( ) ; <nl> - Bucket * p = find ( sd - > data ( ) , sd - > size ( ) , sd - > hash ( ) ) ; <nl> - return ( p ! = NULL ) ; <nl> + return contains ( key . getStringData ( ) ) ; <nl> } <nl> + throwBadKeyType ( ) ; <nl> return false ; <nl> } <nl> <nl> Object c_StableMap : : t_remove ( CVarRef key ) { <nl> remove ( key . toInt64 ( ) ) ; <nl> } else if ( IS_STRING_TYPE ( t ) ) { <nl> remove ( key . getStringData ( ) ) ; <nl> + } else { <nl> + throwBadKeyType ( ) ; <nl> } <nl> return this ; <nl> } <nl> <nl> Object c_StableMap : : t_discard ( CVarRef key ) { <nl> - DataType t = key . getType ( ) ; <nl> - if ( t = = KindOfInt64 ) { <nl> - remove ( key . toInt64 ( ) ) ; <nl> - } else if ( IS_STRING_TYPE ( t ) ) { <nl> - remove ( key . getStringData ( ) ) ; <nl> - } <nl> - return this ; <nl> + return t_remove ( key ) ; <nl> } <nl> <nl> Array c_StableMap : : t_toarray ( ) { <nl> bool c_StableMap : : OffsetEmpty ( ObjectData * obj , TypedValue * key ) { <nl> return result ? empty ( tvAsCVarRef ( result ) ) : true ; <nl> } <nl> <nl> + bool c_StableMap : : OffsetContains ( ObjectData * obj , TypedValue * key ) { <nl> + ASSERT ( key - > m_type ! = KindOfRef ) ; <nl> + c_Map * smp = static_cast < c_Map * > ( obj ) ; <nl> + if ( key - > m_type = = KindOfInt64 ) { <nl> + return smp - > contains ( key - > m_data . num ) ; <nl> + } else if ( IS_STRING_TYPE ( key - > m_type ) ) { <nl> + return smp - > contains ( key - > m_data . pstr ) ; <nl> + } else { <nl> + throwBadKeyType ( ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> void c_StableMap : : OffsetAppend ( ObjectData * obj , TypedValue * val ) { <nl> Object e ( SystemLib : : AllocRuntimeExceptionObject ( <nl> " [ ] operator not supported for StableMaps " ) ) ; <nl> mmm a / src / runtime / ext / ext_collection . h <nl> ppp b / src / runtime / ext / ext_collection . 
h <nl> class c_Vector : public ExtObjectDataFlags < ObjectData : : VectorAttrInit > , <nl> public : ObjectData * clone ( ) ; <nl> <nl> public : void resize ( int64 sz , TypedValue * val ) ; <nl> - public : bool contains ( int64 key ) ; <nl> - public : int getVersionNumber ( ) { return m_versionNumber ; } <nl> - <nl> + public : bool contains ( int64 key ) { <nl> + return ( ( unsigned long long ) key < ( unsigned long long ) m_size ) ; <nl> + } <nl> + public : int getVersionNumber ( ) { <nl> + return m_versionNumber ; <nl> + } <nl> + <nl> public : static TypedValue * OffsetGet ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetSet ( ObjectData * obj , TypedValue * key , <nl> TypedValue * val ) ; <nl> public : static bool OffsetIsset ( ObjectData * obj , TypedValue * key ) ; <nl> public : static bool OffsetEmpty ( ObjectData * obj , TypedValue * key ) ; <nl> + public : static bool OffsetContains ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetUnset ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetAppend ( ObjectData * obj , TypedValue * val ) ; <nl> <nl> class c_Map : public ExtObjectDataFlags < ObjectData : : MapAttrInit > , <nl> + + m_versionNumber ; <nl> erase ( find ( key - > data ( ) , key - > size ( ) , key - > hash ( ) ) ) ; <nl> } <nl> - public : int getVersionNumber ( ) { return m_versionNumber ; } <nl> + public : bool contains ( int64 key ) { <nl> + return find ( key ) ; <nl> + } <nl> + public : bool contains ( StringData * key ) { <nl> + return find ( key - > data ( ) , key - > size ( ) , key - > hash ( ) ) ; <nl> + } <nl> + public : int getVersionNumber ( ) { <nl> + return m_versionNumber ; <nl> + } <nl> <nl> public : static TypedValue * OffsetGet ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetSet ( ObjectData * obj , TypedValue * key , <nl> TypedValue * val ) ; <nl> public : static bool OffsetIsset ( ObjectData * obj , TypedValue * key ) ; <nl> public : static bool OffsetEmpty ( ObjectData * obj , TypedValue * key ) ; <nl> + public : static bool OffsetContains ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetUnset ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetAppend ( ObjectData * obj , TypedValue * val ) ; <nl> <nl> class c_StableMap : public ExtObjectDataFlags < ObjectData : : StableMapAttrInit > , <nl> + + m_versionNumber ; <nl> erase ( findForErase ( key - > data ( ) , key - > size ( ) , key - > hash ( ) ) ) ; <nl> } <nl> - public : int getVersionNumber ( ) { return m_versionNumber ; } <nl> + public : bool contains ( int64 key ) { <nl> + return find ( key ) ; <nl> + } <nl> + public : bool contains ( StringData * key ) { <nl> + return find ( key - > data ( ) , key - > size ( ) , key - > hash ( ) ) ; <nl> + } <nl> + public : int getVersionNumber ( ) { <nl> + return m_versionNumber ; <nl> + } <nl> <nl> public : static TypedValue * OffsetGet ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetSet ( ObjectData * obj , TypedValue * key , <nl> TypedValue * val ) ; <nl> public : static bool OffsetIsset ( ObjectData * obj , TypedValue * key ) ; <nl> public : static bool OffsetEmpty ( ObjectData * obj , TypedValue * key ) ; <nl> + public : static bool OffsetContains ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetUnset ( ObjectData * obj , TypedValue * key ) ; <nl> public : static void OffsetAppend ( ObjectData * obj , TypedValue * val ) ; <nl> <nl> inline void collectionOffsetSet ( 
ObjectData * obj , litstr offset , CVarRef val ) { <nl> collectionOffsetSet ( obj , Variant ( offset ) , val ) ; <nl> } <nl> <nl> + inline bool collectionOffsetContains ( ObjectData * obj , CVarRef offset ) { <nl> + TypedValue * key = ( TypedValue * ) ( & offset ) ; <nl> + if ( key - > m_type = = KindOfRef ) { <nl> + key = key - > m_data . pref - > tv ( ) ; <nl> + } <nl> + int ct = obj - > getCollectionType ( ) ; <nl> + if ( ct = = Collection : : VectorType ) { <nl> + return c_Vector : : OffsetContains ( obj , key ) ; <nl> + } else if ( ct = = Collection : : MapType ) { <nl> + return c_Map : : OffsetContains ( obj , key ) ; <nl> + } else if ( ct = = Collection : : StableMapType ) { <nl> + return c_StableMap : : OffsetContains ( obj , key ) ; <nl> + } else { <nl> + ASSERT ( false ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> inline bool collectionOffsetIsset ( ObjectData * obj , CVarRef offset ) { <nl> TypedValue * key = ( TypedValue * ) ( & offset ) ; <nl> if ( key - > m_type = = KindOfRef ) { <nl> mmm a / src / test / test_code_run . cpp <nl> ppp b / src / test / test_code_run . cpp <nl> bool TestCodeRun : : TestCollectionClasses ( ) { <nl> " } \ n " <nl> ) ; <nl> <nl> + MVCRO ( " < ? php \ n " <nl> + " try { \ n " <nl> + " $ v = new Vector ( ) ; \ n " <nl> + " $ v - > contains ( 1 ) ; \ n " <nl> + " try { \ n " <nl> + " $ v - > contains ( ' foo ' ) ; \ n " <nl> + " } catch ( Exception $ e ) { \ n " <nl> + " echo ' A ' ; \ n " <nl> + " } \ n " <nl> + " try { \ n " <nl> + " $ v - > contains ( 1 . 0 ) ; \ n " <nl> + " } catch ( Exception $ e ) { \ n " <nl> + " echo ' B ' ; \ n " <nl> + " } \ n " <nl> + " $ methods = Vector : : fromArray ( array ( ' contains ' , ' remove ' , " <nl> + " ' discard ' ) ) ; \ n " <nl> + " foreach ( $ methods as $ method ) { \ n " <nl> + " $ m = new Map ( ) ; \ n " <nl> + " $ m - > $ method ( 1 ) ; \ n " <nl> + " $ m - > $ method ( ' foo ' ) ; \ n " <nl> + " try { \ n " <nl> + " $ m - > $ method ( 1 . 0 ) ; \ n " <nl> + " } catch ( Exception $ e ) { \ n " <nl> + " echo ' C ' ; \ n " <nl> + " } \ n " <nl> + " $ s = new StableMap ( ) ; \ n " <nl> + " $ s - > $ method ( 1 ) ; \ n " <nl> + " $ s - > $ method ( ' foo ' ) ; \ n " <nl> + " try { \ n " <nl> + " $ s - > $ method ( 1 . 0 ) ; \ n " <nl> + " } catch ( Exception $ e ) { \ n " <nl> + " echo ' D ' ; \ n " <nl> + " } \ n " <nl> + " echo \ " \ \ n \ " ; \ n " <nl> + " } \ n " <nl> + " } catch ( Exception $ e ) { \ n " <nl> + " echo \ " Test failed \ \ n \ " ; \ n " <nl> + " } \ n " <nl> + , <nl> + " ABCD \ n " <nl> + " CD \ n " <nl> + " CD \ n " <nl> + ) ; <nl> + <nl> return true ; <nl> } <nl> <nl>
|
Fix idx() to support collections, add throwBadKeyType in a few places
|
facebook/hhvm
|
7fccb7255a8e2a0535cf4ee860794d50f02aac01
|
2012-09-11T23:46:53Z
|
mmm a / lib / IRGen / GenExpr . cpp <nl> ppp b / lib / IRGen / GenExpr . cpp <nl> static llvm : : Value * emitCharacterLiteralExpr ( IRGenFunction & IGF , <nl> } <nl> <nl> / / / Emit an string literal expression . <nl> - static llvm : : Value * emitStringLiteralExpr ( IRGenFunction & IGF , <nl> - StringLiteralExpr * E ) { <nl> - assert ( E - > getType ( ) - > is < BuiltinRawPointerType > ( ) ) ; <nl> + static void emitStringLiteralExpr ( IRGenFunction & IGF , <nl> + StringLiteralExpr * E , <nl> + Explosion & explosion ) { <nl> / / CreateGlobalStringPtr adds our nul terminator . <nl> - return IGF . Builder . CreateGlobalStringPtr ( E - > getValue ( ) ) ; <nl> + if ( E - > getType ( ) - > is < BuiltinRawPointerType > ( ) ) { <nl> + explosion . addUnmanaged ( IGF . Builder . CreateGlobalStringPtr ( E - > getValue ( ) ) ) ; <nl> + } else { <nl> + assert ( E - > getType ( ) - > is < TupleType > ( ) ) ; <nl> + explosion . addUnmanaged ( IGF . Builder . CreateGlobalStringPtr ( E - > getValue ( ) ) ) ; <nl> + explosion . addUnmanaged ( IGF . Builder . getInt64 ( E - > getValue ( ) . size ( ) ) ) ; <nl> + } <nl> } <nl> <nl> static LValue emitDeclRefLValue ( IRGenFunction & IGF , DeclRefExpr * E ) { <nl> namespace { <nl> Out . addUnmanaged ( emitCharacterLiteralExpr ( IGF , E ) ) ; <nl> } <nl> void visitStringLiteralExpr ( StringLiteralExpr * E ) { <nl> - Out . addUnmanaged ( emitStringLiteralExpr ( IGF , E ) ) ; <nl> + emitStringLiteralExpr ( IGF , E , Out ) ; <nl> } <nl> void visitInterpolatedStringLiteralExpr ( InterpolatedStringLiteralExpr * E ) { <nl> visit ( E - > getSemanticExpr ( ) ) ; <nl> mmm a / lib / Sema / TypeCheckCoercion . cpp <nl> ppp b / lib / Sema / TypeCheckCoercion . cpp <nl> enum CoercionFlags { <nl> / / / expression . <nl> class SemaCoerce : public ExprVisitor < SemaCoerce , CoercedResult > { <nl> enum class LiteralType { <nl> - Int , Float , Char , String <nl> + Int , Float , Char , UTFString , ASCIIString <nl> } ; <nl> + static bool isStringLiteral ( LiteralType LitTy ) { <nl> + return LitTy = = LiteralType : : UTFString | | <nl> + LitTy = = LiteralType : : ASCIIString ; <nl> + } <nl> <nl> / / / \ brief Determine whether the given type is compatible with an integer <nl> / / / or floating - point literal and what function would perform the conversion . <nl> class SemaCoerce : public ExprVisitor < SemaCoerce , CoercedResult > { <nl> / / / argument type that it expects . <nl> std : : pair < FuncDecl * , Type > <nl> SemaCoerce : : isLiteralCompatibleType ( Type Ty , SourceLoc Loc , LiteralType LitTy ) { <nl> + if ( Ty - > is < LValueType > ( ) ) { <nl> + diagnose ( Loc , diag : : type_not_compatible_literal , Ty ) ; <nl> + return std : : pair < FuncDecl * , Type > ( ) ; <nl> + } <nl> + <nl> / / Look up the convertFrom * Literal method on the type . If it is missing , <nl> / / then the type isn ' t compatible with literals . If it is present , it must <nl> / / have a single argument . 
<nl> const char * MethodName = 0 ; <nl> + const char * AltMethodName = 0 ; <nl> switch ( LitTy ) { <nl> case LiteralType : : Int : MethodName = " convertFromIntegerLiteral " ; break ; <nl> case LiteralType : : Float : MethodName = " convertFromFloatLiteral " ; break ; <nl> case LiteralType : : Char : MethodName = " convertFromCharacterLiteral " ; break ; <nl> - case LiteralType : : String : MethodName = " convertFromStringLiteral " ; break ; <nl> + case LiteralType : : UTFString : MethodName = " convertFromStringLiteral " ; break ; <nl> + case LiteralType : : ASCIIString : <nl> + MethodName = " convertFromASCIIStringLiteral " ; <nl> + AltMethodName = " convertFromStringLiteral " ; <nl> + break ; <nl> } <nl> assert ( MethodName & & " Didn ' t know LitTy " ) ; <nl> - MemberLookup Lookup ( Ty , TC . Context . getIdentifier ( MethodName ) , TC . TU ) ; <nl> - <nl> - if ( ! Lookup . isSuccess ( ) | | Ty - > is < TupleType > ( ) | | Ty - > is < LValueType > ( ) ) { <nl> + <nl> + MemberLookup PrimaryLookup ( Ty , TC . Context . getIdentifier ( MethodName ) , TC . TU ) ; <nl> + Optional < MemberLookup > AltLookup ; <nl> + if ( AltMethodName & & ! PrimaryLookup . isSuccess ( ) ) <nl> + AltLookup . emplace ( Ty , TC . Context . getIdentifier ( AltMethodName ) , TC . TU ) ; <nl> + MemberLookup & Lookup = AltLookup ? AltLookup . getValue ( ) : PrimaryLookup ; <nl> + <nl> + if ( ! Lookup . isSuccess ( ) ) { <nl> diagnose ( Loc , diag : : type_not_compatible_literal , Ty ) ; <nl> return std : : pair < FuncDecl * , Type > ( ) ; <nl> } <nl> SemaCoerce : : isLiteralCompatibleType ( Type Ty , SourceLoc Loc , LiteralType LitTy ) { <nl> return std : : pair < FuncDecl * , Type > ( Method , ArgType ) ; <nl> } <nl> <nl> + / / Check for ( Builtin . RawPointer , Builtin . Int64 ) . <nl> + static bool isRawPtrAndInt64 ( Type ty ) { <nl> + TupleType * tt = ty - > getAs < TupleType > ( ) ; <nl> + if ( ! tt ) <nl> + return false ; <nl> + if ( tt - > getFields ( ) . size ( ) ! = 2 ) <nl> + return false ; <nl> + if ( ! tt - > getElementType ( 0 ) - > is < BuiltinRawPointerType > ( ) ) <nl> + return false ; <nl> + BuiltinIntegerType * IntTy = <nl> + tt - > getElementType ( 1 ) - > getAs < BuiltinIntegerType > ( ) ; <nl> + if ( ! IntTy ) <nl> + return false ; <nl> + if ( IntTy - > getBitWidth ( ) ! = 64 ) <nl> + return false ; <nl> + return true ; <nl> + } <nl> + <nl> CoercedResult SemaCoerce : : visitLiteralExpr ( LiteralExpr * E ) { <nl> assert ( E - > getType ( ) - > isUnresolvedType ( ) & & " only accepts unresolved types " ) ; <nl> <nl> CoercedResult SemaCoerce : : visitLiteralExpr ( LiteralExpr * E ) { <nl> else if ( isa < CharacterLiteralExpr > ( E ) ) <nl> LitTy = LiteralType : : Char ; <nl> else { <nl> - assert ( isa < StringLiteralExpr > ( E ) ) ; <nl> - LitTy = LiteralType : : String ; <nl> + StringLiteralExpr * StringE = cast < StringLiteralExpr > ( E ) ; <nl> + LitTy = LiteralType : : ASCIIString ; <nl> + for ( unsigned char c : StringE - > getValue ( ) ) { <nl> + if ( c > 127 ) { <nl> + LitTy = LiteralType : : UTFString ; <nl> + break ; <nl> + } <nl> + } <nl> } <nl> <nl> / / Check the destination type to see if it is compatible with literals , <nl> CoercedResult SemaCoerce : : visitLiteralExpr ( LiteralExpr * E ) { <nl> if ( Flags & CF_Apply ) <nl> E - > setType ( ArgType ) ; <nl> Intermediate = E ; <nl> - } else if ( LitTy = = LiteralType : : String & & <nl> + } else if ( isStringLiteral ( LitTy ) & & <nl> ArgType - > is < BuiltinRawPointerType > ( ) ) { <nl> / / Nothing to do . 
<nl> if ( Flags & CF_Apply ) <nl> E - > setType ( ArgType ) ; <nl> Intermediate = E ; <nl> + } else if ( isStringLiteral ( LitTy ) & & isRawPtrAndInt64 ( ArgType ) ) { <nl> + / / Nothing to do . <nl> + if ( Flags & CF_Apply ) <nl> + E - > setType ( ArgType ) ; <nl> + Intermediate = E ; <nl> } else if ( LitTy = = LiteralType : : Char & & <nl> ArgType - > is < BuiltinIntegerType > ( ) & & <nl> ArgType - > getAs < BuiltinIntegerType > ( ) - > getBitWidth ( ) = = 32 ) { <nl> CoercedResult SemaCoerce : : visitLiteralExpr ( LiteralExpr * E ) { <nl> LiteralInfo . second - > getAs < BuiltinIntegerType > ( ) - > getBitWidth ( ) = = 32 ) { <nl> / / ok . <nl> <nl> - } else if ( LitTy = = LiteralType : : String & & <nl> + } else if ( isStringLiteral ( LitTy ) & & <nl> LiteralInfo . second - > is < BuiltinRawPointerType > ( ) ) { <nl> / / ok . <nl> + } else if ( isStringLiteral ( LitTy ) & & isRawPtrAndInt64 ( LiteralInfo . second ) ) { <nl> + / / ok . <nl> } else { <nl> diagnose ( Method - > getLoc ( ) , <nl> diag : : type_literal_conversion_defined_wrong , DestTy ) ; <nl>
|
Change the convertFromStringLiteral convention to be a bit more efficient:
|
apple/swift
|
ff6f88236283eb023169e8b64db2d1f25546e728
|
2012-07-02T23:00:29Z
|
mmm a / dlib / cmake_find_blas . txt <nl> ppp b / dlib / cmake_find_blas . txt <nl> if ( UNIX ) <nl> CHECK_FUNCTION_EXISTS ( sgetrf_single OPENBLAS_HAS_LAPACK ) <nl> if ( OPENBLAS_HAS_LAPACK ) <nl> message ( STATUS " Using OpenBLAS ' s built in LAPACK " ) <nl> - set ( lapack_libraries gfortran ) <nl> + # set ( lapack_libraries gfortran ) <nl> set ( lapack_found 1 ) <nl> endif ( ) <nl> endif ( ) <nl> mmm a / dlib / matrix / matrix_assign_fwd . h <nl> ppp b / dlib / matrix / matrix_assign_fwd . h <nl> <nl> <nl> / / GCC 4 . 8 gives false alarms about some variables being uninitialized . Disable these <nl> / / false warnings . <nl> - # if ( defined ( __GNUC__ ) & & __GNUC__ = = 4 & & __GNUC_MINOR__ = = 8 ) <nl> - # pragma GCC diagnostic ignored " - Wmaybe - uninitialized " <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 8 ) | | ( __GNUC__ > 4 ) ) <nl> + # pragma GCC diagnostic push <nl> + # pragma GCC diagnostic ignored " - Wmaybe - uninitialized " <nl> # endif <nl> <nl> # include " . . / enable_if . h " <nl> namespace dlib <nl> <nl> } <nl> <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 8 ) | | ( __GNUC__ > 4 ) ) <nl> + # pragma GCC diagnostic pop <nl> + # endif <nl> + <nl> # endif / / DLIB_MATRIx_ASSIGn_FWD_ <nl> <nl> <nl> mmm a / dlib / matrix / matrix_data_layout . h <nl> ppp b / dlib / matrix / matrix_data_layout . h <nl> <nl> <nl> / / GCC 4 . 8 gives false alarms about some matrix operations going out of bounds . Disable <nl> / / these false warnings . <nl> - # if ( defined ( __GNUC__ ) & & __GNUC__ = = 4 & & __GNUC_MINOR__ = = 8 ) <nl> - # pragma GCC diagnostic ignored " - Warray - bounds " <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 8 ) | | ( __GNUC__ > 4 ) ) <nl> + # pragma GCC diagnostic push <nl> + # pragma GCC diagnostic ignored " - Warray - bounds " <nl> # endif <nl> <nl> namespace dlib <nl> namespace dlib <nl> <nl> } <nl> <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 8 ) | | ( __GNUC__ > 4 ) ) <nl> + # pragma GCC diagnostic pop <nl> + # endif <nl> + <nl> # endif / / DLIB_MATRIx_DATA_LAYOUT_ <nl> <nl> mmm a / dlib / optimization / optimization_oca . h <nl> ppp b / dlib / optimization / optimization_oca . h <nl> namespace dlib <nl> ) const <nl> { <nl> matrix_type empty_prior ; <nl> - return oca_impl ( problem , w , empty_prior , false , num_nonnegative , force_weight_to_1 ) ; <nl> + return oca_impl ( problem , w , empty_prior , false , num_nonnegative , force_weight_to_1 , 0 ) ; <nl> + } <nl> + <nl> + template < <nl> + typename matrix_type <nl> + > <nl> + typename matrix_type : : type solve_with_elastic_net ( <nl> + const oca_problem < matrix_type > & problem , <nl> + matrix_type & w , <nl> + double lasso_lambda , <nl> + unsigned long force_weight_to_1 = std : : numeric_limits < unsigned long > : : max ( ) <nl> + ) const <nl> + { <nl> + matrix_type empty_prior ; <nl> + return oca_impl ( problem , w , empty_prior , false , 0 , force_weight_to_1 , lasso_lambda ) ; <nl> } <nl> <nl> template < <nl> namespace dlib <nl> / / disable the force weight to 1 option for this mode . We also disable the <nl> / / non - negative constraints . 
<nl> unsigned long force_weight_to_1 = std : : numeric_limits < unsigned long > : : max ( ) ; <nl> - return oca_impl ( problem , w , prior , true , 0 , force_weight_to_1 ) ; <nl> + return oca_impl ( problem , w , prior , true , 0 , force_weight_to_1 , 0 ) ; <nl> } <nl> <nl> private : <nl> namespace dlib <nl> typename matrix_type : : type oca_impl ( <nl> const oca_problem < matrix_type > & problem , <nl> matrix_type & w , <nl> - const matrix_type prior , <nl> + const matrix_type & prior , <nl> bool have_prior , <nl> unsigned long num_nonnegative , <nl> - unsigned long force_weight_to_1 <nl> + unsigned long force_weight_to_1 , <nl> + const double lasso_lambda <nl> ) const <nl> { <nl> const unsigned long num_dims = problem . get_num_dimensions ( ) ; <nl> <nl> / / make sure requires clause is not broken <nl> DLIB_ASSERT ( problem . get_c ( ) > 0 & & <nl> - problem . get_num_dimensions ( ) > 0 , <nl> + problem . get_num_dimensions ( ) > 0 & & <nl> + 0 < = lasso_lambda & & lasso_lambda < 1 , <nl> " \ t scalar_type oca : : operator ( ) " <nl> < < " \ n \ t The oca_problem is invalid " <nl> < < " \ n \ t problem . get_c ( ) : " < < problem . get_c ( ) <nl> < < " \ n \ t problem . get_num_dimensions ( ) : " < < num_dims <nl> + < < " \ n \ t lasso_lambda : " < < lasso_lambda <nl> < < " \ n \ t this : " < < this <nl> ) ; <nl> + if ( have_prior ) <nl> + { <nl> + DLIB_ASSERT ( lasso_lambda = = 0 , " Solver doesn ' t support using a prior with lasso . " ) ; <nl> + DLIB_ASSERT ( num_nonnegative = = 0 , " Solver doesn ' t support using a prior with non - negative constraints . " ) ; <nl> + } <nl> + else if ( lasso_lambda ! = 0 ) <nl> + { <nl> + DLIB_ASSERT ( num_nonnegative = = 0 , " Solver doesn ' t support using lasso with non - negative constraints . " ) ; <nl> + } <nl> <nl> + const double ridge_lambda = 1 - lasso_lambda ; <nl> <nl> if ( num_nonnegative > num_dims ) <nl> num_nonnegative = num_dims ; <nl> namespace dlib <nl> typename sequence < vect_type > : : kernel_2a planes ; <nl> std : : vector < scalar_type > bs , miss_count ; <nl> <nl> - vect_type new_plane , alpha ; <nl> + vect_type new_plane , alpha , btemp ; <nl> <nl> w . set_size ( num_dims , 1 ) ; <nl> w = 0 ; <nl> namespace dlib <nl> scalar_type cp_obj = 0 ; <nl> <nl> matrix < scalar_type , 0 , 0 , mem_manager_type , layout_type > K , Ktmp ; <nl> + matrix < scalar_type , 0 , 1 , mem_manager_type , layout_type > lambda , d ; <nl> + if ( lasso_lambda ! = 0 ) <nl> + d . set_size ( num_dims ) ; <nl> + else <nl> + d . set_size ( num_nonnegative ) ; <nl> + d = lasso_lambda * ones_matrix ( d ) ; <nl> <nl> scalar_type R_lower_bound ; <nl> if ( problem . risk_has_lower_bound ( R_lower_bound ) ) <nl> namespace dlib <nl> else <nl> alpha = join_cols ( alpha , zeros_matrix < scalar_type > ( 1 , 1 ) ) ; <nl> <nl> - const scalar_type wnorm = 0 . 5 * trans ( w ) * w ; <nl> + const scalar_type wnorm = 0 . 5 * ridge_lambda * trans ( w ) * w + lasso_lambda * sum ( abs ( w ) ) ; <nl> const double prior_part = have_prior ? dot ( w , prior ) : 0 ; <nl> cur_obj = wnorm + C * cur_risk + prior_norm - prior_part ; <nl> <nl> namespace dlib <nl> <nl> <nl> / / solve the cutting plane subproblem for the next w . We solve it to an <nl> - / / accuracy that is related to how big the error gap is <nl> - scalar_type eps = std : : min < scalar_type > ( sub_eps , 0 . 1 * ( cur_obj - cp_obj ) ) ; <nl> + / / accuracy that is related to how big the error gap is . 
Also , we multiply <nl> + / / by ridge_lambda because the objective function for the QP we solve was <nl> + / / implicitly scaled by ridge_lambda . That is , we want to ask the QP <nl> + / / solver to solve the problem until the duality gap is 0 . 1 times smaller <nl> + / / than what it is now . So the factor of ridge_lambda is necessary to make <nl> + / / this happen . <nl> + scalar_type eps = std : : min < scalar_type > ( sub_eps , 0 . 1 * ridge_lambda * ( cur_obj - cp_obj ) ) ; <nl> / / just a sanity check <nl> if ( eps < 1e - 16 ) <nl> eps = 1e - 16 ; <nl> / / Note that we warm start this optimization by using the alpha from the last <nl> / / iteration as the starting point . <nl> - if ( num_nonnegative ! = 0 ) <nl> + if ( lasso_lambda ! = 0 ) <nl> + { <nl> + / / copy planes into a matrix so we can call solve_qp4_using_smo ( ) <nl> + matrix < scalar_type , 0 , 0 , mem_manager_type , layout_type > planes_mat ( num_dims , planes . size ( ) ) ; <nl> + for ( unsigned long i = 0 ; i < planes . size ( ) ; + + i ) <nl> + set_colm ( planes_mat , i ) = planes [ i ] ; <nl> + <nl> + btemp = ridge_lambda * mat ( bs ) - trans ( planes_mat ) * d ; <nl> + solve_qp4_using_smo ( planes_mat , K , btemp , d , alpha , lambda , eps , sub_max_iter , ( scalar_type ) ( 2 * lasso_lambda ) ) ; <nl> + } <nl> + else if ( num_nonnegative ! = 0 ) <nl> { <nl> / / copy planes into a matrix so we can call solve_qp4_using_smo ( ) <nl> matrix < scalar_type , 0 , 0 , mem_manager_type , layout_type > planes_mat ( num_nonnegative , planes . size ( ) ) ; <nl> for ( unsigned long i = 0 ; i < planes . size ( ) ; + + i ) <nl> set_colm ( planes_mat , i ) = colm ( planes [ i ] , 0 , num_nonnegative ) ; <nl> <nl> - solve_qp4_using_smo ( planes_mat , K , mat ( bs ) , alpha , eps , sub_max_iter ) ; <nl> + solve_qp4_using_smo ( planes_mat , K , mat ( bs ) , d , alpha , lambda , eps , sub_max_iter ) ; <nl> } <nl> else <nl> { <nl> namespace dlib <nl> w = - alpha ( 0 ) * planes [ 0 ] ; <nl> for ( unsigned long i = 1 ; i < planes . size ( ) ; + + i ) <nl> w - = alpha ( i ) * planes [ i ] ; <nl> - / / threshold the first num_nonnegative w elements if necessary . <nl> - if ( num_nonnegative ! = 0 ) <nl> + if ( lasso_lambda ! = 0 ) <nl> + w = ( lambda - d + w ) / ridge_lambda ; <nl> + else if ( num_nonnegative ! = 0 ) / / threshold the first num_nonnegative w elements if necessary . <nl> set_rowm ( w , range ( 0 , num_nonnegative - 1 ) ) = lowerbound ( rowm ( w , range ( 0 , num_nonnegative - 1 ) ) , 0 ) ; <nl> <nl> for ( long i = 0 ; i < alpha . size ( ) ; + + i ) <nl> namespace dlib <nl> <nl> / / Compute the lower bound on the true objective given to us by the cutting <nl> / / plane subproblem . <nl> - cp_obj = - 0 . 5 * trans ( w ) * w + trans ( alpha ) * mat ( bs ) ; <nl> + cp_obj = - 0 . 5 * ridge_lambda * trans ( w ) * w + trans ( alpha ) * mat ( bs ) ; <nl> if ( have_prior ) <nl> w + = prior ; <nl> <nl> mmm a / dlib / optimization / optimization_oca_abstract . h <nl> ppp b / dlib / optimization / optimization_oca_abstract . h <nl> namespace dlib <nl> Where prior is a user supplied vector and R ( w ) has the same <nl> interpretation as above . <nl> <nl> + Or it can use the elastic net regularizer : <nl> + Minimize : f ( w ) = = 0 . 5 * ( 1 - lasso_lambda ) * length_squared ( w ) + lasso_lambda * sum ( abs ( w ) ) + C * R ( w ) <nl> + <nl> + Where lasso_lambda is a number in the range [ 0 , 1 ) and controls <nl> + trade - off between doing L1 and L2 regularization . R ( w ) has the same <nl> + interpretation as above . 
<nl> + <nl> <nl> Note that the stopping condition must be provided by the user <nl> in the form of the optimization_status ( ) function . <nl> namespace dlib <nl> Where prior is a user supplied vector and R ( w ) has the same <nl> interpretation as above . <nl> <nl> + Or it can use the elastic net regularizer : <nl> + Minimize : f ( w ) = = 0 . 5 * ( 1 - lasso_lambda ) * length_squared ( w ) + lasso_lambda * sum ( abs ( w ) ) + C * R ( w ) <nl> + <nl> + Where lasso_lambda is a number in the range [ 0 , 1 ) and controls <nl> + trade - off between doing L1 and L2 regularization . R ( w ) has the same <nl> + interpretation as above . <nl> + <nl> <nl> For a detailed discussion you should consult the following papers <nl> from the Journal of Machine Learning Research : <nl> namespace dlib <nl> - returns the objective value at the solution # w <nl> ! * / <nl> <nl> + template < <nl> + typename matrix_type <nl> + > <nl> + typename matrix_type : : type solve_with_elastic_net ( <nl> + const oca_problem < matrix_type > & problem , <nl> + matrix_type & w , <nl> + scalar_type lasso_lambda , <nl> + unsigned long force_weight_to_1 = std : : numeric_limits < unsigned long > : : max ( ) <nl> + ) const ; <nl> + / * ! <nl> + requires <nl> + - problem . get_c ( ) > 0 <nl> + - problem . get_num_dimensions ( ) > 0 <nl> + - 0 < = lasso_lambda < 1 <nl> + ensures <nl> + - Solves the given oca problem and stores the solution in # w , but uses an <nl> + elastic net regularizer instead of the normal L2 regularizer . In <nl> + particular , this function solves : <nl> + Minimize : f ( w ) = = 0 . 5 * ( 1 - lasso_lambda ) * length_squared ( w ) + lasso_lambda * sum ( abs ( w ) ) + C * R ( w ) <nl> + - The optimization algorithm runs until problem . optimization_status ( ) <nl> + indicates it is time to stop . <nl> + - returns the objective value at the solution # w <nl> + - if ( force_weight_to_1 < problem . get_num_dimensions ( ) ) then <nl> + - The optimizer enforces the following constraints : <nl> + - # w ( force_weight_to_1 ) = = 1 <nl> + - for all i > force_weight_to_1 : <nl> + - # w ( i ) = = 0 <nl> + - That is , the element in the weight vector at the index indicated <nl> + by force_weight_to_1 will have a value of 1 upon completion of <nl> + this function , while all subsequent elements of w will have <nl> + values of 0 . <nl> + ! * / <nl> + <nl> void set_subproblem_epsilon ( <nl> double eps <nl> ) ; <nl> mmm a / dlib / optimization / optimization_solve_qp_using_smo . h <nl> ppp b / dlib / optimization / optimization_solve_qp_using_smo . h <nl> namespace dlib <nl> typename EXP1 , <nl> typename EXP2 , <nl> typename EXP3 , <nl> - typename T , long NR , long NC , typename MM , typename L <nl> + typename EXP4 , <nl> + typename T , long NR , long NC , typename MM , typename L , <nl> + long NR2 , long NC2 <nl> > <nl> unsigned long solve_qp4_using_smo ( <nl> const matrix_exp < EXP1 > & A , <nl> const matrix_exp < EXP2 > & Q , <nl> const matrix_exp < EXP3 > & b , <nl> + const matrix_exp < EXP4 > & d , <nl> matrix < T , NR , NC , MM , L > & alpha , <nl> + matrix < T , NR2 , NC2 , MM , L > & lambda , <nl> T eps , <nl> - unsigned long max_iter <nl> + unsigned long max_iter , <nl> + T max_lambda = std : : numeric_limits < T > : : infinity ( ) <nl> ) <nl> { <nl> / / make sure requires clause is not broken <nl> namespace dlib <nl> < < " \ n \ t eps : " < < eps <nl> < < " \ n \ t max_iter : " < < max_iter <nl> ) ; <nl> + DLIB_ASSERT ( is_col_vector ( d ) = = true & & <nl> + max_lambda > = 0 & & <nl> + d . size ( ) = = A . 
nr ( ) , <nl> + " \ t void solve_qp4_using_smo ( ) " <nl> + < < " \ n \ t Invalid arguments were given to this function " <nl> + < < " \ n \ t A . nr ( ) : " < < A . nr ( ) <nl> + < < " \ n \ t d . size ( ) : " < < d . size ( ) <nl> + < < " \ n \ t max_lambda : " < < max_lambda <nl> + ) ; <nl> <nl> const T C = sum ( alpha ) ; <nl> <nl> namespace dlib <nl> solve_qp_using_smo ( ) routine . <nl> * / <nl> <nl> + const bool d_is_zero = d = = zeros_matrix ( d ) ; <nl> + <nl> / / compute optimal lambda for current alpha <nl> - matrix < T , NR , 1 , MM , L > lambda = A * alpha ; <nl> - lambda = lowerbound ( lambda , 0 ) ; <nl> + if ( d_is_zero ) <nl> + lambda = A * alpha ; <nl> + else <nl> + lambda = A * alpha + d ; <nl> + lambda = clamp ( lambda , 0 , max_lambda ) ; <nl> <nl> / / Compute f ' ( alpha ) ( i . e . the gradient of f ( alpha ) with respect to alpha ) for the current alpha . <nl> matrix < T , NR , NC , MM , L > df = Q * alpha - b - trans ( A ) * lambda ; <nl> namespace dlib <nl> { <nl> / / compute optimal lambda and recheck the duality gap to make <nl> / / sure we have really converged . <nl> - lambda = A * alpha ; <nl> - lambda = lowerbound ( lambda , 0 ) ; <nl> + if ( d_is_zero ) <nl> + lambda = A * alpha ; <nl> + else <nl> + lambda = A * alpha + d ; <nl> + lambda = clamp ( lambda , 0 , max_lambda ) ; <nl> df = Q * alpha - b - trans ( A ) * lambda ; <nl> <nl> if ( trans ( alpha ) * df - C * min ( df ) < eps ) <nl> namespace dlib <nl> if ( ( iter % 300 ) = = 299 ) <nl> { <nl> / / compute the optimal lambda for the current alpha <nl> - lambda = A * alpha ; <nl> - lambda = lowerbound ( lambda , 0 ) ; <nl> + if ( d_is_zero ) <nl> + lambda = A * alpha ; <nl> + else <nl> + lambda = A * alpha + d ; <nl> + lambda = clamp ( lambda , 0 , max_lambda ) ; <nl> <nl> / / Perform this form of the update every so often because doing so can help <nl> / / avoid the buildup of numerical errors you get with the alternate update <nl> mmm a / dlib / optimization / optimization_solve_qp_using_smo_abstract . h <nl> ppp b / dlib / optimization / optimization_solve_qp_using_smo_abstract . h <nl> namespace dlib <nl> typename EXP1 , <nl> typename EXP2 , <nl> typename EXP3 , <nl> - typename T , long NR , long NC , typename MM , typename L <nl> + typename T , long NR , long NC , typename MM , typename L , <nl> + long NR2 , long NC2 <nl> > <nl> unsigned long solve_qp4_using_smo ( <nl> const matrix_exp < EXP1 > & A , <nl> const matrix_exp < EXP2 > & Q , <nl> const matrix_exp < EXP3 > & b , <nl> + const matrix_exp < EXP4 > & d , <nl> matrix < T , NR , NC , MM , L > & alpha , <nl> + matrix < T , NR2 , NC2 , MM , L > & lambda , <nl> T eps , <nl> - unsigned long max_iter <nl> + unsigned long max_iter , <nl> + T max_lambda = std : : numeric_limits < T > : : infinity ( ) <nl> ) ; <nl> / * ! <nl> requires <nl> - A . nc ( ) = = alpha . size ( ) <nl> - Q . nr ( ) = = Q . nc ( ) <nl> - is_col_vector ( b ) = = true <nl> + - is_col_vector ( d ) = = true <nl> - is_col_vector ( alpha ) = = true <nl> - b . size ( ) = = alpha . size ( ) = = Q . nr ( ) <nl> + - d . size ( ) = = A . nr ( ) <nl> - alpha . size ( ) > 0 <nl> - min ( alpha ) > = 0 <nl> - eps > 0 <nl> - max_iter > 0 <nl> + - max_lambda > = 0 <nl> ensures <nl> - Let C = = sum ( alpha ) ( i . e . C is the sum of the alpha values you <nl> supply to this function ) <nl> - This function solves the following quadratic program : <nl> Minimize : f ( alpha , lambda ) = = 0 . 5 * trans ( alpha ) * Q * alpha - trans ( alpha ) * b + <nl> - 0 . 
5 * trans ( lambda ) * lambda - trans ( lambda ) * A * alpha <nl> + 0 . 5 * trans ( lambda ) * lambda - trans ( lambda ) * A * alpha - trans ( lambda ) * d <nl> subject to the following constraints : <nl> - sum ( alpha ) = = C ( i . e . the sum of alpha values doesn ' t change ) <nl> - min ( alpha ) > = 0 ( i . e . all alpha values are nonnegative ) <nl> - min ( lambda ) > = 0 ( i . e . all lambda values are nonnegative ) <nl> + - max ( lambda ) < = max_lambda ( i . e . all lambda values are less than max_lambda ) <nl> Where f is convex . This means that Q should be positive - semidefinite . <nl> - - The solution to the above QP will be stored in # alpha . The optimal <nl> - lambda is not output since its value is given by the following expression : <nl> - lowerbound ( A * alpha , 0 ) <nl> + - If you don ' t want an upper limit on lambda then max_lambda can be set to <nl> + infinity . <nl> + - The solution to the above QP will be stored in # alpha and # lambda . <nl> - This function uses a simple implementation of the sequential minimal <nl> optimization algorithm . It starts the algorithm with the given alpha <nl> and it works on the problem until the duality gap ( i . e . how far away <nl> we are from the optimum solution ) is less than eps . So eps controls <nl> how accurate the solution is and smaller values result in better solutions . <nl> + The initial value of lambda is ignored since the optimal lambda can be <nl> + obtained via a simple closed form expression given alpha . <nl> - At most max_iter iterations of optimization will be performed . <nl> - returns the number of iterations performed . If this method fails to <nl> converge to eps accuracy then the number returned will be max_iter + 1 . <nl> mmm a / dlib / server / server_iostream . h <nl> ppp b / dlib / server / server_iostream . h <nl> namespace dlib <nl> ) <nl> { <nl> bool my_fault = true ; <nl> - uint64 this_con_id ; <nl> + uint64 this_con_id = 0 ; <nl> try <nl> { <nl> sockstreambuf buf ( & con ) ; <nl> mmm a / dlib / smart_pointers / shared_ptr . h <nl> ppp b / dlib / smart_pointers / shared_ptr . h <nl> <nl> <nl> / / Don ' t warn about the use of std : : auto_ptr in this file . There is a pragma at the end of <nl> / / this file that re - enables the warning . <nl> - # if defined ( __GNUC__ ) & & __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 ) | | ( __GNUC__ > 4 ) ) <nl> # pragma GCC diagnostic push <nl> # pragma GCC diagnostic ignored " - Wdeprecated - declarations " <nl> # endif <nl> namespace dlib <nl> <nl> } <nl> <nl> - # if defined ( __GNUC__ ) & & __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 ) | | ( __GNUC__ > 4 ) ) <nl> # pragma GCC diagnostic pop <nl> # endif <nl> <nl> mmm a / dlib / smart_pointers / shared_ptr_thread_safe . h <nl> ppp b / dlib / smart_pointers / shared_ptr_thread_safe . h <nl> <nl> <nl> / / Don ' t warn about the use of std : : auto_ptr in this file . There is a pragma at the end of <nl> / / this file that re - enables the warning . 
<nl> - # if defined ( __GNUC__ ) & & __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 ) | | ( __GNUC__ > 4 ) ) <nl> # pragma GCC diagnostic push <nl> # pragma GCC diagnostic ignored " - Wdeprecated - declarations " <nl> # endif <nl> namespace dlib <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> - # if defined ( __GNUC__ ) & & __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 ) | | ( __GNUC__ > 4 ) ) <nl> # pragma GCC diagnostic pop <nl> # endif <nl> <nl> mmm a / dlib / test / least_squares . cpp <nl> ppp b / dlib / test / least_squares . cpp <nl> namespace <nl> <nl> matrix < double , 2 , 1 > rosen_residual_derivative_double ( int i , const matrix < double , 2 , 1 > & m ) <nl> { return rosen_residual_derivative ( i , m ) ; } <nl> + / * <nl> matrix < float , 2 , 1 > rosen_residual_derivative_float ( int i , const matrix < float , 2 , 1 > & m ) <nl> { return rosen_residual_derivative ( i , m ) ; } <nl> + * / <nl> <nl> double rosen_big_residual_double ( int i , const matrix < double , 2 , 1 > & m ) <nl> { return rosen_big_residual ( i , m ) ; } <nl> mmm a / dlib / test / oca . cpp <nl> ppp b / dlib / test / oca . cpp <nl> namespace <nl> dlog < < LINFO < < " error : " < < max ( abs ( w - true_w ) ) ; <nl> DLIB_TEST ( max ( abs ( w - true_w ) ) < 1e - 10 ) ; <nl> <nl> + solver . solve_with_elastic_net ( make_oca_problem_c_svm < w_type > ( 2 . 0 , 3 . 0 , mat ( x ) , mat ( y ) , false , 1e - 12 , 40 , max_index_plus_one ( x ) ) , w , 0 . 5 ) ; <nl> + dlog < < LINFO < < trans ( w ) ; <nl> + true_w = - 0 . 5 , 0 . 5 , 0 ; <nl> + dlog < < LINFO < < " error : " < < max ( abs ( w - true_w ) ) ; <nl> + DLIB_TEST ( max ( abs ( w - true_w ) ) < 1e - 10 ) ; <nl> + <nl> + print_spinner ( ) ; <nl> + <nl> w_type prior = true_w ; <nl> solver ( make_oca_problem_c_svm < w_type > ( 20 . 0 , 30 . 0 , mat ( x ) , mat ( y ) , false , 1e - 12 , 40 , max_index_plus_one ( x ) ) , w , prior ) ; <nl> dlog < < LINFO < < trans ( w ) ; <nl> mmm a / dlib / test / opt_qp_solver . cpp <nl> ppp b / dlib / test / opt_qp_solver . 
cpp <nl> namespace <nl> <nl> const double C = 2 ; <nl> <nl> - matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) ; <nl> + matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) , d ( 3 ) , lambda ; <nl> alpha = C / 2 , C / 2 ; <nl> + d = 0 ; <nl> <nl> - solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , alpha , 1e - 9 , 800 ) ; <nl> + solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , d , alpha , lambda , 1e - 9 , 800 ) ; <nl> matrix < double , 0 , 1 > w = lowerbound ( - A * alpha , 0 ) ; <nl> <nl> dlog < < LINFO < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * " ; <nl> namespace <nl> <nl> const double C = 2 ; <nl> <nl> - matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) ; <nl> + matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) , d ( 3 ) , lambda ; <nl> alpha = C / 2 , C / 2 ; <nl> + d = 0 ; <nl> <nl> - solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , alpha , 1e - 9 , 800 ) ; <nl> + solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , d , alpha , lambda , 1e - 9 , 800 ) ; <nl> matrix < double , 0 , 1 > w = lowerbound ( - A * alpha , 0 ) ; <nl> <nl> dlog < < LINFO < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * " ; <nl> namespace <nl> <nl> const double C = 2 ; <nl> <nl> - matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) ; <nl> + matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) , d ( 3 ) , lambda ; <nl> alpha = C / 2 , C / 2 ; <nl> + d = 0 ; <nl> <nl> - solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , alpha , 1e - 9 , 800 ) ; <nl> + solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , d , alpha , lambda , 1e - 9 , 800 ) ; <nl> matrix < double , 0 , 1 > w = lowerbound ( - A * alpha , 0 ) ; <nl> <nl> dlog < < LINFO < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * " ; <nl> namespace <nl> <nl> const double C = 2 ; <nl> <nl> - matrix < double , 0 , 1 > alpha ( 3 ) , true_alpha ( 3 ) ; <nl> + matrix < double , 0 , 1 > alpha ( 3 ) , true_alpha ( 3 ) , d ( 3 ) , lambda ; <nl> alpha = C / 2 , C / 2 , 0 ; <nl> + d = 0 ; <nl> <nl> - solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , alpha , 1e - 9 , 800 ) ; <nl> + solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , d , alpha , lambda , 1e - 9 , 800 ) ; <nl> matrix < double , 0 , 1 > w = lowerbound ( - A * alpha , 0 ) ; <nl> <nl> <nl> namespace <nl> <nl> const double C = 2 ; <nl> <nl> - matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) ; <nl> + matrix < double , 0 , 1 > alpha ( 2 ) , true_alpha ( 2 ) , d ( 3 ) , lambda ; <nl> alpha = C / 2 , C / 2 ; <nl> <nl> - solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , alpha , 1e - 9 , 800 ) ; <nl> + solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , d , alpha , lambda , 1e - 9 , 800 ) ; <nl> matrix < double , 0 , 1 > w = lowerbound ( - A * alpha , 0 ) ; <nl> <nl> dlog < < LINFO < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * " ; <nl> namespace <nl> <nl> const double C = 2 ; <nl> <nl> - matrix < double , 0 , 1 > alpha ( 3 ) , true_alpha ( 3 ) ; <nl> + matrix < double , 0 , 1 > alpha ( 3 ) , true_alpha ( 3 ) , d ( 3 ) , lambda ; <nl> alpha = C / 2 , C / 2 , 0 ; <nl> <nl> - solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , alpha , 1e - 9 , 800 ) ; <nl> + solve_qp4_using_smo ( A , tmp ( trans ( A ) * A ) , b , d , alpha , lambda , 1e - 9 , 800 ) ; 
<nl> matrix < double , 0 , 1 > w = lowerbound ( - A * alpha , 0 ) ; <nl> <nl> dlog < < LINFO < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * " ; <nl> namespace <nl> <nl> const double C = 2 ; <nl> <nl> - matrix < double , 0 , 1 > alpha ( 3 ) , true_alpha ( 3 ) ; <nl> + matrix < double , 0 , 1 > alpha ( 3 ) , true_alpha ( 3 ) , d ( 3 ) , lambda ; <nl> alpha = C / 2 , C / 2 , 0 ; <nl> + d = 0 ; <nl> <nl> - solve_qp4_using_smo ( A , Q , b , alpha , 1e - 9 , 800 ) ; <nl> + solve_qp4_using_smo ( A , Q , b , d , alpha , lambda , 1e - 9 , 800 ) ; <nl> <nl> dlog < < LINFO < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * " ; <nl> <nl> mmm a / dlib / test / smart_pointers . cpp <nl> ppp b / dlib / test / smart_pointers . cpp <nl> <nl> <nl> # include " tester . h " <nl> <nl> + / / Don ' t warn about auto_ptr <nl> + # if defined ( __GNUC__ ) & & ( ( __GNUC__ > = 4 & & __GNUC_MINOR__ > = 6 ) | | ( __GNUC__ > 4 ) ) <nl> + # pragma GCC diagnostic ignored " - Wdeprecated - declarations " <nl> + # endif <nl> + <nl> namespace <nl> { <nl> bool used_array_delete ; <nl> mmm a / dlib / threads / threads_kernel_shared . h <nl> ppp b / dlib / threads / threads_kernel_shared . h <nl> extern " C " <nl> extern int USER_ERROR__missing_dlib_all_source_cpp_file__OR__inconsistent_use_of_DEBUG_or_ENABLE_ASSERTS_preprocessor_directives_ ; <nl> inline int dlib_check_consistent_assert_usage ( ) { USER_ERROR__missing_dlib_all_source_cpp_file__OR__inconsistent_use_of_DEBUG_or_ENABLE_ASSERTS_preprocessor_directives_ = 0 ; return 0 ; } <nl> # endif <nl> - const int dlib_check_assert_helper_variable = dlib_check_consistent_assert_usage ( ) ; <nl> + const int DLIB_NO_WARN_UNUSED dlib_check_assert_helper_variable = dlib_check_consistent_assert_usage ( ) ; <nl> } <nl> <nl> <nl> mmm a / docs / docs / optimization . xml <nl> ppp b / docs / docs / optimization . xml <nl> subject to the following constraint : <nl> This function solves the following quadratic program : <nl> < pre > <nl> Minimize : f ( alpha , lambda ) = = 0 . 5 * trans ( alpha ) * Q * alpha - trans ( alpha ) * b + <nl> - 0 . 5 * trans ( lambda ) * lambda - trans ( lambda ) * A * alpha <nl> + 0 . 5 * trans ( lambda ) * lambda - trans ( lambda ) * A * alpha - trans ( lambda ) * d <nl> subject to the following constraints : <nl> sum ( alpha ) = = C <nl> min ( alpha ) > = 0 <nl> min ( lambda ) > = 0 <nl> + max ( lambda ) < = max_lambda <nl> Where f is convex . This means that Q should be positive - semidefinite . <nl> < / pre > <nl> <nl> Or it can alternatively solve : <nl> <nl> Where prior is a user supplied vector and R ( w ) has the same <nl> interpretation as above . <nl> + <nl> + Or it can use the elastic net regularizer : <nl> + Minimize : f ( w ) = = 0 . 5 * ( 1 - lasso_lambda ) * length_squared ( w ) + lasso_lambda * sum ( abs ( w ) ) + C * R ( w ) <nl> + <nl> + Where lasso_lambda is a number in the range [ 0 , 1 ) and controls <nl> + trade - off between doing L1 and L2 regularization . R ( w ) has the same <nl> + interpretation as above . <nl> < / pre > <nl> < br / > <nl> < br / > <nl>
|
merged
|
davisking/dlib
|
523489e9b93e573477b079ee7c55f8c991f6ae48
|
2016-02-22T22:13:27Z
|
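The dlib row above widens the public signature of solve_qp4_using_smo() with a new linear term d, an output lambda, and an optional max_lambda cap. Below is a minimal, self-contained sketch of calling the updated signature; the sizes and entries of A and b are made up for illustration, while the call shape and the requires-clause constraints (d.size() == A.nr(), b.size() == alpha.size() == Q.nr(), min(alpha) >= 0) come from the diff itself.

#include <limits>
#include <dlib/matrix.h>
#include <dlib/optimization.h>

int main()
{
    using namespace dlib;

    // A is nr x nc with nc == alpha.size(); the entries here are arbitrary.
    matrix<double,3,2> A;
    A = 1, 2,
        3, 4,
        5, 6;

    matrix<double,0,1> b(2);
    b = 1, 2;

    // Q must be positive-semidefinite; trans(A)*A always is.
    matrix<double> Q = trans(A)*A;

    const double C = 2;
    matrix<double,0,1> alpha(2), d(3), lambda;
    alpha = C/2, C/2;   // sum(alpha) == C is preserved by the solver
    d = 0;              // the new linear term on lambda (d.size() == A.nr())

    // lambda is now an output; the last argument caps its entries (max_lambda).
    solve_qp4_using_smo(A, Q, b, d, alpha, lambda, 1e-9, 800,
                        std::numeric_limits<double>::infinity());

    // Same recovery of w that the opt_qp_solver tests in the diff use.
    matrix<double,0,1> w = lowerbound(-A*alpha, 0);
    return 0;
}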
mmm a / hphp / runtime / vm / jit / guard - relaxation . cpp <nl> ppp b / hphp / runtime / vm / jit / guard - relaxation . cpp <nl> bool shouldHHIRRelaxGuards ( ) { <nl> assert ( ! ( RuntimeOption : : EvalHHIRRelaxGuards & & <nl> RuntimeOption : : EvalHHIRConstrictGuards ) ) ; <nl> return RuntimeOption : : EvalHHIRRelaxGuards & & <nl> - ( RuntimeOption : : EvalJitRegionSelector = = " tracelet " | | <nl> - RuntimeOption : : EvalJitRegionSelector = = " method " | | <nl> - mcg - > tx ( ) . mode ( ) = = TransKind : : Optimize ) ; <nl> + / / TODO ( # 5792564 ) : Guard relaxation doesn ' t work with loops . <nl> + / / TODO ( # 6599498 ) : Guard relaxation is broken in wholecfg mode . <nl> + ( mcg - > tx ( ) . mode ( ) ! = TransKind : : Optimize | | <nl> + RuntimeOption : : EvalJitPGORegionSelector = = " hottrace " ) ; <nl> } <nl> <nl> / * For each possible dest type , determine if its type might relax . * / <nl> mmm a / hphp / runtime / vm / jit / opt . cpp <nl> ppp b / hphp / runtime / vm / jit / opt . cpp <nl> void optimize ( IRUnit & unit , IRBuilder & irBuilder , TransKind kind ) { <nl> assertx ( checkEverything ( unit ) ) ; <nl> <nl> auto const hasLoop = RuntimeOption : : EvalJitLoops & & cfgHasLoop ( unit ) ; <nl> - auto const func = unit . entry ( ) - > front ( ) . marker ( ) . func ( ) ; <nl> - auto const regionMode = pgoRegionMode ( * func ) ; <nl> - auto const traceMode = kind ! = TransKind : : Optimize | | <nl> - regionMode = = PGORegionMode : : Hottrace ; <nl> - <nl> - / / TODO ( # 5792564 ) : Guard relaxation doesn ' t work with loops . <nl> - / / TODO ( # 6599498 ) : Guard relaxation is broken in wholecfg mode . <nl> - if ( shouldHHIRRelaxGuards ( ) & & ! hasLoop & & traceMode ) { <nl> + <nl> + if ( shouldHHIRRelaxGuards ( ) & & ! hasLoop ) { <nl> Timer _t ( Timer : : optimize_relaxGuards ) ; <nl> - const bool simple = kind = = TransKind : : Profile & & <nl> - ( RuntimeOption : : EvalJitRegionSelector = = " tracelet " | | <nl> - RuntimeOption : : EvalJitRegionSelector = = " method " ) ; <nl> + const bool simple = kind = = TransKind : : Profile ; <nl> RelaxGuardsFlags flags = ( RelaxGuardsFlags ) <nl> ( RelaxReflow | ( simple ? RelaxSimple : RelaxNormal ) ) ; <nl> auto changed = relaxGuards ( unit , * irBuilder . guards ( ) , flags ) ; <nl>
|
Stop constraining values for Optimize translations ( except with hottrace )
|
facebook/hhvm
|
f973fb6daca9a9f3b01dc078ce0be9576b4bff78
|
2015-06-26T16:02:11Z
|
mmm a / m4 / acx_pthread . m4 <nl> ppp b / m4 / acx_pthread . m4 <nl> if test " x $ acx_pthread_ok " = xyes ; then <nl> acx_pthread_ok = no <nl> fi <nl> <nl> + AC_MSG_CHECKING ( [ whether what we have so far is sufficient with - nostdlib ] ) <nl> + CFLAGS = " - nostdlib $ CFLAGS " <nl> + # we need c with nostdlib <nl> + LIBS = " $ LIBS - lc " <nl> + AC_TRY_LINK ( [ # include < pthread . h > ] , <nl> + [ pthread_t th ; pthread_join ( th , 0 ) ; <nl> + pthread_attr_init ( 0 ) ; pthread_cleanup_push ( 0 , 0 ) ; <nl> + pthread_create ( 0 , 0 , 0 , 0 ) ; pthread_cleanup_pop ( 0 ) ; ] , <nl> + [ done = yes ] , [ done = no ] ) <nl> + <nl> + if test " x $ done " = xyes ; then <nl> + AC_MSG_RESULT ( [ yes ] ) <nl> + else <nl> + AC_MSG_RESULT ( [ no ] ) <nl> + fi <nl> + <nl> + if test x " $ done " = xno ; then <nl> + AC_MSG_CHECKING ( [ whether - lpthread saves the day ] ) <nl> + LIBS = " - lpthread $ LIBS " <nl> + AC_TRY_LINK ( [ # include < pthread . h > ] , <nl> + [ pthread_t th ; pthread_join ( th , 0 ) ; <nl> + pthread_attr_init ( 0 ) ; pthread_cleanup_push ( 0 , 0 ) ; <nl> + pthread_create ( 0 , 0 , 0 , 0 ) ; pthread_cleanup_pop ( 0 ) ; ] , <nl> + [ done = yes ] , [ done = no ] ) <nl> + <nl> + if test " x $ done " = xyes ; then <nl> + AC_MSG_RESULT ( [ yes ] ) <nl> + PTHREAD_LIBS = " $ PTHREAD_LIBS - lpthread " <nl> + else <nl> + AC_MSG_RESULT ( [ no ] ) <nl> + AC_MSG_WARN ( [ Impossible to determine how to use pthreads with shared libraries and - nostdlib ] ) <nl> + fi <nl> + fi <nl> + <nl> CFLAGS = " $ save_CFLAGS " <nl> LIBS = " $ save_LIBS " <nl> CC = " $ save_CC " <nl>
|
Fix corner case in acx_pthread . m4 to work with - nostdlib , patch from Kacper Kowalik .
|
protocolbuffers/protobuf
|
d4a57f1557daba389d4af4d99afdc68a9fd5278a
|
2011-01-13T06:44:11Z
|
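The acx_pthread.m4 hunk above adds two AC_TRY_LINK probes: it first checks whether the pthread calls link with -nostdlib plus -lc, and only if that fails appends -lpthread. For clarity, here is roughly the standalone translation unit autoconf generates from that probe body. It is only link-checked, never executed, which is why the null arguments and the uninitialized th are harmless.

#include <pthread.h>

int main()
{
    pthread_t th;                 // intentionally uninitialized: link test only
    pthread_join(th, 0);
    pthread_attr_init(0);
    pthread_cleanup_push(0, 0);
    pthread_create(0, 0, 0, 0);
    pthread_cleanup_pop(0);
    return 0;
}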
mmm a / core / undo_redo . cpp <nl> ppp b / core / undo_redo . cpp <nl> bool UndoRedo : : redo ( ) { <nl> <nl> _process_operation_list ( actions . write [ current_action ] . do_ops . front ( ) ) ; <nl> version + + ; <nl> + emit_signal ( " version_changed " ) ; <nl> <nl> return true ; <nl> } <nl> bool UndoRedo : : undo ( ) { <nl> _process_operation_list ( actions . write [ current_action ] . undo_ops . front ( ) ) ; <nl> current_action - - ; <nl> version - - ; <nl> + emit_signal ( " version_changed " ) ; <nl> + <nl> return true ; <nl> } <nl> <nl> void UndoRedo : : clear_history ( bool p_increase_version ) { <nl> while ( actions . size ( ) ) <nl> _pop_history_tail ( ) ; <nl> <nl> - if ( p_increase_version ) <nl> + if ( p_increase_version ) { <nl> version + + ; <nl> + emit_signal ( " version_changed " ) ; <nl> + } <nl> } <nl> <nl> String UndoRedo : : get_current_action_name ( ) const { <nl> <nl> ERR_FAIL_COND_V ( action_level > 0 , " " ) ; <nl> if ( current_action < 0 ) <nl> - return " " ; / / nothing to redo <nl> + return " " ; <nl> return actions [ current_action ] . name ; <nl> } <nl> <nl> + bool UndoRedo : : has_undo ( ) { <nl> + <nl> + return current_action > = 0 ; <nl> + } <nl> + <nl> + bool UndoRedo : : has_redo ( ) { <nl> + <nl> + return ( current_action + 1 ) < actions . size ( ) ; <nl> + } <nl> + <nl> uint64_t UndoRedo : : get_version ( ) const { <nl> <nl> return version ; <nl> void UndoRedo : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " add_undo_reference " , " object " ) , & UndoRedo : : add_undo_reference ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " clear_history " , " increase_version " ) , & UndoRedo : : clear_history , DEFVAL ( true ) ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_current_action_name " ) , & UndoRedo : : get_current_action_name ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " has_undo " ) , & UndoRedo : : has_undo ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " has_redo " ) , & UndoRedo : : has_redo ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_version " ) , & UndoRedo : : get_version ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " redo " ) , & UndoRedo : : redo ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " undo " ) , & UndoRedo : : undo ) ; <nl> <nl> + ADD_SIGNAL ( MethodInfo ( " version_changed " ) ) ; <nl> + <nl> BIND_ENUM_CONSTANT ( MERGE_DISABLE ) ; <nl> BIND_ENUM_CONSTANT ( MERGE_ENDS ) ; <nl> BIND_ENUM_CONSTANT ( MERGE_ALL ) ; <nl> mmm a / core / undo_redo . h <nl> ppp b / core / undo_redo . h <nl> class UndoRedo : public Object { <nl> String get_current_action_name ( ) const ; <nl> void clear_history ( bool p_increase_version = true ) ; <nl> <nl> + bool has_undo ( ) ; <nl> + bool has_redo ( ) ; <nl> + <nl> uint64_t get_version ( ) const ; <nl> <nl> void set_commit_notify_callback ( CommitNotifyCallback p_callback , void * p_ud ) ; <nl> mmm a / doc / classes / UndoRedo . xml <nl> ppp b / doc / classes / UndoRedo . xml <nl> <nl> This is useful mostly to check if something changed from a saved version . <nl> < / description > <nl> < / method > <nl> + < method name = " has_redo " > <nl> + < return type = " bool " > <nl> + < / return > <nl> + < description > <nl> + Returns [ code ] true [ / code ] if an ' redo ' action is available . <nl> + < / description > <nl> + < / method > <nl> + < method name = " has_undo " > <nl> + < return type = " bool " > <nl> + < / return > <nl> + < description > <nl> + Returns [ code ] true [ / code ] if an ' undo ' action is available . 
<nl> + < / description > <nl> + < / method > <nl> < method name = " is_commiting_action " qualifiers = " const " > <nl> < return type = " bool " > <nl> < / return > <nl> <nl> < / description > <nl> < / method > <nl> < / methods > <nl> + < signals > <nl> + < signal name = " version_changed " > <nl> + < description > <nl> + Called when [ method undo ] or [ method redo ] was called . <nl> + < / description > <nl> + < / signal > <nl> + < / signals > <nl> < constants > <nl> < constant name = " MERGE_DISABLE " value = " 0 " enum = " MergeMode " > <nl> Makes [ code ] do [ / code ] / [ code ] undo [ / code ] operations stay in separate actions . <nl>
|
UndoRedo add version changed signal
|
godotengine/godot
|
8f23f4b44e043c6a7f69e96369e6c26fe9fd205b
|
2019-06-26T14:32:34Z
|
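The UndoRedo patch above does three related things: it moves a version counter in redo()/undo()/clear_history(), exposes has_undo()/has_redo(), and emits a new version_changed signal whenever the version moves. A rough, self-contained sketch of that pattern follows; the History struct and the std::function callback are illustrative stand-ins, not Godot's Object/signal machinery, and the current_action bookkeeping is simplified.

#include <functional>
#include <vector>

struct History {
    std::vector<int> actions;               // stand-in for UndoRedo::actions
    int current_action = -1;
    unsigned long long version = 1;
    std::function<void()> version_changed;  // stand-in for the new signal

    bool has_undo() const { return current_action >= 0; }
    bool has_redo() const { return current_action + 1 < (int)actions.size(); }

    bool undo() {
        if (!has_undo()) return false;
        --current_action;
        --version;
        if (version_changed) version_changed();  // emit_signal("version_changed")
        return true;
    }
    bool redo() {
        if (!has_redo()) return false;
        ++current_action;
        ++version;
        if (version_changed) version_changed();
        return true;
    }
    void clear_history(bool increase_version = true) {
        actions.clear();
        current_action = -1;
        if (increase_version) {
            ++version;
            if (version_changed) version_changed();
        }
    }
};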
mmm a / src / core / console_user_server / include / receiver . hpp <nl> ppp b / src / core / console_user_server / include / receiver . hpp <nl> class receiver final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> } <nl> return ; <nl> } catch ( std : : exception & e ) { <nl> - logger : : get_logger ( ) - > error ( " Received data is corrupted : { 0 } " , e . what ( ) ) ; <nl> + logger : : get_logger ( ) - > error ( " received data is corrupted " ) ; <nl> } <nl> } <nl> } ) ; <nl> mmm a / src / core / grabber / include / receiver . hpp <nl> ppp b / src / core / grabber / include / receiver . hpp <nl> class receiver final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> } <nl> return ; <nl> } catch ( std : : exception & e ) { <nl> - logger : : get_logger ( ) - > error ( " Received data is corrupted : { 0 } " , e . what ( ) ) ; <nl> + logger : : get_logger ( ) - > error ( " received data is corrupted " ) ; <nl> } <nl> } <nl> } ) ; <nl>
|
update log messages
|
pqrs-org/Karabiner-Elements
|
48aad015b14c9f7558486fb0a37d096a24b00b21
|
2019-05-19T15:16:04Z
|
mmm a / arangod / Aql / AqlItemBlock . cpp <nl> ppp b / arangod / Aql / AqlItemBlock . cpp <nl> void AqlItemBlock : : destroy ( ) noexcept { <nl> / / arbitrary types . so we put a global try . . . catch here to be on <nl> / / the safe side <nl> try { <nl> + _shadowRowIndexes . clear ( ) ; <nl> + <nl> if ( _valueCount . empty ( ) ) { <nl> eraseAll ( ) ; <nl> rescale ( 0 , 0 ) ; <nl> void AqlItemBlock : : shrink ( size_t nrItems ) { <nl> a . erase ( ) ; <nl> } <nl> <nl> + / / remove the shadow row indices pointing to now invalid rows . <nl> + _shadowRowIndexes . erase ( _shadowRowIndexes . lower_bound ( nrItems ) , <nl> + _shadowRowIndexes . end ( ) ) ; <nl> + <nl> / / adjust the size of the block <nl> _nrItems = nrItems ; <nl> } <nl> mmm a / arangod / Aql / AqlItemBlockManager . cpp <nl> ppp b / arangod / Aql / AqlItemBlockManager . cpp <nl> SharedAqlItemBlockPtr AqlItemBlockManager : : requestBlock ( size_t nrItems , Register <nl> TRI_ASSERT ( block - > getNrRegs ( ) = = nrRegs ) ; <nl> TRI_ASSERT ( block - > numEntries ( ) = = targetSize ) ; <nl> TRI_ASSERT ( block - > getRefCount ( ) = = 0 ) ; <nl> + TRI_ASSERT ( block - > hasShadowRows ( ) = = false ) ; <nl> <nl> return SharedAqlItemBlockPtr { block } ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . b0206d2a512 <nl> mmm / dev / null <nl> ppp b / tests / js / server / aql / aql - shadowrow - block - return - regression . js <nl> <nl> + / * jshint globalstrict : false , strict : false , maxlen : 500 * / <nl> + / * global assertEqual , AQL_EXECUTE , assertTrue , fail * / <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tests for regression returning blocks to the manager <nl> + / / / <nl> + / / / @ file <nl> + / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2010 - 2014 triagens GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License . <nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Markus Pfeiffer <nl> + / / / @ author Copyright 2020 , triAGENS GmbH , Cologne , Germany <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + var jsunity = require ( " jsunity " ) ; <nl> + var internal = require ( " internal " ) ; <nl> + var errors = internal . errors ; <nl> + var db = require ( " @ arangodb " ) . 
db , indexId ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief test suite <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + function blockReturnRegressionSuite ( ) { <nl> + return { <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief set up <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + setUpAll : function ( ) { <nl> + } , <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tear down <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + tearDownAll : function ( ) { <nl> + } , <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief test WITHIN_RECTANGLE as result <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + testBlockReuseOkWithSubquery : function ( ) { <nl> + const query = ` FOR c in 1 . . 1000 LET su = ( FOR d in 1 . . 1000 SORT d RETURN d ) RETURN LENGTH ( su ) ` ; <nl> + var actual = db . _query ( query ) ; <nl> + assertEqual ( actual . toArray ( ) . length , 1000 ) ; <nl> + } , <nl> + } ; <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief executes the test suite <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + jsunity . run ( blockReturnRegressionSuite ) ; <nl> + <nl> + return jsunity . done ( ) ; <nl> + <nl>
|
Fix returning blocks with shadow rows to the AqlItemBlockManager ( )
|
arangodb/arangodb
|
c936b9a673f0247f487668c01a5d289bf0f3d096
|
2020-01-10T15:44:22Z
|
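Besides clearing the indexes in destroy(), the AqlItemBlock::shrink() change above relies on one ordered-container idiom: erasing the half-open range [lower_bound(nrItems), end()) drops exactly the shadow-row indexes that would point past the new block size. A minimal illustration, assuming a std::set-like container as the member's lower_bound()/range-erase calls suggest; the names below are hypothetical.

#include <cassert>
#include <cstddef>
#include <set>

int main()
{
    std::set<std::size_t> shadowRowIndexes = {1, 4, 7, 9};

    const std::size_t nrItems = 5;   // block is being shrunk to rows 0..4

    // Drops 7 and 9; 1 and 4 still reference valid rows and are kept.
    shadowRowIndexes.erase(shadowRowIndexes.lower_bound(nrItems),
                           shadowRowIndexes.end());

    assert(shadowRowIndexes.size() == 2);
    assert(shadowRowIndexes.count(7) == 0 && shadowRowIndexes.count(9) == 0);
    return 0;
}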
mmm a / THNN . h <nl> ppp b / THNN . h <nl> typedef long THIndex_t ; <nl> typedef int THInteger_t ; <nl> typedef void THNNState ; <nl> <nl> + # define THNN_resizeAs_indices ( I1 , I2 ) \ <nl> + THLongStorage * size2 = THIndexTensor_ ( newSizeOf ) ( I2 ) ; \ <nl> + if ( ! THTensor_ ( isSize ) ( I1 , size2 ) ) \ <nl> + { \ <nl> + THTensor_ ( resize ) ( I1 , size2 , NULL ) ; \ <nl> + } \ <nl> + THLongStorage_free ( size2 ) ; <nl> + <nl> # include " generic / THNN . h " <nl> # include < THGenerateFloatTypes . h > <nl> <nl> - # endif <nl> \ No newline at end of file <nl> + # endif <nl> mmm a / generic / LookupTable . c <nl> ppp b / generic / LookupTable . c <nl> void THNN_ ( LookupTable_accGradParameters ) ( <nl> THTensor * gradWeight , <nl> THIntegerTensor * count , <nl> THTensor * sorted , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> bool scaleGradByFreq , <nl> int paddingValue , <nl> real scale ) <nl> mmm a / generic / MultiLabelMarginCriterion . c <nl> ppp b / generic / MultiLabelMarginCriterion . c <nl> <nl> void THNN_ ( MultiLabelMarginCriterion_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor * target , <nl> + THIndexTensor * target , <nl> THTensor * output , <nl> THTensor * isTarget , <nl> bool sizeAverage ) <nl> { <nl> - real * input_data , * target_data , * isTarget_data ; <nl> + real * input_data , * isTarget_data ; <nl> + THIndex_t * target_data ; <nl> long nframe , dim ; <nl> long t , d , dt , ddt ; <nl> real sum ; <nl> void THNN_ ( MultiLabelMarginCriterion_updateOutput ) ( <nl> & & ( target - > size [ 1 ] = = dim ) , 3 , " inconsistent target size " ) ; <nl> } <nl> <nl> - THArgCheck ( THTensor_ ( minall ) ( target ) > = 0 , 3 , " target out of range " ) ; <nl> - THArgCheck ( THTensor_ ( maxall ) ( target ) < = dim , 3 , " target out of range " ) ; <nl> + THArgCheck ( THIndexTensor_ ( minall ) ( target ) > = 0 , 3 , " target out of range " ) ; <nl> + THArgCheck ( THIndexTensor_ ( maxall ) ( target ) < = dim , 3 , " target out of range " ) ; <nl> <nl> - target = THTensor_ ( newContiguous ) ( target ) ; <nl> + target = THIndexTensor_ ( newContiguous ) ( target ) ; <nl> input = THTensor_ ( newContiguous ) ( input ) ; <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> - target_data = THTensor_ ( data ) ( target ) ; <nl> + target_data = THIndexTensor_ ( data ) ( target ) ; <nl> <nl> - THTensor_ ( resizeAs ) ( isTarget , target ) ; <nl> + THNN_resizeAs_indices ( isTarget , target ) ; <nl> THTensor_ ( zero ) ( isTarget ) ; <nl> isTarget_data = THTensor_ ( data ) ( isTarget ) ; <nl> <nl> void THNN_ ( MultiLabelMarginCriterion_updateOutput ) ( <nl> { <nl> for ( ddt = 0 ; ddt < dim ; ddt + + ) <nl> { <nl> - long target_idx = ( long ) target_data [ ddt ] - TH_INDEX_BASE ; <nl> + THIndex_t target_idx = target_data [ ddt ] - TH_INDEX_BASE ; <nl> if ( target_idx < 0 ) <nl> break ; <nl> isTarget_data [ target_idx ] = 1 ; <nl> } <nl> for ( dt = 0 ; dt < dim ; dt + + ) <nl> { <nl> - long target_idx = ( long ) target_data [ dt ] - TH_INDEX_BASE ; <nl> + THIndex_t target_idx = target_data [ dt ] - TH_INDEX_BASE ; <nl> real input_target ; <nl> if ( target_idx < 0 ) <nl> break ; <nl> void THNN_ ( MultiLabelMarginCriterion_updateOutput ) ( <nl> THTensor_ ( set1d ) ( output , 0 , sum ) ; <nl> <nl> THTensor_ ( free ) ( input ) ; <nl> - THTensor_ ( free ) ( target ) ; <nl> + THIndexTensor_ ( free ) ( target ) ; <nl> } <nl> <nl> void THNN_ ( MultiLabelMarginCriterion_updateGradInput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor 
* target , <nl> + THIndexTensor * target , <nl> THTensor * gradInput , <nl> THTensor * isTarget , <nl> bool sizeAverage ) <nl> { <nl> real * input_data ; <nl> real * gradInput_data ; <nl> - real * target_data ; <nl> + THIndex_t * target_data ; <nl> real * isTarget_data ; <nl> long nframe , dim ; <nl> long t , d , dt ; <nl> void THNN_ ( MultiLabelMarginCriterion_updateGradInput ) ( <nl> & & ( isTarget - > size [ 1 ] = = dim ) , 3 , " inconsistent isTarget size " ) ; <nl> } <nl> <nl> - THArgCheck ( THTensor_ ( minall ) ( target ) > = 0 , 3 , " target out of range " ) ; <nl> - THArgCheck ( THTensor_ ( maxall ) ( target ) < = dim , 3 , " target out of range " ) ; <nl> + THArgCheck ( THIndexTensor_ ( minall ) ( target ) > = 0 , 3 , " target out of range " ) ; <nl> + THArgCheck ( THIndexTensor_ ( maxall ) ( target ) < = dim , 3 , " target out of range " ) ; <nl> <nl> THArgCheck ( THTensor_ ( minall ) ( isTarget ) > = 0 , 3 , " isTarget out of range " ) ; <nl> THArgCheck ( THTensor_ ( maxall ) ( isTarget ) < = 1 , 3 , " isTarget out of range " ) ; <nl> <nl> - target = THTensor_ ( newContiguous ) ( target ) ; <nl> + target = THIndexTensor_ ( newContiguous ) ( target ) ; <nl> input = THTensor_ ( newContiguous ) ( input ) ; <nl> isTarget = THTensor_ ( newContiguous ) ( isTarget ) ; <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> - target_data = THTensor_ ( data ) ( target ) ; <nl> + target_data = THIndexTensor_ ( data ) ( target ) ; <nl> isTarget_data = THTensor_ ( data ) ( isTarget ) ; <nl> <nl> g = sizeAverage ? ( 1 . / ( ( real ) ( nframe * dim ) ) ) : ( 1 . / ( ( real ) dim ) ) ; <nl> void THNN_ ( MultiLabelMarginCriterion_updateGradInput ) ( <nl> { <nl> for ( dt = 0 ; dt < dim ; dt + + ) <nl> { <nl> - long target_idx = ( long ) target_data [ dt ] - TH_INDEX_BASE ; <nl> + THIndex_t target_idx = target_data [ dt ] - TH_INDEX_BASE ; <nl> real input_target ; <nl> if ( target_idx < 0 ) <nl> break ; <nl> void THNN_ ( MultiLabelMarginCriterion_updateGradInput ) ( <nl> } <nl> <nl> THTensor_ ( free ) ( input ) ; <nl> - THTensor_ ( free ) ( target ) ; <nl> + THIndexTensor_ ( free ) ( target ) ; <nl> THTensor_ ( free ) ( isTarget ) ; <nl> } <nl> <nl> mmm a / generic / MultiMarginCriterion . c <nl> ppp b / generic / MultiMarginCriterion . c <nl> <nl> void THNN_ ( MultiMarginCriterion_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor * target , <nl> + THIndexTensor * target , <nl> THTensor * output , <nl> bool sizeAverage , <nl> int p , <nl> THTensor * weights , <nl> real margin ) <nl> { <nl> - real * input_data , * target_data , * weights_data ; <nl> + real * input_data , * weights_data ; <nl> + THIndex_t * target_data ; <nl> long nframe , dim ; <nl> long t , d ; <nl> real sum ; <nl> void THNN_ ( MultiMarginCriterion_updateOutput ) ( <nl> <nl> for ( t = 0 ; t < nframe ; t + + ) <nl> { <nl> - real idx = THTensor_ ( get1d ) ( target , t ) ; <nl> + THIndex_t idx = THIndexTensor_ ( get1d ) ( target , t ) ; <nl> THArgCheck ( ( idx > = TH_INDEX_BASE ) & & ( idx < dim + TH_INDEX_BASE ) , 3 , <nl> " target out of range " ) ; <nl> } <nl> <nl> input = THTensor_ ( newContiguous ) ( input ) ; <nl> - target = THTensor_ ( newContiguous ) ( target ) ; <nl> + target = THIndexTensor_ ( newContiguous ) ( target ) ; <nl> weights = weights ? THTensor_ ( newContiguous ) ( weights ) : NULL ; <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> - target_data = THTensor_ ( data ) ( target ) ; <nl> + target_data = THIndexTensor_ ( data ) ( target ) ; <nl> weights_data = weights ? 
THTensor_ ( data ) ( weights ) : NULL ; <nl> <nl> sum = 0 ; <nl> for ( t = 0 ; t < nframe ; t + + ) <nl> { <nl> - long target_idx = ( long ) ( target_data [ t ] - TH_INDEX_BASE ) ; <nl> + THIndex_t target_idx = target_data [ t ] - TH_INDEX_BASE ; <nl> real input_target = input_data [ target_idx ] ; <nl> for ( d = 0 ; d < dim ; d + + ) <nl> { <nl> void THNN_ ( MultiMarginCriterion_updateOutput ) ( <nl> THTensor_ ( set1d ) ( output , 0 , sum ) ; <nl> <nl> THTensor_ ( free ) ( input ) ; <nl> - THTensor_ ( free ) ( target ) ; <nl> + THIndexTensor_ ( free ) ( target ) ; <nl> if ( weights ) <nl> THTensor_ ( free ) ( weights ) ; <nl> } <nl> void THNN_ ( MultiMarginCriterion_updateOutput ) ( <nl> void THNN_ ( MultiMarginCriterion_updateGradInput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor * target , <nl> + THIndexTensor * target , <nl> THTensor * gradInput , <nl> bool sizeAverage , <nl> int p , <nl> void THNN_ ( MultiMarginCriterion_updateGradInput ) ( <nl> { <nl> real * input_data ; <nl> real * gradInput_data ; <nl> - real * target_data ; <nl> + THIndex_t * target_data ; <nl> real * weights_data ; <nl> long nframe , dim ; <nl> long t , d ; <nl> void THNN_ ( MultiMarginCriterion_updateGradInput ) ( <nl> g = ( sizeAverage ? 1 . / ( ( real ) ( nframe * dim ) ) : 1 . / ( ( real ) dim ) ) ; <nl> <nl> input = THTensor_ ( newContiguous ) ( input ) ; <nl> - target = THTensor_ ( newContiguous ) ( target ) ; <nl> + target = THIndexTensor_ ( newContiguous ) ( target ) ; <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> <nl> THTensor_ ( resizeAs ) ( gradInput , input ) ; <nl> gradInput_data = THTensor_ ( data ) ( gradInput ) ; <nl> <nl> - target_data = THTensor_ ( data ) ( target ) ; <nl> + target_data = THIndexTensor_ ( data ) ( target ) ; <nl> weights = weights ? THTensor_ ( newContiguous ) ( weights ) : NULL ; <nl> weights_data = weights ? THTensor_ ( data ) ( weights ) : NULL ; <nl> <nl> for ( t = 0 ; t < nframe ; t + + ) <nl> { <nl> - long target_idx = ( long ) ( target_data [ t ] ) - TH_INDEX_BASE ; <nl> + THIndex_t target_idx = target_data [ t ] - TH_INDEX_BASE ; <nl> real input_target = input_data [ target_idx ] ; <nl> real gradInput_target = 0 ; <nl> for ( d = 0 ; d < dim ; d + + ) <nl> void THNN_ ( MultiMarginCriterion_updateGradInput ) ( <nl> } <nl> <nl> THTensor_ ( free ) ( input ) ; <nl> - THTensor_ ( free ) ( target ) ; <nl> + THIndexTensor_ ( free ) ( target ) ; <nl> if ( weights ) <nl> THTensor_ ( free ) ( weights ) ; <nl> } <nl> mmm a / generic / SpatialAdaptiveMaxPooling . c <nl> ppp b / generic / SpatialAdaptiveMaxPooling . 
c <nl> <nl> static void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput_frame ) ( <nl> real * input_p , <nl> real * output_p , <nl> - real * indx_p , <nl> - real * indy_p , <nl> + THIndex_t * indx_p , <nl> + THIndex_t * indy_p , <nl> long nslices , <nl> long iwidth , <nl> long iheight , <nl> static void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput_frame ) ( <nl> / * local pointers * / <nl> real * ip = input_p + k * strided + y_start * strideh + x_start * stridew ; <nl> real * op = output_p + k * owidth * oheight + i * owidth + j ; <nl> - real * indyp = indy_p + k * owidth * oheight + i * owidth + j ; <nl> - real * indxp = indx_p + k * owidth * oheight + i * owidth + j ; <nl> + THIndex_t * indyp = indy_p + k * owidth * oheight + i * owidth + j ; <nl> + THIndex_t * indxp = indx_p + k * owidth * oheight + i * owidth + j ; <nl> <nl> / * compute local max : * / <nl> long maxindex = - 1 ; <nl> static void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput_frame ) ( <nl> * op = maxval ; <nl> <nl> / * store location of max ( x , y ) * / <nl> - * indyp = ( int ) ( maxindex / kW ) + TH_INDEX_BASE ; <nl> + * indyp = ( maxindex / kW ) + TH_INDEX_BASE ; <nl> * indxp = ( maxindex % kW ) + TH_INDEX_BASE ; <nl> } <nl> } <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int owidth , <nl> int oheight ) <nl> { <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput ) ( <nl> <nl> real * input_data ; <nl> real * output_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> <nl> THNN_ARGCHECK ( input - > nDimension = = 3 | | input - > nDimension = = 4 , 2 , input , <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput ) ( <nl> { <nl> THTensor_ ( resize3d ) ( output , nslices , oheight , owidth ) ; <nl> / * indices will contain i , j locations for each output point * / <nl> - THTensor_ ( resize4d ) ( indices , 2 , nslices , oheight , owidth ) ; <nl> + THIndexTensor_ ( resize4d ) ( indices , 2 , nslices , oheight , owidth ) ; <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> THNN_ ( SpatialAdaptiveMaxPooling_updateOutput_frame ) ( input_data , output_data , <nl> indices_data + nslices * owidth * oheight , indices_data , <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput ) ( <nl> <nl> THTensor_ ( resize4d ) ( output , nbatch , nslices , oheight , owidth ) ; <nl> / * indices will contain i , j locations for each output point * / <nl> - THTensor_ ( resize5d ) ( indices , 2 , nbatch , nslices , oheight , owidth ) ; <nl> + THIndexTensor_ ( resize5d ) ( indices , 2 , nbatch , nslices , oheight , owidth ) ; <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> # pragma omp parallel for private ( p ) <nl> for ( p = 0 ; p < nbatch ; p + + ) <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput ) ( <nl> static void THNN_ ( SpatialAdaptiveMaxPooling_updateGradInput_frame ) ( <nl> real * gradInput_p , <nl> real * gradOutput_p , <nl> - real * indx_p , <nl> - real * indy_p , <nl> + THIndex_t * indx_p , <nl> + THIndex_t * indy_p , <nl> long nslices , <nl> long iwidth , <nl> long iheight , <nl> 
static void THNN_ ( SpatialAdaptiveMaxPooling_updateGradInput_frame ) ( <nl> { <nl> real * gradInput_p_k = gradInput_p + k * iwidth * iheight ; <nl> real * gradOutput_p_k = gradOutput_p + k * owidth * oheight ; <nl> - real * indx_p_k = indx_p + k * owidth * oheight ; <nl> - real * indy_p_k = indy_p + k * owidth * oheight ; <nl> + THIndex_t * indx_p_k = indx_p + k * owidth * oheight ; <nl> + THIndex_t * indy_p_k = indy_p + k * owidth * oheight ; <nl> <nl> / * calculate max points * / <nl> long i , j ; <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices ) <nl> + THIndexTensor * indices ) <nl> { <nl> int dimw = 2 ; <nl> int dimh = 1 ; <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateGradInput ) ( <nl> int owidth ; <nl> real * gradInput_data ; <nl> real * gradOutput_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> / * get contiguous gradOutput * / <nl> gradOutput = THTensor_ ( newContiguous ) ( gradOutput ) ; <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateGradInput ) ( <nl> / * get raw pointers * / <nl> gradInput_data = THTensor_ ( data ) ( gradInput ) ; <nl> gradOutput_data = THTensor_ ( data ) ( gradOutput ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> / * backprop * / <nl> if ( input - > nDimension = = 3 ) <nl> void THNN_ ( SpatialAdaptiveMaxPooling_updateGradInput ) ( <nl> } <nl> <nl> # endif <nl> - <nl> mmm a / generic / SpatialDilatedMaxPooling . c <nl> ppp b / generic / SpatialDilatedMaxPooling . c <nl> <nl> # else <nl> <nl> static inline void THNN_ ( SpatialDilatedMaxPooling_shapeCheck ) ( <nl> - THTensor * input , THTensor * gradOutput , THTensor * indices , <nl> + THTensor * input , THTensor * gradOutput , THIndexTensor * indices , <nl> int kH , int kW , int dH , int dW , int padH , int padW , <nl> int dilationH , int dilationW , bool ceil_mode ) { <nl> <nl> static inline void THNN_ ( SpatialDilatedMaxPooling_shapeCheck ) ( <nl> " pad should be smaller than half of kernel size , but got " <nl> " padW = % d , padH = % d , kW = % d , kH = % d " , <nl> padW , padH , kW , kH ) ; <nl> - <nl> + <nl> long nInputPlane = input - > size [ dimh - 1 ] ; <nl> long inputHeight = input - > size [ dimh ] ; <nl> long inputWidth = input - > size [ dimw ] ; <nl> static inline void THNN_ ( SpatialDilatedMaxPooling_shapeCheck ) ( <nl> THNN_CHECK_DIM_SIZE ( gradOutput , ndim , dimw , outputWidth ) ; <nl> } <nl> if ( indices ! 
= NULL ) { <nl> - THNN_CHECK_DIM_SIZE ( indices , ndim , dimf , nOutputPlane ) ; <nl> - THNN_CHECK_DIM_SIZE ( indices , ndim , dimh , outputHeight ) ; <nl> - THNN_CHECK_DIM_SIZE ( indices , ndim , dimw , outputWidth ) ; <nl> + THNN_CHECK_DIM_SIZE_INDICES ( indices , ndim , dimf , nOutputPlane ) ; <nl> + THNN_CHECK_DIM_SIZE_INDICES ( indices , ndim , dimh , outputHeight ) ; <nl> + THNN_CHECK_DIM_SIZE_INDICES ( indices , ndim , dimw , outputWidth ) ; <nl> } <nl> } <nl> <nl> static void THNN_ ( SpatialDilatedMaxPooling_updateOutput_frame ) ( <nl> real * input_p , <nl> real * output_p , <nl> - real * ind_p , <nl> + THIndex_t * ind_p , <nl> long nslices , <nl> long iwidth , <nl> long iheight , <nl> static void THNN_ ( SpatialDilatedMaxPooling_updateOutput_frame ) ( <nl> <nl> / * local pointers * / <nl> real * op = output_p + k * owidth * oheight + i * owidth + j ; <nl> - real * indp = ind_p + k * owidth * oheight + i * owidth + j ; <nl> + THIndex_t * indp = ind_p + k * owidth * oheight + i * owidth + j ; <nl> <nl> / * compute local max : * / <nl> long maxindex = - 1 ; <nl> void THNN_ ( SpatialDilatedMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , <nl> int kH , <nl> int dW , <nl> void THNN_ ( SpatialDilatedMaxPooling_updateOutput ) ( <nl> long outputWidth ; <nl> real * input_data ; <nl> real * output_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> THNN_ ( SpatialDilatedMaxPooling_shapeCheck ) <nl> ( input , NULL , NULL , kH , kW , dH , dW , <nl> void THNN_ ( SpatialDilatedMaxPooling_updateOutput ) ( <nl> { <nl> THTensor_ ( resize3d ) ( output , nInputPlane , outputHeight , outputWidth ) ; <nl> / * indices will contain the locations for each output point * / <nl> - THTensor_ ( resize3d ) ( indices , nInputPlane , outputHeight , outputWidth ) ; <nl> + THIndexTensor_ ( resize3d ) ( indices , nInputPlane , outputHeight , outputWidth ) ; <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> THNN_ ( SpatialDilatedMaxPooling_updateOutput_frame ) <nl> ( input_data , output_data , <nl> void THNN_ ( SpatialDilatedMaxPooling_updateOutput ) ( <nl> <nl> THTensor_ ( resize4d ) ( output , nbatch , nInputPlane , outputHeight , outputWidth ) ; <nl> / * indices will contain the locations for each output point * / <nl> - THTensor_ ( resize4d ) ( indices , nbatch , nInputPlane , outputHeight , outputWidth ) ; <nl> + THIndexTensor_ ( resize4d ) ( indices , nbatch , nInputPlane , outputHeight , outputWidth ) ; <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> # pragma omp parallel for private ( p ) <nl> for ( p = 0 ; p < nbatch ; p + + ) <nl> void THNN_ ( SpatialDilatedMaxPooling_updateOutput ) ( <nl> static void THNN_ ( SpatialDilatedMaxPooling_updateGradInput_frame ) ( <nl> real * gradInput_p , <nl> real * gradOutput_p , <nl> - real * ind_p , <nl> + THIndex_t * ind_p , <nl> long nInputPlane , <nl> long inputWidth , <nl> long inputHeight , <nl> static void THNN_ ( SpatialDilatedMaxPooling_updateGradInput_frame ) ( <nl> { <nl> real * gradInput_p_k = gradInput_p + k * inputWidth * inputHeight ; <nl> real * 
gradOutput_p_k = gradOutput_p + k * outputWidth * outputHeight ; <nl> - real * ind_p_k = ind_p + k * outputWidth * outputHeight ; <nl> + THIndex_t * ind_p_k = ind_p + k * outputWidth * outputHeight ; <nl> <nl> / * calculate max points * / <nl> long i , j ; <nl> void THNN_ ( SpatialDilatedMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , <nl> int kH , <nl> int dW , <nl> void THNN_ ( SpatialDilatedMaxPooling_updateGradInput ) ( <nl> int outputWidth ; <nl> real * gradInput_data ; <nl> real * gradOutput_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> THNN_ ( SpatialDilatedMaxPooling_shapeCheck ) <nl> ( input , gradOutput , indices , kH , kW , dH , dW , <nl> void THNN_ ( SpatialDilatedMaxPooling_updateGradInput ) ( <nl> / * get raw pointers * / <nl> gradInput_data = THTensor_ ( data ) ( gradInput ) ; <nl> gradOutput_data = THTensor_ ( data ) ( gradOutput ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> / * backprop * / <nl> if ( input - > nDimension = = 3 ) <nl> mmm a / generic / SpatialFractionalMaxPooling . c <nl> ppp b / generic / SpatialFractionalMaxPooling . c <nl> static long * THNN_ ( SpatialFractionalMaxPooling_generateIntervals ) ( <nl> static void THNN_ ( SpatialFractionalMaxPooling_updateOutput_frame ) ( <nl> real * input , <nl> real * output , <nl> - real * indices , <nl> + THIndex_t * indices , <nl> real * randomSamples , <nl> long numPlanes , <nl> long inputW , long inputH , <nl> static void THNN_ ( SpatialFractionalMaxPooling_updateOutput_frame ) ( <nl> <nl> real * inputForPlane = input + plane * inputW * inputH ; <nl> real * outputForPlane = output + plane * outputW * outputH ; <nl> - real * indicesForPlane = indices + plane * outputW * outputH ; <nl> + THIndex_t * indicesForPlane = indices + plane * outputW * outputH ; <nl> <nl> for ( h = 0 ; h < outputH ; + + h ) { <nl> long inputHStart = sequenceH [ h ] ; <nl> static void THNN_ ( SpatialFractionalMaxPooling_updateOutput_frame ) ( <nl> <nl> outputForPlane [ h * outputW + w ] = maxVal ; <nl> / * + 1 to lua index * / <nl> - indicesForPlane [ h * outputW + w ] = ( real ) maxIndex + TH_INDEX_BASE ; <nl> + indicesForPlane [ h * outputW + w ] = maxIndex + TH_INDEX_BASE ; <nl> } <nl> } <nl> <nl> void THNN_ ( SpatialFractionalMaxPooling_updateOutput ) ( <nl> THTensor * output , <nl> int outputW , int outputH , <nl> int poolSizeW , int poolSizeH , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> THTensor * randomSamples ) { <nl> <nl> long numBatch = 1 ; <nl> void THNN_ ( SpatialFractionalMaxPooling_updateOutput ) ( <nl> / * resize output * / <nl> THTensor_ ( resize3d ) ( output , numPlanes , outputH , outputW ) ; <nl> / * indices will contain the locations for each output point * / <nl> - THTensor_ ( resize3d ) ( indices , numPlanes , outputH , outputW ) ; <nl> + THIndexTensor_ ( resize3d ) ( indices , numPlanes , outputH , outputW ) ; <nl> <nl> THNN_ ( SpatialFractionalMaxPooling_updateOutput_frame ) ( <nl> THTensor_ ( data ) ( input ) , <nl> THTensor_ ( data ) ( output ) , <nl> - THTensor_ ( data ) ( indices ) , <nl> + THIndexTensor_ ( data ) ( indices ) , <nl> THTensor_ ( data ) ( randomSamples ) , <nl> numPlanes , inputW , inputH , outputW , outputH , poolSizeW , poolSizeH ) ; <nl> } else { <nl> THTensor_ ( resize4d ) ( output , numBatch , numPlanes , outputH , outputW ) 
; <nl> / * indices will contain the locations for each output point * / <nl> - THTensor_ ( resize4d ) ( indices , numBatch , numPlanes , outputH , outputW ) ; <nl> + THIndexTensor_ ( resize4d ) ( indices , numBatch , numPlanes , outputH , outputW ) ; <nl> <nl> long batch ; <nl> # pragma omp parallel for private ( batch ) <nl> void THNN_ ( SpatialFractionalMaxPooling_updateOutput ) ( <nl> THNN_ ( SpatialFractionalMaxPooling_updateOutput_frame ) ( <nl> THTensor_ ( data ) ( input ) + batch * numPlanes * inputH * inputW , <nl> THTensor_ ( data ) ( output ) + batch * numPlanes * outputH * outputW , <nl> - THTensor_ ( data ) ( indices ) + batch * numPlanes * outputH * outputW , <nl> + THIndexTensor_ ( data ) ( indices ) + batch * numPlanes * outputH * outputW , <nl> THTensor_ ( data ) ( randomSamples ) + batch * numPlanes * 2 , <nl> numPlanes , inputW , inputH , outputW , outputH , poolSizeW , poolSizeH ) ; <nl> } <nl> void THNN_ ( SpatialFractionalMaxPooling_updateOutput ) ( <nl> static void THNN_ ( SpatialFractionalMaxPooling_updateGradInput_frame ) ( <nl> real * gradInput , <nl> real * gradOutput , <nl> - real * indices , <nl> + THIndex_t * indices , <nl> long numPlanes , <nl> long inputW , long inputH , <nl> long outputW , long outputH ) { <nl> static void THNN_ ( SpatialFractionalMaxPooling_updateGradInput_frame ) ( <nl> for ( plane = 0 ; plane < numPlanes ; plane + + ) { <nl> real * gradInputForPlane = gradInput + plane * inputW * inputH ; <nl> real * gradOutputForPlane = gradOutput + plane * outputW * outputH ; <nl> - real * indicesForPlane = indices + plane * outputW * outputH ; <nl> + THIndex_t * indicesForPlane = indices + plane * outputW * outputH ; <nl> <nl> long h , w ; <nl> for ( h = 0 ; h < outputH ; + + h ) { <nl> void THNN_ ( SpatialFractionalMaxPooling_updateGradInput ) ( <nl> THTensor * gradInput , <nl> int outputW , int outputH , <nl> int poolSizeW , int poolSizeH , <nl> - THTensor * indices ) { <nl> + THIndexTensor * indices ) { <nl> <nl> long numBatch = 1 ; <nl> int planeDim = 0 ; <nl> void THNN_ ( SpatialFractionalMaxPooling_updateGradInput ) ( <nl> THNN_ ( SpatialFractionalMaxPooling_updateGradInput_frame ) ( <nl> THTensor_ ( data ) ( gradInput ) , <nl> THTensor_ ( data ) ( gradOutput ) , <nl> - THTensor_ ( data ) ( indices ) , <nl> + THIndexTensor_ ( data ) ( indices ) , <nl> numPlanes , inputW , inputH , outputW , outputH ) ; <nl> } else { <nl> long batch ; <nl> void THNN_ ( SpatialFractionalMaxPooling_updateGradInput ) ( <nl> THNN_ ( SpatialFractionalMaxPooling_updateGradInput_frame ) ( <nl> THTensor_ ( data ) ( gradInput ) + batch * numPlanes * inputH * inputW , <nl> THTensor_ ( data ) ( gradOutput ) + batch * numPlanes * outputH * outputW , <nl> - THTensor_ ( data ) ( indices ) + batch * numPlanes * outputH * outputW , <nl> + THIndexTensor_ ( data ) ( indices ) + batch * numPlanes * outputH * outputW , <nl> numPlanes , inputW , inputH , outputW , outputH ) ; <nl> } <nl> } <nl> mmm a / generic / SpatialMaxPooling . c <nl> ppp b / generic / SpatialMaxPooling . c <nl> void THNN_ ( SpatialMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , <nl> int kH , <nl> int dW , <nl> void THNN_ ( SpatialMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , <nl> int kH , <nl> int dW , <nl> mmm a / generic / SpatialMaxUnpooling . 
c <nl> ppp b / generic / SpatialMaxUnpooling . c <nl> <nl> # else <nl> <nl> static void THNN_ ( SpatialMaxUnpooling_updateOutput_frame ) ( real * input_p , real * output_p , <nl> - real * ind_p , <nl> + THIndex_t * ind_p , <nl> long nslices , <nl> long iwidth , long iheight , <nl> long owidth , long oheight ) <nl> static void THNN_ ( SpatialMaxUnpooling_updateOutput_frame ) ( real * input_p , real * o <nl> { <nl> real * output_p_k = output_p + k * owidth * oheight ; <nl> real * input_p_k = input_p + k * iwidth * iheight ; <nl> - real * ind_p_k = ind_p + k * iwidth * iheight ; <nl> + THIndex_t * ind_p_k = ind_p + k * iwidth * iheight ; <nl> <nl> long i , j , maxp ; <nl> for ( i = 0 ; i < iheight ; i + + ) <nl> void THNN_ ( SpatialMaxUnpooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int owidth , int oheight ) <nl> { <nl> int dimw = 2 ; <nl> void THNN_ ( SpatialMaxUnpooling_updateOutput ) ( <nl> int iwidth ; <nl> real * input_data ; <nl> real * output_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> <nl> THNN_ARGCHECK ( input - > nDimension = = 3 | | input - > nDimension = = 4 , 2 , input , <nl> " 3D or 4D ( batch mode ) tensor expected for input , but got : % s " ) ; <nl> - THNN_CHECK_SHAPE ( input , indices ) ; <nl> + THNN_CHECK_SHAPE_INDICES ( input , indices ) ; <nl> <nl> if ( input - > nDimension = = 4 ) <nl> { <nl> void THNN_ ( SpatialMaxUnpooling_updateOutput ) ( <nl> <nl> / * get contiguous input and indices * / <nl> input = THTensor_ ( newContiguous ) ( input ) ; <nl> - indices = THTensor_ ( newContiguous ) ( indices ) ; <nl> + indices = THIndexTensor_ ( newContiguous ) ( indices ) ; <nl> <nl> / * resize output * / <nl> if ( input - > nDimension = = 3 ) <nl> void THNN_ ( SpatialMaxUnpooling_updateOutput ) ( <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> THNN_ ( SpatialMaxUnpooling_updateOutput_frame ) ( input_data , output_data , <nl> indices_data , <nl> void THNN_ ( SpatialMaxUnpooling_updateOutput ) ( <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> # pragma omp parallel for private ( p ) <nl> for ( p = 0 ; p < nbatch ; p + + ) <nl> void THNN_ ( SpatialMaxUnpooling_updateOutput ) ( <nl> <nl> / * cleanup * / <nl> THTensor_ ( free ) ( input ) ; <nl> - THTensor_ ( free ) ( indices ) ; <nl> + THIndexTensor_ ( free ) ( indices ) ; <nl> } <nl> <nl> static void THNN_ ( SpatialMaxUnpooling_updateGradInput_frame ) ( real * gradInput_p , real * gradOutput_p , <nl> - real * ind_p , <nl> + THIndex_t * ind_p , <nl> long nslices , <nl> long iwidth , long iheight , <nl> long owidth , long oheight ) <nl> static void THNN_ ( SpatialMaxUnpooling_updateGradInput_frame ) ( real * gradInput_p , <nl> { <nl> real * gradInput_p_k = gradInput_p + k * iwidth * iheight ; <nl> real * gradOutput_p_k = gradOutput_p + k * owidth * oheight ; <nl> - real * ind_p_k = ind_p + k * iwidth * iheight ; <nl> + THIndex_t * ind_p_k = ind_p + k * iwidth * iheight ; <nl> <nl> long i , j , maxp ; <nl> for ( i = 0 ; i < iheight ; i + + ) <nl> void THNN_ ( SpatialMaxUnpooling_updateGradInput ) ( <nl> THTensor * input , <nl> 
THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int owidth , int oheight ) <nl> { <nl> int dimw = 2 ; <nl> void THNN_ ( SpatialMaxUnpooling_updateGradInput ) ( <nl> int iwidth ; <nl> real * gradInput_data ; <nl> real * gradOutput_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> - THNN_CHECK_SHAPE ( input , indices ) ; <nl> + THNN_CHECK_SHAPE_INDICES ( input , indices ) ; <nl> <nl> / * get contiguous gradOutput and indices * / <nl> gradOutput = THTensor_ ( newContiguous ) ( gradOutput ) ; <nl> - indices = THTensor_ ( newContiguous ) ( indices ) ; <nl> + indices = THIndexTensor_ ( newContiguous ) ( indices ) ; <nl> <nl> / * resize * / <nl> THTensor_ ( resizeAs ) ( gradInput , input ) ; <nl> void THNN_ ( SpatialMaxUnpooling_updateGradInput ) ( <nl> / * get raw pointers * / <nl> gradInput_data = THTensor_ ( data ) ( gradInput ) ; <nl> gradOutput_data = THTensor_ ( data ) ( gradOutput ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> / * backprop * / <nl> if ( input - > nDimension = = 3 ) <nl> void THNN_ ( SpatialMaxUnpooling_updateGradInput ) ( <nl> <nl> / * cleanup * / <nl> THTensor_ ( free ) ( gradOutput ) ; <nl> - THTensor_ ( free ) ( indices ) ; <nl> + THIndexTensor_ ( free ) ( indices ) ; <nl> } <nl> <nl> # endif <nl> mmm a / generic / THNN . h <nl> ppp b / generic / THNN . h <nl> TH_API void THNN_ ( LookupTable_accGradParameters ) ( <nl> THTensor * gradWeight , <nl> THIntegerTensor * count , <nl> THTensor * sorted , / / [ OPTIONAL ] <nl> - THTensor * indices , / / [ OPTIONAL ] <nl> + THIndexTensor * indices , / / [ OPTIONAL ] <nl> bool scaleGradByFreq , <nl> int paddingValue , <nl> real scale ) ; <nl> TH_API void THNN_ ( MSECriterion_updateGradInput ) ( <nl> TH_API void THNN_ ( MultiLabelMarginCriterion_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor * target , <nl> + THIndexTensor * target , <nl> THTensor * output , <nl> THTensor * isTarget , <nl> bool sizeAverage ) ; <nl> TH_API void THNN_ ( MultiLabelMarginCriterion_updateGradInput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor * target , <nl> + THIndexTensor * target , <nl> THTensor * gradInput , <nl> THTensor * isTarget , <nl> bool sizeAverage ) ; <nl> TH_API void THNN_ ( MultiLabelMarginCriterion_updateGradInput ) ( <nl> TH_API void THNN_ ( MultiMarginCriterion_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor * target , <nl> + THIndexTensor * target , <nl> THTensor * output , <nl> bool sizeAverage , <nl> int p , <nl> TH_API void THNN_ ( MultiMarginCriterion_updateOutput ) ( <nl> TH_API void THNN_ ( MultiMarginCriterion_updateGradInput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> - THTensor * target , <nl> + THIndexTensor * target , <nl> THTensor * gradInput , <nl> bool sizeAverage , <nl> int p , <nl> TH_API void THNN_ ( TemporalMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , int dW ) ; <nl> TH_API void THNN_ ( TemporalMaxPooling_updateGradInput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , int dW ) ; <nl> TH_API void THNN_ ( TemporalSubSampling_updateOutput ) ( <nl> THNNState * state , <nl> 
TH_API void THNN_ ( SpatialAdaptiveMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int owidth , int oheight ) ; <nl> TH_API void THNN_ ( SpatialAdaptiveMaxPooling_updateGradInput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices ) ; <nl> + THIndexTensor * indices ) ; <nl> <nl> TH_API void THNN_ ( SpatialAveragePooling_updateOutput ) ( <nl> THNNState * state , <nl> TH_API void THNN_ ( SpatialFractionalMaxPooling_updateOutput ) ( <nl> THTensor * output , <nl> int outputW , int outputH , <nl> int poolSizeW , int poolSizeH , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> THTensor * randomSamples ) ; <nl> TH_API void THNN_ ( SpatialFractionalMaxPooling_updateGradInput ) ( <nl> THNNState * state , <nl> TH_API void THNN_ ( SpatialFractionalMaxPooling_updateGradInput ) ( <nl> THTensor * gradInput , <nl> int outputW , int outputH , <nl> int poolSizeW , int poolSizeH , <nl> - THTensor * indices ) ; <nl> + THIndexTensor * indices ) ; <nl> <nl> TH_API void THNN_ ( SpatialFullConvolution_updateOutput ) ( <nl> THNNState * state , <nl> TH_API void THNN_ ( SpatialMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , int kH , <nl> int dW , int dH , <nl> int padW , int padH , <nl> TH_API void THNN_ ( SpatialMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , int kH , <nl> int dW , int dH , <nl> int padW , int padH , <nl> TH_API void THNN_ ( SpatialDilatedMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , int kH , <nl> int dW , int dH , <nl> int padW , int padH , <nl> TH_API void THNN_ ( SpatialDilatedMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , int kH , <nl> int dW , int dH , <nl> int padW , int padH , <nl> TH_API void THNN_ ( SpatialMaxUnpooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int owidth , int oheight ) ; <nl> TH_API void THNN_ ( SpatialMaxUnpooling_updateGradInput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int owidth , int oheight ) ; <nl> <nl> TH_API void THNN_ ( SpatialSubSampling_updateOutput ) ( <nl> TH_API void THNN_ ( VolumetricMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kT , int kW , int kH , <nl> int dT , int dW , int dH , <nl> int pT , int pW , int pH , <nl> TH_API void THNN_ ( VolumetricMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int dT , int dW , int dH , <nl> int pT , int pW , int pH ) ; <nl> <nl> TH_API void THNN_ ( 
VolumetricDilatedMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kT , int kW , int kH , <nl> int dT , int dW , int dH , <nl> int pT , int pW , int pH , <nl> TH_API void THNN_ ( VolumetricDilatedMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int dT , int dW , int dH , <nl> int pT , int pW , int pH , <nl> int dilationT , int dilationW , int dilationH ) ; <nl> TH_API void THNN_ ( VolumetricMaxUnpooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int oT , int oW , int oH , <nl> int dT , int dW , int dH , <nl> int pT , int pW , int pH ) ; <nl> TH_API void THNN_ ( VolumetricMaxUnpooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int oT , int oW , int oH , <nl> int dT , int dW , int dH , <nl> int pT , int pW , int pH ) ; <nl> mmm a / generic / TemporalMaxPooling . c <nl> ppp b / generic / TemporalMaxPooling . c <nl> void THNN_ ( TemporalMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , <nl> int dW ) <nl> { <nl> void THNN_ ( TemporalMaxPooling_updateOutput ) ( <nl> <nl> real * input_data ; <nl> real * output_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> long t , y ; <nl> <nl> void THNN_ ( TemporalMaxPooling_updateOutput ) ( <nl> THTensor_ ( resize2d ) ( output , noframe , framesize ) ; <nl> <nl> / * indices will contain index locations for each output point * / <nl> - THTensor_ ( resize2d ) ( indices , noframe , framesize ) ; <nl> + THIndexTensor_ ( resize2d ) ( indices , noframe , framesize ) ; <nl> <nl> / * get raw pointers * / <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> for ( t = 0 ; t < noframe ; t + + ) <nl> { <nl> real * ip = input_data + t * framesize * dW ; <nl> real * op = output_data + t * framesize ; <nl> - real * xp = indices_data + t * framesize ; <nl> + THIndex_t * xp = indices_data + t * framesize ; <nl> # pragma omp parallel for private ( y ) <nl> for ( y = 0 ; y < framesize ; y + + ) <nl> { <nl> void THNN_ ( TemporalMaxPooling_updateOutput ) ( <nl> THTensor_ ( resize3d ) ( output , nbframe , noframe , framesize ) ; <nl> <nl> / * indices will contain index locations for each output point * / <nl> - THTensor_ ( resize3d ) ( indices , nbframe , noframe , framesize ) ; <nl> + THIndexTensor_ ( resize3d ) ( indices , nbframe , noframe , framesize ) ; <nl> <nl> / * get raw pointers * / <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> for ( i = 0 ; i < nbframe ; i + + ) <nl> { <nl> real * inputSample_data = input_data + i * niframe * framesize ; <nl> real * outputSample_data = output_data + i * noframe * framesize ; <nl> - real * indicesSample_data = indices_data + i * noframe * framesize ; <nl> + 
THIndex_t * indicesSample_data = indices_data + i * noframe * framesize ; <nl> <nl> for ( t = 0 ; t < noframe ; t + + ) <nl> { <nl> real * ip = inputSample_data + t * framesize * dW ; <nl> real * op = outputSample_data + t * framesize ; <nl> - real * xp = indicesSample_data + t * framesize ; <nl> + THIndex_t * xp = indicesSample_data + t * framesize ; <nl> <nl> # pragma omp parallel for private ( y ) <nl> for ( y = 0 ; y < framesize ; y + + ) <nl> void THNN_ ( TemporalMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kW , <nl> int dW ) <nl> { <nl> void THNN_ ( TemporalMaxPooling_updateGradInput ) ( <nl> <nl> real * gradInput_data ; <nl> real * gradOutput_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> long t , y ; <nl> <nl> void THNN_ ( TemporalMaxPooling_updateGradInput ) ( <nl> / * get raw pointers * / <nl> gradInput_data = THTensor_ ( data ) ( gradInput ) ; <nl> gradOutput_data = THTensor_ ( data ) ( gradOutput ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> if ( input - > nDimension = = 2 ) <nl> { <nl> void THNN_ ( TemporalMaxPooling_updateGradInput ) ( <nl> { <nl> real * gip = gradInput_data + t * framesize * dW ; <nl> real * gop = gradOutput_data + t * framesize ; <nl> - real * xp = indices_data + t * framesize ; <nl> + THIndex_t * xp = indices_data + t * framesize ; <nl> # pragma omp parallel for private ( y ) <nl> for ( y = 0 ; y < framesize ; y + + ) <nl> { <nl> void THNN_ ( TemporalMaxPooling_updateGradInput ) ( <nl> { <nl> real * gradInputSample_data = gradInput_data + i * niframe * framesize ; <nl> real * gradOutputSample_data = gradOutput_data + i * noframe * framesize ; <nl> - real * indicesSample_data = indices_data + i * noframe * framesize ; <nl> + THIndex_t * indicesSample_data = indices_data + i * noframe * framesize ; <nl> <nl> for ( t = 0 ; t < noframe ; t + + ) <nl> { <nl> real * gip = gradInputSample_data + t * framesize * dW ; <nl> real * gop = gradOutputSample_data + t * framesize ; <nl> - real * xp = indicesSample_data + t * framesize ; <nl> + THIndex_t * xp = indicesSample_data + t * framesize ; <nl> # pragma omp parallel for private ( y ) <nl> for ( y = 0 ; y < framesize ; y + + ) <nl> { <nl> mmm a / generic / VolumetricDilatedMaxPooling . c <nl> ppp b / generic / VolumetricDilatedMaxPooling . 
c <nl> <nl> static void THNN_ ( VolumetricDilatedMaxPooling_updateOutput_frame ) ( <nl> real * input_p , <nl> real * output_p , <nl> - real * indz_p , <nl> + THIndex_t * indz_p , <nl> long nslices , <nl> long itime , <nl> long iwidth , <nl> static void THNN_ ( VolumetricDilatedMaxPooling_updateOutput_frame ) ( <nl> long start_t = ti * dT - pT ; <nl> long start_h = i * dH - pH ; <nl> long start_w = j * dW - pW ; <nl> - <nl> + <nl> long kernel_t = fminf ( kT , kT + start_t ) ; <nl> long kernel_h = fminf ( kH , kH + start_h ) ; <nl> long kernel_w = fminf ( kW , kW + start_w ) ; <nl> static void THNN_ ( VolumetricDilatedMaxPooling_updateOutput_frame ) ( <nl> start_h + = dilationH ; <nl> while ( start_w < 0 ) <nl> start_w + = dilationW ; <nl> - <nl> + <nl> real * ip = input_p + k * itime * iwidth * iheight <nl> + start_t * iwidth * iheight + start_h * iwidth + start_w ; <nl> real * op = output_p + k * otime * owidth * oheight <nl> + ti * owidth * oheight + i * owidth + j ; <nl> - real * indzp = indz_p + k * otime * owidth * oheight <nl> + THIndex_t * indzp = indz_p + k * otime * owidth * oheight <nl> + ti * owidth * oheight + i * owidth + j ; <nl> <nl> / * compute local max : * / <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kT , <nl> int kW , <nl> int kH , <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateOutput ) ( <nl> long owidth ; <nl> real * input_data ; <nl> real * output_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> THNN_ARGCHECK ( input - > nDimension = = 4 | | input - > nDimension = = 5 , 2 , input , <nl> " 4D or 5D ( batch mode ) tensor expected for input , but got : % s " ) ; <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateOutput ) ( <nl> / * resize output * / <nl> THTensor_ ( resize4d ) ( output , nslices , otime , oheight , owidth ) ; <nl> / * indices will contain ti , i , j uchar locations packed into float / double * / <nl> - THTensor_ ( resize4d ) ( indices , nslices , otime , oheight , owidth ) ; <nl> + THIndexTensor_ ( resize4d ) ( indices , nslices , otime , oheight , owidth ) ; <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> THNN_ ( VolumetricDilatedMaxPooling_updateOutput_frame ) ( <nl> input_data , output_data , <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateOutput ) ( <nl> / * resize output * / <nl> THTensor_ ( resize5d ) ( output , nBatch , nslices , otime , oheight , owidth ) ; <nl> / * indices will contain ti , i , j locations for each output point * / <nl> - THTensor_ ( resize5d ) ( indices , nBatch , nslices , otime , oheight , owidth ) ; <nl> + THIndexTensor_ ( resize5d ) ( indices , nBatch , nslices , otime , oheight , owidth ) ; <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> # pragma omp parallel for private ( p ) <nl> for ( p = 0 ; p < nBatch ; p + + ) <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateOutput ) ( <nl> static void THNN_ ( VolumetricDilatedMaxPooling_updateGradInput_frame ) ( <nl> real * gradInput_p , <nl> real * gradOutput_p , <nl> - real * indz_p , <nl> + THIndex_t * indz_p , <nl> 
long nslices , <nl> long itime , <nl> long iwidth , <nl> static void THNN_ ( VolumetricDilatedMaxPooling_updateGradInput_frame ) ( <nl> { <nl> real * gradInput_p_k = gradInput_p + k * itime * iwidth * iheight ; <nl> real * gradOutput_p_k = gradOutput_p + k * otime * owidth * oheight ; <nl> - real * indz_p_k = indz_p + k * otime * owidth * oheight ; <nl> + THIndex_t * indz_p_k = indz_p + k * otime * owidth * oheight ; <nl> <nl> / * calculate max points * / <nl> long ti , i , j ; <nl> static void THNN_ ( VolumetricDilatedMaxPooling_updateGradInput_frame ) ( <nl> for ( j = 0 ; j < owidth ; j + + ) <nl> { <nl> / * retrieve position of max * / <nl> - real * indzp = & indz_p_k [ ti * oheight * owidth + i * owidth + j ] ; <nl> + THIndex_t * indzp = & indz_p_k [ ti * oheight * owidth + i * owidth + j ] ; <nl> long maxti = ( ( unsigned char * ) ( indzp ) ) [ 0 ] * dilationT + ti * dT - pT ; <nl> long maxi = ( ( unsigned char * ) ( indzp ) ) [ 1 ] * dilationH + i * dH - pH ; <nl> long maxj = ( ( unsigned char * ) ( indzp ) ) [ 2 ] * dilationW + j * dW - pW ; <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int dT , <nl> int dW , <nl> int dH , <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateGradInput ) ( <nl> int owidth ; <nl> real * gradInput_data ; <nl> real * gradOutput_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> int dimN = 0 ; <nl> int dimt = 1 ; <nl> void THNN_ ( VolumetricDilatedMaxPooling_updateGradInput ) ( <nl> / * get raw pointers * / <nl> gradInput_data = THTensor_ ( data ) ( gradInput ) ; <nl> gradOutput_data = THTensor_ ( data ) ( gradOutput ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> / * backprop * / <nl> if ( input - > nDimension = = 4 ) / * non - batch mode * / <nl> mmm a / generic / VolumetricMaxPooling . c <nl> ppp b / generic / VolumetricMaxPooling . c <nl> void THNN_ ( VolumetricMaxPooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int kT , <nl> int kW , <nl> int kH , <nl> void THNN_ ( VolumetricMaxPooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int dT , <nl> int dW , <nl> int dH , <nl> mmm a / generic / VolumetricMaxUnpooling . c <nl> ppp b / generic / VolumetricMaxUnpooling . 
c <nl> <nl> static void THNN_ ( VolumetricMaxUnpooling_updateOutput_frame ) ( <nl> real * input_p , <nl> real * output_p , <nl> - real * ind_p , <nl> + THIndex_t * ind_p , <nl> long nslices , <nl> long iT , <nl> long iW , <nl> static void THNN_ ( VolumetricMaxUnpooling_updateOutput_frame ) ( <nl> <nl> / / real * output_p_k = output_p + k * oT * oW * oH + ti * oW * oH * dT + i * oW * dH + j * dW ; <nl> real * input_p_k = input_p + k * iT * iW * iH + ti * iW * iH + i * iW + j ; <nl> - real * ind_p_k = ind_p + k * iT * iW * iH + ti * iW * iH + i * iW + j ; <nl> + THIndex_t * ind_p_k = ind_p + k * iT * iW * iH + ti * iW * iH + i * iW + j ; <nl> <nl> maxz = ( ( unsigned char * ) ( ind_p_k ) ) [ 0 ] ; / * retrieve position of max * / <nl> maxy = ( ( unsigned char * ) ( ind_p_k ) ) [ 1 ] ; <nl> void THNN_ ( VolumetricMaxUnpooling_updateOutput ) ( <nl> THNNState * state , <nl> THTensor * input , <nl> THTensor * output , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int oT , <nl> int oW , <nl> int oH , <nl> void THNN_ ( VolumetricMaxUnpooling_updateOutput ) ( <nl> int iW ; <nl> real * input_data ; <nl> real * output_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> THNN_ARGCHECK ( input - > nDimension = = 4 | | input - > nDimension = = 5 , 2 , input , <nl> " 4D or 5D ( batch mode ) tensor expected for input , but got : % s " ) ; <nl> <nl> - if ( ! THTensor_ ( isSameSizeAs ) ( input , indices ) ) <nl> - { <nl> - THError ( " Invalid input size w . r . t current indices size " ) ; <nl> - } <nl> + THNN_CHECK_SHAPE_INDICES ( input , indices ) ; <nl> <nl> if ( input - > nDimension = = 5 ) <nl> { <nl> void THNN_ ( VolumetricMaxUnpooling_updateOutput ) ( <nl> <nl> / * get contiguous input * / <nl> input = THTensor_ ( newContiguous ) ( input ) ; <nl> - indices = THTensor_ ( newContiguous ) ( indices ) ; <nl> + indices = THIndexTensor_ ( newContiguous ) ( indices ) ; <nl> <nl> / * resize output * / <nl> if ( input - > nDimension = = 4 ) <nl> void THNN_ ( VolumetricMaxUnpooling_updateOutput ) ( <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> THNN_ ( VolumetricMaxUnpooling_updateOutput_frame ) ( <nl> input_data , output_data , <nl> void THNN_ ( VolumetricMaxUnpooling_updateOutput ) ( <nl> <nl> input_data = THTensor_ ( data ) ( input ) ; <nl> output_data = THTensor_ ( data ) ( output ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> # pragma omp parallel for private ( p ) <nl> for ( p = 0 ; p < nbatch ; p + + ) <nl> void THNN_ ( VolumetricMaxUnpooling_updateOutput ) ( <nl> <nl> / * cleanup * / <nl> THTensor_ ( free ) ( input ) ; <nl> - THTensor_ ( free ) ( indices ) ; <nl> + THIndexTensor_ ( free ) ( indices ) ; <nl> } <nl> <nl> static void THNN_ ( VolumetricMaxUnpooling_updateGradInput_frame ) ( <nl> real * gradInput_p , <nl> real * gradOutput_p , <nl> - real * ind_p , <nl> + THIndex_t * ind_p , <nl> long nslices , <nl> long iT , <nl> long iW , <nl> static void THNN_ ( VolumetricMaxUnpooling_updateGradInput_frame ) ( <nl> <nl> real * gradInput_p_k = gradInput_p + k * iT * iW * iH + ti * iW * iH + i * iW + j ; <nl> / / real * gradOutput_p_k = gradOutput_p + k * oT * oW * oH + ti * oW * oH * dT + i * oW * dH + j * dW ; <nl> - real * ind_p_k = ind_p + k * iT * iW * iH + ti * iW * iH + i * iW + j ; <nl> + 
THIndex_t * ind_p_k = ind_p + k * iT * iW * iH + ti * iW * iH + i * iW + j ; <nl> <nl> maxz = ( ( unsigned char * ) ( ind_p_k ) ) [ 0 ] ; / * retrieve position of max * / <nl> maxy = ( ( unsigned char * ) ( ind_p_k ) ) [ 1 ] ; <nl> void THNN_ ( VolumetricMaxUnpooling_updateGradInput ) ( <nl> THTensor * input , <nl> THTensor * gradOutput , <nl> THTensor * gradInput , <nl> - THTensor * indices , <nl> + THIndexTensor * indices , <nl> int oT , <nl> int oW , <nl> int oH , <nl> void THNN_ ( VolumetricMaxUnpooling_updateGradInput ) ( <nl> int iW ; <nl> real * gradInput_data ; <nl> real * gradOutput_data ; <nl> - real * indices_data ; <nl> + THIndex_t * indices_data ; <nl> <nl> - if ( ! THTensor_ ( isSameSizeAs ) ( input , indices ) ) <nl> - { <nl> - THError ( " Invalid input size w . r . t current indices size " ) ; <nl> - } <nl> + THNN_CHECK_SHAPE_INDICES ( input , indices ) ; <nl> <nl> / / TODO : check gradOutput shape <nl> / * get contiguous gradOutput * / <nl> gradOutput = THTensor_ ( newContiguous ) ( gradOutput ) ; <nl> - indices = THTensor_ ( newContiguous ) ( indices ) ; <nl> + indices = THIndexTensor_ ( newContiguous ) ( indices ) ; <nl> <nl> / * resize * / <nl> THTensor_ ( resizeAs ) ( gradInput , input ) ; <nl> void THNN_ ( VolumetricMaxUnpooling_updateGradInput ) ( <nl> / * get raw pointers * / <nl> gradInput_data = THTensor_ ( data ) ( gradInput ) ; <nl> gradOutput_data = THTensor_ ( data ) ( gradOutput ) ; <nl> - indices_data = THTensor_ ( data ) ( indices ) ; <nl> + indices_data = THIndexTensor_ ( data ) ( indices ) ; <nl> <nl> / * backprop * / <nl> if ( input - > nDimension = = 4 ) <nl> void THNN_ ( VolumetricMaxUnpooling_updateGradInput ) ( <nl> <nl> / * cleanup * / <nl> THTensor_ ( free ) ( gradOutput ) ; <nl> - THTensor_ ( free ) ( indices ) ; <nl> + THIndexTensor_ ( free ) ( indices ) ; <nl> } <nl> <nl> # endif <nl> mmm a / init . c <nl> ppp b / init . c <nl> <nl> # I1 " % s , " # I2 " % s " , s1 . str , s2 . str ) ; \ <nl> } <nl> <nl> + # define THNN_CHECK_SHAPE_INDICES ( I1 , I2 ) \ <nl> + THLongStorage * size2 = THLongTensor_newSizeOf ( I2 ) ; \ <nl> + if ( I1 ! = NULL & & I2 ! = NULL & & ! THTensor_ ( isSize ) ( I1 , size2 ) ) \ <nl> + { \ <nl> + THDescBuff s1 = THTensor_ ( sizeDesc ) ( I1 ) ; \ <nl> + THDescBuff s2 = THLongTensor_sizeDesc ( I2 ) ; \ <nl> + THLongStorage_free ( size2 ) ; \ <nl> + THError ( # I1 " and " # I2 " shapes do not match : " \ <nl> + # I1 " % s , " # I2 " % s " , s1 . str , s2 . str ) ; \ <nl> + } else { \ <nl> + THLongStorage_free ( size2 ) ; \ <nl> + } <nl> + <nl> # define THNN_CHECK_NELEMENT ( I1 , I2 ) \ <nl> if ( I1 ! = NULL & & I2 ! = NULL ) { \ <nl> ptrdiff_t n1 = THTensor_ ( nElement ) ( I1 ) ; \ <nl> <nl> " but got " # T " to be of shape : % s " , DIM , DIM_SIZE , SIZE , s1 . str ) ; \ <nl> } <nl> <nl> + # define THNN_CHECK_DIM_SIZE_INDICES ( T , DIM , DIM_SIZE , SIZE ) \ <nl> + if ( THIndexTensor_ ( nDimension ) ( T ) ! = DIM | | \ <nl> + THIndexTensor_ ( size ) ( T , DIM_SIZE ) ! = SIZE ) { \ <nl> + THDescBuff s1 = THIndexTensor_ ( sizeDesc ) ( T ) ; \ <nl> + THError ( " Need " # T " of dimension % d and " # T " . size [ % d ] = = % d " \ <nl> + " but got " # T " to be of shape : % s " , DIM , DIM_SIZE , SIZE , s1 . str ) ; \ <nl> + } <nl> + <nl> # define THNN_ARGCHECK ( COND , ARG , T , FORMAT ) \ <nl> if ( ! ( COND ) ) { \ <nl> THDescBuff s1 = THTensor_ ( sizeDesc ) ( T ) ; \ <nl>
|
Merge pull request from gchanan/spatialNNGeneric
|
pytorch/pytorch
|
9f1b12bf06805707a897d32527261772a2091727
|
2016-11-08T23:17:58Z
|
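A minimal, hypothetical C++ sketch of one motivation for giving index tensors an integer element type (as in the THIndexTensor change in the record above) instead of the floating-point `real` type: a `float` cannot represent every integer once values exceed 2^24, so round-tripping large positions through a real-typed tensor can silently change them. This is an illustration only and does not use the Torch APIs.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    // 2^24 + 1 is the first positive integer a 32-bit float cannot represent.
    const int64_t index = (int64_t(1) << 24) + 1;          // 16777217

    const float  as_float  = static_cast<float>(index);    // rounds to 16777216
    const double as_double = static_cast<double>(index);   // still exact here

    std::printf("original index          : %lld\n", static_cast<long long>(index));
    std::printf("round-tripped via float : %.1f\n", as_float);
    std::printf("round-tripped via double: %.1f\n", as_double);
    return 0;
}
```

Keeping indices in an integer-typed tensor sidesteps this class of rounding issue and also removes the casts to and from `real` that the old pointers required.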
mmm a / doc / files . md <nl> ppp b / doc / files . md <nl> <nl> - Filename | Description <nl> mmmmmmmmmmmmmmmmmmmmm | mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> - banlist . dat | stores the IPs / Subnets of banned nodes <nl> - bitcoin . conf | contains configuration settings for bitcoind or bitcoin - qt <nl> - bitcoind . pid | stores the process id of bitcoind while running <nl> - blocks / blk000 ? ? . dat | block data ( custom , 128 MiB per file ) ; since 0 . 8 . 0 <nl> - blocks / rev000 ? ? . dat | block undo data ( custom ) ; since 0 . 8 . 0 ( format changed since pre - 0 . 8 ) <nl> - blocks / index / * | block index ( LevelDB ) ; since 0 . 8 . 0 <nl> - chainstate / * | blockchain state database ( LevelDB ) ; since 0 . 8 . 0 <nl> - database / * | BDB database environment ; only used for wallet since 0 . 8 . 0 ; moved to wallets / directory on new installs since 0 . 16 . 0 <nl> - db . log | wallet database log file ; moved to wallets / directory on new installs since 0 . 16 . 0 <nl> - debug . log | contains debug information and general logging generated by bitcoind or bitcoin - qt <nl> - fee_estimates . dat | stores statistics used to estimate minimum transaction fees and priorities required for confirmation ; since 0 . 10 . 0 <nl> - indexes / txindex / * | optional transaction index database ( LevelDB ) ; since 0 . 17 . 0 <nl> - mempool . dat | dump of the mempool ' s transactions ; since 0 . 14 . 0 <nl> - peers . dat | peer IP address database ( custom format ) ; since 0 . 7 . 0 <nl> - wallet . dat | personal wallet ( BDB ) with keys and transactions ; moved to wallets / directory on new installs since 0 . 16 . 0 <nl> - wallets / database / * | BDB database environment ; used for wallets since 0 . 16 . 0 <nl> - wallets / db . log | wallet database log file ; since 0 . 16 . 0 <nl> - wallets / wallet . dat | personal wallet ( BDB ) with keys and transactions ; since 0 . 16 . 0 <nl> - . cookie | session RPC authentication cookie ( written at start when cookie authentication is used , deleted on shutdown ) : since 0 . 12 . 0 <nl> - onion_private_key | cached Tor hidden service private key for ` - listenonion ` : since 0 . 12 . 0 <nl> - guisettings . ini . bak | backup of former GUI settings after ` - resetguisettings ` is used <nl> - <nl> - Only used in pre - 0 . 8 . 0 <nl> mmmmmmmmmmmmmmmmmmmmm - <nl> - * blktree / * ; block chain index ( LevelDB ) ; since pre - 0 . 8 , replaced by blocks / index / * in 0 . 8 . 0 <nl> - * coins / * ; unspent transaction output database ( LevelDB ) ; since pre - 0 . 8 , replaced by chainstate / * in 0 . 8 . 0 <nl> - <nl> - Only used before 0 . 8 . 0 <nl> mmmmmmmmmmmmmmmmmmmmm - <nl> - * blkindex . dat : block chain index database ( BDB ) ; replaced by { chainstate / * , blocks / index / * , blocks / rev000 ? ? . dat } in 0 . 8 . 0 <nl> - * blk000 ? . dat : block data ( custom , 2 GiB per file ) ; replaced by blocks / blk000 ? ? . dat in 0 . 8 . 0 <nl> - <nl> - Only used before 0 . 7 . 0 <nl> mmmmmmmmmmmmmmmmmmmmm - <nl> - * addr . dat : peer IP address database ( BDB ) ; replaced by peers . dat in 0 . 7 . 
0 <nl> + # Bitcoin Core file system <nl> + <nl> + * * Contents * * <nl> + <nl> + - [ Data directory location ] ( # data - directory - location ) <nl> + <nl> + - [ Data directory layout ] ( # data - directory - layout ) <nl> + <nl> + - [ Multi - wallet environment ] ( # multi - wallet - environment ) <nl> + <nl> + - [ GUI settings ] ( # gui - settings ) <nl> + <nl> + - [ Legacy subdirectories and files ] ( # legacy - subdirectories - and - files ) <nl> + <nl> + - [ Notes ] ( # notes ) <nl> + <nl> + # # Data directory location <nl> + <nl> + The data directory is the default location where the Bitcoin Core files are stored . <nl> + <nl> + 1 . The default data directory paths for supported platforms are : <nl> + <nl> + Platform | Data directory path <nl> + mmmmmmmmm | mmmmmmmmmmmmmmmmmm - - <nl> + Linux | ` $ HOME / . bitcoin / ` <nl> + macOS | ` $ HOME / Library / Application Support / Bitcoin / ` <nl> + Windows | ` % APPDATA % \ Bitcoin \ ` < sup > [ \ [ 1 \ ] ] ( # note1 ) < / sup > <nl> + <nl> + 2 . The non - default data directory path can be specified by ` - datadir ` option . <nl> + <nl> + 3 . All content of the data directory , except for ` bitcoin . conf ` file , is chain - specific . This means the actual data directory paths for non - mainnet cases differ : <nl> + <nl> + Chain option | Data directory path <nl> + mmmmmmmmmmmmmmmmmm - - | mmmmmmmmmmmmmmmmmm - - <nl> + no option ( mainnet ) | * path_to_datadir * ` / ` <nl> + ` - testnet ` | * path_to_datadir * ` / testnet3 / ` <nl> + ` - regtest ` | * path_to_datadir * ` / regtest / ` <nl> + <nl> + # # Data directory layout <nl> + <nl> + Subdirectory | File ( s ) | Description <nl> + mmmmmmmmmmmmmmmmmm - | mmmmmmmmmmmmmmmmmmmmm - - | mmmmmmmmmmmm <nl> + ` blocks / ` | | Blocks directory ; can be specified by ` - blocksdir ` option ( except for ` blocks / index / ` ) <nl> + ` blocks / index / ` | LevelDB database | Block index ; ` - blocksdir ` option does not affect this path <nl> + ` blocks / ` | ` blkNNNNN . dat ` < sup > [ \ [ 2 \ ] ] ( # note2 ) < / sup > | Actual Bitcoin blocks ( in network format , dumped in raw on disk , 128 MiB per file ) <nl> + ` blocks / ` | ` revNNNNN . dat ` < sup > [ \ [ 2 \ ] ] ( # note2 ) < / sup > | Block undo data ( custom format ) <nl> + ` chainstate / ` | LevelDB database | Blockchain state ( a compact representation of all currently unspent transaction outputs and some metadata about the transactions they are from ) <nl> + ` indexes / txindex / ` | LevelDB database | Transaction index ; * optional * , used if ` - txindex = 1 ` <nl> + ` wallets / ` | | [ Contains wallets ] ( # multi - wallet - environment ) ; can be specified by ` - walletdir ` option ; if ` wallets / ` subdirectory does not exist , a wallet resides in the data directory <nl> + ` . / ` | ` banlist . dat ` | Stores the IPs / subnets of banned nodes <nl> + ` . / ` | ` bitcoin . conf ` | Contains [ configuration settings ] ( bitcoin - conf . md ) for ` bitcoind ` or ` bitcoin - qt ` ; can be specified by ` - conf ` option <nl> + ` . / ` | ` bitcoind . pid ` | Stores the process ID ( PID ) of ` bitcoind ` or ` bitcoin - qt ` while running ; created at start and deleted on shutdown ; can be specified by ` - pid ` option <nl> + ` . / ` | ` debug . log ` | Contains debug information and general logging generated by ` bitcoind ` or ` bitcoin - qt ` ; can be specified by ` - debuglogfile ` option <nl> + ` . / ` | ` fee_estimates . 
dat ` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation <nl> + ` . / ` | ` guisettings . ini . bak ` | Backup of former [ GUI settings ] ( # gui - settings ) after ` - resetguisettings ` option is used <nl> + ` . / ` | ` mempool . dat ` | Dump of the mempool ' s transactions <nl> + ` . / ` | ` onion_private_key ` | Cached Tor hidden service private key for ` - listenonion ` option <nl> + ` . / ` | ` peers . dat ` | Peer IP address database ( custom format ) <nl> + ` . / ` | ` . cookie ` | Session RPC authentication cookie ; if used , created at start and deleted on shutdown ; can be specified by ` - rpccookiefile ` option <nl> + ` . / ` | ` . lock ` | Data directory lock file <nl> + <nl> + # # Multi - wallet environment <nl> + <nl> + Wallets are Berkeley DB ( BDB ) databases : <nl> + <nl> + Subdirectory | File ( s ) | Description <nl> + mmmmmmmmmmmm - | mmmmmmmmmmmmmmmmmm - | mmmmmmmmmmmm <nl> + ` database / ` | BDB logging files | Part of BDB environment ; created at start and deleted on shutdown ; a user * must keep it as safe * as personal wallet ` wallet . dat ` <nl> + ` . / ` | ` db . log ` | BDB error file <nl> + ` . / ` | ` wallet . dat ` | Personal wallet ( BDB ) with keys and transactions <nl> + ` . / ` | ` . walletlock ` | Wallet lock file <nl> + <nl> + 1 . Each user - defined wallet named " wallet_name " resides in ` wallets / wallet_name / ` subdirectory . <nl> + <nl> + 2 . The default ( unnamed ) wallet resides in ` wallets / ` subdirectory ; if the latter does not exist , the wallet resides in the data directory . <nl> + <nl> + 3 . A wallet database path can be specified by ` - wallet ` option . <nl> + <nl> + # # GUI settings <nl> + <nl> + ` bitcoin - qt ` uses [ ` QSettings ` ] ( https : / / doc . qt . io / qt - 5 / qsettings . html ) class ; this implies platform - specific [ locations where application settings are stored ] ( https : / / doc . qt . io / qt - 5 / qsettings . html # locations - where - application - settings - are - stored ) . <nl> + <nl> + # # Legacy subdirectories and files <nl> + <nl> + These subdirectories and files are no longer used by the Bitcoin Core : <nl> + <nl> + Path | Description | Repository notes <nl> + mmmmmmmmmmmmmmm | mmmmmmmmmmmm - | mmmmmmmmmmmmmmm - - <nl> + ` blktree / ` | Blockchain index ; replaced by ` blocks / index / ` in [ 0 . 8 . 0 ] ( https : / / github . com / bitcoin / bitcoin / blob / master / doc / release - notes / release - notes - 0 . 8 . 0 . md # improvements ) | [ PR # 2231 ] ( https : / / github . com / bitcoin / bitcoin / pull / 2231 ) , [ ` 8fdc94cc ` ] ( https : / / github . com / bitcoin / bitcoin / commit / 8fdc94cc8f0341e96b1edb3a5b56811c0b20bd15 ) <nl> + ` coins / ` | Unspent transaction output database ; replaced by ` chainstate / ` in 0 . 8 . 0 | [ PR # 2231 ] ( https : / / github . com / bitcoin / bitcoin / pull / 2231 ) , [ ` 8fdc94cc ` ] ( https : / / github . com / bitcoin / bitcoin / commit / 8fdc94cc8f0341e96b1edb3a5b56811c0b20bd15 ) <nl> + ` blkindex . dat ` | Blockchain index BDB database ; replaced by { ` chainstate / ` , ` blocks / index / ` , ` blocks / revNNNNN . dat ` < sup > [ \ [ 2 \ ] ] ( # note2 ) < / sup > } in 0 . 8 . 0 | [ PR # 1677 ] ( https : / / github . com / bitcoin / bitcoin / pull / 1677 ) <nl> + ` blk000 ? . dat ` | Block data ( custom format , 2 GiB per file ) ; replaced by ` blocks / blkNNNNN . dat ` < sup > [ \ [ 2 \ ] ] ( # note2 ) < / sup > in 0 . 8 . 0 | [ PR # 1677 ] ( https : / / github . 
com / bitcoin / bitcoin / pull / 1677 ) <nl> + ` addr . dat ` | Peer IP address BDB database ; replaced by ` peers . dat ` in [ 0 . 7 . 0 ] ( https : / / github . com / bitcoin / bitcoin / blob / master / doc / release - notes / release - notes - 0 . 7 . 0 . md ) | [ PR # 1198 ] ( https : / / github . com / bitcoin / bitcoin / pull / 1198 ) , [ ` 928d3a01 ` ] ( https : / / github . com / bitcoin / bitcoin / commit / 928d3a011cc66c7f907c4d053f674ea77dc611cc ) <nl> + <nl> + # # Notes <nl> + <nl> + < a name = " note1 " > 1 < / a > . The ` / ` ( slash , U + 002F ) is used as the platform - independent path component separator in this paper . <nl> + <nl> + < a name = " note2 " > 2 < / a > . ` NNNNN ` matches ` [ 0 - 9 ] { 5 } ` regex . <nl> + <nl>
|
Merge: doc: Add detailed info about Bitcoin Core files
|
bitcoin/bitcoin
|
08ed87e8875d72a1d8b157b67bbd431253d7db24
|
2019-10-10T19:58:28Z
|
mmm a / src / qtlibtorrent / qtorrenthandle . cpp <nl> ppp b / src / qtlibtorrent / qtorrenthandle . cpp <nl> QStringList QTorrentHandle : : absolute_files_path_uneeded ( ) const { <nl> QDir saveDir ( save_path ( ) ) ; <nl> QStringList res ; <nl> std : : vector < int > fp = torrent_handle : : file_priorities ( ) ; <nl> - qDebug ( ) < < fp . size ( ) < < num_files ( ) ; <nl> - Q_ASSERT ( fp . size ( ) = = num_files ( ) ) ; <nl> - for ( int i = 0 ; i < num_files ( ) ; + + i ) { <nl> + for ( uint i = 0 ; i < fp . size ( ) ; + + i ) { <nl> if ( fp [ i ] = = 0 ) { <nl> const QString file_path = QDir : : cleanPath ( saveDir . absoluteFilePath ( filepath_at ( i ) ) ) ; <nl> if ( file_path . contains ( " . unwanted " ) ) <nl>
|
Code clean up
|
qbittorrent/qBittorrent
|
7381b0dcf6497751ef7a80eb5ebd1fc4b1e0957d
|
2011-04-23T17:20:44Z
|
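A small hedged sketch of the pattern behind the qBittorrent change above: when a priorities vector and a separately reported file count can disagree, bounding the loop by the vector's own size avoids reading past its end. The names below are illustrative stand-ins, not libtorrent's API.

```cpp
#include <cstdio>
#include <vector>

int main() {
    // Stand-ins: priorities as returned by some file_priorities()-like call,
    // and a file count reported through a different code path.
    std::vector<int> priorities = {0, 1, 0};
    const int reported_file_count = 5;        // may be stale or simply larger

    // Loop over the data actually held, not over the reported count.
    for (std::size_t i = 0; i < priorities.size(); ++i) {
        if (priorities[i] == 0)
            std::printf("file %zu is unneeded\n", i);
    }

    (void)reported_file_count;                // kept only to show the mismatch
    return 0;
}
```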
mmm a / hphp / hack / test / typecheck / dune <nl> ppp b / hphp / hack / test / typecheck / dune <nl> let ( ) = <nl> let typecheck_deps = deps files_tc in <nl> let cmds = [ " - - flags " <nl> ; " - - new - inference - lambda " <nl> + ; " - - coercion - from - dynamic " <nl> ; " - - shallow - class - decl " <nl> ; " - - error - format raw " <nl> ] in <nl> let ( ) = <nl> ; " - - flags " <nl> ; " - - out - extension . legacy_decl . out " <nl> ; " - - new - inference - lambda " <nl> + ; " - - coercion - from - dynamic " <nl> ; " - - error - format raw " ] in <nl> let cmd = String . concat " " cmds in <nl> let typecheck_legacy_decl = <nl> mmm a / hphp / hack / test / typecheck / dynamic / dynamic_error_message1 . php . exp <nl> ppp b / hphp / hack / test / typecheck / dynamic / dynamic_error_message1 . php . exp <nl> @ @ - 1 , 6 + 1 @ @ <nl> - File " dynamic_error_message1 . php " , line 5 , characters 10 - 11 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " dynamic_error_message1 . php " , line 3 , characters 34 - 36 : <nl> - Expected int <nl> - File " dynamic_error_message1 . php " , line 4 , characters 12 - 12 : <nl> - But got dynamic , the result of accessing a property of a dynamic type <nl> + No errors <nl> deleted file mode 100644 <nl> index 4269126fceb . . 00000000000 <nl> mmm a / hphp / hack / test / typecheck / dynamic / dynamic_error_message1 . php . like_types . exp <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - No errors <nl> mmm a / hphp / hack / test / typecheck / dynamic / dynamic_error_message2 . php . exp <nl> ppp b / hphp / hack / test / typecheck / dynamic / dynamic_error_message2 . php . exp <nl> @ @ - 1 , 6 + 1 @ @ <nl> - File " dynamic_error_message2 . php " , line 5 , characters 10 - 11 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " dynamic_error_message2 . php " , line 3 , characters 34 - 36 : <nl> - Expected int <nl> - File " dynamic_error_message2 . php " , line 4 , characters 8 - 11 : <nl> - But got dynamic , the result of calling a dynamic type as a function <nl> + No errors <nl> deleted file mode 100644 <nl> index 4269126fceb . . 00000000000 <nl> mmm a / hphp / hack / test / typecheck / dynamic / dynamic_error_message2 . php . like_types . exp <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - No errors <nl> mmm a / hphp / hack / test / typecheck / dynamic / dynamic_subtype . php . exp <nl> ppp b / hphp / hack / test / typecheck / dynamic / dynamic_subtype . php . exp <nl> @ @ - 1 , 6 + 1 @ @ <nl> - File " dynamic_subtype . php " , line 4 , characters 10 - 11 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " dynamic_subtype . php " , line 3 , characters 44 - 46 : <nl> - Expected int <nl> - File " dynamic_subtype . php " , line 3 , characters 21 - 27 : <nl> - But got dynamic <nl> + No errors <nl> deleted file mode 100644 <nl> index 4269126fceb . . 00000000000 <nl> mmm a / hphp / hack / test / typecheck / dynamic / dynamic_subtype . php . like_types . exp <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - No errors <nl> mmm a / hphp / hack / test / typecheck / dynamic_calls / builtins2 . php . exp <nl> ppp b / hphp / hack / test / typecheck / dynamic_calls / builtins2 . php . exp <nl> @ @ - 1 , 6 + 1 @ @ <nl> - File " builtins2 . php " , line 7 , characters 38 - 49 : <nl> - Invalid argument ( Typing [ 4110 ] ) <nl> - File " dyn_func_pointers . hhi " , line 12 , characters 22 - 27 : <nl> - Expected string <nl> - File " dyn_func_pointers . 
hhi " , line 12 , characters 42 - 48 : <nl> - But got dynamic <nl> + No errors <nl> deleted file mode 100644 <nl> index 4269126fceb . . 00000000000 <nl> mmm a / hphp / hack / test / typecheck / dynamic_calls / builtins2 . php . like_types . exp <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - No errors <nl> mmm a / hphp / hack / test / typecheck / error_messages / bitwise_dynamic . php . exp <nl> ppp b / hphp / hack / test / typecheck / error_messages / bitwise_dynamic . php . exp <nl> @ @ - 1 , 6 + 1 @ @ <nl> - File " bitwise_dynamic . php " , line 4 , characters 10 - 12 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " bitwise_dynamic . php " , line 3 , characters 27 - 32 : <nl> - Expected string <nl> - File " bitwise_dynamic . php " , line 4 , characters 10 - 12 : <nl> - But got dynamic because this is the result of a bitwise operation with all arguments typed dynamic <nl> + No errors <nl> deleted file mode 100644 <nl> index 4269126fceb . . 00000000000 <nl> mmm a / hphp / hack / test / typecheck / error_messages / bitwise_dynamic . php . like_types . exp <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - No errors <nl> mmm a / hphp / hack / test / typecheck / error_messages / inc_dynamic . php . exp <nl> ppp b / hphp / hack / test / typecheck / error_messages / inc_dynamic . php . exp <nl> @ @ - 1 , 6 + 1 @ @ <nl> - File " inc_dynamic . php " , line 4 , characters 10 - 13 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " inc_dynamic . php " , line 3 , characters 27 - 32 : <nl> - Expected string <nl> - File " inc_dynamic . php " , line 4 , characters 10 - 13 : <nl> - But got dynamic because this is the result of an increment / decrement of an argument typed dynamic <nl> + No errors <nl> deleted file mode 100644 <nl> index 4269126fceb . . 00000000000 <nl> mmm a / hphp / hack / test / typecheck / error_messages / inc_dynamic . php . like_types . exp <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - No errors <nl> mmm a / hphp / hack / test / typecheck / error_messages / sum_dynamic . php . exp <nl> ppp b / hphp / hack / test / typecheck / error_messages / sum_dynamic . php . exp <nl> @ @ - 1 , 6 + 1 @ @ <nl> - File " sum_dynamic . php " , line 4 , characters 10 - 16 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " sum_dynamic . php " , line 3 , characters 27 - 32 : <nl> - Expected string <nl> - File " sum_dynamic . php " , line 4 , characters 10 - 16 : <nl> - But got dynamic because this is the sum of two arguments typed dynamic <nl> + No errors <nl> deleted file mode 100644 <nl> index 4269126fceb . . 00000000000 <nl> mmm a / hphp / hack / test / typecheck / error_messages / sum_dynamic . php . like_types . exp <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - No errors <nl> mmm a / hphp / hack / test / typecheck / like_types / return . php . exp <nl> ppp b / hphp / hack / test / typecheck / like_types / return . php . exp <nl> File " return . php " , line 11 , characters 27 - 29 : <nl> Expected int <nl> File " return . php " , line 12 , characters 10 - 17 : <nl> But got ( dynamic & string ) from this " as " assertion <nl> - File " return . php " , line 16 , characters 10 - 19 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " return . php " , line 15 , characters 27 - 29 : <nl> - Expected int <nl> - File " return . php " , line 7 , characters 22 - 25 : <nl> - But got dynamic <nl>
|
Flip on coercion_from_dynamic for typecheck targets
|
facebook/hhvm
|
2518dfb3c6ea1146f6a1e00d8018ce0463cc8df7
|
2019-08-13T22:32:21Z
|
mmm a / arangod / RestHandler / RestEdgesHandler . cpp <nl> ppp b / arangod / RestHandler / RestEdgesHandler . cpp <nl> HttpHandler : : status_t RestEdgesHandler : : execute ( ) { <nl> bool RestEdgesHandler : : readEdges ( std : : vector < traverser : : TraverserExpression * > const & expressions ) { <nl> std : : vector < std : : string > const & suffix = _request - > suffix ( ) ; <nl> <nl> - if ( ! ( suffix . size ( ) = = 1 ) ) { <nl> + if ( suffix . size ( ) ! = 1 ) { <nl> generateError ( HttpResponse : : BAD , <nl> TRI_ERROR_HTTP_BAD_PARAMETER , <nl> " expected GET " + EDGES_PATH + <nl> bool RestEdgesHandler : : readEdges ( std : : vector < traverser : : TraverserExpression * > c <nl> <nl> triagens : : basics : : Json result ( triagens : : basics : : Json : : Object ) ; <nl> result ( " edges " , documents ) ; <nl> + result ( " error " , triagens : : basics : : Json ( false ) ) ; <nl> + result ( " code " , triagens : : basics : : Json ( 200 ) ) ; <nl> <nl> / / and generate a response <nl> generateResult ( result . json ( ) ) ; <nl> bool RestEdgesHandler : : readEdges ( std : : vector < traverser : : TraverserExpression * > c <nl> <nl> bool RestEdgesHandler : : readFilteredEdges ( ) { <nl> std : : vector < traverser : : TraverserExpression * > expressions ; <nl> - TRI_json_t * json = parseJsonBody ( ) ; <nl> + std : : unique_ptr < TRI_json_t > json ( parseJsonBody ( ) ) ; <nl> triagens : : basics : : ScopeGuard guard { <nl> [ ] ( ) - > void { } , <nl> - [ & json , & expressions ] ( ) - > void { <nl> + [ & expressions ] ( ) - > void { <nl> for ( auto & e : expressions ) { <nl> delete e ; <nl> } <nl> - TRI_FreeJson ( TRI_UNKNOWN_MEM_ZONE , json ) ; <nl> } <nl> } ; <nl> if ( json = = nullptr ) { <nl> + delete _response ; <nl> + _response = nullptr ; <nl> return readEdges ( expressions ) ; <nl> } <nl> <nl> - if ( ! TRI_IsArrayJson ( json ) ) { <nl> + if ( ! TRI_IsArrayJson ( json . get ( ) ) ) { <nl> generateError ( HttpResponse : : BAD , <nl> TRI_ERROR_HTTP_BAD_PARAMETER , <nl> " Expected a list of traverser expressions as body parameter " ) ; <nl> return false ; <nl> } <nl> <nl> - size_t length = TRI_LengthArrayJson ( json ) ; <nl> + size_t length = TRI_LengthArrayJson ( json . get ( ) ) ; <nl> expressions . reserve ( length ) ; <nl> <nl> for ( size_t i = 0 ; i < length ; + + i ) { <nl> - TRI_json_t * exp = TRI_LookupArrayJson ( json , i ) ; <nl> + TRI_json_t * exp = TRI_LookupArrayJson ( json . get ( ) , i ) ; <nl> if ( TRI_IsObjectJson ( exp ) ) { <nl> std : : unique_ptr < traverser : : TraverserExpression > expression ( new traverser : : TraverserExpression ( exp ) ) ; <nl> expressions . emplace_back ( expression . get ( ) ) ; <nl>
|
Fixed potential memory leaks and bad access in the edges API
|
arangodb/arangodb
|
07f772c7587636cdb12fbb0f888959e3ca6a94ad
|
2015-11-27T10:20:54Z
|
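A hedged C++ sketch of the RAII direction taken in the ArangoDB change above: owning the parsed JSON with std::unique_ptr and a custom deleter releases it on every exit path, which is what the manual free inside the scope guard had to guarantee by hand. `CJson`, `parse_json_body` and `free_json` are hypothetical stand-ins, not ArangoDB's actual types or functions.

```cpp
#include <cstdio>
#include <cstdlib>
#include <memory>

// Hypothetical C-style JSON API used only for this illustration.
struct CJson { int value; };

CJson* parse_json_body() {                       // may return nullptr on an empty body
    auto* json = static_cast<CJson*>(std::malloc(sizeof(CJson)));
    if (json) json->value = 42;
    return json;
}

void free_json(CJson* json) { std::free(json); }

void handle_request() {
    // The deleter runs on every return path, so no branch can leak the JSON.
    std::unique_ptr<CJson, void (*)(CJson*)> json(parse_json_body(), free_json);
    if (!json)
        return;                                  // early exit: nothing to clean up
    std::printf("parsed value: %d\n", json->value);
}                                                // json freed here automatically

int main() {
    handle_request();
    return 0;
}
```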
mmm a / dbms / src / Databases / DatabaseDictionary . cpp <nl> ppp b / dbms / src / Databases / DatabaseDictionary . cpp <nl> ASTPtr DatabaseDictionary : : getCreateTableQueryImpl ( const Context & context , <nl> return ast ; <nl> } <nl> <nl> - ASTPtr DatabaseDictionary : : getCreateDatabaseQuery ( const Context & / * context * / ) const <nl> + ASTPtr DatabaseDictionary : : getCreateDatabaseQuery ( ) const <nl> { <nl> String query ; <nl> { <nl> mmm a / dbms / src / Databases / DatabaseDictionary . h <nl> ppp b / dbms / src / Databases / DatabaseDictionary . h <nl> class DatabaseDictionary : public IDatabase <nl> <nl> bool empty ( const Context & context ) const override ; <nl> <nl> - ASTPtr getCreateDatabaseQuery ( const Context & context ) const override ; <nl> + ASTPtr getCreateDatabaseQuery ( ) const override ; <nl> <nl> void shutdown ( ) override ; <nl> <nl> mmm a / dbms / src / Databases / DatabaseMemory . cpp <nl> ppp b / dbms / src / Databases / DatabaseMemory . cpp <nl> <nl> # include < common / logger_useful . h > <nl> # include < Databases / DatabaseMemory . h > <nl> # include < Databases / DatabasesCommon . h > <nl> + # include < Parsers / ASTCreateQuery . h > <nl> <nl> <nl> namespace DB <nl> { <nl> <nl> - namespace ErrorCodes <nl> - { <nl> - extern const int CANNOT_GET_CREATE_TABLE_QUERY ; <nl> - extern const int CANNOT_GET_CREATE_DICTIONARY_QUERY ; <nl> - extern const int UNSUPPORTED_METHOD ; <nl> - } <nl> - <nl> DatabaseMemory : : DatabaseMemory ( const String & name_ ) <nl> : DatabaseWithOwnTablesBase ( name_ , " DatabaseMemory ( " + name_ + " ) " ) <nl> { } <nl> void DatabaseMemory : : removeTable ( <nl> detachTable ( table_name ) ; <nl> } <nl> <nl> - ASTPtr DatabaseMemory : : getCreateDatabaseQuery ( <nl> - const Context & ) const <nl> + ASTPtr DatabaseMemory : : getCreateDatabaseQuery ( ) const <nl> { <nl> - / / FIXME <nl> - throw Exception ( " There is no CREATE DATABASE query for DatabaseMemory " , ErrorCodes : : CANNOT_GET_CREATE_TABLE_QUERY ) ; <nl> + auto create_query = std : : make_shared < ASTCreateQuery > ( ) ; <nl> + create_query - > database = database_name ; <nl> + create_query - > set ( create_query - > storage , std : : make_shared < ASTStorage > ( ) ) ; <nl> + create_query - > storage - > set ( create_query - > storage - > engine , makeASTFunction ( getEngineName ( ) ) ) ; <nl> + return create_query ; <nl> } <nl> <nl> } <nl> mmm a / dbms / src / Databases / DatabaseMemory . h <nl> ppp b / dbms / src / Databases / DatabaseMemory . h <nl> class DatabaseMemory : public DatabaseWithOwnTablesBase <nl> const Context & context , <nl> const String & table_name ) override ; <nl> <nl> - ASTPtr getCreateDatabaseQuery ( const Context & context ) const override ; <nl> + ASTPtr getCreateDatabaseQuery ( ) const override ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / Databases / DatabaseMySQL . cpp <nl> ppp b / dbms / src / Databases / DatabaseMySQL . cpp <nl> time_t DatabaseMySQL : : getObjectMetadataModificationTime ( const String & table_nam <nl> return time_t ( local_tables_cache [ table_name ] . modification_time ) ; <nl> } <nl> <nl> - ASTPtr DatabaseMySQL : : getCreateDatabaseQuery ( const Context & ) const <nl> + ASTPtr DatabaseMySQL : : getCreateDatabaseQuery ( ) const <nl> { <nl> const auto & create_query = std : : make_shared < ASTCreateQuery > ( ) ; <nl> create_query - > database = database_name ; <nl> mmm a / dbms / src / Databases / DatabaseMySQL . h <nl> ppp b / dbms / src / Databases / DatabaseMySQL . 
h <nl> class DatabaseMySQL : public IDatabase <nl> <nl> DatabaseTablesIteratorPtr getTablesIterator ( const Context & context , const FilterByNameFunction & filter_by_table_name = { } ) override ; <nl> <nl> - ASTPtr getCreateDatabaseQuery ( const Context & context ) const override ; <nl> + ASTPtr getCreateDatabaseQuery ( ) const override ; <nl> <nl> bool isTableExist ( const Context & context , const String & name ) const override ; <nl> <nl> mmm a / dbms / src / Databases / DatabaseOnDisk . cpp <nl> ppp b / dbms / src / Databases / DatabaseOnDisk . cpp <nl> ASTPtr DatabaseOnDisk : : getCreateTableQueryImpl ( const Context & context , const St <nl> return ast ; <nl> } <nl> <nl> - ASTPtr DatabaseOnDisk : : getCreateDatabaseQuery ( const Context & / * context * / ) const <nl> + ASTPtr DatabaseOnDisk : : getCreateDatabaseQuery ( ) const <nl> { <nl> ASTPtr ast ; <nl> <nl> mmm a / dbms / src / Databases / DatabaseOnDisk . h <nl> ppp b / dbms / src / Databases / DatabaseOnDisk . h <nl> class DatabaseOnDisk : public DatabaseWithOwnTablesBase <nl> IDatabase & to_database , <nl> const String & to_table_name ) override ; <nl> <nl> - ASTPtr getCreateDatabaseQuery ( const Context & context ) const override ; <nl> + ASTPtr getCreateDatabaseQuery ( ) const override ; <nl> <nl> void drop ( const Context & context ) override ; <nl> <nl> mmm a / dbms / src / Databases / IDatabase . h <nl> ppp b / dbms / src / Databases / IDatabase . h <nl> class IDatabase : public std : : enable_shared_from_this < IDatabase > <nl> } <nl> <nl> / / / Get the CREATE DATABASE query for current database . <nl> - virtual ASTPtr getCreateDatabaseQuery ( const Context & context ) const = 0 ; <nl> + virtual ASTPtr getCreateDatabaseQuery ( ) const = 0 ; <nl> <nl> / / / Get name of database . <nl> String getDatabaseName ( ) const { return database_name ; } <nl> mmm a / dbms / src / Interpreters / Context . cpp <nl> ppp b / dbms / src / Interpreters / Context . cpp <nl> DatabasePtr Context : : detachDatabase ( const String & database_name ) <nl> } <nl> <nl> <nl> - ASTPtr Context : : getCreateTableQuery ( const String & database_name , const String & table_name ) const <nl> - { <nl> - auto lock = getLock ( ) ; <nl> - <nl> - String db = resolveDatabase ( database_name , current_database ) ; <nl> - assertDatabaseExists ( db ) ; <nl> - <nl> - return shared - > databases [ db ] - > getCreateTableQuery ( * this , table_name ) ; <nl> - } <nl> - <nl> - <nl> - ASTPtr Context : : getCreateDictionaryQuery ( const String & database_name , const String & dictionary_name ) const <nl> - { <nl> - auto lock = getLock ( ) ; <nl> - <nl> - String db = resolveDatabase ( database_name , current_database ) ; <nl> - assertDatabaseExists ( db ) ; <nl> - <nl> - return shared - > databases [ db ] - > getCreateDictionaryQuery ( * this , dictionary_name ) ; <nl> - } <nl> - <nl> ASTPtr Context : : getCreateExternalTableQuery ( const String & table_name ) const <nl> { <nl> TableAndCreateASTs : : const_iterator jt = external_tables . find ( table_name ) ; <nl> ASTPtr Context : : getCreateExternalTableQuery ( const String & table_name ) const <nl> return jt - > second . 
second ; <nl> } <nl> <nl> - ASTPtr Context : : getCreateDatabaseQuery ( const String & database_name ) const <nl> - { <nl> - auto lock = getLock ( ) ; <nl> - <nl> - String db = resolveDatabase ( database_name , current_database ) ; <nl> - assertDatabaseExists ( db ) ; <nl> - <nl> - return shared - > databases [ db ] - > getCreateDatabaseQuery ( * this ) ; <nl> - } <nl> - <nl> Settings Context : : getSettings ( ) const <nl> { <nl> return settings ; <nl> mmm a / dbms / src / Interpreters / Context . h <nl> ppp b / dbms / src / Interpreters / Context . h <nl> class Context <nl> <nl> String default_format ; / / / Format , used when server formats data by itself and if query does not have FORMAT specification . <nl> / / / Thus , used in HTTP interface . If not specified - then some globally default format is used . <nl> + / / TODO maybe replace with DatabaseMemory ? <nl> TableAndCreateASTs external_tables ; / / / Temporary tables . <nl> Scalars scalars ; <nl> StoragePtr view_source ; / / / Temporary StorageValues used to generate alias columns for materialized views <nl> class Context <nl> std : : optional < UInt16 > getTCPPortSecure ( ) const ; <nl> <nl> / / / Get query for the CREATE table . <nl> - / / TODO do we really need it here ? <nl> - ASTPtr getCreateTableQuery ( const String & database_name , const String & table_name ) const ; <nl> ASTPtr getCreateExternalTableQuery ( const String & table_name ) const ; <nl> - ASTPtr getCreateDatabaseQuery ( const String & database_name ) const ; <nl> - ASTPtr getCreateDictionaryQuery ( const String & database_name , const String & dictionary_name ) const ; <nl> <nl> const DatabasePtr getDatabase ( const String & database_name ) const ; <nl> DatabasePtr getDatabase ( const String & database_name ) ; <nl> mmm a / dbms / src / Interpreters / InterpreterCreateQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterCreateQuery . cpp <nl> void InterpreterCreateQuery : : setEngine ( ASTCreateQuery & create ) const <nl> String as_database_name = create . as_database . empty ( ) ? context . getCurrentDatabase ( ) : create . as_database ; <nl> String as_table_name = create . as_table ; <nl> <nl> - ASTPtr as_create_ptr = context . getCreateTableQuery ( as_database_name , as_table_name ) ; <nl> + ASTPtr as_create_ptr = context . getDatabase ( as_database_name ) - > getCreateTableQuery ( context , as_table_name ) ; <nl> const auto & as_create = as_create_ptr - > as < ASTCreateQuery & > ( ) ; <nl> <nl> if ( as_create . is_view ) <nl> BlockIO InterpreterCreateQuery : : createTable ( ASTCreateQuery & create ) <nl> if ( create . attach & & ! create . storage & & ! create . columns_list ) <nl> { <nl> / / Table SQL definition is available even if the table is detached <nl> - auto query = context . getCreateTableQuery ( database_name , table_name ) ; <nl> + auto query = context . getDatabase ( database_name ) - > getCreateTableQuery ( context , table_name ) ; <nl> create = query - > as < ASTCreateQuery & > ( ) ; / / Copy the saved create query , but use ATTACH instead of CREATE <nl> create . attach = true ; <nl> } <nl> BlockIO InterpreterCreateQuery : : createDictionary ( ASTCreateQuery & create ) <nl> <nl> if ( create . attach ) <nl> { <nl> - auto query = context . getCreateDictionaryQuery ( database_name , dictionary_name ) ; <nl> + auto query = context . getDatabase ( database_name ) - > getCreateDictionaryQuery ( context , dictionary_name ) ; <nl> create = query - > as < ASTCreateQuery & > ( ) ; <nl> create . 
attach = true ; <nl> } <nl> mmm a / dbms / src / Interpreters / InterpreterShowCreateQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterShowCreateQuery . cpp <nl> BlockInputStreamPtr InterpreterShowCreateQuery : : executeImpl ( ) <nl> if ( show_query - > temporary ) <nl> create_query = context . getCreateExternalTableQuery ( show_query - > table ) ; <nl> else <nl> - create_query = context . getCreateTableQuery ( show_query - > database , show_query - > table ) ; <nl> + create_query = context . getDatabase ( show_query - > database ) - > getCreateTableQuery ( context , show_query - > table ) ; <nl> } <nl> else if ( show_query = query_ptr - > as < ASTShowCreateDatabaseQuery > ( ) ; show_query ) <nl> { <nl> if ( show_query - > temporary ) <nl> throw Exception ( " Temporary databases are not possible . " , ErrorCodes : : SYNTAX_ERROR ) ; <nl> - create_query = context . getCreateDatabaseQuery ( show_query - > database ) ; <nl> + create_query = context . getDatabase ( show_query - > database ) - > getCreateDatabaseQuery ( ) ; <nl> } <nl> else if ( show_query = query_ptr - > as < ASTShowCreateDictionaryQuery > ( ) ; show_query ) <nl> { <nl> if ( show_query - > temporary ) <nl> throw Exception ( " Temporary dictionaries are not possible . " , ErrorCodes : : SYNTAX_ERROR ) ; <nl> - create_query = context . getCreateDictionaryQuery ( show_query - > database , show_query - > table ) ; <nl> + create_query = context . getDatabase ( show_query - > database ) - > getCreateDictionaryQuery ( context , show_query - > table ) ; <nl> } <nl> <nl> if ( ! create_query & & show_query - > temporary ) <nl> mmm a / dbms / src / Interpreters / InterpreterSystemQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterSystemQuery . cpp <nl> StoragePtr InterpreterSystemQuery : : tryRestartReplica ( const String & database_nam <nl> <nl> / / / If table was already dropped by anyone , an exception will be thrown <nl> auto table_lock = table - > lockExclusively ( context . getCurrentQueryId ( ) ) ; <nl> - create_ast = system_context . getCreateTableQuery ( database_name , table_name ) ; <nl> + create_ast = database - > getCreateTableQuery ( system_context , table_name ) ; <nl> <nl> database - > detachTable ( table_name ) ; <nl> } <nl>
|
refactor Context 2
|
ClickHouse/ClickHouse
|
03b1a576bad4d0cef5b389effe473128797995a1
|
2019-12-05T13:44:22Z
|
mmm a / docs / WindowsBuild . md <nl> ppp b / docs / WindowsBuild . md <nl> cmake - G Ninja ^ <nl> - DCMAKE_CXX_FLAGS : STRING = " - Wno - c + + 98 - compat - Wno - c + + 98 - compat - pedantic " ^ <nl> - DCMAKE_EXE_LINKER_FLAGS : STRING = " / INCREMENTAL : NO " ^ <nl> - DCMAKE_SHARED_LINKER_FLAGS : STRING = " / INCREMENTAL : NO " ^ <nl> + - DSWIFT_BUILD_SOURCEKIT = ON ^ <nl> - DSWIFT_INCLUDE_DOCS = OFF ^ <nl> - DSWIFT_PATH_TO_CMARK_SOURCE = " S : \ cmark " ^ <nl> - DSWIFT_PATH_TO_CMARK_BUILD = " S : \ b \ cmark " ^ <nl>
|
Merge pull request from apple / windows - sourcekit
|
apple/swift
|
6e8865ddd2202548c0642608d48fd9fc3bab46ce
|
2019-02-08T01:35:24Z
|