Dataset columns (one record per commit; each record below is listed as diff, msg, repo, sha, time):

  diff — string, 41 characters to 2.03M characters (tokenized unified diff)
  msg  — string, 1 to 1.5k characters (commit message)
  repo — string, 5 to 40 characters (owner/name)
  sha  — string, 40 characters (commit hash)
  time — string, 20 characters (ISO-8601 commit timestamp)
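The diff and msg fields are stored in a tokenized serialization: tokens are separated by spaces, line breaks inside a diff appear as the literal marker <nl>, and the unified-diff file headers --- a/... and +++ b/... appear as mmm a / ... and ppp b / ... . The sketch below shows one way to turn a single record back into a readable unified diff. It is a minimal sketch, not part of the dataset: the dataset's name and loading API are not given in this dump, so the record is a small hypothetical dict with the five columns above, and the intra-line token spacing (e.g. "README . md") is left untouched because it cannot be undone reliably.

```python
# Minimal sketch: restore a readable unified diff from one tokenized record.
# The record below is a small hypothetical example with the five columns
# listed above; real diffs in this dump are much longer.

def detokenize_diff(diff: str) -> str:
    """Expand the <nl> line markers and restore the ---/+++ file headers.

    Intra-line token spacing (e.g. "README . md") is part of the
    serialization and is deliberately left as-is here.
    """
    pieces = [p.strip() for p in diff.split("<nl>") if p.strip()]
    restored = []
    for line in pieces:
        # The serialization rewrites "---" as "mmm" and "+++" as "ppp"
        # at the start of the unified-diff file headers.
        if line.startswith("mmm "):
            line = "---" + line[3:]
        elif line.startswith("ppp "):
            line = "+++" + line[3:]
        restored.append(line)
    return "\n".join(restored)


record = {  # hypothetical example record
    "diff": "mmm a / README . md <nl> ppp b / README . md <nl> - old line <nl> + new line <nl>",
    "msg": "update the README",
    "repo": "example/repo",
    "sha": "0" * 40,
    "time": "2020-01-01T00:00:00Z",
}

print(detokenize_diff(record["diff"]))
```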
mmm a / dbms / tests / integration / README . md <nl> ppp b / dbms / tests / integration / README . md <nl> You must install latest Docker from <nl> https : / / docs . docker . com / engine / installation / linux / docker - ce / ubuntu / # set - up - the - repository <nl> Don ' t use Docker from your system repository . <nl> <nl> - * [ pip ] ( https : / / pypi . python . org / pypi / pip ) . To install : ` sudo apt - get install python - pip ` <nl> + * [ pip ] ( https : / / pypi . python . org / pypi / pip ) and ` libpq - dev ` . To install : ` sudo apt - get install python - pip libpq - dev ` <nl> * [ py . test ] ( https : / / docs . pytest . org / ) testing framework . To install : ` sudo - H pip install pytest ` <nl> * [ docker - compose ] ( https : / / docs . docker . com / compose / ) and additional python libraries . To install : ` sudo - H pip install docker - compose docker dicttoxml kazoo PyMySQL psycopg2 pymongo tzlocal kafka - python protobuf pytest - timeout ` <nl> <nl>
Merge pull request from yandex/excitoon-patch-1
ClickHouse/ClickHouse
e8b58e2a07d9ed7fbabe93b48602aa4c0bbdabab
2019-06-29T12:36:18Z
mmm a / lib / Migrator / APIDiffMigratorPass . cpp <nl> ppp b / lib / Migrator / APIDiffMigratorPass . cpp <nl> struct APIDiffMigratorPass : public ASTMigratorPass , public SourceEntityWalker { <nl> unsigned Idx = 0 ; <nl> auto Ranges = getCallArgLabelRanges ( SM , Arg , <nl> LabelRangeEndAt : : LabelNameOnly ) ; <nl> + llvm : : SmallVector < uint8_t , 2 > ToRemoveIndices ; <nl> for ( unsigned I = 0 ; I < Ranges . size ( ) ; I + + ) { <nl> if ( std : : any_of ( IgnoreArgIndex . begin ( ) , IgnoreArgIndex . end ( ) , <nl> [ I ] ( unsigned Ig ) { return Ig = = I ; } ) ) <nl> struct APIDiffMigratorPass : public ASTMigratorPass , public SourceEntityWalker { <nl> if ( Idx < NewName . argSize ( ) ) { <nl> auto Label = NewName . args ( ) [ Idx + + ] ; <nl> <nl> - / / FIXME : We update only when args are consistently valid . <nl> if ( Label ! = " _ " ) { <nl> if ( LR . getByteLength ( ) ) <nl> Editor . replace ( LR , Label ) ; <nl> else <nl> Editor . insert ( LR . getStart ( ) , ( llvm : : Twine ( Label ) + " : " ) . str ( ) ) ; <nl> + } else if ( LR . getByteLength ( ) ) { <nl> + / / New label is " _ " however the old label is explicit . <nl> + ToRemoveIndices . push_back ( I ) ; <nl> } <nl> } <nl> } <nl> + if ( ! ToRemoveIndices . empty ( ) ) { <nl> + auto Ranges = getCallArgLabelRanges ( SM , Arg , <nl> + LabelRangeEndAt : : BeforeElemStart ) ; <nl> + for ( auto I : ToRemoveIndices ) { <nl> + Editor . remove ( Ranges [ I ] ) ; <nl> + } <nl> + } <nl> } <nl> <nl> void handleFuncRename ( ValueDecl * FD , Expr * FuncRefContainer , Expr * Arg ) { <nl> mmm a / test / Migrator / API . json <nl> ppp b / test / Migrator / API . json <nl> <nl> " RightComment " : " barNewInstanceFunc1 ( newlabel1 : newlabel2 : newlabel3 : newlabel4 : ) " , <nl> " ModuleName " : " bar " <nl> } , <nl> + { <nl> + " DiffItemKind " : " CommonDiffItem " , <nl> + " NodeKind " : " Function " , <nl> + " NodeAnnotation " : " Rename " , <nl> + " ChildIndex " : " 0 " , <nl> + " LeftUsr " : " c : objc ( cs ) BarForwardDeclaredClass ( im ) barInstanceFunc2 : toRemove : toRemove1 : toRemove2 : " , <nl> + " LeftComment " : " " , <nl> + " RightUsr " : " " , <nl> + " RightComment " : " barNewInstanceFunc2 ( _ : _ : NotToRemove1 : _ : ) " , <nl> + " ModuleName " : " bar " <nl> + } , <nl> { <nl> " DiffItemKind " : " CommonDiffItem " , <nl> " NodeKind " : " Constructor " , <nl> mmm a / test / Migrator / mock - sdk / Bar . framework / Headers / Bar . h <nl> ppp b / test / Migrator / mock - sdk / Bar . framework / Headers / Bar . h <nl> int barGlobalFuncOldName ( int a ) ; <nl> - ( id ) initWithOldLabel0 : ( int ) frame ; <nl> - ( void ) barInstanceFunc0 ; <nl> - ( void ) barInstanceFunc1 : ( int ) info anotherValue : ( int ) info1 anotherValue1 : ( int ) info2 anotherValue2 : ( int ) info3 ; <nl> + - ( void ) barInstanceFunc2 : ( int ) info toRemove : ( int ) info1 toRemove1 : ( int ) info2 toRemove2 : ( int ) info3 ; <nl> @ end <nl> <nl> enum BarForwardDeclaredEnum { <nl> mmm a / test / Migrator / rename . swift <nl> ppp b / test / Migrator / rename . swift <nl> func foo ( _ b : BarForwardDeclaredClass ) { <nl> barGlobalFuncOldName ( 2 ) <nl> _ = barGlobalVariableOldEnumElement <nl> } <nl> + <nl> + func foo1 ( _ b : BarForwardDeclaredClass ) { <nl> + b . barInstanceFunc2 ( 0 , toRemove : 1 , toRemove1 : 2 , toRemove2 : 3 ) <nl> + } <nl> mmm a / test / Migrator / rename . swift . expected <nl> ppp b / test / Migrator / rename . swift . 
expected <nl> func foo ( _ b : BarForwardDeclaredClass ) { <nl> barGlobalFuncNewName ( newlabel : 2 ) <nl> _ = NewEnum . enumElement <nl> } <nl> + <nl> + func foo1 ( _ b : BarForwardDeclaredClass ) { <nl> + b . barNewInstanceFunc2 ( 0 , 1 , NotToRemove1 : 2 , 3 ) <nl> + } <nl>
[migrator] Handle renaming from explicit argument label to empty argument label at call sites. rdar://32241559 ()
apple/swift
c7b4e36476e95c5ba2c697b6a4f484674326608b
2017-05-23T18:34:05Z
mmm a / addons / skin . confluence / 720p / ViewsVideoLibrary . xml <nl> ppp b / addons / skin . confluence / 720p / ViewsVideoLibrary . xml <nl> <nl> < control type = " group " > <nl> < posx > 0 < / posx > <nl> < posy > 350 < / posy > <nl> - < visible > Control . IsVisible ( 508 ) + [ [ ListItem . IsFolder + Container . Content ( Movies ) ] | Skin . HasSetting ( View508HideInfo ) ] < / visible > <nl> + < visible > Control . IsVisible ( 508 ) + [ [ ListItem . IsFolder + Container . Content ( Movies ) ] | Skin . HasSetting ( View508HideInfo ) | Container . Content ( Sets ) ] < / visible > <nl> < include > VisibleFadeEffect < / include > <nl> < control type = " image " > <nl> < posx > 0 < / posx > <nl> <nl> < posx > 0 < / posx > <nl> < posy > 460 < / posy > <nl> < control type = " fixedlist " id = " 508 " > <nl> - < visible > Container . Content ( Movies ) | Container . Content ( TVShows ) < / visible > <nl> + < visible > Container . Content ( Movies ) | Container . Content ( TVShows ) | Container . Content ( Sets ) < / visible > <nl> < hitrect x = " 0 " y = " - 10 " w = " 1280 " h = " 190 " / > <nl> < posx > - 20 < / posx > <nl> < posy > 0 < / posy > <nl> <nl> < control type = " group " > <nl> < posx > 180 < / posx > <nl> < posy > 40 < / posy > <nl> - < visible > Control . IsVisible ( 508 ) + ! [ ListItem . IsFolder + Container . Content ( Movies ) ] + ! Skin . HasSetting ( View508HideInfo ) < / visible > <nl> + < visible > Control . IsVisible ( 508 ) + ! [ ListItem . IsFolder + Container . Content ( Movies ) ] + ! Skin . HasSetting ( View508HideInfo ) + ! Container . Content ( Sets ) < / visible > <nl> < include > VisibleFadeEffect < / include > <nl> < control type = " image " > <nl> < posx > 0 < / posx > <nl>
Changed: [Confluence] show the fanart view for movie sets
xbmc/xbmc
21dc3a258c2c211571e8be5213edbbe5844596db
2012-10-29T23:33:26Z
mmm a / tensorflow / python / ops / array_grad . py <nl> ppp b / tensorflow / python / ops / array_grad . py <nl> def _SqueezeGrad ( op , grad ) : <nl> def _TransposeGrad ( op , grad ) : <nl> " " " Returns unshuffle ( grad ) . " " " <nl> p = op . inputs [ 1 ] <nl> - if not context . executing_eagerly ( ) : <nl> - p_static = pywrap_tensorflow . TF_TryEvaluateConstant_wrapper ( <nl> - p . graph . _c_graph , p . _as_tf_output ( ) ) # pylint : disable = protected - access <nl> - if p_static is not None : <nl> - p = constant_op . constant ( p_static , dtype = p . dtype ) <nl> return [ array_ops . transpose ( grad , array_ops . invert_permutation ( p ) ) , None ] <nl> <nl> <nl>
Automated rollback of commit 430666a7a6a62063f14a5844558c8a79392b95d9
tensorflow/tensorflow
33820a9e528bae54a0de07cb4ff4f686072e8099
2019-10-05T09:29:07Z
mmm a / src / csharp / Grpc . Core . Tests / ClientServerTest . cs <nl> ppp b / src / csharp / Grpc . Core . Tests / ClientServerTest . cs <nl> public void UnknownMethodHandler ( ) <nl> Assert . AreEqual ( StatusCode . Unimplemented , ex . Status . StatusCode ) ; <nl> } <nl> <nl> + [ Test ] <nl> + public void StatusDetailIsUtf8 ( ) <nl> + { <nl> + / / some japanese and chinese characters <nl> + var nonAsciiString = " \ u30a1 \ u30a2 \ u30a3 \ u62b5 \ u6297 \ u662f \ u5f92 \ u52b3 \ u7684 " ; <nl> + helper . UnaryHandler = new UnaryServerMethod < string , string > ( async ( request , context ) = > <nl> + { <nl> + context . Status = new Status ( StatusCode . Unknown , nonAsciiString ) ; <nl> + return " " ; <nl> + } ) ; <nl> + <nl> + var ex = Assert . Throws < RpcException > ( ( ) = > Calls . BlockingUnaryCall ( helper . CreateUnaryCall ( ) , " abc " ) ) ; <nl> + Assert . AreEqual ( StatusCode . Unknown , ex . Status . StatusCode ) ; <nl> + Assert . AreEqual ( nonAsciiString , ex . Status . Detail ) ; <nl> + } <nl> + <nl> [ Test ] <nl> public void ServerCallContext_PeerInfoPresent ( ) <nl> { <nl> mmm a / src / csharp / Grpc . Core / Internal / BatchContextSafeHandle . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / BatchContextSafeHandle . cs <nl> <nl> <nl> using System ; <nl> using System . Runtime . InteropServices ; <nl> + using System . Text ; <nl> using Grpc . Core ; <nl> <nl> namespace Grpc . Core . Internal <nl> namespace Grpc . Core . Internal <nl> / / / < / summary > <nl> internal class BatchContextSafeHandle : SafeHandleZeroIsInvalid <nl> { <nl> + static readonly Encoding EncodingUTF8 = System . Text . Encoding . UTF8 ; <nl> static readonly NativeMethods Native = NativeMethods . Get ( ) ; <nl> <nl> private BatchContextSafeHandle ( ) <nl> public ClientSideStatus GetReceivedStatusOnClient ( ) <nl> { <nl> UIntPtr detailsLength ; <nl> IntPtr detailsPtr = Native . grpcsharp_batch_context_recv_status_on_client_details ( this , out detailsLength ) ; <nl> - string details = Marshal . PtrToStringAnsi ( detailsPtr , ( int ) detailsLength . ToUInt32 ( ) ) ; <nl> + string details = PtrToStringUtf8 ( detailsPtr , ( int ) detailsLength . ToUInt32 ( ) ) ; <nl> var status = new Status ( Native . grpcsharp_batch_context_recv_status_on_client_status ( this ) , details ) ; <nl> <nl> IntPtr metadataArrayPtr = Native . grpcsharp_batch_context_recv_status_on_client_trailing_metadata ( this ) ; <nl> protected override bool ReleaseHandle ( ) <nl> Native . grpcsharp_batch_context_destroy ( handle ) ; <nl> return true ; <nl> } <nl> + <nl> + string PtrToStringUtf8 ( IntPtr ptr , int len ) <nl> + { <nl> + var bytes = new byte [ len ] ; <nl> + Marshal . Copy ( ptr , bytes , 0 , len ) ; <nl> + return EncodingUTF8 . GetString ( bytes ) ; <nl> + } <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . Core / Internal / CallSafeHandle . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / CallSafeHandle . cs <nl> <nl> using System ; <nl> using System . Diagnostics ; <nl> using System . Runtime . InteropServices ; <nl> + using System . Text ; <nl> using Grpc . Core ; <nl> using Grpc . Core . Utils ; <nl> using Grpc . Core . Profiling ; <nl> namespace Grpc . Core . Internal <nl> internal class CallSafeHandle : SafeHandleZeroIsInvalid , INativeCall <nl> { <nl> public static readonly CallSafeHandle NullInstance = new CallSafeHandle ( ) ; <nl> + static readonly Encoding EncodingUTF8 = System . Text . Encoding . UTF8 ; <nl> static readonly NativeMethods Native = NativeMethods . 
Get ( ) ; <nl> <nl> const uint GRPC_WRITE_BUFFER_HINT = 1 ; <nl> public void StartSendCloseFromClient ( SendCompletionHandler callback ) <nl> var ctx = BatchContextSafeHandle . Create ( ) ; <nl> var optionalPayloadLength = optionalPayload ! = null ? new UIntPtr ( ( ulong ) optionalPayload . Length ) : UIntPtr . Zero ; <nl> completionQueue . CompletionRegistry . RegisterBatchCompletion ( ctx , ( success , context ) = > callback ( success ) ) ; <nl> - Native . grpcsharp_call_send_status_from_server ( this , ctx , status . StatusCode , status . Detail , metadataArray , sendEmptyInitialMetadata , <nl> + var statusDetailBytes = EncodingUTF8 . GetBytes ( status . Detail ) ; <nl> + Native . grpcsharp_call_send_status_from_server ( this , ctx , status . StatusCode , statusDetailBytes , new UIntPtr ( ( ulong ) statusDetailBytes . Length ) , metadataArray , sendEmptyInitialMetadata , <nl> optionalPayload , optionalPayloadLength , writeFlags ) . CheckOk ( ) ; <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . Core / Internal / NativeMethods . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / NativeMethods . cs <nl> public class Delegates <nl> public delegate CallError grpcsharp_call_send_close_from_client_delegate ( CallSafeHandle call , <nl> BatchContextSafeHandle ctx ) ; <nl> public delegate CallError grpcsharp_call_send_status_from_server_delegate ( CallSafeHandle call , <nl> - BatchContextSafeHandle ctx , StatusCode statusCode , string statusMessage , MetadataArraySafeHandle metadataArray , bool sendEmptyInitialMetadata , <nl> + BatchContextSafeHandle ctx , StatusCode statusCode , byte [ ] statusMessage , UIntPtr statusMessageLen , MetadataArraySafeHandle metadataArray , bool sendEmptyInitialMetadata , <nl> byte [ ] optionalSendBuffer , UIntPtr optionalSendBufferLen , WriteFlags writeFlags ) ; <nl> public delegate CallError grpcsharp_call_recv_message_delegate ( CallSafeHandle call , <nl> BatchContextSafeHandle ctx ) ; <nl> mmm a / src / csharp / ext / grpc_csharp_ext . c <nl> ppp b / src / csharp / ext / grpc_csharp_ext . c <nl> grpcsharp_call_send_close_from_client ( grpc_call * call , <nl> <nl> GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server ( <nl> grpc_call * call , grpcsharp_batch_context * ctx , grpc_status_code status_code , <nl> - const char * status_details , grpc_metadata_array * trailing_metadata , <nl> + const char * status_details , size_t status_details_len , <nl> + grpc_metadata_array * trailing_metadata , <nl> int32_t send_empty_initial_metadata , const char * optional_send_buffer , <nl> size_t optional_send_buffer_len , uint32_t write_flags ) { <nl> / * TODO : don ' t use magic number * / <nl> grpc_op ops [ 3 ] ; <nl> memset ( ops , 0 , sizeof ( ops ) ) ; <nl> size_t nops = 1 ; <nl> - grpc_slice status_details_slice = grpc_slice_from_copied_string ( status_details ) ; <nl> + grpc_slice status_details_slice = grpc_slice_from_copied_buffer ( status_details , status_details_len ) ; <nl> ops [ 0 ] . op = GRPC_OP_SEND_STATUS_FROM_SERVER ; <nl> ops [ 0 ] . data . send_status_from_server . status = status_code ; <nl> ops [ 0 ] . data . send_status_from_server . status_details = & status_details_slice ; <nl>
Merge pull request from jtattermusch/csharp_utf8_message
grpc/grpc
ca9d959f1559f19b146178f7f78ff75de63bc4e7
2017-01-31T21:17:00Z
mmm a / tensorflow / python / compat / compat . py <nl> ppp b / tensorflow / python / compat / compat . py <nl> <nl> from tensorflow . python . util import tf_contextlib <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> - _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2019 , 2 , 8 ) <nl> + _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2019 , 2 , 9 ) <nl> <nl> <nl> @ tf_export ( " compat . forward_compatible " ) <nl>
compat: Update forward compatibility horizon to 2019-02-09
tensorflow/tensorflow
b85cb440e257a367fb70f8321ddaa669d1bd9fae
2019-02-09T09:08:09Z
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> Ashish Kulkarni < kulkarni . ashish @ gmail . com > <nl> Pablo Ruiz García < pablo . ruiz @ gmail . com > <nl> Trevor North < trevor @ blubolt . com > <nl> pussbb < pussbb @ gmail . com > <nl> + Nate Pinchot < nate . pinchot @ gmail . com > <nl> Kasper F . Brandt < poizan @ poizan . dk > <nl> peterrehm < peter . rehm @ renvest . de > <nl> Ruslan Grabovoy < kudgo . test @ gmail . com > <nl>
update the AUTHORS file
wkhtmltopdf/wkhtmltopdf
ccb82b1881de69a30ededd1b2a82b3a0fe5a2c75
2014-02-06T07:50:55Z
mmm a / tensorflow / lite / micro / kernels / detection_postprocess . cc <nl> ppp b / tensorflow / lite / micro / kernels / detection_postprocess . cc <nl> limitations under the License . <nl> # include < numeric > <nl> <nl> # define FLATBUFFERS_LOCALE_INDEPENDENT 0 <nl> - # if ! defined ( __GNUC__ ) | | defined ( __CC_ARM ) | | defined ( __clang__ ) <nl> - / / TODO : remove this once this PR is merged and part of tensorflow downloads : <nl> - / / https : / / github . com / google / flatbuffers / pull / 6132 <nl> - # pragma clang diagnostic push <nl> - # pragma clang diagnostic ignored " - Wdouble - promotion " <nl> # include " flatbuffers / flexbuffers . h " <nl> - # pragma clang diagnostic pop <nl> - # else <nl> - # include " flatbuffers / flexbuffers . h " <nl> - # endif <nl> # include " tensorflow / lite / c / builtin_op_data . h " <nl> # include " tensorflow / lite / c / common . h " <nl> # include " tensorflow / lite / kernels / internal / common . h " <nl> struct OpData { <nl> TfLiteQuantizationParams input_box_encodings ; <nl> TfLiteQuantizationParams input_class_predictions ; <nl> TfLiteQuantizationParams input_anchors ; <nl> - <nl> - / / In case out dimensions need to be allocated . <nl> - TfLiteIntArray * detection_boxes_dims ; <nl> - TfLiteIntArray * detection_classes_dims ; <nl> - TfLiteIntArray * detection_scores_dims ; <nl> - TfLiteIntArray * num_detections_dims ; <nl> } ; <nl> <nl> - TfLiteStatus AllocateOutDimensions ( TfLiteContext * context , <nl> - TfLiteTensor * tensor , <nl> - TfLiteIntArray * * dims , int x , int y = 0 , <nl> - int z = 0 ) { <nl> - int size = 1 ; <nl> - int size_dim = 1 ; <nl> - size = size * x ; <nl> - <nl> - if ( y > 0 ) { <nl> - size = size * y ; <nl> - size_dim + + ; <nl> - if ( z > 0 ) { <nl> - size = size * z ; <nl> - size_dim + + ; <nl> - } <nl> - } <nl> - <nl> - * dims = reinterpret_cast < TfLiteIntArray * > ( context - > AllocatePersistentBuffer ( <nl> - context , TfLiteIntArrayGetSizeInBytes ( size ) ) ) ; <nl> - <nl> - ( * dims ) - > size = size_dim ; <nl> - ( * dims ) - > data [ 0 ] = x ; <nl> - if ( y > 0 ) { <nl> - ( * dims ) - > data [ 1 ] = y ; <nl> - } <nl> - if ( z > 0 ) { <nl> - ( * dims ) - > data [ 2 ] = z ; <nl> - } <nl> - <nl> - TFLITE_DCHECK ( tensor - > type = = kTfLiteFloat32 ) ; <nl> - tensor - > bytes = size * sizeof ( float ) ; <nl> - <nl> - return kTfLiteOk ; <nl> - } <nl> - <nl> void * Init ( TfLiteContext * context , const char * buffer , size_t length ) { <nl> OpData * op_data = nullptr ; <nl> <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> context - > RequestScratchBufferInArena ( <nl> context , buffer_size * num_boxes * sizeof ( int ) , & op_data - > selected_idx ) ; <nl> <nl> - / / number of detected boxes <nl> - const int num_detected_boxes = <nl> - op_data - > max_detections * op_data - > max_classes_per_detection ; <nl> - <nl> / / Outputs : detection_boxes , detection_scores , detection_classes , <nl> / / num_detections <nl> TF_LITE_ENSURE_EQ ( context , NumOutputs ( node ) , 4 ) ; <nl> <nl> - / / Output Tensor detection_boxes : size is set to ( 1 , num_detected_boxes , 4 ) <nl> - TfLiteTensor * detection_boxes = <nl> - GetOutput ( context , node , kOutputTensorDetectionBoxes ) ; <nl> - if ( detection_boxes - > dims - > size = = 0 ) { <nl> - TF_LITE_ENSURE_STATUS ( AllocateOutDimensions ( context , detection_boxes , <nl> - & detection_boxes - > dims , <nl> - 1 , num_detected_boxes , 4 ) ) ; <nl> - op_data - > detection_boxes_dims = detection_boxes - > dims ; <nl> - } <nl> - <nl> 
- / / Output Tensor detection_classes : size is set to ( 1 , num_detected_boxes ) <nl> - TfLiteTensor * detection_classes = <nl> - GetOutput ( context , node , kOutputTensorDetectionClasses ) ; <nl> - if ( detection_classes - > dims - > size = = 0 ) { <nl> - TF_LITE_ENSURE_STATUS ( AllocateOutDimensions ( <nl> - context , detection_classes , <nl> - & detection_classes - > dims , 1 , num_detected_boxes ) ) ; <nl> - op_data - > detection_classes_dims = detection_classes - > dims ; <nl> - } <nl> - <nl> - / / Output Tensor detection_scores : size is set to ( 1 , num_detected_boxes ) <nl> - TfLiteTensor * detection_scores = <nl> - GetOutput ( context , node , kOutputTensorDetectionScores ) ; <nl> - if ( detection_scores - > dims - > size = = 0 ) { <nl> - TF_LITE_ENSURE_STATUS ( AllocateOutDimensions ( <nl> - context , detection_scores , <nl> - & detection_scores - > dims , 1 , num_detected_boxes ) ) ; <nl> - op_data - > detection_scores_dims = detection_scores - > dims ; <nl> - } <nl> - <nl> - / / Output Tensor num_detections : size is set to 1 <nl> - TfLiteTensor * num_detections = <nl> - GetOutput ( context , node , kOutputTensorNumDetections ) ; <nl> - if ( num_detections - > dims - > size = = 0 ) { <nl> - TF_LITE_ENSURE_STATUS ( <nl> - AllocateOutDimensions ( context , num_detections , <nl> - & num_detections - > dims , 1 ) ) ; <nl> - op_data - > num_detections_dims = num_detections - > dims ; <nl> - } <nl> - <nl> return kTfLiteOk ; <nl> } <nl> <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> TF_LITE_ENSURE ( context , ( kBatchSize = = 1 ) ) ; <nl> auto * op_data = static_cast < OpData * > ( node - > user_data ) ; <nl> <nl> - TfLiteEvalTensor * detection_boxes = <nl> - tflite : : micro : : GetEvalOutput ( context , node , kOutputTensorDetectionBoxes ) ; <nl> - if ( detection_boxes - > dims - > size = = 0 ) { <nl> - detection_boxes - > dims = op_data - > detection_boxes_dims ; <nl> - } <nl> - TfLiteEvalTensor * detection_classes = <nl> - tflite : : micro : : GetEvalOutput ( context , node , <nl> - kOutputTensorDetectionClasses ) ; <nl> - if ( detection_classes - > dims - > size = = 0 ) { <nl> - detection_classes - > dims = op_data - > detection_classes_dims ; <nl> - } <nl> - TfLiteEvalTensor * detection_scores = <nl> - tflite : : micro : : GetEvalOutput ( context , node , kOutputTensorDetectionScores ) ; <nl> - if ( detection_scores - > dims - > size = = 0 ) { <nl> - detection_scores - > dims = op_data - > detection_scores_dims ; <nl> - } <nl> - TfLiteEvalTensor * num_detections = <nl> - tflite : : micro : : GetEvalOutput ( context , node , kOutputTensorNumDetections ) ; <nl> - if ( num_detections - > dims - > size = = 0 ) { <nl> - num_detections - > dims = op_data - > num_detections_dims ; <nl> - } <nl> - <nl> / / These two functions correspond to two blocks in the Object Detection model . <nl> / / In future , we would like to break the custom op in two blocks , which is <nl> / / currently not feasible because we would like to input quantized inputs <nl> mmm a / tensorflow / lite / micro / kernels / detection_postprocess_test . cc <nl> ppp b / tensorflow / lite / micro / kernels / detection_postprocess_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # if ! 
defined ( __GNUC__ ) | | defined ( __CC_ARM ) | | defined ( __clang__ ) <nl> - / / TODO : remove this once this PR is merged and part of tensorflow downloads : <nl> - / / https : / / github . com / google / flatbuffers / pull / 6132 <nl> - # pragma clang diagnostic push <nl> - # pragma clang diagnostic ignored " - Wdouble - promotion " <nl> # include " flatbuffers / flexbuffers . h " <nl> - # pragma clang diagnostic pop <nl> - # else <nl> - # include " flatbuffers / flexbuffers . h " <nl> - # endif <nl> # include " tensorflow / lite / c / builtin_op_data . h " <nl> # include " tensorflow / lite / c / common . h " <nl> # include " tensorflow / lite / micro / kernels / kernel_runner . h " <nl> - <nl> # include " tensorflow / lite / micro / testing / micro_test . h " <nl> - # include " tensorflow / lite / micro / testing / test_utils . h " <nl> + # include " tensorflow / lite / micro / test_helpers . h " <nl> <nl> / / See : tensorflow / lite / micro / kernels / detection_postprocess_test / readme <nl> # include " tensorflow / lite / micro / kernels / detection_postprocess_test / flexbuffers_generated_data . h " <nl> TF_LITE_MICRO_TEST ( <nl> / * input3 min / max * / 0 . 0 , 100 . 5 ) ; <nl> } <nl> <nl> - TF_LITE_MICRO_TEST ( DetectionPostprocessFloatFastNMSUndefinedOutputDimensions ) { <nl> - float output_data1 [ 12 ] ; <nl> - float output_data2 [ 3 ] ; <nl> - float output_data3 [ 3 ] ; <nl> - float output_data4 [ 1 ] ; <nl> - <nl> - tflite : : testing : : TestDetectionPostprocess ( <nl> - tflite : : testing : : kInputShape1 , tflite : : testing : : kInputData1 , <nl> - tflite : : testing : : kInputShape2 , tflite : : testing : : kInputData2 , <nl> - tflite : : testing : : kInputShape3 , tflite : : testing : : kInputData3 , nullptr , <nl> - output_data1 , nullptr , output_data2 , nullptr , output_data3 , nullptr , <nl> - output_data4 , tflite : : testing : : kGolden1 , tflite : : testing : : kGolden2 , <nl> - tflite : : testing : : kGolden3 , tflite : : testing : : kGolden4 , <nl> - / * tolerance * / 0 , / * Use regular NMS : * / false ) ; <nl> - } <nl> - <nl> TF_LITE_MICRO_TESTS_END <nl>
TFLu: Remove support for unknown output dims for detecion_pp
tensorflow/tensorflow
171fbdaaedbe93c99312ab6dd8556120ab03e77d
2020-10-16T10:01:37Z
mmm a / lib / Frontend / ParseableInterfaceModuleLoader . cpp <nl> ppp b / lib / Frontend / ParseableInterfaceModuleLoader . cpp <nl> class swift : : ParseableInterfaceBuilder { <nl> return ; <nl> } <nl> <nl> - / / Optimize emitted modules . This has to happen after we parse arguments , <nl> - / / because parseSILOpts would override the current optimization mode . <nl> - subInvocation . getSILOptions ( ) . OptMode = OptimizationMode : : ForSpeed ; <nl> - <nl> / / Build the . swiftmodule ; this is a _very_ abridged version of the logic <nl> / / in performCompile in libFrontendTool , specialized , to just the one <nl> / / module - serialization task we ' re trying to do here . <nl> new file mode 100644 <nl> index 000000000000 . . 72992dc8ce0e <nl> mmm / dev / null <nl> ppp b / test / ParseableInterface / optimization - level . swift <nl> <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - swift - frontend - typecheck - emit - parseable - module - interface - path % t / Lib . swiftinterface % s - O <nl> + / / RUN : % target - swift - frontend - build - module - from - parseable - interface % t / Lib . swiftinterface - Xllvm - sil - print - pass - name - o / dev / null 2 > & 1 | % FileCheck - - check - prefix OPT % s <nl> + <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - swift - frontend - typecheck - emit - parseable - module - interface - path % t / Lib . swiftinterface % s - Onone <nl> + / / RUN : % target - swift - frontend - build - module - from - parseable - interface % t / Lib . swiftinterface - Xllvm - sil - print - pass - name - o / dev / null 2 > & 1 | % FileCheck - - check - prefix UNOPT % s <nl> + <nl> + / / This is a bit of an implementation detail , but we want to make sure <nl> + / / optimization passes don ' t run when compiling a . swiftinterface that was <nl> + / / generated with - Onone . <nl> + <nl> + / / OPT : EagerSpecializer <nl> + / / UNOPT - NOT : EagerSpecializer <nl> + public func f ( ) { } <nl> + <nl> mmm a / test / SourceKit / InterfaceGen / gen_swift_module . swift <nl> ppp b / test / SourceKit / InterfaceGen / gen_swift_module . swift <nl> func f ( s : inout [ Int ] ) { <nl> / / Test we can generate the interface of a module loaded via a . swiftinterface file correctly <nl> <nl> / / RUN : % empty - directory ( % t . mod ) <nl> - / / RUN : % swift - emit - module - o / dev / null - emit - parseable - module - interface - path % t . mod / swift_mod . swiftinterface % S / Inputs / swift_mod . swift - parse - as - library <nl> + / / RUN : % swift - emit - module - o / dev / null - emit - parseable - module - interface - path % t . mod / swift_mod . swiftinterface - O % S / Inputs / swift_mod . swift - parse - as - library <nl> / / RUN : % sourcekitd - test - req = interface - gen - module swift_mod - - - I % t . mod - module - cache - path % t . mod / mcp > % t . response <nl> / / RUN : diff - u % s . from_swiftinterface . response % t . response <nl>
Merge remote-tracking branch 'origin/master' into master-next
apple/swift
5cc51e6cd68c1586e9f9e3c2b17cbc5978f4f25a
2019-04-11T18:10:00Z
mmm a / regrank / xgboost_regrank_obj . hpp <nl> ppp b / regrank / xgboost_regrank_obj . hpp <nl> namespace xgboost { <nl> virtual ~ PairwiseRankObj ( void ) { } <nl> virtual void GetLambdaWeight ( const std : : vector < ListEntry > & sorted_list , std : : vector < LambdaPair > & pairs ) { } <nl> } ; <nl> + <nl> + class LambdaRankObj_NDCG : public LambdaRankObj { <nl> + <nl> + public : <nl> + virtual ~ LambdaRankObj_NDCG ( void ) { } <nl> + <nl> + inline float DCG ( const std : : vector < float > & labels ) { <nl> + return 1 . 0 ; <nl> + } <nl> + <nl> + inline float GetIDCG ( const std : : vector < ListEntry > & sorted_list ) { <nl> + std : : vector < float > labels ; <nl> + for ( size_t i = 0 ; i < sorted_list . size ( ) ; i + + ) { <nl> + labels . push_back ( sorted_list [ i ] . label ) ; <nl> + } <nl> + <nl> + std : : sort ( labels . begin ( ) , labels . end ( ) , std : : greater < float > ( ) ) ; <nl> + return DCG ( labels ) ; <nl> + } <nl> + <nl> + / * <nl> + * \ brief Obtain the delta NDCG if trying to switch the positions of instances in index1 or index2 <nl> + * in sorted triples . Here DCG is calculated as sigma_i 2 ^ rel_i / log ( i + 1 ) <nl> + * \ param sorted_list the list containing entry information <nl> + * \ param index1 , index2 the instances switched <nl> + * \ param the IDCG of the list <nl> + * / <nl> + inline float GetLambdaNDCG ( const std : : vector < ListEntry > & sorted_list , <nl> + int index1 , <nl> + int index2 , float IDCG ) { <nl> + double original = ( 1 < < static_cast < int > ( sorted_list [ index1 ] . label ) ) / log ( index1 + 2 ) <nl> + + ( 1 < < static_cast < int > ( sorted_list [ index2 ] . label ) ) / log ( index2 + 2 ) ; <nl> + double changed = ( 1 < < static_cast < int > ( sorted_list [ index2 ] . label ) ) / log ( index1 + 2 ) <nl> + + ( 1 < < static_cast < int > ( sorted_list [ index1 ] . label ) ) / log ( index2 + 2 ) ; <nl> + double ans = ( original - changed ) / IDCG ; <nl> + if ( ans < 0 ) ans = - ans ; <nl> + return static_cast < float > ( ans ) ; <nl> + } <nl> + <nl> + virtual void GetLambdaWeight ( const std : : vector < ListEntry > & sorted_list , std : : vector < LambdaPair > & pairs ) { <nl> + float IDCG = GetIDCG ( sorted_list ) ; <nl> + for ( size_t i = 0 ; i < pairs . size ( ) ; i + + ) { <nl> + pairs [ i ] . weight = GetLambdaNDCG ( sorted_list , <nl> + pairs [ i ] . pos_index , pairs [ i ] . neg_index , IDCG ) ; <nl> + } <nl> + } <nl> + <nl> + } ; <nl> + <nl> + class LambdaRankObj_MAP : public LambdaRankObj { <nl> + <nl> + class Quadruple { <nl> + public : <nl> + / * \ brief the accumulated precision * / <nl> + float ap_acc_ ; <nl> + / * \ brief the accumulated precision assuming a positive instance is missing * / <nl> + float ap_acc_miss_ ; <nl> + / * \ brief the accumulated precision assuming that one more positive instance is inserted ahead * / <nl> + float ap_acc_add_ ; <nl> + / * \ brief the accumulated positive instance count * / <nl> + float hits_ ; <nl> + <nl> + Quadruple ( ) { } <nl> + <nl> + Quadruple ( const Quadruple & q ) { <nl> + ap_acc_ = q . ap_acc_ ; <nl> + ap_acc_miss_ = q . ap_acc_miss_ ; <nl> + ap_acc_add_ = q . ap_acc_add_ ; <nl> + hits_ = q . 
hits_ ; <nl> + } <nl> + <nl> + Quadruple ( float ap_acc , float ap_acc_miss , float ap_acc_add , float hits <nl> + ) : ap_acc_ ( ap_acc ) , ap_acc_miss_ ( ap_acc_miss ) , ap_acc_add_ ( ap_acc_add ) , hits_ ( hits ) { <nl> + <nl> + } <nl> + <nl> + } ; <nl> + <nl> + public : <nl> + virtual ~ LambdaRankObj_MAP ( void ) { } <nl> + <nl> + / * <nl> + * \ brief Obtain the delta MAP if trying to switch the positions of instances in index1 or index2 <nl> + * in sorted triples <nl> + * \ param sorted_list the list containing entry information <nl> + * \ param index1 , index2 the instances switched <nl> + * \ param map_acc a vector containing the accumulated precisions for each position in a list <nl> + * / <nl> + inline float GetLambdaMAP ( const std : : vector < ListEntry > & sorted_list , <nl> + int index1 , int index2 , <nl> + std : : vector < Quadruple > & map_acc ) { <nl> + if ( index1 = = index2 | | sorted_list [ index1 ] . label = = sorted_list [ index2 ] . label ) return 0 . 0 ; <nl> + if ( index1 > index2 ) std : : swap ( index1 , index2 ) ; <nl> + float original = map_acc [ index2 ] . ap_acc_ ; / / The accumulated precision in the interval [ index1 , index2 ] <nl> + if ( index1 ! = 0 ) original - = map_acc [ index1 - 1 ] . ap_acc_ ; <nl> + float changed = 0 ; <nl> + if ( sorted_list [ index1 ] . label < sorted_list [ index2 ] . label ) { <nl> + changed + = map_acc [ index2 - 1 ] . ap_acc_add_ - map_acc [ index1 ] . ap_acc_add_ ; <nl> + changed + = ( map_acc [ index1 ] . hits_ + 1 . 0f ) / ( index1 + 1 ) ; <nl> + } <nl> + else { <nl> + changed + = map_acc [ index2 - 1 ] . ap_acc_miss_ - map_acc [ index1 ] . ap_acc_miss_ ; <nl> + changed + = map_acc [ index2 ] . hits_ / ( index2 + 1 ) ; <nl> + } <nl> + float ans = ( changed - original ) / ( map_acc [ map_acc . size ( ) - 1 ] . hits_ ) ; <nl> + if ( ans < 0 ) ans = - ans ; <nl> + return ans ; <nl> + } <nl> + <nl> + / * <nl> + * \ brief preprocessing results for calculating delta MAP <nl> + * \ return The first field is the accumulated precision , the second field is the <nl> + * accumulated precision assuming a positive instance is missing , <nl> + * the third field is the accumulated precision assuming that one more positive <nl> + * instance is inserted , the fourth field is the accumulated positive instance count <nl> + * / <nl> + inline void GetMAPAcc ( const std : : vector < ListEntry > & sorted_list , <nl> + std : : vector < Quadruple > & map_acc ) { <nl> + map_acc . resize ( sorted_list . size ( ) ) ; <nl> + float hit = 0 , acc1 = 0 , acc2 = 0 , acc3 = 0 ; <nl> + for ( size_t i = 1 ; i < = sorted_list . size ( ) ; i + + ) { <nl> + if ( ( int ) sorted_list [ i - 1 ] . label = = 1 ) { <nl> + hit + + ; <nl> + acc1 + = hit / i ; <nl> + acc2 + = ( hit - 1 ) / i ; <nl> + acc3 + = ( hit + 1 ) / i ; <nl> + } <nl> + map_acc [ i - 1 ] = Quadruple ( acc1 , acc2 , acc3 , hit ) ; <nl> + } <nl> + } <nl> + virtual void GetLambdaWeight ( const std : : vector < ListEntry > & sorted_list , std : : vector < LambdaPair > & pairs ) { <nl> + std : : vector < Quadruple > map_acc ; <nl> + GetMAPAcc ( sorted_list , map_acc ) ; <nl> + for ( size_t i = 0 ; i < pairs . size ( ) ; i + + ) { <nl> + pairs [ i ] . weight = GetLambdaMAP ( sorted_list , pairs [ i ] . pos_index , pairs [ i ] . neg_index , map_acc ) ; <nl> + } <nl> + } <nl> + <nl> + } ; <nl> + <nl> } ; <nl> } ; <nl> # endif <nl>
Implement new Lambda rank interface
dmlc/xgboost
07e98254f5d2c8e2441af8306647f457623049fa
2014-05-16T12:42:46Z
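For reference, the LambdaRank entry above weights each training pair by the NDCG change obtained from swapping the two documents. With zero-based positions i and j, integer relevance labels l_i and l_j, and the convention stated in the code's comments that a document at one-based rank k contributes 2^l / log(k+1) to DCG, the quantity computed by GetLambdaNDCG is

\[
\Delta\mathrm{NDCG}(i, j) \;=\; \frac{\left|\,\bigl(2^{l_i} - 2^{l_j}\bigr)\left(\frac{1}{\log(i+2)} - \frac{1}{\log(j+2)}\right)\right|}{\mathrm{IDCG}}
\]

which is the original/changed difference in the diff once the common terms cancel.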
mmm a / scripts / buildsystems / vcpkg . cmake <nl> ppp b / scripts / buildsystems / vcpkg . cmake <nl> if ( VCPKG_MANIFEST_MODE AND VCPKG_MANIFEST_INSTALL AND NOT _CMAKE_IN_TRY_COMPILE <nl> list ( APPEND _VCPKG_ADDITIONAL_MANIFEST_PARAMS " - - x - no - default - features " ) <nl> endif ( ) <nl> <nl> - file ( TO_NATIVE_PATH " $ { CMAKE_BINARY_DIR } / vcpkg - manifest - install . log " _VCPKG_MANIFEST_INSTALL_LOG ) <nl> + if ( CMAKE_VERSION VERSION_GREATER_EQUAL " 3 . 18 " ) <nl> + set ( _VCPKG_MANIFEST_INSTALL_ECHO_PARAMS ECHO_OUTPUT_VARIABLE ECHO_ERROR_VARIABLE ) <nl> + else ( ) <nl> + set ( _VCPKG_MANIFEST_INSTALL_ECHO_PARAMS ) <nl> + endif ( ) <nl> + <nl> execute_process ( <nl> COMMAND " $ { _VCPKG_EXECUTABLE } " install <nl> - - triplet " $ { VCPKG_TARGET_TRIPLET } " <nl> if ( VCPKG_MANIFEST_MODE AND VCPKG_MANIFEST_INSTALL AND NOT _CMAKE_IN_TRY_COMPILE <nl> " - - x - install - root = $ { _VCPKG_INSTALLED_DIR } " <nl> $ { _VCPKG_ADDITIONAL_MANIFEST_PARAMS } <nl> $ { VCPKG_INSTALL_OPTIONS } <nl> - OUTPUT_FILE " $ { _VCPKG_MANIFEST_INSTALL_LOG } " <nl> - ERROR_FILE " $ { _VCPKG_MANIFEST_INSTALL_LOG } " <nl> + OUTPUT_VARIABLE _VCPKG_MANIFEST_INSTALL_LOGTEXT <nl> + ERROR_VARIABLE _VCPKG_MANIFEST_INSTALL_LOGTEXT <nl> RESULT_VARIABLE _VCPKG_INSTALL_RESULT <nl> + $ { _VCPKG_MANIFEST_INSTALL_ECHO_PARAMS } <nl> ) <nl> <nl> + file ( TO_NATIVE_PATH " $ { CMAKE_BINARY_DIR } / vcpkg - manifest - install . log " _VCPKG_MANIFEST_INSTALL_LOGFILE ) <nl> + file ( WRITE " $ { _VCPKG_MANIFEST_INSTALL_LOGFILE } " " $ { _VCPKG_MANIFEST_INSTALL_LOGTEXT } " ) <nl> + <nl> if ( _VCPKG_INSTALL_RESULT EQUAL 0 ) <nl> message ( STATUS " Running vcpkg install - done " ) <nl> <nl> if ( VCPKG_MANIFEST_MODE AND VCPKG_MANIFEST_INSTALL AND NOT _CMAKE_IN_TRY_COMPILE <nl> " $ { _VCPKG_INSTALLED_DIR } / vcpkg / status " ) <nl> else ( ) <nl> message ( STATUS " Running vcpkg install - failed " ) <nl> - _vcpkg_add_fatal_error ( " vcpkg install failed . See logs for more information : $ { _VCPKG_MANIFEST_INSTALL_LOG } " ) <nl> + _vcpkg_add_fatal_error ( " vcpkg install failed . See logs for more information : $ { _VCPKG_MANIFEST_INSTALL_LOGFILE } " ) <nl> endif ( ) <nl> endif ( ) <nl> endif ( ) <nl>
Write manifest "vcpkg install" output to stdout ()
microsoft/vcpkg
730187bfd9c314c29148495d2c2527797fad5d43
2020-12-21T20:13:32Z
mmm a / hphp / runtime / ext / ext_posix . cpp <nl> ppp b / hphp / runtime / ext / ext_posix . cpp <nl> static Variant php_posix_group_to_array ( int gid , <nl> return false ; <nl> } <nl> <nl> - Array members ; <nl> + Array members = Array : : Create ( ) ; <nl> for ( int count = 0 ; gr . gr_mem [ count ] ! = NULL ; count + + ) { <nl> members . append ( String ( gr . gr_mem [ count ] , CopyString ) ) ; <nl> } <nl> similarity index 100 % <nl> rename from hphp / test / zend / bad / ext / posix / tests / posix_getgrgid_basic . php <nl> rename to hphp / test / zend / good / ext / posix / tests / posix_getgrgid_basic . php <nl> similarity index 100 % <nl> rename from hphp / test / zend / bad / ext / posix / tests / posix_getgrgid_basic . php . expectf <nl> rename to hphp / test / zend / good / ext / posix / tests / posix_getgrgid_basic . php . expectf <nl> similarity index 100 % <nl> rename from hphp / test / zend / bad / ext / posix / tests / posix_getgrgid_basic . php . skipif <nl> rename to hphp / test / zend / good / ext / posix / tests / posix_getgrgid_basic . php . skipif <nl>
posix_getgrgid()['members'] should be empty array if no users found
facebook/hhvm
5910660388b04846da48067b3df159ec64ad27fa
2014-09-09T05:00:17Z
mmm a / tensorflow / python / keras / BUILD <nl> ppp b / tensorflow / python / keras / BUILD <nl> cuda_py_test ( <nl> " / / third_party / py / numpy " , <nl> " / / tensorflow / python : client_testlib " , <nl> ] , <nl> - shard_count = 4 , <nl> + shard_count = 6 , <nl> ) <nl> <nl> py_test ( <nl> mmm a / tensorflow / python / keras / layers / unified_gru_test . py <nl> ppp b / tensorflow / python / keras / layers / unified_gru_test . py <nl> def test_unified_gru_feature_parity_with_canonical_gru ( self ) : <nl> cudnn_model . fit ( x_train , y_train ) <nl> y_4 = cudnn_model . predict ( x_train ) <nl> <nl> - self . assertAllClose ( y_1 , y_3 ) <nl> - self . assertAllClose ( y_2 , y_4 ) <nl> + self . assertAllClose ( y_1 , y_3 , rtol = 1e - 5 , atol = 1e - 5 ) <nl> + self . assertAllClose ( y_2 , y_4 , rtol = 1e - 5 , atol = 1e - 5 ) <nl> <nl> @ parameterized . named_parameters ( <nl> # test_name , use_bias , bias_initializer , activation <nl> mmm a / tensorflow / python / keras / layers / unified_lstm_test . py <nl> ppp b / tensorflow / python / keras / layers / unified_lstm_test . py <nl> def test_unified_lstm_feature_parity_with_canonical_lstm ( self ) : <nl> cudnn_model . fit ( x_train , y_train ) <nl> y_4 = cudnn_model . predict ( x_train ) <nl> <nl> - self . assertAllClose ( y_1 , y_3 ) <nl> - self . assertAllClose ( y_2 , y_4 ) <nl> + self . assertAllClose ( y_1 , y_3 , rtol = 1e - 5 , atol = 1e - 5 ) <nl> + self . assertAllClose ( y_2 , y_4 , rtol = 1e - 5 , atol = 1e - 5 ) <nl> <nl> @ parameterized . named_parameters ( ( ' v0 ' , 0 ) , ( ' v1 ' , 1 ) , ( ' v2 ' , 2 ) ) <nl> def test_implementation_mode_LSTM ( self , implementation_mode ) : <nl>
Fix flakiness in the test case.
tensorflow/tensorflow
d969cd8ff9a337503963c8f4a02f56b7d776171e
2018-12-20T20:04:08Z
mmm a / src / video_core / shader / decode / memory . cpp <nl> ppp b / src / video_core / shader / decode / memory . cpp <nl> u32 ShaderIR : : DecodeMemory ( BasicBlock & bb , u32 pc ) { <nl> } <nl> break ; <nl> } <nl> + case OpCode : : Id : : LD_L : { <nl> + UNIMPLEMENTED_IF_MSG ( instr . ld_l . unknown = = 1 , " LD_L Unhandled mode : { } " , <nl> + static_cast < unsigned > ( instr . ld_l . unknown . Value ( ) ) ) ; <nl> + <nl> + const Node index = Operation ( OperationCode : : IAdd , GetRegister ( instr . gpr8 ) , <nl> + Immediate ( static_cast < s32 > ( instr . smem_imm ) ) ) ; <nl> + const Node lmem = GetLocalMemory ( index ) ; <nl> + <nl> + switch ( instr . ldst_sl . type . Value ( ) ) { <nl> + case Tegra : : Shader : : StoreType : : Bytes32 : <nl> + SetRegister ( bb , instr . gpr0 , lmem ) ; <nl> + break ; <nl> + default : <nl> + UNIMPLEMENTED_MSG ( " LD_L Unhandled type : { } " , <nl> + static_cast < unsigned > ( instr . ldst_sl . type . Value ( ) ) ) ; <nl> + } <nl> + break ; <nl> + } <nl> case OpCode : : Id : : ST_A : { <nl> UNIMPLEMENTED_IF_MSG ( instr . gpr8 . Value ( ) ! = Register : : ZeroIndex , <nl> " Indirect attribute loads are not supported " ) ; <nl>
shader_decode: Implement LD_L
yuzu-emu/yuzu
8d42feb09b25825dad786cf311c9e7721c0f6c7c
2019-01-15T20:54:52Z
mmm a / headers / common . h <nl> ppp b / headers / common . h <nl> inline float32x8 min < float32 , 8 > ( float32x8 a , float32x8 b ) { <nl> return _mm256_min_ps ( a , b ) ; <nl> } <nl> <nl> + template < > <nl> + inline float32x4 min < float32 , 4 > ( float32x4 a , float32x4 b ) { <nl> + return _mm_min_ps ( a , b ) ; <nl> + } <nl> + <nl> + template < > <nl> + inline float32x1 min < float32 , 1 > ( float32x1 a , float32x1 b ) { <nl> + return std : : min ( a , b ) ; <nl> + } <nl> + <nl> template < typename T , int dim > <nl> inline vec < T , dim > max ( vec < T , dim > , vec < T , dim > ) ; <nl> <nl> mmm a / lang / ir . h <nl> ppp b / lang / ir . h <nl> DEFINE_EXPRESSION_OP ( > , cmp_gt ) <nl> DEFINE_EXPRESSION_OP ( > = , cmp_ge ) <nl> DEFINE_EXPRESSION_OP ( = = , cmp_eq ) <nl> <nl> + # define DEFINE_EXPRESSION_FUNC ( op_name ) \ <nl> + inline ExpressionHandle op_name ( const ExpressionHandle & lhs , \ <nl> + const ExpressionHandle & rhs ) { \ <nl> + return ExpressionHandle ( \ <nl> + std : : make_shared < BinaryOpExpression > ( BinaryType : : op_name , lhs , rhs ) ) ; \ <nl> + } <nl> + <nl> + DEFINE_EXPRESSION_FUNC ( min ) ; <nl> + DEFINE_EXPRESSION_FUNC ( max ) ; <nl> + <nl> template < typename T > <nl> inline ExprH cast ( ExprH input ) { <nl> auto ret = std : : make_shared < UnaryOpExpression > ( UnaryType : : cast , input ) ; <nl> mmm a / lang / unit_tests . cpp <nl> ppp b / lang / unit_tests . cpp <nl> TC_TEST ( " rand " ) { <nl> auto func = kernel ( [ & ] ( ) { <nl> declare ( i ) ; <nl> <nl> - For ( i , 0 , n , [ & ] { <nl> - Print ( Rand < float > ( ) ) ; <nl> - } ) ; <nl> + For ( i , 0 , n , [ & ] { Print ( Rand < float > ( ) ) ; } ) ; <nl> } ) ; <nl> <nl> func ( ) ; <nl> auto ray_march = [ & ] { <nl> root . fixed ( 0 , n * n ) . place ( color_r ) . place ( color_g ) . place ( color_b ) ; <nl> } ) ; <nl> <nl> - auto sdf = [ & ] ( Vector p ) { return p . norm ( ) - 1 . 0_f ; } ; <nl> + auto sdf = [ & ] ( Vector p ) { return min ( p . norm ( ) - 1 . 0_f , p ( 1 ) + 2 . 0f ) ; } ; <nl> <nl> float32 eps = 1e - 4f ; <nl> float32 dist_limit = 1e3 ; <nl>
min/max
taichi-dev/taichi
1c89466a5e3d1400dd1696b1c5210fb5f3114a86
2019-02-24T19:51:10Z
mmm a / fdbserver / masterserver . actor . cpp <nl> ppp b / fdbserver / masterserver . actor . cpp <nl> ACTOR Future < Void > masterServer ( MasterInterface mi , Reference < AsyncVar < ServerDB <nl> loop choose { <nl> when ( wait ( core ) ) { break ; } <nl> when ( wait ( onDBChange ) ) { <nl> - onDBChange = db - > onChange ( ) | | ccInterface - > onChange ( ) ; <nl> - if ( ccInterface - > get ( ) . present ( ) & & db - > get ( ) . clusterInterface = = ccInterface - > get ( ) . get ( ) & & ! lifetime . isStillValid ( db - > get ( ) . masterLifetime , mi . id ( ) = = db - > get ( ) . master . id ( ) ) ) { <nl> + onDBChange = db - > onChange ( ) ; <nl> + if ( ! lifetime . isStillValid ( db - > get ( ) . masterLifetime , mi . id ( ) = = db - > get ( ) . master . id ( ) ) ) { <nl> TraceEvent ( " MasterTerminated " , mi . id ( ) ) . detail ( " Reason " , " LifetimeToken " ) . detail ( " MyToken " , lifetime . toString ( ) ) . detail ( " CurrentToken " , db - > get ( ) . masterLifetime . toString ( ) ) ; <nl> TEST ( true ) ; / / Master replaced , dying <nl> if ( BUGGIFY ) wait ( delay ( 5 ) ) ; <nl>
use the old logic for lifetime since we already have verified the cluster controller is correct
apple/foundationdb
32c0169fc88ec997c97cbc7535a6149185cb22e6
2020-07-20T17:26:47Z
mmm a / src / google / protobuf / io / gzip_stream . h <nl> ppp b / src / google / protobuf / io / gzip_stream . h <nl> class LIBPROTOBUF_EXPORT GzipOutputStream : public ZeroCopyOutputStream { <nl> ZLIB = 2 , <nl> } ; <nl> <nl> - struct Options { <nl> + struct LIBPROTOBUF_EXPORT Options { <nl> / / Defaults to GZIP . <nl> Format format ; <nl> <nl>
Add missing export macro
protocolbuffers/protobuf
1e36e1006af3d76e7460263b9345108a0bc2aaf9
2012-12-05T06:26:08Z
mmm a / docs / ABI . rst <nl> ppp b / docs / ABI . rst <nl> Class Metadata <nl> Tuple Metadata <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> <nl> + In addition to the ` common metadata layout ` _ fields , tuple metadata records <nl> + contain the following fields : <nl> + <nl> + - The * * number of elements * * in the tuple is a pointer - sized integer at <nl> + * * offset 1 * * . <nl> + - The * * labels string * * is a pointer to a list of consecutive null - terminated <nl> + label names for the tuple at * * offset 2 * * . Each label name is given as a <nl> + null - terminated , UTF - 8 - encoded string in sequence . <nl> + - The * * element vector * * begins at * * offset 3 * * and consists of a vector of <nl> + type – offset pairs . The metadata for the * n * \ th element ' s type is a pointer <nl> + at * * offset 3 + 2 * n * * . The offset in bytes from the beginning of the tuple to <nl> + the beginning of the * n * \ th element is at * * offset 3 + 2 * n + 1 * * . <nl> + <nl> Function Metadata <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> <nl> + In addition to the ` common metadata layout ` _ fields , function metadata records <nl> + contain the following fields : <nl> + <nl> + - A reference to the * * argument type * * metadata record is stored at <nl> + * * offset 1 * * . If the function takes multiple arguments , this references a <nl> + ` tuple metadata ` _ record . <nl> + - A reference to the * * result type * * metadata record is stored at <nl> + * * offset 2 * * . If the function has multiple returns , this references a <nl> + ` tuple metadata ` _ record . <nl> + <nl> Protocol Metadata <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> <nl> + Protocol metadata records currently have no special fields beyond the <nl> + ` common metadata layout ` _ fields . <nl> + <nl> Metatype Metadata <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> <nl> + In addition to the ` common metadata layout ` _ fields , metatype metadata records <nl> + contain the following fields : <nl> + <nl> + - A reference to the metadata record for the * * instance type * * that the metatype <nl> + represents is stored at * * offset 1 * * . <nl> + <nl> Generic Parameter Vector <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> <nl>
ABI: Document the tuple, function, protocol, and metatype metadata layouts.
apple/swift
b92fa26a56a950d9e999fd85b2127a72772071ac
2013-10-10T21:10:54Z
mmm a / src / dialogs / filesel . cpp <nl> ppp b / src / dialogs / filesel . cpp <nl> jstring ase_file_selector ( const jstring & message , <nl> JWidget goforward = jwidget_find_name ( window , " goforward " ) ; <nl> JWidget goup = jwidget_find_name ( window , " goup " ) ; <nl> JWidget location = jwidget_find_name ( window , " location " ) ; <nl> - filetype = ( ComboBox * ) jwidget_find_name ( window , " filetype " ) ; <nl> + filetype = dynamic_cast < ComboBox * > ( jwidget_find_name ( window , " filetype " ) ) ; <nl> + assert ( filetype ! = NULL ) ; <nl> filename_entry = jwidget_find_name ( window , " filename " ) ; <nl> <nl> jwidget_focusrest ( goback , false ) ; <nl> jstring ase_file_selector ( const jstring & message , <nl> } <nl> else { <nl> fileview = jwidget_find_name ( window , " fileview " ) ; <nl> - filetype = ( ComboBox * ) jwidget_find_name ( window , " filetype " ) ; <nl> + filetype = dynamic_cast < ComboBox * > ( jwidget_find_name ( window , " filetype " ) ) ; <nl> + assert ( filetype ! = NULL ) ; <nl> filename_entry = jwidget_find_name ( window , " filename " ) ; <nl> <nl> jwidget_signal_off ( fileview ) ; <nl> jstring ase_file_selector ( const jstring & message , <nl> static void update_location ( JWidget window ) <nl> { <nl> JWidget fileview = jwidget_find_name ( window , " fileview " ) ; <nl> - ComboBox * location = ( ComboBox * ) jwidget_find_name ( window , " location " ) ; <nl> + ComboBox * location = dynamic_cast < ComboBox * > ( jwidget_find_name ( window , " location " ) ) ; <nl> + assert ( location ! = NULL ) ; <nl> + <nl> FileItem * current_folder = fileview_get_current_folder ( fileview ) ; <nl> FileItem * fileitem = current_folder ; <nl> JList locations = jlist_new ( ) ; <nl> static void add_in_navigation_history ( FileItem * folder ) <nl> static void select_filetype_from_filename ( JWidget window ) <nl> { <nl> JWidget entry = jwidget_find_name ( window , " filename " ) ; <nl> - ComboBox * filetype = ( ComboBox * ) jwidget_find_name ( window , " filetype " ) ; <nl> + ComboBox * filetype = dynamic_cast < ComboBox * > ( jwidget_find_name ( window , " filetype " ) ) ; <nl> + assert ( filetype ! = NULL ) ; <nl> + <nl> const char * filename = entry - > getText ( ) ; <nl> char * p = get_extension ( filename ) ; <nl> char buf [ MAX_PATH ] ; <nl> static bool fileview_msg_proc ( JWidget widget , JMessage msg ) <nl> return false ; <nl> } <nl> <nl> - / * hook for the ' location ' combo - box * / <nl> + / / Hook for the ' location ' combo - box <nl> static bool location_msg_proc ( JWidget widget , JMessage msg ) <nl> { <nl> if ( msg - > type = = JM_SIGNAL ) { <nl> mmm a / src / jinete / jbase . h <nl> ppp b / src / jinete / jbase . h <nl> enum { <nl> JI_SIGNAL_SHOW , <nl> JI_SIGNAL_HIDE , <nl> JI_SIGNAL_ADD_CHILD , <nl> - JI_SIGNAL_REMOVE_CHILD , <nl> - JI_SIGNAL_NEW_PARENT , <nl> JI_SIGNAL_SET_TEXT , <nl> JI_SIGNAL_SET_FONT , <nl> JI_SIGNAL_INIT_THEME , <nl> mmm a / src / jinete / jwidget . cpp <nl> ppp b / src / jinete / jwidget . 
cpp <nl> void jwidget_add_child ( JWidget widget , JWidget child ) <nl> jlist_append ( widget - > children , child ) ; <nl> child - > parent = widget ; <nl> <nl> - jwidget_emit_signal ( child , JI_SIGNAL_NEW_PARENT ) ; <nl> jwidget_emit_signal ( widget , JI_SIGNAL_ADD_CHILD ) ; <nl> } <nl> <nl> void jwidget_remove_child ( JWidget widget , JWidget child ) <nl> <nl> jlist_remove ( widget - > children , child ) ; <nl> child - > parent = NULL ; <nl> - <nl> - jwidget_emit_signal ( child , JI_SIGNAL_NEW_PARENT ) ; <nl> - jwidget_emit_signal ( widget , JI_SIGNAL_REMOVE_CHILD ) ; <nl> } <nl> <nl> void jwidget_replace_child ( JWidget widget , JWidget old_child , JWidget new_child ) <nl> void jwidget_replace_child ( JWidget widget , JWidget old_child , JWidget new_child ) <nl> jlist_insert_before ( widget - > children , before , new_child ) ; <nl> new_child - > parent = widget ; <nl> <nl> - jwidget_emit_signal ( new_child , JI_SIGNAL_NEW_PARENT ) ; <nl> jwidget_emit_signal ( widget , JI_SIGNAL_ADD_CHILD ) ; <nl> } <nl> <nl>
Removed JI_SIGNAL_REMOVE_CHILD and JI_SIGNAL_NEW_PARENT.
aseprite/aseprite
e0ff9d5ee8960a574b8093804eff25465b6b3c13
2010-07-19T21:57:16Z
mmm a / src / mongo / s / collection_metadata . cpp <nl> ppp b / src / mongo / s / collection_metadata . cpp <nl> namespace mongo { <nl> return metadata . release ( ) ; <nl> } <nl> <nl> + CollectionMetadata * CollectionMetadata : : cloneMerge ( const BSONObj & minKey , <nl> + const BSONObj & maxKey , <nl> + const ChunkVersion & newShardVersion , <nl> + string * errMsg ) const { <nl> + <nl> + if ( newShardVersion < = _shardVersion ) { <nl> + <nl> + * errMsg = stream ( ) < < " cannot merge range " < < rangeToString ( minKey , maxKey ) <nl> + < < " , new shard version " < < newShardVersion . toString ( ) <nl> + < < " is not greater than current version " <nl> + < < _shardVersion . toString ( ) ; <nl> + <nl> + warning ( ) < < * errMsg < < endl ; <nl> + return NULL ; <nl> + } <nl> + <nl> + RangeVector overlap ; <nl> + getRangeMapOverlap ( _chunksMap , minKey , maxKey , & overlap ) ; <nl> + <nl> + if ( overlap . empty ( ) | | overlap . size ( ) = = 1 ) { <nl> + <nl> + * errMsg = stream ( ) < < " cannot merge range " < < rangeToString ( minKey , maxKey ) <nl> + < < ( overlap . empty ( ) ? " , no chunks found in this range " : <nl> + " , only one chunk found in this range " ) ; <nl> + <nl> + warning ( ) < < * errMsg < < endl ; <nl> + return NULL ; <nl> + } <nl> + <nl> + bool validStartEnd = true ; <nl> + bool validNoHoles = true ; <nl> + if ( overlap . begin ( ) - > first . woCompare ( minKey ) ! = 0 ) { <nl> + / / First chunk doesn ' t start with minKey <nl> + validStartEnd = false ; <nl> + } <nl> + else if ( overlap . rbegin ( ) - > second . woCompare ( maxKey ) ! = 0 ) { <nl> + / / Last chunk doesn ' t end with maxKey <nl> + validStartEnd = false ; <nl> + } <nl> + else { <nl> + / / Check that there are no holes <nl> + BSONObj prevMaxKey = minKey ; <nl> + for ( RangeVector : : iterator it = overlap . begin ( ) ; it ! = overlap . end ( ) ; + + it ) { <nl> + if ( it - > first . woCompare ( prevMaxKey ) ! = 0 ) { <nl> + validNoHoles = false ; <nl> + break ; <nl> + } <nl> + prevMaxKey = it - > second ; <nl> + } <nl> + } <nl> + <nl> + if ( ! validStartEnd | | ! validNoHoles ) { <nl> + <nl> + * errMsg = stream ( ) < < " cannot merge range " < < rangeToString ( minKey , maxKey ) <nl> + < < " , overlapping chunks " < < overlapToString ( overlap ) <nl> + < < ( ! validStartEnd ? " do not have the same min and max key " : <nl> + " are not all adjacent " ) ; <nl> + <nl> + warning ( ) < < * errMsg < < endl ; <nl> + return NULL ; <nl> + } <nl> + <nl> + auto_ptr < CollectionMetadata > metadata ( new CollectionMetadata ) ; <nl> + metadata - > _keyPattern = this - > _keyPattern ; <nl> + metadata - > _keyPattern . getOwned ( ) ; <nl> + metadata - > _pendingMap = this - > _pendingMap ; <nl> + metadata - > _chunksMap = this - > _chunksMap ; <nl> + metadata - > _rangesMap = this - > _rangesMap ; <nl> + metadata - > _shardVersion = newShardVersion ; <nl> + metadata - > _collVersion = <nl> + newShardVersion > _collVersion ? newShardVersion : this - > _collVersion ; <nl> + <nl> + for ( RangeVector : : iterator it = overlap . begin ( ) ; it ! = overlap . end ( ) ; + + it ) { <nl> + metadata - > _chunksMap . erase ( it - > first ) ; <nl> + } <nl> + <nl> + metadata - > _chunksMap . insert ( make_pair ( minKey , maxKey ) ) ; <nl> + <nl> + dassert ( metadata - > isValid ( ) ) ; <nl> + return metadata . release ( ) ; <nl> + } <nl> + <nl> bool CollectionMetadata : : keyBelongsToMe ( const BSONObj & key ) const { <nl> / / For now , collections don ' t move . 
So if the collection is not sharded , assume <nl> / / the document with the given key can be accessed . <nl> mmm a / src / mongo / s / collection_metadata . h <nl> ppp b / src / mongo / s / collection_metadata . h <nl> namespace mongo { <nl> const ChunkVersion & newShardVersion , <nl> string * errMsg ) const ; <nl> <nl> + / * * <nl> + * Returns a new metadata instance by merging a key range which starts and ends at existing <nl> + * chunks into a single chunk . The range may not have holes . The resulting metadata will <nl> + * have the ' newShardVersion ' . The caller owns the new metadata . <nl> + * <nl> + * If a new metadata can ' t be created , returns NULL and fills in ' errMsg ' , if it was <nl> + * provided . <nl> + * / <nl> + CollectionMetadata * cloneMerge ( const BSONObj & minKey , <nl> + const BSONObj & maxKey , <nl> + const ChunkVersion & newShardVersion , <nl> + string * errMsg ) const ; <nl> + <nl> / / <nl> / / verification logic <nl> / / <nl> mmm a / src / mongo / s / collection_metadata_test . cpp <nl> ppp b / src / mongo / s / collection_metadata_test . cpp <nl> namespace { <nl> ASSERT ( ! cloned - > keyIsPending ( BSON ( " a " < < 25 ) ) ) ; <nl> } <nl> <nl> + TEST_F ( NoChunkFixture , MergeChunkEmpty ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + <nl> + cloned . reset ( getCollMetadata ( ) . cloneMerge ( BSON ( " a " < < 15 ) , <nl> + BSON ( " a " < < 25 ) , <nl> + ChunkVersion ( 1 , 0 , OID : : gen ( ) ) , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_NOT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned = = NULL ) ; <nl> + } <nl> + <nl> / * * <nl> * Fixture with single chunk containing : <nl> * [ 10 - > 20 ) <nl> namespace { <nl> ASSERT ( ! cloned - > keyIsPending ( BSON ( " a " < < 35 ) ) ) ; <nl> } <nl> <nl> + TEST_F ( SingleChunkFixture , MergeChunkSingle ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + <nl> + cloned . reset ( getCollMetadata ( ) . cloneMerge ( BSON ( " a " < < 10 ) , <nl> + BSON ( " a " < < 20 ) , <nl> + ChunkVersion ( 2 , 0 , OID : : gen ( ) ) , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_NOT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned = = NULL ) ; <nl> + } <nl> + <nl> / * * <nl> * Fixture with single chunk containing : <nl> * [ ( min , min ) - > ( max , max ) ) <nl> namespace { <nl> ChunkType nextChunk ; <nl> ASSERT ( getCollMetadata ( ) . getNextChunk ( BSON ( " a " < < 30 ) , & nextChunk ) ) ; <nl> } <nl> + <nl> + TEST_F ( ThreeChunkWithRangeGapFixture , MergeChunkHoleInRange ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + <nl> + / / Try to merge with hole in range <nl> + ChunkVersion newShardVersion ( 5 , 0 , getCollMetadata ( ) . getShardVersion ( ) . epoch ( ) ) ; <nl> + cloned . reset ( getCollMetadata ( ) . cloneMerge ( BSON ( " a " < < 10 ) , <nl> + BSON ( " a " < < MAXKEY ) , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_NOT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned = = NULL ) ; <nl> + } <nl> + <nl> + TEST_F ( ThreeChunkWithRangeGapFixture , MergeChunkDiffEndKey ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + <nl> + / / Try to merge with different end key <nl> + ChunkVersion newShardVersion ( 5 , 0 , getCollMetadata ( ) . getShardVersion ( ) . epoch ( ) ) ; <nl> + cloned . reset ( getCollMetadata ( ) . 
cloneMerge ( BSON ( " a " < < MINKEY ) , <nl> + BSON ( " a " < < 19 ) , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_NOT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned = = NULL ) ; <nl> + } <nl> + <nl> + TEST_F ( ThreeChunkWithRangeGapFixture , MergeChunkMinKey ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + <nl> + ASSERT_EQUALS ( getCollMetadata ( ) . getNumChunks ( ) , 3u ) ; <nl> + <nl> + / / Try to merge lowest chunks together <nl> + ChunkVersion newShardVersion ( 5 , 0 , getCollMetadata ( ) . getShardVersion ( ) . epoch ( ) ) ; <nl> + cloned . reset ( getCollMetadata ( ) . cloneMerge ( BSON ( " a " < < MINKEY ) , <nl> + BSON ( " a " < < 20 ) , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned ! = NULL ) ; <nl> + ASSERT ( cloned - > keyBelongsToMe ( BSON ( " a " < < 10 ) ) ) ; <nl> + ASSERT_EQUALS ( cloned - > getNumChunks ( ) , 2u ) ; <nl> + ASSERT_EQUALS ( cloned - > getShardVersion ( ) . majorVersion ( ) , 5 ) ; <nl> + } <nl> + <nl> + TEST_F ( ThreeChunkWithRangeGapFixture , MergeChunkMaxKey ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + ChunkVersion newShardVersion ( 5 , 0 , getCollMetadata ( ) . getShardVersion ( ) . epoch ( ) ) ; <nl> + <nl> + / / Add one chunk to complete the range <nl> + ChunkType chunk ; <nl> + chunk . setMin ( BSON ( " a " < < 20 ) ) ; <nl> + chunk . setMax ( BSON ( " a " < < 30 ) ) ; <nl> + cloned . reset ( getCollMetadata ( ) . clonePlusChunk ( chunk , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + ASSERT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT_EQUALS ( cloned - > getNumChunks ( ) , 4u ) ; <nl> + ASSERT ( cloned ! = NULL ) ; <nl> + <nl> + / / Try to merge highest chunks together <nl> + newShardVersion . incMajor ( ) ; <nl> + cloned . reset ( cloned - > cloneMerge ( BSON ( " a " < < 20 ) , <nl> + BSON ( " a " < < MAXKEY ) , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned ! = NULL ) ; <nl> + ASSERT ( cloned - > keyBelongsToMe ( BSON ( " a " < < 30 ) ) ) ; <nl> + ASSERT_EQUALS ( cloned - > getNumChunks ( ) , 3u ) ; <nl> + ASSERT_EQUALS ( cloned - > getShardVersion ( ) . majorVersion ( ) , 6 ) ; <nl> + } <nl> + <nl> + TEST_F ( ThreeChunkWithRangeGapFixture , MergeChunkFullRange ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + ChunkVersion newShardVersion ( 5 , 0 , getCollMetadata ( ) . getShardVersion ( ) . epoch ( ) ) ; <nl> + <nl> + / / Add one chunk to complete the range <nl> + ChunkType chunk ; <nl> + chunk . setMin ( BSON ( " a " < < 20 ) ) ; <nl> + chunk . setMax ( BSON ( " a " < < 30 ) ) ; <nl> + cloned . reset ( getCollMetadata ( ) . clonePlusChunk ( chunk , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + ASSERT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT_EQUALS ( cloned - > getNumChunks ( ) , 4u ) ; <nl> + ASSERT ( cloned ! = NULL ) ; <nl> + <nl> + / / Try to merge all chunks together <nl> + newShardVersion . incMajor ( ) ; <nl> + cloned . reset ( cloned - > cloneMerge ( BSON ( " a " < < MINKEY ) , <nl> + BSON ( " a " < < MAXKEY ) , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned ! 
= NULL ) ; <nl> + ASSERT ( cloned - > keyBelongsToMe ( BSON ( " a " < < 10 ) ) ) ; <nl> + ASSERT ( cloned - > keyBelongsToMe ( BSON ( " a " < < 30 ) ) ) ; <nl> + ASSERT_EQUALS ( cloned - > getNumChunks ( ) , 1u ) ; <nl> + ASSERT_EQUALS ( cloned - > getShardVersion ( ) . majorVersion ( ) , 6 ) ; <nl> + } <nl> + <nl> + TEST_F ( ThreeChunkWithRangeGapFixture , MergeChunkMiddleRange ) { <nl> + <nl> + string errMsg ; <nl> + scoped_ptr < CollectionMetadata > cloned ; <nl> + ChunkVersion newShardVersion ( 5 , 0 , getCollMetadata ( ) . getShardVersion ( ) . epoch ( ) ) ; <nl> + <nl> + / / Add one chunk to complete the range <nl> + ChunkType chunk ; <nl> + chunk . setMin ( BSON ( " a " < < 20 ) ) ; <nl> + chunk . setMax ( BSON ( " a " < < 30 ) ) ; <nl> + cloned . reset ( getCollMetadata ( ) . clonePlusChunk ( chunk , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + ASSERT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT_EQUALS ( cloned - > getNumChunks ( ) , 4u ) ; <nl> + ASSERT ( cloned ! = NULL ) ; <nl> + <nl> + / / Try to merge middle two chunks <nl> + newShardVersion . incMajor ( ) ; <nl> + cloned . reset ( cloned - > cloneMerge ( BSON ( " a " < < 10 ) , <nl> + BSON ( " a " < < 30 ) , <nl> + newShardVersion , <nl> + & errMsg ) ) ; <nl> + <nl> + ASSERT_EQUALS ( errMsg , " " ) ; <nl> + ASSERT ( cloned ! = NULL ) ; <nl> + ASSERT ( cloned - > keyBelongsToMe ( BSON ( " a " < < 20 ) ) ) ; <nl> + ASSERT_EQUALS ( cloned - > getNumChunks ( ) , 3u ) ; <nl> + ASSERT_EQUALS ( cloned - > getShardVersion ( ) . majorVersion ( ) , 6 ) ; <nl> + } <nl> + <nl> + <nl> + <nl> } / / unnamed namespace <nl> mmm a / src / mongo / s / d_logic . h <nl> ppp b / src / mongo / s / d_logic . h <nl> namespace mongo { <nl> void splitChunk ( const string & ns , const BSONObj & min , const BSONObj & max , const vector < BSONObj > & splitKeys , <nl> ChunkVersion version ) ; <nl> <nl> + / * * <nl> + * Creates and installs a new chunk metadata for a given collection by merging a range of <nl> + * chunks [ ' minKey ' , ' maxKey ' ) into a single chunk with version ' mergedVersion ' . <nl> + * The current metadata must overlap the range completely and minKey and maxKey must not <nl> + * divide an existing chunk . <nl> + * <nl> + * The merged chunk version must have a greater version than the current shard version , <nl> + * and if it has a greater major version clients will need to reload metadata . <nl> + * <nl> + * @ param ns the collection <nl> + * @ param minKey maxKey the range which should be merged <nl> + * @ param newShardVersion the shard version the newly merged chunk should have <nl> + * / <nl> + void mergeChunks ( const string & ns , <nl> + const BSONObj & minKey , <nl> + const BSONObj & maxKey , <nl> + ChunkVersion mergedVersion ) ; <nl> + <nl> bool inCriticalMigrateSection ( ) ; <nl> <nl> / * * <nl> mmm a / src / mongo / s / d_state . cpp <nl> ppp b / src / mongo / s / d_state . cpp <nl> namespace mongo { <nl> _collMetadata [ ns ] = cloned ; <nl> } <nl> <nl> + void ShardingState : : mergeChunks ( const string & ns , <nl> + const BSONObj & minKey , <nl> + const BSONObj & maxKey , <nl> + ChunkVersion mergedVersion ) { <nl> + <nl> + scoped_lock lk ( _mutex ) ; <nl> + <nl> + CollectionMetadataMap : : const_iterator it = _collMetadata . find ( ns ) ; <nl> + verify ( it ! = _collMetadata . 
end ( ) ) ; <nl> + <nl> + string errMsg ; <nl> + <nl> + CollectionMetadataPtr cloned ( it - > second - > cloneMerge ( minKey , <nl> + maxKey , <nl> + mergedVersion , <nl> + & errMsg ) ) ; <nl> + / / uassert to match old behavior , TODO : report errors w / o throwing <nl> + uassert ( 17004 , errMsg , NULL ! = cloned . get ( ) ) ; <nl> + <nl> + _collMetadata [ ns ] = cloned ; <nl> + } <nl> + <nl> void ShardingState : : resetVersion ( const string & ns ) { <nl> scoped_lock lk ( _mutex ) ; <nl> <nl>
SERVER-8869 merge chunks: metadata clone and sharding state piece
mongodb/mongo
57213bdd0d1abf214f4a7db3f6e4aa161a69a9c5
2013-08-01T14:01:55Z
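The cloneMerge change above reduces to one invariant check before the merge is applied: the requested range must begin at the first overlapped chunk's min key, end at the last overlapped chunk's max key, cover at least two chunks, and leave no hole between consecutive chunks. A minimal standalone sketch of that validation, using lexicographically compared std::string keys and the hypothetical names ChunkMap and rangeIsMergeable rather than the actual MongoDB BSONObj/RangeMap types:

    #include <map>
    #include <string>

    // Stand-in for the shard's chunk map: min key -> max key, ordered by min key.
    typedef std::map<std::string, std::string> ChunkMap;

    // True only if [minKey, maxKey) exactly covers a contiguous run of two or
    // more chunks: same start, same end, and no holes in between.
    bool rangeIsMergeable(const ChunkMap& chunks,
                          const std::string& minKey,
                          const std::string& maxKey) {
        ChunkMap::const_iterator it = chunks.find(minKey);
        if (it == chunks.end()) return false;       // range must start on a chunk boundary

        std::string prevMax = minKey;
        size_t count = 0;
        for (; it != chunks.end(); ++it) {
            if (it->first != prevMax) return false;   // hole between adjacent chunks
            prevMax = it->second;
            ++count;
            if (prevMax == maxKey) return count >= 2; // must end on a boundary, >1 chunk
            if (prevMax > maxKey) return false;       // overshoots the requested end
        }
        return false;                                 // chunks ran out before maxKey
    }

The real implementation additionally clones the metadata, erases the overlapped entries, inserts the single merged chunk, and bumps the shard version, as shown in the diff.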
mmm a / lib / Transforms / LoopTiling . cpp <nl> ppp b / lib / Transforms / LoopTiling . cpp <nl> static bool setTiledIndexSetHyperRect ( ArrayRef < ForStmt * > origLoops , <nl> / / TODO ( bondhugula ) : Keep it simple for now - constant upper bound . <nl> if ( ! origLoops [ i ] - > hasConstantUpperBound ( ) ) <nl> return false ; <nl> + <nl> int64_t largestDiv = getLargestDivisorOfTripCount ( * origLoops [ i ] ) ; <nl> auto mayBeConstantCount = getConstantTripCount ( * origLoops [ i ] ) ; <nl> AffineMap lbMap , ubMap ; <nl> auto dim = b . getAffineDimExpr ( 0 ) ; <nl> lbMap = b . getAffineMap ( 1 , 0 , dim , { } ) ; <nl> newLoops [ width + i ] - > setLowerBound ( newLoops [ i ] , lbMap ) ; <nl> + <nl> + / / Set the upper bound . <nl> if ( mayBeConstantCount . hasValue ( ) & & <nl> mayBeConstantCount . getValue ( ) < tileSizes [ i ] ) { <nl> - ubMap = b . getConstantAffineMap ( mayBeConstantCount . getValue ( ) - 1 ) ; <nl> + / / Trip count is less than tile size ; upper bound is the trip count . <nl> + ubMap = b . getConstantAffineMap ( mayBeConstantCount . getValue ( ) ) ; <nl> newLoops [ width + i ] - > setUpperBoundMap ( ubMap ) ; <nl> - } else if ( largestDiv % tileSizes [ i ] = = 0 ) { <nl> - / / No need of min . <nl> - ubMap = b . getAffineMap ( 1 , 0 , dim + tileSizes [ i ] - 1 , { } ) ; <nl> - newLoops [ width + i ] - > setUpperBound ( newLoops [ i ] , ubMap ) ; <nl> - } else { <nl> + } else if ( largestDiv % tileSizes [ i ] ! = 0 ) { <nl> + / / Intra - tile loop ii goes from i to min ( i + tileSize , ub_i ) . <nl> auto ubMax = <nl> b . getAffineConstantExpr ( origLoops [ i ] - > getConstantUpperBound ( ) ) ; <nl> - ubMap = b . getAffineMap ( 1 , 0 , { dim + tileSizes [ i ] - 1 , ubMax } , { } ) ; <nl> + ubMap = b . getAffineMap ( 1 , 0 , { dim + tileSizes [ i ] , ubMax } , { } ) ; <nl> + newLoops [ width + i ] - > setUpperBound ( newLoops [ i ] , ubMap ) ; <nl> + } else { <nl> + / / No need of the min expression . <nl> + ubMap = b . getAffineMap ( 1 , 0 , dim + tileSizes [ i ] , { } ) ; <nl> newLoops [ width + i ] - > setUpperBound ( newLoops [ i ] , ubMap ) ; <nl> } <nl> } <nl> mmm a / test / Transforms / loop - tiling . mlir <nl> ppp b / test / Transforms / loop - tiling . 
mlir <nl> <nl> / / RUN : mlir - opt % s - loop - tile | FileCheck % s <nl> <nl> - / / CHECK : # map0 = ( d0 ) - > ( d0 + 31 ) <nl> - / / CHECK : # map1 = ( d0 ) - > ( d0 + 31 , 51 ) <nl> + / / CHECK : # map0 = ( d0 ) - > ( d0 + 32 ) <nl> + / / CHECK : # map1 = ( d0 ) - > ( d0 + 32 , 50 ) <nl> / / CHECK - LABEL : mlfunc @ loop_tiling ( ) <nl> / / CHECK - NEXT : for % i0 = 0 to 256 step 32 { <nl> / / CHECK - NEXT : for % i1 = 0 to 512 step 32 { <nl> <nl> / / CHECK - NEXT : } <nl> / / CHECK - NEXT : } <nl> / / CHECK - NEXT : } <nl> - / / CHECK - NEXT : for % i6 = 0 to 51 step 32 { <nl> + / / CHECK - NEXT : for % i6 = 0 to 50 step 32 { <nl> / / CHECK - NEXT : for % i7 = ( d0 ) - > ( d0 ) ( % i6 ) to min # map1 ( % i6 ) { <nl> / / CHECK - NEXT : " bar " ( % i7 , % i7 ) : ( index , index ) - > ( ) <nl> / / CHECK - NEXT : } <nl> / / CHECK - NEXT : } <nl> / / CHECK - NEXT : for % i8 = 0 to 21 step 32 { <nl> - / / CHECK - NEXT : for % i9 = ( d0 ) - > ( d0 ) ( % i8 ) to 20 { <nl> + / / CHECK - NEXT : for % i9 = ( d0 ) - > ( d0 ) ( % i8 ) to 21 { <nl> / / CHECK - NEXT : " foobar " ( % i9 ) : ( index ) - > ( ) <nl> / / CHECK - NEXT : } <nl> / / CHECK - NEXT : } <nl> mlfunc @ loop_tiling ( ) { <nl> } <nl> } <nl> <nl> - for % x = 0 to 51 { <nl> + for % x = 0 to 50 { <nl> " bar " ( % x , % x ) : ( index , index ) - > ( ) <nl> } <nl> <nl>
Complete migration to exclusive upper bound
tensorflow/tensorflow
58f9681000cb85061adf3b09999d7f8668344821
2019-03-29T20:52:17Z
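The tiling fix above is a uniform shift from inclusive to exclusive upper bounds: the intra-tile loop now runs from i to min(i + tileSize, ub), where ub is itself exclusive, and the min disappears when the trip count divides evenly by the tile size. A plain C++ rendering of the resulting loop structure (an assumed tiledLoop helper, not the MLIR builder API used by the pass):

    #include <algorithm>
    #include <cstdio>

    // Tiled loop nest with exclusive bounds, mirroring the maps set by
    // setTiledIndexSetHyperRect after this change:
    //   inter-tile: i  in [0, ub)            step tileSize
    //   intra-tile: ii in [i, min(i + tileSize, ub))
    // When ub % tileSize == 0 the min() is redundant, matching the
    // "no need of the min expression" branch in the pass.
    void tiledLoop(int ub, int tileSize) {
        for (int i = 0; i < ub; i += tileSize) {
            int tileUb = std::min(i + tileSize, ub);  // exclusive upper bound
            for (int ii = i; ii < tileUb; ++ii) {
                std::printf("visit %d\n", ii);        // loop body placeholder
            }
        }
    }

With ub = 50 and tileSize = 32 this yields the tiles [0, 32) and [32, 50), which is exactly the min #map1 form checked by the updated loop-tiling.mlir test.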
mmm a / BUILD <nl> ppp b / BUILD <nl> grpc_cc_library ( <nl> ] , <nl> ) <nl> <nl> - grpc_cc_library ( <nl> - name = " grpc " , <nl> + grpc_cc_libraries ( <nl> + name_list = [ " grpc " , " grpc_unsecure " , ] , <nl> srcs = [ <nl> " src / core / lib / surface / init . c " , <nl> - " src / core / plugin_registry / grpc_plugin_registry . c " , <nl> + ] , <nl> + additional_src_list = [ <nl> + [ <nl> + " src / core / plugin_registry / grpc_plugin_registry . c " , <nl> + ] , <nl> + [ <nl> + " src / core / lib / surface / init_unsecure . c " , <nl> + " src / core / plugin_registry / grpc_unsecure_plugin_registry . c " , <nl> + ] , <nl> ] , <nl> language = " c " , <nl> standalone = True , <nl> deps = [ <nl> " census " , <nl> " grpc_base " , <nl> - " grpc_lb_policy_grpclb_secure " , <nl> " grpc_lb_policy_pick_first " , <nl> " grpc_lb_policy_round_robin " , <nl> " grpc_load_reporting " , <nl> " grpc_max_age_filter " , <nl> - " grpc_resolver_dns_ares " , <nl> " grpc_resolver_dns_native " , <nl> " grpc_resolver_sockaddr " , <nl> - " grpc_secure " , <nl> " grpc_transport_chttp2_client_insecure " , <nl> - " grpc_transport_chttp2_client_secure " , <nl> " grpc_transport_chttp2_server_insecure " , <nl> - " grpc_transport_chttp2_server_secure " , <nl> " grpc_message_size_filter " , <nl> " grpc_deadline_filter " , <nl> ] , <nl> + additional_dep_list = [ <nl> + [ <nl> + " grpc_secure " , <nl> + " grpc_resolver_dns_ares " , <nl> + " grpc_lb_policy_grpclb_secure " , <nl> + " grpc_transport_chttp2_client_secure " , <nl> + " grpc_transport_chttp2_server_secure " , <nl> + ] , <nl> + [ ] , <nl> + ] , <nl> ) <nl> <nl> grpc_cc_library ( <nl> grpc_cc_library ( <nl> ] , <nl> ) <nl> <nl> - grpc_cc_library ( <nl> - name = " grpc_unsecure " , <nl> - srcs = [ <nl> - " src / core / lib / surface / init . c " , <nl> - " src / core / lib / surface / init_unsecure . c " , <nl> - " src / core / plugin_registry / grpc_unsecure_plugin_registry . c " , <nl> - ] , <nl> - language = " c " , <nl> - standalone = True , <nl> - deps = [ <nl> - " census " , <nl> - " grpc_base " , <nl> - " grpc_lb_policy_grpclb " , <nl> - " grpc_lb_policy_pick_first " , <nl> - " grpc_lb_policy_round_robin " , <nl> - " grpc_load_reporting " , <nl> - " grpc_max_age_filter " , <nl> - " grpc_resolver_dns_native " , <nl> - " grpc_resolver_sockaddr " , <nl> - " grpc_transport_chttp2_client_insecure " , <nl> - " grpc_transport_chttp2_server_insecure " , <nl> - " grpc_message_size_filter " , <nl> - " grpc_deadline_filter " , <nl> - ] , <nl> - ) <nl> - <nl> grpc_cc_library ( <nl> name = " grpc + + " , <nl> srcs = [ <nl> mmm a / Makefile <nl> ppp b / Makefile <nl> LD_ubsan = clang <nl> LDXX_ubsan = clang + + <nl> CPPFLAGS_ubsan = - O0 - fsanitize - coverage = edge - fsanitize = undefined - fno - omit - frame - pointer - Wno - unused - command - line - argument - Wvarargs <nl> LDFLAGS_ubsan = - fsanitize = undefined , unsigned - integer - overflow <nl> - DEFINES_ubsan = NDEBUG <nl> + DEFINES_ubsan = NDEBUG GRPC_UBSAN <nl> <nl> VALID_CONFIG_tsan = 1 <nl> REQUIRE_CUSTOM_LIBRARIES_tsan = 1 <nl> AROPTS = $ ( GRPC_CROSS_AROPTS ) # e . g . , rc - - target = elf32 - little <nl> USE_BUILT_PROTOC = false <nl> endif <nl> <nl> - GTEST_LIB = - Ithird_party / googletest / googletest / include - Ithird_party / googletest / googletest third_party / googletest / googletest / src / gtest - all . cc <nl> + GTEST_LIB = - Ithird_party / googletest / googletest / include - Ithird_party / googletest / googletest third_party / googletest / googletest / src / gtest - all . 
cc - Ithird_party / googletest / googlemock / include - Ithird_party / googletest / googlemock third_party / googletest / googlemock / src / gmock - all . cc <nl> GTEST_LIB + = - lgflags <nl> ifeq ( $ ( V ) , 1 ) <nl> E = @ : <nl> PROTOBUF_PKG_CONFIG = false <nl> PC_REQUIRES_GRPCXX = <nl> PC_LIBS_GRPCXX = <nl> <nl> - CPPFLAGS : = - Ithird_party / googletest / googletest / include $ ( CPPFLAGS ) <nl> + CPPFLAGS : = - Ithird_party / googletest / googletest / include - Ithird_party / googletest / googlemock / include $ ( CPPFLAGS ) <nl> <nl> PROTOC_PLUGINS_ALL = $ ( BINDIR ) / $ ( CONFIG ) / grpc_cpp_plugin $ ( BINDIR ) / $ ( CONFIG ) / grpc_csharp_plugin $ ( BINDIR ) / $ ( CONFIG ) / grpc_node_plugin $ ( BINDIR ) / $ ( CONFIG ) / grpc_objective_c_plugin $ ( BINDIR ) / $ ( CONFIG ) / grpc_php_plugin $ ( BINDIR ) / $ ( CONFIG ) / grpc_python_plugin $ ( BINDIR ) / $ ( CONFIG ) / grpc_ruby_plugin <nl> PROTOC_PLUGINS_DIR = $ ( BINDIR ) / $ ( CONFIG ) <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / health / v1 / health . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / health / v1 / health . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / health / v1 / health . pb . cc : src / proto / grpc / health / v1 / health . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / lb / v1 / load_balancer . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / lb / v1 / load_balancer . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / lb / v1 / load_balancer . pb . cc : src / proto / grpc / lb / v1 / load_balancer . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / reflection / v1alpha / reflection . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / reflection / v1alpha / reflection . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / reflection / v1alpha / reflection . pb . cc : src / proto / grpc / reflection / v1alpha / reflection . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / status / status . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / status / status . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / status / status . pb . cc : src / proto / grpc / status / status . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / compiler_test . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / compiler_test . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / compiler_test . pb . cc : src / proto / grpc / testing / compiler_test . 
proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> $ ( GENDIR ) / src / proto / grpc / testing / compiler_test . pb . cc : src / proto / grpc / testing / com <nl> $ ( GENDIR ) / src / proto / grpc / testing / compiler_test . grpc . pb . cc : src / proto / grpc / testing / compiler_test . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ GRPC ] Generating gRPC ' s protobuf service CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> - $ ( Q ) $ ( PROTOC ) - Ithird_party / protobuf / src - I . - - grpc_out = $ ( GENDIR ) - - plugin = protoc - gen - grpc = $ ( PROTOC_PLUGINS_DIR ) / grpc_cpp_plugin $ ( EXECUTABLE_SUFFIX ) $ < <nl> + $ ( Q ) $ ( PROTOC ) - Ithird_party / protobuf / src - I . - - grpc_out = generate_mock_code = true : $ ( GENDIR ) - - plugin = protoc - gen - grpc = $ ( PROTOC_PLUGINS_DIR ) / grpc_cpp_plugin $ ( EXECUTABLE_SUFFIX ) $ < <nl> endif <nl> <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / control . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / control . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / control . pb . cc : src / proto / grpc / testing / control . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ ( GENDIR ) / src / proto / grpc / testing / payloads . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / stats . pb . cc <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc : src / proto / grpc / testing / duplicate / echo_duplicate . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc : src / proto / grpc / testing / echo . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc : src / proto / grpc / testing / echo . proto $ <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc : src / proto / grpc / testing / echo . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc <nl> $ ( E ) " [ GRPC ] Generating gRPC ' s protobuf service CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> - $ ( Q ) $ ( PROTOC ) - Ithird_party / protobuf / src - I . 
- - grpc_out = $ ( GENDIR ) - - plugin = protoc - gen - grpc = $ ( PROTOC_PLUGINS_DIR ) / grpc_cpp_plugin $ ( EXECUTABLE_SUFFIX ) $ < <nl> + $ ( Q ) $ ( PROTOC ) - Ithird_party / protobuf / src - I . - - grpc_out = generate_mock_code = true : $ ( GENDIR ) - - plugin = protoc - gen - grpc = $ ( PROTOC_PLUGINS_DIR ) / grpc_cpp_plugin $ ( EXECUTABLE_SUFFIX ) $ < <nl> endif <nl> <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc : src / proto / grpc / testing / echo_messages . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / empty . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / empty . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / empty . pb . cc : src / proto / grpc / testing / empty . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / messages . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / messages . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / messages . pb . cc : src / proto / grpc / testing / messages . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / metrics . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / metrics . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / metrics . pb . cc : src / proto / grpc / testing / metrics . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / payloads . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / payloads . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / payloads . pb . cc : src / proto / grpc / testing / payloads . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / services . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / services . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / services . pb . cc : src / proto / grpc / testing / services . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ ( GENDIR ) / src / proto / grpc / testing / messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / control . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / stats . pb . 
cc <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / stats . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / stats . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / stats . pb . cc : src / proto / grpc / testing / stats . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> ifeq ( $ ( NO_PROTOC ) , true ) <nl> $ ( GENDIR ) / src / proto / grpc / testing / test . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / src / proto / grpc / testing / test . grpc . pb . cc : protoc_dep_error <nl> else <nl> + <nl> $ ( GENDIR ) / src / proto / grpc / testing / test . pb . cc : src / proto / grpc / testing / test . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ ( GENDIR ) / src / proto / grpc / testing / empty . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / messages . pb . cc <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> mmm a / bazel / cc_grpc_library . bzl <nl> ppp b / bazel / cc_grpc_library . bzl <nl> <nl> <nl> load ( " / / : bazel / generate_cc . bzl " , " generate_cc " ) <nl> <nl> - def cc_grpc_library ( name , srcs , deps , proto_only , well_known_protos , use_external = False , * * kwargs ) : <nl> + def cc_grpc_library ( name , srcs , deps , proto_only , well_known_protos , generate_mock , use_external = False , * * kwargs ) : <nl> " " " Generates C + + grpc classes from a . proto file . <nl> <nl> Assumes the generated classes will be used in cc_api_version = 2 . <nl> def cc_grpc_library ( name , srcs , deps , proto_only , well_known_protos , use_externa <nl> " @ com_google_protobuf / / : well_known_protos " <nl> use_external : When True the grpc deps are prefixed with / / external . This <nl> allows grpc to be used as a dependency in other bazel projects . <nl> + generate_mock : When true GMOCk code for client stub is generated . <nl> * * kwargs : rest of arguments , e . g . , compatible_with and visibility . <nl> " " " <nl> if len ( srcs ) > 1 : <nl> def cc_grpc_library ( name , srcs , deps , proto_only , well_known_protos , use_externa <nl> srcs = [ proto_target ] , <nl> plugin = plugin , <nl> well_known_protos = well_known_protos , <nl> + generate_mock = generate_mock , <nl> * * kwargs <nl> ) <nl> <nl> mmm a / bazel / generate_cc . bzl <nl> ppp b / bazel / generate_cc . bzl <nl> def generate_cc_impl ( ctx ) : <nl> if ctx . executable . plugin : <nl> outs + = [ proto . basename [ : - len ( " . proto " ) ] + " . grpc . pb . h " for proto in protos ] <nl> outs + = [ proto . basename [ : - len ( " . proto " ) ] + " . grpc . pb . cc " for proto in protos ] <nl> + if ctx . attr . generate_mock : <nl> + outs + = [ proto . basename [ : - len ( " . proto " ) ] + " _mock . grpc . pb . h " for proto in protos ] <nl> else : <nl> outs + = [ proto . basename [ : - len ( " . proto " ) ] + " . pb . h " for proto in protos ] <nl> outs + = [ proto . basename [ : - len ( " . proto " ) ] + " . pb . cc " for proto in protos ] <nl> def generate_cc_impl ( ctx ) : <nl> arguments = [ ] <nl> if ctx . executable . plugin : <nl> arguments + = [ " - - plugin = protoc - gen - PLUGIN = " + ctx . executable . plugin . path ] <nl> - arguments + = [ " - - PLUGIN_out = " + " , " . join ( ctx . attr . 
flags ) + " : " + dir_out ] <nl> + flags = list ( ctx . attr . flags ) <nl> + if ctx . attr . generate_mock : <nl> + flags . append ( " generate_mock_code = true " ) <nl> + arguments + = [ " - - PLUGIN_out = " + " , " . join ( flags ) + " : " + dir_out ] <nl> additional_input = [ ctx . executable . plugin ] <nl> else : <nl> arguments + = [ " - - cpp_out = " + " , " . join ( ctx . attr . flags ) + " : " + dir_out ] <nl> generate_cc = rule ( <nl> " well_known_protos " : attr . label ( <nl> mandatory = False , <nl> ) , <nl> + " generate_mock " : attr . bool ( <nl> + default = False , <nl> + mandatory = False , <nl> + ) , <nl> " _protoc " : attr . label ( <nl> default = Label ( " / / external : protocol_compiler " ) , <nl> executable = True , <nl> mmm a / bazel / grpc_build_system . bzl <nl> ppp b / bazel / grpc_build_system . bzl <nl> def grpc_cc_library ( name , srcs = [ ] , public_hdrs = [ ] , hdrs = [ ] , external_deps <nl> ] <nl> ) <nl> <nl> - def grpc_cc_libraries ( name_list , additional_dep_list , srcs = [ ] , public_hdrs = [ ] , hdrs = [ ] , external_deps = [ ] , deps = [ ] , standalone = False , language = " C + + " ) : <nl> - for i in range ( len ( name_list ) ) : <nl> + def grpc_cc_libraries ( name_list , additional_src_list = [ ] , additional_dep_list = [ ] , srcs = [ ] , public_hdrs = [ ] , hdrs = [ ] , external_deps = [ ] , deps = [ ] , standalone = False , language = " C + + " ) : <nl> + names = len ( name_list ) <nl> + asl = additional_src_list + [ [ ] ] * ( names - len ( additional_src_list ) ) <nl> + adl = additional_dep_list + [ [ ] ] * ( names - len ( additional_dep_list ) ) <nl> + for i in range ( names ) : <nl> grpc_cc_library ( <nl> name = name_list [ i ] , <nl> - srcs = srcs , <nl> + srcs = srcs + asl [ i ] , <nl> hdrs = hdrs , <nl> public_hdrs = public_hdrs , <nl> - deps = deps + additional_dep_list [ i ] , <nl> + deps = deps + adl [ i ] , <nl> external_deps = external_deps , <nl> standalone = standalone , <nl> language = language <nl> def grpc_proto_plugin ( name , srcs = [ ] , deps = [ ] ) : <nl> load ( " / / : bazel / cc_grpc_library . bzl " , " cc_grpc_library " ) <nl> <nl> def grpc_proto_library ( name , srcs = [ ] , deps = [ ] , well_known_protos = None , <nl> - has_services = True , use_external = False ) : <nl> + has_services = True , use_external = False , generate_mock = False ) : <nl> cc_grpc_library ( <nl> name = name , <nl> srcs = srcs , <nl> def grpc_proto_library ( name , srcs = [ ] , deps = [ ] , well_known_protos = None , <nl> well_known_protos = well_known_protos , <nl> proto_only = not has_services , <nl> use_external = use_external , <nl> + generate_mock = generate_mock , <nl> ) <nl> <nl> mmm a / build . yaml <nl> ppp b / build . yaml <nl> filegroups : <nl> - name : grpc + + _test <nl> language : c + + <nl> public_headers : <nl> + - include / grpc + + / test / mock_stream . h <nl> - include / grpc + + / test / server_context_test_spouse . h <nl> deps : <nl> - grpc + + <nl> targets : <nl> - grpc <nl> - gpr <nl> args : <nl> - - - - generated_file_path = gens / src / proto / grpc / testing / compiler_test . grpc . pb . h <nl> + - - - generated_file_path = gens / src / proto / grpc / testing / <nl> - name : grpc_cli <nl> build : test <nl> run : false <nl> targets : <nl> gtest : true <nl> build : test <nl> language : c + + <nl> + headers : <nl> + - include / grpc + + / test / mock_stream . h <nl> src : <nl> - test / cpp / end2end / mock_test . 
cc <nl> deps : <nl> configs : <nl> CPPFLAGS : - O0 - fsanitize - coverage = edge - fsanitize = undefined - fno - omit - frame - pointer <nl> - Wno - unused - command - line - argument - Wvarargs <nl> CXX : clang + + <nl> - DEFINES : NDEBUG <nl> + DEFINES : NDEBUG GRPC_UBSAN <nl> LD : clang <nl> LDFLAGS : - fsanitize = undefined , unsigned - integer - overflow <nl> LDXX : clang + + <nl> mmm a / doc / service_config . md <nl> ppp b / doc / service_config . md <nl> The service config is a JSON string of the following form : <nl> ` ` ` <nl> { <nl> / / Load balancing policy name . <nl> - / / Supported values are ' round_robin ' and ' grpclb ' . <nl> - / / Optional ; if unset , the default behavior is pick the first available <nl> - / / backend . <nl> - / / Note that if the resolver returns only balancer addresses and no <nl> - / / backend addresses , gRPC will always use the ' grpclb ' policy , <nl> - / / regardless of what this field is set to . <nl> + / / Currently , the only selectable client - side policy provided with gRPC <nl> + / / is ' round_robin ' , but third parties may add their own policies . <nl> + / / This field is optional ; if unset , the default behavior is to pick <nl> + / / the first available backend . <nl> + / / If the policy name is set via the client API , that value overrides <nl> + / / the value specified here . <nl> + / / <nl> + / / Note that if the resolver returns at least one balancer address ( as <nl> + / / opposed to backend addresses ) , gRPC will use grpclb ( see <nl> + / / https : / / github . com / grpc / grpc / blob / master / doc / load - balancing . md ) , <nl> + / / regardless of what LB policy is requested either here or via the <nl> + / / client API . However , if the resolver returns at least one backend <nl> + / / address in addition to the balancer address ( es ) , the client may fall <nl> + / / back to the requested policy if it is unable to reach any of the <nl> + / / grpclb load balancers . <nl> ' loadBalancingPolicy ' : string , <nl> <nl> / / Per - method configuration . Optional . <nl> mmm a / grpc . def <nl> ppp b / grpc . def <nl> EXPORTS <nl> grpc_server_request_registered_call <nl> grpc_server_create <nl> grpc_server_register_completion_queue <nl> - grpc_server_register_non_listening_completion_queue <nl> grpc_server_add_insecure_http2_port <nl> grpc_server_start <nl> grpc_server_shutdown_and_notify <nl> mmm a / include / grpc + + / impl / codegen / client_unary_call . h <nl> ppp b / include / grpc + + / impl / codegen / client_unary_call . h <nl> template < class InputMessage , class OutputMessage > <nl> Status BlockingUnaryCall ( ChannelInterface * channel , const RpcMethod & method , <nl> ClientContext * context , const InputMessage & request , <nl> OutputMessage * result ) { <nl> - CompletionQueue cq ( true ) ; / / Pluckable completion queue <nl> + CompletionQueue cq ( grpc_completion_queue_attributes { <nl> + GRPC_CQ_CURRENT_VERSION , GRPC_CQ_PLUCK , <nl> + GRPC_CQ_DEFAULT_POLLING } ) ; / / Pluckable completion queue <nl> Call call ( channel - > CreateCall ( method , context , & cq ) ) ; <nl> CallOpSet < CallOpSendInitialMetadata , CallOpSendMessage , <nl> CallOpRecvInitialMetadata , CallOpRecvMessage < OutputMessage > , <nl> mmm a / include / grpc + + / impl / codegen / completion_queue . h <nl> ppp b / include / grpc + + / impl / codegen / completion_queue . h <nl> class CompletionQueue : private GrpcLibraryCodegen { <nl> public : <nl> / / / Default constructor . Implicitly creates a \ a grpc_completion_queue <nl> / / / instance . 
<nl> - CompletionQueue ( ) : CompletionQueue ( false ) { } <nl> + CompletionQueue ( ) <nl> + : CompletionQueue ( grpc_completion_queue_attributes { <nl> + GRPC_CQ_CURRENT_VERSION , GRPC_CQ_NEXT , GRPC_CQ_DEFAULT_POLLING } ) { } <nl> <nl> / / / Wrap \ a take , taking ownership of the instance . <nl> / / / <nl> class CompletionQueue : private GrpcLibraryCodegen { <nl> } ; <nl> void CompleteAvalanching ( ) ; <nl> <nl> + protected : <nl> + / / / Private constructor of CompletionQueue only visible to friend classes <nl> + CompletionQueue ( const grpc_completion_queue_attributes & attributes ) { <nl> + cq_ = g_core_codegen_interface - > grpc_completion_queue_create ( <nl> + g_core_codegen_interface - > grpc_completion_queue_factory_lookup ( <nl> + & attributes ) , <nl> + & attributes , NULL ) ; <nl> + InitialAvalanching ( ) ; / / reserve this for the future shutdown <nl> + } <nl> + <nl> private : <nl> / / Friend synchronous wrappers so that they can access Pluck ( ) , which is <nl> / / a semi - private API geared towards the synchronous implementation . <nl> class CompletionQueue : private GrpcLibraryCodegen { <nl> const InputMessage & request , <nl> OutputMessage * result ) ; <nl> <nl> - / / / Private constructor of CompletionQueue only visible to friend classes <nl> - CompletionQueue ( bool is_pluck ) { <nl> - if ( is_pluck ) { <nl> - cq_ = g_core_codegen_interface - > grpc_completion_queue_create_for_pluck ( <nl> - nullptr ) ; <nl> - } else { <nl> - cq_ = g_core_codegen_interface - > grpc_completion_queue_create_for_next ( <nl> - nullptr ) ; <nl> - } <nl> - InitialAvalanching ( ) ; / / reserve this for the future shutdown <nl> - } <nl> - <nl> NextStatus AsyncNextInternal ( void * * tag , bool * ok , gpr_timespec deadline ) ; <nl> <nl> / / / Wraps \ a grpc_completion_queue_pluck . <nl> class CompletionQueue : private GrpcLibraryCodegen { <nl> / / / by servers . Instantiated by \ a ServerBuilder . <nl> class ServerCompletionQueue : public CompletionQueue { <nl> public : <nl> - bool IsFrequentlyPolled ( ) { return is_frequently_polled_ ; } <nl> + bool IsFrequentlyPolled ( ) { return polling_type_ ! = GRPC_CQ_NON_LISTENING ; } <nl> <nl> private : <nl> - bool is_frequently_polled_ ; <nl> + grpc_cq_polling_type polling_type_ ; <nl> friend class ServerBuilder ; <nl> / / / \ param is_frequently_polled Informs the GRPC library about whether the <nl> / / / server completion queue would be actively polled ( by calling Next ( ) or <nl> / / / AsyncNext ( ) ) . By default all server completion queues are assumed to be <nl> / / / frequently polled . <nl> - ServerCompletionQueue ( bool is_frequently_polled = true ) <nl> - : is_frequently_polled_ ( is_frequently_polled ) { } <nl> + ServerCompletionQueue ( grpc_cq_polling_type polling_type ) <nl> + : CompletionQueue ( grpc_completion_queue_attributes { <nl> + GRPC_CQ_CURRENT_VERSION , GRPC_CQ_NEXT , polling_type } ) , <nl> + polling_type_ ( polling_type ) { } <nl> } ; <nl> <nl> } / / namespace grpc <nl> mmm a / include / grpc + + / impl / codegen / core_codegen . h <nl> ppp b / include / grpc + + / impl / codegen / core_codegen . h <nl> <nl> namespace grpc { <nl> <nl> / / / Implementation of the core codegen interface . 
<nl> - class CoreCodegen : public CoreCodegenInterface { <nl> + class CoreCodegen final : public CoreCodegenInterface { <nl> private : <nl> + virtual const grpc_completion_queue_factory * <nl> + grpc_completion_queue_factory_lookup ( <nl> + const grpc_completion_queue_attributes * attributes ) override ; <nl> + virtual grpc_completion_queue * grpc_completion_queue_create ( <nl> + const grpc_completion_queue_factory * factory , <nl> + const grpc_completion_queue_attributes * attributes , <nl> + void * reserved ) override ; <nl> grpc_completion_queue * grpc_completion_queue_create_for_next ( <nl> void * reserved ) override ; <nl> grpc_completion_queue * grpc_completion_queue_create_for_pluck ( <nl> mmm a / include / grpc + + / impl / codegen / core_codegen_interface . h <nl> ppp b / include / grpc + + / impl / codegen / core_codegen_interface . h <nl> class CoreCodegenInterface { <nl> virtual void assert_fail ( const char * failed_assertion , const char * file , <nl> int line ) = 0 ; <nl> <nl> + virtual const grpc_completion_queue_factory * <nl> + grpc_completion_queue_factory_lookup ( <nl> + const grpc_completion_queue_attributes * attributes ) = 0 ; <nl> + virtual grpc_completion_queue * grpc_completion_queue_create ( <nl> + const grpc_completion_queue_factory * factory , <nl> + const grpc_completion_queue_attributes * attributes , void * reserved ) = 0 ; <nl> virtual grpc_completion_queue * grpc_completion_queue_create_for_next ( <nl> void * reserved ) = 0 ; <nl> virtual grpc_completion_queue * grpc_completion_queue_create_for_pluck ( <nl> mmm a / include / grpc + + / impl / codegen / proto_utils . h <nl> ppp b / include / grpc + + / impl / codegen / proto_utils . h <nl> namespace internal { <nl> <nl> class GrpcBufferWriterPeer ; <nl> <nl> - const int kGrpcBufferWriterMaxBufferLength = 8192 ; <nl> + const int kGrpcBufferWriterMaxBufferLength = 1024 * 1024 ; <nl> <nl> class GrpcBufferWriter final <nl> : public : : grpc : : protobuf : : io : : ZeroCopyOutputStream { <nl> mmm a / include / grpc + + / impl / codegen / sync_stream . h <nl> ppp b / include / grpc + + / impl / codegen / sync_stream . h <nl> class ClientReader final : public ClientReaderInterface < R > { <nl> ClientReader ( ChannelInterface * channel , const RpcMethod & method , <nl> ClientContext * context , const W & request ) <nl> : context_ ( context ) , <nl> - cq_ ( true ) , / / Pluckable cq <nl> + cq_ ( grpc_completion_queue_attributes { <nl> + GRPC_CQ_CURRENT_VERSION , GRPC_CQ_PLUCK , <nl> + GRPC_CQ_DEFAULT_POLLING } ) , / / Pluckable cq <nl> call_ ( channel - > CreateCall ( method , context , & cq_ ) ) { <nl> CallOpSet < CallOpSendInitialMetadata , CallOpSendMessage , <nl> CallOpClientSendClose > <nl> class ClientWriter : public ClientWriterInterface < W > { <nl> ClientWriter ( ChannelInterface * channel , const RpcMethod & method , <nl> ClientContext * context , R * response ) <nl> : context_ ( context ) , <nl> - cq_ ( true ) , / / Pluckable cq <nl> + cq_ ( grpc_completion_queue_attributes { <nl> + GRPC_CQ_CURRENT_VERSION , GRPC_CQ_PLUCK , <nl> + GRPC_CQ_DEFAULT_POLLING } ) , / / Pluckable cq <nl> call_ ( channel - > CreateCall ( method , context , & cq_ ) ) { <nl> finish_ops_ . RecvMessage ( response ) ; <nl> finish_ops_ . 
AllowNoMessage ( ) ; <nl> class ClientReaderWriter final : public ClientReaderWriterInterface < W , R > { <nl> ClientReaderWriter ( ChannelInterface * channel , const RpcMethod & method , <nl> ClientContext * context ) <nl> : context_ ( context ) , <nl> - cq_ ( true ) , / / Pluckable cq <nl> + cq_ ( grpc_completion_queue_attributes { <nl> + GRPC_CQ_CURRENT_VERSION , GRPC_CQ_PLUCK , <nl> + GRPC_CQ_DEFAULT_POLLING } ) , / / Pluckable cq <nl> call_ ( channel - > CreateCall ( method , context , & cq_ ) ) { <nl> if ( ! context_ - > initial_metadata_corked_ ) { <nl> CallOpSet < CallOpSendInitialMetadata > ops ; <nl> new file mode 100644 <nl> index 00000000000 . . f2de9472d6b <nl> mmm / dev / null <nl> ppp b / include / grpc + + / test / mock_stream . h <nl> <nl> + / * <nl> + * <nl> + * Copyright 2017 , Google Inc . <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are <nl> + * met : <nl> + * <nl> + * * Redistributions of source code must retain the above copyright <nl> + * notice , this list of conditions and the following disclaimer . <nl> + * * Redistributions in binary form must reproduce the above <nl> + * copyright notice , this list of conditions and the following disclaimer <nl> + * in the documentation and / or other materials provided with the <nl> + * distribution . <nl> + * * Neither the name of Google Inc . nor the names of its <nl> + * contributors may be used to endorse or promote products derived from <nl> + * this software without specific prior written permission . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * <nl> + * / <nl> + <nl> + # ifndef GRPCXX_TEST_MOCK_STREAM_H <nl> + # define GRPCXX_TEST_MOCK_STREAM_H <nl> + <nl> + # include < stdint . h > <nl> + <nl> + # include < gmock / gmock . h > <nl> + # include < grpc + + / impl / codegen / call . h > <nl> + # include < grpc + + / support / async_stream . h > <nl> + # include < grpc + + / support / async_unary_call . h > <nl> + # include < grpc + + / support / sync_stream . 
h > <nl> + <nl> + namespace grpc { <nl> + namespace testing { <nl> + <nl> + template < class R > <nl> + class MockClientReader : public ClientReaderInterface < R > { <nl> + public : <nl> + MockClientReader ( ) = default ; <nl> + <nl> + / / ClientStreamingInterface <nl> + MOCK_METHOD0_T ( Finish , Status ( ) ) ; <nl> + <nl> + / / ReaderInterface <nl> + MOCK_METHOD1_T ( NextMessageSize , bool ( uint32_t * ) ) ; <nl> + MOCK_METHOD1_T ( Read , bool ( R * ) ) ; <nl> + <nl> + / / ClientReaderInterface <nl> + MOCK_METHOD0_T ( WaitForInitialMetadata , void ( ) ) ; <nl> + } ; <nl> + <nl> + template < class W > <nl> + class MockClientWriter : public ClientWriterInterface < W > { <nl> + public : <nl> + MockClientWriter ( ) = default ; <nl> + <nl> + / / ClientStreamingInterface <nl> + MOCK_METHOD0_T ( Finish , Status ( ) ) ; <nl> + <nl> + / / WriterInterface <nl> + MOCK_METHOD2_T ( Write , bool ( const W & , const WriteOptions ) ) ; <nl> + <nl> + / / ClientWriterInterface <nl> + MOCK_METHOD0_T ( WritesDone , bool ( ) ) ; <nl> + } ; <nl> + <nl> + template < class W , class R > <nl> + class MockClientReaderWriter : public ClientReaderWriterInterface < W , R > { <nl> + public : <nl> + MockClientReaderWriter ( ) = default ; <nl> + <nl> + / / ClientStreamingInterface <nl> + MOCK_METHOD0_T ( Finish , Status ( ) ) ; <nl> + <nl> + / / ReaderInterface <nl> + MOCK_METHOD1_T ( NextMessageSize , bool ( uint32_t * ) ) ; <nl> + MOCK_METHOD1_T ( Read , bool ( R * ) ) ; <nl> + <nl> + / / WriterInterface <nl> + MOCK_METHOD2_T ( Write , bool ( const W & , const WriteOptions ) ) ; <nl> + <nl> + / / ClientReaderWriterInterface <nl> + MOCK_METHOD0_T ( WaitForInitialMetadata , void ( ) ) ; <nl> + MOCK_METHOD0_T ( WritesDone , bool ( ) ) ; <nl> + } ; <nl> + <nl> + / / TODO : We do not support mocking an async RPC for now . 
<nl> + <nl> + template < class R > <nl> + class MockClientAsyncResponseReader <nl> + : public ClientAsyncResponseReaderInterface < R > { <nl> + public : <nl> + MockClientAsyncResponseReader ( ) = default ; <nl> + <nl> + MOCK_METHOD1_T ( ReadInitialMetadata , void ( void * ) ) ; <nl> + MOCK_METHOD3_T ( Finish , void ( R * , Status * , void * ) ) ; <nl> + } ; <nl> + <nl> + template < class R > <nl> + class MockClientAsyncReader : public ClientAsyncReaderInterface < R > { <nl> + public : <nl> + MockClientAsyncReader ( ) = default ; <nl> + <nl> + / / ClientAsyncStreamingInterface <nl> + MOCK_METHOD1_T ( ReadInitialMetadata , void ( void * ) ) ; <nl> + MOCK_METHOD2_T ( Finish , void ( Status * , void * ) ) ; <nl> + <nl> + / / AsyncReaderInterface <nl> + MOCK_METHOD2_T ( Read , void ( R * , void * ) ) ; <nl> + } ; <nl> + <nl> + template < class W > <nl> + class MockClientAsyncWriter : public ClientAsyncWriterInterface < W > { <nl> + public : <nl> + MockClientAsyncWriter ( ) = default ; <nl> + <nl> + / / ClientAsyncStreamingInterface <nl> + MOCK_METHOD1_T ( ReadInitialMetadata , void ( void * ) ) ; <nl> + MOCK_METHOD2_T ( Finish , void ( Status * , void * ) ) ; <nl> + <nl> + / / AsyncWriterInterface <nl> + MOCK_METHOD2_T ( Write , void ( const W & , void * ) ) ; <nl> + <nl> + / / ClientAsyncWriterInterface <nl> + MOCK_METHOD1_T ( WritesDone , void ( void * ) ) ; <nl> + } ; <nl> + <nl> + template < class W , class R > <nl> + class MockClientAsyncReaderWriter <nl> + : public ClientAsyncReaderWriterInterface < W , R > { <nl> + public : <nl> + MockClientAsyncReaderWriter ( ) = default ; <nl> + <nl> + / / ClientAsyncStreamingInterface <nl> + MOCK_METHOD1_T ( ReadInitialMetadata , void ( void * ) ) ; <nl> + MOCK_METHOD2_T ( Finish , void ( Status * , void * ) ) ; <nl> + <nl> + / / AsyncWriterInterface <nl> + MOCK_METHOD2_T ( Write , void ( const W & , void * ) ) ; <nl> + <nl> + / / AsyncReaderInterface <nl> + MOCK_METHOD2_T ( Read , void ( R * , void * ) ) ; <nl> + <nl> + / / ClientAsyncReaderWriterInterface <nl> + MOCK_METHOD1_T ( WritesDone , void ( void * ) ) ; <nl> + } ; <nl> + <nl> + } / / namespace testing <nl> + } / / namespace grpc <nl> + <nl> + # endif / / GRPCXX_TEST_MOCK_STREAM_H <nl> mmm a / include / grpc / grpc . h <nl> ppp b / include / grpc / grpc . h <nl> GRPCAPI const char * grpc_version_string ( void ) ; <nl> / * * Return a string specifying what the ' g ' in gRPC stands for * / <nl> GRPCAPI const char * grpc_g_stands_for ( void ) ; <nl> <nl> - / * * Specifies the type of APIs to use to pop events from the completion queue * / <nl> - typedef enum { <nl> - / * * Events are popped out by calling grpc_completion_queue_next ( ) API ONLY * / <nl> - GRPC_CQ_NEXT = 1 , <nl> - <nl> - / * * Events are popped out by calling grpc_completion_queue_pluck ( ) API ONLY * / <nl> - GRPC_CQ_PLUCK <nl> - } grpc_cq_completion_type ; <nl> - <nl> - / * * Completion queues internally MAY maintain a set of file descriptors in a <nl> - structure called ' pollset ' . This enum specifies if a completion queue has an <nl> - associated pollset and any restrictions on the type of file descriptors that <nl> - can be present in the pollset . 
<nl> - <nl> - I / O progress can only be made when grpc_completion_queue_next ( ) or <nl> - grpc_completion_queue_pluck ( ) are called on the completion queue ( unless the <nl> - grpc_cq_polling_type is GRPC_CQ_NON_POLLING ) and hence it is very important <nl> - to actively call these APIs * / <nl> - typedef enum { <nl> - / * * The completion queue will have an associated pollset and there is no <nl> - restriction on the type of file descriptors the pollset may contain * / <nl> - GRPC_CQ_DEFAULT_POLLING , <nl> - <nl> - / * * Similar to GRPC_CQ_DEFAULT_POLLING except that the completion queues will <nl> - not contain any ' listening file descriptors ' ( i . e file descriptors used to <nl> - listen to incoming channels ) * / <nl> - GRPC_CQ_NON_LISTENING , <nl> - <nl> - / * * The completion queue will not have an associated pollset . Note that <nl> - grpc_completion_queue_next ( ) or grpc_completion_queue_pluck ( ) MUST still <nl> - be called to pop events from the completion queue ; it is not required to <nl> - call them actively to make I / O progress * / <nl> - GRPC_CQ_NON_POLLING <nl> - } grpc_cq_polling_type ; <nl> - <nl> - # define GRPC_CQ_CURRENT_VERSION 1 <nl> - typedef struct grpc_completion_queue_attributes { <nl> - / * The version number of this structure . More fields might be added to this <nl> - structure in future . * / <nl> - int version ; / * Set to GRPC_CQ_CURRENT_VERSION * / <nl> - <nl> - grpc_cq_completion_type cq_completion_type ; <nl> - <nl> - grpc_cq_polling_type cq_polling_type ; <nl> - } grpc_completion_queue_attributes ; <nl> - <nl> - / * * The completion queue factory structure is opaque to the callers of grpc * / <nl> - typedef struct grpc_completion_queue_factory grpc_completion_queue_factory ; <nl> - <nl> / * * Returns the completion queue factory based on the attributes . MAY return a <nl> NULL if no factory can be found * / <nl> GRPCAPI const grpc_completion_queue_factory * <nl> GRPCAPI void grpc_server_register_completion_queue ( grpc_server * server , <nl> grpc_completion_queue * cq , <nl> void * reserved ) ; <nl> <nl> - / * * Register a non - listening completion queue with the server . This API is <nl> - similar to grpc_server_register_completion_queue except that the server will <nl> - not use this completion_queue to listen to any incoming channels . <nl> - <nl> - Registering a non - listening completion queue will have negative performance <nl> - impact and hence this API is not recommended for production use cases . * / <nl> - GRPCAPI void grpc_server_register_non_listening_completion_queue ( <nl> - grpc_server * server , grpc_completion_queue * q , void * reserved ) ; <nl> - <nl> / * * Add a HTTP2 over plaintext over tcp listener . <nl> Returns bound port number on success , 0 on failure . <nl> REQUIRES : server not started * / <nl> mmm a / include / grpc / impl / codegen / grpc_types . h <nl> ppp b / include / grpc / impl / codegen / grpc_types . h <nl> typedef struct { <nl> <nl> typedef struct grpc_resource_quota grpc_resource_quota ; <nl> <nl> + / * * Completion queues internally MAY maintain a set of file descriptors in a <nl> + structure called ' pollset ' . This enum specifies if a completion queue has an <nl> + associated pollset and any restrictions on the type of file descriptors that <nl> + can be present in the pollset . 
<nl> + <nl> + I / O progress can only be made when grpc_completion_queue_next ( ) or <nl> + grpc_completion_queue_pluck ( ) are called on the completion queue ( unless the <nl> + grpc_cq_polling_type is GRPC_CQ_NON_POLLING ) and hence it is very important <nl> + to actively call these APIs * / <nl> + typedef enum { <nl> + / * * The completion queue will have an associated pollset and there is no <nl> + restriction on the type of file descriptors the pollset may contain * / <nl> + GRPC_CQ_DEFAULT_POLLING , <nl> + <nl> + / * * Similar to GRPC_CQ_DEFAULT_POLLING except that the completion queues will <nl> + not contain any ' listening file descriptors ' ( i . e file descriptors used to <nl> + listen to incoming channels ) * / <nl> + GRPC_CQ_NON_LISTENING , <nl> + <nl> + / * * The completion queue will not have an associated pollset . Note that <nl> + grpc_completion_queue_next ( ) or grpc_completion_queue_pluck ( ) MUST still <nl> + be called to pop events from the completion queue ; it is not required to <nl> + call them actively to make I / O progress * / <nl> + GRPC_CQ_NON_POLLING <nl> + } grpc_cq_polling_type ; <nl> + <nl> + / * * Specifies the type of APIs to use to pop events from the completion queue * / <nl> + typedef enum { <nl> + / * * Events are popped out by calling grpc_completion_queue_next ( ) API ONLY * / <nl> + GRPC_CQ_NEXT = 1 , <nl> + <nl> + / * * Events are popped out by calling grpc_completion_queue_pluck ( ) API ONLY * / <nl> + GRPC_CQ_PLUCK <nl> + } grpc_cq_completion_type ; <nl> + <nl> + # define GRPC_CQ_CURRENT_VERSION 1 <nl> + typedef struct grpc_completion_queue_attributes { <nl> + / * The version number of this structure . More fields might be added to this <nl> + structure in future . * / <nl> + int version ; / * Set to GRPC_CQ_CURRENT_VERSION * / <nl> + <nl> + grpc_cq_completion_type cq_completion_type ; <nl> + <nl> + grpc_cq_polling_type cq_polling_type ; <nl> + } grpc_completion_queue_attributes ; <nl> + <nl> + / * * The completion queue factory structure is opaque to the callers of grpc * / <nl> + typedef struct grpc_completion_queue_factory grpc_completion_queue_factory ; <nl> + <nl> # ifdef __cplusplus <nl> } <nl> # endif <nl> mmm a / src / compiler / cpp_generator . cc <nl> ppp b / src / compiler / cpp_generator . cc <nl> grpc : : string GetSourceEpilogue ( grpc_generator : : File * file , <nl> return temp ; <nl> } <nl> <nl> + / / TODO ( mmukhi ) : Make sure we need parameters or not . <nl> + grpc : : string GetMockPrologue ( grpc_generator : : File * file , <nl> + const Parameters & / * params * / ) { <nl> + grpc : : string output ; <nl> + { <nl> + / / Scope the output stream so it closes and finalizes output to the string . <nl> + auto printer = file - > CreatePrinter ( & output ) ; <nl> + std : : map < grpc : : string , grpc : : string > vars ; <nl> + <nl> + vars [ " filename " ] = file - > filename ( ) ; <nl> + vars [ " filename_base " ] = file - > filename_without_ext ( ) ; <nl> + vars [ " message_header_ext " ] = message_header_ext ( ) ; <nl> + vars [ " service_header_ext " ] = service_header_ext ( ) ; <nl> + <nl> + printer - > Print ( vars , " / / Generated by the gRPC C + + plugin . \ n " ) ; <nl> + printer - > Print ( vars , <nl> + " / / If you make any local change , they will be lost . 
\ n " ) ; <nl> + printer - > Print ( vars , " / / source : $ filename $ \ n \ n " ) ; <nl> + <nl> + printer - > Print ( vars , " # include \ " $ filename_base $ $ message_header_ext $ \ " \ n " ) ; <nl> + printer - > Print ( vars , " # include \ " $ filename_base $ $ service_header_ext $ \ " \ n " ) ; <nl> + printer - > Print ( vars , file - > additional_headers ( ) . c_str ( ) ) ; <nl> + printer - > Print ( vars , " \ n " ) ; <nl> + } <nl> + return output ; <nl> + } <nl> + <nl> + / / TODO ( mmukhi ) : Add client - stream and completion - queue headers . <nl> + grpc : : string GetMockIncludes ( grpc_generator : : File * file , <nl> + const Parameters & params ) { <nl> + grpc : : string output ; <nl> + { <nl> + / / Scope the output stream so it closes and finalizes output to the string . <nl> + auto printer = file - > CreatePrinter ( & output ) ; <nl> + std : : map < grpc : : string , grpc : : string > vars ; <nl> + <nl> + static const char * headers_strs [ ] = { <nl> + " grpc + + / impl / codegen / async_stream . h " , <nl> + " grpc + + / impl / codegen / sync_stream . h " , " gmock / gmock . h " , <nl> + } ; <nl> + std : : vector < grpc : : string > headers ( headers_strs , array_end ( headers_strs ) ) ; <nl> + PrintIncludes ( printer . get ( ) , headers , params ) ; <nl> + <nl> + if ( ! file - > package ( ) . empty ( ) ) { <nl> + std : : vector < grpc : : string > parts = file - > package_parts ( ) ; <nl> + <nl> + for ( auto part = parts . begin ( ) ; part ! = parts . end ( ) ; part + + ) { <nl> + vars [ " part " ] = * part ; <nl> + printer - > Print ( vars , " namespace $ part $ { \ n " ) ; <nl> + } <nl> + } <nl> + <nl> + printer - > Print ( vars , " \ n " ) ; <nl> + } <nl> + return output ; <nl> + } <nl> + <nl> + void PrintMockClientMethods ( grpc_generator : : Printer * printer , <nl> + const grpc_generator : : Method * method , <nl> + std : : map < grpc : : string , grpc : : string > * vars ) { <nl> + ( * vars ) [ " Method " ] = method - > name ( ) ; <nl> + ( * vars ) [ " Request " ] = method - > input_type_name ( ) ; <nl> + ( * vars ) [ " Response " ] = method - > output_type_name ( ) ; <nl> + <nl> + if ( method - > NoStreaming ( ) ) { <nl> + printer - > Print ( <nl> + * vars , <nl> + " MOCK_METHOD3 ( $ Method $ , : : grpc : : Status ( : : grpc : : ClientContext * context , " <nl> + " const $ Request $ & request , $ Response $ * response ) ) ; \ n " ) ; <nl> + printer - > Print ( * vars , <nl> + " MOCK_METHOD3 ( Async $ Method $ Raw , " <nl> + " : : grpc : : ClientAsyncResponseReaderInterface < $ Response $ > * " <nl> + " ( : : grpc : : ClientContext * context , const $ Request $ & request , " <nl> + " : : grpc : : CompletionQueue * cq ) ) ; \ n " ) ; <nl> + } else if ( ClientOnlyStreaming ( method ) ) { <nl> + printer - > Print ( <nl> + * vars , <nl> + " MOCK_METHOD2 ( $ Method $ Raw , " <nl> + " : : grpc : : ClientWriterInterface < $ Request $ > * " <nl> + " ( : : grpc : : ClientContext * context , $ Response $ * response ) ) ; \ n " ) ; <nl> + printer - > Print ( * vars , <nl> + " MOCK_METHOD4 ( Async $ Method $ Raw , " <nl> + " : : grpc : : ClientAsyncWriterInterface < $ Request $ > * " <nl> + " ( : : grpc : : ClientContext * context , $ Response $ * response , " <nl> + " : : grpc : : CompletionQueue * cq , void * tag ) ) ; \ n " ) ; <nl> + } else if ( ServerOnlyStreaming ( method ) ) { <nl> + printer - > Print ( <nl> + * vars , <nl> + " MOCK_METHOD2 ( $ Method $ Raw , " <nl> + " : : grpc : : ClientReaderInterface < $ Response $ > * " <nl> + " ( : : grpc : : ClientContext * 
context , const $ Request $ & request ) ) ; \ n " ) ; <nl> + printer - > Print ( * vars , <nl> + " MOCK_METHOD4 ( Async $ Method $ Raw , " <nl> + " : : grpc : : ClientAsyncReaderInterface < $ Response $ > * " <nl> + " ( : : grpc : : ClientContext * context , const $ Request $ & request , " <nl> + " : : grpc : : CompletionQueue * cq , void * tag ) ) ; \ n " ) ; <nl> + } else if ( method - > BidiStreaming ( ) ) { <nl> + printer - > Print ( <nl> + * vars , <nl> + " MOCK_METHOD1 ( $ Method $ Raw , " <nl> + " : : grpc : : ClientReaderWriterInterface < $ Request $ , $ Response $ > * " <nl> + " ( : : grpc : : ClientContext * context ) ) ; \ n " ) ; <nl> + printer - > Print ( <nl> + * vars , <nl> + " MOCK_METHOD3 ( Async $ Method $ Raw , " <nl> + " : : grpc : : ClientAsyncReaderWriterInterface < $ Request $ , $ Response $ > * " <nl> + " ( : : grpc : : ClientContext * context , : : grpc : : CompletionQueue * cq , " <nl> + " void * tag ) ) ; \ n " ) ; <nl> + } <nl> + } <nl> + <nl> + void PrintMockService ( grpc_generator : : Printer * printer , <nl> + const grpc_generator : : Service * service , <nl> + std : : map < grpc : : string , grpc : : string > * vars ) { <nl> + ( * vars ) [ " Service " ] = service - > name ( ) ; <nl> + <nl> + printer - > Print ( * vars , <nl> + " class Mock $ Service $ Stub : public $ Service $ : : StubInterface { \ n " <nl> + " public : \ n " ) ; <nl> + printer - > Indent ( ) ; <nl> + for ( int i = 0 ; i < service - > method_count ( ) ; + + i ) { <nl> + PrintMockClientMethods ( printer , service - > method ( i ) . get ( ) , vars ) ; <nl> + } <nl> + printer - > Outdent ( ) ; <nl> + printer - > Print ( " } ; \ n " ) ; <nl> + } <nl> + <nl> + grpc : : string GetMockServices ( grpc_generator : : File * file , <nl> + const Parameters & params ) { <nl> + grpc : : string output ; <nl> + { <nl> + / / Scope the output stream so it closes and finalizes output to the string . <nl> + auto printer = file - > CreatePrinter ( & output ) ; <nl> + std : : map < grpc : : string , grpc : : string > vars ; <nl> + / / Package string is empty or ends with a dot . It is used to fully qualify <nl> + / / method names . <nl> + vars [ " Package " ] = file - > package ( ) ; <nl> + if ( ! file - > package ( ) . empty ( ) ) { <nl> + vars [ " Package " ] . append ( " . " ) ; <nl> + } <nl> + <nl> + if ( ! params . services_namespace . empty ( ) ) { <nl> + vars [ " services_namespace " ] = params . services_namespace ; <nl> + printer - > Print ( vars , " \ nnamespace $ services_namespace $ { \ n \ n " ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < file - > service_count ( ) ; i + + ) { <nl> + PrintMockService ( printer . get ( ) , file - > service ( i ) . get ( ) , & vars ) ; <nl> + printer - > Print ( " \ n " ) ; <nl> + } <nl> + <nl> + if ( ! params . services_namespace . empty ( ) ) { <nl> + printer - > Print ( vars , " } / / namespace $ services_namespace $ \ n \ n " ) ; <nl> + } <nl> + } <nl> + return output ; <nl> + } <nl> + <nl> + grpc : : string GetMockEpilogue ( grpc_generator : : File * file , <nl> + const Parameters & / * params * / ) { <nl> + grpc : : string temp ; <nl> + <nl> + if ( ! file - > package ( ) . empty ( ) ) { <nl> + std : : vector < grpc : : string > parts = file - > package_parts ( ) ; <nl> + <nl> + for ( auto part = parts . begin ( ) ; part ! = parts . end ( ) ; part + + ) { <nl> + temp . append ( " } / / namespace " ) ; <nl> + temp . append ( * part ) ; <nl> + temp . append ( " \ n " ) ; <nl> + } <nl> + temp . 
append ( " \ n " ) ; <nl> + } <nl> + <nl> + return temp ; <nl> + } <nl> + <nl> } / / namespace grpc_cpp_generator <nl> mmm a / src / compiler / cpp_generator . h <nl> ppp b / src / compiler / cpp_generator . h <nl> struct Parameters { <nl> bool use_system_headers ; <nl> / / Prefix to any grpc include <nl> grpc : : string grpc_search_path ; <nl> + / / Generate GMOCK code to facilitate unit testing . <nl> + bool generate_mock_code ; <nl> } ; <nl> <nl> / / Return the prologue of the generated header file . <nl> grpc : : string GetSourceServices ( grpc_generator : : File * file , <nl> grpc : : string GetSourceEpilogue ( grpc_generator : : File * file , <nl> const Parameters & params ) ; <nl> <nl> + / / Return the prologue of the generated mock file . <nl> + grpc : : string GetMockPrologue ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> + / / Return the includes needed for generated mock file . <nl> + grpc : : string GetMockIncludes ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> + / / Return the services for generated mock file . <nl> + grpc : : string GetMockServices ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> + / / Return the epilogue of generated mock file . <nl> + grpc : : string GetMockEpilogue ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> + / / Return the prologue of the generated mock file . <nl> + grpc : : string GetMockPrologue ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> + / / Return the includes needed for generated mock file . <nl> + grpc : : string GetMockIncludes ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> + / / Return the services for generated mock file . <nl> + grpc : : string GetMockServices ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> + / / Return the epilogue of generated mock file . <nl> + grpc : : string GetMockEpilogue ( grpc_generator : : File * file , <nl> + const Parameters & params ) ; <nl> + <nl> } / / namespace grpc_cpp_generator <nl> <nl> # endif / / GRPC_INTERNAL_COMPILER_CPP_GENERATOR_H <nl> mmm a / src / compiler / cpp_plugin . cc <nl> ppp b / src / compiler / cpp_plugin . cc <nl> class CppGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> <nl> grpc_cpp_generator : : Parameters generator_parameters ; <nl> generator_parameters . use_system_headers = true ; <nl> + generator_parameters . generate_mock_code = false ; <nl> <nl> ProtoBufFile pbfile ( file ) ; <nl> <nl> class CppGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> } <nl> } else if ( param [ 0 ] = = " grpc_search_path " ) { <nl> generator_parameters . grpc_search_path = param [ 1 ] ; <nl> + } else if ( param [ 0 ] = = " generate_mock_code " ) { <nl> + if ( param [ 1 ] = = " true " ) { <nl> + generator_parameters . generate_mock_code = true ; <nl> + } else if ( param [ 1 ] ! = " false " ) { <nl> + * error = grpc : : string ( " Invalid parameter : " ) + * parameter_string ; <nl> + return false ; <nl> + } <nl> } else { <nl> * error = grpc : : string ( " Unknown parameter : " ) + * parameter_string ; <nl> return false ; <nl> class CppGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> grpc : : protobuf : : io : : CodedOutputStream source_coded_out ( source_output . get ( ) ) ; <nl> source_coded_out . WriteRaw ( source_code . data ( ) , source_code . 
size ( ) ) ; <nl> <nl> + if ( ! generator_parameters . generate_mock_code ) { <nl> + return true ; <nl> + } <nl> + grpc : : string mock_code = <nl> + grpc_cpp_generator : : GetMockPrologue ( & pbfile , generator_parameters ) + <nl> + grpc_cpp_generator : : GetMockIncludes ( & pbfile , generator_parameters ) + <nl> + grpc_cpp_generator : : GetMockServices ( & pbfile , generator_parameters ) + <nl> + grpc_cpp_generator : : GetMockEpilogue ( & pbfile , generator_parameters ) ; <nl> + std : : unique_ptr < grpc : : protobuf : : io : : ZeroCopyOutputStream > mock_output ( <nl> + context - > Open ( file_name + " _mock . grpc . pb . h " ) ) ; <nl> + grpc : : protobuf : : io : : CodedOutputStream mock_coded_out ( mock_output . get ( ) ) ; <nl> + mock_coded_out . WriteRaw ( mock_code . data ( ) , mock_code . size ( ) ) ; <nl> + <nl> return true ; <nl> } <nl> <nl> mmm a / src / core / ext / filters / client_channel / client_channel . c <nl> ppp b / src / core / ext / filters / client_channel / client_channel . c <nl> static void on_resolver_result_changed_locked ( grpc_exec_ctx * exec_ctx , <nl> GPR_ASSERT ( channel_arg - > type = = GRPC_ARG_STRING ) ; <nl> lb_policy_name = channel_arg - > value . string ; <nl> } <nl> - / / Special case : If all of the addresses are balancer addresses , <nl> - / / assume that we should use the grpclb policy , regardless of what the <nl> - / / resolver actually specified . <nl> + / / Special case : If at least one balancer address is present , we use <nl> + / / the grpclb policy , regardless of what the resolver actually specified . <nl> channel_arg = <nl> grpc_channel_args_find ( chand - > resolver_result , GRPC_ARG_LB_ADDRESSES ) ; <nl> if ( channel_arg ! = NULL & & channel_arg - > type = = GRPC_ARG_POINTER ) { <nl> grpc_lb_addresses * addresses = channel_arg - > value . pointer . p ; <nl> - bool found_backend_address = false ; <nl> + bool found_balancer_address = false ; <nl> for ( size_t i = 0 ; i < addresses - > num_addresses ; + + i ) { <nl> - if ( ! addresses - > addresses [ i ] . is_balancer ) { <nl> - found_backend_address = true ; <nl> + if ( addresses - > addresses [ i ] . is_balancer ) { <nl> + found_balancer_address = true ; <nl> break ; <nl> } <nl> } <nl> - if ( ! found_backend_address ) { <nl> + if ( found_balancer_address ) { <nl> if ( lb_policy_name ! = NULL & & strcmp ( lb_policy_name , " grpclb " ) ! = 0 ) { <nl> gpr_log ( GPR_INFO , <nl> - " resolver requested LB policy % s but provided only balancer " <nl> - " addresses , no backend addresses - - forcing use of grpclb LB " <nl> - " policy " , <nl> + " resolver requested LB policy % s but provided at least one " <nl> + " balancer address - - forcing use of grpclb LB policy " , <nl> lb_policy_name ) ; <nl> } <nl> lb_policy_name = " grpclb " ; <nl> mmm a / src / core / ext / filters / client_channel / lb_policy / grpclb / grpclb . c <nl> ppp b / src / core / ext / filters / client_channel / lb_policy / grpclb / grpclb . c <nl> static grpc_lb_policy * glb_create ( grpc_exec_ctx * exec_ctx , <nl> / * Count the number of gRPC - LB addresses . There must be at least one . <nl> * TODO ( roth ) : For now , we ignore non - balancer addresses , but in the <nl> * future , we may change the behavior such that we fall back to using <nl> - * the non - balancer addresses if we cannot reach any balancers . 
At that <nl> - * time , this should be changed to allow a list with no balancer addresses , <nl> - * since the resolver might fail to return a balancer address even when <nl> - * this is the right LB policy to use . * / <nl> + * the non - balancer addresses if we cannot reach any balancers . In the <nl> + * fallback case , we should use the LB policy indicated by <nl> + * GRPC_ARG_LB_POLICY_NAME ( although if that specifies grpclb or is <nl> + * unset , we should default to pick_first ) . * / <nl> const grpc_arg * arg = <nl> grpc_channel_args_find ( args - > args , GRPC_ARG_LB_ADDRESSES ) ; <nl> if ( arg = = NULL | | arg - > type ! = GRPC_ARG_POINTER ) { <nl> mmm a / src / core / ext / transport / chttp2 / server / chttp2_server . c <nl> ppp b / src / core / ext / transport / chttp2 / server / chttp2_server . c <nl> static void on_handshake_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> gpr_mu_lock ( & connection_state - > server_state - > mu ) ; <nl> if ( error ! = GRPC_ERROR_NONE | | connection_state - > server_state - > shutdown ) { <nl> const char * error_str = grpc_error_string ( error ) ; <nl> - gpr_log ( GPR_ERROR , " Handshaking failed : % s " , error_str ) ; <nl> + gpr_log ( GPR_DEBUG , " Handshaking failed : % s " , error_str ) ; <nl> <nl> if ( error = = GRPC_ERROR_NONE & & args - > endpoint ! = NULL ) { <nl> / / We were shut down after handshaking completed successfully , so <nl> mmm a / src / core / lib / iomgr / udp_server . c <nl> ppp b / src / core / lib / iomgr / udp_server . c <nl> struct grpc_udp_listener { <nl> struct grpc_udp_listener * next ; <nl> } ; <nl> <nl> + struct shutdown_fd_args { <nl> + grpc_fd * fd ; <nl> + gpr_mu * server_mu ; <nl> + } ; <nl> + <nl> / * the overall server * / <nl> struct grpc_udp_server { <nl> gpr_mu mu ; <nl> grpc_udp_server * grpc_udp_server_create ( const grpc_channel_args * args ) { <nl> return s ; <nl> } <nl> <nl> - static void shutdown_fd ( grpc_exec_ctx * exec_ctx , void * fd , grpc_error * error ) { <nl> - grpc_fd_shutdown ( exec_ctx , ( grpc_fd * ) fd , GRPC_ERROR_REF ( error ) ) ; <nl> + static void shutdown_fd ( grpc_exec_ctx * exec_ctx , void * args , <nl> + grpc_error * error ) { <nl> + struct shutdown_fd_args * shutdown_args = ( struct shutdown_fd_args * ) args ; <nl> + gpr_mu_lock ( shutdown_args - > server_mu ) ; <nl> + grpc_fd_shutdown ( exec_ctx , shutdown_args - > fd , GRPC_ERROR_REF ( error ) ) ; <nl> + gpr_mu_unlock ( shutdown_args - > server_mu ) ; <nl> + gpr_free ( shutdown_args ) ; <nl> } <nl> <nl> static void dummy_cb ( grpc_exec_ctx * exec_ctx , void * arg , grpc_error * error ) { <nl> void grpc_udp_server_destroy ( grpc_exec_ctx * exec_ctx , grpc_udp_server * s , <nl> if ( s - > active_ports ) { <nl> for ( sp = s - > head ; sp ; sp = sp - > next ) { <nl> GPR_ASSERT ( sp - > orphan_cb ) ; <nl> - grpc_closure_init ( & sp - > orphan_fd_closure , shutdown_fd , sp - > emfd , <nl> + struct shutdown_fd_args * args = gpr_malloc ( sizeof ( * args ) ) ; <nl> + args - > fd = sp - > emfd ; <nl> + args - > server_mu = & s - > mu ; <nl> + grpc_closure_init ( & sp - > orphan_fd_closure , shutdown_fd , args , <nl> grpc_schedule_on_exec_ctx ) ; <nl> sp - > orphan_cb ( exec_ctx , sp - > emfd , & sp - > orphan_fd_closure , <nl> sp - > server - > user_data ) ; <nl> mmm a / src / core / lib / surface / completion_queue . c <nl> ppp b / src / core / lib / surface / completion_queue . 
c <nl> typedef struct { <nl> void * tag ; <nl> } plucker ; <nl> <nl> + typedef struct { <nl> + bool can_get_pollset ; <nl> + bool can_listen ; <nl> + size_t ( * size ) ( void ) ; <nl> + void ( * init ) ( grpc_pollset * pollset , gpr_mu * * mu ) ; <nl> + grpc_error * ( * kick ) ( grpc_pollset * pollset , <nl> + grpc_pollset_worker * specific_worker ) ; <nl> + grpc_error * ( * work ) ( grpc_exec_ctx * exec_ctx , grpc_pollset * pollset , <nl> + grpc_pollset_worker * * worker , gpr_timespec now , <nl> + gpr_timespec deadline ) ; <nl> + void ( * shutdown ) ( grpc_exec_ctx * exec_ctx , grpc_pollset * pollset , <nl> + grpc_closure * closure ) ; <nl> + void ( * destroy ) ( grpc_pollset * pollset ) ; <nl> + } cq_poller_vtable ; <nl> + <nl> + typedef struct non_polling_worker { <nl> + gpr_cv cv ; <nl> + bool kicked ; <nl> + struct non_polling_worker * next ; <nl> + struct non_polling_worker * prev ; <nl> + } non_polling_worker ; <nl> + <nl> + typedef struct { <nl> + gpr_mu mu ; <nl> + non_polling_worker * root ; <nl> + grpc_closure * shutdown ; <nl> + } non_polling_poller ; <nl> + <nl> + static size_t non_polling_poller_size ( void ) { <nl> + return sizeof ( non_polling_poller ) ; <nl> + } <nl> + <nl> + static void non_polling_poller_init ( grpc_pollset * pollset , gpr_mu * * mu ) { <nl> + non_polling_poller * npp = ( non_polling_poller * ) pollset ; <nl> + gpr_mu_init ( & npp - > mu ) ; <nl> + * mu = & npp - > mu ; <nl> + } <nl> + <nl> + static void non_polling_poller_destroy ( grpc_pollset * pollset ) { <nl> + non_polling_poller * npp = ( non_polling_poller * ) pollset ; <nl> + gpr_mu_destroy ( & npp - > mu ) ; <nl> + } <nl> + <nl> + static grpc_error * non_polling_poller_work ( grpc_exec_ctx * exec_ctx , <nl> + grpc_pollset * pollset , <nl> + grpc_pollset_worker * * worker , <nl> + gpr_timespec now , <nl> + gpr_timespec deadline ) { <nl> + non_polling_poller * npp = ( non_polling_poller * ) pollset ; <nl> + if ( npp - > shutdown ) return GRPC_ERROR_NONE ; <nl> + non_polling_worker w ; <nl> + gpr_cv_init ( & w . cv ) ; <nl> + if ( worker ! = NULL ) * worker = ( grpc_pollset_worker * ) & w ; <nl> + if ( npp - > root = = NULL ) { <nl> + npp - > root = w . next = w . prev = & w ; <nl> + } else { <nl> + w . next = npp - > root ; <nl> + w . prev = w . next - > prev ; <nl> + w . next - > prev = w . prev - > next = & w ; <nl> + } <nl> + w . kicked = false ; <nl> + while ( ! npp - > shutdown & & ! w . kicked & & ! gpr_cv_wait ( & w . cv , & npp - > mu , deadline ) ) <nl> + ; <nl> + if ( & w = = npp - > root ) { <nl> + npp - > root = w . next ; <nl> + if ( & w = = npp - > root ) { <nl> + if ( npp - > shutdown ) { <nl> + grpc_closure_sched ( exec_ctx , npp - > shutdown , GRPC_ERROR_NONE ) ; <nl> + } <nl> + npp - > root = NULL ; <nl> + } <nl> + } <nl> + w . next - > prev = w . prev ; <nl> + w . prev - > next = w . next ; <nl> + gpr_cv_destroy ( & w . cv ) ; <nl> + if ( worker ! = NULL ) * worker = NULL ; <nl> + return GRPC_ERROR_NONE ; <nl> + } <nl> + <nl> + static grpc_error * non_polling_poller_kick ( <nl> + grpc_pollset * pollset , grpc_pollset_worker * specific_worker ) { <nl> + non_polling_poller * p = ( non_polling_poller * ) pollset ; <nl> + if ( specific_worker = = NULL ) specific_worker = ( grpc_pollset_worker * ) p - > root ; <nl> + if ( specific_worker ! = NULL ) { <nl> + non_polling_worker * w = ( non_polling_worker * ) specific_worker ; <nl> + if ( ! 
w - > kicked ) { <nl> + w - > kicked = true ; <nl> + gpr_cv_signal ( & w - > cv ) ; <nl> + } <nl> + } <nl> + return GRPC_ERROR_NONE ; <nl> + } <nl> + <nl> + static void non_polling_poller_shutdown ( grpc_exec_ctx * exec_ctx , <nl> + grpc_pollset * pollset , <nl> + grpc_closure * closure ) { <nl> + non_polling_poller * p = ( non_polling_poller * ) pollset ; <nl> + GPR_ASSERT ( closure ! = NULL ) ; <nl> + p - > shutdown = closure ; <nl> + if ( p - > root = = NULL ) { <nl> + grpc_closure_sched ( exec_ctx , closure , GRPC_ERROR_NONE ) ; <nl> + } else { <nl> + non_polling_worker * w = p - > root ; <nl> + do { <nl> + gpr_cv_signal ( & w - > cv ) ; <nl> + w = w - > next ; <nl> + } while ( w ! = p - > root ) ; <nl> + } <nl> + } <nl> + <nl> + static const cq_poller_vtable g_poller_vtable_by_poller_type [ ] = { <nl> + / * GRPC_CQ_DEFAULT_POLLING * / <nl> + { . can_get_pollset = true , <nl> + . can_listen = true , <nl> + . size = grpc_pollset_size , <nl> + . init = grpc_pollset_init , <nl> + . kick = grpc_pollset_kick , <nl> + . work = grpc_pollset_work , <nl> + . shutdown = grpc_pollset_shutdown , <nl> + . destroy = grpc_pollset_destroy } , <nl> + / * GRPC_CQ_NON_LISTENING * / <nl> + { . can_get_pollset = true , <nl> + . can_listen = false , <nl> + . size = grpc_pollset_size , <nl> + . init = grpc_pollset_init , <nl> + . kick = grpc_pollset_kick , <nl> + . work = grpc_pollset_work , <nl> + . shutdown = grpc_pollset_shutdown , <nl> + . destroy = grpc_pollset_destroy } , <nl> + / * GRPC_CQ_NON_POLLING * / <nl> + { . can_get_pollset = false , <nl> + . can_listen = false , <nl> + . size = non_polling_poller_size , <nl> + . init = non_polling_poller_init , <nl> + . kick = non_polling_poller_kick , <nl> + . work = non_polling_poller_work , <nl> + . shutdown = non_polling_poller_shutdown , <nl> + . 
destroy = non_polling_poller_destroy } , <nl> + } ; <nl> + <nl> / * Completion queue structure * / <nl> struct grpc_completion_queue { <nl> / * * owned by pollset * / <nl> gpr_mu * mu ; <nl> <nl> grpc_cq_completion_type completion_type ; <nl> - grpc_cq_polling_type polling_type ; <nl> + <nl> + const cq_poller_vtable * poller_vtable ; <nl> <nl> / * * completed events * / <nl> grpc_cq_completion completed_head ; <nl> grpc_completion_queue * grpc_completion_queue_create_internal ( <nl> " polling_type = % d ) " , <nl> 2 , ( completion_type , polling_type ) ) ; <nl> <nl> - cc = gpr_zalloc ( sizeof ( grpc_completion_queue ) + grpc_pollset_size ( ) ) ; <nl> - grpc_pollset_init ( POLLSET_FROM_CQ ( cc ) , & cc - > mu ) ; <nl> + const cq_poller_vtable * poller_vtable = <nl> + & g_poller_vtable_by_poller_type [ polling_type ] ; <nl> + <nl> + cc = gpr_zalloc ( sizeof ( grpc_completion_queue ) + poller_vtable - > size ( ) ) ; <nl> + poller_vtable - > init ( POLLSET_FROM_CQ ( cc ) , & cc - > mu ) ; <nl> # ifndef NDEBUG <nl> cc - > outstanding_tags = NULL ; <nl> cc - > outstanding_tag_capacity = 0 ; <nl> # endif <nl> <nl> cc - > completion_type = completion_type ; <nl> - cc - > polling_type = polling_type ; <nl> + cc - > poller_vtable = poller_vtable ; <nl> <nl> / * Initial ref is dropped by grpc_completion_queue_shutdown * / <nl> gpr_ref_init ( & cc - > pending_events , 1 ) ; <nl> grpc_cq_completion_type grpc_get_cq_completion_type ( grpc_completion_queue * cc ) { <nl> return cc - > completion_type ; <nl> } <nl> <nl> - grpc_cq_polling_type grpc_get_cq_polling_type ( grpc_completion_queue * cc ) { <nl> - return cc - > polling_type ; <nl> - } <nl> - <nl> # ifdef GRPC_CQ_REF_COUNT_DEBUG <nl> void grpc_cq_internal_ref ( grpc_completion_queue * cc , const char * reason , <nl> const char * file , int line ) { <nl> void grpc_cq_internal_unref ( grpc_completion_queue * cc ) { <nl> # endif <nl> if ( gpr_unref ( & cc - > owning_refs ) ) { <nl> GPR_ASSERT ( cc - > completed_head . next = = ( uintptr_t ) & cc - > completed_head ) ; <nl> - grpc_pollset_destroy ( POLLSET_FROM_CQ ( cc ) ) ; <nl> + cc - > poller_vtable - > destroy ( POLLSET_FROM_CQ ( cc ) ) ; <nl> # ifndef NDEBUG <nl> gpr_free ( cc - > outstanding_tags ) ; <nl> # endif <nl> void grpc_cq_end_op ( grpc_exec_ctx * exec_ctx , grpc_completion_queue * cc , <nl> } <nl> } <nl> grpc_error * kick_error = <nl> - grpc_pollset_kick ( POLLSET_FROM_CQ ( cc ) , pluck_worker ) ; <nl> + cc - > poller_vtable - > kick ( POLLSET_FROM_CQ ( cc ) , pluck_worker ) ; <nl> gpr_mu_unlock ( cc - > mu ) ; <nl> if ( kick_error ! = GRPC_ERROR_NONE ) { <nl> const char * msg = grpc_error_string ( kick_error ) ; <nl> void grpc_cq_end_op ( grpc_exec_ctx * exec_ctx , grpc_completion_queue * cc , <nl> GPR_ASSERT ( ! cc - > shutdown ) ; <nl> GPR_ASSERT ( cc - > shutdown_called ) ; <nl> cc - > shutdown = 1 ; <nl> - grpc_pollset_shutdown ( exec_ctx , POLLSET_FROM_CQ ( cc ) , <nl> - & cc - > pollset_shutdown_done ) ; <nl> + cc - > poller_vtable - > shutdown ( exec_ctx , POLLSET_FROM_CQ ( cc ) , <nl> + & cc - > pollset_shutdown_done ) ; <nl> gpr_mu_unlock ( cc - > mu ) ; <nl> } <nl> <nl> grpc_event grpc_completion_queue_next ( grpc_completion_queue * cc , <nl> gpr_mu_lock ( cc - > mu ) ; <nl> continue ; <nl> } else { <nl> - grpc_error * err = grpc_pollset_work ( & exec_ctx , POLLSET_FROM_CQ ( cc ) , NULL , <nl> - now , iteration_deadline ) ; <nl> + grpc_error * err = cc - > poller_vtable - > work ( & exec_ctx , POLLSET_FROM_CQ ( cc ) , <nl> + NULL , now , iteration_deadline ) ; <nl> if ( err ! 
= GRPC_ERROR_NONE ) { <nl> gpr_mu_unlock ( cc - > mu ) ; <nl> const char * msg = grpc_error_string ( err ) ; <nl> grpc_event grpc_completion_queue_pluck ( grpc_completion_queue * cc , void * tag , <nl> grpc_exec_ctx_flush ( & exec_ctx ) ; <nl> gpr_mu_lock ( cc - > mu ) ; <nl> } else { <nl> - grpc_error * err = grpc_pollset_work ( & exec_ctx , POLLSET_FROM_CQ ( cc ) , <nl> - & worker , now , iteration_deadline ) ; <nl> + grpc_error * err = cc - > poller_vtable - > work ( <nl> + & exec_ctx , POLLSET_FROM_CQ ( cc ) , & worker , now , iteration_deadline ) ; <nl> if ( err ! = GRPC_ERROR_NONE ) { <nl> del_plucker ( cc , tag , & worker ) ; <nl> gpr_mu_unlock ( cc - > mu ) ; <nl> void grpc_completion_queue_shutdown ( grpc_completion_queue * cc ) { <nl> if ( gpr_unref ( & cc - > pending_events ) ) { <nl> GPR_ASSERT ( ! cc - > shutdown ) ; <nl> cc - > shutdown = 1 ; <nl> - grpc_pollset_shutdown ( & exec_ctx , POLLSET_FROM_CQ ( cc ) , <nl> - & cc - > pollset_shutdown_done ) ; <nl> + cc - > poller_vtable - > shutdown ( & exec_ctx , POLLSET_FROM_CQ ( cc ) , <nl> + & cc - > pollset_shutdown_done ) ; <nl> } <nl> gpr_mu_unlock ( cc - > mu ) ; <nl> grpc_exec_ctx_finish ( & exec_ctx ) ; <nl> void grpc_completion_queue_destroy ( grpc_completion_queue * cc ) { <nl> } <nl> <nl> grpc_pollset * grpc_cq_pollset ( grpc_completion_queue * cc ) { <nl> - return POLLSET_FROM_CQ ( cc ) ; <nl> + return cc - > poller_vtable - > can_get_pollset ? POLLSET_FROM_CQ ( cc ) : NULL ; <nl> } <nl> <nl> grpc_completion_queue * grpc_cq_from_pollset ( grpc_pollset * ps ) { <nl> bool grpc_cq_is_non_listening_server_cq ( grpc_completion_queue * cc ) { <nl> <nl> void grpc_cq_mark_server_cq ( grpc_completion_queue * cc ) { cc - > is_server_cq = 1 ; } <nl> <nl> - int grpc_cq_is_server_cq ( grpc_completion_queue * cc ) { return cc - > is_server_cq ; } <nl> + bool grpc_cq_is_server_cq ( grpc_completion_queue * cc ) { <nl> + return cc - > is_server_cq ; <nl> + } <nl> + <nl> + bool grpc_cq_can_listen ( grpc_completion_queue * cc ) { <nl> + return cc - > poller_vtable - > can_listen ; <nl> + } <nl> mmm a / src / core / lib / surface / completion_queue . h <nl> ppp b / src / core / lib / surface / completion_queue . h <nl> void grpc_cq_end_op ( grpc_exec_ctx * exec_ctx , grpc_completion_queue * cc , <nl> grpc_pollset * grpc_cq_pollset ( grpc_completion_queue * cc ) ; <nl> grpc_completion_queue * grpc_cq_from_pollset ( grpc_pollset * ps ) ; <nl> <nl> - void grpc_cq_mark_non_listening_server_cq ( grpc_completion_queue * cc ) ; <nl> - bool grpc_cq_is_non_listening_server_cq ( grpc_completion_queue * cc ) ; <nl> void grpc_cq_mark_server_cq ( grpc_completion_queue * cc ) ; <nl> - int grpc_cq_is_server_cq ( grpc_completion_queue * cc ) ; <nl> + bool grpc_cq_is_server_cq ( grpc_completion_queue * cc ) ; <nl> + bool grpc_cq_can_listen ( grpc_completion_queue * cc ) ; <nl> <nl> grpc_cq_completion_type grpc_get_cq_completion_type ( grpc_completion_queue * cc ) ; <nl> - grpc_cq_polling_type grpc_get_cq_polling_type ( grpc_completion_queue * cc ) ; <nl> <nl> grpc_completion_queue * grpc_completion_queue_create_internal ( <nl> grpc_cq_completion_type completion_type , grpc_cq_polling_type polling_type ) ; <nl> mmm a / src / core / lib / surface / server . c <nl> ppp b / src / core / lib / surface / server . 
c <nl> const grpc_channel_filter grpc_server_top_filter = { <nl> <nl> static void register_completion_queue ( grpc_server * server , <nl> grpc_completion_queue * cq , <nl> - bool is_non_listening , void * reserved ) { <nl> + void * reserved ) { <nl> size_t i , n ; <nl> GPR_ASSERT ( ! reserved ) ; <nl> for ( i = 0 ; i < server - > cq_count ; i + + ) { <nl> static void register_completion_queue ( grpc_server * server , <nl> <nl> grpc_cq_mark_server_cq ( cq ) ; <nl> <nl> - if ( is_non_listening ) { <nl> - grpc_cq_mark_non_listening_server_cq ( cq ) ; <nl> - } <nl> - <nl> GRPC_CQ_INTERNAL_REF ( cq , " server " ) ; <nl> n = server - > cq_count + + ; <nl> server - > cqs = gpr_realloc ( server - > cqs , <nl> void grpc_server_register_completion_queue ( grpc_server * server , <nl> calls grpc_completion_queue_pluck ( ) on server completion queues * / <nl> } <nl> <nl> - register_completion_queue ( server , cq , false , reserved ) ; <nl> - } <nl> - <nl> - void grpc_server_register_non_listening_completion_queue ( <nl> - grpc_server * server , grpc_completion_queue * cq , void * reserved ) { <nl> - GRPC_API_TRACE ( <nl> - " grpc_server_register_non_listening_completion_queue ( server = % p , cq = % p , " <nl> - " reserved = % p ) " , <nl> - 3 , ( server , cq , reserved ) ) ; <nl> - register_completion_queue ( server , cq , true , reserved ) ; <nl> + register_completion_queue ( server , cq , reserved ) ; <nl> } <nl> <nl> grpc_server * grpc_server_create ( const grpc_channel_args * args , void * reserved ) { <nl> void grpc_server_start ( grpc_server * server ) { <nl> server - > requested_calls_per_cq = <nl> gpr_malloc ( sizeof ( * server - > requested_calls_per_cq ) * server - > cq_count ) ; <nl> for ( i = 0 ; i < server - > cq_count ; i + + ) { <nl> - if ( ! grpc_cq_is_non_listening_server_cq ( server - > cqs [ i ] ) ) { <nl> + if ( grpc_cq_can_listen ( server - > cqs [ i ] ) ) { <nl> server - > pollsets [ server - > pollset_count + + ] = <nl> grpc_cq_pollset ( server - > cqs [ i ] ) ; <nl> } <nl> mmm a / src / cpp / common / core_codegen . cc <nl> ppp b / src / cpp / common / core_codegen . cc <nl> struct grpc_byte_buffer ; <nl> <nl> namespace grpc { <nl> <nl> + const grpc_completion_queue_factory * <nl> + CoreCodegen : : grpc_completion_queue_factory_lookup ( <nl> + const grpc_completion_queue_attributes * attributes ) { <nl> + return : : grpc_completion_queue_factory_lookup ( attributes ) ; <nl> + } <nl> + <nl> + grpc_completion_queue * CoreCodegen : : grpc_completion_queue_create ( <nl> + const grpc_completion_queue_factory * factory , <nl> + const grpc_completion_queue_attributes * attributes , void * reserved ) { <nl> + return : : grpc_completion_queue_create ( factory , attributes , reserved ) ; <nl> + } <nl> + <nl> grpc_completion_queue * CoreCodegen : : grpc_completion_queue_create_for_next ( <nl> void * reserved ) { <nl> return : : grpc_completion_queue_create_for_next ( reserved ) ; <nl> mmm a / src / cpp / server / server_builder . cc <nl> ppp b / src / cpp / server / server_builder . cc <nl> ServerBuilder : : ~ ServerBuilder ( ) { <nl> <nl> std : : unique_ptr < ServerCompletionQueue > ServerBuilder : : AddCompletionQueue ( <nl> bool is_frequently_polled ) { <nl> - ServerCompletionQueue * cq = new ServerCompletionQueue ( is_frequently_polled ) ; <nl> + ServerCompletionQueue * cq = new ServerCompletionQueue ( <nl> + is_frequently_polled ? GRPC_CQ_DEFAULT_POLLING : GRPC_CQ_NON_LISTENING ) ; <nl> cqs_ . 
push_back ( cq ) ; <nl> return std : : unique_ptr < ServerCompletionQueue > ( cq ) ; <nl> } <nl> std : : unique_ptr < Server > ServerBuilder : : BuildAndStart ( ) { <nl> sync_server_cqs ( std : : make_shared < <nl> std : : vector < std : : unique_ptr < ServerCompletionQueue > > > ( ) ) ; <nl> <nl> + int num_frequently_polled_cqs = 0 ; <nl> + for ( auto it = cqs_ . begin ( ) ; it ! = cqs_ . end ( ) ; + + it ) { <nl> + if ( ( * it ) - > IsFrequentlyPolled ( ) ) { <nl> + num_frequently_polled_cqs + + ; <nl> + } <nl> + } <nl> + <nl> + const bool is_hybrid_server = <nl> + has_sync_methods & & num_frequently_polled_cqs > 0 ; <nl> + <nl> if ( has_sync_methods ) { <nl> / / This is a Sync server <nl> gpr_log ( GPR_INFO , <nl> std : : unique_ptr < Server > ServerBuilder : : BuildAndStart ( ) { <nl> sync_server_settings_ . max_pollers , <nl> sync_server_settings_ . cq_timeout_msec ) ; <nl> <nl> + grpc_cq_polling_type polling_type = <nl> + is_hybrid_server ? GRPC_CQ_NON_POLLING : GRPC_CQ_DEFAULT_POLLING ; <nl> + <nl> / / Create completion queues to listen to incoming rpc requests <nl> for ( int i = 0 ; i < sync_server_settings_ . num_cqs ; i + + ) { <nl> - sync_server_cqs - > emplace_back ( new ServerCompletionQueue ( ) ) ; <nl> + sync_server_cqs - > emplace_back ( new ServerCompletionQueue ( polling_type ) ) ; <nl> } <nl> } <nl> <nl> std : : unique_ptr < Server > ServerBuilder : : BuildAndStart ( ) { <nl> / / server <nl> / / 2 . cqs_ : Completion queues added via AddCompletionQueue ( ) call <nl> <nl> - / / All sync cqs ( if any ) are frequently polled by ThreadManager <nl> - int num_frequently_polled_cqs = sync_server_cqs - > size ( ) ; <nl> - <nl> for ( auto it = sync_server_cqs - > begin ( ) ; it ! = sync_server_cqs - > end ( ) ; + + it ) { <nl> grpc_server_register_completion_queue ( server - > server_ , ( * it ) - > cq ( ) , <nl> nullptr ) ; <nl> + num_frequently_polled_cqs + + ; <nl> } <nl> <nl> / / cqs_ contains the completion queue added by calling the ServerBuilder ' s <nl> std : : unique_ptr < Server > ServerBuilder : : BuildAndStart ( ) { <nl> / / listening to incoming channels . Such completion queues must be registered <nl> / / as non - listening queues <nl> for ( auto it = cqs_ . begin ( ) ; it ! = cqs_ . end ( ) ; + + it ) { <nl> - if ( ( * it ) - > IsFrequentlyPolled ( ) ) { <nl> - grpc_server_register_completion_queue ( server - > server_ , ( * it ) - > cq ( ) , <nl> - nullptr ) ; <nl> - num_frequently_polled_cqs + + ; <nl> - } else { <nl> - grpc_server_register_non_listening_completion_queue ( server - > server_ , <nl> - ( * it ) - > cq ( ) , nullptr ) ; <nl> - } <nl> + grpc_server_register_completion_queue ( server - > server_ , ( * it ) - > cq ( ) , <nl> + nullptr ) ; <nl> } <nl> <nl> if ( num_frequently_polled_cqs = = 0 ) { <nl> mmm a / src / node / ext / server_generic . cc <nl> ppp b / src / node / ext / server_generic . 
cc <nl> namespace grpc { <nl> namespace node { <nl> <nl> Server : : Server ( grpc_server * server ) : wrapped_server ( server ) { <nl> - shutdown_queue = grpc_completion_queue_create_for_pluck ( NULL ) ; <nl> - grpc_server_register_non_listening_completion_queue ( server , shutdown_queue , <nl> - NULL ) ; <nl> + grpc_completion_queue_attributes attrs = { <nl> + GRPC_CQ_CURRENT_VERSION , GRPC_CQ_PLUCK , GRPC_CQ_NON_LISTENING } ; <nl> + shutdown_queue = grpc_completion_queue_create ( <nl> + grpc_completion_queue_factory_lookup ( & attrs ) , & attrs , NULL ) ; <nl> + grpc_server_register_completion_queue ( server , shutdown_queue , NULL ) ; <nl> } <nl> <nl> Server : : ~ Server ( ) { <nl> new file mode 100644 <nl> index 00000000000 . . dbb91d91392 <nl> mmm / dev / null <nl> ppp b / src / proto / grpc / health / v1 / BUILD <nl> <nl> + # Copyright 2017 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + licenses ( [ " notice " ] ) # 3 - clause BSD <nl> + <nl> + package ( default_visibility = [ " / / visibility : public " ] ) <nl> + <nl> + load ( " / / bazel : grpc_build_system . bzl " , " grpc_proto_library " ) <nl> + <nl> + grpc_proto_library ( <nl> + name = " health_proto " , <nl> + srcs = [ " health . proto " ] , <nl> + ) <nl> mmm a / src / proto / grpc / testing / BUILD <nl> ppp b / src / proto / grpc / testing / BUILD <nl> load ( " / / bazel : grpc_build_system . bzl " , " grpc_proto_library " ) <nl> grpc_proto_library ( <nl> name = " compiler_test_proto " , <nl> srcs = [ " compiler_test . proto " ] , <nl> + generate_mock = True , <nl> ) <nl> <nl> grpc_proto_library ( <nl> grpc_proto_library ( <nl> name = " echo_proto " , <nl> srcs = [ " echo . proto " ] , <nl> deps = [ " echo_messages_proto " ] , <nl> + generate_mock = True , <nl> ) <nl> <nl> grpc_proto_library ( <nl> mmm a / src / proto / grpc / testing / compiler_test . 
proto <nl> ppp b / src / proto / grpc / testing / compiler_test . proto <nl> service ServiceA { <nl> / / Method A2 leading comment 2 <nl> rpc MethodA2 ( stream Request ) returns ( Response ) ; <nl> / / MethodA2 trailing comment 1 <nl> + <nl> + / / Method A3 leading comment 1 <nl> + rpc MethodA3 ( Request ) returns ( stream Response ) ; <nl> + / / Method A3 trailing comment 1 <nl> + <nl> + / / Method A4 leading comment 1 <nl> + rpc MethodA4 ( stream Request ) returns ( stream Response ) ; <nl> + / / Method A4 trailing comment 1 <nl> } <nl> / / Ignored ServiceA trailing comment 1 <nl> <nl> mmm a / src / python / grpcio / grpc / _cython / _cygrpc / grpc . pxi <nl> ppp b / src / python / grpcio / grpc / _cython / _cygrpc / grpc . pxi <nl> cdef extern from " grpc / grpc . h " : <nl> void grpc_server_register_completion_queue ( grpc_server * server , <nl> grpc_completion_queue * cq , <nl> void * reserved ) nogil <nl> - void grpc_server_register_non_listening_completion_queue ( <nl> - grpc_server * server , grpc_completion_queue * cq , void * reserved ) nogil <nl> int grpc_server_add_insecure_http2_port ( <nl> grpc_server * server , const char * addr ) nogil <nl> void grpc_server_start ( grpc_server * server ) nogil <nl> cdef extern from " grpc / compression . h " : <nl> int grpc_compression_options_is_algorithm_enabled ( <nl> const grpc_compression_options * opts , <nl> grpc_compression_algorithm algorithm ) nogil <nl> - <nl> mmm a / src / python / grpcio / grpc / _cython / _cygrpc / server . pyx . pxi <nl> ppp b / src / python / grpcio / grpc / _cython / _cygrpc / server . pyx . pxi <nl> cdef class Server : <nl> self . c_server , queue . c_completion_queue , NULL ) <nl> self . registered_completion_queues . append ( queue ) <nl> <nl> - def register_non_listening_completion_queue ( <nl> - self , CompletionQueue queue not None ) : <nl> - if self . is_started : <nl> - raise ValueError ( " cannot register completion queues after start " ) <nl> - with nogil : <nl> - grpc_server_register_non_listening_completion_queue ( <nl> - self . c_server , queue . c_completion_queue , NULL ) <nl> - self . registered_completion_queues . append ( queue ) <nl> - <nl> def start ( self ) : <nl> if self . is_started : <nl> raise ValueError ( " the server has already started " ) <nl> self . backup_shutdown_queue = CompletionQueue ( ) <nl> - self . register_non_listening_completion_queue ( self . backup_shutdown_queue ) <nl> + self . register_completion_queue ( self . backup_shutdown_queue ) <nl> self . is_started = True <nl> with nogil : <nl> grpc_server_start ( self . c_server ) <nl> mmm a / src / ruby / ext / grpc / rb_grpc_imports . generated . c <nl> ppp b / src / ruby / ext / grpc / rb_grpc_imports . generated . 
c <nl> grpc_server_register_method_type grpc_server_register_method_import ; <nl> grpc_server_request_registered_call_type grpc_server_request_registered_call_import ; <nl> grpc_server_create_type grpc_server_create_import ; <nl> grpc_server_register_completion_queue_type grpc_server_register_completion_queue_import ; <nl> - grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import ; <nl> grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import ; <nl> grpc_server_start_type grpc_server_start_import ; <nl> grpc_server_shutdown_and_notify_type grpc_server_shutdown_and_notify_import ; <nl> void grpc_rb_load_imports ( HMODULE library ) { <nl> grpc_server_request_registered_call_import = ( grpc_server_request_registered_call_type ) GetProcAddress ( library , " grpc_server_request_registered_call " ) ; <nl> grpc_server_create_import = ( grpc_server_create_type ) GetProcAddress ( library , " grpc_server_create " ) ; <nl> grpc_server_register_completion_queue_import = ( grpc_server_register_completion_queue_type ) GetProcAddress ( library , " grpc_server_register_completion_queue " ) ; <nl> - grpc_server_register_non_listening_completion_queue_import = ( grpc_server_register_non_listening_completion_queue_type ) GetProcAddress ( library , " grpc_server_register_non_listening_completion_queue " ) ; <nl> grpc_server_add_insecure_http2_port_import = ( grpc_server_add_insecure_http2_port_type ) GetProcAddress ( library , " grpc_server_add_insecure_http2_port " ) ; <nl> grpc_server_start_import = ( grpc_server_start_type ) GetProcAddress ( library , " grpc_server_start " ) ; <nl> grpc_server_shutdown_and_notify_import = ( grpc_server_shutdown_and_notify_type ) GetProcAddress ( library , " grpc_server_shutdown_and_notify " ) ; <nl> mmm a / src / ruby / ext / grpc / rb_grpc_imports . generated . h <nl> ppp b / src / ruby / ext / grpc / rb_grpc_imports . generated . h <nl> extern grpc_server_create_type grpc_server_create_import ; <nl> typedef void ( * grpc_server_register_completion_queue_type ) ( grpc_server * server , grpc_completion_queue * cq , void * reserved ) ; <nl> extern grpc_server_register_completion_queue_type grpc_server_register_completion_queue_import ; <nl> # define grpc_server_register_completion_queue grpc_server_register_completion_queue_import <nl> - typedef void ( * grpc_server_register_non_listening_completion_queue_type ) ( grpc_server * server , grpc_completion_queue * q , void * reserved ) ; <nl> - extern grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import ; <nl> - # define grpc_server_register_non_listening_completion_queue grpc_server_register_non_listening_completion_queue_import <nl> typedef int ( * grpc_server_add_insecure_http2_port_type ) ( grpc_server * server , const char * addr ) ; <nl> extern grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import ; <nl> # define grpc_server_add_insecure_http2_port grpc_server_add_insecure_http2_port_import <nl> mmm a / templates / Makefile . template <nl> ppp b / templates / Makefile . template <nl> <nl> USE_BUILT_PROTOC = false <nl> endif <nl> <nl> - GTEST_LIB = - Ithird_party / googletest / googletest / include - Ithird_party / googletest / googletest third_party / googletest / googletest / src / gtest - all . 
cc <nl> + GTEST_LIB = - Ithird_party / googletest / googletest / include - Ithird_party / googletest / googletest third_party / googletest / googletest / src / gtest - all . cc - Ithird_party / googletest / googlemock / include - Ithird_party / googletest / googlemock third_party / googletest / googlemock / src / gmock - all . cc <nl> GTEST_LIB + = - lgflags <nl> ifeq ( $ ( V ) , 1 ) <nl> E = @ : <nl> <nl> PC_REQUIRES_GRPCXX = <nl> PC_LIBS_GRPCXX = <nl> <nl> - CPPFLAGS : = - Ithird_party / googletest / googletest / include $ ( CPPFLAGS ) <nl> + CPPFLAGS : = - Ithird_party / googletest / googletest / include - Ithird_party / googletest / googlemock / include $ ( CPPFLAGS ) <nl> <nl> PROTOC_PLUGINS_ALL = \ <nl> % for tgt in targets : <nl> <nl> $ ( GENDIR ) / $ { p } . pb . cc : protoc_dep_error <nl> $ ( GENDIR ) / $ { p } . grpc . pb . cc : protoc_dep_error <nl> else <nl> + < % <nl> + pluginflags = " " <nl> + % > <nl> + % if p in [ " src / proto / grpc / testing / compiler_test " , " src / proto / grpc / testing / echo " ] : <nl> + < % <nl> + pluginflags = " generate_mock_code = true : " <nl> + % > <nl> + % endif <nl> $ ( GENDIR ) / $ { p } . pb . cc : $ { p } . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ { ' ' . join ( ' $ ( GENDIR ) / % s . pb . cc ' % q for q in proto_deps . get ( p , [ ] ) ) } <nl> $ ( E ) " [ PROTOC ] Generating protobuf CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> <nl> $ ( GENDIR ) / $ { p } . grpc . pb . cc : $ { p } . proto $ ( PROTOBUF_DEP ) $ ( PROTOC_PLUGINS ) $ { ' ' . join ( ' $ ( GENDIR ) / % s . pb . cc $ ( GENDIR ) / % s . grpc . pb . cc ' % ( q , q ) for q in proto_deps . get ( p , [ ] ) ) } <nl> $ ( E ) " [ GRPC ] Generating gRPC ' s protobuf service CC file from $ < " <nl> $ ( Q ) mkdir - p ` dirname $ @ ` <nl> - $ ( Q ) $ ( PROTOC ) - Ithird_party / protobuf / src - I . - - grpc_out = $ ( GENDIR ) - - plugin = protoc - gen - grpc = $ ( PROTOC_PLUGINS_DIR ) / grpc_cpp_plugin $ ( EXECUTABLE_SUFFIX ) $ < <nl> + $ ( Q ) $ ( PROTOC ) - Ithird_party / protobuf / src - I . - - grpc_out = $ { pluginflags } $ ( GENDIR ) - - plugin = protoc - gen - grpc = $ ( PROTOC_PLUGINS_DIR ) / grpc_cpp_plugin $ ( EXECUTABLE_SUFFIX ) $ < <nl> endif <nl> <nl> % endfor <nl> mmm a / templates / tools / run_tests / generated / sources_and_headers . json . template <nl> ppp b / templates / tools / run_tests / generated / sources_and_headers . json . template <nl> <nl> for f in src : <nl> name , ext = os . path . splitext ( f ) <nl> if ext = = ' . proto ' : <nl> - out . extend ( fmt % name for fmt in [ ' % s . grpc . pb . h ' , ' % s . pb . h ' ] ) <nl> + out . extend ( fmt % name for fmt in [ ' % s . grpc . pb . h ' , ' % s . pb . h ' , ' % s_mock . grpc . pb . h ' ] ) <nl> return out <nl> <nl> def all_targets ( targets , libs , filegroups ) : <nl> mmm a / test / core / end2end / fixtures / http_proxy_fixture . c <nl> ppp b / test / core / end2end / fixtures / http_proxy_fixture . c <nl> <nl> # include " src / core / lib / iomgr / sockaddr_utils . h " <nl> # include " src / core / lib / iomgr / tcp_client . h " <nl> # include " src / core / lib / iomgr / tcp_server . h " <nl> + # include " src / core / lib / iomgr / timer . h " <nl> # include " src / core / lib / slice / slice_internal . h " <nl> # include " test / core / util / port . 
h " <nl> <nl> struct grpc_end2end_http_proxy { <nl> grpc_channel_args * channel_args ; <nl> gpr_mu * mu ; <nl> grpc_pollset * pollset ; <nl> - gpr_atm shutdown ; <nl> + gpr_refcount users ; <nl> } ; <nl> <nl> / / <nl> struct grpc_end2end_http_proxy { <nl> / / <nl> <nl> typedef struct proxy_connection { <nl> + grpc_end2end_http_proxy * proxy ; <nl> + <nl> grpc_endpoint * client_endpoint ; <nl> grpc_endpoint * server_endpoint ; <nl> <nl> typedef struct proxy_connection { <nl> grpc_http_request http_request ; <nl> } proxy_connection ; <nl> <nl> + static void proxy_connection_ref ( proxy_connection * conn , const char * reason ) { <nl> + gpr_ref ( & conn - > refcount ) ; <nl> + } <nl> + <nl> / / Helper function to destroy the proxy connection . <nl> static void proxy_connection_unref ( grpc_exec_ctx * exec_ctx , <nl> - proxy_connection * conn ) { <nl> + proxy_connection * conn , const char * reason ) { <nl> if ( gpr_unref ( & conn - > refcount ) ) { <nl> + gpr_log ( GPR_DEBUG , " endpoints : % p % p " , conn - > client_endpoint , <nl> + conn - > server_endpoint ) ; <nl> grpc_endpoint_destroy ( exec_ctx , conn - > client_endpoint ) ; <nl> - if ( conn - > server_endpoint ! = NULL ) <nl> + if ( conn - > server_endpoint ! = NULL ) { <nl> grpc_endpoint_destroy ( exec_ctx , conn - > server_endpoint ) ; <nl> + } <nl> grpc_pollset_set_destroy ( exec_ctx , conn - > pollset_set ) ; <nl> grpc_slice_buffer_destroy_internal ( exec_ctx , & conn - > client_read_buffer ) ; <nl> grpc_slice_buffer_destroy_internal ( exec_ctx , <nl> static void proxy_connection_unref ( grpc_exec_ctx * exec_ctx , <nl> grpc_slice_buffer_destroy_internal ( exec_ctx , & conn - > server_write_buffer ) ; <nl> grpc_http_parser_destroy ( & conn - > http_parser ) ; <nl> grpc_http_request_destroy ( & conn - > http_request ) ; <nl> + gpr_unref ( & conn - > proxy - > users ) ; <nl> gpr_free ( conn ) ; <nl> } <nl> } <nl> static void proxy_connection_failed ( grpc_exec_ctx * exec_ctx , <nl> grpc_endpoint_shutdown ( exec_ctx , conn - > server_endpoint , <nl> GRPC_ERROR_REF ( error ) ) ; <nl> } <nl> - proxy_connection_unref ( exec_ctx , conn ) ; <nl> + proxy_connection_unref ( exec_ctx , conn , " conn_failed " ) ; <nl> } <nl> <nl> / / Callback for writing proxy data to the client . <nl> static void on_client_write_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> & conn - > on_client_write_done ) ; <nl> } else { <nl> / / No more writes . Unref the connection . <nl> - proxy_connection_unref ( exec_ctx , conn ) ; <nl> + proxy_connection_unref ( exec_ctx , conn , " write_done " ) ; <nl> } <nl> } <nl> <nl> static void on_server_write_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> & conn - > on_server_write_done ) ; <nl> } else { <nl> / / No more writes . Unref the connection . 
<nl> - proxy_connection_unref ( exec_ctx , conn ) ; <nl> + proxy_connection_unref ( exec_ctx , conn , " server_write " ) ; <nl> } <nl> } <nl> <nl> static void on_client_read_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> } else { <nl> grpc_slice_buffer_move_into ( & conn - > client_read_buffer , <nl> & conn - > server_write_buffer ) ; <nl> - gpr_ref ( & conn - > refcount ) ; <nl> + proxy_connection_ref ( conn , " client_read " ) ; <nl> grpc_endpoint_write ( exec_ctx , conn - > server_endpoint , <nl> & conn - > server_write_buffer , <nl> & conn - > on_server_write_done ) ; <nl> static void on_server_read_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> } else { <nl> grpc_slice_buffer_move_into ( & conn - > server_read_buffer , <nl> & conn - > client_write_buffer ) ; <nl> - gpr_ref ( & conn - > refcount ) ; <nl> + proxy_connection_ref ( conn , " server_read " ) ; <nl> grpc_endpoint_write ( exec_ctx , conn - > client_endpoint , <nl> & conn - > client_write_buffer , <nl> & conn - > on_client_write_done ) ; <nl> static void on_write_response_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> / / Start reading from both client and server . One of the read <nl> / / requests inherits our ref to conn , but we need to take a new ref <nl> / / for the other one . <nl> - gpr_ref ( & conn - > refcount ) ; <nl> + proxy_connection_ref ( conn , " client_read " ) ; <nl> + proxy_connection_ref ( conn , " server_read " ) ; <nl> + proxy_connection_unref ( exec_ctx , conn , " write_response " ) ; <nl> grpc_endpoint_read ( exec_ctx , conn - > client_endpoint , & conn - > client_read_buffer , <nl> & conn - > on_client_read_done ) ; <nl> grpc_endpoint_read ( exec_ctx , conn - > server_endpoint , & conn - > server_read_buffer , <nl> static void on_server_connect_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> static void on_read_request_done ( grpc_exec_ctx * exec_ctx , void * arg , <nl> grpc_error * error ) { <nl> proxy_connection * conn = arg ; <nl> + gpr_log ( GPR_DEBUG , " on_read_request_done : % p % s " , conn , <nl> + grpc_error_string ( error ) ) ; <nl> if ( error ! = GRPC_ERROR_NONE ) { <nl> proxy_connection_failed ( exec_ctx , conn , true / * is_client * / , <nl> " HTTP proxy read request " , error ) ; <nl> static void on_accept ( grpc_exec_ctx * exec_ctx , void * arg , <nl> gpr_free ( acceptor ) ; <nl> grpc_end2end_http_proxy * proxy = arg ; <nl> / / Instantiate proxy_connection . 
<nl> - proxy_connection * conn = gpr_malloc ( sizeof ( * conn ) ) ; <nl> - memset ( conn , 0 , sizeof ( * conn ) ) ; <nl> + proxy_connection * conn = gpr_zalloc ( sizeof ( * conn ) ) ; <nl> + gpr_ref ( & proxy - > users ) ; <nl> conn - > client_endpoint = endpoint ; <nl> + conn - > proxy = proxy ; <nl> gpr_ref_init ( & conn - > refcount , 1 ) ; <nl> conn - > pollset_set = grpc_pollset_set_create ( ) ; <nl> grpc_pollset_set_add_pollset ( exec_ctx , conn - > pollset_set , proxy - > pollset ) ; <nl> + grpc_endpoint_add_to_pollset_set ( exec_ctx , endpoint , conn - > pollset_set ) ; <nl> grpc_closure_init ( & conn - > on_read_request_done , on_read_request_done , conn , <nl> grpc_schedule_on_exec_ctx ) ; <nl> grpc_closure_init ( & conn - > on_server_connect_done , on_server_connect_done , conn , <nl> static void thread_main ( void * arg ) { <nl> grpc_end2end_http_proxy * proxy = arg ; <nl> grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT ; <nl> do { <nl> + gpr_ref ( & proxy - > users ) ; <nl> const gpr_timespec now = gpr_now ( GPR_CLOCK_MONOTONIC ) ; <nl> const gpr_timespec deadline = <nl> gpr_time_add ( now , gpr_time_from_seconds ( 1 , GPR_TIMESPAN ) ) ; <nl> static void thread_main ( void * arg ) { <nl> grpc_pollset_work ( & exec_ctx , proxy - > pollset , & worker , now , deadline ) ) ; <nl> gpr_mu_unlock ( proxy - > mu ) ; <nl> grpc_exec_ctx_flush ( & exec_ctx ) ; <nl> - } while ( ! gpr_atm_acq_load ( & proxy - > shutdown ) ) ; <nl> + } while ( ! gpr_unref ( & proxy - > users ) ) ; <nl> grpc_exec_ctx_finish ( & exec_ctx ) ; <nl> } <nl> <nl> grpc_end2end_http_proxy * grpc_end2end_http_proxy_create ( void ) { <nl> grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT ; <nl> grpc_end2end_http_proxy * proxy = gpr_malloc ( sizeof ( * proxy ) ) ; <nl> memset ( proxy , 0 , sizeof ( * proxy ) ) ; <nl> + gpr_ref_init ( & proxy - > users , 1 ) ; <nl> / / Construct proxy address . <nl> const int proxy_port = grpc_pick_unused_port_or_die ( ) ; <nl> gpr_join_host_port ( & proxy - > proxy_name , " localhost " , proxy_port ) ; <nl> static void destroy_pollset ( grpc_exec_ctx * exec_ctx , void * arg , <nl> } <nl> <nl> void grpc_end2end_http_proxy_destroy ( grpc_end2end_http_proxy * proxy ) { <nl> - gpr_atm_rel_store ( & proxy - > shutdown , 1 ) ; / / Signal proxy thread to shutdown . <nl> + gpr_unref ( & proxy - > users ) ; / / Signal proxy thread to shutdown . <nl> grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT ; <nl> gpr_thd_join ( proxy - > thd ) ; <nl> grpc_tcp_server_shutdown_listeners ( & exec_ctx , proxy - > server ) ; <nl> grpc_tcp_server_unref ( & exec_ctx , proxy - > server ) ; <nl> gpr_free ( proxy - > proxy_name ) ; <nl> grpc_channel_args_destroy ( & exec_ctx , proxy - > channel_args ) ; <nl> - grpc_closure destroyed ; <nl> - grpc_closure_init ( & destroyed , destroy_pollset , proxy - > pollset , <nl> - grpc_schedule_on_exec_ctx ) ; <nl> - grpc_pollset_shutdown ( & exec_ctx , proxy - > pollset , & destroyed ) ; <nl> + grpc_pollset_shutdown ( & exec_ctx , proxy - > pollset , <nl> + grpc_closure_create ( destroy_pollset , proxy - > pollset , <nl> + grpc_schedule_on_exec_ctx ) ) ; <nl> gpr_free ( proxy ) ; <nl> grpc_exec_ctx_finish ( & exec_ctx ) ; <nl> } <nl> mmm a / test / core / end2end / tests / cancel_after_invoke . c <nl> ppp b / test / core / end2end / tests / cancel_after_invoke . 
c <nl> static void * tag ( intptr_t t ) { return ( void * ) t ; } <nl> static grpc_end2end_test_fixture begin_test ( grpc_end2end_test_config config , <nl> const char * test_name , <nl> cancellation_mode mode , <nl> + size_t test_ops , <nl> grpc_channel_args * client_args , <nl> grpc_channel_args * server_args ) { <nl> grpc_end2end_test_fixture f ; <nl> - gpr_log ( GPR_INFO , " Running test : % s / % s / % s " , test_name , config . name , <nl> - mode . name ) ; <nl> + gpr_log ( GPR_INFO , " Running test : % s / % s / % s [ % " PRIdPTR " ops ] " , test_name , <nl> + config . name , mode . name , test_ops ) ; <nl> f = config . create_fixture ( client_args , server_args ) ; <nl> config . init_server ( & f , server_args ) ; <nl> config . init_client ( & f , client_args ) ; <nl> static void test_cancel_after_invoke ( grpc_end2end_test_config config , <nl> grpc_op ops [ 6 ] ; <nl> grpc_op * op ; <nl> grpc_call * c ; <nl> - grpc_end2end_test_fixture f = <nl> - begin_test ( config , " test_cancel_after_invoke " , mode , NULL , NULL ) ; <nl> + grpc_end2end_test_fixture f = begin_test ( config , " test_cancel_after_invoke " , <nl> + mode , test_ops , NULL , NULL ) ; <nl> cq_verifier * cqv = cq_verifier_create ( f . cq ) ; <nl> grpc_metadata_array initial_metadata_recv ; <nl> grpc_metadata_array trailing_metadata_recv ; <nl> mmm a / test / core / util / port_server_client . c <nl> ppp b / test / core / util / port_server_client . c <nl> void grpc_free_port_using_server ( int port ) { <nl> grpc_resource_quota * resource_quota = <nl> grpc_resource_quota_create ( " port_server_client / free " ) ; <nl> grpc_httpcli_get ( & exec_ctx , & context , & pr . pops , resource_quota , & req , <nl> - grpc_timeout_seconds_to_deadline ( 10 ) , <nl> + grpc_timeout_seconds_to_deadline ( 30 ) , <nl> grpc_closure_create ( freed_port_from_server , & pr , <nl> grpc_schedule_on_exec_ctx ) , <nl> & rsp ) ; <nl> int grpc_pick_port_using_server ( void ) { <nl> grpc_resource_quota_create ( " port_server_client / pick " ) ; <nl> grpc_httpcli_get ( <nl> & exec_ctx , & context , & pr . pops , resource_quota , & req , <nl> - grpc_timeout_seconds_to_deadline ( 10 ) , <nl> + grpc_timeout_seconds_to_deadline ( 30 ) , <nl> grpc_closure_create ( got_port_from_server , & pr , grpc_schedule_on_exec_ctx ) , <nl> & pr . response ) ; <nl> grpc_resource_quota_unref_internal ( & exec_ctx , resource_quota ) ; <nl> mmm a / test / core / util / test_config . c <nl> ppp b / test / core / util / test_config . c <nl> bool BuiltUnderMsan ( ) { <nl> # endif <nl> } <nl> <nl> + bool BuiltUnderUbsan ( ) { <nl> + # ifdef GRPC_UBSAN <nl> + return true ; <nl> + # else <nl> + return false ; <nl> + # endif <nl> + } <nl> + <nl> int64_t grpc_test_sanitizer_slowdown_factor ( ) { <nl> int64_t sanitizer_multiplier = 1 ; <nl> if ( BuiltUnderValgrind ( ) ) { <nl> int64_t grpc_test_sanitizer_slowdown_factor ( ) { <nl> sanitizer_multiplier = 3 ; <nl> } else if ( BuiltUnderMsan ( ) ) { <nl> sanitizer_multiplier = 4 ; <nl> + } else if ( BuiltUnderUbsan ( ) ) { <nl> + sanitizer_multiplier = 5 ; <nl> } <nl> return sanitizer_multiplier ; <nl> } <nl> mmm a / test / cpp / codegen / BUILD <nl> ppp b / test / cpp / codegen / BUILD <nl> cc_test ( <nl> cc_test ( <nl> name = " golden_file_test " , <nl> srcs = [ " golden_file_test . cc " ] , <nl> - args = [ " - - generated_file_path = $ ( GENDIR ) / src / proto / grpc / testing / compiler_test . grpc . pb . 
h " ] , <nl> + args = [ " - - generated_file_path = $ ( GENDIR ) / src / proto / grpc / testing / " ] , <nl> data = [ <nl> " : compiler_test_golden " , <nl> " / / src / proto / grpc / testing : _compiler_test_proto_grpc_codegen " , <nl> mmm a / test / cpp / codegen / compiler_test_golden <nl> ppp b / test / cpp / codegen / compiler_test_golden <nl> class ServiceA final { <nl> return std : : unique_ptr < : : grpc : : ClientAsyncWriterInterface < : : grpc : : testing : : Request > > ( AsyncMethodA2Raw ( context , response , cq , tag ) ) ; <nl> } <nl> / / MethodA2 trailing comment 1 <nl> + / / Method A3 leading comment 1 <nl> + std : : unique_ptr < : : grpc : : ClientReaderInterface < : : grpc : : testing : : Response > > MethodA3 ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request ) { <nl> + return std : : unique_ptr < : : grpc : : ClientReaderInterface < : : grpc : : testing : : Response > > ( MethodA3Raw ( context , request ) ) ; <nl> + } <nl> + std : : unique_ptr < : : grpc : : ClientAsyncReaderInterface < : : grpc : : testing : : Response > > AsyncMethodA3 ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq , void * tag ) { <nl> + return std : : unique_ptr < : : grpc : : ClientAsyncReaderInterface < : : grpc : : testing : : Response > > ( AsyncMethodA3Raw ( context , request , cq , tag ) ) ; <nl> + } <nl> + / / Method A3 trailing comment 1 <nl> + / / Method A4 leading comment 1 <nl> + std : : unique_ptr < : : grpc : : ClientReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > MethodA4 ( : : grpc : : ClientContext * context ) { <nl> + return std : : unique_ptr < : : grpc : : ClientReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > ( MethodA4Raw ( context ) ) ; <nl> + } <nl> + std : : unique_ptr < : : grpc : : ClientAsyncReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > AsyncMethodA4 ( : : grpc : : ClientContext * context , : : grpc : : CompletionQueue * cq , void * tag ) { <nl> + return std : : unique_ptr < : : grpc : : ClientAsyncReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > ( AsyncMethodA4Raw ( context , cq , tag ) ) ; <nl> + } <nl> + / / Method A4 trailing comment 1 <nl> private : <nl> virtual : : grpc : : ClientAsyncResponseReaderInterface < : : grpc : : testing : : Response > * AsyncMethodA1Raw ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq ) = 0 ; <nl> virtual : : grpc : : ClientWriterInterface < : : grpc : : testing : : Request > * MethodA2Raw ( : : grpc : : ClientContext * context , : : grpc : : testing : : Response * response ) = 0 ; <nl> virtual : : grpc : : ClientAsyncWriterInterface < : : grpc : : testing : : Request > * AsyncMethodA2Raw ( : : grpc : : ClientContext * context , : : grpc : : testing : : Response * response , : : grpc : : CompletionQueue * cq , void * tag ) = 0 ; <nl> + virtual : : grpc : : ClientReaderInterface < : : grpc : : testing : : Response > * MethodA3Raw ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request ) = 0 ; <nl> + virtual : : grpc : : ClientAsyncReaderInterface < : : grpc : : testing : : Response > * AsyncMethodA3Raw ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * 
cq , void * tag ) = 0 ; <nl> + virtual : : grpc : : ClientReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * MethodA4Raw ( : : grpc : : ClientContext * context ) = 0 ; <nl> + virtual : : grpc : : ClientAsyncReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * AsyncMethodA4Raw ( : : grpc : : ClientContext * context , : : grpc : : CompletionQueue * cq , void * tag ) = 0 ; <nl> } ; <nl> class Stub final : public StubInterface { <nl> public : <nl> class ServiceA final { <nl> std : : unique_ptr < : : grpc : : ClientAsyncWriter < : : grpc : : testing : : Request > > AsyncMethodA2 ( : : grpc : : ClientContext * context , : : grpc : : testing : : Response * response , : : grpc : : CompletionQueue * cq , void * tag ) { <nl> return std : : unique_ptr < : : grpc : : ClientAsyncWriter < : : grpc : : testing : : Request > > ( AsyncMethodA2Raw ( context , response , cq , tag ) ) ; <nl> } <nl> + std : : unique_ptr < : : grpc : : ClientReader < : : grpc : : testing : : Response > > MethodA3 ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request ) { <nl> + return std : : unique_ptr < : : grpc : : ClientReader < : : grpc : : testing : : Response > > ( MethodA3Raw ( context , request ) ) ; <nl> + } <nl> + std : : unique_ptr < : : grpc : : ClientAsyncReader < : : grpc : : testing : : Response > > AsyncMethodA3 ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq , void * tag ) { <nl> + return std : : unique_ptr < : : grpc : : ClientAsyncReader < : : grpc : : testing : : Response > > ( AsyncMethodA3Raw ( context , request , cq , tag ) ) ; <nl> + } <nl> + std : : unique_ptr < : : grpc : : ClientReaderWriter < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > MethodA4 ( : : grpc : : ClientContext * context ) { <nl> + return std : : unique_ptr < : : grpc : : ClientReaderWriter < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > ( MethodA4Raw ( context ) ) ; <nl> + } <nl> + std : : unique_ptr < : : grpc : : ClientAsyncReaderWriter < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > AsyncMethodA4 ( : : grpc : : ClientContext * context , : : grpc : : CompletionQueue * cq , void * tag ) { <nl> + return std : : unique_ptr < : : grpc : : ClientAsyncReaderWriter < : : grpc : : testing : : Request , : : grpc : : testing : : Response > > ( AsyncMethodA4Raw ( context , cq , tag ) ) ; <nl> + } <nl> <nl> private : <nl> std : : shared_ptr < : : grpc : : ChannelInterface > channel_ ; <nl> : : grpc : : ClientAsyncResponseReader < : : grpc : : testing : : Response > * AsyncMethodA1Raw ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq ) override ; <nl> : : grpc : : ClientWriter < : : grpc : : testing : : Request > * MethodA2Raw ( : : grpc : : ClientContext * context , : : grpc : : testing : : Response * response ) override ; <nl> : : grpc : : ClientAsyncWriter < : : grpc : : testing : : Request > * AsyncMethodA2Raw ( : : grpc : : ClientContext * context , : : grpc : : testing : : Response * response , : : grpc : : CompletionQueue * cq , void * tag ) override ; <nl> + : : grpc : : ClientReader < : : grpc : : testing : : Response > * MethodA3Raw ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request ) override ; <nl> + : : grpc : : ClientAsyncReader < : : grpc 
: : testing : : Response > * AsyncMethodA3Raw ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq , void * tag ) override ; <nl> + : : grpc : : ClientReaderWriter < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * MethodA4Raw ( : : grpc : : ClientContext * context ) override ; <nl> + : : grpc : : ClientAsyncReaderWriter < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * AsyncMethodA4Raw ( : : grpc : : ClientContext * context , : : grpc : : CompletionQueue * cq , void * tag ) override ; <nl> const : : grpc : : RpcMethod rpcmethod_MethodA1_ ; <nl> const : : grpc : : RpcMethod rpcmethod_MethodA2_ ; <nl> + const : : grpc : : RpcMethod rpcmethod_MethodA3_ ; <nl> + const : : grpc : : RpcMethod rpcmethod_MethodA4_ ; <nl> } ; <nl> static std : : unique_ptr < Stub > NewStub ( const std : : shared_ptr < : : grpc : : ChannelInterface > & channel , const : : grpc : : StubOptions & options = : : grpc : : StubOptions ( ) ) ; <nl> <nl> class ServiceA final { <nl> / / Method A2 leading comment 2 <nl> virtual : : grpc : : Status MethodA2 ( : : grpc : : ServerContext * context , : : grpc : : ServerReader < : : grpc : : testing : : Request > * reader , : : grpc : : testing : : Response * response ) ; <nl> / / MethodA2 trailing comment 1 <nl> + / / Method A3 leading comment 1 <nl> + virtual : : grpc : : Status MethodA3 ( : : grpc : : ServerContext * context , const : : grpc : : testing : : Request * request , : : grpc : : ServerWriter < : : grpc : : testing : : Response > * writer ) ; <nl> + / / Method A3 trailing comment 1 <nl> + / / Method A4 leading comment 1 <nl> + virtual : : grpc : : Status MethodA4 ( : : grpc : : ServerContext * context , : : grpc : : ServerReaderWriter < : : grpc : : testing : : Response , : : grpc : : testing : : Request > * stream ) ; <nl> + / / Method A4 trailing comment 1 <nl> } ; <nl> template < class BaseClass > <nl> class WithAsyncMethod_MethodA1 : public BaseClass { <nl> class ServiceA final { <nl> : : grpc : : Service : : RequestAsyncClientStreaming ( 1 , context , reader , new_call_cq , notification_cq , tag ) ; <nl> } <nl> } ; <nl> - typedef WithAsyncMethod_MethodA1 < WithAsyncMethod_MethodA2 < Service > > AsyncService ; <nl> + template < class BaseClass > <nl> + class WithAsyncMethod_MethodA3 : public BaseClass { <nl> + private : <nl> + void BaseClassMustBeDerivedFromService ( const Service * service ) { } <nl> + public : <nl> + WithAsyncMethod_MethodA3 ( ) { <nl> + : : grpc : : Service : : MarkMethodAsync ( 2 ) ; <nl> + } <nl> + ~ WithAsyncMethod_MethodA3 ( ) override { <nl> + BaseClassMustBeDerivedFromService ( this ) ; <nl> + } <nl> + / / disable synchronous version of this method <nl> + : : grpc : : Status MethodA3 ( : : grpc : : ServerContext * context , const : : grpc : : testing : : Request * request , : : grpc : : ServerWriter < : : grpc : : testing : : Response > * writer ) final override { <nl> + abort ( ) ; <nl> + return : : grpc : : Status ( : : grpc : : StatusCode : : UNIMPLEMENTED , " " ) ; <nl> + } <nl> + void RequestMethodA3 ( : : grpc : : ServerContext * context , : : grpc : : testing : : Request * request , : : grpc : : ServerAsyncWriter < : : grpc : : testing : : Response > * writer , : : grpc : : CompletionQueue * new_call_cq , : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncServerStreaming ( 2 , context , request , writer , new_call_cq , notification_cq , tag ) ; <nl> 
+ } <nl> + } ; <nl> + template < class BaseClass > <nl> + class WithAsyncMethod_MethodA4 : public BaseClass { <nl> + private : <nl> + void BaseClassMustBeDerivedFromService ( const Service * service ) { } <nl> + public : <nl> + WithAsyncMethod_MethodA4 ( ) { <nl> + : : grpc : : Service : : MarkMethodAsync ( 3 ) ; <nl> + } <nl> + ~ WithAsyncMethod_MethodA4 ( ) override { <nl> + BaseClassMustBeDerivedFromService ( this ) ; <nl> + } <nl> + / / disable synchronous version of this method <nl> + : : grpc : : Status MethodA4 ( : : grpc : : ServerContext * context , : : grpc : : ServerReaderWriter < : : grpc : : testing : : Response , : : grpc : : testing : : Request > * stream ) final override { <nl> + abort ( ) ; <nl> + return : : grpc : : Status ( : : grpc : : StatusCode : : UNIMPLEMENTED , " " ) ; <nl> + } <nl> + void RequestMethodA4 ( : : grpc : : ServerContext * context , : : grpc : : ServerAsyncReaderWriter < : : grpc : : testing : : Response , : : grpc : : testing : : Request > * stream , : : grpc : : CompletionQueue * new_call_cq , : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncBidiStreaming ( 3 , context , stream , new_call_cq , notification_cq , tag ) ; <nl> + } <nl> + } ; <nl> + typedef WithAsyncMethod_MethodA1 < WithAsyncMethod_MethodA2 < WithAsyncMethod_MethodA3 < WithAsyncMethod_MethodA4 < Service > > > > AsyncService ; <nl> template < class BaseClass > <nl> class WithGenericMethod_MethodA1 : public BaseClass { <nl> private : <nl> class ServiceA final { <nl> } <nl> } ; <nl> template < class BaseClass > <nl> + class WithGenericMethod_MethodA3 : public BaseClass { <nl> + private : <nl> + void BaseClassMustBeDerivedFromService ( const Service * service ) { } <nl> + public : <nl> + WithGenericMethod_MethodA3 ( ) { <nl> + : : grpc : : Service : : MarkMethodGeneric ( 2 ) ; <nl> + } <nl> + ~ WithGenericMethod_MethodA3 ( ) override { <nl> + BaseClassMustBeDerivedFromService ( this ) ; <nl> + } <nl> + / / disable synchronous version of this method <nl> + : : grpc : : Status MethodA3 ( : : grpc : : ServerContext * context , const : : grpc : : testing : : Request * request , : : grpc : : ServerWriter < : : grpc : : testing : : Response > * writer ) final override { <nl> + abort ( ) ; <nl> + return : : grpc : : Status ( : : grpc : : StatusCode : : UNIMPLEMENTED , " " ) ; <nl> + } <nl> + } ; <nl> + template < class BaseClass > <nl> + class WithGenericMethod_MethodA4 : public BaseClass { <nl> + private : <nl> + void BaseClassMustBeDerivedFromService ( const Service * service ) { } <nl> + public : <nl> + WithGenericMethod_MethodA4 ( ) { <nl> + : : grpc : : Service : : MarkMethodGeneric ( 3 ) ; <nl> + } <nl> + ~ WithGenericMethod_MethodA4 ( ) override { <nl> + BaseClassMustBeDerivedFromService ( this ) ; <nl> + } <nl> + / / disable synchronous version of this method <nl> + : : grpc : : Status MethodA4 ( : : grpc : : ServerContext * context , : : grpc : : ServerReaderWriter < : : grpc : : testing : : Response , : : grpc : : testing : : Request > * stream ) final override { <nl> + abort ( ) ; <nl> + return : : grpc : : Status ( : : grpc : : StatusCode : : UNIMPLEMENTED , " " ) ; <nl> + } <nl> + } ; <nl> + template < class BaseClass > <nl> class WithStreamedUnaryMethod_MethodA1 : public BaseClass { <nl> private : <nl> void BaseClassMustBeDerivedFromService ( const Service * service ) { } <nl> class ServiceA final { <nl> virtual : : grpc : : Status StreamedMethodA1 ( : : grpc : : ServerContext * context , : : grpc : : 
ServerUnaryStreamer < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * server_unary_streamer ) = 0 ; <nl> } ; <nl> typedef WithStreamedUnaryMethod_MethodA1 < Service > StreamedUnaryService ; <nl> - typedef Service SplitStreamedService ; <nl> - typedef WithStreamedUnaryMethod_MethodA1 < Service > StreamedService ; <nl> + template < class BaseClass > <nl> + class WithSplitStreamingMethod_MethodA3 : public BaseClass { <nl> + private : <nl> + void BaseClassMustBeDerivedFromService ( const Service * service ) { } <nl> + public : <nl> + WithSplitStreamingMethod_MethodA3 ( ) { <nl> + : : grpc : : Service : : MarkMethodStreamed ( 2 , <nl> + new : : grpc : : SplitServerStreamingHandler < : : grpc : : testing : : Request , : : grpc : : testing : : Response > ( std : : bind ( & WithSplitStreamingMethod_MethodA3 < BaseClass > : : StreamedMethodA3 , this , std : : placeholders : : _1 , std : : placeholders : : _2 ) ) ) ; <nl> + } <nl> + ~ WithSplitStreamingMethod_MethodA3 ( ) override { <nl> + BaseClassMustBeDerivedFromService ( this ) ; <nl> + } <nl> + / / disable regular version of this method <nl> + : : grpc : : Status MethodA3 ( : : grpc : : ServerContext * context , const : : grpc : : testing : : Request * request , : : grpc : : ServerWriter < : : grpc : : testing : : Response > * writer ) final override { <nl> + abort ( ) ; <nl> + return : : grpc : : Status ( : : grpc : : StatusCode : : UNIMPLEMENTED , " " ) ; <nl> + } <nl> + / / replace default version of method with split streamed <nl> + virtual : : grpc : : Status StreamedMethodA3 ( : : grpc : : ServerContext * context , : : grpc : : ServerSplitStreamer < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * server_split_streamer ) = 0 ; <nl> + } ; <nl> + typedef WithSplitStreamingMethod_MethodA3 < Service > SplitStreamedService ; <nl> + typedef WithStreamedUnaryMethod_MethodA1 < WithSplitStreamingMethod_MethodA3 < Service > > StreamedService ; <nl> } ; <nl> <nl> / / ServiceB leading comment 1 <nl> new file mode 100644 <nl> index 00000000000 . . 8e4b4d59112 <nl> mmm / dev / null <nl> ppp b / test / cpp / codegen / compiler_test_mock_golden <nl> <nl> + / / Generated by the gRPC C + + plugin . <nl> + / / If you make any local change , they will be lost . <nl> + / / source : src / proto / grpc / testing / compiler_test . proto <nl> + <nl> + # include " src / proto / grpc / testing / compiler_test . pb . h " <nl> + # include " src / proto / grpc / testing / compiler_test . grpc . pb . h " <nl> + <nl> + # include < grpc + + / impl / codegen / async_stream . h > <nl> + # include < grpc + + / impl / codegen / sync_stream . h > <nl> + # include < gmock / gmock . 
h > <nl> + namespace grpc { <nl> + namespace testing { <nl> + <nl> + class MockServiceAStub : public ServiceA : : StubInterface { <nl> + public : <nl> + MOCK_METHOD3 ( MethodA1 , : : grpc : : Status ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : testing : : Response * response ) ) ; <nl> + MOCK_METHOD3 ( AsyncMethodA1Raw , : : grpc : : ClientAsyncResponseReaderInterface < : : grpc : : testing : : Response > * ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq ) ) ; <nl> + MOCK_METHOD2 ( MethodA2Raw , : : grpc : : ClientWriterInterface < : : grpc : : testing : : Request > * ( : : grpc : : ClientContext * context , : : grpc : : testing : : Response * response ) ) ; <nl> + MOCK_METHOD4 ( AsyncMethodA2Raw , : : grpc : : ClientAsyncWriterInterface < : : grpc : : testing : : Request > * ( : : grpc : : ClientContext * context , : : grpc : : testing : : Response * response , : : grpc : : CompletionQueue * cq , void * tag ) ) ; <nl> + MOCK_METHOD2 ( MethodA3Raw , : : grpc : : ClientReaderInterface < : : grpc : : testing : : Response > * ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request ) ) ; <nl> + MOCK_METHOD4 ( AsyncMethodA3Raw , : : grpc : : ClientAsyncReaderInterface < : : grpc : : testing : : Response > * ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq , void * tag ) ) ; <nl> + MOCK_METHOD1 ( MethodA4Raw , : : grpc : : ClientReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * ( : : grpc : : ClientContext * context ) ) ; <nl> + MOCK_METHOD3 ( AsyncMethodA4Raw , : : grpc : : ClientAsyncReaderWriterInterface < : : grpc : : testing : : Request , : : grpc : : testing : : Response > * ( : : grpc : : ClientContext * context , : : grpc : : CompletionQueue * cq , void * tag ) ) ; <nl> + } ; <nl> + <nl> + class MockServiceBStub : public ServiceB : : StubInterface { <nl> + public : <nl> + MOCK_METHOD3 ( MethodB1 , : : grpc : : Status ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : testing : : Response * response ) ) ; <nl> + MOCK_METHOD3 ( AsyncMethodB1Raw , : : grpc : : ClientAsyncResponseReaderInterface < : : grpc : : testing : : Response > * ( : : grpc : : ClientContext * context , const : : grpc : : testing : : Request & request , : : grpc : : CompletionQueue * cq ) ) ; <nl> + } ; <nl> + <nl> + } / / namespace grpc <nl> + } / / namespace testing <nl> + <nl> mmm a / test / cpp / codegen / golden_file_test . cc <nl> ppp b / test / cpp / codegen / golden_file_test . cc <nl> <nl> # include < gflags / gflags . h > <nl> # include < gtest / gtest . h > <nl> <nl> - DEFINE_string ( generated_file_path , " " , <nl> - " path to the generated compiler_test . grpc . pb . h file " ) ; <nl> + DEFINE_string ( <nl> + generated_file_path , " " , <nl> + " path to the directory containing generated files compiler_test . grpc . pb . h " <nl> + " and compiler_test_mock . grpc . pb . h " ) ; <nl> <nl> const char kGoldenFilePath [ ] = " test / cpp / codegen / compiler_test_golden " ; <nl> + const char kMockGoldenFilePath [ ] = " test / cpp / codegen / compiler_test_mock_golden " ; <nl> <nl> - TEST ( GoldenFileTest , TestGeneratedFile ) { <nl> - ASSERT_FALSE ( FLAGS_generated_file_path . 
empty ( ) ) ; <nl> - <nl> - std : : ifstream generated ( FLAGS_generated_file_path ) ; <nl> - std : : ifstream golden ( kGoldenFilePath ) ; <nl> + void run_test ( std : : basic_string < char > generated_file , <nl> + std : : basic_string < char > golden_file ) { <nl> + std : : ifstream generated ( generated_file ) ; <nl> + std : : ifstream golden ( golden_file ) ; <nl> <nl> ASSERT_TRUE ( generated . good ( ) ) ; <nl> ASSERT_TRUE ( golden . good ( ) ) ; <nl> TEST ( GoldenFileTest , TestGeneratedFile ) { <nl> golden . close ( ) ; <nl> } <nl> <nl> + TEST ( GoldenFileTest , TestGeneratedFile ) { <nl> + run_test ( FLAGS_generated_file_path + " compiler_test . grpc . pb . h " , <nl> + kGoldenFilePath ) ; <nl> + } <nl> + <nl> + TEST ( GoldenMockFileTest , TestGeneratedMockFile ) { <nl> + run_test ( FLAGS_generated_file_path + " compiler_test_mock . grpc . pb . h " , <nl> + kMockGoldenFilePath ) ; <nl> + } <nl> + <nl> int main ( int argc , char * * argv ) { <nl> : : testing : : InitGoogleTest ( & argc , argv ) ; <nl> : : google : : ParseCommandLineFlags ( & argc , & argv , true ) ; <nl> + if ( FLAGS_generated_file_path . empty ( ) ) { <nl> + FLAGS_generated_file_path = " gens / src / proto / grpc / testing / " ; <nl> + } <nl> + if ( FLAGS_generated_file_path . back ( ) ! = ' / ' ) <nl> + FLAGS_generated_file_path . append ( " / " ) ; <nl> return RUN_ALL_TESTS ( ) ; <nl> } <nl> mmm a / test / cpp / end2end / BUILD <nl> ppp b / test / cpp / end2end / BUILD <nl> cc_test ( <nl> " / / src / proto / grpc / testing : echo_messages_proto " , <nl> " / / src / proto / grpc / testing : echo_proto " , <nl> " / / src / proto / grpc / testing / duplicate : echo_duplicate_proto " , <nl> + " / / src / proto / grpc / health / v1 : health_proto " , <nl> " / / test / core / util : gpr_test_util " , <nl> " / / test / core / util : grpc_test_util " , <nl> " / / test / cpp / util : test_util " , <nl> mmm a / test / cpp / end2end / async_end2end_test . cc <nl> ppp b / test / cpp / end2end / async_end2end_test . cc <nl> <nl> # include < grpc + + / channel . h > <nl> # include < grpc + + / client_context . h > <nl> # include < grpc + + / create_channel . h > <nl> + # include < grpc + + / ext / health_check_service_server_builder_option . h > <nl> # include < grpc + + / server . h > <nl> # include < grpc + + / server_builder . h > <nl> # include < grpc + + / server_context . h > <nl> <nl> # include < gtest / gtest . h > <nl> <nl> # include " src / core / lib / iomgr / port . h " <nl> + # include " src / proto / grpc / health / v1 / health . grpc . pb . h " <nl> # include " src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . h " <nl> # include " src / proto / grpc / testing / echo . grpc . pb . h " <nl> # include " test / core / util / port . h " <nl> class ServerBuilderSyncPluginDisabler : public : : grpc : : ServerBuilderOption { <nl> <nl> class TestScenario { <nl> public : <nl> - TestScenario ( bool non_block , const grpc : : string & creds_type , <nl> + TestScenario ( bool non_block , const grpc : : string & creds_type , bool hcs , <nl> const grpc : : string & content ) <nl> : disable_blocking ( non_block ) , <nl> + health_check_service ( hcs ) , <nl> credentials_type ( creds_type ) , <nl> message_content ( content ) { } <nl> void Log ( ) const ; <nl> bool disable_blocking ; <nl> + bool health_check_service ; <nl> / / Although the below grpc : : string ' s are logically const , we can ' t declare <nl> / / them const because of a limitation in the way old compilers ( e . g . , gcc - 4 . 
4 ) <nl> / / manage vector insertion using a copy constructor <nl> static std : : ostream & operator < < ( std : : ostream & out , <nl> return out < < " TestScenario { disable_blocking = " <nl> < < ( scenario . disable_blocking ? " true " : " false " ) <nl> < < " , credentials = ' " < < scenario . credentials_type <nl> + < < " , health_check_service = " <nl> + < < ( scenario . health_check_service ? " true " : " false " ) <nl> < < " ' , message_size = " < < scenario . message_content . size ( ) < < " } " ; <nl> } <nl> <nl> void TestScenario : : Log ( ) const { <nl> gpr_log ( GPR_DEBUG , " % s " , out . str ( ) . c_str ( ) ) ; <nl> } <nl> <nl> + class HealthCheck : public health : : v1 : : Health : : Service { } ; <nl> + <nl> class AsyncEnd2endTest : public : : testing : : TestWithParam < TestScenario > { <nl> protected : <nl> AsyncEnd2endTest ( ) { GetParam ( ) . Log ( ) ; } <nl> class AsyncEnd2endTest : public : : testing : : TestWithParam < TestScenario > { <nl> GetParam ( ) . credentials_type ) ; <nl> builder . AddListeningPort ( server_address_ . str ( ) , server_creds ) ; <nl> builder . RegisterService ( & service_ ) ; <nl> + if ( GetParam ( ) . health_check_service ) { <nl> + builder . RegisterService ( & health_check_ ) ; <nl> + } <nl> cq_ = builder . AddCompletionQueue ( ) ; <nl> <nl> / / TODO ( zyc ) : make a test option to choose wheather sync plugins should be <nl> class AsyncEnd2endTest : public : : testing : : TestWithParam < TestScenario > { <nl> std : : unique_ptr < grpc : : testing : : EchoTestService : : Stub > stub_ ; <nl> std : : unique_ptr < Server > server_ ; <nl> grpc : : testing : : EchoTestService : : AsyncService service_ ; <nl> + HealthCheck health_check_ ; <nl> std : : ostringstream server_address_ ; <nl> int port_ ; <nl> <nl> std : : vector < TestScenario > CreateTestScenarios ( bool test_disable_blocking , <nl> messages . push_back ( big_msg ) ; <nl> } <nl> <nl> - for ( auto cred = credentials_types . begin ( ) ; cred ! = credentials_types . end ( ) ; <nl> - + + cred ) { <nl> - for ( auto msg = messages . begin ( ) ; msg ! = messages . end ( ) ; msg + + ) { <nl> - scenarios . emplace_back ( false , * cred , * msg ) ; <nl> - if ( test_disable_blocking ) { <nl> - scenarios . emplace_back ( true , * cred , * msg ) ; <nl> + for ( auto health_check_service : { false , true } ) { <nl> + for ( auto cred = credentials_types . begin ( ) ; cred ! = credentials_types . end ( ) ; <nl> + + + cred ) { <nl> + for ( auto msg = messages . begin ( ) ; msg ! = messages . end ( ) ; msg + + ) { <nl> + scenarios . emplace_back ( false , * cred , health_check_service , * msg ) ; <nl> + if ( test_disable_blocking ) { <nl> + scenarios . emplace_back ( true , * cred , health_check_service , * msg ) ; <nl> + } <nl> } <nl> } <nl> } <nl> mmm a / test / cpp / end2end / mock_test . cc <nl> ppp b / test / cpp / end2end / mock_test . cc <nl> <nl> # include < climits > <nl> # include < thread > <nl> <nl> + # include < gmock / gmock . h > <nl> # include < grpc + + / channel . h > <nl> # include < grpc + + / client_context . h > <nl> # include < grpc + + / create_channel . h > <nl> <nl> # include < grpc / support / time . h > <nl> # include < gtest / gtest . h > <nl> <nl> + # include < grpc + + / test / mock_stream . h > <nl> + <nl> # include " src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . h " <nl> # include " src / proto / grpc / testing / echo . grpc . pb . h " <nl> + # include " src / proto / grpc / testing / echo_mock . grpc . pb . 
h " <nl> # include " test / core / util / port . h " <nl> # include " test / core / util / test_config . h " <nl> <nl> + # include < iostream > <nl> + <nl> + using namespace std ; <nl> using grpc : : testing : : EchoRequest ; <nl> using grpc : : testing : : EchoResponse ; <nl> using grpc : : testing : : EchoTestService ; <nl> + using grpc : : testing : : MockClientReaderWriter ; <nl> using std : : chrono : : system_clock ; <nl> + using : : testing : : AtLeast ; <nl> + using : : testing : : SetArgPointee ; <nl> + using : : testing : : SaveArg ; <nl> + using : : testing : : _ ; <nl> + using : : testing : : Return ; <nl> + using : : testing : : Invoke ; <nl> + using : : testing : : WithArg ; <nl> + using : : testing : : DoAll ; <nl> <nl> namespace grpc { <nl> namespace testing { <nl> <nl> namespace { <nl> - template < class W , class R > <nl> - class MockClientReaderWriter final : public ClientReaderWriterInterface < W , R > { <nl> - public : <nl> - void WaitForInitialMetadata ( ) override { } <nl> - bool NextMessageSize ( uint32_t * sz ) override { <nl> - * sz = UINT_MAX ; <nl> - return true ; <nl> - } <nl> - bool Read ( R * msg ) override { return true ; } <nl> - bool Write ( const W & msg ) override { return true ; } <nl> - bool WritesDone ( ) override { return true ; } <nl> - Status Finish ( ) override { return Status : : OK ; } <nl> - } ; <nl> - template < > <nl> - class MockClientReaderWriter < EchoRequest , EchoResponse > final <nl> - : public ClientReaderWriterInterface < EchoRequest , EchoResponse > { <nl> - public : <nl> - MockClientReaderWriter ( ) : writes_done_ ( false ) { } <nl> - void WaitForInitialMetadata ( ) override { } <nl> - bool NextMessageSize ( uint32_t * sz ) override { <nl> - * sz = UINT_MAX ; <nl> - return true ; <nl> - } <nl> - bool Read ( EchoResponse * msg ) override { <nl> - if ( writes_done_ ) return false ; <nl> - msg - > set_message ( last_message_ ) ; <nl> - return true ; <nl> - } <nl> - <nl> - bool Write ( const EchoRequest & msg , WriteOptions options ) override { <nl> - gpr_log ( GPR_INFO , " mock recv msg % s " , msg . message ( ) . c_str ( ) ) ; <nl> - last_message_ = msg . message ( ) ; <nl> - return true ; <nl> - } <nl> - bool WritesDone ( ) override { <nl> - writes_done_ = true ; <nl> - return true ; <nl> - } <nl> - Status Finish ( ) override { return Status : : OK ; } <nl> - <nl> - private : <nl> - bool writes_done_ ; <nl> - grpc : : string last_message_ ; <nl> - } ; <nl> - <nl> - / / Mocked stub . <nl> - class MockStub : public EchoTestService : : StubInterface { <nl> - public : <nl> - MockStub ( ) { } <nl> - ~ MockStub ( ) { } <nl> - Status Echo ( ClientContext * context , const EchoRequest & request , <nl> - EchoResponse * response ) override { <nl> - response - > set_message ( request . 
message ( ) ) ; <nl> - return Status : : OK ; <nl> - } <nl> - Status Unimplemented ( ClientContext * context , const EchoRequest & request , <nl> - EchoResponse * response ) override { <nl> - return Status : : OK ; <nl> - } <nl> - <nl> - private : <nl> - ClientAsyncResponseReaderInterface < EchoResponse > * AsyncEchoRaw ( <nl> - ClientContext * context , const EchoRequest & request , <nl> - CompletionQueue * cq ) override { <nl> - return nullptr ; <nl> - } <nl> - ClientWriterInterface < EchoRequest > * RequestStreamRaw ( <nl> - ClientContext * context , EchoResponse * response ) override { <nl> - return nullptr ; <nl> - } <nl> - ClientAsyncWriterInterface < EchoRequest > * AsyncRequestStreamRaw ( <nl> - ClientContext * context , EchoResponse * response , CompletionQueue * cq , <nl> - void * tag ) override { <nl> - return nullptr ; <nl> - } <nl> - ClientReaderInterface < EchoResponse > * ResponseStreamRaw ( <nl> - ClientContext * context , const EchoRequest & request ) override { <nl> - return nullptr ; <nl> - } <nl> - ClientAsyncReaderInterface < EchoResponse > * AsyncResponseStreamRaw ( <nl> - ClientContext * context , const EchoRequest & request , CompletionQueue * cq , <nl> - void * tag ) override { <nl> - return nullptr ; <nl> - } <nl> - ClientReaderWriterInterface < EchoRequest , EchoResponse > * BidiStreamRaw ( <nl> - ClientContext * context ) override { <nl> - return new MockClientReaderWriter < EchoRequest , EchoResponse > ( ) ; <nl> - } <nl> - ClientAsyncReaderWriterInterface < EchoRequest , EchoResponse > * <nl> - AsyncBidiStreamRaw ( ClientContext * context , CompletionQueue * cq , <nl> - void * tag ) override { <nl> - return nullptr ; <nl> - } <nl> - ClientAsyncResponseReaderInterface < EchoResponse > * AsyncUnimplementedRaw ( <nl> - ClientContext * context , const EchoRequest & request , <nl> - CompletionQueue * cq ) override { <nl> - return nullptr ; <nl> - } <nl> - } ; <nl> - <nl> class FakeClient { <nl> public : <nl> explicit FakeClient ( EchoTestService : : StubInterface * stub ) : stub_ ( stub ) { } <nl> class FakeClient { <nl> EXPECT_TRUE ( s . ok ( ) ) ; <nl> } <nl> <nl> + void DoRequestStream ( ) { <nl> + EchoRequest request ; <nl> + EchoResponse response ; <nl> + <nl> + ClientContext context ; <nl> + grpc : : string msg ( " hello " ) ; <nl> + grpc : : string exp ( msg ) ; <nl> + <nl> + std : : unique_ptr < ClientWriterInterface < EchoRequest > > cstream = <nl> + stub_ - > RequestStream ( & context , & response ) ; <nl> + <nl> + request . set_message ( msg ) ; <nl> + EXPECT_TRUE ( cstream - > Write ( request ) ) ; <nl> + <nl> + msg = " , world " ; <nl> + request . set_message ( msg ) ; <nl> + exp . append ( msg ) ; <nl> + EXPECT_TRUE ( cstream - > Write ( request ) ) ; <nl> + <nl> + cstream - > WritesDone ( ) ; <nl> + Status s = cstream - > Finish ( ) ; <nl> + <nl> + EXPECT_EQ ( exp , response . message ( ) ) ; <nl> + EXPECT_TRUE ( s . ok ( ) ) ; <nl> + } <nl> + <nl> + void DoResponseStream ( ) { <nl> + EchoRequest request ; <nl> + EchoResponse response ; <nl> + request . set_message ( " hello world " ) ; <nl> + <nl> + ClientContext context ; <nl> + std : : unique_ptr < ClientReaderInterface < EchoResponse > > cstream = <nl> + stub_ - > ResponseStream ( & context , request ) ; <nl> + <nl> + grpc : : string exp = " " ; <nl> + EXPECT_TRUE ( cstream - > Read ( & response ) ) ; <nl> + exp . append ( response . message ( ) + " " ) ; <nl> + <nl> + EXPECT_TRUE ( cstream - > Read ( & response ) ) ; <nl> + exp . append ( response . 
message ( ) ) ; <nl> + <nl> + EXPECT_FALSE ( cstream - > Read ( & response ) ) ; <nl> + EXPECT_EQ ( request . message ( ) , exp ) ; <nl> + <nl> + Status s = cstream - > Finish ( ) ; <nl> + EXPECT_TRUE ( s . ok ( ) ) ; <nl> + } <nl> + <nl> void DoBidiStream ( ) { <nl> EchoRequest request ; <nl> EchoResponse response ; <nl> class TestServiceImpl : public EchoTestService : : Service { <nl> return Status : : OK ; <nl> } <nl> <nl> + Status RequestStream ( ServerContext * context , <nl> + ServerReader < EchoRequest > * reader , <nl> + EchoResponse * response ) override { <nl> + EchoRequest request ; <nl> + grpc : : string resp ( " " ) ; <nl> + while ( reader - > Read ( & request ) ) { <nl> + gpr_log ( GPR_INFO , " recv msg % s " , request . message ( ) . c_str ( ) ) ; <nl> + resp . append ( request . message ( ) ) ; <nl> + } <nl> + response - > set_message ( resp ) ; <nl> + return Status : : OK ; <nl> + } <nl> + <nl> + Status ResponseStream ( ServerContext * context , const EchoRequest * request , <nl> + ServerWriter < EchoResponse > * writer ) override { <nl> + EchoResponse response ; <nl> + vector < grpc : : string > tokens = split ( request - > message ( ) ) ; <nl> + for ( grpc : : string token : tokens ) { <nl> + response . set_message ( token ) ; <nl> + writer - > Write ( response ) ; <nl> + } <nl> + return Status : : OK ; <nl> + } <nl> + <nl> Status BidiStream ( <nl> ServerContext * context , <nl> ServerReaderWriter < EchoResponse , EchoRequest > * stream ) override { <nl> class TestServiceImpl : public EchoTestService : : Service { <nl> } <nl> return Status : : OK ; <nl> } <nl> + <nl> + private : <nl> + const vector < grpc : : string > split ( const grpc : : string & input ) { <nl> + grpc : : string buff ( " " ) ; <nl> + vector < grpc : : string > result ; <nl> + <nl> + for ( auto n : input ) { <nl> + if ( n ! = ' ' ) { <nl> + buff + = n ; <nl> + continue ; <nl> + } <nl> + if ( buff = = " " ) continue ; <nl> + result . push_back ( buff ) ; <nl> + buff = " " ; <nl> + } <nl> + if ( buff ! = " " ) result . push_back ( buff ) ; <nl> + <nl> + return result ; <nl> + } <nl> } ; <nl> <nl> class MockTest : public : : testing : : Test { <nl> TEST_F ( MockTest , SimpleRpc ) { <nl> ResetStub ( ) ; <nl> FakeClient client ( stub_ . get ( ) ) ; <nl> client . DoEcho ( ) ; <nl> - MockStub stub ; <nl> + MockEchoTestServiceStub stub ; <nl> + EchoResponse resp ; <nl> + resp . set_message ( " hello world " ) ; <nl> + EXPECT_CALL ( stub , Echo ( _ , _ , _ ) ) <nl> + . Times ( AtLeast ( 1 ) ) <nl> + . WillOnce ( DoAll ( SetArgPointee < 2 > ( resp ) , Return ( Status : : OK ) ) ) ; <nl> client . ResetStub ( & stub ) ; <nl> client . DoEcho ( ) ; <nl> } <nl> <nl> + TEST_F ( MockTest , ClientStream ) { <nl> + ResetStub ( ) ; <nl> + FakeClient client ( stub_ . get ( ) ) ; <nl> + client . DoRequestStream ( ) ; <nl> + <nl> + MockEchoTestServiceStub stub ; <nl> + auto w = new MockClientWriter < EchoRequest > ( ) ; <nl> + EchoResponse resp ; <nl> + resp . set_message ( " hello , world " ) ; <nl> + <nl> + EXPECT_CALL ( * w , Write ( _ , _ ) ) . Times ( 2 ) . WillRepeatedly ( Return ( true ) ) ; <nl> + EXPECT_CALL ( * w , WritesDone ( ) ) ; <nl> + EXPECT_CALL ( * w , Finish ( ) ) . WillOnce ( Return ( Status : : OK ) ) ; <nl> + <nl> + EXPECT_CALL ( stub , RequestStreamRaw ( _ , _ ) ) <nl> + . WillOnce ( DoAll ( SetArgPointee < 1 > ( resp ) , Return ( w ) ) ) ; <nl> + client . ResetStub ( & stub ) ; <nl> + client . 
DoRequestStream ( ) ; <nl> + } <nl> + <nl> + TEST_F ( MockTest , ServerStream ) { <nl> + ResetStub ( ) ; <nl> + FakeClient client ( stub_ . get ( ) ) ; <nl> + client . DoResponseStream ( ) ; <nl> + <nl> + MockEchoTestServiceStub stub ; <nl> + auto r = new MockClientReader < EchoResponse > ( ) ; <nl> + EchoResponse resp1 ; <nl> + resp1 . set_message ( " hello " ) ; <nl> + EchoResponse resp2 ; <nl> + resp2 . set_message ( " world " ) ; <nl> + <nl> + EXPECT_CALL ( * r , Read ( _ ) ) <nl> + . WillOnce ( DoAll ( SetArgPointee < 0 > ( resp1 ) , Return ( true ) ) ) <nl> + . WillOnce ( DoAll ( SetArgPointee < 0 > ( resp2 ) , Return ( true ) ) ) <nl> + . WillOnce ( Return ( false ) ) ; <nl> + EXPECT_CALL ( * r , Finish ( ) ) . WillOnce ( Return ( Status : : OK ) ) ; <nl> + <nl> + EXPECT_CALL ( stub , ResponseStreamRaw ( _ , _ ) ) . WillOnce ( Return ( r ) ) ; <nl> + <nl> + client . ResetStub ( & stub ) ; <nl> + client . DoResponseStream ( ) ; <nl> + } <nl> + <nl> + ACTION_P ( copy , msg ) { arg0 - > set_message ( msg - > message ( ) ) ; } <nl> + <nl> TEST_F ( MockTest , BidiStream ) { <nl> ResetStub ( ) ; <nl> FakeClient client ( stub_ . get ( ) ) ; <nl> client . DoBidiStream ( ) ; <nl> - MockStub stub ; <nl> + MockEchoTestServiceStub stub ; <nl> + auto rw = new MockClientReaderWriter < EchoRequest , EchoResponse > ( ) ; <nl> + EchoRequest msg ; <nl> + <nl> + EXPECT_CALL ( * rw , Write ( _ , _ ) ) <nl> + . Times ( 3 ) <nl> + . WillRepeatedly ( DoAll ( SaveArg < 0 > ( & msg ) , Return ( true ) ) ) ; <nl> + EXPECT_CALL ( * rw , Read ( _ ) ) <nl> + . WillOnce ( DoAll ( WithArg < 0 > ( copy ( & msg ) ) , Return ( true ) ) ) <nl> + . WillOnce ( DoAll ( WithArg < 0 > ( copy ( & msg ) ) , Return ( true ) ) ) <nl> + . WillOnce ( DoAll ( WithArg < 0 > ( copy ( & msg ) ) , Return ( true ) ) ) <nl> + . WillOnce ( Return ( false ) ) ; <nl> + EXPECT_CALL ( * rw , WritesDone ( ) ) ; <nl> + EXPECT_CALL ( * rw , Finish ( ) ) . WillOnce ( Return ( Status : : OK ) ) ; <nl> + <nl> + EXPECT_CALL ( stub , BidiStreamRaw ( _ ) ) . WillOnce ( Return ( rw ) ) ; <nl> client . ResetStub ( & stub ) ; <nl> client . DoBidiStream ( ) ; <nl> } <nl> mmm a / test / cpp / microbenchmarks / fullstack_fixtures . h <nl> ppp b / test / cpp / microbenchmarks / fullstack_fixtures . h <nl> class TCP : public FullstackFixture { <nl> public : <nl> TCP ( Service * service , const FixtureConfiguration & fixture_configuration = <nl> FixtureConfiguration ( ) ) <nl> - : FullstackFixture ( service , fixture_configuration , MakeAddress ( ) ) { } <nl> + : FullstackFixture ( service , fixture_configuration , MakeAddress ( & port_ ) ) { } <nl> + <nl> + ~ TCP ( ) { grpc_recycle_unused_port ( port_ ) ; } <nl> <nl> private : <nl> - static grpc : : string MakeAddress ( ) { <nl> - int port = grpc_pick_unused_port_or_die ( ) ; <nl> + int port_ ; <nl> + <nl> + static grpc : : string MakeAddress ( int * port ) { <nl> + * port = grpc_pick_unused_port_or_die ( ) ; <nl> std : : stringstream addr ; <nl> - addr < < " localhost : " < < port ; <nl> + addr < < " localhost : " < < * port ; <nl> return addr . 
str ( ) ; <nl> } <nl> } ; <nl> class UDS : public FullstackFixture { <nl> public : <nl> UDS ( Service * service , const FixtureConfiguration & fixture_configuration = <nl> FixtureConfiguration ( ) ) <nl> - : FullstackFixture ( service , fixture_configuration , MakeAddress ( ) ) { } <nl> + : FullstackFixture ( service , fixture_configuration , MakeAddress ( & port_ ) ) { } <nl> + <nl> + ~ UDS ( ) { grpc_recycle_unused_port ( port_ ) ; } <nl> <nl> private : <nl> - static grpc : : string MakeAddress ( ) { <nl> - int port = grpc_pick_unused_port_or_die ( ) ; / / just for a unique id - not a <nl> - / / real port <nl> + int port_ ; <nl> + <nl> + static grpc : : string MakeAddress ( int * port ) { <nl> + * port = grpc_pick_unused_port_or_die ( ) ; / / just for a unique id - not a <nl> + / / real port <nl> std : : stringstream addr ; <nl> - addr < < " unix : / tmp / bm_fullstack . " < < port ; <nl> + addr < < " unix : / tmp / bm_fullstack . " < < * port ; <nl> return addr . str ( ) ; <nl> } <nl> } ; <nl> mmm a / third_party / gtest . BUILD <nl> ppp b / third_party / gtest . BUILD <nl> cc_library ( <nl> name = " gtest " , <nl> srcs = [ <nl> " googletest / src / gtest - all . cc " , <nl> + " googlemock / src / gmock - all . cc " <nl> ] , <nl> - hdrs = glob ( [ " googletest / include / * * / * . h " , " googletest / src / * . cc " , " googletest / src / * . h " ] ) , <nl> + hdrs = glob ( [ " googletest / include / * * / * . h " , " googletest / src / * . cc " , " googletest / src / * . h " , " googlemock / include / * * / * . h " , " googlemock / src / * . cc " , " googlemock / src / * . h " ] ) , <nl> includes = [ <nl> " googletest " , <nl> " googletest / include " , <nl> + " googlemock " , <nl> + " googlemock / include " , <nl> ] , <nl> linkstatic = 1 , <nl> visibility = [ <nl> mmm a / tools / jenkins / run_performance . sh <nl> ppp b / tools / jenkins / run_performance . sh <nl> BENCHMARKS_TO_RUN = " bm_fullstack_unary_ping_pong bm_fullstack_streaming_ping_pong <nl> # Enter the gRPC repo root <nl> cd $ ( dirname $ 0 ) / . . / . . <nl> <nl> + tools / run_tests / start_port_server . py <nl> tools / profiling / microbenchmarks / bm_diff . py - d origin / $ ghprbTargetBranch - b $ BENCHMARKS_TO_RUN <nl> mmm a / tools / profiling / microbenchmarks / bm_diff . py <nl> ppp b / tools / profiling / microbenchmarks / bm_diff . py <nl> def eintr_be_gone ( fn ) : <nl> <nl> <nl> def read_json ( filename ) : <nl> - with open ( filename ) as f : return json . loads ( f . read ( ) ) <nl> + try : <nl> + with open ( filename ) as f : return json . loads ( f . read ( ) ) <nl> + except ValueError , e : <nl> + return None <nl> <nl> <nl> def finalize ( ) : <nl> def finalize ( ) : <nl> js_old_ctr = read_json ( ' % s . counters . old . % d . json ' % ( bm , loop ) ) <nl> js_old_opt = read_json ( ' % s . opt . old . % d . json ' % ( bm , loop ) ) <nl> <nl> - for row in bm_json . expand_json ( js_new_ctr , js_new_opt ) : <nl> - print row <nl> - name = row [ ' cpp_name ' ] <nl> - if name . endswith ( ' _mean ' ) or name . endswith ( ' _stddev ' ) : continue <nl> - benchmarks [ name ] . add_sample ( row , True ) <nl> - for row in bm_json . expand_json ( js_old_ctr , js_old_opt ) : <nl> - print row <nl> - name = row [ ' cpp_name ' ] <nl> - if name . endswith ( ' _mean ' ) or name . endswith ( ' _stddev ' ) : continue <nl> - benchmarks [ name ] . add_sample ( row , False ) <nl> + if js_new_ctr : <nl> + for row in bm_json . 
expand_json ( js_new_ctr , js_new_opt ) : <nl> + print row <nl> + name = row [ ' cpp_name ' ] <nl> + if name . endswith ( ' _mean ' ) or name . endswith ( ' _stddev ' ) : continue <nl> + benchmarks [ name ] . add_sample ( row , True ) <nl> + if js_old_ctr : <nl> + for row in bm_json . expand_json ( js_old_ctr , js_old_opt ) : <nl> + print row <nl> + name = row [ ' cpp_name ' ] <nl> + if name . endswith ( ' _mean ' ) or name . endswith ( ' _stddev ' ) : continue <nl> + benchmarks [ name ] . add_sample ( row , False ) <nl> <nl> really_interesting = set ( ) <nl> for name , bm in benchmarks . items ( ) : <nl> def finalize ( ) : <nl> text = ' Performance differences noted : \ n ' + tabulate . tabulate ( rows , headers = headers , floatfmt = ' + . 2f ' ) <nl> else : <nl> text = ' No significant performance differences ' <nl> - comment_on_pr . comment_on_pr ( ' ` ` ` \ n % s \ n ` ` ` ' % text ) <nl> print text <nl> + comment_on_pr . comment_on_pr ( ' ` ` ` \ n % s \ n ` ` ` ' % text ) <nl> <nl> <nl> eintr_be_gone ( finalize ) <nl> mmm a / tools / run_tests / generated / sources_and_headers . json <nl> ppp b / tools / run_tests / generated / sources_and_headers . json <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / control . grpc . pb . h " , <nl> " src / proto / grpc / testing / control . pb . h " , <nl> + " src / proto / grpc / testing / control_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / payloads . grpc . pb . h " , <nl> " src / proto / grpc / testing / payloads . pb . h " , <nl> + " src / proto / grpc / testing / payloads_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / services . grpc . pb . h " , <nl> " src / proto / grpc / testing / services . pb . h " , <nl> + " src / proto / grpc / testing / services_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / stats . grpc . pb . h " , <nl> - " src / proto / grpc / testing / stats . pb . h " <nl> + " src / proto / grpc / testing / stats . pb . h " , <nl> + " src / proto / grpc / testing / stats_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / control . grpc . pb . h " , <nl> " src / proto / grpc / testing / control . pb . h " , <nl> + " src / proto / grpc / testing / control_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / payloads . grpc . pb . h " , <nl> " src / proto / grpc / testing / payloads . pb . h " , <nl> + " src / proto / grpc / testing / payloads_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / services . grpc . pb . h " , <nl> " src / proto / grpc / testing / services . pb . h " , <nl> + " src / proto / grpc / testing / services_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / stats . grpc . pb . h " , <nl> - " src / proto / grpc / testing / stats . pb . h " <nl> + " src / proto / grpc / testing / stats . pb . h " , <nl> + " src / proto / grpc / testing / stats_mock . grpc . pb . 
h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> ] , <nl> " headers " : [ <nl> " src / proto / grpc / testing / echo_messages . grpc . pb . h " , <nl> - " src / proto / grpc / testing / echo_messages . pb . h " <nl> + " src / proto / grpc / testing / echo_messages . pb . h " , <nl> + " src / proto / grpc / testing / echo_messages_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> ] , <nl> " headers " : [ <nl> " src / proto / grpc / testing / compiler_test . grpc . pb . h " , <nl> - " src / proto / grpc / testing / compiler_test . pb . h " <nl> + " src / proto / grpc / testing / compiler_test . pb . h " , <nl> + " src / proto / grpc / testing / compiler_test_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " src / proto / grpc / testing / echo . grpc . pb . h " , <nl> " src / proto / grpc / testing / echo . pb . h " , <nl> " src / proto / grpc / testing / echo_messages . grpc . pb . h " , <nl> - " src / proto / grpc / testing / echo_messages . pb . h " <nl> + " src / proto / grpc / testing / echo_messages . pb . h " , <nl> + " src / proto / grpc / testing / echo_messages_mock . grpc . pb . h " , <nl> + " src / proto / grpc / testing / echo_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> ] , <nl> " headers " : [ <nl> " src / proto / grpc / lb / v1 / load_balancer . grpc . pb . h " , <nl> - " src / proto / grpc / lb / v1 / load_balancer . pb . h " <nl> + " src / proto / grpc / lb / v1 / load_balancer . pb . h " , <nl> + " src / proto / grpc / lb / v1 / load_balancer_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> ] , <nl> " headers " : [ <nl> " src / proto / grpc / lb / v1 / load_balancer . grpc . pb . h " , <nl> - " src / proto / grpc / lb / v1 / load_balancer . pb . h " <nl> + " src / proto / grpc / lb / v1 / load_balancer . pb . h " , <nl> + " src / proto / grpc / lb / v1 / load_balancer_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / metrics . grpc . pb . h " , <nl> " src / proto / grpc / testing / metrics . pb . h " , <nl> + " src / proto / grpc / testing / metrics_mock . grpc . pb . h " , <nl> " test / cpp / util / metrics_server . h " <nl> ] , <nl> " is_filegroup " : false , <nl> <nl> " grpc + + _test_util " , <nl> " grpc_test_util " <nl> ] , <nl> - " headers " : [ ] , <nl> + " headers " : [ <nl> + " include / grpc + + / test / mock_stream . h " <nl> + ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> " name " : " mock_test " , <nl> " src " : [ <nl> + " include / grpc + + / test / mock_stream . h " , <nl> " test / cpp / end2end / mock_test . cc " <nl> ] , <nl> " third_party " : false , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / empty . grpc . pb . h " , <nl> " src / proto / grpc / testing / empty . pb . h " , <nl> + " src / proto / grpc / testing / empty_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . grpc . pb . h " , <nl> - " src / proto / grpc / testing / test . pb . h " <nl> + " src / proto / grpc / testing / test . pb . 
h " , <nl> + " src / proto / grpc / testing / test_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / empty . grpc . pb . h " , <nl> " src / proto / grpc / testing / empty . pb . h " , <nl> + " src / proto / grpc / testing / empty_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . grpc . pb . h " , <nl> - " src / proto / grpc / testing / test . pb . h " <nl> + " src / proto / grpc / testing / test . pb . h " , <nl> + " src / proto / grpc / testing / test_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " src / proto / grpc / testing / echo . grpc . pb . h " , <nl> " src / proto / grpc / testing / echo . pb . h " , <nl> " src / proto / grpc / testing / echo_messages . grpc . pb . h " , <nl> - " src / proto / grpc / testing / echo_messages . pb . h " <nl> + " src / proto / grpc / testing / echo_messages . pb . h " , <nl> + " src / proto / grpc / testing / echo_messages_mock . grpc . pb . h " , <nl> + " src / proto / grpc / testing / echo_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / empty . grpc . pb . h " , <nl> " src / proto / grpc / testing / empty . pb . h " , <nl> + " src / proto / grpc / testing / empty_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / metrics . grpc . pb . h " , <nl> " src / proto / grpc / testing / metrics . pb . h " , <nl> + " src / proto / grpc / testing / metrics_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . pb . h " , <nl> + " src / proto / grpc / testing / test_mock . grpc . pb . h " , <nl> " test / cpp / interop / client_helper . h " , <nl> " test / cpp / interop / interop_client . h " , <nl> " test / cpp / interop / stress_interop_client . h " , <nl> <nl> " headers " : [ <nl> " include / grpc + + / support / error_details . h " , <nl> " src / proto / grpc / status / status . grpc . pb . h " , <nl> - " src / proto / grpc / status / status . pb . h " <nl> + " src / proto / grpc / status / status . pb . h " , <nl> + " src / proto / grpc / status / status_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / health / v1 / health . grpc . pb . h " , <nl> " src / proto / grpc / health / v1 / health . pb . h " , <nl> + " src / proto / grpc / health / v1 / health_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . h " , <nl> " src / proto / grpc / testing / duplicate / echo_duplicate . pb . h " , <nl> + " src / proto / grpc / testing / duplicate / echo_duplicate_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / echo . grpc . pb . h " , <nl> " src / proto / grpc / testing / echo . pb . h " , <nl> " src / proto / grpc / testing / echo_messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / echo_messages . pb . 
h " , <nl> + " src / proto / grpc / testing / echo_messages_mock . grpc . pb . h " , <nl> + " src / proto / grpc / testing / echo_mock . grpc . pb . h " , <nl> " test / cpp / end2end / test_service_impl . h " , <nl> " test / cpp / util / byte_buffer_proto_helper . h " , <nl> " test / cpp / util / create_test_channel . h " , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / empty . grpc . pb . h " , <nl> " src / proto / grpc / testing / empty . pb . h " , <nl> + " src / proto / grpc / testing / empty_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . pb . h " , <nl> + " src / proto / grpc / testing / test_mock . grpc . pb . h " , <nl> " test / cpp / interop / http2_client . h " <nl> ] , <nl> " is_filegroup " : false , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " test / cpp / interop / client_helper . h " <nl> ] , <nl> " is_filegroup " : false , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / empty . grpc . pb . h " , <nl> " src / proto / grpc / testing / empty . pb . h " , <nl> + " src / proto / grpc / testing / empty_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . pb . h " , <nl> + " src / proto / grpc / testing / test_mock . grpc . pb . h " , <nl> " test / cpp / interop / interop_client . h " <nl> ] , <nl> " is_filegroup " : false , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / empty . grpc . pb . h " , <nl> " src / proto / grpc / testing / empty . pb . h " , <nl> + " src / proto / grpc / testing / empty_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / test . grpc . pb . h " , <nl> - " src / proto / grpc / testing / test . pb . h " <nl> + " src / proto / grpc / testing / test . pb . h " , <nl> + " src / proto / grpc / testing / test_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> <nl> " headers " : [ <nl> " src / proto / grpc / testing / control . grpc . pb . h " , <nl> " src / proto / grpc / testing / control . pb . h " , <nl> + " src / proto / grpc / testing / control_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . grpc . pb . h " , <nl> " src / proto / grpc / testing / messages . pb . h " , <nl> + " src / proto / grpc / testing / messages_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / payloads . grpc . pb . h " , <nl> " src / proto / grpc / testing / payloads . pb . h " , <nl> + " src / proto / grpc / testing / payloads_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / services . grpc . pb . h " , <nl> " src / proto / grpc / testing / services . pb . 
h " , <nl> + " src / proto / grpc / testing / services_mock . grpc . pb . h " , <nl> " src / proto / grpc / testing / stats . grpc . pb . h " , <nl> " src / proto / grpc / testing / stats . pb . h " , <nl> + " src / proto / grpc / testing / stats_mock . grpc . pb . h " , <nl> " test / cpp / qps / benchmark_config . h " , <nl> " test / cpp / qps / client . h " , <nl> " test / cpp / qps / driver . h " , <nl> <nl> " deps " : [ ] , <nl> " headers " : [ <nl> " src / proto / grpc / reflection / v1alpha / reflection . grpc . pb . h " , <nl> - " src / proto / grpc / reflection / v1alpha / reflection . pb . h " <nl> + " src / proto / grpc / reflection / v1alpha / reflection . pb . h " , <nl> + " src / proto / grpc / reflection / v1alpha / reflection_mock . grpc . pb . h " <nl> ] , <nl> " is_filegroup " : true , <nl> " language " : " c + + " , <nl> <nl> " grpc + + " <nl> ] , <nl> " headers " : [ <nl> + " include / grpc + + / test / mock_stream . h " , <nl> " include / grpc + + / test / server_context_test_spouse . h " <nl> ] , <nl> " is_filegroup " : true , <nl> " language " : " c + + " , <nl> " name " : " grpc + + _test " , <nl> " src " : [ <nl> + " include / grpc + + / test / mock_stream . h " , <nl> " include / grpc + + / test / server_context_test_spouse . h " <nl> ] , <nl> " third_party " : false , <nl> mmm a / tools / run_tests / generated / tests . json <nl> ppp b / tools / run_tests / generated / tests . json <nl> <nl> } , <nl> { <nl> " args " : [ <nl> - " - - generated_file_path = gens / src / proto / grpc / testing / compiler_test . grpc . pb . h " <nl> + " - - generated_file_path = gens / src / proto / grpc / testing / " <nl> ] , <nl> " ci_platforms " : [ <nl> " linux " , <nl> mmm a / tools / run_tests / python_utils / port_server . py <nl> ppp b / tools / run_tests / python_utils / port_server . py <nl> <nl> from __future__ import print_function <nl> <nl> import argparse <nl> - from six . moves import BaseHTTPServer <nl> + from BaseHTTPServer import HTTPServer , BaseHTTPRequestHandler <nl> import hashlib <nl> import os <nl> import socket <nl> import sys <nl> import time <nl> + from SocketServer import ThreadingMixIn <nl> + import threading <nl> <nl> <nl> # increment this number whenever making a change to ensure that <nl> # the changes are picked up by running CI servers <nl> # note that all changes must be backwards compatible <nl> - _MY_VERSION = 9 <nl> + _MY_VERSION = 14 <nl> <nl> <nl> if len ( sys . argv ) = = 2 and sys . argv [ 1 ] = = ' dump_version ' : <nl> <nl> <nl> pool = [ ] <nl> in_use = { } <nl> + mu = threading . Lock ( ) <nl> <nl> <nl> def refill_pool ( max_timeout , req ) : <nl> def refill_pool ( max_timeout , req ) : <nl> def allocate_port ( req ) : <nl> global pool <nl> global in_use <nl> + global mu <nl> + mu . acquire ( ) <nl> max_timeout = 600 <nl> while not pool : <nl> refill_pool ( max_timeout , req ) <nl> if not pool : <nl> req . log_message ( " failed to find ports : retrying soon " ) <nl> + mu . release ( ) <nl> time . sleep ( 1 ) <nl> + mu . acquire ( ) <nl> max_timeout / = 2 <nl> port = pool [ 0 ] <nl> pool = pool [ 1 : ] <nl> in_use [ port ] = time . time ( ) <nl> + mu . release ( ) <nl> return port <nl> <nl> <nl> keep_running = True <nl> <nl> <nl> - class Handler ( BaseHTTPServer . BaseHTTPRequestHandler ) : <nl> + class Handler ( BaseHTTPRequestHandler ) : <nl> <nl> def setup ( self ) : <nl> # If the client is unreachable for 5 seconds , close the connection <nl> self . timeout = 5 <nl> - BaseHTTPServer . BaseHTTPRequestHandler . 
setup ( self ) <nl> + BaseHTTPRequestHandler . setup ( self ) <nl> <nl> def do_GET ( self ) : <nl> global keep_running <nl> def do_GET ( self ) : <nl> elif self . path = = ' / quitquitquit ' : <nl> self . send_response ( 200 ) <nl> self . end_headers ( ) <nl> - keep_running = False <nl> + self . server . shutdown ( ) <nl> <nl> + class ThreadedHTTPServer ( ThreadingMixIn , HTTPServer ) : <nl> + " " " Handle requests in a separate thread " " " <nl> <nl> - httpd = BaseHTTPServer . HTTPServer ( ( ' ' , args . port ) , Handler ) <nl> - while keep_running : <nl> - httpd . handle_request ( ) <nl> - sys . stderr . flush ( ) <nl> <nl> - print ( ' done ' ) <nl> + ThreadedHTTPServer ( ( ' ' , args . port ) , Handler ) . serve_forever ( ) <nl> + <nl> mmm a / vsprojects / vcxproj / test / mock_test / mock_test . vcxproj <nl> ppp b / vsprojects / vcxproj / test / mock_test / mock_test . vcxproj <nl> <nl> < / Link > <nl> < / ItemDefinitionGroup > <nl> <nl> + < ItemGroup > <nl> + < ClInclude Include = " $ ( SolutionDir ) \ . . \ include \ grpc + + \ test \ mock_stream . h " / > <nl> + < / ItemGroup > <nl> < ItemGroup > <nl> < ClCompile Include = " $ ( SolutionDir ) \ . . \ test \ cpp \ end2end \ mock_test . cc " > <nl> < / ClCompile > <nl> mmm a / vsprojects / vcxproj / test / mock_test / mock_test . vcxproj . filters <nl> ppp b / vsprojects / vcxproj / test / mock_test / mock_test . vcxproj . filters <nl> <nl> < Filter > test \ cpp \ end2end < / Filter > <nl> < / ClCompile > <nl> < / ItemGroup > <nl> + < ItemGroup > <nl> + < ClInclude Include = " $ ( SolutionDir ) \ . . \ include \ grpc + + \ test \ mock_stream . h " > <nl> + < Filter > include \ grpc + + \ test < / Filter > <nl> + < / ClInclude > <nl> + < / ItemGroup > <nl> <nl> < ItemGroup > <nl> + < Filter Include = " include " > <nl> + < UniqueIdentifier > { b827d6d2 - cfa5 - 2dd4 - 6ebc - afcccd5e8e0c } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " include \ grpc + + " > <nl> + < UniqueIdentifier > { 28289e8f - b68e - b9f5 - 7680 - c15d77b574a5 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " include \ grpc + + \ test " > <nl> + < UniqueIdentifier > { 4a7b43be - c730 - 6221 - d190 - e394521f9ae7 } < / UniqueIdentifier > <nl> + < / Filter > <nl> < Filter Include = " test " > <nl> < UniqueIdentifier > { 69c257a2 - 3e4c - a86e - ce0d - 1a97b237d294 } < / UniqueIdentifier > <nl> < / Filter > <nl>
Merge github.com:grpc/grpc into trickle_stall
grpc/grpc
ff54f5b15073a16e1270147351aaaefa34112920
2017-04-26T17:38:15Z
mmm a / src / core / ext / filters / client_channel / lb_policy / xds / xds_routing . cc <nl> ppp b / src / core / ext / filters / client_channel / lb_policy / xds / xds_routing . cc <nl> class XdsRoutingLb : public LoadBalancingPolicy { <nl> <nl> XdsRoutingLb : : PickResult XdsRoutingLb : : RoutePicker : : Pick ( PickArgs args ) { <nl> absl : : string_view path ; <nl> + / / TODO ( roth ) : Using const auto & here trigger a warning in a macos or windows <nl> + / / build : <nl> + / / * ( args . initial_metadata ) is returning values not references . <nl> for ( const auto p : * ( args . initial_metadata ) ) { <nl> if ( p . first = = " : path " ) { <nl> path = p . second ; <nl> mmm a / src / core / ext / filters / client_channel / xds / xds_api . cc <nl> ppp b / src / core / ext / filters / client_channel / xds / xds_api . cc <nl> grpc_error * RouteConfigParse ( <nl> if ( envoy_api_v2_route_RouteMatch_has_prefix ( match ) ) { <nl> upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix ( match ) ; <nl> if ( prefix . size > 0 ) { <nl> + if ( prefix . data [ 0 ] ! = ' / ' ) { <nl> + return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> + " Prefix is not starting with a / " ) ; <nl> + } <nl> std : : vector < absl : : string_view > prefix_elements = absl : : StrSplit ( <nl> absl : : string_view ( prefix . data , prefix . size ) . substr ( 1 ) , ' / ' ) ; <nl> if ( prefix_elements . size ( ) ! = 2 ) { <nl> return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> " Prefix not in the required format of / service / " ) ; <nl> + } else if ( ! prefix_elements [ 1 ] . empty ( ) ) { <nl> + return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> + " Prefix is not ending with a / " ) ; <nl> + } else if ( prefix_elements [ 0 ] . empty ( ) ) { <nl> + return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " Prefix cannot be empty " ) ; <nl> } <nl> rds_route . service = std : : string ( prefix_elements [ 0 ] ) ; <nl> } <nl> grpc_error * RouteConfigParse ( <nl> return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> " Path if set cannot be empty " ) ; <nl> } <nl> + if ( path . data [ 0 ] ! = ' / ' ) { <nl> + return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> + " Path is not starting with a / " ) ; <nl> + } <nl> std : : vector < absl : : string_view > path_elements = absl : : StrSplit ( <nl> absl : : string_view ( path . data , path . size ) . substr ( 1 ) , ' / ' ) ; <nl> if ( path_elements . size ( ) ! = 2 ) { <nl> return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> " Path not in the required format of / service / method " ) ; <nl> + } else if ( path_elements [ 0 ] . empty ( ) ) { <nl> + return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> + " Path service name cannot be empty " ) ; <nl> + } else if ( path_elements [ 1 ] . empty ( ) ) { <nl> + return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> + " Path method name cannot be empty " ) ; <nl> } <nl> rds_route . service = std : : string ( path_elements [ 0 ] ) ; <nl> rds_route . method = std : : string ( path_elements [ 1 ] ) ; <nl> mmm a / test / cpp / end2end / test_service_impl . cc <nl> ppp b / test / cpp / end2end / test_service_impl . cc <nl> experimental : : ServerUnaryReactor * CallbackTestServiceImpl : : Echo ( <nl> error . error_message ( ) , error . binary_error_details ( ) ) ) ; <nl> return ; <nl> } <nl> - int server_try_cancel = GetIntValueFromMetadata ( <nl> + int server_try_cancel = internal : : GetIntValueFromMetadata ( <nl> kServerTryCancelRequest , ctx_ - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> if ( server_try_cancel ! 
= DO_NOT_CANCEL ) { <nl> / / Since this is a unary RPC , by the time this server handler is called , <nl> experimental : : ServerUnaryReactor * CallbackTestServiceImpl : : Echo ( <nl> } <nl> gpr_log ( GPR_DEBUG , " Request message was % s " , req_ - > message ( ) . c_str ( ) ) ; <nl> resp_ - > set_message ( req_ - > message ( ) ) ; <nl> - MaybeEchoDeadline ( ctx_ , req_ , resp_ ) ; <nl> + internal : : MaybeEchoDeadline ( ctx_ , req_ , resp_ ) ; <nl> if ( service_ - > host_ ) { <nl> resp_ - > mutable_param ( ) - > set_host ( * service_ - > host_ ) ; <nl> } <nl> experimental : : ServerUnaryReactor * CallbackTestServiceImpl : : Echo ( <nl> if ( req_ - > has_param ( ) & & <nl> ( req_ - > param ( ) . expected_client_identity ( ) . length ( ) > 0 | | <nl> req_ - > param ( ) . check_auth_context ( ) ) ) { <nl> - CheckServerAuthContext ( ctx_ , <nl> - req_ - > param ( ) . expected_transport_security_type ( ) , <nl> - req_ - > param ( ) . expected_client_identity ( ) ) ; <nl> + internal : : CheckServerAuthContext ( <nl> + ctx_ , req_ - > param ( ) . expected_transport_security_type ( ) , <nl> + req_ - > param ( ) . expected_client_identity ( ) ) ; <nl> } <nl> if ( req_ - > has_param ( ) & & req_ - > param ( ) . response_message_length ( ) > 0 ) { <nl> resp_ - > set_message ( <nl> CallbackTestServiceImpl : : CheckClientInitialMetadata ( <nl> class Reactor : public : : grpc : : experimental : : ServerUnaryReactor { <nl> public : <nl> explicit Reactor ( experimental : : CallbackServerContext * ctx ) { <nl> - EXPECT_EQ ( MetadataMatchCount ( ctx - > client_metadata ( ) , <nl> - kCheckClientInitialMetadataKey , <nl> - kCheckClientInitialMetadataVal ) , <nl> + EXPECT_EQ ( internal : : MetadataMatchCount ( ctx - > client_metadata ( ) , <nl> + kCheckClientInitialMetadataKey , <nl> + kCheckClientInitialMetadataVal ) , <nl> 1 ) ; <nl> EXPECT_EQ ( ctx - > client_metadata ( ) . 
count ( kCheckClientInitialMetadataKey ) , <nl> 1u ) ; <nl> CallbackTestServiceImpl : : RequestStream ( <nl> / / is cancelled while the server is reading messages from the client <nl> / / CANCEL_AFTER_PROCESSING : The RPC is cancelled after the server reads <nl> / / all the messages from the client <nl> - int server_try_cancel = GetIntValueFromMetadata ( <nl> + int server_try_cancel = internal : : GetIntValueFromMetadata ( <nl> kServerTryCancelRequest , context - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> if ( server_try_cancel = = CANCEL_BEFORE_PROCESSING ) { <nl> ServerTryCancelNonblocking ( context ) ; <nl> CallbackTestServiceImpl : : ResponseStream ( <nl> / / is cancelled while the server is reading messages from the client <nl> / / CANCEL_AFTER_PROCESSING : The RPC is cancelled after the server reads <nl> / / all the messages from the client <nl> - int server_try_cancel = GetIntValueFromMetadata ( <nl> + int server_try_cancel = internal : : GetIntValueFromMetadata ( <nl> kServerTryCancelRequest , context - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> if ( server_try_cancel = = CANCEL_BEFORE_PROCESSING ) { <nl> ServerTryCancelNonblocking ( context ) ; <nl> CallbackTestServiceImpl : : ResponseStream ( <nl> Reactor ( experimental : : CallbackServerContext * ctx , <nl> const EchoRequest * request , int server_try_cancel ) <nl> : ctx_ ( ctx ) , request_ ( request ) , server_try_cancel_ ( server_try_cancel ) { <nl> - server_coalescing_api_ = GetIntValueFromMetadata ( <nl> + server_coalescing_api_ = internal : : GetIntValueFromMetadata ( <nl> kServerUseCoalescingApi , ctx - > client_metadata ( ) , 0 ) ; <nl> - server_responses_to_send_ = GetIntValueFromMetadata ( <nl> + server_responses_to_send_ = internal : : GetIntValueFromMetadata ( <nl> kServerResponseStreamsToSend , ctx - > client_metadata ( ) , <nl> kServerDefaultResponseStreamsToSend ) ; <nl> if ( server_try_cancel_ = = CANCEL_DURING_PROCESSING ) { <nl> CallbackTestServiceImpl : : BidiStream ( <nl> / / is cancelled while the server is reading messages from the client <nl> / / CANCEL_AFTER_PROCESSING : The RPC is cancelled after the server reads <nl> / / all the messages from the client <nl> - server_try_cancel_ = GetIntValueFromMetadata ( <nl> + server_try_cancel_ = internal : : GetIntValueFromMetadata ( <nl> kServerTryCancelRequest , ctx - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> - server_write_last_ = GetIntValueFromMetadata ( kServerFinishAfterNReads , <nl> - ctx - > client_metadata ( ) , 0 ) ; <nl> + server_write_last_ = internal : : GetIntValueFromMetadata ( <nl> + kServerFinishAfterNReads , ctx - > client_metadata ( ) , 0 ) ; <nl> if ( server_try_cancel_ = = CANCEL_BEFORE_PROCESSING ) { <nl> ServerTryCancelNonblocking ( ctx ) ; <nl> } else { <nl> mmm a / test / cpp / end2end / test_service_impl . h <nl> ppp b / test / cpp / end2end / test_service_impl . h <nl> typedef enum { <nl> CANCEL_AFTER_PROCESSING <nl> } ServerTryCancelRequestPhase ; <nl> <nl> - namespace { <nl> + namespace internal { <nl> / / When echo_deadline is requested , deadline seen in the ServerContext is set in <nl> / / the response in seconds . 
<nl> void MaybeEchoDeadline ( experimental : : ServerContextBase * context , <nl> void ServerTryCancel ( ServerContext * context ) { <nl> gpr_time_from_micros ( 1000 , GPR_TIMESPAN ) ) ) ; <nl> } <nl> } <nl> - } / / namespace <nl> + } / / namespace internal <nl> <nl> class TestServiceSignaller { <nl> public : <nl> class TestMultipleServiceImpl : public RpcService { <nl> return Status ( static_cast < StatusCode > ( error . code ( ) ) , <nl> error . error_message ( ) , error . binary_error_details ( ) ) ; <nl> } <nl> - int server_try_cancel = GetIntValueFromMetadata ( <nl> + int server_try_cancel = internal : : GetIntValueFromMetadata ( <nl> kServerTryCancelRequest , context - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> if ( server_try_cancel > DO_NOT_CANCEL ) { <nl> / / Since this is a unary RPC , by the time this server handler is called , <nl> / / the ' request ' message is already read from the client . So the scenarios <nl> / / in server_try_cancel don ' t make much sense . Just cancel the RPC as long <nl> / / as server_try_cancel is not DO_NOT_CANCEL <nl> - ServerTryCancel ( context ) ; <nl> + internal : : ServerTryCancel ( context ) ; <nl> return Status : : CANCELLED ; <nl> } <nl> <nl> response - > set_message ( request - > message ( ) ) ; <nl> - MaybeEchoDeadline ( context , request , response ) ; <nl> + internal : : MaybeEchoDeadline ( context , request , response ) ; <nl> if ( host_ ) { <nl> response - > mutable_param ( ) - > set_host ( * host_ ) ; <nl> } <nl> class TestMultipleServiceImpl : public RpcService { <nl> if ( request - > has_param ( ) & & <nl> ( request - > param ( ) . expected_client_identity ( ) . length ( ) > 0 | | <nl> request - > param ( ) . check_auth_context ( ) ) ) { <nl> - CheckServerAuthContext ( <nl> + internal : : CheckServerAuthContext ( <nl> context , request - > param ( ) . expected_transport_security_type ( ) , <nl> request - > param ( ) . expected_client_identity ( ) ) ; <nl> } <nl> class TestMultipleServiceImpl : public RpcService { <nl> Status CheckClientInitialMetadata ( ServerContext * context , <nl> const SimpleRequest * / * request * / , <nl> SimpleResponse * / * response * / ) { <nl> - EXPECT_EQ ( MetadataMatchCount ( context - > client_metadata ( ) , <nl> - kCheckClientInitialMetadataKey , <nl> - kCheckClientInitialMetadataVal ) , <nl> + EXPECT_EQ ( internal : : MetadataMatchCount ( context - > client_metadata ( ) , <nl> + kCheckClientInitialMetadataKey , <nl> + kCheckClientInitialMetadataVal ) , <nl> 1 ) ; <nl> EXPECT_EQ ( 1u , <nl> context - > client_metadata ( ) . 
count ( kCheckClientInitialMetadataKey ) ) ; <nl> class TestMultipleServiceImpl : public RpcService { <nl> / / reading messages from the client <nl> / / CANCEL_AFTER_PROCESSING : The RPC is cancelled after the server reads <nl> / / all the messages from the client <nl> - int server_try_cancel = GetIntValueFromMetadata ( <nl> + int server_try_cancel = internal : : GetIntValueFromMetadata ( <nl> kServerTryCancelRequest , context - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> <nl> EchoRequest request ; <nl> response - > set_message ( " " ) ; <nl> <nl> if ( server_try_cancel = = CANCEL_BEFORE_PROCESSING ) { <nl> - ServerTryCancel ( context ) ; <nl> + internal : : ServerTryCancel ( context ) ; <nl> return Status : : CANCELLED ; <nl> } <nl> <nl> std : : thread * server_try_cancel_thd = nullptr ; <nl> if ( server_try_cancel = = CANCEL_DURING_PROCESSING ) { <nl> server_try_cancel_thd = <nl> - new std : : thread ( [ context ] { ServerTryCancel ( context ) ; } ) ; <nl> + new std : : thread ( [ context ] { internal : : ServerTryCancel ( context ) ; } ) ; <nl> } <nl> <nl> int num_msgs_read = 0 ; <nl> class TestMultipleServiceImpl : public RpcService { <nl> } <nl> <nl> if ( server_try_cancel = = CANCEL_AFTER_PROCESSING ) { <nl> - ServerTryCancel ( context ) ; <nl> + internal : : ServerTryCancel ( context ) ; <nl> return Status : : CANCELLED ; <nl> } <nl> <nl> class TestMultipleServiceImpl : public RpcService { <nl> / / writing messages to the client <nl> / / CANCEL_AFTER_PROCESSING : The RPC is cancelled after the server writes <nl> / / all the messages to the client <nl> - int server_try_cancel = GetIntValueFromMetadata ( <nl> + int server_try_cancel = internal : : GetIntValueFromMetadata ( <nl> kServerTryCancelRequest , context - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> <nl> - int server_coalescing_api = GetIntValueFromMetadata ( <nl> + int server_coalescing_api = internal : : GetIntValueFromMetadata ( <nl> kServerUseCoalescingApi , context - > client_metadata ( ) , 0 ) ; <nl> <nl> - int server_responses_to_send = GetIntValueFromMetadata ( <nl> + int server_responses_to_send = internal : : GetIntValueFromMetadata ( <nl> kServerResponseStreamsToSend , context - > client_metadata ( ) , <nl> kServerDefaultResponseStreamsToSend ) ; <nl> <nl> if ( server_try_cancel = = CANCEL_BEFORE_PROCESSING ) { <nl> - ServerTryCancel ( context ) ; <nl> + internal : : ServerTryCancel ( context ) ; <nl> return Status : : CANCELLED ; <nl> } <nl> <nl> class TestMultipleServiceImpl : public RpcService { <nl> std : : thread * server_try_cancel_thd = nullptr ; <nl> if ( server_try_cancel = = CANCEL_DURING_PROCESSING ) { <nl> server_try_cancel_thd = <nl> - new std : : thread ( [ context ] { ServerTryCancel ( context ) ; } ) ; <nl> + new std : : thread ( [ context ] { internal : : ServerTryCancel ( context ) ; } ) ; <nl> } <nl> <nl> for ( int i = 0 ; i < server_responses_to_send ; i + + ) { <nl> class TestMultipleServiceImpl : public RpcService { <nl> } <nl> <nl> if ( server_try_cancel = = CANCEL_AFTER_PROCESSING ) { <nl> - ServerTryCancel ( context ) ; <nl> + internal : : ServerTryCancel ( context ) ; <nl> return Status : : CANCELLED ; <nl> } <nl> <nl> class TestMultipleServiceImpl : public RpcService { <nl> / / reading / writing messages from / to the client <nl> / / CANCEL_AFTER_PROCESSING : The RPC is cancelled after the server <nl> / / reads / writes all messages from / to the client <nl> - int server_try_cancel = GetIntValueFromMetadata ( <nl> + int server_try_cancel = internal : : GetIntValueFromMetadata ( <nl> 
kServerTryCancelRequest , context - > client_metadata ( ) , DO_NOT_CANCEL ) ; <nl> <nl> EchoRequest request ; <nl> EchoResponse response ; <nl> <nl> if ( server_try_cancel = = CANCEL_BEFORE_PROCESSING ) { <nl> - ServerTryCancel ( context ) ; <nl> + internal : : ServerTryCancel ( context ) ; <nl> return Status : : CANCELLED ; <nl> } <nl> <nl> std : : thread * server_try_cancel_thd = nullptr ; <nl> if ( server_try_cancel = = CANCEL_DURING_PROCESSING ) { <nl> server_try_cancel_thd = <nl> - new std : : thread ( [ context ] { ServerTryCancel ( context ) ; } ) ; <nl> + new std : : thread ( [ context ] { internal : : ServerTryCancel ( context ) ; } ) ; <nl> } <nl> <nl> / / kServerFinishAfterNReads suggests after how many reads , the server should <nl> / / write the last message and send status ( coalesced using WriteLast ) <nl> - int server_write_last = GetIntValueFromMetadata ( <nl> + int server_write_last = internal : : GetIntValueFromMetadata ( <nl> kServerFinishAfterNReads , context - > client_metadata ( ) , 0 ) ; <nl> <nl> int read_counts = 0 ; <nl> class TestMultipleServiceImpl : public RpcService { <nl> } <nl> <nl> if ( server_try_cancel = = CANCEL_AFTER_PROCESSING ) { <nl> - ServerTryCancel ( context ) ; <nl> + internal : : ServerTryCancel ( context ) ; <nl> return Status : : CANCELLED ; <nl> } <nl> <nl> mmm a / test / cpp / end2end / xds_end2end_test . cc <nl> ppp b / test / cpp / end2end / xds_end2end_test . cc <nl> class XdsEnd2endTest : public : : testing : : TestWithParam < TestType > { <nl> return backend_ports ; <nl> } <nl> <nl> - enum RpcServiceMethod { <nl> - TEST_ECHO , <nl> - TEST_ECHO1 , <nl> - TEST_ECHO2 , <nl> - TEST1_ECHO , <nl> - TEST1_ECHO1 , <nl> - TEST1_ECHO2 , <nl> - TEST2_ECHO , <nl> - TEST2_ECHO1 , <nl> - TEST2_ECHO2 , <nl> + enum RpcService { <nl> + SERVICE_ECHO , <nl> + SERVICE_ECHO1 , <nl> + SERVICE_ECHO2 , <nl> + } ; <nl> + <nl> + enum RpcMethod { <nl> + METHOD_ECHO , <nl> + METHOD_ECHO1 , <nl> + METHOD_ECHO2 , <nl> } ; <nl> <nl> struct RpcOptions { <nl> - RpcServiceMethod service_method = TEST_ECHO ; <nl> - EchoResponse * response = nullptr ; <nl> + RpcService service = SERVICE_ECHO ; <nl> + RpcMethod method = METHOD_ECHO ; <nl> int timeout_ms = 1000 ; <nl> bool wait_for_ready = false ; <nl> bool server_fail = false ; <nl> - int times = 1 ; <nl> + <nl> + RpcOptions ( ) { } <nl> + <nl> + RpcOptions & set_rpc_service ( RpcService rpc_service ) { <nl> + service = rpc_service ; <nl> + return * this ; <nl> + } <nl> + <nl> + RpcOptions & set_rpc_method ( RpcMethod rpc_method ) { <nl> + method = rpc_method ; <nl> + return * this ; <nl> + } <nl> + <nl> + RpcOptions & set_timeout_ms ( int rpc_timeout_ms ) { <nl> + timeout_ms = rpc_timeout_ms ; <nl> + return * this ; <nl> + } <nl> + <nl> + RpcOptions & set_wait_for_ready ( bool rpc_wait_for_ready ) { <nl> + wait_for_ready = rpc_wait_for_ready ; <nl> + return * this ; <nl> + } <nl> + <nl> + RpcOptions & set_server_fail ( bool rpc_server_fail ) { <nl> + server_fail = rpc_server_fail ; <nl> + return * this ; <nl> + } <nl> } ; <nl> <nl> / / TODO @ donnadionne : Will replace SendRpc in all tests . <nl> - Status SendRpcNew ( const RpcOptions & rpc_options , <nl> - EchoResponse * response = nullptr ) { <nl> - const bool local_response = ( response = = nullptr ) ; <nl> - if ( local_response ) response = new EchoResponse ; <nl> - EchoRequest request ; <nl> - request . set_message ( kRequestMessage_ ) ; <nl> - if ( rpc_options . server_fail ) { <nl> - request . 
mutable_param ( ) - > mutable_expected_error ( ) - > set_code ( <nl> - GRPC_STATUS_FAILED_PRECONDITION ) ; <nl> - } <nl> + template < typename Stub > <nl> + Status SendRpcMethod ( Stub * stub , const RpcOptions & rpc_options , <nl> + EchoRequest & request , EchoResponse * response ) { <nl> ClientContext context ; <nl> context . set_deadline ( <nl> grpc_timeout_milliseconds_to_deadline ( rpc_options . timeout_ms ) ) ; <nl> if ( rpc_options . wait_for_ready ) context . set_wait_for_ready ( true ) ; <nl> - Status status ; <nl> - switch ( rpc_options . service_method ) { <nl> - case TEST_ECHO : <nl> - status = stub_ - > Echo ( & context , request , response ) ; <nl> - break ; <nl> - case TEST_ECHO1 : <nl> - status = stub_ - > Echo1 ( & context , request , response ) ; <nl> - break ; <nl> - case TEST_ECHO2 : <nl> - status = stub_ - > Echo2 ( & context , request , response ) ; <nl> - break ; <nl> - case TEST1_ECHO : <nl> - status = stub1_ - > Echo ( & context , request , response ) ; <nl> - break ; <nl> - case TEST1_ECHO1 : <nl> - status = stub1_ - > Echo1 ( & context , request , response ) ; <nl> - break ; <nl> - case TEST1_ECHO2 : <nl> - status = stub1_ - > Echo2 ( & context , request , response ) ; <nl> - break ; <nl> - case TEST2_ECHO : <nl> - status = stub2_ - > Echo ( & context , request , response ) ; <nl> - break ; <nl> - case TEST2_ECHO1 : <nl> - status = stub2_ - > Echo1 ( & context , request , response ) ; <nl> - break ; <nl> - case TEST2_ECHO2 : <nl> - status = stub2_ - > Echo2 ( & context , request , response ) ; <nl> - break ; <nl> + switch ( rpc_options . method ) { <nl> + case METHOD_ECHO : <nl> + return ( * stub ) - > Echo ( & context , request , response ) ; <nl> + case METHOD_ECHO1 : <nl> + return ( * stub ) - > Echo1 ( & context , request , response ) ; <nl> + case METHOD_ECHO2 : <nl> + return ( * stub ) - > Echo2 ( & context , request , response ) ; <nl> } <nl> - if ( local_response ) delete response ; <nl> - return status ; <nl> } <nl> <nl> - / / TODO @ donnadionne : Will replace ChedkRpcSendOk in all tests . <nl> - void CheckRpcSendOkNew ( const RpcOptions & rpc_options ) { <nl> - for ( size_t i = 0 ; i < rpc_options . times ; + + i ) { <nl> - EchoResponse response ; <nl> - const Status status = SendRpcNew ( rpc_options , & response ) ; <nl> - EXPECT_TRUE ( status . ok ( ) ) < < " code = " < < status . error_code ( ) <nl> - < < " message = " < < status . error_message ( ) ; <nl> - EXPECT_EQ ( response . message ( ) , kRequestMessage_ ) ; <nl> - } <nl> - } <nl> - <nl> - Status SendRpc ( const string & method_name = " Echo " , <nl> - EchoResponse * response = nullptr , int timeout_ms = 1000 , <nl> - bool wait_for_ready = false , bool server_fail = false ) { <nl> + Status SendRpc ( const RpcOptions & rpc_options = RpcOptions ( ) , <nl> + EchoResponse * response = nullptr ) { <nl> const bool local_response = ( response = = nullptr ) ; <nl> if ( local_response ) response = new EchoResponse ; <nl> EchoRequest request ; <nl> request . set_message ( kRequestMessage_ ) ; <nl> - if ( server_fail ) { <nl> + if ( rpc_options . server_fail ) { <nl> request . mutable_param ( ) - > mutable_expected_error ( ) - > set_code ( <nl> GRPC_STATUS_FAILED_PRECONDITION ) ; <nl> } <nl> - ClientContext context ; <nl> - context . set_deadline ( grpc_timeout_milliseconds_to_deadline ( timeout_ms ) ) ; <nl> - if ( wait_for_ready ) context . 
set_wait_for_ready ( true ) ; <nl> Status status ; <nl> - if ( method_name = = " Echo " ) { <nl> - status = stub_ - > Echo ( & context , request , response ) ; <nl> - } else if ( method_name = = " Echo1 " ) { <nl> - status = stub1_ - > Echo1 ( & context , request , response ) ; <nl> - } else if ( method_name = = " Echo2 " ) { <nl> - status = stub2_ - > Echo2 ( & context , request , response ) ; <nl> + switch ( rpc_options . service ) { <nl> + case SERVICE_ECHO : <nl> + status = SendRpcMethod ( & stub_ , rpc_options , request , response ) ; <nl> + break ; <nl> + case SERVICE_ECHO1 : <nl> + status = SendRpcMethod ( & stub1_ , rpc_options , request , response ) ; <nl> + break ; <nl> + case SERVICE_ECHO2 : <nl> + status = SendRpcMethod ( & stub2_ , rpc_options , request , response ) ; <nl> + break ; <nl> } <nl> if ( local_response ) delete response ; <nl> return status ; <nl> } <nl> <nl> + / / TODO @ donnadionne : Will replace ChedkRpcSendOk in all tests . <nl> void CheckRpcSendOk ( const size_t times = 1 , <nl> - const string & method_name = " Echo " , <nl> - const int timeout_ms = 1000 , <nl> - bool wait_for_ready = false ) { <nl> + const RpcOptions & rpc_options = RpcOptions ( ) ) { <nl> for ( size_t i = 0 ; i < times ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = <nl> - SendRpc ( method_name , & response , timeout_ms , wait_for_ready ) ; <nl> + const Status status = SendRpc ( rpc_options , & response ) ; <nl> EXPECT_TRUE ( status . ok ( ) ) < < " code = " < < status . error_code ( ) <nl> < < " message = " < < status . error_message ( ) ; <nl> EXPECT_EQ ( response . message ( ) , kRequestMessage_ ) ; <nl> class XdsEnd2endTest : public : : testing : : TestWithParam < TestType > { <nl> <nl> void CheckRpcSendFailure ( const size_t times = 1 , bool server_fail = false ) { <nl> for ( size_t i = 0 ; i < times ; + + i ) { <nl> - const Status status = SendRpc ( " Echo " , nullptr , 1000 , false , server_fail ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) . set_server_fail ( server_fail ) ) ; <nl> EXPECT_FALSE ( status . ok ( ) ) ; <nl> } <nl> } <nl> TEST_P ( BasicTest , InitiallyEmptyServerlist ) { <nl> kDefaultResourceName ) ) ; <nl> const auto t0 = system_clock : : now ( ) ; <nl> / / Client will block : LB will initially send empty serverlist . <nl> - CheckRpcSendOk ( 1 , " Echo " , kCallDeadlineMs , true / * wait_for_ready * / ) ; <nl> + CheckRpcSendOk ( <nl> + 1 , RpcOptions ( ) . set_timeout_ms ( kCallDeadlineMs ) . set_wait_for_ready ( true ) ) ; <nl> const auto ellapsed_ms = <nl> std : : chrono : : duration_cast < std : : chrono : : milliseconds > ( <nl> system_clock : : now ( ) - t0 ) ; <nl> TEST_P ( BasicTest , BackendsRestart ) { <nl> CheckRpcSendFailure ( ) ; <nl> / / Restart all backends . RPCs should start succeeding again . <nl> StartAllBackends ( ) ; <nl> - CheckRpcSendOk ( 1 / * times * / , " Echo " , 2000 / * timeout_ms * / , <nl> - true / * wait_for_ready * / ) ; <nl> + CheckRpcSendOk ( 1 , RpcOptions ( ) . set_timeout_ms ( 2000 ) . set_wait_for_ready ( true ) ) ; <nl> } <nl> <nl> using XdsResolverOnlyTest = BasicTest ; <nl> TEST_P ( LdsTest , RouteMatchHasNonemptyPrefix ) { <nl> AdsServiceImpl : : NACKED ) ; <nl> } <nl> <nl> - / / Tests that LDS client should send a NACK if route match has empty path <nl> - / / as the only route ( default ) in the LDS response . 
<nl> - TEST_P ( LdsTest , RouteMatchHasEmptyPath ) { <nl> + / / Tests that LDS client should send a NACK if route match has a prefix <nl> + / / not in the format " / service / " : missing / or did not end with / . <nl> + TEST_P ( LdsTest , RouteMatchHasInvalidPrefix ) { <nl> + ResetStub ( / * failover_timeout = * / 0 , <nl> + / * expected_targets = * / " " , <nl> + / * xds_resource_does_not_exist_timeout * / 0 , <nl> + / * xds_routing_enabled = * / true ) ; <nl> + RouteConfiguration route_config = <nl> + balancers_ [ 0 ] - > ads_service ( ) - > default_route_config ( ) ; <nl> + auto * route1 = route_config . mutable_virtual_hosts ( 0 ) - > mutable_routes ( 0 ) ; <nl> + / / Invalid case 1 : no / <nl> + route1 - > mutable_match ( ) - > set_prefix ( " grpc . testing . EchoTest1Service " ) ; <nl> + auto * default_route = route_config . mutable_virtual_hosts ( 0 ) - > add_routes ( ) ; <nl> + default_route - > mutable_match ( ) - > set_prefix ( " " ) ; <nl> + default_route - > mutable_route ( ) - > set_cluster ( kDefaultResourceName ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 2 : missing / at the end <nl> + route1 - > mutable_match ( ) - > set_prefix ( " / grpc . testing . EchoTest1Service " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 3 : missing / at the beginning <nl> + route1 - > mutable_match ( ) - > set_prefix ( " grpc . testing . EchoTest1Service / " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 4 : extra content outside of " / service / " <nl> + route1 - > mutable_match ( ) - > set_prefix ( " / grpc . testing . 
EchoTest1Service / Echo1 " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 5 : empty prefix " / / " <nl> + route1 - > mutable_match ( ) - > set_prefix ( " / / " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + } <nl> + <nl> + / / Tests that LDS client should send a NACK if route match has path <nl> + / / not in the format of " / service / method " <nl> + TEST_P ( LdsTest , RouteMatchHasInvalidPath ) { <nl> + ResetStub ( / * failover_timeout = * / 0 , <nl> + / * expected_targets = * / " " , <nl> + / * xds_resource_does_not_exist_timeout * / 0 , <nl> + / * xds_routing_enabled = * / true ) ; <nl> RouteConfiguration route_config = <nl> balancers_ [ 0 ] - > ads_service ( ) - > default_route_config ( ) ; <nl> auto * route1 = route_config . mutable_virtual_hosts ( 0 ) - > mutable_routes ( 0 ) ; <nl> - route1 - > mutable_match ( ) - > set_path ( " " ) ; <nl> auto * default_route = route_config . mutable_virtual_hosts ( 0 ) - > add_routes ( ) ; <nl> default_route - > mutable_match ( ) - > set_prefix ( " " ) ; <nl> + default_route - > mutable_route ( ) - > set_cluster ( kDefaultResourceName ) ; <nl> + / / Invalid case 1 : empty path <nl> + route1 - > mutable_match ( ) - > set_path ( " " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 2 : missing / at the beginning <nl> + route1 - > mutable_match ( ) - > set_path ( " grpc . testing . EchoTest1Service / Echo1 " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 3 : extra / at the end <nl> + route1 - > mutable_match ( ) - > set_path ( " / grpc . testing . EchoTest1Service / Echo1 / " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 4 : missinga / in the middle <nl> + route1 - > mutable_match ( ) - > set_path ( " / grpc . testing . 
EchoTest1Service . Echo1 " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 5 : empty service " / / Echo1 " <nl> + route1 - > mutable_match ( ) - > set_path ( " / / Echo1 " ) ; <nl> + balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> + AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> + SetNextResolution ( { } ) ; <nl> + SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> + CheckRpcSendFailure ( ) ; <nl> + EXPECT_EQ ( balancers_ [ 0 ] - > ads_service ( ) - > lds_response_state ( ) , <nl> + AdsServiceImpl : : NACKED ) ; <nl> + / / Invalid case 5 : empty method " / grpc . testing . EchoTest1Service / " <nl> + route1 - > mutable_match ( ) - > set_path ( " / grpc . testing . EchoTest1Service / " ) ; <nl> balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( <nl> AdsServiceImpl : : BuildListener ( route_config ) , kDefaultResourceName ) ; <nl> SetNextResolution ( { } ) ; <nl> TEST_P ( LdsTest , XdsRoutingPathMatching ) { <nl> balancers_ [ 0 ] - > ads_service ( ) - > BuildListener ( new_route_config ) ; <nl> balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( listener , kDefaultResourceName ) ; <nl> WaitForAllBackends ( 0 , 2 ) ; <nl> - RpcOptions rpc_options ; <nl> - rpc_options . times = kNumEchoRpcs ; <nl> - rpc_options . service_method = TEST_ECHO ; <nl> - rpc_options . wait_for_ready = true ; <nl> - CheckRpcSendOkNew ( rpc_options ) ; <nl> - rpc_options . times = kNumEcho1Rpcs ; <nl> - rpc_options . service_method = TEST1_ECHO1 ; <nl> - CheckRpcSendOkNew ( rpc_options ) ; <nl> - rpc_options . times = kNumEcho2Rpcs ; <nl> - rpc_options . service_method = TEST2_ECHO2 ; <nl> - CheckRpcSendOkNew ( rpc_options ) ; <nl> + CheckRpcSendOk ( kNumEchoRpcs , RpcOptions ( ) . set_wait_for_ready ( true ) ) ; <nl> + CheckRpcSendOk ( kNumEcho1Rpcs , RpcOptions ( ) <nl> + . set_rpc_service ( SERVICE_ECHO1 ) <nl> + . set_rpc_method ( METHOD_ECHO1 ) <nl> + . set_wait_for_ready ( true ) ) ; <nl> + CheckRpcSendOk ( kNumEcho2Rpcs , RpcOptions ( ) <nl> + . set_rpc_service ( SERVICE_ECHO2 ) <nl> + . set_rpc_method ( METHOD_ECHO2 ) <nl> + . set_wait_for_ready ( true ) ) ; <nl> / / Make sure RPCs all go to the correct backend . <nl> for ( size_t i = 0 ; i < 2 ; + + i ) { <nl> EXPECT_EQ ( kNumEchoRpcs / 2 , <nl> TEST_P ( LdsTest , XdsRoutingPrefixMatching ) { <nl> balancers_ [ 0 ] - > ads_service ( ) - > BuildListener ( new_route_config ) ; <nl> balancers_ [ 0 ] - > ads_service ( ) - > SetLdsResource ( listener , kDefaultResourceName ) ; <nl> WaitForAllBackends ( 0 , 2 ) ; <nl> - RpcOptions rpc_options ; <nl> - rpc_options . times = kNumEchoRpcs ; <nl> - rpc_options . service_method = TEST_ECHO ; <nl> - rpc_options . wait_for_ready = true ; <nl> - CheckRpcSendOkNew ( rpc_options ) ; <nl> - rpc_options . times = kNumEcho1Rpcs ; <nl> - rpc_options . service_method = TEST1_ECHO1 ; <nl> - CheckRpcSendOkNew ( rpc_options ) ; <nl> - rpc_options . times = kNumEcho2Rpcs ; <nl> - rpc_options . service_method = TEST2_ECHO2 ; <nl> - CheckRpcSendOkNew ( rpc_options ) ; <nl> + CheckRpcSendOk ( kNumEchoRpcs , RpcOptions ( ) . 
set_wait_for_ready ( true ) ) ; <nl> + CheckRpcSendOk ( kNumEcho1Rpcs , RpcOptions ( ) <nl> + . set_rpc_service ( SERVICE_ECHO1 ) <nl> + . set_rpc_method ( METHOD_ECHO1 ) <nl> + . set_wait_for_ready ( true ) ) ; <nl> + CheckRpcSendOk ( kNumEcho2Rpcs , RpcOptions ( ) <nl> + . set_rpc_service ( SERVICE_ECHO2 ) <nl> + . set_rpc_method ( METHOD_ECHO2 ) <nl> + . set_wait_for_ready ( true ) ) ; <nl> / / Make sure RPCs all go to the correct backend . <nl> for ( size_t i = 0 ; i < 2 ; + + i ) { <nl> EXPECT_EQ ( kNumEchoRpcs / 2 , <nl> TEST_P ( LocalityMapTest , NoLocalities ) { <nl> <nl> / / Tests that the locality map can work properly even when it contains a large <nl> / / number of localities . <nl> - TEST_P ( LocalityMapTest , StressTest ) { <nl> + / * TEST_P ( LocalityMapTest , StressTest ) { <nl> SetNextResolution ( { } ) ; <nl> SetNextResolutionForLbChannelAllBalancers ( ) ; <nl> const size_t kNumLocalities = 100 ; <nl> TEST_P ( LocalityMapTest , StressTest ) { <nl> AdsServiceImpl : : BuildEdsResource ( args ) , 60 * 1000 , kDefaultResourceName ) ) ; <nl> / / Wait until backend 0 is ready , before which kNumLocalities localities are <nl> / / received and handled by the xds policy . <nl> - WaitForBackend ( 0 , / * reset_counters = * / false ) ; <nl> + WaitForBackend ( 0 , / * reset_counters = * false ) ; <nl> EXPECT_EQ ( 0U , backends_ [ 1 ] - > backend_service ( ) - > request_count ( ) ) ; <nl> / / Wait until backend 1 is ready , before which kNumLocalities localities are <nl> / / removed by the xds policy . <nl> WaitForBackend ( 1 ) ; <nl> delayed_resource_setter . join ( ) ; <nl> - } <nl> + } * / <nl> <nl> / / Tests that the localities in a locality map are picked correctly after update <nl> / / ( addition , modification , deletion ) . <nl> TEST_P ( DropTest , Vanilla ) { <nl> size_t num_drops = 0 ; <nl> for ( size_t i = 0 ; i < kNumRpcs ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> if ( ! status . ok ( ) & & <nl> status . error_message ( ) = = " Call dropped by load balancing policy " ) { <nl> + + num_drops ; <nl> TEST_P ( DropTest , DropPerHundred ) { <nl> size_t num_drops = 0 ; <nl> for ( size_t i = 0 ; i < kNumRpcs ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> if ( ! status . ok ( ) & & <nl> status . error_message ( ) = = " Call dropped by load balancing policy " ) { <nl> + + num_drops ; <nl> TEST_P ( DropTest , DropPerTenThousand ) { <nl> size_t num_drops = 0 ; <nl> for ( size_t i = 0 ; i < kNumRpcs ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> if ( ! status . ok ( ) & & <nl> status . error_message ( ) = = " Call dropped by load balancing policy " ) { <nl> + + num_drops ; <nl> TEST_P ( DropTest , Update ) { <nl> gpr_log ( GPR_INFO , " = = = = = = = = = BEFORE FIRST BATCH = = = = = = = = = = " ) ; <nl> for ( size_t i = 0 ; i < kNumRpcs ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> if ( ! status . ok ( ) & & <nl> status . 
error_message ( ) = = " Call dropped by load balancing policy " ) { <nl> + + num_drops ; <nl> TEST_P ( DropTest , Update ) { <nl> size_t num_rpcs = kNumRpcs ; <nl> while ( seen_drop_rate < kDropRateThreshold ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> + + num_rpcs ; <nl> if ( ! status . ok ( ) & & <nl> status . error_message ( ) = = " Call dropped by load balancing policy " ) { <nl> TEST_P ( DropTest , Update ) { <nl> gpr_log ( GPR_INFO , " = = = = = = = = = BEFORE SECOND BATCH = = = = = = = = = = " ) ; <nl> for ( size_t i = 0 ; i < kNumRpcs ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> if ( ! status . ok ( ) & & <nl> status . error_message ( ) = = " Call dropped by load balancing policy " ) { <nl> + + num_drops ; <nl> TEST_P ( DropTest , DropAll ) { <nl> / / Send kNumRpcs RPCs and all of them are dropped . <nl> for ( size_t i = 0 ; i < kNumRpcs ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> EXPECT_EQ ( status . error_code ( ) , StatusCode : : UNAVAILABLE ) ; <nl> EXPECT_EQ ( status . error_message ( ) , " Call dropped by load balancing policy " ) ; <nl> } <nl> TEST_P ( ClientLoadReportingWithDropTest , Vanilla ) { <nl> / / Send kNumRpcs RPCs and count the drops . <nl> for ( size_t i = 0 ; i < kNumRpcs ; + + i ) { <nl> EchoResponse response ; <nl> - const Status status = SendRpc ( " Echo " , & response ) ; <nl> + const Status status = SendRpc ( RpcOptions ( ) , & response ) ; <nl> if ( ! status . ok ( ) & & <nl> status . error_message ( ) = = " Call dropped by load balancing policy " ) { <nl> + + num_drops ; <nl>
Code review comments: added invalid config tests and restructured
grpc/grpc
71aef940c33b8751a6847da882c0e08b4d7c7013
2020-04-15T20:09:03Z
mmm a / tensorflow / core / grappler / optimizers / meta_optimizer . cc <nl> ppp b / tensorflow / core / grappler / optimizers / meta_optimizer . cc <nl> Status MetaOptimizer : : InitializeOptimizers ( <nl> if ( cfg_ . remapping ( ) ! = RewriterConfig : : OFF ) { <nl> optimizers - > push_back ( MakeUnique < Remapper > ( cfg_ . remapping ( ) ) ) ; <nl> } <nl> - if ( cfg_ . pin_to_host_optimization ( ) ! = RewriterConfig : : OFF ) { <nl> + if ( cfg_ . pin_to_host_optimization ( ) = = RewriterConfig : : ON ) { <nl> optimizers - > push_back ( MakeUnique < PinToHostOptimizer > ( ) ) ; <nl> } <nl> if ( cfg_ . arithmetic_optimization ( ) ! = RewriterConfig : : OFF ) { <nl> bool MetaOptimizerEnabled ( const RewriterConfig & cfg ) { <nl> cfg . memory_optimization ( ) ! = RewriterConfig : : NO_MEM_OPT | | <nl> cfg . debug_stripper ( ) = = RewriterConfig : : ON | | <nl> cfg . scoped_allocator_optimization ( ) = = RewriterConfig : : ON | | <nl> - cfg . pin_to_host_optimization ( ) ! = RewriterConfig : : OFF | | <nl> + cfg . pin_to_host_optimization ( ) = = RewriterConfig : : ON | | <nl> ! cfg . optimizers ( ) . empty ( ) | | ! cfg . custom_optimizers ( ) . empty ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / protobuf / rewriter_config . proto <nl> ppp b / tensorflow / core / protobuf / rewriter_config . proto <nl> message RewriterConfig { <nl> / / Try to allocate some independent Op outputs contiguously in order to <nl> / / merge or eliminate downstream Ops ( off by default ) . <nl> Toggle scoped_allocator_optimization = 15 ; <nl> - / / Force small ops onto the CPU ( default is ON ) . <nl> + / / Force small ops onto the CPU ( default is OFF ) . <nl> Toggle pin_to_host_optimization = 18 ; <nl> / / Disable the entire meta optimizer ( off by default ) . <nl> bool disable_meta_optimizer = 19 ; <nl>
Automated rollback of commit cb98ceba9cff8c10ee3c7e89dc8925c88b28118e
tensorflow/tensorflow
694367b574dcaf5ac90f3e42b8dee8fa51ca9f38
2018-10-01T19:04:15Z
mmm a / cores / esp8266 / Print . h <nl> ppp b / cores / esp8266 / Print . h <nl> class Print { <nl> return write ( ( const uint8_t * ) buffer , size ) ; <nl> } <nl> / / These handle ambiguity for write ( 0 ) case , because ( 0 ) can be a pointer or an integer <nl> - size_t write ( short t ) { return write ( ( uint8_t ) t ) ; } <nl> - size_t write ( unsigned short t ) { return write ( ( uint8_t ) t ) ; } <nl> - size_t write ( int t ) { return write ( ( uint8_t ) t ) ; } <nl> - size_t write ( unsigned int t ) { return write ( ( uint8_t ) t ) ; } <nl> - size_t write ( long t ) { return write ( ( uint8_t ) t ) ; } <nl> - size_t write ( unsigned long t ) { return write ( ( uint8_t ) t ) ; } <nl> + inline size_t write ( short t ) { return write ( ( uint8_t ) t ) ; } <nl> + inline size_t write ( unsigned short t ) { return write ( ( uint8_t ) t ) ; } <nl> + inline size_t write ( int t ) { return write ( ( uint8_t ) t ) ; } <nl> + inline size_t write ( unsigned int t ) { return write ( ( uint8_t ) t ) ; } <nl> + inline size_t write ( long t ) { return write ( ( uint8_t ) t ) ; } <nl> + inline size_t write ( unsigned long t ) { return write ( ( uint8_t ) t ) ; } <nl> + / / Enable write ( char ) to fall through to write ( uint8_t ) <nl> + inline size_t write ( char c ) { return write ( ( uint8_t ) c ) ; } <nl> + inline size_t write ( int8_t c ) { return write ( ( uint8_t ) c ) ; } <nl> <nl> size_t printf ( const char * format , . . . ) __attribute__ ( ( format ( printf , 2 , 3 ) ) ) ; <nl> size_t printf_P ( PGM_P format , . . . ) __attribute__ ( ( format ( printf , 2 , 3 ) ) ) ; <nl> mmm a / libraries / Wire / Wire . h <nl> ppp b / libraries / Wire / Wire . h <nl> class TwoWire : public Stream <nl> void onReceive ( void ( * ) ( size_t ) ) ; / / legacy esp8266 backward compatibility <nl> void onRequest ( void ( * ) ( void ) ) ; <nl> <nl> - inline size_t write ( unsigned long n ) { return write ( ( uint8_t ) n ) ; } <nl> - inline size_t write ( long n ) { return write ( ( uint8_t ) n ) ; } <nl> - inline size_t write ( unsigned int n ) { return write ( ( uint8_t ) n ) ; } <nl> - inline size_t write ( int n ) { return write ( ( uint8_t ) n ) ; } <nl> using Print : : write ; <nl> } ; <nl> <nl> mmm a / tests / host / Makefile <nl> ppp b / tests / host / Makefile <nl> TEST_CPP_FILES : = \ <nl> core / test_pgmspace . cpp \ <nl> core / test_md5builder . cpp \ <nl> core / test_string . cpp \ <nl> - core / test_PolledTimeout . cpp <nl> + core / test_PolledTimeout . cpp \ <nl> + core / test_Print . cpp <nl> <nl> PREINCLUDES : = \ <nl> - include common / mock . h \ <nl> new file mode 100644 <nl> index 0000000000 . . c3cb2f5cd0 <nl> mmm / dev / null <nl> ppp b / tests / host / core / test_Print . cpp <nl> <nl> + / * <nl> + test_pgmspace . cpp - pgmspace tests <nl> + Copyright © 2016 Ivan Grokhotkov <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in <nl> + all copies or substantial portions of the Software . <nl> + * / <nl> + <nl> + # include < catch . 
hpp > <nl> + # include < string . h > <nl> + # include < FS . h > <nl> + # include " . . / common / spiffs_mock . h " <nl> + # include < spiffs / spiffs . h > <nl> + <nl> + / / Use a SPIFFS file because we can ' t instantiate a virtual class like Print <nl> + TEST_CASE ( " Print : : write overrides all compile properly " , " [ core ] [ Print ] " ) <nl> + { <nl> + SPIFFS_MOCK_DECLARE ( 64 , 8 , 512 , " " ) ; <nl> + REQUIRE ( SPIFFS . begin ( ) ) ; <nl> + auto p = SPIFFS . open ( " test . bin " , " w " ) ; <nl> + REQUIRE ( p ) ; <nl> + uint8_t uint8 = 1 ; <nl> + uint16_t uint16 = 2 ; <nl> + uint32_t uint32 = 3 ; <nl> + size_t size = 4 ; <nl> + int8_t int8 = 1 ; <nl> + int16_t int16 = 2 ; <nl> + int32_t int32 = 3 ; <nl> + char c = ' h ' ; <nl> + int i = 10 ; <nl> + long l = 11 ; <nl> + unsigned char uc = 20 ; <nl> + unsigned int ui = 21 ; <nl> + unsigned long ul = 22 ; <nl> + p . write ( uint8 ) ; <nl> + p . write ( uint16 ) ; <nl> + p . write ( uint32 ) ; <nl> + p . write ( size ) ; <nl> + p . write ( int8 ) ; <nl> + p . write ( int16 ) ; <nl> + p . write ( int32 ) ; <nl> + p . write ( c ) ; <nl> + p . write ( i ) ; <nl> + p . write ( l ) ; <nl> + p . write ( uc ) ; <nl> + p . write ( ui ) ; <nl> + p . write ( ul ) ; <nl> + p . write ( 0 ) ; <nl> + p . write ( 1 ) ; <nl> + p . close ( ) ; <nl> + <nl> + p = SPIFFS . open ( " test . bin " , " r " ) ; <nl> + REQUIRE ( p ) ; <nl> + uint8_t buff [ 16 ] ; <nl> + int len = p . read ( buff , 16 ) ; <nl> + p . close ( ) ; <nl> + REQUIRE ( len = = 15 ) ; <nl> + REQUIRE ( buff [ 0 ] = = 1 ) ; <nl> + REQUIRE ( buff [ 1 ] = = 2 ) ; <nl> + REQUIRE ( buff [ 2 ] = = 3 ) ; <nl> + REQUIRE ( buff [ 3 ] = = 4 ) ; <nl> + REQUIRE ( buff [ 4 ] = = 1 ) ; <nl> + REQUIRE ( buff [ 5 ] = = 2 ) ; <nl> + REQUIRE ( buff [ 6 ] = = 3 ) ; <nl> + REQUIRE ( buff [ 7 ] = = ' h ' ) ; <nl> + REQUIRE ( buff [ 8 ] = = 10 ) ; <nl> + REQUIRE ( buff [ 9 ] = = 11 ) ; <nl> + REQUIRE ( buff [ 10 ] = = 20 ) ; <nl> + REQUIRE ( buff [ 11 ] = = 21 ) ; <nl> + REQUIRE ( buff [ 12 ] = = 22 ) ; <nl> + REQUIRE ( buff [ 13 ] = = 0 ) ; <nl> + REQUIRE ( buff [ 14 ] = = 1 ) ; <nl> + } <nl>
Add explicit Print::write(char)
esp8266/Arduino
82adc95d6456828e256145615e1dd4bf8d921e15
2019-05-18T21:07:13Z
mmm a / test / functional / test_runner . py <nl> ppp b / test / functional / test_runner . py <nl> def main ( ) : <nl> sys . exit ( 0 ) <nl> <nl> # Build list of tests <nl> + test_list = [ ] <nl> if tests : <nl> # Individual tests have been specified . Run specified tests that exist <nl> # in the ALL_SCRIPTS list . Accept the name with or without . py extension . <nl> tests = [ re . sub ( " \ . py $ " , " " , test ) + " . py " for test in tests ] <nl> - test_list = [ ] <nl> for test in tests : <nl> if test in ALL_SCRIPTS : <nl> test_list . append ( test ) <nl> def main ( ) : <nl> print ( " { } WARNING ! { } Test ' { } ' not found in full test list . " . format ( BOLD [ 1 ] , BOLD [ 0 ] , test ) ) <nl> elif args . extended : <nl> # Include extended tests <nl> - test_list = ALL_SCRIPTS <nl> + test_list + = ALL_SCRIPTS <nl> else : <nl> # Run base tests only <nl> - test_list = BASE_SCRIPTS <nl> + test_list + = BASE_SCRIPTS <nl> <nl> # Remove the test cases that the user has explicitly asked to exclude . <nl> if args . exclude : <nl>
Append scripts to new test_list array to fix bad assignment
bitcoin/bitcoin
b0fec8d6232d07dacf7b9f4fdf944a7a80992809
2018-03-17T18:42:31Z
mmm a / atom / browser / api / atom_api_web_contents . cc <nl> ppp b / atom / browser / api / atom_api_web_contents . cc <nl> void WebContents : : PluginCrashed ( const base : : FilePath & plugin_path , <nl> Emit ( " plugin - crashed " , info . name , info . version ) ; <nl> } <nl> <nl> + void WebContents : : MediaStartedPlaying ( ) { <nl> + Emit ( " media - started - playing " ) ; <nl> + } <nl> + <nl> + void WebContents : : MediaPaused ( ) { <nl> + Emit ( " media - paused " ) ; <nl> + } <nl> + <nl> void WebContents : : DocumentLoadedInFrame ( <nl> content : : RenderFrameHost * render_frame_host ) { <nl> if ( ! render_frame_host - > GetParent ( ) ) <nl> mmm a / atom / browser / api / atom_api_web_contents . h <nl> ppp b / atom / browser / api / atom_api_web_contents . h <nl> class WebContents : public mate : : TrackableObject < WebContents > , <nl> const std : : vector < content : : FaviconURL > & urls ) override ; <nl> void PluginCrashed ( const base : : FilePath & plugin_path , <nl> base : : ProcessId plugin_pid ) override ; <nl> + void MediaStartedPlaying ( ) override ; <nl> + void MediaPaused ( ) override ; <nl> <nl> / / brightray : : InspectableWebContentsViewDelegate : <nl> void DevToolsFocused ( ) override ; <nl> mmm a / atom / browser / lib / guest - view - manager . coffee <nl> ppp b / atom / browser / lib / guest - view - manager . coffee <nl> supportedWebViewEvents = [ <nl> ' page - title - updated ' <nl> ' page - favicon - updated ' <nl> ' enter - html - full - screen ' <nl> - ' leave - html - full - screen ' <nl> + ' leave - html - full - screen ' , <nl> + ' media - started - playing ' , <nl> + ' media - paused ' , <nl> ] <nl> <nl> nextInstanceId = 0 <nl> mmm a / atom / renderer / lib / web - view / guest - view - internal . coffee <nl> ppp b / atom / renderer / lib / web - view / guest - view - internal . coffee <nl> WEB_VIEW_EVENTS = <nl> ' crashed ' : [ ] <nl> ' gpu - crashed ' : [ ] <nl> ' plugin - crashed ' : [ ' name ' , ' version ' ] <nl> + ' media - started - playing ' : [ ] <nl> + ' media - paused ' : [ ] <nl> ' destroyed ' : [ ] <nl> ' page - title - updated ' : [ ' title ' , ' explicitSet ' ] <nl> ' page - favicon - updated ' : [ ' favicons ' ] <nl> mmm a / docs / api / web - contents . md <nl> ppp b / docs / api / web - contents . md <nl> Emitted when ` webContents ` wants to do basic auth . <nl> <nl> The usage is the same with [ the ` login ` event of ` app ` ] ( app . md # event - login ) . <nl> <nl> + # # # Event : ' media - started - playing ' <nl> + <nl> + Emitted when media starts playing . <nl> + <nl> + # # # Event : ' media - paused ' <nl> + <nl> + Emitted when media is paused or done playing . <nl> + <nl> # # Instance Methods <nl> <nl> The ` webContents ` object has the following instance methods : <nl> new file mode 100644 <nl> index 000000000000 . . ec56b38f44ed <nl> mmm / dev / null <nl> ppp b / spec / fixtures / assets / LICENSE <nl> <nl> + tone . wav <nl> + http : / / soundbible . com / 1815 - A - Tone . html <nl> + License : Public Domain <nl> new file mode 100644 <nl> index 000000000000 . . 7fbc54cbe196 <nl> Binary files / dev / null and b / spec / fixtures / assets / tone . wav differ <nl> new file mode 100644 <nl> index 000000000000 . . 0fda8e7075bc <nl> mmm / dev / null <nl> ppp b / spec / fixtures / pages / audio . html <nl> @ @ - 0 , 0 + 1 @ @ <nl> + < audio autoplay muted src = " . . / assets / tone . wav " > < / audio > <nl> mmm a / spec / webview - spec . coffee <nl> ppp b / spec / webview - spec . 
coffee <nl> describe ' < webview > tag ' , - > <nl> webview . src = " file : / / # { fixtures } / pages / onmouseup . html " <nl> webview . setAttribute ' nodeintegration ' , ' on ' <nl> document . body . appendChild webview <nl> + <nl> + describe ' media - started - playing media - paused events ' , - > <nl> + it ' emits when audio starts and stops playing ' , ( done ) - > <nl> + audioPlayed = false <nl> + webview . addEventListener ' media - started - playing ' , - > <nl> + audioPlayed = true <nl> + webview . addEventListener ' media - paused ' , - > <nl> + assert audioPlayed <nl> + done ( ) <nl> + webview . src = " file : / / # { fixtures } / pages / audio . html " <nl> + document . body . appendChild webview <nl>
Add media play events to webview
electron/electron
fed0c439700fc54f0152f3039ac6058faf2ae8c1
2015-12-20T03:37:51Z
mmm a / Marlin / src / inc / Version . h <nl> ppp b / Marlin / src / inc / Version . h <nl> <nl> * here we define this default string as the date where the latest release <nl> * version was tagged . <nl> * / <nl> - # define STRING_DISTRIBUTION_DATE " 2019 - 09 - 20 " <nl> + # define STRING_DISTRIBUTION_DATE " 2019 - 09 - 21 " <nl> <nl> / * * <nl> * Required minimum Configuration . h and Configuration_adv . h file versions . <nl>
[ cron ] Bump distribution date
MarlinFirmware/Marlin
7fc4f7b815949eb9ec4b8e7c5399d154d0f28c94
2019-09-21T05:00:13Z
mmm a / src / mongo / shell / shell_options_test . cpp <nl> ppp b / src / mongo / shell / shell_options_test . cpp <nl> namespace { <nl> ASSERT_EQUALS ( iterator - > _positionalStart , - 1 ) ; <nl> ASSERT_EQUALS ( iterator - > _positionalEnd , - 1 ) ; <nl> } <nl> + else if ( iterator - > _dottedName = = " useLegacyWriteOps " ) { <nl> + ASSERT_EQUALS ( iterator - > _singleName , " useLegacyWriteOps " ) ; <nl> + ASSERT_EQUALS ( iterator - > _type , moe : : Switch ) ; <nl> + ASSERT_EQUALS ( iterator - > _description , <nl> + " use legacy write ops instead of write commands " ) ; <nl> + ASSERT_EQUALS ( iterator - > _isVisible , false ) ; <nl> + ASSERT_TRUE ( iterator - > _default . isEmpty ( ) ) ; <nl> + ASSERT_TRUE ( iterator - > _implicit . isEmpty ( ) ) ; <nl> + ASSERT_EQUALS ( iterator - > _isComposing , false ) ; <nl> + ASSERT_EQUALS ( iterator - > _sources , moe : : SourceAll ) ; <nl> + ASSERT_EQUALS ( iterator - > _positionalStart , - 1 ) ; <nl> + ASSERT_EQUALS ( iterator - > _positionalEnd , - 1 ) ; <nl> + } <nl> # ifdef MONGO_SSL <nl> else if ( iterator - > _dottedName = = " ssl " ) { <nl> ASSERT_EQUALS ( iterator - > _singleName , " ssl " ) ; <nl>
SERVER - 11627 Make the shell use write commands by default
mongodb/mongo
b6c8f3bc6bfb11c15d0b6708ca7bc0dfe584a39f
2013-11-08T16:02:21Z
mmm a / xbmc / cores / dvdplayer / Codecs / libdvd / libdvdread / src / dvd_input . h <nl> ppp b / xbmc / cores / dvdplayer / Codecs / libdvd / libdvdread / src / dvd_input . h <nl> <nl> <nl> # define DVDINPUT_READ_DECRYPT ( 1 < < 0 ) <nl> <nl> + # if defined ( __MINGW32__ ) <nl> + # undef lseek <nl> + # define lseek _lseeki64 <nl> + # undef fseeko <nl> + # define fseeko fseeko64 <nl> + # undef ftello <nl> + # define ftello ftello64 <nl> + # define flockfile ( . . . ) <nl> + # define funlockfile ( . . . ) <nl> + # define getc_unlocked getc <nl> + # undef off_t <nl> + # define off_t off64_t <nl> + # undef stat <nl> + # define stat _stati64 <nl> + # define fstat _fstati64 <nl> + # define wstat _wstati64 <nl> + # endif <nl> + <nl> + <nl> typedef struct dvd_input_s * dvd_input_t ; <nl> <nl> / * * <nl>
[ WIN32 ] added largefile support to libdvdread based on a patch posted here : http : / / www . mail - archive . com / dvdnav - discuss @ mplayerhq . hu / msg00866 . html
xbmc/xbmc
737e4e32132e30aca110bf40a26546c97b87d10f
2010-01-05T21:33:43Z
deleted file mode 100644 <nl> index 5ea6a9518 . . 000000000 <nl> mmm a / build / fbcode_builder / manifests / mononoke <nl> ppp / dev / null <nl> <nl> - [ manifest ] <nl> - name = mononoke <nl> - fbsource_path = fbcode / eden <nl> - shipit_project = eden <nl> - shipit_fbcode_builder = true <nl> - <nl> - [ git ] <nl> - repo_url = https : / / github . com / facebookexperimental / eden . git <nl> - <nl> - [ build . not ( os = windows ) ] <nl> - builder = cargo <nl> - <nl> - [ build . os = windows ] <nl> - # building Mononoke on windows is not supported <nl> - builder = nop <nl> - <nl> - [ cargo ] <nl> - build_doc = true <nl> - workspace_dir = eden / mononoke <nl> - <nl> - [ shipit . pathmap ] <nl> - fbcode / configerator / structs / scm / mononoke / public_autocargo = eden / mononoke / config_structs <nl> - fbcode / configerator / structs / scm / mononoke = eden / mononoke / config_structs <nl> - fbcode / eden / oss = . <nl> - fbcode / eden = eden <nl> - fbcode / eden / mononoke / public_autocargo = eden / mononoke <nl> - fbcode / tools / lfs = tools / lfs <nl> - tools / rust / ossconfigs = . <nl> - <nl> - [ shipit . strip ] <nl> - # strip all code unrelated to mononoke to prevent triggering unnecessary checks <nl> - ^ fbcode / eden / ( ? ! mononoke | scm / lib / xdiff . * ) / . * $ <nl> - ^ fbcode / eden / mononoke / ( ? ! public_autocargo ) . + / Cargo \ . toml $ <nl> - <nl> - [ dependencies ] <nl> - fbthrift - rust <nl> - rust - shed <nl> - <nl> - [ dependencies . fb = on ] <nl> - rust <nl>
mononoke / opensource : temporarily remove mononoke manifest
facebook/watchman
f2c6d7c3492901f7f9d9a588c42bb3e24ea804e5
2020-05-05T11:20:10Z
mmm a / dlib / CMakeLists . txt <nl> ppp b / dlib / CMakeLists . txt <nl> if ( NOT TARGET dlib ) <nl> endif ( ) <nl> <nl> endif ( ) <nl> - <nl> - <nl> - # put dlib_needed_includes into the parent scope so the dlib / cmake file can use it . <nl> - if ( has_parent ) <nl> - set ( dlib_needed_includes $ { dlib_needed_includes } PARENT_SCOPE ) <nl> - endif ( ) <nl> - <nl>
removed unneeded cruft
davisking/dlib
a2277560fbfcad0730ddc3923f79f8551ff3abf1
2017-02-28T00:02:39Z
mmm a / src / video_core / rasterizer . cpp <nl> ppp b / src / video_core / rasterizer . cpp <nl> <nl> # include " common / math_util . h " <nl> <nl> # include " math . h " <nl> + # include " color . h " <nl> # include " pica . h " <nl> # include " rasterizer . h " <nl> # include " vertex_shader . h " <nl> static void DrawPixel ( int x , int y , const Math : : Vec4 < u8 > & color ) { <nl> break ; <nl> } <nl> <nl> + case registers . framebuffer . RGBA4 : <nl> + { <nl> + u8 * pixel = color_buffer + ( x + y * registers . framebuffer . GetWidth ( ) ) * 2 ; <nl> + pixel [ 1 ] = ( color . r ( ) & 0xF0 ) | ( color . g ( ) > > 4 ) ; <nl> + pixel [ 0 ] = ( color . b ( ) & 0xF0 ) | ( color . a ( ) > > 4 ) ; <nl> + break ; <nl> + } <nl> + <nl> default : <nl> LOG_CRITICAL ( Render_Software , " Unknown framebuffer color format % x " , registers . framebuffer . color_format ) ; <nl> UNIMPLEMENTED ( ) ; <nl> static const Math : : Vec4 < u8 > GetPixel ( int x , int y ) { <nl> ret . a ( ) = pixel [ 0 ] ; <nl> return ret ; <nl> } <nl> + <nl> + case registers . framebuffer . RGBA4 : <nl> + { <nl> + Math : : Vec4 < u8 > ret ; <nl> + u8 * pixel = color_buffer + ( x + y * registers . framebuffer . GetWidth ( ) ) * 2 ; <nl> + ret . r ( ) = Color : : Convert4To8 ( pixel [ 1 ] > > 4 ) ; <nl> + ret . g ( ) = Color : : Convert4To8 ( pixel [ 1 ] & 0x0F ) ; <nl> + ret . b ( ) = Color : : Convert4To8 ( pixel [ 0 ] > > 4 ) ; <nl> + ret . a ( ) = Color : : Convert4To8 ( pixel [ 0 ] & 0x0F ) ; <nl> + return ret ; <nl> + } <nl> + <nl> default : <nl> LOG_CRITICAL ( Render_Software , " Unknown framebuffer color format % x " , registers . framebuffer . color_format ) ; <nl> UNIMPLEMENTED ( ) ; <nl>
Rasterizer : Add support for RGBA4 framebuffer format .
yuzu-emu/yuzu
ed255ebaecbfa57258b70dfb1e7da3f0cd2c1e58
2015-02-25T00:58:33Z
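The RGBA4 support added above packs each 8-bit colour channel down to 4 bits across two bytes and expands it back with Color::Convert4To8 when reading. Below is a minimal Python sketch of that byte layout, mirroring the DrawPixel/GetPixel code in the diff; the assumption that Convert4To8 replicates the high nibble into the low one ((v << 4) | v) is the usual 4-to-8-bit expansion and is not itself shown in the diff.

```python
def pack_rgba4(r, g, b, a):
    """Pack 8-bit RGBA into the two-byte RGBA4 layout used by DrawPixel:
    byte1 holds the R and G nibbles, byte0 holds the B and A nibbles."""
    byte1 = (r & 0xF0) | (g >> 4)
    byte0 = (b & 0xF0) | (a >> 4)
    return byte0, byte1


def convert4to8(v):
    """Assumed Convert4To8: expand a 4-bit value by nibble replication (0xF -> 0xFF)."""
    return (v << 4) | v


def unpack_rgba4(byte0, byte1):
    """Inverse of pack_rgba4, mirroring GetPixel in the diff."""
    return (convert4to8(byte1 >> 4),    # r
            convert4to8(byte1 & 0x0F),  # g
            convert4to8(byte0 >> 4),    # b
            convert4to8(byte0 & 0x0F))  # a


if __name__ == "__main__":
    b0, b1 = pack_rgba4(0xDE, 0xAD, 0xBE, 0xEF)
    print(unpack_rgba4(b0, b1))  # (0xDD, 0xAA, 0xBB, 0xEE)
```

Round-tripping loses the low nibble of every channel, which is the expected precision cost of a 4-bit-per-channel framebuffer format.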
mmm a / lib / AST / ProtocolConformance . cpp <nl> ppp b / lib / AST / ProtocolConformance . cpp <nl> SpecializedProtocolConformance : : SpecializedProtocolConformance ( <nl> / / terms of the specialized types , not the conformance - declaring decl ' s <nl> / / types . <nl> auto nominal = GenericConformance - > getType ( ) - > getAnyNominal ( ) ; <nl> - auto subMap = <nl> - getType ( ) - > getContextSubstitutionMap ( nominal - > getModuleContext ( ) , <nl> - nominal ) ; <nl> + auto module = nominal - > getModuleContext ( ) ; <nl> + auto subMap = getType ( ) - > getContextSubstitutionMap ( module , nominal ) ; <nl> <nl> SmallVector < Requirement , 4 > newReqs ; <nl> for ( auto oldReq : GenericConformance - > getConditionalRequirements ( ) ) { <nl> - if ( auto newReq = oldReq . subst ( subMap ) ) <nl> + if ( auto newReq = oldReq . subst ( QuerySubstitutionMap { subMap } , <nl> + LookUpConformanceInModule ( module ) ) ) <nl> newReqs . push_back ( * newReq ) ; <nl> } <nl> auto & ctxt = getProtocol ( ) - > getASTContext ( ) ; <nl> mmm a / test / Generics / conditional_conformances . swift <nl> ppp b / test / Generics / conditional_conformances . swift <nl> func existential_bad < T > ( _ : T . Type ) { <nl> _ = Free < T > ( ) as P2 / / expected - error { { ' Free < T > ' is not convertible to ' P2 ' ; did you mean to use ' as ! ' to force downcast ? } } <nl> } <nl> <nl> + / / rdar : / / problem / 35837054 <nl> + protocol P7 { } <nl> + <nl> + protocol P8 { <nl> + associatedtype A <nl> + } <nl> + <nl> + struct X0 { } <nl> + <nl> + struct X1 { } <nl> + <nl> + extension X1 : P8 { <nl> + typealias A = X0 <nl> + } <nl> + <nl> + struct X2 < T > { } <nl> + <nl> + extension X2 : P7 where T : P8 , T . A : P7 { } <nl> + <nl> + func takesF7 < T : P7 > ( _ : T ) { } <nl> + func passesConditionallyNotF7 ( x21 : X2 < X1 > ) { <nl> + takesF7 ( x21 ) / / expected - error { { type ' X1 . A ' ( aka ' X0 ' ) does not conform to protocol ' P7 ' } } <nl> + / / expected - error @ - 1 { { ' < T where T : P7 > ( T ) - > ( ) ' requires that ' X1 . A ' ( aka ' X0 ' ) conform to ' P7 ' } } <nl> + / / expected - note @ - 2 { { requirement specified as ' X1 . A ' ( aka ' X0 ' ) : ' P7 ' } } <nl> + / / expected - note @ - 3 { { requirement from conditional conformance of ' X2 < X1 > ' to ' P7 ' } } <nl> + } <nl>
Merge pull request from DougGregor / se - 0143 - rdar - 35837054
apple/swift
ec807a86c14799d3f52fb617c5b43fbd690ec18b
2017-12-05T00:13:39Z
mmm a / googletest / docs / primer . md <nl> ppp b / googletest / docs / primer . md <nl> To create a fixture : <nl> 1 . If necessary , write a destructor or ` TearDown ( ) ` function to release any <nl> resources you allocated in ` SetUp ( ) ` . To learn when you should use the <nl> constructor / destructor and when you should use ` SetUp ( ) / TearDown ( ) ` , read <nl> - this [ FAQ ] ( faq . md # should - i - use - the - constructordestructor - of - the - test - fixture - or - the - set - uptear - down - function ) entry . <nl> + this [ FAQ ] ( faq . md # should - i - use - the - constructordestructor - of - the - test - fixture - or - setupteardown ) entry . <nl> 1 . If needed , define subroutines for your tests to share . <nl> <nl> When using a fixture , use ` TEST_F ( ) ` instead of ` TEST ( ) ` as it allows you to <nl>
Fix broken FAQ link in primer . md
google/googletest
ca912f8b7000268da4ae37d68eda3b9b513d63ef
2018-08-29T04:03:19Z
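The fix above only rewrites the URL fragment: GitHub derives markdown anchors from heading text, and the FAQ heading had been reworded, so the old "-set-uptear-down-function" slug no longer existed. A rough Python sketch of that slug rule follows (lowercase, strip punctuation, spaces to hyphens); both the heading text and the rule are reconstructed from the anchor and GitHub's usual behaviour, so treat them as approximations.

```python
import re


def github_anchor(heading):
    """Approximate GitHub's heading-to-anchor rule: lowercase, drop every
    character that is not a word character, space or hyphen, then replace
    spaces with hyphens."""
    slug = re.sub(r"[^\w\- ]", "", heading.lower())
    return slug.replace(" ", "-")


heading = "Should I use the constructor/destructor of the test fixture or SetUp()/TearDown()?"
print(github_anchor(heading))
# should-i-use-the-constructordestructor-of-the-test-fixture-or-setupteardown
```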
mmm a / src / mongo / s / d_logic . cpp <nl> ppp b / src / mongo / s / d_logic . cpp <nl> namespace mongo { <nl> <nl> OID writebackID ; <nl> writebackID . initSequential ( ) ; <nl> - lastError . getSafe ( ) - > writeback ( writebackID ) ; <nl> <nl> const OID & clientID = ShardedConnectionInfo : : get ( false ) - > getID ( ) ; <nl> massert ( 10422 , " write with bad shard config and no server id ! " , clientID . isSet ( ) ) ; <nl> namespace mongo { <nl> <nl> b . appendBinData ( " msg " , m . header ( ) - > len , bdtCustom , ( char * ) ( m . singleData ( ) ) ) ; <nl> LOG ( 2 ) < < " writing back msg with len : " < < m . header ( ) - > len < < " op : " < < m . operation ( ) < < endl ; <nl> + <nl> + / / Don ' t register the writeback until immediately before we queue it - <nl> + / / after this line , mongos will wait for an hour if we don ' t queue correctly <nl> + lastError . getSafe ( ) - > writeback ( writebackID ) ; <nl> + <nl> writeBackManager . queueWriteBack ( clientID . str ( ) , b . obj ( ) ) ; <nl> <nl> return true ; <nl>
SERVER - 6450 register wb with gle immediately before queueing
mongodb/mongo
3ac0e57723cd6169e305b6795b686ec45b2d4b83
2012-07-30T20:06:32Z
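The SERVER-6450 change above is purely an ordering fix: the writeback ID used to be registered with the client's last-error state before the message was built and queued, so any failure in between left the client waiting (per the diff comment, for up to an hour) on a writeback that never reached the queue. The following generic Python sketch illustrates that hazard and the reordering; the tracker and queue here are invented for illustration and are not mongos data structures.

```python
import itertools
import queue

_ids = itertools.count()
pending = {}                 # writeback id -> waiting client (illustrative only)
writebacks = queue.Queue()   # illustrative stand-in for the writeback queue


def queue_writeback_buggy(client_id, build_message):
    """Old ordering: register the id, then build and enqueue the message.
    If build_message raises, `pending` keeps an id that never appears on
    the queue, and the client blocks waiting for it."""
    wb_id = next(_ids)
    pending[wb_id] = client_id
    msg = build_message()          # a failure here strands the client
    writebacks.put((wb_id, msg))
    return wb_id


def queue_writeback_fixed(client_id, build_message):
    """New ordering: build first, register immediately before enqueueing,
    so there is no window in which a registered id can fail to be queued."""
    wb_id = next(_ids)
    msg = build_message()          # a failure here leaves no dangling id
    pending[wb_id] = client_id
    writebacks.put((wb_id, msg))
    return wb_id
```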
mmm a / emcc <nl> ppp b / emcc <nl> try : <nl> for i in range ( len ( newargs ) ) : <nl> newargs [ i ] = newargs [ i ] . strip ( ) # On Windows Vista ( and possibly others ) , excessive spaces in the command line leak into the items in this array , so trim e . g . ' foo . cpp ' - > ' foo . cpp ' <nl> if newargs [ i ] . startswith ( ' - O ' ) : <nl> - requested_level = newargs [ i ] [ 2 ] <nl> + # Let - O default to - O2 , which is what gcc does . <nl> + requested_level = newargs [ i ] [ 2 : ] or ' 2 ' <nl> if requested_level = = ' s ' : <nl> print > > sys . stderr , ' emcc : warning : - Os is ignored ( use - O0 , - O1 , - O2 ) ' <nl> else : <nl>
emcc : Improve handling of - O flag .
emscripten-core/emscripten
21f7959f64db7f3309e7f9d0d106abb1ea82f403
2013-02-21T01:50:49Z
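The emcc change above makes a bare `-O` behave like gcc's default of `-O2` by slicing off the flag prefix and falling back to '2' when nothing follows it. Here is a short sketch of that parsing rule on its own; the warning text is paraphrased from the diff, and levels other than 's' are passed through without further validation.

```python
import sys


def requested_opt_level(arg):
    """Return the optimisation level requested by a gcc-style -O flag,
    with a bare '-O' defaulting to '2' as in the emcc change."""
    if not arg.startswith("-O"):
        return None
    level = arg[2:] or "2"       # '' (bare -O) falls back to '2'
    if level == "s":
        print("warning: -Os is ignored (use -O0, -O1, -O2)", file=sys.stderr)
        return None
    return level


for flag in ("-O", "-O0", "-O2", "-Os"):
    print(flag, "->", requested_opt_level(flag))
```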
mmm a / plugins / chain_plugin / chain_plugin . cpp <nl> ppp b / plugins / chain_plugin / chain_plugin . cpp <nl> <nl> <nl> # include < eosio / chain / eosio_contract . hpp > <nl> <nl> - # include < eosio / chain / wast_to_wasm . hpp > <nl> - <nl> # include < boost / signals2 / connection . hpp > <nl> # include < boost / algorithm / string . hpp > <nl> # include < boost / lexical_cast . hpp > <nl> read_only : : get_code_results read_only : : get_code ( const get_code_params & params ) <nl> EOS_ASSERT ( params . code_as_wasm , unsupported_feature , " Returning WAST from get_code is no longer supported " ) ; <nl> <nl> if ( accnt . code . size ( ) ) { <nl> - if ( params . code_as_wasm ) { <nl> - result . wasm = string ( accnt . code . begin ( ) , accnt . code . end ( ) ) ; <nl> - } else { <nl> - result . wast = wasm_to_wast ( ( const uint8_t * ) accnt . code . data ( ) , accnt . code . size ( ) , true ) ; <nl> - } <nl> + result . wasm = string ( accnt . code . begin ( ) , accnt . code . end ( ) ) ; <nl> result . code_hash = fc : : sha256 : : hash ( accnt . code . data ( ) , accnt . code . size ( ) ) ; <nl> } <nl> <nl> abi_def abi ; <nl> if ( abi_serializer : : to_abi ( accnt . abi , abi ) ) { <nl> - <nl> result . abi = std : : move ( abi ) ; <nl> } <nl> <nl>
Remove some get_code dead code
EOSIO/eos
72053fb96fe1dece3422706b228699976052c467
2018-11-19T22:16:46Z
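After the cleanup above, get_code always returns the raw WASM plus a code hash taken over the code bytes (fc::sha256::hash in the diff). As a hedged client-side counterpart, the hash can be recomputed from the returned bytes to sanity-check a response; how the wasm bytes are encoded on the wire is not shown in the diff, so the decoding step below is an assumption.

```python
import hashlib


def verify_code_hash(wasm_bytes, reported_hash):
    """Recompute SHA-256 over the contract's WASM bytes and compare it with
    the code_hash field reported by get_code (hex, case-insensitive)."""
    return hashlib.sha256(wasm_bytes).hexdigest() == reported_hash.lower()


# Hypothetical usage, assuming the response carries the wasm hex-encoded:
# response = {"wasm": "0061736d...", "code_hash": "..."}
# ok = verify_code_hash(bytes.fromhex(response["wasm"]), response["code_hash"])
```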
mmm a / build / fbcode_builder / manifests / mononoke <nl> ppp b / build / fbcode_builder / manifests / mononoke <nl> build_doc = true <nl> workspace_dir = eden / mononoke <nl> <nl> [ shipit . pathmap ] <nl> - fbcode / configerator / structs / scm / mononoke / public_autocargo = eden / mononoke / config_structs <nl> - fbcode / configerator / structs / scm / mononoke = eden / mononoke / config_structs <nl> + fbcode / configerator / structs / scm / mononoke / public_autocargo = configerator / structs / scm / mononoke <nl> + fbcode / configerator / structs / scm / mononoke = configerator / structs / scm / mononoke <nl> fbcode / eden / oss = . <nl> fbcode / eden = eden <nl> fbcode / eden / mononoke / public_autocargo = eden / mononoke <nl>
mononoke / configerator structs : change the OSS folder where configerator structs are saved
facebook/folly
d0c01e875821ed36c4cc875b88354b6b53b3a597
2020-06-24T16:38:52Z
mmm a / lib / Sema / CSDiagnostics . cpp <nl> ppp b / lib / Sema / CSDiagnostics . cpp <nl> Expr * FailureDiagnostic : : getBaseExprFor ( Expr * anchor ) const { <nl> return SE - > getBase ( ) ; <nl> else if ( auto * MRE = dyn_cast < MemberRefExpr > ( anchor ) ) <nl> return MRE - > getBase ( ) ; <nl> + else if ( auto * call = dyn_cast < CallExpr > ( anchor ) ) { <nl> + auto fnType = getType ( call - > getFn ( ) ) ; <nl> + if ( fnType - > isCallableNominalType ( getDC ( ) ) ) { <nl> + return call - > getFn ( ) ; <nl> + } <nl> + } <nl> <nl> return nullptr ; <nl> } <nl> mmm a / test / Sema / call_as_function_simple . swift <nl> ppp b / test / Sema / call_as_function_simple . swift <nl> struct Mutating { <nl> } <nl> } <nl> func testMutating ( _ x : Mutating , _ y : inout Mutating ) { <nl> - / / TODO ( SR - 11378 ) : Improve this error to match the error using a direct ` callAsFunction ` member reference . <nl> - / / expected - error @ + 2 { { cannot call value of non - function type ' Mutating ' } } <nl> - / / expected - error @ + 1 { { cannot invoke ' x ' with no arguments } } <nl> + / / expected - error @ + 1 { { cannot use mutating member on immutable value : ' x ' is a ' let ' constant } } <nl> _ = x ( ) <nl> / / expected - error @ + 1 { { cannot use mutating member on immutable value : ' x ' is a ' let ' constant } } <nl> _ = x . callAsFunction ( ) <nl>
[ Diagnostics ] Improve diagnostics for mutating ` callAsFunction ` used on r - value
apple/swift
184e238707e0479d20608db16db96db4afd3269e
2019-12-07T00:43:18Z
mmm a / tensorflow / python / keras / backend . py <nl> ppp b / tensorflow / python / keras / backend . py <nl> def relu ( x , alpha = 0 . , max_value = None , threshold = 0 ) : <nl> clip_max = max_value is not None <nl> <nl> if alpha ! = 0 . : <nl> + if max_value is None and threshold = = 0 : <nl> + return nn . leaky_relu ( x , alpha = alpha ) <nl> + <nl> if threshold ! = 0 : <nl> negative_part = nn . relu ( - x + threshold ) <nl> else : <nl> mmm a / tensorflow / python / keras / backend_test . py <nl> ppp b / tensorflow / python / keras / backend_test . py <nl> def test_relu ( self ) : <nl> relu_op = keras . backend . relu ( x ) <nl> self . assertAllClose ( keras . backend . eval ( relu_op ) , [ [ 0 , 0 ] , [ 2 , 7 ] ] ) <nl> <nl> - # alpha <nl> + # alpha ( leaky relu used ) <nl> relu_op = keras . backend . relu ( x , alpha = 0 . 5 ) <nl> + self . assertTrue ( ' LeakyRelu ' in relu_op . name ) <nl> self . assertAllClose ( keras . backend . eval ( relu_op ) , [ [ - 2 , 0 ] , [ 2 , 7 ] ] ) <nl> <nl> # max_value < some elements <nl> mmm a / tensorflow / python / keras / layers / advanced_activations . py <nl> ppp b / tensorflow / python / keras / layers / advanced_activations . py <nl> def __init__ ( self , axis = - 1 , * * kwargs ) : <nl> self . axis = axis <nl> <nl> def call ( self , inputs ) : <nl> - return activations . softmax ( inputs , axis = self . axis ) <nl> + return K . softmax ( inputs , axis = self . axis ) <nl> <nl> def get_config ( self ) : <nl> config = { ' axis ' : self . axis } <nl> def __init__ ( self , max_value = None , negative_slope = 0 , threshold = 0 , * * kwargs ) : <nl> def call ( self , inputs ) : <nl> # alpha is used for leaky relu slope in activations instead of <nl> # negative_slope . <nl> - return activations . relu ( <nl> - inputs , <nl> - alpha = self . negative_slope , <nl> - max_value = self . max_value , <nl> - threshold = self . threshold ) <nl> + return K . relu ( inputs , <nl> + alpha = self . negative_slope , <nl> + max_value = self . max_value , <nl> + threshold = self . threshold ) <nl> <nl> def get_config ( self ) : <nl> config = { <nl>
Make Keras relu use nn . leaky_relu when appropriate .
tensorflow/tensorflow
eb5cd6926ef8d2a5a748f1aa978e51148e22dd97
2018-09-14T01:27:33Z
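The backend change above routes relu(x, alpha=...) to nn.leaky_relu whenever there is no max_value clamp and the threshold is zero, because in that case the two functions are identical. Below is a numpy sketch of that equivalence, using the documented backend.relu semantics (x above the threshold, alpha * (x - threshold) below it, optionally clipped at max_value); the sample input is chosen so the alpha=0.5 result reproduces the [[-2, 0], [2, 7]] expectation asserted in the updated test.

```python
import numpy as np


def keras_style_relu(x, alpha=0.0, max_value=None, threshold=0.0):
    """Reference implementation of the documented backend.relu semantics."""
    x = np.asarray(x, dtype=float)
    out = np.where(x >= threshold, x, alpha * (x - threshold))
    if max_value is not None:
        out = np.minimum(out, max_value)
    return out


def leaky_relu(x, alpha):
    x = np.asarray(x, dtype=float)
    return np.where(x >= 0.0, x, alpha * x)


x = np.array([[-4.0, 0.0], [2.0, 7.0]])
# With max_value=None and threshold=0, relu(x, alpha) is exactly leaky relu,
# which is the case the backend now hands off to nn.leaky_relu.
assert np.allclose(keras_style_relu(x, alpha=0.5), leaky_relu(x, 0.5))
print(keras_style_relu(x, alpha=0.5))   # [[-2.  0.]
                                        #  [ 2.  7.]]
```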
mmm a / src / ast / context - slot - cache . cc <nl> ppp b / src / ast / context - slot - cache . cc <nl> <nl> <nl> # include " src / ast / scopes . h " <nl> # include " src / bootstrapper . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> mmm a / test / cctest / compiler / test - js - constant - cache . cc <nl> ppp b / test / cctest / compiler / test - js - constant - cache . cc <nl> <nl> # include " src / assembler . h " <nl> # include " src / compiler / js - graph . h " <nl> # include " src / compiler / node - properties . h " <nl> - # include " src / factory . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> - # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> + # include " src / factory - inl . h " <nl> # include " test / cctest / cctest . h " <nl> # include " test / cctest / compiler / value - helper . h " <nl> <nl> mmm a / test / cctest / compiler / test - js - typed - lowering . cc <nl> ppp b / test / cctest / compiler / test - js - typed - lowering . cc <nl> <nl> # include " src / compiler / operator - properties . h " <nl> # include " src / compiler / simplified - operator . h " <nl> # include " src / compiler / typer . h " <nl> - # include " src / factory . h " <nl> + # include " src / factory - inl . h " <nl> # include " src / isolate . h " <nl> - # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> + # include " src / objects . h " <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / compiler / test - run - jsobjects . cc <nl> ppp b / test / cctest / compiler / test - run - jsobjects . cc <nl> <nl> <nl> # include " src / factory . h " <nl> # include " src / isolate . h " <nl> - # include " src / objects . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " test / cctest / compiler / function - tester . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / heap / test - compaction . cc <nl> ppp b / test / cctest / heap / test - compaction . 
cc <nl> <nl> # include " src / factory . h " <nl> # include " src / heap / mark - compact . h " <nl> # include " src / isolate . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " test / cctest / cctest . h " <nl> # include " test / cctest / heap / heap - tester . h " <nl> # include " test / cctest / heap / heap - utils . h " <nl> mmm a / test / cctest / heap / test - lab . cc <nl> ppp b / test / cctest / heap / test - lab . cc <nl> <nl> # include < vector > <nl> <nl> # include " src / globals . h " <nl> - # include " src / heap / heap . h " <nl> - # include " src / heap / spaces . h " <nl> + # include " src / heap / heap - inl . h " <nl> # include " src / heap / spaces - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / heap / incremental - marking . h - > src / objects - inl . h <nl> - # include " src / objects - inl . h " <nl> + # include " src / objects . h " <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / heap / test - page - promotion . cc <nl> ppp b / test / cctest / heap / test - page - promotion . cc <nl> <nl> # include " src / heap / array - buffer - tracker . h " <nl> # include " src / heap / spaces - inl . h " <nl> # include " src / isolate . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> # include " test / cctest / cctest . h " <nl> # include " test / cctest / heap / heap - tester . h " <nl> mmm a / test / cctest / heap / test - spaces . cc <nl> ppp b / test / cctest / heap / test - spaces . cc <nl> <nl> # include " src / base / platform / platform . h " <nl> # include " src / factory . h " <nl> # include " src / heap / spaces - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / heap / incremental - marking . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> # include " src / snapshot / snapshot . h " <nl> - # include " src / v8 . h " <nl> # include " test / cctest / cctest . h " <nl> # include " test / cctest / heap / heap - tester . h " <nl> # include " test / cctest / heap / heap - utils . h " <nl> mmm a / test / cctest / test - array - list . cc <nl> ppp b / test / cctest / test - array - list . cc <nl> <nl> <nl> # include < stdlib . h > <nl> <nl> - # include " src / v8 . h " <nl> - <nl> # include " src / factory . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . 
h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / test - code - layout . cc <nl> ppp b / test / cctest / test - code - layout . cc <nl> <nl> <nl> # include " src / factory . h " <nl> # include " src / isolate . h " <nl> - # include " src / objects . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / test - conversions . cc <nl> ppp b / test / cctest / test - conversions . cc <nl> <nl> <nl> # include " src / base / platform / platform . h " <nl> # include " src / conversions . h " <nl> - # include " src / factory . h " <nl> + # include " src / factory - inl . h " <nl> # include " src / isolate . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> - # include " src / objects - inl . h " <nl> # include " src / objects . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " src / unicode - cache . h " <nl> # include " src / v8 . h " <nl> # include " test / cctest / cctest . h " <nl> mmm a / test / cctest / test - identity - map . cc <nl> ppp b / test / cctest / test - identity - map . cc <nl> <nl> <nl> # include < set > <nl> <nl> - # include " src / factory . h " <nl> + # include " src / factory - inl . h " <nl> # include " src / identity - map . h " <nl> # include " src / isolate . h " <nl> # include " src / objects . h " <nl> # include " src / zone / zone . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> - # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> - # include " src / v8 . h " <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / test - mementos . cc <nl> ppp b / test / cctest / test - mementos . cc <nl> <nl> # include " src / factory . h " <nl> # include " src / heap / heap . h " <nl> # include " src / isolate . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . 
h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / test - symbols . cc <nl> ppp b / test / cctest / test - symbols . cc <nl> <nl> # include " src / factory . h " <nl> # include " src / isolate . h " <nl> # include " src / objects . h " <nl> + # include " src / objects / name - inl . h " <nl> # include " src / ostreams . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> - # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> - # include " src / v8 . h " <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> mmm a / test / cctest / test - transitions . cc <nl> ppp b / test / cctest / test - transitions . cc <nl> <nl> # include " src / factory . h " <nl> # include " src / field - type . h " <nl> # include " src / global - handles . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / field - type . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> # include " src / transitions . h " <nl> # include " test / cctest / cctest . h " <nl> mmm a / test / cctest / test - types . cc <nl> ppp b / test / cctest / test - types . cc <nl> <nl> # include < vector > <nl> <nl> # include " src / compiler / types . h " <nl> - # include " src / factory . h " <nl> + # include " src / factory - inl . h " <nl> # include " src / heap / heap . h " <nl> # include " src / isolate . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> - # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> + # include " src / objects . h " <nl> # include " test / cctest / cctest . h " <nl> # include " test / cctest / types - fuzz . h " <nl> <nl> mmm a / test / cctest / test - weakmaps . cc <nl> ppp b / test / cctest / test - weakmaps . cc <nl> <nl> <nl> # include < utility > <nl> <nl> - # include " src / v8 . h " <nl> - <nl> # include " src / factory . h " <nl> # include " src / global - handles . h " <nl> # include " src / isolate . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " test / cctest / cctest . h " <nl> # include " test / cctest / heap / heap - utils . h " <nl> <nl> mmm a / test / cctest / test - weaksets . cc <nl> ppp b / test / cctest / test - weaksets . 
cc <nl> <nl> <nl> # include < utility > <nl> <nl> - # include " src / v8 . h " <nl> - <nl> # include " src / factory . h " <nl> # include " src / global - handles . h " <nl> # include " src / isolate . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / factory . h - > src / objects - inl . h <nl> # include " src / objects - inl . h " <nl> - / / FIXME ( mstarzinger , marja ) : This is weird , but required because of the missing <nl> - / / ( disallowed ) include : src / feedback - vector . h - > <nl> - / / src / feedback - vector - inl . h <nl> - # include " src / feedback - vector - inl . h " <nl> # include " test / cctest / cctest . h " <nl> # include " test / cctest / heap / heap - utils . h " <nl> <nl>
[ iwyu ] Remove stale TODOs about objects - inl . h inclusion .
v8/v8
f7da4d7110569b641feac8e684d2ca13dd4eda9b
2017-10-09T11:14:59Z
mmm a / tensorflow / lite / micro / tools / make / flatbuffers_download . sh <nl> ppp b / tensorflow / lite / micro / tools / make / flatbuffers_download . sh <nl> else <nl> FLATBUFFERS_MD5 = " aa9adc93eb9b33fa1a2a90969e48baee " <nl> <nl> wget $ { FLATBUFFERS_URL } - O / tmp / $ { ZIP_PREFIX } . zip > & 2 <nl> - MD5 = ` md5sum / tmp / $ { ZIP_PREFIX } . zip | awk ' { print $ 1 } ' ` <nl> + <nl> + if ! command - v md5sum & > / dev / null <nl> + then <nl> + MD5 = ` md5 - r / tmp / $ { ZIP_PREFIX } . zip | awk ' { print $ 1 } ' ` <nl> + else <nl> + MD5 = ` md5sum / tmp / $ { ZIP_PREFIX } . zip | awk ' { print $ 1 } ' ` <nl> + fi <nl> <nl> if [ [ $ { MD5 } ! = $ { FLATBUFFERS_MD5 } ] ] <nl> then <nl>
Merge pull request from vikramdattu : bugfix / md5_r_when_no_md5sum
tensorflow/tensorflow
49fc208865e6fe12b98fca92baad0142e7191c82
2020-12-29T18:36:17Z
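The download-script fix above is needed because macOS has no md5sum; it ships `md5`, whose `-r` flag prints the digest first just like md5sum, so only the command name changes. A portable Python sketch that avoids the difference entirely by hashing the archive with hashlib is shown below; the expected digest is the FLATBUFFERS_MD5 value from the script, while the local path is a placeholder.

```python
import hashlib


def md5_of_file(path, chunk_size=1 << 20):
    """Stream the file through hashlib.md5 so large archives are hashed
    without being loaded into memory (same result as md5sum / md5 -r)."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def check_download(path, expected_md5):
    actual = md5_of_file(path)
    if actual != expected_md5:
        raise RuntimeError(f"bad MD5 for {path}: {actual} != {expected_md5}")


# check_download("/tmp/flatbuffers.zip", "aa9adc93eb9b33fa1a2a90969e48baee")
```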
mmm a / src / heap / heap . cc <nl> ppp b / src / heap / heap . cc <nl> void Heap : : CreateInitialObjects ( ) { <nl> <nl> { <nl> StaticFeedbackVectorSpec spec ; <nl> - FeedbackVectorSlot load_ic_slot = spec . AddLoadICSlot ( ) ; <nl> - FeedbackVectorSlot keyed_load_ic_slot = spec . AddKeyedLoadICSlot ( ) ; <nl> - FeedbackVectorSlot store_ic_slot = spec . AddStoreICSlot ( ) ; <nl> - FeedbackVectorSlot keyed_store_ic_slot = spec . AddKeyedStoreICSlot ( ) ; <nl> - <nl> - DCHECK_EQ ( load_ic_slot , <nl> - FeedbackVectorSlot ( TypeFeedbackVector : : kDummyLoadICSlot ) ) ; <nl> - DCHECK_EQ ( keyed_load_ic_slot , <nl> + FeedbackVectorSlot slot = spec . AddLoadICSlot ( ) ; <nl> + DCHECK_EQ ( slot , FeedbackVectorSlot ( TypeFeedbackVector : : kDummyLoadICSlot ) ) ; <nl> + <nl> + slot = spec . AddKeyedLoadICSlot ( ) ; <nl> + DCHECK_EQ ( slot , <nl> FeedbackVectorSlot ( TypeFeedbackVector : : kDummyKeyedLoadICSlot ) ) ; <nl> - DCHECK_EQ ( store_ic_slot , <nl> - FeedbackVectorSlot ( TypeFeedbackVector : : kDummyStoreICSlot ) ) ; <nl> - DCHECK_EQ ( keyed_store_ic_slot , <nl> + <nl> + slot = spec . AddStoreICSlot ( ) ; <nl> + DCHECK_EQ ( slot , FeedbackVectorSlot ( TypeFeedbackVector : : kDummyStoreICSlot ) ) ; <nl> + <nl> + slot = spec . AddKeyedStoreICSlot ( ) ; <nl> + DCHECK_EQ ( slot , <nl> FeedbackVectorSlot ( TypeFeedbackVector : : kDummyKeyedStoreICSlot ) ) ; <nl> <nl> Handle < TypeFeedbackMetadata > dummy_metadata = <nl> void Heap : : CreateInitialObjects ( ) { <nl> Handle < TypeFeedbackVector > dummy_vector = <nl> TypeFeedbackVector : : New ( isolate ( ) , dummy_metadata ) ; <nl> <nl> - Object * megamorphic = * TypeFeedbackVector : : MegamorphicSentinel ( isolate ( ) ) ; <nl> - dummy_vector - > Set ( load_ic_slot , megamorphic , SKIP_WRITE_BARRIER ) ; <nl> - dummy_vector - > Set ( keyed_load_ic_slot , megamorphic , SKIP_WRITE_BARRIER ) ; <nl> - dummy_vector - > Set ( store_ic_slot , megamorphic , SKIP_WRITE_BARRIER ) ; <nl> - dummy_vector - > Set ( keyed_store_ic_slot , megamorphic , SKIP_WRITE_BARRIER ) ; <nl> - <nl> set_dummy_vector ( * dummy_vector ) ; <nl> + <nl> + / / Now initialize dummy vector ' s entries . <nl> + LoadICNexus ( isolate ( ) ) . ConfigureMegamorphic ( ) ; <nl> + StoreICNexus ( isolate ( ) ) . ConfigureMegamorphic ( ) ; <nl> + KeyedLoadICNexus ( isolate ( ) ) . ConfigureMegamorphicKeyed ( PROPERTY ) ; <nl> + KeyedStoreICNexus ( isolate ( ) ) . ConfigureMegamorphicKeyed ( PROPERTY ) ; <nl> } <nl> <nl> { <nl> mmm a / src / type - feedback - vector . h <nl> ppp b / src / type - feedback - vector . h <nl> class KeyedLoadICNexus : public FeedbackNexus { <nl> : FeedbackNexus ( vector , slot ) { <nl> DCHECK_EQ ( FeedbackVectorSlotKind : : KEYED_LOAD_IC , vector - > GetKind ( slot ) ) ; <nl> } <nl> + explicit KeyedLoadICNexus ( Isolate * isolate ) <nl> + : FeedbackNexus ( <nl> + TypeFeedbackVector : : DummyVector ( isolate ) , <nl> + FeedbackVectorSlot ( TypeFeedbackVector : : kDummyKeyedLoadICSlot ) ) { } <nl> KeyedLoadICNexus ( TypeFeedbackVector * vector , FeedbackVectorSlot slot ) <nl> : FeedbackNexus ( vector , slot ) { <nl> DCHECK_EQ ( FeedbackVectorSlotKind : : KEYED_LOAD_IC , vector - > GetKind ( slot ) ) ; <nl>
[ ic ] Properly initialize dummy feedback vector .
v8/v8
8c3cfa3a202b4ce466515b502b7e77810ddc2736
2016-09-27T11:01:51Z
mmm a / setup . py <nl> ppp b / setup . py <nl> <nl> <nl> CYTHON_EXTENSION_MODULE_NAMES = ( ' grpc . _cython . cygrpc ' , ) <nl> <nl> - CYTHON_HELPER_C_FILES = ( <nl> - os . path . join ( PYTHON_STEM , ' grpc / _cython / loader . c ' ) , <nl> - os . path . join ( PYTHON_STEM , ' grpc / _cython / imports . generated . c ' ) , <nl> - ) <nl> + CYTHON_HELPER_C_FILES = ( ) <nl> <nl> CORE_C_FILES = tuple ( grpc_core_dependencies . CORE_SOURCE_FILES ) <nl> <nl> mmm a / src / python / grpcio / grpc / _cython / _cygrpc / grpc . pxi <nl> ppp b / src / python / grpcio / grpc / _cython / _cygrpc / grpc . pxi <nl> <nl> cimport libc . time <nl> <nl> <nl> - cdef extern from " grpc / _cython / loader . h " : <nl> + # Typedef types with approximately the same semantics to provide their names to <nl> + # Cython <nl> + ctypedef int int32_t <nl> + ctypedef unsigned uint32_t <nl> + ctypedef long int64_t <nl> <nl> - ctypedef int int32_t <nl> - ctypedef unsigned uint32_t <nl> - ctypedef long int64_t <nl> <nl> - int pygrpc_load_core ( char * ) <nl> - int pygrpc_initialize_core ( ) <nl> + cdef extern from " grpc / support / alloc . h " : <nl> <nl> void * gpr_malloc ( size_t size ) nogil <nl> void gpr_free ( void * ptr ) nogil <nl> void * gpr_realloc ( void * p , size_t size ) nogil <nl> <nl> + <nl> + cdef extern from " grpc / byte_buffer_reader . h " : <nl> + <nl> + struct grpc_byte_buffer_reader : <nl> + # We don ' t care about the internals <nl> + pass <nl> + <nl> + <nl> + cdef extern from " grpc / grpc . h " : <nl> + <nl> ctypedef struct gpr_slice : <nl> # don ' t worry about writing out the members of gpr_slice ; we never access <nl> # them directly . <nl> cdef extern from " grpc / _cython / loader . h " : <nl> gpr_timespec gpr_time_add ( gpr_timespec a , gpr_timespec b ) nogil <nl> <nl> int gpr_time_cmp ( gpr_timespec a , gpr_timespec b ) nogil <nl> - <nl> + <nl> + ctypedef struct grpc_byte_buffer : <nl> + # We don ' t care about the internals . <nl> + pass <nl> + <nl> + grpc_byte_buffer * grpc_raw_byte_buffer_create ( gpr_slice * slices , <nl> + size_t nslices ) nogil <nl> + size_t grpc_byte_buffer_length ( grpc_byte_buffer * bb ) nogil <nl> + void grpc_byte_buffer_destroy ( grpc_byte_buffer * byte_buffer ) nogil <nl> + <nl> + int grpc_byte_buffer_reader_init ( grpc_byte_buffer_reader * reader , <nl> + grpc_byte_buffer * buffer ) nogil <nl> + int grpc_byte_buffer_reader_next ( grpc_byte_buffer_reader * reader , <nl> + gpr_slice * slice ) nogil <nl> + void grpc_byte_buffer_reader_destroy ( grpc_byte_buffer_reader * reader ) nogil <nl> + <nl> ctypedef enum grpc_status_code : <nl> GRPC_STATUS_OK <nl> GRPC_STATUS_CANCELLED <nl> cdef extern from " grpc / _cython / loader . h " : <nl> GRPC_STATUS_DATA_LOSS <nl> GRPC_STATUS__DO_NOT_USE <nl> <nl> - ctypedef enum grpc_ssl_roots_override_result : <nl> - GRPC_SSL_ROOTS_OVERRIDE_OK <nl> - GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY <nl> - GRPC_SSL_ROOTS_OVERRIDE_FAILED <nl> - <nl> - ctypedef enum grpc_ssl_client_certificate_request_type : <nl> - GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE , <nl> - GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY <nl> - GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY <nl> - GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY <nl> - GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY <nl> - <nl> - struct grpc_byte_buffer_reader : <nl> - # We don ' t care about the internals <nl> - pass <nl> - <nl> - ctypedef struct grpc_byte_buffer : <nl> - # We don ' t care about the internals . 
<nl> - pass <nl> - <nl> - grpc_byte_buffer * grpc_raw_byte_buffer_create ( gpr_slice * slices , <nl> - size_t nslices ) nogil <nl> - size_t grpc_byte_buffer_length ( grpc_byte_buffer * bb ) nogil <nl> - void grpc_byte_buffer_destroy ( grpc_byte_buffer * byte_buffer ) nogil <nl> - <nl> - int grpc_byte_buffer_reader_init ( grpc_byte_buffer_reader * reader , <nl> - grpc_byte_buffer * buffer ) nogil <nl> - int grpc_byte_buffer_reader_next ( grpc_byte_buffer_reader * reader , <nl> - gpr_slice * slice ) nogil <nl> - void grpc_byte_buffer_reader_destroy ( grpc_byte_buffer_reader * reader ) nogil <nl> - <nl> const char * GRPC_ARG_PRIMARY_USER_AGENT_STRING <nl> const char * GRPC_ARG_ENABLE_CENSUS <nl> const char * GRPC_ARG_MAX_CONCURRENT_STREAMS <nl> cdef extern from " grpc / _cython / loader . h " : <nl> void grpc_server_cancel_all_calls ( grpc_server * server ) nogil <nl> void grpc_server_destroy ( grpc_server * server ) nogil <nl> <nl> + <nl> + cdef extern from " grpc / grpc_security . h " : <nl> + <nl> + ctypedef enum grpc_ssl_roots_override_result : <nl> + GRPC_SSL_ROOTS_OVERRIDE_OK <nl> + GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY <nl> + GRPC_SSL_ROOTS_OVERRIDE_FAILED <nl> + <nl> + ctypedef enum grpc_ssl_client_certificate_request_type : <nl> + GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE , <nl> + GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY <nl> + GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY <nl> + GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY <nl> + GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY <nl> + <nl> ctypedef struct grpc_ssl_pem_key_cert_pair : <nl> const char * private_key <nl> const char * certificate_chain " cert_chain " <nl> cdef extern from " grpc / _cython / loader . h " : <nl> grpc_call_credentials * grpc_metadata_credentials_create_from_plugin ( <nl> grpc_metadata_credentials_plugin plugin , void * reserved ) nogil <nl> <nl> + <nl> + cdef extern from " grpc / compression . h " : <nl> + <nl> ctypedef enum grpc_compression_algorithm : <nl> GRPC_COMPRESS_NONE <nl> GRPC_COMPRESS_DEFLATE <nl> cdef extern from " grpc / _cython / loader . h " : <nl> int grpc_compression_options_is_algorithm_enabled ( <nl> const grpc_compression_options * opts , <nl> grpc_compression_algorithm algorithm ) nogil <nl> + <nl> mmm a / src / python / grpcio / grpc / _cython / cygrpc . pyx <nl> ppp b / src / python / grpcio / grpc / _cython / cygrpc . pyx <nl> cdef extern from " Python . h " : <nl> <nl> <nl> def _initialize ( ) : <nl> - if not pygrpc_initialize_core ( ) : <nl> - raise ImportError ( ' failed to initialize core gRPC library ' ) <nl> - <nl> + grpc_init ( ) <nl> grpc_set_ssl_roots_override_callback ( <nl> < grpc_ssl_roots_override_callback > ssl_roots_override_callback ) <nl> <nl> deleted file mode 100644 <nl> index c0080b5a47a . . 00000000000 <nl> mmm a / src / python / grpcio / grpc / _cython / imports . generated . c <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2016 , Google Inc . <nl> - * All rights reserved . <nl> - * <nl> - * Redistribution and use in source and binary forms , with or without <nl> - * modification , are permitted provided that the following conditions are <nl> - * met : <nl> - * <nl> - * * Redistributions of source code must retain the above copyright <nl> - * notice , this list of conditions and the following disclaimer . 
<nl> - * * Redistributions in binary form must reproduce the above <nl> - * copyright notice , this list of conditions and the following disclaimer <nl> - * in the documentation and / or other materials provided with the <nl> - * distribution . <nl> - * * Neither the name of Google Inc . nor the names of its <nl> - * contributors may be used to endorse or promote products derived from <nl> - * this software without specific prior written permission . <nl> - * <nl> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - * <nl> - * / <nl> - <nl> - / * TODO ( atash ) remove cruft * / <nl> - # include < grpc / support / port_platform . h > <nl> - <nl> - # include " imports . generated . h " <nl> deleted file mode 100644 <nl> index 8e5c9a8ce2b . . 00000000000 <nl> mmm a / src / python / grpcio / grpc / _cython / imports . generated . h <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2016 , Google Inc . <nl> - * All rights reserved . <nl> - * <nl> - * Redistribution and use in source and binary forms , with or without <nl> - * modification , are permitted provided that the following conditions are <nl> - * met : <nl> - * <nl> - * * Redistributions of source code must retain the above copyright <nl> - * notice , this list of conditions and the following disclaimer . <nl> - * * Redistributions in binary form must reproduce the above <nl> - * copyright notice , this list of conditions and the following disclaimer <nl> - * in the documentation and / or other materials provided with the <nl> - * distribution . <nl> - * * Neither the name of Google Inc . nor the names of its <nl> - * contributors may be used to endorse or promote products derived from <nl> - * this software without specific prior written permission . <nl> - * <nl> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . 
<nl> - * <nl> - * / <nl> - <nl> - / * TODO ( atash ) remove cruft * / <nl> - # ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ <nl> - # define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ <nl> - <nl> - # include < grpc / support / port_platform . h > <nl> - <nl> - # include < grpc / byte_buffer . h > <nl> - # include < grpc / byte_buffer_reader . h > <nl> - # include < grpc / compression . h > <nl> - # include < grpc / grpc . h > <nl> - # include < grpc / grpc_security . h > <nl> - # include < grpc / support / alloc . h > <nl> - # include < grpc / support / slice . h > <nl> - # include < grpc / support / time . h > <nl> - # include < grpc / status . h > <nl> - <nl> - # endif <nl> deleted file mode 100644 <nl> index 4698f0aff8d . . 00000000000 <nl> mmm a / src / python / grpcio / grpc / _cython / loader . c <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2016 , Google Inc . <nl> - * All rights reserved . <nl> - * <nl> - * Redistribution and use in source and binary forms , with or without <nl> - * modification , are permitted provided that the following conditions are <nl> - * met : <nl> - * <nl> - * * Redistributions of source code must retain the above copyright <nl> - * notice , this list of conditions and the following disclaimer . <nl> - * * Redistributions in binary form must reproduce the above <nl> - * copyright notice , this list of conditions and the following disclaimer <nl> - * in the documentation and / or other materials provided with the <nl> - * distribution . <nl> - * * Neither the name of Google Inc . nor the names of its <nl> - * contributors may be used to endorse or promote products derived from <nl> - * this software without specific prior written permission . <nl> - * <nl> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - * <nl> - * / <nl> - <nl> - # include " loader . h " <nl> - <nl> - # ifdef __cplusplus <nl> - extern " C " { <nl> - # endif / * __cpluslus * / <nl> - <nl> - int pygrpc_load_core ( char * path ) { return 1 ; } <nl> - <nl> - / / Cython doesn ' t have Py_AtExit bindings , so we call the C_API directly <nl> - int pygrpc_initialize_core ( void ) { <nl> - grpc_init ( ) ; <nl> - return 1 ; <nl> - } <nl> - <nl> - # ifdef __cplusplus <nl> - } <nl> - # endif / * __cpluslus * / <nl> - <nl> deleted file mode 100644 <nl> index 62fd2252047 . . 00000000000 <nl> mmm a / src / python / grpcio / grpc / _cython / loader . h <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2016 , Google Inc . <nl> - * All rights reserved . 
<nl> - * <nl> - * Redistribution and use in source and binary forms , with or without <nl> - * modification , are permitted provided that the following conditions are <nl> - * met : <nl> - * <nl> - * * Redistributions of source code must retain the above copyright <nl> - * notice , this list of conditions and the following disclaimer . <nl> - * * Redistributions in binary form must reproduce the above <nl> - * copyright notice , this list of conditions and the following disclaimer <nl> - * in the documentation and / or other materials provided with the <nl> - * distribution . <nl> - * * Neither the name of Google Inc . nor the names of its <nl> - * contributors may be used to endorse or promote products derived from <nl> - * this software without specific prior written permission . <nl> - * <nl> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - * <nl> - * / <nl> - <nl> - # ifndef PYGRPC_LOADER_H_ <nl> - # define PYGRPC_LOADER_H_ <nl> - <nl> - # include " imports . generated . h " <nl> - <nl> - / * Additional inclusions not covered by " imports . generated . h " * / <nl> - # include < grpc / byte_buffer_reader . h > <nl> - <nl> - / * TODO ( atash ) remove cruft * / <nl> - <nl> - # ifdef __cplusplus <nl> - extern " C " { <nl> - # endif / * __cpluslus * / <nl> - <nl> - / * Attempts to load the core if necessary , and return non - zero upon succes . * / <nl> - int pygrpc_load_core ( char * path ) ; <nl> - <nl> - / * Initializes grpc and registers grpc_shutdown ( ) to be called right before <nl> - * interpreter exit . Returns non - zero upon success . <nl> - * / <nl> - int pygrpc_initialize_core ( void ) ; <nl> - <nl> - # ifdef __cplusplus <nl> - } <nl> - # endif / * __cpluslus * / <nl> - <nl> - # endif / * GRPC_RB_BYTE_BUFFER_H_ * / <nl> - <nl> deleted file mode 100644 <nl> index d83bccad1db . . 00000000000 <nl> mmm a / templates / src / python / grpcio / grpc / _cython / imports . generated . c . template <nl> ppp / dev / null <nl> <nl> - % YAML 1 . 2 <nl> mmm - | <nl> - / * <nl> - * <nl> - * Copyright 2016 , Google Inc . <nl> - * All rights reserved . <nl> - * <nl> - * Redistribution and use in source and binary forms , with or without <nl> - * modification , are permitted provided that the following conditions are <nl> - * met : <nl> - * <nl> - * * Redistributions of source code must retain the above copyright <nl> - * notice , this list of conditions and the following disclaimer . <nl> - * * Redistributions in binary form must reproduce the above <nl> - * copyright notice , this list of conditions and the following disclaimer <nl> - * in the documentation and / or other materials provided with the <nl> - * distribution . <nl> - * * Neither the name of Google Inc . 
nor the names of its <nl> - * contributors may be used to endorse or promote products derived from <nl> - * this software without specific prior written permission . <nl> - * <nl> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - * <nl> - * / <nl> - <nl> - / * TODO ( atash ) remove cruft * / <nl> - # include < grpc / support / port_platform . h > <nl> - <nl> - # include " imports . generated . h " <nl> - <nl> - <nl> deleted file mode 100644 <nl> index b85bc3dbd8b . . 00000000000 <nl> mmm a / templates / src / python / grpcio / grpc / _cython / imports . generated . h . template <nl> ppp / dev / null <nl> <nl> - % YAML 1 . 2 <nl> mmm - | <nl> - / * <nl> - * <nl> - * Copyright 2016 , Google Inc . <nl> - * All rights reserved . <nl> - * <nl> - * Redistribution and use in source and binary forms , with or without <nl> - * modification , are permitted provided that the following conditions are <nl> - * met : <nl> - * <nl> - * * Redistributions of source code must retain the above copyright <nl> - * notice , this list of conditions and the following disclaimer . <nl> - * * Redistributions in binary form must reproduce the above <nl> - * copyright notice , this list of conditions and the following disclaimer <nl> - * in the documentation and / or other materials provided with the <nl> - * distribution . <nl> - * * Neither the name of Google Inc . nor the names of its <nl> - * contributors may be used to endorse or promote products derived from <nl> - * this software without specific prior written permission . <nl> - * <nl> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - * <nl> - * / <nl> - <nl> - / * TODO ( atash ) remove cruft * / <nl> - # ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ <nl> - # define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ <nl> - <nl> - # include < grpc / support / port_platform . h > <nl> - <nl> - # include < grpc / byte_buffer . 
h > <nl> - # include < grpc / byte_buffer_reader . h > <nl> - # include < grpc / compression . h > <nl> - # include < grpc / grpc . h > <nl> - # include < grpc / grpc_security . h > <nl> - # include < grpc / support / alloc . h > <nl> - # include < grpc / support / slice . h > <nl> - # include < grpc / support / time . h > <nl> - # include < grpc / status . h > <nl> - <nl> - # endif <nl>
Remove Python 'loader' hack
grpc/grpc
3acddb20dbac343dcf12a4ba46bfdf7ebacf37f9
2016-07-14T01:38:33Z
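Note on the record above: it drops the Python extension's "loader" indirection — the deleted loader.h declared pygrpc_load_core() and pygrpc_initialize_core(), and the two imports.generated templates behind it are removed as well. As a rough sketch of the start-up pattern being retired (the two signatures and their documented contract are copied from the deleted header; the stub bodies and the calling code are stand-ins so the sketch compiles on its own, not grpc source):

#include <cstdio>

// Signatures as declared in the deleted loader.h; the bodies here are stubs so
// the sketch builds standalone -- the real ones lived in the generated
// imports.* files that this commit also removes.
extern "C" int pygrpc_load_core(char* path) {
  std::printf("would load the core library from %s\n",
              path ? path : "(default search path)");
  return 1;  // non-zero meant success, per the deleted header's comments
}

extern "C" int pygrpc_initialize_core(void) {
  // Per the deleted comments: call grpc_init() and register grpc_shutdown()
  // to run right before interpreter exit.
  return 1;
}

int main() {
  // The two-step start-up the extension no longer needs: load, then init.
  if (!pygrpc_load_core(nullptr)) return 1;
  if (!pygrpc_initialize_core()) return 1;
  return 0;
}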
mmm a / wkhtmltopdf . man . m4 <nl> ppp b / wkhtmltopdf . man . m4 <nl> wkhtmltopdf [ OPTIONS ] . . . < input file > [ More inputfiles ] < output file > <nl> Converts one or more htmlpage to a pdf document . <nl> <nl> . SH OPTIONS <nl> + . PD 0 <nl> esyscmd ( ` . / wkhtmltopdf - - help | sed - ne / Options : / , / Proxy : / p | sed - e / Options : / d - e / Proxy / d | sed - re " s / ^ [ \ t ] * / / " | sed - re " s / ^ ( ( - [ a - zA - Z ] , ) ? - - [ a - z - ] + ) [ \ t ] * ( < [ a - z ] + > ) ? [ \ t ] * / . TP \ n \ \ \ fB \ 1 \ \ \ fR \ \ \ fI \ 3 \ \ \ fR \ n / " | sed - re " s / ' ' / \ \ \ ' / g " | sed - re " s / - / \ \ \ \ - / g " ) <nl> - <nl> + . PD <nl> . SH PROXY <nl> . PD 0 <nl> esyscmd ( ` . / wkhtmltopdf - - help | sed - ne " / By default proxy / , / < proxy > : = / p " | sed - re " s / ^ ( . * ) / . PP \ n \ 1 / " | sed - re " s / - / \ \ \ \ - / g " ' ) <nl>
Removed some spacing from the man page
wkhtmltopdf/wkhtmltopdf
ebfa371608b55e02db3a13d4b3ae9171f3d1f46b
2009-03-25T16:16:02Z
mmm a / xbmc / cores / VideoRenderers / OverlayRendererGUI . cpp <nl> ppp b / xbmc / cores / VideoRenderers / OverlayRendererGUI . cpp <nl> COverlayText : : COverlayText ( CDVDOverlayText * src ) <nl> else <nl> { <nl> if ( m_subalign = = SUBTITLE_ALIGN_TOP_INSIDE | | <nl> - m_subalign = = SUBTITLE_ALIGN_TOP_OUTSIDE ) <nl> + m_subalign = = SUBTITLE_ALIGN_BOTTOM_INSIDE ) <nl> m_align = ALIGN_VIDEO ; <nl> else <nl> m_align = ALIGN_SCREEN ; <nl>
fix c/p error in 3174a4f90c07d94c2075875af76c0414f5879b1f
xbmc/xbmc
f951e0990e9a163bea2394917264b6f3fb71c6f2
2015-09-20T16:12:29Z
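For readability, a note on the one-line fix above: the corrected condition pairs the two "inside" subtitle placements, which suggests that inside modes are positioned relative to the video rectangle (ALIGN_VIDEO) while the remaining modes fall back to the screen (ALIGN_SCREEN); the copy/paste error had paired TOP_INSIDE with TOP_OUTSIDE instead of with BOTTOM_INSIDE. A self-contained C++ sketch of the corrected mapping follows; the enum declarations, the BOTTOM_OUTSIDE value, and the helper name are invented for illustration — only the constant names used in the condition come from the diff.

// Illustrative enums; in XBMC these come from the player/overlay headers.
enum SubtitleAlign {
  SUBTITLE_ALIGN_TOP_INSIDE,
  SUBTITLE_ALIGN_TOP_OUTSIDE,
  SUBTITLE_ALIGN_BOTTOM_INSIDE,
  SUBTITLE_ALIGN_BOTTOM_OUTSIDE
};

enum OverlayAlign { ALIGN_VIDEO, ALIGN_SCREEN };

// Corrected selection logic: both "inside" modes track the video rectangle.
OverlayAlign AlignForSubtitles(SubtitleAlign subalign) {
  if (subalign == SUBTITLE_ALIGN_TOP_INSIDE ||
      subalign == SUBTITLE_ALIGN_BOTTOM_INSIDE)
    return ALIGN_VIDEO;
  return ALIGN_SCREEN;  // outside placements stay screen-relative
}

int main() {
  return AlignForSubtitles(SUBTITLE_ALIGN_TOP_OUTSIDE) == ALIGN_SCREEN ? 0 : 1;
}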
mmm a / Source / Math / CPUMatrix . cpp <nl> ppp b / Source / Math / CPUMatrix . cpp <nl> void CPUMatrix < ElemType > : : MaxPoolingBackward ( const CPUMatrix < ElemType > & out , con <nl> int i0 = mpRowIndices ( row , 0 ) ; <nl> int size = indices ( i0 + + , 0 ) ; <nl> assert ( size > 0 ) ; <nl> + ElemType g = ( * this ) ( row , sample ) ; <nl> ElemType m = out ( row , sample ) ; <nl> - ElemType count = 0 ; <nl> - for ( int i = 0 ; i < size ; i + + ) <nl> - { <nl> - int dcol = indices ( i0 + i , 0 ) ; <nl> - assert ( 0 < = colBase + dcol & & colBase + dcol < grad . GetNumRows ( ) ) ; <nl> - if ( in ( colBase + dcol , sample ) > = m ) <nl> - count + = ElemType ( 1 ) ; <nl> - } <nl> - assert ( count > 0 ) ; <nl> - ElemType g = ( * this ) ( row , sample ) / count ; <nl> for ( int i = 0 ; i < size ; i + + ) <nl> { <nl> const int dcol = indices ( i0 + i , 0 ) ; <nl> + assert ( 0 < = colBase + dcol & & colBase + dcol < grad . GetNumRows ( ) ) ; <nl> if ( in ( colBase + dcol , sample ) > = m ) <nl> + { <nl> # pragma omp atomic <nl> grad ( colBase + dcol , sample ) + = g ; <nl> + break ; <nl> + } <nl> } <nl> } <nl> } <nl> mmm a / Source / Math / Convolution . cuh <nl> ppp b / Source / Math / Convolution . cuh <nl> __global__ void kMaxPoolingBackward ( int batchSize , const ElemType * out , const El <nl> int i0 = mpRowIndices [ row ] ; <nl> int size = indices [ i0 + + ] ; <nl> assert ( size > 0 ) ; <nl> + ElemType g = srcGrad [ row ] ; <nl> ElemType m = out [ row ] ; <nl> - ElemType count = 0 ; <nl> for ( int i = 0 ; i < size ; i + + ) <nl> { <nl> int dcol = indices [ i0 + i ] ; <nl> assert ( 0 < = colBase + dcol & & colBase + dcol < dstVecSize ) ; <nl> if ( in [ colBase + dcol ] > = m ) <nl> - count + = ElemType ( 1 ) ; <nl> - } <nl> - assert ( count > 0 ) ; <nl> - ElemType g = srcGrad [ row ] / count ; <nl> - for ( int i = 0 ; i < size ; i + + ) <nl> - { <nl> - int dcol = indices [ i0 + i ] ; <nl> - if ( in [ colBase + dcol ] > = m ) <nl> + { <nl> atomicAdd ( & grad [ colBase + dcol ] , g ) ; <nl> + break ; <nl> + } <nl> } <nl> <nl> in + = blockDim . y * dstVecSize ; <nl>
Change max pooling to behave similarly to NVidia's implementation.
microsoft/CNTK
fd0cf7bb729bdefc16e925781df4b8750977ba8f
2016-10-02T18:23:27Z
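Note on the CNTK change above: the previous MaxPoolingBackward split the incoming gradient g evenly across every input element that tied with the pooled maximum m, whereas the new code hands the full gradient to the first element that reaches the maximum and then breaks out of the loop — the winner-takes-all convention NVidia's pooling backward uses. A minimal standalone C++ sketch of the two strategies follows; the function names and the use of std::vector in place of CNTK's CPUMatrix are illustrative only.

#include <cassert>
#include <cstddef>
#include <vector>

// Old behaviour: share the output gradient g among all inputs tying with max m.
void MaxPoolBackwardShared(const std::vector<float>& in, float m, float g,
                           std::vector<float>& grad) {
  float count = 0;
  for (float v : in)
    if (v >= m) count += 1;  // how many inputs reached the maximum
  assert(count > 0);
  for (std::size_t i = 0; i < in.size(); ++i)
    if (in[i] >= m) grad[i] += g / count;  // each winner gets an equal share
}

// New behaviour: give the whole gradient to the first winner, then stop.
void MaxPoolBackwardFirstWinner(const std::vector<float>& in, float m, float g,
                                std::vector<float>& grad) {
  for (std::size_t i = 0; i < in.size(); ++i) {
    if (in[i] >= m) {
      grad[i] += g;  // full gradient to the first element that hit the max
      break;         // later ties receive nothing
    }
  }
}

int main() {
  std::vector<float> window = {1.0f, 3.0f, 3.0f, 2.0f};  // two tied maxima
  std::vector<float> shared(window.size(), 0.0f);
  std::vector<float> first(window.size(), 0.0f);
  MaxPoolBackwardShared(window, 3.0f, 1.0f, shared);      // 0, 0.5, 0.5, 0
  MaxPoolBackwardFirstWinner(window, 3.0f, 1.0f, first);  // 0, 1.0, 0.0, 0
  return 0;
}

With a single maximum per pooling window the two versions agree; they differ only when several inputs tie for the maximum, which is exactly the case this commit changes.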
mmm a / tensorflow / python / keras / BUILD <nl> ppp b / tensorflow / python / keras / BUILD <nl> py_test ( <nl> deps = [ <nl> " : keras " , <nl> " / / tensorflow / python : client_testlib " , <nl> - " / / tensorflow / python / saved_model : save_test " , <nl> " / / third_party / py / numpy " , <nl> " @ absl_py / / absl / testing : parameterized " , <nl> ] , <nl> mmm a / tensorflow / python / keras / engine / training_utils_test . py <nl> ppp b / tensorflow / python / keras / engine / training_utils_test . py <nl> <nl> <nl> import numpy as np <nl> <nl> + <nl> + from tensorflow . python . client import session as session_lib <nl> from tensorflow . python import keras <nl> from tensorflow . python . eager import context <nl> from tensorflow . python . eager import def_function <nl> from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_spec <nl> from tensorflow . python . framework import tensor_util <nl> from tensorflow . python . keras import backend as K <nl> <nl> from tensorflow . python . keras . utils import tf_utils <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . platform import test <nl> + from tensorflow . python . saved_model import loader <nl> from tensorflow . python . saved_model import save as save_lib <nl> - from tensorflow . python . saved_model import save_test <nl> + from tensorflow . python . saved_model import signature_constants <nl> + from tensorflow . python . saved_model import tag_constants <nl> <nl> <nl> class ModelInputsTest ( test . TestCase ) : <nl> def call ( self , inputs , * args ) : <nl> self . _assert_all_close ( expected_outputs , signature_outputs ) <nl> <nl> <nl> + def _import_and_infer ( save_dir , inputs ) : <nl> + " " " Import a SavedModel into a TF 1 . x - style graph and run ` signature_key ` . " " " <nl> + graph = ops . Graph ( ) <nl> + with graph . as_default ( ) , session_lib . Session ( ) as session : <nl> + model = loader . load ( session , [ tag_constants . SERVING ] , save_dir ) <nl> + signature = model . signature_def [ <nl> + signature_constants . DEFAULT_SERVING_SIGNATURE_DEF_KEY ] <nl> + assert set ( inputs . keys ( ) ) = = set ( signature . inputs . keys ( ) ) <nl> + feed_dict = { } <nl> + for arg_name in inputs . keys ( ) : <nl> + feed_dict [ graph . get_tensor_by_name ( signature . inputs [ arg_name ] . name ) ] = ( <nl> + inputs [ arg_name ] ) <nl> + output_dict = { } <nl> + for output_name , output_tensor_info in signature . outputs . items ( ) : <nl> + output_dict [ output_name ] = graph . get_tensor_by_name ( <nl> + output_tensor_info . name ) <nl> + return session . run ( output_dict , feed_dict = feed_dict ) <nl> + <nl> + <nl> class ModelSaveTest ( keras_parameterized . TestCase ) : <nl> <nl> @ keras_parameterized . run_with_all_model_types <nl> def test_model_save ( self ) : <nl> <nl> self . assertAllClose ( <nl> { model . output_names [ 0 ] : model . predict_on_batch ( inputs ) } , <nl> - save_test . _import_and_infer ( save_dir , <nl> - { model . input_names [ 0 ] : np . ones ( ( 8 , 5 ) ) } ) ) <nl> + _import_and_infer ( save_dir , { model . input_names [ 0 ] : np . ones ( ( 8 , 5 ) ) } ) ) <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl>
Fix kokoro tests by removing dependency on save_test.
tensorflow/tensorflow
662053a4d35942d1c1b6800df98829e1046b1679
2018-12-11T01:23:23Z
mmm a / hphp / hack / src / server / fileOutline . ml <nl> ppp b / hphp / hack / src / server / fileOutline . ml <nl> and kind = <nl> | Method <nl> | Property <nl> | Const <nl> + | Enum <nl> + | Interface <nl> + | Trait <nl> <nl> and modifier = <nl> + | Final <nl> | Static <nl> + | Abstract <nl> + | Private <nl> + | Public <nl> + | Protected <nl> + | Async <nl> <nl> and def = { <nl> kind : kind ; <nl> and def = { <nl> children : def list ; <nl> } <nl> <nl> + let modifiers_of_ast_kinds l = <nl> + List . map l begin function <nl> + | Ast . Final - > Final <nl> + | Ast . Static - > Static <nl> + | Ast . Abstract - > Abstract <nl> + | Ast . Private - > Private <nl> + | Ast . Public - > Public <nl> + | Ast . Protected - > Protected <nl> + end <nl> + <nl> let string_of_kind = function <nl> | Function - > " function " <nl> | Class - > " class " <nl> | Method - > " method " <nl> | Property - > " property " <nl> | Const - > " const " <nl> + | Enum - > " enum " <nl> + | Interface - > " interface " <nl> + | Trait - > " trait " <nl> <nl> let string_of_modifier = function <nl> + | Final - > " final " <nl> | Static - > " static " <nl> - <nl> - let summarize_property var = <nl> + | Abstract - > " abstract " <nl> + | Private - > " private " <nl> + | Public - > " public " <nl> + | Protected - > " protected " <nl> + | Async - > " async " <nl> + <nl> + let summarize_property kinds var = <nl> + let modifiers = modifiers_of_ast_kinds kinds in <nl> let span , ( pos , name ) , expr_opt = var in <nl> { <nl> kind = Property ; <nl> name ; <nl> pos = Pos . to_absolute pos ; <nl> span = Pos . to_absolute span ; <nl> - modifiers = [ ] ; <nl> + modifiers ; <nl> children = [ ] ; <nl> } <nl> <nl> let summarize_abs_const ( pos , name ) = <nl> name ; <nl> pos = pos ; <nl> span = pos ; <nl> - modifiers = [ ] ; <nl> + modifiers = [ Abstract ] ; <nl> children = [ ] ; <nl> } <nl> <nl> + let modifier_of_fun_kind acc = function <nl> + | Ast . FAsync | Ast . FAsyncGenerator - > Async : : acc <nl> + | _ - > acc <nl> + <nl> let summarize_class acc class_ = <nl> let class_name = Utils . strip_ns ( snd class_ . Ast . c_name ) in <nl> let class_name_pos = Pos . to_absolute ( fst class_ . Ast . c_name ) in <nl> let c_span = Pos . to_absolute class_ . Ast . c_span in <nl> + let modifiers = <nl> + if class_ . Ast . c_final then [ Final ] else [ ] <nl> + in <nl> + let modifiers = match class_ . Ast . c_kind with <nl> + | Ast . Cabstract - > Abstract : : modifiers <nl> + | _ - > modifiers <nl> + in <nl> let children = List . concat <nl> ( List . map class_ . Ast . c_body ~ f : begin function <nl> | Ast . Method m - > <nl> - let modifiers = [ ] in <nl> - let modifiers = <nl> - if List . mem m . Ast . m_kind ( Ast . Static ) <nl> - then Static : : modifiers else modifiers <nl> - in <nl> + let modifiers = modifier_of_fun_kind [ ] m . Ast . m_fun_kind in <nl> + let modifiers = ( modifiers_of_ast_kinds m . Ast . m_kind ) @ modifiers in <nl> let method_ = { <nl> kind = Method ; <nl> name = snd m . Ast . m_name ; <nl> let summarize_class acc class_ = <nl> children = [ ] ; <nl> } in <nl> [ method_ ] <nl> - | Ast . ClassVars ( _ , _ , vars ) - > List . map vars ~ f : summarize_property <nl> - | Ast . XhpAttr ( _ , var , _ , _ ) - > [ summarize_property var ] <nl> + | Ast . ClassVars ( kinds , _ , vars ) - > <nl> + List . map vars ~ f : ( summarize_property kinds ) <nl> + | Ast . XhpAttr ( _ , var , _ , _ ) - > [ summarize_property [ ] var ] <nl> | Ast . Const ( _ , cl ) - > List . map cl ~ f : summarize_const <nl> | Ast . 
AbsConst ( _ , id ) - > [ summarize_abs_const id ] <nl> | _ - > [ ] <nl> end ) <nl> in <nl> + let kind = match class_ . Ast . c_kind with <nl> + | Ast . Cinterface - > Interface <nl> + | Ast . Ctrait - > Trait <nl> + | Ast . Cenum - > Enum <nl> + | _ - > Class <nl> + in <nl> let class_ = { <nl> - kind = Class ; <nl> + kind ; <nl> name = class_name ; <nl> pos = class_name_pos ; <nl> span = c_span ; <nl> - modifiers = [ ] ; <nl> + modifiers ; <nl> children ; <nl> } in <nl> class_ : : acc <nl> <nl> let summarize_fun acc f = <nl> + let modifiers = modifier_of_fun_kind [ ] f . Ast . f_fun_kind in <nl> let fun_ = { <nl> kind = Function ; <nl> name = Utils . strip_ns ( snd f . Ast . f_name ) ; <nl> pos = Pos . to_absolute ( fst f . Ast . f_name ) ; <nl> span = ( Pos . to_absolute f . Ast . f_span ) ; <nl> - modifiers = [ ] ; <nl> + modifiers ; <nl> children = [ ] <nl> } in <nl> fun_ : : acc <nl> let to_json_legacy input = <nl> List . fold_left defs ~ init : acc ~ f : begin fun acc def - > <nl> match def . kind with <nl> | Function - > ( def . pos , def . name , " function " ) : : acc <nl> - | Class - > <nl> + | Class | Enum | Interface | Trait - > <nl> let acc = ( def . pos , def . name , " class " ) : : acc in <nl> to_legacy ( prefix ^ def . name ^ " : : " ) acc def . children <nl> | Method - > <nl> mmm a / hphp / hack / test / outline / const . php . exp <nl> ppp b / hphp / hack / test / outline / const . php . exp <nl> C <nl> kind : class <nl> position : File " " , line 3 , characters 16 - 16 : <nl> span : File " " , line 3 , character 1 - line 9 , character 1 : <nl> - modifiers : <nl> + modifiers : abstract <nl> <nl> FOO <nl> kind : const <nl> C <nl> kind : const <nl> position : File " " , line 8 , characters 22 - 24 : <nl> span : File " " , line 8 , character 22 - line 8 , character 24 : <nl> - modifiers : <nl> + modifiers : abstract <nl> <nl> mmm a / hphp / hack / test / outline / file . php . exp <nl> ppp b / hphp / hack / test / outline / file . php . exp <nl> C <nl> kind : method <nl> position : File " " , line 8 , characters 19 - 21 : <nl> span : File " " , line 8 , character 3 - line 8 , character 26 : <nl> - modifiers : <nl> + modifiers : public <nl> <nl> bar <nl> kind : method <nl> position : File " " , line 10 , characters 26 - 28 : <nl> span : File " " , line 10 , character 3 - line 10 , character 33 : <nl> - modifiers : static <nl> + modifiers : public static <nl> <nl> new file mode 100644 <nl> index 00000000000 . . f97f28b8ffd <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / outline / kinds . php <nl> <nl> + < ? hh <nl> + <nl> + enum foo : string { } <nl> + <nl> + interface I { } <nl> + <nl> + trait T { } <nl> new file mode 100644 <nl> index 00000000000 . . 36e82828abe <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / outline / kinds . php . exp <nl> <nl> + foo <nl> + kind : enum <nl> + position : File " " , line 3 , characters 6 - 8 : <nl> + span : File " " , line 3 , character 1 - line 3 , character 20 : <nl> + modifiers : <nl> + <nl> + I <nl> + kind : interface <nl> + position : File " " , line 5 , characters 11 - 11 : <nl> + span : File " " , line 5 , character 1 - line 5 , character 14 : <nl> + modifiers : <nl> + <nl> + T <nl> + kind : trait <nl> + position : File " " , line 7 , characters 7 - 7 : <nl> + span : File " " , line 7 , character 1 - line 7 , character 10 : <nl> + modifiers : <nl> + <nl> mmm a / hphp / hack / test / outline / method_attribute . php . exp <nl> ppp b / hphp / hack / test / outline / method_attribute . php . 
exp <nl> C <nl> kind : method <nl> position : File " " , line 5 , characters 19 - 37 : <nl> span : File " " , line 4 , character 3 - line 8 , character 3 : <nl> - modifiers : <nl> + modifiers : public <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 8d9abda08b7 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / outline / modifiers . php <nl> <nl> + < ? hh <nl> + <nl> + async function fun ( ) { } <nl> + <nl> + abstract class C { <nl> + abstract const FOO ; <nl> + public static $ x1 ; <nl> + static public $ x2 ; <nl> + <nl> + protected static abstract async function bar1 ( ) ; <nl> + protected abstract static async function bar2 ( ) ; <nl> + static protected abstract async function bar3 ( ) ; <nl> + static abstract protected async function bar4 ( ) ; <nl> + abstract static protected async function bar5 ( ) ; <nl> + abstract protected static async function bar6 ( ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 38ad789bbf4 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / outline / modifiers . php . exp <nl> <nl> + fun <nl> + kind : function <nl> + position : File " " , line 3 , characters 16 - 18 : <nl> + span : File " " , line 3 , character 1 - line 3 , character 23 : <nl> + modifiers : async <nl> + <nl> + C <nl> + kind : class <nl> + position : File " " , line 5 , characters 16 - 16 : <nl> + span : File " " , line 5 , character 1 - line 16 , character 1 : <nl> + modifiers : abstract <nl> + <nl> + FOO <nl> + kind : const <nl> + position : File " " , line 6 , characters 18 - 20 : <nl> + span : File " " , line 6 , character 18 - line 6 , character 20 : <nl> + modifiers : abstract <nl> + <nl> + x1 <nl> + kind : property <nl> + position : File " " , line 7 , characters 17 - 19 : <nl> + span : File " " , line 7 , character 17 - line 7 , character 19 : <nl> + modifiers : public static <nl> + <nl> + x2 <nl> + kind : property <nl> + position : File " " , line 8 , characters 17 - 19 : <nl> + span : File " " , line 8 , character 17 - line 8 , character 19 : <nl> + modifiers : static public <nl> + <nl> + bar1 <nl> + kind : method <nl> + position : File " " , line 10 , characters 44 - 47 : <nl> + span : File " " , line 10 , character 3 - line 10 , character 50 : <nl> + modifiers : protected static abstract async <nl> + <nl> + bar2 <nl> + kind : method <nl> + position : File " " , line 11 , characters 44 - 47 : <nl> + span : File " " , line 11 , character 3 - line 11 , character 50 : <nl> + modifiers : protected abstract static async <nl> + <nl> + bar3 <nl> + kind : method <nl> + position : File " " , line 12 , characters 44 - 47 : <nl> + span : File " " , line 12 , character 3 - line 12 , character 50 : <nl> + modifiers : static protected abstract async <nl> + <nl> + bar4 <nl> + kind : method <nl> + position : File " " , line 13 , characters 44 - 47 : <nl> + span : File " " , line 13 , character 3 - line 13 , character 50 : <nl> + modifiers : static abstract protected async <nl> + <nl> + bar5 <nl> + kind : method <nl> + position : File " " , line 14 , characters 44 - 47 : <nl> + span : File " " , line 14 , character 3 - line 14 , character 50 : <nl> + modifiers : abstract static protected async <nl> + <nl> + bar6 <nl> + kind : method <nl> + position : File " " , line 15 , characters 44 - 47 : <nl> + span : File " " , line 15 , character 3 - line 15 , character 50 : <nl> + modifiers : abstract protected static async <nl> + <nl> mmm a / hphp / hack / test / outline / property . php . exp <nl> ppp b / hphp / hack / test / outline / property . php . 
exp <nl> C <nl> kind : property <nl> position : File " " , line 5 , characters 5 - 8 : <nl> span : File " " , line 5 , character 5 - line 5 , character 8 : <nl> - modifiers : <nl> + modifiers : public <nl> <nl> bar <nl> kind : property <nl> position : File " " , line 6 , characters 5 - 8 : <nl> span : File " " , line 6 , character 5 - line 6 , character 16 : <nl> - modifiers : <nl> + modifiers : public <nl> <nl> : xhp_property <nl> kind : property <nl> C <nl> kind : property <nl> position : File " " , line 14 , characters 19 - 36 : <nl> span : File " " , line 14 , character 19 - line 14 , character 36 : <nl> - modifiers : <nl> + modifiers : public <nl> <nl> implicit_property_with_init <nl> kind : property <nl> position : File " " , line 15 , characters 16 - 43 : <nl> span : File " " , line 15 , character 16 - line 15 , character 47 : <nl> - modifiers : <nl> + modifiers : public <nl> <nl> __construct <nl> kind : method <nl> position : File " " , line 12 , characters 19 - 29 : <nl> span : File " " , line 12 , character 3 - line 17 , character 3 : <nl> - modifiers : <nl> + modifiers : public <nl> <nl>
More modifiers and kinds in outline view
facebook/hhvm
90c058b396bd5d8e126efe17d7e2c4c938b0ae66
2016-05-09T23:31:22Z
mmm a / src / core / ext / filters / client_channel / client_channel . cc <nl> ppp b / src / core / ext / filters / client_channel / client_channel . cc <nl> ChannelData : : ChannelConfigHelper : : ApplyServiceConfig ( <nl> / / If resolver did not return a service config or returned an invalid service <nl> / / config , we need a fallback service config . <nl> if ( result . service_config_error ! = GRPC_ERROR_NONE ) { <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( grpc_client_channel_routing_trace ) ) { <nl> + gpr_log ( GPR_INFO , " chand = % p : resolver returned service config error : % s " , <nl> + chand_ , grpc_error_string ( result . service_config_error ) ) ; <nl> + } <nl> / / If the service config was invalid , then fallback to the saved service <nl> / / config . If there is no saved config either , use the default service <nl> / / config . <nl> mmm a / src / core / ext / filters / client_channel / lb_policy / xds / cds . cc <nl> ppp b / src / core / ext / filters / client_channel / lb_policy / xds / cds . cc <nl> CdsLb : : ~ CdsLb ( ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_cds_lb_trace ) ) { <nl> gpr_log ( GPR_INFO , " [ cdslb % p ] destroying cds LB policy " , this ) ; <nl> } <nl> - grpc_channel_args_destroy ( args_ ) ; <nl> } <nl> <nl> void CdsLb : : ShutdownLocked ( ) { <nl> void CdsLb : : ShutdownLocked ( ) { <nl> } <nl> xds_client_ - > CancelClusterDataWatch ( config_ - > cluster ( ) , cluster_watcher_ ) ; <nl> } <nl> - xds_client_ . reset ( ) ; <nl> + xds_client_ . reset ( DEBUG_LOCATION , " CdsLb " ) ; <nl> } <nl> + grpc_channel_args_destroy ( args_ ) ; <nl> + args_ = nullptr ; <nl> } <nl> <nl> void CdsLb : : MaybeDestroyChildPolicyLocked ( ) { <nl> mmm a / src / core / ext / filters / client_channel / lb_policy / xds / eds . cc <nl> ppp b / src / core / ext / filters / client_channel / lb_policy / xds / eds . cc <nl> EdsLb : : ~ EdsLb ( ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_lb_eds_trace ) ) { <nl> gpr_log ( GPR_INFO , " [ edslb % p ] destroying xds LB policy " , this ) ; <nl> } <nl> - grpc_channel_args_destroy ( args_ ) ; <nl> } <nl> <nl> void EdsLb : : ShutdownLocked ( ) { <nl> void EdsLb : : ShutdownLocked ( ) { <nl> xds_client ( ) - > CancelEndpointDataWatch ( GetEdsResourceName ( ) , <nl> endpoint_watcher_ ) ; <nl> } <nl> - xds_client_from_channel_ . reset ( ) ; <nl> + xds_client_from_channel_ . reset ( DEBUG_LOCATION , " EdsLb " ) ; <nl> + } <nl> + if ( xds_client_ ! = nullptr ) { <nl> + grpc_pollset_set_del_pollset_set ( xds_client_ - > interested_parties ( ) , <nl> + interested_parties ( ) ) ; <nl> + xds_client_ . reset ( ) ; <nl> } <nl> - xds_client_ . reset ( ) ; <nl> + grpc_channel_args_destroy ( args_ ) ; <nl> + args_ = nullptr ; <nl> } <nl> <nl> void EdsLb : : MaybeDestroyChildPolicyLocked ( ) { <nl> void EdsLb : : UpdateLocked ( UpdateArgs args ) { <nl> if ( xds_client_from_channel_ = = nullptr ) { <nl> grpc_error * error = GRPC_ERROR_NONE ; <nl> xds_client_ = MakeOrphanable < XdsClient > ( <nl> - work_serializer ( ) , interested_parties ( ) , GetEdsResourceName ( ) , <nl> - nullptr / * service config watcher * / , * args_ , & error ) ; <nl> + work_serializer ( ) , GetEdsResourceName ( ) , * args_ , & error ) ; <nl> / / TODO ( roth ) : If we decide that we care about EDS - only mode , add <nl> / / proper error handling here . 
<nl> GPR_ASSERT ( error = = GRPC_ERROR_NONE ) ; <nl> + grpc_pollset_set_add_pollset_set ( xds_client_ - > interested_parties ( ) , <nl> + interested_parties ( ) ) ; <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_lb_eds_trace ) ) { <nl> gpr_log ( GPR_INFO , " [ edslb % p ] Created xds client % p " , this , <nl> xds_client_ . get ( ) ) ; <nl> mmm a / src / core / ext / filters / client_channel / resolver / xds / xds_resolver . cc <nl> ppp b / src / core / ext / filters / client_channel / resolver / xds / xds_resolver . cc <nl> class XdsResolver : public Resolver { <nl> <nl> void StartLocked ( ) override ; <nl> <nl> - void ShutdownLocked ( ) override { <nl> - if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_resolver_trace ) ) { <nl> - gpr_log ( GPR_INFO , " [ xds_resolver % p ] shutting down " , this ) ; <nl> - } <nl> - xds_client_ . reset ( ) ; <nl> - } <nl> + void ShutdownLocked ( ) override ; <nl> <nl> private : <nl> class ListenerWatcher : public XdsClient : : ListenerWatcherInterface { <nl> public : <nl> explicit ListenerWatcher ( RefCountedPtr < XdsResolver > resolver ) <nl> : resolver_ ( std : : move ( resolver ) ) { } <nl> - void OnListenerChanged ( std : : vector < XdsApi : : Route > routes ) override ; <nl> + void OnListenerChanged ( XdsApi : : LdsUpdate listener ) override ; <nl> + void OnError ( grpc_error * error ) override ; <nl> + void OnResourceDoesNotExist ( ) override ; <nl> + <nl> + private : <nl> + RefCountedPtr < XdsResolver > resolver_ ; <nl> + } ; <nl> + <nl> + class RouteConfigWatcher : public XdsClient : : RouteConfigWatcherInterface { <nl> + public : <nl> + explicit RouteConfigWatcher ( RefCountedPtr < XdsResolver > resolver ) <nl> + : resolver_ ( std : : move ( resolver ) ) { } <nl> + void OnRouteConfigChanged ( XdsApi : : RdsUpdate route_config ) override ; <nl> void OnError ( grpc_error * error ) override ; <nl> void OnResourceDoesNotExist ( ) override ; <nl> <nl> class XdsResolver : public Resolver { <nl> std : : map < absl : : string_view , RefCountedPtr < ClusterState > > clusters_ ; <nl> } ; <nl> <nl> - void OnListenerChanged ( std : : vector < XdsApi : : Route > routes ) ; <nl> - grpc_error * CreateServiceConfig ( RefCountedPtr < ServiceConfig > * service_config ) ; <nl> + void OnRouteConfigUpdate ( XdsApi : : RdsUpdate rds_update ) ; <nl> void OnError ( grpc_error * error ) ; <nl> - void PropagateUpdate ( ) ; <nl> + void OnResourceDoesNotExist ( ) ; <nl> + <nl> + grpc_error * CreateServiceConfig ( RefCountedPtr < ServiceConfig > * service_config ) ; <nl> + void GenerateResult ( ) ; <nl> void MaybeRemoveUnusedClusters ( ) ; <nl> <nl> std : : string server_name_ ; <nl> const grpc_channel_args * args_ ; <nl> grpc_pollset_set * interested_parties_ ; <nl> OrphanablePtr < XdsClient > xds_client_ ; <nl> + XdsClient : : ListenerWatcherInterface * listener_watcher_ = nullptr ; <nl> + std : : string route_config_name_ ; <nl> + XdsClient : : RouteConfigWatcherInterface * route_config_watcher_ = nullptr ; <nl> ClusterState : : ClusterStateMap cluster_state_map_ ; <nl> std : : vector < XdsApi : : Route > current_update_ ; <nl> } ; <nl> class XdsResolver : public Resolver { <nl> / / <nl> <nl> void XdsResolver : : ListenerWatcher : : OnListenerChanged ( <nl> - std : : vector < XdsApi : : Route > routes ) { <nl> + XdsApi : : LdsUpdate listener ) { <nl> if ( resolver_ - > xds_client_ = = nullptr ) return ; <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_resolver_trace ) ) { <nl> gpr_log ( GPR_INFO , " [ xds_resolver % p ] received updated listener data " , <nl> resolver_ . 
get ( ) ) ; <nl> } <nl> - resolver_ - > OnListenerChanged ( std : : move ( routes ) ) ; <nl> + if ( listener . route_config_name ! = resolver_ - > route_config_name_ ) { <nl> + if ( resolver_ - > route_config_watcher_ ! = nullptr ) { <nl> + resolver_ - > xds_client_ - > CancelRouteConfigDataWatch ( <nl> + resolver_ - > route_config_name_ , resolver_ - > route_config_watcher_ , <nl> + / * delay_unsubscription = * / ! listener . route_config_name . empty ( ) ) ; <nl> + resolver_ - > route_config_watcher_ = nullptr ; <nl> + } <nl> + resolver_ - > route_config_name_ = std : : move ( listener . route_config_name ) ; <nl> + if ( ! resolver_ - > route_config_name_ . empty ( ) ) { <nl> + auto watcher = absl : : make_unique < RouteConfigWatcher > ( resolver_ - > Ref ( ) ) ; <nl> + resolver_ - > route_config_watcher_ = watcher . get ( ) ; <nl> + resolver_ - > xds_client_ - > WatchRouteConfigData ( <nl> + resolver_ - > route_config_name_ , std : : move ( watcher ) ) ; <nl> + } <nl> + } <nl> + if ( resolver_ - > route_config_name_ . empty ( ) ) { <nl> + GPR_ASSERT ( listener . rds_update . has_value ( ) ) ; <nl> + resolver_ - > OnRouteConfigUpdate ( std : : move ( * listener . rds_update ) ) ; <nl> + } <nl> } <nl> <nl> void XdsResolver : : ListenerWatcher : : OnError ( grpc_error * error ) { <nl> if ( resolver_ - > xds_client_ = = nullptr ) return ; <nl> - gpr_log ( GPR_ERROR , " [ xds_resolver % p ] received error : % s " , resolver_ . get ( ) , <nl> - grpc_error_string ( error ) ) ; <nl> + gpr_log ( GPR_ERROR , " [ xds_resolver % p ] received listener error : % s " , <nl> + resolver_ . get ( ) , grpc_error_string ( error ) ) ; <nl> resolver_ - > OnError ( error ) ; <nl> } <nl> <nl> void XdsResolver : : ListenerWatcher : : OnResourceDoesNotExist ( ) { <nl> if ( resolver_ - > xds_client_ = = nullptr ) return ; <nl> - gpr_log ( GPR_ERROR , <nl> - " [ xds_resolver % p ] LDS / RDS resource does not exist - - returning " <nl> - " empty service config " , <nl> - resolver_ . get ( ) ) ; <nl> - Result result ; <nl> - result . service_config = <nl> - ServiceConfig : : Create ( " { } " , & result . service_config_error ) ; <nl> - GPR_ASSERT ( result . service_config ! = nullptr ) ; <nl> - result . args = grpc_channel_args_copy ( resolver_ - > args_ ) ; <nl> - resolver_ - > result_handler ( ) - > ReturnResult ( std : : move ( result ) ) ; <nl> + resolver_ - > OnResourceDoesNotExist ( ) ; <nl> + } <nl> + <nl> + / / <nl> + / / XdsResolver : : RouteConfigWatcher <nl> + / / <nl> + <nl> + void XdsResolver : : RouteConfigWatcher : : OnRouteConfigChanged ( <nl> + XdsApi : : RdsUpdate route_config ) { <nl> + if ( resolver_ - > xds_client_ = = nullptr ) return ; <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_resolver_trace ) ) { <nl> + gpr_log ( GPR_INFO , " [ xds_resolver % p ] received updated route config data " , <nl> + resolver_ . get ( ) ) ; <nl> + } <nl> + resolver_ - > OnRouteConfigUpdate ( std : : move ( route_config ) ) ; <nl> + } <nl> + <nl> + void XdsResolver : : RouteConfigWatcher : : OnError ( grpc_error * error ) { <nl> + if ( resolver_ - > xds_client_ = = nullptr ) return ; <nl> + gpr_log ( GPR_ERROR , " [ xds_resolver % p ] received route config error : % s " , <nl> + resolver_ . 
get ( ) , grpc_error_string ( error ) ) ; <nl> + resolver_ - > OnError ( error ) ; <nl> + } <nl> + <nl> + void XdsResolver : : RouteConfigWatcher : : OnResourceDoesNotExist ( ) { <nl> + if ( resolver_ - > xds_client_ = = nullptr ) return ; <nl> + resolver_ - > OnResourceDoesNotExist ( ) ; <nl> } <nl> <nl> / / <nl> ConfigSelector : : CallConfig XdsResolver : : XdsConfigSelector : : GetCallConfig ( <nl> <nl> void XdsResolver : : StartLocked ( ) { <nl> grpc_error * error = GRPC_ERROR_NONE ; <nl> - xds_client_ = MakeOrphanable < XdsClient > ( <nl> - work_serializer ( ) , interested_parties_ , server_name_ , <nl> - absl : : make_unique < ListenerWatcher > ( Ref ( ) ) , * args_ , & error ) ; <nl> + xds_client_ = MakeOrphanable < XdsClient > ( work_serializer ( ) , server_name_ , <nl> + * args_ , & error ) ; <nl> if ( error ! = GRPC_ERROR_NONE ) { <nl> gpr_log ( GPR_ERROR , <nl> " Failed to create xds client - - channel will remain in " <nl> " TRANSIENT_FAILURE : % s " , <nl> grpc_error_string ( error ) ) ; <nl> result_handler ( ) - > ReturnError ( error ) ; <nl> + return ; <nl> + } <nl> + grpc_pollset_set_add_pollset_set ( xds_client_ - > interested_parties ( ) , <nl> + interested_parties_ ) ; <nl> + auto watcher = absl : : make_unique < ListenerWatcher > ( Ref ( ) ) ; <nl> + listener_watcher_ = watcher . get ( ) ; <nl> + xds_client_ - > WatchListenerData ( server_name_ , std : : move ( watcher ) ) ; <nl> + } <nl> + <nl> + void XdsResolver : : ShutdownLocked ( ) { <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_resolver_trace ) ) { <nl> + gpr_log ( GPR_INFO , " [ xds_resolver % p ] shutting down " , this ) ; <nl> + } <nl> + if ( xds_client_ ! = nullptr ) { <nl> + if ( listener_watcher_ ! = nullptr ) { <nl> + xds_client_ - > CancelListenerDataWatch ( server_name_ , listener_watcher_ , <nl> + / * delay_unsubscription = * / false ) ; <nl> + } <nl> + if ( route_config_watcher_ ! = nullptr ) { <nl> + xds_client_ - > CancelRouteConfigDataWatch ( <nl> + server_name_ , route_config_watcher_ , / * delay_unsubscription = * / false ) ; <nl> + } <nl> + grpc_pollset_set_del_pollset_set ( xds_client_ - > interested_parties ( ) , <nl> + interested_parties_ ) ; <nl> + xds_client_ . reset ( ) ; <nl> + } <nl> + } <nl> + <nl> + void XdsResolver : : OnRouteConfigUpdate ( XdsApi : : RdsUpdate rds_update ) { <nl> + / / Find the relevant VirtualHost from the RouteConfiguration . <nl> + XdsApi : : RdsUpdate : : VirtualHost * vhost = <nl> + rds_update . FindVirtualHostForDomain ( server_name_ ) ; <nl> + if ( vhost = = nullptr ) { <nl> + OnError ( GRPC_ERROR_CREATE_FROM_COPIED_STRING ( <nl> + absl : : StrCat ( " could not find VirtualHost for " , server_name_ , <nl> + " in RouteConfiguration " ) <nl> + . c_str ( ) ) ) ; <nl> + return ; <nl> } <nl> + / / Save the list of routes in the resolver . <nl> + current_update_ = std : : move ( vhost - > routes ) ; <nl> + / / Send a new result to the channel . <nl> + GenerateResult ( ) ; <nl> + } <nl> + <nl> + void XdsResolver : : OnError ( grpc_error * error ) { <nl> + grpc_arg xds_client_arg = xds_client_ - > MakeChannelArg ( ) ; <nl> + Result result ; <nl> + result . args = grpc_channel_args_copy_and_add ( args_ , & xds_client_arg , 1 ) ; <nl> + result . service_config_error = error ; <nl> + result_handler ( ) - > ReturnResult ( std : : move ( result ) ) ; <nl> } <nl> <nl> - void XdsResolver : : OnListenerChanged ( std : : vector < XdsApi : : Route > routes ) { <nl> - / / Save the update in the resolver . 
<nl> - current_update_ = std : : move ( routes ) ; <nl> - / / Propagate the update by creating XdsConfigSelector , CreateServiceConfig , <nl> - / / and ReturnResult . <nl> - PropagateUpdate ( ) ; <nl> + void XdsResolver : : OnResourceDoesNotExist ( ) { <nl> + gpr_log ( GPR_ERROR , <nl> + " [ xds_resolver % p ] LDS / RDS resource does not exist - - returning " <nl> + " empty service config " , <nl> + this ) ; <nl> + Result result ; <nl> + result . service_config = <nl> + ServiceConfig : : Create ( " { } " , & result . service_config_error ) ; <nl> + GPR_ASSERT ( result . service_config ! = nullptr ) ; <nl> + result . args = grpc_channel_args_copy ( args_ ) ; <nl> + result_handler ( ) - > ReturnResult ( std : : move ( result ) ) ; <nl> } <nl> <nl> grpc_error * XdsResolver : : CreateServiceConfig ( <nl> grpc_error * XdsResolver : : CreateServiceConfig ( <nl> return error ; <nl> } <nl> <nl> - void XdsResolver : : OnError ( grpc_error * error ) { <nl> - grpc_arg xds_client_arg = xds_client_ - > MakeChannelArg ( ) ; <nl> - Result result ; <nl> - result . args = grpc_channel_args_copy_and_add ( args_ , & xds_client_arg , 1 ) ; <nl> - result . service_config_error = error ; <nl> - result_handler ( ) - > ReturnResult ( std : : move ( result ) ) ; <nl> - } <nl> - <nl> - void XdsResolver : : PropagateUpdate ( ) { <nl> + void XdsResolver : : GenerateResult ( ) { <nl> / / First create XdsConfigSelector , which may add new entries to the cluster <nl> / / state map , and then CreateServiceConfig for LB policies . <nl> auto config_selector = <nl> void XdsResolver : : MaybeRemoveUnusedClusters ( ) { <nl> } <nl> } <nl> if ( update_needed & & xds_client_ ! = nullptr ) { <nl> - / / Propagate the update by creating XdsConfigSelector , CreateServiceConfig , <nl> - / / and ReturnResult . <nl> - PropagateUpdate ( ) ; <nl> + / / Send a new result to the channel . <nl> + GenerateResult ( ) ; <nl> } <nl> } <nl> <nl> mmm a / src / core / ext / filters / client_channel / resolving_lb_policy . cc <nl> ppp b / src / core / ext / filters / client_channel / resolving_lb_policy . cc <nl> ResolvingLoadBalancingPolicy : : ~ ResolvingLoadBalancingPolicy ( ) { <nl> <nl> void ResolvingLoadBalancingPolicy : : ShutdownLocked ( ) { <nl> if ( resolver_ ! = nullptr ) { <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( * tracer_ ) ) { <nl> + gpr_log ( GPR_INFO , " resolving_lb = % p : shutting down resolver = % p " , this , <nl> + resolver_ . get ( ) ) ; <nl> + } <nl> resolver_ . reset ( ) ; <nl> if ( lb_policy_ ! = nullptr ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( * tracer_ ) ) { <nl> mmm a / src / core / ext / xds / xds_api . cc <nl> ppp b / src / core / ext / xds / xds_api . cc <nl> std : : string XdsApi : : Route : : Matchers : : HeaderMatcher : : ToString ( ) const { <nl> std : : string XdsApi : : Route : : Matchers : : ToString ( ) const { <nl> std : : vector < std : : string > contents ; <nl> contents . push_back ( path_matcher . ToString ( ) ) ; <nl> - for ( const auto & header_it : header_matchers ) { <nl> - contents . push_back ( header_it . ToString ( ) ) ; <nl> + for ( const HeaderMatcher & header_matcher : header_matchers ) { <nl> + contents . push_back ( header_matcher . ToString ( ) ) ; <nl> } <nl> if ( fraction_per_million . has_value ( ) ) { <nl> contents . push_back ( absl : : StrFormat ( " Fraction Per Million % d " , <nl> std : : string XdsApi : : Route : : ToString ( ) const { <nl> if ( ! cluster_name . empty ( ) ) { <nl> contents . 
push_back ( absl : : StrFormat ( " Cluster name : % s " , cluster_name ) ) ; <nl> } <nl> - for ( const auto & weighted_it : weighted_clusters ) { <nl> - contents . push_back ( weighted_it . ToString ( ) ) ; <nl> + for ( const ClusterWeight & cluster_weight : weighted_clusters ) { <nl> + contents . push_back ( cluster_weight . ToString ( ) ) ; <nl> } <nl> return absl : : StrJoin ( contents , " \ n " ) ; <nl> } <nl> MatchType DomainPatternMatchType ( const std : : string & domain_pattern ) { <nl> <nl> } / / namespace <nl> <nl> - const XdsApi : : RdsUpdate : : VirtualHost * <nl> - XdsApi : : RdsUpdate : : FindVirtualHostForDomain ( const std : : string & domain ) const { <nl> + XdsApi : : RdsUpdate : : VirtualHost * XdsApi : : RdsUpdate : : FindVirtualHostForDomain ( <nl> + const std : : string & domain ) { <nl> / / Find the best matched virtual host . <nl> / / The search order for 4 groups of domain patterns : <nl> / / 1 . Exact match . <nl> XdsApi : : RdsUpdate : : FindVirtualHostForDomain ( const std : : string & domain ) const { <nl> / / Within each group , longest match wins . <nl> / / If the same best matched domain pattern appears in multiple virtual hosts , <nl> / / the first matched virtual host wins . <nl> - const VirtualHost * target_vhost = nullptr ; <nl> + VirtualHost * target_vhost = nullptr ; <nl> MatchType best_match_type = INVALID_MATCH ; <nl> size_t longest_match = 0 ; <nl> / / Check each domain pattern in each virtual host to determine the best <nl> / / matched virtual host . <nl> - for ( const VirtualHost & vhost : virtual_hosts ) { <nl> + for ( VirtualHost & vhost : virtual_hosts ) { <nl> for ( const std : : string & domain_pattern : vhost . domains ) { <nl> / / Check the match type first . Skip the pattern if it ' s not better than <nl> / / current match . <nl> grpc_error * RouteConfigParse ( <nl> std : : string domain_pattern = UpbStringToStdString ( domains [ j ] ) ; <nl> const MatchType match_type = DomainPatternMatchType ( domain_pattern ) ; <nl> if ( match_type = = INVALID_MATCH ) { <nl> - return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " Invalid domain pattern . " ) ; <nl> + return GRPC_ERROR_CREATE_FROM_COPIED_STRING ( <nl> + absl : : StrCat ( " Invalid domain pattern \ " " , domain_pattern , " \ " . " ) <nl> + . c_str ( ) ) ; <nl> } <nl> vhost . domains . emplace_back ( std : : move ( domain_pattern ) ) ; <nl> } <nl> grpc_error * RouteConfigParse ( <nl> grpc_error * LdsResponseParse ( <nl> XdsClient * client , TraceFlag * tracer , <nl> const envoy_service_discovery_v3_DiscoveryResponse * response , <nl> - const std : : string & expected_server_name , <nl> - absl : : optional < XdsApi : : LdsUpdate > * lds_update , upb_arena * arena ) { <nl> + const std : : set < absl : : string_view > & expected_listener_names , <nl> + XdsApi : : LdsUpdateMap * lds_update_map , upb_arena * arena ) { <nl> / / Get the resources from the response . <nl> size_t size ; <nl> const google_protobuf_Any * const * resources = <nl> grpc_error * LdsResponseParse ( <nl> return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " Can ' t decode listener . " ) ; <nl> } <nl> / / Check listener name . Ignore unexpected listeners . <nl> - absl : : string_view name = <nl> - UpbStringToAbsl ( envoy_config_listener_v3_Listener_name ( listener ) ) ; <nl> - if ( name ! = expected_server_name ) continue ; <nl> + std : : string listener_name = <nl> + UpbStringToStdString ( envoy_config_listener_v3_Listener_name ( listener ) ) ; <nl> + if ( expected_listener_names . 
find ( listener_name ) = = <nl> + expected_listener_names . end ( ) ) { <nl> + continue ; <nl> + } <nl> + / / Fail if listener name is duplicated . <nl> + if ( lds_update_map - > find ( listener_name ) ! = lds_update_map - > end ( ) ) { <nl> + return GRPC_ERROR_CREATE_FROM_COPIED_STRING ( <nl> + absl : : StrCat ( " duplicate listener name \ " " , listener_name , " \ " " ) <nl> + . c_str ( ) ) ; <nl> + } <nl> + XdsApi : : LdsUpdate & lds_update = ( * lds_update_map ) [ listener_name ] ; <nl> / / Get api_listener and decode it to http_connection_manager . <nl> const envoy_config_listener_v3_ApiListener * api_listener = <nl> envoy_config_listener_v3_Listener_api_listener ( listener ) ; <nl> grpc_error * LdsResponseParse ( <nl> grpc_error * error = <nl> RouteConfigParse ( client , tracer , route_config , & rds_update ) ; <nl> if ( error ! = GRPC_ERROR_NONE ) return error ; <nl> - lds_update - > emplace ( ) ; <nl> - ( * lds_update ) - > rds_update = std : : move ( rds_update ) ; <nl> - return GRPC_ERROR_NONE ; <nl> + lds_update . rds_update = std : : move ( rds_update ) ; <nl> + continue ; <nl> } <nl> / / Validate that RDS must be used to get the route_config dynamically . <nl> if ( ! envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_has_rds ( <nl> grpc_error * LdsResponseParse ( <nl> " HttpConnectionManager ConfigSource for RDS does not specify ADS . " ) ; <nl> } <nl> / / Get the route_config_name . <nl> - lds_update - > emplace ( ) ; <nl> - ( * lds_update ) - > route_config_name = UpbStringToStdString ( <nl> + lds_update . route_config_name = UpbStringToStdString ( <nl> envoy_extensions_filters_network_http_connection_manager_v3_Rds_route_config_name ( <nl> rds ) ) ; <nl> - return GRPC_ERROR_NONE ; <nl> } <nl> return GRPC_ERROR_NONE ; <nl> } <nl> grpc_error * RdsResponseParse ( <nl> XdsClient * client , TraceFlag * tracer , <nl> const envoy_service_discovery_v3_DiscoveryResponse * response , <nl> const std : : set < absl : : string_view > & expected_route_configuration_names , <nl> - absl : : optional < XdsApi : : RdsUpdate > * rds_update , upb_arena * arena ) { <nl> + XdsApi : : RdsUpdateMap * rds_update_map , upb_arena * arena ) { <nl> / / Get the resources from the response . <nl> size_t size ; <nl> const google_protobuf_Any * const * resources = <nl> grpc_error * RdsResponseParse ( <nl> return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " Can ' t decode route_config . " ) ; <nl> } <nl> / / Check route_config_name . Ignore unexpected route_config . <nl> - absl : : string_view route_config_name = UpbStringToAbsl ( <nl> + std : : string route_config_name = UpbStringToStdString ( <nl> envoy_config_route_v3_RouteConfiguration_name ( route_config ) ) ; <nl> if ( expected_route_configuration_names . find ( route_config_name ) = = <nl> expected_route_configuration_names . end ( ) ) { <nl> continue ; <nl> } <nl> + / / Fail if route config name is duplicated . <nl> + if ( rds_update_map - > find ( route_config_name ) ! = rds_update_map - > end ( ) ) { <nl> + return GRPC_ERROR_CREATE_FROM_COPIED_STRING ( <nl> + absl : : StrCat ( " duplicate route config name \ " " , route_config_name , <nl> + " \ " " ) <nl> + . c_str ( ) ) ; <nl> + } <nl> / / Parse the route_config . 
<nl> - XdsApi : : RdsUpdate local_rds_update ; <nl> + XdsApi : : RdsUpdate & rds_update = <nl> + ( * rds_update_map ) [ std : : move ( route_config_name ) ] ; <nl> grpc_error * error = <nl> - RouteConfigParse ( client , tracer , route_config , & local_rds_update ) ; <nl> + RouteConfigParse ( client , tracer , route_config , & rds_update ) ; <nl> if ( error ! = GRPC_ERROR_NONE ) return error ; <nl> - rds_update - > emplace ( std : : move ( local_rds_update ) ) ; <nl> - return GRPC_ERROR_NONE ; <nl> } <nl> return GRPC_ERROR_NONE ; <nl> } <nl> grpc_error * CdsResponseParse ( <nl> envoy_service_discovery_v3_DiscoveryResponse_resources ( response , & size ) ; <nl> / / Parse all the resources in the CDS response . <nl> for ( size_t i = 0 ; i < size ; + + i ) { <nl> - XdsApi : : CdsUpdate cds_update ; <nl> / / Check the type_url of the resource . <nl> absl : : string_view type_url = <nl> UpbStringToAbsl ( google_protobuf_Any_type_url ( resources [ i ] ) ) ; <nl> grpc_error * CdsResponseParse ( <nl> absl : : StrCat ( " duplicate resource name \ " " , cluster_name , " \ " " ) <nl> . c_str ( ) ) ; <nl> } <nl> + XdsApi : : CdsUpdate & cds_update = ( * cds_update_map ) [ std : : move ( cluster_name ) ] ; <nl> / / Check the cluster_discovery_type . <nl> if ( ! envoy_config_cluster_v3_Cluster_has_type ( cluster ) ) { <nl> return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " DiscoveryType not found . " ) ; <nl> grpc_error * CdsResponseParse ( <nl> } <nl> cds_update . lrs_load_reporting_server_name . emplace ( " " ) ; <nl> } <nl> - cds_update_map - > emplace ( std : : move ( cluster_name ) , std : : move ( cds_update ) ) ; <nl> } <nl> return GRPC_ERROR_NONE ; <nl> } <nl> grpc_error * EdsResponseParse ( <nl> const google_protobuf_Any * const * resources = <nl> envoy_service_discovery_v3_DiscoveryResponse_resources ( response , & size ) ; <nl> for ( size_t i = 0 ; i < size ; + + i ) { <nl> - XdsApi : : EdsUpdate eds_update ; <nl> / / Check the type_url of the resource . <nl> absl : : string_view type_url = <nl> UpbStringToAbsl ( google_protobuf_Any_type_url ( resources [ i ] ) ) ; <nl> grpc_error * EdsResponseParse ( <nl> absl : : StrCat ( " duplicate resource name \ " " , eds_service_name , " \ " " ) <nl> . c_str ( ) ) ; <nl> } <nl> + XdsApi : : EdsUpdate & eds_update = <nl> + ( * eds_update_map ) [ std : : move ( eds_service_name ) ] ; <nl> / / Get the endpoints . <nl> size_t locality_size ; <nl> const envoy_config_endpoint_v3_LocalityLbEndpoints * const * endpoints = <nl> grpc_error * EdsResponseParse ( <nl> if ( error ! = GRPC_ERROR_NONE ) return error ; <nl> } <nl> } <nl> - eds_update_map - > emplace ( std : : move ( eds_service_name ) , std : : move ( eds_update ) ) ; <nl> } <nl> return GRPC_ERROR_NONE ; <nl> } <nl> std : : string TypeUrlInternalToExternal ( absl : : string_view type_url ) { <nl> } / / namespace <nl> <nl> XdsApi : : AdsParseResult XdsApi : : ParseAdsResponse ( <nl> - const grpc_slice & encoded_response , const std : : string & expected_server_name , <nl> + const grpc_slice & encoded_response , <nl> + const std : : set < absl : : string_view > & expected_listener_names , <nl> const std : : set < absl : : string_view > & expected_route_configuration_names , <nl> const std : : set < absl : : string_view > & expected_cluster_names , <nl> const std : : set < absl : : string_view > & expected_eds_service_names ) { <nl> XdsApi : : AdsParseResult XdsApi : : ParseAdsResponse ( <nl> / / Parse the response according to the resource type . <nl> if ( IsLds ( result . type_url ) ) { <nl> result . 
parse_error = <nl> - LdsResponseParse ( client_ , tracer_ , response , expected_server_name , <nl> - & result . lds_update , arena . ptr ( ) ) ; <nl> + LdsResponseParse ( client_ , tracer_ , response , expected_listener_names , <nl> + & result . lds_update_map , arena . ptr ( ) ) ; <nl> } else if ( IsRds ( result . type_url ) ) { <nl> result . parse_error = RdsResponseParse ( client_ , tracer_ , response , <nl> expected_route_configuration_names , <nl> - & result . rds_update , arena . ptr ( ) ) ; <nl> + & result . rds_update_map , arena . ptr ( ) ) ; <nl> } else if ( IsCds ( result . type_url ) ) { <nl> result . parse_error = <nl> CdsResponseParse ( client_ , tracer_ , response , expected_cluster_names , <nl> mmm a / src / core / ext / xds / xds_api . h <nl> ppp b / src / core / ext / xds / xds_api . h <nl> class XdsApi { <nl> return virtual_hosts = = other . virtual_hosts ; <nl> } <nl> std : : string ToString ( ) const ; <nl> - const VirtualHost * FindVirtualHostForDomain ( <nl> - const std : : string & domain ) const ; <nl> + VirtualHost * FindVirtualHostForDomain ( const std : : string & domain ) ; <nl> } ; <nl> <nl> / / TODO ( roth ) : When we can use absl : : variant < > , consider using that <nl> class XdsApi { <nl> / / If set to the empty string , will use the same server we obtained the CDS <nl> / / data from . <nl> absl : : optional < std : : string > lrs_load_reporting_server_name ; <nl> + <nl> + bool operator = = ( const CdsUpdate & other ) const { <nl> + return eds_service_name = = other . eds_service_name & & <nl> + lrs_load_reporting_server_name = = <nl> + other . lrs_load_reporting_server_name ; <nl> + } <nl> } ; <nl> <nl> using CdsUpdateMap = std : : map < std : : string / * cluster_name * / , CdsUpdate > ; <nl> class XdsApi { <nl> std : : string version ; <nl> std : : string nonce ; <nl> std : : string type_url ; <nl> - absl : : optional < LdsUpdate > lds_update ; <nl> - absl : : optional < RdsUpdate > rds_update ; <nl> + LdsUpdateMap lds_update_map ; <nl> + RdsUpdateMap rds_update_map ; <nl> CdsUpdateMap cds_update_map ; <nl> EdsUpdateMap eds_update_map ; <nl> } ; <nl> AdsParseResult ParseAdsResponse ( <nl> const grpc_slice & encoded_response , <nl> - const std : : string & expected_server_name , <nl> + const std : : set < absl : : string_view > & expected_listener_names , <nl> const std : : set < absl : : string_view > & expected_route_configuration_names , <nl> const std : : set < absl : : string_view > & expected_cluster_names , <nl> const std : : set < absl : : string_view > & expected_eds_service_names ) ; <nl> mmm a / src / core / ext / xds / xds_client . cc <nl> ppp b / src / core / ext / xds / xds_client . cc <nl> class XdsClient : : ChannelState : : AdsCallState <nl> <nl> void Orphan ( ) override { <nl> Finish ( ) ; <nl> - Unref ( ) ; <nl> + Unref ( DEBUG_LOCATION , " Orphan " ) ; <nl> } <nl> <nl> void Start ( RefCountedPtr < AdsCallState > ads_calld ) { <nl> if ( sent_ ) return ; <nl> sent_ = true ; <nl> ads_calld_ = std : : move ( ads_calld ) ; <nl> - Ref ( ) . release ( ) ; <nl> + Ref ( DEBUG_LOCATION , " timer " ) . 
release ( ) ; <nl> timer_pending_ = true ; <nl> grpc_timer_init ( <nl> & timer_ , <nl> class XdsClient : : ChannelState : : AdsCallState <nl> gpr_log ( GPR_INFO , " [ xds_client % p ] % s " , ads_calld_ - > xds_client ( ) , <nl> grpc_error_string ( watcher_error ) ) ; <nl> } <nl> - if ( type_url_ = = XdsApi : : kLdsTypeUrl | | <nl> - type_url_ = = XdsApi : : kRdsTypeUrl ) { <nl> - ads_calld_ - > xds_client ( ) - > listener_watcher_ - > OnError ( watcher_error ) ; <nl> + if ( type_url_ = = XdsApi : : kLdsTypeUrl ) { <nl> + ListenerState & state = ads_calld_ - > xds_client ( ) - > listener_map_ [ name_ ] ; <nl> + for ( const auto & p : state . watchers ) { <nl> + p . first - > OnError ( GRPC_ERROR_REF ( watcher_error ) ) ; <nl> + } <nl> + } else if ( type_url_ = = XdsApi : : kRdsTypeUrl ) { <nl> + RouteConfigState & state = <nl> + ads_calld_ - > xds_client ( ) - > route_config_map_ [ name_ ] ; <nl> + for ( const auto & p : state . watchers ) { <nl> + p . first - > OnError ( GRPC_ERROR_REF ( watcher_error ) ) ; <nl> + } <nl> } else if ( type_url_ = = XdsApi : : kCdsTypeUrl ) { <nl> ClusterState & state = ads_calld_ - > xds_client ( ) - > cluster_map_ [ name_ ] ; <nl> for ( const auto & p : state . watchers ) { <nl> p . first - > OnError ( GRPC_ERROR_REF ( watcher_error ) ) ; <nl> } <nl> - GRPC_ERROR_UNREF ( watcher_error ) ; <nl> } else if ( type_url_ = = XdsApi : : kEdsTypeUrl ) { <nl> EndpointState & state = ads_calld_ - > xds_client ( ) - > endpoint_map_ [ name_ ] ; <nl> for ( const auto & p : state . watchers ) { <nl> p . first - > OnError ( GRPC_ERROR_REF ( watcher_error ) ) ; <nl> } <nl> - GRPC_ERROR_UNREF ( watcher_error ) ; <nl> } else { <nl> GPR_UNREACHABLE_CODE ( return ) ; <nl> } <nl> + GRPC_ERROR_UNREF ( watcher_error ) ; <nl> } <nl> ads_calld_ . reset ( ) ; <nl> - Unref ( ) ; <nl> + Unref ( DEBUG_LOCATION , " timer " ) ; <nl> GRPC_ERROR_UNREF ( error ) ; <nl> } <nl> <nl> class XdsClient : : ChannelState : : AdsCallState <nl> <nl> void SendMessageLocked ( const std : : string & type_url ) ; <nl> <nl> - void AcceptLdsUpdate ( absl : : optional < XdsApi : : LdsUpdate > lds_update ) ; <nl> - void AcceptRdsUpdate ( absl : : optional < XdsApi : : RdsUpdate > rds_update ) ; <nl> + void AcceptLdsUpdate ( XdsApi : : LdsUpdateMap lds_update_map ) ; <nl> + void AcceptRdsUpdate ( XdsApi : : RdsUpdateMap rds_update_map ) ; <nl> void AcceptCdsUpdate ( XdsApi : : CdsUpdateMap cds_update_map ) ; <nl> void AcceptEdsUpdate ( XdsApi : : EdsUpdateMap eds_update_map ) ; <nl> <nl> XdsClient : : ChannelState : : ~ ChannelState ( ) { <nl> this ) ; <nl> } <nl> grpc_channel_destroy ( channel_ ) ; <nl> + xds_client_ . 
reset ( DEBUG_LOCATION , " ChannelState " ) ; <nl> } <nl> <nl> void XdsClient : : ChannelState : : Orphan ( ) { <nl> void XdsClient : : ChannelState : : StartConnectivityWatchLocked ( ) { <nl> grpc_channel_element * client_channel_elem = <nl> grpc_channel_stack_last_element ( grpc_channel_get_channel_stack ( channel_ ) ) ; <nl> GPR_ASSERT ( client_channel_elem - > filter = = & grpc_client_channel_filter ) ; <nl> - watcher_ = new StateWatcher ( Ref ( ) ) ; <nl> + watcher_ = new StateWatcher ( Ref ( DEBUG_LOCATION , " ChannelState + watch " ) ) ; <nl> grpc_client_channel_start_connectivity_watch ( <nl> client_channel_elem , GRPC_CHANNEL_IDLE , <nl> OrphanablePtr < AsyncConnectivityStateWatcherInterface > ( watcher_ ) ) ; <nl> void XdsClient : : ChannelState : : Unsubscribe ( const std : : string & type_url , <nl> const std : : string & name , <nl> bool delay_unsubscription ) { <nl> if ( ads_calld_ ! = nullptr ) { <nl> - ads_calld_ - > calld ( ) - > Unsubscribe ( type_url , name , delay_unsubscription ) ; <nl> - if ( ! ads_calld_ - > calld ( ) - > HasSubscribedResources ( ) ) ads_calld_ . reset ( ) ; <nl> + auto * calld = ads_calld_ - > calld ( ) ; <nl> + if ( calld ! = nullptr ) { <nl> + calld - > Unsubscribe ( type_url , name , delay_unsubscription ) ; <nl> + if ( ! calld - > HasSubscribedResources ( ) ) ads_calld_ . reset ( ) ; <nl> + } <nl> } <nl> } <nl> <nl> XdsClient : : ChannelState : : AdsCallState : : AdsCallState ( <nl> / / activity in xds_client ( ) - > interested_parties_ , which is comprised of <nl> / / the polling entities from client_channel . <nl> GPR_ASSERT ( xds_client ( ) ! = nullptr ) ; <nl> - GPR_ASSERT ( ! xds_client ( ) - > server_name_ . empty ( ) ) ; <nl> / / Create a call with the specified method name . <nl> const auto & method = <nl> xds_client ( ) - > bootstrap_ - > server ( ) . ShouldUseV3 ( ) <nl> XdsClient : : ChannelState : : AdsCallState : : AdsCallState ( <nl> / / Op : send request message . <nl> GRPC_CLOSURE_INIT ( & on_request_sent_ , OnRequestSent , this , <nl> grpc_schedule_on_exec_ctx ) ; <nl> - if ( xds_client ( ) - > listener_watcher_ ! = nullptr ) { <nl> - Subscribe ( XdsApi : : kLdsTypeUrl , xds_client ( ) - > server_name_ ) ; <nl> - if ( xds_client ( ) - > lds_result_ . has_value ( ) & & <nl> - ! xds_client ( ) - > lds_result_ - > route_config_name . empty ( ) ) { <nl> - Subscribe ( XdsApi : : kRdsTypeUrl , <nl> - xds_client ( ) - > lds_result_ - > route_config_name ) ; <nl> - } <nl> + for ( const auto & p : xds_client ( ) - > listener_map_ ) { <nl> + Subscribe ( XdsApi : : kLdsTypeUrl , std : : string ( p . first ) ) ; <nl> + } <nl> + for ( const auto & p : xds_client ( ) - > route_config_map_ ) { <nl> + Subscribe ( XdsApi : : kRdsTypeUrl , std : : string ( p . first ) ) ; <nl> } <nl> for ( const auto & p : xds_client ( ) - > cluster_map_ ) { <nl> Subscribe ( XdsApi : : kCdsTypeUrl , std : : string ( p . first ) ) ; <nl> bool XdsClient : : ChannelState : : AdsCallState : : HasSubscribedResources ( ) const { <nl> } <nl> <nl> void XdsClient : : ChannelState : : AdsCallState : : AcceptLdsUpdate ( <nl> - absl : : optional < XdsApi : : LdsUpdate > lds_update ) { <nl> - if ( ! lds_update . has_value ( ) ) { <nl> - gpr_log ( GPR_INFO , <nl> - " [ xds_client % p ] LDS update does not include requested resource " , <nl> - xds_client ( ) ) ; <nl> - if ( xds_client ( ) - > lds_result_ . has_value ( ) & & <nl> - ! xds_client ( ) - > lds_result_ - > route_config_name . 
empty ( ) ) { <nl> - Unsubscribe ( XdsApi : : kRdsTypeUrl , <nl> - xds_client ( ) - > lds_result_ - > route_config_name , <nl> - / * delay_unsubscription = * / false ) ; <nl> - xds_client ( ) - > rds_result_ . reset ( ) ; <nl> - } <nl> - xds_client ( ) - > lds_result_ . reset ( ) ; <nl> - xds_client ( ) - > listener_watcher_ - > OnResourceDoesNotExist ( ) ; <nl> - return ; <nl> - } <nl> + XdsApi : : LdsUpdateMap lds_update_map ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> gpr_log ( GPR_INFO , <nl> - " [ xds_client % p ] LDS update received : route_config_name = % s " , <nl> - xds_client ( ) , <nl> - ( ! lds_update - > route_config_name . empty ( ) <nl> - ? lds_update - > route_config_name . c_str ( ) <nl> - : " < inlined > " ) ) ; <nl> - if ( lds_update - > rds_update . has_value ( ) ) { <nl> - gpr_log ( GPR_INFO , " RouteConfiguration : % s " , <nl> - lds_update - > rds_update - > ToString ( ) . c_str ( ) ) ; <nl> - } <nl> + " [ xds_client % p ] LDS update received containing % " PRIuPTR <nl> + " resources " , <nl> + xds_client ( ) , lds_update_map . size ( ) ) ; <nl> } <nl> auto & lds_state = state_map_ [ XdsApi : : kLdsTypeUrl ] ; <nl> - auto & state = lds_state . subscribed_resources [ xds_client ( ) - > server_name_ ] ; <nl> - if ( state ! = nullptr ) state - > Finish ( ) ; <nl> - / / Ignore identical update . <nl> - if ( xds_client ( ) - > lds_result_ = = lds_update ) { <nl> + std : : set < std : : string > rds_resource_names_seen ; <nl> + for ( auto & p : lds_update_map ) { <nl> + const std : : string & listener_name = p . first ; <nl> + XdsApi : : LdsUpdate & lds_update = p . second ; <nl> + auto & state = lds_state . subscribed_resources [ listener_name ] ; <nl> + if ( state ! = nullptr ) state - > Finish ( ) ; <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> - gpr_log ( GPR_INFO , <nl> - " [ xds_client % p ] LDS update identical to current , ignoring . " , <nl> - xds_client ( ) ) ; <nl> + gpr_log ( GPR_INFO , " [ xds_client % p ] LDS resource % s : route_config_name = % s " , <nl> + xds_client ( ) , listener_name . c_str ( ) , <nl> + ( ! lds_update . route_config_name . empty ( ) <nl> + ? lds_update . route_config_name . c_str ( ) <nl> + : " < inlined > " ) ) ; <nl> + if ( lds_update . rds_update . has_value ( ) ) { <nl> + gpr_log ( GPR_INFO , " RouteConfiguration : % s " , <nl> + lds_update . rds_update - > ToString ( ) . c_str ( ) ) ; <nl> + } <nl> + } <nl> + / / Record the RDS resource names seen . <nl> + if ( ! lds_update . route_config_name . empty ( ) ) { <nl> + rds_resource_names_seen . insert ( lds_update . route_config_name ) ; <nl> + } <nl> + / / Ignore identical update . <nl> + ListenerState & listener_state = xds_client ( ) - > listener_map_ [ listener_name ] ; <nl> + if ( listener_state . update . has_value ( ) & & <nl> + * listener_state . update = = lds_update ) { <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> + gpr_log ( GPR_INFO , <nl> + " [ xds_client % p ] LDS update for % s identical to current , " <nl> + " ignoring . " , <nl> + xds_client ( ) , listener_name . c_str ( ) ) ; <nl> + } <nl> + continue ; <nl> + } <nl> + / / Update the listener state . <nl> + listener_state . update = std : : move ( lds_update ) ; <nl> + / / Notify watchers . <nl> + for ( const auto & p : listener_state . watchers ) { <nl> + p . first - > OnListenerChanged ( * listener_state . update ) ; <nl> } <nl> - return ; <nl> } <nl> - if ( xds_client ( ) - > lds_result_ . has_value ( ) & & <nl> - ! 
xds_client ( ) - > lds_result_ - > route_config_name . empty ( ) ) { <nl> - Unsubscribe ( <nl> - XdsApi : : kRdsTypeUrl , xds_client ( ) - > lds_result_ - > route_config_name , <nl> - / * delay_unsubscription = * / ! lds_update - > route_config_name . empty ( ) ) ; <nl> - xds_client ( ) - > rds_result_ . reset ( ) ; <nl> - } <nl> - xds_client ( ) - > lds_result_ = std : : move ( lds_update ) ; <nl> - if ( xds_client ( ) - > lds_result_ - > rds_update . has_value ( ) ) { <nl> - / / If the RouteConfiguration was found inlined in LDS response , notify <nl> - / / the watcher immediately . <nl> - const XdsApi : : RdsUpdate : : VirtualHost * vhost = <nl> - xds_client ( ) - > lds_result_ - > rds_update - > FindVirtualHostForDomain ( <nl> - xds_client ( ) - > server_name_ ) ; <nl> - if ( vhost = = nullptr ) { <nl> - xds_client ( ) - > listener_watcher_ - > OnError ( <nl> - GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> - " no VirtualHost found for domain " ) ) ; <nl> - } else { <nl> - xds_client ( ) - > listener_watcher_ - > OnListenerChanged ( vhost - > routes ) ; <nl> + / / For any subscribed resource that is not present in the update , <nl> + / / remove it from the cache and notify watchers that it does not exist . <nl> + for ( const auto & p : lds_state . subscribed_resources ) { <nl> + const std : : string & listener_name = p . first ; <nl> + if ( lds_update_map . find ( listener_name ) = = lds_update_map . end ( ) ) { <nl> + ListenerState & listener_state = <nl> + xds_client ( ) - > listener_map_ [ listener_name ] ; <nl> + / / If the resource was newly requested but has not yet been received , <nl> + / / we don ' t want to generate an error for the watchers , because this LDS <nl> + / / response may be in reaction to an earlier request that did not yet <nl> + / / request the new resource , so its absence from the response does not <nl> + / / necessarily indicate that the resource does not exist . <nl> + / / For that case , we rely on the request timeout instead . <nl> + if ( ! listener_state . update . has_value ( ) ) continue ; <nl> + listener_state . update . reset ( ) ; <nl> + for ( const auto & p : listener_state . watchers ) { <nl> + p . first - > OnResourceDoesNotExist ( ) ; <nl> + } <nl> + } <nl> + } <nl> + / / For any RDS resource that is no longer referred to by any LDS <nl> + / / resources , remove it from the cache and notify watchers that it <nl> + / / does not exist . <nl> + auto & rds_state = state_map_ [ XdsApi : : kRdsTypeUrl ] ; <nl> + for ( const auto & p : rds_state . subscribed_resources ) { <nl> + const std : : string & rds_resource_name = p . first ; <nl> + if ( rds_resource_names_seen . find ( rds_resource_name ) = = <nl> + rds_resource_names_seen . end ( ) ) { <nl> + RouteConfigState & route_config_state = <nl> + xds_client ( ) - > route_config_map_ [ rds_resource_name ] ; <nl> + route_config_state . update . reset ( ) ; <nl> + for ( const auto & p : route_config_state . watchers ) { <nl> + p . first - > OnResourceDoesNotExist ( ) ; <nl> + } <nl> } <nl> - } else { <nl> - / / Send RDS request for dynamic resolution . <nl> - Subscribe ( XdsApi : : kRdsTypeUrl , <nl> - xds_client ( ) - > lds_result_ - > route_config_name ) ; <nl> } <nl> } <nl> <nl> void XdsClient : : ChannelState : : AdsCallState : : AcceptRdsUpdate ( <nl> - absl : : optional < XdsApi : : RdsUpdate > rds_update ) { <nl> - if ( ! rds_update . 
has_value ( ) ) { <nl> - gpr_log ( GPR_INFO , <nl> - " [ xds_client % p ] RDS update does not include requested resource " , <nl> - xds_client ( ) ) ; <nl> - xds_client ( ) - > rds_result_ . reset ( ) ; <nl> - xds_client ( ) - > listener_watcher_ - > OnResourceDoesNotExist ( ) ; <nl> - return ; <nl> - } <nl> + XdsApi : : RdsUpdateMap rds_update_map ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> - gpr_log ( GPR_INFO , " [ xds_client % p ] RDS update received : \ n % s " , xds_client ( ) , <nl> - rds_update - > ToString ( ) . c_str ( ) ) ; <nl> + gpr_log ( GPR_INFO , <nl> + " [ xds_client % p ] RDS update received containing % " PRIuPTR <nl> + " resources " , <nl> + xds_client ( ) , rds_update_map . size ( ) ) ; <nl> } <nl> - auto & rds_state = state_map_ [ XdsApi : : kRdsTypeUrl ] ; <nl> - auto & state = <nl> - rds_state <nl> - . subscribed_resources [ xds_client ( ) - > lds_result_ - > route_config_name ] ; <nl> - if ( state ! = nullptr ) state - > Finish ( ) ; <nl> - / / Ignore identical update . <nl> - if ( xds_client ( ) - > rds_result_ = = rds_update ) { <nl> + auto & rds_state = state_map_ [ XdsApi : : kLdsTypeUrl ] ; <nl> + for ( auto & p : rds_update_map ) { <nl> + const std : : string & route_config_name = p . first ; <nl> + XdsApi : : RdsUpdate & rds_update = p . second ; <nl> + auto & state = rds_state . subscribed_resources [ route_config_name ] ; <nl> + if ( state ! = nullptr ) state - > Finish ( ) ; <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> - gpr_log ( GPR_INFO , <nl> - " [ xds_client % p ] RDS update identical to current , ignoring . " , <nl> - xds_client ( ) ) ; <nl> + gpr_log ( GPR_INFO , " [ xds_client % p ] RDS resource : \ n % s " , xds_client ( ) , <nl> + rds_update . ToString ( ) . c_str ( ) ) ; <nl> + } <nl> + RouteConfigState & route_config_state = <nl> + xds_client ( ) - > route_config_map_ [ route_config_name ] ; <nl> + / / Ignore identical update . <nl> + if ( route_config_state . update . has_value ( ) & & <nl> + * route_config_state . update = = rds_update ) { <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> + gpr_log ( GPR_INFO , <nl> + " [ xds_client % p ] RDS resource identical to current , ignoring " , <nl> + xds_client ( ) ) ; <nl> + } <nl> + continue ; <nl> + } <nl> + / / Update the cache . <nl> + route_config_state . update = std : : move ( rds_update ) ; <nl> + / / Notify all watchers . <nl> + for ( const auto & p : route_config_state . watchers ) { <nl> + p . first - > OnRouteConfigChanged ( * route_config_state . update ) ; <nl> } <nl> - return ; <nl> - } <nl> - xds_client ( ) - > rds_result_ = std : : move ( rds_update ) ; <nl> - / / Notify the watcher . <nl> - const XdsApi : : RdsUpdate : : VirtualHost * vhost = <nl> - xds_client ( ) - > rds_result_ - > FindVirtualHostForDomain ( <nl> - xds_client ( ) - > server_name_ ) ; <nl> - if ( vhost = = nullptr ) { <nl> - xds_client ( ) - > listener_watcher_ - > OnError ( <nl> - GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> - " no VirtualHost found for domain " ) ) ; <nl> - } else { <nl> - xds_client ( ) - > listener_watcher_ - > OnListenerChanged ( vhost - > routes ) ; <nl> } <nl> } <nl> <nl> void XdsClient : : ChannelState : : AdsCallState : : AcceptCdsUpdate ( <nl> / / Ignore identical update . <nl> ClusterState & cluster_state = xds_client ( ) - > cluster_map_ [ cluster_name ] ; <nl> if ( cluster_state . update . has_value ( ) & & <nl> - cds_update . eds_service_name = = cluster_state . 
update - > eds_service_name & & <nl> - cds_update . lrs_load_reporting_server_name = = <nl> - cluster_state . update - > lrs_load_reporting_server_name ) { <nl> + * cluster_state . update = = cds_update ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> gpr_log ( GPR_INFO , <nl> " [ xds_client % p ] CDS update identical to current , ignoring . " , <nl> void XdsClient : : ChannelState : : AdsCallState : : OnResponseReceivedLocked ( ) { <nl> recv_message_payload_ = nullptr ; <nl> / / Parse and validate the response . <nl> XdsApi : : AdsParseResult result = xds_client ( ) - > api_ . ParseAdsResponse ( <nl> - response_slice , xds_client ( ) - > server_name_ , <nl> + response_slice , ResourceNamesForRequest ( XdsApi : : kLdsTypeUrl ) , <nl> ResourceNamesForRequest ( XdsApi : : kRdsTypeUrl ) , <nl> ResourceNamesForRequest ( XdsApi : : kCdsTypeUrl ) , <nl> ResourceNamesForRequest ( XdsApi : : kEdsTypeUrl ) ) ; <nl> void XdsClient : : ChannelState : : AdsCallState : : OnResponseReceivedLocked ( ) { <nl> seen_response_ = true ; <nl> / / Accept the ADS response according to the type_url . <nl> if ( result . type_url = = XdsApi : : kLdsTypeUrl ) { <nl> - AcceptLdsUpdate ( std : : move ( result . lds_update ) ) ; <nl> + AcceptLdsUpdate ( std : : move ( result . lds_update_map ) ) ; <nl> } else if ( result . type_url = = XdsApi : : kRdsTypeUrl ) { <nl> - AcceptRdsUpdate ( std : : move ( result . rds_update ) ) ; <nl> + AcceptRdsUpdate ( std : : move ( result . rds_update_map ) ) ; <nl> } else if ( result . type_url = = XdsApi : : kCdsTypeUrl ) { <nl> AcceptCdsUpdate ( std : : move ( result . cds_update_map ) ) ; <nl> } else if ( result . type_url = = XdsApi : : kEdsTypeUrl ) { <nl> XdsClient : : ChannelState : : AdsCallState : : ResourceNamesForRequest ( <nl> for ( auto & p : it - > second . subscribed_resources ) { <nl> resource_names . insert ( p . first ) ; <nl> OrphanablePtr < ResourceState > & state = p . second ; <nl> - state - > Start ( Ref ( ) ) ; <nl> + state - > Start ( Ref ( DEBUG_LOCATION , " ResourceState " ) ) ; <nl> } <nl> } <nl> return resource_names ; <nl> grpc_channel * CreateXdsChannel ( const XdsBootstrap & bootstrap , <nl> } / / namespace <nl> <nl> XdsClient : : XdsClient ( std : : shared_ptr < WorkSerializer > work_serializer , <nl> - grpc_pollset_set * interested_parties , <nl> absl : : string_view server_name , <nl> - std : : unique_ptr < ListenerWatcherInterface > watcher , <nl> const grpc_channel_args & channel_args , grpc_error * * error ) <nl> : InternallyRefCounted < XdsClient > ( & grpc_xds_client_trace ) , <nl> request_timeout_ ( GetRequestTimeout ( channel_args ) ) , <nl> work_serializer_ ( std : : move ( work_serializer ) ) , <nl> - interested_parties_ ( interested_parties ) , <nl> + interested_parties_ ( grpc_pollset_set_create ( ) ) , <nl> bootstrap_ ( <nl> XdsBootstrap : : ReadFromFile ( this , & grpc_xds_client_trace , error ) ) , <nl> api_ ( this , & grpc_xds_client_trace , bootstrap_ . get ( ) ) , <nl> - server_name_ ( server_name ) , <nl> - listener_watcher_ ( std : : move ( watcher ) ) { <nl> + server_name_ ( server_name ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> gpr_log ( GPR_INFO , " [ xds_client % p ] creating xds client " , this ) ; <nl> } <nl> XdsClient : : XdsClient ( std : : shared_ptr < WorkSerializer > work_serializer , <nl> } <nl> chand_ = MakeOrphanable < ChannelState > ( <nl> Ref ( DEBUG_LOCATION , " XdsClient + ChannelState " ) , channel ) ; <nl> - if ( listener_watcher_ ! 
= nullptr ) { <nl> - chand_ - > Subscribe ( XdsApi : : kLdsTypeUrl , std : : string ( server_name ) ) ; <nl> - } <nl> } <nl> <nl> XdsClient : : ~ XdsClient ( ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> gpr_log ( GPR_INFO , " [ xds_client % p ] destroying xds client " , this ) ; <nl> } <nl> + grpc_pollset_set_destroy ( interested_parties_ ) ; <nl> } <nl> <nl> void XdsClient : : Orphan ( ) { <nl> void XdsClient : : Orphan ( ) { <nl> / / possible for ADS calls to be in progress . Unreffing the loadbalancing <nl> / / policies before those calls are done would lead to issues such as <nl> / / https : / / github . com / grpc / grpc / issues / 20928 . <nl> - if ( listener_watcher_ ! = nullptr ) { <nl> + if ( ! listener_map_ . empty ( ) ) { <nl> cluster_map_ . clear ( ) ; <nl> endpoint_map_ . clear ( ) ; <nl> } <nl> Unref ( DEBUG_LOCATION , " XdsClient : : Orphan ( ) " ) ; <nl> } <nl> <nl> + void XdsClient : : WatchListenerData ( <nl> + absl : : string_view listener_name , <nl> + std : : unique_ptr < ListenerWatcherInterface > watcher ) { <nl> + std : : string listener_name_str = std : : string ( listener_name ) ; <nl> + ListenerState & listener_state = listener_map_ [ listener_name_str ] ; <nl> + ListenerWatcherInterface * w = watcher . get ( ) ; <nl> + listener_state . watchers [ w ] = std : : move ( watcher ) ; <nl> + / / If we ' ve already received an LDS update , notify the new watcher <nl> + / / immediately . <nl> + if ( listener_state . update . has_value ( ) ) { <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> + gpr_log ( GPR_INFO , " [ xds_client % p ] returning cached listener data for % s " , <nl> + this , listener_name_str . c_str ( ) ) ; <nl> + } <nl> + w - > OnListenerChanged ( * listener_state . update ) ; <nl> + } <nl> + chand_ - > Subscribe ( XdsApi : : kLdsTypeUrl , listener_name_str ) ; <nl> + } <nl> + <nl> + void XdsClient : : CancelListenerDataWatch ( absl : : string_view listener_name , <nl> + ListenerWatcherInterface * watcher , <nl> + bool delay_unsubscription ) { <nl> + if ( shutting_down_ ) return ; <nl> + std : : string listener_name_str = std : : string ( listener_name ) ; <nl> + ListenerState & listener_state = listener_map_ [ listener_name_str ] ; <nl> + auto it = listener_state . watchers . find ( watcher ) ; <nl> + if ( it ! = listener_state . watchers . end ( ) ) { <nl> + listener_state . watchers . erase ( it ) ; <nl> + if ( listener_state . watchers . empty ( ) ) { <nl> + listener_map_ . erase ( listener_name_str ) ; <nl> + chand_ - > Unsubscribe ( XdsApi : : kLdsTypeUrl , listener_name_str , <nl> + delay_unsubscription ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void XdsClient : : WatchRouteConfigData ( <nl> + absl : : string_view route_config_name , <nl> + std : : unique_ptr < RouteConfigWatcherInterface > watcher ) { <nl> + std : : string route_config_name_str = std : : string ( route_config_name ) ; <nl> + RouteConfigState & route_config_state = <nl> + route_config_map_ [ route_config_name_str ] ; <nl> + RouteConfigWatcherInterface * w = watcher . get ( ) ; <nl> + route_config_state . watchers [ w ] = std : : move ( watcher ) ; <nl> + / / If we ' ve already received an RDS update , notify the new watcher <nl> + / / immediately . <nl> + if ( route_config_state . update . has_value ( ) ) { <nl> + if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> + gpr_log ( GPR_INFO , <nl> + " [ xds_client % p ] returning cached route config data for % s " , this , <nl> + route_config_name_str . 
c_str ( ) ) ; <nl> + } <nl> + w - > OnRouteConfigChanged ( * route_config_state . update ) ; <nl> + } <nl> + chand_ - > Subscribe ( XdsApi : : kRdsTypeUrl , route_config_name_str ) ; <nl> + } <nl> + <nl> + void XdsClient : : CancelRouteConfigDataWatch ( absl : : string_view route_config_name , <nl> + RouteConfigWatcherInterface * watcher , <nl> + bool delay_unsubscription ) { <nl> + if ( shutting_down_ ) return ; <nl> + std : : string route_config_name_str = std : : string ( route_config_name ) ; <nl> + RouteConfigState & route_config_state = <nl> + route_config_map_ [ route_config_name_str ] ; <nl> + auto it = route_config_state . watchers . find ( watcher ) ; <nl> + if ( it ! = route_config_state . watchers . end ( ) ) { <nl> + route_config_state . watchers . erase ( it ) ; <nl> + if ( route_config_state . watchers . empty ( ) ) { <nl> + route_config_map_ . erase ( route_config_name_str ) ; <nl> + chand_ - > Unsubscribe ( XdsApi : : kRdsTypeUrl , route_config_name_str , <nl> + delay_unsubscription ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> void XdsClient : : WatchClusterData ( <nl> absl : : string_view cluster_name , <nl> std : : unique_ptr < ClusterWatcherInterface > watcher ) { <nl> void XdsClient : : WatchClusterData ( <nl> ClusterState & cluster_state = cluster_map_ [ cluster_name_str ] ; <nl> ClusterWatcherInterface * w = watcher . get ( ) ; <nl> cluster_state . watchers [ w ] = std : : move ( watcher ) ; <nl> - / / If we ' ve already received an CDS update , notify the new watcher <nl> + / / If we ' ve already received a CDS update , notify the new watcher <nl> / / immediately . <nl> if ( cluster_state . update . has_value ( ) ) { <nl> if ( GRPC_TRACE_FLAG_ENABLED ( grpc_xds_client_trace ) ) { <nl> XdsApi : : ClusterLoadReportMap XdsClient : : BuildLoadReportSnapshot ( <nl> } <nl> <nl> void XdsClient : : NotifyOnError ( grpc_error * error ) { <nl> - if ( listener_watcher_ ! = nullptr ) { <nl> - listener_watcher_ - > OnError ( GRPC_ERROR_REF ( error ) ) ; <nl> + for ( const auto & p : listener_map_ ) { <nl> + const ListenerState & listener_state = p . second ; <nl> + for ( const auto & p : listener_state . watchers ) { <nl> + p . first - > OnError ( GRPC_ERROR_REF ( error ) ) ; <nl> + } <nl> + } <nl> + for ( const auto & p : route_config_map_ ) { <nl> + const RouteConfigState & route_config_state = p . second ; <nl> + for ( const auto & p : route_config_state . watchers ) { <nl> + p . first - > OnError ( GRPC_ERROR_REF ( error ) ) ; <nl> + } <nl> } <nl> for ( const auto & p : cluster_map_ ) { <nl> const ClusterState & cluster_state = p . second ; <nl> RefCountedPtr < XdsClient > XdsClient : : GetFromChannelArgs ( <nl> const grpc_channel_args & args ) { <nl> XdsClient * xds_client = <nl> grpc_channel_args_find_pointer < XdsClient > ( & args , GRPC_ARG_XDS_CLIENT ) ; <nl> - if ( xds_client ! = nullptr ) return xds_client - > Ref ( ) ; <nl> - return nullptr ; <nl> + if ( xds_client = = nullptr ) return nullptr ; <nl> + return xds_client - > Ref ( DEBUG_LOCATION , " GetFromChannelArgs " ) ; <nl> } <nl> <nl> grpc_channel_args * XdsClient : : RemoveFromChannelArgs ( <nl> mmm a / src / core / ext / xds / xds_client . h <nl> ppp b / src / core / ext / xds / xds_client . h <nl> <nl> # include < grpc / support / port_platform . h > <nl> <nl> # include < set > <nl> + # include < vector > <nl> <nl> # include " absl / strings / string_view . h " <nl> # include " absl / types / optional . 
h " <nl> class XdsClient : public InternallyRefCounted < XdsClient > { <nl> public : <nl> virtual ~ ListenerWatcherInterface ( ) = default ; <nl> <nl> - virtual void OnListenerChanged ( std : : vector < XdsApi : : Route > routes ) = 0 ; <nl> + virtual void OnListenerChanged ( XdsApi : : LdsUpdate listener ) = 0 ; <nl> + <nl> + virtual void OnError ( grpc_error * error ) = 0 ; <nl> + <nl> + virtual void OnResourceDoesNotExist ( ) = 0 ; <nl> + } ; <nl> + <nl> + / / RouteConfiguration data watcher interface . Implemented by callers . <nl> + class RouteConfigWatcherInterface { <nl> + public : <nl> + virtual ~ RouteConfigWatcherInterface ( ) = default ; <nl> + <nl> + virtual void OnRouteConfigChanged ( XdsApi : : RdsUpdate route_config ) = 0 ; <nl> <nl> virtual void OnError ( grpc_error * error ) = 0 ; <nl> <nl> class XdsClient : public InternallyRefCounted < XdsClient > { <nl> <nl> / / If * error is not GRPC_ERROR_NONE after construction , then there was <nl> / / an error initializing the client . <nl> + / / TODO ( roth ) : Remove the server_name parameter as part of sharing the <nl> + / / XdsClient instance between channels . <nl> XdsClient ( std : : shared_ptr < WorkSerializer > work_serializer , <nl> - grpc_pollset_set * interested_parties , absl : : string_view server_name , <nl> - std : : unique_ptr < ListenerWatcherInterface > watcher , <nl> + absl : : string_view server_name , <nl> const grpc_channel_args & channel_args , grpc_error * * error ) ; <nl> ~ XdsClient ( ) ; <nl> <nl> + grpc_pollset_set * interested_parties ( ) const { return interested_parties_ ; } <nl> + <nl> void Orphan ( ) override ; <nl> <nl> + / / Start and cancel listener data watch for a listener . <nl> + / / The XdsClient takes ownership of the watcher , but the caller may <nl> + / / keep a raw pointer to the watcher , which may be used only for <nl> + / / cancellation . ( Because the caller does not own the watcher , the <nl> + / / pointer must not be used for any other purpose . ) <nl> + / / If the caller is going to start a new watch after cancelling the <nl> + / / old one , it should set delay_unsubscription to true . <nl> + void WatchListenerData ( absl : : string_view listener_name , <nl> + std : : unique_ptr < ListenerWatcherInterface > watcher ) ; <nl> + void CancelListenerDataWatch ( absl : : string_view listener_name , <nl> + ListenerWatcherInterface * watcher , <nl> + bool delay_unsubscription = false ) ; <nl> + <nl> + / / Start and cancel route config data watch for a listener . <nl> + / / The XdsClient takes ownership of the watcher , but the caller may <nl> + / / keep a raw pointer to the watcher , which may be used only for <nl> + / / cancellation . ( Because the caller does not own the watcher , the <nl> + / / pointer must not be used for any other purpose . ) <nl> + / / If the caller is going to start a new watch after cancelling the <nl> + / / old one , it should set delay_unsubscription to true . <nl> + void WatchRouteConfigData ( <nl> + absl : : string_view route_config_name , <nl> + std : : unique_ptr < RouteConfigWatcherInterface > watcher ) ; <nl> + void CancelRouteConfigDataWatch ( absl : : string_view route_config_name , <nl> + RouteConfigWatcherInterface * watcher , <nl> + bool delay_unsubscription = false ) ; <nl> + <nl> / / Start and cancel cluster data watch for a cluster . 
<nl> / / The XdsClient takes ownership of the watcher , but the caller may <nl> / / keep a raw pointer to the watcher , which may be used only for <nl> class XdsClient : public InternallyRefCounted < XdsClient > { <nl> OrphanablePtr < RetryableCall < LrsCallState > > lrs_calld_ ; <nl> } ; <nl> <nl> + struct ListenerState { <nl> + std : : map < ListenerWatcherInterface * , <nl> + std : : unique_ptr < ListenerWatcherInterface > > <nl> + watchers ; <nl> + / / The latest data seen from LDS . <nl> + absl : : optional < XdsApi : : LdsUpdate > update ; <nl> + } ; <nl> + <nl> + struct RouteConfigState { <nl> + std : : map < RouteConfigWatcherInterface * , <nl> + std : : unique_ptr < RouteConfigWatcherInterface > > <nl> + watchers ; <nl> + / / The latest data seen from RDS . <nl> + absl : : optional < XdsApi : : RdsUpdate > update ; <nl> + } ; <nl> + <nl> struct ClusterState { <nl> std : : map < ClusterWatcherInterface * , std : : unique_ptr < ClusterWatcherInterface > > <nl> watchers ; <nl> class XdsClient : public InternallyRefCounted < XdsClient > { <nl> std : : unique_ptr < XdsBootstrap > bootstrap_ ; <nl> XdsApi api_ ; <nl> <nl> + / / TODO ( roth ) : In order to share the XdsClient instance between <nl> + / / channels and servers , we will need to remove this field . In order <nl> + / / to do that , we ' ll need to figure out if we can stop sending the <nl> + / / server name as part of the node metadata in the LRS request . <nl> const std : : string server_name_ ; <nl> - std : : unique_ptr < ListenerWatcherInterface > listener_watcher_ ; <nl> <nl> / / The channel for communicating with the xds server . <nl> OrphanablePtr < ChannelState > chand_ ; <nl> <nl> - absl : : optional < XdsApi : : LdsUpdate > lds_result_ ; <nl> - absl : : optional < XdsApi : : RdsUpdate > rds_result_ ; <nl> - <nl> + / / One entry for each watched LDS resource . <nl> + std : : map < std : : string / * listener_name * / , ListenerState > listener_map_ ; <nl> + / / One entry for each watched RDS resource . <nl> + std : : map < std : : string / * route_config_name * / , RouteConfigState > <nl> + route_config_map_ ; <nl> / / One entry for each watched CDS resource . <nl> std : : map < std : : string / * cluster_name * / , ClusterState > cluster_map_ ; <nl> / / One entry for each watched EDS resource . <nl> std : : map < std : : string / * eds_service_name * / , EndpointState > endpoint_map_ ; <nl> + <nl> + / / Load report data . <nl> std : : map < <nl> std : : pair < std : : string / * cluster_name * / , std : : string / * eds_service_name * / > , <nl> LoadReportState > <nl>
Change XdsClient to support multiple LDS and RDS watchers .
grpc/grpc
5acae9fc0717e4b8041a22c39d99f19c752fe19d
2020-09-15T16:10:49Z
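The XdsClient commit above replaces the single cached LDS/RDS result with per-resource maps, where each entry holds the latest update plus the set of registered watchers, new watchers are served from the cache immediately, and identical updates are ignored. A minimal standalone sketch of that pattern follows; the names (Watcher, ResourceState, ResourceCache, PrintWatcher) are illustrative placeholders rather than the gRPC types, and the update payload is simplified to a string.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <string>

// Placeholder watcher interface; the real XdsClient defines one interface per
// resource type (ListenerWatcherInterface, RouteConfigWatcherInterface, ...).
class Watcher {
 public:
  virtual ~Watcher() = default;
  virtual void OnChanged(const std::string& update) = 0;
  virtual void OnResourceDoesNotExist() = 0;
};

// One entry per watched resource: the latest cached update plus all watchers.
struct ResourceState {
  std::map<Watcher*, std::unique_ptr<Watcher>> watchers;
  std::optional<std::string> update;
};

class ResourceCache {
 public:
  void Watch(const std::string& name, std::unique_ptr<Watcher> watcher) {
    ResourceState& state = resources_[name];
    Watcher* raw = watcher.get();
    state.watchers[raw] = std::move(watcher);
    // A new watcher gets the cached data immediately, if any exists.
    if (state.update.has_value()) raw->OnChanged(*state.update);
  }

  void CancelWatch(const std::string& name, Watcher* watcher) {
    auto it = resources_.find(name);
    if (it == resources_.end()) return;
    it->second.watchers.erase(watcher);
    // Last watcher gone: drop the cache entry (the real code also
    // unsubscribes from the xds server at this point).
    if (it->second.watchers.empty()) resources_.erase(it);
  }

  void AcceptUpdate(const std::string& name, std::string update) {
    ResourceState& state = resources_[name];
    if (state.update.has_value() && *state.update == update) return;  // identical update: ignore
    state.update = std::move(update);
    for (auto& p : state.watchers) p.first->OnChanged(*state.update);
  }

 private:
  std::map<std::string, ResourceState> resources_;
};

class PrintWatcher : public Watcher {
 public:
  void OnChanged(const std::string& update) override {
    std::cout << "update: " << update << "\n";
  }
  void OnResourceDoesNotExist() override { std::cout << "resource gone\n"; }
};

int main() {
  ResourceCache cache;
  auto watcher = std::make_unique<PrintWatcher>();
  Watcher* raw = watcher.get();  // kept only for cancellation, as in the commit
  cache.Watch("listener.example.com", std::move(watcher));
  cache.AcceptUpdate("listener.example.com", "route_config_A");  // notifies
  cache.AcceptUpdate("listener.example.com", "route_config_A");  // ignored
  cache.CancelWatch("listener.example.com", raw);
}
```

Keying the watcher map by the raw pointer mirrors the commit's convention that the caller keeps a raw pointer only to be able to cancel the watch later.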
mmm a / cocos / editor - support / cocostudio / ActionTimeline / CCBoneNode . cpp <nl> ppp b / cocos / editor - support / cocostudio / ActionTimeline / CCBoneNode . cpp <nl> void BoneNode : : removeChild ( Node * child , bool cleanup / * = true * / ) <nl> void BoneNode : : removeFromBoneList ( BoneNode * bone ) <nl> { <nl> auto skeletonNode = dynamic_cast < SkeletonNode * > ( bone ) ; <nl> - if ( skeletonNode = = nullptr ) / / is not a nested skeleton <nl> + if ( _rootSkeleton ! = nullptr & & <nl> + skeletonNode = = nullptr & & bone - > _rootSkeleton = = _rootSkeleton ) / / is not a nested skeleton , and it is in skeleton tree <nl> { <nl> bone - > _rootSkeleton = nullptr ; <nl> auto subBones = bone - > getAllSubBones ( ) ; <nl> mmm a / web <nl> ppp b / web <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit f26bb7a49e7ed37e43496c7edd0937a3c34a47d0 <nl> + Subproject commit 0adcb2c24daa76f112981b57a0105887d47a5c42 <nl>
Merge pull request from pandamicro / skeleton - fix
cocos2d/cocos2d-x
ecd0f68018c48a64d0b015598bfacb4435e0a58c
2015-08-07T01:39:23Z
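The BoneNode fix above tightens removeFromBoneList so a bone is only detached when the caller actually has a root skeleton and the bone belongs to that same skeleton tree. A small sketch of that guard, using placeholder types rather than the cocos2d-x classes; the real method additionally skips bones that are themselves nested SkeletonNode instances, detected via dynamic_cast.

```cpp
#include <vector>

// Placeholder types; BoneNode/SkeletonNode in cocos2d-x carry much more state.
struct Skeleton {};

struct Bone {
  Skeleton* rootSkeleton = nullptr;
  std::vector<Bone*> subBones;
};

// Detach `bone` from `parent`'s skeleton tree only if the parent actually has
// a root skeleton and the bone belongs to that same tree.
inline void removeFromBoneList(Bone* parent, Bone* bone) {
  if (parent->rootSkeleton != nullptr &&
      bone->rootSkeleton == parent->rootSkeleton) {
    bone->rootSkeleton = nullptr;
    for (Bone* sub : bone->subBones) sub->rootSkeleton = nullptr;  // like getAllSubBones()
  }
}

int main() {
  Skeleton treeA, treeB;
  Bone parent; parent.rootSkeleton = &treeA;
  Bone mine;   mine.rootSkeleton   = &treeA;
  Bone other;  other.rootSkeleton  = &treeB;
  removeFromBoneList(&parent, &mine);   // same tree: detached
  removeFromBoneList(&parent, &other);  // different tree: left untouched
  return (mine.rootSkeleton == nullptr && other.rootSkeleton == &treeB) ? 0 : 1;
}
```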
mmm a / lib / Sema / TypeCheckEffects . cpp <nl> ppp b / lib / Sema / TypeCheckEffects . cpp <nl> class Classification { <nl> } <nl> <nl> void merge ( Classification other ) { <nl> + bool oldAsync = IsAsync ; <nl> + <nl> if ( other . getResult ( ) > getResult ( ) ) <nl> * this = other ; <nl> - IsAsync | = other . IsAsync ; <nl> + <nl> + IsAsync = oldAsync | other . IsAsync ; <nl> } <nl> <nl> bool isInvalid ( ) const { return IsInvalid ; } <nl>
fix bug when merging Classifications that was clobbering IsAsync
apple/swift
0c352beb868f14bd071b2a85a64950f04f2ddbb1
2020-11-04T00:57:10Z
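The TypeCheckEffects fix above is a classic merge pitfall: `*this = other` replaced the whole object, discarding the IsAsync flag accumulated so far. A self-contained illustration of the bug and the fix; Classification here is a toy stand-in with a plain int Result rather than the compiler's real classification enum.

```cpp
#include <cassert>

// Toy stand-in for the parser's Classification: Result models the ordering
// used to decide which side "wins" a merge, IsAsync tracks whether any
// merged part was async.
struct Classification {
  int Result = 0;
  bool IsAsync = false;

  void merge(const Classification& other) {
    bool oldAsync = IsAsync;   // must be captured before *this is replaced
    if (other.Result > Result)
      *this = other;           // wholesale copy: would clobber IsAsync
    IsAsync = oldAsync | other.IsAsync;
  }
};

int main() {
  Classification a;
  a.IsAsync = true;            // async, but lower Result
  Classification b;
  b.Result = 1;                // higher Result, not async
  a.merge(b);
  assert(a.Result == 1);
  assert(a.IsAsync);           // without the fix this flag would be lost
  return 0;
}
```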
mmm a / . appveyor / install . bat <nl> ppp b / . appveyor / install . bat <nl> GOTO : EOF <nl> call : logInfo " Setup GYP / Ninja and generate VS solution " <nl> cd % LIB_DIR % <nl> git clone https : / / chromium . googlesource . com / external / gyp <nl> + cd gyp <nl> + git checkout a478c1ab51 <nl> SET PATH = % PATH % ; C : \ TBuild \ Libraries \ gyp ; C : \ TBuild \ Libraries \ ninja ; <nl> cd % SRC_DIR % <nl> git submodule init <nl> mmm a / Telegram / SourceFiles / codegen / scheme / codegen_scheme . py <nl> ppp b / Telegram / SourceFiles / codegen / scheme / codegen_scheme . py <nl> <nl> <nl> input_file = ' ' <nl> output_path = ' ' <nl> + next_output_path = False <nl> for arg in sys . argv [ 1 : ] : <nl> - if re . match ( r ' ^ - o ( . + ) ' , arg ) : <nl> + if next_output_path : <nl> + next_output_path = False <nl> + output_path = arg <nl> + elif arg = = ' - o ' : <nl> + next_output_path = True <nl> + elif re . match ( r ' ^ - o ( . + ) ' , arg ) : <nl> output_path = arg [ 2 : ] <nl> else : <nl> input_file = arg <nl> mmm a / Telegram / gyp / codegen_rules . gypi <nl> ppp b / Telegram / gyp / codegen_rules . gypi <nl> <nl> ] , <nl> ' action ' : [ <nl> ' python ' , ' < ( DEPTH ) / update_dependent . py ' , ' - - styles ' , <nl> - ' - I < ( res_loc ) ' , ' - I < ( src_loc ) ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) / update_dependent_styles . timestamp ' , <nl> + ' - I ' , ' < ( res_loc ) ' , ' - I ' , ' < ( src_loc ) ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) / update_dependent_styles . timestamp ' , <nl> ' < @ ( style_files ) ' , <nl> ] , <nl> ' message ' : ' Updating dependent style files . . ' , <nl> <nl> ] , <nl> ' action ' : [ <nl> ' python ' , ' < ( DEPTH ) / update_dependent . py ' , ' - - qrc ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) / update_dependent_qrc . timestamp ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) / update_dependent_qrc . timestamp ' , <nl> ' < @ ( qrc_files ) ' , <nl> ] , <nl> ' message ' : ' Updating dependent qrc files . . ' , <nl> <nl> ] , <nl> ' action ' : [ <nl> ' < ( PRODUCT_DIR ) / codegen_style < ( exe_ext ) ' , <nl> - ' - I < ( res_loc ) ' , ' - I < ( src_loc ) ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) / styles ' , <nl> - ' - w < ( PRODUCT_DIR ) / . . / . . ' , <nl> + ' - I ' , ' < ( res_loc ) ' , ' - I ' , ' < ( src_loc ) ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) / styles ' , <nl> + ' - w ' , ' < ( PRODUCT_DIR ) / . . / . . ' , <nl> <nl> # GYP / Ninja bug workaround : if we specify just < ( RULE_INPUT_PATH ) <nl> # the < ( RULE_INPUT_ROOT ) variables won ' t be available in Ninja , <nl> <nl> ] , <nl> ' action ' : [ <nl> ' < ( PRODUCT_DIR ) / codegen_lang < ( exe_ext ) ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) ' , ' < ( res_loc ) / langs / lang . strings ' , <nl> - ' - w < ( PRODUCT_DIR ) / . . / . . ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) ' , ' < ( res_loc ) / langs / lang . strings ' , <nl> + ' - w ' , ' < ( PRODUCT_DIR ) / . . / . . ' , <nl> ] , <nl> ' message ' : ' codegen_lang - ing lang . strings . . ' , <nl> ' process_outputs_as_sources ' : 1 , <nl> <nl> ] , <nl> ' action ' : [ <nl> ' < ( PRODUCT_DIR ) / codegen_numbers < ( exe_ext ) ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) ' , ' < ( res_loc ) / numbers . txt ' , <nl> - ' - w < ( PRODUCT_DIR ) / . . / . . ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) ' , ' < ( res_loc ) / numbers . txt ' , <nl> + ' - w ' , ' < ( PRODUCT_DIR ) / . . / . . ' , <nl> ] , <nl> ' message ' : ' codegen_numbers - ing numbers . txt . . 
' , <nl> ' process_outputs_as_sources ' : 1 , <nl> <nl> ] , <nl> ' action ' : [ <nl> ' python ' , ' < ( src_loc ) / codegen / scheme / codegen_scheme . py ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) ' , ' < ( res_loc ) / scheme . tl ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) ' , ' < ( res_loc ) / scheme . tl ' , <nl> ] , <nl> ' message ' : ' codegen_scheme - ing scheme . tl . . ' , <nl> ' process_outputs_as_sources ' : 1 , <nl> <nl> ] , <nl> ' action ' : [ <nl> ' < ( PRODUCT_DIR ) / codegen_emoji < ( exe_ext ) ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) ' , <nl> ] , <nl> ' message ' : ' codegen_emoji - ing . . ' , <nl> ' process_outputs_as_sources ' : 1 , <nl> <nl> ] , <nl> ' action ' : [ <nl> ' < ( PRODUCT_DIR ) / codegen_style < ( exe_ext ) ' , <nl> - ' - I < ( res_loc ) ' , ' - I < ( src_loc ) ' , <nl> - ' - o < ( SHARED_INTERMEDIATE_DIR ) / styles ' , <nl> - ' - w < ( PRODUCT_DIR ) / . . / . . ' , <nl> + ' - I ' , ' < ( res_loc ) ' , ' - I ' , ' < ( src_loc ) ' , <nl> + ' - o ' , ' < ( SHARED_INTERMEDIATE_DIR ) / styles ' , <nl> + ' - w ' , ' < ( PRODUCT_DIR ) / . . / . . ' , <nl> <nl> # GYP / Ninja bug workaround : if we specify just < ( RULE_INPUT_PATH ) <nl> # the < ( RULE_INPUT_ROOT ) variables won ' t be available in Ninja , <nl> mmm a / Telegram / gyp / refresh . bat <nl> ppp b / Telegram / gyp / refresh . bat <nl> if not " % TDESKTOP_BUILD_DEFINES % " = = " " ( <nl> echo [ INFO ] Set build defines to ! BUILD_DEFINES ! <nl> ) <nl> <nl> + set GYP_MSVS_VERSION = 2015 <nl> + <nl> cd " % FullScriptPath % " <nl> call gyp - - depth = . - - generator - output = . . / . . - Goutput_dir = out ! BUILD_DEFINES ! Telegram . gyp - - format = ninja <nl> if % errorlevel % neq 0 goto error <nl> mmm a / docs / building - msvc . md <nl> ppp b / docs / building - msvc . md <nl> run ` git reset - - hard HEAD ` and execute ` gclient ` again <nl> < ! - - - - > <nl> <nl> git clone https : / / chromium . googlesource . com / external / gyp <nl> + cd gyp <nl> + git checkout a478c1ab51 <nl> SET PATH = % PATH % ; D : \ TBuild \ Libraries \ gyp ; D : \ TBuild \ Libraries \ ninja ; <nl> - cd . . \ tdesktop \ Telegram <nl> + cd . . \ . . \ tdesktop \ Telegram <nl> <nl> Also , actually add * * D : \ \ TBuild \ \ Libraries \ \ ninja \ \ * * ( not just for running the * * gyp * * command ) to your path environment variable , since Telegram needs it for the build process . <nl> <nl>
Fix build for the new GYP version , fix Appveyor .
telegramdesktop/tdesktop
475f0e954455218ae036ce2e0b75ef1b7797c7dd
2017-05-05T09:25:06Z
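Besides pinning GYP and splitting `-o <path>` into separate action arguments, the commit above teaches codegen_scheme.py to accept both the separated form (`-o path`) and the joined form (`-opath`). The same two-form parsing is sketched below in C++ for consistency with the other examples in this file; the original is a short Python loop over sys.argv, and the struct and function names here are placeholders.

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Args {
  std::string output_path;
  std::string input_file;
};

// Accept both "-o <path>" (two arguments) and "-o<path>" (one argument),
// matching what the patched codegen_scheme.py does for sys.argv.
inline Args ParseArgs(const std::vector<std::string>& argv) {
  Args args;
  bool next_is_output = false;
  for (const std::string& arg : argv) {
    if (next_is_output) {
      args.output_path = arg;
      next_is_output = false;
    } else if (arg == "-o") {
      next_is_output = true;             // value arrives as the next argument
    } else if (arg.rfind("-o", 0) == 0) {
      args.output_path = arg.substr(2);  // joined "-opath" form
    } else {
      args.input_file = arg;
    }
  }
  return args;
}

int main() {
  Args a = ParseArgs({"-o", "gen/scheme", "scheme.tl"});
  Args b = ParseArgs({"-ogen/scheme", "scheme.tl"});
  std::cout << a.output_path << " == " << b.output_path << "\n";  // both: gen/scheme
}
```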
mmm a / src / containers / archive / fd_stream . hpp <nl> ppp b / src / containers / archive / fd_stream . hpp <nl> class blocking_fd_watcher_t : public fd_watcher_t { <nl> <nl> private : <nl> bool read_open_ , write_open_ ; <nl> - DISABLE_COPYING ( blocking_fd_watcher_t ) ; <nl> } ; <nl> <nl> / * linux_event_fd_watcher_t uses a linux_event_watcher to wait for IO , and makes <nl> class socket_stream_t : public fd_stream_t { <nl> virtual void on_write_error ( int errno_ ) ; <nl> virtual void do_shutdown_read ( ) ; <nl> virtual void do_shutdown_write ( ) ; <nl> - <nl> - private : <nl> - DISABLE_COPYING ( socket_stream_t ) ; <nl> } ; <nl> <nl> class unix_socket_stream_t : public socket_stream_t { <nl> class unix_socket_stream_t : public socket_stream_t { <nl> / / Blocks until all fds are received . <nl> MUST_USE archive_result_t recv_fds ( int64_t num_fds , fd_t * fds ) ; <nl> MUST_USE archive_result_t recv_fd ( fd_t * fd ) ; <nl> - <nl> - private : <nl> - DISABLE_COPYING ( unix_socket_stream_t ) ; <nl> } ; <nl> <nl> # endif / / CONTAINERS_ARCHIVE_FD_STREAM_HPP_ <nl>
minimal use of DISABLE_COPYING
rethinkdb/rethinkdb
c4bdcb5da99641f31180356d46496f4ac6463ca4
2012-06-21T00:50:24Z
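The rethinkdb commit above drops DISABLE_COPYING from several subclasses, presumably because copyability is already suppressed further up the hierarchy. A hedged sketch of how such a macro is commonly written and why repeating it in derived classes is redundant; the macro body and class names here are assumptions, not rethinkdb's actual definitions.

```cpp
// Assumed shape of a DISABLE_COPYING-style macro; rethinkdb's real definition
// may differ (for example, pre-C++11 code declared the members private and
// left them undefined instead of using = delete).
#define DISABLE_COPYING(TypeName)          \
  TypeName(const TypeName&) = delete;      \
  TypeName& operator=(const TypeName&) = delete

// Illustrative base class: once copying is disabled here, every derived
// class's implicit copy operations are defined as deleted too.
class fd_watcher_base_t {
 public:
  fd_watcher_base_t() = default;
 private:
  DISABLE_COPYING(fd_watcher_base_t);
};

// No DISABLE_COPYING needed: the base already makes this non-copyable,
// which is why the commit above can drop the redundant macro uses.
class blocking_watcher_t : public fd_watcher_base_t {};

int main() {
  blocking_watcher_t watcher;
  (void)watcher;
  // blocking_watcher_t copy = watcher;  // would not compile: deleted copy ctor
  return 0;
}
```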
mmm a / Source / ComputationNetworkLib / RNNNodes . cpp <nl> ppp b / Source / ComputationNetworkLib / RNNNodes . cpp <nl> void RNNNode < ElemType > : : Validate ( bool isFinalValidationPass ) <nl> / / now determine result dimensions <nl> auto dimsC = dimsB ; <nl> / / output dims - bugbug : this is hard - coded for bidirectional models <nl> - dimsC [ 0 ] = 2 * m_numHidden ; <nl> + dimsC [ 0 ] = 1 * m_numHidden ; <nl> <nl> / / N . B . - this is the magical call , the reason for the function <nl> / / dimensions would be outputRank * numSamples * minibatch * time . <nl> mmm a / Source / Math / CuDnnRNN . cpp <nl> ppp b / Source / Math / CuDnnRNN . cpp <nl> void CuDnnRNNExecutor < ElemType > : : ForwardCore ( <nl> SetXDesc ( shapeX ) ; <nl> <nl> size_t outputSize = shapeY . GetDim ( 0 ) ; <nl> - if ( outputSize ! = 2 * m_rnnT - > GetNumHidden ( ) ) <nl> + if ( outputSize ! = ( m_rnnT - > isBidirectional ( ) ? 2 : 1 ) * m_rnnT - > GetNumHidden ( ) ) <nl> InvalidArgument ( " CuDnn ForwardCore : Output leading dimension must be twice hidden size for bidirectional networks " ) ; <nl> if ( shapeY . GetDim ( 1 ) ! = miniBatch ) <nl> RuntimeError ( " CuDnn ForwardCore : Output minibatch size doesn ' t match input minibatch size " ) ; <nl> mmm a / Source / Math / CuDnnRNN . h <nl> ppp b / Source / Math / CuDnnRNN . h <nl> class CuDnnRNN <nl> return m_rnnDesc ; <nl> } <nl> <nl> + bool isBidirectional ( ) const { return m_bidirectional ; } <nl> + <nl> size_t GetNumLayers ( ) { return m_numLayers ; } <nl> size_t GetNumHidden ( ) { return m_numHidden ; } <nl> <nl> mmm a / Source / Math / GPUMatrix . cu <nl> ppp b / Source / Math / GPUMatrix . cu <nl> void GPUMatrix < ElemType > : : RNNForward ( const GPUMatrix < ElemType > & inputX , const Te <nl> if ( ! m_RNNWrapper ) <nl> m_RNNWrapper = std : : make_unique < RNNWrapper > ( ) ; <nl> if ( ! m_RNNWrapper - > m_rnnExecutor ) <nl> - m_RNNWrapper - > m_rnnExecutor = std : : make_unique < CuDnnRNNExecutor < ElemType > > ( shapeX , hiddenSize , numLayers , true , cudnnRNNMode_t : : CUDNN_LSTM ) ; <nl> + m_RNNWrapper - > m_rnnExecutor = std : : make_unique < CuDnnRNNExecutor < ElemType > > ( shapeX , hiddenSize , numLayers , false , cudnnRNNMode_t : : CUDNN_LSTM ) ; <nl> m_RNNWrapper - > m_rnnExecutor - > ForwardCore ( paramW , inputX , shapeX , * this , shapeY , reserve , workspace ) ; <nl> } <nl> <nl>
CuDNN RNN : change to unidirectional
microsoft/CNTK
f74bb2ba3ac24c913460bfed2457273e8febd43d
2016-05-14T01:43:21Z
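The CNTK change above makes the output leading dimension depend on whether the RNN is bidirectional (forward and backward hidden states are concatenated) instead of hard-coding the factor of two. The dimension rule itself is small enough to state as a helper; the function name below is illustrative.

```cpp
#include <cassert>
#include <cstddef>

// Leading output dimension of a cuDNN-style RNN layer: bidirectional RNNs
// concatenate the forward and backward hidden states, doubling the size.
inline std::size_t RnnOutputDim(std::size_t numHidden, bool bidirectional) {
  return (bidirectional ? 2 : 1) * numHidden;
}

int main() {
  assert(RnnOutputDim(512, /*bidirectional=*/false) == 512);
  assert(RnnOutputDim(512, /*bidirectional=*/true) == 1024);
  return 0;
}
```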
mmm a / folly / io / async / NotificationQueue . h <nl> ppp b / folly / io / async / NotificationQueue . h <nl> <nl> # include < stdexcept > <nl> # include < utility > <nl> <nl> + # include < folly / FileUtil . h > <nl> # include < folly / io / async / EventBase . h > <nl> # include < folly / io / async / EventHandler . h > <nl> # include < folly / io / async / DelayedDestruction . h > <nl> class NotificationQueue { <nl> uint64_t value = 0 ; <nl> ssize_t rc = - 1 ; <nl> if ( eventfd_ > = 0 ) { <nl> - rc = : : read ( eventfd_ , & value , sizeof ( value ) ) ; <nl> + rc = readNoInt ( eventfd_ , & value , sizeof ( value ) ) ; <nl> } else { <nl> uint8_t value8 ; <nl> - rc = : : read ( pipeFds_ [ 0 ] , & value8 , sizeof ( value8 ) ) ; <nl> + rc = readNoInt ( pipeFds_ [ 0 ] , & value8 , sizeof ( value8 ) ) ; <nl> value = value8 ; <nl> } <nl> if ( rc < 0 ) { <nl>
notification queue read - > readNoInt
facebook/folly
7137cffd8b8fc6a38014f88d670ed934fb8c2b94
2015-12-09T02:20:20Z
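folly's readNoInt differs from a bare ::read in that it retries when the call is interrupted by a signal, so callers never see a spurious EINTR failure while draining the eventfd or pipe. A minimal POSIX sketch of that behaviour follows; it is not folly's implementation (the real helper goes through a generic retry wrapper), just the same contract.

```cpp
#include <cerrno>
#include <cstdio>
#include <unistd.h>

// Retry read() while it keeps failing with EINTR, so a signal delivered to
// the process does not surface as a spurious error to the caller.
ssize_t read_no_int(int fd, void* buf, size_t count) {
  ssize_t rc;
  do {
    rc = ::read(fd, buf, count);
  } while (rc == -1 && errno == EINTR);
  return rc;
}

int main() {
  int fds[2];
  if (pipe(fds) != 0) return 1;
  const char msg[] = "wakeup";
  (void)::write(fds[1], msg, sizeof(msg));
  char buf[16] = {0};
  ssize_t n = read_no_int(fds[0], buf, sizeof(buf));
  std::printf("read %zd bytes: %s\n", n, buf);
  return 0;
}
```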
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> os : linux <nl> language : minimal <nl> arch : amd64 <nl> cache : <nl> - ccache : true <nl> directories : <nl> - $ TRAVIS_BUILD_DIR / depends / built <nl> - $ TRAVIS_BUILD_DIR / depends / sdk - sources <nl> - $ TRAVIS_BUILD_DIR / ci / scratch / . ccache <nl> - $ TRAVIS_BUILD_DIR / releases / $ HOST <nl> - before_cache : <nl> - - if [ " $ { TRAVIS_OS_NAME } " = " osx " ] ; then brew cleanup ; fi <nl> stages : <nl> - lint <nl> - test <nl> script : <nl> - if [ $ SECONDS - gt 1200 ] ; then export CONTINUE = 0 ; fi # Likely the depends build took very long <nl> - if [ $ TRAVIS_REPO_SLUG = " bitcoin / bitcoin " ] ; then export CONTINUE = 1 ; fi # continue on repos with extended build time ( 90 minutes ) <nl> - if [ $ CONTINUE = " 1 " ] ; then set - o errexit ; source . / ci / test / 06_script_a . sh ; else set + o errexit ; echo " $ CACHE_ERR_MSG " ; false ; fi <nl> - - if [ $ SECONDS - gt 2000 ] ; then export CONTINUE = 0 ; fi # Likely the build took very long ; The tests take about 1000s , so we should abort if we have less than 50 * 60 - 1000 = 2000s left <nl> + - if [ [ $ SECONDS - gt 50 * 60 - $ EXPECTED_TESTS_DURATION_IN_SECONDS ] ] ; then export CONTINUE = 0 ; fi <nl> - if [ $ TRAVIS_REPO_SLUG = " bitcoin / bitcoin " ] ; then export CONTINUE = 1 ; fi # continue on repos with extended build time ( 90 minutes ) <nl> - if [ $ CONTINUE = " 1 " ] ; then set - o errexit ; source . / ci / test / 06_script_b . sh ; else set + o errexit ; echo " $ CACHE_ERR_MSG " ; false ; fi <nl> after_script : <nl> jobs : <nl> # Xcode 11 . 3 . 1 , macOS 10 . 14 , SDK 10 . 15 <nl> # https : / / docs . travis - ci . com / user / reference / osx / # macos - version <nl> osx_image : xcode11 . 3 <nl> - cache : <nl> - directories : <nl> - - $ TRAVIS_BUILD_DIR / ci / scratch / . ccache <nl> - - $ TRAVIS_BUILD_DIR / releases / $ HOST <nl> - - $ HOME / Library / Caches / Homebrew <nl> - - / usr / local / Homebrew <nl> addons : <nl> homebrew : <nl> packages : <nl> - - libtool <nl> - berkeley - db4 <nl> - - boost <nl> - miniupnpc <nl> - - qt <nl> - qrencode <nl> - - python3 <nl> - ccache <nl> - zeromq <nl> env : > - <nl> mmm a / ci / test / 00_setup_env . sh <nl> ppp b / ci / test / 00_setup_env . sh <nl> export BASE_SCRATCH_DIR = $ { BASE_SCRATCH_DIR : - $ BASE_ROOT_DIR / ci / scratch } <nl> export HOST = $ { HOST : - $ ( " $ BASE_ROOT_DIR / depends / config . guess " ) } <nl> # Whether to prefer BusyBox over GNU utilities <nl> export USE_BUSY_BOX = $ { USE_BUSY_BOX : - false } <nl> + <nl> export RUN_UNIT_TESTS = $ { RUN_UNIT_TESTS : - true } <nl> export RUN_FUNCTIONAL_TESTS = $ { RUN_FUNCTIONAL_TESTS : - true } <nl> export RUN_SECURITY_TESTS = $ { RUN_SECURITY_TESTS : - false } <nl> export TEST_RUNNER_ENV = $ { TEST_RUNNER_ENV : - } <nl> export RUN_FUZZ_TESTS = $ { RUN_FUZZ_TESTS : - false } <nl> + export EXPECTED_TESTS_DURATION_IN_SECONDS = $ { EXPECTED_TESTS_DURATION_IN_SECONDS : - 1000 } <nl> + <nl> export CONTAINER_NAME = $ { CONTAINER_NAME : - ci_unnamed } <nl> export DOCKER_NAME_TAG = $ { DOCKER_NAME_TAG : - ubuntu : 18 . 04 } <nl> # Randomize test order . <nl> mmm a / ci / test / 00_setup_env_mac . sh <nl> ppp b / ci / test / 00_setup_env_mac . 
sh <nl> export XCODE_BUILD_ID = 11C505 <nl> export RUN_UNIT_TESTS = false <nl> export RUN_FUNCTIONAL_TESTS = false <nl> export GOAL = " deploy " <nl> - export BITCOIN_CONFIG = " - - enable - gui - - enable - reduce - exports - - enable - werror " <nl> + export BITCOIN_CONFIG = " - - with - gui - - enable - reduce - exports - - enable - werror " <nl> mmm a / ci / test / 00_setup_env_mac_host . sh <nl> ppp b / ci / test / 00_setup_env_mac_host . sh <nl> export HOST = x86_64 - apple - darwin16 <nl> export DOCKER_NAME_TAG = ubuntu : 18 . 04 # Check that bionic can cross - compile to macos ( bionic is used in the gitian build as well ) <nl> export PIP_PACKAGES = " zmq " <nl> export GOAL = " install " <nl> - export BITCOIN_CONFIG = " - - enable - gui - - enable - reduce - exports - - enable - werror " <nl> - export RUN_SECURITY_TESTS = " true " <nl> - # Run without depends <nl> + export BITCOIN_CONFIG = " - - with - gui - - enable - reduce - exports - - enable - werror " <nl> export NO_DEPENDS = 1 <nl> export OSX_SDK = " " <nl> + export CCACHE_SIZE = 300M <nl> + <nl> + export RUN_SECURITY_TESTS = " true " <nl> + if [ " $ TRAVIS_REPO_SLUG " ! = " bitcoin / bitcoin " ] ; then <nl> + export RUN_FUNCTIONAL_TESTS = " false " <nl> + export EXPECTED_TESTS_DURATION_IN_SECONDS = 200 <nl> + fi <nl> mmm a / ci / test / 04_install . sh <nl> ppp b / ci / test / 04_install . sh <nl> if [ [ $ QEMU_USER_CMD = = qemu - s390 * ] ] ; then <nl> fi <nl> <nl> if [ " $ TRAVIS_OS_NAME " = = " osx " ] ; then <nl> - export PATH = " / usr / local / opt / ccache / libexec : $ PATH " <nl> $ { CI_RETRY_EXE } pip3 install $ PIP_PACKAGES <nl> fi <nl> <nl>
Merge : ci : Disable macOS functional tests on forked repos to avoid timeouts
bitcoin/bitcoin
90b5fc9ce41117b3a85170e6fc129e87bbd15f48
2020-07-14T06:40:32Z
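The Travis change above replaces a hard-coded 2000-second cutoff with a per-job budget: the second script phase only runs if the expected test duration still fits inside the 50-minute limit. A tiny sketch of that check as a pure function; the 50-minute constant mirrors the script, everything is in seconds, and the names are placeholders.

```cpp
#include <cassert>

// Continue to the test phase only if the expected test duration still fits in
// the remaining CI budget.
inline bool ShouldRunTests(int elapsedSeconds, int expectedTestsDurationSeconds,
                           int totalBudgetSeconds = 50 * 60) {
  return elapsedSeconds <= totalBudgetSeconds - expectedTestsDurationSeconds;
}

int main() {
  assert(ShouldRunTests(1200, 1000));   // 1800s of budget left, 1000s needed
  assert(!ShouldRunTests(2100, 1000));  // only 900s left: skip the tests
  return 0;
}
```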
mmm a / hphp / hack / src / full_fidelity / full_fidelity_declaration_parser . ml <nl> ppp b / hphp / hack / src / full_fidelity / full_fidelity_declaration_parser . ml <nl> module WithExpressionAndStatementAndTypeParser <nl> and parse_alias_declaration parser = <nl> ( * SPEC <nl> alias - declaration : <nl> - type name = type - to - be - aliased ; <nl> - newtype name type - constraintopt = type - to - be - aliased ; <nl> - <nl> - type - to - be - aliased : <nl> - type - specifier <nl> - qualified - name <nl> - NOTE alias name can contain generics . <nl> - TODO figure out the grammar for this add add second error pass to <nl> - report illegal names <nl> + type name generic - type - parameter - list - opt = type - specifier ; <nl> + newtype name generic - type - parameter - list - opt type - constraint - opt = <nl> + type - specifier ; <nl> * ) <nl> <nl> - ( * TODO : Produce an error if the " type " version has a constraint . * ) <nl> + ( * ERROR RECOVERY : We allow the " type " version to have a constraint in the <nl> + initial parse . <nl> + TODO : Produce an error in a later pass if the " type " version has a <nl> + constraint . * ) <nl> <nl> let ( parser , token ) = next_token parser in <nl> let token = make_token token in <nl>
Update comments in alias parser .
facebook/hhvm
8818e5ec816b1d6fa7836c5966eb12b82f558752
2016-09-13T16:49:45Z

mmm a / Telegram / SourceFiles / media / view / media_view_overlay_widget . cpp <nl> ppp b / Telegram / SourceFiles / media / view / media_view_overlay_widget . cpp <nl> void OverlayWidget : : refreshLang ( ) { <nl> InvokeQueued ( this , [ this ] { updateThemePreviewGeometry ( ) ; } ) ; <nl> } <nl> <nl> - void OverlayWidget : : moveToScreen ( ) { <nl> - auto widgetScreen = [ & ] ( auto & & widget ) - > QScreen * { <nl> + void OverlayWidget : : moveToScreen ( bool force ) { <nl> + const auto widgetScreen = [ & ] ( auto & & widget ) - > QScreen * { <nl> if ( auto handle = widget ? widget - > windowHandle ( ) : nullptr ) { <nl> return handle - > screen ( ) ; <nl> } <nl> return nullptr ; <nl> } ; <nl> - auto activeWindow = Core : : App ( ) . getActiveWindow ( ) ; <nl> - auto activeWindowScreen = widgetScreen ( activeWindow ) ; <nl> - auto myScreen = widgetScreen ( this ) ; <nl> + const auto activeWindow = Core : : App ( ) . getActiveWindow ( ) ; <nl> + const auto activeWindowScreen = widgetScreen ( activeWindow ) ; <nl> + const auto myScreen = widgetScreen ( this ) ; <nl> if ( activeWindowScreen & & myScreen & & myScreen ! = activeWindowScreen ) { <nl> windowHandle ( ) - > setScreen ( activeWindowScreen ) ; <nl> } <nl> - const auto screen = activeWindowScreen ? activeWindowScreen : QApplication : : primaryScreen ( ) ; <nl> + const auto screen = activeWindowScreen <nl> + ? activeWindowScreen <nl> + : QApplication : : primaryScreen ( ) ; <nl> const auto available = screen - > geometry ( ) ; <nl> - if ( geometry ( ) ! = available ) { <nl> - setGeometry ( available ) ; <nl> + if ( ! force & & geometry ( ) = = available ) { <nl> + return ; <nl> } <nl> + setGeometry ( available ) ; <nl> <nl> auto navSkip = 2 * st : : mediaviewControlMargin + st : : mediaviewControlSize ; <nl> _closeNav = myrtlrect ( width ( ) - st : : mediaviewControlMargin - st : : mediaviewControlSize , st : : mediaviewControlMargin , st : : mediaviewControlSize , st : : mediaviewControlSize ) ; <nl> void OverlayWidget : : moveToScreen ( ) { <nl> _rightNavIcon = centerrect ( _rightNav , st : : mediaviewRight ) ; <nl> <nl> _saveMsg . moveTo ( ( width ( ) - _saveMsg . width ( ) ) / 2 , ( height ( ) - _saveMsg . height ( ) ) / 2 ) ; <nl> + _photoRadialRect = QRect ( QPoint ( ( width ( ) - st : : radialSize . width ( ) ) / 2 , ( height ( ) - st : : radialSize . height ( ) ) / 2 ) , st : : radialSize ) ; <nl> + <nl> + resizeContentByScreenSize ( ) ; <nl> + update ( ) ; <nl> } <nl> <nl> bool OverlayWidget : : videoShown ( ) const { <nl> void OverlayWidget : : updateControls ( ) { <nl> updateThemePreviewGeometry ( ) ; <nl> <nl> _saveVisible = ( _photo & & _photo - > loaded ( ) ) <nl> - | | ( _doc & & ( _doc - > loaded ( DocumentData : : FilePathResolve : : Checked ) <nl> - | | ! documentContentShown ( ) ) ) ; <nl> + | | ( _doc & & _doc - > filepath ( DocumentData : : FilePathResolve : : Checked ) . isEmpty ( ) ) ; <nl> _saveNav = myrtlrect ( width ( ) - st : : mediaviewIconSize . width ( ) * 2 , height ( ) - st : : mediaviewIconSize . height ( ) , st : : mediaviewIconSize . width ( ) , st : : mediaviewIconSize . height ( ) ) ; <nl> _saveNavIcon = centerrect ( _saveNav , st : : mediaviewSave ) ; <nl> _moreNav = myrtlrect ( width ( ) - st : : mediaviewIconSize . width ( ) , height ( ) - st : : mediaviewIconSize . height ( ) , st : : mediaviewIconSize . width ( ) , st : : mediaviewIconSize . 
height ( ) ) ; <nl> QRect OverlayWidget : : contentRect ( ) const { <nl> void OverlayWidget : : contentSizeChanged ( ) { <nl> _width = _w ; <nl> _height = _h ; <nl> + resizeContentByScreenSize ( ) ; <nl> + } <nl> + <nl> + void OverlayWidget : : resizeContentByScreenSize ( ) { <nl> if ( _w > 0 & & _h > 0 ) { <nl> _zoomToScreen = float64 ( width ( ) ) / _w ; <nl> if ( _h * _zoomToScreen > height ( ) ) { <nl> void OverlayWidget : : contentSizeChanged ( ) { <nl> _w = qRound ( _w / ( - _zoomToScreen + 1 ) ) ; <nl> _h = qRound ( _h / ( - _zoomToScreen + 1 ) ) ; <nl> } <nl> - snapXY ( ) ; <nl> } else { <nl> _zoom = 0 ; <nl> + _w = _width ; <nl> + _h = _height ; <nl> } <nl> _x = ( width ( ) - _w ) / 2 ; <nl> _y = ( height ( ) - _h ) / 2 ; <nl> void OverlayWidget : : step_radial ( crl : : time ms , bool timer ) { <nl> update ( radialRect ( ) ) ; <nl> } <nl> const auto ready = _doc & & _doc - > loaded ( ) ; <nl> - const auto streamVideo = ready & & ( _doc - > isAnimation ( ) | | _doc - > isVideoFile ( ) ) ; <nl> + const auto streamVideo = ready & & _doc - > canBePlayed ( ) ; <nl> const auto tryOpenImage = ready & & ( _doc - > size < App : : kImageSizeLimit ) ; <nl> if ( ready & & ( ( tryOpenImage & & ! _radial . animating ( ) ) | | streamVideo ) ) { <nl> - _streamingStartPaused = false ; <nl> - if ( ! _doc - > data ( ) . isEmpty ( ) & & streamVideo ) { <nl> - displayDocument ( _doc , App : : histItemById ( _msgid ) ) ; <nl> + _streamingStartPaused = true ; <nl> + if ( streamVideo ) { <nl> + redisplayContent ( ) ; <nl> } else { <nl> auto & location = _doc - > location ( true ) ; <nl> if ( location . accessEnable ( ) ) { <nl> - if ( streamVideo <nl> - | | _doc - > isTheme ( ) <nl> + if ( _doc - > isTheme ( ) <nl> | | QImageReader ( location . name ( ) ) . canRead ( ) ) { <nl> - displayDocument ( _doc , App : : histItemById ( _msgid ) ) ; <nl> + redisplayContent ( ) ; <nl> } <nl> location . accessDisable ( ) ; <nl> } <nl> void OverlayWidget : : dropdownHidden ( ) { <nl> } <nl> <nl> void OverlayWidget : : onScreenResized ( int screen ) { <nl> - if ( isHidden ( ) ) return ; <nl> - <nl> - bool ignore = false ; <nl> - auto screens = QApplication : : screens ( ) ; <nl> - if ( screen > = 0 & & screen < screens . size ( ) ) { <nl> - if ( auto screenHandle = windowHandle ( ) - > screen ( ) ) { <nl> - if ( screens . at ( screen ) ! = screenHandle ) { <nl> - ignore = true ; <nl> - } <nl> - } <nl> + if ( isHidden ( ) ) { <nl> + return ; <nl> } <nl> - if ( ! ignore ) { <nl> + <nl> + const auto screens = QApplication : : screens ( ) ; <nl> + const auto changed = ( screen > = 0 & & screen < screens . size ( ) ) <nl> + ? screens [ screen ] <nl> + : nullptr ; <nl> + if ( ! windowHandle ( ) <nl> + | | ! windowHandle ( ) - > screen ( ) <nl> + | | ! changed <nl> + | | windowHandle ( ) - > screen ( ) = = changed ) { <nl> moveToScreen ( ) ; <nl> - const auto item = App : : histItemById ( _msgid ) ; <nl> - if ( _photo ) { <nl> - displayPhoto ( _photo , item ) ; <nl> - } else if ( _doc ) { <nl> - displayDocument ( _doc , item ) ; <nl> - } <nl> } <nl> } <nl> <nl> void OverlayWidget : : onSaveAs ( ) { <nl> <nl> if ( _doc - > data ( ) . isEmpty ( ) ) location . accessDisable ( ) ; <nl> } else { <nl> - if ( ! 
documentContentShown ( ) ) { <nl> - DocumentSaveClickHandler : : Save ( <nl> - fileOrigin ( ) , <nl> - _doc , <nl> - DocumentSaveClickHandler : : Mode : : ToNewFile ) ; <nl> - updateControls ( ) ; <nl> - } else { <nl> - _saveVisible = false ; <nl> - update ( _saveNav ) ; <nl> - } <nl> + DocumentSaveClickHandler : : Save ( <nl> + fileOrigin ( ) , <nl> + _doc , <nl> + DocumentSaveClickHandler : : Mode : : ToNewFile ) ; <nl> + updateControls ( ) ; <nl> updateOver ( _lastMouseMovePos ) ; <nl> } <nl> } else { <nl> void OverlayWidget : : onDownload ( ) { <nl> } <nl> QString toName ; <nl> if ( _doc ) { <nl> - const FileLocation & location ( _doc - > location ( true ) ) ; <nl> + const auto & location = _doc - > location ( true ) ; <nl> if ( location . accessEnable ( ) ) { <nl> if ( ! QDir ( ) . exists ( path ) ) QDir ( ) . mkpath ( path ) ; <nl> toName = filedialogNextFilename ( <nl> void OverlayWidget : : onDownload ( ) { <nl> } <nl> location . accessDisable ( ) ; <nl> } else { <nl> - if ( ! documentContentShown ( ) ) { <nl> + if ( _doc - > filepath ( DocumentData : : FilePathResolve : : Checked ) . isEmpty ( ) ) { <nl> DocumentSaveClickHandler : : Save ( <nl> fileOrigin ( ) , <nl> _doc , <nl> void OverlayWidget : : onDownload ( ) { <nl> void OverlayWidget : : onSaveCancel ( ) { <nl> if ( _doc & & _doc - > loading ( ) ) { <nl> _doc - > cancel ( ) ; <nl> + if ( _doc - > canBePlayed ( ) ) { <nl> + redisplayContent ( ) ; <nl> + } <nl> } <nl> } <nl> <nl> std : : optional < OverlayWidget : : CollageKey > OverlayWidget : : collageKey ( ) const { <nl> if ( const auto item = App : : histItemById ( _msgid ) ) { <nl> if ( const auto media = item - > media ( ) ) { <nl> if ( const auto page = media - > webpage ( ) ) { <nl> - for ( const auto item : page - > collage . items ) { <nl> + for ( const auto & item : page - > collage . items ) { <nl> if ( item = = _photo | | item = = _doc ) { <nl> return item ; <nl> } <nl> void OverlayWidget : : displayPhoto ( not_null < PhotoData * > photo , HistoryItem * item ) <nl> displayDocument ( nullptr , item ) ; <nl> return ; <nl> } <nl> + if ( isHidden ( ) ) { <nl> + moveToScreen ( ) ; <nl> + } <nl> + <nl> clearStreaming ( ) ; <nl> destroyThemePreview ( ) ; <nl> _doc = nullptr ; <nl> void OverlayWidget : : displayPhoto ( not_null < PhotoData * > photo , HistoryItem * item ) <nl> _radial . stop ( ) ; <nl> <nl> refreshMediaViewer ( ) ; <nl> - <nl> - _photoRadialRect = QRect ( QPoint ( ( width ( ) - st : : radialSize . width ( ) ) / 2 , ( height ( ) - st : : radialSize . height ( ) ) / 2 ) , st : : radialSize ) ; <nl> - <nl> - _zoom = 0 ; <nl> - <nl> refreshCaption ( item ) ; <nl> <nl> + _zoom = 0 ; <nl> _zoomToScreen = 0 ; <nl> Auth ( ) . downloader ( ) . clearPriorities ( ) ; <nl> _blurred = true ; <nl> void OverlayWidget : : displayPhoto ( not_null < PhotoData * > photo , HistoryItem * item ) <nl> _down = OverNone ; <nl> _w = ConvertScale ( photo - > width ( ) ) ; <nl> _h = ConvertScale ( photo - > height ( ) ) ; <nl> - if ( isHidden ( ) ) { <nl> - moveToScreen ( ) ; <nl> - } <nl> contentSizeChanged ( ) ; <nl> if ( _msgid & & item ) { <nl> _from = item - > senderOriginal ( ) ; <nl> void OverlayWidget : : destroyThemePreview ( ) { <nl> _themeCancel . 
destroy ( ) ; <nl> } <nl> <nl> + void OverlayWidget : : redisplayContent ( ) { <nl> + if ( isHidden ( ) ) { <nl> + return ; <nl> + } <nl> + const auto item = App : : histItemById ( _msgid ) ; <nl> + if ( _photo ) { <nl> + displayPhoto ( _photo , item ) ; <nl> + } else { <nl> + displayDocument ( _doc , item ) ; <nl> + } <nl> + } <nl> + <nl> / / Empty messages shown as docs : doc can be nullptr . <nl> void OverlayWidget : : displayDocument ( DocumentData * doc , HistoryItem * item ) { <nl> - const auto documentChanged = ! doc <nl> - | | ( doc ! = _doc ) <nl> - | | ( item & & item - > fullId ( ) ! = _msgid ) ; <nl> - if ( documentChanged <nl> - | | ( ! doc - > isAnimation ( ) & & ! doc - > isVideoFile ( ) ) <nl> - | | ! doc - > canBePlayed ( ) ) { <nl> - _fullScreenVideo = false ; <nl> - _current = QPixmap ( ) ; <nl> - clearStreaming ( ) ; <nl> - } else if ( videoShown ( ) ) { <nl> - _current = QPixmap ( ) ; <nl> - } <nl> - if ( documentChanged | | ! doc - > isTheme ( ) ) { <nl> - destroyThemePreview ( ) ; <nl> + if ( isHidden ( ) ) { <nl> + moveToScreen ( ) ; <nl> } <nl> + _fullScreenVideo = false ; <nl> + _current = QPixmap ( ) ; <nl> + clearStreaming ( ) ; <nl> + destroyThemePreview ( ) ; <nl> _doc = doc ; <nl> _photo = nullptr ; <nl> _radial . stop ( ) ; <nl> <nl> refreshMediaViewer ( ) ; <nl> - <nl> - if ( documentChanged ) { <nl> + if ( ( item ? item - > fullId ( ) : FullMsgId ( ) ) ! = _msgid ) { <nl> refreshCaption ( item ) ; <nl> } <nl> if ( _doc ) { <nl> void OverlayWidget : : displayDocument ( DocumentData * doc , HistoryItem * item ) { <nl> _doc - > automaticLoad ( fileOrigin ( ) , item ) ; <nl> <nl> if ( _doc - > canBePlayed ( ) ) { <nl> - initStreaming ( ) ; <nl> + if ( _doc - > loading ( ) ) { <nl> + initStreamingThumbnail ( ) ; <nl> + } else { <nl> + initStreaming ( ) ; <nl> + } <nl> } else if ( _doc - > isTheme ( ) ) { <nl> initThemePreview ( ) ; <nl> } else { <nl> void OverlayWidget : : displayDocument ( DocumentData * doc , HistoryItem * item ) { <nl> _w = contentSize . width ( ) ; <nl> _h = contentSize . height ( ) ; <nl> } <nl> - if ( isHidden ( ) ) { <nl> - moveToScreen ( ) ; <nl> - } <nl> contentSizeChanged ( ) ; <nl> if ( _msgid & & item ) { <nl> _from = item - > senderOriginal ( ) ; <nl> void OverlayWidget : : handleStreamingError ( Streaming : : Error & & error ) { <nl> _doc - > setInappPlaybackFailed ( ) ; <nl> } <nl> if ( ! _doc - > canBePlayed ( ) ) { <nl> - clearStreaming ( ) ; <nl> - displayDocument ( _doc , App : : histItemById ( _msgid ) ) ; <nl> + redisplayContent ( ) ; <nl> } else { <nl> _streamed - > lastError = std : : move ( error ) ; <nl> playbackWaitingChange ( false ) ; <nl> void OverlayWidget : : initThemePreview ( ) { <nl> Assert ( _doc & & _doc - > isTheme ( ) ) ; <nl> <nl> auto & location = _doc - > location ( ) ; <nl> - if ( ! location . isEmpty ( ) & & location . accessEnable ( ) ) { <nl> - _themePreviewShown = true ; <nl> - <nl> - Window : : Theme : : CurrentData current ; <nl> - current . backgroundId = Window : : Theme : : Background ( ) - > id ( ) ; <nl> - current . backgroundImage = Window : : Theme : : Background ( ) - > createCurrentImage ( ) ; <nl> - current . backgroundTiled = Window : : Theme : : Background ( ) - > tile ( ) ; <nl> - <nl> - const auto path = _doc - > location ( ) . 
name ( ) ; <nl> - const auto id = _themePreviewId = rand_value < uint64 > ( ) ; <nl> - const auto weak = make_weak ( this ) ; <nl> - crl : : async ( [ = , data = std : : move ( current ) ] ( ) mutable { <nl> - auto preview = Window : : Theme : : GeneratePreview ( <nl> - path , <nl> - std : : move ( data ) ) ; <nl> - crl : : on_main ( weak , [ = , result = std : : move ( preview ) ] ( ) mutable { <nl> - if ( id ! = _themePreviewId ) { <nl> - return ; <nl> - } <nl> - _themePreviewId = 0 ; <nl> - _themePreview = std : : move ( result ) ; <nl> - if ( _themePreview ) { <nl> - _themeApply . create ( <nl> - this , <nl> - langFactory ( lng_theme_preview_apply ) , <nl> - st : : themePreviewApplyButton ) ; <nl> - _themeApply - > show ( ) ; <nl> - _themeApply - > setClickedCallback ( [ this ] { <nl> - auto preview = std : : move ( _themePreview ) ; <nl> - close ( ) ; <nl> - Window : : Theme : : Apply ( std : : move ( preview ) ) ; <nl> - } ) ; <nl> - _themeCancel . create ( <nl> - this , <nl> - langFactory ( lng_cancel ) , <nl> - st : : themePreviewCancelButton ) ; <nl> - _themeCancel - > show ( ) ; <nl> - _themeCancel - > setClickedCallback ( [ this ] { close ( ) ; } ) ; <nl> - updateControls ( ) ; <nl> - } <nl> - update ( ) ; <nl> - } ) ; <nl> - } ) ; <nl> - location . accessDisable ( ) ; <nl> + if ( location . isEmpty ( ) | | ! location . accessEnable ( ) ) { <nl> + return ; <nl> } <nl> + _themePreviewShown = true ; <nl> + <nl> + Window : : Theme : : CurrentData current ; <nl> + current . backgroundId = Window : : Theme : : Background ( ) - > id ( ) ; <nl> + current . backgroundImage = Window : : Theme : : Background ( ) - > createCurrentImage ( ) ; <nl> + current . backgroundTiled = Window : : Theme : : Background ( ) - > tile ( ) ; <nl> + <nl> + const auto path = _doc - > location ( ) . name ( ) ; <nl> + const auto id = _themePreviewId = rand_value < uint64 > ( ) ; <nl> + const auto weak = make_weak ( this ) ; <nl> + crl : : async ( [ = , data = std : : move ( current ) ] ( ) mutable { <nl> + auto preview = Window : : Theme : : GeneratePreview ( <nl> + path , <nl> + std : : move ( data ) ) ; <nl> + crl : : on_main ( weak , [ = , result = std : : move ( preview ) ] ( ) mutable { <nl> + if ( id ! = _themePreviewId ) { <nl> + return ; <nl> + } <nl> + _themePreviewId = 0 ; <nl> + _themePreview = std : : move ( result ) ; <nl> + if ( _themePreview ) { <nl> + _themeApply . create ( <nl> + this , <nl> + langFactory ( lng_theme_preview_apply ) , <nl> + st : : themePreviewApplyButton ) ; <nl> + _themeApply - > show ( ) ; <nl> + _themeApply - > setClickedCallback ( [ this ] { <nl> + auto preview = std : : move ( _themePreview ) ; <nl> + close ( ) ; <nl> + Window : : Theme : : Apply ( std : : move ( preview ) ) ; <nl> + } ) ; <nl> + _themeCancel . create ( <nl> + this , <nl> + langFactory ( lng_cancel ) , <nl> + st : : themePreviewCancelButton ) ; <nl> + _themeCancel - > show ( ) ; <nl> + _themeCancel - > setClickedCallback ( [ this ] { close ( ) ; } ) ; <nl> + updateControls ( ) ; <nl> + } <nl> + update ( ) ; <nl> + } ) ; <nl> + } ) ; <nl> + location . accessDisable ( ) ; <nl> } <nl> <nl> void OverlayWidget : : refreshClipControllerGeometry ( ) { <nl> void OverlayWidget : : validatePhotoCurrentImage ( ) { <nl> } <nl> } <nl> <nl> + void OverlayWidget : : checkLoadingWhileStreaming ( ) { <nl> + if ( _streamed & & _doc - > loading ( ) ) { <nl> + crl : : on_main ( this , [ = , doc = _doc ] { <nl> + if ( ! 
isHidden ( ) & & _doc = = doc ) { <nl> + redisplayContent ( ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> void OverlayWidget : : paintEvent ( QPaintEvent * e ) { <nl> + checkLoadingWhileStreaming ( ) ; <nl> + <nl> const auto r = e - > rect ( ) ; <nl> const auto & region = e - > region ( ) ; <nl> const auto rects = region . rects ( ) ; <nl> mmm a / Telegram / SourceFiles / media / view / media_view_overlay_widget . h <nl> ppp b / Telegram / SourceFiles / media / view / media_view_overlay_widget . h <nl> private slots : <nl> void playbackWaitingChange ( bool waiting ) ; <nl> <nl> void updateOver ( QPoint mpos ) ; <nl> - void moveToScreen ( ) ; <nl> + void moveToScreen ( bool force = false ) ; <nl> bool moveToNext ( int delta ) ; <nl> void preloadData ( int delta ) ; <nl> <nl> private slots : <nl> void updateControls ( ) ; <nl> void updateActions ( ) ; <nl> void resizeCenteredControls ( ) ; <nl> + void resizeContentByScreenSize ( ) ; <nl> + void checkLoadingWhileStreaming ( ) ; <nl> <nl> void displayPhoto ( not_null < PhotoData * > photo , HistoryItem * item ) ; <nl> void displayDocument ( DocumentData * document , HistoryItem * item ) ; <nl> void displayFinished ( ) ; <nl> + void redisplayContent ( ) ; <nl> void findCurrent ( ) ; <nl> <nl> void updateCursor ( ) ; <nl>
Allow video download from media viewer .
telegramdesktop/tdesktop
5cae57601a1aabf7ccd4561820130763cf992057
2019-03-14T12:03:02Z
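Editor's note on the record above: the added checkLoadingWhileStreaming() hunk defers the redisplay to the next main-loop iteration and re-checks that the widget is still visible and still showing the same document before acting. Below is a minimal, hypothetical sketch of that re-validation pattern using a plain std::function queue as a stand-in for crl::on_main; the names are illustrative only and are not tdesktop API.

```cpp
#include <functional>
#include <iostream>
#include <queue>

struct Document { int id = 0; };

struct Viewer {
    const Document* current = nullptr;
    bool hidden = false;

    void redisplayContent() { std::cout << "redisplay doc " << current->id << '\n'; }
};

int main() {
    std::queue<std::function<void()>> mainLoop;  // stand-in for posting work to the main thread
    Document a{1}, b{2};
    Viewer viewer;
    viewer.current = &a;

    // Schedule deferred work, capturing the document shown *at scheduling time*.
    const Document* shownWhenScheduled = viewer.current;
    mainLoop.push([&viewer, shownWhenScheduled] {
        // Re-validate before acting: the viewer must still be visible and must
        // still show the same document it showed when the work was queued.
        if (!viewer.hidden && viewer.current == shownWhenScheduled) {
            viewer.redisplayContent();
        }
    });

    viewer.current = &b;  // the user switched documents before the callback ran

    while (!mainLoop.empty()) {  // drain the queue: the callback sees stale state and bails out
        mainLoop.front()();
        mainLoop.pop();
    }
    return 0;
}
```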
mmm a / Telegram / PrepareWin . bat <nl> ppp b / Telegram / PrepareWin . bat <nl> <nl> @ echo OFF <nl> <nl> set " AppVersionStrMajor = 0 . 8 " <nl> - set " AppVersion = 8050 " <nl> - set " AppVersionStrSmall = 0 . 8 . 50 " <nl> - set " AppVersionStr = 0 . 8 . 50 " <nl> - set " AppVersionStrFull = 0 . 8 . 50 . 0 " <nl> - set " DevChannel = 1 " <nl> + set " AppVersion = 8051 " <nl> + set " AppVersionStrSmall = 0 . 8 . 51 " <nl> + set " AppVersionStr = 0 . 8 . 51 " <nl> + set " AppVersionStrFull = 0 . 8 . 51 . 0 " <nl> + set " DevChannel = 0 " <nl> <nl> if % DevChannel % neq 0 goto preparedev <nl> <nl> mmm a / Telegram / SourceFiles / application . cpp <nl> ppp b / Telegram / SourceFiles / application . cpp <nl> void Application : : checkMapVersion ( ) { <nl> QString versionFeatures ; <nl> if ( cDevVersion ( ) & & Local : : oldMapVersion ( ) < 8050 ) { <nl> versionFeatures = QString : : fromUtf8 ( " \ xe2 \ x80 \ x94 Bug fixes in Windows notifications \ n \ xe2 \ x80 \ x94 Fixed input methods on Linux ( Fcitx and IBus ) " ) ; / / . replace ( ' @ ' , qsl ( " @ " ) + QChar ( 0x200D ) ) ; <nl> - } else if ( ! cDevVersion ( ) & & Local : : oldMapVersion ( ) < 8048 ) { <nl> + } else if ( ! cDevVersion ( ) & & Local : : oldMapVersion ( ) < 8051 ) { <nl> versionFeatures = lang ( lng_new_version_text ) . trimmed ( ) ; <nl> } <nl> if ( ! versionFeatures . isEmpty ( ) ) { <nl> mmm a / Telegram / SourceFiles / config . h <nl> ppp b / Telegram / SourceFiles / config . h <nl> Copyright ( c ) 2014 John Preston , https : / / desktop . telegram . org <nl> * / <nl> # pragma once <nl> <nl> - static const int32 AppVersion = 8050 ; <nl> - static const wchar_t * AppVersionStr = L " 0 . 8 . 50 " ; <nl> - static const bool DevVersion = true ; <nl> + static const int32 AppVersion = 8051 ; <nl> + static const wchar_t * AppVersionStr = L " 0 . 8 . 51 " ; <nl> + static const bool DevVersion = false ; <nl> <nl> static const wchar_t * AppNameOld = L " Telegram Win ( Unofficial ) " ; <nl> static const wchar_t * AppName = L " Telegram Desktop " ; <nl> mmm a / Telegram / SourceFiles / dialogswidget . cpp <nl> ppp b / Telegram / SourceFiles / dialogswidget . cpp <nl> void DialogsListWidget : : selectSkip ( int32 direction ) { <nl> if ( ! sel ) { <nl> if ( dialogs . list . count & & direction > 0 ) { <nl> sel = dialogs . list . begin ; <nl> - } else if ( contactsNoDialogs . list . count & & direction > 0 ) { <nl> + } else if ( false & & contactsNoDialogs . list . count & & direction > 0 ) { <nl> sel = contactsNoDialogs . list . begin ; <nl> } else { <nl> return ; <nl> void DialogsListWidget : : selectSkip ( int32 direction ) { <nl> } else if ( direction > 0 ) { <nl> if ( sel - > next - > next ) { <nl> sel = sel - > next ; <nl> - } else if ( sel - > next = = dialogs . list . end & & contactsNoDialogs . list . count ) { <nl> + } else if ( false & & sel - > next = = dialogs . list . end & & contactsNoDialogs . list . count ) { <nl> sel = contactsNoDialogs . list . begin ; <nl> contactSel = true ; <nl> } <nl> } else { <nl> if ( sel - > prev ) { <nl> sel = sel - > prev ; <nl> - } else if ( sel = = contactsNoDialogs . list . begin & & dialogs . list . count ) { <nl> + } else if ( false & & sel = = contactsNoDialogs . list . begin & & dialogs . list . count ) { <nl> sel = dialogs . list . end - > prev ; <nl> contactSel = false ; <nl> } <nl> void DialogsListWidget : : scrollToPeer ( const PeerId & peer , MsgId msgId ) { <nl> DialogsList : : RowByPeer : : const_iterator i = dialogs . list . rowByPeer . 
constFind ( peer ) ; <nl> if ( i ! = dialogs . list . rowByPeer . cend ( ) ) { <nl> fromY = i . value ( ) - > pos * st : : dlgHeight ; <nl> - } else { <nl> + } else if ( false ) { <nl> i = contactsNoDialogs . list . rowByPeer . constFind ( peer ) ; <nl> if ( i ! = contactsNoDialogs . list . rowByPeer . cend ( ) ) { <nl> fromY = ( i . value ( ) - > pos + dialogs . list . count ) * st : : dlgHeight ; <nl> void DialogsListWidget : : selectSkipPage ( int32 pixels , int32 direction ) { <nl> if ( ! sel ) { <nl> if ( direction > 0 & & dialogs . list . count ) { <nl> sel = dialogs . list . begin ; <nl> - } else if ( direction > 0 & & contactsNoDialogs . list . count ) { <nl> + } else if ( false & & direction > 0 & & contactsNoDialogs . list . count ) { <nl> sel = contactsNoDialogs . list . begin ; <nl> } else { <nl> return ; <nl> void DialogsListWidget : : selectSkipPage ( int32 pixels , int32 direction ) { <nl> while ( toSkip - - & & sel - > next - > next ) { <nl> sel = sel - > next ; <nl> } <nl> - if ( toSkip > = 0 & & sel - > next = = dialogs . list . end & & contactsNoDialogs . list . count ) { <nl> + if ( false & & toSkip > = 0 & & sel - > next = = dialogs . list . end & & contactsNoDialogs . list . count ) { <nl> sel = contactsNoDialogs . list . begin ; <nl> while ( toSkip - - & & sel - > next - > next ) { <nl> sel = sel - > next ; <nl> void DialogsListWidget : : peerAfter ( const PeerData * inPeer , MsgId inMsg , PeerData <nl> if ( _state = = DefaultState ) { <nl> DialogsList : : RowByPeer : : const_iterator i = dialogs . list . rowByPeer . constFind ( inPeer - > id ) ; <nl> if ( i = = dialogs . list . rowByPeer . constEnd ( ) ) { <nl> - i = contactsNoDialogs . list . rowByPeer . constFind ( inPeer - > id ) ; <nl> - if ( i = = contactsNoDialogs . list . rowByPeer . cend ( ) ) { <nl> - outPeer = 0 ; <nl> - outMsg = 0 ; <nl> - return ; <nl> - } <nl> - if ( i . value ( ) - > next ! = contactsNoDialogs . list . end ) { <nl> - outPeer = i . value ( ) - > next - > history - > peer ; <nl> - outMsg = ShowAtUnreadMsgId ; <nl> - return ; <nl> - } <nl> + / / i = contactsNoDialogs . list . rowByPeer . constFind ( inPeer - > id ) ; <nl> + / / if ( i = = contactsNoDialogs . list . rowByPeer . cend ( ) ) { <nl> + / / outPeer = 0 ; <nl> + / / outMsg = 0 ; <nl> + / / return ; <nl> + / / } <nl> + / / if ( i . value ( ) - > next ! = contactsNoDialogs . list . end ) { <nl> + / / outPeer = i . value ( ) - > next - > history - > peer ; <nl> + / / outMsg = ShowAtUnreadMsgId ; <nl> + / / return ; <nl> + / / } <nl> outPeer = 0 ; <nl> outMsg = 0 ; <nl> return ; <nl> void DialogsListWidget : : peerAfter ( const PeerData * inPeer , MsgId inMsg , PeerData <nl> outPeer = i . value ( ) - > next - > history - > peer ; <nl> outMsg = ShowAtUnreadMsgId ; <nl> return ; <nl> - } else if ( contactsNoDialogs . list . count ) { <nl> + } else if ( false & & contactsNoDialogs . list . count ) { <nl> outPeer = contactsNoDialogs . list . begin - > history - > peer ; <nl> outMsg = ShowAtUnreadMsgId ; <nl> return ; <nl> mmm a / Telegram / SourceFiles / pspecific_wnd . cpp <nl> ppp b / Telegram / SourceFiles / pspecific_wnd . cpp <nl> Copyright ( c ) 2014 John Preston , https : / / desktop . telegram . org <nl> # include " passcodewidget . h " <nl> <nl> # include < Shobjidl . h > <nl> - # include < dbghelp . h > <nl> # include < shellapi . h > <nl> + <nl> + # include < roapi . h > <nl> + # include < wrl \ client . h > <nl> + # include < wrl \ implements . h > <nl> + # include < windows . ui . notifications . 
h > <nl> + <nl> + # include < dbghelp . h > <nl> # include < Shlwapi . h > <nl> # include < Strsafe . h > <nl> # include < shlobj . h > <nl> Copyright ( c ) 2014 John Preston , https : / / desktop . telegram . org <nl> # include < intsafe . h > <nl> # include < guiddef . h > <nl> <nl> - # include < roapi . h > <nl> - # include < wrl \ client . h > <nl> - # include < wrl \ implements . h > <nl> - # include < windows . ui . notifications . h > <nl> - <nl> # include < qpa / qplatformnativeinterface . h > <nl> <nl> # define min ( a , b ) ( ( a ) < ( b ) ? ( a ) : ( b ) ) <nl> namespace { <nl> } <nl> void setupPropSys ( ) { <nl> HINSTANCE procId = LoadLibrary ( L " PROPSYS . DLL " ) ; <nl> - if ( ! loadFunction ( procId , " PropVariantToString " , procId ) ) return ; <nl> + if ( ! loadFunction ( procId , " PropVariantToString " , propVariantToString ) ) return ; <nl> } <nl> void setupToast ( HINSTANCE procId ) { <nl> if ( ! propVariantToString ) return ; <nl> void RegisterCustomScheme ( ) { <nl> <nl> void psNewVersion ( ) { <nl> RegisterCustomScheme ( ) ; <nl> - if ( Local : : oldSettingsVersion ( ) < 8050 ) { <nl> + if ( Local : : oldSettingsVersion ( ) < 8051 ) { <nl> CheckPinnedAppUserModelId ( ) ; <nl> } <nl> } <nl> void CheckPinnedAppUserModelId ( ) { <nl> <nl> QString path = pinnedPath ( ) ; <nl> std : : wstring p = QDir : : toNativeSeparators ( path ) . toStdWString ( ) ; <nl> - <nl> + <nl> WCHAR src [ MAX_PATH ] ; <nl> - GetModuleFileNameEx ( GetCurrentProcess ( ) , nullptr , src , MAX_PATH ) ; <nl> + GetModuleFileName ( GetModuleHandle ( 0 ) , src , MAX_PATH ) ; <nl> BY_HANDLE_FILE_INFORMATION srcinfo = { 0 } ; <nl> HANDLE srcfile = CreateFile ( src , 0x00 , 0x00 , NULL , OPEN_EXISTING , FILE_ATTRIBUTE_NORMAL , NULL ) ; <nl> if ( srcfile = = INVALID_HANDLE_VALUE ) return ; <nl> mmm a / Telegram / SourceFiles / stdafx . h <nl> ppp b / Telegram / SourceFiles / stdafx . h <nl> Full license : https : / / github . com / telegramdesktop / tdesktop / blob / master / LICENSE <nl> Copyright ( c ) 2014 John Preston , https : / / desktop . telegram . org <nl> * / <nl> # define __HUGE <nl> - <nl> + # define PSAPI_VERSION 1 / / fix WinXP <nl> / / # define Q_NO_TEMPLATE_FRIENDS / / fix some compiler difference issues <nl> <nl> # include < openssl / bn . h > <nl> mmm a / Telegram / Telegram . plist <nl> ppp b / Telegram / Telegram . plist <nl> <nl> < key > CFBundlePackageType < / key > <nl> < string > APPL < / string > <nl> < key > CFBundleShortVersionString < / key > <nl> - < string > 0 . 8 . 50 < / string > <nl> + < string > 0 . 8 . 51 < / string > <nl> < key > LSMinimumSystemVersion < / key > <nl> < string > $ ( MACOSX_DEPLOYMENT_TARGET ) < / string > <nl> < key > CFBundleSignature < / key > <nl> Binary files a / Telegram / Telegram . rc and b / Telegram / Telegram . rc differ <nl> mmm a / Telegram / Telegram . xcodeproj / project . pbxproj <nl> ppp b / Telegram / Telegram . xcodeproj / project . pbxproj <nl> <nl> buildSettings = { <nl> ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon ; <nl> COPY_PHASE_STRIP = NO ; <nl> - CURRENT_PROJECT_VERSION = 0 . 8 . 50 ; <nl> + CURRENT_PROJECT_VERSION = 0 . 8 . 51 ; <nl> DEBUG_INFORMATION_FORMAT = dwarf ; <nl> GCC_GENERATE_DEBUGGING_SYMBOLS = YES ; <nl> GCC_OPTIMIZATION_LEVEL = 0 ; <nl> <nl> buildSettings = { <nl> ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon ; <nl> COPY_PHASE_STRIP = YES ; <nl> - CURRENT_PROJECT_VERSION = 0 . 8 . 50 ; <nl> + CURRENT_PROJECT_VERSION = 0 . 8 . 
51 ; <nl> GCC_GENERATE_DEBUGGING_SYMBOLS = NO ; <nl> GCC_OPTIMIZATION_LEVEL = fast ; <nl> GCC_PREFIX_HEADER = . / SourceFiles / stdafx . h ; <nl> <nl> CLANG_WARN__DUPLICATE_METHOD_MATCH = YES ; <nl> CODE_SIGN_IDENTITY = " " ; <nl> COPY_PHASE_STRIP = NO ; <nl> - CURRENT_PROJECT_VERSION = 0 . 8 . 50 ; <nl> + CURRENT_PROJECT_VERSION = 0 . 8 . 51 ; <nl> DEBUG_INFORMATION_FORMAT = " dwarf - with - dsym " ; <nl> DYLIB_COMPATIBILITY_VERSION = 0 . 8 ; <nl> - DYLIB_CURRENT_VERSION = 0 . 8 . 50 ; <nl> + DYLIB_CURRENT_VERSION = 0 . 8 . 51 ; <nl> ENABLE_STRICT_OBJC_MSGSEND = YES ; <nl> FRAMEWORK_SEARCH_PATHS = " " ; <nl> GCC_GENERATE_DEBUGGING_SYMBOLS = YES ; <nl> <nl> CLANG_WARN__DUPLICATE_METHOD_MATCH = YES ; <nl> CODE_SIGN_IDENTITY = " " ; <nl> COPY_PHASE_STRIP = NO ; <nl> - CURRENT_PROJECT_VERSION = 0 . 8 . 50 ; <nl> + CURRENT_PROJECT_VERSION = 0 . 8 . 51 ; <nl> DEBUG_INFORMATION_FORMAT = dwarf ; <nl> DYLIB_COMPATIBILITY_VERSION = 0 . 8 ; <nl> - DYLIB_CURRENT_VERSION = 0 . 8 . 50 ; <nl> + DYLIB_CURRENT_VERSION = 0 . 8 . 51 ; <nl> ENABLE_STRICT_OBJC_MSGSEND = YES ; <nl> FRAMEWORK_SEARCH_PATHS = " " ; <nl> GCC_GENERATE_DEBUGGING_SYMBOLS = YES ; <nl> mmm a / Telegram / Version . sh <nl> ppp b / Telegram / Version . sh <nl> <nl> - echo 0 . 8 8050 0 . 8 . 50 1 <nl> + echo 0 . 8 8051 0 . 8 . 51 0 <nl> # AppVersionStrMajor AppVersion AppVersionStr DevChannel <nl>
version 0 . 8 . 51 prepared , with win7 and winxp fix
telegramdesktop/tdesktop
6a05924dcfbf2a9fbe7a96fcfa9f3aca2e907b08
2015-08-16T16:37:56Z
mmm a / js / server / modules / @ arangodb / foxx / router / router . js <nl> ppp b / js / server / modules / @ arangodb / foxx / router / router . js <nl> const Router = module . exports = <nl> [ [ ' path ' , ' string ' ] , . . . repeat ( Math . max ( 1 , args . length - 2 ) , [ ' handler ' , ' function ' ] ) , [ ' name ' , ' string ' ] ] , <nl> [ [ ' path ' , ' string ' ] , . . . repeat ( Math . max ( 1 , args . length - 1 ) , [ ' handler ' , ' function ' ] ) ] , <nl> [ . . . repeat ( Math . max ( 1 , args . length - 1 ) , [ ' handler ' , ' function ' ] ) , [ ' name ' , ' string ' ] ] , <nl> - repeat ( args . length , [ ' handler ' , ' function ' ] ) <nl> + repeat ( Math . max ( 1 , args . length - 1 ) , [ ' handler ' , ' function ' ] ) <nl> ) ; <nl> const path = argv . path ; <nl> const handler = argv . handler ; <nl> ALL_METHODS . forEach ( function ( method ) { <nl> [ [ ' path ' , ' string ' ] , . . . repeat ( Math . max ( 1 , args . length - 2 ) , [ ' handler ' , ' function ' ] ) , [ ' name ' , ' string ' ] ] , <nl> [ [ ' path ' , ' string ' ] , . . . repeat ( Math . max ( 1 , args . length - 1 ) , [ ' handler ' , ' function ' ] ) ] , <nl> [ . . . repeat ( Math . max ( 1 , args . length - 1 ) , [ ' handler ' , ' function ' ] ) , [ ' name ' , ' string ' ] ] , <nl> - repeat ( args . length , [ ' handler ' , ' function ' ] ) <nl> + repeat ( Math . max ( 1 , args . length - 1 ) , [ ' handler ' , ' function ' ] ) <nl> ) ; <nl> const path = argv . path ; <nl> const handler = argv . handler ; <nl>
Require at least one arg in route def
arangodb/arangodb
f2a6864db404e134e66bac6621efcb68eb0c0583
2016-12-12T01:32:49Z
mmm a / Marlin / src / Marlin . cpp <nl> ppp b / Marlin / src / Marlin . cpp <nl> bool pin_is_protected ( const int8_t pin ) { <nl> return false ; <nl> } <nl> <nl> - # include " gcode / host / M114 . h " <nl> # include " gcode / host / M115 . h " <nl> <nl> # include " gcode / lcd / M117 . h " <nl> mmm a / Marlin / src / gcode / gcode . cpp <nl> ppp b / Marlin / src / gcode / gcode . cpp <nl> void GcodeSuite : : dwell ( millis_t time ) { <nl> / / <nl> / / Placeholders for non - migrated codes <nl> / / <nl> - extern void gcode_M114 ( ) ; <nl> extern void gcode_M115 ( ) ; <nl> extern void gcode_M117 ( ) ; <nl> extern void gcode_M118 ( ) ; <nl> void GcodeSuite : : process_next_command ( ) { <nl> <nl> case 92 : M92 ( ) ; break ; / / M92 : Set the steps - per - unit for one or more axes <nl> <nl> - case 114 : / / M114 : Report current position <nl> - gcode_M114 ( ) ; <nl> - break ; <nl> + case 114 : M114 ( ) ; break ; / / M114 : Report current position <nl> case 115 : / / M115 : Report capabilities <nl> gcode_M115 ( ) ; <nl> break ; <nl> similarity index 95 % <nl> rename from Marlin / src / gcode / host / M114 . h <nl> rename to Marlin / src / gcode / host / M114 . cpp <nl> mmm a / Marlin / src / gcode / host / M114 . h <nl> ppp b / Marlin / src / gcode / host / M114 . cpp <nl> <nl> * <nl> * / <nl> <nl> + # include " . . / . . / inc / MarlinConfig . h " <nl> + <nl> + # include " . . / gcode . h " <nl> + # include " . . / . . / module / motion . h " <nl> + # include " . . / . . / module / stepper . h " <nl> + <nl> # ifdef M114_DETAIL <nl> <nl> void report_xyze ( const float pos [ XYZE ] , const uint8_t n = 4 , const uint8_t precision = 3 ) { <nl> <nl> / * * <nl> * M114 : Report current position to host <nl> * / <nl> - void gcode_M114 ( ) { <nl> + void GcodeSuite : : M114 ( ) { <nl> <nl> # ifdef M114_DETAIL <nl> if ( parser . seen ( ' D ' ) ) { <nl>
Move M114 to cpp
MarlinFirmware/Marlin
81b8fbf4d6c5336bbb1fb4c686522fbaa145e5a2
2017-09-21T21:27:00Z
mmm a / tensorflow / lite / delegates / gpu / cl / opencl_wrapper . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / opencl_wrapper . h <nl> namespace cl { <nl> Status LoadOpenCL ( ) ; <nl> void LoadOpenCLFunctions ( void * libopencl , bool is_pixel ) ; <nl> <nl> - typedef cl_int ( * PFN_clGetPlatformIDs ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetPlatformIDs ) ( <nl> cl_uint / * num_entries * / , cl_platform_id * / * platforms * / , <nl> cl_uint * / * num_platforms * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetPlatformInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetPlatformInfo ) ( <nl> cl_platform_id / * platform * / , cl_platform_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetDeviceIDs ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetDeviceIDs ) ( <nl> cl_platform_id / * platform * / , cl_device_type / * device_type * / , <nl> cl_uint / * num_entries * / , cl_device_id * / * devices * / , <nl> cl_uint * / * num_devices * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetDeviceInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetDeviceInfo ) ( <nl> cl_device_id / * device * / , cl_device_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clCreateSubDevices ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clCreateSubDevices ) ( <nl> cl_device_id / * in_device * / , <nl> const cl_device_partition_property * / * properties * / , <nl> cl_uint / * num_devices * / , cl_device_id * / * out_devices * / , <nl> cl_uint * / * num_devices_ret * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clRetainDevice ) ( cl_device_id / * device * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainDevice ) ( cl_device_id / * device * / ) <nl> CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clReleaseDevice ) ( cl_device_id / * device * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseDevice ) ( cl_device_id / * device * / ) <nl> CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_context ( * PFN_clCreateContext ) ( <nl> + typedef cl_context ( CL_API_CALL * PFN_clCreateContext ) ( <nl> const cl_context_properties * / * properties * / , cl_uint / * num_devices * / , <nl> const cl_device_id * / * devices * / , <nl> void ( CL_CALLBACK * / * pfn_notify * / ) ( const char * , const void * , size_t , <nl> void * ) , <nl> void * / * user_data * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_context ( * PFN_clCreateContextFromType ) ( <nl> + typedef cl_context ( CL_API_CALL * PFN_clCreateContextFromType ) ( <nl> const cl_context_properties * / * properties * / , <nl> cl_device_type / * device_type * / , <nl> void ( CL_CALLBACK * / * pfn_notify * / ) ( const char * , const void * , size_t , <nl> void * ) , <nl> void * / * user_data * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clRetainContext ) ( cl_context / * context * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainContext ) ( cl_context / * context * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clReleaseContext ) ( cl_context / * context * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseContext ) ( cl_context / * context * / ) <nl> 
CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetContextInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetContextInfo ) ( <nl> cl_context / * context * / , cl_context_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_command_queue ( * PFN_clCreateCommandQueueWithProperties ) ( <nl> + typedef cl_command_queue ( CL_API_CALL * PFN_clCreateCommandQueueWithProperties ) ( <nl> cl_context / * context * / , cl_device_id / * device * / , <nl> const cl_queue_properties * / * properties * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clRetainCommandQueue ) ( cl_command_queue / * command_queue * / ) <nl> - CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clReleaseCommandQueue ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainCommandQueue ) ( <nl> + cl_command_queue / * command_queue * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseCommandQueue ) ( <nl> cl_command_queue / * command_queue * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetCommandQueueInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetCommandQueueInfo ) ( <nl> cl_command_queue / * command_queue * / , <nl> cl_command_queue_info / * param_name * / , size_t / * param_value_size * / , <nl> void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_mem ( * PFN_clCreateBuffer ) ( <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreateBuffer ) ( <nl> cl_context / * context * / , cl_mem_flags / * flags * / , size_t / * size * / , <nl> void * / * host_ptr * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_mem ( * PFN_clCreateSubBuffer ) ( <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreateSubBuffer ) ( <nl> cl_mem / * buffer * / , cl_mem_flags / * flags * / , <nl> cl_buffer_create_type / * buffer_create_type * / , <nl> const void * / * buffer_create_info * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef cl_mem ( * PFN_clCreateImage ) ( <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreateImage ) ( <nl> cl_context / * context * / , cl_mem_flags / * flags * / , <nl> const cl_image_format * / * image_format * / , <nl> const cl_image_desc * / * image_desc * / , void * / * host_ptr * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_mem ( * PFN_clCreatePipe ) ( <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreatePipe ) ( <nl> cl_context / * context * / , cl_mem_flags / * flags * / , <nl> cl_uint / * pipe_packet_size * / , cl_uint / * pipe_max_packets * / , <nl> const cl_pipe_properties * / * properties * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clRetainMemObject ) ( cl_mem / * memobj * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainMemObject ) ( cl_mem / * memobj * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clReleaseMemObject ) ( cl_mem / * memobj * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseMemObject ) ( cl_mem / * memobj * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetSupportedImageFormats ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetSupportedImageFormats ) ( <nl> cl_context / * context * / , cl_mem_flags / * flags * / , <nl> cl_mem_object_type / * image_type * / , cl_uint / * 
num_entries * / , <nl> cl_image_format * / * image_formats * / , <nl> cl_uint * / * num_image_formats * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetMemObjectInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetMemObjectInfo ) ( <nl> cl_mem / * memobj * / , cl_mem_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetImageInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetImageInfo ) ( <nl> cl_mem / * image * / , cl_image_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetPipeInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetPipeInfo ) ( <nl> cl_mem / * pipe * / , cl_pipe_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clSetMemObjectDestructorCallback ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clSetMemObjectDestructorCallback ) ( <nl> cl_mem / * memobj * / , <nl> void ( CL_CALLBACK * / * pfn_notify * / ) ( cl_mem / * memobj * / , <nl> void * / * user_data * / ) , <nl> void * / * user_data * / ) CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef void * ( * PFN_clSVMAlloc ) ( <nl> + typedef void * ( CL_API_CALL * PFN_clSVMAlloc ) ( <nl> cl_context / * context * / , cl_svm_mem_flags / * flags * / , size_t / * size * / , <nl> cl_uint / * alignment * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef void ( * PFN_clSVMFree ) ( cl_context / * context * / , <nl> - void * / * svm_pointer * / ) <nl> + typedef void ( CL_API_CALL * PFN_clSVMFree ) ( cl_context / * context * / , <nl> + void * / * svm_pointer * / ) <nl> CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_sampler ( * PFN_clCreateSamplerWithProperties ) ( <nl> + typedef cl_sampler ( CL_API_CALL * PFN_clCreateSamplerWithProperties ) ( <nl> cl_context / * context * / , <nl> const cl_sampler_properties * / * normalized_coords * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clRetainSampler ) ( cl_sampler / * sampler * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainSampler ) ( cl_sampler / * sampler * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clReleaseSampler ) ( cl_sampler / * sampler * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseSampler ) ( cl_sampler / * sampler * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetSamplerInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetSamplerInfo ) ( <nl> cl_sampler / * sampler * / , cl_sampler_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_program ( * PFN_clCreateProgramWithSource ) ( <nl> + typedef cl_program ( CL_API_CALL * PFN_clCreateProgramWithSource ) ( <nl> cl_context / * context * / , cl_uint / * count * / , const char * * / * strings * / , <nl> const size_t * / * lengths * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_program ( * PFN_clCreateProgramWithBinary ) ( <nl> + typedef cl_program ( CL_API_CALL * PFN_clCreateProgramWithBinary ) ( <nl> cl_context / * context * / , cl_uint / * num_devices * / , <nl> const cl_device_id * / 
* device_list * / , const size_t * / * lengths * / , <nl> const unsigned char * * / * binaries * / , cl_int * / * binary_status * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_program ( * PFN_clCreateProgramWithBuiltInKernels ) ( <nl> + typedef cl_program ( CL_API_CALL * PFN_clCreateProgramWithBuiltInKernels ) ( <nl> cl_context / * context * / , cl_uint / * num_devices * / , <nl> const cl_device_id * / * device_list * / , const char * / * kernel_names * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clRetainProgram ) ( cl_program / * program * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainProgram ) ( cl_program / * program * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clReleaseProgram ) ( cl_program / * program * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseProgram ) ( cl_program / * program * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clBuildProgram ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clBuildProgram ) ( <nl> cl_program / * program * / , cl_uint / * num_devices * / , <nl> const cl_device_id * / * device_list * / , const char * / * options * / , <nl> void ( CL_CALLBACK * / * pfn_notify * / ) ( cl_program / * program * / , <nl> void * / * user_data * / ) , <nl> void * / * user_data * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clCompileProgram ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clCompileProgram ) ( <nl> cl_program / * program * / , cl_uint / * num_devices * / , <nl> const cl_device_id * / * device_list * / , const char * / * options * / , <nl> cl_uint / * num_input_headers * / , const cl_program * / * input_headers * / , <nl> typedef cl_int ( * PFN_clCompileProgram ) ( <nl> void ( CL_CALLBACK * / * pfn_notify * / ) ( cl_program / * program * / , <nl> void * / * user_data * / ) , <nl> void * / * user_data * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_program ( * PFN_clLinkProgram ) ( <nl> + typedef cl_program ( CL_API_CALL * PFN_clLinkProgram ) ( <nl> cl_context / * context * / , cl_uint / * num_devices * / , <nl> const cl_device_id * / * device_list * / , const char * / * options * / , <nl> cl_uint / * num_input_programs * / , const cl_program * / * input_programs * / , <nl> typedef cl_program ( * PFN_clLinkProgram ) ( <nl> void * / * user_data * / ) , <nl> void * / * user_data * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clUnloadPlatformCompiler ) ( cl_platform_id / * platform * / ) <nl> - CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clGetProgramInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clUnloadPlatformCompiler ) ( <nl> + cl_platform_id / * platform * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetProgramInfo ) ( <nl> cl_program / * program * / , cl_program_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetProgramBuildInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetProgramBuildInfo ) ( <nl> cl_program / * program * / , cl_device_id / * device * / , <nl> cl_program_build_info / * param_name * / , size_t / * param_value_size * / , <nl> void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_kernel ( * PFN_clCreateKernel ) ( <nl> + typedef cl_kernel ( 
CL_API_CALL * PFN_clCreateKernel ) ( <nl> cl_program / * program * / , const char * / * kernel_name * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clCreateKernelsInProgram ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clCreateKernelsInProgram ) ( <nl> cl_program / * program * / , cl_uint / * num_kernels * / , <nl> cl_kernel * / * kernels * / , <nl> cl_uint * / * num_kernels_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clRetainKernel ) ( cl_kernel / * kernel * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainKernel ) ( cl_kernel / * kernel * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clReleaseKernel ) ( cl_kernel / * kernel * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseKernel ) ( cl_kernel / * kernel * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clSetKernelArg ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clSetKernelArg ) ( <nl> cl_kernel / * kernel * / , cl_uint / * arg_index * / , size_t / * arg_size * / , <nl> const void * / * arg_value * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clSetKernelArgSVMPointer ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clSetKernelArgSVMPointer ) ( <nl> cl_kernel / * kernel * / , cl_uint / * arg_index * / , <nl> const void * / * arg_value * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clSetKernelExecInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clSetKernelExecInfo ) ( <nl> cl_kernel / * kernel * / , cl_kernel_exec_info / * param_name * / , <nl> size_t / * param_value_size * / , <nl> const void * / * param_value * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clGetKernelInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetKernelInfo ) ( <nl> cl_kernel / * kernel * / , cl_kernel_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetKernelArgInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetKernelArgInfo ) ( <nl> cl_kernel / * kernel * / , cl_uint / * arg_indx * / , <nl> cl_kernel_arg_info / * param_name * / , size_t / * param_value_size * / , <nl> void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clGetKernelWorkGroupInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetKernelWorkGroupInfo ) ( <nl> cl_kernel / * kernel * / , cl_device_id / * device * / , <nl> cl_kernel_work_group_info / * param_name * / , size_t / * param_value_size * / , <nl> void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clWaitForEvents ) ( cl_uint / * num_events * / , <nl> - const cl_event * / * event_list * / ) <nl> - CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clGetEventInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clWaitForEvents ) ( <nl> + cl_uint / * num_events * / , <nl> + const cl_event * / * event_list * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetEventInfo ) ( <nl> cl_event / * event * / , cl_event_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_event ( * PFN_clCreateUserEvent ) ( cl_context / * context * / , <nl> - cl_int * / * errcode_ret * / ) <nl> + typedef cl_event 
( CL_API_CALL * PFN_clCreateUserEvent ) ( cl_context / * context * / , <nl> + cl_int * / * errcode_ret * / ) <nl> CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef cl_int ( * PFN_clRetainEvent ) ( cl_event / * event * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clRetainEvent ) ( cl_event / * event * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clReleaseEvent ) ( cl_event / * event * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clReleaseEvent ) ( cl_event / * event * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clSetUserEventStatus ) ( cl_event / * event * / , <nl> - cl_int / * execution_status * / ) <nl> - CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef cl_int ( * PFN_clSetEventCallback ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clSetUserEventStatus ) ( <nl> + cl_event / * event * / , <nl> + cl_int / * execution_status * / ) CL_API_SUFFIX__VERSION_1_1 ; <nl> + typedef cl_int ( CL_API_CALL * PFN_clSetEventCallback ) ( <nl> cl_event / * event * / , cl_int / * command_exec_callback_type * / , <nl> void ( CL_CALLBACK * / * pfn_notify * / ) ( cl_event , cl_int , void * ) , <nl> void * / * user_data * / ) CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef cl_int ( * PFN_clGetEventProfilingInfo ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clGetEventProfilingInfo ) ( <nl> cl_event / * event * / , cl_profiling_info / * param_name * / , <nl> size_t / * param_value_size * / , void * / * param_value * / , <nl> size_t * / * param_value_size_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clFlush ) ( cl_command_queue / * command_queue * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clFlush ) ( cl_command_queue / * command_queue * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clFinish ) ( cl_command_queue / * command_queue * / ) <nl> + typedef cl_int ( CL_API_CALL * PFN_clFinish ) ( cl_command_queue / * command_queue * / ) <nl> CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueReadBuffer ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueReadBuffer ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * buffer * / , <nl> cl_bool / * blocking_read * / , size_t / * offset * / , size_t / * size * / , <nl> void * / * ptr * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueReadBufferRect ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueReadBufferRect ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * buffer * / , <nl> cl_bool / * blocking_read * / , const size_t * / * buffer_offset * / , <nl> const size_t * / * host_offset * / , const size_t * / * region * / , <nl> typedef cl_int ( * PFN_clEnqueueReadBufferRect ) ( <nl> void * / * ptr * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef cl_int ( * PFN_clEnqueueWriteBuffer ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueWriteBuffer ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * buffer * / , <nl> cl_bool / * blocking_write * / , size_t / * offset * / , size_t / * size * / , <nl> const void * / * ptr * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueWriteBufferRect ) ( <nl> + typedef cl_int ( CL_API_CALL * 
PFN_clEnqueueWriteBufferRect ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * buffer * / , <nl> cl_bool / * blocking_write * / , const size_t * / * buffer_offset * / , <nl> const size_t * / * host_offset * / , const size_t * / * region * / , <nl> typedef cl_int ( * PFN_clEnqueueWriteBufferRect ) ( <nl> const void * / * ptr * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef cl_int ( * PFN_clEnqueueFillBuffer ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueFillBuffer ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * buffer * / , <nl> const void * / * pattern * / , size_t / * pattern_size * / , size_t / * offset * / , <nl> size_t / * size * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clEnqueueCopyBuffer ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueCopyBuffer ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * src_buffer * / , <nl> cl_mem / * dst_buffer * / , size_t / * src_offset * / , size_t / * dst_offset * / , <nl> size_t / * size * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueCopyBufferRect ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueCopyBufferRect ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * src_buffer * / , <nl> cl_mem / * dst_buffer * / , const size_t * / * src_origin * / , <nl> const size_t * / * dst_origin * / , const size_t * / * region * / , <nl> typedef cl_int ( * PFN_clEnqueueCopyBufferRect ) ( <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_1 ; <nl> - typedef cl_int ( * PFN_clEnqueueReadImage ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueReadImage ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * image * / , <nl> cl_bool / * blocking_read * / , const size_t * / * origin [ 3 ] * / , <nl> const size_t * / * region [ 3 ] * / , size_t / * row_pitch * / , <nl> typedef cl_int ( * PFN_clEnqueueReadImage ) ( <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueWriteImage ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueWriteImage ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * image * / , <nl> cl_bool / * blocking_write * / , const size_t * / * origin [ 3 ] * / , <nl> const size_t * / * region [ 3 ] * / , size_t / * input_row_pitch * / , <nl> typedef cl_int ( * PFN_clEnqueueWriteImage ) ( <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueFillImage ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueFillImage ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * image * / , <nl> const void * / * fill_color * / , const size_t * / * origin [ 3 ] * / , <nl> const size_t * / * region [ 3 ] * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * 
PFN_clEnqueueCopyImage ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueCopyImage ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * src_image * / , <nl> cl_mem / * dst_image * / , const size_t * / * src_origin [ 3 ] * / , <nl> const size_t * / * dst_origin [ 3 ] * / , const size_t * / * region [ 3 ] * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueCopyImageToBuffer ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueCopyImageToBuffer ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * src_image * / , <nl> cl_mem / * dst_buffer * / , const size_t * / * src_origin [ 3 ] * / , <nl> const size_t * / * region [ 3 ] * / , size_t / * dst_offset * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueCopyBufferToImage ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueCopyBufferToImage ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * src_buffer * / , <nl> cl_mem / * dst_image * / , size_t / * src_offset * / , <nl> const size_t * / * dst_origin [ 3 ] * / , const size_t * / * region [ 3 ] * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef void * ( * PFN_clEnqueueMapBuffer ) ( <nl> + typedef void * ( CL_API_CALL * PFN_clEnqueueMapBuffer ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * buffer * / , <nl> cl_bool / * blocking_map * / , cl_map_flags / * map_flags * / , <nl> size_t / * offset * / , size_t / * size * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , cl_event * / * event * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef void * ( * PFN_clEnqueueMapImage ) ( <nl> + typedef void * ( CL_API_CALL * PFN_clEnqueueMapImage ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * image * / , <nl> cl_bool / * blocking_map * / , cl_map_flags / * map_flags * / , <nl> const size_t * / * origin [ 3 ] * / , const size_t * / * region [ 3 ] * / , <nl> typedef void * ( * PFN_clEnqueueMapImage ) ( <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , cl_event * / * event * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueUnmapMemObject ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueUnmapMemObject ) ( <nl> cl_command_queue / * command_queue * / , cl_mem / * memobj * / , <nl> void * / * mapped_ptr * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueMigrateMemObjects ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueMigrateMemObjects ) ( <nl> cl_command_queue / * command_queue * / , cl_uint / * num_mem_objects * / , <nl> const cl_mem * / * mem_objects * / , cl_mem_migration_flags / * flags * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clEnqueueNDRangeKernel ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueNDRangeKernel ) ( <nl> cl_command_queue / * 
command_queue * / , cl_kernel / * kernel * / , <nl> cl_uint / * work_dim * / , const size_t * / * global_work_offset * / , <nl> const size_t * / * global_work_size * / , const size_t * / * local_work_size * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueNativeKernel ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueNativeKernel ) ( <nl> cl_command_queue / * command_queue * / , <nl> void ( CL_CALLBACK * / * user_func * / ) ( void * ) , void * / * args * / , <nl> size_t / * cb_args * / , cl_uint / * num_mem_objects * / , <nl> typedef cl_int ( * PFN_clEnqueueNativeKernel ) ( <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueMarkerWithWaitList ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueMarkerWithWaitList ) ( <nl> cl_command_queue / * command_queue * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clEnqueueBarrierWithWaitList ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueBarrierWithWaitList ) ( <nl> cl_command_queue / * command_queue * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clEnqueueSVMFree ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueSVMFree ) ( <nl> cl_command_queue / * command_queue * / , cl_uint / * num_svm_pointers * / , <nl> void * [ ] / * svm_pointers [ ] * / , <nl> void ( CL_CALLBACK * / * pfn_free_func * / ) ( cl_command_queue / * queue * / , <nl> typedef cl_int ( * PFN_clEnqueueSVMFree ) ( <nl> void * / * user_data * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueSVMMemcpy ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueSVMMemcpy ) ( <nl> cl_command_queue / * command_queue * / , cl_bool / * blocking_copy * / , <nl> void * / * dst_ptr * / , const void * / * src_ptr * / , size_t / * size * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueSVMMemFill ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueSVMMemFill ) ( <nl> cl_command_queue / * command_queue * / , void * / * svm_ptr * / , <nl> const void * / * pattern * / , size_t / * pattern_size * / , size_t / * size * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueSVMMap ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueSVMMap ) ( <nl> cl_command_queue / * command_queue * / , cl_bool / * blocking_map * / , <nl> cl_map_flags / * flags * / , void * / * svm_ptr * / , size_t / * size * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef cl_int ( * PFN_clEnqueueSVMUnmap ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueSVMUnmap ) ( <nl> cl_command_queue / * 
command_queue * / , void * / * svm_ptr * / , <nl> cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> cl_event * / * event * / ) CL_API_SUFFIX__VERSION_2_0 ; <nl> - typedef void * ( * PFN_clGetExtensionFunctionAddressForPlatform ) ( <nl> + typedef void * ( CL_API_CALL * PFN_clGetExtensionFunctionAddressForPlatform ) ( <nl> cl_platform_id / * platform * / , <nl> const char * / * func_name * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_mem ( * PFN_clCreateImage2D ) ( <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreateImage2D ) ( <nl> cl_context / * context * / , cl_mem_flags / * flags * / , <nl> const cl_image_format * / * image_format * / , size_t / * image_width * / , <nl> size_t / * image_height * / , size_t / * image_row_pitch * / , <nl> void * / * host_ptr * / , cl_int * / * errcode_ret * / ) ; <nl> - typedef cl_mem ( * PFN_clCreateImage3D ) ( <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreateImage3D ) ( <nl> cl_context / * context * / , cl_mem_flags / * flags * / , <nl> const cl_image_format * / * image_format * / , size_t / * image_width * / , <nl> size_t / * image_height * / , size_t / * image_depth * / , <nl> size_t / * image_row_pitch * / , size_t / * image_slice_pitch * / , <nl> void * / * host_ptr * / , cl_int * / * errcode_ret * / ) ; <nl> - typedef cl_int ( * PFN_clEnqueueMarker ) ( cl_command_queue / * command_queue * / , <nl> - cl_event * / * event * / ) ; <nl> - typedef cl_int ( * PFN_clEnqueueWaitForEvents ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueMarker ) ( <nl> + cl_command_queue / * command_queue * / , cl_event * / * event * / ) ; <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueWaitForEvents ) ( <nl> cl_command_queue / * command_queue * / , cl_uint / * num_events * / , <nl> const cl_event * / * event_list * / ) ; <nl> - typedef cl_int ( * PFN_clEnqueueBarrier ) ( cl_command_queue / * command_queue * / ) ; <nl> - typedef cl_int ( * PFN_clUnloadCompiler ) ( ) ; <nl> - typedef void * ( * PFN_clGetExtensionFunctionAddress ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueBarrier ) ( <nl> + cl_command_queue / * command_queue * / ) ; <nl> + typedef cl_int ( CL_API_CALL * PFN_clUnloadCompiler ) ( ) ; <nl> + typedef void * ( CL_API_CALL * PFN_clGetExtensionFunctionAddress ) ( <nl> const char * / * func_name * / ) ; <nl> - typedef cl_command_queue ( * PFN_clCreateCommandQueue ) ( <nl> + typedef cl_command_queue ( CL_API_CALL * PFN_clCreateCommandQueue ) ( <nl> cl_context / * context * / , cl_device_id / * device * / , <nl> cl_command_queue_properties / * properties * / , cl_int * / * errcode_ret * / ) ; <nl> - typedef cl_sampler ( * PFN_clCreateSampler ) ( <nl> + typedef cl_sampler ( CL_API_CALL * PFN_clCreateSampler ) ( <nl> cl_context / * context * / , cl_bool / * normalized_coords * / , <nl> cl_addressing_mode / * addressing_mode * / , cl_filter_mode / * filter_mode * / , <nl> cl_int * / * errcode_ret * / ) ; <nl> - typedef cl_int ( * PFN_clEnqueueTask ) ( cl_command_queue / * command_queue * / , <nl> - cl_kernel / * kernel * / , <nl> - cl_uint / * num_events_in_wait_list * / , <nl> - const cl_event * / * event_wait_list * / , <nl> - cl_event * / * event * / ) ; <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueTask ) ( <nl> + cl_command_queue / * command_queue * / , cl_kernel / * kernel * / , <nl> + cl_uint / * num_events_in_wait_list * / , <nl> + const cl_event * / * event_wait_list * / , cl_event * / * event * / ) ; <nl> <nl> / / OpenGL sharing <nl> - typedef cl_mem ( * PFN_clCreateFromGLBuffer ) ( 
cl_context , cl_mem_flags , cl_GLuint , <nl> - int * ) ; <nl> - typedef cl_mem ( * PFN_clCreateFromGLTexture ) ( <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreateFromGLBuffer ) ( cl_context , cl_mem_flags , <nl> + cl_GLuint , int * ) ; <nl> + typedef cl_mem ( CL_API_CALL * PFN_clCreateFromGLTexture ) ( <nl> cl_context / * context * / , cl_mem_flags / * flags * / , cl_GLenum / * target * / , <nl> cl_GLint / * miplevel * / , cl_GLuint / * texture * / , <nl> cl_int * / * errcode_ret * / ) CL_API_SUFFIX__VERSION_1_2 ; <nl> - typedef cl_int ( * PFN_clEnqueueAcquireGLObjects ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueAcquireGLObjects ) ( <nl> cl_command_queue / * command_queue * / , cl_uint / * num_objects * / , <nl> const cl_mem * / * mem_objects * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , cl_event * / * event * / ) ; <nl> <nl> - typedef cl_int ( * PFN_clEnqueueReleaseGLObjects ) ( <nl> + typedef cl_int ( CL_API_CALL * PFN_clEnqueueReleaseGLObjects ) ( <nl> cl_command_queue / * command_queue * / , cl_uint / * num_objects * / , <nl> const cl_mem * / * mem_objects * / , cl_uint / * num_events_in_wait_list * / , <nl> const cl_event * / * event_wait_list * / , <nl> typedef void * CLeglDisplayKHR ; <nl> / / CLeglSyncKHR is an opaque handle to an EGLSync object <nl> typedef void * CLeglSyncKHR ; <nl> <nl> - typedef cl_event ( * PFN_clCreateEventFromEGLSyncKHR ) ( <nl> + typedef cl_event ( CL_API_CALL * PFN_clCreateEventFromEGLSyncKHR ) ( <nl> cl_context / * context * / , CLeglSyncKHR / * sync * / , <nl> CLeglDisplayKHR / * display * / , cl_int * / * errcode_ret * / ) ; <nl> <nl>
Added CL_API_CALL to wrapper .
tensorflow/tensorflow
ead4af4261eb126daef10f472ee3ea2b79e503e2
2019-09-06T16:54:03Z
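Editor's note on the record above: OpenCL entry points are declared with a calling-convention macro (CL_API_CALL, typically __stdcall on Windows and empty elsewhere), so function-pointer types loaded from the driver library must carry the same convention, or 32-bit Windows builds can corrupt the stack at the call boundary. The sketch below shows the general pattern with a made-up macro and function name; it is not the TensorFlow wrapper itself.

```cpp
#include <cstdint>

// Calling-convention macro in the spirit of CL_API_CALL: __stdcall on Windows,
// empty on other platforms. MY_API_CALL and PFN_getDeviceCount are illustrative names.
#if defined(_WIN32)
#define MY_API_CALL __stdcall
#else
#define MY_API_CALL
#endif

// Function-pointer alias for a dynamically loaded entry point. Omitting
// MY_API_CALL would still compile, but on 32-bit Windows the caller and the
// driver would then disagree about who cleans up the stack.
typedef int32_t (MY_API_CALL* PFN_getDeviceCount)(uint32_t flags,
                                                  uint32_t* out_count);

// In a real wrapper the pointer is filled in via dlsym()/GetProcAddress();
// here a local stub just shows that the alias is usable.
static int32_t MY_API_CALL FakeGetDeviceCount(uint32_t, uint32_t* out_count) {
    if (out_count) *out_count = 1;
    return 0;
}

int main() {
    PFN_getDeviceCount fn = &FakeGetDeviceCount;
    uint32_t count = 0;
    return (fn(0, &count) == 0 && count == 1) ? 0 : 1;
}
```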
mmm a / graph / prim . cpp <nl> ppp b / graph / prim . cpp <nl> <nl> # include < queue > <nl> # include < vector > <nl> <nl> - typedef std : : pair < int , int > PII ; <nl> + using PII = std : : pair < int , int > ; <nl> <nl> int prim ( int x , const std : : vector < std : : vector < PII > > & graph ) { <nl> / / priority queue to maintain edges with respect to weights <nl>
Using to replace typedef
TheAlgorithms/C-Plus-Plus
e5ba4691306ffa205914bae01e0f33633a67f54b
2020-08-08T16:29:02Z
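Editor's note on the record above: the prim.cpp change swaps a `typedef` for a C++11 `using` alias. A minimal, hypothetical sketch (not taken from the repository) showing that the two spellings declare the same type, and that `using` additionally supports alias templates, which `typedef` cannot express:

```cpp
#include <type_traits>
#include <utility>
#include <vector>

// Old spelling and new spelling declare the same alias.
typedef std::pair<int, int> PII_old;
using PII_new = std::pair<int, int>;
static_assert(std::is_same<PII_old, PII_new>::value, "identical types");

// Alias templates are only possible with `using`.
template <typename Weight>
using AdjacencyList = std::vector<std::vector<std::pair<int, Weight>>>;

int main() {
    PII_old a{1, 2};
    PII_new b{1, 2};
    AdjacencyList<int> graph(3);   // 3 vertices, weighted edges
    graph[0].push_back({1, 5});    // edge 0 -> 1 with weight 5
    return (a == b) ? 0 : 1;       // same type, compares equal
}
```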
mmm a / db / db . cpp <nl> ppp b / db / db . cpp <nl> void receivedInsert ( Message & m ) { <nl> void testTheDb ( ) { <nl> / * this is not validly formatted , if you query this namespace bad things will happen * / <nl> theDataFileMgr . insert ( " sys . unittest . pdfile " , ( void * ) " hello worldx " , 13 ) ; <nl> + theDataFileMgr . insert ( " sys . unittest . pdfile " , ( void * ) " hello worldx " , 13 ) ; <nl> <nl> JSObj j1 ( ( const char * ) & js1 ) ; <nl> deleteObjects ( " sys . unittest . delete " , j1 , false ) ; <nl> mmm a / db / pdfile . cpp <nl> ppp b / db / pdfile . cpp <nl> void DataFileMgr : : insert ( const char * ns , const void * buf , int len ) { <nl> Record * oldlast = e - > lastRecord . rec ( ) ; <nl> r - > prevOfs = e - > lastRecord . getOfs ( ) ; <nl> r - > nextOfs = DiskLoc : : NullOfs ; <nl> + oldlast - > nextOfs = loc . getOfs ( ) ; <nl> e - > lastRecord = loc ; <nl> } <nl> } <nl>
insert bug
mongodb/mongo
357a80d99efc13fe62e4995f820eafae2c72fb35
2007-10-30T17:43:44Z
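Editor's note on the record above: the pdfile.cpp fix links the previous tail record forward to the newly inserted one (`oldlast->nextOfs = loc.getOfs()`). A minimal, hypothetical doubly linked list sketch (plain pointers, not MongoDB's on-disk offsets) illustrating why that missing link matters: without updating the old tail's `next`, forward traversal never reaches the appended node.

```cpp
#include <cassert>

struct Record {
    Record* prev = nullptr;
    Record* next = nullptr;
};

struct Extent {
    Record* first = nullptr;
    Record* last = nullptr;

    void append(Record* r) {
        if (!last) {               // empty list: new record is both first and last
            first = last = r;
            return;
        }
        r->prev = last;
        r->next = nullptr;
        last->next = r;            // the link the bug fix adds: old tail points forward
        last = r;
    }
};

int main() {
    Extent e;
    Record a, b;
    e.append(&a);
    e.append(&b);
    // Forward traversal now reaches both records; without `last->next = r`
    // it would stop at `a` even though `last` already points to `b`.
    assert(e.first == &a && e.first->next == &b && e.last == &b);
    return 0;
}
```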
new file mode 100644 <nl> index 000000000000 . . 7c481e9d50d9 <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28829 - replacement - ismaterializable - cannot - substitute - with - a - non - materializable - type . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2017 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> + / / REQUIRES : asserts <nl> + / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + & _ = = nil <nl>
Merge pull request from practicalswift / swiftc - 28829 - replacement - ismaterializable - cannot - substitute - with - a - non - materializable - type
apple/swift
2c191675aa1a72432c4b9281e0df2330c9875cc9
2017-07-26T04:36:27Z
mmm a / ChangeLog <nl> ppp b / ChangeLog <nl> <nl> + 2013 - 04 - 17 : Version 3 . 18 . 0 <nl> + <nl> + Enabled pretenuring of fast literals in high promotion mode . <nl> + <nl> + Removed preparser library ; link preparser executable against full V8 . <nl> + <nl> + Fixed set - up of intrinsic ' s ' constructor ' properties . <nl> + ( Chromium issue 229445 ) <nl> + <nl> + ES6 symbols : extended V8 API to support symbols ( issue 2158 ) . <nl> + <nl> + Removed ARM support for VFP2 . <nl> + <nl> + Made __proto__ a real JavaScript accessor property . <nl> + ( issue 1949 and issue 2606 ) <nl> + <nl> + Performance and stability improvements on all platforms . <nl> + <nl> + <nl> 2013 - 04 - 04 : Version 3 . 17 . 16 <nl> <nl> Stack trace API : poison stack frames below the first strict mode frame . <nl> mmm a / src / version . cc <nl> ppp b / src / version . cc <nl> <nl> / / cannot be changed without changing the SCons build script . <nl> # define MAJOR_VERSION 3 <nl> # define MINOR_VERSION 18 <nl> - # define BUILD_NUMBER 0 <nl> + # define BUILD_NUMBER 1 <nl> # define PATCH_LEVEL 0 <nl> / / Use 1 for candidates and 0 otherwise . <nl> / / ( Boolean macro values are not supported by all preprocessors . ) <nl>
Prepare push to trunk . Now working on version 3 . 18 . 1 .
v8/v8
f4b9b3b32c5ae5b4e4533b6f6f4c3d0650b12e7c
2013-04-17T11:48:16Z
mmm a / lib / IRGen / IRGenModule . cpp <nl> ppp b / lib / IRGen / IRGenModule . cpp <nl> <nl> # include " clang / Basic / TargetInfo . h " <nl> # include " clang / CodeGen / CodeGenABITypes . h " <nl> # include " clang / CodeGen / ModuleBuilder . h " <nl> - # include " clang / Lex / Preprocessor . h " <nl> # include " clang / Frontend / CodeGenOptions . h " <nl> # include " llvm / IR / Constants . h " <nl> # include " llvm / IR / DataLayout . h " <nl> static clang : : CodeGenerator * createClangCodeGenerator ( ASTContext & Context , <nl> auto & CGO = Importer - > getClangCodeGenOpts ( ) ; <nl> CGO . OptimizationLevel = Opts . Optimize ? 3 : 0 ; <nl> CGO . DisableFPElim = Opts . DisableFPElim ; <nl> - auto & HSI = Importer - > getClangPreprocessor ( ) . getHeaderSearchInfo ( ) ; <nl> - auto & PPO = Importer - > getClangPreprocessor ( ) . getPreprocessorOpts ( ) ; <nl> auto * ClangCodeGen = clang : : CreateLLVMCodeGen ( ClangContext . getDiagnostics ( ) , <nl> - ModuleName , HSI , PPO , CGO , <nl> - LLVMContext ) ; <nl> + ModuleName , CGO , LLVMContext ) ; <nl> ClangCodeGen - > Initialize ( ClangContext ) ; <nl> <nl> return ClangCodeGen ; <nl>
Revert " Adapt to Apple clang change fa1a389c72b32cf46195ef88f5f67b341372a4cb . "
apple/swift
fc1e5c7d948e372e3d5a5c973b92a61ee85860e9
2015-03-23T21:07:38Z
mmm a / emscripten - version . txt <nl> ppp b / emscripten - version . txt <nl> <nl> - 1 . 34 . 3 <nl> + 1 . 34 . 4 <nl> <nl>
1 . 34 . 4
emscripten-core/emscripten
591d309aba13db2987739c1bc6f807c7e47e80c6
2015-08-05T04:30:39Z
mmm a / scene / 3d / camera . cpp <nl> ppp b / scene / 3d / camera . cpp <nl> Camera : : Camera ( ) { <nl> current = false ; <nl> force_change = false ; <nl> mode = PROJECTION_PERSPECTIVE ; <nl> - set_perspective ( 65 . 0 , 0 . 1 , 100 . 0 ) ; <nl> + set_perspective ( 70 . 0 , 0 . 05 , 100 . 0 ) ; <nl> keep_aspect = KEEP_HEIGHT ; <nl> layers = 0xfffff ; <nl> v_offset = 0 ; <nl> mmm a / servers / visual / visual_server_scene . h <nl> ppp b / servers / visual / visual_server_scene . h <nl> class VisualServerScene { <nl> Camera ( ) { <nl> <nl> visible_layers = 0xFFFFFFFF ; <nl> - fov = 65 ; <nl> + fov = 70 ; <nl> type = PERSPECTIVE ; <nl> - znear = 0 . 1 ; <nl> + znear = 0 . 05 ; <nl> zfar = 100 ; <nl> size = 1 . 0 ; <nl> vaspect = false ; <nl>
Merge pull request from Calinou / tweak - camera - node - defaults
godotengine/godot
f2c1fd2e8f3734a7b8df9821af329c20e93ef72e
2017-11-26T13:53:28Z
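To put the new defaults in perspective, here is a small standalone C++ calculation (not Godot code) of the visible height at the near clipping plane, using the standard relation h = 2 * znear * tan(fov / 2) for a vertical field of view.

    #include <cmath>
    #include <cstdio>

    // Visible vertical extent at the near plane for a vertical FOV in degrees.
    double near_plane_height(double fov_degrees, double znear) {
        const double kPi = 3.14159265358979323846;
        const double fov_rad = fov_degrees * kPi / 180.0;
        return 2.0 * znear * std::tan(fov_rad / 2.0);
    }

    int main() {
        std::printf("old defaults (fov 65, znear 0.1):  h = %.4f\n", near_plane_height(65.0, 0.1));
        std::printf("new defaults (fov 70, znear 0.05): h = %.4f\n", near_plane_height(70.0, 0.05));
        // Halving znear lets geometry approach twice as close before being
        // clipped, at the cost of some depth-buffer precision; the wider FOV
        // is a presentation choice and does not affect clipping.
        return 0;
    }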
mmm a / src / library_sdl . js <nl> ppp b / src / library_sdl . js <nl> var LibrarySDL = { <nl> defaults : { <nl> width : 320 , <nl> height : 200 , <nl> - copyOnLock : true <nl> + / / If true , SDL_LockSurface will copy the contents of each surface back to the Emscripten HEAP so that C code can access it . If false , <nl> + / / the surface contents are captured only back to JS code . <nl> + copyOnLock : true , <nl> + / / If true , SDL_LockSurface will discard the contents of each surface when SDL_LockSurface ( ) is called . This greatly improves performance <nl> + / / of SDL_LockSurface ( ) . If discardOnLock is true , copyOnLock is ignored . <nl> + discardOnLock : false , <nl> + / / If true , emulate compatibility with desktop SDL by ignoring alpha on the screen frontbuffer canvas . Setting this to false will improve <nl> + / / performance considerably and enables alpha - blending on the frontbuffer , so be sure to properly write 0xFF alpha for opaque pixels <nl> + / / if you set this to false ! <nl> + opaqueFrontBuffer : true <nl> } , <nl> <nl> version : null , <nl> var LibrarySDL = { <nl> <nl> if ( surf = = SDL . screen & & Module . screenIsReadOnly & & surfData . image ) return 0 ; <nl> <nl> - surfData . image = surfData . ctx . getImageData ( 0 , 0 , surfData . width , surfData . height ) ; <nl> - if ( surf = = SDL . screen ) { <nl> + if ( SDL . defaults . discardOnLock ) { <nl> + if ( ! surfData . image ) { <nl> + surfData . image = surfData . ctx . createImageData ( surfData . width , surfData . height ) ; <nl> + } <nl> + if ( ! SDL . defaults . opaqueFrontBuffer ) return ; <nl> + } else { <nl> + surfData . image = surfData . ctx . getImageData ( 0 , 0 , surfData . width , surfData . height ) ; <nl> + } <nl> + <nl> + / / Emulate desktop behavior and kill alpha values on the locked surface . ( very costly ! ) Set SDL . defaults . opaqueFrontBuffer = false <nl> + / / if you don ' t want this . <nl> + if ( surf = = SDL . screen & & SDL . defaults . opaqueFrontBuffer ) { <nl> var data = surfData . image . data ; <nl> var num = data . length ; <nl> for ( var i = 0 ; i < num / 4 ; i + + ) { <nl> var LibrarySDL = { <nl> } <nl> } <nl> <nl> - if ( SDL . defaults . copyOnLock ) { <nl> + if ( SDL . defaults . copyOnLock & & ! SDL . defaults . discardOnLock ) { <nl> / / Copy pixel data to somewhere accessible to ' C / C + + ' <nl> if ( surfData . isFlagSet ( 0x00200000 / * SDL_HWPALETTE * / ) ) { <nl> / / If this is neaded then <nl> var LibrarySDL = { <nl> } <nl> } else { <nl> var data32 = new Uint32Array ( data . buffer ) ; <nl> - num = data32 . length ; <nl> - if ( isScreen ) { <nl> + if ( isScreen & & SDL . defaults . opaqueFrontBuffer ) { <nl> + num = data32 . length ; <nl> while ( dst < num ) { <nl> / / HEAP32 [ src + + ] is an optimization . Instead , we could do { { { makeGetValue ( ' buffer ' , ' dst ' , ' i32 ' ) } } } ; <nl> data32 [ dst + + ] = HEAP32 [ src + + ] | 0xff000000 ; <nl> } <nl> } else { <nl> - while ( dst < num ) { <nl> - data32 [ dst + + ] = HEAP32 [ src + + ] ; <nl> - } <nl> + data32 . set ( HEAP32 . subarray ( src , src + data32 . length ) ) ; <nl> } <nl> } <nl> # else <nl> mmm a / tests / hello_world_sdl . cpp <nl> ppp b / tests / hello_world_sdl . cpp <nl> <nl> # include < stdio . h > <nl> # include < SDL / SDL . h > <nl> <nl> + # ifdef __EMSCRIPTEN__ <nl> + # include < emscripten . h > <nl> + # endif <nl> <nl> extern " C " int main ( int argc , char * * argv ) { <nl> printf ( " hello , world ! 
\ n " ) ; <nl> extern " C " int main ( int argc , char * * argv ) { <nl> SDL_Init ( SDL_INIT_VIDEO ) ; <nl> SDL_Surface * screen = SDL_SetVideoMode ( 256 , 256 , 32 , SDL_SWSURFACE ) ; <nl> <nl> + # ifdef TEST_SDL_LOCK_OPTS <nl> + EM_ASM ( " SDL . defaults . copyOnLock = false ; SDL . defaults . discardOnLock = true ; SDL . defaults . opaqueFrontBuffer = false ; " ) ; <nl> + # endif <nl> + <nl> if ( SDL_MUSTLOCK ( screen ) ) SDL_LockSurface ( screen ) ; <nl> for ( int i = 0 ; i < 256 ; i + + ) { <nl> for ( int j = 0 ; j < 256 ; j + + ) { <nl> - / / alpha component is actually ignored , since this is to the screen <nl> - * ( ( Uint32 * ) screen - > pixels + i * 256 + j ) = SDL_MapRGBA ( screen - > format , i , j , 255 - i , ( i + j ) % 255 ) ; <nl> + # ifdef TEST_SDL_LOCK_OPTS <nl> + / / Alpha behaves like in the browser , so write proper opaque pixels . <nl> + int alpha = 255 ; <nl> + # else <nl> + / / To emulate native behavior with blitting to screen , alpha component is ignored . Test that it is so by outputting <nl> + / / data ( and testing that it does get discarded ) <nl> + int alpha = ( i + j ) % 255 ; <nl> + # endif <nl> + * ( ( Uint32 * ) screen - > pixels + i * 256 + j ) = SDL_MapRGBA ( screen - > format , i , j , 255 - i , alpha ) ; <nl> } <nl> } <nl> if ( SDL_MUSTLOCK ( screen ) ) SDL_UnlockSurface ( screen ) ; <nl> mmm a / tests / test_browser . py <nl> ppp b / tests / test_browser . py <nl> def test_compressed_file ( self ) : <nl> def test_sdl_swsurface ( self ) : <nl> self . btest ( ' sdl_swsurface . c ' , expected = ' 1 ' ) <nl> <nl> + def test_sdl_surface_lock_opts ( self ) : <nl> + # Test Emscripten - specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface . <nl> + self . btest ( ' hello_world_sdl . cpp ' , reference = ' htmltest . png ' , message = ' You should see " hello , world ! " and a colored cube . ' , args = [ ' - DTEST_SDL_LOCK_OPTS ' ] ) <nl> + <nl> def test_sdl_image ( self ) : <nl> # load an image file , get pixel data . Also O2 coverage for - - preload - file , and memory - init <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . jpg ' ) , os . path . join ( self . get_dir ( ) , ' screenshot . jpg ' ) ) <nl>
Optimize Emscripten SDL handwritten JS - implemented SDL_LockSurface ( ) and SDL_UnlockSurface ( ) by adding an option to perform discarding locks that are fast no - ops and to avoid the screen - is - always - opaque emulation which fills alpha = 0xFF bytes to each pixel on lock and unlock . Call something like EM_ASM ( " SDL . defaults . copyOnLock = false ; SDL . defaults . discardOnLock = true ; SDL . defaults . opaqueFrontBuffer = false ; " ) ; at startup to enable these .
emscripten-core/emscripten
270bf2fcdeab375712e88b0e4fb903631561d7dc
2014-10-06T12:20:53Z
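A condensed C++ sketch of the opt-in pattern this commit adds, modelled directly on the hello_world_sdl.cpp test in the diff: the SDL.defaults option names are taken from the diff itself, and with opaqueFrontBuffer disabled the program must write opaque alpha on its own.

    #include <SDL/SDL.h>
    #ifdef __EMSCRIPTEN__
    #include <emscripten.h>
    #endif

    extern "C" int main(int argc, char** argv) {
        SDL_Init(SDL_INIT_VIDEO);
        SDL_Surface* screen = SDL_SetVideoMode(256, 256, 32, SDL_SWSURFACE);

    #ifdef __EMSCRIPTEN__
        // Enable the faster lock behaviour introduced by this commit.
        EM_ASM("SDL.defaults.copyOnLock = false; SDL.defaults.discardOnLock = true; SDL.defaults.opaqueFrontBuffer = false;");
    #endif

        if (SDL_MUSTLOCK(screen)) SDL_LockSurface(screen);
        for (int i = 0; i < 256; i++)
            for (int j = 0; j < 256; j++)
                // Alpha now reaches the canvas, so write 0xFF for opaque pixels.
                *((Uint32*)screen->pixels + i * 256 + j) =
                    SDL_MapRGBA(screen->format, i, j, 255 - i, 255);
        if (SDL_MUSTLOCK(screen)) SDL_UnlockSurface(screen);
        return 0;
    }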
mmm a / BUILD <nl> ppp b / BUILD <nl> grpc_cc_library ( <nl> srcs = [ " src / core / lib / debug / trace . cc " ] , <nl> hdrs = [ " src / core / lib / debug / trace . h " ] , <nl> language = " c + + " , <nl> + public_hdrs = GRPC_PUBLIC_HDRS , <nl> deps = [ <nl> " grpc_codegen " , <nl> " : gpr " , <nl>
Mirroring BUILD changes - missed public headers
grpc/grpc
12587d5df5f6ce4a6601ae515ab097a05c989906
2017-10-05T15:53:41Z
mmm a / docs / en / interfaces / jdbc . md <nl> ppp b / docs / en / interfaces / jdbc . md <nl> <nl> <nl> There is an official JDBC driver for ClickHouse . See [ here ] ( https : / / github . com / yandex / clickhouse - jdbc ) . <nl> <nl> + JDBC drivers implemented by other organizations : <nl> + <nl> + - [ ClickHouse - Native - JDBC ] ( https : / / github . com / housepower / ClickHouse - Native - JDBC ) <nl>
Add a new JDBC driver implementation to the document
ClickHouse/ClickHouse
56d7e870d25a22a3f48dda499e3bfc9c09dee17b
2018-06-17T17:47:17Z
mmm a / tensorflow / contrib / distribute / python / BUILD <nl> ppp b / tensorflow / contrib / distribute / python / BUILD <nl> cuda_py_test ( <nl> " / / tensorflow / python / estimator : estimator_py " , <nl> ] , <nl> tags = [ <nl> + " no_oss " , <nl> " no_pip " , <nl> ] , <nl> ) <nl>
Disable flaky values_test
tensorflow/tensorflow
32a54c9f4fc2e66c7982291be2eab7a321c6234e
2018-11-17T02:41:52Z
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> <nl> - SET ( CMAKE_C_COMPILER " gcc - 5 " ) <nl> - SET ( CMAKE_CXX_COMPILER " g + + - 5 " ) <nl> - <nl> - project ( METRICA ) <nl> + project ( ClickHouse ) <nl> cmake_minimum_required ( VERSION 2 . 6 ) <nl> <nl> # отключаем варнинг о том , что в каждой директории должен быть CMakeLists . txt <nl>
Merge
ClickHouse/ClickHouse
3a7d8d8d8ba585de91afbf47c0e9757a562a85a7
2016-05-22T22:37:32Z
mmm a / xbmc / cores / paplayer / AudioDecoder . cpp <nl> ppp b / xbmc / cores / paplayer / AudioDecoder . cpp <nl> <nl> # include " utils / log . h " <nl> # include < math . h > <nl> <nl> - # define INTERNAL_BUFFER_LENGTH sizeof ( float ) * 2 * 44100 / / float samples , 2 channels , 44100 samples per sec = 1 second <nl> - <nl> CAudioDecoder : : CAudioDecoder ( ) <nl> { <nl> m_codec = NULL ; <nl> CAudioDecoder : : CAudioDecoder ( ) <nl> <nl> m_status = STATUS_NO_FILE ; <nl> m_canPlay = false ; <nl> - <nl> - m_gaplessBufferSize = 0 ; <nl> - m_blockSize = 4 ; <nl> } <nl> <nl> CAudioDecoder : : ~ CAudioDecoder ( ) <nl> void CAudioDecoder : : Destroy ( ) <nl> m_status = STATUS_NO_FILE ; <nl> <nl> m_pcmBuffer . Destroy ( ) ; <nl> - m_gaplessBufferSize = 0 ; <nl> <nl> if ( m_codec ) <nl> delete m_codec ; <nl> void CAudioDecoder : : Destroy ( ) <nl> m_canPlay = false ; <nl> } <nl> <nl> - bool CAudioDecoder : : Create ( const CFileItem & file , int64_t seekOffset , unsigned int nBufferSize ) <nl> + bool CAudioDecoder : : Create ( const CFileItem & file , int64_t seekOffset ) <nl> { <nl> Destroy ( ) ; <nl> <nl> CSingleLock lock ( m_critSection ) ; <nl> - / / create our pcm buffer <nl> - m_pcmBuffer . Create ( ( int ) std : : max < unsigned int > ( 2 , nBufferSize ) * <nl> - INTERNAL_BUFFER_LENGTH ) ; <nl> <nl> / / reset our playback timing variables <nl> m_eof = false ; <nl> bool CAudioDecoder : : Create ( const CFileItem & file , int64_t seekOffset , unsigned i <nl> Destroy ( ) ; <nl> return false ; <nl> } <nl> - m_blockSize = m_codec - > m_Channels * m_codec - > m_BitsPerSample / 8 ; <nl> - <nl> + unsigned int blockSize = ( m_codec - > m_BitsPerSample > > 3 ) * m_codec - > GetChannelInfo ( ) . Count ( ) ; <nl> + <nl> + / * allocate the pcmBuffer for 2 seconds of audio * / <nl> + m_pcmBuffer . Create ( 2 * blockSize * m_codec - > m_SampleRate ) ; <nl> + <nl> / / set total time from the given tag <nl> if ( file . HasMusicInfoTag ( ) & & file . GetMusicInfoTag ( ) - > GetDuration ( ) ) <nl> m_codec - > SetTotalTime ( file . GetMusicInfoTag ( ) - > GetDuration ( ) ) ; <nl> bool CAudioDecoder : : Create ( const CFileItem & file , int64_t seekOffset , unsigned i <nl> return true ; <nl> } <nl> <nl> - void CAudioDecoder : : GetDataFormat ( unsigned int * channels , unsigned int * samplerate , unsigned int * bitspersample ) <nl> + void CAudioDecoder : : GetDataFormat ( CAEChannelInfo * channelInfo , unsigned int * samplerate , unsigned int * encodedSampleRate , enum AEDataFormat * dataFormat ) <nl> { <nl> if ( ! m_codec ) <nl> return ; <nl> <nl> - if ( channels ) * channels = m_codec - > m_Channels ; <nl> - if ( samplerate ) * samplerate = m_codec - > m_SampleRate ; <nl> - if ( bitspersample ) * bitspersample = m_codec - > m_BitsPerSample ; <nl> + if ( channelInfo ) * channelInfo = m_codec - > GetChannelInfo ( ) ; <nl> + if ( samplerate ) * samplerate = m_codec - > m_SampleRate ; <nl> + if ( encodedSampleRate ) * encodedSampleRate = m_codec - > m_EncodedSampleRate ; <nl> + if ( dataFormat ) * dataFormat = m_codec - > m_DataFormat ; <nl> } <nl> <nl> int64_t CAudioDecoder : : Seek ( int64_t time ) <nl> unsigned int CAudioDecoder : : GetDataSize ( ) <nl> / / check for end of file and end of buffer <nl> if ( m_status = = STATUS_ENDING & & m_pcmBuffer . getMaxReadSize ( ) < PACKET_SIZE ) <nl> m_status = STATUS_ENDED ; <nl> - return m_pcmBuffer . getMaxReadSize ( ) / sizeof ( float ) ; <nl> + return std : : min ( m_pcmBuffer . 
getMaxReadSize ( ) / ( m_codec - > m_BitsPerSample > > 3 ) , ( unsigned int ) OUTPUT_SAMPLES ) ; <nl> } <nl> <nl> - void * CAudioDecoder : : GetData ( unsigned int size ) <nl> + void * CAudioDecoder : : GetData ( unsigned int samples ) <nl> { <nl> - if ( size > OUTPUT_SAMPLES ) <nl> + unsigned int size = samples * ( m_codec - > m_BitsPerSample > > 3 ) ; <nl> + if ( size > sizeof ( m_outputBuffer ) ) <nl> { <nl> - CLog : : Log ( LOGWARNING , " CAudioDecoder : : GetData ( ) more bytes / samples ( % i ) requested than we have to give ( % i ) ! " , size , OUTPUT_SAMPLES ) ; <nl> - size = OUTPUT_SAMPLES ; <nl> + CLog : : Log ( LOGERROR , " CAudioDecoder : : GetData - More data was requested then we have space to buffer ! " ) ; <nl> + return NULL ; <nl> } <nl> - / / first copy anything from our gapless buffer <nl> - if ( m_gaplessBufferSize > size ) <nl> + <nl> + if ( size > m_pcmBuffer . getMaxReadSize ( ) ) <nl> { <nl> - memcpy ( m_outputBuffer , m_gaplessBuffer , size * sizeof ( float ) ) ; <nl> - memmove ( m_gaplessBuffer , m_gaplessBuffer + size , ( m_gaplessBufferSize - size ) * sizeof ( float ) ) ; <nl> - m_gaplessBufferSize - = size ; <nl> - return m_outputBuffer ; <nl> + CLog : : Log ( LOGWARNING , " CAudioDecoder : : GetData ( ) more bytes / samples ( % i ) requested than we have to give ( % i ) ! " , size , m_pcmBuffer . getMaxReadSize ( ) ) ; <nl> + size = m_pcmBuffer . getMaxReadSize ( ) ; <nl> } <nl> - if ( m_gaplessBufferSize ) <nl> - memcpy ( m_outputBuffer , m_gaplessBuffer , m_gaplessBufferSize * sizeof ( float ) ) ; <nl> <nl> - if ( m_pcmBuffer . ReadData ( ( char * ) ( m_outputBuffer + m_gaplessBufferSize ) , ( size - m_gaplessBufferSize ) * sizeof ( float ) ) ) <nl> + if ( m_pcmBuffer . ReadData ( ( char * ) m_outputBuffer , size ) ) <nl> { <nl> - m_gaplessBufferSize = 0 ; <nl> - / / check for end of file + end of buffer <nl> - if ( m_status = = STATUS_ENDING & & m_pcmBuffer . getMaxReadSize ( ) < ( int ) ( OUTPUT_SAMPLES * sizeof ( float ) ) ) <nl> - { <nl> - CLog : : Log ( LOGINFO , " CAudioDecoder : : GetData ( ) ending track - only have % lu samples left " , ( unsigned long ) ( m_pcmBuffer . getMaxReadSize ( ) / sizeof ( float ) ) ) ; <nl> + if ( m_status = = STATUS_ENDING & & m_pcmBuffer . getMaxReadSize ( ) = = 0 ) <nl> m_status = STATUS_ENDED ; <nl> - } <nl> + <nl> return m_outputBuffer ; <nl> } <nl> - CLog : : Log ( LOGERROR , " CAudioDecoder : : GetData ( ) ReadBinary failed with % i samples " , size - m_gaplessBufferSize ) ; <nl> + <nl> + CLog : : Log ( LOGERROR , " CAudioDecoder : : GetData ( ) ReadBinary failed with % i samples " , samples ) ; <nl> return NULL ; <nl> } <nl> <nl> - void CAudioDecoder : : PrefixData ( void * data , unsigned int size ) <nl> - { <nl> - if ( ! data ) <nl> - { <nl> - CLog : : Log ( LOGERROR , " CAudioDecoder : : PrefixData ( ) failed - null data pointer " ) ; <nl> - return ; <nl> - } <nl> - m_gaplessBufferSize = std : : min < unsigned int > ( PACKET_SIZE , size ) ; <nl> - memcpy ( m_gaplessBuffer , data , m_gaplessBufferSize * sizeof ( float ) ) ; <nl> - if ( m_gaplessBufferSize ! 
= size ) <nl> - CLog : : Log ( LOGWARNING , " CAudioDecoder : : PrefixData - losing % i bytes of audio data in track transistion " , size - m_gaplessBufferSize ) ; <nl> - } <nl> - <nl> int CAudioDecoder : : ReadSamples ( int numsamples ) <nl> { <nl> if ( m_status = = STATUS_NO_FILE | | m_status = = STATUS_ENDING | | m_status = = STATUS_ENDED ) <nl> int CAudioDecoder : : ReadSamples ( int numsamples ) <nl> CSingleLock lock ( m_critSection ) ; <nl> <nl> / / Read in more data <nl> - int maxsize = std : : min < int > ( INPUT_SAMPLES , <nl> - ( m_pcmBuffer . getMaxWriteSize ( ) / ( int ) ( sizeof ( float ) ) ) ) ; <nl> + int maxsize = std : : min < int > ( INPUT_SAMPLES , m_pcmBuffer . getMaxWriteSize ( ) / ( m_codec - > m_BitsPerSample > > 3 ) ) ; <nl> numsamples = std : : min < int > ( numsamples , maxsize ) ; <nl> - numsamples - = ( numsamples % m_codec - > m_Channels ) ; / / make sure it ' s divisible by our number of channels <nl> + numsamples - = ( numsamples % m_codec - > GetChannelInfo ( ) . Count ( ) ) ; / / make sure it ' s divisible by our number of channels <nl> if ( numsamples ) <nl> { <nl> - int actualsamples = 0 ; <nl> - / / if our codec sends floating point , then read it <nl> - int result = READ_ERROR ; <nl> - if ( m_codec - > HasFloatData ( ) ) <nl> - result = m_codec - > ReadSamples ( m_inputBuffer , numsamples , & actualsamples ) ; <nl> - else <nl> - result = ReadPCMSamples ( m_inputBuffer , numsamples , & actualsamples ) ; <nl> - <nl> - if ( result ! = READ_ERROR & & actualsamples ) <nl> - { <nl> - / / do any post processing of the audio ( eg replaygain etc . ) <nl> - ProcessAudio ( m_inputBuffer , actualsamples ) ; <nl> + int readSize = 0 ; <nl> + int result = m_codec - > ReadPCM ( m_pcmInputBuffer , numsamples * ( m_codec - > m_BitsPerSample > > 3 ) , & readSize ) ; <nl> <nl> + if ( result ! = READ_ERROR & & readSize ) <nl> + { <nl> / / move it into our buffer <nl> - m_pcmBuffer . WriteData ( ( char * ) m_inputBuffer , actualsamples * sizeof ( float ) ) ; <nl> + m_pcmBuffer . WriteData ( ( char * ) m_pcmInputBuffer , readSize ) ; <nl> <nl> / / update status <nl> if ( m_status = = STATUS_QUEUING & & m_pcmBuffer . getMaxReadSize ( ) > m_pcmBuffer . getSize ( ) * 0 . 9 ) <nl> int CAudioDecoder : : ReadSamples ( int numsamples ) <nl> return RET_SLEEP ; / / nothing to do <nl> } <nl> <nl> - void CAudioDecoder : : ProcessAudio ( float * data , int numsamples ) <nl> - { <nl> - if ( g_guiSettings . m_replayGain . iType ! = REPLAY_GAIN_NONE ) <nl> - { <nl> - float gainFactor = GetReplayGain ( ) ; <nl> - for ( int i = 0 ; i < numsamples ; i + + ) <nl> - { <nl> - data [ i ] * = gainFactor ; <nl> - / / check the range ( is this needed here ? ) <nl> - if ( data [ i ] > 1 . 0f ) data [ i ] = 1 . 0f ; <nl> - if ( data [ i ] < - 1 . 0f ) data [ i ] = - 1 . 0f ; <nl> - } <nl> - } <nl> - } <nl> - <nl> float CAudioDecoder : : GetReplayGain ( ) <nl> { <nl> # define REPLAY_GAIN_DEFAULT_LEVEL 89 . 0f <nl> + if ( g_guiSettings . m_replayGain . iType = = REPLAY_GAIN_NONE ) <nl> + return 0 . 0f ; <nl> + <nl> / / Compute amount of gain <nl> float replaydB = ( float ) g_guiSettings . m_replayGain . iNoGainPreAmp ; <nl> float peak = 0 . 0f ; <nl> float CAudioDecoder : : GetReplayGain ( ) <nl> if ( fabs ( peak * replaygain ) > 1 . 0f ) <nl> replaygain = 1 . 
0f / fabs ( peak ) ; <nl> } <nl> - return replaygain ; <nl> - } <nl> - <nl> - int CAudioDecoder : : ReadPCMSamples ( float * buffer , int numsamples , int * actualsamples ) <nl> - { <nl> - / / convert samples to bytes <nl> - numsamples * = ( m_codec - > m_BitsPerSample / 8 ) ; <nl> <nl> - / / read in our PCM data <nl> - int result = m_codec - > ReadPCM ( m_pcmInputBuffer , numsamples , actualsamples ) ; <nl> - <nl> - / / convert to floats ( - 1 . . . 1 ) range <nl> - int i ; <nl> - switch ( m_codec - > m_BitsPerSample ) <nl> - { <nl> - case 8 : <nl> - for ( i = 0 ; i < * actualsamples ; i + + ) <nl> - m_inputBuffer [ i ] = 1 . 0f / 0x7f * ( m_pcmInputBuffer [ i ] - 128 ) ; <nl> - break ; <nl> - case 16 : <nl> - * actualsamples / = 2 ; <nl> - for ( i = 0 ; i < * actualsamples ; i + + ) <nl> - m_inputBuffer [ i ] = 1 . 0f / 0x7fff * ( ( short * ) m_pcmInputBuffer ) [ i ] ; <nl> - break ; <nl> - case 24 : <nl> - * actualsamples / = 3 ; <nl> - for ( i = 0 ; i < * actualsamples ; i + + ) <nl> - m_inputBuffer [ i ] = 1 . 0f / 0x7fffff * ( ( ( int ) m_pcmInputBuffer [ 3 * i ] < < 0 ) | ( ( int ) m_pcmInputBuffer [ 3 * i + 1 ] < < 8 ) | ( ( ( int ) ( ( char * ) m_pcmInputBuffer ) [ 3 * i + 2 ] ) < < 16 ) ) ; <nl> - break ; <nl> - } <nl> - return result ; <nl> + return replaygain ; <nl> } <nl> <nl> mmm a / xbmc / cores / paplayer / AudioDecoder . h <nl> ppp b / xbmc / cores / paplayer / AudioDecoder . h <nl> <nl> # include " ICodec . h " <nl> # include " threads / CriticalSection . h " <nl> # include " utils / RingBuffer . h " <nl> + # include " cores / AudioEngine / Utils / AEChannelInfo . h " <nl> <nl> class CFileItem ; <nl> <nl> class CAudioDecoder <nl> CAudioDecoder ( ) ; <nl> ~ CAudioDecoder ( ) ; <nl> <nl> - bool Create ( const CFileItem & file , int64_t seekOffset , unsigned int nBufferSize ) ; <nl> + bool Create ( const CFileItem & file , int64_t seekOffset ) ; <nl> void Destroy ( ) ; <nl> <nl> int ReadSamples ( int numsamples ) ; <nl> class CAudioDecoder <nl> int GetStatus ( ) { return m_status ; } ; <nl> void SetStatus ( int status ) { m_status = status ; } ; <nl> <nl> - void GetDataFormat ( unsigned int * channels , unsigned int * samplerate , unsigned int * bitspersample ) ; <nl> - unsigned int GetChannels ( ) { if ( m_codec ) return m_codec - > m_Channels ; else return 0 ; } ; <nl> + void GetDataFormat ( CAEChannelInfo * channelInfo , unsigned int * samplerate , unsigned int * encodedSampleRate , enum AEDataFormat * dataFormat ) ; <nl> + unsigned int GetChannels ( ) { if ( m_codec ) return m_codec - > GetChannelInfo ( ) . 
Count ( ) ; else return 0 ; } ; <nl> / / Data management <nl> unsigned int GetDataSize ( ) ; <nl> - void * GetData ( unsigned int size ) ; <nl> - void PrefixData ( void * data , unsigned int size ) ; <nl> + void * GetData ( unsigned int samples ) ; <nl> ICodec * GetCodec ( ) const { return m_codec ; } <nl> - <nl> - private : <nl> - void ProcessAudio ( float * data , int numsamples ) ; <nl> - / / ReadPCMSamples ( ) - helper to convert PCM ( short / byte ) to float <nl> - int ReadPCMSamples ( float * buffer , int numsamples , int * actualsamples ) ; <nl> float GetReplayGain ( ) ; <nl> <nl> - / / block size ( number of bytes per sample * number of channels ) <nl> - int m_blockSize ; <nl> + private : <nl> / / pcm buffer <nl> CRingBuffer m_pcmBuffer ; <nl> <nl> / / output buffer ( for transferring data from the Pcm Buffer to the rest of the audio chain ) <nl> float m_outputBuffer [ OUTPUT_SAMPLES ] ; <nl> <nl> - / / gapless buffer ( left over samples from the previous audio decoder ) <nl> - float m_gaplessBuffer [ OUTPUT_SAMPLES ] ; <nl> - unsigned int m_gaplessBufferSize ; <nl> - <nl> / / input buffer ( for transferring data from the Codecs to our Pcm Ringbuffer <nl> BYTE m_pcmInputBuffer [ INPUT_SIZE ] ; <nl> float m_inputBuffer [ INPUT_SAMPLES ] ; <nl> mmm a / xbmc / cores / paplayer / PAPlayer . cpp <nl> ppp b / xbmc / cores / paplayer / PAPlayer . cpp <nl> <nl> * <nl> * / <nl> <nl> - # include " threads / SystemClock . h " <nl> # include " PAPlayer . h " <nl> # include " CodecFactory . h " <nl> # include " GUIInfoManager . h " <nl> - # include " guilib / AudioContext . h " <nl> # include " Application . h " <nl> # include " FileItem . h " <nl> # include " settings / AdvancedSettings . h " <nl> # include " settings / GUISettings . h " <nl> # include " settings / Settings . h " <nl> # include " music / tags / MusicInfoTag . h " <nl> - # include " . . / AudioRenderers / AudioRendererFactory . h " <nl> # include " utils / TimeUtils . h " <nl> # include " utils / log . h " <nl> # include " utils / MathUtils . h " <nl> <nl> - # ifdef _LINUX <nl> - # define XBMC_SAMPLE_RATE 44100 <nl> - # else <nl> - # define XBMC_SAMPLE_RATE 48000 <nl> - # endif <nl> + # include " threads / SingleLock . h " <nl> + # include " cores / AudioEngine / Utils / AEUtil . h " <nl> <nl> - # define VOLUME_FFWD_MUTE 900 / / 9dB <nl> - <nl> - # define FADE_TIME 2 * 2048 . 0f / XBMC_SAMPLE_RATE . 0f / / 2 packets <nl> - <nl> - # define TIME_TO_CACHE_NEXT_FILE 5000L / / 5 seconds <nl> - # define TIME_TO_CROSS_FADE 10000L / / 10 seconds <nl> + # define TIME_TO_CACHE_NEXT_FILE 5000 / * 5 seconds before end of song , start caching the next song * / <nl> + # define FAST_XFADE_TIME 80 / * 80 milliseconds * / <nl> <nl> / / PAP : Psycho - acoustic Audio Player <nl> / / Supporting all open audio codec standards . 
<nl> / / First one being nullsoft ' s nsv audio decoder format <nl> <nl> - PAPlayer : : PAPlayer ( IPlayerCallback & callback ) : IPlayer ( callback ) , CThread ( " PAPlayer " ) <nl> + PAPlayer : : PAPlayer ( IPlayerCallback & callback ) : <nl> + IPlayer ( callback ) , <nl> + CThread ( " PAPlayer " ) , <nl> + m_signalSpeedChange ( false ) , <nl> + m_playbackSpeed ( 1 ) , <nl> + m_isPlaying ( false ) , <nl> + m_isPaused ( false ) , <nl> + m_isFinished ( false ) , <nl> + m_currentStream ( NULL ) , <nl> + m_audioCallback ( NULL ) <nl> { <nl> - m_bIsPlaying = false ; <nl> - m_bPaused = false ; <nl> - m_cachingNextFile = false ; <nl> - m_currentlyCrossFading = false ; <nl> - m_bQueueFailed = false ; <nl> - <nl> - m_currentDecoder = 0 ; <nl> - <nl> - m_iSpeed = 1 ; <nl> - m_SeekTime = - 1 ; <nl> - m_IsFFwdRewding = false ; <nl> - m_timeOffset = 0 ; <nl> - <nl> - for ( int i = 0 ; i < 2 ; i + + ) <nl> - { <nl> - m_channelCount [ i ] = 0 ; <nl> - m_channelMap [ i ] = NULL ; <nl> - m_sampleRate [ i ] = 0 ; <nl> - m_bitsPerSample [ i ] = 0 ; <nl> - <nl> - m_pAudioDecoder [ i ] = NULL ; <nl> - m_pcmBuffer [ i ] = NULL ; <nl> - m_bufferPos [ i ] = 0 ; <nl> - m_Chunklen [ i ] = PACKET_SIZE ; <nl> - } <nl> - <nl> - m_currentStream = 0 ; <nl> - m_packet [ 0 ] [ 0 ] . packet = NULL ; <nl> - m_packet [ 1 ] [ 0 ] . packet = NULL ; <nl> - <nl> - m_bytesSentOut = 0 ; <nl> - m_BytesPerSecond = 0 ; <nl> - <nl> - m_resampleAudio = false ; <nl> - <nl> - m_visBufferLength = 0 ; <nl> - m_pCallback = NULL ; <nl> - <nl> - m_forceFadeToNext = false ; <nl> - m_CacheLevel = 0 ; <nl> - m_LastCacheLevelCheck = 0 ; <nl> - <nl> - m_currentFile = new CFileItem ; <nl> - m_nextFile = new CFileItem ; <nl> } <nl> <nl> PAPlayer : : ~ PAPlayer ( ) <nl> { <nl> - CloseFileInternal ( true ) ; <nl> - delete m_currentFile ; <nl> - delete m_nextFile ; <nl> - } <nl> + if ( ! m_isPaused ) <nl> + SoftStop ( true , true ) ; <nl> + CloseAllStreams ( false ) ; <nl> <nl> + / * wait for the thread to terminate * / <nl> + StopThread ( true ) ; / / true - wait for end of thread <nl> + } <nl> <nl> - void PAPlayer : : OnExit ( ) <nl> + bool PAPlayer : : HandlesType ( const CStdString & type ) <nl> { <nl> + ICodec * codec = CodecFactory : : CreateCodec ( type ) ; <nl> + if ( codec & & codec - > CanInit ( ) ) <nl> + { <nl> + delete codec ; <nl> + return true ; <nl> + } <nl> <nl> + return false ; <nl> } <nl> <nl> - bool PAPlayer : : OpenFile ( const CFileItem & file , const CPlayerOptions & options ) <nl> + void PAPlayer : : SoftStart ( bool wait / * = false * / ) <nl> { <nl> - if ( m_currentlyCrossFading ) CloseFileInternal ( false ) ; / / user seems to be in a hurry <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + for ( StreamList : : iterator itt = m_streams . begin ( ) ; itt ! = m_streams . end ( ) ; + + itt ) <nl> + { <nl> + StreamInfo * si = * itt ; <nl> + if ( si - > m_fadeOutTriggered ) <nl> + continue ; <nl> <nl> - m_crossFading = g_guiSettings . GetInt ( " musicplayer . crossfade " ) ; <nl> - / / WASAPI doesn ' t support multiple streams , no crossfading for cdda , cd - reading goes mad and no crossfading for last . fm doesn ' t like two connections <nl> - if ( file . IsCDDA ( ) | | file . IsLastFM ( ) | | g_guiSettings . GetString ( " audiooutput . audiodevice " ) . find ( " wasapi : " ) ! = CStdString : : npos ) m_crossFading = 0 ; <nl> - if ( m_crossFading & & IsPlaying ( ) ) <nl> + si - > m_stream - > FadeVolume ( 0 . 0f , 1 . 
0f , FAST_XFADE_TIME ) ; <nl> + si - > m_stream - > Resume ( ) ; <nl> + } <nl> + <nl> + if ( wait ) <nl> { <nl> - / / do a short crossfade on trackskip <nl> - / / set to max 2 seconds for these prev / next transitions <nl> - if ( m_crossFading > 2 ) m_crossFading = 2 ; <nl> - / / queue for crossfading <nl> - bool result = QueueNextFile ( file , false ) ; <nl> - if ( result ) <nl> + / * wait for them to fade in * / <nl> + lock . Leave ( ) ; <nl> + Sleep ( FAST_XFADE_TIME ) ; <nl> + lock . Enter ( ) ; <nl> + <nl> + / * be sure they have faded in * / <nl> + while ( wait ) <nl> { <nl> - / / crossfading value may be update by QueueNextFile when nr of channels changed <nl> - if ( ! m_crossFading ) / / swap to next track <nl> - m_decoder [ m_currentDecoder ] . SetStatus ( STATUS_ENDED ) ; <nl> - else / / force to fade to next track immediately <nl> - m_forceFadeToNext = true ; <nl> + wait = false ; <nl> + for ( StreamList : : iterator itt = m_streams . begin ( ) ; itt ! = m_streams . end ( ) ; + + itt ) <nl> + { <nl> + StreamInfo * si = * itt ; <nl> + if ( si - > m_stream - > IsFading ( ) ) <nl> + { <nl> + lock . Leave ( ) ; <nl> + wait = true ; <nl> + Sleep ( 1 ) ; <nl> + lock . Enter ( ) ; <nl> + break ; <nl> + } <nl> + } <nl> } <nl> - return result ; <nl> - } <nl> - <nl> - / / normal opening of file , nothing playing or crossfading not enabled <nl> - / / however no need to return to gui audio device <nl> - CloseFileInternal ( false ) ; <nl> - <nl> - / / always open the file using the current decoder <nl> - m_currentDecoder = 0 ; <nl> - <nl> - if ( ! m_decoder [ m_currentDecoder ] . Create ( file , ( int64_t ) ( options . starttime * 1000 ) , m_crossFading ) ) <nl> - return false ; <nl> - <nl> - m_iSpeed = 1 ; <nl> - m_bPaused = false ; <nl> - m_bStopPlaying = false ; <nl> - m_bytesSentOut = 0 ; <nl> - <nl> - CLog : : Log ( LOGINFO , " PAPlayer : Playing % s " , file . GetPath ( ) . c_str ( ) ) ; <nl> - <nl> - m_timeOffset = ( int64_t ) ( options . starttime * 1000 ) ; <nl> - <nl> - unsigned int channel , sampleRate , bitsPerSample ; <nl> - m_decoder [ m_currentDecoder ] . GetDataFormat ( & channel , & sampleRate , & bitsPerSample ) ; <nl> - <nl> - if ( ! CreateStream ( m_currentStream , channel , sampleRate , bitsPerSample ) ) <nl> - { <nl> - m_decoder [ m_currentDecoder ] . Destroy ( ) ; <nl> - CLog : : Log ( LOGERROR , " PAPlayer : : Unable to create audio stream " ) ; <nl> } <nl> - <nl> - * m_currentFile = file ; <nl> - <nl> - if ( ! IsRunning ( ) ) <nl> - Create ( ) ; <nl> - <nl> - m_startEvent . Set ( ) ; <nl> - <nl> - m_bIsPlaying = true ; <nl> - m_cachingNextFile = false ; <nl> - m_currentlyCrossFading = false ; <nl> - m_forceFadeToNext = false ; <nl> - m_bQueueFailed = false ; <nl> - <nl> - m_decoder [ m_currentDecoder ] . Start ( ) ; / / start playback <nl> - <nl> - return true ; <nl> } <nl> <nl> - void PAPlayer : : UpdateCrossFadingTime ( const CFileItem & file ) <nl> + void PAPlayer : : SoftStop ( bool wait / * = false * / , bool close / * = true * / ) <nl> { <nl> - if ( ( m_crossFading = g_guiSettings . GetInt ( " musicplayer . crossfade " ) ) ) <nl> + / * fade all the streams out fast for a nice soft stop * / <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + for ( StreamList : : iterator itt = m_streams . begin ( ) ; itt ! = m_streams . end ( ) ; + + itt ) <nl> { <nl> - if ( <nl> - m_crossFading & & <nl> - ( <nl> - file . IsCDDA ( ) | | <nl> - file . IsLastFM ( ) | | <nl> - ( <nl> - file . HasMusicInfoTag ( ) & & ! g_guiSettings . GetBool ( " musicplayer . 
crossfadealbumtracks " ) & & <nl> - ( m_currentFile - > GetMusicInfoTag ( ) - > GetAlbum ( ) ! = " " ) & & <nl> - ( m_currentFile - > GetMusicInfoTag ( ) - > GetAlbum ( ) = = file . GetMusicInfoTag ( ) - > GetAlbum ( ) ) & & <nl> - ( m_currentFile - > GetMusicInfoTag ( ) - > GetDiscNumber ( ) = = file . GetMusicInfoTag ( ) - > GetDiscNumber ( ) ) & & <nl> - ( m_currentFile - > GetMusicInfoTag ( ) - > GetTrackNumber ( ) = = file . GetMusicInfoTag ( ) - > GetTrackNumber ( ) - 1 ) <nl> - ) <nl> - | | g_guiSettings . GetString ( " audiooutput . audiodevice " ) . find ( " wasapi : " ) ! = CStdString : : npos <nl> - ) <nl> - ) <nl> + StreamInfo * si = * itt ; <nl> + if ( si - > m_stream ) <nl> + si - > m_stream - > FadeVolume ( 1 . 0f , 0 . 0f , FAST_XFADE_TIME ) ; <nl> + <nl> + if ( close ) <nl> { <nl> - m_crossFading = 0 ; <nl> + si - > m_prepareTriggered = true ; <nl> + si - > m_playNextTriggered = true ; <nl> + si - > m_fadeOutTriggered = true ; <nl> } <nl> } <nl> - } <nl> <nl> - void PAPlayer : : OnNothingToQueueNotify ( ) <nl> - { <nl> - / / nothing to queue , stop playing <nl> - m_bQueueFailed = true ; <nl> - } <nl> + / * if we are going to wait for them to finish fading * / <nl> + if ( wait ) <nl> + { <nl> + / * wait for them to fade out * / <nl> + lock . Leave ( ) ; <nl> + Sleep ( FAST_XFADE_TIME ) ; <nl> + lock . Enter ( ) ; <nl> <nl> - bool PAPlayer : : QueueNextFile ( const CFileItem & file ) <nl> - { <nl> - return QueueNextFile ( file , true ) ; <nl> - } <nl> + / * be sure they have faded out * / <nl> + while ( wait ) <nl> + { <nl> + wait = false ; <nl> + for ( StreamList : : iterator itt = m_streams . begin ( ) ; itt ! = m_streams . end ( ) ; + + itt ) <nl> + { <nl> + StreamInfo * si = * itt ; <nl> + if ( si - > m_stream & & si - > m_stream - > IsFading ( ) ) <nl> + { <nl> + lock . Leave ( ) ; <nl> + wait = true ; <nl> + Sleep ( 1 ) ; <nl> + lock . Enter ( ) ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> <nl> - bool PAPlayer : : QueueNextFile ( const CFileItem & file , bool checkCrossFading ) <nl> - { <nl> - if ( IsPaused ( ) ) <nl> - Pause ( ) ; <nl> - <nl> - if ( file . GetPath ( ) = = m_currentFile - > GetPath ( ) & & <nl> - file . m_lStartOffset > 0 & & <nl> - file . m_lStartOffset = = m_currentFile - > m_lEndOffset ) <nl> - { / / continuing on a . cue sheet item - return true to say we ' ll handle the transistion <nl> - * m_nextFile = file ; <nl> - return true ; <nl> + / * if we are not closing the streams , pause them * / <nl> + if ( ! close ) <nl> + { <nl> + for ( StreamList : : iterator itt = m_streams . begin ( ) ; itt ! = m_streams . end ( ) ; + + itt ) <nl> + { <nl> + StreamInfo * si = * itt ; <nl> + si - > m_stream - > Pause ( ) ; <nl> + } <nl> + } <nl> } <nl> + } <nl> <nl> - / / check if we can handle this file at all <nl> - int decoder = 1 - m_currentDecoder ; <nl> - int64_t seekOffset = ( file . m_lStartOffset * 1000 ) / 75 ; <nl> - if ( ! m_decoder [ decoder ] . Create ( file , seekOffset , m_crossFading ) ) <nl> + void PAPlayer : : CloseAllStreams ( bool fade / * = true * / ) <nl> + { <nl> + if ( ! fade ) <nl> { <nl> - m_bQueueFailed = true ; <nl> - return false ; <nl> - } <nl> + CExclusiveLock lock ( m_streamsLock ) ; <nl> + while ( ! m_streams . empty ( ) ) <nl> + { <nl> + StreamInfo * si = m_streams . front ( ) ; <nl> + m_streams . 
pop_front ( ) ; <nl> + <nl> + if ( si - > m_stream ) <nl> + { <nl> + CAEFactory : : AE - > FreeStream ( si - > m_stream ) ; <nl> + si - > m_stream = NULL ; <nl> + } <nl> <nl> - / / ok , we ' re good to go on queuing this one up <nl> - CLog : : Log ( LOGINFO , " PAPlayer : Queuing next file % s " , file . GetPath ( ) . c_str ( ) ) ; <nl> + si - > m_decoder . Destroy ( ) ; <nl> + delete si ; <nl> + } <nl> <nl> - m_bQueueFailed = false ; <nl> - if ( checkCrossFading ) <nl> - { <nl> - UpdateCrossFadingTime ( file ) ; <nl> - } <nl> + while ( ! m_finishing . empty ( ) ) <nl> + { <nl> + StreamInfo * si = m_finishing . front ( ) ; <nl> + m_finishing . pop_front ( ) ; <nl> <nl> - unsigned int channels , samplerate , bitspersample ; <nl> - m_decoder [ decoder ] . GetDataFormat ( & channels , & samplerate , & bitspersample ) ; <nl> + if ( si - > m_stream ) <nl> + { <nl> + CAEFactory : : AE - > FreeStream ( si - > m_stream ) ; <nl> + si - > m_stream = NULL ; <nl> + } <nl> <nl> - / / check the number of channels isn ' t changing ( else we can ' t do crossfading ) <nl> - if ( m_crossFading & & m_decoder [ m_currentDecoder ] . GetChannels ( ) = = channels ) <nl> - { / / crossfading - need to create a new stream <nl> - if ( ! CreateStream ( 1 - m_currentStream , channels , samplerate , bitspersample ) ) <nl> - { <nl> - m_decoder [ decoder ] . Destroy ( ) ; <nl> - CLog : : Log ( LOGERROR , " PAPlayer : : Unable to create audio stream " ) ; <nl> + si - > m_decoder . Destroy ( ) ; <nl> + delete si ; <nl> } <nl> + m_currentStream = NULL ; <nl> } <nl> else <nl> - { / / no crossfading if nr of channels is not the same <nl> - m_crossFading = 0 ; <nl> - } <nl> - <nl> - * m_nextFile = file ; <nl> - <nl> - return true ; <nl> + { <nl> + SoftStop ( false , true ) ; <nl> + CExclusiveLock lock ( m_streamsLock ) ; <nl> + m_currentStream = NULL ; <nl> + } <nl> } <nl> <nl> - <nl> - <nl> - bool PAPlayer : : CloseFileInternal ( bool bAudioDevice / * = true * / ) <nl> + bool PAPlayer : : OpenFile ( const CFileItem & file , const CPlayerOptions & options ) <nl> { <nl> - if ( IsPaused ( ) ) <nl> - Pause ( ) ; <nl> + CloseAllStreams ( ) ; <nl> + m_crossFadeTime = g_guiSettings . GetInt ( " musicplayer . crossfade " ) * 1000 ; <nl> <nl> - m_bStopPlaying = true ; <nl> - m_bStop = true ; <nl> - <nl> - m_visBufferLength = 0 ; <nl> - StopThread ( ) ; <nl> - <nl> - / / kill both our streams if we need to <nl> - for ( int i = 0 ; i < 2 ; i + + ) <nl> - { <nl> - m_decoder [ i ] . Destroy ( ) ; <nl> - if ( bAudioDevice ) <nl> - FreeStream ( i ) ; <nl> - } <nl> - <nl> - m_currentFile - > Reset ( ) ; <nl> - m_nextFile - > Reset ( ) ; <nl> + if ( ! QueueNextFileEx ( file , false ) ) <nl> + return false ; <nl> <nl> - if ( bAudioDevice ) <nl> - g_audioContext . SetActiveDevice ( CAudioContext : : DEFAULT_DEVICE ) ; <nl> - else <nl> - FlushStreams ( ) ; <nl> + if ( ! IsRunning ( ) ) <nl> + Create ( ) ; <nl> <nl> + / * trigger playback start * / <nl> + m_isPlaying = true ; <nl> + m_startEvent . Set ( ) ; <nl> return true ; <nl> } <nl> <nl> - void PAPlayer : : FreeStream ( int stream ) <nl> + bool PAPlayer : : QueueNextFile ( const CFileItem & file ) <nl> { <nl> - if ( m_pAudioDecoder [ stream ] ) <nl> - { <nl> - DrainStream ( stream ) ; <nl> - <nl> - delete m_pAudioDecoder [ stream ] ; <nl> - free ( m_pcmBuffer [ stream ] ) ; <nl> - } <nl> - m_pAudioDecoder [ stream ] = NULL ; <nl> - m_pcmBuffer [ stream ] = NULL ; <nl> - <nl> - if ( m_packet [ stream ] [ 0 ] . packet ) <nl> - free ( m_packet [ stream ] [ 0 ] . 
packet ) ; <nl> - <nl> - for ( int i = 0 ; i < PACKET_COUNT ; i + + ) <nl> - { <nl> - m_packet [ stream ] [ i ] . packet = NULL ; <nl> - } <nl> - <nl> - m_resampler [ stream ] . DeInitialize ( ) ; <nl> + return QueueNextFileEx ( file ) ; <nl> } <nl> <nl> - void PAPlayer : : DrainStream ( int stream ) <nl> + bool PAPlayer : : QueueNextFileEx ( const CFileItem & file , bool fadeIn / * = true * / ) <nl> { <nl> - if ( m_bStopPlaying | | m_pAudioDecoder [ 1 - stream ] ) <nl> - { <nl> - m_pAudioDecoder [ stream ] - > Stop ( ) ; <nl> - return ; <nl> - } <nl> + StreamInfo * si = new StreamInfo ( ) ; <nl> <nl> - DWORD silence = m_pAudioDecoder [ stream ] - > GetChunkLen ( ) - m_bufferPos [ stream ] % m_pAudioDecoder [ stream ] - > GetChunkLen ( ) ; <nl> - <nl> - if ( silence > 0 & & m_bufferPos [ stream ] > 0 ) <nl> + if ( ! si - > m_decoder . Create ( file , ( file . m_lStartOffset * 1000 ) / 75 ) ) <nl> { <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Drain - adding % d bytes of silence , real pcmdata size : % d , chunk size : % d " , silence , m_bufferPos [ stream ] , m_pAudioDecoder [ stream ] - > GetChunkLen ( ) ) ; <nl> - memset ( m_pcmBuffer [ stream ] + m_bufferPos [ stream ] , 0 , silence ) ; <nl> - m_bufferPos [ stream ] + = silence ; <nl> + CLog : : Log ( LOGWARNING , " PAPlayer : : QueueNextFileEx - Failed to create the decoder " ) ; <nl> + <nl> + delete si ; <nl> + m_callback . OnQueueNextItem ( ) ; <nl> + return false ; <nl> } <nl> <nl> - DWORD added = 0 ; <nl> - while ( m_bufferPos [ stream ] - added > = m_pAudioDecoder [ stream ] - > GetChunkLen ( ) ) <nl> + / * decode until there is data - available * / <nl> + si - > m_decoder . Start ( ) ; <nl> + while ( si - > m_decoder . GetDataSize ( ) = = 0 ) <nl> { <nl> - added + = m_pAudioDecoder [ stream ] - > AddPackets ( m_pcmBuffer [ stream ] + added , m_bufferPos [ stream ] - added ) ; <nl> - Sleep ( 1 ) ; <nl> - } <nl> - m_bufferPos [ stream ] = 0 ; <nl> + int status = si - > m_decoder . GetStatus ( ) ; <nl> + if ( status = = STATUS_ENDED | | <nl> + status = = STATUS_NO_FILE | | <nl> + si - > m_decoder . ReadSamples ( PACKET_SIZE ) = = RET_ERROR ) <nl> + { <nl> + CLog : : Log ( LOGINFO , " PAPlayer : : QueueNextFileEx - Error reading samples " ) ; <nl> <nl> - m_pAudioDecoder [ stream ] - > WaitCompletion ( ) ; <nl> - } <nl> + si - > m_decoder . Destroy ( ) ; <nl> + delete si ; <nl> + m_callback . OnQueueNextItem ( ) ; <nl> + return false ; <nl> + } <nl> <nl> - bool PAPlayer : : CreateStream ( int num , unsigned int channels , unsigned int samplerate , unsigned int bitspersample , CStdString codec ) <nl> - { <nl> - unsigned int outputSampleRate = ( channels < = 2 & & g_advancedSettings . m_audioResample ) ? g_advancedSettings . m_audioResample : samplerate ; <nl> + / * yield our time so that the main PAP thread doesnt stall * / <nl> + CThread : : Sleep ( 1 ) ; <nl> + } <nl> + <nl> + / * init the streaminfo struct * / <nl> + si - > m_decoder . GetDataFormat ( & si - > m_channelInfo , & si - > m_sampleRate , & si - > m_encodedSampleRate , & si - > m_dataFormat ) ; <nl> + si - > m_startOffset = file . m_lStartOffset * 1000 / 75 ; <nl> + si - > m_endOffset = file . m_lEndOffset * 1000 / 75 ; <nl> + si - > m_bytesPerSample = CAEUtil : : DataFormatToBits ( si - > m_dataFormat ) > > 3 ; <nl> + si - > m_bytesPerFrame = si - > m_bytesPerSample * si - > m_channelInfo . 
Count ( ) ; <nl> + si - > m_started = false ; <nl> + si - > m_finishing = false ; <nl> + si - > m_framesSent = 0 ; <nl> + si - > m_seekNextAtFrame = 0 ; <nl> + si - > m_seekFrame = - 1 ; <nl> + si - > m_stream = NULL ; <nl> + si - > m_volume = ( fadeIn & & m_crossFadeTime ) ? 0 . 0f : 1 . 0f ; <nl> + si - > m_fadeOutTriggered = false ; <nl> + si - > m_isSlaved = false ; <nl> + <nl> + if ( si - > m_decoder . TotalTime ( ) < TIME_TO_CACHE_NEXT_FILE + m_crossFadeTime ) <nl> + si - > m_prepareNextAtFrame = 0 ; <nl> + else <nl> + si - > m_prepareNextAtFrame = ( int ) ( ( si - > m_decoder . TotalTime ( ) - TIME_TO_CACHE_NEXT_FILE - m_crossFadeTime ) * si - > m_sampleRate / 1000 . 0f ) ; <nl> + si - > m_prepareTriggered = false ; <nl> <nl> - if ( m_pAudioDecoder [ num ] ! = NULL & & m_channelCount [ num ] = = channels & & m_sampleRate [ num ] = = outputSampleRate / * & & m_bitsPerSample [ num ] = = bitspersample * / ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Using existing audio renderer " ) ; <nl> - } <nl> + if ( si - > m_decoder . TotalTime ( ) < m_crossFadeTime ) <nl> + si - > m_playNextAtFrame = ( int ) ( ( si - > m_decoder . TotalTime ( ) / 2 ) * si - > m_sampleRate / 1000 . 0f ) ; <nl> else <nl> - { <nl> - FreeStream ( num ) ; <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Creating new audio renderer " ) ; <nl> - m_bitsPerSample [ num ] = 16 ; <nl> - m_sampleRate [ num ] = outputSampleRate ; <nl> - m_channelCount [ num ] = channels ; <nl> - m_channelMap [ num ] = NULL ; <nl> - m_BytesPerSecond = ( m_bitsPerSample [ num ] / 8 ) * outputSampleRate * channels ; <nl> - <nl> - / * Open the device * / <nl> - m_pAudioDecoder [ num ] = CAudioRendererFactory : : Create ( <nl> - m_pCallback , / / pCallback <nl> - m_channelCount [ num ] , / / iChannels <nl> - m_channelMap [ num ] , / / channelMap <nl> - m_sampleRate [ num ] , / / uiSamplesPerSec <nl> - m_bitsPerSample [ num ] , / / uiBitsPerSample <nl> - false , / / bResample <nl> - true , / / bIsMusic <nl> - IAudioRenderer : : ENCODED_NONE / / bPassthrough <nl> - ) ; <nl> - <nl> - if ( ! m_pAudioDecoder [ num ] ) return false ; <nl> - <nl> - m_pcmBuffer [ num ] = ( unsigned char * ) malloc ( ( m_pAudioDecoder [ num ] - > GetChunkLen ( ) + PACKET_SIZE ) ) ; <nl> - m_bufferPos [ num ] = 0 ; <nl> - m_latency [ num ] = m_pAudioDecoder [ num ] - > GetDelay ( ) ; <nl> - m_Chunklen [ num ] = std : : max ( PACKET_SIZE , ( int ) m_pAudioDecoder [ num ] - > GetChunkLen ( ) ) ; <nl> - m_packet [ num ] [ 0 ] . packet = ( BYTE * ) malloc ( PACKET_SIZE * PACKET_COUNT ) ; <nl> - for ( int i = 1 ; i < PACKET_COUNT ; i + + ) <nl> - m_packet [ num ] [ i ] . packet = m_packet [ num ] [ i - 1 ] . packet + PACKET_SIZE ; <nl> - } <nl> - <nl> - / / set initial volume <nl> - SetStreamVolume ( num , g_settings . m_nVolumeLevel ) ; <nl> + si - > m_playNextAtFrame = ( int ) ( ( si - > m_decoder . TotalTime ( ) - m_crossFadeTime ) * si - > m_sampleRate / 1000 . 0f ) ; <nl> + si - > m_playNextTriggered = false ; <nl> <nl> - m_resampler [ num ] . InitConverter ( samplerate , bitspersample , channels , outputSampleRate , m_bitsPerSample [ num ] , PACKET_SIZE ) ; <nl> + PrepareStream ( si ) ; <nl> <nl> - / / TODO : How do we best handle the callback , given that our samplerate etc . may be <nl> - / / changing at this point ? <nl> + / * add the stream to the list * / <nl> + CExclusiveLock lock ( m_streamsLock ) ; <nl> + m_streams . 
push_back ( si ) ; <nl> <nl> - / / fire off our init to our callback <nl> - if ( m_pCallback ) <nl> - m_pCallback - > OnInitialize ( channels , outputSampleRate , m_bitsPerSample [ num ] ) ; <nl> return true ; <nl> } <nl> <nl> - void PAPlayer : : Pause ( ) <nl> + inline bool PAPlayer : : PrepareStream ( StreamInfo * si ) <nl> { <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : pause m_bplaying : % d " , m_bIsPlaying ) ; <nl> - if ( ! m_bIsPlaying | | ! m_pAudioDecoder ) <nl> - return ; <nl> + / * if we have a stream we are already prepared * / <nl> + if ( si - > m_stream ) <nl> + return true ; <nl> <nl> - m_bPaused = ! m_bPaused ; <nl> + / * get a paused stream * / <nl> + si - > m_stream = CAEFactory : : AE - > MakeStream ( <nl> + si - > m_dataFormat , <nl> + si - > m_sampleRate , <nl> + si - > m_encodedSampleRate , <nl> + si - > m_channelInfo , <nl> + AESTREAM_PAUSED <nl> + ) ; <nl> <nl> - if ( m_bPaused ) <nl> + if ( ! si - > m_stream ) <nl> { <nl> - if ( m_pAudioDecoder [ m_currentStream ] ) <nl> - m_pAudioDecoder [ m_currentStream ] - > Pause ( ) ; <nl> + CLog : : Log ( LOGDEBUG , " PAPlayer : : PrepareStream - Failed to get IAEStream " ) ; <nl> + return false ; <nl> + } <nl> <nl> - if ( m_currentlyCrossFading & & m_pAudioDecoder [ 1 - m_currentStream ] ) <nl> - m_pAudioDecoder [ 1 - m_currentStream ] - > Pause ( ) ; <nl> + si - > m_stream - > SetVolume ( si - > m_volume ) ; <nl> + si - > m_stream - > SetReplayGain ( si - > m_decoder . GetReplayGain ( ) ) ; <nl> <nl> - m_callback . OnPlayBackPaused ( ) ; <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Playback paused " ) ; <nl> + / * if its not the first stream and crossfade is not enabled * / <nl> + if ( m_currentStream & & m_currentStream ! = si & & ! m_crossFadeTime ) <nl> + { <nl> + / * slave the stream for gapless * / <nl> + si - > m_isSlaved = true ; <nl> + m_currentStream - > m_stream - > RegisterSlave ( si - > m_stream ) ; <nl> } <nl> - else <nl> + <nl> + / * fill the stream ' s buffer * / <nl> + while ( si - > m_stream - > IsBuffering ( ) ) <nl> { <nl> - if ( m_pAudioDecoder [ m_currentStream ] ) <nl> - m_pAudioDecoder [ m_currentStream ] - > Resume ( ) ; <nl> + int status = si - > m_decoder . GetStatus ( ) ; <nl> + if ( status = = STATUS_ENDED | | <nl> + status = = STATUS_NO_FILE | | <nl> + si - > m_decoder . ReadSamples ( PACKET_SIZE ) = = RET_ERROR ) <nl> + { <nl> + CLog : : Log ( LOGINFO , " PAPlayer : : PrepareStream - Stream Finished " ) ; <nl> + break ; <nl> + } <nl> <nl> - if ( m_currentlyCrossFading & & m_pAudioDecoder [ 1 - m_currentStream ] ) <nl> - m_pAudioDecoder [ 1 - m_currentStream ] - > Resume ( ) ; <nl> + if ( ! QueueData ( si ) ) <nl> + break ; <nl> <nl> - m_callback . OnPlayBackResumed ( ) ; <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Playback resumed " ) ; <nl> + / * yield our time so that the main PAP thread doesnt stall * / <nl> + CThread : : Sleep ( 1 ) ; <nl> } <nl> - } <nl> <nl> - void PAPlayer : : SetVolume ( float volume ) <nl> - { <nl> + CLog : : Log ( LOGINFO , " PAPlayer : : PrepareStream - Ready " ) ; <nl> <nl> + return true ; <nl> } <nl> <nl> - void PAPlayer : : SetDynamicRangeCompression ( long drc ) <nl> + bool PAPlayer : : CloseFile ( ) <nl> { <nl> - / / TODO : Add volume amplification <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : : SetDynamicRangeCompression - drc : % lu " , drc ) ; <nl> + m_callback . OnPlayBackStopped ( ) ; <nl> + return true ; <nl> } <nl> <nl> void PAPlayer : : Process ( ) <nl> { <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Thread started " ) ; <nl> - if ( m_startEvent . 
WaitMSec ( 100 ) ) <nl> + if ( ! m_startEvent . WaitMSec ( 100 ) ) <nl> { <nl> - m_startEvent . Reset ( ) ; <nl> + CLog : : Log ( LOGDEBUG , " PAPlayer : : Process - Failed to receive start event " ) ; <nl> + return ; <nl> + } <nl> <nl> - do <nl> + CLog : : Log ( LOGDEBUG , " PAPlayer : : Process - Playback started " ) ; <nl> + while ( m_isPlaying & & ! m_bStop ) <nl> + { <nl> + / * this needs to happen outside of any locks to prevent deadlocks * / <nl> + if ( m_signalSpeedChange ) <nl> { <nl> - if ( ! m_bPaused ) <nl> - { <nl> - if ( ! ProcessPAP ( ) ) <nl> - break ; <nl> - } <nl> - else <nl> - { <nl> - Sleep ( 100 ) ; <nl> - } <nl> + m_callback . OnPlayBackSpeedChanged ( m_playbackSpeed ) ; <nl> + m_signalSpeedChange = false ; <nl> } <nl> - while ( ! m_bStopPlaying & & m_bIsPlaying & & ! m_bStop ) ; <nl> <nl> - CLog : : Log ( LOGINFO , " PAPlayer : End of playback reached " ) ; <nl> - m_bIsPlaying = false ; <nl> - if ( ! m_bStopPlaying & & ! m_bStop ) <nl> - m_callback . OnPlayBackEnded ( ) ; <nl> - else <nl> - m_callback . OnPlayBackStopped ( ) ; <nl> - } <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Thread end " ) ; <nl> - } <nl> + double delay = 100 . 0 ; <nl> + double buffer = 100 . 0 ; <nl> + ProcessStreams ( delay , buffer ) ; <nl> <nl> - void PAPlayer : : ToFFRW ( int iSpeed ) <nl> - { <nl> - m_iSpeed = iSpeed ; <nl> - m_callback . OnPlayBackSpeedChanged ( iSpeed ) ; <nl> + if ( delay < buffer & & delay > 0 . 75 * buffer ) <nl> + CThread : : Sleep ( MathUtils : : round_int ( ( buffer - delay ) * 1000 . 0 ) ) ; <nl> + } <nl> } <nl> <nl> - void PAPlayer : : UpdateCacheLevel ( ) <nl> + inline void PAPlayer : : ProcessStreams ( double & delay , double & buffer ) <nl> { <nl> - / / check cachelevel every . 5 seconds <nl> - if ( ( XbmcThreads : : SystemClockMillis ( ) - m_LastCacheLevelCheck ) > 500 ) <nl> + CSharedLock sharedLock ( m_streamsLock ) ; <nl> + if ( m_isFinished & & m_streams . empty ( ) & & m_finishing . empty ( ) ) <nl> { <nl> - ICodec * codec = m_decoder [ m_currentDecoder ] . GetCodec ( ) ; <nl> - if ( codec ) <nl> - { <nl> - m_CacheLevel = codec - > GetCacheLevel ( ) ; <nl> - m_LastCacheLevelCheck = XbmcThreads : : SystemClockMillis ( ) ; <nl> - / / CLog : : Log ( LOGDEBUG , " Cachelevel : % i % % " , m_CacheLevel ) ; <nl> - } <nl> + m_isPlaying = false ; <nl> + delay = 0 ; <nl> + m_callback . OnPlayBackEnded ( ) ; <nl> + return ; <nl> } <nl> - } <nl> <nl> - bool PAPlayer : : ProcessPAP ( ) <nl> - { <nl> - / * <nl> - * Here ' s what we should be doing in each player loop : <nl> - * <nl> - * 1 . Run DoWork ( ) on our audio device to actually output audio . <nl> - * <nl> - * 2 . Pass our current buffer to the audio device to see if it wants anything , <nl> - * and if so , reduce our buffer size accordingly . <nl> - * <nl> - * 3 . Check whether we have space in our buffer for more data , and if so , <nl> - * read some more in . <nl> - * <nl> - * 4 . Check for end of file and return false if we reach it . <nl> - * <nl> - * 5 . Perform any seeking and ffwd / rewding as necessary . <nl> - * <nl> - * 6 . If we don ' t do anything in 2 . . . 5 , we can take a breather and break out for sleeping . <nl> - * / <nl> - while ( true ) <nl> + / * destroy any drained streams * / <nl> + for ( StreamList : : iterator itt = m_finishing . begin ( ) ; itt ! = m_finishing . end ( ) ; ) <nl> { <nl> - if ( m_bStop ) return false ; <nl> - <nl> - / / Check for . 
cue sheet item end <nl> - if ( m_currentFile - > m_lEndOffset & & GetTime ( ) > = GetTotalTime64 ( ) ) <nl> - { <nl> - CLog : : Log ( LOGINFO , " PAPlayer : Passed end of track in a . cue sheet item " ) ; <nl> - m_decoder [ m_currentDecoder ] . SetStatus ( STATUS_ENDED ) ; <nl> + StreamInfo * si = * itt ; <nl> + if ( si - > m_stream - > IsDrained ( ) ) <nl> + { <nl> + itt = m_finishing . erase ( itt ) ; <nl> + CAEFactory : : AE - > FreeStream ( si - > m_stream ) ; <nl> + delete si ; <nl> + CLog : : Log ( LOGDEBUG , " PAPlayer : : ProcessStreams - Stream Freed " ) ; <nl> } <nl> + else <nl> + + + itt ; <nl> + } <nl> <nl> - / / check whether we need to send off our callbacks etc . <nl> - int status = m_decoder [ m_currentDecoder ] . GetStatus ( ) ; <nl> - if ( status = = STATUS_NO_FILE ) <nl> - return false ; <nl> - <nl> - UpdateCacheLevel ( ) ; <nl> - <nl> - / / check whether we should queue the next file up <nl> - if ( ( GetTotalTime64 ( ) > 0 ) & & GetTotalTime64 ( ) - GetTime ( ) < TIME_TO_CACHE_NEXT_FILE + m_crossFading * 1000L & & ! m_cachingNextFile ) <nl> - { / / request the next file from our application <nl> - m_callback . OnQueueNextItem ( ) ; <nl> - m_cachingNextFile = true ; <nl> - } <nl> + sharedLock . Leave ( ) ; <nl> + CExclusiveLock lock ( m_streamsLock ) ; <nl> <nl> - if ( m_crossFading & & m_decoder [ 0 ] . GetChannels ( ) = = m_decoder [ 1 ] . GetChannels ( ) ) <nl> + for ( StreamList : : iterator itt = m_streams . begin ( ) ; itt ! = m_streams . end ( ) ; + + itt ) <nl> + { <nl> + StreamInfo * si = * itt ; <nl> + if ( ! m_currentStream & & ! si - > m_started ) <nl> + m_currentStream = si ; <nl> + / * if the stream is finishing * / <nl> + if ( ( si - > m_fadeOutTriggered & & si - > m_stream & & ! si - > m_stream - > IsFading ( ) ) | | ! ProcessStream ( si , delay , buffer ) ) <nl> { <nl> - if ( ( ( GetTotalTime64 ( ) - GetTime ( ) < m_crossFading * 1000L ) | | ( m_forceFadeToNext ) ) & & ! m_currentlyCrossFading ) <nl> - { / / request the next file from our application <nl> - if ( m_decoder [ 1 - m_currentDecoder ] . GetStatus ( ) = = STATUS_QUEUED & & m_pAudioDecoder [ 1 - m_currentStream ] ) <nl> - { <nl> - m_currentlyCrossFading = true ; <nl> - if ( m_forceFadeToNext ) <nl> - { <nl> - m_forceFadeToNext = false ; <nl> - m_crossFadeLength = m_crossFading * 1000L ; <nl> - } <nl> - else <nl> - { <nl> - m_crossFadeLength = GetTotalTime64 ( ) - GetTime ( ) ; <nl> - } <nl> - m_currentDecoder = 1 - m_currentDecoder ; <nl> - m_decoder [ m_currentDecoder ] . Start ( ) ; <nl> - m_currentStream = 1 - m_currentStream ; <nl> - CLog : : Log ( LOGDEBUG , " Starting Crossfade - resuming stream % i " , m_currentStream ) ; <nl> - <nl> - m_pAudioDecoder [ m_currentStream ] - > Resume ( ) ; <nl> - <nl> - m_callback . OnPlayBackStarted ( ) ; <nl> - m_timeOffset = m_nextFile - > m_lStartOffset * 1000 / 75 ; <nl> - m_bytesSentOut = 0 ; <nl> - * m_currentFile = * m_nextFile ; <nl> - m_nextFile - > Reset ( ) ; <nl> - m_cachingNextFile = false ; <nl> - } <nl> + if ( ! si - > m_prepareTriggered ) <nl> + { <nl> + si - > m_prepareTriggered = true ; <nl> + m_callback . OnQueueNextItem ( ) ; <nl> } <nl> - } <nl> <nl> - / / Check for EOF and queue the next track if applicable <nl> - if ( m_decoder [ m_currentDecoder ] . GetStatus ( ) = = STATUS_ENDED ) <nl> - { / / time to swap tracks <nl> - if ( m_nextFile - > GetPath ( ) ! = m_currentFile - > GetPath ( ) | | <nl> - ! m_nextFile - > m_lStartOffset | | <nl> - m_nextFile - > m_lStartOffset ! 
= m_currentFile - > m_lEndOffset ) <nl> - { / / don ' t have a . cue sheet item <nl> - int nextstatus = m_decoder [ 1 - m_currentDecoder ] . GetStatus ( ) ; <nl> - if ( nextstatus = = STATUS_QUEUED | | nextstatus = = STATUS_QUEUING | | nextstatus = = STATUS_PLAYING ) <nl> - { / / swap streams <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : Swapping tracks % i to % i " , m_currentDecoder , 1 - m_currentDecoder ) ; <nl> - if ( ! m_crossFading | | m_decoder [ 0 ] . GetChannels ( ) ! = m_decoder [ 1 ] . GetChannels ( ) ) <nl> - { / / playing gapless ( we use only the 1 output stream in this case ) <nl> - int prefixAmount = m_decoder [ m_currentDecoder ] . GetDataSize ( ) ; <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : : Prefixing % i samples of old data to new track for gapless playback " , prefixAmount ) ; <nl> - m_decoder [ 1 - m_currentDecoder ] . PrefixData ( m_decoder [ m_currentDecoder ] . GetData ( prefixAmount ) , prefixAmount ) ; <nl> - / / check if we need to change the resampler ( due to format change ) <nl> - unsigned int channels , samplerate , bitspersample ; <nl> - m_decoder [ m_currentDecoder ] . GetDataFormat ( & channels , & samplerate , & bitspersample ) ; <nl> - unsigned int channels2 , samplerate2 , bitspersample2 ; <nl> - m_decoder [ 1 - m_currentDecoder ] . GetDataFormat ( & channels2 , & samplerate2 , & bitspersample2 ) ; <nl> - / / change of channels - reinitialize our speaker configuration <nl> - if ( channels ! = channels2 | | ( g_advancedSettings . m_audioResample = = 0 & & ( samplerate ! = samplerate2 | | bitspersample ! = bitspersample2 ) ) ) <nl> - { <nl> - CLog : : Log ( LOGINFO , " PAPlayer : Stream properties have changed , restarting stream " ) ; <nl> - FreeStream ( m_currentStream ) ; <nl> - if ( ! CreateStream ( m_currentStream , channels2 , samplerate2 , bitspersample2 ) ) <nl> - { <nl> - CLog : : Log ( LOGERROR , " PAPlayer : Error creating stream ! " ) ; <nl> - return false ; <nl> - } <nl> - m_pAudioDecoder [ m_currentStream ] - > Resume ( ) ; <nl> - } <nl> - else if ( samplerate ! = samplerate2 | | bitspersample ! = bitspersample2 ) <nl> - { <nl> - CLog : : Log ( LOGINFO , " PAPlayer : Restarting resampler due to a change in data format " ) ; <nl> - m_resampler [ m_currentStream ] . DeInitialize ( ) ; <nl> - if ( ! m_resampler [ m_currentStream ] . InitConverter ( samplerate2 , bitspersample2 , channels2 , g_advancedSettings . m_audioResample , 16 , PACKET_SIZE ) ) <nl> - { <nl> - CLog : : Log ( LOGERROR , " PAPlayer : Error initializing resampler ! " ) ; <nl> - return false ; <nl> - } <nl> - } <nl> - CLog : : Log ( LOGINFO , " PAPlayer : Starting new track " ) ; <nl> - <nl> - m_decoder [ m_currentDecoder ] . Destroy ( ) ; <nl> - m_decoder [ 1 - m_currentDecoder ] . Start ( ) ; <nl> - m_callback . OnPlayBackStarted ( ) ; <nl> - m_timeOffset = m_nextFile - > m_lStartOffset * 1000 / 75 ; <nl> - m_bytesSentOut = 0 ; <nl> - * m_currentFile = * m_nextFile ; <nl> - m_nextFile - > Reset ( ) ; <nl> - m_cachingNextFile = false ; <nl> - m_currentDecoder = 1 - m_currentDecoder ; <nl> - } <nl> - else <nl> - { / / cross fading - shouldn ' t ever get here - if we do , return false <nl> - if ( ! m_currentlyCrossFading ) <nl> - { <nl> - CLog : : Log ( LOGERROR , " End of file Reached before crossfading kicked in ! " ) ; <nl> - return false ; <nl> - } <nl> - else <nl> - { <nl> - CLog : : Log ( LOGINFO , " End of file reached before crossfading finished ! " ) ; <nl> - return false ; <nl> - } <nl> + / * remove the stream * / <nl> + itt = m_streams . 
erase ( itt ) ; <nl> + / * if its the current stream * / <nl> + if ( si = = m_currentStream ) <nl> + { <nl> + / * if it was the last stream * / <nl> + if ( itt = = m_streams . end ( ) ) <nl> + { <nl> + / * if it didnt trigger the next queue item * / <nl> + if ( ! si - > m_prepareTriggered ) <nl> + { <nl> + m_callback . OnQueueNextItem ( ) ; <nl> + si - > m_prepareTriggered = true ; <nl> } <nl> + m_currentStream = NULL ; <nl> } <nl> else <nl> { <nl> - if ( GetTotalTime64 ( ) < = 0 & & ! m_bQueueFailed ) <nl> - { / / we did not know the duration so didn ' t queue the next song , try queueing it now <nl> - if ( ! m_cachingNextFile ) <nl> - { / / request the next file from our application <nl> - m_callback . OnQueueNextItem ( ) ; <nl> - m_cachingNextFile = true ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - / / no track queued - return and get another one once we are finished <nl> - / / with the current stream <nl> - WaitForStream ( ) ; <nl> - return false ; <nl> - } <nl> + m_currentStream = * itt ; <nl> } <nl> } <nl> - else <nl> - { <nl> - / / set the next track playing ( . cue sheet ) <nl> - m_decoder [ m_currentDecoder ] . SetStatus ( STATUS_PLAYING ) ; <nl> - m_callback . OnPlayBackStarted ( ) ; <nl> - m_timeOffset = m_nextFile - > m_lStartOffset * 1000 / 75 ; <nl> - m_bytesSentOut = 0 ; <nl> - * m_currentFile = * m_nextFile ; <nl> - m_nextFile - > Reset ( ) ; <nl> - m_cachingNextFile = false ; <nl> - } <nl> + <nl> + / * unregister the audio callback * / <nl> + si - > m_stream - > UnRegisterAudioCallback ( ) ; <nl> + si - > m_decoder . Destroy ( ) ; <nl> + si - > m_stream - > Drain ( ) ; <nl> + m_finishing . push_back ( si ) ; <nl> + return ; <nl> } <nl> <nl> - / / handle seeking and ffwd / rewding . <nl> - HandleSeeking ( ) ; <nl> - if ( ! HandleFFwdRewd ( ) ) <nl> + if ( ! si - > m_started ) <nl> + continue ; <nl> + <nl> + / * is it time to prepare the next stream ? * / <nl> + if ( si - > m_prepareNextAtFrame > 0 & & ! si - > m_prepareTriggered & & si - > m_framesSent > = si - > m_prepareNextAtFrame ) <nl> { <nl> - / / need to skip to the next track - let ' s see if we already have another one <nl> - m_decoder [ m_currentDecoder ] . SetStatus ( STATUS_ENDED ) ; <nl> - continue ; / / loop around to start the next track <nl> + si - > m_prepareTriggered = true ; <nl> + m_callback . OnQueueNextItem ( ) ; <nl> } <nl> <nl> - if ( ! m_bPaused ) <nl> + / * it is time to start playing the next stream ? * / <nl> + if ( si - > m_playNextAtFrame > 0 & & ! si - > m_playNextTriggered & & si - > m_framesSent > = si - > m_playNextAtFrame ) <nl> { <nl> - <nl> - / / Let our decoding stream ( s ) do their thing <nl> - int retVal = m_decoder [ m_currentDecoder ] . ReadSamples ( PACKET_SIZE ) ; <nl> - if ( retVal = = RET_ERROR ) <nl> + if ( ! si - > m_prepareTriggered ) <nl> { <nl> - m_decoder [ m_currentDecoder ] . Destroy ( ) ; <nl> - return false ; <nl> + si - > m_prepareTriggered = true ; <nl> + m_callback . OnQueueNextItem ( ) ; <nl> } <nl> <nl> - int retVal2 = m_decoder [ 1 - m_currentDecoder ] . ReadSamples ( PACKET_SIZE ) ; <nl> - if ( retVal2 = = RET_ERROR ) <nl> + if ( ! m_isFinished ) <nl> { <nl> - m_decoder [ 1 - m_currentDecoder ] . Destroy ( ) ; <nl> - } <nl> + if ( m_crossFadeTime ) <nl> + si - > m_stream - > FadeVolume ( 1 . 0f , 0 . 0f , m_crossFadeTime ) ; <nl> + m_currentStream = NULL ; <nl> <nl> - / / if we ' re cross - fading , then we do this for both streams , otherwise <nl> - / / we do it just for the one stream . 
<nl> - if ( m_currentlyCrossFading ) <nl> - { <nl> - if ( GetTime ( ) > = m_crossFadeLength ) / / finished <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " Finished Crossfading " ) ; <nl> - m_currentlyCrossFading = false ; <nl> - SetStreamVolume ( m_currentStream , g_settings . m_nVolumeLevel ) ; <nl> - FreeStream ( 1 - m_currentStream ) ; <nl> - m_decoder [ 1 - m_currentDecoder ] . Destroy ( ) ; <nl> - } <nl> - else <nl> - { <nl> - float fraction = ( float ) ( m_crossFadeLength - GetTime ( ) ) / ( float ) m_crossFadeLength - 0 . 5f ; <nl> - / / make sure we can take valid logs . <nl> - if ( fraction > 0 . 499f ) fraction = 0 . 499f ; <nl> - if ( fraction < - 0 . 499f ) fraction = - 0 . 499f ; <nl> - float volumeCurrent = 2000 . 0f * log10 ( 0 . 5f - fraction ) ; <nl> - float volumeNext = 2000 . 0f * log10 ( 0 . 5f + fraction ) ; <nl> - SetStreamVolume ( m_currentStream , g_settings . m_nVolumeLevel + ( int ) volumeCurrent ) ; <nl> - SetStreamVolume ( 1 - m_currentStream , g_settings . m_nVolumeLevel + ( int ) volumeNext ) ; <nl> - if ( AddPacketsToStream ( 1 - m_currentStream , m_decoder [ 1 - m_currentDecoder ] ) ) <nl> - retVal2 = RET_SUCCESS ; <nl> - } <nl> + / * unregister the audio callback * / <nl> + si - > m_stream - > UnRegisterAudioCallback ( ) ; <nl> } <nl> <nl> - / / add packets as necessary <nl> - if ( AddPacketsToStream ( m_currentStream , m_decoder [ m_currentDecoder ] ) ) <nl> - retVal = RET_SUCCESS ; <nl> + si - > m_playNextTriggered = true ; <nl> + } <nl> + } <nl> + } <nl> <nl> - if ( retVal = = RET_SLEEP & & retVal2 = = RET_SLEEP ) <nl> - { <nl> - float maximumSleepTime = m_pAudioDecoder [ m_currentStream ] - > GetCacheTime ( ) ; <nl> - <nl> - if ( m_pAudioDecoder [ 1 - m_currentStream ] ) <nl> - maximumSleepTime = std : : min ( maximumSleepTime , m_pAudioDecoder [ 1 - m_currentStream ] - > GetCacheTime ( ) ) ; <nl> + inline bool PAPlayer : : ProcessStream ( StreamInfo * si , double & delay , double & buffer ) <nl> + { <nl> + / * if playback needs to start on this stream , do it * / <nl> + if ( si = = m_currentStream & & ! si - > m_started ) <nl> + { <nl> + si - > m_started = true ; <nl> + si - > m_stream - > RegisterAudioCallback ( m_audioCallback ) ; <nl> + if ( ! si - > m_isSlaved ) <nl> + si - > m_stream - > Resume ( ) ; <nl> + si - > m_stream - > FadeVolume ( 0 . 0f , 1 . 0f , m_crossFadeTime ) ; <nl> + m_callback . OnPlayBackStarted ( ) ; <nl> + } <nl> <nl> - int sleep = std : : max ( ( int ) ( ( maximumSleepTime / 2 . 0f ) * 1000 . 0f ) , 1 ) ; <nl> + / * if we have not started yet and the stream has been primed * / <nl> + unsigned int space = si - > m_stream - > GetSpace ( ) ; <nl> + if ( ! si - > m_started & & ! space ) <nl> + return true ; <nl> <nl> - Sleep ( std : : min ( sleep , 15 ) ) ; <nl> - } <nl> + / * see if it is time yet to FF / RW or a direct seek * / <nl> + if ( ! si - > m_playNextTriggered & & ( ( m_playbackSpeed ! 
= 1 & & si - > m_framesSent > = si - > m_seekNextAtFrame ) | | si - > m_seekFrame > - 1 ) ) <nl> + { <nl> + / * if its a direct seek * / <nl> + if ( si - > m_seekFrame > - 1 ) <nl> + { <nl> + si - > m_framesSent = si - > m_seekFrame ; <nl> + si - > m_seekFrame = - 1 ; <nl> } <nl> + / * if its FF / RW * / <nl> else <nl> - Sleep ( 100 ) ; <nl> + { <nl> + si - > m_framesSent + = si - > m_sampleRate * ( m_playbackSpeed - 1 ) ; <nl> + si - > m_seekNextAtFrame = si - > m_framesSent + si - > m_sampleRate / 2 ; <nl> + } <nl> + <nl> + int64_t time = ( int64_t ) ( si - > m_startOffset + ( ( float ) si - > m_framesSent / ( float ) si - > m_sampleRate * 1000 . 0f ) ) ; <nl> + <nl> + / * if we are seeking back before the start of the track start normal playback * / <nl> + if ( time < si - > m_startOffset | | si - > m_framesSent < 0 ) <nl> + { <nl> + time = si - > m_startOffset ; <nl> + si - > m_framesSent = 0 ; <nl> + si - > m_seekNextAtFrame = 0 ; <nl> + ToFFRW ( 1 ) ; <nl> + } <nl> + <nl> + si - > m_decoder . Seek ( time ) ; <nl> } <nl> - return true ; <nl> - } <nl> <nl> - int64_t PAPlayer : : GetTime ( ) <nl> - { <nl> - int64_t timeplus = m_BytesPerSecond ? ( int64_t ) ( ( ( float ) m_bytesSentOut / ( float ) m_BytesPerSecond ) * 1000 . 0 ) : 0 ; <nl> - return m_timeOffset + timeplus - m_currentFile - > m_lStartOffset * 1000 / 75 ; <nl> - } <nl> + int status = si - > m_decoder . GetStatus ( ) ; <nl> + if ( status = = STATUS_ENDED | | <nl> + status = = STATUS_NO_FILE | | <nl> + si - > m_decoder . ReadSamples ( PACKET_SIZE ) = = RET_ERROR ) <nl> + { <nl> + CLog : : Log ( LOGINFO , " PAPlayer : : ProcessStream - Stream Finished " ) ; <nl> + return false ; <nl> + } <nl> <nl> - int64_t PAPlayer : : GetTotalTime64 ( ) <nl> - { <nl> - int64_t total = m_decoder [ m_currentDecoder ] . TotalTime ( ) ; <nl> - if ( m_currentFile - > m_lEndOffset ) <nl> - total = m_currentFile - > m_lEndOffset * 1000 / 75 ; <nl> - if ( m_currentFile - > m_lStartOffset ) <nl> - total - = m_currentFile - > m_lStartOffset * 1000 / 75 ; <nl> - return total ; <nl> - } <nl> + if ( ! QueueData ( si ) ) <nl> + return false ; <nl> <nl> - int PAPlayer : : GetTotalTime ( ) <nl> - { <nl> - return ( int ) ( GetTotalTime64 ( ) / 1000 ) ; <nl> + / * update the delay time if we are running * / <nl> + if ( si - > m_started ) <nl> + { <nl> + if ( si - > m_stream - > IsBuffering ( ) ) <nl> + delay = 0 . 0 ; <nl> + else <nl> + delay = std : : min ( delay , si - > m_stream - > GetDelay ( ) ) ; <nl> + buffer = std : : min ( buffer , si - > m_stream - > GetCacheTotal ( ) ) ; <nl> + } <nl> + <nl> + return true ; <nl> } <nl> <nl> - int PAPlayer : : GetCacheLevel ( ) const <nl> + bool PAPlayer : : QueueData ( StreamInfo * si ) <nl> { <nl> - const ICodec * codec = m_decoder [ m_currentDecoder ] . GetCodec ( ) ; <nl> - if ( codec ) <nl> - return codec - > GetCacheLevel ( ) ; <nl> + unsigned int space = si - > m_stream - > GetSpace ( ) ; <nl> + unsigned int samples = std : : min ( si - > m_decoder . GetDataSize ( ) , space / si - > m_bytesPerSample ) ; <nl> + if ( ! samples ) <nl> + return true ; <nl> <nl> - return - 1 ; <nl> + void * data = si - > m_decoder . GetData ( samples ) ; <nl> + if ( ! 
data ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " PAPlayer : : QueueData - Failed to get data from the decoder " ) ; <nl> + return false ; <nl> + } <nl> + <nl> + unsigned int added = si - > m_stream - > AddData ( data , samples * si - > m_bytesPerSample ) ; <nl> + si - > m_framesSent + = added / si - > m_bytesPerFrame ; <nl> + <nl> + return true ; <nl> } <nl> <nl> - int PAPlayer : : GetChannels ( ) <nl> + void PAPlayer : : OnExit ( ) <nl> { <nl> - ICodec * codec = m_decoder [ m_currentDecoder ] . GetCodec ( ) ; <nl> - if ( codec ) <nl> - return codec - > m_Channels ; <nl> - return 0 ; <nl> + <nl> } <nl> <nl> - int PAPlayer : : GetBitsPerSample ( ) <nl> + void PAPlayer : : RegisterAudioCallback ( IAudioCallback * pCallback ) <nl> { <nl> - ICodec * codec = m_decoder [ m_currentDecoder ] . GetCodec ( ) ; <nl> - if ( codec ) <nl> - return codec - > m_BitsPerSample ; <nl> - return 0 ; <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + m_audioCallback = pCallback ; <nl> + if ( m_currentStream & & m_currentStream - > m_stream ) <nl> + m_currentStream - > m_stream - > RegisterAudioCallback ( pCallback ) ; <nl> } <nl> <nl> - int PAPlayer : : GetSampleRate ( ) <nl> + void PAPlayer : : UnRegisterAudioCallback ( ) <nl> { <nl> - ICodec * codec = m_decoder [ m_currentDecoder ] . GetCodec ( ) ; <nl> - if ( codec ) <nl> - return ( int ) ( ( codec - > m_SampleRate / 1000 ) + 0 . 5 ) ; <nl> - return 0 ; <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + / * only one stream should have the callback , but we do it to all just incase * / <nl> + for ( StreamList : : iterator itt = m_streams . begin ( ) ; itt ! = m_streams . end ( ) ; + + itt ) <nl> + if ( ( * itt ) - > m_stream ) <nl> + ( * itt ) - > m_stream - > UnRegisterAudioCallback ( ) ; <nl> + m_audioCallback = NULL ; <nl> } <nl> <nl> - CStdString PAPlayer : : GetAudioCodecName ( ) <nl> + void PAPlayer : : OnNothingToQueueNotify ( ) <nl> { <nl> - ICodec * codec = m_decoder [ m_currentDecoder ] . GetCodec ( ) ; <nl> - if ( codec ) <nl> - return codec - > m_CodecName ; <nl> - return " " ; <nl> + m_isFinished = true ; <nl> } <nl> <nl> - int PAPlayer : : GetAudioBitrate ( ) <nl> + bool PAPlayer : : IsPlaying ( ) const <nl> { <nl> - ICodec * codec = m_decoder [ m_currentDecoder ] . GetCodec ( ) ; <nl> - if ( codec ) <nl> - return codec - > m_Bitrate ; <nl> - return 0 ; <nl> + return m_isPlaying ; <nl> } <nl> <nl> - bool PAPlayer : : CanSeek ( ) <nl> + bool PAPlayer : : IsPaused ( ) const <nl> { <nl> - return ( ( m_decoder [ m_currentDecoder ] . TotalTime ( ) > 0 ) & & m_decoder [ m_currentDecoder ] . CanSeek ( ) ) ; <nl> + return m_isPaused ; <nl> } <nl> <nl> - void PAPlayer : : Seek ( bool bPlus , bool bLargeStep ) <nl> + void PAPlayer : : Pause ( ) <nl> { <nl> - int64_t seek ; <nl> - if ( g_advancedSettings . m_musicUseTimeSeeking & & GetTotalTime ( ) > 2 * g_advancedSettings . m_musicTimeSeekForwardBig ) <nl> + if ( m_isPaused ) <nl> { <nl> - if ( bLargeStep ) <nl> - seek = bPlus ? g_advancedSettings . m_musicTimeSeekForwardBig : g_advancedSettings . m_musicTimeSeekBackwardBig ; <nl> - else <nl> - seek = bPlus ? g_advancedSettings . m_musicTimeSeekForward : g_advancedSettings . m_musicTimeSeekBackward ; <nl> - seek * = 1000 ; <nl> - seek + = GetTime ( ) ; <nl> + m_isPaused = false ; <nl> + SoftStart ( ) ; <nl> } <nl> else <nl> { <nl> - float percent ; <nl> - if ( bLargeStep ) <nl> - percent = bPlus ? ( float ) g_advancedSettings . m_musicPercentSeekForwardBig : ( float ) g_advancedSettings . 
m_musicPercentSeekBackwardBig ; <nl> - else <nl> - percent = bPlus ? ( float ) g_advancedSettings . m_musicPercentSeekForward : ( float ) g_advancedSettings . m_musicPercentSeekBackward ; <nl> - seek = ( int64_t ) ( GetTotalTime64 ( ) * ( GetPercentage ( ) + percent ) / 100 ) ; <nl> + m_isPaused = true ; <nl> + SoftStop ( true , false ) ; <nl> } <nl> - <nl> - SeekTime ( seek ) ; <nl> } <nl> <nl> - void PAPlayer : : SeekTime ( int64_t iTime / * = 0 * / ) <nl> + void PAPlayer : : SetVolume ( float volume ) <nl> { <nl> - if ( ! CanSeek ( ) ) return ; <nl> - int seekOffset = ( int ) ( iTime - GetTime ( ) ) ; <nl> - if ( m_currentFile - > m_lStartOffset ) <nl> - iTime + = m_currentFile - > m_lStartOffset * 1000 / 75 ; <nl> - m_SeekTime = iTime ; <nl> - m_callback . OnPlayBackSeek ( ( int ) m_SeekTime , seekOffset ) ; <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : : Seeking to time % f " , 0 . 001f * m_SeekTime ) ; <nl> - } <nl> <nl> - void PAPlayer : : SeekPercentage ( float fPercent / * = 0 * / ) <nl> - { <nl> - if ( fPercent < 0 . 0f ) fPercent = 0 . 0f ; <nl> - if ( fPercent > 100 . 0f ) fPercent = 100 . 0f ; <nl> - SeekTime ( ( int64_t ) ( fPercent * 0 . 01f * ( float ) GetTotalTime64 ( ) ) ) ; <nl> } <nl> <nl> - float PAPlayer : : GetPercentage ( ) <nl> + void PAPlayer : : SetDynamicRangeCompression ( long drc ) <nl> { <nl> - float percent = ( float ) GetTime ( ) * 100 . 0f / GetTotalTime64 ( ) ; <nl> - return percent ; <nl> + <nl> } <nl> <nl> - void PAPlayer : : HandleSeeking ( ) <nl> + void PAPlayer : : ToFFRW ( int iSpeed ) <nl> { <nl> - if ( m_SeekTime ! = - 1 ) <nl> - { <nl> - unsigned int time = XbmcThreads : : SystemClockMillis ( ) ; <nl> - m_timeOffset = m_decoder [ m_currentDecoder ] . Seek ( m_SeekTime ) ; <nl> - CLog : : Log ( LOGDEBUG , " Seek to time % f took % i ms " , 0 . 001f * m_SeekTime , ( int ) ( XbmcThreads : : SystemClockMillis ( ) - time ) ) ; <nl> - FlushStreams ( ) ; <nl> - m_SeekTime = - 1 ; <nl> - } <nl> - g_infoManager . m_performingSeek = false ; <nl> + m_playbackSpeed = iSpeed ; <nl> + m_signalSpeedChange = true ; <nl> } <nl> <nl> - void PAPlayer : : FlushStreams ( ) <nl> + int64_t PAPlayer : : GetTime ( ) <nl> { <nl> - m_bytesSentOut = 0 ; <nl> - for ( int stream = 0 ; stream < 2 ; stream + + ) <nl> - { <nl> - if ( m_pAudioDecoder [ stream ] & & m_packet [ stream ] ) <nl> - { <nl> - m_pAudioDecoder [ stream ] - > Stop ( ) ; <nl> - m_pAudioDecoder [ stream ] - > Resume ( ) ; <nl> - m_bufferPos [ stream ] = 0 ; <nl> - } <nl> - } <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return 0 ; <nl> + <nl> + double time = ( double ) m_currentStream - > m_framesSent / ( double ) m_currentStream - > m_sampleRate ; <nl> + if ( m_currentStream - > m_stream ) <nl> + time - = m_currentStream - > m_stream - > GetDelay ( ) ; <nl> + <nl> + return ( int64_t ) ( time * 1000 . 0 ) ; <nl> } <nl> <nl> - bool PAPlayer : : HandleFFwdRewd ( ) <nl> + int64_t PAPlayer : : GetTotalTime64 ( ) <nl> { <nl> - if ( ! m_IsFFwdRewding & & m_iSpeed = = 1 ) <nl> - return true ; / / nothing to do <nl> - if ( m_IsFFwdRewding & & m_iSpeed = = 1 ) <nl> - { / / stop ffwd / rewd <nl> - m_IsFFwdRewding = false ; <nl> - SetVolume ( g_settings . 
m_nVolumeLevel ) ; <nl> - FlushStreams ( ) ; <nl> - return true ; <nl> - } <nl> - / / we ' re definitely fastforwarding or rewinding <nl> - int snippet = m_BytesPerSecond / 2 ; <nl> - if ( m_bytesSentOut > = snippet ) <nl> - { <nl> - / / Calculate offset to seek if we do FF / RW <nl> - int64_t time = GetTime ( ) ; <nl> - if ( m_IsFFwdRewding ) snippet = ( int ) m_bytesSentOut ; <nl> - time + = ( int64_t ) ( ( double ) snippet * ( m_iSpeed - 1 . 0 ) / m_BytesPerSecond * 1000 . 0 ) ; <nl> - <nl> - / / Is our offset inside the track range ? <nl> - if ( time > = 0 & & time < = m_decoder [ m_currentDecoder ] . TotalTime ( ) ) <nl> - { / / just set next position to read <nl> - m_IsFFwdRewding = true ; <nl> - time + = m_currentFile - > m_lStartOffset * 1000 / 75 ; <nl> - m_timeOffset = m_decoder [ m_currentDecoder ] . Seek ( time ) ; <nl> - FlushStreams ( ) ; <nl> - SetVolume ( g_settings . m_nVolumeLevel - VOLUME_FFWD_MUTE ) ; / / override xbmc mute <nl> - } <nl> - else if ( time < 0 ) <nl> - { / / . . . disable seeking and start the track again <nl> - time = m_currentFile - > m_lStartOffset * 1000 / 75 ; <nl> - m_timeOffset = m_decoder [ m_currentDecoder ] . Seek ( time ) ; <nl> - FlushStreams ( ) ; <nl> - m_iSpeed = 1 ; <nl> - SetVolume ( g_settings . m_nVolumeLevel ) ; / / override xbmc mute <nl> - } / / is our next position greater then the end sector . . . <nl> - else / / if ( time > m_codec - > m_TotalTime ) <nl> - { <nl> - / / restore volume level so the next track isn ' t muted <nl> - SetVolume ( g_settings . m_nVolumeLevel ) ; <nl> - CLog : : Log ( LOGDEBUG , " PAPlayer : End of track reached while seeking " ) ; <nl> - return false ; <nl> - } <nl> - } <nl> - return true ; <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return 0 ; <nl> + <nl> + int64_t total = m_currentStream - > m_decoder . TotalTime ( ) ; <nl> + if ( m_currentStream - > m_endOffset ) <nl> + total = m_currentStream - > m_endOffset ; <nl> + total - = m_currentStream - > m_startOffset ; <nl> + return total ; <nl> } <nl> <nl> - void PAPlayer : : SetStreamVolume ( int stream , long nVolume ) <nl> + int PAPlayer : : GetTotalTime ( ) <nl> { <nl> - m_pAudioDecoder [ stream ] - > SetCurrentVolume ( nVolume ) ; <nl> + return ( int ) ( GetTotalTime64 ( ) / 1000 ) ; <nl> } <nl> <nl> - bool PAPlayer : : AddPacketsToStream ( int stream , CAudioDecoder & dec ) <nl> + int PAPlayer : : GetCacheLevel ( ) const <nl> { <nl> - if ( ! m_pAudioDecoder [ stream ] | | dec . GetStatus ( ) = = STATUS_NO_FILE ) <nl> - return false ; <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return - 1 ; <nl> <nl> - bool ret = false ; <nl> - int amount = m_resampler [ stream ] . GetInputSamples ( ) ; <nl> - if ( amount > 0 & & amount < = ( int ) dec . GetDataSize ( ) ) <nl> - { / / resampler wants more data - let ' s feed it <nl> - m_resampler [ stream ] . PutFloatData ( ( float * ) dec . GetData ( amount ) , amount ) ; <nl> - ret = true ; <nl> - } <nl> - else if ( m_resampler [ stream ] . GetData ( m_packet [ stream ] [ 0 ] . packet ) ) <nl> - { <nl> - / / got some data from our resampler - construct audio packet <nl> - m_packet [ stream ] [ 0 ] . length = PACKET_SIZE ; <nl> - m_packet [ stream ] [ 0 ] . stream = stream ; <nl> + const ICodec * codec = m_currentStream - > m_decoder . GetCodec ( ) ; <nl> + if ( codec ) <nl> + return codec - > GetCacheLevel ( ) ; <nl> + return - 1 ; <nl> + } <nl> <nl> - unsigned char * pcmPtr = m_packet [ stream ] [ 0 ] . 
packet ; <nl> - int len = m_packet [ stream ] [ 0 ] . length ; <nl> - StreamCallback ( & m_packet [ stream ] [ 0 ] ) ; <nl> + int PAPlayer : : GetChannels ( ) <nl> + { <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return 0 ; <nl> <nl> - memcpy ( m_pcmBuffer [ stream ] + m_bufferPos [ stream ] , pcmPtr , len ) ; <nl> - m_bufferPos [ stream ] + = len ; <nl> + return m_currentStream - > m_channelInfo . Count ( ) ; <nl> + } <nl> <nl> - while ( m_bufferPos [ stream ] > = ( int ) m_pAudioDecoder [ stream ] - > GetChunkLen ( ) ) <nl> - { <nl> - int rtn = m_pAudioDecoder [ stream ] - > AddPackets ( m_pcmBuffer [ stream ] , m_bufferPos [ stream ] ) ; <nl> + int PAPlayer : : GetBitsPerSample ( ) <nl> + { <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return 0 ; <nl> <nl> - if ( rtn > 0 ) <nl> - { <nl> - m_bufferPos [ stream ] - = rtn ; <nl> - memmove ( m_pcmBuffer [ stream ] , m_pcmBuffer [ stream ] + rtn , m_bufferPos [ stream ] ) ; <nl> - } <nl> - else / / no pcm data added <nl> - { <nl> - int sleepTime = MathUtils : : round_int ( m_pAudioDecoder [ stream ] - > GetCacheTime ( ) * 200 . 0 ) ; <nl> - Sleep ( std : : max ( sleepTime , 1 ) ) ; <nl> - } <nl> - } <nl> + return m_currentStream - > m_bytesPerSample > > 3 ; <nl> + } <nl> <nl> - / / something done <nl> - ret = true ; <nl> - } <nl> + int PAPlayer : : GetSampleRate ( ) <nl> + { <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return 0 ; <nl> <nl> - return ret ; <nl> + return m_currentStream - > m_sampleRate ; <nl> } <nl> <nl> - bool PAPlayer : : FindFreePacket ( int stream , DWORD * pdwPacket ) <nl> + CStdString PAPlayer : : GetAudioCodecName ( ) <nl> { <nl> - return true ; <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return " " ; <nl> + <nl> + const ICodec * codec = m_currentStream - > m_decoder . GetCodec ( ) ; <nl> + if ( codec ) <nl> + return codec - > m_CodecName ; <nl> + return " " ; <nl> } <nl> <nl> - void PAPlayer : : RegisterAudioCallback ( IAudioCallback * pCallback ) <nl> + int PAPlayer : : GetAudioBitrate ( ) <nl> { <nl> - m_pCallback = pCallback ; <nl> - if ( m_pCallback ) <nl> - m_pCallback - > OnInitialize ( m_channelCount [ m_currentStream ] , m_sampleRate [ m_currentStream ] , m_bitsPerSample [ m_currentStream ] ) ; <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return 0 ; <nl> + <nl> + const ICodec * codec = m_currentStream - > m_decoder . GetCodec ( ) ; <nl> + if ( codec ) <nl> + return codec - > m_Bitrate ; <nl> + return 0 ; <nl> } <nl> <nl> - void PAPlayer : : UnRegisterAudioCallback ( ) <nl> + bool PAPlayer : : CanSeek ( ) <nl> { <nl> - m_pCallback = NULL ; <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> + return false ; <nl> + <nl> + return m_currentStream - > m_decoder . CanSeek ( ) ; <nl> } <nl> <nl> - void PAPlayer : : DoAudioWork ( ) <nl> + void PAPlayer : : Seek ( bool bPlus , bool bLargeStep ) <nl> { <nl> - if ( m_pCallback & & m_visBufferLength ) <nl> - { <nl> - m_pCallback - > OnAudioData ( ( BYTE * ) m_visBuffer , m_visBufferLength ) ; <nl> - m_visBufferLength = 0 ; <nl> - } <nl> } <nl> <nl> - void PAPlayer : : StreamCallback ( LPVOID pPacketContext ) <nl> + void PAPlayer : : SeekTime ( int64_t iTime / * = 0 * / ) <nl> { <nl> - AudioPacket * pkt = ( AudioPacket * ) pPacketContext ; <nl> - <nl> + if ( ! 
CanSeek ( ) ) return ; <nl> <nl> - / / only process from the current stream ( if we ' re crossfading for instance ) <nl> - if ( pkt - > stream ! = m_currentStream ) <nl> + CSharedLock lock ( m_streamsLock ) ; <nl> + if ( ! m_currentStream ) <nl> return ; <nl> <nl> - m_bytesSentOut + = pkt - > length ; <nl> + int seekOffset = ( int ) ( iTime - GetTime ( ) ) ; <nl> + if ( m_currentStream - > m_startOffset ) <nl> + iTime + = m_currentStream - > m_startOffset ; <nl> <nl> - if ( m_pCallback ) <nl> - { / / copy into our visualisation buffer . <nl> - / / can ' t use a memcpy ( ) here due to the context ( will crash otherwise ) <nl> - memcpy ( ( short * ) m_visBuffer , pkt - > packet , pkt - > length ) ; <nl> - m_visBufferLength = pkt - > length ; <nl> - } <nl> + if ( m_playbackSpeed ! = 1 ) <nl> + ToFFRW ( 1 ) ; <nl> + <nl> + m_currentStream - > m_seekFrame = ( int ) ( m_currentStream - > m_sampleRate * ( iTime / 1000 ) ) ; <nl> + m_callback . OnPlayBackSeek ( ( int ) iTime , seekOffset ) ; <nl> } <nl> <nl> - void CALLBACK StaticStreamCallback ( VOID * pStreamContext , VOID * pPacketContext , DWORD dwStatus ) <nl> + void PAPlayer : : SeekPercentage ( float fPercent / * = 0 * / ) <nl> { <nl> - PAPlayer * pPlayer = ( PAPlayer * ) pStreamContext ; <nl> - pPlayer - > StreamCallback ( pPacketContext ) ; <nl> + if ( fPercent < 0 . 0f ) fPercent = 0 . 0f ; <nl> + if ( fPercent > 100 . 0f ) fPercent = 100 . 0f ; <nl> + SeekTime ( ( int64_t ) ( fPercent * 0 . 01f * ( float ) GetTotalTime64 ( ) ) ) ; <nl> } <nl> <nl> - bool PAPlayer : : HandlesType ( const CStdString & type ) <nl> + float PAPlayer : : GetPercentage ( ) <nl> { <nl> - ICodec * codec = CodecFactory : : CreateCodec ( type ) ; <nl> - <nl> - if ( codec & & codec - > CanInit ( ) ) <nl> - { <nl> - delete codec ; <nl> - return true ; <nl> - } <nl> - if ( codec ) <nl> - delete codec ; <nl> - <nl> - return false ; <nl> + return GetTime ( ) * 100 . 0f / GetTotalTime64 ( ) ; <nl> } <nl> <nl> - / / Skip to next track / item inside the current media ( if supported ) . <nl> bool PAPlayer : : SkipNext ( ) <nl> { <nl> - if ( m_decoder [ m_currentDecoder ] . GetCodec ( ) & & m_decoder [ m_currentDecoder ] . GetCodec ( ) - > SkipNext ( ) ) <nl> - { <nl> - return true ; <nl> - } <nl> return false ; <nl> } <nl> - <nl> - void PAPlayer : : WaitForStream ( ) <nl> - { <nl> - / / should we wait for our other stream as well ? <nl> - / / currently we don ' t . <nl> - if ( m_pAudioDecoder [ m_currentStream ] ) <nl> - { <nl> - m_pAudioDecoder [ m_currentStream ] - > WaitCompletion ( ) ; <nl> - } <nl> - } <nl> mmm a / xbmc / cores / paplayer / PAPlayer . h <nl> ppp b / xbmc / cores / paplayer / PAPlayer . h <nl> <nl> * <nl> * / <nl> <nl> + # include < list > <nl> + <nl> # include " cores / IPlayer . h " <nl> # include " threads / Thread . h " <nl> # include " AudioDecoder . h " <nl> - # include " utils / ssrc . h " <nl> - # include " cores / AudioRenderers / IAudioRenderer . h " <nl> + # include " threads / SharedSection . h " <nl> <nl> - class CFileItem ; <nl> - # ifndef _LINUX <nl> - # define PACKET_COUNT 20 / / number of packets of size PACKET_SIZE ( defined in AudioDecoder . 
h ) <nl> - # else <nl> - # define PACKET_COUNT 1 <nl> - # endif <nl> - <nl> - # define STATUS_NO_FILE 0 <nl> - # define STATUS_QUEUING 1 <nl> - # define STATUS_QUEUED 2 <nl> - # define STATUS_PLAYING 3 <nl> - # define STATUS_ENDING 4 <nl> - # define STATUS_ENDED 5 <nl> - <nl> - struct AudioPacket <nl> - { <nl> - BYTE * packet ; <nl> - DWORD length ; <nl> - DWORD status ; <nl> - int stream ; <nl> - } ; <nl> + # include " cores / IAudioCallback . h " <nl> + # include " cores / AudioEngine / AEFactory . h " <nl> + # include " cores / AudioEngine / Interfaces / AEStream . h " <nl> <nl> + class CFileItem ; <nl> class PAPlayer : public IPlayer , public CThread <nl> { <nl> public : <nl> PAPlayer ( IPlayerCallback & callback ) ; <nl> virtual ~ PAPlayer ( ) ; <nl> <nl> + virtual void RegisterAudioCallback ( IAudioCallback * pCallback ) ; <nl> + virtual void UnRegisterAudioCallback ( ) ; <nl> virtual bool OpenFile ( const CFileItem & file , const CPlayerOptions & options ) ; <nl> virtual bool QueueNextFile ( const CFileItem & file ) ; <nl> virtual void OnNothingToQueueNotify ( ) ; <nl> - virtual bool CloseFile ( ) { return CloseFileInternal ( true ) ; } <nl> - virtual bool CloseFileInternal ( bool bAudioDevice = true ) ; <nl> - virtual bool IsPlaying ( ) const { return m_bIsPlaying ; } <nl> + virtual bool CloseFile ( ) ; <nl> + virtual bool IsPlaying ( ) const ; <nl> virtual void Pause ( ) ; <nl> - virtual bool IsPaused ( ) const { return m_bPaused ; } <nl> + virtual bool IsPaused ( ) const ; <nl> virtual bool HasVideo ( ) const { return false ; } <nl> virtual bool HasAudio ( ) const { return true ; } <nl> virtual bool CanSeek ( ) ; <nl> class PAPlayer : public IPlayer , public CThread <nl> virtual void ToFFRW ( int iSpeed = 0 ) ; <nl> virtual int GetCacheLevel ( ) const ; <nl> virtual int GetTotalTime ( ) ; <nl> - int64_t GetTotalTime64 ( ) ; <nl> virtual int GetAudioBitrate ( ) ; <nl> virtual int GetChannels ( ) ; <nl> virtual int GetBitsPerSample ( ) ; <nl> class PAPlayer : public IPlayer , public CThread <nl> virtual CStdString GetAudioCodecName ( ) ; <nl> virtual int64_t GetTime ( ) ; <nl> virtual void SeekTime ( int64_t iTime = 0 ) ; <nl> - / / Skip to next track / item inside the current media ( if supported ) . 
<nl> virtual bool SkipNext ( ) ; <nl> <nl> - void StreamCallback ( LPVOID pPacketContext ) ; <nl> - <nl> - virtual void RegisterAudioCallback ( IAudioCallback * pCallback ) ; <nl> - virtual void UnRegisterAudioCallback ( ) ; <nl> - <nl> static bool HandlesType ( const CStdString & type ) ; <nl> - virtual void DoAudioWork ( ) ; <nl> - <nl> protected : <nl> - <nl> virtual void OnStartup ( ) { } <nl> virtual void Process ( ) ; <nl> virtual void OnExit ( ) ; <nl> <nl> - void HandleSeeking ( ) ; <nl> - bool HandleFFwdRewd ( ) ; <nl> - <nl> - bool m_bPaused ; <nl> - bool m_bIsPlaying ; <nl> - bool m_bQueueFailed ; <nl> - bool m_bStopPlaying ; <nl> - bool m_cachingNextFile ; <nl> - int m_crossFading ; <nl> - bool m_currentlyCrossFading ; <nl> - int64_t m_crossFadeLength ; <nl> - <nl> - CEvent m_startEvent ; <nl> - <nl> - int m_iSpeed ; / / current playing speed <nl> - <nl> private : <nl> - <nl> - bool ProcessPAP ( ) ; / / does the actual reading and decode from our PAP dll <nl> - <nl> - int64_t m_SeekTime ; <nl> - int m_IsFFwdRewding ; <nl> - int64_t m_timeOffset ; <nl> - bool m_forceFadeToNext ; <nl> - <nl> - int m_currentDecoder ; <nl> - CAudioDecoder m_decoder [ 2 ] ; / / our 2 audiodecoders ( for crossfading + precaching ) <nl> - <nl> - # ifndef _LINUX <nl> - void SetupDirectSound ( int channels ) ; <nl> - # endif <nl> - <nl> - / / Our directsoundstream <nl> - friend void CALLBACK StaticStreamCallback ( LPVOID pStreamContext , LPVOID pPacketContext , DWORD dwStatus ) ; <nl> - bool AddPacketsToStream ( int stream , CAudioDecoder & dec ) ; <nl> - bool FindFreePacket ( int stream , DWORD * pdwPacket ) ; / / Looks for a free packet <nl> - void FreeStream ( int stream ) ; <nl> - # if defined ( _LINUX ) | | defined ( _WIN32 ) <nl> - void DrainStream ( int stream ) ; <nl> - # endif <nl> - bool CreateStream ( int stream , unsigned int channels , unsigned int samplerate , unsigned int bitspersample , CStdString codec = " " ) ; <nl> - void FlushStreams ( ) ; <nl> - void WaitForStream ( ) ; <nl> - void SetStreamVolume ( int stream , long nVolume ) ; <nl> - <nl> - void UpdateCrossFadingTime ( const CFileItem & file ) ; <nl> - bool QueueNextFile ( const CFileItem & file , bool checkCrossFading ) ; <nl> - void UpdateCacheLevel ( ) ; <nl> - <nl> - int m_currentStream ; <nl> - <nl> - IAudioRenderer * m_pAudioDecoder [ 2 ] ; <nl> - float m_latency [ 2 ] ; <nl> - unsigned char * m_pcmBuffer [ 2 ] ; <nl> - int m_bufferPos [ 2 ] ; <nl> - unsigned int m_Chunklen [ 2 ] ; <nl> - <nl> - unsigned int m_SampleRate ; <nl> - unsigned int m_Channels ; <nl> - unsigned int m_BitsPerSample ; <nl> - <nl> - unsigned int m_SampleRateOutput ; <nl> - unsigned int m_BitsPerSampleOutput ; <nl> - <nl> - AudioPacket m_packet [ 2 ] [ PACKET_COUNT ] ; <nl> - <nl> - IAudioCallback * m_pCallback ; <nl> - <nl> - int64_t m_bytesSentOut ; <nl> - <nl> - / / format ( this should be stored / retrieved from the audio device object probably ) <nl> - unsigned int m_channelCount [ 2 ] ; <nl> - enum PCMChannels * m_channelMap [ 2 ] ; <nl> - unsigned int m_sampleRate [ 2 ] ; <nl> - unsigned int m_bitsPerSample [ 2 ] ; <nl> - unsigned int m_BytesPerSecond ; <nl> - <nl> - unsigned int m_CacheLevel ; <nl> - unsigned int m_LastCacheLevelCheck ; <nl> - <nl> - / / resampler <nl> - Cssrc m_resampler [ 2 ] ; <nl> - bool m_resampleAudio ; <nl> - <nl> - / / our file <nl> - CFileItem * m_currentFile ; <nl> - CFileItem * m_nextFile ; <nl> - <nl> - / / stuff for visualisation <nl> - unsigned int m_visBufferLength ; <nl> - short m_visBuffer [ PACKET_SIZE + 2 ] ; 
<nl> - <nl> + typedef struct { <nl> + CAudioDecoder m_decoder ; / * the stream decoder * / <nl> + int64_t m_startOffset ; / * the stream start offset * / <nl> + int64_t m_endOffset ; / * the stream end offset * / <nl> + CAEChannelInfo m_channelInfo ; / * channel layout information * / <nl> + unsigned int m_sampleRate ; / * sample rate of the stream * / <nl> + unsigned int m_encodedSampleRate ; / * the encoded sample rate of raw streams * / <nl> + enum AEDataFormat m_dataFormat ; / * data format of the samples * / <nl> + unsigned int m_bytesPerSample ; / * number of bytes per audio sample * / <nl> + unsigned int m_bytesPerFrame ; / * number of bytes per audio frame * / <nl> + <nl> + bool m_started ; / * if playback of this stream has been started * / <nl> + bool m_finishing ; / * if this stream is finishing * / <nl> + int m_framesSent ; / * number of frames sent to the stream * / <nl> + int m_prepareNextAtFrame ; / * when to prepare the next stream * / <nl> + bool m_prepareTriggered ; / * if the next stream has been prepared * / <nl> + int m_playNextAtFrame ; / * when to start playing the next stream * / <nl> + bool m_playNextTriggered ; / * if this stream has started the next one * / <nl> + bool m_fadeOutTriggered ; / * if the stream has been told to fade out * / <nl> + int m_seekNextAtFrame ; / * the FF / RR sample to seek at * / <nl> + int m_seekFrame ; / * the exact position to seek too , - 1 for none * / <nl> + <nl> + IAEStream * m_stream ; / * the playback stream * / <nl> + float m_volume ; / * the initial volume level to set the stream to on creation * / <nl> + <nl> + bool m_isSlaved ; / * true if the stream has been slaved to another * / <nl> + } StreamInfo ; <nl> + <nl> + typedef std : : list < StreamInfo * > StreamList ; <nl> + <nl> + bool m_signalSpeedChange ; / * true if OnPlaybackSpeedChange needs to be called * / <nl> + int m_playbackSpeed ; / * the playback speed ( 1 = normal ) * / <nl> + bool m_isPlaying ; <nl> + bool m_isPaused ; <nl> + bool m_isFinished ; / * if there are no more songs in the queue * / <nl> + unsigned int m_crossFadeTime ; / * how long the crossfade is * / <nl> + CEvent m_startEvent ; / * event for playback start * / <nl> + StreamInfo * m_currentStream ; / * the current playing stream * / <nl> + IAudioCallback * m_audioCallback ; / * the viz audio callback * / <nl> + <nl> + CSharedSection m_streamsLock ; / * lock for the stream list * / <nl> + StreamList m_streams ; / * playing streams * / <nl> + StreamList m_finishing ; / * finishing streams * / <nl> + <nl> + bool QueueNextFileEx ( const CFileItem & file , bool fadeIn = true ) ; <nl> + void SoftStart ( bool wait = false ) ; <nl> + void SoftStop ( bool wait = false , bool close = true ) ; <nl> + void CloseAllStreams ( bool fade = true ) ; <nl> + void ProcessStreams ( double & delay , double & buffer ) ; <nl> + bool PrepareStream ( StreamInfo * si ) ; <nl> + bool ProcessStream ( StreamInfo * si , double & delay , double & buffer ) ; <nl> + bool QueueData ( StreamInfo * si ) ; <nl> + int64_t GetTotalTime64 ( ) ; <nl> } ; <nl> <nl>
[ AE ] paplayer : switch to AE
xbmc/xbmc
349ec408c33c250b614b68037fec4dd5de5ddcff
2012-05-09T21:40:54Z
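The PAPlayer commit above replaces the old two-decoder crossfade loop with a list of per-track stream records whose frame counters (m_prepareNextAtFrame, m_playNextAtFrame) decide when to queue the next item and when to hand playback over. The C++ sketch below shows only that drain-then-advance pattern in isolation; StreamInfo, MiniPlayer and OnQueueNextItem are simplified stand-ins for illustration, not the actual XBMC types.

#include <list>

// Simplified stand-in for the per-track record kept by the player.
struct StreamInfo
{
  bool started = false;
  bool prepareTriggered = false;   // next item has been requested
  bool playNextTriggered = false;  // hand-off to the next stream started
  bool finished = false;           // decoder hit EOF or an error
  long framesSent = 0;
  long prepareNextAtFrame = 0;     // when to ask the app to queue the next item
  long playNextAtFrame = 0;        // when to start the crossfade / hand-off
};

class MiniPlayer
{
public:
  // Called once per processing iteration: drop finished streams, then advance
  // live ones and fire the queue / hand-off triggers at their frame thresholds.
  void ProcessStreams()
  {
    for (auto itt = m_streams.begin(); itt != m_streams.end(); )
    {
      StreamInfo* si = *itt;
      if (si->finished)
      {
        if (!si->prepareTriggered)
          OnQueueNextItem();            // make sure the next track still gets queued
        delete si;
        itt = m_streams.erase(itt);
        continue;
      }

      if (si->prepareNextAtFrame > 0 && !si->prepareTriggered &&
          si->framesSent >= si->prepareNextAtFrame)
      {
        si->prepareTriggered = true;
        OnQueueNextItem();
      }

      if (si->playNextAtFrame > 0 && !si->playNextTriggered &&
          si->framesSent >= si->playNextAtFrame)
        si->playNextTriggered = true;   // the real code also starts the fade here

      ++itt;
    }
  }

private:
  void OnQueueNextItem() { /* ask the application for the next file */ }
  std::list<StreamInfo*> m_streams;
};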
mmm a / dbms / tests / integration / helpers / network . py <nl> ppp b / dbms / tests / integration / helpers / network . py <nl> def __enter__ ( self ) : <nl> def __exit__ ( self , exc_type , exc_val , exc_tb ) : <nl> self . heal_all ( ) <nl> <nl> + def __del__ ( self ) : <nl> + self . heal_all ( ) <nl> + <nl> <nl> class PartitionManagerDisbaler : <nl> def __init__ ( self , manager ) : <nl> mmm a / dbms / tests / integration / test_distributed_ddl / test . py <nl> ppp b / dbms / tests / integration / test_distributed_ddl / test . py <nl> def started_cluster ( ) : <nl> ddl_check_there_are_no_dublicates ( instance ) <nl> <nl> finally : <nl> - # Remove iptables rules for sacrifice instance <nl> - cluster . pm_random_drops . heal_all ( ) <nl> cluster . shutdown ( ) <nl> <nl> <nl>
Fixed a vague error message in integration tests . [ # CLICKHOUSE - 2 ]
ClickHouse/ClickHouse
2cf6e20fefa9bbb1d669c691cf05d1117349a8c4
2017-11-20T20:15:30Z
mmm a / test / cpp / end2end / client_lb_end2end_test . cc <nl> ppp b / test / cpp / end2end / client_lb_end2end_test . cc <nl> class ClientLbEnd2endTest : public : : testing : : Test { <nl> for ( size_t i = 0 ; i < servers_ . size ( ) ; + + i ) { <nl> servers_ [ i ] - > Shutdown ( ) ; <nl> } <nl> - / / Explicitly destroy all the members so that we can make sure grpc_shutdown <nl> - / / has finished by the end of this function , and thus all the registered <nl> - / / LB policy factories are removed . <nl> servers_ . clear ( ) ; <nl> creds_ . reset ( ) ; <nl> grpc_shutdown_blocking ( ) ; <nl> class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest { <nl> protected : <nl> void SetUp ( ) override { <nl> ClientLbEnd2endTest : : SetUp ( ) ; <nl> - grpc_core : : RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy ( <nl> - ReportTrailerIntercepted , this ) ; <nl> + current_test_instance_ = this ; <nl> } <nl> <nl> void TearDown ( ) override { ClientLbEnd2endTest : : TearDown ( ) ; } <nl> <nl> + static void SetUpTestCase ( ) { <nl> + grpc_init ( ) ; <nl> + grpc_core : : RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy ( <nl> + ReportTrailerIntercepted , nullptr ) ; <nl> + } <nl> + <nl> + static void TearDownTestCase ( ) { grpc_shutdown_blocking ( ) ; } <nl> + <nl> int trailers_intercepted ( ) { <nl> grpc : : internal : : MutexLock lock ( & mu_ ) ; <nl> return trailers_intercepted_ ; <nl> class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest { <nl> static void ReportTrailerIntercepted ( <nl> void * arg , const grpc_core : : LoadBalancingPolicy : : BackendMetricData * <nl> backend_metric_data ) { <nl> - ClientLbInterceptTrailingMetadataTest * self = <nl> - static_cast < ClientLbInterceptTrailingMetadataTest * > ( arg ) ; <nl> + ClientLbInterceptTrailingMetadataTest * self = current_test_instance_ ; <nl> grpc : : internal : : MutexLock lock ( & self - > mu_ ) ; <nl> self - > trailers_intercepted_ + + ; <nl> if ( backend_metric_data ! = nullptr ) { <nl> class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest { <nl> } <nl> } <nl> <nl> + static ClientLbInterceptTrailingMetadataTest * current_test_instance_ ; <nl> grpc : : internal : : Mutex mu_ ; <nl> int trailers_intercepted_ = 0 ; <nl> std : : unique_ptr < udpa : : data : : orca : : v1 : : OrcaLoadReport > load_report_ ; <nl> } ; <nl> <nl> + ClientLbInterceptTrailingMetadataTest * <nl> + ClientLbInterceptTrailingMetadataTest : : current_test_instance_ = nullptr ; <nl> + <nl> TEST_F ( ClientLbInterceptTrailingMetadataTest , InterceptsRetriesDisabled ) { <nl> const int kNumServers = 1 ; <nl> const int kNumRpcs = 10 ; <nl>
Merge pull request from grpc / revert - 22794 - revert - 22778 - fix - client - lb - end2end - tests
grpc/grpc
eaadbd40bb2d18fb3b7e0d89e16ff8316bb2ad33
2020-04-28T04:28:58Z
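The gRPC commit above registers the trailing-metadata interceptor once per test case (in SetUpTestCase, after grpc_init) and routes the C-style callback back to the running fixture through a static pointer instead of passing `this` as the callback argument. A minimal sketch of that trampoline pattern with googletest follows; RegisterCallback is a hypothetical stand-in for the real registration function.

#include <gtest/gtest.h>
#include <mutex>

class InterceptTest : public ::testing::Test {
 protected:
  void SetUp() override { current_instance_ = this; }

  // Registered once for the whole test case; the library only accepts a plain
  // function pointer, so the fixture is reached via the static pointer.
  static void SetUpTestCase() {
    RegisterCallback(&InterceptTest::OnTrailerIntercepted, /*arg=*/nullptr);
  }

  static void OnTrailerIntercepted(void* /*arg*/) {
    InterceptTest* self = current_instance_;
    if (!self) return;
    std::lock_guard<std::mutex> lock(self->mu_);
    ++self->trailers_intercepted_;
  }

  int trailers_intercepted() {
    std::lock_guard<std::mutex> lock(mu_);
    return trailers_intercepted_;
  }

 private:
  // Stand-in for the real registration call in the library under test.
  static void RegisterCallback(void (*cb)(void*), void* arg) { (void)cb; (void)arg; }

  static InterceptTest* current_instance_;
  std::mutex mu_;
  int trailers_intercepted_ = 0;
};

InterceptTest* InterceptTest::current_instance_ = nullptr;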
mmm a / extensions / GUI / CCScrollView / CCScrollView . cpp <nl> ppp b / extensions / GUI / CCScrollView / CCScrollView . cpp <nl> void ScrollView : : onAfterDraw ( ) <nl> <nl> void ScrollView : : visit ( Renderer * renderer , const Mat4 & parentTransform , uint32_t parentFlags ) <nl> { <nl> - / / quick return if not visible <nl> + / / quick return if not visible <nl> if ( ! isVisible ( ) ) <nl> { <nl> return ; <nl>
format code
cocos2d/cocos2d-x
d8eb1b26efe78204296820785be4fcd808860818
2014-08-14T06:15:16Z
mmm a / templates / cpp - template - default / proj . wp8 - xaml / App / App . xaml . cs <nl> ppp b / templates / cpp - template - default / proj . wp8 - xaml / App / App . xaml . cs <nl> public App ( ) <nl> / / Phone - specific initialization <nl> InitializePhoneApplication ( ) ; <nl> <nl> - / / Language display initialization <nl> - InitializeLanguage ( ) ; <nl> - <nl> - / / Show graphics profiling information while debugging . <nl> + / / Show graphics profiling information while debugging . <nl> if ( Debugger . IsAttached ) <nl> { <nl> / / Display the current frame rate counters . <nl> - Application . Current . Host . Settings . EnableFrameRateCounter = true ; <nl> + Application . Current . Host . Settings . EnableFrameRateCounter = false ; <nl> <nl> / / Show the areas of the app that are being redrawn in each frame . <nl> / / Application . Current . Host . Settings . EnableRedrawRegions = true ; <nl> private void ClearBackStackAfterReset ( object sender , NavigationEventArgs e ) <nl> <nl> # endregion <nl> <nl> - / / Initialize the app ' s font and flow direction as defined in its localized resource strings . <nl> - / / <nl> - / / To ensure that the font of your application is aligned with its supported languages and that the <nl> - / / FlowDirection for each of those languages follows its traditional direction , ResourceLanguage <nl> - / / and ResourceFlowDirection should be initialized in each resx file to match these values with that <nl> - / / file ' s culture . For example : <nl> - / / <nl> - / / AppResources . es - ES . resx <nl> - / / ResourceLanguage ' s value should be " es - ES " <nl> - / / ResourceFlowDirection ' s value should be " LeftToRight " <nl> - / / <nl> - / / AppResources . ar - SA . resx <nl> - / / ResourceLanguage ' s value should be " ar - SA " <nl> - / / ResourceFlowDirection ' s value should be " RightToLeft " <nl> - / / <nl> - / / For more info on localizing Windows Phone apps see http : / / go . microsoft . com / fwlink / ? LinkId = 262072 . <nl> - / / <nl> - private void InitializeLanguage ( ) <nl> - { <nl> - try <nl> - { <nl> - / / Set the font to match the display language defined by the <nl> - / / ResourceLanguage resource string for each supported language . <nl> - / / <nl> - / / Fall back to the font of the neutral language if the Display <nl> - / / language of the phone is not supported . <nl> - / / <nl> - / / If a compiler error is hit then ResourceLanguage is missing from <nl> - / / the resource file . <nl> - RootFrame . Language = XmlLanguage . GetLanguage ( AppResources . ResourceLanguage ) ; <nl> - <nl> - / / Set the FlowDirection of all elements under the root frame based <nl> - / / on the ResourceFlowDirection resource string for each <nl> - / / supported language . <nl> - / / <nl> - / / If a compiler error is hit then ResourceFlowDirection is missing from <nl> - / / the resource file . <nl> - FlowDirection flow = ( FlowDirection ) Enum . Parse ( typeof ( FlowDirection ) , AppResources . ResourceFlowDirection ) ; <nl> - RootFrame . FlowDirection = flow ; <nl> - } <nl> - catch <nl> - { <nl> - / / If an exception is caught here it is most likely due to either <nl> - / / ResourceLangauge not being correctly set to a supported language <nl> - / / code or ResourceFlowDirection is set to a value other than LeftToRight <nl> - / / or RightToLeft . <nl> - <nl> - if ( Debugger . IsAttached ) <nl> - { <nl> - Debugger . 
Break ( ) ; <nl> - } <nl> - <nl> - throw ; <nl> - } <nl> - <nl> - } <nl> - <nl> / / Helper method for adding or updating a key / value pair in isolated storage <nl> public bool AddOrUpdateValue ( string Key , Object value ) <nl> { <nl>
updated template code
cocos2d/cocos2d-x
1a5d148d8e9fdb508677c6f86559d35db4d93d81
2014-06-16T17:29:40Z
mmm a / cocos / platform / winrt / CCWinRTUtils . cpp <nl> ppp b / cocos / platform / winrt / CCWinRTUtils . cpp <nl> using namespace Windows : : Storage : : Pickers ; <nl> using namespace Windows : : Storage : : Streams ; <nl> using namespace Windows : : Networking : : Connectivity ; <nl> <nl> + <nl> + CC_DEPRECATED_ATTRIBUTE std : : wstring CC_DLL CCUtf8ToUnicode ( const char * pszUtf8Str , unsigned len / * = - 1 * / ) <nl> + { <nl> + if ( len = = - 1 ) <nl> + { <nl> + return StringUtf8ToWideChar ( pszUtf8Str ) ; <nl> + } <nl> + else <nl> + { <nl> + std : : wstring ret ; <nl> + do <nl> + { <nl> + if ( ! pszUtf8Str | | ! len ) break ; <nl> + <nl> + / / get UTF16 string length <nl> + int wLen = MultiByteToWideChar ( CP_UTF8 , 0 , pszUtf8Str , len , 0 , 0 ) ; <nl> + if ( 0 = = wLen | | 0xFFFD = = wLen ) break ; <nl> + <nl> + / / convert string <nl> + wchar_t * pwszStr = new wchar_t [ wLen + 1 ] ; <nl> + if ( ! pwszStr ) break ; <nl> + pwszStr [ wLen ] = 0 ; <nl> + MultiByteToWideChar ( CP_UTF8 , 0 , pszUtf8Str , len , pwszStr , wLen + 1 ) ; <nl> + ret = pwszStr ; <nl> + CC_SAFE_DELETE_ARRAY ( pwszStr ) ; <nl> + } while ( 0 ) ; <nl> + return ret ; <nl> + } <nl> + } <nl> + <nl> + CC_DEPRECATED_ATTRIBUTE std : : string CC_DLL CCUnicodeToUtf8 ( const wchar_t * pwszStr ) <nl> + { <nl> + return StringWideCharToUtf8 ( pwszStr ) ; <nl> + } <nl> + <nl> + <nl> std : : wstring StringUtf8ToWideChar ( const std : : string & strUtf8 ) <nl> { <nl> std : : wstring ret ; <nl> mmm a / cocos / platform / winrt / CCWinRTUtils . h <nl> ppp b / cocos / platform / winrt / CCWinRTUtils . h <nl> NS_CC_BEGIN <nl> <nl> <nl> <nl> - <nl> + CC_DEPRECATED_ATTRIBUTE std : : wstring CC_DLL CCUtf8ToUnicode ( const char * pszUtf8Str , unsigned len = - 1 ) ; <nl> + CC_DEPRECATED_ATTRIBUTE std : : string CC_DLL CCUnicodeToUtf8 ( const wchar_t * pwszStr ) ; <nl> <nl> std : : wstring CC_DLL StringUtf8ToWideChar ( const std : : string & strUtf8 ) ; <nl> std : : string CC_DLL StringWideCharToUtf8 ( const std : : wstring & strWideChar ) ; <nl>
CCUtf8ToUnicode and CCUnicodeToUtf8 deprecated
cocos2d/cocos2d-x
a16d04fb53d834ae9311737de9a204fd05cce3fa
2015-08-16T10:08:00Z
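The cocos2d-x commit above keeps the old conversion helpers alive as deprecated thin wrappers that forward to the new StringUtf8ToWideChar / StringWideCharToUtf8 functions. A generic sketch of that compatibility pattern is shown below; it uses the standard [[deprecated]] attribute instead of the CC_DEPRECATED_ATTRIBUTE macro and drops the optional length parameter, so it is a simplified illustration rather than the actual cocos2d-x code.

#include <string>

// New, preferred API (platform-specific implementations live elsewhere).
std::wstring StringUtf8ToWideChar(const std::string& utf8);
std::string  StringWideCharToUtf8(const std::wstring& wide);

// Old names kept for source compatibility; they simply forward to the new
// functions and emit a compiler warning at every call site.
[[deprecated("use StringUtf8ToWideChar instead")]]
inline std::wstring CCUtf8ToUnicode(const char* utf8)
{
  return StringUtf8ToWideChar(utf8 ? utf8 : "");
}

[[deprecated("use StringWideCharToUtf8 instead")]]
inline std::string CCUnicodeToUtf8(const wchar_t* wide)
{
  return StringWideCharToUtf8(wide ? wide : L"");
}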
mmm a / modules / nonfree / test / test_main . cpp <nl> ppp b / modules / nonfree / test / test_main . cpp <nl> int main ( int argc , char * * argv ) <nl> # else / / HAVE_CUDA <nl> <nl> CV_TEST_MAIN ( " cv " ) <nl> - <nl> - # endif / / HAVE_CUDA <nl> mmm a / modules / nonfree / test / test_precomp . hpp <nl> ppp b / modules / nonfree / test / test_precomp . hpp <nl> <nl> # ifndef __OPENCV_TEST_PRECOMP_HPP__ <nl> # define __OPENCV_TEST_PRECOMP_HPP__ <nl> <nl> - # include < iostream > <nl> - <nl> - # include " cvconfig . h " <nl> - # include " opencv2 / opencv_modules . hpp " <nl> - <nl> # include " opencv2 / ts / ts . hpp " <nl> # include " opencv2 / imgproc / imgproc . hpp " <nl> # include " opencv2 / highgui / highgui . hpp " <nl> # include " opencv2 / nonfree / nonfree . hpp " <nl> <nl> + # include " opencv2 / opencv_modules . hpp " <nl> + # ifdef HAVE_OPENCV_OCL <nl> + # include " opencv2 / nonfree / ocl . hpp " <nl> + # endif <nl> + <nl> # if defined ( HAVE_OPENCV_GPU ) & & defined ( HAVE_CUDA ) <nl> # include " opencv2 / ts / gpu_test . hpp " <nl> # include " opencv2 / nonfree / gpu . hpp " <nl> similarity index 77 % <nl> rename from modules / ocl / test / test_surf . cpp <nl> rename to modules / nonfree / test / test_surf . ocl . cpp <nl> mmm a / modules / ocl / test / test_surf . cpp <nl> ppp b / modules / nonfree / test / test_surf . ocl . cpp <nl> <nl> / / <nl> / / M * / <nl> <nl> + # include " test_precomp . hpp " <nl> <nl> - # include " precomp . hpp " <nl> - # ifdef HAVE_OPENCL <nl> - <nl> - extern std : : string workdir ; <nl> + # ifdef HAVE_OPENCV_OCL <nl> <nl> using namespace std ; <nl> + using std : : tr1 : : get ; <nl> <nl> static bool keyPointsEquals ( const cv : : KeyPoint & p1 , const cv : : KeyPoint & p2 ) <nl> { <nl> static bool keyPointsEquals ( const cv : : KeyPoint & p1 , const cv : : KeyPoint & p2 ) <nl> return false ; <nl> } <nl> <nl> - <nl> - struct KeyPointLess : std : : binary_function < cv : : KeyPoint , cv : : KeyPoint , bool > <nl> - { <nl> - bool operator ( ) ( const cv : : KeyPoint & kp1 , const cv : : KeyPoint & kp2 ) const <nl> - { <nl> - return kp1 . pt . y < kp2 . pt . y | | ( kp1 . pt . y = = kp2 . pt . y & & kp1 . pt . x < kp2 . pt . x ) ; <nl> - } <nl> - } ; <nl> - <nl> - <nl> # define ASSERT_KEYPOINTS_EQ ( gold , actual ) EXPECT_PRED_FORMAT2 ( assertKeyPointsEquals , gold , actual ) ; <nl> <nl> static int getMatchedPointsCount ( std : : vector < cv : : KeyPoint > & gold , std : : vector < cv : : KeyPoint > & actual ) <nl> { <nl> - std : : sort ( actual . begin ( ) , actual . end ( ) , KeyPointLess ( ) ) ; <nl> - std : : sort ( gold . begin ( ) , gold . end ( ) , KeyPointLess ( ) ) ; <nl> + std : : sort ( actual . begin ( ) , actual . end ( ) , perf : : comparators : : KeypointGreater ( ) ) ; <nl> + std : : sort ( gold . begin ( ) , gold . end ( ) , perf : : comparators : : KeypointGreater ( ) ) ; <nl> <nl> int validCount = 0 ; <nl> <nl> static int getMatchedPointsCount ( const std : : vector < cv : : KeyPoint > & keypoints1 , co <nl> return validCount ; <nl> } <nl> <nl> - IMPLEMENT_PARAM_CLASS ( SURF_HessianThreshold , double ) <nl> - IMPLEMENT_PARAM_CLASS ( SURF_Octaves , int ) <nl> - IMPLEMENT_PARAM_CLASS ( SURF_OctaveLayers , int ) <nl> - IMPLEMENT_PARAM_CLASS ( SURF_Extended , bool ) <nl> - IMPLEMENT_PARAM_CLASS ( SURF_Upright , bool ) <nl> - <nl> - PARAM_TEST_CASE ( SURF , SURF_HessianThreshold , SURF_Octaves , SURF_OctaveLayers , SURF_Extended , SURF_Upright ) <nl> + # define PARAM_TEST_CASE ( name , . . . 
) struct name : testing : : TestWithParam < std : : tr1 : : tuple < __VA_ARGS__ > > <nl> + # define IMPLEMENT_PARAM_CLASS ( name , type ) \ <nl> + namespace { \ <nl> + class name \ <nl> + { \ <nl> + public : \ <nl> + name ( type arg = type ( ) ) : val_ ( arg ) { } \ <nl> + operator type ( ) const { return val_ ; } \ <nl> + private : \ <nl> + type val_ ; \ <nl> + } ; \ <nl> + inline void PrintTo ( name param , std : : ostream * os ) \ <nl> + { \ <nl> + * os < < # name < < " ( " < < testing : : PrintToString ( static_cast < type > ( param ) ) < < " ) " ; \ <nl> + } } <nl> + <nl> + IMPLEMENT_PARAM_CLASS ( HessianThreshold , double ) <nl> + IMPLEMENT_PARAM_CLASS ( Octaves , int ) <nl> + IMPLEMENT_PARAM_CLASS ( OctaveLayers , int ) <nl> + IMPLEMENT_PARAM_CLASS ( Extended , bool ) <nl> + IMPLEMENT_PARAM_CLASS ( Upright , bool ) <nl> + <nl> + PARAM_TEST_CASE ( SURF , HessianThreshold , Octaves , OctaveLayers , Extended , Upright ) <nl> { <nl> double hessianThreshold ; <nl> int nOctaves ; <nl> PARAM_TEST_CASE ( SURF , SURF_HessianThreshold , SURF_Octaves , SURF_OctaveLayers , SU <nl> <nl> virtual void SetUp ( ) <nl> { <nl> - hessianThreshold = GET_PARAM ( 0 ) ; <nl> - nOctaves = GET_PARAM ( 1 ) ; <nl> - nOctaveLayers = GET_PARAM ( 2 ) ; <nl> - extended = GET_PARAM ( 3 ) ; <nl> - upright = GET_PARAM ( 4 ) ; <nl> + hessianThreshold = get < 0 > ( GetParam ( ) ) ; <nl> + nOctaves = get < 1 > ( GetParam ( ) ) ; <nl> + nOctaveLayers = get < 2 > ( GetParam ( ) ) ; <nl> + extended = get < 3 > ( GetParam ( ) ) ; <nl> + upright = get < 4 > ( GetParam ( ) ) ; <nl> } <nl> } ; <nl> + <nl> TEST_P ( SURF , Detector ) <nl> { <nl> - cv : : Mat image = readImage ( workdir + " fruits . jpg " , cv : : IMREAD_GRAYSCALE ) ; <nl> + cv : : Mat image = cv : : imread ( string ( cvtest : : TS : : ptr ( ) - > get_data_path ( ) ) + " shared / fruits . png " , cv : : IMREAD_GRAYSCALE ) ; <nl> ASSERT_FALSE ( image . empty ( ) ) ; <nl> <nl> cv : : ocl : : SURF_OCL surf ; <nl> TEST_P ( SURF , Detector ) <nl> <nl> TEST_P ( SURF , Descriptor ) <nl> { <nl> - cv : : Mat image = readImage ( workdir + " fruits . jpg " , cv : : IMREAD_GRAYSCALE ) ; <nl> + cv : : Mat image = cv : : imread ( string ( cvtest : : TS : : ptr ( ) - > get_data_path ( ) ) + " shared / fruits . png " , cv : : IMREAD_GRAYSCALE ) ; <nl> ASSERT_FALSE ( image . empty ( ) ) ; <nl> <nl> cv : : ocl : : SURF_OCL surf ; <nl> TEST_P ( SURF , Descriptor ) <nl> } <nl> <nl> INSTANTIATE_TEST_CASE_P ( OCL_Features2D , SURF , testing : : Combine ( <nl> - testing : : Values ( / * SURF_HessianThreshold ( 100 . 0 ) , * / SURF_HessianThreshold ( 500 . 0 ) , SURF_HessianThreshold ( 1000 . 0 ) ) , <nl> - testing : : Values ( SURF_Octaves ( 3 ) , SURF_Octaves ( 4 ) ) , <nl> - testing : : Values ( SURF_OctaveLayers ( 2 ) , SURF_OctaveLayers ( 3 ) ) , <nl> - testing : : Values ( SURF_Extended ( false ) , SURF_Extended ( true ) ) , <nl> - testing : : Values ( SURF_Upright ( false ) , SURF_Upright ( true ) ) ) ) ; <nl> + testing : : Values ( HessianThreshold ( 500 . 0 ) , HessianThreshold ( 1000 . 0 ) ) , <nl> + testing : : Values ( Octaves ( 3 ) , Octaves ( 4 ) ) , <nl> + testing : : Values ( OctaveLayers ( 2 ) , OctaveLayers ( 3 ) ) , <nl> + testing : : Values ( Extended ( false ) , Extended ( true ) ) , <nl> + testing : : Values ( Upright ( false ) , Upright ( true ) ) ) ) ; <nl> <nl> - # endif <nl> + # endif / / HAVE_OPENCV_OCL <nl> mmm a / modules / ocl / src / initialization . cpp <nl> ppp b / modules / ocl / src / initialization . 
cpp <nl> namespace cv <nl> size_t widthInBytes , size_t height , DevMemRW rw_type , DevMemType mem_type ) <nl> { <nl> cl_int status ; <nl> - <nl> * dev_ptr = clCreateBuffer ( clCxt - > impl - > clContext , gDevMemRWValueMap [ rw_type ] | gDevMemTypeValueMap [ mem_type ] , <nl> widthInBytes * height , 0 , & status ) ; <nl> openCLVerifyCall ( status ) ; <nl> mmm a / modules / ocl / test / precomp . hpp <nl> ppp b / modules / ocl / test / precomp . hpp <nl> <nl> # include " opencv2 / imgproc / imgproc . hpp " <nl> # include " opencv2 / video / video . hpp " <nl> # include " opencv2 / ts / ts . hpp " <nl> - # include " opencv2 / ts / ts_perf . hpp " <nl> # include " opencv2 / ocl / ocl . hpp " <nl> - # include " opencv2 / nonfree / nonfree . hpp " <nl> <nl> # include " utility . hpp " <nl> # include " interpolation . hpp " <nl>
SURF accuracy test is moved to nonfree
opencv/opencv
1be58f9a00797291959c06c89b801fa78989f683
2013-03-21T14:15:46Z
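The OpenCV diff above drops the SURF_* wrapper parameters in favour of plain get<N>(GetParam()) accessors on a testing::TestWithParam fixture. As a hedged illustration of that googletest mechanism only — the fixture name, test name and parameter values below are invented for the sketch and are not OpenCV code — a minimal value-parameterized test looks like this (link against gtest_main):

// Minimal googletest value-parameterized test mirroring the PARAM_TEST_CASE /
// GET_PARAM pattern replaced in the diff above. All names here (SurfParams,
// the 500.0 / 1000.0 values, ...) are illustrative only. Newer gtest uses
// std::tuple where the diff's older headers still spell std::tr1::tuple.
#include <tuple>
#include <gtest/gtest.h>

class SurfParams : public testing::TestWithParam<std::tuple<double, int> >
{
protected:
    double hessianThreshold;
    int nOctaves;

    virtual void SetUp()
    {
        // Same role as get<0>(GetParam()) / get<1>(GetParam()) in the diff.
        hessianThreshold = std::get<0>(GetParam());
        nOctaves = std::get<1>(GetParam());
    }
};

TEST_P(SurfParams, ParametersArePositive)
{
    EXPECT_GT(hessianThreshold, 0.0);
    EXPECT_GT(nOctaves, 0);
}

// testing::Combine builds the cartesian product of the parameter axes,
// exactly as INSTANTIATE_TEST_CASE_P does in the diff.
INSTANTIATE_TEST_CASE_P(Demo, SurfParams,
    testing::Combine(testing::Values(500.0, 1000.0),
                     testing::Values(3, 4)));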
mmm a / src / library_pthread . js <nl> ppp b / src / library_pthread . js <nl> var LibraryPThread = { <nl> if ( detached ) { <nl> PThread . returnWorkerToPool ( worker ) ; <nl> } <nl> - # if EXIT_RUNTIME / / If building with - s EXIT_RUNTIME = 0 , no thread will post this message , so don ' t even compile it in . <nl> } else if ( cmd = = = ' exitProcess ' ) { <nl> / / A pthread has requested to exit the whole application process ( runtime ) . <nl> - noExitRuntime = false ; <nl> + # if ASSERTIONS <nl> + err ( " exitProcess requested by worker " ) ; <nl> + # endif <nl> try { <nl> exit ( d [ ' returnCode ' ] ) ; <nl> } catch ( e ) { <nl> if ( e instanceof ExitStatus ) return ; <nl> throw e ; <nl> } <nl> - # endif <nl> } else if ( cmd = = = ' cancelDone ' ) { <nl> PThread . returnWorkerToPool ( worker ) ; <nl> } else if ( cmd = = = ' objectTransfer ' ) { <nl> mmm a / src / postamble . js <nl> ppp b / src / postamble . js <nl> function exit ( status , implicit ) { <nl> return ; <nl> } <nl> <nl> + # if USE_PTHREADS <nl> + if ( ! implicit ) { <nl> + if ( ENVIRONMENT_IS_PTHREAD ) { <nl> + # if ASSERTIONS <nl> + err ( ' Pthread 0x ' + _pthread_self ( ) . toString ( 16 ) + ' called exit ( ) , posting exitProcess . ' ) ; <nl> + # endif <nl> + / / When running in a pthread we propagate the exit back to the main thread <nl> + / / where it can decide if the whole process should be shut down or not . <nl> + / / The pthread may have decided not to exit its own runtime , for example <nl> + / / because it runs a main loop , but that doesn ' t affect the main thread . <nl> + postMessage ( { ' cmd ' : ' exitProcess ' , ' returnCode ' : status } ) ; <nl> + throw new ExitStatus ( status ) ; <nl> + } else { <nl> + # if ASSERTIONS <nl> + err ( ' main thead called exit : noExitRuntime = ' + noExitRuntime ) ; <nl> + # endif <nl> + } <nl> + } <nl> + # endif <nl> + <nl> if ( noExitRuntime ) { <nl> # if ASSERTIONS <nl> / / if exit ( ) was called , we may warn the user if the runtime isn ' t actually being shut down <nl> if ( Module [ ' noInitialRun ' ] ) shouldRunNow = false ; <nl> <nl> # if EXIT_RUNTIME = = 0 <nl> # if USE_PTHREADS <nl> - if ( ! ENVIRONMENT_IS_PTHREAD ) / / EXIT_RUNTIME = 0 only applies to default behavior of the main browser thread <nl> + / / EXIT_RUNTIME = 0 only applies to the default behavior of the main browser <nl> + / / thread . <nl> + / / The default behaviour for pthreads is always to exit once they return <nl> + / / from their entry point ( or call pthread_exit ) . If we set noExitRuntime <nl> + / / to true here on pthreads they would never complete and attempt to <nl> + / / pthread_join to them would block forever . <nl> + / / pthreads can still choose to set ` noExitRuntime ` explicitly , or <nl> + / / call emscripten_unwind_to_js_event_loop to extend their lifetime beyond <nl> + / / their main function . See comment in src / worker . js for more . <nl> + noExitRuntime = ! ENVIRONMENT_IS_PTHREAD ; <nl> + # else <nl> + noExitRuntime = true ; <nl> # endif <nl> - noExitRuntime = true ; <nl> # endif <nl> <nl> # if USE_PTHREADS <nl> mmm a / src / worker . js <nl> ppp b / src / worker . js <nl> this . onmessage = function ( e ) { <nl> # if STACK_OVERFLOW_CHECK <nl> Module [ ' checkStackCookie ' ] ( ) ; <nl> # endif <nl> - # if ! MINIMAL_RUNTIME / / In MINIMAL_RUNTIME the noExitRuntime concept does not apply to pthreads . To exit a pthread with live runtime , use the function emscripten_unwind_to_js_event_loop ( ) in the pthread body . 
<nl> - / / The thread might have finished without calling pthread_exit ( ) . If so , then perform the exit operation ourselves . <nl> + # if ! MINIMAL_RUNTIME <nl> + / / In MINIMAL_RUNTIME the noExitRuntime concept does not apply to <nl> + / / pthreads . To exit a pthread with live runtime , use the function <nl> + / / emscripten_unwind_to_js_event_loop ( ) in the pthread body . <nl> + / / The thread might have finished without calling pthread_exit ( ) . If so , <nl> + / / then perform the exit operation ourselves . <nl> / / ( This is a no - op if explicit pthread_exit ( ) had been called prior . ) <nl> if ( ! Module [ ' getNoExitRuntime ' ] ( ) ) <nl> # endif <nl> this . onmessage = function ( e ) { <nl> throw ex ; <nl> } <nl> # endif <nl> - # if MINIMAL_RUNTIME <nl> / / ExitStatus not present in MINIMAL_RUNTIME <nl> - Module [ ' PThread ' ] . threadExit ( - 2 ) ; <nl> - throw ex ; / / ExitStatus not present in MINIMAL_RUNTIME <nl> - # else <nl> - Module [ ' PThread ' ] . threadExit ( ( ex instanceof Module [ ' ExitStatus ' ] ) ? ex . status : - 2 ) ; <nl> - if ( ! ( ex instanceof Module [ ' ExitStatus ' ] ) ) throw ex ; <nl> + # if ! MINIMAL_RUNTIME <nl> + if ( ex instanceof Module [ ' ExitStatus ' ] ) { <nl> + if ( Module [ ' getNoExitRuntime ' ] ( ) ) { <nl> + # if ASSERTIONS <nl> + err ( ' Pthread 0x ' + _pthread_self ( ) . toString ( 16 ) + ' called exit ( ) , staying alive due to noExitRuntime . ' ) ; <nl> # endif <nl> + } else { <nl> + # if ASSERTIONS <nl> + err ( ' Pthread 0x ' + _pthread_self ( ) . toString ( 16 ) + ' called exit ( ) , calling threadExit . ' ) ; <nl> + # endif <nl> + Module [ ' PThread ' ] . threadExit ( ex . status ) ; <nl> + } <nl> + } <nl> + else <nl> + # endif <nl> + { <nl> + Module [ ' PThread ' ] . threadExit ( - 2 ) ; <nl> + throw ex ; <nl> + } <nl> # if ASSERTIONS <nl> } else { <nl> / / else e = = ' unwind ' , and we should fall through here and keep the pthread alive for asynchronous events . <nl> new file mode 100644 <nl> index 00000000000 . . 8a6459263ff <nl> mmm / dev / null <nl> ppp b / tests / core / pthread / test_pthread_exit_runtime . c <nl> <nl> + # include < assert . h > <nl> + # include < pthread . h > <nl> + # include < stdlib . h > <nl> + # include < stdio . h > <nl> + <nl> + pthread_t t ; <nl> + <nl> + void * thread_main_exit ( void * arg ) { <nl> + printf ( " calling exit \ n " ) ; <nl> + exit ( 42 ) ; <nl> + } <nl> + <nl> + int main ( ) { <nl> + printf ( " main \ n " ) ; <nl> + int rc = pthread_create ( & t , NULL , thread_main_exit , NULL ) ; <nl> + assert ( rc = = 0 ) ; <nl> + void * thread_rtn = 0 ; <nl> + rc = pthread_join ( t , & thread_rtn ) ; <nl> + assert ( rc = = 0 ) ; <nl> + # if EXIT_RUNTIME <nl> + printf ( " done join - - should never get here \ n " ) ; <nl> + return 1 ; <nl> + # else <nl> + / / Since EXIT_RUNTIME is not set the exit ( ) in the thread is not expected to <nl> + / / bring down the whole process , only itself . <nl> + printf ( " done join - - thread exited with % ld \ n " , ( intptr_t ) thread_rtn ) ; <nl> + # ifdef REPORT_RESULT <nl> + REPORT_RESULT ( 43 ) ; <nl> + # endif <nl> + return 43 ; <nl> + # endif <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 52e01040d42 <nl> mmm / dev / null <nl> ppp b / tests / core / pthread / test_pthread_exit_runtime . out <nl> @ @ - 0 , 0 + 1 @ @ <nl> + onExit status : 42 <nl> new file mode 100644 <nl> index 00000000000 . . c6ee3d253a9 <nl> mmm / dev / null <nl> ppp b / tests / core / pthread / test_pthread_exit_runtime . pre . js <nl> <nl> + Module . 
preRun = function ( ) { <nl> + Module [ ' onExit ' ] = function ( status ) { <nl> + out ( ' onExit status : ' + status ) ; <nl> + if ( typeof reportResultToServer ! = = ' undefined ' ) { <nl> + reportResultToServer ( ' onExit status : ' + status ) ; <nl> + } <nl> + } ; <nl> + } <nl> mmm a / tests / test_browser . py <nl> ppp b / tests / test_browser . py <nl> def test_pthread_asan ( self , name , args = [ ] ) : <nl> def test_pthread_asan_use_after_free ( self ) : <nl> self . btest ( path_from_root ( ' tests ' , ' pthread ' , ' test_pthread_asan_use_after_free . cpp ' ) , expected = ' 1 ' , args = [ ' - fsanitize = address ' , ' - s ' , ' INITIAL_MEMORY = 256MB ' , ' - s ' , ' USE_PTHREADS ' , ' - s ' , ' PROXY_TO_PTHREAD ' , ' - - pre - js ' , path_from_root ( ' tests ' , ' pthread ' , ' test_pthread_asan_use_after_free . js ' ) ] ) <nl> <nl> + @ requires_threads <nl> + def test_pthread_exit_process ( self ) : <nl> + args = [ ' - s ' , ' USE_PTHREADS = 1 ' , <nl> + ' - s ' , ' PROXY_TO_PTHREAD ' , <nl> + ' - s ' , ' PTHREAD_POOL_SIZE = 2 ' , <nl> + ' - s ' , ' EXIT_RUNTIME ' , <nl> + ' - DEXIT_RUNTIME ' , <nl> + ' - O0 ' ] <nl> + args + = [ ' - - pre - js ' , path_from_root ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . pre . js ' ) ] <nl> + self . btest ( path_from_root ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . c ' ) , expected = ' onExit status : 42 ' , args = args ) <nl> + <nl> + @ requires_threads <nl> + def test_pthread_no_exit_process ( self ) : <nl> + # Same as above but without EXIT_RUNTIME . In this case we don ' t expect onExit to <nl> + # ever be called . <nl> + args = [ ' - s ' , ' USE_PTHREADS = 1 ' , <nl> + ' - s ' , ' PROXY_TO_PTHREAD ' , <nl> + ' - s ' , ' PTHREAD_POOL_SIZE = 2 ' , <nl> + ' - O0 ' ] <nl> + args + = [ ' - - pre - js ' , path_from_root ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . pre . js ' ) ] <nl> + self . btest ( path_from_root ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . c ' ) , expected = ' 43 ' , args = args ) <nl> + <nl> # Tests MAIN_THREAD_EM_ASM_INT ( ) function call signatures . <nl> def test_main_thread_em_asm_signatures ( self ) : <nl> self . btest ( path_from_root ( ' tests ' , ' core ' , ' test_em_asm_signatures . cpp ' ) , expected = ' 121 ' , args = [ ] ) <nl> mmm a / tests / test_core . py <nl> ppp b / tests / test_core . py <nl> def test_pthread_exceptions ( self ) : <nl> self . emcc_args + = [ ' - fexceptions ' ] <nl> self . do_run_in_out_file_test ( ' tests ' , ' core ' , ' pthread ' , ' exceptions . cpp ' ) <nl> <nl> + @ node_pthreads <nl> + def test_pthread_exit_process ( self ) : <nl> + self . set_setting ( ' PROXY_TO_PTHREAD ' ) <nl> + self . set_setting ( ' PTHREAD_POOL_SIZE ' , ' 2 ' ) <nl> + self . set_setting ( ' EXIT_RUNTIME ' ) <nl> + self . emcc_args + = [ ' - - pre - js ' , path_from_root ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . pre . js ' ) ] <nl> + self . do_run_in_out_file_test ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . c ' , assert_returncode = 42 ) <nl> + <nl> + @ node_pthreads <nl> + @ disabled ( ' https : / / github . com / emscripten - core / emscripten / issues / 12945 ' ) <nl> + def test_pthread_no_exit_process ( self ) : <nl> + # Same as above but without EXIT_RUNTIME <nl> + self . set_setting ( ' PROXY_TO_PTHREAD ' ) <nl> + self . set_setting ( ' PTHREAD_POOL_SIZE ' , ' 2 ' ) <nl> + self . 
emcc_args + = [ ' - - pre - js ' , path_from_root ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . pre . js ' ) ] <nl> + self . do_run_in_out_file_test ( ' tests ' , ' core ' , ' pthread ' , ' test_pthread_exit_runtime . c ' , assert_returncode = 43 ) <nl> + <nl> def test_emscripten_atomics_stub ( self ) : <nl> self . do_run_in_out_file_test ( ' tests ' , ' core ' , ' pthread ' , ' emscripten_atomics . c ' ) <nl> <nl>
Handle exit ( ) from pthread by posting exitProcess back to main thread ( )
emscripten-core/emscripten
f28260bdc2ebbad8dee900aeb19c97b7c74ef250
2020-12-02T22:08:01Z
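The emscripten change above stops a pthread from tearing the process down directly: the worker only posts an 'exitProcess' message and the main thread decides, based on EXIT_RUNTIME / noExitRuntime, whether to actually shut down. As a rough C++ analogy of that "worker requests, main thread decides" pattern — this is not emscripten code, and the keep_runtime_alive flag is purely illustrative:

// Sketch of the "worker requests exit, main thread decides" pattern used by
// the emscripten change above. Names and the keep_runtime_alive flag are
// illustrative, not part of the emscripten API.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
bool exit_requested = false;
int requested_status = 0;

void worker()
{
    // Instead of exiting the whole process itself (the pre-change behaviour),
    // the worker only posts a request, like postMessage({cmd: 'exitProcess'}).
    std::lock_guard<std::mutex> lock(m);
    exit_requested = true;
    requested_status = 42;
    cv.notify_one();
}

int main()
{
    const bool keep_runtime_alive = false;  // analogous to noExitRuntime

    std::thread t(worker);
    {
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [] { return exit_requested; });
    }
    t.join();

    if (!keep_runtime_alive)
    {
        std::cout << "exiting with status " << requested_status << "\n";
        return requested_status;  // the main thread performs the actual shutdown
    }
    std::cout << "ignoring exit request, runtime stays alive\n";
    return 0;
}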
mmm a / tensorflow / python / data / kernel_tests / iterator_test . py <nl> ppp b / tensorflow / python / data / kernel_tests / iterator_test . py <nl> def testCapturingStateInOneShotRaisesException ( self ) : <nl> dataset = ( <nl> dataset_ops . Dataset . from_tensor_slices ( [ 0 . 0 , 1 . 0 , 2 . 0 ] ) <nl> . map ( lambda x : x + var ) ) <nl> - with self . assertRaisesRegexp ( <nl> + with self . assertRaisesRegex ( <nl> ValueError , r " ` Dataset . make_one_shot_iterator \ ( \ ) ` does not support " <nl> " datasets that capture stateful objects . + myvar " ) : <nl> dataset_ops . make_one_shot_iterator ( dataset ) <nl> def testOneShotIteratorInitializerFails ( self ) : <nl> next_element = iterator . get_next ( ) <nl> <nl> with self . cached_session ( ) as sess : <nl> - with self . assertRaisesRegexp ( errors . InvalidArgumentError , " " ) : <nl> + with self . assertRaisesRegex ( errors . InvalidArgumentError , " " ) : <nl> sess . run ( next_element ) <nl> <nl> # Test that subsequent attempts to use the iterator also fail . <nl> - with self . assertRaisesRegexp ( errors . InvalidArgumentError , " " ) : <nl> + with self . assertRaisesRegex ( errors . InvalidArgumentError , " " ) : <nl> sess . run ( next_element ) <nl> <nl> with self . cached_session ( ) as sess : <nl> <nl> def consumer_thread ( ) : <nl> - with self . assertRaisesRegexp ( errors . InvalidArgumentError , " " ) : <nl> + with self . assertRaisesRegex ( errors . InvalidArgumentError , " " ) : <nl> sess . run ( next_element ) <nl> <nl> num_threads = 8 <nl> def testNotInitializedError ( self ) : <nl> get_next = iterator . get_next ( ) <nl> <nl> with self . cached_session ( ) as sess : <nl> - with self . assertRaisesRegexp ( errors . FailedPreconditionError , <nl> + with self . assertRaisesRegex ( errors . FailedPreconditionError , <nl> " iterator has not been initialized " ) : <nl> sess . run ( get_next ) <nl> <nl> mmm a / tensorflow / python / ops / script_ops . py <nl> ppp b / tensorflow / python / ops / script_ops . py <nl> def __init__ ( self , func , Tout , is_grad_func , use_tape_cache = True ) : <nl> is_grad_func : Whether this EagerFunc is the gradient of another <nl> EagerPyFunc . <nl> use_tape_cache : ( Optional . ) Whether to cache ` func ` in the ` tape_cache ` . <nl> - NOTE ( lithuak ) : see the note for ` _eager_py_func ` . <nl> + For additional information , see description of ` _eager_py_func ` . <nl> This parameter should be removed once the # 35084 issue is fixed . <nl> " " " <nl> self . _func = func <nl> def eagerly_executed_grad ( * dy ) : <nl> is_grad_func = True ) <nl> <nl> <nl> - # NOTE ( lithuak ) : this function as a layer of indirection was added with one <nl> - # specific purpose : as a workaround for github issue # 35084 . <nl> - # It does all the same as ` eager_py_func ` used to do with one difference : <nl> - # it can be used to instruct underlying EagerFunc not to use ` tape_cache ` <nl> - # to avoid memory leak . When the issue # 35084 is fixed - this function should <nl> - # be removed , its body should be moved back to become the body of <nl> - # ` eager_py_func ` and all the call sites should be reverted to <nl> - # using ` eager_py_func ` without ` use_tape_cache ` argument of any value . <nl> def _eager_py_func ( func , inp , Tout , name = None , use_tape_cache = True ) : <nl> + " " " Wraps a python function into a TensorFlow op that executes it eagerly . 
<nl> + <nl> + This function is the internal implementation for ` eager_py_func ` , see the <nl> + ` eager_py_func ` docstring for the full description . <nl> + <nl> + Note : this function as a layer of indirection was added with one <nl> + specific purpose : as a workaround for github issue # 35084 . <nl> + It does all the same as ` eager_py_func ` used to do with one difference : <nl> + it can be used to instruct underlying EagerFunc not to use ` tape_cache ` <nl> + to avoid memory leak . When the issue # 35084 is fixed - this function should <nl> + be removed , its body should be moved back to become the body of <nl> + ` eager_py_func ` and all the call sites should be reverted to <nl> + using ` eager_py_func ` without ` use_tape_cache ` argument of any value . <nl> + <nl> + Args : <nl> + func : A Python function which accepts a list of ` Tensor ` objects having <nl> + element types that match the corresponding ` tf . Tensor ` objects in ` inp ` <nl> + and returns a list of ` Tensor ` objects ( or a single ` Tensor ` , or ` None ` ) <nl> + having element types that match the corresponding values in ` Tout ` . <nl> + inp : A list of ` Tensor ` objects . <nl> + Tout : A list or tuple of tensorflow data types or a single tensorflow data <nl> + type if there is only one , indicating what ` func ` returns ; an empty list <nl> + if no value is returned ( i . e . , if the return value is ` None ` ) . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + A list of ` Tensor ` or a single ` Tensor ` which ` func ` computes ; an empty list <nl> + if ` func ` returns None . <nl> + " " " <nl> if ops . executing_eagerly_outside_functions ( ) : <nl> with ops . device ( context . context ( ) . host_address_space ( ) ) : <nl> return _internal_py_func ( func = func , inp = inp , Tout = Tout , eager = True , <nl>
Minor fixes for PR
tensorflow/tensorflow
a7bab87fea8e64f2af5a3685b7f0955d29899e0f
2020-08-19T14:57:16Z
new file mode 100644 <nl> index 0000000000 . . 46fb093941 <nl> mmm / dev / null <nl> ppp b / code / mathematical - algorithms / factorial / factorial . pl <nl> <nl> + # Part of Cosmos by OpenGenus Foundation <nl> + <nl> + $ num = 6 ; <nl> + $ factorial = 1 ; <nl> + for ( $ a = $ num ; $ a > 0 ; $ a = $ a - 1 ) { <nl> + $ factorial = $ factorial * $ a ; <nl> + } <nl> + print $ factorial ; <nl>
Create factorial . pl
OpenGenus/cosmos
c65677f3c060fdfd5df446405341edb561579030
2019-02-25T08:28:25Z
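For comparison with the Perl snippet above, the same iterative factorial written in C++ — a trivial sketch for illustration, not part of the cosmos repository:

// Iterative factorial, mirroring the Perl loop in the diff above.
#include <cstdint>
#include <iostream>

int main()
{
    const int num = 6;
    std::uint64_t factorial = 1;
    for (int a = num; a > 0; --a)
        factorial *= a;
    std::cout << factorial << "\n";  // prints 720
    return 0;
}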
mmm a / . circleci / config . yml <nl> ppp b / . circleci / config . yml <nl> jobs : <nl> <nl> docker cp / home / circleci / project / doc_push_script . sh $ id : / var / lib / jenkins / workspace / doc_push_script . sh <nl> <nl> - if [ [ " $ { CIRCLE_BRANCH } " ! = " master " ] ] ; then <nl> - # Do a dry_run of the docs build . This will build the docs but not push them . <nl> - export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . / doc_push_script . sh docs / master master dry_run " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> - else <nl> + # master branch docs push <nl> + if [ [ " $ { CIRCLE_BRANCH } " = = " master " ] ] ; then <nl> export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . / doc_push_script . sh docs / master master " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + <nl> + # stable release docs push . Due to some circleci limitations , we keep <nl> + # an eternal PR open ( # 16502 ) for merging v1 . 0 . 1 - > master for this job . <nl> + # XXX : The following code is only run on the v1 . 0 . 1 branch , which might <nl> + # not be exactly the same as what you see here . <nl> + elif [ [ " $ { CIRCLE_BRANCH } " = = " v1 . 0 . 1 " ] ] ; then <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . / doc_push_script . sh docs / stable 1 . 0 . 1 " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + <nl> + # For open PRs : Do a dry_run of the docs build , don ' t push build <nl> + else <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . / doc_push_script . sh docs / master master dry_run " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> fi <nl> + <nl> echo $ { COMMAND } > . / command . sh & & unbuffer bash . / command . sh | ts <nl> <nl> + # Save the docs build so we can debug any problems <nl> + export DEBUG_COMMIT_DOCKER_IMAGE = $ { COMMIT_DOCKER_IMAGE } - debug <nl> + docker commit " $ id " $ { DEBUG_COMMIT_DOCKER_IMAGE } <nl> + docker push $ { DEBUG_COMMIT_DOCKER_IMAGE } <nl> + <nl> pytorch_macos_10_13_py3_build : <nl> macos : <nl> xcode : " 9 . 0 " <nl>
Backport the stable doc build on v1 . 0 . 1 to master ( )
pytorch/pytorch
72f070a124c3062939873a5468463d7c9e3ac4eb
2019-02-07T19:41:07Z
mmm a / xbmc / storage / cdioSupport . cpp <nl> ppp b / xbmc / storage / cdioSupport . cpp <nl> CCdInfo * CCdIoSupport : : GetCdInfo ( char * cDeviceFileName ) <nl> m_nFs = GuessFilesystem ( m_nStartTrack , i ) ; <nl> trackinfo ti ; <nl> ti . nfsInfo = m_nFs ; <nl> - / / valid UDF version for xbox <nl> - if ( ( m_nFs & FS_MASK ) = = FS_UDF ) <nl> - { <nl> - / / Is UDF 1 . 02 <nl> - if ( m_nUDFVerMajor > 0x1 ) <nl> - { <nl> - ti . nfsInfo = FS_UNKNOWN ; <nl> - m_strDiscLabel . Empty ( ) ; <nl> - } <nl> - else if ( m_nUDFVerMinor > 0x2 ) <nl> - { <nl> - ti . nfsInfo = FS_UNKNOWN ; <nl> - m_strDiscLabel . Empty ( ) ; <nl> - } <nl> - } <nl> - <nl> - if ( ( m_nFs & FS_MASK ) = = FS_ISO_UDF ) <nl> - { <nl> - / / fallback to iso9660 if not udf 1 . 02 <nl> - if ( m_nUDFVerMajor > 0x1 ) <nl> - ti . nfsInfo = FS_ISO_9660 ; <nl> - else if ( m_nUDFVerMinor > 0x2 ) <nl> - ti . nfsInfo = FS_ISO_9660 ; <nl> - } <nl> - <nl> ti . ms_offset = m_nMsOffset ; <nl> ti . isofs_size = m_nIsofsSize ; <nl> ti . nJolietLevel = m_nJolietLevel ; <nl> CCdInfo * CCdIoSupport : : GetCdInfo ( char * cDeviceFileName ) <nl> <nl> info - > SetTrackInformation ( i , ti ) ; <nl> <nl> - / / xbox does not support multisession cd ' s <nl> - if ( ! ( ( ( m_nFs & FS_MASK ) = = FS_ISO_9660 | | <nl> - ( m_nFs & FS_MASK ) = = FS_ISO_HFS | | <nl> - / * ( fs & FS_MASK ) = = FS_ISO_9660_INTERACTIVE ) & & ( fs & XA ) ) ) * / <nl> - ( m_nFs & FS_MASK ) = = FS_ISO_9660_INTERACTIVE ) ) ) <nl> - break ; / * no method for non - iso9660 multisessions * / <nl> } <nl> } <nl> : : cdio_destroy ( cdio ) ; <nl>
Merge pull request from wsoltys / cdio
xbmc/xbmc
05833a3af55da06f4c059a900784337a0d765ab3
2012-01-12T17:07:35Z
mmm a / src / builtins / builtins - console . cc <nl> ppp b / src / builtins / builtins - console . cc <nl> void ConsoleCall ( <nl> CHECK ( ! isolate - > has_scheduled_exception ( ) ) ; <nl> if ( ! isolate - > console_delegate ( ) ) return ; <nl> HandleScope scope ( isolate ) ; <nl> + <nl> + / / Access check . The current context has to match the context of all <nl> + / / arguments , otherwise the inspector might leak objects across contexts . <nl> + Handle < Context > context = handle ( isolate - > context ( ) , isolate ) ; <nl> + for ( int i = 0 ; i < args . length ( ) ; + + i ) { <nl> + Handle < Object > argument = args . at < Object > ( i ) ; <nl> + if ( ! argument - > IsJSObject ( ) ) continue ; <nl> + <nl> + Handle < JSObject > argument_obj = Handle < JSObject > : : cast ( argument ) ; <nl> + if ( argument - > IsAccessCheckNeeded ( isolate ) & & <nl> + ! isolate - > MayAccess ( context , argument_obj ) ) { <nl> + isolate - > ReportFailedAccessCheck ( argument_obj ) ; <nl> + return ; <nl> + } <nl> + } <nl> + <nl> debug : : ConsoleCallArguments wrapper ( args ) ; <nl> Handle < Object > context_id_obj = JSObject : : GetDataProperty ( <nl> args . target ( ) , isolate - > factory ( ) - > console_context_id_symbol ( ) ) ; <nl> mmm a / test / unittests / api / access - check - unittest . cc <nl> ppp b / test / unittests / api / access - check - unittest . cc <nl> TEST_F ( AccessRegressionTest , <nl> ASSERT_EQ ( getter_c2 - > native_context ( ) , * Utils : : OpenHandle ( * context2 ) ) ; <nl> } <nl> <nl> + namespace { <nl> + bool failed_access_check_callback_called ; <nl> + <nl> + class AccessCheckTestConsoleDelegate : public debug : : ConsoleDelegate { <nl> + public : <nl> + void Log ( const debug : : ConsoleCallArguments & args , <nl> + const debug : : ConsoleContext & context ) { <nl> + FAIL ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace <nl> + <nl> + / / Ensure that { console . log } does an access check for its arguments . <nl> + TEST_F ( AccessCheckTest , ConsoleLog ) { <nl> + isolate ( ) - > SetFailedAccessCheckCallbackFunction ( <nl> + [ ] ( v8 : : Local < v8 : : Object > host , v8 : : AccessType type , <nl> + v8 : : Local < v8 : : Value > data ) { <nl> + failed_access_check_callback_called = true ; <nl> + } ) ; <nl> + AccessCheckTestConsoleDelegate console { } ; <nl> + debug : : SetConsoleDelegate ( isolate ( ) , & console ) ; <nl> + <nl> + Local < ObjectTemplate > object_template = ObjectTemplate : : New ( isolate ( ) ) ; <nl> + object_template - > SetAccessCheckCallback ( AccessCheck ) ; <nl> + <nl> + Local < Context > context1 = Context : : New ( isolate ( ) , nullptr ) ; <nl> + Local < Context > context2 = Context : : New ( isolate ( ) , nullptr ) ; <nl> + <nl> + Local < Object > object1 = <nl> + object_template - > NewInstance ( context1 ) . ToLocalChecked ( ) ; <nl> + EXPECT_TRUE ( context2 - > Global ( ) <nl> + - > Set ( context2 , v8_str ( " object_from_context1 " ) , object1 ) <nl> + . IsJust ( ) ) ; <nl> + <nl> + Context : : Scope context_scope ( context2 ) ; <nl> + failed_access_check_callback_called = false ; <nl> + CompileRun ( isolate ( ) , " console . log ( object_from_context1 ) ; " ) . ToLocalChecked ( ) ; <nl> + <nl> + ASSERT_TRUE ( failed_access_check_callback_called ) ; <nl> + } <nl> + <nl> } / / namespace v8 <nl>
Calls to { console } require an access check for the provided arguments
v8/v8
a5fd60e15a3ed1cf5bf6deeed791e5dc5a40126f
2019-08-08T07:19:54Z
mmm a / contrib / cassandra <nl> ppp b / contrib / cassandra <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit bc593f2644a6c50c4057459e242e214a6af70969 <nl> + Subproject commit 9606ff1f70bd3fc5d395df32e626923c012ffb5f <nl> mmm a / src / Dictionaries / CassandraBlockInputStream . cpp <nl> ppp b / src / Dictionaries / CassandraBlockInputStream . cpp <nl> namespace <nl> <nl> void insertValue ( IColumn & column , const ValueType type , const CassValue * cass_value ) <nl> { <nl> + / / / Cassandra does not support unsigned integers <nl> switch ( type ) <nl> { <nl> case ValueType : : vtUInt8 : <nl> { <nl> - cass_uint32_t value ; <nl> - cass_value_get_uint32 ( cass_value , & value ) ; <nl> - static_cast < ColumnUInt8 & > ( column ) . insertValue ( value ) ; <nl> + cass_int8_t value ; <nl> + cass_value_get_int8 ( cass_value , & value ) ; <nl> + assert_cast < ColumnUInt8 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtUInt16 : <nl> { <nl> - cass_uint32_t value ; <nl> - cass_value_get_uint32 ( cass_value , & value ) ; <nl> - static_cast < ColumnUInt16 & > ( column ) . insertValue ( value ) ; <nl> + cass_int16_t value ; <nl> + cass_value_get_int16 ( cass_value , & value ) ; <nl> + assert_cast < ColumnUInt16 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtUInt32 : <nl> { <nl> - cass_uint32_t value ; <nl> - cass_value_get_uint32 ( cass_value , & value ) ; <nl> - static_cast < ColumnUInt32 & > ( column ) . insertValue ( value ) ; <nl> + cass_int32_t value ; <nl> + cass_value_get_int32 ( cass_value , & value ) ; <nl> + assert_cast < ColumnUInt32 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtUInt64 : <nl> { <nl> cass_int64_t value ; <nl> cass_value_get_int64 ( cass_value , & value ) ; <nl> - static_cast < ColumnUInt64 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnUInt64 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtInt8 : <nl> { <nl> cass_int8_t value ; <nl> cass_value_get_int8 ( cass_value , & value ) ; <nl> - static_cast < ColumnInt8 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnInt8 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtInt16 : <nl> { <nl> cass_int16_t value ; <nl> cass_value_get_int16 ( cass_value , & value ) ; <nl> - static_cast < ColumnInt16 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnInt16 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtInt32 : <nl> { <nl> cass_int32_t value ; <nl> cass_value_get_int32 ( cass_value , & value ) ; <nl> - static_cast < ColumnInt32 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnInt32 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtInt64 : <nl> { <nl> cass_int64_t value ; <nl> cass_value_get_int64 ( cass_value , & value ) ; <nl> - static_cast < ColumnInt64 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnInt64 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtFloat32 : <nl> { <nl> cass_float_t value ; <nl> cass_value_get_float ( cass_value , & value ) ; <nl> - static_cast < ColumnFloat32 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnFloat32 & > ( column ) . 
insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtFloat64 : <nl> { <nl> cass_double_t value ; <nl> cass_value_get_double ( cass_value , & value ) ; <nl> - static_cast < ColumnFloat64 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnFloat64 & > ( column ) . insertValue ( value ) ; <nl> break ; <nl> } <nl> case ValueType : : vtString : <nl> namespace <nl> const char * value ; <nl> size_t value_length ; <nl> cass_value_get_string ( cass_value , & value , & value_length ) ; <nl> - static_cast < ColumnString & > ( column ) . insertData ( value , value_length ) ; <nl> + assert_cast < ColumnString & > ( column ) . insertData ( value , value_length ) ; <nl> break ; <nl> } <nl> case ValueType : : vtDate : <nl> { <nl> - cass_int64_t value ; <nl> - cass_value_get_int64 ( cass_value , & value ) ; <nl> - static_cast < ColumnUInt16 & > ( column ) . insertValue ( UInt32 { cass_date_from_epoch ( value ) } ) ; / / FIXME <nl> + cass_uint32_t value ; <nl> + cass_value_get_uint32 ( cass_value , & value ) ; <nl> + assert_cast < ColumnUInt16 & > ( column ) . insertValue ( static_cast < UInt16 > ( value ) ) ; <nl> break ; <nl> } <nl> case ValueType : : vtDateTime : <nl> { <nl> cass_int64_t value ; <nl> cass_value_get_int64 ( cass_value , & value ) ; <nl> - static_cast < ColumnUInt32 & > ( column ) . insertValue ( value ) ; <nl> + assert_cast < ColumnUInt32 & > ( column ) . insertValue ( static_cast < UInt32 > ( value / 1000 ) ) ; <nl> break ; <nl> } <nl> case ValueType : : vtUUID : <nl> namespace <nl> cass_value_get_uuid ( cass_value , & value ) ; <nl> std : : array < char , CASS_UUID_STRING_LENGTH > uuid_str ; <nl> cass_uuid_string ( value , uuid_str . data ( ) ) ; <nl> - static_cast < ColumnUInt128 & > ( column ) . insert ( parse < UUID > ( uuid_str . data ( ) , uuid_str . size ( ) ) ) ; <nl> + assert_cast < ColumnUInt128 & > ( column ) . insert ( parse < UUID > ( uuid_str . data ( ) , uuid_str . size ( ) ) ) ; <nl> break ; <nl> } <nl> } <nl> namespace <nl> <nl> Block CassandraBlockInputStream : : readImpl ( ) <nl> { <nl> - if ( has_more_pages ) <nl> + if ( ! has_more_pages ) <nl> return { } ; <nl> <nl> - MutableColumns columns ( description . sample_block . columns ( ) ) ; <nl> + MutableColumns columns = description . sample_block . cloneEmptyColumns ( ) ; <nl> CassFuture * query_future = cass_session_execute ( session , statement ) ; <nl> <nl> result = cass_future_get_result ( query_future ) ; <nl> namespace <nl> throw Exception { error_message , ErrorCodes : : CASSANDRA_INTERNAL_ERROR } ; <nl> } <nl> <nl> - const CassRow * row = cass_result_first_row ( result ) ; <nl> - const CassValue * map = cass_row_get_column ( row , 0 ) ; <nl> - iterator = cass_iterator_from_map ( map ) ; <nl> - while ( cass_iterator_next ( iterator ) ) { <nl> - const CassValue * cass_key = cass_iterator_get_map_key ( iterator ) ; <nl> - const CassValue * cass_value = cass_iterator_get_map_value ( iterator ) ; <nl> - auto pair_values = { std : : make_pair ( cass_key , 0ul ) , std : : make_pair ( cass_value , 1ul ) } ; <nl> - for ( const auto & [ value , idx ] : pair_values ) { <nl> - if ( description . types [ idx ] . second ) { <nl> - ColumnNullable & column_nullable = static_cast < ColumnNullable & > ( * columns [ idx ] ) ; <nl> - insertValue ( column_nullable . getNestedColumn ( ) , description . types [ idx ] . first , value ) ; <nl> + [ [ maybe_unused ] ] size_t row_count = 0 ; <nl> + assert ( cass_result_column_count ( result ) = = columns . 
size ( ) ) ; <nl> + CassIterator * rows_iter = cass_iterator_from_result ( result ) ; / / / Points to rows [ - 1 ] <nl> + while ( cass_iterator_next ( rows_iter ) ) <nl> + { <nl> + const CassRow * row = cass_iterator_get_row ( rows_iter ) ; <nl> + for ( size_t col_idx = 0 ; col_idx < columns . size ( ) ; + + col_idx ) <nl> + { <nl> + const CassValue * val = cass_row_get_column ( row , col_idx ) ; <nl> + if ( cass_value_is_null ( val ) ) <nl> + columns [ col_idx ] - > insertDefault ( ) ; <nl> + else if ( description . types [ col_idx ] . second ) <nl> + { <nl> + ColumnNullable & column_nullable = static_cast < ColumnNullable & > ( * columns [ col_idx ] ) ; <nl> + insertValue ( column_nullable . getNestedColumn ( ) , description . types [ col_idx ] . first , val ) ; <nl> column_nullable . getNullMapData ( ) . emplace_back ( 0 ) ; <nl> - } else { <nl> - insertValue ( * columns [ idx ] , description . types [ idx ] . first , value ) ; <nl> } <nl> + else <nl> + insertValue ( * columns [ col_idx ] , description . types [ col_idx ] . first , val ) ; <nl> } <nl> + + + row_count ; <nl> } <nl> + assert ( cass_result_row_count ( result ) = = row_count ) ; <nl> + cass_iterator_free ( rows_iter ) ; <nl> + <nl> + / / const CassRow * row = cass_result_first_row ( result ) ; <nl> + / / const CassValue * map = cass_row_get_column ( row , 0 ) ; <nl> + / / const CassValue * map = cass_row_get_column ( row , 0 ) ; <nl> + / / iterator = cass_iterator_from_map ( map ) ; <nl> + / / while ( cass_iterator_next ( iterator ) ) { <nl> + / / const CassValue * cass_key = cass_iterator_get_map_key ( iterator ) ; <nl> + / / const CassValue * cass_value = cass_iterator_get_map_value ( iterator ) ; <nl> + / / auto pair_values = { std : : make_pair ( cass_key , 0ul ) , std : : make_pair ( cass_value , 1ul ) } ; <nl> + / / for ( const auto & [ value , idx ] : pair_values ) { <nl> + / / if ( description . types [ idx ] . second ) { <nl> + / / ColumnNullable & column_nullable = static_cast < ColumnNullable & > ( * columns [ idx ] ) ; <nl> + / / insertValue ( column_nullable . getNestedColumn ( ) , description . types [ idx ] . first , value ) ; <nl> + / / column_nullable . getNullMapData ( ) . emplace_back ( 0 ) ; <nl> + / / } else { <nl> + / / insertValue ( * columns [ idx ] , description . types [ idx ] . first , value ) ; <nl> + / / } <nl> + / / } <nl> + / / } <nl> <nl> has_more_pages = cass_result_has_more_pages ( result ) ; <nl> <nl> namespace <nl> } <nl> <nl> <nl> + void cassandraCheck ( CassError code ) <nl> + { <nl> + if ( code ! = CASS_OK ) <nl> + throw Exception ( " Cassandra driver error " + std : : to_string ( code ) + " : " + cass_error_desc ( code ) , <nl> + ErrorCodes : : CASSANDRA_INTERNAL_ERROR ) ; <nl> + } <nl> + <nl> + void cassandraWaitAndCheck ( CassFuture * future ) <nl> + { <nl> + auto code = cass_future_error_code ( future ) ; / / / Waits if not ready <nl> + if ( code = = CASS_OK ) <nl> + { <nl> + cass_future_free ( future ) ; <nl> + return ; <nl> + } <nl> + const char * message ; <nl> + size_t message_len ; <nl> + cass_future_error_message ( future , & message , & message_len ) ; <nl> + String full_message = " Cassandra driver error " + std : : to_string ( code ) + " : " + cass_error_desc ( code ) + " : " + message ; <nl> + cass_future_free ( future ) ; / / / Frees message <nl> + throw Exception ( full_message , ErrorCodes : : CASSANDRA_INTERNAL_ERROR ) ; <nl> + } <nl> + <nl> } <nl> # endif <nl> mmm a / src / Dictionaries / CassandraBlockInputStream . 
h <nl> ppp b / src / Dictionaries / CassandraBlockInputStream . h <nl> <nl> <nl> namespace DB <nl> { <nl> + <nl> + void cassandraCheck ( CassError error ) ; <nl> + void cassandraWaitAndCheck ( CassFuture * future ) ; <nl> + <nl> + <nl> / / / Allows processing results of a Cassandra query as a sequence of Blocks , simplifies chaining <nl> class CassandraBlockInputStream final : public IBlockInputStream <nl> { <nl> mmm a / src / Dictionaries / CassandraDictionarySource . cpp <nl> ppp b / src / Dictionaries / CassandraDictionarySource . cpp <nl> <nl> # include " CassandraDictionarySource . h " <nl> # include " DictionarySourceFactory . h " <nl> # include " DictionaryStructure . h " <nl> + # include " ExternalQueryBuilder . h " <nl> + # include < common / logger_useful . h > <nl> <nl> namespace DB <nl> { <nl> static const size_t max_block_size = 8192 ; <nl> <nl> CassandraDictionarySource : : CassandraDictionarySource ( <nl> const DB : : DictionaryStructure & dict_struct_ , <nl> - const std : : string & host_ , <nl> + const String & host_ , <nl> UInt16 port_ , <nl> - const std : : string & user_ , <nl> - const std : : string & password_ , <nl> - const std : : string & method_ , <nl> - const std : : string & db_ , <nl> + const String & user_ , <nl> + const String & password_ , <nl> + / / const std : : string & method_ , <nl> + const String & db_ , <nl> + const String & table_ , <nl> const DB : : Block & sample_block_ ) <nl> - : dict_struct ( dict_struct_ ) <nl> + : log ( & Logger : : get ( " CassandraDictionarySource " ) ) <nl> + , dict_struct ( dict_struct_ ) <nl> , host ( host_ ) <nl> , port ( port_ ) <nl> , user ( user_ ) <nl> , password ( password_ ) <nl> - , method ( method_ ) <nl> + / / , method ( method_ ) <nl> , db ( db_ ) <nl> + , table ( table_ ) <nl> , sample_block ( sample_block_ ) <nl> - , cluster ( cass_cluster_new ( ) ) <nl> + , cluster ( cass_cluster_new ( ) ) / / FIXME will not be freed in case of exception <nl> , session ( cass_session_new ( ) ) <nl> { <nl> - cass_cluster_set_contact_points ( cluster , toConnectionString ( host , port ) . c_str ( ) ) ; <nl> + cassandraCheck ( cass_cluster_set_contact_points ( cluster , host . c_str ( ) ) ) ; <nl> + if ( port ) <nl> + cassandraCheck ( cass_cluster_set_port ( cluster , port ) ) ; <nl> + cass_cluster_set_credentials ( cluster , user . c_str ( ) , password . c_str ( ) ) ; <nl> + cassandraWaitAndCheck ( cass_session_connect_keyspace ( session , cluster , db . c_str ( ) ) ) ; <nl> } <nl> <nl> CassandraDictionarySource : : CassandraDictionarySource ( <nl> CassandraDictionarySource : : CassandraDictionarySource ( <nl> : CassandraDictionarySource ( <nl> dict_struct_ , <nl> config . getString ( config_prefix + " . host " ) , <nl> - config . getUInt ( config_prefix + " . port " ) , <nl> + config . getUInt ( config_prefix + " . port " , 0 ) , <nl> config . getString ( config_prefix + " . user " , " " ) , <nl> config . getString ( config_prefix + " . password " , " " ) , <nl> - config . getString ( config_prefix + " . method " , " " ) , <nl> - config . getString ( config_prefix + " . db " , " " ) , <nl> + / / config . getString ( config_prefix + " . method " , " " ) , <nl> + config . getString ( config_prefix + " . keyspace " , " " ) , <nl> + config . getString ( config_prefix + " . column_family " ) , <nl> sample_block_ ) <nl> { <nl> } <nl> CassandraDictionarySource : : CassandraDictionarySource ( const CassandraDictionarySo <nl> other . port , <nl> other . user , <nl> other . password , <nl> - other . method , <nl> + / / other . 
method , <nl> other . db , <nl> + other . table , <nl> other . sample_block } <nl> { <nl> } <nl> CassandraDictionarySource : : ~ CassandraDictionarySource ( ) { <nl> cass_cluster_free ( cluster ) ; <nl> } <nl> <nl> - std : : string CassandraDictionarySource : : toConnectionString ( const std : : string & host , const UInt16 port ) { <nl> - return host + ( port ! = 0 ? " : " + std : : to_string ( port ) : " " ) ; <nl> - } <nl> + / / std : : string CassandraDictionarySource : : toConnectionString ( const std : : string & host , const UInt16 port ) { <nl> + / / return host + ( port ! = 0 ? " : " + std : : to_string ( port ) : " " ) ; <nl> + / / } <nl> <nl> - BlockInputStreamPtr CassandraDictionarySource : : loadAll ( ) { <nl> - return std : : make_shared < CassandraBlockInputStream > ( nullptr , " " , sample_block , max_block_size ) ; <nl> + BlockInputStreamPtr CassandraDictionarySource : : loadAll ( ) <nl> + { <nl> + ExternalQueryBuilder builder { dict_struct , db , table , " " , IdentifierQuotingStyle : : DoubleQuotes } ; <nl> + String query = builder . composeLoadAllQuery ( ) ; <nl> + query . pop_back ( ) ; <nl> + query + = " ALLOW FILTERING ; " ; <nl> + LOG_INFO ( log , " Loading all using query : " < < query ) ; <nl> + return std : : make_shared < CassandraBlockInputStream > ( session , query , sample_block , max_block_size ) ; <nl> } <nl> <nl> std : : string CassandraDictionarySource : : toString ( ) const { <nl> return " Cassandra : " + / * db + ' . ' + collection + ' , ' + ( user . empty ( ) ? " " : " " + user + ' @ ' ) + * / host + ' : ' + DB : : toString ( port ) ; <nl> } <nl> <nl> + BlockInputStreamPtr CassandraDictionarySource : : loadIds ( const std : : vector < UInt64 > & ids ) <nl> + { <nl> + ExternalQueryBuilder builder { dict_struct , db , table , " " , IdentifierQuotingStyle : : DoubleQuotes } ; <nl> + String query = builder . composeLoadIdsQuery ( ids ) ; <nl> + query . pop_back ( ) ; <nl> + query + = " ALLOW FILTERING ; " ; <nl> + LOG_INFO ( log , " Loading ids using query : " < < query ) ; <nl> + return std : : make_shared < CassandraBlockInputStream > ( session , query , sample_block , max_block_size ) ; <nl> + } <nl> + <nl> + BlockInputStreamPtr CassandraDictionarySource : : loadKeys ( const Columns & key_columns , const std : : vector < size_t > & requested_rows ) <nl> + { <nl> + / / FIXME split conditions on partition key and clustering key <nl> + ExternalQueryBuilder builder { dict_struct , db , table , " " , IdentifierQuotingStyle : : DoubleQuotes } ; <nl> + String query = builder . composeLoadKeysQuery ( key_columns , requested_rows , ExternalQueryBuilder : : IN_WITH_TUPLES ) ; <nl> + query . pop_back ( ) ; <nl> + query + = " ALLOW FILTERING ; " ; <nl> + LOG_INFO ( log , " Loading keys using query : " < < query ) ; <nl> + return std : : make_shared < CassandraBlockInputStream > ( session , query , sample_block , max_block_size ) ; <nl> + } <nl> + <nl> <nl> } <nl> <nl> mmm a / src / Dictionaries / CassandraDictionarySource . h <nl> ppp b / src / Dictionaries / CassandraDictionarySource . h <nl> <nl> # pragma once <nl> <nl> + # if ! defined ( ARCADIA_BUILD ) <nl> # include < Common / config . h > <nl> - # include < Core / Block . h > <nl> + # endif <nl> <nl> # if USE_CASSANDRA <nl> <nl> - # include " DictionaryStructure . h " <nl> - # include " IDictionarySource . h " <nl> - # include < cassandra . h > <nl> + # include " DictionaryStructure . h " <nl> + # include " IDictionarySource . h " <nl> + # include < Core / Block . h > <nl> + # include < Poco / Logger . 
h > <nl> + # include < cassandra . h > <nl> <nl> namespace DB <nl> { <nl> class CassandraDictionarySource final : public IDictionarySource { <nl> CassandraDictionarySource ( <nl> const DictionaryStructure & dict_struct , <nl> - const std : : string & host , <nl> + const String & host , <nl> UInt16 port , <nl> - const std : : string & user , <nl> - const std : : string & password , <nl> - const std : : string & method , <nl> - const std : : string & db , <nl> + const String & user , <nl> + const String & password , <nl> + / / const std : : string & method , <nl> + const String & db , <nl> + const String & table , <nl> const Block & sample_block ) ; <nl> <nl> public : <nl> class CassandraDictionarySource final : public IDictionarySource { <nl> <nl> DictionarySourcePtr clone ( ) const override { return std : : make_unique < CassandraDictionarySource > ( * this ) ; } <nl> <nl> - BlockInputStreamPtr loadIds ( const std : : vector < UInt64 > & / * ids * / ) override <nl> - { <nl> - throw Exception { " Method loadIds is not implemented yet " , ErrorCodes : : NOT_IMPLEMENTED } ; <nl> - } <nl> + BlockInputStreamPtr loadIds ( const std : : vector < UInt64 > & ids ) override ; <nl> + / / { <nl> + / / throw Exception { " Method loadIds is not implemented yet " , ErrorCodes : : NOT_IMPLEMENTED } ; <nl> + / / } <nl> <nl> - BlockInputStreamPtr loadKeys ( const Columns & / * key_columns * / , const std : : vector < size_t > & / * requested_rows * / ) override <nl> - { <nl> - throw Exception { " Method loadKeys is not implemented yet " , ErrorCodes : : NOT_IMPLEMENTED } ; <nl> - } <nl> + BlockInputStreamPtr loadKeys ( const Columns & key_columns , const std : : vector < size_t > & requested_rows ) override ; <nl> + / / { <nl> + / / throw Exception { " Method loadKeys is not implemented yet " , ErrorCodes : : NOT_IMPLEMENTED } ; <nl> + / / } <nl> <nl> BlockInputStreamPtr loadUpdatedAll ( ) override <nl> { <nl> class CassandraDictionarySource final : public IDictionarySource { <nl> std : : string toString ( ) const override ; <nl> <nl> private : <nl> - static std : : string toConnectionString ( const std : : string & host , const UInt16 port ) ; <nl> + / / static std : : string toConnectionString ( const std : : string & host , const UInt16 port ) ; <nl> <nl> + Poco : : Logger * log ; <nl> const DictionaryStructure dict_struct ; <nl> - const std : : string host ; <nl> + const String host ; <nl> const UInt16 port ; <nl> - const std : : string user ; <nl> - const std : : string password ; <nl> - const std : : string method ; <nl> - const std : : string db ; <nl> + const String user ; <nl> + const String password ; <nl> + / / const std : : string method ; <nl> + const String db ; <nl> + const String table ; <nl> Block sample_block ; <nl> <nl> CassCluster * cluster ; <nl> mmm a / src / Dictionaries / ExternalQueryBuilder . cpp <nl> ppp b / src / Dictionaries / ExternalQueryBuilder . cpp <nl> void ExternalQueryBuilder : : writeQuoted ( const std : : string & s , WriteBuffer & out ) <nl> std : : string ExternalQueryBuilder : : composeLoadAllQuery ( ) const <nl> { <nl> WriteBufferFromOwnString out ; <nl> + composeLoadAllQuery ( out ) ; <nl> + writeChar ( ' ; ' , out ) ; <nl> + return out . str ( ) ; <nl> + } <nl> + <nl> + void ExternalQueryBuilder : : composeLoadAllQuery ( WriteBuffer & out ) const <nl> + { <nl> writeString ( " SELECT " , out ) ; <nl> <nl> if ( dict_struct . 
id ) <nl> std : : string ExternalQueryBuilder : : composeLoadAllQuery ( ) const <nl> writeString ( " WHERE " , out ) ; <nl> writeString ( where , out ) ; <nl> } <nl> - <nl> - writeChar ( ' ; ' , out ) ; <nl> - <nl> - return out . str ( ) ; <nl> } <nl> <nl> <nl> std : : string ExternalQueryBuilder : : composeUpdateQuery ( const std : : string & update_field , const std : : string & time_point ) const <nl> { <nl> - std : : string out = composeLoadAllQuery ( ) ; <nl> - std : : string update_query ; <nl> + WriteBufferFromOwnString out ; <nl> + composeLoadAllQuery ( out ) ; <nl> <nl> if ( ! where . empty ( ) ) <nl> - update_query = " AND " + update_field + " > = ' " + time_point + " ' " ; <nl> + writeString ( " AND " , out ) ; <nl> else <nl> - update_query = " WHERE " + update_field + " > = ' " + time_point + " ' " ; <nl> + writeString ( " WHERE " , out ) ; <nl> + <nl> + writeQuoted ( update_field , out ) ; <nl> + writeString ( " > = ' " , out ) ; <nl> + writeString ( time_point , out ) ; <nl> + writeChar ( ' \ ' ' , out ) ; <nl> <nl> - return out . insert ( out . size ( ) - 1 , update_query ) ; / / / This is done to insert " update_query " before " out " ' s semicolon <nl> + writeChar ( ' ; ' , out ) ; <nl> + return out . str ( ) ; <nl> } <nl> <nl> <nl> ExternalQueryBuilder : : composeLoadKeysQuery ( const Columns & key_columns , const st <nl> } <nl> else / * if ( method = = IN_WITH_TUPLES ) * / <nl> { <nl> - writeString ( composeKeyTupleDefinition ( ) , out ) ; <nl> + composeKeyTupleDefinition ( out ) ; <nl> writeString ( " IN ( " , out ) ; <nl> <nl> first = true ; <nl> void ExternalQueryBuilder : : composeKeyCondition ( const Columns & key_columns , cons <nl> const auto & key_description = ( * dict_struct . key ) [ i ] ; <nl> <nl> / / / key_i = value_i <nl> - writeString ( key_description . name , out ) ; <nl> + writeQuoted ( key_description . name , out ) ; <nl> writeString ( " = " , out ) ; <nl> key_description . type - > serializeAsTextQuoted ( * key_columns [ i ] , row , out , format_settings ) ; <nl> } <nl> void ExternalQueryBuilder : : composeKeyCondition ( const Columns & key_columns , cons <nl> } <nl> <nl> <nl> - std : : string ExternalQueryBuilder : : composeKeyTupleDefinition ( ) const <nl> + void ExternalQueryBuilder : : composeKeyTupleDefinition ( WriteBuffer & out ) const <nl> { <nl> if ( ! dict_struct . key ) <nl> throw Exception { " Composite key required for method " , ErrorCodes : : UNSUPPORTED_METHOD } ; <nl> <nl> - std : : string result { " ( " } ; <nl> + writeChar ( ' ( ' , out ) ; <nl> <nl> auto first = true ; <nl> for ( const auto & key : * dict_struct . key ) <nl> { <nl> if ( ! first ) <nl> - result + = " , " ; <nl> + writeString ( " , " , out ) ; <nl> <nl> first = false ; <nl> - result + = key . name ; <nl> + writeQuoted ( key . name , out ) ; <nl> } <nl> <nl> - result + = " ) " ; <nl> - <nl> - return result ; <nl> + writeChar ( ' ) ' , out ) ; <nl> } <nl> <nl> <nl> mmm a / src / Dictionaries / ExternalQueryBuilder . h <nl> ppp b / src / Dictionaries / ExternalQueryBuilder . h <nl> struct ExternalQueryBuilder <nl> private : <nl> const FormatSettings format_settings ; <nl> <nl> + void composeLoadAllQuery ( WriteBuffer & out ) const ; <nl> + <nl> / / / Expression in form ( x = c1 AND y = c2 . . . ) <nl> void composeKeyCondition ( const Columns & key_columns , const size_t row , WriteBuffer & out ) const ; <nl> <nl> / / / Expression in form ( x , y , . . . 
) <nl> - std : : string composeKeyTupleDefinition ( ) const ; <nl> + void composeKeyTupleDefinition ( WriteBuffer & out ) const ; <nl> <nl> / / / Expression in form ( c1 , c2 , . . . ) <nl> void composeKeyTuple ( const Columns & key_columns , const size_t row , WriteBuffer & out ) const ; <nl> mmm a / tests / integration / helpers / cluster . py <nl> ppp b / tests / integration / helpers / cluster . py <nl> <nl> import psycopg2 <nl> import pymongo <nl> import pymysql <nl> + import cassandra . cluster <nl> from dicttoxml import dicttoxml <nl> from kazoo . client import KazooClient <nl> from kazoo . exceptions import KazooException <nl> def wait_schema_registry_to_start ( self , timeout = 10 ) : <nl> logging . warning ( " Can ' t connect to SchemaRegistry : % s " , str ( ex ) ) <nl> time . sleep ( 1 ) <nl> <nl> + def wait_cassandra_to_start ( self , timeout = 15 ) : <nl> + cass_client = cassandra . cluster . Cluster ( [ " localhost " ] , port = " 9043 " ) <nl> + start = time . time ( ) <nl> + while time . time ( ) - start < timeout : <nl> + try : <nl> + cass_client . connect ( ) . execute ( " drop keyspace if exists test ; " ) <nl> + logging . info ( " Connected to Cassandra % s " ) <nl> + return <nl> + except Exception as ex : <nl> + logging . warning ( " Can ' t connect to Minio : % s " , str ( ex ) ) <nl> + time . sleep ( 1 ) <nl> + <nl> def start ( self , destroy_dirs = True ) : <nl> if self . is_up : <nl> return <nl> def start ( self , destroy_dirs = True ) : <nl> <nl> if self . with_cassandra and self . base_cassandra_cmd : <nl> subprocess_check_call ( self . base_cassandra_cmd + [ ' up ' , ' - d ' , ' - - force - recreate ' ] ) <nl> - time . sleep ( 10 ) <nl> + self . wait_cassandra_to_start ( ) <nl> <nl> clickhouse_start_cmd = self . base_cmd + [ ' up ' , ' - d ' , ' - - no - recreate ' ] <nl> logging . info ( " Trying to create ClickHouse instance by command % s " , ' ' . join ( map ( str , clickhouse_start_cmd ) ) ) <nl> mmm a / tests / integration / helpers / docker_compose_cassandra . yml <nl> ppp b / tests / integration / helpers / docker_compose_cassandra . yml <nl> services : <nl> image : cassandra <nl> restart : always <nl> ports : <nl> - - 6340 : 6349 <nl> + - 9043 : 9042 <nl> mmm a / tests / integration / test_dictionaries_all_layouts_and_sources / external_sources . py <nl> ppp b / tests / integration / test_dictionaries_all_layouts_and_sources / external_sources . py <nl> <nl> from tzlocal import get_localzone <nl> import datetime <nl> import os <nl> + import uuid <nl> <nl> <nl> class ExternalSource ( object ) : <nl> def _get_schema ( self ) : <nl> return " https " <nl> <nl> class SourceCassandra ( ExternalSource ) : <nl> + TYPE_MAPPING = { <nl> + ' UInt8 ' : ' tinyint ' , <nl> + ' UInt16 ' : ' smallint ' , <nl> + ' UInt32 ' : ' int ' , <nl> + ' UInt64 ' : ' bigint ' , <nl> + ' Int8 ' : ' tinyint ' , <nl> + ' Int16 ' : ' smallint ' , <nl> + ' Int32 ' : ' int ' , <nl> + ' Int64 ' : ' bigint ' , <nl> + ' UUID ' : ' uuid ' , <nl> + ' Date ' : ' date ' , <nl> + ' DateTime ' : ' timestamp ' , <nl> + ' String ' : ' text ' , <nl> + ' Float32 ' : ' float ' , <nl> + ' Float64 ' : ' double ' <nl> + } <nl> + <nl> + def __init__ ( self , name , internal_hostname , internal_port , docker_hostname , docker_port , user , password ) : <nl> + ExternalSource . __init__ ( self , name , internal_hostname , internal_port , docker_hostname , docker_port , user , password ) <nl> + self . 
structure = dict ( ) <nl> + <nl> def get_source_str ( self , table_name ) : <nl> return ' ' ' <nl> < cassandra > <nl> < host > { host } < / host > <nl> < port > { port } < / port > <nl> + < keyspace > test < / keyspace > <nl> + < column_family > { table } < / column_family > <nl> < / cassandra > <nl> ' ' ' . format ( <nl> host = self . docker_hostname , <nl> port = self . docker_port , <nl> + table = table_name , <nl> ) <nl> <nl> def prepare ( self , structure , table_name , cluster ) : <nl> self . client = cassandra . cluster . Cluster ( [ self . internal_hostname ] , port = self . internal_port ) <nl> + self . session = self . client . connect ( ) <nl> + self . session . execute ( " create keyspace if not exists test with replication = { ' class ' : ' SimpleStrategy ' , ' replication_factor ' : 1 } ; " ) <nl> + self . structure [ table_name ] = structure <nl> + columns = [ ' " ' + col . name + ' " ' + self . TYPE_MAPPING [ col . field_type ] for col in structure . get_all_fields ( ) ] <nl> + keys = [ ' " ' + col . name + ' " ' for col in structure . keys ] <nl> + # FIXME use partition key <nl> + query = ' create table test . " { name } " ( { columns } , primary key ( " { some_col } " , { pk } ) ) ; ' . format ( <nl> + name = table_name , columns = ' , ' . join ( columns ) , some_col = structure . ordinary_fields [ 0 ] . name , pk = ' , ' . join ( keys ) ) <nl> + self . session . execute ( query ) <nl> self . prepared = True <nl> <nl> + def get_value_to_insert ( self , value , type ) : <nl> + if type = = ' UUID ' : <nl> + return uuid . UUID ( value ) <nl> + elif type = = ' DateTime ' : <nl> + local_datetime = datetime . datetime . strptime ( value , ' % Y - % m - % d % H : % M : % S ' ) <nl> + return get_localzone ( ) . localize ( local_datetime ) <nl> + return value <nl> + <nl> def load_data ( self , data , table_name ) : <nl> - pass <nl> + names_and_types = [ ( field . name , field . field_type ) for field in self . structure [ table_name ] . get_all_fields ( ) ] <nl> + columns = [ ' " ' + col [ 0 ] + ' " ' for col in names_and_types ] <nl> + insert = ' insert into test . " { table } " ( { columns } ) values ( { args } ) ' . format ( <nl> + table = table_name , columns = ' , ' . join ( columns ) , args = ' , ' . join ( [ ' % s ' ] * len ( columns ) ) ) <nl> + for row in data : <nl> + values = [ self . get_value_to_insert ( row . get_value_by_name ( col [ 0 ] ) , col [ 1 ] ) for col in names_and_types ] <nl> + self . session . execute ( insert , values ) <nl> <nl> class SourceRedis ( ExternalSource ) : <nl> def __init__ ( <nl> mmm a / tests / integration / test_dictionaries_all_layouts_and_sources / test . py <nl> ppp b / tests / integration / test_dictionaries_all_layouts_and_sources / test . py <nl> <nl> from external_sources import SourceMongo , SourceHTTP , SourceHTTPS , SourceRedis , SourceCassandra <nl> from external_sources import SourceMongo , SourceMongoURI , SourceHTTP , SourceHTTPS , SourceRedis , SourceCassandra <nl> import math <nl> - <nl> + import time <nl> SCRIPT_DIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> dict_configs_path = os . path . 
join ( SCRIPT_DIR , ' configs / dictionaries ' ) <nl> <nl> <nl> ] <nl> <nl> SOURCES = [ <nl> - SourceCassandra ( " Cassandra " , " localhost " , " 6340 " , " cassandra1 " , " 6349 " , " " , " " ) , <nl> + SourceCassandra ( " Cassandra " , " localhost " , " 9043 " , " cassandra1 " , " 9042 " , " " , " " ) , <nl> SourceMongo ( " MongoDB " , " localhost " , " 27018 " , " mongo1 " , " 27017 " , " root " , " clickhouse " ) , <nl> SourceMongoURI ( " MongoDB_URI " , " localhost " , " 27018 " , " mongo1 " , " 27017 " , " root " , " clickhouse " ) , <nl> SourceMySQL ( " MySQL " , " localhost " , " 3308 " , " mysql1 " , " 3306 " , " root " , " clickhouse " ) , <nl> <nl> <nl> DICTIONARIES = [ ] <nl> <nl> - # Key - value dictionaries with onle one possible field for key <nl> + # Key - value dictionaries with only one possible field for key <nl> SOURCES_KV = [ <nl> SourceRedis ( " RedisSimple " , " localhost " , " 6380 " , " redis1 " , " 6379 " , " " , " " , storage_type = " simple " ) , <nl> SourceRedis ( " RedisHash " , " localhost " , " 6380 " , " redis1 " , " 6379 " , " " , " " , storage_type = " hash_map " ) , <nl> def get_dictionaries ( fold , total_folds , all_dicts ) : <nl> return all_dicts [ fold * chunk_len : ( fold + 1 ) * chunk_len ] <nl> <nl> <nl> + # @ pytest . mark . timeout ( 3000 ) <nl> @ pytest . mark . parametrize ( " fold " , list ( range ( 10 ) ) ) <nl> def test_simple_dictionaries ( started_cluster , fold ) : <nl> fields = FIELDS [ " simple " ] <nl> def test_simple_dictionaries ( started_cluster , fold ) : <nl> <nl> node . query ( " system reload dictionaries " ) <nl> <nl> + # time . sleep ( 3000 ) <nl> + <nl> queries_with_answers = [ ] <nl> for dct in simple_dicts : <nl> for row in data : <nl>
minimal implementation
ClickHouse/ClickHouse
310d5225280029b9018c6caf0a2570bb70999e99
2020-05-26T19:21:18Z
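
The load path in the commit above converts each dictionary value into the Python type the Cassandra driver expects before binding it into a parameterized INSERT. Below is a minimal standalone sketch of that conversion step using only the standard library; the field names and row values are illustrative, and the timezone localization done via tzlocal in the real code is omitted here.

import uuid
import datetime

def convert_value(value, field_type):
    # UUIDs arrive as strings and must become uuid.UUID objects for the driver.
    if field_type == 'UUID':
        return uuid.UUID(value)
    # DateTime values arrive as 'YYYY-MM-DD HH:MM:SS' strings.
    if field_type == 'DateTime':
        return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
    return value

row = {'id': '550e8400-e29b-41d4-a716-446655440000', 'created': '2020-05-26 19:21:18', 'amount': 3}
types = {'id': 'UUID', 'created': 'DateTime', 'amount': 'Int32'}
converted = {name: convert_value(val, types[name]) for name, val in row.items()}
print(converted)
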
--- a/src/assembler.h
+++ b/src/assembler.h
 class RelocInfo {
     NONE64,             // never recorded 64-bit value
     CODE_AGE_SEQUENCE,  // Not stored in RelocInfo array, used explictly by
                         // code aging.
+
+    // Encoded internal reference, used only on MIPS and MIPS64.
+    // Re-uses previous ARM-only encoding, to fit in RealRelocMode space.
+    INTERNAL_REFERENCE_ENCODED = CONST_POOL,
+
     FIRST_REAL_RELOC_MODE = CODE_TARGET,
     LAST_REAL_RELOC_MODE = VENEER_POOL,
     FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
 class RelocInfo {
   static inline bool IsInternalReference(Mode mode) {
     return mode == INTERNAL_REFERENCE;
   }
+  static inline bool IsInternalReferenceEncoded(Mode mode) {
+    return mode == INTERNAL_REFERENCE_ENCODED;
+  }
   static inline bool IsDebugBreakSlot(Mode mode) {
     return mode == DEBUG_BREAK_SLOT;
   }
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
 void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
       Assembler::JumpLabelToJumpRegister(pc_);
     }
   }
-  if (IsInternalReference(rmode_)) {
+  if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
     // Absolute code pointer inside code object moves with the code object.
     byte* p = reinterpret_cast<byte*>(pc_);
-    int count = Assembler::RelocateInternalReference(p, delta);
+    int count = Assembler::RelocateInternalReference(rmode_, p, delta);
     CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
   }
 }
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
 Register ToRegister(int num) {
 // Implementation of RelocInfo.
 
 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
-                                  1 << RelocInfo::INTERNAL_REFERENCE;
+                                  1 << RelocInfo::INTERNAL_REFERENCE |
+                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
 
 
 bool RelocInfo::IsCodedSpecially() {
 bool Assembler::IsAndImmediate(Instr instr) {
 }
 
 
-int Assembler::target_at(int32_t pos) {
+int Assembler::target_at(int32_t pos, bool is_internal) {
   Instr instr = instr_at(pos);
+  if (is_internal) {
+    if (instr == 0) {
+      return kEndOfChain;
+    } else {
+      int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+      int32_t delta = instr_address - instr;
+      DCHECK(pos > delta);
+      return pos - delta;
+    }
+  }
   if ((instr & ~kImm16Mask) == 0) {
     // Emitted label constant, not part of a branch.
     if (instr == 0) {
 int Assembler::target_at(int32_t pos) {
       DCHECK(pos > delta);
       return pos - delta;
     }
-  } else {  // IsLabel(instr)
-    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
-    if (imm28 == kEndOfJumpChain) {
-      // EndOfChain sentinel is returned directly, not relative to pc or pos.
-      return kEndOfChain;
-    } else {
-      return pos + imm28;
-    }
+  } else {
+    UNREACHABLE();
+    return 0;
   }
 }
 
 
-void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
+void Assembler::target_at_put(int32_t pos, int32_t target_pos,
+                              bool is_internal) {
   Instr instr = instr_at(pos);
+
+  if (is_internal) {
+    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+    instr_at_put(pos, imm);
+    return;
+  }
   if ((instr & ~kImm16Mask) == 0) {
     DCHECK(target_pos == kEndOfChain || target_pos >= 0);
     // Emitted label constant, not part of a branch.
 void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
 
     instr_at_put(pos, instr | (imm26 & kImm26Mask));
   } else {
-    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
-    instr_at_put(pos, imm);
+    UNREACHABLE();
   }
 }
 
 void Assembler::print(Label* L) {
       } else {
         PrintF("%d\n", instr);
       }
-      next(&l);
+      next(&l, internal_reference_positions_.find(l.pos()) !=
+               internal_reference_positions_.end());
     }
   } else {
     PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
 void Assembler::print(Label* L) {
 void Assembler::bind_to(Label* L, int pos) {
   DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
   int32_t trampoline_pos = kInvalidSlotPos;
+  bool is_internal = false;
   if (L->is_linked() && !trampoline_emitted_) {
     unbound_labels_count_--;
     next_buffer_check_ += kTrampolineSlotsSize;
 void Assembler::bind_to(Label* L, int pos) {
   while (L->is_linked()) {
     int32_t fixup_pos = L->pos();
     int32_t dist = pos - fixup_pos;
-    next(L);  // Call next before overwriting link with target at fixup_pos.
+    is_internal = internal_reference_positions_.find(fixup_pos) !=
+                  internal_reference_positions_.end();
+    next(L, is_internal);  // Call next before overwriting link with target at
+                           // fixup_pos.
     Instr instr = instr_at(fixup_pos);
-    if (IsBranch(instr)) {
+    if (is_internal) {
+      target_at_put(fixup_pos, pos, is_internal);
+    } else if (!is_internal && IsBranch(instr)) {
       if (dist > kMaxBranchOffset) {
         if (trampoline_pos == kInvalidSlotPos) {
           trampoline_pos = get_trampoline_entry(fixup_pos);
           CHECK(trampoline_pos != kInvalidSlotPos);
         }
         DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
-        target_at_put(fixup_pos, trampoline_pos);
+        target_at_put(fixup_pos, trampoline_pos, false);
         fixup_pos = trampoline_pos;
         dist = pos - fixup_pos;
       }
-      target_at_put(fixup_pos, pos);
+      target_at_put(fixup_pos, pos, false);
     } else {
-      target_at_put(fixup_pos, pos);
+      target_at_put(fixup_pos, pos, false);
     }
   }
   L->bind_to(pos);
 void Assembler::bind(Label* L) {
 }
 
 
-void Assembler::next(Label* L) {
+void Assembler::next(Label* L, bool is_internal) {
   DCHECK(L->is_linked());
-  int link = target_at(L->pos());
+  int link = target_at(L->pos(), is_internal);
   if (link == kEndOfChain) {
     L->Unuse();
   } else {
 void Assembler::bc1t(int16_t offset, uint16_t cc) {
 
 
 // Debugging.
-int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
+                                         intptr_t pc_delta) {
   Instr instr = instr_at(pc);
-  if (IsLui(instr)) {
-    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
-    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
-    DCHECK(IsOri(instr_ori));
-    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
-    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
-    if (imm == kEndOfJumpChain) {
-      return 0;  // Number of instructions patched.
-    }
-    imm += pc_delta;
-    DCHECK((imm & 3) == 0);
-
-    instr_lui &= ~kImm16Mask;
-    instr_ori &= ~kImm16Mask;
-
-    instr_at_put(pc + 0 * Assembler::kInstrSize,
-                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
-    instr_at_put(pc + 1 * Assembler::kInstrSize,
-                 instr_ori | (imm & kImm16Mask));
-    return 2;  // Number of instructions patched.
-  } else if (IsJ(instr)) {
-    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
-    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
-      return 0;  // Number of instructions patched.
-    }
-    imm28 += pc_delta;
-    imm28 &= kImm28Mask;
-    DCHECK((imm28 & 3) == 0);
-
-    instr &= ~kImm26Mask;
-    uint32_t imm26 = imm28 >> 2;
-    DCHECK(is_uint26(imm26));
 
-    instr_at_put(pc, instr | (imm26 & kImm26Mask));
-    return 1;  // Number of instructions patched.
-  } else {  // IsLabel(instr)
+  if (RelocInfo::IsInternalReference(rmode)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc);
-    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
-    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+    if (*p == 0) {
       return 0;  // Number of instructions patched.
     }
     *p += pc_delta;
     return 1;  // Number of instructions patched.
+  } else {
+    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
+    if (IsLui(instr)) {
+      Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+      Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+      DCHECK(IsOri(instr_ori));
+      int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+      imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+      if (imm == kEndOfJumpChain) {
+        return 0;  // Number of instructions patched.
+      }
+      imm += pc_delta;
+      DCHECK((imm & 3) == 0);
+
+      instr_lui &= ~kImm16Mask;
+      instr_ori &= ~kImm16Mask;
+
+      instr_at_put(pc + 0 * Assembler::kInstrSize,
+                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+      instr_at_put(pc + 1 * Assembler::kInstrSize,
+                   instr_ori | (imm & kImm16Mask));
+      return 2;  // Number of instructions patched.
+    } else if (IsJ(instr)) {
+      uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+      if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+        return 0;  // Number of instructions patched.
+      }
+      imm28 += pc_delta;
+      imm28 &= kImm28Mask;
+      DCHECK((imm28 & 3) == 0);
+
+      instr &= ~kImm26Mask;
+      uint32_t imm26 = imm28 >> 2;
+      DCHECK(is_uint26(imm26));
+
+      instr_at_put(pc, instr | (imm26 & kImm26Mask));
+      return 1;  // Number of instructions patched.
+    } else {
+      UNREACHABLE();
+      return 0;
+    }
   }
 }
 
 void Assembler::GrowBuffer() {
   // Relocate runtime entries.
   for (RelocIterator it(desc); !it.done(); it.next()) {
     RelocInfo::Mode rmode = it.rinfo()->rmode();
-    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
+        rmode == RelocInfo::INTERNAL_REFERENCE) {
       byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
-      RelocateInternalReference(p, pc_delta);
+      RelocateInternalReference(rmode, p, pc_delta);
     }
   }
-
   DCHECK(!overflow());
 }
 
 void Assembler::dd(Label* label) {
     *reinterpret_cast<uint32_t*>(pc_) = data;
     pc_ += sizeof(uint32_t);
   } else {
-    int target_pos;
-    if (label->is_linked()) {
-      // Point to previous instruction that uses the link.
-      target_pos = label->pos();
-    } else {
-      // First entry of the link chain points to itself.
-      target_pos = pc_offset();
-    }
-    label->link_to(pc_offset());
-    // Encode internal reference to unbound label. We set the least significant
-    // bit to distinguish unbound internal references in GrowBuffer() below.
-    int diff = target_pos - pc_offset();
-    DCHECK_EQ(0, diff & 3);
-    int imm26 = diff >> 2;
-    DCHECK(is_int26(imm26));
-    // Emit special LABEL instruction.
-    emit(LABEL | (imm26 & kImm26Mask));
+    uint32_t target_pos = jump_address(label);
+    emit(target_pos);
+    internal_reference_positions_.insert(label->pos());
   }
 }
 
 void Assembler::CheckTrampolinePool() {
       // Buffer growth (and relocation) must be blocked for internal
       // references until associated instructions are emitted and available
       // to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
       lui(at, (imm32 & kHiMask) >> kLuiShift);
       ori(at, at, (imm32 & kImm16Mask));
     }
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
 
 #include <stdio.h>
 
+#include <set>
+
 #include "src/assembler.h"
 #include "src/mips/constants-mips.h"
 #include "src/serialize.h"
 class Assembler : public AssemblerBase {
   void RecordDeoptReason(const int reason, const int raw_position);
 
 
-  static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+  static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
+                                       intptr_t pc_delta);
 
   // Writes a single byte or word of data in the code stream. Used for
   // inline tables, e.g., jump-tables.
 class Assembler : public AssemblerBase {
   int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
   // Decode branch instruction at pos and return branch target pos.
-  int target_at(int32_t pos);
+  int target_at(int32_t pos, bool is_internal);
 
   // Patch branch instruction at pos to branch to given branch target pos.
-  void target_at_put(int32_t pos, int32_t target_pos);
+  void target_at_put(int32_t pos, int32_t target_pos, bool is_internal);
 
   // Say if we need to relocate with this mode.
   bool MustUseReg(RelocInfo::Mode rmode);
 class Assembler : public AssemblerBase {
   // Labels.
   void print(Label* L);
   void bind_to(Label* L, int pos);
-  void next(Label* L);
+  void next(Label* L, bool is_internal);
 
   // One trampoline consists of:
   // - space for trampoline slots,
 class Assembler : public AssemblerBase {
   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
   static const int kInvalidSlotPos = -1;
 
+  // Internal reference positions, required for unbounded internal reference
+  // labels.
+  std::set<int> internal_reference_positions_;
+
   Trampoline trampoline_;
   bool internal_trampoline_exception_;
 
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
 enum Opcode {
 
   DADDI = ((3 << 3) + 0) << kOpcodeShift,  // This is also BNEC.
   SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
-  LABEL = ((3 << 3) + 5) << kOpcodeShift,
   SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
 
   LB = ((4 << 3) + 0) << kOpcodeShift,
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
     { BlockGrowBufferScope block_buf_growth(this);
       // Buffer growth (and relocation) must be blocked for internal references
       // until associated instructions are emitted and available to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
       j(imm28);
     }
     // Emit a nop in the branch delay slot if required.
 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
     { BlockGrowBufferScope block_buf_growth(this);
       // Buffer growth (and relocation) must be blocked for internal references
       // until associated instructions are emitted and available to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
       lui(at, (imm32 & kHiMask) >> kLuiShift);
       ori(at, at, (imm32 & kImm16Mask));
     }
 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
     { BlockGrowBufferScope block_buf_growth(this);
       // Buffer growth (and relocation) must be blocked for internal references
       // until associated instructions are emitted and available to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
       lui(at, (imm32 & kHiMask) >> kLuiShift);
       ori(at, at, (imm32 & kImm16Mask));
     }
MIPS: reland Fix 'Assembler support for internal references.'
v8/v8
53d04bc14bc96a90a9f34f6098d2fbbad7245c01
2015-02-17T14:52:36Z
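
For the plain INTERNAL_REFERENCE case, the patch above simply adds pc_delta to an absolute 32-bit word stored in the code buffer (the *p += pc_delta branch); the encoded lui/ori and j forms are handled separately. A toy model of that absolute-word relocation step, written as a simplified Python sketch rather than real assembler code:

import struct

def relocate_internal_reference(buf, offset, pc_delta):
    # Read the absolute address stored at 'offset', skip unbound (zero) entries,
    # then shift it by pc_delta, mirroring the IsInternalReference branch above.
    (addr,) = struct.unpack_from('<I', buf, offset)
    if addr == 0:
        return 0  # nothing to patch
    struct.pack_into('<I', buf, offset, (addr + pc_delta) & 0xFFFFFFFF)
    return 1  # one word patched

code = bytearray(8)
struct.pack_into('<I', code, 4, 0x1000)   # pretend a jump-table entry points at 0x1000
relocate_internal_reference(code, 4, 0x20)
print(hex(struct.unpack_from('<I', code, 4)[0]))  # 0x1020
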
--- a/java/src/main/java/com/google/protobuf/CodedOutputStream.java
+++ b/java/src/main/java/com/google/protobuf/CodedOutputStream.java
 public static int computeUInt32SizeNoTag(final int value) {
    * Caller is responsible for converting the enum value to its numeric value.
    */
   public static int computeEnumSizeNoTag(final int value) {
-    return computeRawVarint32Size(value);
+    return computeInt32SizeNoTag(value);
   }
 
   /**
--- a/java/src/test/java/com/google/protobuf/CodedOutputStreamTest.java
+++ b/java/src/test/java/com/google/protobuf/CodedOutputStreamTest.java
 public void testWriteWholePackedFieldsMessage() throws Exception {
     assertEqualBytes(TestUtil.getGoldenPackedFieldsMessage().toByteArray(),
                      rawBytes);
   }
+
+  /** Test writing a message containing a negative enum value. This used to
+   * fail because the size was not properly computed as a sign-extended varint. */
+  public void testWriteMessageWithNegativeEnumValue() throws Exception {
+    protobuf_unittest.UnittestProto.SparseEnumMessage message =
+        protobuf_unittest.UnittestProto.SparseEnumMessage.newBuilder()
+            .setSparseEnum(protobuf_unittest.UnittestProto.TestSparseEnum.SPARSE_E)
+            .build();
+    assertTrue(message.getSparseEnum().getNumber() < 0);
+    byte[] rawBytes = message.toByteArray();
+    protobuf_unittest.UnittestProto.SparseEnumMessage message2 =
+        protobuf_unittest.UnittestProto.SparseEnumMessage.parseFrom(rawBytes);
+    assertEquals(protobuf_unittest.UnittestProto.TestSparseEnum.SPARSE_E,
+                 message2.getSparseEnum());
+  }
 }
Fix issue 256: compute the correct size for negative enum values, which need to be sign extended.
protocolbuffers/protobuf
eab9b5d47faf6c9611be323f4354f9548f6e378a
2011-02-18T04:35:54Z
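
The one-line fix matters because a negative enum number is written as a sign-extended 64-bit varint (10 bytes), while the old size computation treated it as a raw 32-bit varint (5 bytes), so the declared message size disagreed with the bytes actually written. A small Python sketch of the two size rules, re-implementing the varint length calculation rather than calling protobuf:

def varint_size(value_unsigned):
    # A varint carries 7 payload bits per byte.
    size = 1
    while value_unsigned >= 0x80:
        value_unsigned >>= 7
        size += 1
    return size

def raw_varint32_size(v):      # old behaviour: truncate to 32 bits
    return varint_size(v & 0xFFFFFFFF)

def int32_size_no_tag(v):      # fixed behaviour: sign-extend to 64 bits
    return varint_size(v & 0xFFFFFFFFFFFFFFFF)

negative_enum = -3
print(raw_varint32_size(negative_enum))   # 5  (what the size computation claimed)
print(int32_size_no_tag(negative_enum))   # 10 (what the writer actually emits)
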
--- a/ios/sdk/WeexSDK/Sources/Manager/WXBridgeManager.m
+++ b/ios/sdk/WeexSDK/Sources/Manager/WXBridgeManager.m
 + (void)_performBlockOnBridgeThread:(void (^)(void))block
     if ([NSThread currentThread] == [self jsThread]) {
         block();
     } else {
-        [self performSelector:@selector(_performBlockOnBridgeThread:instance:)
+        [self performSelector:@selector(_performBlockOnBridgeThread:)
                      onThread:[self jsThread]
                    withObject:[block copy]
                 waitUntilDone:NO];
 + (void)_performBlockOnBackupBridgeThread:(void (^)(void))block putInTaskQueue:(
         return;
     }
     if (putInTaskQueue) {
-        [[WXSDKManager bridgeMgr].jsTaskQueue addObject:block];
+        [WXBridgeManager _performBlockOnBackupBridgeThread:^{
+            [[WXSDKManager bridgeMgr].jsTaskQueue addObject:block];
+        } putInTaskQueue:NO];
     } else {
-        [self performSelector:@selector(_performBlockOnBridgeThread:instance:)
-                     onThread:[self backupJsThread]
-                   withObject:[block copy]
-                waitUntilDone:NO];
+        if ([NSThread currentThread] == [self backupJsThread]) {
+            block();
+        } else {
+            [self performSelector:@selector(_performBlockOnBridgeThread:instance:)
+                         onThread:[self backupJsThread]
+                       withObject:[block copy]
+                    waitUntilDone:NO];
+        }
     }
 }
 
 - (void)createInstance:(NSString *)instance
 }
 
 - (void)executeJSTaskQueue {
-    if (_jsTaskQueue.count == 0 || !_supportMultiJSThread) {
-        return;
-    }
-    for (id block in _jsTaskQueue) {
-        [WXBridgeManager _performBlockOnBackupBridgeThread:block putInTaskQueue:NO];
-    }
-    [_jsTaskQueue removeAllObjects];
+    __weak typeof(self) weakSelf = self;
+    [WXBridgeManager _performBlockOnBackupBridgeThread:^{
+        if (weakSelf.jsTaskQueue.count == 0 || !weakSelf.supportMultiJSThread) {
+            return;
+        }
+        for (id task in weakSelf.jsTaskQueue) {
+            void (^block)(void) = task;
+            block();
+        }
+        [weakSelf.jsTaskQueue removeAllObjects];
+    } putInTaskQueue:NO];
 }
 
 - (WXThreadSafeMutableArray *)instanceIdStack
[iOS] Make jsTaskQueue access thread-safe
apache/incubator-weex
bc252dc491162f3598c529a1b9af1512b57eb91d
2019-08-07T08:20:54Z
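
The Objective-C changes above follow a common pattern: run a block immediately if the caller is already on the worker thread, otherwise marshal it onto that thread, so the task queue is only ever touched from one thread. A rough Python analogue of that pattern with a single worker thread and a queue; class and method names here are illustrative, not part of WeexSDK:

import threading
import queue

class SingleThreadExecutor:
    def __init__(self):
        self._tasks = queue.Queue()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        while True:
            self._tasks.get()()  # run each submitted block on the worker thread

    def perform(self, block):
        # Mirror of _performBlockOnBridgeThread: run inline if already on the
        # worker thread, otherwise hand the block to that thread.
        if threading.current_thread() is self._thread:
            block()
        else:
            self._tasks.put(block)

executor = SingleThreadExecutor()
done = threading.Event()
executor.perform(lambda: (print("runs on the worker thread"), done.set()))
done.wait(timeout=1)
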
--- a/cocos2dx/platform/android/CCFileUtils_android.cpp
+++ b/cocos2dx/platform/android/CCFileUtils_android.cpp
 unsigned char* CCFileUtils::getFileData(const char* pszFileName, const char* psz
     return pData;
 }
 
-void CCFileUtils::setResource(const char* pszZipFileName, const char* pszResPath)
+void CCFileUtils::setResource(const char* pszZipFileName)
 {
     CCAssert(0, "Have not implement!");
 }
--- a/cocos2dx/platform/win32/CCFileUtils_win32.cpp
+++ b/cocos2dx/platform/win32/CCFileUtils_win32.cpp
 unsigned char* CCFileUtils::getFileData(const char* pszFileName, const char* psz
     return pBuffer;
 }
 
-void CCFileUtils::setResource(const char* pszZipFileName, const char* pszResPath)
+void CCFileUtils::setResource(const char* pszZipFileName)
 {
     CCAssert(0, "Have not implement!");
 }
Issue: modify the interface to fit the refactor of CCFileUtils.
cocos2d/cocos2d-x
7337119501b600a3578b329c118b06ef119d9234
2011-04-25T01:40:10Z
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
 export LC_ALL=C.UTF-8
 export CONTAINER_NAME=ci_macos_cross
 export HOST=x86_64-apple-darwin16
 export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools"
-export OSX_SDK=10.14
+export XCODE_VERSION=10.2.1
+export XCODE_BUILD_ID=10E1001
 export RUN_UNIT_TESTS=false
 export RUN_FUNCTIONAL_TESTS=false
 export GOAL="deploy"
--- a/ci/test/05_before_script.sh
+++ b/ci/test/05_before_script.sh
 fi
 
 DOCKER_EXEC mkdir -p ${DEPENDS_DIR}/SDKs ${DEPENDS_DIR}/sdk-sources
 
-if [ -n "$OSX_SDK" ] && [ ! -f ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then
-  curl --location --fail $SDK_URL/MacOSX${OSX_SDK}.sdk.tar.gz -o ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz
+OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-libcxx-headers.tar.gz"
+OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_BASENAME}"
+
+if [ -n "$XCODE_VERSION" ] && [ ! -f "$OSX_SDK_PATH" ]; then
+  curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH"
 fi
-if [ -n "$OSX_SDK" ] && [ -f ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then
-  DOCKER_EXEC tar -C ${DEPENDS_DIR}/SDKs -xf ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz
+if [ -n "$XCODE_VERSION" ] && [ -f "$OSX_SDK_PATH" ]; then
+  DOCKER_EXEC tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH"
 fi
 if [[ $HOST = *-mingw32 ]]; then
   DOCKER_EXEC update-alternatives --set $HOST-g++ \$\(which $HOST-g++-posix\)
--- a/contrib/gitian-build.py
+++ b/contrib/gitian-build.py
 def main():
     args.macos = 'm' in args.os
 
     # Disable for MacOS if no SDK found
-    if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.14.sdk.tar.gz'):
+    if args.macos and not os.path.isfile('gitian-builder/inputs/Xcode-10.2.1-10E1001-extracted-SDK-with-libcxx-headers.tar.gz'):
         print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
         args.macos = False
 
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ b/contrib/gitian-descriptors/gitian-osx.yml
 remotes:
 - "url": "https://github.com/bitcoin/bitcoin.git"
   "dir": "bitcoin"
 files:
-- "MacOSX10.14.sdk.tar.gz"
+- "Xcode-10.2.1-10E1001-extracted-SDK-with-libcxx-headers.tar.gz"
 script: |
   set -e -o pipefail
 
 script: |
   BASEPREFIX="${PWD}/depends"
 
   mkdir -p ${BASEPREFIX}/SDKs
-  tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/MacOSX10.14.sdk.tar.gz
+  tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/Xcode-10.2.1-10E1001-extracted-SDK-with-libcxx-headers.tar.gz
 
   # Build dependencies for each host
   for i in $HOSTS; do
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
 OSX_MIN_VERSION=10.12
-OSX_SDK_VERSION=10.14
-OSX_SDK=$(SDK_PATH)/MacOSX$(OSX_SDK_VERSION).sdk
+OSX_SDK_VERSION=10.14.4
+XCODE_VERSION=10.2.1
+XCODE_BUILD_ID=10E1001
+OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers
 darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK)
 darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -stdlib=libc++
Adapt rest of tooling to new SDK naming scheme
bitcoin/bitcoin
3381e4a1892511d4d555853887c89badf4c940a9
2020-06-22T14:13:55Z
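
Every touched file in the commit above now derives one tarball name from XCODE_VERSION and XCODE_BUILD_ID instead of hard-coding MacOSX${OSX_SDK}.sdk.tar.gz. A small sketch of that naming rule and the existence check performed by gitian-build.py; the values come from the diff, while the helper function itself is illustrative:

import os

def sdk_tarball_name(xcode_version, xcode_build_id):
    # New naming scheme used throughout the CI scripts and gitian descriptors.
    return "Xcode-{}-{}-extracted-SDK-with-libcxx-headers.tar.gz".format(
        xcode_version, xcode_build_id)

name = sdk_tarball_name("10.2.1", "10E1001")
print(name)
print(os.path.isfile(os.path.join("gitian-builder", "inputs", name)))
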
--- a/src/builtins.h
+++ b/src/builtins.h
 enum BuiltinExtraArguments {
 #define CODE_AGE_LIST(V) \
   CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
 
-#define CODE_AGE_LIST_WITH_NO_AGE(V) \
+#define CODE_AGE_LIST_COMPLETE(V) \
+  V(NotExecuted) \
+  V(ExecutedOnce) \
   V(NoAge) \
   CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
 
--- a/src/heap.cc
+++ b/src/heap.cc
 void Heap::CheckpointObjectStats() {
       static_cast<int>(object_sizes_last_time_[index]));
   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
-  index = FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge; \
-  counters->count_of_CODE_AGE_##name()->Increment( \
-      static_cast<int>(object_counts_[index])); \
-  counters->count_of_CODE_AGE_##name()->Decrement( \
-      static_cast<int>(object_counts_last_time_[index])); \
-  counters->size_of_CODE_AGE_##name()->Increment( \
-      static_cast<int>(object_sizes_[index])); \
-  counters->size_of_CODE_AGE_##name()->Decrement( \
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+  index = \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+  counters->count_of_CODE_AGE_##name()->Increment( \
+      static_cast<int>(object_counts_[index])); \
+  counters->count_of_CODE_AGE_##name()->Decrement( \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_CODE_AGE_##name()->Increment( \
+      static_cast<int>(object_sizes_[index])); \
+  counters->size_of_CODE_AGE_##name()->Decrement( \
       static_cast<int>(object_sizes_last_time_[index]));
-  CODE_AGE_LIST_WITH_NO_AGE(ADJUST_LAST_TIME_OBJECT_COUNT)
+  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT
 
   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
--- a/src/heap.h
+++ b/src/heap.h
 class Heap {
         FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
     FIRST_CODE_AGE_SUB_TYPE =
         FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
-    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kLastCodeAge + 1
+    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
   };
 
   void RecordObjectStats(InstanceType type, size_t size) {
 class Heap {
   }
 
   void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
-    ASSERT(code_sub_type < Code::NUMBER_OF_KINDS);
-    ASSERT(code_age < Code::kLastCodeAge);
-    object_counts_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type]++;
-    object_sizes_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type] += size;
-    object_counts_[FIRST_CODE_AGE_SUB_TYPE + code_age]++;
-    object_sizes_[FIRST_CODE_AGE_SUB_TYPE + code_age] += size;
+    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+    int code_age_index =
+        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
+    ASSERT(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
+    ASSERT(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+           code_age_index < OBJECT_STATS_COUNT);
+    object_counts_[code_sub_type_index]++;
+    object_sizes_[code_sub_type_index] += size;
+    object_counts_[code_age_index]++;
+    object_sizes_[code_age_index] += size;
   }
 
   void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
 class MarkCompactMarkingVisitor::ObjectStatsTracker<
     int object_size = obj->Size();
     ASSERT(map->instance_type() == CODE_TYPE);
     Code* code_obj = Code::cast(obj);
-    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetAge(),
+    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
                                  object_size);
     ObjectStatsVisitBase(kVisitCode, map, obj);
   }
--- a/src/objects.cc
+++ b/src/objects.cc
 void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
 }
 
 
+static Code::Age EffectiveAge(Code::Age age) {
+  if (age == Code::kNotExecutedCodeAge) {
+    // Treat that's never been executed as old immediately.
+    age = Code::kIsOldCodeAge;
+  } else if (age == Code::kExecutedOnceCodeAge) {
+    // Pre-age code that has only been executed once.
+    age = Code::kPreAgedCodeAge;
+  }
+  return age;
+}
+
+
 void Code::MakeOlder(MarkingParity current_parity) {
   byte* sequence = FindCodeAgeSequence();
   if (sequence != NULL) {
     Age age;
     MarkingParity code_parity;
     GetCodeAgeAndParity(sequence, &age, &code_parity);
+    age = EffectiveAge(age);
     if (age != kLastCodeAge && code_parity != current_parity) {
       PatchPlatformCodeAge(GetIsolate(),
                            sequence,
 void Code::MakeOlder(MarkingParity current_parity) {
 
 
 bool Code::IsOld() {
-  Age age = GetAge();
-  return age >= kIsOldCodeAge;
+  return GetAge() >= kIsOldCodeAge;
 }
 
 
 byte* Code::FindCodeAgeSequence() {
 
 
 Code::Age Code::GetAge() {
+  return EffectiveAge(GetRawAge());
+}
+
+
+Code::Age Code::GetRawAge() {
   byte* sequence = FindCodeAgeSequence();
   if (sequence == NULL) {
-    return Code::kNoAgeCodeAge;
+    return kNoAgeCodeAge;
   }
   Age age;
   MarkingParity parity;
 void Code::GetCodeAgeAndParity(Code* code, Age* age,
 #undef HANDLE_CODE_AGE
   stub = *builtins->MarkCodeAsExecutedOnce();
   if (code == stub) {
-    // Treat that's never been executed as old immediatly.
-    *age = kIsOldCodeAge;
+    *age = kNotExecutedCodeAge;
     *parity = NO_MARKING_PARITY;
     return;
   }
   stub = *builtins->MarkCodeAsExecutedTwice();
   if (code == stub) {
-    // Pre-age code that has only been executed once.
-    *age = kPreAgedCodeAge;
+    *age = kExecutedOnceCodeAge;
     *parity = NO_MARKING_PARITY;
     return;
   }
--- a/src/objects.h
+++ b/src/objects.h
 class Code : public HeapObject {
     kNoAgeCodeAge = 0,
     CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
     kAfterLastCodeAge,
+    kFirstCodeAge = kNotExecutedCodeAge,
     kLastCodeAge = kAfterLastCodeAge - 1,
-    kCodeAgeCount = kAfterLastCodeAge - 1,
+    kCodeAgeCount = kAfterLastCodeAge - kNotExecutedCodeAge - 1,
     kIsOldCodeAge = kSexagenarianCodeAge,
     kPreAgedCodeAge = kIsOldCodeAge - 1
   };
 class Code : public HeapObject {
   static bool IsYoungSequence(byte* sequence);
   bool IsOld();
   Age GetAge();
+  // Gets the raw code age, including psuedo code-age values such as
+  // kNotExecutedCodeAge and kExecutedOnceCodeAge.
+  Age GetRawAge();
   static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
     return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
   }
--- a/src/v8-counters.cc
+++ b/src/v8-counters.cc
 Counters::Counters(Isolate* isolate) {
       StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \
   size_of_CODE_AGE_##name##_ = \
       StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name);
-  CODE_AGE_LIST_WITH_NO_AGE(SC)
+  CODE_AGE_LIST_COMPLETE(SC)
 #undef SC
 }
 
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
 class Counters {
     { return &count_of_CODE_AGE_##name##_; } \
   StatsCounter* size_of_CODE_AGE_##name() \
     { return &size_of_CODE_AGE_##name##_; }
-  CODE_AGE_LIST_WITH_NO_AGE(SC)
+  CODE_AGE_LIST_COMPLETE(SC)
 #undef SC
 
   enum Id {
 class Counters {
 #undef COUNTER_ID
 #define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
   kSizeOfCODE_AGE__##name,
-  CODE_AGE_LIST_WITH_NO_AGE(COUNTER_ID)
+  CODE_AGE_LIST_COMPLETE(COUNTER_ID)
 #undef COUNTER_ID
   stats_counter_count
 };
 class Counters {
 #define SC(name) \
   StatsCounter size_of_CODE_AGE_##name##_; \
   StatsCounter count_of_CODE_AGE_##name##_;
-  CODE_AGE_LIST_WITH_NO_AGE(SC)
+  CODE_AGE_LIST_COMPLETE(SC)
 #undef SC
 
   friend class Isolate;
Add support for tracking NotExecuted/ExecutedOnceCodeAge's when the --track_gc_object_stats flag is set.
v8/v8
88be0606cf230dd7ff202ad9e0f89502e6e1f6c6
2013-11-06T09:29:09Z
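
The index arithmetic in RecordCodeSubTypeStats is the heart of the change above: once the two pseudo-ages are prepended to the age list, raw age values no longer start at the first real slot, so the counter bucket is found by subtracting kFirstCodeAge before adding the base offset. A tiny Python model of that bucketing; the numeric values below are made up for illustration and are not v8's real enumerator values:

# Hypothetical age values: pseudo-ages sit below zero, real ages start at 0,
# so a raw age must be rebased by the first age value before indexing.
K_NOT_EXECUTED, K_EXECUTED_ONCE, K_NO_AGE, K_QUADRAGENARIAN = -2, -1, 0, 1
K_FIRST_CODE_AGE = K_NOT_EXECUTED
FIRST_CODE_AGE_SUB_TYPE = 100          # where age buckets begin in the stats array

object_counts = [0] * (FIRST_CODE_AGE_SUB_TYPE + 4)

def record_code_age(raw_age, size=1):
    index = FIRST_CODE_AGE_SUB_TYPE + raw_age - K_FIRST_CODE_AGE
    assert FIRST_CODE_AGE_SUB_TYPE <= index < len(object_counts)
    object_counts[index] += size

record_code_age(K_NOT_EXECUTED)
record_code_age(K_QUADRAGENARIAN)
print(object_counts[FIRST_CODE_AGE_SUB_TYPE:])  # [1, 0, 0, 1]
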
--- a/torch/Tensor.py
+++ b/torch/Tensor.py
 def expand(self, src, *args):
                 src_size, src_stride)
         return result
 
+    # TODO: maybe drop this in favour of csub? :(
     def sub(self, *sizes):
         if len(sizes) == 0:
             raise ValueError('sub requires at least two arguments')
 def repeatTensor(self, src, *args):
         xxtensor = xtensor.expandAs(urtensor)
         urtensor.copy(xxtensor)
         return result
+
+    def __add__(self, other):
+        return self.clone().add(other)
+    __radd__ = __add__
+
+    def __sub__(self, other):
+        return self.clone().csub(other)
+    __rsub__ = __sub__
+
+    def __mul__(self, other):
+        # TODO: isTensor checks many cases, while it might be faster to only
+        # see if other is a number. It's a weird thing in Python, so share
+        # some THPUtils functions in C namespace in the future.
+        if isTensor(other):
+            dim_self = self.dim()
+            dim_other = other.dim()
+            if dim_self == 1 and dim_other == 1:
+                return self.dot(other)
+            elif dim_self == 2 and dim_other == 1:
+                return self.new().mv(self, other)
+            elif dim_self == 2 and dim_other == 2:
+                return self.new().mm(self, other)
+        else:
+            return self.clone().mul(other)
+
+    def __rmul__(self, other):
+        # No need to check for tensor on lhs - it would execute it's __mul__
+        return self.clone().mul(other)
+
+    def __div__(self, other):
+        return self.clone().div(other)
+    __rdiv__ = __div__
+
+    def __mod__(self, other):
+        return self.clone().remainder(other)
+
+    def __neg__(self):
+        return self.clone().mul(-1)
+
+
Add arithmetic operators
pytorch/pytorch
8e79e00f9545b98ecd562905932c1432939a987b
2016-06-20T00:16:33Z
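
The __mul__ overload in the diff above dispatches on dimensionality: vector times vector becomes a dot product, matrix times vector an mv, matrix times matrix an mm, and multiplication by a number an elementwise multiply. A standalone Python sketch of that dispatch rule; FakeTensor is a plain stand-in class so the example runs without torch, and the returned strings just name the operation that would be invoked:

class FakeTensor:
    """Stand-in for the real Tensor; only dim() matters for the dispatch."""
    def __init__(self, ndim):
        self.ndim = ndim

    def dim(self):
        return self.ndim

    def __mul__(self, other):
        # Same dispatch shape as Tensor.__mul__ above: tensor operands are
        # routed to dot / mv / mm by dimensionality, numbers to an
        # elementwise multiply.
        if isinstance(other, FakeTensor):
            if self.dim() == 1 and other.dim() == 1:
                return 'dot'
            elif self.dim() == 2 and other.dim() == 1:
                return 'mv'
            elif self.dim() == 2 and other.dim() == 2:
                return 'mm'
        else:
            return 'elementwise mul'

print(FakeTensor(1) * FakeTensor(1))  # dot
print(FakeTensor(2) * FakeTensor(2))  # mm
print(FakeTensor(2) * 3.0)            # elementwise mul
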
--- a/lib/BasicsC/shell-colors.h
+++ b/lib/BasicsC/shell-colors.h
 extern "C" {
 /// @{
 ////////////////////////////////////////////////////////////////////////////////
 
-////////////////////////////////////////////////////////////////////////////////
+#ifndef _WIN32
+
+////////////////////////////////////////////////////////////////////////////////
 /// @brief color red
 ////////////////////////////////////////////////////////////////////////////////
 
 #define TRI_SHELL_COLOR_RED "\x1b[31m"
 
 ////////////////////////////////////////////////////////////////////////////////
-/// @brief color blod red
+/// @brief color bold red
 ////////////////////////////////////////////////////////////////////////////////
 
 #define TRI_SHELL_COLOR_BOLD_RED "\x1b[1;31m"
 extern "C" {
 
 #define TRI_SHELL_COLOR_RESET "\x1b[0m"
 
+#else
+// .............................................................................
+// Quick hack for windows
+// .............................................................................
+
+#define TRI_SHELL_COLOR_RED ""
+#define TRI_SHELL_COLOR_BOLD_RED ""
+#define TRI_SHELL_COLOR_GREEN ""
+#define TRI_SHELL_COLOR_BOLD_GREEN ""
+#define TRI_SHELL_COLOR_BLUE ""
+#define TRI_SHELL_COLOR_BOLD_BLUE ""
+#define TRI_SHELL_COLOR_YELLOW ""
+#define TRI_SHELL_COLOR_BOLD_YELLOW ""
+#define TRI_SHELL_COLOR_WHITE ""
+#define TRI_SHELL_COLOR_BOLD_WHITE ""
+#define TRI_SHELL_COLOR_BLACK ""
+#define TRI_SHELL_COLOR_BOLD_BLACK ""
+#define TRI_SHELL_COLOR_BLINK ""
+#define TRI_SHELL_COLOR_BRIGHT ""
+#define TRI_SHELL_COLOR_RESET ""
+
+#endif
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @}
 ////////////////////////////////////////////////////////////////////////////////
unset the escape codes for windows
arangodb/arangodb
6147a1d9c0048a85ea678293dd98227dedb0495e
2013-02-08T14:39:52Z
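
The same trick translates directly to other languages: define the ANSI escape sequences only where the terminal understands them and fall back to empty strings elsewhere. A Python sketch of the equivalent runtime check (simplified; modern Windows terminals can often be switched into VT mode instead of being given empty codes):

import sys

# Empty strings on Windows mirror the #else branch of the header above.
if sys.platform == 'win32':
    COLOR_RED, COLOR_BOLD_RED, COLOR_RESET = '', '', ''
else:
    COLOR_RED = '\x1b[31m'
    COLOR_BOLD_RED = '\x1b[1;31m'
    COLOR_RESET = '\x1b[0m'

print(COLOR_BOLD_RED + 'error:' + COLOR_RESET + ' something went wrong')
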
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
 def tf_workspace(path_prefix="", tf_repo_name=""):
   native.http_archive(
       name = "nsync",
       urls = [
-          "https://mirror.bazel.build/github.com/google/nsync/archive/ad722c76c6e6653f66be2e1f69521b7f7517da55.tar.gz",
-          # "https://github.com/google/nsync/archive/ad722c76c6e6653f66be2e1f69521b7f7517da55.tar.gz",
+          "https://mirror.bazel.build/github.com/google/nsync/archive/839fcc53ff9be58218ed55397deb3f8376a1444e.tar.gz",
+          # "https://github.com/google/nsync/archive/839fcc53ff9be58218ed55397deb3f8376a1444e.tar.gz",
       ],
-      sha256 = "7dd8ca49319f77e8226cd020a9210a525f88ac26e7041c59c95418223a1cdf55",
-      strip_prefix = "nsync-ad722c76c6e6653f66be2e1f69521b7f7517da55",
+      sha256 = "124d105edb0313ef2d7f5bb86ec94d9f8de95479e55641c4254ffa8f795e9b37",
+      strip_prefix = "nsync-839fcc53ff9be58218ed55397deb3f8376a1444e",
   )
 
   native.http_archive(
nsync update: portability fixes for MacOS, s390x.
tensorflow/tensorflow
b20c66a2ad6055602b680ba8f7c8f359e104fd6b
2017-10-24T03:36:30Z
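
When a pinned dependency is bumped like this, the sha256 field must match the new archive byte-for-byte or Bazel will refuse the download. A small sketch of how one might check a locally downloaded tarball against the pinned digest; the digest is the one from the diff, while the local filename is only illustrative:

import hashlib

def sha256_of(path):
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest()

pinned = "124d105edb0313ef2d7f5bb86ec94d9f8de95479e55641c4254ffa8f795e9b37"
# Uncomment once the archive has been downloaded locally:
# print(sha256_of("nsync-839fcc53ff9be58218ed55397deb3f8376a1444e.tar.gz") == pinned)
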
--- a/SConstruct
+++ b/SConstruct
 else:
     print("Automatically detected platform: " + selected_platform)
     env_base["platform"] = selected_platform
 
+if selected_platform in ["linux", "bsd", "x11"]:
+    if selected_platform == "x11":
+        # Deprecated alias kept for compatibility.
+        print('Platform "x11" has been renamed to "linuxbsd" in Godot 4.0. '
+              'Building for platform "linuxbsd".')
+    # Alias for convenience.
+    selected_platform = "linuxbsd"
+
 if selected_platform in platform_list:
     tmppath = "./platform/" + selected_platform
     sys.path.insert(0, tmppath)
Merge pull request from Calinou/scons-x11-alias-linuxbsd
godotengine/godot
fc5e1d03447d658c9f0696eafd95eb596b32e27c
2020-03-28T19:35:53Z
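
The SConstruct hunk above is essentially an alias table: legacy platform names are mapped onto the new canonical one, with a deprecation notice, before the normal platform lookup runs. A standalone sketch of that normalization step; the function name is illustrative and not part of Godot's build system:

def normalize_platform(selected_platform):
    # Map legacy/alias names onto the canonical platform directory.
    if selected_platform in ["linux", "bsd", "x11"]:
        if selected_platform == "x11":
            print('Platform "x11" has been renamed to "linuxbsd" in Godot 4.0. '
                  'Building for platform "linuxbsd".')
        return "linuxbsd"
    return selected_platform

print(normalize_platform("x11"))      # linuxbsd (with a deprecation notice)
print(normalize_platform("windows"))  # windows
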