Dataset schema (column name, value type, min to max string length per value):

  diff : string, 41 to 2.03M characters
  msg  : string, 1 to 1.5k characters
  repo : string, 5 to 40 characters
  sha  : string, 40 to 40 characters
  time : string, 20 to 20 characters
diff:
--- a/arangod/Utils/Transaction.cpp
+++ b/arangod/Utils/Transaction.cpp
@@ ... @@ OperationResult Transaction::documentLocal(std::string const& collectionName,
   }

   if (!options.silent) {
-    resultBuilder.add(VPackSlice(mptr.vpack()));
+    resultBuilder.add(VPackValue(static_cast<void const*>(mptr.vpack()), VPackValueType::External));
   }

   return TRI_ERROR_NO_ERROR;

msg:  trx->document now returns an EXTERNAL VPACK in local case
repo: arangodb/arangodb
sha:  507b062fe1b7c346a5180cd30f8b36ca93c9ac5d
time: 2016-03-09T11:43:32Z
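The change above avoids copying each document's VelocyPack bytes into the
result builder: a value tagged VPackValueType::External stores only a pointer
to VPack data owned elsewhere. A minimal self-contained sketch against the
public velocypack API (illustrative, not ArangoDB's actual transaction code):

    #include <velocypack/Builder.h>
    #include <velocypack/Slice.h>
    #include <velocypack/Value.h>
    #include <cassert>

    using namespace arangodb::velocypack;

    int main() {
      // A VPack document that already lives somewhere in memory: {"a": 1}.
      Builder doc;
      doc.openObject();
      doc.add("a", Value(1));
      doc.close();

      // Store only a pointer to those bytes, tagged External, instead of
      // copying them; the commit applies the same pattern to mptr.vpack().
      Builder result;
      result.add(Value(static_cast<void const*>(doc.slice().start()),
                       ValueType::External));

      // Consumers resolve the External back to the underlying slice.
      Slice s = result.slice().resolveExternal();
      assert(s.get("a").getInt() == 1);
      return 0;
    }

The trade-off is lifetime: the pointed-to bytes must outlive every reader of
the result builder.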
diff:
--- a/tests/sqlite/benchmark.c
+++ b/tests/sqlite/benchmark.c
@@ ... @@
 #include <time.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <sqlite3.h>

 #include <emscripten.h>

msg:  Fix an implicit declaration of atoi in sqlite's benchmark.c.
repo: emscripten-core/emscripten
sha:  84f01eb74ec336f97645ddeb2315a08b737cfd6b
time: 2014-03-04T08:09:11Z
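The underlying pitfall: in C, calling atoi() without <stdlib.h> makes pre-C99
compilers invent an implicit declaration (int atoi() with unchecked
arguments); C99 and later treat the missing declaration as a constraint
violation, and Clang, which Emscripten builds on, reports it as
-Wimplicit-function-declaration. A minimal standalone reproduction of the
pattern and its one-line fix (hypothetical snippet, not the benchmark itself):

    /* Without the <stdlib.h> include below, a C89/C90 compiler silently
     * assumes `int atoi()` at the first call site, and Clang warns with
     * -Wimplicit-function-declaration. */
    #include <stdio.h>
    #include <stdlib.h> /* declares atoi: the fix applied in this commit */

    int main(int argc, char **argv) {
      int iterations = (argc > 1) ? atoi(argv[1]) : 1000;
      printf("running %d iterations\n", iterations);
      return 0;
    }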
diff:
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ ... @@ Driver::OutputMode Driver::getOutputMode(const ArgList &Args) const {
   bool ShouldLink = false;
   types::ID CompileOutputType = types::TY_INVALID;

-  Arg *OutputModeArg;
-  if ((OutputModeArg = Args.getLastArg(options::OPT_c))) {
+  const Arg *const OutputModeArg = Args.getLastArg(options::OPT_modes_Group);
+  if (!OutputModeArg ||
+      OutputModeArg->getOption().matches(options::OPT_emit_executable)) {
+    // Default to producing a linked executable. As a result, the compile
+    // action should produce an object file suitable for linking.
+    ShouldLink = true;
+    CompileOutputType = types::TY_Object;
+  } else if (OutputModeArg->getOption().matches(options::OPT_c)) {
     // The user has requested an object file.
     CompileOutputType = types::TY_Object;
-  } else if ((OutputModeArg = Args.getLastArg(options::OPT_S))) {
+  } else if (OutputModeArg->getOption().matches(options::OPT_S)) {
     // The user has requested an assembly file.
     CompileOutputType = types::TY_Assembly;
-  } else if ((OutputModeArg = Args.getLastArg(options::OPT_emit_sil))) {
+  } else if (OutputModeArg->getOption().matches(options::OPT_emit_sil)) {
     // The user has requested a SIL file.
     CompileOutputType = types::TY_SIL;
-  } else if ((OutputModeArg = Args.getLastArg(options::OPT_emit_silgen))) {
+  } else if (OutputModeArg->getOption().matches(options::OPT_emit_silgen)) {
     // The user has requested a raw SIL file.
     CompileOutputType = types::TY_RawSIL;
-  } else if ((OutputModeArg = Args.getLastArg(options::OPT_parse)) ||
-             (OutputModeArg = Args.getLastArg(options::OPT_dump_parse)) ||
-             (OutputModeArg = Args.getLastArg(options::OPT_dump_ast)) ||
-             (OutputModeArg = Args.getLastArg(options::OPT_print_ast))) {
+  } else if (OutputModeArg->getOption().matches(options::OPT_parse) ||
+             OutputModeArg->getOption().matches(options::OPT_dump_parse) ||
+             OutputModeArg->getOption().matches(options::OPT_dump_ast) ||
+             OutputModeArg->getOption().matches(options::OPT_print_ast) ||
+             OutputModeArg->getOption().matches(options::OPT_i) ||
+             OutputModeArg->getOption().matches(options::OPT_repl)) {
     // These modes don't have any output.
     CompileOutputType = types::TY_Nothing;
-  } else if ((OutputModeArg = Args.getLastArg(options::OPT_emit_executable))) {
-    // The user asked for a linked executable. As a result, the compile action
-    // should produce an object file suitable for linking.
-    ShouldLink = true;
-    CompileOutputType = types::TY_Object;
   } else {
-    // Default to producing a linked executable. As a result, the compile
-    // action should produce an object file suitable for linking.
-    ShouldLink = true;
-    CompileOutputType = types::TY_Object;
+    llvm_unreachable("Unknown output mode option!");
   }

   return OutputMode(CompileOutputType, ShouldLink);

msg:  [driver] Adjusted the implementation of Driver::getOutputMode() so that it uses Option::matches() instead of a series of ArgList::getLastArg() calls.
repo: apple/swift
sha:  58ec58881972fa1c924601ea9b1b0f073aef63e5
time: 2013-12-15T00:58:25Z
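The refactoring leans on a standard llvm::opt facility: mutually exclusive
mode flags are declared under one tablegen OptionGroup, a single
ArgList::getLastArg(group) returns whichever member of the group appeared
last on the command line, and Option::matches() (which also walks the group
chain) dispatches on it. A sketch of how such a group is wired up
(illustrative; the Swift driver's actual Options.td entries may differ):

    // Options.td (tablegen), sketched:
    //   def modes_Group : OptionGroup<"<mode options>">;
    //   def c : Flag<["-"], "c">, Group<modes_Group>;
    //   def S : Flag<["-"], "S">, Group<modes_Group>;
    //
    // Driver code: one lookup for the whole group, then matches() dispatch.
    const llvm::opt::Arg *Mode = Args.getLastArg(options::OPT_modes_Group);
    if (!Mode) {
      // No mode flag at all: fall back to the default (linked executable).
    } else if (Mode->getOption().matches(options::OPT_c)) {
      // "-c" was the effective (last) mode flag.
    }

Besides brevity, the single lookup fixes precedence: the last mode flag on
the command line wins regardless of which flag it is, instead of the fixed
priority order the old chain of getLastArg() calls imposed.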
diff:
--- a/docker/test/performance-comparison/report.py
+++ b/docker/test/performance-comparison/report.py
@@ ... @@
 a {{ color: #06F; text-decoration: none; }}
 a:hover, a:active {{ color: #F40; text-decoration: underline; }}
 table {{ border: 0; }}
-.main {{ margin-left: 10%; }}
+.main {{ margin: auto; max-width: 95%; }}
 p.links a {{ padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-space: nowrap; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }}

 .cancela, .cancela:link, .cancela:visited, .cancela:hover, .cancela:focus, .cancela:active {{

msg:  Fix bad markup
repo: ClickHouse/ClickHouse
sha:  33f1fd3836c8c002c5d40aba2f6cd2d582591da6
time: 2020-05-09T15:50:28Z
diff:
--- a/include/swift/AST/Expr.h
+++ b/include/swift/AST/Expr.h
@@ ... @@ class CollectionExpr : public Expr {
   SourceLoc RBracketLoc;
   ConcreteDeclRef Initializer;

-  Expr *SemanticExpr = nullptr;
-
   /// Retrieve the intrusive pointer storage from the subtype
   Expr *const *getTrailingObjectsPointer() const;
   Expr **getTrailingObjectsPointer() {
@@ ... @@ class CollectionExpr : public Expr {
   SourceRange getSourceRange() const {
     return SourceRange(LBracketLoc, RBracketLoc);
   }
-
-  Expr *getSemanticExpr() const { return SemanticExpr; }
-  void setSemanticExpr(Expr *e) { SemanticExpr = e; }

   static bool classof(const Expr *e) {
     return e->getKind() >= ExprKind::First_CollectionExpr &&
@@ ... @@ class DictionaryExpr final : public CollectionExpr,
   static bool classof(const Expr *e) {
     return e->getKind() == ExprKind::Dictionary;
   }
+
+  Type getElementType();
 };

 /// Subscripting expressions like a[i] that refer to an element within a
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ ... @@ class PrintExpr : public ExprVisitor<PrintExpr> {
       OS << '\n';
       printRec(elt);
     }
-    printSemanticExpr(E->getSemanticExpr());
     PrintWithColorRAII(OS, ParenthesisColor) << ')';
   }
   void visitDictionaryExpr(DictionaryExpr *E) {
     printCommon(E, "dictionary_expr");
+    PrintWithColorRAII(OS, LiteralValueColor) << " initializer=";
+    E->getInitializer().dump(PrintWithColorRAII(OS, LiteralValueColor).getOS());
     for (auto elt : E->getElements()) {
       OS << '\n';
       printRec(elt);
     }
-    printSemanticExpr(E->getSemanticExpr());
     PrintWithColorRAII(OS, ParenthesisColor) << ')';
   }
   void visitSubscriptExpr(SubscriptExpr *E) {
--- a/lib/AST/ASTWalker.cpp
+++ b/lib/AST/ASTWalker.cpp
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr *, Stmt *,
   }

   Expr *visitCollectionExpr(CollectionExpr *E) {
-    HANDLE_SEMANTIC_EXPR(E);
-
     for (auto &elt : E->getElements())
       if (Expr *Sub = doIt(elt))
         elt = Sub;
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ ... @@ Type ArrayExpr::getElementType() {
       .subst(init.getSubstitutions());
 }

+Type DictionaryExpr::getElementType() {
+  auto init = getInitializer();
+  if (!init)
+    return Type();
+
+  auto *decl = cast<ConstructorDecl>(init.getDecl());
+  return decl->getMethodInterfaceType()
+      ->getAs<AnyFunctionType>()
+      ->getParams()[0]
+      .getPlainType()
+      .subst(init.getSubstitutions());
+}
+
 DictionaryExpr *DictionaryExpr::create(ASTContext &C, SourceLoc LBracketLoc,
                                        ArrayRef<Expr *> Elements,
                                        ArrayRef<SourceLoc> CommaLocs,
--- a/lib/SILGen/SILGenExpr.cpp
+++ b/lib/SILGen/SILGenExpr.cpp
@@ ... @@ namespace {
     RValue visitKeyPathExpr(KeyPathExpr *E, SGFContext C);
     RValue visitMagicIdentifierLiteralExpr(MagicIdentifierLiteralExpr *E,
                                            SGFContext C);
-    RValue visitArrayExpr(ArrayExpr *E, SGFContext C);
-    RValue visitDictionaryExpr(DictionaryExpr *E, SGFContext C);
+    RValue visitCollectionExpr(CollectionExpr *E, SGFContext C);
     RValue visitRebindSelfInConstructorExpr(RebindSelfInConstructorExpr *E,
                                             SGFContext C);
     RValue visitInjectIntoOptionalExpr(InjectIntoOptionalExpr *E, SGFContext C);
@@ ... @@ visitMagicIdentifierLiteralExpr(MagicIdentifierLiteralExpr *E, SGFContext C) {
   llvm_unreachable("Unhandled MagicIdentifierLiteralExpr in switch.");
 }

-RValue RValueEmitter::visitArrayExpr(ArrayExpr *E, SGFContext C) {
+RValue RValueEmitter::visitCollectionExpr(CollectionExpr *E, SGFContext C) {
   auto loc = SILLocation(E);
   ArgumentScope scope(SGF, loc);

@@ ... @@ RValue RValueEmitter::visitArrayExpr(ArrayExpr *E, SGFContext C) {
   // of emitting varargs.
   CanType arrayType, elementType;
   if (E->getInitializer()) {
-    elementType = E->getElementType()->getCanonicalType();
+    if (auto *arrayExpr = dyn_cast<ArrayExpr>(E)) {
+      elementType = arrayExpr->getElementType()->getCanonicalType();
+    } else {
+      auto *dictionaryExpr = cast<DictionaryExpr>(E);
+      elementType = dictionaryExpr->getElementType()->getCanonicalType();
+    }
     arrayType = ArraySliceType::get(elementType)->getCanonicalType();
   } else {
     arrayType = E->getType()->getCanonicalType();
@@ ... @@ RValue RValueEmitter::visitArrayExpr(ArrayExpr *E, SGFContext C) {
       loc, E->getInitializer(), std::move(args), E->getType(), C);
 }

-RValue RValueEmitter::visitDictionaryExpr(DictionaryExpr *E, SGFContext C) {
-  return visit(E->getSemanticExpr(), C);
-}
-
 /// Flattens one level of optional from a nested optional value.
 static ManagedValue flattenOptional(SILGenFunction &SGF, SILLocation loc,
                                     ManagedValue optVal) {
--- a/lib/Sema/CSApply.cpp
+++ b/lib/Sema/CSApply.cpp
@@ ... @@ namespace {
       if (!conformance)
         return nullptr;

-      // Call the witness that builds the dictionary literal.
-      // FIXME: callWitness() may end up re-doing some work we already did
-      // to convert the dictionary literal elements to the (key, value) tuple.
-      // It would be nicer to re-use them.
-      // FIXME: Cache the name.
-      // FIXME: This location info is bogus.
-      Expr *typeRef = TypeExpr::createImplicitHack(expr->getLoc(), dictionaryTy,
-                                                   tc.Context);
-      cs.cacheExprTypes(typeRef);
-
       DeclName name(tc.Context, DeclBaseName::createConstructor(),
                     {tc.Context.Id_dictionaryLiteral});
-
-      // Restructure the argument to provide the appropriate labels in the
-      // tuple.
-      SmallVector<TupleTypeElt, 4> typeElements;
-      SmallVector<Identifier, 4> names;
-      bool first = true;
-      for (auto elt : expr->getElements()) {
-        if (first) {
-          typeElements.push_back(TupleTypeElt(cs.getType(elt),
-                                              tc.Context.Id_dictionaryLiteral));
-          names.push_back(tc.Context.Id_dictionaryLiteral);
-
-          first = false;
-          continue;
-        }
-
-        typeElements.push_back(cs.getType(elt));
-        names.push_back(Identifier());
-      }
-
-      Type argType = TupleType::get(typeElements, tc.Context);
-      assert(isa<TupleType>(argType.getPointer()));
-
-      Expr *arg =
-          TupleExpr::create(tc.Context, expr->getLBracketLoc(),
-                            expr->getElements(),
-                            names,
-                            {},
-                            expr->getRBracketLoc(),
-                            /*HasTrailingClosure=*/false,
-                            /*Implicit=*/true,
-                            argType);
-
-      cs.cacheExprTypes(arg);
-
-      cs.setExprTypes(typeRef);
-      cs.setExprTypes(arg);
-
-      Expr *result = tc.callWitness(typeRef, dc, dictionaryProto,
-                                    *conformance, name, arg,
-                                    diag::dictionary_protocol_broken);
-      if (!result)
+      ConcreteDeclRef witness =
+          conformance->getWitnessByName(dictionaryTy->getRValueType(), name);
+      if (!witness || !isa<AbstractFunctionDecl>(witness.getDecl()))
         return nullptr;
+      expr->setInitializer(witness);

-      cs.cacheExprTypes(result);
+      auto elementType = expr->getElementType();
+      for (auto &element : expr->getElements()) {
+        element = coerceToType(element, elementType,
+                               cs.getConstraintLocator(element));
+      }

-      expr->setSemanticExpr(result);
       return expr;
     }

--- a/lib/Sema/CSDiag.cpp
+++ b/lib/Sema/CSDiag.cpp
@@ ... @@ namespace {
     llvm::DenseMap<Pattern *, Type> PatternTypes;
     llvm::DenseMap<ParamDecl *, Type> ParamDeclTypes;
     llvm::DenseMap<ParamDecl *, Type> ParamDeclInterfaceTypes;
-    llvm::DenseMap<CollectionExpr *, Expr *> CollectionSemanticExprs;
     llvm::DenseSet<ValueDecl *> PossiblyInvalidDecls;
     ExprTypeSaverAndEraser(const ExprTypeSaverAndEraser &) = delete;
     void operator=(const ExprTypeSaverAndEraser &) = delete;
@@ ... @@ namespace {
           P->setInvalid(false);
         }

-        // If we have a CollectionExpr with a type checked SemanticExpr,
-        // remove it so we can recalculate a new semantic form.
-        if (auto *CE = dyn_cast<CollectionExpr>(expr)) {
-          if (auto SE = CE->getSemanticExpr()) {
-            TS->CollectionSemanticExprs[CE] = SE;
-            CE->setSemanticExpr(nullptr);
-          }
-        }
-
        expr->setType(nullptr);

        return {true, expr};
@@ ... @@ namespace {
        paramDeclIfaceElt.first->setInterfaceType(paramDeclIfaceElt.second->getInOutObjectType());
    }

-    for (auto CSE : CollectionSemanticExprs)
-      CSE.first->setSemanticExpr(CSE.second);
-
    if (!PossiblyInvalidDecls.empty())
      for (auto D : PossiblyInvalidDecls)
        if (D->hasInterfaceType())
@@ ... @@ namespace {
  // we go digging through failed constraints, and expect their locators to
  // still be meaningful.
  ~ExprTypeSaverAndEraser() {
-    for (auto CSE : CollectionSemanticExprs)
-      if (!CSE.first->getType())
-        CSE.first->setSemanticExpr(CSE.second);
-
    for (auto exprElt : ExprTypes)
      if (!exprElt.first->getType())
        exprElt.first->setType(exprElt.second);
--- a/lib/Sema/CSGen.cpp
+++ b/lib/Sema/CSGen.cpp
@@ ... @@ namespace {
      }

      // Remove any semantic expression injected by typechecking.
-      if (auto CE = dyn_cast<CollectionExpr>(expr)) {
-        CE->setSemanticExpr(nullptr);
-      } else if (auto ISLE = dyn_cast<InterpolatedStringLiteralExpr>(expr)) {
+      if (auto ISLE = dyn_cast<InterpolatedStringLiteralExpr>(expr)) {
        ISLE->setSemanticExpr(nullptr);
      } else if (auto OLE = dyn_cast<ObjectLiteralExpr>(expr)) {
        OLE->setSemanticExpr(nullptr);
--- a/test/SILGen/literals.swift
+++ b/test/SILGen/literals.swift
@@ ... @@ func throwingElement<T : FooProtocol>() throws -> [T] {
   return try [makeBasic(), makeThrowing()]
 }

+class TakesDictionaryLiteral<Key, Value> : ExpressibleByDictionaryLiteral {
+  required init(dictionaryLiteral elements: (Key, Value)...) { }
+}
+
+// CHECK-LABEL: sil hidden [ossa] @$s8literals23returnsCustomDictionaryAA05TakesD7LiteralCyS2iGyF : $@convention(thin) () -> @owned TakesDictionaryLiteral<Int, Int> {
+// CHECK: [[TMP:%.*]] = apply %2(%0, %1) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int
+// CHECK: [[ARRAY_LENGTH:%.*]] = integer_literal $Builtin.Word, 2
+// CHECK: // function_ref _allocateUninitializedArray<A>(_:)
+// CHECK: [[ALLOCATE_VARARGS:%.*]] = function_ref @$ss27_allocateUninitializedArrayySayxG_BptBwlF : $@convention(thin) <τ_0_0> (Builtin.Word) -> (@owned Array<τ_0_0>, Builtin.RawPointer)
+// CHECK: [[ARR_TMP:%.*]] = apply [[ALLOCATE_VARARGS]]<(Int, Int)>([[ARRAY_LENGTH]])
+// CHECK: ([[ARR:%.*]], [[ADDRESS:%.*]]) = destructure_tuple [[ARR_TMP]]
+// CHECK: [[TUPLE_ADDR:%.*]] = pointer_to_address %9 : $Builtin.RawPointer to [strict] $*(Int, Int)
+// CHECK: [[KEY_ADDR:%.*]] = tuple_element_addr [[TUPLE_ADDR]] : $*(Int, Int), 0
+// CHECK: [[VALUE_ADDR:%.*]] = tuple_element_addr [[TUPLE_ADDR]] : $*(Int, Int), 1
+// CHECK: store [[TMP]] to [trivial] [[KEY_ADDR]] : $*Int
+// CHECK: store [[TMP]] to [trivial] [[VALUE_ADDR]] : $*Int
+// CHECK: [[IDX1:%.*]] = integer_literal $Builtin.Word, 1
+// CHECK: [[TUPLE_ADDR1:%.*]] = index_addr [[TUPLE_ADDR]] : $*(Int, Int), [[IDX1]] : $Builtin.Word
+// CHECK: [[KEY_ADDR:%.*]] = tuple_element_addr [[TUPLE_ADDR1]] : $*(Int, Int), 0
+// CHECK: [[VALUE_ADDR:%.*]] = tuple_element_addr [[TUPLE_ADDR1]] : $*(Int, Int), 1
+// CHECK: store [[TMP]] to [trivial] [[KEY_ADDR]] : $*Int
+// CHECK: store [[TMP]] to [trivial] [[VALUE_ADDR]] : $*Int
+// CHECK: [[METATYPE:%.*]] = metatype $@thick TakesDictionaryLiteral<Int, Int>.Type
+// CHECK: [[CTOR:%.*]] = class_method [[METATYPE]] : $@thick TakesDictionaryLiteral<Int, Int>.Type, #TakesDictionaryLiteral.init!allocator.1 : <Key, Value> (TakesDictionaryLiteral<Key, Value>.Type) -> ((Key, Value)...) -> TakesDictionaryLiteral<Key, Value>, $@convention(method) <τ_0_0, τ_0_1> (@owned Array<(τ_0_0, τ_0_1)>, @thick TakesDictionaryLiteral<τ_0_0, τ_0_1>.Type) -> @owned TakesDictionaryLiteral<τ_0_0, τ_0_1>
+// CHECK: [[RESULT:%.*]] = apply [[CTOR]]<Int, Int>(%8, %21)
+// CHECK: return [[RESULT]]
+
+func returnsCustomDictionary() -> TakesDictionaryLiteral<Int, Int> {
+  // Use temporary to simplify generated_sil
+  let tmp = 77
+  return [tmp: tmp, tmp: tmp]
+}
+
 struct Color: _ExpressibleByColorLiteral {
   init(_colorLiteralRed red: Float, green: Float, blue: Float, alpha: Float) { }
 }
--- a/test/SILGen/sil_locations.swift
+++ b/test/SILGen/sil_locations.swift
@@ ... @@ func tuple_element(_ x: (Int, Float)) {
 }

 func containers() -> ([Int], Dictionary<String, Int>) {
-  return ([1, 2, 3], ["Ankeny": 1, "Burnside": 2, "Couch": 3])
+  return ([1, 2, 3], ["Ankeny": 101, "Burnside": 102, "Couch": 103])
   // CHECK-LABEL: sil hidden [ossa] @$s13sil_locations10containers{{[_0-9a-zA-Z]*}}F
-  // CHECK: apply {{%.*}}<(String, Int)>({{%.*}}), loc "{{.*}}":[[@LINE-2]]:23

-  // CHECK: string_literal utf8 "Ankeny", loc "{{.*}}":[[@LINE-4]]:23
+  // CHECK: string_literal utf8 "Ankeny", loc "{{.*}}":[[@LINE-3]]:23

-  // CHECK: integer_literal $Builtin.IntLiteral, 1, loc "{{.*}}":[[@LINE-6]]:33
-  // CHECK: integer_literal $Builtin.IntLiteral, 2, loc "{{.*}}":[[@LINE-7]]:48
+  // CHECK: integer_literal $Builtin.IntLiteral, 101, loc "{{.*}}":[[@LINE-5]]:33
+  // CHECK: integer_literal $Builtin.IntLiteral, 102, loc "{{.*}}":[[@LINE-6]]:50

-
-
+  // CHECK: apply {{%.*}}<String, Int>({{%.*}}, {{%.*}}) : {{.*}}, loc "{{.*}}":[[@LINE-8]]:22
 }

msg:  Convert DictionaryExpr to not use tc.callWitness() or generate a SemanticExpr.
repo: apple/swift
sha:  0b03721ce8e04f066f56aed5c059058127dd4392
time: 2019-06-13T22:58:03Z
diff:
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ ... @@ DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
     : IterationStatement(labels),
       cond_(NULL),
       condition_position_(-1),
-      next_id_(GetNextId()) {
+      continue_id_(GetNextId()),
+      back_edge_id_(GetNextId()) {
 }


 WhileStatement::WhileStatement(ZoneStringList* labels)
     : IterationStatement(labels),
       cond_(NULL),
-      may_have_function_literal_(true) {
+      may_have_function_literal_(true),
+      body_id_(GetNextId()) {
 }


@@ ... @@ ForStatement::ForStatement(ZoneStringList* labels)
       next_(NULL),
       may_have_function_literal_(true),
       loop_variable_(NULL),
-      next_id_(GetNextId()) {
+      continue_id_(GetNextId()),
+      body_id_(GetNextId()) {
 }


--- a/src/ast.h
+++ b/src/ast.h
@@ ... @@ class DoWhileStatement : public IterationStatement {
   void set_condition_position(int pos) { condition_position_ = pos; }

   // Bailout support.
-  virtual int ContinueId() const { return next_id_; }
+  virtual int ContinueId() const { return continue_id_; }
+  int BackEdgeId() const { return back_edge_id_; }

  private:
   Expression* cond_;
   int condition_position_;
-  int next_id_;
+  int continue_id_;
+  int back_edge_id_;
 };


@@ ... @@ class WhileStatement : public IterationStatement {

   // Bailout support.
   virtual int ContinueId() const { return EntryId(); }
+  int BodyId() const { return body_id_; }

  private:
   Expression* cond_;
   // True if there is a function literal subexpression in the condition.
   bool may_have_function_literal_;
+  int body_id_;
 };


@@ ... @@ class ForStatement : public IterationStatement {
   }

   // Bailout support.
-  virtual int ContinueId() const { return next_id_; }
+  virtual int ContinueId() const { return continue_id_; }
+  int BodyId() const { return body_id_; }

   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
   Variable* loop_variable() { return loop_variable_; }
@@ ... @@ class ForStatement : public IterationStatement {
   // True if there is a function literal subexpression in the condition.
   bool may_have_function_literal_;
   Variable* loop_variable_;
-  int next_id_;
+  int continue_id_;
+  int body_id_;
 };


@@ ... @@ class IfStatement : public Statement {
               Statement* else_statement)
       : condition_(condition),
         then_statement_(then_statement),
-        else_statement_(else_statement) { }
+        else_statement_(else_statement),
+        then_id_(GetNextId()),
+        else_id_(GetNextId()) {
+  }

   DECLARE_NODE_TYPE(IfStatement)

@@ ... @@ class IfStatement : public Statement {
   Statement* then_statement() const { return then_statement_; }
   Statement* else_statement() const { return else_statement_; }

+  int ThenId() const { return then_id_; }
+  int ElseId() const { return else_id_; }
+
  private:
   Expression* condition_;
   Statement* then_statement_;
   Statement* else_statement_;
+  int then_id_;
+  int else_id_;
 };


@@ ... @@ class BinaryOperation : public Expression {
                   int pos)
       : op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
     ASSERT(Token::IsBinaryOp(op));
+    right_id_ = (op == Token::AND || op == Token::OR)
+        ? GetNextId()
+        : AstNode::kNoNumber;
   }

   // Create the binary operation corresponding to a compound assignment.
@@ ... @@ class BinaryOperation : public Expression {
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   bool IsSmiOnly() const { return is_smi_only_; }

+  // Bailout support.
+  int RightId() const { return right_id_; }
+
  private:
   Token::Value op_;
   Expression* left_;
   Expression* right_;
   int pos_;
   bool is_smi_only_;
+  // The short-circuit logical operations have an AST ID for their
+  // right-hand subexpression.
+  int right_id_;
 };


@@ ... @@ class Conditional : public Expression {
         then_expression_(then_expression),
         else_expression_(else_expression),
         then_expression_position_(then_expression_position),
-        else_expression_position_(else_expression_position) { }
+        else_expression_position_(else_expression_position),
+        then_id_(GetNextId()),
+        else_id_(GetNextId()) {
+  }

   DECLARE_NODE_TYPE(Conditional)

@@ ... @@ class Conditional : public Expression {
   Expression* then_expression() const { return then_expression_; }
   Expression* else_expression() const { return else_expression_; }

-  int then_expression_position() { return then_expression_position_; }
-  int else_expression_position() { return else_expression_position_; }
+  int then_expression_position() const { return then_expression_position_; }
+  int else_expression_position() const { return else_expression_position_; }
+
+  int ThenId() const { return then_id_; }
+  int ElseId() const { return else_id_; }

  private:
   Expression* condition_;
@@ ... @@ class Conditional : public Expression {
   Expression* else_expression_;
   int then_expression_position_;
   int else_expression_position_;
+  int then_id_;
+  int else_id_;
 };


--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ ... @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {

   context()->EmitLogicalLeft(expr, &eval_right, &done);

+  PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
   __ bind(&eval_right);
   if (context()->IsTest()) ForwardBailoutToChild(expr);
   context()->HandleExpression(expr->right());
@@ ... @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {

   if (stmt->HasElseStatement()) {
     VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
+    PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
     __ bind(&then_part);
     Visit(stmt->then_statement());
     __ jmp(&done);

+    PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
     __ bind(&else_part);
     Visit(stmt->else_statement());
   } else {
     VisitForControl(stmt->condition(), &then_part, &done, &then_part);
+    PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
     __ bind(&then_part);
     Visit(stmt->then_statement());
+
+    PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
   }
   __ bind(&done);
   PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
@@ ... @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
                  &stack_check);

   // Check stack before looping.
+  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
   __ bind(&stack_check);
   EmitStackCheck(stmt);
   __ jmp(&body);

-  __ bind(loop_statement.break_target());
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(loop_statement.break_target());
   decrement_loop_depth();
 }

@@ ... @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   // Emit the test at the bottom of the loop.
   __ jmp(&test);

+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&body);
   Visit(stmt->body());

@@ ... @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
                  loop_statement.break_target(),
                  loop_statement.break_target());

-  __ bind(loop_statement.break_target());
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(loop_statement.break_target());
   decrement_loop_depth();
 }

@@ ... @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   // Emit the test at the bottom of the loop (even if empty).
   __ jmp(&test);

+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&body);
   Visit(stmt->body());

-  __ bind(loop_statement.continue_target());
   PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
-
+  __ bind(loop_statement.continue_target());
   SetStatementPosition(stmt);
   if (stmt->next() != NULL) {
     Visit(stmt->next());
@@ ... @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
     __ jmp(&body);
   }

-  __ bind(loop_statement.break_target());
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(loop_statement.break_target());
   decrement_loop_depth();
 }

@@ ... @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
   Label true_case, false_case, done;
   VisitForControl(expr->condition(), &true_case, &false_case, &true_case);

+  PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
                         expr->then_expression_position());
@@ ... @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
     __ jmp(&done);
   }

+  PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
   if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ ... @@ class LChunkBuilder;
 //     HCallStub
 //     HConstant
 //     HControlInstruction
+//       HDeoptimize
 //       HGoto
 //       HUnaryControlInstruction
 //         HBranch
 //         HCompareMapAndBranch
 //         HReturn
 //         HThrow
-//       HDeoptimize
 //     HEnterInlined
 //     HFunctionLiteral
 //     HGlobalObject
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ ... @@ HBasicBlock::HBasicBlock(HGraph* graph)
       first_instruction_index_(-1),
       last_instruction_index_(-1),
       deleted_phis_(4),
-      is_inline_return_target_(false),
-      inverted_(false),
-      deopt_predecessor_(NULL) {
+      is_inline_return_target_(false) {
 }


@@ ... @@ void HStackCheckEliminator::Process() {
   for (int i = 0; i < graph_->blocks()->length(); i++) {
     HBasicBlock* block = graph_->blocks()->at(i);
     if (block->IsLoopHeader()) {
-      HBasicBlock* backedge = block->loop_information()->GetLastBackEdge();
-      HBasicBlock* dominator = backedge;
-      bool backedge_dominated_by_call = false;
-      while (dominator != block && !backedge_dominated_by_call) {
+      HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+      HBasicBlock* dominator = back_edge;
+      bool back_edge_dominated_by_call = false;
+      while (dominator != block && !back_edge_dominated_by_call) {
         HInstruction* instr = dominator->first();
-        while (instr != NULL && !backedge_dominated_by_call) {
+        while (instr != NULL && !back_edge_dominated_by_call) {
           if (instr->IsCall()) {
-            RemoveStackCheck(backedge);
-            backedge_dominated_by_call = true;
+            RemoveStackCheck(back_edge);
+            back_edge_dominated_by_call = true;
           }
           instr = instr->next();
         }
@@ ... @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {


 void TestContext::BuildBranch(HValue* value) {
+  // We expect the graph to be in edge-split form: there is no edge that
+  // connects a branch node to a join node.  We conservatively ensure that
+  // property by always adding an empty block on the outgoing edges of this
+  // branch.
   HGraphBuilder* builder = owner();
-  HBasicBlock* materialize_true = builder->graph()->CreateBasicBlock();
-  HBasicBlock* materialize_false = builder->graph()->CreateBasicBlock();
-  HBranch* branch = new HBranch(materialize_true, materialize_false, value);
+  HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
+  HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
+  HBranch* branch = new HBranch(empty_true, empty_false, value);
   builder->CurrentBlock()->Finish(branch);

-  HBasicBlock* true_block = if_true();
-  HValue* true_value = invert_true()
-      ? builder->graph()->GetConstantFalse()
-      : builder->graph()->GetConstantTrue();
-  materialize_true->set_inverted(invert_true());
-  true_block->set_deopt_predecessor(materialize_true);
-
-  if (true_block->IsInlineReturnTarget()) {
-    materialize_true->AddLeaveInlined(true_value, true_block);
+  HValue* const no_return_value = NULL;
+  HBasicBlock* true_target = if_true();
+  if (true_target->IsInlineReturnTarget()) {
+    empty_true->AddLeaveInlined(no_return_value, true_target);
   } else {
-    materialize_true->last_environment()->Push(true_value);
-    materialize_true->Goto(true_block);
+    empty_true->Goto(true_target);
   }

-  HBasicBlock* false_block = if_false();
-  HValue* false_value = invert_false()
-      ? builder->graph()->GetConstantTrue()
-      : builder->graph()->GetConstantFalse();
-  materialize_false->set_inverted(invert_false());
-  false_block->set_deopt_predecessor(materialize_false);
-
-  if (false_block->IsInlineReturnTarget()) {
-    materialize_false->AddLeaveInlined(false_value, false_block);
+  HBasicBlock* false_target = if_false();
+  if (false_target->IsInlineReturnTarget()) {
+    empty_false->AddLeaveInlined(no_return_value, false_target);
   } else {
-    materialize_false->last_environment()->Push(false_value);
-    materialize_false->Goto(false_block);
+    empty_false->Goto(false_target);
   }
   builder->subgraph()->set_exit_block(NULL);
 }
@@ ... @@ void TestContext::BuildBranch(HValue* value) {
   } while (false)


+#define VISIT_FOR_CONTROL(expr, true_block, false_block) \
+  do {                                                   \
+    VisitForControl(expr, true_block, false_block);      \
+    if (HasStackOverflow()) return;                      \
+  } while (false)
+
+
 // 'thing' could be an expression, statement, or list of statements.
 #define ADD_TO_SUBGRAPH(graph, thing) \
   do { \
@@ ... @@ void HGraphBuilder::VisitForValue(Expression* expr) {
 }


+void HGraphBuilder::VisitForControl(Expression* expr,
+                                    HBasicBlock* true_block,
+                                    HBasicBlock* false_block) {
+  TestContext for_test(this, true_block, false_block);
+  Visit(expr);
+}
+
+
 HValue* HGraphBuilder::VisitArgument(Expression* expr) {
   VisitForValue(expr);
   if (HasStackOverflow() || !subgraph()->HasExit()) return NULL;
@@ ... @@ void HGraphBuilder::AddToSubgraph(HSubgraph* graph, Expression* expr) {
 }


-void HGraphBuilder::VisitCondition(Expression* expr,
-                                   HBasicBlock* true_block,
-                                   HBasicBlock* false_block,
-                                   bool invert_true,
-                                   bool invert_false) {
-  VisitForControl(expr, true_block, false_block, invert_true, invert_false);
-  CHECK_BAILOUT;
-#ifdef DEBUG
-  HValue* value = true_block->predecessors()->at(0)->last_environment()->Top();
-  true_block->set_cond(HConstant::cast(value)->handle());
-
-  value = false_block->predecessors()->at(0)->last_environment()->Top();
-  false_block->set_cond(HConstant::cast(value)->handle());
-#endif
-
-  true_block->SetJoinId(expr->id());
-  false_block->SetJoinId(expr->id());
-  true_block->last_environment()->Pop();
-  false_block->last_environment()->Pop();
-}
-
-
-void HGraphBuilder::AddConditionToSubgraph(HSubgraph* subgraph,
-                                           Expression* expr,
-                                           HSubgraph* true_graph,
-                                           HSubgraph* false_graph) {
-  SubgraphScope scope(this, subgraph);
-  VisitCondition(expr,
-                 true_graph->entry_block(),
-                 false_graph->entry_block(),
-                 false,
-                 false);
-}
-
-
-void HGraphBuilder::VisitForControl(Expression* expr,
-                                    HBasicBlock* true_block,
-                                    HBasicBlock* false_block,
-                                    bool invert_true,
-                                    bool invert_false) {
-  TestContext for_test(this, true_block, false_block,
-                       invert_true, invert_false);
-  Visit(expr);
-}
-
-
 void HGraphBuilder::AddToSubgraph(HSubgraph* graph,
                                   ZoneList<Statement*>* stmts) {
   SubgraphScope scope(this, graph);
@@ ... @@ void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {

 void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
   if (stmt->condition()->ToBooleanIsTrue()) {
+    AddSimulate(stmt->ThenId());
     Visit(stmt->then_statement());
   } else if (stmt->condition()->ToBooleanIsFalse()) {
+    AddSimulate(stmt->ElseId());
     Visit(stmt->else_statement());
   } else {
     HSubgraph* then_graph = CreateEmptySubgraph();
     HSubgraph* else_graph = CreateEmptySubgraph();
-    VisitCondition(stmt->condition(),
-                   then_graph->entry_block(),
-                   else_graph->entry_block(),
-                   false, false);
-    if (HasStackOverflow()) return;
+    VISIT_FOR_CONTROL(stmt->condition(),
+                      then_graph->entry_block(),
+                      else_graph->entry_block());
+
+    then_graph->entry_block()->SetJoinId(stmt->ThenId());
     ADD_TO_SUBGRAPH(then_graph, stmt->then_statement());
+
+    else_graph->entry_block()->SetJoinId(stmt->ElseId());
     ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
+
     current_subgraph_->AppendJoin(then_graph, else_graph, stmt);
   }
 }
@@ ... @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
     TestContext* test = TestContext::cast(context);
     VisitForControl(stmt->expression(),
                     test->if_true(),
-                    test->if_false(),
-                    false,
-                    false);
+                    test->if_false());
   } else {
     HValue* return_value = NULL;
     if (context->IsEffect()) {
@@ ... @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   } else {
     HSubgraph* go_back = CreateEmptySubgraph();
     HSubgraph* exit = CreateEmptySubgraph();
-    AddConditionToSubgraph(body_graph, stmt->cond(), go_back, exit);
-    if (HasStackOverflow()) return;
+    {
+      SubgraphScope scope(this, body_graph);
+      VISIT_FOR_CONTROL(stmt->cond(),
+                        go_back->entry_block(),
+                        exit->entry_block());
+      go_back->entry_block()->SetJoinId(stmt->BackEdgeId());
+      exit->entry_block()->SetJoinId(stmt->ExitId());
+    }
     current_subgraph_->AppendDoWhile(body_graph, stmt, go_back, exit);
   }
 }
@@ ... @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
     cond_graph = CreateLoopHeaderSubgraph(environment());
     body_graph = CreateEmptySubgraph();
     exit_graph = CreateEmptySubgraph();
-    AddConditionToSubgraph(cond_graph, stmt->cond(), body_graph, exit_graph);
-    if (HasStackOverflow()) return;
+    {
+      SubgraphScope scope(this, cond_graph);
+      VISIT_FOR_CONTROL(stmt->cond(),
+                        body_graph->entry_block(),
+                        exit_graph->entry_block());
+      body_graph->entry_block()->SetJoinId(stmt->BodyId());
+      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
+    }
     ADD_TO_SUBGRAPH(body_graph, stmt->body());
   }

@@ ... @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
     cond_graph = CreateLoopHeaderSubgraph(environment());
     body_graph = CreateEmptySubgraph();
     exit_graph = CreateEmptySubgraph();
-    AddConditionToSubgraph(cond_graph, stmt->cond(), body_graph, exit_graph);
-    if (HasStackOverflow()) return;
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+    {
+      SubgraphScope scope(this, cond_graph);
+      VISIT_FOR_CONTROL(stmt->cond(),
+                        body_graph->entry_block(),
+                        exit_graph->entry_block());
+      body_graph->entry_block()->SetJoinId(stmt->BodyId());
+      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
+    }
   } else {
     body_graph = CreateLoopHeaderSubgraph(environment());
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
   }
+  ADD_TO_SUBGRAPH(body_graph, stmt->body());

   HSubgraph* next_graph = NULL;
   body_graph->ResolveContinue(stmt);
@@ ... @@ void HGraphBuilder::VisitSharedFunctionInfoLiteral(
 void HGraphBuilder::VisitConditional(Conditional* expr) {
   HSubgraph* then_graph = CreateEmptySubgraph();
   HSubgraph* else_graph = CreateEmptySubgraph();
-  VisitCondition(expr->condition(),
-                 then_graph->entry_block(),
-                 else_graph->entry_block(),
-                 false, false);
-  if (HasStackOverflow()) return;
+  VISIT_FOR_CONTROL(expr->condition(),
+                    then_graph->entry_block(),
+                    else_graph->entry_block());
+
+  then_graph->entry_block()->SetJoinId(expr->ThenId());
   ADD_TO_SUBGRAPH(then_graph, expr->then_expression());
+
+  else_graph->entry_block()->SetJoinId(expr->ElseId());
   ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
+
   current_subgraph_->AppendJoin(then_graph, else_graph, expr);
   ast_context()->ReturnValue(Pop());
 }
@@ ... @@ bool HGraphBuilder::TryInline(Call* expr) {
     if_true->MarkAsInlineReturnTarget();
     if_false->MarkAsInlineReturnTarget();
     // AstContext constructor pushes on the context stack.
-    bool invert_true = TestContext::cast(ast_context())->invert_true();
-    bool invert_false = TestContext::cast(ast_context())->invert_false();
-    test_context = new TestContext(this, if_true, if_false,
-                                   invert_true, invert_false);
+    test_context = new TestContext(this, if_true, if_false);
     function_return_ = NULL;
   } else {
     // Inlined body is treated as if it occurs in the original call context.
@@ ... @@ bool HGraphBuilder::TryInline(Call* expr) {
       // simply jumping to the false target.
       //
       // TODO(3168478): refactor to avoid this.
-      HBasicBlock* materialize_true = graph()->CreateBasicBlock();
-      HBasicBlock* materialize_false = graph()->CreateBasicBlock();
+      HBasicBlock* empty_true = graph()->CreateBasicBlock();
+      HBasicBlock* empty_false = graph()->CreateBasicBlock();
       HBranch* branch =
-          new HBranch(materialize_true, materialize_false, return_value);
+          new HBranch(empty_true, empty_false, return_value);
       body->exit_block()->Finish(branch);

-      materialize_true->AddLeaveInlined(graph()->GetConstantTrue(),
-                                        test_context->if_true());
-      materialize_false->AddLeaveInlined(graph()->GetConstantFalse(),
-                                         test_context->if_false());
+      HValue* const no_return_value = NULL;
+      empty_true->AddLeaveInlined(no_return_value, test_context->if_true());
+      empty_false->AddLeaveInlined(no_return_value, test_context->if_false());
     }
     body->set_exit_block(NULL);
   }
@@ ... @@ bool HGraphBuilder::TryInline(Call* expr) {
     if_false->SetJoinId(expr->id());
     ASSERT(ast_context() == test_context);
     delete test_context;  // Destructor pops from expression context stack.
-    // Forward to the real test context.

-    // Discard the lingering branch value (which may be true or false,
-    // depending on whether the final condition was negated) and jump to the
-    // true target with a true branch value.
+    // Forward to the real test context.
+    HValue* const no_return_value = NULL;
     HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
-    bool invert_true = TestContext::cast(ast_context())->invert_true();
-    HValue* true_value = invert_true
-        ? graph()->GetConstantFalse()
-        : graph()->GetConstantTrue();
-    if_true->last_environment()->Pop();
     if (true_target->IsInlineReturnTarget()) {
-      if_true->AddLeaveInlined(true_value, true_target);
+      if_true->AddLeaveInlined(no_return_value, true_target);
     } else {
-      if_true->last_environment()->Push(true_value);
       if_true->Goto(true_target);
     }

-    // Do the same for the false target.
     HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
-    bool invert_false = TestContext::cast(ast_context())->invert_false();
-    HValue* false_value = invert_false
-        ? graph()->GetConstantTrue()
-        : graph()->GetConstantFalse();
-    if_false->last_environment()->Pop();
     if (false_target->IsInlineReturnTarget()) {
-      if_false->AddLeaveInlined(false_value, false_target);
+      if_false->AddLeaveInlined(no_return_value, false_target);
     } else {
-      if_false->last_environment()->Push(false_value);
       if_false->Goto(false_target);
     }

@@ ... @@ void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
   ASSERT(target->IsInlineReturnTarget());
   AddInstruction(new HLeaveInlined);
   HEnvironment* outer = last_environment()->outer();
-  outer->Push(return_value);
+  if (return_value != NULL) outer->Push(return_value);
   UpdateEnvironment(outer);
   Goto(target);
 }
@@ ... @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
     TestContext* context = TestContext::cast(ast_context());
     VisitForControl(expr->expression(),
                     context->if_false(),
-                    context->if_true(),
-                    !context->invert_false(),
-                    !context->invert_true());
+                    context->if_true());
   } else {
     HSubgraph* true_graph = CreateEmptySubgraph();
     HSubgraph* false_graph = CreateEmptySubgraph();
-    VisitCondition(expr->expression(),
-                   false_graph->entry_block(),
-                   true_graph->entry_block(),
-                   true, true);
-    if (HasStackOverflow()) return;
+    VISIT_FOR_CONTROL(expr->expression(),
+                      false_graph->entry_block(),
+                      true_graph->entry_block());
+    true_graph->entry_block()->SetJoinId(expr->expression()->id());
     true_graph->environment()->Push(graph_->GetConstantTrue());
+
+    false_graph->entry_block()->SetJoinId(expr->expression()->id());
     false_graph->environment()->Push(graph_->GetConstantFalse());
+
     current_subgraph_->AppendJoin(true_graph, false_graph, expr);
     ast_context()->ReturnValue(Pop());
   }
@@ ... @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
     // Translate left subexpression.
     HBasicBlock* eval_right = graph()->CreateBasicBlock();
     if (is_logical_and) {
-      VisitForControl(expr->left(), eval_right, context->if_false(),
-                      false, context->invert_false());
+      VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
     } else {
-      VisitForControl(expr->left(), context->if_true(), eval_right,
-                      context->invert_true(), false);
+      VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
     }
-    if (HasStackOverflow()) return;
-    eval_right->SetJoinId(expr->left()->id());
+    eval_right->SetJoinId(expr->RightId());

     // Translate right subexpression by visiting it in the same AST
     // context as the entire expression.
-    eval_right->last_environment()->Pop();
     subgraph()->set_exit_block(eval_right);
     Visit(expr->right());

--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ ... @@ class HBasicBlock : public ZoneObject {
   bool IsInlineReturnTarget() const { return is_inline_return_target_; }
   void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }

-  // If this block is a successor of a branch, his flags tells whether the
-  // preceding branch was inverted or not.
-  bool inverted() { return inverted_; }
-  void set_inverted(bool b) { inverted_ = b; }
-
-  HBasicBlock* deopt_predecessor() { return deopt_predecessor_; }
-  void set_deopt_predecessor(HBasicBlock* block) { deopt_predecessor_ = block; }
-
   Handle<Object> cond() { return cond_; }
   void set_cond(Handle<Object> value) { cond_ = value; }

@@ ... @@ class HBasicBlock : public ZoneObject {
   ZoneList<int> deleted_phis_;
   SetOncePointer<HBasicBlock> parent_loop_header_;
   bool is_inline_return_target_;
-  bool inverted_;
-  HBasicBlock* deopt_predecessor_;
   Handle<Object> cond_;
 };

@@ ... @@ class TestContext : public AstContext {
  public:
   TestContext(HGraphBuilder* owner,
               HBasicBlock* if_true,
-              HBasicBlock* if_false,
-              bool invert_true,
-              bool invert_false)
+              HBasicBlock* if_false)
       : AstContext(owner, Expression::kTest),
         if_true_(if_true),
-        if_false_(if_false),
-        invert_true_(invert_true),
-        invert_false_(invert_false) {
+        if_false_(if_false) {
   }

   virtual void ReturnValue(HValue* value);
@@ ... @@ class TestContext : public AstContext {
   HBasicBlock* if_true() const { return if_true_; }
   HBasicBlock* if_false() const { return if_false_; }

-  bool invert_true() { return invert_true_; }
-  bool invert_false() { return invert_false_; }
-
  private:
   // Build the shared core part of the translation unpacking a value into
   // control flow.
@@ ... @@ class TestContext : public AstContext {

   HBasicBlock* if_true_;
   HBasicBlock* if_false_;
-  bool invert_true_;
-  bool invert_false_;
 };


@@ ... @@ class HGraphBuilder : public AstVisitor {
   void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
   void AddToSubgraph(HSubgraph* graph, Statement* stmt);
   void AddToSubgraph(HSubgraph* graph, Expression* expr);
-  void AddConditionToSubgraph(HSubgraph* subgraph,
-                              Expression* expr,
-                              HSubgraph* true_graph,
-                              HSubgraph* false_graph);

   HValue* Top() const { return environment()->Top(); }
   void Drop(int n) { environment()->Drop(n); }
@@ ... @@ class HGraphBuilder : public AstVisitor {
   void VisitForEffect(Expression* expr);
   void VisitForControl(Expression* expr,
                        HBasicBlock* true_block,
-                       HBasicBlock* false_block,
-                       bool invert_true,
-                       bool invert_false);
-
-  // Visit an expression in a 'condition' context, i.e., in a control
-  // context but not a subexpression of logical and, or, or not.
-  void VisitCondition(Expression* expr,
-                      HBasicBlock* true_graph,
-                      HBasicBlock* false_graph,
-                      bool invert_true,
-                      bool invert_false);
+                       HBasicBlock* false_block);
+
   // Visit an argument and wrap it in a PushArgument instruction.
   HValue* VisitArgument(Expression* expr);
   void VisitArgumentList(ZoneList<Expression*>* arguments);
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ ... @@ LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {


 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  HBasicBlock* deopt_predecessor = instr->block()->deopt_predecessor();
-  if (deopt_predecessor != NULL &&
-      deopt_predecessor->inverted()) {
-    HEnvironment* env = current_block_->last_environment();
-    HValue* value = env->Pop();
-    ASSERT(value->IsConstant());
-    Handle<Object> obj = HConstant::cast(value)->handle();
-    ASSERT(*obj == *Factory::true_value() || *obj == *Factory::false_value());
-    env->Push(*obj == *Factory::true_value()
-                  ? current_block_->graph()->GetConstantFalse()
-                  : current_block_->graph()->GetConstantTrue());
-  }
-
   return new LLabel(instr->block());
 }

msg:  Fix issue 977, occasional failure of the DeltaBlue benchmark.
repo: v8/v8
sha:  4a6ef3ee281e6c5f72133f30be2a6494dceb5d75
time: 2010-12-16T13:13:36Z
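The common thread in this fix: every control-flow split point in the AST now
reserves stable bailout IDs at construction time via GetNextId(),
full-codegen records a deoptimization environment at each ID
(PrepareForBailoutForId) before binding the corresponding label, and the
Hydrogen graph builder attaches the same IDs to its target blocks
(SetJoinId), so optimized code can deoptimize to a precise point even on
edges that previously had no ID, such as the else edge of an if statement
with no else clause. A toy sketch of the ID-reservation pattern
(illustrative, not v8's real classes):

    // Both compilers walk the same AST in the same order, so IDs handed out
    // at construction time line up between unoptimized and optimized code.
    static int next_ast_id = 0;
    static int GetNextId() { return next_ast_id++; }

    struct IfStatementLike {
      int then_id_;
      int else_id_;
      IfStatementLike() : then_id_(GetNextId()), else_id_(GetNextId()) {}
      int ThenId() const { return then_id_; }
      // Reserved even when the source has no else clause: the fall-through
      // edge still needs a bailout point the optimizer can map to.
      int ElseId() const { return else_id_; }
    };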
diff:
--- a/lib/FrontendTool/FrontendTool.cpp
+++ b/lib/FrontendTool/FrontendTool.cpp
@@ ... @@ createOptRecordFile(StringRef Filename, DiagnosticEngine &DE) {

 struct PostSILGenInputs {
   std::unique_ptr<SILModule> TheSILModule;
-  bool astGuaranteedToCorrespondToSIL;
+  bool ASTGuaranteedToCorrespondToSIL;
   ModuleOrSourceFile ModuleOrPrimarySourceFile;
 };

@@ ... @@ static bool performCompile(CompilerInstance &Instance,
     PSGIs.pop_front();
     if (performCompileStepsPostSILGen(Instance, Invocation,
                                       std::move(PSGI.TheSILModule),
-                                      PSGI.astGuaranteedToCorrespondToSIL,
+                                      PSGI.ASTGuaranteedToCorrespondToSIL,
                                       PSGI.ModuleOrPrimarySourceFile,
                                       moduleIsPublic,
                                       ReturnValue, observer, Stats))

msg:  Capitalized astGuaranteedToCorrespondToSIL.
repo: apple/swift
sha:  6196ad74603d90aab76c643398a0512f078f5d2f
time: 2018-01-17T20:21:00Z
mmm a / hphp / hack / src / options / globalOptions . ml <nl> ppp b / hphp / hack / src / options / globalOptions . ml <nl> type t = { <nl> tco_remote_worker_key : string option ; <nl> tco_remote_check_id : string option ; <nl> tco_num_remote_workers : int ; <nl> + so_remote_version_specifier : string option ; <nl> so_remote_worker_eden_checkout_threshold : int ; <nl> so_naming_sqlite_path : string option ; <nl> tco_disallow_array_as_tuple : bool ; <nl> let default = { <nl> tco_remote_worker_key = None ; <nl> tco_remote_check_id = None ; <nl> tco_num_remote_workers = 4 ; <nl> + so_remote_version_specifier = None ; <nl> so_remote_worker_eden_checkout_threshold = 10000 ; <nl> so_naming_sqlite_path = None ; <nl> tco_disallow_array_as_tuple = false ; <nl> let make <nl> ? tco_remote_worker_key <nl> ? tco_remote_check_id <nl> ? ( tco_num_remote_workers = default . tco_num_remote_workers ) <nl> + ? so_remote_version_specifier <nl> ? ( so_remote_worker_eden_checkout_threshold = default . so_remote_worker_eden_checkout_threshold ) <nl> ? so_naming_sqlite_path <nl> ? ( tco_disallow_array_as_tuple = default . tco_disallow_array_as_tuple ) <nl> let make <nl> tco_remote_worker_key ; <nl> tco_remote_check_id ; <nl> tco_num_remote_workers ; <nl> + so_remote_version_specifier ; <nl> so_remote_worker_eden_checkout_threshold ; <nl> so_naming_sqlite_path ; <nl> tco_disallow_array_as_tuple ; <nl> let tco_remote_check_id t = <nl> t . tco_remote_check_id <nl> let tco_num_remote_workers t = <nl> t . tco_num_remote_workers <nl> + let so_remote_version_specifier t = <nl> + t . so_remote_version_specifier <nl> let so_remote_worker_eden_checkout_threshold t = <nl> t . so_remote_worker_eden_checkout_threshold <nl> let so_naming_sqlite_path t = <nl> mmm a / hphp / hack / src / options / globalOptions . mli <nl> ppp b / hphp / hack / src / options / globalOptions . mli <nl> type t = { <nl> ( * Dictates the number of remote type checking workers * ) <nl> tco_num_remote_workers : int ; <nl> <nl> + ( * The version specifier that is used to identify the remote worker package version to install * ) <nl> + so_remote_version_specifier : string option ; <nl> + <nl> ( * Above this threshold of files to check , the remote type checking worker will not use Eden * ) <nl> so_remote_worker_eden_checkout_threshold : int ; <nl> <nl> val make : <nl> ? tco_remote_worker_key : string - > <nl> ? tco_remote_check_id : string - > <nl> ? tco_num_remote_workers : int - > <nl> + ? so_remote_version_specifier : string - > <nl> ? so_remote_worker_eden_checkout_threshold : int - > <nl> ? so_naming_sqlite_path : string - > <nl> ? tco_disallow_array_as_tuple : bool - > <nl> val tco_remote_type_check : t - > bool <nl> val tco_remote_worker_key : t - > string option <nl> val tco_remote_check_id : t - > string option <nl> val tco_num_remote_workers : t - > int <nl> + val so_remote_version_specifier : t - > string option <nl> val so_remote_worker_eden_checkout_threshold : t - > int <nl> val so_naming_sqlite_path : t - > string option <nl> val tco_disallow_array_as_tuple : t - > bool <nl> mmm a / hphp / hack / src / options / typecheckerOptions . ml <nl> ppp b / hphp / hack / src / options / typecheckerOptions . ml <nl> let remote_type_check = GlobalOptions . tco_remote_type_check <nl> let remote_worker_key = GlobalOptions . tco_remote_worker_key <nl> let remote_check_id = GlobalOptions . tco_remote_check_id <nl> let num_remote_workers = GlobalOptions . tco_num_remote_workers <nl> + let remote_version_specifier = GlobalOptions . 
so_remote_version_specifier <nl> let disallow_array_typehint = GlobalOptions . tco_disallow_array_typehint <nl> let disallow_array_literal = GlobalOptions . tco_disallow_array_literal <nl> let language_feature_logging = GlobalOptions . tco_language_feature_logging <nl> mmm a / hphp / hack / src / server / serverConfig . ml <nl> ppp b / hphp / hack / src / server / serverConfig . ml <nl> let load config_filename options = <nl> ? tco_remote_worker_key : ( local_config . ServerLocalConfig . remote_worker_key ) <nl> ? tco_remote_check_id : ( local_config . ServerLocalConfig . remote_check_id ) <nl> ? tco_num_remote_workers : ( Some local_config . ServerLocalConfig . num_remote_workers ) <nl> + ? so_remote_version_specifier : ( local_config . ServerLocalConfig . remote_version_specifier ) <nl> ? so_remote_worker_eden_checkout_threshold : ( int_opt " remote_worker_eden_checkout_threshold " config ) <nl> ? so_naming_sqlite_path : ( local_config . ServerLocalConfig . naming_sqlite_path ) <nl> ? tco_language_feature_logging : ( bool_opt " language_feature_logging " config ) <nl> mmm a / hphp / hack / src / server / serverLocalConfig . ml <nl> ppp b / hphp / hack / src / server / serverLocalConfig . ml <nl> type t = { <nl> remote_worker_eden_checkout_threshold : int ; <nl> ( * Dictates the number of remote type checking workers * ) <nl> num_remote_workers : int ; <nl> + ( * The version of the package the remote worker is to install * ) <nl> + remote_version_specifier : string option ; <nl> ( * Enables the reverse naming table to fall back to SQLite for queries . * ) <nl> naming_sqlite_path : string option ; <nl> enable_naming_table_fallback : bool ; <nl> let default = { <nl> remote_worker_key = None ; <nl> remote_check_id = None ; <nl> num_remote_workers = 4 ; <nl> + remote_version_specifier = None ; <nl> remote_worker_eden_checkout_threshold = 10000 ; <nl> naming_sqlite_path = None ; <nl> enable_naming_table_fallback = false ; <nl> let load_ fn ~ silent ~ current_version overrides = <nl> string_opt " remote_check_id " config in <nl> let num_remote_workers = int_ " num_remote_workers " <nl> ~ default : default . num_remote_workers config in <nl> + let remote_version_specifier = string_opt " remote_version_specifier " config in <nl> let remote_worker_eden_checkout_threshold = int_ <nl> " remote_worker_eden_checkout_threshold " <nl> ~ default : default . remote_worker_eden_checkout_threshold <nl> let load_ fn ~ silent ~ current_version overrides = <nl> remote_worker_key ; <nl> remote_check_id ; <nl> num_remote_workers ; <nl> + remote_version_specifier ; <nl> naming_sqlite_path ; <nl> enable_naming_table_fallback ; <nl> symbolindex_search_provider ; <nl>
Version specifier config option
facebook/hhvm
e74f30d4a6fa198a7c5177bce8f8c4babd5a768a
2019-08-12T03:00:07Z
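The hhvm change above is one optional setting threaded through several layers: the record type, its default, the constructor argument, the accessor, and the config loader. A minimal sketch of the same plumbing pattern in C++ terms (the names are hypothetical, not part of the commit):

    #include <optional>
    #include <string>

    // One optional setting: absent by default, overridden only when the
    // config actually provides a value, and read through an accessor.
    struct GlobalOptions {
      std::optional<std::string> remote_version_specifier;  // default: absent
    };

    inline std::optional<std::string>
    remote_version_specifier(const GlobalOptions& opts) {
      return opts.remote_version_specifier;
    }

    inline GlobalOptions Load(const std::optional<std::string>& from_config) {
      GlobalOptions opts;  // starts from the defaults
      if (from_config) {
        opts.remote_version_specifier = from_config;
      }
      return opts;
    }

    int main() {
      GlobalOptions defaults = Load(std::nullopt);  // setting stays absent
      GlobalOptions set = Load(std::optional<std::string>("v1.2.3"));
      return (remote_version_specifier(set).has_value() &&
              !remote_version_specifier(defaults).has_value()) ? 0 : 1;
    }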
mmm a / spec / api - menu - spec . js <nl> ppp b / spec / api - menu - spec . js <nl> describe ( ' Menu module ' , ( ) = > { <nl> } ) <nl> <nl> it ( ' returns immediately ' , ( ) = > { <nl> - const { browserWindow , x , y } = menu . popup ( w , { x : 100 , y : 100 } ) <nl> + const { browserWindow , x , y } = menu . popup ( w , { x : 100 , y : 101 } ) <nl> <nl> assert . equal ( browserWindow , w ) <nl> assert . equal ( x , 100 ) <nl> - assert . equal ( y , 100 ) <nl> + assert . equal ( y , 101 ) <nl> <nl> menu . closePopup ( w ) <nl> } ) <nl> <nl> it ( ' works without a given BrowserWindow and options ' , ( ) = > { <nl> - const { browserWindow , x , y } = menu . popup ( { x : 100 , y : 100 } ) <nl> + const { browserWindow , x , y } = menu . popup ( { x : 100 , y : 101 } ) <nl> <nl> assert . equal ( browserWindow . constructor . name , ' BrowserWindow ' ) <nl> assert . equal ( x , 100 ) <nl> - assert . equal ( y , 100 ) <nl> + assert . equal ( y , 101 ) <nl> <nl> menu . closePopup ( ) <nl> } ) <nl> <nl> it ( ' works without a given BrowserWindow ' , ( ) = > { <nl> - const { browserWindow , x , y } = menu . popup ( 100 , 100 ) <nl> + const { browserWindow , x , y } = menu . popup ( 100 , 101 ) <nl> <nl> assert . equal ( browserWindow . constructor . name , ' BrowserWindow ' ) <nl> assert . equal ( x , 100 ) <nl> - assert . equal ( y , 100 ) <nl> + assert . equal ( y , 101 ) <nl> <nl> menu . closePopup ( ) <nl> } ) <nl> <nl> it ( ' works with a given BrowserWindow and no options ' , ( ) = > { <nl> - const { browserWindow , x , y } = menu . popup ( w , 100 , 100 ) <nl> + const { browserWindow , x , y } = menu . popup ( w , 100 , 101 ) <nl> <nl> assert . equal ( browserWindow , w ) <nl> assert . equal ( x , 100 ) <nl> - assert . equal ( y , 100 ) <nl> + assert . equal ( y , 101 ) <nl> <nl> menu . closePopup ( w ) <nl> } ) <nl>
: construction_worker : Properly test x vs y
electron/electron
f7ebfff8ae39f74e6a88fbf5586778099d3b279e
2017-12-11T22:05:07Z
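The electron fix above encodes a small but general testing rule: when two parameters have the same type, give them distinct values in the test, or a swapped-argument bug can never be caught. A self-contained C++ illustration of the same idea (the API here is invented, not Electron's):

    #include <cassert>

    struct Point { int x; int y; };

    // Invented stand-in for menu.popup(x, y).
    Point Popup(int x, int y) { return Point{x, y}; }

    int main() {
      // With x == y == 100, a swapped Popup(y, x) would pass too;
      // using 101 for y makes a swapped-coordinate bug fail the assert.
      Point p = Popup(100, 101);
      assert(p.x == 100 && p.y == 101);
      return 0;
    }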
mmm a / jstests / replsets / recover_prepared_transactions_startup_secondary_application . js <nl> ppp b / jstests / replsets / recover_prepared_transactions_startup_secondary_application . js <nl> <nl> * up can then successfully apply commit or abort transaction oplog entries during secondary oplog <nl> * application . <nl> * <nl> - * @ tags : [ uses_transactions , uses_prepare_transaction ] <nl> + * @ tags : [ requires_persistence , uses_transactions , uses_prepare_transaction ] <nl> * / <nl> <nl> ( function ( ) { <nl> <nl> assert . eq ( secondaryTestColl . findOne ( { _id : 1 } ) , { _id : 1 , a : 3 } ) ; <nl> <nl> replTest . stopSet ( ) ; <nl> - } ( ) ) ; <nl> \ No newline at end of file <nl> + } ( ) ) ; <nl>
SERVER - 38165 add the ' requires_persistence ' tag to recover_prepared_transactions_startup_secondary_application . js so it does not run on the inMemory storage engine
mongodb/mongo
8fea1553c0ddc6bf632d6d9e951a510892d6f1c5
2019-04-22T19:28:24Z
mmm a / tensorflow / contrib / boosted_trees / lib / quantiles / weighted_quantiles_summary . h <nl> ppp b / tensorflow / contrib / boosted_trees / lib / quantiles / weighted_quantiles_summary . h <nl> <nl> # ifndef THIRD_PARTY_TENSORFLOW_CONTRIB_BOOSTED_TREES_LIB_QUANTILES_WEIGHTED_QUANTILES_SUMMARY_H_ <nl> # define THIRD_PARTY_TENSORFLOW_CONTRIB_BOOSTED_TREES_LIB_QUANTILES_WEIGHTED_QUANTILES_SUMMARY_H_ <nl> <nl> + # include < cstring > <nl> # include < vector > <nl> <nl> # include " tensorflow / contrib / boosted_trees / lib / quantiles / weighted_quantiles_buffer . h " <nl> class WeightedQuantilesSummary { <nl> <nl> struct SummaryEntry { <nl> SummaryEntry ( const ValueType & v , const WeightType & w , const WeightType & min , <nl> - const WeightType & max ) <nl> - : value ( v ) , weight ( w ) , min_rank ( min ) , max_rank ( max ) { } <nl> + const WeightType & max ) { <nl> + / / Explicitly initialize all of the memory ( including padding from memory <nl> + / / alignment ) to allow the struct to be msan - resistant " plain old data " . <nl> + / / <nl> + / / POD = http : / / en . cppreference . com / w / cpp / concept / PODType <nl> + memset ( this , 0 , sizeof ( * this ) ) ; <nl> + <nl> + value = v ; <nl> + weight = w ; <nl> + min_rank = min ; <nl> + max_rank = max ; <nl> + } <nl> + <nl> + SummaryEntry ( ) { <nl> + memset ( this , 0 , sizeof ( * this ) ) ; <nl> <nl> - SummaryEntry ( ) : value ( 0 ) , weight ( 0 ) , min_rank ( 0 ) , max_rank ( 0 ) { } <nl> + value = 0 ; <nl> + weight = 0 ; <nl> + min_rank = 0 ; <nl> + max_rank = 0 ; <nl> + } <nl> <nl> bool operator = = ( const SummaryEntry & other ) const { <nl> return value = = other . value & & weight = = other . weight & & <nl>
Make SummaryEntry a msan - resistant plain - old - data ( something that can be safely memcpy ' d ) .
tensorflow/tensorflow
6523d8303c4df74cca1d914d4d5d4c126292b019
2017-08-31T04:20:50Z
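The memset-before-assign pattern above is what makes the struct safe to treat as raw bytes: zeroing the whole object first initializes the padding between members too, so memcpy'ing or byte-comparing entries never touches memory that MemorySanitizer considers uninitialized. A standalone sketch of the same technique (a made-up Entry type, not the commit's SummaryEntry):

    #include <cstring>

    // Safe only because Entry is trivially copyable: no virtual functions,
    // no base classes, no non-trivial members.
    struct Entry {
      Entry(double v, float w) {
        std::memset(this, 0, sizeof(*this));  // zeroes fields *and* padding
        value = v;
        weight = w;
      }
      double value;
      float weight;  // padding bytes typically follow this member
    };

    int main() {
      Entry a(1.0, 2.0f);
      Entry b(1.0, 2.0f);
      char raw_a[sizeof(Entry)];
      char raw_b[sizeof(Entry)];
      std::memcpy(raw_a, &a, sizeof(Entry));
      std::memcpy(raw_b, &b, sizeof(Entry));
      // Equal byte-for-byte, padding included, because both were zeroed.
      return std::memcmp(raw_a, raw_b, sizeof(Entry)) == 0 ? 0 : 1;
    }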
mmm a / src / compiler . cc <nl> ppp b / src / compiler . cc <nl> CompilationInfo : : CompilationInfo ( ParseInfo * parse_info , CodeStub * code_stub , <nl> : nullptr ) , <nl> track_positions_ ( FLAG_hydrogen_track_positions | | <nl> isolate - > cpu_profiler ( ) - > is_profiling ( ) ) , <nl> + inlined_functions_ ( zone ) , <nl> opt_count_ ( has_shared_info ( ) ? shared_info ( ) - > opt_count ( ) : 0 ) , <nl> parameter_count_ ( 0 ) , <nl> optimization_id_ ( - 1 ) , <nl> mmm a / src / compiler . h <nl> ppp b / src / compiler . h <nl> class CompilationInfo { <nl> <nl> Handle < Code > GenerateCodeStub ( ) ; <nl> <nl> + typedef ZoneVector < Handle < SharedFunctionInfo > > InlinedFunctionList ; <nl> + InlinedFunctionList const & inlined_functions ( ) const { <nl> + return inlined_functions_ ; <nl> + } <nl> + void AddInlinedFunction ( Handle < SharedFunctionInfo > inlined_function ) { <nl> + inlined_functions_ . push_back ( inlined_function ) ; <nl> + } <nl> + <nl> protected : <nl> ParseInfo * parse_info_ ; <nl> <nl> class CompilationInfo { <nl> std : : vector < InlinedFunctionInfo > inlined_function_infos_ ; <nl> bool track_positions_ ; <nl> <nl> + InlinedFunctionList inlined_functions_ ; <nl> + <nl> / / A copy of shared_info ( ) - > opt_count ( ) to avoid handle deref <nl> / / during graph optimization . <nl> int opt_count_ ; <nl> mmm a / src / compiler / code - generator . cc <nl> ppp b / src / compiler / code - generator . cc <nl> Handle < Code > CodeGenerator : : GenerateCode ( ) { <nl> <nl> / / Define deoptimization literals for all inlined functions . <nl> DCHECK_EQ ( 0u , deoptimization_literals_ . size ( ) ) ; <nl> - for ( auto frame_state_descriptor : code ( ) - > frame_state_descriptors ( ) ) { <nl> - Handle < SharedFunctionInfo > shared_info ; <nl> - if ( frame_state_descriptor - > shared_info ( ) . ToHandle ( & shared_info ) & & <nl> - ! shared_info . is_identical_to ( info - > shared_info ( ) ) ) { <nl> + for ( auto shared_info : info - > inlined_functions ( ) ) { <nl> + if ( ! shared_info . is_identical_to ( info - > shared_info ( ) ) ) { <nl> DefineDeoptimizationLiteral ( shared_info ) ; <nl> } <nl> } <nl> mmm a / src / compiler / js - inlining . cc <nl> ppp b / src / compiler / js - inlining . cc <nl> Reduction JSInliner : : Reduce ( Node * node ) { <nl> info . zone ( ) ) ; <nl> } <nl> <nl> + / / Remember that we inlined this function . <nl> + info_ - > AddInlinedFunction ( info . shared_info ( ) ) ; <nl> + <nl> return InlineCall ( node , frame_state , start , end ) ; <nl> } <nl> <nl>
[ turbofan ] Record the SharedFunctionInfo of ALL inlined functions .
v8/v8
ffa0b4007cd7de0cfd6d37079ef360e3beeb5686
2015-06-11T04:34:30Z
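The structure of the v8 change above is worth noting: instead of the consumer (code generation) scraping frame-state descriptors to discover which functions were inlined, the producer (the inliner) appends to an explicit list on the compilation info, and the consumer simply walks that list. A simplified, hypothetical sketch of that producer/consumer bookkeeping, not v8's actual types:

    #include <iostream>
    #include <string>
    #include <vector>

    class CompilationInfo {
     public:
      explicit CompilationInfo(std::string outer) : outer_(std::move(outer)) {}

      // Called by the inliner each time it inlines a function.
      void AddInlinedFunction(std::string fn) {
        inlined_.push_back(std::move(fn));
      }

      // Called by codegen: one deoptimization literal per inlinee,
      // skipping the outer function, which already has a literal.
      void DefineDeoptimizationLiterals() const {
        for (const std::string& fn : inlined_) {
          if (fn != outer_) std::cout << "literal: " << fn << "\n";
        }
      }

     private:
      std::string outer_;
      std::vector<std::string> inlined_;
    };

    int main() {
      CompilationInfo info("outer");
      info.AddInlinedFunction("helper_a");  // recorded at the inlining site
      info.AddInlinedFunction("helper_b");
      info.DefineDeoptimizationLiterals();  // walked at code generation time
      return 0;
    }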
mmm a / src / btree / backfill . hpp <nl> ppp b / src / btree / backfill . hpp <nl> class agnostic_backfill_callback_t { <nl> virtual void on_delete_range ( const key_range_t & range , signal_t * interruptor ) THROWS_ONLY ( interrupted_exc_t ) = 0 ; <nl> virtual void on_deletion ( const btree_key_t * key , repli_timestamp_t recency , signal_t * interruptor ) THROWS_ONLY ( interrupted_exc_t ) = 0 ; <nl> # if SLICE_ALT <nl> - virtual void on_pair ( alt : : alt_buf_parent_t parent , repli_timestamp_t recency , <nl> + virtual void on_pair ( alt : : alt_buf_parent_t leaf_node , repli_timestamp_t recency , <nl> const btree_key_t * key , const void * value , <nl> signal_t * interruptor ) THROWS_ONLY ( interrupted_exc_t ) = 0 ; <nl> # else <nl> mmm a / src / btree / btree_store . cc <nl> ppp b / src / btree / btree_store . cc <nl> void btree_store_t < protocol_t > : : reset_data ( <nl> interruptor ) ; <nl> } <nl> <nl> + # if SLICE_ALT <nl> + template < class protocol_t > <nl> + void btree_store_t < protocol_t > : : lock_sindex_queue ( alt_buf_lock_t * sindex_block , <nl> + mutex_t : : acq_t * acq ) { <nl> + # else <nl> template < class protocol_t > <nl> void btree_store_t < protocol_t > : : lock_sindex_queue ( buf_lock_t * sindex_block , mutex_t : : acq_t * acq ) { <nl> + # endif <nl> assert_thread ( ) ; <nl> + # if SLICE_ALT <nl> + / / RSI : WTF should we do here ? Why is there a mutex ? <nl> + <nl> + / / RSI : Do we really need to wait for write acquisition ? <nl> + <nl> + / / RSI : Should we be able to " get in line " for the mutex and release the sindex <nl> + / / block or something ? <nl> + guarantee ( ! sindex_block - > empty ( ) ) ; <nl> + sindex_block - > write_acq_signal ( ) - > wait ( ) ; <nl> + # else <nl> guarantee ( sindex_block - > is_acquired ( ) ) ; <nl> + # endif <nl> acq - > reset ( & sindex_queue_mutex ) ; <nl> } <nl> <nl> mmm a / src / btree / btree_store . hpp <nl> ppp b / src / btree / btree_store . hpp <nl> class btree_store_t : public store_view_t < protocol_t > { <nl> signal_t * interruptor ) <nl> THROWS_ONLY ( interrupted_exc_t ) ; <nl> <nl> + # if SLICE_ALT <nl> + void lock_sindex_queue ( alt : : alt_buf_lock_t * sindex_block , mutex_t : : acq_t * acq ) ; <nl> + # else <nl> void lock_sindex_queue ( buf_lock_t * sindex_block , mutex_t : : acq_t * acq ) ; <nl> + # endif <nl> <nl> void register_sindex_queue ( <nl> internal_disk_backed_queue_t * disk_backed_queue , <nl> class btree_store_t : public store_view_t < protocol_t > { <nl> sindex_access_vector_t * sindex_sbs_out ) <nl> THROWS_ONLY ( sindex_not_post_constructed_exc_t ) ; <nl> <nl> + / / RSI : Rename to acquire , not aquire . <nl> # if SLICE_ALT <nl> void aquire_post_constructed_sindex_superblocks_for_write ( <nl> block_id_t sindex_block_id , <nl> mmm a / src / btree / depth_first_traversal . hpp <nl> ppp b / src / btree / depth_first_traversal . hpp <nl> class scoped_key_value_t { <nl> guarantee ( buf_ . has ( ) ) ; <nl> return value_ ; <nl> } <nl> + # if SLICE_ALT <nl> + alt : : alt_buf_parent_t expose_buf ( ) { <nl> + guarantee ( buf_ . has ( ) ) ; <nl> + return alt : : alt_buf_parent_t ( buf_ . get ( ) ) ; <nl> + } <nl> + # endif <nl> <nl> - / / Releases the hold on the buf_lock_t , after which key ( ) and value ( ) may not be <nl> - / / used . <nl> + / / Releases the hold on the buf_lock_t , after which key ( ) , value ( ) , and <nl> + / / expose_buf ( ) may not be used . <nl> void reset ( ) { buf_ . reset ( ) ; } <nl> <nl> private : <nl> mmm a / src / btree / operations . 
cc <nl> ppp b / src / btree / operations . cc <nl> void set_superblock_metainfo ( transaction_t * txn , buf_lock_t * superblock , const s <nl> } <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void delete_superblock_metainfo ( alt_buf_lock_t * superblock , <nl> + const std : : vector < char > & key ) { <nl> + # else <nl> void delete_superblock_metainfo ( transaction_t * txn , buf_lock_t * superblock , const std : : vector < char > & key ) { <nl> + # endif <nl> + # if SLICE_ALT <nl> + alt_buf_write_t write ( superblock ) ; <nl> + btree_superblock_t * const data <nl> + = static_cast < btree_superblock_t * > ( write . get_data_write ( ) ) ; <nl> + # else <nl> btree_superblock_t * data = static_cast < btree_superblock_t * > ( superblock - > get_data_write ( ) ) ; <nl> + # endif <nl> <nl> + # if SLICE_ALT <nl> + alt : : blob_t blob ( superblock - > cache ( ) - > get_block_size ( ) , <nl> + data - > metainfo_blob , btree_superblock_t : : METAINFO_BLOB_MAXREFLEN ) ; <nl> + # else <nl> blob_t blob ( txn - > get_cache ( ) - > get_block_size ( ) , <nl> data - > metainfo_blob , btree_superblock_t : : METAINFO_BLOB_MAXREFLEN ) ; <nl> + # endif <nl> <nl> std : : vector < char > metainfo ; <nl> <nl> { <nl> + # if SLICE_ALT <nl> + alt : : blob_acq_t acq ; <nl> + # else <nl> blob_acq_t acq ; <nl> + # endif <nl> buffer_group_t group ; <nl> + # if SLICE_ALT <nl> + blob . expose_all ( alt_buf_parent_t ( superblock ) , alt_access_t : : read , <nl> + & group , & acq ) ; <nl> + # else <nl> blob . expose_all ( txn , rwi_read , & group , & acq ) ; <nl> + # endif <nl> <nl> int64_t group_size = group . get_size ( ) ; <nl> metainfo . resize ( group_size ) ; <nl> void delete_superblock_metainfo ( transaction_t * txn , buf_lock_t * superblock , cons <nl> buffer_group_copy_data ( & group_cpy , const_view ( & group ) ) ; <nl> } <nl> <nl> + # if SLICE_ALT <nl> + blob . clear ( alt_buf_parent_t ( superblock ) ) ; <nl> + # else <nl> blob . clear ( txn ) ; <nl> + # endif <nl> <nl> uint32_t * size ; <nl> char * verybeg , * info_begin , * info_end ; <nl> void delete_superblock_metainfo ( transaction_t * txn , buf_lock_t * superblock , cons <nl> std : : vector < char > : : iterator q = metainfo . begin ( ) + ( info_end - metainfo . data ( ) ) ; <nl> metainfo . erase ( p , q ) ; <nl> <nl> + # if SLICE_ALT <nl> + blob . append_region ( alt_buf_parent_t ( superblock ) , metainfo . size ( ) ) ; <nl> + # else <nl> blob . append_region ( txn , metainfo . size ( ) ) ; <nl> + # endif <nl> <nl> { <nl> + # if SLICE_ALT <nl> + alt : : blob_acq_t acq ; <nl> + # else <nl> blob_acq_t acq ; <nl> + # endif <nl> buffer_group_t write_group ; <nl> + # if SLICE_ALT <nl> + blob . expose_all ( alt_buf_parent_t ( superblock ) , alt_access_t : : write , <nl> + & write_group , & acq ) ; <nl> + # else <nl> blob . expose_all ( txn , rwi_write , & write_group , & acq ) ; <nl> + # endif <nl> <nl> buffer_group_t group_cpy ; <nl> group_cpy . add_buffer ( metainfo . size ( ) , metainfo . data ( ) ) ; <nl> mmm a / src / btree / operations . hpp <nl> ppp b / src / btree / operations . 
hpp <nl> void get_superblock_metainfo ( transaction_t * txn , buf_lock_t * superblock , std : : ve <nl> void set_superblock_metainfo ( alt : : alt_buf_lock_t * superblock , <nl> const std : : vector < char > & key , <nl> const std : : vector < char > & value ) ; <nl> - # endif <nl> + # else <nl> void set_superblock_metainfo ( transaction_t * txn , buf_lock_t * superblock , const std : : vector < char > & key , const std : : vector < char > & value ) ; <nl> + # endif <nl> <nl> + # if SLICE_ALT <nl> + void delete_superblock_metainfo ( alt : : alt_buf_lock_t * superblock , <nl> + const std : : vector < char > & key ) ; <nl> + # else <nl> void delete_superblock_metainfo ( transaction_t * txn , buf_lock_t * superblock , const std : : vector < char > & key ) ; <nl> + # endif <nl> # if SLICE_ALT <nl> void clear_superblock_metainfo ( alt : : alt_buf_lock_t * superblock ) ; <nl> # else <nl> mmm a / src / buffer_cache / alt / alt . cc <nl> ppp b / src / buffer_cache / alt / alt . cc <nl> alt_buf_lock_t : : alt_buf_lock_t ( ) <nl> alt_buf_lock_t : : alt_buf_lock_t ( alt_buf_parent_t parent , <nl> block_id_t block_id , <nl> alt_access_t access ) <nl> - : txn_ ( parent . txn_ ) , <nl> + : txn_ ( parent . txn ( ) ) , <nl> cache_ ( txn_ - > cache ( ) ) , <nl> current_page_acq_ ( ) , <nl> snapshot_node_ ( NULL ) { <nl> alt_buf_lock_t : : alt_buf_lock_t ( alt_buf_lock_t * parent , <nl> <nl> alt_buf_lock_t : : alt_buf_lock_t ( alt_buf_parent_t parent , <nl> alt_create_t create ) <nl> - : txn_ ( parent . txn_ ) , <nl> + : txn_ ( parent . txn ( ) ) , <nl> cache_ ( txn_ - > cache ( ) ) , <nl> current_page_acq_ ( ) , <nl> snapshot_node_ ( NULL ) { <nl> mmm a / src / buffer_cache / alt / alt . hpp <nl> ppp b / src / buffer_cache / alt / alt . hpp <nl> class alt_buf_lock_t { <nl> <nl> class alt_buf_parent_t { <nl> public : <nl> + alt_buf_parent_t ( ) : txn_ ( NULL ) , lock_or_null_ ( NULL ) { } <nl> + <nl> explicit alt_buf_parent_t ( alt_buf_lock_t * lock ) <nl> : txn_ ( lock - > txn ( ) ) , lock_or_null_ ( lock ) { <nl> guarantee ( lock ! = NULL ) ; <nl> guarantee ( ! lock - > empty ( ) ) ; <nl> } <nl> + / / RSI : Replace this constructor with a create_dangerously static method . <nl> explicit alt_buf_parent_t ( alt_txn_t * txn ) <nl> : txn_ ( txn ) , lock_or_null_ ( NULL ) { } <nl> <nl> - alt_txn_t * txn ( ) const { return txn_ ; } <nl> - alt_cache_t * cache ( ) const { return txn_ - > cache ( ) ; } <nl> + alt_txn_t * txn ( ) const { <nl> + guarantee ( txn_ ! = NULL ) ; <nl> + return txn_ ; <nl> + } <nl> + alt_cache_t * cache ( ) const { <nl> + guarantee ( txn_ ! = NULL ) ; <nl> + return txn_ - > cache ( ) ; <nl> + } <nl> <nl> private : <nl> friend class alt_buf_lock_t ; <nl> mmm a / src / rdb_protocol / blob_wrapper . cc <nl> ppp b / src / rdb_protocol / blob_wrapper . cc <nl> <nl> / / Copyright 2010 - 2013 RethinkDB , all rights reserved . <nl> # include " rdb_protocol / blob_wrapper . 
hpp " <nl> <nl> - rdb_blob_wrapper_t : : rdb_blob_wrapper_t ( block_size_t blk_size , char * ref , int maxreflen ) <nl> - : internal ( blk_size , ref , maxreflen ) { } <nl> + # if SLICE_ALT <nl> + using namespace alt ; / / RSI <nl> + # endif <nl> + <nl> + rdb_blob_wrapper_t : : rdb_blob_wrapper_t ( block_size_t block_size , char * ref , <nl> + int maxreflen ) <nl> + : internal ( block_size , ref , maxreflen ) { } <nl> <nl> + # if SLICE_ALT <nl> + rdb_blob_wrapper_t : : rdb_blob_wrapper_t ( <nl> + block_size_t block_size , char * ref , int maxreflen , <nl> + alt_buf_parent_t parent , const std : : string & data ) <nl> + : internal ( block_size , ref , maxreflen ) <nl> + # else <nl> rdb_blob_wrapper_t : : rdb_blob_wrapper_t ( <nl> - block_size_t blk_size , char * ref , int maxreflen , <nl> - transaction_t * txn , const std : : string & data ) <nl> - : internal ( blk_size , ref , maxreflen ) <nl> + block_size_t block_size , char * ref , int maxreflen , <nl> + transaction_t * txn , const std : : string & data ) <nl> + : internal ( block_size , ref , maxreflen ) <nl> + # endif <nl> { <nl> # ifndef NDEBUG <nl> / * This is to check that this is actually a new blob that no one else could <nl> rdb_blob_wrapper_t : : rdb_blob_wrapper_t ( <nl> rassert ( * it = = 0 ) ; <nl> } <nl> # endif <nl> + # if SLICE_ALT <nl> + internal . append_region ( parent , data . size ( ) ) ; <nl> + internal . write_from_string ( data , parent , 0 ) ; <nl> + # else <nl> internal . append_region ( txn , data . size ( ) ) ; <nl> internal . write_from_string ( data , txn , 0 ) ; <nl> + # endif <nl> } <nl> <nl> int rdb_blob_wrapper_t : : refsize ( block_size_t block_size ) const { <nl> int64_t rdb_blob_wrapper_t : : valuesize ( ) const { <nl> return internal . valuesize ( ) ; <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void rdb_blob_wrapper_t : : expose_all ( <nl> + alt_buf_parent_t parent , alt_access_t mode , <nl> + buffer_group_t * buffer_group_out , <nl> + alt : : blob_acq_t * acq_group_out ) { <nl> + # else <nl> void rdb_blob_wrapper_t : : expose_all ( <nl> - transaction_t * txn , access_t mode , <nl> - buffer_group_t * buffer_group_out , <nl> + transaction_t * txn , access_t mode , <nl> + buffer_group_t * buffer_group_out , <nl> blob_acq_t * acq_group_out ) { <nl> + # endif <nl> + # if SLICE_ALT <nl> + guarantee ( mode = = alt_access_t : : read , <nl> + " Other blocks might be referencing this blob , it ' s invalid to modify it in place . " ) ; <nl> + # else <nl> guarantee ( mode = = rwi_read , <nl> " Other blocks might be referencing this blob , it ' s invalid to modify it in place . " ) ; <nl> + # endif <nl> + # if SLICE_ALT <nl> + internal . expose_all ( parent , mode , buffer_group_out , acq_group_out ) ; <nl> + # else <nl> internal . expose_all ( txn , mode , buffer_group_out , acq_group_out ) ; <nl> + # endif <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void rdb_blob_wrapper_t : : clear ( alt_buf_parent_t parent ) { <nl> + internal . clear ( parent ) ; <nl> + } <nl> + # else <nl> void rdb_blob_wrapper_t : : clear ( transaction_t * txn ) { <nl> internal . clear ( txn ) ; <nl> } <nl> + # endif <nl> mmm a / src / rdb_protocol / blob_wrapper . hpp <nl> ppp b / src / rdb_protocol / blob_wrapper . hpp <nl> <nl> # define RDB_PROTOCOL_BLOB_WRAPPER_HPP_ <nl> <nl> # include " btree / btree_store . hpp " <nl> + # include " btree / slice . hpp " / / RSI : for SLICE_ALT <nl> + # if SLICE_ALT <nl> + # include " buffer_cache / alt / alt_blob . hpp " <nl> + # endif <nl> # include " buffer_cache / blob . 
hpp " <nl> <nl> / * This class wraps a blob_t but hides some of its methods . We do this because <nl> class rdb_blob_wrapper_t { <nl> / * The allows you to write some data to the blob as well . This is because <nl> * the methods to do this are normally not present due to shared <nl> * references . * / <nl> + # if SLICE_ALT <nl> + rdb_blob_wrapper_t ( block_size_t block_size , char * ref , int maxreflen , <nl> + alt : : alt_buf_parent_t parent , const std : : string & data ) ; <nl> + # else <nl> rdb_blob_wrapper_t ( block_size_t block_size , char * ref , int maxreflen , <nl> transaction_t * txn , const std : : string & data ) ; <nl> + # endif <nl> <nl> int refsize ( block_size_t block_size ) const ; <nl> <nl> int64_t valuesize ( ) const ; <nl> <nl> / * This function only works in read mode . * / <nl> + # if SLICE_ALT <nl> + void expose_all ( alt : : alt_buf_parent_t parent , alt : : alt_access_t mode , <nl> + buffer_group_t * buffer_group_out , <nl> + alt : : blob_acq_t * acq_group_out ) ; <nl> + # else <nl> void expose_all ( transaction_t * txn , access_t mode , buffer_group_t * buffer_group_out , blob_acq_t * acq_group_out ) ; <nl> + # endif <nl> <nl> private : <nl> friend class rdb_value_deleter_t ; <nl> class rdb_blob_wrapper_t { <nl> * that you have to explicitly white list places where it occurs . If you ' re <nl> * not 100 % sure it ' s safe to call clear from a certain location don ' t <nl> * friend it . * / <nl> + # if SLICE_ALT <nl> + void clear ( alt : : alt_buf_parent_t parent ) ; <nl> + # else <nl> void clear ( transaction_t * txn ) ; <nl> + # endif <nl> <nl> + # if SLICE_ALT <nl> + alt : : blob_t internal ; <nl> + # else <nl> blob_t internal ; <nl> + # endif <nl> } ; <nl> <nl> # endif / / RDB_PROTOCOL_BLOB_WRAPPER_HPP_ <nl> mmm a / src / rdb_protocol / btree . cc <nl> ppp b / src / rdb_protocol / btree . cc <nl> <nl> # include " rdb_protocol / lazy_json . hpp " <nl> # include " rdb_protocol / transform_visitors . hpp " <nl> <nl> + # if SLICE_ALT <nl> + using namespace alt ; / / RSI <nl> + # endif <nl> + <nl> value_sizer_t < rdb_value_t > : : value_sizer_t ( block_size_t bs ) : block_size_ ( bs ) { } <nl> <nl> template < class Value > <nl> void find_keyvalue_location_for_write ( <nl> - const btree_loc_info_t & info , <nl> + const btree_loc_info_t & info , / / RSI : Remove txn from info ? <nl> keyvalue_location_t < Value > * kv_loc_out , <nl> profile : : trace_t * trace , <nl> promise_t < superblock_t * > * pass_back_superblock ) { <nl> + # if SLICE_ALT <nl> + find_keyvalue_location_for_write ( <nl> + info . superblock , info . key - > btree_key ( ) , kv_loc_out , <nl> + & info . btree - > slice - > stats , trace , pass_back_superblock ) ; <nl> + # else <nl> find_keyvalue_location_for_write ( <nl> info . btree - > txn , info . superblock , info . key - > btree_key ( ) , kv_loc_out , <nl> & info . btree - > slice - > root_eviction_priority , & info . 
btree - > slice - > stats , <nl> trace , pass_back_superblock ) ; <nl> + # endif <nl> } <nl> <nl> const rdb_value_t * value_sizer_t < rdb_value_t > : : as_rdb ( const void * p ) { <nl> bool value_sizer_t < rdb_value_t > : : fits ( const void * value , int length_available ) c <nl> } <nl> <nl> int value_sizer_t < rdb_value_t > : : max_possible_size ( ) const { <nl> + # if SLICE_ALT <nl> + return alt : : blob : : btree_maxreflen ; <nl> + # else <nl> return blob : : btree_maxreflen ; <nl> + # endif <nl> } <nl> <nl> block_magic_t value_sizer_t < rdb_value_t > : : leaf_magic ( ) { <nl> block_magic_t value_sizer_t < rdb_value_t > : : btree_leaf_magic ( ) const { <nl> block_size_t value_sizer_t < rdb_value_t > : : block_size ( ) const { return block_size_ ; } <nl> <nl> bool btree_value_fits ( block_size_t bs , int data_length , const rdb_value_t * value ) { <nl> + # if SLICE_ALT <nl> + return alt : : blob : : ref_fits ( bs , data_length , value - > value_ref ( ) , <nl> + alt : : blob : : btree_maxreflen ) ; <nl> + # else <nl> return blob : : ref_fits ( bs , data_length , value - > value_ref ( ) , blob : : btree_maxreflen ) ; <nl> + # endif <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void rdb_get ( const store_key_t & store_key , btree_slice_t * slice , <nl> + superblock_t * superblock , point_read_response_t * response , <nl> + profile : : trace_t * trace ) { <nl> + # else <nl> void rdb_get ( const store_key_t & store_key , btree_slice_t * slice , transaction_t * txn , <nl> superblock_t * superblock , point_read_response_t * response , profile : : trace_t * trace ) { <nl> + # endif <nl> keyvalue_location_t < rdb_value_t > kv_location ; <nl> + # if SLICE_ALT <nl> + find_keyvalue_location_for_read ( superblock , store_key . btree_key ( ) , & kv_location , <nl> + & slice - > stats , trace ) ; <nl> + # else <nl> find_keyvalue_location_for_read ( txn , superblock , store_key . btree_key ( ) , & kv_location , <nl> slice - > root_eviction_priority , & slice - > stats , trace ) ; <nl> + # endif <nl> <nl> if ( ! kv_location . value . has ( ) ) { <nl> response - > data . reset ( new ql : : datum_t ( ql : : datum_t : : R_NULL ) ) ; <nl> } else { <nl> + # if SLICE_ALT <nl> + response - > data = get_data ( kv_location . value . get ( ) , <nl> + alt_buf_parent_t ( & kv_location . buf ) ) ; <nl> + # else <nl> response - > data = get_data ( kv_location . value . get ( ) , txn ) ; <nl> + # endif <nl> } <nl> } <nl> <nl> void kv_location_delete ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> const store_key_t & key , <nl> + # if ! SLICE_ALT <nl> btree_slice_t * slice , <nl> + # endif <nl> repli_timestamp_t timestamp , <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> rdb_modification_info_t * mod_info_out ) { <nl> guarantee ( kv_location - > value . has ( ) ) ; <nl> <nl> void kv_location_delete ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> if ( mod_info_out ) { <nl> guarantee ( mod_info_out - > deleted . second . empty ( ) ) ; <nl> <nl> + # if SLICE_ALT <nl> + / / RSI : Prove that buf is valid . <nl> + block_size_t block_size = kv_location - > buf . cache ( ) - > get_block_size ( ) ; <nl> + # else <nl> block_size_t block_size = txn - > get_cache ( ) - > get_block_size ( ) ; <nl> + # endif <nl> mod_info_out - > deleted . second . 
assign ( kv_location - > value - > value_ref ( ) , <nl> kv_location - > value - > value_ref ( ) + <nl> kv_location - > value - > inline_size ( block_size ) ) ; <nl> void kv_location_delete ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> <nl> kv_location - > value . reset ( ) ; <nl> null_key_modification_callback_t < rdb_value_t > null_cb ; <nl> + # if SLICE_ALT <nl> + apply_keyvalue_change ( kv_location , key . btree_key ( ) , timestamp , <nl> + false , & null_cb ) ; <nl> + # else <nl> apply_keyvalue_change ( txn , kv_location , key . btree_key ( ) , timestamp , <nl> false , & null_cb , & slice - > root_eviction_priority ) ; <nl> + # endif <nl> } <nl> <nl> void kv_location_set ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> const store_key_t & key , <nl> counted_t < const ql : : datum_t > data , <nl> + # if ! SLICE_ALT <nl> btree_slice_t * slice , <nl> + # endif <nl> repli_timestamp_t timestamp , <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> rdb_modification_info_t * mod_info_out ) { <nl> + # if SLICE_ALT <nl> + scoped_malloc_t < rdb_value_t > new_value ( alt : : blob : : btree_maxreflen ) ; <nl> + memset ( new_value . get ( ) , 0 , alt : : blob : : btree_maxreflen ) ; <nl> + # else <nl> scoped_malloc_t < rdb_value_t > new_value ( blob : : btree_maxreflen ) ; <nl> bzero ( new_value . get ( ) , blob : : btree_maxreflen ) ; <nl> + # endif <nl> <nl> / / TODO unnecessary copies they must go away . <nl> write_message_t wm ; <nl> void kv_location_set ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> / / TODO more copies , good lord <nl> std : : string sered_data ( stream . vector ( ) . begin ( ) , stream . vector ( ) . end ( ) ) ; <nl> <nl> - rdb_blob_wrapper_t blob ( txn - > get_cache ( ) - > get_block_size ( ) , <nl> + # if SLICE_ALT <nl> + alt_cache_t * cache = kv_location - > buf . cache ( ) ; <nl> + const block_size_t block_size = cache - > get_block_size ( ) ; <nl> + rdb_blob_wrapper_t blob ( block_size , <nl> + new_value - > value_ref ( ) , alt : : blob : : btree_maxreflen , <nl> + alt_buf_parent_t ( & kv_location - > buf ) , sered_data ) ; <nl> + # else <nl> + const block_size_t block_size = txn - > get_cache ( ) - > get_block_size ( ) ; <nl> + rdb_blob_wrapper_t blob ( block_size , <nl> new_value - > value_ref ( ) , blob : : btree_maxreflen , <nl> txn , sered_data ) ; <nl> + # endif <nl> <nl> - block_size_t block_size = txn - > get_cache ( ) - > get_block_size ( ) ; <nl> if ( mod_info_out ) { <nl> guarantee ( mod_info_out - > added . second . empty ( ) ) ; <nl> mod_info_out - > added . second . assign ( new_value - > value_ref ( ) , <nl> void kv_location_set ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> / / Actually update the leaf , if needed . <nl> kv_location - > value = std : : move ( new_value ) ; <nl> null_key_modification_callback_t < rdb_value_t > null_cb ; <nl> + # if SLICE_ALT <nl> + apply_keyvalue_change ( kv_location , key . btree_key ( ) , timestamp , <nl> + false , & null_cb ) ; <nl> + # else <nl> apply_keyvalue_change ( txn , kv_location , key . btree_key ( ) , timestamp , <nl> false , & null_cb , & slice - > root_eviction_priority ) ; <nl> + # endif <nl> / / ^ ^ ^ ^ ^ That means the key isn ' t expired . 
<nl> + / / RSI : replace ` false ` with an enum class <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void kv_location_set ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> + const store_key_t & key , <nl> + const std : : vector < char > & value_ref , <nl> + repli_timestamp_t timestamp ) { <nl> + # else <nl> void kv_location_set ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> const store_key_t & key , <nl> const std : : vector < char > & value_ref , <nl> btree_slice_t * slice , <nl> repli_timestamp_t timestamp , <nl> transaction_t * txn ) { <nl> + # endif <nl> scoped_malloc_t < rdb_value_t > new_value ( <nl> value_ref . data ( ) , value_ref . data ( ) + value_ref . size ( ) ) ; <nl> <nl> / / Update the leaf , if needed . <nl> kv_location - > value = std : : move ( new_value ) ; <nl> null_key_modification_callback_t < rdb_value_t > null_cb ; <nl> + # if SLICE_ALT <nl> + apply_keyvalue_change ( kv_location , key . btree_key ( ) , timestamp , <nl> + false , & null_cb ) ; <nl> + # else <nl> apply_keyvalue_change ( txn , kv_location , key . btree_key ( ) , timestamp , <nl> false , & null_cb , & slice - > root_eviction_priority ) ; <nl> + # endif <nl> / / ^ ^ ^ ^ ^ That means the key isn ' t expired . <nl> } <nl> <nl> void kv_location_set ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> const btree_loc_info_t & info , <nl> counted_t < const ql : : datum_t > data , <nl> rdb_modification_info_t * mod_info_out ) { <nl> + # if SLICE_ALT <nl> + kv_location_set ( kv_location , * info . key , data , <nl> + info . btree - > timestamp , mod_info_out ) ; <nl> + # else <nl> kv_location_set ( kv_location , * info . key , data , info . btree - > slice , <nl> info . btree - > timestamp , info . btree - > txn , mod_info_out ) ; <nl> + # endif <nl> } <nl> void kv_location_delete ( keyvalue_location_t < rdb_value_t > * kv_location , <nl> const btree_loc_info_t & info , <nl> rdb_modification_info_t * mod_info_out ) { <nl> + # if SLICE_ALT <nl> + / / RSI : Just pass timestamp , no ` info ` ? <nl> + kv_location_delete ( kv_location , * info . key , <nl> + info . btree - > timestamp , mod_info_out ) ; <nl> + # else <nl> kv_location_delete ( kv_location , * info . key , info . btree - > slice , <nl> info . btree - > timestamp , info . btree - > txn , mod_info_out ) ; <nl> + # endif <nl> } <nl> <nl> batched_replace_response_t rdb_replace_and_return_superblock ( <nl> batched_replace_response_t rdb_replace_and_return_superblock ( <nl> } else { <nl> / / Otherwise pass the entry with this key to the function . <nl> started_empty = false ; <nl> + # if SLICE_ALT <nl> + old_val = get_data ( kv_location . value . get ( ) , <nl> + alt_buf_parent_t ( & kv_location . buf ) ) ; <nl> + # else <nl> old_val = get_data ( kv_location . value . get ( ) , info . btree - > txn ) ; <nl> + # endif <nl> guarantee ( old_val - > get ( primary_key , ql : : NOTHROW ) . has ( ) ) ; <nl> } <nl> guarantee ( old_val . has ( ) ) ; <nl> void rdb_set ( const store_key_t & key , <nl> bool overwrite , <nl> btree_slice_t * slice , <nl> repli_timestamp_t timestamp , <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> superblock_t * superblock , <nl> point_write_response_t * response_out , <nl> rdb_modification_info_t * mod_info , <nl> profile : : trace_t * trace ) { <nl> keyvalue_location_t < rdb_value_t > kv_location ; <nl> + # if SLICE_ALT <nl> + find_keyvalue_location_for_write ( superblock , key . 
btree_key ( ) , & kv_location , <nl> + & slice - > stats , trace ) ; <nl> + # else <nl> find_keyvalue_location_for_write ( txn , superblock , key . btree_key ( ) , & kv_location , <nl> & slice - > root_eviction_priority , & slice - > stats , trace ) ; <nl> + # endif <nl> const bool had_value = kv_location . value . has ( ) ; <nl> <nl> / * update the modification report * / <nl> if ( kv_location . value . has ( ) ) { <nl> + # if SLICE_ALT <nl> + mod_info - > deleted . first = get_data ( kv_location . value . get ( ) , <nl> + alt_buf_parent_t ( & kv_location . buf ) ) ; <nl> + # else <nl> mod_info - > deleted . first = get_data ( kv_location . value . get ( ) , txn ) ; <nl> + # endif <nl> } <nl> <nl> mod_info - > added . first = data ; <nl> <nl> if ( overwrite | | ! had_value ) { <nl> + # if SLICE_ALT <nl> + kv_location_set ( & kv_location , key , data , timestamp , mod_info ) ; <nl> + # else <nl> kv_location_set ( & kv_location , key , data , slice , timestamp , txn , mod_info ) ; <nl> + # endif <nl> guarantee ( mod_info - > deleted . second . empty ( ) = = ! had_value & & <nl> ! mod_info - > added . second . empty ( ) ) ; <nl> } <nl> class agnostic_rdb_backfill_callback_t : public agnostic_backfill_callback_t { <nl> cb_ - > on_deletion ( key , recency , interruptor ) ; <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void on_pair ( alt_buf_parent_t leaf_node , repli_timestamp_t recency , <nl> + const btree_key_t * key , const void * val , <nl> + signal_t * interruptor ) THROWS_ONLY ( interrupted_exc_t ) { <nl> + # else <nl> void on_pair ( transaction_t * txn , repli_timestamp_t recency , const btree_key_t * key , const void * val , signal_t * interruptor ) THROWS_ONLY ( interrupted_exc_t ) { <nl> + # endif <nl> rassert ( kr_ . contains_key ( key - > contents , key - > size ) ) ; <nl> const rdb_value_t * value = static_cast < const rdb_value_t * > ( val ) ; <nl> <nl> rdb_protocol_details : : backfill_atom_t atom ; <nl> atom . key . assign ( key - > size , key - > contents ) ; <nl> + # if SLICE_ALT <nl> + atom . value = get_data ( value , leaf_node ) ; <nl> + # else <nl> atom . value = get_data ( value , txn ) ; <nl> + # endif <nl> atom . 
recency = recency ; <nl> cb_ - > on_keyvalue ( atom , interruptor ) ; <nl> } <nl> class agnostic_rdb_backfill_callback_t : public agnostic_backfill_callback_t { <nl> key_range_t kr_ ; <nl> } ; <nl> <nl> + # if SLICE_ALT <nl> + void rdb_backfill ( btree_slice_t * slice , const key_range_t & key_range , <nl> + repli_timestamp_t since_when , rdb_backfill_callback_t * callback , <nl> + superblock_t * superblock , <nl> + alt_buf_lock_t * sindex_block , <nl> + parallel_traversal_progress_t * p , signal_t * interruptor ) <nl> + THROWS_ONLY ( interrupted_exc_t ) { <nl> + # else <nl> void rdb_backfill ( btree_slice_t * slice , const key_range_t & key_range , <nl> repli_timestamp_t since_when , rdb_backfill_callback_t * callback , <nl> transaction_t * txn , superblock_t * superblock , <nl> buf_lock_t * sindex_block , <nl> parallel_traversal_progress_t * p , signal_t * interruptor ) <nl> THROWS_ONLY ( interrupted_exc_t ) { <nl> + # endif <nl> agnostic_rdb_backfill_callback_t agnostic_cb ( callback , key_range ) ; <nl> value_sizer_t < rdb_value_t > sizer ( slice - > cache ( ) - > get_block_size ( ) ) ; <nl> + # if SLICE_ALT <nl> + do_agnostic_btree_backfill ( & sizer , slice , key_range , since_when , & agnostic_cb , <nl> + superblock , sindex_block , p , interruptor ) ; <nl> + # else <nl> do_agnostic_btree_backfill ( & sizer , slice , key_range , since_when , & agnostic_cb , txn , superblock , sindex_block , p , interruptor ) ; <nl> + # endif <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void rdb_delete ( const store_key_t & key , btree_slice_t * slice , <nl> + repli_timestamp_t timestamp , <nl> + superblock_t * superblock , point_delete_response_t * response , <nl> + rdb_modification_info_t * mod_info , <nl> + profile : : trace_t * trace ) { <nl> + # else <nl> void rdb_delete ( const store_key_t & key , btree_slice_t * slice , <nl> repli_timestamp_t timestamp , transaction_t * txn , <nl> superblock_t * superblock , point_delete_response_t * response , <nl> rdb_modification_info_t * mod_info , <nl> profile : : trace_t * trace ) { <nl> + # endif <nl> keyvalue_location_t < rdb_value_t > kv_location ; <nl> + # if SLICE_ALT <nl> + find_keyvalue_location_for_write ( superblock , key . btree_key ( ) , <nl> + & kv_location , & slice - > stats , trace ) ; <nl> + # else <nl> find_keyvalue_location_for_write ( txn , superblock , key . btree_key ( ) , <nl> & kv_location , & slice - > root_eviction_priority , & slice - > stats , trace ) ; <nl> + # endif <nl> bool exists = kv_location . value . has ( ) ; <nl> <nl> / * Update the modification report . * / <nl> if ( exists ) { <nl> + # if SLICE_ALT <nl> + mod_info - > deleted . first = get_data ( kv_location . value . get ( ) , <nl> + alt_buf_parent_t ( & kv_location . buf ) ) ; <nl> + # else <nl> mod_info - > deleted . first = get_data ( kv_location . value . get ( ) , txn ) ; <nl> + # endif <nl> } <nl> <nl> - if ( exists ) kv_location_delete ( & kv_location , key , slice , timestamp , txn , mod_info ) ; <nl> + / / RSI : We re - check exists condition , this be crazy . <nl> + <nl> + if ( exists ) { <nl> + # if SLICE_ALT <nl> + kv_location_delete ( & kv_location , key , timestamp , mod_info ) ; <nl> + # else <nl> + kv_location_delete ( & kv_location , key , slice , timestamp , txn , mod_info ) ; <nl> + # endif <nl> + } <nl> guarantee ( ! mod_info - > deleted . second . empty ( ) & & mod_info - > added . second . empty ( ) ) ; <nl> response - > result = ( exists ? 
point_delete_result_t : : DELETED : point_delete_result_t : : MISSING ) ; <nl> } <nl> <nl> - void rdb_value_deleter_t : : delete_value ( transaction_t * _txn , void * _value ) { <nl> + # if SLICE_ALT <nl> + void rdb_value_deleter_t : : delete_value ( alt_buf_parent_t parent , void * value ) { <nl> + # else <nl> + void rdb_value_deleter_t : : delete_value ( transaction_t * _txn , void * value ) { <nl> + # endif <nl> + # if SLICE_ALT <nl> + rdb_blob_wrapper_t blob ( parent . cache ( ) - > get_block_size ( ) , <nl> + static_cast < rdb_value_t * > ( value ) - > value_ref ( ) , <nl> + alt : : blob : : btree_maxreflen ) ; <nl> + blob . clear ( parent ) ; <nl> + # else <nl> rdb_blob_wrapper_t blob ( _txn - > get_cache ( ) - > get_block_size ( ) , <nl> - static_cast < rdb_value_t * > ( _value ) - > value_ref ( ) , blob : : btree_maxreflen ) ; <nl> + static_cast < rdb_value_t * > ( value ) - > value_ref ( ) , blob : : btree_maxreflen ) ; <nl> blob . clear ( _txn ) ; <nl> + # endif <nl> } <nl> <nl> + # if SLICE_ALT <nl> + void rdb_value_non_deleter_t : : delete_value ( alt_buf_parent_t , void * ) { } <nl> + # else <nl> void rdb_value_non_deleter_t : : delete_value ( transaction_t * , void * ) { } <nl> + # endif <nl> <nl> class sindex_key_range_tester_t : public key_tester_t { <nl> public : <nl> class sindex_key_range_tester_t : public key_tester_t { <nl> typedef btree_store_t < rdb_protocol_t > : : sindex_access_t sindex_access_t ; <nl> typedef btree_store_t < rdb_protocol_t > : : sindex_access_vector_t sindex_access_vector_t ; <nl> <nl> + # if SLICE_ALT <nl> + void sindex_erase_range ( const key_range_t & key_range , <nl> + const sindex_access_t * sindex_access , <nl> + auto_drainer_t : : lock_t , <nl> + signal_t * interruptor , bool release_superblock ) THROWS_NOTHING { <nl> + # else <nl> void sindex_erase_range ( const key_range_t & key_range , <nl> transaction_t * txn , const sindex_access_t * sindex_access , auto_drainer_t : : lock_t , <nl> signal_t * interruptor , bool release_superblock ) THROWS_NOTHING { <nl> + # endif <nl> <nl> value_sizer_t < rdb_value_t > rdb_sizer ( sindex_access - > btree - > cache ( ) - > get_block_size ( ) ) ; <nl> value_sizer_t < void > * sizer = & rdb_sizer ; <nl> void sindex_erase_range ( const key_range_t & key_range , <nl> sindex_key_range_tester_t tester ( key_range ) ; <nl> <nl> try { <nl> + # if SLICE_ALT <nl> + btree_erase_range_generic ( sizer , sindex_access - > btree , & tester , <nl> + & deleter , NULL , NULL , <nl> + sindex_access - > super_block . get ( ) , interruptor , <nl> + release_superblock ) ; <nl> + # else <nl> btree_erase_range_generic ( sizer , sindex_access - > btree , & tester , <nl> & deleter , NULL , NULL , txn , sindex_access - > super_block . get ( ) , interruptor , release_superblock ) ; <nl> + # endif <nl> } catch ( const interrupted_exc_t & ) { <nl> / / We were interrupted . That ' s fine nothing to be done about it . <nl> } <nl> } <nl> <nl> / * Spawns a coro to carry out the erase range for each sindex . * / <nl> + / / RSI : Make sure sindex_access has the right parent node . <nl> void spawn_sindex_erase_ranges ( <nl> const sindex_access_vector_t * sindex_access , <nl> const key_range_t & key_range , <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> auto_drainer_t * drainer , <nl> auto_drainer_t : : lock_t , <nl> bool release_superblock , <nl> signal_t * interruptor ) { <nl> for ( auto it = sindex_access - > begin ( ) ; it ! 
= sindex_access - > end ( ) ; + + it ) { <nl> + # if SLICE_ALT <nl> + coro_t : : spawn_sometime ( boost : : bind ( <nl> + & sindex_erase_range , key_range , & * it , <nl> + auto_drainer_t : : lock_t ( drainer ) , interruptor , <nl> + release_superblock ) ) ; <nl> + # else <nl> coro_t : : spawn_sometime ( boost : : bind ( <nl> & sindex_erase_range , key_range , txn , & * it , <nl> auto_drainer_t : : lock_t ( drainer ) , interruptor , <nl> release_superblock ) ) ; <nl> + # endif <nl> } <nl> } <nl> <nl> void rdb_erase_range ( btree_slice_t * slice , key_tester_t * tester , <nl> const key_range_t & key_range , <nl> - transaction_t * txn , superblock_t * superblock , <nl> + # if ! SLICE_ALT <nl> + transaction_t * txn , <nl> + # endif <nl> + superblock_t * superblock , <nl> btree_store_t < rdb_protocol_t > * store , <nl> write_token_pair_t * token_pair , <nl> signal_t * interruptor ) { <nl> void rdb_erase_range ( btree_slice_t * slice , key_tester_t * tester , <nl> / * Dispatch the erase range to the sindexes . * / <nl> sindex_access_vector_t sindex_superblocks ; <nl> { <nl> + # if SLICE_ALT <nl> + scoped_ptr_t < alt_buf_lock_t > sindex_block ; <nl> + store - > acquire_sindex_block_for_write ( <nl> + token_pair , superblock - > expose_buf ( ) , <nl> + & sindex_block , superblock - > get_sindex_block_id ( ) , <nl> + interruptor ) ; <nl> + <nl> + store - > aquire_post_constructed_sindex_superblocks_for_write ( <nl> + sindex_block . get ( ) , & sindex_superblocks ) ; <nl> + # else <nl> scoped_ptr_t < buf_lock_t > sindex_block ; <nl> store - > acquire_sindex_block_for_write ( <nl> token_pair , txn , & sindex_block , superblock - > get_sindex_block_id ( ) , <nl> void rdb_erase_range ( btree_slice_t * slice , key_tester_t * tester , <nl> <nl> store - > aquire_post_constructed_sindex_superblocks_for_write ( <nl> sindex_block . get ( ) , txn , & sindex_superblocks ) ; <nl> + # endif <nl> <nl> mutex_t : : acq_t acq ; <nl> store - > lock_sindex_queue ( sindex_block . get ( ) , & acq ) ; <nl> void rdb_erase_range ( btree_slice_t * slice , key_tester_t * tester , <nl> <nl> { <nl> auto_drainer_t sindex_erase_drainer ; <nl> + # if SLICE_ALT <nl> + spawn_sindex_erase_ranges ( & sindex_superblocks , key_range , <nl> + & sindex_erase_drainer , auto_drainer_t : : lock_t ( & sindex_erase_drainer ) , <nl> + true , / * release the superblock * / interruptor ) ; <nl> + # else <nl> spawn_sindex_erase_ranges ( & sindex_superblocks , key_range , txn , <nl> & sindex_erase_drainer , auto_drainer_t : : lock_t ( & sindex_erase_drainer ) , <nl> true , / * release the superblock * / interruptor ) ; <nl> + # endif <nl> <nl> / * Notice , when we exit this block we destruct the sindex_erase_drainer <nl> * which means we ' ll wait until all of the sindex_erase_ranges finish <nl> void rdb_erase_range ( btree_slice_t * slice , key_tester_t * tester , <nl> <nl> rdb_value_deleter_t deleter ; <nl> <nl> + # if SLICE_ALT <nl> + btree_erase_range_generic ( sizer , slice , tester , & deleter , <nl> + left_key_supplied ? left_key_exclusive . btree_key ( ) : NULL , <nl> + right_key_supplied ? right_key_inclusive . btree_key ( ) : NULL , <nl> + superblock , interruptor ) ; <nl> + # else <nl> btree_erase_range_generic ( sizer , slice , tester , & deleter , <nl> left_key_supplied ? left_key_exclusive . btree_key ( ) : NULL , <nl> right_key_supplied ? right_key_inclusive . btree_key ( ) : NULL , <nl> txn , superblock , interruptor ) ; <nl> + # endif <nl> <nl> / / auto_drainer_t is destructed here so this waits for other coros to finish . 
<nl> } <nl> class rdb_rget_depth_first_traversal_callback_t <nl> / * This constructor does a traversal on the primary btree , it ' s not to be <nl> * used with sindexes . The constructor below is for use with sindexes . * / <nl> rdb_rget_depth_first_traversal_callback_t ( <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> ql : : env_t * _ql_env , <nl> const ql : : batchspec_t & batchspec , <nl> const rdb_protocol_details : : transform_t & _transform , <nl> class rdb_rget_depth_first_traversal_callback_t <nl> sorting_t _sorting , <nl> rget_read_response_t * _response ) <nl> : bad_init ( false ) , <nl> + # if ! SLICE_ALT <nl> transaction ( txn ) , <nl> + # endif <nl> response ( _response ) , <nl> ql_env ( _ql_env ) , <nl> batcher ( batchspec . to_batcher ( ) ) , <nl> class rdb_rget_depth_first_traversal_callback_t <nl> * with multiple copies of each . This constructor will filter out the <nl> * duplicates . This was issue # 606 . * / <nl> rdb_rget_depth_first_traversal_callback_t ( <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> ql : : env_t * _ql_env , <nl> const ql : : batchspec_t & batchspec , <nl> const rdb_protocol_details : : transform_t & _transform , <nl> class rdb_rget_depth_first_traversal_callback_t <nl> datum_range_t _sindex_range , <nl> rget_read_response_t * _response ) <nl> : bad_init ( false ) , <nl> + # if ! SLICE_ALT <nl> transaction ( txn ) , <nl> + # endif <nl> response ( _response ) , <nl> ql_env ( _ql_env ) , <nl> batcher ( batchspec . to_batcher ( ) ) , <nl> class rdb_rget_depth_first_traversal_callback_t <nl> } <nl> } <nl> try { <nl> + # if SLICE_ALT <nl> + lazy_json_t first_value ( static_cast < const rdb_value_t * > ( keyvalue . value ( ) ) , <nl> + keyvalue . expose_buf ( ) ) ; <nl> + # else <nl> lazy_json_t first_value ( static_cast < const rdb_value_t * > ( keyvalue . value ( ) ) , <nl> transaction ) ; <nl> + # endif <nl> first_value . get ( ) ; <nl> <nl> keyvalue . reset ( ) ; <nl> class rdb_rget_depth_first_traversal_callback_t <nl> <nl> <nl> bool bad_init ; <nl> + # if ! SLICE_ALT <nl> transaction_t * transaction ; <nl> + # endif <nl> rget_read_response_t * response ; <nl> ql : : env_t * ql_env ; <nl> ql : : batcher_t batcher ; <nl> class result_finalizer_visitor_t : public boost : : static_visitor < void > { <nl> } ; <nl> <nl> void rdb_rget_slice ( btree_slice_t * slice , const key_range_t & range , <nl> - transaction_t * txn , superblock_t * superblock , <nl> + # if ! SLICE_ALT <nl> + transaction_t * txn , <nl> + # endif <nl> + superblock_t * superblock , <nl> ql : : env_t * ql_env , const ql : : batchspec_t & batchspec , <nl> const rdb_protocol_details : : transform_t & transform , <nl> const boost : : optional < rdb_protocol_details : : terminal_t > & terminal , <nl> sorting_t sorting , <nl> rget_read_response_t * response ) { <nl> profile : : starter_t starter ( " Do range scan on primary index . " , ql_env - > trace ) ; <nl> + # if SLICE_ALT <nl> + rdb_rget_depth_first_traversal_callback_t callback ( <nl> + ql_env , batchspec , transform , terminal , range , sorting , response ) ; <nl> + btree_concurrent_traversal ( slice , superblock , range , & callback , <nl> + ( ! reversed ( sorting ) ? FORWARD : BACKWARD ) ) ; <nl> + # else <nl> rdb_rget_depth_first_traversal_callback_t callback ( <nl> txn , ql_env , batchspec , transform , terminal , range , sorting , response ) ; <nl> btree_concurrent_traversal ( slice , txn , superblock , range , & callback , <nl> ( ! reversed ( sorting ) ? 
FORWARD : BACKWARD ) ) ; <nl> + # endif <nl> <nl> response - > truncated = callback . batcher . should_send_batch ( ) ; <nl> <nl> void rdb_rget_secondary_slice ( <nl> btree_slice_t * slice , <nl> const datum_range_t & sindex_range , <nl> const rdb_protocol_t : : region_t & sindex_region , <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> superblock_t * superblock , <nl> ql : : env_t * ql_env , <nl> const ql : : batchspec_t & batchspec , <nl> void rdb_rget_secondary_slice ( <nl> sindex_multi_bool_t sindex_multi , <nl> rget_read_response_t * response ) { <nl> profile : : starter_t starter ( " Do range scan on secondary index . " , ql_env - > trace ) ; <nl> + # if SLICE_ALT <nl> + rdb_rget_depth_first_traversal_callback_t callback ( <nl> + ql_env , batchspec , transform , terminal , sindex_region . inner , pk_range , <nl> + sorting , sindex_func , sindex_multi , sindex_range , response ) ; <nl> + btree_concurrent_traversal ( <nl> + slice , superblock , sindex_region . inner , & callback , <nl> + ( ! reversed ( sorting ) ? FORWARD : BACKWARD ) ) ; <nl> + # else <nl> rdb_rget_depth_first_traversal_callback_t callback ( <nl> txn , ql_env , batchspec , transform , terminal , sindex_region . inner , pk_range , <nl> sorting , sindex_func , sindex_multi , sindex_range , response ) ; <nl> btree_concurrent_traversal ( <nl> slice , txn , superblock , sindex_region . inner , & callback , <nl> ( ! reversed ( sorting ) ? FORWARD : BACKWARD ) ) ; <nl> + # endif <nl> <nl> response - > truncated = callback . batcher . should_send_batch ( ) ; <nl> <nl> boost : : apply_visitor ( result_finalizer_visitor_t ( ) , response - > result ) ; <nl> } <nl> <nl> - void rdb_distribution_get ( btree_slice_t * slice , int max_depth , const store_key_t & left_key , <nl> - transaction_t * txn , superblock_t * superblock , distribution_read_response_t * response ) { <nl> + # if SLICE_ALT <nl> + void rdb_distribution_get ( btree_slice_t * slice , int max_depth , <nl> + const store_key_t & left_key , <nl> + superblock_t * superblock , <nl> + distribution_read_response_t * response ) { <nl> + # else <nl> + void rdb_distribution_get ( btree_slice_t * slice , int max_depth , <nl> + const store_key_t & left_key , <nl> + transaction_t * txn , superblock_t * superblock , <nl> + distribution_read_response_t * response ) { <nl> + # endif <nl> int64_t key_count_out ; <nl> std : : vector < store_key_t > key_splits ; <nl> + # if SLICE_ALT <nl> + get_btree_key_distribution ( slice , superblock , max_depth , <nl> + & key_count_out , & key_splits ) ; <nl> + # else <nl> get_btree_key_distribution ( slice , txn , superblock , max_depth , & key_count_out , & key_splits ) ; <nl> + # endif <nl> <nl> int64_t keys_per_bucket ; <nl> if ( key_splits . size ( ) = = 0 ) { <nl> mmm a / src / rdb_protocol / btree . hpp <nl> ppp b / src / rdb_protocol / btree . hpp <nl> class rdb_modification_report_cb_t ; <nl> void rdb_get ( <nl> const store_key_t & key , <nl> btree_slice_t * slice , <nl> + # if ! 
SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> superblock_t * superblock , <nl> point_read_response_t * response , <nl> profile : : trace_t * trace ) ; <nl> class rdb_backfill_callback_t { <nl> } ; <nl> <nl> <nl> + # if SLICE_ALT <nl> + void rdb_backfill ( btree_slice_t * slice , const key_range_t & key_range , <nl> + repli_timestamp_t since_when , rdb_backfill_callback_t * callback , <nl> + superblock_t * superblock , <nl> + alt : : alt_buf_lock_t * sindex_block , <nl> + parallel_traversal_progress_t * p , signal_t * interruptor ) <nl> + THROWS_ONLY ( interrupted_exc_t ) ; <nl> + # else <nl> void rdb_backfill ( btree_slice_t * slice , const key_range_t & key_range , <nl> repli_timestamp_t since_when , rdb_backfill_callback_t * callback , <nl> transaction_t * txn , superblock_t * superblock , <nl> buf_lock_t * sindex_block , <nl> parallel_traversal_progress_t * p , signal_t * interruptor ) <nl> THROWS_ONLY ( interrupted_exc_t ) ; <nl> + # endif <nl> <nl> <nl> + # if SLICE_ALT <nl> + void rdb_delete ( const store_key_t & key , btree_slice_t * slice , repli_timestamp_t <nl> + timestamp , superblock_t * superblock , <nl> + point_delete_response_t * response , <nl> + rdb_modification_info_t * mod_info , <nl> + profile : : trace_t * trace ) ; <nl> + # else <nl> void rdb_delete ( const store_key_t & key , btree_slice_t * slice , repli_timestamp_t <nl> timestamp , transaction_t * txn , superblock_t * superblock , <nl> point_delete_response_t * response , <nl> rdb_modification_info_t * mod_info , <nl> profile : : trace_t * trace ) ; <nl> + # endif <nl> <nl> / * A deleter that doesn ' t actually delete the values . Needed for secondary <nl> * indexes which only have references . * / <nl> class rdb_value_non_deleter_t : public value_deleter_t { <nl> + # if SLICE_ALT <nl> + void delete_value ( alt : : alt_buf_parent_t parent , void * value ) ; <nl> + # else <nl> void delete_value ( transaction_t * _txn , void * _value ) ; <nl> + # endif <nl> } ; <nl> <nl> void rdb_erase_range ( btree_slice_t * slice , key_tester_t * tester , <nl> const key_range_t & keys , <nl> - transaction_t * txn , superblock_t * superblock , <nl> + # if ! 
SLICE_ALT <nl> + transaction_t * txn , <nl> + # endif <nl> + superblock_t * superblock , <nl> btree_store_t < rdb_protocol_t > * store , <nl> write_token_pair_t * token_pair , <nl> signal_t * interruptor ) ; <nl> void rdb_erase_range ( btree_slice_t * slice , key_tester_t * tester , <nl> / * RGETS * / <nl> size_t estimate_rget_response_size ( const counted_t < const ql : : datum_t > & datum ) ; <nl> <nl> + # if SLICE_ALT <nl> + void rdb_rget_slice ( btree_slice_t * slice , const key_range_t & range , <nl> + superblock_t * superblock , <nl> + ql : : env_t * ql_env , const ql : : batchspec_t & batchspec , <nl> + const rdb_protocol_details : : transform_t & transform , <nl> + const boost : : optional < rdb_protocol_details : : terminal_t > & terminal , <nl> + sorting_t sorting , <nl> + rget_read_response_t * response ) ; <nl> + # else <nl> void rdb_rget_slice ( btree_slice_t * slice , const key_range_t & range , <nl> transaction_t * txn , superblock_t * superblock , <nl> ql : : env_t * ql_env , const ql : : batchspec_t & batchspec , <nl> void rdb_rget_slice ( btree_slice_t * slice , const key_range_t & range , <nl> const boost : : optional < rdb_protocol_details : : terminal_t > & terminal , <nl> sorting_t sorting , <nl> rget_read_response_t * response ) ; <nl> + # endif <nl> <nl> void rdb_rget_secondary_slice ( <nl> btree_slice_t * slice , <nl> const datum_range_t & datum_range , <nl> const rdb_protocol_t : : region_t & sindex_region , <nl> + # if ! SLICE_ALT <nl> transaction_t * txn , <nl> + # endif <nl> superblock_t * superblock , <nl> ql : : env_t * ql_env , <nl> const ql : : batchspec_t & batchspec , <nl> void rdb_rget_secondary_slice ( <nl> sindex_multi_bool_t sindex_multi , <nl> rget_read_response_t * response ) ; <nl> <nl> + # if SLICE_ALT <nl> + void rdb_distribution_get ( btree_slice_t * slice , int max_depth , <nl> + const store_key_t & left_key , <nl> + superblock_t * superblock , <nl> + distribution_read_response_t * response ) ; <nl> + # else <nl> void rdb_distribution_get ( btree_slice_t * slice , int max_depth , const store_key_t & left_key , <nl> transaction_t * txn , superblock_t * superblock , distribution_read_response_t * response ) ; <nl> + # endif <nl> <nl> / * Secondary Indexes * / <nl> <nl> friend void rdb_update_sindexes ( <nl> const btree_store_t < rdb_protocol_t > : : sindex_access_vector_t & sindexes , <nl> const rdb_modification_report_t * modification , transaction_t * txn ) ; <nl> <nl> + # if SLICE_ALT <nl> + void delete_value ( alt : : alt_buf_parent_t parent , void * _value ) ; <nl> + # else <nl> void delete_value ( transaction_t * _txn , void * _value ) ; <nl> + # endif <nl> } ; <nl> <nl> <nl> mmm a / src / rdb_protocol / lazy_json . cc <nl> ppp b / src / rdb_protocol / lazy_json . cc <nl> <nl> # include " containers / archive / buffer_group_stream . hpp " <nl> # include " rdb_protocol / blob_wrapper . hpp " <nl> <nl> + # if SLICE_ALT <nl> + using namespace alt ; / / RSI <nl> + # endif <nl> + <nl> + # if SLICE_ALT <nl> + counted_t < const ql : : datum_t > get_data ( const rdb_value_t * value , <nl> + alt_buf_parent_t parent ) { <nl> + # else <nl> counted_t < const ql : : datum_t > get_data ( const rdb_value_t * value , <nl> transaction_t * txn ) { <nl> + # endif <nl> + # if SLICE_ALT <nl> + rdb_blob_wrapper_t blob ( parent . 
cache ( ) - > get_block_size ( ) , <nl> + const_cast < rdb_value_t * > ( value ) - > value_ref ( ) , <nl> + alt : : blob : : btree_maxreflen ) ; <nl> + # else <nl> rdb_blob_wrapper_t blob ( txn - > get_cache ( ) - > get_block_size ( ) , <nl> - const_cast < rdb_value_t * > ( value ) - > value_ref ( ) , blob : : btree_maxreflen ) ; <nl> + const_cast < rdb_value_t * > ( value ) - > value_ref ( ) , <nl> + blob : : btree_maxreflen ) ; <nl> + # endif <nl> <nl> counted_t < const ql : : datum_t > data ; <nl> <nl> + # if SLICE_ALT <nl> + alt : : blob_acq_t acq_group ; <nl> + buffer_group_t buffer_group ; <nl> + blob . expose_all ( parent , alt_access_t : : read , & buffer_group , & acq_group ) ; <nl> + # else <nl> blob_acq_t acq_group ; <nl> buffer_group_t buffer_group ; <nl> blob . expose_all ( txn , rwi_read , & buffer_group , & acq_group ) ; <nl> + # endif <nl> buffer_group_read_stream_t read_stream ( const_view ( & buffer_group ) ) ; <nl> archive_result_t res = deserialize ( & read_stream , & data ) ; <nl> guarantee_deserialization ( res , " rdb value " ) ; <nl> counted_t < const ql : : datum_t > get_data ( const rdb_value_t * value , <nl> <nl> const counted_t < const ql : : datum_t > & lazy_json_t : : get ( ) const { <nl> if ( ! pointee - > ptr ) { <nl> + # if SLICE_ALT <nl> + pointee - > ptr = get_data ( pointee - > rdb_value , pointee - > parent ) ; <nl> + # else <nl> pointee - > ptr = get_data ( pointee - > rdb_value , pointee - > txn ) ; <nl> + # endif <nl> } <nl> return pointee - > ptr ; <nl> } <nl> mmm a / src / rdb_protocol / lazy_json . hpp <nl> ppp b / src / rdb_protocol / lazy_json . hpp <nl> <nl> # ifndef RDB_PROTOCOL_LAZY_JSON_HPP_ <nl> # define RDB_PROTOCOL_LAZY_JSON_HPP_ <nl> <nl> + # include " btree / slice . hpp " / / RSI : for SLICE_ALT <nl> # include " buffer_cache / blob . hpp " <nl> # include " buffer_cache / types . hpp " <nl> # include " rdb_protocol / datum . hpp " <nl> struct rdb_value_t { <nl> } <nl> } ; <nl> <nl> + # if SLICE_ALT <nl> + counted_t < const ql : : datum_t > get_data ( const rdb_value_t * value , <nl> + alt : : alt_buf_parent_t parent ) ; <nl> + # else <nl> counted_t < const ql : : datum_t > get_data ( const rdb_value_t * value , <nl> transaction_t * txn ) ; <nl> + # endif <nl> <nl> class lazy_json_pointee_t : public single_threaded_countable_t < lazy_json_pointee_t > { <nl> + # if SLICE_ALT <nl> + / / RSI : Make sure callers / constructors get the lifetime of the buf parent right . <nl> + lazy_json_pointee_t ( const rdb_value_t * _rdb_value , alt : : alt_buf_parent_t _parent ) <nl> + : rdb_value ( _rdb_value ) , parent ( _parent ) { <nl> + # else <nl> lazy_json_pointee_t ( const rdb_value_t * _rdb_value , transaction_t * _txn ) <nl> : rdb_value ( _rdb_value ) , txn ( _txn ) { <nl> + # endif <nl> guarantee ( rdb_value ! = NULL ) ; <nl> + # if ! SLICE_ALT <nl> guarantee ( txn ! = NULL ) ; <nl> + # endif <nl> } <nl> <nl> + # if SLICE_ALT <nl> + explicit lazy_json_pointee_t ( const counted_t < const ql : : datum_t > & _ptr ) <nl> + : ptr ( _ptr ) , rdb_value ( NULL ) , parent ( ) { <nl> + guarantee ( ptr ) ; <nl> + } <nl> + # else <nl> explicit lazy_json_pointee_t ( const counted_t < const ql : : datum_t > & _ptr ) <nl> : ptr ( _ptr ) , rdb_value ( NULL ) , txn ( NULL ) { <nl> guarantee ( ptr ) ; <nl> } <nl> + # endif <nl> <nl> friend class lazy_json_t ; <nl> <nl> / / If empty , we haven ' t loaded the value yet . 
<nl> counted_t < const ql : : datum_t > ptr ; <nl> <nl> - / / A pointer to the rdb value buffer in the leaf node ( or perhaps a copy ) , and the <nl> - / / transaction with which to load it . <nl> + # if SLICE_ALT <nl> + / / A pointer to the rdb value buffer in the leaf node ( or perhaps a copy ) , and <nl> + / / the leaf node parent from which to load it . <nl> + const rdb_value_t * rdb_value ; <nl> + alt : : alt_buf_parent_t parent ; <nl> + # else <nl> + / / A pointer to the rdb value buffer in the leaf node ( or perhaps a copy ) , and <nl> + / / the transaction with which to load it . <nl> const rdb_value_t * rdb_value ; <nl> transaction_t * txn ; <nl> + # endif <nl> <nl> DISABLE_COPYING ( lazy_json_pointee_t ) ; <nl> } ; <nl> class lazy_json_t { <nl> explicit lazy_json_t ( const counted_t < const ql : : datum_t > & ptr ) <nl> : pointee ( new lazy_json_pointee_t ( ptr ) ) { } <nl> <nl> + # if SLICE_ALT <nl> + lazy_json_t ( const rdb_value_t * rdb_value , alt : : alt_buf_parent_t parent ) <nl> + : pointee ( new lazy_json_pointee_t ( rdb_value , parent ) ) { } <nl> + # else <nl> lazy_json_t ( const rdb_value_t * rdb_value , transaction_t * txn ) <nl> : pointee ( new lazy_json_pointee_t ( rdb_value , txn ) ) { } <nl> + # endif <nl> <nl> const counted_t < const ql : : datum_t > & get ( ) const ; <nl> <nl> mmm a / src / unittest / btree_metainfo . cc <nl> ppp b / src / unittest / btree_metainfo . cc <nl> <nl> # include " unittest / unittest_utils . hpp " <nl> # include " serializer / config . hpp " <nl> <nl> + using namespace alt ; / / RSI <nl> + <nl> namespace unittest { <nl> <nl> static const bool print_log_messages = false ; <nl> void run_metainfo_test ( ) { <nl> <nl> cache_t : : create ( & serializer ) ; <nl> <nl> + # if SLICE_ALT <nl> + alt_cache_t cache ( & serializer ) ; <nl> + # else <nl> const mirrored_cache_config_t cache_dynamic_config ; <nl> cache_t cache ( & serializer , cache_dynamic_config , & get_global_perfmon_collection ( ) ) ; <nl> + # endif <nl> <nl> btree_slice_t : : create ( & cache , std : : vector < char > ( ) , std : : vector < char > ( ) ) ; <nl> std : : map < std : : string , std : : string > mirror ; <nl> void run_metainfo_test ( ) { <nl> <nl> order_token_t otok = order_source . check_in ( " metainfo unittest " ) ; <nl> <nl> + # if SLICE_ALT <nl> + scoped_ptr_t < alt_txn_t > txn ; <nl> + # else <nl> scoped_ptr_t < transaction_t > txn ; <nl> + # endif <nl> scoped_ptr_t < real_superblock_t > superblock ; <nl> + # if SLICE_ALT <nl> + get_btree_superblock_and_txn ( & btree , alt_access_t : : write , 1 , <nl> + repli_timestamp_t : : invalid , otok , <nl> + WRITE_DURABILITY_SOFT , <nl> + & superblock , & txn ) ; <nl> + # else <nl> get_btree_superblock_and_txn ( & btree , rwi_write , rwi_write , 1 , repli_timestamp_t : : invalid , otok , WRITE_DURABILITY_SOFT , & superblock , & txn ) ; <nl> + # endif <nl> + # if SLICE_ALT <nl> + alt_buf_lock_t * sb_buf = superblock - > get ( ) ; <nl> + # else <nl> buf_lock_t * sb_buf = superblock - > get ( ) ; <nl> + # endif <nl> <nl> int op = random ( ) % 100 ; <nl> if ( op = = 0 ) { <nl> + # if SLICE_ALT <nl> + clear_superblock_metainfo ( sb_buf ) ; <nl> + # else <nl> clear_superblock_metainfo ( txn . get ( ) , sb_buf ) ; <nl> + # endif <nl> mirror . 
clear ( ) ; <nl> if ( print_log_messages ) { <nl> puts ( " clear " ) ; <nl> void run_metainfo_test ( ) { <nl> } <nl> std : : string key = random_existing_key ( mirror ) ; <nl> std : : vector < char > value_out ; <nl> + # if SLICE_ALT <nl> + bool found = get_superblock_metainfo ( sb_buf , string_to_vector ( key ) , <nl> + & value_out ) ; <nl> + # else <nl> bool found = get_superblock_metainfo ( txn . get ( ) , sb_buf , string_to_vector ( key ) , & value_out ) ; <nl> + # endif <nl> EXPECT_TRUE ( found ) ; <nl> if ( found ) { <nl> EXPECT_EQ ( mirror [ key ] , vector_to_string ( value_out ) ) ; <nl> void run_metainfo_test ( ) { <nl> continue ; <nl> } <nl> std : : vector < char > value_out ; <nl> - bool found = get_superblock_metainfo ( txn . get ( ) , sb_buf , string_to_vector ( key ) , & value_out ) ; <nl> + # if SLICE_ALT <nl> + const bool found = get_superblock_metainfo ( sb_buf , string_to_vector ( key ) , <nl> + & value_out ) ; <nl> + # else <nl> + const bool found = get_superblock_metainfo ( txn . get ( ) , sb_buf , string_to_vector ( key ) , & value_out ) ; <nl> + # endif <nl> EXPECT_FALSE ( found ) ; <nl> if ( found ) { <nl> EXPECT_EQ ( mirror [ key ] , vector_to_string ( value_out ) ) ; <nl> void run_metainfo_test ( ) { <nl> } <nl> std : : string key = random_existing_key ( mirror ) ; <nl> std : : string value = random_string ( ) ; <nl> + # if SLICE_ALT <nl> + set_superblock_metainfo ( sb_buf , string_to_vector ( key ) , <nl> + string_to_vector ( value ) ) ; <nl> + # else <nl> set_superblock_metainfo ( txn . get ( ) , sb_buf , string_to_vector ( key ) , string_to_vector ( value ) ) ; <nl> + # endif <nl> mirror [ key ] = value ; <nl> if ( print_log_messages ) { <nl> printf ( " update ' % s ' = ' % s ' \ n " , key . c_str ( ) , value . c_str ( ) ) ; <nl> void run_metainfo_test ( ) { <nl> continue ; <nl> } <nl> std : : string value = random_string ( ) ; <nl> + # if SLICE_ALT <nl> + set_superblock_metainfo ( sb_buf , string_to_vector ( key ) , <nl> + string_to_vector ( value ) ) ; <nl> + # else <nl> set_superblock_metainfo ( txn . get ( ) , sb_buf , string_to_vector ( key ) , string_to_vector ( value ) ) ; <nl> + # endif <nl> mirror [ key ] = value ; <nl> if ( print_log_messages ) { <nl> printf ( " insert ' % s ' = ' % s ' \ n " , key . c_str ( ) , value . c_str ( ) ) ; <nl> void run_metainfo_test ( ) { <nl> continue ; <nl> } <nl> std : : string key = random_existing_key ( mirror ) ; <nl> + # if SLICE_ALT <nl> + delete_superblock_metainfo ( sb_buf , string_to_vector ( key ) ) ; <nl> + # else <nl> delete_superblock_metainfo ( txn . get ( ) , sb_buf , string_to_vector ( key ) ) ; <nl> + # endif <nl> mirror . erase ( key ) ; <nl> if ( print_log_messages ) { <nl> printf ( " delete ' % s ' \ n " , key . c_str ( ) ) ; <nl> } <nl> } else { <nl> std : : vector < std : : pair < std : : vector < char > , std : : vector < char > > > pairs ; <nl> + # if SLICE_ALT <nl> + get_superblock_metainfo ( sb_buf , & pairs ) ; <nl> + # else <nl> get_superblock_metainfo ( txn . get ( ) , sb_buf , & pairs ) ; <nl> + # endif <nl> std : : map < std : : string , std : : string > mirror_copy = mirror ; <nl> if ( print_log_messages ) { <nl> puts ( " scan . . . " ) ; <nl> mmm a / src / unittest / btree_sindex . cc <nl> ppp b / src / unittest / btree_sindex . cc <nl> <nl> # include " arch / io / disk . hpp " <nl> # include " btree / btree_store . hpp " <nl> # include " btree / operations . hpp " <nl> + # include " btree / slice . 
hpp " / / RSI : for SLICE_ALT <nl> + # if SLICE_ALT <nl> + # include " buffer_cache / alt / alt . hpp " <nl> + # endif <nl> # include " buffer_cache / blob . hpp " <nl> # include " unittest / unittest_utils . hpp " <nl> # include " rdb_protocol / btree . hpp " <nl> # include " rdb_protocol / protocol . hpp " <nl> # include " serializer / config . hpp " <nl> <nl> + # if SLICE_ALT <nl> + using namespace alt ; / / RSI <nl> + # endif <nl> + <nl> namespace unittest { <nl> <nl> void run_sindex_low_level_operations_test ( ) { <nl> void run_sindex_low_level_operations_test ( ) { <nl> <nl> cache_t : : create ( & serializer ) ; <nl> <nl> + # if SLICE_ALT <nl> + alt_cache_t cache ( & serializer ) ; <nl> + # else <nl> mirrored_cache_config_t cache_dynamic_config ; <nl> cache_t cache ( & serializer , cache_dynamic_config , & get_global_perfmon_collection ( ) ) ; <nl> + # endif <nl> <nl> / / Passing in blank metainfo . We don ' t need metainfo for this unittest . <nl> btree_slice_t : : create ( & cache , std : : vector < char > ( ) , std : : vector < char > ( ) ) ; <nl> void run_sindex_low_level_operations_test ( ) { <nl> <nl> { <nl> order_token_t otok = order_source . check_in ( " sindex unittest " ) ; <nl> + # if SLICE_ALT <nl> + scoped_ptr_t < alt_txn_t > txn ; <nl> + # else <nl> scoped_ptr_t < transaction_t > txn ; <nl> + # endif <nl> scoped_ptr_t < real_superblock_t > superblock ; <nl> + # if SLICE_ALT <nl> + get_btree_superblock_and_txn ( & btree , alt_access_t : : write , 1 , <nl> + repli_timestamp_t : : invalid , otok , <nl> + WRITE_DURABILITY_SOFT , <nl> + & superblock , & txn ) ; <nl> + # else <nl> get_btree_superblock_and_txn ( & btree , rwi_write , rwi_write , 1 , repli_timestamp_t : : invalid , otok , WRITE_DURABILITY_SOFT , & superblock , & txn ) ; <nl> + # endif <nl> <nl> + # if SLICE_ALT <nl> + alt_buf_lock_t sindex_block ( superblock - > expose_buf ( ) , <nl> + superblock - > get_sindex_block_id ( ) , <nl> + alt_access_t : : write ) ; <nl> + # else <nl> buf_lock_t sindex_block ( txn . get ( ) , superblock - > get_sindex_block_id ( ) , rwi_write ) ; <nl> + # endif <nl> <nl> + # if SLICE_ALT <nl> + initialize_secondary_indexes ( & sindex_block ) ; <nl> + # else <nl> initialize_secondary_indexes ( txn . get ( ) , & sindex_block ) ; <nl> + # endif <nl> } <nl> <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> void run_sindex_low_level_operations_test ( ) { <nl> mirror [ id ] = s ; <nl> <nl> order_token_t otok = order_source . check_in ( " sindex unittest " ) ; <nl> + # if SLICE_ALT <nl> + scoped_ptr_t < alt_txn_t > txn ; <nl> + # else <nl> scoped_ptr_t < transaction_t > txn ; <nl> + # endif <nl> scoped_ptr_t < real_superblock_t > superblock ; <nl> + # if SLICE_ALT <nl> + get_btree_superblock_and_txn ( & btree , alt_access_t : : write , 1 , <nl> + repli_timestamp_t : : invalid , otok , <nl> + WRITE_DURABILITY_SOFT , <nl> + & superblock , & txn ) ; <nl> + # else <nl> get_btree_superblock_and_txn ( & btree , rwi_write , rwi_write , 1 , repli_timestamp_t : : invalid , otok , WRITE_DURABILITY_SOFT , & superblock , & txn ) ; <nl> + # endif <nl> + # if SLICE_ALT <nl> + alt_buf_lock_t sindex_block ( superblock - > expose_buf ( ) , <nl> + superblock - > get_sindex_block_id ( ) , <nl> + alt_access_t : : write ) ; <nl> + # else <nl> buf_lock_t sindex_block ( txn . get ( ) , superblock - > get_sindex_block_id ( ) , rwi_write ) ; <nl> + # endif <nl> <nl> + # if SLICE_ALT <nl> + set_secondary_index ( & sindex_block , id , s ) ; <nl> + # else <nl> set_secondary_index ( txn . 
get ( ) , & sindex_block , id , s ) ; <nl> + # endif <nl> } <nl> <nl> { <nl> order_token_t otok = order_source . check_in ( " sindex unittest " ) ; <nl> + # if SLICE_ALT <nl> + scoped_ptr_t < alt_txn_t > txn ; <nl> + # else <nl> scoped_ptr_t < transaction_t > txn ; <nl> + # endif <nl> scoped_ptr_t < real_superblock_t > superblock ; <nl> + # if SLICE_ALT <nl> + get_btree_superblock_and_txn ( & btree , alt_access_t : : write , 1 , <nl> + repli_timestamp_t : : invalid , <nl> + otok , WRITE_DURABILITY_SOFT , <nl> + & superblock , & txn ) ; <nl> + # else <nl> get_btree_superblock_and_txn ( & btree , rwi_write , rwi_write , 1 , repli_timestamp_t : : invalid , otok , WRITE_DURABILITY_SOFT , & superblock , & txn ) ; <nl> + # endif <nl> + # if SLICE_ALT <nl> + alt_buf_lock_t sindex_block ( superblock - > expose_buf ( ) , <nl> + superblock - > get_sindex_block_id ( ) , <nl> + alt_access_t : : write ) ; <nl> + # else <nl> buf_lock_t sindex_block ( txn . get ( ) , superblock - > get_sindex_block_id ( ) , rwi_write ) ; <nl> + # endif <nl> <nl> std : : map < std : : string , secondary_index_t > sindexes ; <nl> + # if SLICE_ALT <nl> + get_secondary_indexes ( & sindex_block , & sindexes ) ; <nl> + # else <nl> get_secondary_indexes ( txn . get ( ) , & sindex_block , & sindexes ) ; <nl> + # endif <nl> <nl> ASSERT_TRUE ( sindexes = = mirror ) ; <nl> } <nl> void run_sindex_btree_store_api_test ( ) { <nl> 1 , WRITE_DURABILITY_SOFT , & token_pair , <nl> & txn , & super_block , & dummy_interuptor ) ; <nl> <nl> - UNUSED bool b = store . add_sindex ( <nl> + UNUSED bool b = store . add_sindex ( <nl> & token_pair , <nl> id , <nl> std : : vector < char > ( ) , <nl> void run_sindex_btree_store_api_test ( ) { <nl> read_token_pair_t token_pair ; <nl> store . new_read_token_pair ( & token_pair ) ; <nl> <nl> + # if SLICE_ALT <nl> + scoped_ptr_t < alt_txn_t > txn ; <nl> + # else <nl> scoped_ptr_t < transaction_t > txn ; <nl> + # endif <nl> scoped_ptr_t < real_superblock_t > main_sb ; <nl> scoped_ptr_t < real_superblock_t > sindex_super_block ; <nl> <nl> + # if SLICE_ALT <nl> + store . acquire_superblock_for_read ( <nl> + & token_pair . main_read_token , & txn , & main_sb , <nl> + & dummy_interuptor , true ) ; <nl> + # else <nl> store . acquire_superblock_for_read ( rwi_read , <nl> & token_pair . main_read_token , & txn , & main_sb , <nl> & dummy_interuptor , true ) ; <nl> + # endif <nl> <nl> store_key_t key ( " foo " ) ; <nl> <nl> + # if SLICE_ALT <nl> + bool sindex_exists = store . acquire_sindex_superblock_for_read ( id , <nl> + main_sb - > get_sindex_block_id ( ) , & token_pair , <nl> + main_sb - > expose_buf ( ) , & sindex_super_block , <nl> + static_cast < std : : vector < char > * > ( NULL ) , & dummy_interuptor ) ; <nl> + # else <nl> bool sindex_exists = store . acquire_sindex_superblock_for_read ( id , <nl> main_sb - > get_sindex_block_id ( ) , & token_pair , <nl> txn . get ( ) , & sindex_super_block , <nl> static_cast < std : : vector < char > * > ( NULL ) , & dummy_interuptor ) ; <nl> + # endif <nl> ASSERT_TRUE ( sindex_exists ) ; <nl> <nl> point_read_response_t response ; <nl> <nl> + # if SLICE_ALT <nl> + rdb_get ( key , store . get_sindex_slice ( id ) , <nl> + sindex_super_block . get ( ) , & response , NULL ) ; <nl> + # else <nl> rdb_get ( key , store . get_sindex_slice ( id ) , txn . get ( ) , <nl> sindex_super_block . get ( ) , & response , NULL ) ; <nl> + # endif <nl> <nl> ASSERT_EQ ( ql : : datum_t ( 1 . 0 ) , * response . data ) ; <nl> } <nl>
Made rdb_protocol / btree . cc closer to compiling . Maybe about 2 / 3 the way there . . .
rethinkdb/rethinkdb
d4bd53f67abf6aceffd68a6a08373cd38b17898d
2013-11-19T03:59:36Z
mmm a / src / mongo / db / ops / write_ops_exec . cpp <nl> ppp b / src / mongo / db / ops / write_ops_exec . cpp <nl> WriteResult performInserts ( OperationContext * txn , const InsertOp & wholeOp ) { <nl> <nl> size_t bytesInBatch = 0 ; <nl> std : : vector < BSONObj > batch ; <nl> - const size_t maxBatchSize = internalQueryExecYieldIterations / 2 ; <nl> + const size_t maxBatchSize = internalInsertMaxBatchSize ; <nl> batch . reserve ( std : : min ( wholeOp . documents . size ( ) , maxBatchSize ) ) ; <nl> <nl> for ( auto & & doc : wholeOp . documents ) { <nl> mmm a / src / mongo / db / query / query_knobs . cpp <nl> ppp b / src / mongo / db / query / query_knobs . cpp <nl> MONGO_EXPORT_SERVER_PARAMETER ( internalQueryExecYieldPeriodMS , int , 10 ) ; <nl> <nl> MONGO_EXPORT_SERVER_PARAMETER ( internalQueryFacetBufferSizeBytes , int , 100 * 1024 * 1024 ) ; <nl> <nl> + MONGO_EXPORT_SERVER_PARAMETER ( internalInsertMaxBatchSize , <nl> + int , <nl> + internalQueryExecYieldIterations / 2 ) ; <nl> + <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / query / query_knobs . h <nl> ppp b / src / mongo / db / query / query_knobs . h <nl> const int64_t insertVectorMaxBytes = 256 * 1024 ; <nl> / / The number of bytes to buffer at once during a $ facet stage . <nl> extern std : : atomic < int > internalQueryFacetBufferSizeBytes ; / / NOLINT <nl> <nl> + extern std : : atomic < int > internalInsertMaxBatchSize ; / / NOLINT <nl> + <nl> } / / namespace mongo <nl>
SERVER - 25821 make max insert batch size on primary tunable
mongodb/mongo
bf547fcd6cb2538ac9bbe78ee88c485c468d6ad8
2016-10-12T22:19:33Z
mmm a / src / compiler / change - lowering . cc <nl> ppp b / src / compiler / change - lowering . cc <nl> Reduction ChangeLowering : : Reduce ( Node * node ) { <nl> Node * control = graph ( ) - > start ( ) ; <nl> switch ( node - > opcode ( ) ) { <nl> case IrOpcode : : kChangeBitToBool : <nl> - return ChangeBitToBool ( node - > InputAt ( 0 ) , control ) ; <nl> + return ChangeBitToBool ( node - > InputAt ( 0 ) ) ; <nl> case IrOpcode : : kChangeBoolToBit : <nl> return ChangeBoolToBit ( node - > InputAt ( 0 ) ) ; <nl> + case IrOpcode : : kChangeWord32ToBit : <nl> + return ChangeWord32ToBit ( node - > InputAt ( 0 ) ) ; <nl> + case IrOpcode : : kChangeWord64ToBit : <nl> + return ChangeWord64ToBit ( node - > InputAt ( 0 ) ) ; <nl> case IrOpcode : : kChangeFloat64ToTagged : <nl> return ChangeFloat64ToTagged ( node - > InputAt ( 0 ) , control ) ; <nl> case IrOpcode : : kChangeInt32ToTagged : <nl> Node * ChangeLowering : : Uint32LessThanOrEqual ( Node * lhs , Node * rhs ) { <nl> } <nl> <nl> <nl> - Reduction ChangeLowering : : ChangeBitToBool ( Node * val , Node * control ) { <nl> + Reduction ChangeLowering : : ChangeBitToBool ( Node * value ) { <nl> MachineType const type = static_cast < MachineType > ( kTypeBool | kRepTagged ) ; <nl> - return Replace ( graph ( ) - > NewNode ( common ( ) - > Select ( type ) , val , <nl> + return Replace ( graph ( ) - > NewNode ( common ( ) - > Select ( type ) , value , <nl> jsgraph ( ) - > TrueConstant ( ) , <nl> jsgraph ( ) - > FalseConstant ( ) ) ) ; <nl> } <nl> <nl> <nl> - Reduction ChangeLowering : : ChangeBoolToBit ( Node * val ) { <nl> + Reduction ChangeLowering : : ChangeBoolToBit ( Node * value ) { <nl> + return Replace ( graph ( ) - > NewNode ( machine ( ) - > WordEqual ( ) , value , <nl> + jsgraph ( ) - > TrueConstant ( ) ) ) ; <nl> + } <nl> + <nl> + <nl> + Reduction ChangeLowering : : ChangeWord32ToBit ( Node * value ) { <nl> + return Replace ( <nl> + graph ( ) - > NewNode ( machine ( ) - > Word32Equal ( ) , <nl> + graph ( ) - > NewNode ( machine ( ) - > Word32Equal ( ) , value , <nl> + jsgraph ( ) - > Int32Constant ( 0 ) ) , <nl> + jsgraph ( ) - > Int32Constant ( 0 ) ) ) ; <nl> + } <nl> + <nl> + <nl> + Reduction ChangeLowering : : ChangeWord64ToBit ( Node * value ) { <nl> return Replace ( <nl> - graph ( ) - > NewNode ( machine ( ) - > WordEqual ( ) , val , jsgraph ( ) - > TrueConstant ( ) ) ) ; <nl> + graph ( ) - > NewNode ( machine ( ) - > Word32Equal ( ) , <nl> + graph ( ) - > NewNode ( machine ( ) - > Word64Equal ( ) , value , <nl> + jsgraph ( ) - > Int64Constant ( 0 ) ) , <nl> + jsgraph ( ) - > Int32Constant ( 0 ) ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / compiler / change - lowering . h <nl> ppp b / src / compiler / change - lowering . 
h <nl> class JSGraph ; <nl> class Linkage ; <nl> class MachineOperatorBuilder ; <nl> <nl> + <nl> class ChangeLowering FINAL : public Reducer { <nl> public : <nl> ChangeLowering ( JSGraph * jsgraph , Linkage * linkage ) <nl> class ChangeLowering FINAL : public Reducer { <nl> Node * TestNotSmi ( Node * value ) ; <nl> Node * Uint32LessThanOrEqual ( Node * lhs , Node * rhs ) ; <nl> <nl> - Reduction ChangeBitToBool ( Node * value , Node * control ) ; <nl> + Reduction ChangeBitToBool ( Node * value ) ; <nl> Reduction ChangeBoolToBit ( Node * value ) ; <nl> + Reduction ChangeWord32ToBit ( Node * value ) ; <nl> + Reduction ChangeWord64ToBit ( Node * value ) ; <nl> Reduction ChangeFloat64ToTagged ( Node * value , Node * control ) ; <nl> Reduction ChangeInt32ToTagged ( Node * value , Node * control ) ; <nl> Reduction ChangeTaggedToFloat64 ( Node * value , Node * control ) ; <nl> mmm a / src / compiler / js - graph . cc <nl> ppp b / src / compiler / js - graph . cc <nl> Node * JSGraph : : ExternalConstant ( ExternalReference reference ) { <nl> } <nl> <nl> <nl> + Type * JSGraph : : ZeroOneRangeType ( ) { <nl> + if ( ! zero_one_range_type_ . is_set ( ) ) { <nl> + zero_one_range_type_ . set ( <nl> + Type : : Range ( factory ( ) - > NewNumber ( 0 ) , factory ( ) - > NewNumber ( 1 ) , zone ( ) ) ) ; <nl> + } <nl> + return zero_one_range_type_ . get ( ) ; <nl> + } <nl> + <nl> + <nl> void JSGraph : : GetCachedNodes ( NodeVector * nodes ) { <nl> cache_ . GetCachedNodes ( nodes ) ; <nl> SetOncePointer < Node > * ptrs [ ] = { <nl> mmm a / src / compiler / js - graph . h <nl> ppp b / src / compiler / js - graph . h <nl> namespace v8 { <nl> namespace internal { <nl> namespace compiler { <nl> <nl> + / / Forward declarations . <nl> class Typer ; <nl> <nl> / / Implements a facade on a Graph , enhancing the graph with JS - specific <nl> class JSGraph : public ZoneObject { <nl> / / stubs and runtime functions that do not require a context . <nl> Node * NoContextConstant ( ) { return ZeroConstant ( ) ; } <nl> <nl> + / / Cached common types . <nl> + Type * ZeroOneRangeType ( ) ; <nl> + <nl> JSOperatorBuilder * javascript ( ) { return javascript_ ; } <nl> CommonOperatorBuilder * common ( ) { return common_ ; } <nl> MachineOperatorBuilder * machine ( ) { return machine_ ; } <nl> class JSGraph : public ZoneObject { <nl> SetOncePointer < Node > one_constant_ ; <nl> SetOncePointer < Node > nan_constant_ ; <nl> <nl> + SetOncePointer < Type > zero_one_range_type_ ; <nl> + <nl> CommonNodeCache cache_ ; <nl> <nl> Node * ImmovableHeapConstant ( Handle < HeapObject > value ) ; <nl> mmm a / src / compiler / opcodes . h <nl> ppp b / src / compiler / opcodes . h <nl> <nl> JS_CONTEXT_OP_LIST ( V ) \ <nl> JS_OTHER_OP_LIST ( V ) <nl> <nl> - / / Opcodes for VirtuaMachine - level operators . <nl> + / / Opcodes for VirtualMachine - level operators . <nl> # define SIMPLIFIED_OP_LIST ( V ) \ <nl> V ( AnyToBoolean ) \ <nl> V ( BooleanNot ) \ <nl> <nl> V ( ChangeUint32ToTagged ) \ <nl> V ( ChangeFloat64ToTagged ) \ <nl> V ( ChangeBoolToBit ) \ <nl> + V ( ChangeWord32ToBit ) \ <nl> + V ( ChangeWord64ToBit ) \ <nl> V ( ChangeBitToBool ) \ <nl> V ( LoadField ) \ <nl> V ( LoadBuffer ) \ <nl> mmm a / src / compiler / representation - change . h <nl> ppp b / src / compiler / representation - change . 
h <nl> class RepresentationChanger { <nl> : jsgraph_ ( jsgraph ) , <nl> simplified_ ( simplified ) , <nl> isolate_ ( isolate ) , <nl> + bit_range_ ( Type : : Range ( isolate - > factory ( ) - > NewNumber ( 0 ) , <nl> + isolate - > factory ( ) - > NewNumber ( 1 ) , <nl> + jsgraph - > zone ( ) ) ) , <nl> testing_type_errors_ ( false ) , <nl> type_error_ ( false ) { } <nl> <nl> / / TODO ( titzer ) : should Word64 also be implicitly convertable to others ? <nl> - static const MachineTypeUnion rWord = <nl> - kRepBit | kRepWord8 | kRepWord16 | kRepWord32 ; <nl> + static const MachineTypeUnion rWord = kRepWord8 | kRepWord16 | kRepWord32 ; <nl> <nl> Node * GetRepresentationFor ( Node * node , MachineTypeUnion output_type , <nl> MachineTypeUnion use_type ) { <nl> class RepresentationChanger { <nl> break ; <nl> } <nl> / / Select the correct X - > Word32 operator . <nl> - const Operator * op = NULL ; <nl> - if ( output_type & kRepFloat64 ) { <nl> + const Operator * op ; <nl> + if ( output_type & kRepBit ) { <nl> + return node ; / / No change necessary . <nl> + } else if ( output_type & kRepFloat64 ) { <nl> if ( output_type & kTypeUint32 | | use_unsigned ) { <nl> op = machine ( ) - > ChangeFloat64ToUint32 ( ) ; <nl> } else { <nl> class RepresentationChanger { <nl> } <nl> case IrOpcode : : kNumberConstant : { <nl> double value = OpParameter < double > ( node ) ; <nl> - if ( std : : isnan ( value ) | | value = = 0 . 0 ) { <nl> - return jsgraph ( ) - > Int32Constant ( 0 ) ; <nl> - } <nl> - return jsgraph ( ) - > Int32Constant ( 1 ) ; <nl> + if ( value = = 0 | | std : : isnan ( value ) ) return jsgraph ( ) - > Int32Constant ( 0 ) ; <nl> + return jsgraph ( ) - > Int32Constant ( 1 ) ; / / value ! = + 0 . 0 , - 0 . 0 , NaN <nl> } <nl> case IrOpcode : : kHeapConstant : { <nl> - Handle < Object > handle = OpParameter < Unique < Object > > ( node ) . handle ( ) ; <nl> - DCHECK ( * handle = = isolate ( ) - > heap ( ) - > true_value ( ) | | <nl> - * handle = = isolate ( ) - > heap ( ) - > false_value ( ) ) ; <nl> - return jsgraph ( ) - > Int32Constant ( <nl> - * handle = = isolate ( ) - > heap ( ) - > true_value ( ) ? 1 : 0 ) ; <nl> + Handle < Object > object = OpParameter < Unique < Object > > ( node ) . handle ( ) ; <nl> + return jsgraph ( ) - > Int32Constant ( object - > BooleanValue ( ) ? 1 : 0 ) ; <nl> } <nl> default : <nl> break ; <nl> class RepresentationChanger { <nl> / / Select the correct X - > Bit operator . <nl> const Operator * op ; <nl> if ( output_type & rWord ) { <nl> - return node ; / / No change necessary . <nl> + op = simplified ( ) - > ChangeWord32ToBit ( ) ; <nl> } else if ( output_type & kRepWord64 ) { <nl> - return node ; / / TODO ( titzer ) : No change necessary , on 64 - bit . <nl> + op = simplified ( ) - > ChangeWord64ToBit ( ) ; <nl> } else if ( output_type & kRepTagged ) { <nl> - op = simplified ( ) - > ChangeBoolToBit ( ) ; <nl> + Type * upper = NodeProperties : : GetBounds ( node ) . 
upper ; <nl> + if ( upper - > Is ( Type : : Boolean ( ) ) ) { <nl> + op = simplified ( ) - > ChangeBoolToBit ( ) ; <nl> + } else if ( upper - > Is ( Type : : Signed32 ( ) ) ) { <nl> + / / Tagged - > Int32 - > Bit <nl> + node = InsertChangeTaggedToInt32 ( node ) ; <nl> + op = simplified ( ) - > ChangeWord32ToBit ( ) ; <nl> + } else if ( upper - > Is ( Type : : Unsigned32 ( ) ) ) { <nl> + / / Tagged - > Uint32 - > Bit <nl> + node = InsertChangeTaggedToUint32 ( node ) ; <nl> + op = simplified ( ) - > ChangeWord32ToBit ( ) ; <nl> + } else { <nl> + return TypeError ( node , output_type , kRepBit ) ; <nl> + } <nl> } else { <nl> return TypeError ( node , output_type , kRepBit ) ; <nl> } <nl> - return jsgraph ( ) - > graph ( ) - > NewNode ( op , node ) ; <nl> + return graph ( ) - > NewNode ( op , node ) ; <nl> } <nl> <nl> Node * GetWord64RepresentationFor ( Node * node , MachineTypeUnion output_type ) { <nl> class RepresentationChanger { <nl> JSGraph * jsgraph_ ; <nl> SimplifiedOperatorBuilder * simplified_ ; <nl> Isolate * isolate_ ; <nl> + Type * bit_range_ ; <nl> <nl> friend class RepresentationChangerTester ; / / accesses the below fields . <nl> <nl> class RepresentationChanger { <nl> } <nl> <nl> Node * InsertChangeFloat32ToFloat64 ( Node * node ) { <nl> - return jsgraph ( ) - > graph ( ) - > NewNode ( machine ( ) - > ChangeFloat32ToFloat64 ( ) , <nl> - node ) ; <nl> + return graph ( ) - > NewNode ( machine ( ) - > ChangeFloat32ToFloat64 ( ) , node ) ; <nl> } <nl> <nl> Node * InsertChangeTaggedToFloat64 ( Node * node ) { <nl> - return jsgraph ( ) - > graph ( ) - > NewNode ( simplified ( ) - > ChangeTaggedToFloat64 ( ) , <nl> - node ) ; <nl> + return graph ( ) - > NewNode ( simplified ( ) - > ChangeTaggedToFloat64 ( ) , node ) ; <nl> + } <nl> + <nl> + Node * InsertChangeTaggedToInt32 ( Node * node ) { <nl> + return graph ( ) - > NewNode ( simplified ( ) - > ChangeTaggedToInt32 ( ) , node ) ; <nl> + } <nl> + <nl> + Node * InsertChangeTaggedToUint32 ( Node * node ) { <nl> + return graph ( ) - > NewNode ( simplified ( ) - > ChangeTaggedToUint32 ( ) , node ) ; <nl> } <nl> <nl> - JSGraph * jsgraph ( ) { return jsgraph_ ; } <nl> - Isolate * isolate ( ) { return isolate_ ; } <nl> - SimplifiedOperatorBuilder * simplified ( ) { return simplified_ ; } <nl> - MachineOperatorBuilder * machine ( ) { return jsgraph ( ) - > machine ( ) ; } <nl> + Graph * graph ( ) const { return jsgraph ( ) - > graph ( ) ; } <nl> + JSGraph * jsgraph ( ) const { return jsgraph_ ; } <nl> + Isolate * isolate ( ) const { return isolate_ ; } <nl> + SimplifiedOperatorBuilder * simplified ( ) const { return simplified_ ; } <nl> + MachineOperatorBuilder * machine ( ) const { return jsgraph ( ) - > machine ( ) ; } <nl> } ; <nl> <nl> } / / namespace compiler <nl> mmm a / src / compiler / simplified - operator - reducer . cc <nl> ppp b / src / compiler / simplified - operator - reducer . cc <nl> Reduction SimplifiedOperatorReducer : : Reduce ( Node * node ) { <nl> if ( m . IsChangeBitToBool ( ) ) return Replace ( m . node ( ) - > InputAt ( 0 ) ) ; <nl> break ; <nl> } <nl> + case IrOpcode : : kChangeWord32ToBit : <nl> + return ReduceChangeWord32ToBit ( node ) ; <nl> case IrOpcode : : kChangeFloat64ToTagged : { <nl> Float64Matcher m ( node - > InputAt ( 0 ) ) ; <nl> if ( m . HasValue ( ) ) return ReplaceNumber ( m . 
Value ( ) ) ; <nl> Reduction SimplifiedOperatorReducer : : ReduceAnyToBoolean ( Node * node ) { <nl> } <nl> <nl> <nl> + Reduction SimplifiedOperatorReducer : : ReduceChangeWord32ToBit ( Node * node ) { <nl> + Node * const input = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> + Type * const input_type = NodeProperties : : GetBounds ( input ) . upper ; <nl> + if ( input_type - > Is ( jsgraph ( ) - > ZeroOneRangeType ( ) ) ) { <nl> + / / ChangeWord32ToBit ( x : bit ) = > x <nl> + return Replace ( input ) ; <nl> + } <nl> + return NoChange ( ) ; <nl> + } <nl> + <nl> + <nl> Reduction SimplifiedOperatorReducer : : Change ( Node * node , const Operator * op , <nl> Node * a ) { <nl> DCHECK_EQ ( node - > InputCount ( ) , OperatorProperties : : GetTotalInputCount ( op ) ) ; <nl> mmm a / src / compiler / simplified - operator - reducer . h <nl> ppp b / src / compiler / simplified - operator - reducer . h <nl> namespace internal { <nl> <nl> / / Forward declarations . <nl> class Factory ; <nl> - class Heap ; <nl> <nl> namespace compiler { <nl> <nl> class SimplifiedOperatorReducer FINAL : public Reducer { <nl> <nl> private : <nl> Reduction ReduceAnyToBoolean ( Node * node ) ; <nl> + Reduction ReduceChangeWord32ToBit ( Node * node ) ; <nl> <nl> Reduction Change ( Node * node , const Operator * op , Node * a ) ; <nl> Reduction ReplaceFloat64 ( double value ) ; <nl> mmm a / src / compiler / simplified - operator . cc <nl> ppp b / src / compiler / simplified - operator . cc <nl> const ElementAccess & ElementAccessOf ( const Operator * op ) { <nl> V ( ChangeInt32ToTagged , Operator : : kNoProperties , 1 ) \ <nl> V ( ChangeUint32ToTagged , Operator : : kNoProperties , 1 ) \ <nl> V ( ChangeFloat64ToTagged , Operator : : kNoProperties , 1 ) \ <nl> - V ( ChangeBoolToBit , Operator : : kNoProperties , 1 ) \ <nl> V ( ChangeBitToBool , Operator : : kNoProperties , 1 ) \ <nl> + V ( ChangeBoolToBit , Operator : : kNoProperties , 1 ) \ <nl> + V ( ChangeWord32ToBit , Operator : : kNoProperties , 1 ) \ <nl> + V ( ChangeWord64ToBit , Operator : : kNoProperties , 1 ) \ <nl> V ( ObjectIsSmi , Operator : : kNoProperties , 1 ) \ <nl> V ( ObjectIsNonNegativeSmi , Operator : : kNoProperties , 1 ) <nl> <nl> mmm a / src / compiler / simplified - operator . h <nl> ppp b / src / compiler / simplified - operator . h <nl> class SimplifiedOperatorBuilder FINAL { <nl> const Operator * ChangeInt32ToTagged ( ) ; <nl> const Operator * ChangeUint32ToTagged ( ) ; <nl> const Operator * ChangeFloat64ToTagged ( ) ; <nl> - const Operator * ChangeBoolToBit ( ) ; <nl> const Operator * ChangeBitToBool ( ) ; <nl> + const Operator * ChangeBoolToBit ( ) ; <nl> + const Operator * ChangeWord32ToBit ( ) ; <nl> + const Operator * ChangeWord64ToBit ( ) ; <nl> <nl> const Operator * ObjectIsSmi ( ) ; <nl> const Operator * ObjectIsNonNegativeSmi ( ) ; <nl> mmm a / src / compiler / typer . cc <nl> ppp b / src / compiler / typer . cc <nl> Bounds Typer : : Visitor : : TypeChangeFloat64ToTagged ( Node * node ) { <nl> } <nl> <nl> <nl> + Bounds Typer : : Visitor : : TypeChangeBitToBool ( Node * node ) { <nl> + Bounds arg = Operand ( node , 0 ) ; <nl> + / / TODO ( neis ) : DCHECK ( arg . upper - > Is ( Type : : Boolean ( ) ) ) ; <nl> + return Bounds ( ChangeRepresentation ( arg . lower , Type : : TaggedPointer ( ) , zone ( ) ) , <nl> + ChangeRepresentation ( arg . 
upper , Type : : TaggedPointer ( ) , zone ( ) ) ) ; <nl> + } <nl> + <nl> + <nl> Bounds Typer : : Visitor : : TypeChangeBoolToBit ( Node * node ) { <nl> Bounds arg = Operand ( node , 0 ) ; <nl> / / TODO ( neis ) : DCHECK ( arg . upper - > Is ( Type : : Boolean ( ) ) ) ; <nl> Bounds Typer : : Visitor : : TypeChangeBoolToBit ( Node * node ) { <nl> } <nl> <nl> <nl> - Bounds Typer : : Visitor : : TypeChangeBitToBool ( Node * node ) { <nl> - Bounds arg = Operand ( node , 0 ) ; <nl> - / / TODO ( neis ) : DCHECK ( arg . upper - > Is ( Type : : Boolean ( ) ) ) ; <nl> + Bounds Typer : : Visitor : : TypeChangeWord32ToBit ( Node * node ) { <nl> + return Bounds ( <nl> + ChangeRepresentation ( Type : : Boolean ( ) , Type : : UntaggedBit ( ) , zone ( ) ) ) ; <nl> + } <nl> + <nl> + <nl> + Bounds Typer : : Visitor : : TypeChangeWord64ToBit ( Node * node ) { <nl> return Bounds ( <nl> - ChangeRepresentation ( arg . lower , Type : : TaggedPointer ( ) , zone ( ) ) , <nl> - ChangeRepresentation ( arg . upper , Type : : TaggedPointer ( ) , zone ( ) ) ) ; <nl> + ChangeRepresentation ( Type : : Boolean ( ) , Type : : UntaggedBit ( ) , zone ( ) ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / compiler / verifier . cc <nl> ppp b / src / compiler / verifier . cc <nl> void Verifier : : Visitor : : Pre ( Node * node ) { <nl> / / CheckUpperIs ( node , to ) ) ; <nl> break ; <nl> } <nl> + case IrOpcode : : kChangeBitToBool : { <nl> + / / Boolean / \ UntaggedInt1 - > Boolean / \ TaggedPtr <nl> + / / TODO ( neis ) : Activate once ChangeRepresentation works in typer . <nl> + / / Type * from = Type : : Intersect ( Type : : Boolean ( ) , Type : : UntaggedInt1 ( ) ) ; <nl> + / / Type * to = Type : : Intersect ( Type : : Boolean ( ) , Type : : TaggedPtr ( ) ) ; <nl> + / / CheckValueInputIs ( node , 0 , from ) ) ; <nl> + / / CheckUpperIs ( node , to ) ) ; <nl> + break ; <nl> + } <nl> case IrOpcode : : kChangeBoolToBit : { <nl> / / Boolean / \ TaggedPtr - > Boolean / \ UntaggedInt1 <nl> / / TODO ( neis ) : Activate once ChangeRepresentation works in typer . <nl> void Verifier : : Visitor : : Pre ( Node * node ) { <nl> / / CheckUpperIs ( node , to ) ) ; <nl> break ; <nl> } <nl> - case IrOpcode : : kChangeBitToBool : { <nl> - / / Boolean / \ UntaggedInt1 - > Boolean / \ TaggedPtr <nl> - / / TODO ( neis ) : Activate once ChangeRepresentation works in typer . <nl> - / / Type * from = Type : : Intersect ( Type : : Boolean ( ) , Type : : UntaggedInt1 ( ) ) ; <nl> - / / Type * to = Type : : Intersect ( Type : : Boolean ( ) , Type : : TaggedPtr ( ) ) ; <nl> - / / CheckValueInputIs ( node , 0 , from ) ) ; <nl> - / / CheckUpperIs ( node , to ) ) ; <nl> + case IrOpcode : : kChangeWord32ToBit : { <nl> + / / TODO ( rossberg ) : Check . <nl> + CheckValueInputIs ( node , 0 , Type : : Integral32 ( ) ) ; <nl> + break ; <nl> + } <nl> + case IrOpcode : : kChangeWord64ToBit : { <nl> + / / TODO ( rossberg ) : Check . <nl> break ; <nl> } <nl> <nl> mmm a / test / cctest / compiler / test - representation - change . cc <nl> ppp b / test / cctest / compiler / test - representation - change . cc <nl> class RepresentationChangerTester : public HandleAndZoneScope , <nl> CHECK_EQ ( expected , m . 
Value ( ) ) ; <nl> } <nl> <nl> - Node * Parameter ( int index = 0 ) { <nl> - return graph ( ) - > NewNode ( common ( ) - > Parameter ( index ) , graph ( ) - > start ( ) ) ; <nl> + Node * Parameter ( Type * type , int index = 0 ) { <nl> + Node * node = graph ( ) - > NewNode ( common ( ) - > Parameter ( index ) , graph ( ) - > start ( ) ) ; <nl> + NodeProperties : : SetBounds ( node , Bounds ( type ) ) ; <nl> + return node ; <nl> } <nl> <nl> + Node * Parameter ( int index = 0 ) { return Parameter ( Type : : Any ( ) , index ) ; } <nl> + <nl> void CheckTypeError ( MachineTypeUnion from , MachineTypeUnion to ) { <nl> changer ( ) - > testing_type_errors_ = true ; <nl> changer ( ) - > type_error_ = false ; <nl> class RepresentationChangerTester : public HandleAndZoneScope , <nl> CHECK_EQ ( n , c ) ; <nl> } <nl> } ; <nl> - } <nl> - } <nl> - } / / namespace v8 : : internal : : compiler <nl> + <nl> + } / / namespace compiler <nl> + } / / namespace internal <nl> + } / / namespace v8 <nl> <nl> <nl> static const MachineType all_reps [ ] = { kRepBit , kRepWord32 , kRepWord64 , <nl> kRepFloat32 , kRepFloat64 , kRepTagged } ; <nl> <nl> <nl> - TEST ( BoolToBit_constant ) { <nl> + TEST ( ToBit_constant ) { <nl> RepresentationChangerTester r ; <nl> <nl> Node * true_node = r . jsgraph ( ) - > TrueConstant ( ) ; <nl> TEST ( BoolToBit_constant ) { <nl> Node * false_bit = <nl> r . changer ( ) - > GetRepresentationFor ( false_node , kRepTagged , kRepBit ) ; <nl> r . CheckInt32Constant ( false_bit , 0 ) ; <nl> + <nl> + { <nl> + FOR_FLOAT64_INPUTS ( i ) { <nl> + Node * node = r . jsgraph ( ) - > Constant ( * i ) ; <nl> + Node * bit = r . changer ( ) - > GetRepresentationFor ( node , kRepTagged , kRepBit ) ; <nl> + r . CheckInt32Constant ( bit , DoubleToBoolean ( * i ) ? 1 : 0 ) ; <nl> + } <nl> + } <nl> + <nl> + { <nl> + FOR_INT32_INPUTS ( i ) { <nl> + Node * node = r . jsgraph ( ) - > Int32Constant ( * i ) ; <nl> + Node * bit = r . changer ( ) - > GetRepresentationFor ( node , kRepWord32 , kRepBit ) ; <nl> + r . CheckInt32Constant ( bit , * i = = 0 ? 0 : 1 ) ; <nl> + } <nl> + } <nl> } <nl> <nl> <nl> TEST ( ToUint32_constant ) { <nl> <nl> <nl> static void CheckChange ( IrOpcode : : Value expected , MachineTypeUnion from , <nl> - MachineTypeUnion to ) { <nl> + MachineTypeUnion to , Type * from_type = Type : : Any ( ) ) { <nl> RepresentationChangerTester r ; <nl> <nl> - Node * n = r . Parameter ( ) ; <nl> + Node * n = r . Parameter ( from_type ) ; <nl> Node * c = r . changer ( ) - > GetRepresentationFor ( n , from , to ) ; <nl> <nl> CHECK_NE ( c , n ) ; <nl> static void CheckChange ( IrOpcode : : Value expected , MachineTypeUnion from , <nl> <nl> static void CheckTwoChanges ( IrOpcode : : Value expected2 , <nl> IrOpcode : : Value expected1 , MachineTypeUnion from , <nl> - MachineTypeUnion to ) { <nl> + MachineTypeUnion to , <nl> + Type * from_type = Type : : Any ( ) ) { <nl> RepresentationChangerTester r ; <nl> <nl> - Node * n = r . Parameter ( ) ; <nl> + Node * n = r . Parameter ( from_type ) ; <nl> Node * c1 = r . 
changer ( ) - > GetRepresentationFor ( n , from , to ) ; <nl> <nl> CHECK_NE ( c1 , n ) ; <nl> static void CheckTwoChanges ( IrOpcode : : Value expected2 , <nl> <nl> <nl> TEST ( SingleChanges ) { <nl> - CheckChange ( IrOpcode : : kChangeBoolToBit , kRepTagged , kRepBit ) ; <nl> + CheckChange ( IrOpcode : : kChangeBoolToBit , kRepTagged , kRepBit , Type : : Boolean ( ) ) ; <nl> + CheckTwoChanges ( IrOpcode : : kChangeTaggedToInt32 , IrOpcode : : kChangeWord32ToBit , <nl> + kRepTagged , kRepBit , Type : : Signed32 ( ) ) ; <nl> + CheckTwoChanges ( IrOpcode : : kChangeTaggedToUint32 , IrOpcode : : kChangeWord32ToBit , <nl> + kRepTagged , kRepBit , Type : : Unsigned32 ( ) ) ; <nl> + CheckChange ( IrOpcode : : kChangeWord32ToBit , kRepWord8 , kRepBit ) ; <nl> + CheckChange ( IrOpcode : : kChangeWord32ToBit , kRepWord16 , kRepBit ) ; <nl> + CheckChange ( IrOpcode : : kChangeWord32ToBit , kRepWord32 , kRepBit ) ; <nl> + CheckChange ( IrOpcode : : kChangeWord64ToBit , kRepWord64 , kRepBit ) ; <nl> CheckChange ( IrOpcode : : kChangeBitToBool , kRepBit , kRepTagged ) ; <nl> <nl> CheckChange ( IrOpcode : : kChangeInt32ToTagged , kRepWord32 | kTypeInt32 , <nl> TEST ( Nops ) { <nl> r . CheckNop ( kRepFloat32 | kTypeNumber , kRepFloat32 ) ; <nl> r . CheckNop ( kRepFloat32 , kRepFloat32 | kTypeNumber ) ; <nl> <nl> - / / 32 - bit or 64 - bit words can be used as branch conditions ( kRepBit ) . <nl> - r . CheckNop ( kRepWord32 , kRepBit ) ; <nl> - r . CheckNop ( kRepWord32 , kRepBit | kTypeBool ) ; <nl> - r . CheckNop ( kRepWord64 , kRepBit ) ; <nl> - r . CheckNop ( kRepWord64 , kRepBit | kTypeBool ) ; <nl> - <nl> / / 32 - bit words can be used as smaller word sizes and vice versa , because <nl> / / loads from memory implicitly sign or zero extend the value to the <nl> / / full machine word size , and stores implicitly truncate . <nl> mmm a / test / mjsunit / compiler / regress - bit - number - constant . js <nl> ppp b / test / mjsunit / compiler / regress - bit - number - constant . js <nl> var foo = ( function Module ( stdlib , foreign , heap ) { <nl> } ) ( stdlib , foreign , buffer ) . foo ; <nl> <nl> assertFalse ( foo ( 1 ) ) ; <nl> + assertTrue ( foo ( 0 ) ) ; <nl> mmm a / test / unittests / compiler / change - lowering - unittest . cc <nl> ppp b / test / unittests / compiler / change - lowering - unittest . cc <nl> TARGET_TEST_P ( ChangeLoweringCommonTest , ChangeBoolToBit ) { <nl> Node * node = graph ( ) - > NewNode ( simplified ( ) - > ChangeBoolToBit ( ) , val ) ; <nl> Reduction reduction = Reduce ( node ) ; <nl> ASSERT_TRUE ( reduction . Changed ( ) ) ; <nl> - <nl> EXPECT_THAT ( reduction . replacement ( ) , IsWordEqual ( val , IsTrueConstant ( ) ) ) ; <nl> } <nl> <nl> <nl> + TARGET_TEST_P ( ChangeLoweringCommonTest , ChangeWord32ToBit ) { <nl> + Node * val = Parameter ( 0 ) ; <nl> + Node * node = graph ( ) - > NewNode ( simplified ( ) - > ChangeWord32ToBit ( ) , val ) ; <nl> + Reduction reduction = Reduce ( node ) ; <nl> + ASSERT_TRUE ( reduction . Changed ( ) ) ; <nl> + EXPECT_THAT ( reduction . replacement ( ) , <nl> + IsWord32Equal ( IsWord32Equal ( val , IsInt32Constant ( 0 ) ) , <nl> + IsInt32Constant ( 0 ) ) ) ; <nl> + } <nl> + <nl> + <nl> + TARGET_TEST_P ( ChangeLoweringCommonTest , ChangeWord64ToBit ) { <nl> + Node * val = Parameter ( 0 ) ; <nl> + Node * node = graph ( ) - > NewNode ( simplified ( ) - > ChangeWord64ToBit ( ) , val ) ; <nl> + Reduction reduction = Reduce ( node ) ; <nl> + ASSERT_TRUE ( reduction . Changed ( ) ) ; <nl> + EXPECT_THAT ( reduction . 
replacement ( ) , <nl> + IsWord32Equal ( IsWord64Equal ( val , IsInt64Constant ( 0 ) ) , <nl> + IsInt32Constant ( 0 ) ) ) ; <nl> + } <nl> + <nl> + <nl> TARGET_TEST_P ( ChangeLoweringCommonTest , ChangeFloat64ToTagged ) { <nl> Node * val = Parameter ( 0 ) ; <nl> Node * node = graph ( ) - > NewNode ( simplified ( ) - > ChangeFloat64ToTagged ( ) , val ) ; <nl> mmm a / test / unittests / compiler / simplified - operator - reducer - unittest . cc <nl> ppp b / test / unittests / compiler / simplified - operator - reducer - unittest . cc <nl> TEST_F ( SimplifiedOperatorReducerTest , ChangeBoolToBitWithChangeBitToBool ) { <nl> } <nl> <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / ChangeWord32ToBit <nl> + <nl> + <nl> + TEST_F ( SimplifiedOperatorReducerTest , ChangeWord32ToBitWithBitType ) { <nl> + Handle < Object > zero = factory ( ) - > NewNumber ( 0 ) ; <nl> + Handle < Object > one = factory ( ) - > NewNumber ( 1 ) ; <nl> + Type * const kBitTypes [ ] = { <nl> + Type : : Constant ( zero , zone ( ) ) , Type : : Constant ( one , zone ( ) ) , <nl> + Type : : Range ( zero , zero , zone ( ) ) , Type : : Range ( one , one , zone ( ) ) , <nl> + Type : : Range ( zero , one , zone ( ) ) } ; <nl> + TRACED_FOREACH ( Type * , type , kBitTypes ) { <nl> + Node * param0 = Parameter ( type , 0 ) ; <nl> + Reduction reduction = <nl> + Reduce ( graph ( ) - > NewNode ( simplified ( ) - > ChangeWord32ToBit ( ) , param0 ) ) ; <nl> + ASSERT_TRUE ( reduction . Changed ( ) ) ; <nl> + EXPECT_EQ ( param0 , reduction . replacement ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / ChangeFloat64ToTagged <nl> <nl> mmm a / test / unittests / compiler / simplified - operator - unittest . cc <nl> ppp b / test / unittests / compiler / simplified - operator - unittest . cc <nl> const PureOperator kPureOperators [ ] = { <nl> PURE ( ChangeUint32ToTagged , Operator : : kNoProperties , 1 ) , <nl> PURE ( ChangeFloat64ToTagged , Operator : : kNoProperties , 1 ) , <nl> PURE ( ChangeBoolToBit , Operator : : kNoProperties , 1 ) , <nl> + PURE ( ChangeWord32ToBit , Operator : : kNoProperties , 1 ) , <nl> + PURE ( ChangeWord64ToBit , Operator : : kNoProperties , 1 ) , <nl> PURE ( ChangeBitToBool , Operator : : kNoProperties , 1 ) , <nl> PURE ( ObjectIsSmi , Operator : : kNoProperties , 1 ) , <nl> PURE ( ObjectIsNonNegativeSmi , Operator : : kNoProperties , 1 ) <nl>
[ turbofan ] Correctify representation changes to bit .
v8/v8
fec1bba85223a3a32182e4874be46b355f6df89e
2015-01-08T09:48:41Z
mmm a / scene / gui / line_edit . cpp <nl> ppp b / scene / gui / line_edit . cpp <nl> void LineEdit : : menu_option ( int p_option ) { <nl> select_all ( ) ; <nl> } break ; <nl> case MENU_UNDO : { <nl> - undo ( ) ; <nl> + if ( editable ) { <nl> + undo ( ) ; <nl> + } <nl> } break ; <nl> } <nl> } <nl>
Merge pull request from Noshyaar / pr - lineedit
godotengine/godot
8022c6858997df4190bcc360c3579a0d572ac8fc
2017-07-17T20:18:53Z
mmm a / dbtests / jsobjtests . cpp <nl> ppp b / dbtests / jsobjtests . cpp <nl> namespace JsobjTests { <nl> class LabelBase { <nl> public : <nl> void run ( ) { <nl> - cout < < expected ( ) . toString ( ) < < endl ; <nl> - cout < < actual ( ) . toString ( ) < < endl ; <nl> ASSERT ( ! expected ( ) . woCompare ( actual ( ) ) ) ; <nl> } <nl> protected : <nl>
Remove logging
mongodb/mongo
e66877f86a845c25379d28c36d26ad50459e00f8
2009-02-10T16:24:10Z
mmm a / modules / gdnative / nativescript / nativescript . cpp <nl> ppp b / modules / gdnative / nativescript / nativescript . cpp <nl> void NativeScriptLanguage : : init_library ( const Ref < GDNativeLibrary > & lib ) { <nl> <nl> void * proc_ptr ; <nl> <nl> - gdn - > get_symbol ( _init_call_name , proc_ptr ) ; <nl> - <nl> - ( ( void ( * ) ( godot_string * ) ) proc_ptr ) ( ( godot_string * ) & lib_path ) ; <nl> + Error err = gdn - > get_symbol ( _init_call_name , proc_ptr ) ; <nl> <nl> + if ( err ! = OK ) { <nl> + ERR_PRINT ( String ( " No " + _init_call_name + " in \ " " + lib_path + " \ " found " ) . utf8 ( ) . get_data ( ) ) ; <nl> + } else { <nl> + ( ( void ( * ) ( godot_string * ) ) proc_ptr ) ( ( godot_string * ) & lib_path ) ; <nl> + } <nl> } else { <nl> / / already initialized . Nice . <nl> } <nl> void NativeReloadNode : : _notification ( int p_what ) { <nl> / / here the library registers all the classes and stuff . <nl> <nl> void * proc_ptr ; <nl> - L - > get ( ) - > get_symbol ( " godot_nativescript_init " , proc_ptr ) ; <nl> - <nl> - ( ( void ( * ) ( void * ) ) proc_ptr ) ( ( void * ) & L - > key ( ) ) ; <nl> + Error err = L - > get ( ) - > get_symbol ( " godot_nativescript_init " , proc_ptr ) ; <nl> + if ( err ! = OK ) { <nl> + ERR_PRINT ( String ( " No godot_nativescript_init in \ " " + L - > key ( ) + " \ " found " ) . utf8 ( ) . get_data ( ) ) ; <nl> + } else { <nl> + ( ( void ( * ) ( void * ) ) proc_ptr ) ( ( void * ) & L - > key ( ) ) ; <nl> + } <nl> <nl> for ( Map < String , Set < NativeScript * > > : : Element * U = NSL - > library_script_users . front ( ) ; U ; U = U - > next ( ) ) { <nl> for ( Set < NativeScript * > : : Element * S = U - > get ( ) . front ( ) ; S ; S = S - > next ( ) ) { <nl>
Merge pull request from touilleMan / nativescript - fix - loading
godotengine/godot
edd881cd7068cdda4763229c2aa60c7bca0c2157
2017-10-16T08:10:32Z
mmm a / buildscripts / resmokeconfig / suites / sharded_multi_stmt_txn_jscore_passthrough . yml <nl> ppp b / buildscripts / resmokeconfig / suites / sharded_multi_stmt_txn_jscore_passthrough . yml <nl> selector : <nl> exclude_with_any_tags : <nl> # " Cowardly refusing to override read concern of command : . . . " <nl> - assumes_read_concern_unchanged <nl> + - assumes_unsharded_collection <nl> # " writeConcern is not allowed within a multi - statement transaction " <nl> - assumes_write_concern_unchanged <nl> - assumes_against_mongod_not_mongos <nl>
SERVER - 42167 Exclude ' assumes_unsharded_collection ' tag from ' sharded_multi_stmt_txn_jscore_passthrough ' suite
mongodb/mongo
ab1006e0831321c6e80693d7cf42b2d90fec34ee
2019-07-11T16:43:48Z
mmm a / cmake / Cuda . cmake <nl> ppp b / cmake / Cuda . cmake <nl> list ( APPEND Caffe2_LINKER_LIBS $ { CUDA_CUDART_LIBRARY } <nl> $ { CUDA_curand_LIBRARY } $ { CUDA_CUBLAS_LIBRARIES } ) <nl> <nl> # find libcuda . so and lbnvrtc . so <nl> + # For libcuda . so , we will find it under lib , lib64 , and then the <nl> + # stubs folder , in case we are building on a system that does not <nl> + # have cuda driver installed . <nl> find_library ( CUDA_CUDA_LIB cuda <nl> PATHS $ { CUDA_TOOLKIT_ROOT_DIR } <nl> - PATH_SUFFIXES lib lib64 ) <nl> + PATH_SUFFIXES lib lib64 lib / stubs lib64 / stubs ) <nl> find_library ( CUDA_NVRTC_LIB nvrtc <nl> PATHS $ { CUDA_TOOLKIT_ROOT_DIR } <nl> PATH_SUFFIXES lib lib64 ) <nl> find_library ( CUDA_NVRTC_LIB nvrtc <nl> caffe2_select_nvcc_arch_flags ( NVCC_FLAGS_EXTRA ) <nl> list ( APPEND CUDA_NVCC_FLAGS $ { NVCC_FLAGS_EXTRA } ) <nl> message ( STATUS " Added CUDA NVCC flags for : $ { NVCC_FLAGS_EXTRA_readable } " ) <nl> + <nl> if ( CUDA_CUDA_LIB ) <nl> message ( STATUS " Found libcuda : $ { CUDA_CUDA_LIB } " ) <nl> list ( APPEND Caffe2_LINKER_LIBS $ { CUDA_CUDA_LIB } ) <nl> + else ( ) <nl> + message ( FATAL_ERROR " Cannot find libcuda . so . Please file an issue on https : / / github . com / caffe2 / caffe2 with your build output . " ) <nl> endif ( ) <nl> if ( CUDA_NVRTC_LIB ) <nl> message ( STATUS " Found libnvrtc : $ { CUDA_NVRTC_LIB } " ) <nl> list ( APPEND Caffe2_LINKER_LIBS $ { CUDA_NVRTC_LIB } ) <nl> + else ( ) <nl> + message ( FATAL_ERROR " Cannot find libnvrtc . so . Please file an issue on https : / / github . com / caffe2 / caffe2 with your build output . " ) <nl> endif ( ) <nl> <nl> # disable some nvcc diagnostic that apears in boost , glog , glags , opencv , etc . <nl>
cmake cuda : add libcuda . so find paths , and produce error if it is not found .
pytorch/pytorch
6bf2e156d40f28925243be844b864334eeaf22e5
2017-01-04T07:14:07Z
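The stubs directories added above let a build machine without an NVIDIA driver still link against libcuda.so; the trade-off is that the stub only satisfies the linker, and the real driver library must exist at runtime. A small sketch of the runtime check such a binary should perform, assuming the CUDA driver API headers and `-lcuda` at link time:

```cpp
// Built against lib64/stubs/libcuda.so, this links fine on a driverless box,
// but cuInit() is the first call that actually needs a real driver -- so
// check its result instead of assuming a device exists.
#include <cuda.h>
#include <cstdio>

int main() {
    CUresult rc = cuInit(0);
    if (rc != CUDA_SUCCESS) {
        std::fprintf(stderr, "cuInit failed (%d): no usable driver installed?\n",
                     static_cast<int>(rc));
        return 1;
    }
    int count = 0;
    cuDeviceGetCount(&count);
    std::printf("CUDA devices: %d\n", count);
    return 0;
}
```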
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> Developers : <nl> Renaming CCScriptSupport : : executeAssert to CCScriptSupport : : handleAssert . <nl> Adding Native Client support for develop branch ( cocos2d - x 3 . 0 ) and some warnings fixes . <nl> Cleanup travis build scripts . <nl> + Refactoring emscripten building script . <nl> <nl> Peter Young ( young40 ) <nl> Implements setVisible ( ) for CCEditBox <nl>
Update AUTHORS [ ci skip ]
cocos2d/cocos2d-x
456f422c8a7ccdadf5f62bb789577ca294c2481c
2013-06-26T05:44:24Z
mmm a / dbms / src / Common / COW . h <nl> ppp b / dbms / src / Common / COW . h <nl> <nl> public : <nl> / / / Correctly use const qualifiers in your interface . <nl> <nl> - virtual ~ IColumn ( ) { } <nl> + virtual ~ Column ( ) { } <nl> } ; <nl> <nl> * It will provide ' create ' and ' mutate ' methods . <nl>
Fixed typo in comment .
ClickHouse/ClickHouse
e62999d8b91f9e3f92138d0f7a02ec268e625b34
2019-04-20T20:25:19Z
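The typo fixed above sits in a doc comment illustrating copy-on-write. For readers unfamiliar with the idea, here is a deliberately generic COW holder — this is only an illustration of the concept, not ClickHouse's actual COW.h, which uses intrusive reference counting with create()/mutate() factories on column classes:

```cpp
// Generic copy-on-write wrapper: reads share one buffer; the first write
// through a shared handle clones the buffer before touching it.
#include <cassert>
#include <memory>
#include <string>

template <typename T>
class Cow {
    std::shared_ptr<T> data_;
public:
    explicit Cow(T v) : data_(std::make_shared<T>(std::move(v))) {}
    const T &get() const { return *data_; }   // cheap, read-only access
    T &mutate() {
        if (data_.use_count() > 1)            // detach only when actually shared
            data_ = std::make_shared<T>(*data_);
        return *data_;
    }
};

int main() {
    Cow<std::string> a(std::string("hello"));
    Cow<std::string> b = a;                   // cheap copy: shares one buffer
    b.mutate() += " world";                   // clones before the first write
    assert(a.get() == "hello" && b.get() == "hello world");
    return 0;
}
```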
mmm a / atom / browser / resources / mac / Info . plist <nl> ppp b / atom / browser / resources / mac / Info . plist <nl> <nl> < key > CFBundleIconFile < / key > <nl> < string > electron . icns < / string > <nl> < key > CFBundleVersion < / key > <nl> - < string > 1 . 4 . 1 < / string > <nl> + < string > 1 . 4 . 2 < / string > <nl> < key > CFBundleShortVersionString < / key > <nl> - < string > 1 . 4 . 1 < / string > <nl> + < string > 1 . 4 . 2 < / string > <nl> < key > LSApplicationCategoryType < / key > <nl> < string > public . app - category . developer - tools < / string > <nl> < key > LSMinimumSystemVersion < / key > <nl> mmm a / atom / browser / resources / win / atom . rc <nl> ppp b / atom / browser / resources / win / atom . rc <nl> END <nl> / / <nl> <nl> VS_VERSION_INFO VERSIONINFO <nl> - FILEVERSION 1 , 4 , 1 , 0 <nl> - PRODUCTVERSION 1 , 4 , 1 , 0 <nl> + FILEVERSION 1 , 4 , 2 , 0 <nl> + PRODUCTVERSION 1 , 4 , 2 , 0 <nl> FILEFLAGSMASK 0x3fL <nl> # ifdef _DEBUG <nl> FILEFLAGS 0x1L <nl> BEGIN <nl> BEGIN <nl> VALUE " CompanyName " , " GitHub , Inc . " <nl> VALUE " FileDescription " , " Electron " <nl> - VALUE " FileVersion " , " 1 . 4 . 1 " <nl> + VALUE " FileVersion " , " 1 . 4 . 2 " <nl> VALUE " InternalName " , " electron . exe " <nl> VALUE " LegalCopyright " , " Copyright ( C ) 2015 GitHub , Inc . All rights reserved . " <nl> VALUE " OriginalFilename " , " electron . exe " <nl> VALUE " ProductName " , " Electron " <nl> - VALUE " ProductVersion " , " 1 . 4 . 1 " <nl> + VALUE " ProductVersion " , " 1 . 4 . 2 " <nl> VALUE " SquirrelAwareVersion " , " 1 " <nl> END <nl> END <nl> mmm a / atom / common / atom_version . h <nl> ppp b / atom / common / atom_version . h <nl> <nl> <nl> # define ATOM_MAJOR_VERSION 1 <nl> # define ATOM_MINOR_VERSION 4 <nl> - # define ATOM_PATCH_VERSION 1 <nl> + # define ATOM_PATCH_VERSION 2 <nl> <nl> # define ATOM_VERSION_IS_RELEASE 1 <nl> <nl> mmm a / electron . gyp <nl> ppp b / electron . gyp <nl> <nl> ' product_name % ' : ' Electron ' , <nl> ' company_name % ' : ' GitHub , Inc ' , <nl> ' company_abbr % ' : ' github ' , <nl> - ' version % ' : ' 1 . 4 . 1 ' , <nl> + ' version % ' : ' 1 . 4 . 2 ' , <nl> ' js2c_input_dir ' : ' < ( SHARED_INTERMEDIATE_DIR ) / js2c ' , <nl> } , <nl> ' includes ' : [ <nl> mmm a / package . json <nl> ppp b / package . json <nl> <nl> { <nl> " name " : " electron " , <nl> - " version " : " 1 . 4 . 1 " , <nl> + " version " : " 1 . 4 . 2 " , <nl> " devDependencies " : { <nl> " asar " : " ^ 0 . 11 . 0 " , <nl> " browserify " : " ^ 13 . 1 . 0 " , <nl>
Bump v1 . 4 . 2
electron/electron
5a56de501a0b8e6d69c7027ae174c7905f341a8a
2016-09-30T09:39:27Z
mmm a / src / butil / logging . h <nl> ppp b / src / butil / logging . h <nl> const LogSeverity BLOG_0 = BLOG_ERROR ; <nl> / / LOG_IS_ON ( DFATAL ) always holds in debug mode . In particular , CHECK ( ) s will <nl> / / always fire if they fail . <nl> # define LOG_IS_ON ( severity ) \ <nl> - ( logging : : BLOG_ # # severity > = : : logging : : GetMinLogLevel ( ) ) <nl> + ( : : logging : : BLOG_ # # severity > = : : logging : : GetMinLogLevel ( ) ) <nl> <nl> # if defined ( __GNUC__ ) <nl> / / We emit an anonymous static int * variable at every VLOG_IS_ON ( n ) site . <nl>
fix bug : logging conflict
apache/incubator-brpc
ee1fee9ae87d0fe69a83f741f9993db73ce21aa7
2019-08-24T09:54:49Z
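The one-character fix above — prefixing `::` — matters because a macro body is looked up at its expansion site: an unqualified `logging::` can be captured by any other namespace named `logging` that is visible there. A self-contained repro of the conflict the commit fixes (the names `kMinLevel` and `app` are invented for the demo):

```cpp
#include <iostream>

namespace logging { constexpr int kMinLevel = 2; }

// BAD: `logging` is resolved relative to wherever the macro is expanded.
#define LOG_IS_ON_BAD(lvl)  ((lvl) >= logging::kMinLevel)
// GOOD: `::logging` always names the global-namespace logging.
#define LOG_IS_ON_GOOD(lvl) ((lvl) >= ::logging::kMinLevel)

namespace app {
namespace logging { constexpr int kMinLevel = 99; }  // unrelated local namespace

void demo() {
    std::cout << LOG_IS_ON_BAD(5) << "\n";   // prints 0: picked up app::logging
    std::cout << LOG_IS_ON_GOOD(5) << "\n";  // prints 1: picked up ::logging
}
}  // namespace app

int main() { app::demo(); }
```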
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> script : <nl> - cp Marlin / Configuration . h Marlin / Configuration . h . backup <nl> - cp Marlin / Configuration_adv . h Marlin / Configuration_adv . h . backup <nl> # change extruder numbers from 1 to 2 <nl> - - sed - i ' s / # define EXTRUDERS 1 / # define EXTRUDERS 2 / g ' Marlin / Configuration . h <nl> - - rm - rf . build / <nl> - - ino build - m mega2560 <nl> - # change extruder numbers from 2 to 3 <nl> - - sed - i ' s / # define EXTRUDERS 2 / # define EXTRUDERS 3 / g ' Marlin / Configuration . h <nl> - - rm - rf . build / <nl> - - ino build - m mega2560 <nl> + # commented out for the moment fails build but compiles fine in Arduino <nl> + # - sed - i ' s / # define EXTRUDERS 1 / # define EXTRUDERS 2 / g ' Marlin / Configuration . h <nl> + # - rm - rf . build / <nl> + # - ino build - m mega2560 <nl> + # change extruder numbers from 2 to 3 , needs to be a board with 3 extruders defined in pins . h <nl> + # - sed - i ' s / # define MOTHERBOARD BOARD_ULTIMAKER / # define MOTHERBOARD BOARD_AZTEEG_X3_PRO / g ' Marlin / Configuration . h <nl> + # - sed - i ' s / # define EXTRUDERS 2 / # define EXTRUDERS 3 / g ' Marlin / Configuration . h <nl> + # - rm - rf . build / <nl> + # - ino build - m mega2560 <nl> # enable PIDTEMPBED <nl> - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - sed - i ' s / \ / \ / # define PIDTEMPBED / # define PIDTEMPBED / g ' Marlin / Configuration . h <nl> script : <nl> - rm - rf . build / <nl> - ino build - m mega2560 <nl> # MAKRPANEL <nl> - - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - - sed - i ' s / \ / \ / # define MAKRPANEL / # define MAKRPANEL / g ' Marlin / Configuration . h <nl> - - rm - rf . build / <nl> - - ino build - m mega2560 <nl> + # Needs to use melzi and sanguino hardware <nl> + # - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> + # - sed - i ' s / \ / \ / # define MAKRPANEL / # define MAKRPANEL / g ' Marlin / Configuration . h <nl> + # - rm - rf . build / <nl> + # - ino build - m mega2560 <nl> # REPRAP_DISCOUNT_SMART_CONTROLLER <nl> - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - sed - i ' s / \ / \ / # define REPRAP_DISCOUNT_SMART_CONTROLLER / # define REPRAP_DISCOUNT_SMART_CONTROLLER / g ' Marlin / Configuration . h <nl> script : <nl> - sed - i ' s / \ / \ / # define REPRAP_DISCOUNT_FULL_GRAPHIC_SMART_CONTROLLER / # define REPRAP_DISCOUNT_FULL_GRAPHIC_SMART_CONTROLLER / g ' Marlin / Configuration . h <nl> - rm - rf . build / <nl> - ino build - m mega2560 <nl> - # REPRAPWORLD_KEYPAD <nl> - - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - - sed - i ' s / \ / \ / # define ULTRA_LCD / # define ULTRA_LCD / g ' Marlin / Configuration . h <nl> - - sed - i ' s / \ / \ / # define REPRAPWORLD_KEYPAD / # define REPRAPWORLD_KEYPAD / g ' Marlin / Configuration . h <nl> - - sed - i ' s / \ / \ / # define REPRAPWORLD_KEYPAD_MOVE_STEP 10 . 0 / # define REPRAPWORLD_KEYPAD_MOVE_STEP 10 . 0 / g ' Marlin / Configuration . h <nl> - - rm - rf . build / <nl> - - ino build - m mega2560 <nl> + # REPRAPWORLD_KEYPAD <nl> + # Cant find configuration details to get it to compile <nl> + # - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> + # - sed - i ' s / \ / \ / # define ULTRA_LCD / # define ULTRA_LCD / g ' Marlin / Configuration . h <nl> + # - sed - i ' s / \ / \ / # define REPRAPWORLD_KEYPAD / # define REPRAPWORLD_KEYPAD / g ' Marlin / Configuration . 
h <nl> + # - sed - i ' s / \ / \ / # define REPRAPWORLD_KEYPAD_MOVE_STEP 10 . 0 / # define REPRAPWORLD_KEYPAD_MOVE_STEP 10 . 0 / g ' Marlin / Configuration . h <nl> + # - rm - rf . build / <nl> + # - ino build - m mega2560 <nl> # RA_CONTROL_PANEL <nl> - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - sed - i ' s / \ / \ / # define RA_CONTROL_PANEL / # define RA_CONTROL_PANEL / g ' Marlin / Configuration . h <nl> script : <nl> - ino build - m mega2560 <nl> # # # I2C PANELS # # # <nl> # LCD_I2C_SAINSMART_YWROBOT <nl> - - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - - sed - i ' s / \ / \ / # define LCD_I2C_SAINSMART_YWROBOT / # define LCD_I2C_SAINSMART_YWROBOT / g ' Marlin / Configuration . h <nl> - - rm - rf . build / <nl> - - ino build - m mega2560 <nl> + # Failing at the moment needs different library <nl> + # - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> + # - sed - i ' s / \ / \ / # define LCD_I2C_SAINSMART_YWROBOT / # define LCD_I2C_SAINSMART_YWROBOT / g ' Marlin / Configuration . h <nl> + # - rm - rf . build / <nl> + # - ino build - m mega2560 <nl> # LCD_I2C_PANELOLU2 <nl> - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - sed - i ' s / \ / \ / # define LCD_I2C_PANELOLU2 / # define LCD_I2C_PANELOLU2 / g ' Marlin / Configuration . h <nl> script : <nl> - sed - i ' s / \ / \ / # define LCD_I2C_VIKI / # define LCD_I2C_VIKI / g ' Marlin / Configuration . h <nl> - rm - rf . build / <nl> - ino build - m mega2560 <nl> - # SAV_3DLCD <nl> - - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - - sed - i ' s / \ / \ / # define SAV_3DLCD / # define SAV_3DLCD / g ' Marlin / Configuration . h <nl> - - rm - rf . build / <nl> - - ino build - m mega2560 <nl> # Enable filament sensor <nl> - cp Marlin / Configuration . h . backup Marlin / Configuration . h <nl> - sed - i ' s / \ / \ / # define FILAMENT_SENSOR / # define FILAMENT_SENSOR / g ' Marlin / Configuration . h <nl>
Commented out configs that are failing with no easy fixes for the moment
MarlinFirmware/Marlin
9d57c376db87f14ebd5b3f074b0af42d09664f11
2015-01-03T22:46:15Z
mmm a / folly / futures / Future . h <nl> ppp b / folly / futures / Future . h <nl> class FutureBase { <nl> } <nl> <nl> / / Variant : returns a value <nl> - / / e . g . f . then ( [ ] ( Try < T > t ) { return t . value ( ) ; } ) ; <nl> + / / e . g . f . thenTry ( [ ] ( Try < T > t ) { return t . value ( ) ; } ) ; <nl> template < typename F , typename R , bool isTry , typename . . . Args > <nl> typename std : : enable_if < ! R : : ReturnsFuture : : value , typename R : : Return > : : type <nl> thenImplementation ( F & & func , futures : : detail : : argResult < isTry , F , Args . . . > ) ; <nl> <nl> / / Variant : returns a Future <nl> - / / e . g . f . then ( [ ] ( Try < T > t ) { return makeFuture < T > ( t ) ; } ) ; <nl> + / / e . g . f . thenTry ( [ ] ( Try < T > t ) { return makeFuture < T > ( t ) ; } ) ; <nl> template < typename F , typename R , bool isTry , typename . . . Args > <nl> typename std : : enable_if < R : : ReturnsFuture : : value , typename R : : Return > : : type <nl> thenImplementation ( F & & func , futures : : detail : : argResult < isTry , F , Args . . . > ) ; <nl> DeferredExecutor * stealDeferredExecutor ( SemiFuture < T > & future ) ; <nl> / / / - The consumer - side should generally start with a SemiFuture , not a Future . <nl> / / / - Example , when a library creates and returns a future , it should usually <nl> / / / return a ` SemiFuture ` , not a Future . <nl> - / / / - Reason : so the thread policy for continuations ( ` . then ( ) ` , etc . ) can be <nl> + / / / - Reason : so the thread policy for continuations ( ` . thenValue ` , etc . ) can be <nl> / / / specified by the library ' s caller ( using ` . via ( ) ` ) . <nl> / / / - A SemiFuture is converted to a Future using ` . via ( ) ` . <nl> / / / - Use ` makePromiseContract ( ) ` when creating both a Promise and an associated <nl> DeferredExecutor * stealDeferredExecutor ( SemiFuture < T > & future ) ; <nl> / / / <nl> / / / When practical , prefer SemiFuture / Future ' s nonblocking style / pattern : <nl> / / / <nl> - / / / - the nonblocking style uses continuations , e . g . , ` . then ( ) ` , etc . ; the <nl> + / / / - the nonblocking style uses continuations , e . g . , ` . thenValue ` , etc . ; the <nl> / / / continuations are deferred until the result is available . <nl> / / / - the blocking style blocks until complete , e . g . , ` . wait ( ) ` , ` . get ( ) ` , etc . <nl> / / / - the two styles cannot be mixed within the same future ; use one or the <nl> class SemiFuture : private futures : : detail : : FutureBase < T > { <nl> / / / Preconditions : <nl> / / / <nl> / / / - ` valid ( ) = = true ` ( else throws FutureInvalid ) <nl> - / / / - must not have a continuation , e . g . , via ` . then ( ) ` or similar <nl> + / / / - must not have a continuation , e . g . , via ` . thenValue ( ) ` or similar <nl> / / / <nl> / / / Postconditions : <nl> / / / <nl> std : : pair < Promise < T > , SemiFuture < T > > makePromiseContract ( ) { <nl> / / / - The consumer - side should generally start with a SemiFuture , not a Future . <nl> / / / - Example , when a library creates and returns a future , it should usually <nl> / / / return a ` SemiFuture ` , not a Future . <nl> - / / / - Reason : so the thread policy for continuations ( ` . then ( ) ` , etc . ) can be <nl> + / / / - Reason : so the thread policy for continuations ( ` . thenValue ` , etc . ) can be <nl> / / / specified by the library ' s caller ( using ` . via ( ) ` ) . <nl> / / / - A SemiFuture is converted to a Future using ` . via ( ) ` . 
<nl> / / / - Use ` makePromiseContract ( ) ` when creating both a Promise and an associated <nl> std : : pair < Promise < T > , SemiFuture < T > > makePromiseContract ( ) { <nl> / / / <nl> / / / When practical , prefer SemiFuture / Future ' s nonblocking style / pattern : <nl> / / / <nl> - / / / - the nonblocking style uses continuations , e . g . , ` . then ( ) ` , etc . ; the <nl> + / / / - the nonblocking style uses continuations , e . g . , ` . thenValue ` , etc . ; the <nl> / / / continuations are deferred until the result is available . <nl> / / / - the blocking style blocks until complete , e . g . , ` . wait ( ) ` , ` . get ( ) ` , etc . <nl> / / / - the two styles cannot be mixed within the same future ; use one or the <nl> class Future : private futures : : detail : : FutureBase < T > { <nl> / / / <nl> / / / A Future for the return type of func is returned . <nl> / / / <nl> - / / / Future < string > f2 = f1 . then ( [ ] ( Try < T > & & ) { return string ( " foo " ) ; } ) ; <nl> + / / / Future < string > f2 = f1 . thenTry ( [ ] ( Try < T > & & ) { return string ( " foo " ) ; } ) ; <nl> / / / <nl> / / / Preconditions : <nl> / / / <nl> class Future : private futures : : detail : : FutureBase < T > { <nl> / / / struct Worker { R doWork ( Try < T > ) ; } <nl> / / / <nl> / / / Worker * w ; <nl> - / / / Future < R > f2 = f1 . then ( & Worker : : doWork , w ) ; <nl> + / / / Future < R > f2 = f1 . thenTry ( & Worker : : doWork , w ) ; <nl> / / / <nl> / / / This is just sugar for <nl> / / / <nl> - / / / f1 . then ( std : : bind ( & Worker : : doWork , w ) ) ; <nl> + / / / f1 . thenTry ( std : : bind ( & Worker : : doWork , w ) ) ; <nl> / / / <nl> / / / Preconditions : <nl> / / / <nl> class Future : private futures : : detail : : FutureBase < T > { <nl> <nl> / / clang - format off <nl> [ [ deprecated ( <nl> - " must be rvalue - qualified , e . g . , std : : move ( future ) . then ( ) " ) ] ] <nl> + " must be rvalue - qualified , e . g . , std : : move ( future ) . thenValue ( ) " ) ] ] <nl> Future < Unit > then ( ) & = delete ; <nl> / / clang - format on <nl> <nl> class Future : private futures : : detail : : FutureBase < T > { <nl> / / / throw std : : runtime_error ( " oh no ! " ) ; <nl> / / / return 42 ; <nl> / / / } ) <nl> - / / / . onError ( [ ] ( std : : runtime_error & e ) { <nl> + / / / . thenError < std : : runtime_error > ( [ ] ( std : : runtime_error & e ) { <nl> / / / LOG ( INFO ) < < " std : : runtime_error : " < < e . what ( ) ; <nl> / / / return - 1 ; / / or makeFuture < int > ( - 1 ) <nl> / / / } ) ; <nl>
Modify comments in Future . h to use thenValue , thenTry and thenError rather than then and onError
facebook/folly
13deb4573016a85a636d4dbab90aac5a7651bb52
2018-09-15T19:57:32Z
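The comment rewrites above track folly's move from the overloaded `.then()` to continuations that name their input: `.thenValue` for the unwrapped value, `.thenTry` for the `Try<T>`. A short sketch of that chaining style, assuming a program linked against folly — the inline executor stands in for a real thread pool to keep the example self-contained:

```cpp
#include <folly/futures/Future.h>
#include <folly/executors/InlineExecutor.h>
#include <iostream>

int main() {
    auto &exec = folly::InlineExecutor::instance();
    folly::makeSemiFuture(21)
        .via(&exec)                                // attach an executor first
        .thenValue([](int v) { return v * 2; })    // value-taking continuation
        .thenTry([](folly::Try<int> &&t) {         // Try-taking continuation
            return t.hasValue() ? t.value() : -1;
        })
        .thenValue([](int v) { std::cout << v << "\n"; })
        .wait();
    return 0;
}
```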
mmm a / tensorflow / contrib / losses / python / losses / loss_ops . py <nl> ppp b / tensorflow / contrib / losses / python / losses / loss_ops . py <nl> <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import nn <nl> from tensorflow . python . ops import nn_ops <nl> - <nl> + from tensorflow . python . util . deprecation import deprecated <nl> <nl> __all__ = [ " absolute_difference " , <nl> " add_loss " , <nl> def _safe_mean ( losses , num_present ) : <nl> return _safe_div ( total_loss , num_present ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . compute_weighted_loss instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , " ` weight ` is being deprecated , use ` weights ` . " , " weight " ) <nl> def compute_weighted_loss ( <nl> def _num_present ( losses , weights , per_batch = False ) : <nl> return num_per_batch if per_batch else math_ops . reduce_sum ( num_per_batch ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . add_loss instead . " ) <nl> @ add_arg_scope <nl> def add_loss ( loss , loss_collection = ops . GraphKeys . LOSSES ) : <nl> " " " Adds a externally defined loss to the collection of losses . <nl> def add_loss ( loss , loss_collection = ops . GraphKeys . LOSSES ) : <nl> ops . add_to_collection ( loss_collection , loss ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . get_losses instead . " ) <nl> def get_losses ( scope = None , loss_collection = ops . GraphKeys . LOSSES ) : <nl> " " " Gets the list of losses from the loss_collection . <nl> <nl> def get_losses ( scope = None , loss_collection = ops . GraphKeys . LOSSES ) : <nl> return ops . get_collection ( loss_collection , scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . get_regularization_losses instead . " ) <nl> def get_regularization_losses ( scope = None ) : <nl> " " " Gets the regularization losses . <nl> <nl> def get_regularization_losses ( scope = None ) : <nl> return ops . get_collection ( ops . GraphKeys . REGULARIZATION_LOSSES , scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . get_total_loss instead . " ) <nl> def get_total_loss ( add_regularization_losses = True , name = " total_loss " ) : <nl> " " " Returns a tensor whose value represents the total loss . <nl> <nl> def get_total_loss ( add_regularization_losses = True , name = " total_loss " ) : <nl> return math_ops . add_n ( losses , name = name ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . absolute_difference instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , <nl> " ` targets ` is being deprecated , use ` labels ` . " <nl> def absolute_difference ( <nl> return compute_weighted_loss ( losses , weights , scope = scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . sigmoid_cross_entropy instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , " ` weight ` is being deprecated , use ` weights ` " , " weight " ) <nl> def sigmoid_cross_entropy ( <nl> def sigmoid_cross_entropy ( <nl> return compute_weighted_loss ( losses , weights , scope = scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . softmax_cross_entropy instead . 
" ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , " ` weight ` is being deprecated , use ` weights ` " , " weight " ) <nl> def softmax_cross_entropy ( <nl> def softmax_cross_entropy ( <nl> return compute_weighted_loss ( losses , weights , scope = scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . sparse_softmax_cross_entropy instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , " ` weight ` is being deprecated , use ` weights ` " , " weight " ) <nl> def sparse_softmax_cross_entropy ( <nl> def sparse_softmax_cross_entropy ( <nl> return compute_weighted_loss ( losses , weights , scope = scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . log_loss instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , <nl> " ` targets ` is being deprecated , use ` labels ` . " <nl> def log_loss ( <nl> return compute_weighted_loss ( losses , weights , scope = scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . hinge_loss instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , " ` target ` is being deprecated , use ` labels ` . " , " target " ) <nl> def hinge_loss ( logits , labels = None , scope = None , target = None ) : <nl> def hinge_loss ( logits , labels = None , scope = None , target = None ) : <nl> return nn_ops . relu ( math_ops . sub ( all_ones , math_ops . mul ( labels , logits ) ) ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . mean_squared_error instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , <nl> " ` targets ` is being deprecated , use ` labels ` . " <nl> def mean_squared_error ( <nl> return compute_weighted_loss ( losses , weights , scope = scope ) <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . mean_pairwise_squared_error instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , <nl> " ` targets ` is being deprecated , use ` labels ` . " <nl> def mean_pairwise_squared_error ( <nl> return mean_loss <nl> <nl> <nl> + @ deprecated ( " 2016 - 12 - 30 " , " Use tf . losses . cosine_distance instead . " ) <nl> @ deprecated_args ( <nl> " 2016 - 11 - 25 " , <nl> " ` targets ` is being deprecated , use ` labels ` . " <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> py_library ( <nl> " : training " , <nl> " : ops " , <nl> " : test_ops " , <nl> + " / / tensorflow / python / ops / losses " , <nl> " / / tensorflow / python / debug : debug_py " , <nl> ] + if_not_windows ( [ <nl> " / / tensorflow / contrib : contrib_py " , <nl> mmm a / tensorflow / python / __init__ . py <nl> ppp b / tensorflow / python / __init__ . py <nl> <nl> from tensorflow . python . ops import resources <nl> from tensorflow . python . ops import sdca_ops as sdca <nl> from tensorflow . python . ops import image_ops as image <nl> + from tensorflow . python . ops import losses <nl> from tensorflow . python . ops import sets <nl> from tensorflow . python . user_ops import user_ops <nl> from tensorflow . python . 
util import compat <nl> <nl> ' graph_util ' , <nl> ' image ' , <nl> ' logging ' , <nl> + ' losses ' , <nl> ' newaxis ' , <nl> ' nn ' , <nl> ' python_io ' , <nl> <nl> remove_undocumented ( __name__ , _allowed_symbols , <nl> [ framework_lib , array_ops , client_lib , check_ops , <nl> compat , constant_op , control_flow_ops , functional_ops , <nl> - histogram_ops , io_ops , math_ops , nn , resource_loader , <nl> - resources , sets , script_ops , session_ops , sparse_ops , <nl> - state_ops , string_ops , summary , tensor_array_ops , train , <nl> - layers ] ) <nl> + histogram_ops , io_ops , losses , math_ops , nn , <nl> + resource_loader , resources , sets , script_ops , session_ops , <nl> + sparse_ops , state_ops , string_ops , summary , <nl> + tensor_array_ops , train , layers ] ) <nl> <nl> # Special dunders that we choose to export : <nl> _exported_dunders = set ( [ <nl> mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> tf_py_test ( <nl> additional_deps = [ " / / tensorflow : tensorflow_py " ] , <nl> ) <nl> <nl> + tf_py_test ( <nl> + name = " losses_test " , <nl> + size = " small " , <nl> + srcs = [ " losses_test . py " ] , <nl> + additional_deps = [ " / / tensorflow : tensorflow_py " ] , <nl> + ) <nl> + <nl> tf_py_test ( <nl> name = " matrix_inverse_op_test " , <nl> size = " small " , <nl> new file mode 100644 <nl> index 0000000000000 . . 2393124ba3155 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / kernel_tests / losses_test . py <nl> <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + " " " Tests for losses . " " " <nl> + # pylint : disable = unused - import , g - bad - import - order <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + # pylint : enable = unused - import <nl> + <nl> + import numpy as np <nl> + import tensorflow as tf <nl> + <nl> + <nl> + class AbsoluteDifferenceLossTest ( tf . test . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + self . _predictions = tf . constant ( [ 4 , 8 , 12 , 8 , 1 , 3 ] , shape = ( 2 , 3 ) ) <nl> + self . _labels = tf . constant ( [ 1 , 9 , 2 , - 5 , - 2 , 6 ] , shape = ( 2 , 3 ) ) <nl> + <nl> + def testValueErrorThrownWhenWeightIsNone ( self ) : <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . absolute_difference ( <nl> + self . _predictions , self . _predictions , weights = None ) <nl> + <nl> + def testAllCorrectNoLossWeight ( self ) : <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _predictions , self . _predictions ) <nl> + with self . test_session ( ) : <nl> + self . 
assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLoss ( self ) : <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 5 . 5 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithPythonScalarWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 5 . 5 * weights , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions , tf . constant ( weights ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 5 . 5 * weights , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeights ( self ) : <nl> + weights = tf . constant ( [ 1 . 2 , 0 . 0 ] , shape = [ 2 , ] ) <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 5 . 6 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithTwoDimBatchSpecificWeights ( self ) : <nl> + weights = tf . constant ( [ 1 . 2 , 0 . 0 ] , shape = [ 2 , 1 ] ) <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 5 . 6 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithSampleSpecificWeights ( self ) : <nl> + weights = tf . constant ( [ 3 , 6 , 5 , 0 , 4 , 2 ] , shape = [ 2 , 3 ] ) <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 16 . 6 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithSampleSpecificWeightsMostZero ( self ) : <nl> + weights = tf . constant ( [ 0 , 0 , 0 , 0 , 0 , 2 ] , shape = [ 2 , 3 ] ) <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 6 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testLossWithSampleSpecificWeightsAllZero ( self ) : <nl> + weights = tf . zeros ( ( 2 , 3 ) ) <nl> + loss = tf . losses . absolute_difference ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + <nl> + class SoftmaxCrossEntropyLossTest ( tf . test . TestCase ) : <nl> + <nl> + def testNoneWeightRaisesValueError ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] , <nl> + [ 0 , 0 , 1 ] ] ) <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . softmax_cross_entropy ( labels , logits , weights = None ) <nl> + <nl> + def testAllCorrect ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . 
constant ( [ [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] , <nl> + [ 0 , 0 , 1 ] ] ) <nl> + loss = tf . losses . softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( ' softmax_cross_entropy_loss / value ' , loss . op . name ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 0 . 0 , 3 ) <nl> + <nl> + def testAllWrong ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' softmax_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 10 . 0 , 3 ) <nl> + <nl> + def testNonZeroLossWithPythonScalarWeight ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + weights = 2 . 3 <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . softmax_cross_entropy ( labels , logits , weights ) <nl> + self . assertAlmostEqual ( weights * 10 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeight ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + weights = 2 . 3 <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . softmax_cross_entropy ( <nl> + labels , logits , tf . constant ( weights ) ) <nl> + self . assertAlmostEqual ( weights * 10 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeights ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 , 5 . 6 ] , shape = [ 3 ] ) <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . softmax_cross_entropy ( labels , logits , weights ) <nl> + self . assertAlmostEqual ( ( 1 . 2 + 3 . 4 + 5 . 6 ) * 10 . 0 / 3 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testAllWrongAllWeightsMissing ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + weights = tf . constant ( [ 0 , 0 , 0 ] , shape = [ 3 ] ) <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . softmax_cross_entropy ( labels , logits , weights ) <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testSomeWeightsMissing ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + weights = tf . constant ( [ 1 . 2 , 0 , 0 ] , shape = [ 3 ] ) <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . softmax_cross_entropy ( labels , logits , weights ) <nl> + self . 
assertAlmostEqual ( 12 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testSoftmaxWithMeasurementSpecificWeightsRaisesException ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] , <nl> + [ 0 , 0 , 1 ] ] ) <nl> + weights = tf . constant ( [ [ 3 , 4 , 5 ] , <nl> + [ 2 , 6 , 0 ] , <nl> + [ 8 , 0 , 1 ] ] ) <nl> + <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . softmax_cross_entropy ( <nl> + labels , logits , weights = weights ) . eval ( ) <nl> + <nl> + def testSoftmaxLabelSmoothing ( self ) : <nl> + with self . test_session ( ) : <nl> + # Softmax Cross Entropy Loss is : <nl> + # - \ sum_i p_i \ log q_i <nl> + # where for a softmax activation <nl> + # \ log q_i = x_i - \ log \ sum_j \ exp x_j <nl> + # = x_i - x_max - \ log \ sum_j \ exp ( x_j - x_max ) <nl> + # For our activations , [ 100 , - 100 , - 100 ] the log partion function becomes <nl> + # \ log ( exp ( 0 ) + exp ( - 200 ) + exp ( - 200 ) ) = 0 <nl> + # so our log softmaxes become : [ 0 , - 200 , - 200 ] <nl> + # so our cross entropy loss is : <nl> + # - ( 1 - L + L / n ) * 0 + 400 * L / n = 400 L / n <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 1 , 0 , 0 ] ] ) <nl> + label_smoothing = 0 . 1 <nl> + loss = tf . losses . softmax_cross_entropy ( <nl> + labels , logits , label_smoothing = label_smoothing ) <nl> + self . assertEquals ( loss . op . name , ' softmax_cross_entropy_loss / value ' ) <nl> + expected_value = 400 . 0 * label_smoothing / 3 . 0 <nl> + self . assertAlmostEqual ( loss . eval ( ) , expected_value , 3 ) <nl> + <nl> + <nl> + class SparseSoftmaxCrossEntropyLossTest ( tf . test . TestCase ) : <nl> + <nl> + def testNoneWeightRaisesValueError ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 ] , [ 1 ] , [ 2 ] ] ) <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights = None ) <nl> + <nl> + def testAllCorrectInt32Labels ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 ] , [ 1 ] , [ 2 ] ] , dtype = tf . int32 ) <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sparse_softmax_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 0 . 0 , 3 ) <nl> + <nl> + def testAllCorrectInt64Labels ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 ] , [ 1 ] , [ 2 ] ] , dtype = tf . int64 ) <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sparse_softmax_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 0 . 0 , 3 ) <nl> + <nl> + def testAllCorrectNonColumnLabels ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . 
constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ 0 , 1 , 2 ] ) <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sparse_softmax_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 0 . 0 , 3 ) <nl> + <nl> + def testAllWrongInt32Labels ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] , dtype = tf . int32 ) <nl> + <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sparse_softmax_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 10 . 0 , 3 ) <nl> + <nl> + def testAllWrongInt64Labels ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] , dtype = tf . int64 ) <nl> + <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sparse_softmax_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 10 . 0 , 3 ) <nl> + <nl> + def testAllWrongNonColumnLabels ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ 2 , 0 , 1 ] ) <nl> + <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sparse_softmax_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 10 . 0 , 3 ) <nl> + <nl> + def testNonZeroLossWithPythonScalarWeight ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] ) <nl> + weights = 2 . 3 <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights ) <nl> + self . assertAlmostEqual ( weights * 10 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeight ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] ) <nl> + weights = 2 . 3 <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , tf . constant ( weights ) ) <nl> + self . assertAlmostEqual ( weights * 10 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeights ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] ) <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 , 5 . 6 ] , shape = [ 3 ] ) <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights ) <nl> + self . 
assertAlmostEqual ( ( 1 . 2 + 3 . 4 + 5 . 6 ) * 10 . 0 / 3 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithColumnWeights ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] ) <nl> + weights = tf . constant ( [ [ 1 . 2 ] , [ 3 . 4 ] , [ 5 . 6 ] ] ) <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights ) <nl> + self . assertAlmostEqual ( ( 1 . 2 + 3 . 4 + 5 . 6 ) * 10 . 0 / 3 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testAllWrongAllWeightsMissing ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] ) <nl> + weights = tf . constant ( [ 0 , 0 , 0 ] , shape = [ 3 ] ) <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights ) <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testSomeWeightsMissing ( self ) : <nl> + logits = tf . constant ( [ [ 10 . 0 , 0 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 10 . 0 , 0 . 0 ] , <nl> + [ 0 . 0 , 0 . 0 , 10 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 2 ] , [ 0 ] , [ 1 ] ] ) <nl> + weights = tf . constant ( [ 1 . 2 , 0 , 0 ] , shape = [ 3 ] ) <nl> + with self . test_session ( ) : <nl> + loss = tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights ) <nl> + self . assertAlmostEqual ( 12 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testMeasurementSpecificWeightsRaisesException ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 ] , [ 1 ] , [ 2 ] ] ) <nl> + weights = tf . constant ( [ [ 3 , 4 , 5 ] , <nl> + [ 2 , 6 , 0 ] , <nl> + [ 8 , 0 , 1 ] ] ) <nl> + <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights = weights ) . eval ( ) <nl> + <nl> + def testInconsistentWeightSizeRaisesException ( self ) : <nl> + " " " The weight tensor has incorrect number of elements . " " " <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 ] , [ 1 ] , [ 2 ] ] ) <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 , 5 . 6 , 7 . 8 ] ) <nl> + <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights = weights ) . eval ( ) <nl> + <nl> + def testInconsistentLabelSizeRaisesException ( self ) : <nl> + " " " The label tensor has incorrect number of elements . " " " <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 ] , [ 1 ] , [ 2 ] , [ 3 ] ] ) <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 , 5 . 6 ] ) <nl> + <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights = weights ) . 
eval ( ) <nl> + <nl> + def testInconsistentWeightShapeRaisesException ( self ) : <nl> + " " " The weight tensor has incorrect shape . " " " <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 ] , [ 1 ] , [ 2 ] , [ 3 ] ] ) <nl> + weights = tf . constant ( [ [ 1 . 2 , 3 . 4 ] , [ 5 . 6 , 7 . 8 ] ] ) <nl> + <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights = weights ) . eval ( ) <nl> + <nl> + def testInconsistentLabelShapeRaisesException ( self ) : <nl> + " " " The label tensor has incorrect shape . " " " <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 1 ] , [ 2 , 3 ] ] ) <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 , 5 . 6 , 7 . 8 ] ) <nl> + <nl> + with self . assertRaises ( tf . errors . InvalidArgumentError ) : <nl> + tf . losses . sparse_softmax_cross_entropy ( <nl> + labels , logits , weights = weights ) . eval ( ) <nl> + <nl> + <nl> + class SigmoidCrossEntropyLossTest ( tf . test . TestCase ) : <nl> + <nl> + def testAllCorrectSigmoid ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] , <nl> + [ 0 , 0 , 1 ] ] ) <nl> + loss = tf . losses . sigmoid_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sigmoid_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testLossWithSingleDimPlaceholderForLogitsAndWeights1 ( self ) : <nl> + logits = tf . placeholder ( tf . float32 , shape = ( None , 1 ) ) <nl> + labels = tf . placeholder ( tf . float32 , shape = ( None , 1 ) ) <nl> + weights = tf . ones_like ( logits , dtype = tf . float32 ) <nl> + <nl> + loss = tf . losses . sigmoid_cross_entropy ( labels , logits , weights ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { <nl> + logits : np . ones ( ( 32 , 1 ) ) , <nl> + labels : np . ones ( ( 32 , 1 ) ) , <nl> + } ) <nl> + self . assertAlmostEqual ( 0 . 313 , loss , 3 ) <nl> + <nl> + def testLossWithSingleDimPlaceholderForLogitsAndWeights2 ( self ) : <nl> + logits = tf . placeholder ( tf . float32 , shape = ( None , 2 ) ) <nl> + labels = tf . placeholder ( tf . float32 , shape = ( None , 2 ) ) <nl> + weights = tf . ones_like ( logits , dtype = tf . float32 ) <nl> + <nl> + loss = tf . losses . sigmoid_cross_entropy ( labels , logits , weights ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { <nl> + logits : np . ones ( ( 32 , 2 ) ) , <nl> + labels : np . ones ( ( 32 , 2 ) ) , <nl> + } ) <nl> + self . assertAlmostEqual ( 0 . 313 , loss , 3 ) <nl> + <nl> + def testAllWrongSigmoid ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 
0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + loss = tf . losses . sigmoid_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sigmoid_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , 600 . 0 / 9 . 0 , 3 ) <nl> + <nl> + def testAllWrongSigmoidWithMeasurementSpecificWeights ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , - 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 0 , 0 , 1 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 1 , 0 ] ] ) <nl> + weights = tf . constant ( [ [ 3 , 4 , 5 ] , <nl> + [ 2 , 6 , 0 ] , <nl> + [ 8 , 0 , 1 ] ] ) <nl> + loss = tf . losses . sigmoid_cross_entropy ( <nl> + labels , logits , weights ) <nl> + self . assertEquals ( loss . op . name , ' sigmoid_cross_entropy_loss / value ' ) <nl> + self . assertAlmostEqual ( 1700 . 0 / 7 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testMultiCorrectSigmoid ( self ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , 100 . 0 ] , <nl> + [ 100 . 0 , 100 . 0 , - 100 . 0 ] , <nl> + [ - 100 . 0 , 100 . 0 , 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 1 , 0 , 1 ] , <nl> + [ 1 , 1 , 0 ] , <nl> + [ 0 , 1 , 1 ] ] ) <nl> + loss = tf . losses . sigmoid_cross_entropy ( labels , logits ) <nl> + self . assertEquals ( loss . op . name , ' sigmoid_cross_entropy_loss / value ' ) <nl> + <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( loss . eval ( ) , 0 . 0 , 3 ) <nl> + <nl> + def testSigmoidLabelSmoothingCorrect ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] ] ) <nl> + labels = tf . constant ( [ [ 1 , 0 , 1 ] ] ) <nl> + # Sigmoid cross entropy loss is : <nl> + # max ( x , 0 ) - x * z + log ( 1 + exp ( - abs ( x ) ) ) <nl> + # The new labels are : <nl> + # z ' = z * ( 1 - L ) + 0 . 5 L <nl> + # 1 - > 1 - 0 . 5 L <nl> + # 0 - > 0 . 5 L <nl> + # here we expect : <nl> + # 1 / 3 * ( 100 - 100 * ( 1 - 0 . 5 L ) + 0 <nl> + # + 0 + 100 * ( 0 . 5 L ) + 0 <nl> + # + 0 + 100 * ( 1 - 0 . 5 L ) + 0 ) <nl> + # = 1 / 3 * ( 100 + 50 L ) <nl> + label_smoothing = 0 . 1 <nl> + loss = tf . losses . sigmoid_cross_entropy ( <nl> + labels , logits , label_smoothing = label_smoothing ) <nl> + self . assertEquals ( loss . op . name , ' sigmoid_cross_entropy_loss / value ' ) <nl> + expected_value = ( 100 . 0 + 50 . 0 * label_smoothing ) / 3 . 0 <nl> + self . assertAlmostEqual ( loss . eval ( ) , expected_value , 3 ) <nl> + <nl> + def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel ( self ) : <nl> + with self . test_session ( ) : <nl> + label_smoothing = 0 . 1 <nl> + sigmoid_logits = tf . constant ( [ [ 100 . 0 , - 100 . 0 , - 100 . 0 ] ] ) <nl> + sigmoid_labels = tf . constant ( [ [ 1 , 0 , 1 ] ] ) <nl> + sigmoid_loss = tf . losses . sigmoid_cross_entropy ( <nl> + sigmoid_labels , sigmoid_logits , label_smoothing = label_smoothing ) <nl> + <nl> + softmax_logits = tf . constant ( [ [ 0 . 0 , 100 . 0 ] , [ 100 . 0 , 0 . 0 ] , [ 100 . 0 , 0 . 0 ] ] ) <nl> + softmax_labels = tf . constant ( [ [ 0 , 1 ] , [ 1 , 0 ] , [ 0 , 1 ] ] ) <nl> + softmax_loss = tf . losses . softmax_cross_entropy ( <nl> + softmax_labels , softmax_logits , label_smoothing = label_smoothing ) <nl> + self . 
assertAlmostEqual ( sigmoid_loss . eval ( ) , softmax_loss . eval ( ) , 3 ) <nl> + <nl> + <nl> + class LogLossTest ( tf . test . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + predictions = np . asarray ( [ . 9 , . 2 , . 2 , . 8 , . 4 , . 6 ] ) . reshape ( ( 2 , 3 ) ) <nl> + labels = np . asarray ( [ 1 . 0 , 0 . 0 , 1 . 0 , 1 . 0 , 0 . 0 , 0 . 0 ] ) . reshape ( ( 2 , 3 ) ) <nl> + <nl> + self . _np_predictions = predictions <nl> + self . _np_labels = labels <nl> + <nl> + epsilon = 1e - 7 <nl> + self . _expected_losses = np . multiply ( <nl> + labels , np . log ( predictions + epsilon ) ) + np . multiply ( <nl> + 1 - labels , np . log ( 1 - predictions + epsilon ) ) <nl> + <nl> + self . _predictions = tf . constant ( predictions ) <nl> + self . _labels = tf . constant ( labels ) <nl> + <nl> + def testValueErrorThrownWhenWeightIsNone ( self ) : <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . log_loss ( self . _labels , self . _labels , weights = None ) <nl> + <nl> + def testAllCorrectNoLossWeight ( self ) : <nl> + loss = tf . losses . log_loss ( self . _labels , self . _labels ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testAllCorrectNoLossWeightWithPlaceholder ( self ) : <nl> + tf_predictions = tf . placeholder ( tf . float32 , shape = self . _np_labels . shape ) <nl> + loss = tf . losses . log_loss ( self . _labels , tf_predictions ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( feed_dict = { <nl> + tf_predictions : self . _np_labels } ) , 3 ) <nl> + <nl> + def testNonZeroLoss ( self ) : <nl> + loss = tf . losses . log_loss ( self . _labels , self . _predictions ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( - np . sum ( self . _expected_losses ) / 6 . 0 , <nl> + loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithPythonScalarWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( weights * - np . sum ( self . _expected_losses ) / 6 . 0 , <nl> + loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , self . _predictions , tf . constant ( weights ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( weights * - np . sum ( self . _expected_losses ) / 6 . 0 , <nl> + loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeightAndPlaceholder ( self ) : <nl> + tf_predictions = tf . placeholder ( tf . float32 , <nl> + shape = self . _np_predictions . shape ) <nl> + weights = 2 . 3 <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , tf_predictions , tf . constant ( weights ) ) <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { tf_predictions : self . _np_predictions } ) <nl> + self . assertAlmostEqual ( weights * - np . sum ( self . _expected_losses ) / 6 . 0 , <nl> + loss , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly ( self ) : <nl> + tf_predictions = tf . placeholder ( tf . float32 , shape = [ None , None ] ) <nl> + weights = 2 . 3 <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , tf_predictions , tf . constant ( weights ) ) <nl> + with self . 
test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { tf_predictions : self . _np_predictions } ) <nl> + self . assertAlmostEqual ( weights * - np . sum ( self . _expected_losses ) / 6 . 0 , <nl> + loss , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeights ( self ) : <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 ] , shape = [ 2 ] ) <nl> + expected_losses = np . multiply ( <nl> + self . _expected_losses , <nl> + np . asarray ( [ 1 . 2 , 1 . 2 , 1 . 2 , 3 . 4 , 3 . 4 , 3 . 4 ] ) . reshape ( ( 2 , 3 ) ) ) <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( - np . sum ( expected_losses ) / 6 . 0 , <nl> + loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero ( self ) : <nl> + weights = tf . constant ( [ 1 . 2 , 0 ] , shape = [ 2 ] ) <nl> + expected_losses = np . multiply ( <nl> + self . _expected_losses , <nl> + np . asarray ( [ 1 . 2 , 1 . 2 , 1 . 2 , 0 , 0 , 0 ] ) . reshape ( ( 2 , 3 ) ) ) <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( - np . sum ( expected_losses ) / 3 . 0 , <nl> + loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero ( self ) : <nl> + weights = tf . constant ( [ 1 . 2 , 0 ] , shape = [ 2 , 1 ] ) <nl> + expected_losses = np . multiply ( <nl> + self . _expected_losses , <nl> + np . asarray ( [ 1 . 2 , 1 . 2 , 1 . 2 , 0 , 0 , 0 ] ) . reshape ( ( 2 , 3 ) ) ) <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( - np . sum ( expected_losses ) / 3 . 0 , <nl> + loss . eval ( ) , 3 ) <nl> + <nl> + def testWeightsWithSameNumDimsButWrongShapeThrowsException ( self ) : <nl> + weights = tf . constant ( np . random . normal ( size = ( 2 , 4 ) ) , shape = [ 2 , 4 ] ) <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . log_loss ( self . _labels , self . _predictions , weights ) <nl> + <nl> + def testNonZeroLossWithMeasurementSpecificWeights ( self ) : <nl> + weights = np . array ( [ 3 , 6 , 5 , 0 , 4 , 2 ] ) . reshape ( ( 2 , 3 ) ) <nl> + expected_losses = np . multiply ( self . _expected_losses , weights ) <nl> + <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , <nl> + self . _predictions , <nl> + tf . constant ( weights , shape = ( 2 , 3 ) ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( - np . sum ( expected_losses ) / 5 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder ( self ) : <nl> + weights = np . array ( [ 3 , 6 , 5 , 0 , 4 , 2 ] ) . reshape ( ( 2 , 3 ) ) <nl> + expected_losses = np . multiply ( self . _expected_losses , weights ) <nl> + <nl> + tf_predictions = tf . placeholder ( tf . float32 , shape = [ 2 , 3 ] ) <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , <nl> + tf_predictions , <nl> + tf . constant ( weights , shape = ( 2 , 3 ) ) ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { tf_predictions : self . _np_predictions } ) <nl> + self . assertAlmostEqual ( - np . sum ( expected_losses ) / 5 . 
0 , loss , 3 ) <nl> + <nl> + def testNonZeroLossWithSampleSpecificWeightsMostZero ( self ) : <nl> + weights = np . array ( [ 0 , 0 , 0 , 0 , 0 , 2 ] ) . reshape ( ( 2 , 3 ) ) <nl> + expected_losses = np . multiply ( self . _expected_losses , weights ) <nl> + <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , <nl> + self . _predictions , <nl> + tf . constant ( weights , shape = ( 2 , 3 ) ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( - np . sum ( expected_losses ) , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder ( self ) : <nl> + weights = np . array ( [ 0 , 0 , 0 , 0 , 0 , 2 ] ) . reshape ( ( 2 , 3 ) ) <nl> + expected_losses = np . multiply ( self . _expected_losses , weights ) <nl> + <nl> + tf_predictions = tf . placeholder ( tf . float32 , shape = [ 2 , 3 ] ) <nl> + tf_weights = tf . constant ( weights , shape = ( 2 , 3 ) ) <nl> + loss = tf . losses . log_loss ( self . _labels , tf_predictions , tf_weights ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { tf_predictions : self . _np_predictions } ) <nl> + self . assertAlmostEqual ( - np . sum ( expected_losses ) , loss , 3 ) <nl> + <nl> + def testLossWithSampleSpecificWeightsAllZero ( self ) : <nl> + tf_weights = tf . zeros ( shape = ( 2 , 3 ) ) <nl> + loss = tf . losses . log_loss ( <nl> + self . _labels , self . _predictions , tf_weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + <nl> + class HingeLossTest ( tf . test . TestCase ) : <nl> + <nl> + def testIncompatibleShapes ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ - 1 . 0 ] , [ 2 . 1 ] ] ) <nl> + labels = tf . constant ( [ 0 . 0 , 1 . 0 ] ) <nl> + with self . assertRaises ( ValueError ) : <nl> + _ = tf . losses . hinge_loss ( labels , logits ) . eval ( ) <nl> + <nl> + def testAllOutsideMargin ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ 1 . 2 , - 1 . 4 , - 1 . 0 , 2 . 1 ] ) <nl> + labels = tf . constant ( [ 1 . 0 , 0 . 0 , 0 . 0 , 1 . 0 ] ) <nl> + loss = tf . losses . hinge_loss ( labels , logits ) <nl> + self . assertAllClose ( loss . eval ( ) , 0 . 0 , atol = 1e - 3 ) <nl> + <nl> + def testSomeInsideMargin ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ - 0 . 7 ] , [ - 1 . 4 ] , [ 1 . 4 ] , [ 0 . 6 ] ] ) <nl> + labels = tf . constant ( [ [ 0 . 0 ] , [ 0 . 0 ] , [ 1 . 0 ] , [ 1 . 0 ] ] ) <nl> + loss = tf . losses . hinge_loss ( labels , logits ) <nl> + # Examples 1 and 4 are on the correct side of the hyperplane but within <nl> + # the margin so they incur some ( small ) loss . <nl> + self . assertAllClose ( loss . eval ( ) , 0 . 175 , atol = 1e - 3 ) <nl> + <nl> + def testSomeMisclassified ( self ) : <nl> + with self . test_session ( ) : <nl> + logits = tf . constant ( [ [ [ 1 . 2 ] , [ 0 . 4 ] , [ - 1 . 0 ] , [ - 1 . 1 ] ] ] ) <nl> + labels = tf . constant ( [ [ [ 1 . 0 ] , [ 0 . 0 ] , [ 0 . 0 ] , [ 1 . 0 ] ] ] ) <nl> + loss = tf . losses . hinge_loss ( labels , logits ) <nl> + # Examples 2 and 4 are on the wrong side of the hyperplane so they incur <nl> + # some ( fairly large ) loss . <nl> + self . assertAllClose ( loss . eval ( ) , 0 . 875 , atol = 1e - 3 ) <nl> + <nl> + <nl> + class MeanSquaredErrorTest ( tf . test . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + self . _predictions = tf . 
constant ( [ 4 , 8 , 12 , 8 , 1 , 3 ] , shape = ( 2 , 3 ) ) <nl> + self . _labels = tf . constant ( [ 1 , 9 , 2 , - 5 , - 2 , 6 ] , shape = ( 2 , 3 ) ) <nl> + <nl> + def testValueErrorThrownWhenWeightIsNone ( self ) : <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . mean_squared_error ( <nl> + self . _predictions , self . _predictions , weights = None ) <nl> + <nl> + def testAllCorrectNoLossWeight ( self ) : <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _predictions , self . _predictions ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLoss ( self ) : <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 49 . 5 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithPythonScalarWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 49 . 5 * weights , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions , tf . constant ( weights ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 49 . 5 * weights , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeights ( self ) : <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 ] , shape = [ 2 , ] ) <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 767 . 8 / 6 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithTwoDimBatchSpecificWeights ( self ) : <nl> + weights = tf . constant ( [ 1 . 2 , 3 . 4 ] , shape = [ 2 , 1 ] ) <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 767 . 8 / 6 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithSampleSpecificWeights ( self ) : <nl> + weights = tf . constant ( [ 3 , 6 , 5 , 0 , 4 , 2 ] , shape = [ 2 , 3 ] ) <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 587 / 5 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithSampleSpecificWeightsMostZero ( self ) : <nl> + weights = tf . constant ( [ 0 , 0 , 0 , 0 , 0 , 2 ] , shape = [ 2 , 3 ] ) <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 18 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testLossWithSampleSpecificWeightsAllZero ( self ) : <nl> + weights = tf . zeros ( ( 2 , 3 ) ) <nl> + loss = tf . losses . mean_squared_error ( <nl> + self . _labels , self . _predictions , weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + <nl> + class MeanPairwiseSquaresErrorTest ( tf . test . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + self . _predictions = np . array ( [ [ 4 , 8 , 12 ] , <nl> + [ 8 , 1 , 3 ] ] ) <nl> + self . 
_labels = np . array ( [ [ 1 , 9 , 2 ] , <nl> + [ - 5 , - 5 , 7 ] ] ) <nl> + <nl> + batch_size , dims = self . _labels . shape <nl> + <nl> + # Compute the expected loss ' manually ' . <nl> + total = np . zeros ( ( batch_size , 1 ) ) <nl> + for b in range ( batch_size ) : <nl> + for i in range ( dims ) : <nl> + for j in range ( dims ) : <nl> + x = self . _predictions [ b , i ] . item ( ) - self . _predictions [ b , j ] . item ( ) <nl> + y = self . _labels [ b , i ] . item ( ) - self . _labels [ b , j ] . item ( ) <nl> + tmp = ( x - y ) * ( x - y ) <nl> + total [ b ] + = tmp <nl> + <nl> + self . _expected_losses = np . divide ( total , 9 . 0 ) <nl> + <nl> + def testValueErrorThrownWhenWeightIsNone ( self ) : <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _labels ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + weights = None ) <nl> + <nl> + def testAllCorrectNoLossWeight ( self ) : <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _labels ) , <nl> + labels = tf . constant ( self . _labels ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLoss ( self ) : <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( np . sum ( self . _expected_losses ) , loss . eval ( ) , 3 ) <nl> + <nl> + def testGradientWithZeroWeight ( self ) : <nl> + with tf . Graph ( ) . as_default ( ) : <nl> + tf . set_random_seed ( 0 ) <nl> + <nl> + inputs = tf . ones ( ( 2 , 3 ) ) <nl> + weights = tf . get_variable ( ' weights ' , <nl> + shape = [ 3 , 4 ] , <nl> + initializer = tf . truncated_normal_initializer ( ) ) <nl> + predictions = tf . matmul ( inputs , weights ) <nl> + <nl> + optimizer = tf . train . MomentumOptimizer ( learning_rate = 0 . 001 , momentum = 0 . 9 ) <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions , <nl> + predictions , <nl> + 0 ) <nl> + <nl> + gradients_to_variables = optimizer . compute_gradients ( loss ) <nl> + <nl> + init_op = tf . global_variables_initializer ( ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + sess . run ( init_op ) <nl> + for grad , _ in gradients_to_variables : <nl> + np_grad = sess . run ( grad ) <nl> + self . assertFalse ( np . isnan ( np_grad ) . any ( ) ) <nl> + <nl> + def testNonZeroLossWithPythonScalarWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + weights = weights ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( weights * np . sum ( self . _expected_losses ) , <nl> + loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeight ( self ) : <nl> + weights = 2 . 3 <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + weights = tf . constant ( weights ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( weights * np . sum ( self . _expected_losses ) , <nl> + loss . 
eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarZeroWeight ( self ) : <nl> + weights = 0 <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + weights = tf . constant ( weights ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithScalarTensorWeightWithPlaceholder ( self ) : <nl> + weights = 2 . 3 <nl> + tf_predictions = tf . placeholder ( tf . float32 , shape = self . _predictions . shape ) <nl> + tf_labels = tf . placeholder ( tf . float32 , shape = self . _labels . shape ) <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf_predictions , <nl> + labels = tf_labels , <nl> + weights = tf . constant ( weights ) ) <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { <nl> + tf_predictions : self . _predictions , <nl> + tf_labels : self . _labels , <nl> + } ) <nl> + self . assertAlmostEqual ( weights * np . sum ( self . _expected_losses ) , loss , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeights ( self ) : <nl> + weights = np . asarray ( [ 2 . 0 , 1 . 0 ] ) . reshape ( ( 2 , 1 ) ) <nl> + expected_losses = np . multiply ( weights , self . _expected_losses ) <nl> + <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + weights = tf . constant ( weights , shape = [ 2 ] ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( np . sum ( expected_losses ) , loss . eval ( ) , 3 ) <nl> + <nl> + def testZeroLossWithOneDimBatchZeroWeights ( self ) : <nl> + weights = np . asarray ( [ 0 . 0 , 0 . 0 ] ) . reshape ( ( 2 , 1 ) ) <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + weights = tf . constant ( weights , shape = [ 2 ] ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 , loss . eval ( ) , 3 ) <nl> + <nl> + def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders ( self ) : <nl> + weights = np . asarray ( [ 1 . 2 , 3 . 4 ] ) . reshape ( ( 2 , 1 ) ) <nl> + expected_losses = np . multiply ( weights , self . _expected_losses ) <nl> + <nl> + tf_predictions = tf . placeholder ( tf . float32 , shape = self . _predictions . shape ) <nl> + tf_labels = tf . placeholder ( tf . int32 , shape = self . _labels . shape ) <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf_predictions , <nl> + labels = tf_labels , <nl> + weights = tf . constant ( weights , shape = [ 2 ] ) ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { <nl> + tf_predictions : self . _predictions , <nl> + tf_labels : self . _labels , <nl> + } ) <nl> + self . assertAlmostEqual ( np . sum ( expected_losses ) , loss , 3 ) <nl> + <nl> + def testLossWithAllZeroBatchSpecificWeights ( self ) : <nl> + weights = np . zeros ( ( 2 , 1 ) ) <nl> + loss = tf . losses . mean_pairwise_squared_error ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + weights = tf . constant ( weights , shape = [ 2 ] ) ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 . 0 , loss . 
eval ( ) , 3 ) <nl> + <nl> + <nl> + class CosineDistanceLossTest ( tf . test . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + self . _predictions = np . asarray ( [ [ 1 , 0 , 0 ] , # Batch 1 <nl> + [ 0 , 0 , - 1 ] , <nl> + [ 1 , 0 , 0 ] , # Batch 2 <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 0 , - 1 ] , # Batch 3 <nl> + [ 1 , 0 , 0 ] ] ) . reshape ( ( 3 , 2 , 3 ) ) <nl> + <nl> + self . _labels = np . asarray ( [ [ 1 , 0 , 0 ] , <nl> + [ 0 , 0 , 1 ] , <nl> + [ 0 , 1 , 0 ] , <nl> + [ 1 , 0 , 0 ] , <nl> + [ 0 , 0 , 1 ] , <nl> + [ 0 , 1 , 0 ] ] ) . reshape ( ( 3 , 2 , 3 ) ) <nl> + <nl> + def testValueErrorThrownWhenWeightIsNone ( self ) : <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . cosine_distance ( <nl> + predictions = tf . constant ( self . _labels ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 , <nl> + weights = None ) <nl> + <nl> + def testAllCorrectNoWeights ( self ) : <nl> + loss = tf . losses . cosine_distance ( <nl> + predictions = tf . constant ( self . _labels ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 0 , loss . eval ( ) , 5 ) <nl> + <nl> + def testPartiallyCorrectWithIntegerValues ( self ) : <nl> + loss = tf . losses . cosine_distance ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 ) <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 1 , loss . eval ( ) , 5 ) <nl> + <nl> + def testPartiallyCorrectFloatingPointValues ( self ) : <nl> + predictions = np . matrix ( ( <nl> + ' 0 . 819031913261206 0 . 567041924552012 0 . 087465312324590 ; ' <nl> + ' - 0 . 665139432070255 - 0 . 739487441769973 - 0 . 103671883216994 ; ' <nl> + ' 0 . 707106781186548 - 0 . 707106781186548 0 ' ) ) <nl> + labels = np . matrix ( ( <nl> + ' 0 . 819031913261206 0 . 567041924552012 0 . 087465312324590 ; ' <nl> + ' 0 . 665139432070255 0 . 739487441769973 0 . 103671883216994 ; ' <nl> + ' 0 . 707106781186548 0 . 707106781186548 0 ' ) ) <nl> + <nl> + tf_preds = tf . constant ( predictions , shape = ( 3 , 1 , 3 ) , dtype = tf . float32 ) <nl> + tf_labels = tf . constant ( labels , shape = ( 3 , 1 , 3 ) , dtype = tf . float32 ) <nl> + loss = tf . losses . cosine_distance ( tf_labels , tf_preds , dim = 2 ) <nl> + <nl> + with self . test_session ( ) : <nl> + self . assertAlmostEqual ( 1 . 0 , loss . eval ( ) , 5 ) <nl> + <nl> + def testSampleSpecificWeights ( self ) : <nl> + loss = tf . losses . cosine_distance ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 , <nl> + weights = tf . constant ( [ 1 , 0 , 0 ] ) ) <nl> + with self . test_session ( ) : <nl> + self . assertEqual ( 1 . 0 , loss . eval ( ) ) <nl> + <nl> + def testMeasurementSpecificWeights ( self ) : <nl> + loss = tf . losses . cosine_distance ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 , <nl> + weights = tf . constant ( [ 1 , 0 , 0 , 1 , 1 , 1 ] , shape = ( 3 , 2 ) ) ) <nl> + with self . test_session ( ) : <nl> + self . assertEqual ( 3 . 0 / 4 . 0 , loss . eval ( ) ) <nl> + <nl> + def testValueErrorThrownWithShapelessPlaceholder ( self ) : <nl> + tf_predictions = tf . placeholder ( tf . float32 ) <nl> + with self . test_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + tf . losses . 
cosine_distance ( <nl> + predictions = tf_predictions , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 , <nl> + weights = tf . constant ( [ 1 , 0 , 0 , 1 , 1 , 1 ] , shape = ( 3 , 2 ) ) ) <nl> + <nl> + def testMeasurementSpecificWeightsWithPlaceholderWithShape ( self ) : <nl> + tf_predictions = tf . placeholder ( tf . float32 , shape = self . _labels . shape ) <nl> + loss = tf . losses . cosine_distance ( <nl> + predictions = tf_predictions , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 , <nl> + weights = tf . constant ( [ 1 , 0 , 0 , 1 , 1 , 1 ] , shape = ( 3 , 2 ) ) ) <nl> + with self . test_session ( ) as sess : <nl> + loss = sess . run ( loss , feed_dict = { tf_predictions : self . _predictions } ) <nl> + self . assertEqual ( 3 . 0 / 4 . 0 , loss ) <nl> + <nl> + def testZeroLossWhenAllSampleSpecificWeightsAreZero ( self ) : <nl> + loss = tf . losses . cosine_distance ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 , <nl> + weights = tf . zeros ( ( 3 , ) ) ) <nl> + with self . test_session ( ) : <nl> + self . assertEqual ( 0 , loss . eval ( ) ) <nl> + <nl> + def testZeroLossWhenAllMeasurementSpecificWeightsAreZero ( self ) : <nl> + loss = tf . losses . cosine_distance ( <nl> + predictions = tf . constant ( self . _predictions ) , <nl> + labels = tf . constant ( self . _labels ) , <nl> + dim = 2 , <nl> + weights = tf . zeros ( ( 3 , 2 ) ) ) <nl> + with self . test_session ( ) : <nl> + self . assertEqual ( 0 , loss . eval ( ) ) <nl> + <nl> + <nl> + class AddLossTest ( tf . test . TestCase ) : <nl> + <nl> + def testNoCollectLossesBatch2 ( self ) : <nl> + logits = tf . constant ( [ [ 1 . 2 , 0 . 4 , - 1 . 0 , - 1 . 1 ] ] * 2 ) <nl> + labels = tf . constant ( [ [ 1 . 0 , 0 . 0 , 0 . 0 , 1 . 0 ] ] * 2 ) <nl> + self . assertFalse ( tf . losses . get_losses ( ) ) <nl> + tf . losses . absolute_difference ( logits , labels , loss_collection = None ) <nl> + tf . losses . log_loss ( logits , labels , loss_collection = None ) <nl> + tf . losses . mean_squared_error ( logits , labels , loss_collection = None ) <nl> + tf . losses . sigmoid_cross_entropy ( logits , labels , loss_collection = None ) <nl> + tf . losses . softmax_cross_entropy ( logits , labels , loss_collection = None ) <nl> + self . assertFalse ( tf . losses . get_losses ( ) ) <nl> + <nl> + if __name__ = = ' __main__ ' : <nl> + tf . test . main ( ) <nl> new file mode 100644 <nl> index 0000000000000 . . 4a2edd99b2b1a <nl> mmm / dev / null <nl> ppp b / tensorflow / python / ops / losses / BUILD <nl> <nl> + package ( <nl> + default_visibility = [ " / / tensorflow : internal " ] , <nl> + features = [ <nl> + " - layering_check " , <nl> + " - parse_headers " , <nl> + ] , <nl> + ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + py_library ( <nl> + name = " losses " , <nl> + srcs = [ <nl> + " __init__ . py " , <nl> + " losses . py " , <nl> + " util . 
py " , <nl> + ] , <nl> + srcs_version = " PY2AND3 " , <nl> + visibility = [ " / / tensorflow : internal " ] , <nl> + deps = [ <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : nn " , <nl> + " / / tensorflow / python : nn_ops " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " all_files " , <nl> + srcs = glob ( <nl> + [ " * * / * " ] , <nl> + exclude = [ <nl> + " * * / METADATA " , <nl> + " * * / OWNERS " , <nl> + ] , <nl> + ) , <nl> + visibility = [ " / / tensorflow : __subpackages__ " ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 3b0d0d8e5a5f2 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / ops / losses / __init__ . py <nl> <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + " " " Loss functions and helpers to manipulate them . <nl> + " " " <nl> + <nl> + # pylint : disable = wildcard - import <nl> + from tensorflow . python . ops . losses . losses import * <nl> + from tensorflow . python . ops . losses . util import * <nl> new file mode 100644 <nl> index 0000000000000 . . e6c2a558b3faa <nl> mmm / dev / null <nl> ppp b / tensorflow / python / ops / losses / losses . py <nl> <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Loss operations for use in neural networks . <nl> + <nl> + Note : All the losses are added to the ` GraphKeys . LOSSES ` collection by default . 
<nl> + <nl> + @ @ absolute_difference <nl> + @ @ compute_weighted_loss <nl> + @ @ cosine_distance <nl> + @ @ hinge_loss <nl> + @ @ log_loss <nl> + @ @ mean_pairwise_squared_error <nl> + @ @ mean_squared_error <nl> + @ @ sigmoid_cross_entropy <nl> + @ @ softmax_cross_entropy <nl> + @ @ sparse_softmax_cross_entropy <nl> + <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import math_ops <nl> + from tensorflow . python . ops import nn <nl> + from tensorflow . python . ops import nn_ops <nl> + from tensorflow . python . ops . losses import util <nl> + <nl> + <nl> + def _scale_losses ( losses , weights ) : <nl> + " " " Computes the scaled loss . <nl> + <nl> + Args : <nl> + losses : A ` Tensor ` of size [ batch_size , d1 , . . . dN ] . <nl> + weights : A ` Tensor ` of size [ 1 ] , [ batch_size ] or [ batch_size , d1 , . . . dN ] . <nl> + The ` losses ` are reduced ( tf . reduce_sum ) until their dimension matches <nl> + that of ` weights ` , at which point the reduced ` losses ` are element - wise <nl> + multiplied by ` weights ` and a final reduce_sum is computed on the result . <nl> + Conceptually , this operation is equivalent to broadcasting ( tiling ) <nl> + ` weights ` to be the same size as ` losses ` , performing an element - wise <nl> + multiplication , and summing the result . <nl> + <nl> + Returns : <nl> + A scalar tf . float32 ` Tensor ` whose value represents the sum of the scaled <nl> + ` losses ` . <nl> + " " " <nl> + # First , compute the sum of the losses over all elements : <nl> + start_index = max ( 0 , weights . get_shape ( ) . ndims ) <nl> + reduction_indices = list ( range ( start_index , losses . get_shape ( ) . ndims ) ) <nl> + reduced_losses = math_ops . reduce_sum ( losses , <nl> + reduction_indices = reduction_indices ) <nl> + reduced_losses = math_ops . mul ( reduced_losses , weights ) <nl> + return math_ops . reduce_sum ( reduced_losses ) <nl> + <nl> + <nl> + def _safe_div ( numerator , denominator , name = " value " ) : <nl> + " " " Computes a safe divide which returns 0 if the denominator is zero . <nl> + <nl> + Note that the function contains an additional conditional check that is <nl> + necessary for avoiding situations where the loss is zero causing NaNs to <nl> + creep into the gradient computation . <nl> + <nl> + Args : <nl> + numerator : An arbitrary ` Tensor ` . <nl> + denominator : A ` Tensor ` whose shape matches ` numerator ` and whose values are <nl> + assumed to be non - negative . <nl> + name : An optional name for the returned op . <nl> + <nl> + Returns : <nl> + The element - wise value of the numerator divided by the denominator . <nl> + " " " <nl> + return math_ops . select ( <nl> + math_ops . greater ( denominator , 0 ) , <nl> + math_ops . div ( numerator , math_ops . select ( <nl> + math_ops . equal ( denominator , 0 ) , <nl> + array_ops . ones_like ( denominator ) , denominator ) ) , <nl> + array_ops . zeros_like ( numerator ) , <nl> + name = name ) <nl> + <nl> + <nl> + def _safe_mean ( losses , num_present ) : <nl> + " " " Computes a safe mean of the losses . <nl> + <nl> + Args : <nl> + losses : A tensor whose elements contain individual loss measurements . <nl> + num_present : The number of measurable losses in the tensor .
<nl> + <nl> + Returns : <nl> + A scalar representing the mean of the losses . If ` num_present ` is zero , <nl> + then zero is returned . <nl> + " " " <nl> + total_loss = math_ops . reduce_sum ( losses ) <nl> + return _safe_div ( total_loss , num_present ) <nl> + <nl> + <nl> + def _num_present ( losses , weights , per_batch = False ) : <nl> + " " " Computes the number of elements in the loss function induced by ` weights ` . <nl> + <nl> + A given weights tensor induces different numbers of usable elements in the <nl> + ` losses ` tensor . The ` weights ` tensor is broadcast across ` losses ` for all <nl> + possible dimensions . For example , if ` losses ` is a tensor of dimension <nl> + [ 4 , 5 , 6 , 3 ] and ` weights ` is a tensor of size [ 4 , 5 ] , then ` weights ` is , in <nl> + effect , tiled to match the size of ` losses ` . Following this effective tile , <nl> + the total number of present elements is the number of non - zero weights . <nl> + <nl> + Args : <nl> + losses : A tensor of size [ batch_size , d1 , . . . dN ] . <nl> + weights : A tensor of size [ 1 ] or [ batch_size , d1 , . . . dK ] where K < N . <nl> + per_batch : Whether to return the number of elements per batch or as a sum <nl> + total . <nl> + <nl> + Returns : <nl> + The number of present ( non - zero ) elements in the losses tensor . If <nl> + ` per_batch ` is True , the value is returned as a tensor of size <nl> + [ batch_size ] . Otherwise , a single scalar tensor is returned . <nl> + " " " <nl> + # If weights is a scalar , it ' s easy to compute : <nl> + if weights . get_shape ( ) . ndims = = 0 : <nl> + batch_size = array_ops . reshape ( array_ops . slice ( array_ops . shape ( losses ) , <nl> + [ 0 ] , [ 1 ] ) , [ ] ) <nl> + num_per_batch = math_ops . div ( math_ops . to_float ( array_ops . size ( losses ) ) , <nl> + math_ops . to_float ( batch_size ) ) <nl> + num_per_batch = math_ops . select ( math_ops . equal ( weights , 0 ) , <nl> + 0 . 0 , num_per_batch ) <nl> + num_per_batch = math_ops . mul ( array_ops . ones ( <nl> + array_ops . reshape ( batch_size , [ 1 ] ) ) , num_per_batch ) <nl> + return num_per_batch if per_batch else math_ops . reduce_sum ( num_per_batch ) <nl> + <nl> + # First , count the number of nonzero weights : <nl> + if weights . get_shape ( ) . ndims > = 1 : <nl> + reduction_indices = list ( range ( 1 , weights . get_shape ( ) . ndims ) ) <nl> + num_nonzero_per_batch = math_ops . reduce_sum ( <nl> + math_ops . to_float ( math_ops . not_equal ( weights , 0 ) ) , <nl> + reduction_indices = reduction_indices ) <nl> + <nl> + # Next , determine the number of elements that weight would broadcast to : <nl> + broadcast_dims = array_ops . slice ( array_ops . shape ( losses ) , <nl> + [ weights . get_shape ( ) . ndims ] , [ - 1 ] ) <nl> + num_to_broadcast = math_ops . to_float ( math_ops . reduce_prod ( broadcast_dims ) ) <nl> + <nl> + num_per_batch = math_ops . mul ( num_nonzero_per_batch , num_to_broadcast ) <nl> + return num_per_batch if per_batch else math_ops . reduce_sum ( num_per_batch ) <nl> + <nl> + <nl> + def compute_weighted_loss ( <nl> + losses , weights = 1 . 0 , scope = None , loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Computes the weighted loss . <nl> + <nl> + Args : <nl> + losses : A tensor of size [ batch_size , d1 , . . . dN ] . <nl> + weights : A tensor of size [ 1 ] or [ batch_size , d1 , . . . dK ] where K < N . <nl> + scope : the scope for the operations performed in computing the loss . <nl> + loss_collection : the loss will be added to these collections .
<nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` that returns the weighted loss . <nl> + <nl> + Raises : <nl> + ValueError : If ` weights ` is ` None ` or the shape is not compatible with <nl> + ` losses ` , or if the number of dimensions ( rank ) of either ` losses ` or <nl> + ` weights ` is missing . <nl> + " " " <nl> + with ops . name_scope ( scope , " weighted_loss " , [ losses , weights ] ) : <nl> + losses = ops . convert_to_tensor ( losses ) <nl> + input_dtype = losses . dtype <nl> + losses = math_ops . to_float ( losses ) <nl> + weights = math_ops . to_float ( ops . convert_to_tensor ( weights ) ) <nl> + <nl> + if losses . get_shape ( ) . ndims is None : <nl> + raise ValueError ( " losses . get_shape ( ) . ndims cannot be None " ) <nl> + weights_shape = weights . get_shape ( ) <nl> + if weights_shape . ndims is None : <nl> + raise ValueError ( " weight . get_shape ( ) . ndims cannot be None " ) <nl> + <nl> + if weights_shape . ndims > 1 and weights_shape . dims [ - 1 ] . is_compatible_with ( 1 ) : <nl> + weights = array_ops . squeeze ( weights , [ - 1 ] ) <nl> + <nl> + total_loss = _scale_losses ( losses , weights ) <nl> + num_present = _num_present ( losses , weights ) <nl> + mean_loss = _safe_mean ( total_loss , num_present ) <nl> + # convert the result back to the input type <nl> + mean_loss = math_ops . cast ( mean_loss , input_dtype ) <nl> + util . add_loss ( mean_loss , loss_collection ) <nl> + return mean_loss <nl> + <nl> + <nl> + def absolute_difference ( <nl> + labels , predictions , weights = 1 . 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Adds an Absolute Difference loss to the training procedure . <nl> + <nl> + ` weights ` acts as a coefficient for the loss . If a scalar is provided , then <nl> + the loss is simply scaled by the given value . If ` weights ` is a tensor of <nl> + size [ batch_size ] , then the total loss for each sample of the batch is <nl> + rescaled by the corresponding element in the ` weight ` vector . If the shape of <nl> + ` weight ` matches the shape of ` predictions ` , then the loss of each <nl> + measurable element of ` predictions ` is scaled by the corresponding value of <nl> + ` weight ` . <nl> + <nl> + Args : <nl> + labels : The ground truth output tensor , same dimensions as ' predictions ' . <nl> + predictions : The predicted outputs . <nl> + weights : Coefficients for the loss a scalar , a tensor of shape <nl> + [ batch_size ] or a tensor whose shape matches ` predictions ` . <nl> + scope : The scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which this loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shape of ` predictions ` doesn ' t match that of ` labels ` or <nl> + if the shape of ` weight ` is invalid . <nl> + " " " <nl> + with ops . name_scope ( scope , " absolute_difference " , <nl> + [ predictions , labels , weights ] ) as scope : <nl> + predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> + predictions = math_ops . to_float ( predictions ) <nl> + labels = math_ops . to_float ( labels ) <nl> + losses = math_ops . abs ( math_ops . sub ( predictions , labels ) ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> + <nl> + <nl> + def cosine_distance ( <nl> + labels , predictions , dim = None , weights = 1 . 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . 
LOSSES ) : <nl> + " " " Adds a cosine - distance loss to the training procedure . <nl> + <nl> + Note that the function assumes that ` predictions ` and ` labels ` are already <nl> + unit - normalized . <nl> + <nl> + Args : <nl> + labels : A ` Tensor ` whose shape matches ' predictions ' <nl> + predictions : An arbitrary matrix . <nl> + dim : The dimension along which the cosine distance is computed . <nl> + weights : Coefficients for the loss a scalar , a tensor of shape <nl> + [ batch_size ] or a tensor whose shape matches ` predictions ` . <nl> + scope : The scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which this loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If ` predictions ` shape doesn ' t match ` labels ` shape , or <nl> + ` weights ` is ` None ` . <nl> + " " " <nl> + if dim is None : <nl> + raise ValueError ( " ` dim ` cannot be None . " ) <nl> + with ops . name_scope ( scope , " cosine_distance_loss " , <nl> + [ predictions , labels , weights ] ) as scope : <nl> + predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> + <nl> + predictions = math_ops . to_float ( predictions ) <nl> + labels = math_ops . to_float ( labels ) <nl> + <nl> + radial_diffs = math_ops . mul ( predictions , labels ) <nl> + losses = 1 - math_ops . reduce_sum ( radial_diffs , reduction_indices = [ dim , ] ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> + <nl> + <nl> + def hinge_loss ( labels , logits , weights = 1 . 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Adds a hinge loss to the training procedure . <nl> + <nl> + Args : <nl> + labels : The ground truth output tensor . Its shape should match the shape of <nl> + logits . The values of the tensor are expected to be 0 . 0 or 1 . 0 . <nl> + logits : The logits , a float tensor . <nl> + weights : Coefficients for the loss a scalar , a tensor of shape <nl> + [ batch_size ] or a tensor whose shape matches ` predictions ` . <nl> + scope : The scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which the loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` of the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shapes of ` logits ` and ` labels ` don ' t match . <nl> + " " " <nl> + with ops . name_scope ( scope , " hinge_loss " , [ logits , labels ] ) as scope : <nl> + logits . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> + # We first need to convert binary labels to - 1 / 1 labels ( as floats ) . <nl> + labels = math_ops . to_float ( labels ) <nl> + all_ones = array_ops . ones_like ( labels ) <nl> + labels = math_ops . sub ( 2 * labels , all_ones ) <nl> + losses = nn_ops . relu ( math_ops . sub ( all_ones , math_ops . mul ( labels , logits ) ) ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> + <nl> + <nl> + def log_loss ( labels , predictions , weights = 1 . 0 , epsilon = 1e - 7 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Adds a Log Loss term to the training procedure . <nl> + <nl> + ` weight ` acts as a coefficient for the loss . If a scalar is provided , then the <nl> + loss is simply scaled by the given value . 
If ` weight ` is a tensor of size <nl> + [ batch_size ] , then the total loss for each sample of the batch is rescaled <nl> + by the corresponding element in the ` weight ` vector . If the shape of <nl> + ` weight ` matches the shape of ` predictions ` , then the loss of each <nl> + measurable element of ` predictions ` is scaled by the corresponding value of <nl> + ` weight ` . <nl> + <nl> + Args : <nl> + labels : The ground truth output tensor , same dimensions as ' predictions ' . <nl> + predictions : The predicted outputs . <nl> + weights : Coefficients for the loss a scalar , a tensor of shape <nl> + [ batch_size ] or a tensor whose shape matches ` predictions ` . <nl> + epsilon : A small increment to add to avoid taking a log of zero . <nl> + scope : The scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which the loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shape of ` predictions ` doesn ' t match that of ` labels ` or <nl> + if the shape of ` weight ` is invalid . <nl> + " " " <nl> + with ops . name_scope ( scope , " log_loss " , <nl> + [ predictions , labels , weights ] ) as scope : <nl> + predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> + predictions = math_ops . to_float ( predictions ) <nl> + labels = math_ops . to_float ( labels ) <nl> + losses = - math_ops . mul ( <nl> + labels , <nl> + math_ops . log ( predictions + epsilon ) ) - math_ops . mul ( <nl> + ( 1 - labels ) , math_ops . log ( 1 - predictions + epsilon ) ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> + <nl> + <nl> + def mean_pairwise_squared_error ( labels , predictions , weights = 1 . 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Adds a pairwise - errors - squared loss to the training procedure . <nl> + <nl> + Unlike ` mean_squared_error ` , which is a measure of the differences between <nl> + corresponding elements of ` predictions ` and ` labels ` , <nl> + ` mean_pairwise_squared_error ` is a measure of the differences between pairs of <nl> + corresponding elements of ` predictions ` and ` labels ` . <nl> + <nl> + For example , if ` labels ` = [ a , b , c ] and ` predictions ` = [ x , y , z ] , there are <nl> + three pairs of differences that are summed to compute the loss : <nl> + loss = [ ( ( a - b ) - ( x - y ) ) . ^ 2 + ( ( a - c ) - ( x - z ) ) . ^ 2 + ( ( b - c ) - ( y - z ) ) . ^ 2 ] / 3 <nl> + <nl> + Note that since the inputs are of size [ batch_size , d0 , . . . dN ] , the <nl> + corresponding pairs are computed within each batch sample but not across <nl> + samples within a batch . For example , if ` predictions ` represents a batch of <nl> + 16 grayscale images of dimension [ batch_size , 100 , 200 ] , then the set of pairs <nl> + is drawn from each image , but not across images . <nl> + <nl> + ` weight ` acts as a coefficient for the loss . If a scalar is provided , then the <nl> + loss is simply scaled by the given value . If ` weight ` is a tensor of size <nl> + [ batch_size ] , then the total loss for each sample of the batch is rescaled <nl> + by the corresponding element in the ` weight ` vector . <nl> + <nl> + Args : <nl> + labels : The ground truth output tensor , whose shape must match the shape of <nl> + the ` predictions ` tensor . <nl> + predictions : The predicted outputs , a tensor of size [ batch_size , d0 , . .
dN ] <nl> + where N + 1 is the total number of dimensions in ` predictions ` . <nl> + weights : Coefficients for the loss a scalar , a tensor of shape [ batch_size ] <nl> + or a tensor whose shape matches ` predictions ` . <nl> + scope : The scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which the loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shape of ` predictions ` doesn ' t match that of ` labels ` or <nl> + if the shape of ` weight ` is invalid . <nl> + " " " <nl> + with ops . name_scope ( scope , " mean_pairwise_squared_error " , <nl> + [ predictions , labels , weights ] ) as scope : <nl> + predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> + predictions = math_ops . to_float ( predictions ) <nl> + labels = math_ops . to_float ( labels ) <nl> + weights = math_ops . to_float ( ops . convert_to_tensor ( weights ) ) <nl> + <nl> + diffs = math_ops . sub ( predictions , labels ) <nl> + <nl> + # Need to verify here since the function doesn ' t use compute_weighted_loss <nl> + if diffs . get_shape ( ) . ndims is None : <nl> + raise ValueError ( " diffs . get_shape ( ) . ndims cannot be None " ) <nl> + if weights . get_shape ( ) . ndims is None : <nl> + raise ValueError ( " weights . get_shape ( ) . ndims cannot be None " ) <nl> + <nl> + reduction_indices = list ( range ( 1 , diffs . get_shape ( ) . ndims ) ) <nl> + <nl> + sum_squares_diff_per_batch = math_ops . reduce_sum ( <nl> + math_ops . square ( diffs ) , <nl> + reduction_indices = reduction_indices ) <nl> + num_present_per_batch = _num_present ( diffs , weights , per_batch = True ) <nl> + <nl> + term1 = 2 . 0 * _safe_div ( sum_squares_diff_per_batch , <nl> + num_present_per_batch ) <nl> + <nl> + sum_diff = math_ops . reduce_sum ( diffs , reduction_indices = reduction_indices ) <nl> + term2 = 2 . 0 * _safe_div ( math_ops . square ( sum_diff ) , <nl> + math_ops . square ( num_present_per_batch ) ) <nl> + <nl> + loss = _scale_losses ( term1 - term2 , weights ) <nl> + <nl> + mean_loss = math_ops . select ( math_ops . reduce_sum ( num_present_per_batch ) > 0 , <nl> + loss , <nl> + array_ops . zeros_like ( loss ) , <nl> + name = " value " ) <nl> + util . add_loss ( mean_loss , loss_collection ) <nl> + return mean_loss <nl> + <nl> + <nl> + def mean_squared_error ( labels , predictions , weights = 1 . 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Adds a Sum - of - Squares loss to the training procedure . <nl> + <nl> + ` weight ` acts as a coefficient for the loss . If a scalar is provided , then the <nl> + loss is simply scaled by the given value . If ` weight ` is a tensor of size <nl> + [ batch_size ] , then the total loss for each sample of the batch is rescaled <nl> + by the corresponding element in the ` weight ` vector . If the shape of <nl> + ` weight ` matches the shape of ` predictions ` , then the loss of each <nl> + measurable element of ` predictions ` is scaled by the corresponding value of <nl> + ` weight ` . <nl> + <nl> + Args : <nl> + labels : The ground truth output tensor , same dimensions as ' predictions ' . <nl> + predictions : The predicted outputs . <nl> + weights : Coefficients for the loss a scalar , a tensor of shape <nl> + [ batch_size ] or a tensor whose shape matches ` predictions ` . <nl> + scope : The scope for the operations performed in computing the loss . 
<nl> + loss_collection : collection to which the loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shape of ` predictions ` doesn ' t match that of ` labels ` or <nl> + if the shape of ` weight ` is invalid . <nl> + " " " <nl> + with ops . name_scope ( scope , " mean_squared_error " , <nl> + [ predictions , labels , weights ] ) as scope : <nl> + predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> + predictions = math_ops . to_float ( predictions ) <nl> + labels = math_ops . to_float ( labels ) <nl> + losses = math_ops . square ( math_ops . sub ( predictions , labels ) ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> + <nl> + <nl> + def sigmoid_cross_entropy ( <nl> + multi_class_labels , logits , weights = 1 . 0 , label_smoothing = 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Creates a cross - entropy loss using tf . nn . sigmoid_cross_entropy_with_logits . <nl> + <nl> + ` weight ` acts as a coefficient for the loss . If a scalar is provided , <nl> + then the loss is simply scaled by the given value . If ` weight ` is a <nl> + tensor of size [ ` batch_size ` ] , then the loss weights apply to each <nl> + corresponding sample . <nl> + <nl> + If ` label_smoothing ` is nonzero , smooth the labels towards 1 / 2 : <nl> + <nl> + new_multiclass_labels = multiclass_labels * ( 1 - label_smoothing ) <nl> + + 0 . 5 * label_smoothing <nl> + <nl> + Args : <nl> + multi_class_labels : [ batch_size , num_classes ] target labels in ( 0 , 1 ) . <nl> + logits : [ batch_size , num_classes ] logits outputs of the network . <nl> + weights : Coefficients for the loss . The tensor must be a scalar , a tensor of <nl> + shape [ batch_size ] or shape [ batch_size , num_classes ] . <nl> + label_smoothing : If greater than 0 then smooth the labels . <nl> + scope : The scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which the loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shape of ` logits ` doesn ' t match that of <nl> + ` multi_class_labels ` or if the shape of ` weight ` is invalid , or if <nl> + ` weight ` is None . <nl> + " " " <nl> + with ops . name_scope ( scope , " sigmoid_cross_entropy_loss " , <nl> + [ logits , multi_class_labels , weights ] ) as scope : <nl> + logits . get_shape ( ) . assert_is_compatible_with ( multi_class_labels . get_shape ( ) ) <nl> + <nl> + multi_class_labels = math_ops . cast ( multi_class_labels , logits . dtype ) <nl> + <nl> + if label_smoothing > 0 : <nl> + multi_class_labels = ( multi_class_labels * ( 1 - label_smoothing ) + <nl> + 0 . 5 * label_smoothing ) <nl> + <nl> + losses = nn . sigmoid_cross_entropy_with_logits ( logits , multi_class_labels , <nl> + name = " xentropy " ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> + <nl> + <nl> + def softmax_cross_entropy ( <nl> + onehot_labels , logits , weights = 1 . 0 , label_smoothing = 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Creates a cross - entropy loss using tf . nn . softmax_cross_entropy_with_logits . <nl> + <nl> + ` weight ` acts as a coefficient for the loss . If a scalar is provided , <nl> + then the loss is simply scaled by the given value . 
If ` weight ` is a <nl> + tensor of size [ ` batch_size ` ] , then the loss weights apply to each <nl> + corresponding sample . <nl> + <nl> + If ` label_smoothing ` is nonzero , smooth the labels towards 1 / num_classes : <nl> + new_onehot_labels = onehot_labels * ( 1 - label_smoothing ) <nl> + + label_smoothing / num_classes <nl> + <nl> + Args : <nl> + onehot_labels : [ batch_size , num_classes ] target one_hot_encoded labels . <nl> + logits : [ batch_size , num_classes ] logits outputs of the network . <nl> + weights : Coefficients for the loss . The tensor must be a scalar or a tensor <nl> + of shape [ batch_size ] . <nl> + label_smoothing : If greater than 0 then smooth the labels . <nl> + scope : the scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which the loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shape of ` logits ` doesn ' t match that of ` onehot_labels ` <nl> + or if the shape of ` weight ` is invalid or if ` weight ` is None . <nl> + " " " <nl> + with ops . name_scope ( scope , " softmax_cross_entropy_loss " , <nl> + [ logits , onehot_labels , weights ] ) as scope : <nl> + logits . get_shape ( ) . assert_is_compatible_with ( onehot_labels . get_shape ( ) ) <nl> + <nl> + onehot_labels = math_ops . cast ( onehot_labels , logits . dtype ) <nl> + <nl> + if label_smoothing > 0 : <nl> + num_classes = math_ops . cast ( <nl> + array_ops . shape ( onehot_labels ) [ 1 ] , logits . dtype ) <nl> + smooth_positives = 1 . 0 - label_smoothing <nl> + smooth_negatives = label_smoothing / num_classes <nl> + onehot_labels = onehot_labels * smooth_positives + smooth_negatives <nl> + <nl> + losses = nn . softmax_cross_entropy_with_logits ( logits , onehot_labels , <nl> + name = " xentropy " ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> + <nl> + <nl> + def sparse_softmax_cross_entropy ( labels , logits , weights = 1 . 0 , scope = None , <nl> + loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Cross - entropy loss using ` tf . nn . sparse_softmax_cross_entropy_with_logits ` . <nl> + <nl> + ` weight ` acts as a coefficient for the loss . If a scalar is provided , <nl> + then the loss is simply scaled by the given value . If ` weight ` is a <nl> + tensor of size [ ` batch_size ` ] , then the loss weights apply to each <nl> + corresponding sample . <nl> + <nl> + Args : <nl> + labels : [ batch_size , 1 ] or [ batch_size ] target labels of dtype ` int32 ` or <nl> + ` int64 ` in the range ` [ 0 , num_classes ) ` . <nl> + logits : [ batch_size , num_classes ] logits outputs of the network . <nl> + weights : Coefficients for the loss . The tensor must be a scalar or a tensor <nl> + of shape [ batch_size ] or [ batch_size , 1 ] . <nl> + scope : the scope for the operations performed in computing the loss . <nl> + loss_collection : collection to which the loss will be added . <nl> + <nl> + Returns : <nl> + A scalar ` Tensor ` representing the loss value . <nl> + <nl> + Raises : <nl> + ValueError : If the shapes of logits , labels , and weight are incompatible , or <nl> + if ` weight ` is None . <nl> + " " " <nl> + with ops . name_scope ( scope , " sparse_softmax_cross_entropy_loss " , <nl> + [ logits , labels , weights ] ) as scope : <nl> + labels = array_ops . reshape ( labels , shape = [ array_ops . shape ( labels ) [ 0 ] ] ) <nl> + weights = array_ops . 
squeeze ( weights ) <nl> + <nl> + losses = nn . sparse_softmax_cross_entropy_with_logits ( logits , labels , <nl> + name = " xentropy " ) <nl> + return compute_weighted_loss ( losses , weights , scope , loss_collection ) <nl> new file mode 100644 <nl> index 0000000000000 . . aaf324891f3f3 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / ops / losses / util . py <nl> <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Utilities for manipulating the loss collections . <nl> + <nl> + <nl> + @ @ add_loss <nl> + @ @ get_losses <nl> + @ @ get_regularization_losses <nl> + @ @ get_total_loss <nl> + <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import math_ops <nl> + <nl> + <nl> + def add_loss ( loss , loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Adds an externally defined loss to the collection of losses . <nl> + <nl> + Args : <nl> + loss : A loss ` Tensor ` . <nl> + loss_collection : Optional collection to add the loss to . <nl> + " " " <nl> + if loss_collection : <nl> + ops . add_to_collection ( loss_collection , loss ) <nl> + <nl> + <nl> + def get_losses ( scope = None , loss_collection = ops . GraphKeys . LOSSES ) : <nl> + " " " Gets the list of losses from the loss_collection . <nl> + <nl> + Args : <nl> + scope : an optional scope for filtering the losses to return . <nl> + loss_collection : Optional losses collection . <nl> + <nl> + Returns : <nl> + a list of loss tensors . <nl> + " " " <nl> + return ops . get_collection ( loss_collection , scope ) <nl> + <nl> + <nl> + def get_regularization_losses ( scope = None ) : <nl> + " " " Gets the regularization losses . <nl> + <nl> + Args : <nl> + scope : an optional scope for filtering the losses to return . <nl> + <nl> + Returns : <nl> + A list of loss variables . <nl> + " " " <nl> + return ops . get_collection ( ops . GraphKeys . REGULARIZATION_LOSSES , scope ) <nl> + <nl> + <nl> + def get_total_loss ( add_regularization_losses = True , name = " total_loss " ) : <nl> + " " " Returns a tensor whose value represents the total loss . <nl> + <nl> + Notice that the function adds the given losses to the regularization losses . <nl> + <nl> + Args : <nl> + add_regularization_losses : A boolean indicating whether or not to use the <nl> + regularization losses in the sum . <nl> + name : The name of the returned tensor . <nl> + <nl> + Returns : <nl> + A ` Tensor ` whose value represents the total loss . <nl> + <nl> + Raises : <nl> + ValueError : if ` losses ` is not iterable .
<nl> + " " " <nl> + losses = get_losses ( ) <nl> + if add_regularization_losses : <nl> + losses + = get_regularization_losses ( ) <nl> + return math_ops . add_n ( losses , name = name ) <nl>
Moves tf . contrib . losses into core , with changes .
tensorflow/tensorflow
896285a8dca7bddbf328b3728683acf619f26c13
2016-12-02T17:06:39Z
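A short usage sketch of the relocated tf.losses module, as the tests in the record above exercise it. This is a hedged sketch, assuming a late-2016 TensorFlow build that already contains this commit; the tensor values are illustrative only:

import tensorflow as tf

labels = tf.constant([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
predictions = tf.constant([[0.9, 0.2, 0.2], [0.8, 0.4, 0.6]])

# Each loss op returns a weighted mean loss and, by default, also registers
# itself in the GraphKeys.LOSSES collection through util.add_loss.
log_loss = tf.losses.log_loss(labels, predictions, weights=2.3)
mse = tf.losses.mean_squared_error(labels, predictions)

# get_total_loss sums everything in the LOSSES collection plus, by default,
# anything in GraphKeys.REGULARIZATION_LOSSES.
total_loss = tf.losses.get_total_loss()

with tf.Session() as sess:
    print(sess.run([log_loss, mse, total_loss]))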
mmm a / cmake / find_ccache . cmake <nl> ppp b / cmake / find_ccache . cmake <nl> <nl> find_program ( CCACHE_FOUND ccache ) <nl> + <nl> if ( CCACHE_FOUND AND NOT CMAKE_CXX_COMPILER_LAUNCHER MATCHES " ccache " AND NOT CMAKE_CXX_COMPILER MATCHES " ccache " ) <nl> - execute_process ( COMMAND $ { CCACHE_FOUND } " - V " OUTPUT_VARIABLE CCACHE_VERSION ) <nl> - string ( REGEX REPLACE " ccache version ( [ 0 - 9 \ \ . ] + ) . * " " \ \ 1 " CCACHE_VERSION $ { CCACHE_VERSION } ) <nl> + execute_process ( COMMAND $ { CCACHE_FOUND } " - V " OUTPUT_VARIABLE CCACHE_VERSION ) <nl> + string ( REGEX REPLACE " ccache version ( [ 0 - 9 \ \ . ] + ) . * " " \ \ 1 " CCACHE_VERSION $ { CCACHE_VERSION } ) <nl> <nl> - if ( CCACHE_VERSION VERSION_GREATER " 3 . 2 . 0 " OR NOT CMAKE_CXX_COMPILER_ID STREQUAL " Clang " ) <nl> - # message ( STATUS " Using $ { CCACHE_FOUND } $ { CCACHE_VERSION } " ) <nl> - set_property ( GLOBAL PROPERTY RULE_LAUNCH_COMPILE $ { CCACHE_FOUND } ) <nl> - set_property ( GLOBAL PROPERTY RULE_LAUNCH_LINK $ { CCACHE_FOUND } ) <nl> - else ( ) <nl> - message ( STATUS " Not using $ { CCACHE_FOUND } $ { CCACHE_VERSION } bug : https : / / bugzilla . samba . org / show_bug . cgi ? id = 8118 " ) <nl> - endif ( ) <nl> + if ( CCACHE_VERSION VERSION_GREATER " 3 . 2 . 0 " OR NOT CMAKE_CXX_COMPILER_ID STREQUAL " Clang " ) <nl> + # message ( STATUS " Using $ { CCACHE_FOUND } $ { CCACHE_VERSION } " ) <nl> + set_property ( GLOBAL PROPERTY RULE_LAUNCH_COMPILE $ { CCACHE_FOUND } ) <nl> + set_property ( GLOBAL PROPERTY RULE_LAUNCH_LINK $ { CCACHE_FOUND } ) <nl> + else ( ) <nl> + message ( STATUS " Not using $ { CCACHE_FOUND } $ { CCACHE_VERSION } bug : https : / / bugzilla . samba . org / show_bug . cgi ? id = 8118 " ) <nl> + endif ( ) <nl> endif ( ) <nl> mmm a / cmake / find_cxx . cmake <nl> ppp b / cmake / find_cxx . cmake <nl> if ( USE_LIBCXX ) <nl> find_library ( LIBCXXFS_LIBRARY c + + fs ) <nl> find_library ( LIBCXXABI_LIBRARY c + + abi ) <nl> <nl> + set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - stdlib = libc + + " ) <nl> + <nl> target_link_libraries ( global - libs INTERFACE $ { EXCEPTION_HANDLING_LIBRARY } ) <nl> else ( ) <nl> set ( LIBCXX_LIBRARY cxx ) <nl> if ( USE_LIBCXX ) <nl> target_link_libraries ( global - libs INTERFACE $ { LIBCXX_LIBRARY } $ { LIBCXXABI_LIBRARY } $ { LIBCXXFS_LIBRARY } ) <nl> <nl> set ( HAVE_LIBCXX 1 ) <nl> - set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - stdlib = libc + + " ) <nl> <nl> message ( STATUS " Using libcxx : $ { LIBCXX_LIBRARY } " ) <nl> message ( STATUS " Using libcxxfs : $ { LIBCXXFS_LIBRARY } " ) <nl> mmm a / contrib / libcxx - cmake / CMakeLists . txt <nl> ppp b / contrib / libcxx - cmake / CMakeLists . txt <nl> $ { LIBCXX_SOURCE_DIR } / src / random . cpp <nl> <nl> add_library ( cxx $ { SRCS } ) <nl> <nl> - target_include_directories ( cxx BEFORE PUBLIC $ < BUILD_INTERFACE : $ { LIBCXX_SOURCE_DIR } / include > ) <nl> + target_include_directories ( cxx SYSTEM BEFORE PUBLIC $ < BUILD_INTERFACE : $ { LIBCXX_SOURCE_DIR } / include > ) <nl> target_compile_definitions ( cxx PRIVATE - D_LIBCPP_BUILDING_LIBRARY - DLIBCXX_BUILDING_LIBCXXABI ) <nl> target_compile_options ( cxx PUBLIC - nostdinc + + - Wno - reserved - id - macro ) <nl> target_link_libraries ( cxx PUBLIC cxxabi ) <nl> mmm a / contrib / libcxxabi - cmake / CMakeLists . txt <nl> ppp b / contrib / libcxxabi - cmake / CMakeLists . 
txt <nl> target_include_directories ( cxxabi SYSTEM BEFORE <nl> ) <nl> target_compile_definitions ( cxxabi PRIVATE - D_LIBCPP_BUILDING_LIBRARY ) <nl> target_compile_options ( cxxabi PRIVATE - nostdinc + + - fno - sanitize = undefined - Wno - macro - redefined ) # If we don ' t disable UBSan , infinite recursion happens in dynamic_cast . <nl> - <nl> - if ( USE_UNWIND ) <nl> - target_link_libraries ( cxxabi PRIVATE $ { UNWIND_LIBRARIES } ) <nl> - else ( ) <nl> - target_link_libraries ( cxxabi PRIVATE gcc_eh ) <nl> - endif ( ) <nl> + target_link_libraries ( cxxabi PUBLIC $ { EXCEPTION_HANDLING_LIBRARY } ) <nl> <nl> install ( <nl> TARGETS cxxabi <nl> mmm a / dbms / programs / performance - test / PerformanceTestInfo . cpp <nl> ppp b / dbms / programs / performance - test / PerformanceTestInfo . cpp <nl> void PerformanceTestInfo : : applySettings ( XMLConfigurationPtr config ) <nl> } <nl> <nl> extractSettings ( config , " settings " , config_settings , settings_to_apply ) ; <nl> - settings . loadFromChanges ( settings_to_apply ) ; <nl> + settings . applyChanges ( settings_to_apply ) ; <nl> <nl> if ( settings_contain ( " average_rows_speed_precision " ) ) <nl> TestStats : : avg_rows_speed_precision = <nl> mmm a / dbms / programs / server / Server . cpp <nl> ppp b / dbms / programs / server / Server . cpp <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> <nl> / / / Init trace collector only after trace_log system table was created <nl> / / / Disable it if we collect test coverage information , because it will work extremely slowly . <nl> - # if USE_UNWIND & & ! WITH_COVERAGE <nl> + / / / <nl> + / / / It also cannot work with sanitizers . <nl> + / / / Sanitizers use quick " frame walking " stack unwinding ( this implies - fno - omit - frame - pointer ) , <nl> + / / / and they unwind frequently ( on every malloc / free , thread / mutex operations , etc ) . <nl> + / / / They change % rbp during unwinding , which confuses libunwind if a signal arrives during sanitizer unwinding <nl> + / / / and the query profiler decides to unwind the stack with libunwind at that moment . <nl> + / / / <nl> + / / / Symptoms : you ' ll get a silent Segmentation Fault - without a sanitizer message and without the usual ClickHouse diagnostics . <nl> + / / / <nl> + / / / Look at compiler - rt / lib / sanitizer_common / sanitizer_stacktrace . h <nl> + / / / <nl> + # if USE_UNWIND & & ! WITH_COVERAGE & & ! defined ( SANITIZER ) <nl> / / / QueryProfiler cannot work reliably with any other libunwind or without PHDR cache . <nl> if ( hasPHDRCache ( ) ) <nl> global_context - > initializeTraceCollector ( ) ; <nl> mmm a / dbms / src / Common / ErrorCodes . cpp <nl> ppp b / dbms / src / Common / ErrorCodes . cpp <nl> namespace ErrorCodes <nl> extern const int VIOLATED_CONSTRAINT = 469 ; <nl> extern const int QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW = 470 ; <nl> extern const int SETTINGS_ARE_NOT_SUPPORTED = 471 ; <nl> - extern const int IMMUTABLE_SETTING = 472 ; <nl> + extern const int READONLY_SETTING = 472 ; <nl> extern const int UNKNOWN_POLICY = 473 ; <nl> extern const int UNKNOWN_DISK = 474 ; <nl> extern const int UNKNOWN_PROTOCOL = 475 ; <nl> extern const int PATH_ACCESS_DENIED = 476 ; <nl> <nl> - <nl> extern const int KEEPER_EXCEPTION = 999 ; <nl> extern const int POCO_EXCEPTION = 1000 ; <nl> extern const int STD_EXCEPTION = 1001 ; <nl> mmm a / dbms / src / Common / ProfileEvents . cpp <nl> ppp b / dbms / src / Common / ProfileEvents .
cpp <nl> <nl> M ( MarkCacheMisses , " " ) \ <nl> M ( CreatedReadBufferOrdinary , " " ) \ <nl> M ( CreatedReadBufferAIO , " " ) \ <nl> + M ( CreatedReadBufferAIOFailed , " " ) \ <nl> M ( CreatedWriteBufferOrdinary , " " ) \ <nl> M ( CreatedWriteBufferAIO , " " ) \ <nl> + M ( CreatedWriteBufferAIOFailed , " " ) \ <nl> M ( DiskReadElapsedMicroseconds , " Total time spent waiting for read syscall . This include reads from page cache . " ) \ <nl> M ( DiskWriteElapsedMicroseconds , " Total time spent waiting for write syscall . This include writes to page cache . " ) \ <nl> M ( NetworkReceiveElapsedMicroseconds , " " ) \ <nl> mmm a / dbms / src / Common / ThreadStatus . h <nl> ppp b / dbms / src / Common / ThreadStatus . h <nl> class ThreadGroupStatus <nl> InternalTextLogsQueueWeakPtr logs_queue_ptr ; <nl> <nl> std : : vector < UInt32 > thread_numbers ; <nl> + std : : vector < UInt32 > os_thread_ids ; <nl> <nl> / / / The first thread created this thread group <nl> UInt32 master_thread_number = 0 ; <nl> mmm a / dbms / src / Core / Defines . h <nl> ppp b / dbms / src / Core / Defines . h <nl> <nl> # endif <nl> # endif <nl> <nl> + / / / TODO Strange enough , there is no way to detect UB sanitizer . <nl> + <nl> / / / Explicitly allow undefined behaviour for certain functions . Use it as a function attribute . <nl> / / / It is useful in case when compiler cannot see ( and exploit ) it , but UBSan can . <nl> / / / Example : multiplication of signed integers with possibility of overflow when both sides are from user input . <nl> mmm a / dbms / src / Core / Settings . h <nl> ppp b / dbms / src / Core / Settings . h <nl> struct Settings : public SettingsCollection < Settings > <nl> * but we are not going to do it , because settings is used everywhere as static struct fields . <nl> * / <nl> <nl> - / / / M ( mutable ) for normal settings , IM ( immutable ) for not updateable settings . <nl> - # define LIST_OF_SETTINGS ( M , IM ) \ <nl> + # define LIST_OF_SETTINGS ( M ) \ <nl> M ( SettingUInt64 , min_compress_block_size , 65536 , " The actual size of the block to compress , if the uncompressed data less than max_compress_block_size is no less than this value and no less than the volume of data for one mark . " ) \ <nl> M ( SettingUInt64 , max_compress_block_size , 1048576 , " The maximum size of blocks of uncompressed data before compressing for writing to a table . " ) \ <nl> M ( SettingUInt64 , max_block_size , DEFAULT_BLOCK_SIZE , " Maximum block size for reading " ) \ <nl> mmm a / dbms / src / Core / SettingsCommon . h <nl> ppp b / dbms / src / Core / SettingsCommon . h <nl> class Field ; <nl> class ReadBuffer ; <nl> class WriteBuffer ; <nl> <nl> - namespace ErrorCodes <nl> - { <nl> - extern const int IMMUTABLE_SETTING ; <nl> - } <nl> - <nl> / * * One setting for any type . <nl> * Stores a value within itself , as well as a flag - whether the value was changed . <nl> * This is done so that you can send to the remote servers only changed settings ( or explicitly specified in the config ) values . <nl> class SettingsCollection <nl> using DeserializeFunction = void ( * ) ( Derived & , ReadBuffer & buf ) ; <nl> using CastValueWithoutApplyingFunction = Field ( * ) ( const Field & ) ; <nl> <nl> + <nl> struct MemberInfo <nl> { <nl> IsChangedFunction is_changed ; <nl> StringRef name ; <nl> StringRef description ; <nl> - / / / Can be updated after first load for config / definition . <nl> - / / / Non updatable settings can be ` changed ` , <nl> - / / / if they were overwritten in config / definition . 
<nl> - const bool updateable ; <nl> GetStringFunction get_string ; <nl> GetFieldFunction get_field ; <nl> SetStringFunction set_string ; <nl> class SettingsCollection <nl> const_reference ( const const_reference & src ) = default ; <nl> const StringRef & getName ( ) const { return member - > name ; } <nl> const StringRef & getDescription ( ) const { return member - > description ; } <nl> - bool isUpdateable ( ) const { return member - > updateable ; } <nl> bool isChanged ( ) const { return member - > isChanged ( * collection ) ; } <nl> Field getValue ( ) const { return member - > get_field ( * collection ) ; } <nl> String getValueAsString ( ) const { return member - > get_string ( * collection ) ; } <nl> class SettingsCollection <nl> reference ( const const_reference & src ) : const_reference ( src ) { } <nl> void setValue ( const Field & value ) { this - > member - > set_field ( * const_cast < Derived * > ( this - > collection ) , value ) ; } <nl> void setValue ( const String & value ) { this - > member - > set_string ( * const_cast < Derived * > ( this - > collection ) , value ) ; } <nl> - void updateValue ( const Field & value ) <nl> - { <nl> - if ( ! this - > member - > updateable ) <nl> - throw Exception ( " Setting ' " + this - > member - > name . toString ( ) + " ' is restricted for updates . " , ErrorCodes : : IMMUTABLE_SETTING ) ; <nl> - setValue ( value ) ; <nl> - } <nl> - void updateValue ( const String & value ) <nl> - { <nl> - if ( ! this - > member - > updateable ) <nl> - throw Exception ( " Setting ' " + this - > member - > name . toString ( ) + " ' is restricted for updates . " , ErrorCodes : : IMMUTABLE_SETTING ) ; <nl> - setValue ( value ) ; <nl> - } <nl> } ; <nl> <nl> / / / Iterator to iterating through all the settings . <nl> class SettingsCollection <nl> void set ( size_t index , const String & value ) { ( * this ) [ index ] . setValue ( value ) ; } <nl> void set ( const String & name , const String & value ) { ( * this ) [ name ] . setValue ( value ) ; } <nl> <nl> - / / / Updates setting ' s value . Checks it ' mutability . <nl> - void update ( size_t index , const Field & value ) { ( * this ) [ index ] . updateValue ( value ) ; } <nl> - <nl> - void update ( const String & name , const Field & value ) { ( * this ) [ name ] . updateValue ( value ) ; } <nl> - <nl> - void update ( size_t index , const String & value ) { ( * this ) [ index ] . updateValue ( value ) ; } <nl> - <nl> - void update ( const String & name , const String & value ) { ( * this ) [ name ] . updateValue ( value ) ; } <nl> - <nl> / / / Returns value of a setting . <nl> Field get ( size_t index ) const { return ( * this ) [ index ] . getValue ( ) ; } <nl> Field get ( const String & name ) const { return ( * this ) [ name ] . getValue ( ) ; } <nl> class SettingsCollection <nl> return found_changes ; <nl> } <nl> <nl> - / / / Applies change to the settings . Doesn ' t check settings mutability . <nl> - void loadFromChange ( const SettingChange & change ) <nl> + / / / Applies change to concrete setting . <nl> + void applyChange ( const SettingChange & change ) <nl> { <nl> set ( change . name , change . value ) ; <nl> } <nl> <nl> - / / / Applies changes to the settings . Should be used in initial settings loading . 
<nl> - / / / ( on table creation or loading from config ) <nl> - void loadFromChanges ( const SettingsChanges & changes ) <nl> - { <nl> - for ( const SettingChange & change : changes ) <nl> - loadFromChange ( change ) ; <nl> - } <nl> - <nl> - / / / Applies change to the settings , checks settings mutability . <nl> - void updateFromChange ( const SettingChange & change ) <nl> - { <nl> - update ( change . name , change . value ) ; <nl> - } <nl> - <nl> - / / / Applies changes to the settings . Should be used for settigns update . <nl> - / / / ( ALTER MODIFY SETTINGS ) <nl> - void updateFromChanges ( const SettingsChanges & changes ) <nl> + / / / Applies changes to the settings . <nl> + void applyChanges ( const SettingsChanges & changes ) <nl> { <nl> for ( const SettingChange & change : changes ) <nl> - updateFromChange ( change ) ; <nl> + applyChange ( change ) ; <nl> } <nl> <nl> - <nl> void copyChangesFrom ( const Derived & src ) <nl> { <nl> for ( const auto & member : members ( ) ) <nl> class SettingsCollection <nl> } ; <nl> <nl> # define DECLARE_SETTINGS_COLLECTION ( LIST_OF_SETTINGS_MACRO ) \ <nl> - LIST_OF_SETTINGS_MACRO ( DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_ , DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_ ) <nl> + LIST_OF_SETTINGS_MACRO ( DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_ ) <nl> <nl> <nl> # define IMPLEMENT_SETTINGS_COLLECTION ( DERIVED_CLASS_NAME , LIST_OF_SETTINGS_MACRO ) \ <nl> class SettingsCollection <nl> using Derived = DERIVED_CLASS_NAME ; \ <nl> struct Functions \ <nl> { \ <nl> - LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_ , IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_ ) \ <nl> + LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_ ) \ <nl> } ; \ <nl> - LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_ADD_MUTABLE_MEMBER_INFO_HELPER_ , IMPLEMENT_SETTINGS_COLLECTION_ADD_IMMUTABLE_MEMBER_INFO_HELPER_ ) \ <nl> + LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_ADD_MEMBER_INFO_HELPER_ ) \ <nl> } <nl> <nl> <nl> class SettingsCollection <nl> static void NAME # # _setField ( Derived & collection , const Field & value ) { collection . NAME . set ( value ) ; } \ <nl> static void NAME # # _serialize ( const Derived & collection , WriteBuffer & buf ) { collection . NAME . serialize ( buf ) ; } \ <nl> static void NAME # # _deserialize ( Derived & collection , ReadBuffer & buf ) { collection . NAME . deserialize ( buf ) ; } \ <nl> - static Field NAME # # _castValueWithoutApplying ( const Field & value ) { TYPE temp { DEFAULT } ; temp . set ( value ) ; return temp . toField ( ) ; } <nl> + static Field NAME # # _castValueWithoutApplying ( const Field & value ) { TYPE temp { DEFAULT } ; temp . set ( value ) ; return temp . toField ( ) ; } \ <nl> <nl> <nl> - # define IMPLEMENT_SETTINGS_COLLECTION_ADD_MUTABLE_MEMBER_INFO_HELPER_ ( TYPE , NAME , DEFAULT , DESCRIPTION ) \ <nl> + # define IMPLEMENT_SETTINGS_COLLECTION_ADD_MEMBER_INFO_HELPER_ ( TYPE , NAME , DEFAULT , DESCRIPTION ) \ <nl> add ( { [ ] ( const Derived & d ) { return d . NAME . 
changed ; } , \ <nl> - StringRef ( # NAME , strlen ( # NAME ) ) , StringRef ( DESCRIPTION , strlen ( DESCRIPTION ) ) , true , \ <nl> + StringRef ( # NAME , strlen ( # NAME ) ) , StringRef ( DESCRIPTION , strlen ( DESCRIPTION ) ) , \ <nl> & Functions : : NAME # # _getString , & Functions : : NAME # # _getField , \ <nl> & Functions : : NAME # # _setString , & Functions : : NAME # # _setField , \ <nl> & Functions : : NAME # # _serialize , & Functions : : NAME # # _deserialize , \ <nl> & Functions : : NAME # # _castValueWithoutApplying } ) ; <nl> - <nl> - # define IMPLEMENT_SETTINGS_COLLECTION_ADD_IMMUTABLE_MEMBER_INFO_HELPER_ ( TYPE , NAME , DEFAULT , DESCRIPTION ) \ <nl> - add ( { [ ] ( const Derived & d ) { return d . NAME . changed ; } , \ <nl> - StringRef ( # NAME , strlen ( # NAME ) ) , StringRef ( DESCRIPTION , strlen ( DESCRIPTION ) ) , false , \ <nl> - & Functions : : NAME # # _getString , & Functions : : NAME # # _getField , \ <nl> - & Functions : : NAME # # _setString , & Functions : : NAME # # _setField , \ <nl> - & Functions : : NAME # # _serialize , & Functions : : NAME # # _deserialize , \ <nl> - & Functions : : NAME # # _castValueWithoutApplying } ) ; <nl> } <nl> mmm a / dbms / src / DataStreams / IBlockInputStream . h <nl> ppp b / dbms / src / DataStreams / IBlockInputStream . h <nl> class IBlockInputStream <nl> / / / NOTE : Acquire a read lock , therefore f ( ) should be thread - safe <nl> std : : shared_lock lock ( children_mutex ) ; <nl> <nl> - for ( auto & child : children ) <nl> + / / Reduce lock scope and avoid recursive locking since that is undefined for shared_mutex . <nl> + const auto children_copy = children ; <nl> + lock . unlock ( ) ; <nl> + <nl> + for ( auto & child : children_copy ) <nl> if ( f ( * child ) ) <nl> return ; <nl> } <nl> mmm a / dbms / src / DataStreams / PushingToViewsBlockOutputStream . cpp <nl> ppp b / dbms / src / DataStreams / PushingToViewsBlockOutputStream . cpp <nl> PushingToViewsBlockOutputStream : : PushingToViewsBlockOutputStream ( <nl> * Although any insertion into the table is now done via PushingToViewsBlockOutputStream , <nl> * it ' s clear that this is not the best place for this functionality . <nl> * / <nl> - addTableLock ( storage - > lockStructureForShare ( true , context . getCurrentQueryId ( ) ) ) ; <nl> + addTableLock ( storage - > lockStructureForShare ( true , context . getInitialQueryId ( ) ) ) ; <nl> <nl> / / / If the " root " table deduplicates blocks , there is no need to deduplicate for the children <nl> / / / Moreover , deduplication for AggregatingMergeTree children could produce false positives due to the low size of inserted blocks <nl> mmm a / dbms / src / Dictionaries / CacheDictionary . cpp <nl> ppp b / dbms / src / Dictionaries / CacheDictionary .
cpp <nl> CacheDictionary : : CacheDictionary ( <nl> , dict_struct ( dict_struct_ ) <nl> , source_ptr { std : : move ( source_ptr_ ) } <nl> , dict_lifetime ( dict_lifetime_ ) <nl> + , log ( & Logger : : get ( " ExternalDictionaries " ) ) <nl> , size { roundUpToPowerOfTwoOrZero ( std : : max ( size_ , size_t ( max_collision_length ) ) ) } <nl> , size_overlap_mask { this - > size - 1 } <nl> , cells { this - > size } <nl> BlockInputStreamPtr CacheDictionary : : getBlockInputStream ( const Names & column_na <nl> return std : : make_shared < BlockInputStreamType > ( shared_from_this ( ) , max_block_size , getCachedIds ( ) , column_names ) ; <nl> } <nl> <nl> + std : : exception_ptr CacheDictionary : : getLastException ( ) const <nl> + { <nl> + const ProfilingScopedReadRWLock read_lock { rw_lock , ProfileEvents : : DictCacheLockReadNs } ; <nl> + return last_exception ; <nl> + } <nl> + <nl> void registerDictionaryCache ( DictionaryFactory & factory ) <nl> { <nl> auto create_layout = [ = ] ( const std : : string & name , <nl> mmm a / dbms / src / Dictionaries / CacheDictionary . h <nl> ppp b / dbms / src / Dictionaries / CacheDictionary . h <nl> <nl> # include < shared_mutex > <nl> # include < variant > <nl> # include < vector > <nl> + # include < common / logger_useful . h > <nl> # include < Columns / ColumnDecimal . h > <nl> # include < Columns / ColumnString . h > <nl> # include < pcg_random . hpp > <nl> class CacheDictionary final : public IDictionary <nl> void isInVectorConstant ( const PaddedPODArray < Key > & child_ids , const Key ancestor_id , PaddedPODArray < UInt8 > & out ) const override ; <nl> void isInConstantVector ( const Key child_id , const PaddedPODArray < Key > & ancestor_ids , PaddedPODArray < UInt8 > & out ) const override ; <nl> <nl> + std : : exception_ptr getLastException ( ) const override ; <nl> + <nl> template < typename T > <nl> using ResultArrayType = std : : conditional_t < IsDecimalNumber < T > , DecimalPaddedPODArray < T > , PaddedPODArray < T > > ; <nl> <nl> class CacheDictionary final : public IDictionary <nl> <nl> const std : : string name ; <nl> const DictionaryStructure dict_struct ; <nl> - const DictionarySourcePtr source_ptr ; <nl> + mutable DictionarySourcePtr source_ptr ; <nl> const DictionaryLifetime dict_lifetime ; <nl> + Logger * const log ; <nl> <nl> mutable std : : shared_mutex rw_lock ; <nl> <nl> class CacheDictionary final : public IDictionary <nl> Attribute * hierarchical_attribute = nullptr ; <nl> std : : unique_ptr < ArenaWithFreeLists > string_arena ; <nl> <nl> + mutable std : : exception_ptr last_exception ; <nl> + mutable size_t error_count = 0 ; <nl> + mutable std : : chrono : : system_clock : : time_point backoff_end_time ; <nl> + <nl> mutable pcg64 rnd_engine ; <nl> <nl> mutable size_t bytes_allocated = 0 ; <nl> mmm a / dbms / src / Dictionaries / CacheDictionary . inc . h <nl> ppp b / dbms / src / Dictionaries / CacheDictionary . inc . h <nl> <nl> # include < Columns / ColumnsNumber . h > <nl> # include < Common / ProfilingScopedRWLock . h > <nl> # include < Common / typeid_cast . h > <nl> + # include < common / DateLUT . h > <nl> # include < DataStreams / IBlockInputStream . h > <nl> # include < ext / map . h > <nl> # include < ext / range . 
h > <nl> template < typename PresentIdHandler , typename AbsentIdHandler > <nl> void CacheDictionary : : update ( <nl> const std : : vector < Key > & requested_ids , PresentIdHandler & & on_cell_updated , AbsentIdHandler & & on_id_not_found ) const <nl> { <nl> + CurrentMetrics : : Increment metric_increment { CurrentMetrics : : DictCacheRequests } ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequested , requested_ids . size ( ) ) ; <nl> + <nl> std : : unordered_map < Key , UInt8 > remaining_ids { requested_ids . size ( ) } ; <nl> for ( const auto id : requested_ids ) <nl> remaining_ids . insert ( { id , 0 } ) ; <nl> <nl> - std : : uniform_int_distribution < UInt64 > distribution { dict_lifetime . min_sec , dict_lifetime . max_sec } ; <nl> + const auto now = std : : chrono : : system_clock : : now ( ) ; <nl> <nl> const ProfilingScopedWriteRWLock write_lock { rw_lock , ProfileEvents : : DictCacheLockWriteNs } ; <nl> <nl> + if ( now > backoff_end_time ) <nl> { <nl> - CurrentMetrics : : Increment metric_increment { CurrentMetrics : : DictCacheRequests } ; <nl> - Stopwatch watch ; <nl> - auto stream = source_ptr - > loadIds ( requested_ids ) ; <nl> - stream - > readPrefix ( ) ; <nl> - <nl> - const auto now = std : : chrono : : system_clock : : now ( ) ; <nl> - <nl> - while ( const auto block = stream - > read ( ) ) <nl> + try <nl> { <nl> - const auto id_column = typeid_cast < const ColumnUInt64 * > ( block . safeGetByPosition ( 0 ) . column . get ( ) ) ; <nl> - if ( ! id_column ) <nl> - throw Exception { name + " : id column has type different from UInt64 . " , ErrorCodes : : TYPE_MISMATCH } ; <nl> - <nl> - const auto & ids = id_column - > getData ( ) ; <nl> + if ( error_count ) <nl> + { <nl> + / / / Recover after error : we have to clone the source here because <nl> + / / / it could keep connections which should be reset after error . <nl> + source_ptr = source_ptr - > clone ( ) ; <nl> + } <nl> <nl> - / / / cache column pointers <nl> - const auto column_ptrs = ext : : map < std : : vector > ( <nl> - ext : : range ( 0 , attributes . size ( ) ) , [ & block ] ( size_t i ) { return block . safeGetByPosition ( i + 1 ) . column . get ( ) ; } ) ; <nl> + Stopwatch watch ; <nl> + auto stream = source_ptr - > loadIds ( requested_ids ) ; <nl> + stream - > readPrefix ( ) ; <nl> <nl> - for ( const auto i : ext : : range ( 0 , ids . size ( ) ) ) <nl> + while ( const auto block = stream - > read ( ) ) <nl> { <nl> - const auto id = ids [ i ] ; <nl> + const auto id_column = typeid_cast < const ColumnUInt64 * > ( block . safeGetByPosition ( 0 ) . column . get ( ) ) ; <nl> + if ( ! id_column ) <nl> + throw Exception { name + " : id column has type different from UInt64 . " , ErrorCodes : : TYPE_MISMATCH } ; <nl> <nl> - const auto find_result = findCellIdx ( id , now ) ; <nl> - const auto & cell_idx = find_result . cell_idx ; <nl> + const auto & ids = id_column - > getData ( ) ; <nl> <nl> - auto & cell = cells [ cell_idx ] ; <nl> + / / / cache column pointers <nl> + const auto column_ptrs = ext : : map < std : : vector > ( <nl> + ext : : range ( 0 , attributes . size ( ) ) , [ & block ] ( size_t i ) { return block . safeGetByPosition ( i + 1 ) . column . get ( ) ; } ) ; <nl> <nl> - for ( const auto attribute_idx : ext : : range ( 0 , attributes . size ( ) ) ) <nl> + for ( const auto i : ext : : range ( 0 , ids . 
size ( ) ) ) <nl> { <nl> - const auto & attribute_column = * column_ptrs [ attribute_idx ] ; <nl> - auto & attribute = attributes [ attribute_idx ] ; <nl> - <nl> - setAttributeValue ( attribute , cell_idx , attribute_column [ i ] ) ; <nl> + const auto id = ids [ i ] ; <nl> + <nl> + const auto find_result = findCellIdx ( id , now ) ; <nl> + const auto & cell_idx = find_result . cell_idx ; <nl> + <nl> + auto & cell = cells [ cell_idx ] ; <nl> + <nl> + for ( const auto attribute_idx : ext : : range ( 0 , attributes . size ( ) ) ) <nl> + { <nl> + const auto & attribute_column = * column_ptrs [ attribute_idx ] ; <nl> + auto & attribute = attributes [ attribute_idx ] ; <nl> + <nl> + setAttributeValue ( attribute , cell_idx , attribute_column [ i ] ) ; <nl> + } <nl> + <nl> + / / / if cell id is zero and zero does not map to this cell , then the cell is unused <nl> + if ( cell . id = = 0 & & cell_idx ! = zero_cell_idx ) <nl> + element_count . fetch_add ( 1 , std : : memory_order_relaxed ) ; <nl> + <nl> + cell . id = id ; <nl> + if ( dict_lifetime . min_sec ! = 0 & & dict_lifetime . max_sec ! = 0 ) <nl> + { <nl> + std : : uniform_int_distribution < UInt64 > distribution { dict_lifetime . min_sec , dict_lifetime . max_sec } ; <nl> + cell . setExpiresAt ( now + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> + } <nl> + else <nl> + cell . setExpiresAt ( std : : chrono : : time_point < std : : chrono : : system_clock > : : max ( ) ) ; <nl> + <nl> + / / / inform caller <nl> + on_cell_updated ( id , cell_idx ) ; <nl> + / / / mark corresponding id as found <nl> + remaining_ids [ id ] = 1 ; <nl> } <nl> + } <nl> <nl> - / / / if cell id is zero and zero does not map to this cell , then the cell is unused <nl> - if ( cell . id = = 0 & & cell_idx ! = zero_cell_idx ) <nl> - element_count . fetch_add ( 1 , std : : memory_order_relaxed ) ; <nl> + stream - > readSuffix ( ) ; <nl> <nl> - cell . id = id ; <nl> - if ( dict_lifetime . min_sec ! = 0 & & dict_lifetime . max_sec ! = 0 ) <nl> - cell . setExpiresAt ( std : : chrono : : system_clock : : now ( ) + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> - else <nl> - cell . setExpiresAt ( std : : chrono : : time_point < std : : chrono : : system_clock > : : max ( ) ) ; <nl> + error_count = 0 ; <nl> + last_exception = std : : exception_ptr { } ; <nl> + backoff_end_time = std : : chrono : : system_clock : : time_point { } ; <nl> <nl> - / / / inform caller <nl> - on_cell_updated ( id , cell_idx ) ; <nl> - / / / mark corresponding id as found <nl> - remaining_ids [ id ] = 1 ; <nl> - } <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheRequestTimeNs , watch . elapsed ( ) ) ; <nl> } <nl> + catch ( . . . ) <nl> + { <nl> + + + error_count ; <nl> + last_exception = std : : current_exception ( ) ; <nl> + backoff_end_time = now + std : : chrono : : seconds ( ExternalLoadableBackoff { } . calculateDuration ( rnd_engine , error_count ) ) ; <nl> <nl> - stream - > readSuffix ( ) ; <nl> - <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequested , requested_ids . size ( ) ) ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheRequestTimeNs , watch . elapsed ( ) ) ; <nl> + tryLogException ( last_exception , log , " Could not update cache dictionary ' " + getName ( ) + <nl> + " ' , next update is scheduled at " + DateLUT : : instance ( ) . 
timeToString ( std : : chrono : : system_clock : : to_time_t ( backoff_end_time ) ) ) ; <nl> + } <nl> } <nl> <nl> size_t not_found_num = 0 , found_num = 0 ; <nl> <nl> - const auto now = std : : chrono : : system_clock : : now ( ) ; <nl> / / / Check which ids have not been found and require setting null_value <nl> for ( const auto & id_found_pair : remaining_ids ) <nl> { <nl> void CacheDictionary : : update ( <nl> <nl> const auto find_result = findCellIdx ( id , now ) ; <nl> const auto & cell_idx = find_result . cell_idx ; <nl> - <nl> auto & cell = cells [ cell_idx ] ; <nl> <nl> - / / / Set null_value for each attribute <nl> - for ( auto & attribute : attributes ) <nl> - setDefaultAttributeValue ( attribute , cell_idx ) ; <nl> + if ( error_count ) <nl> + { <nl> + if ( find_result . outdated ) <nl> + { <nl> + / / / We have expired data for that ` id ` so we can continue using it . <nl> + bool was_default = cell . isDefault ( ) ; <nl> + cell . setExpiresAt ( backoff_end_time ) ; <nl> + if ( was_default ) <nl> + cell . setDefault ( ) ; <nl> + if ( was_default ) <nl> + on_id_not_found ( id , cell_idx ) ; <nl> + else <nl> + on_cell_updated ( id , cell_idx ) ; <nl> + continue ; <nl> + } <nl> + / / / We don ' t have expired data for that ` id ` so all we can do is to rethrow ` last_exception ` . <nl> + std : : rethrow_exception ( last_exception ) ; <nl> + } <nl> <nl> / / / Check if cell had not been occupied before and increment element counter if it hadn ' t <nl> if ( cell . id = = 0 & & cell_idx ! = zero_cell_idx ) <nl> element_count . fetch_add ( 1 , std : : memory_order_relaxed ) ; <nl> <nl> cell . id = id ; <nl> + <nl> if ( dict_lifetime . min_sec ! = 0 & & dict_lifetime . max_sec ! = 0 ) <nl> - cell . setExpiresAt ( std : : chrono : : system_clock : : now ( ) + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> + { <nl> + std : : uniform_int_distribution < UInt64 > distribution { dict_lifetime . min_sec , dict_lifetime . max_sec } ; <nl> + cell . setExpiresAt ( now + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> + } <nl> else <nl> cell . setExpiresAt ( std : : chrono : : time_point < std : : chrono : : system_clock > : : max ( ) ) ; <nl> <nl> + / / / Set null_value for each attribute <nl> cell . setDefault ( ) ; <nl> + for ( auto & attribute : attributes ) <nl> + setDefaultAttributeValue ( attribute , cell_idx ) ; <nl> <nl> / / / inform caller that the cell has not been found <nl> on_id_not_found ( id , cell_idx ) ; <nl> mmm a / dbms / src / Dictionaries / IDictionary . h <nl> ppp b / dbms / src / Dictionaries / IDictionary . h <nl> struct IDictionaryBase : public IExternalLoadable <nl> return source & & source - > isModified ( ) ; <nl> } <nl> <nl> + virtual std : : exception_ptr getLastException ( ) const { return { } ; } <nl> + <nl> std : : shared_ptr < IDictionaryBase > shared_from_this ( ) <nl> { <nl> return std : : static_pointer_cast < IDictionaryBase > ( IExternalLoadable : : shared_from_this ( ) ) ; <nl> mmm a / dbms / src / Functions / FunctionJoinGet . cpp <nl> ppp b / dbms / src / Functions / FunctionJoinGet . cpp <nl> FunctionBasePtr FunctionBuilderJoinGet : : buildImpl ( const ColumnsWithTypeAndName & <nl> auto join = storage_join - > getJoin ( ) ; <nl> DataTypes data_types ( arguments . size ( ) ) ; <nl> <nl> - auto table_lock = storage_join - > lockStructureForShare ( false , context . getCurrentQueryId ( ) ) ; <nl> + auto table_lock = storage_join - > lockStructureForShare ( false , context . 
getInitialQueryId ( ) ) ; <nl> for ( size_t i = 0 ; i < arguments . size ( ) ; + + i ) <nl> data_types [ i ] = arguments [ i ] . type ; <nl> <nl> mmm a / dbms / src / Functions / registerFunctionsIntrospection . cpp <nl> ppp b / dbms / src / Functions / registerFunctionsIntrospection . cpp <nl> class FunctionFactory ; <nl> void registerFunctionAddressToSymbol ( FunctionFactory & factory ) ; <nl> void registerFunctionDemangle ( FunctionFactory & factory ) ; <nl> void registerFunctionAddressToLine ( FunctionFactory & factory ) ; <nl> + void registerFunctionTrap ( FunctionFactory & factory ) ; <nl> <nl> void registerFunctionsIntrospection ( FunctionFactory & factory ) <nl> { <nl> registerFunctionAddressToSymbol ( factory ) ; <nl> registerFunctionDemangle ( factory ) ; <nl> registerFunctionAddressToLine ( factory ) ; <nl> + registerFunctionTrap ( factory ) ; <nl> } <nl> <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . e05d5efa4f7 <nl> mmm / dev / null <nl> ppp b / dbms / src / Functions / trap . cpp <nl> <nl> + # if 0 <nl> + <nl> + # include < Functions / IFunction . h > <nl> + # include < Functions / FunctionFactory . h > <nl> + # include < Functions / FunctionHelpers . h > <nl> + # include < DataTypes / DataTypeString . h > <nl> + # include < DataTypes / DataTypesNumber . h > <nl> + # include < Columns / ColumnString . h > <nl> + <nl> + # include < thread > <nl> + # include < memory > <nl> + # include < cstdlib > <nl> + # include < unistd . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int ILLEGAL_COLUMN ; <nl> + extern const int ILLEGAL_TYPE_OF_ARGUMENT ; <nl> + extern const int BAD_ARGUMENTS ; <nl> + } <nl> + <nl> + <nl> + / / / Various illegal actions to test diagnostic features of ClickHouse itself . Should not be enabled in production builds . <nl> + class FunctionTrap : public IFunction <nl> + { <nl> + public : <nl> + static constexpr auto name = " trap " ; <nl> + static FunctionPtr create ( const Context & ) <nl> + { <nl> + return std : : make_shared < FunctionTrap > ( ) ; <nl> + } <nl> + <nl> + String getName ( ) const override <nl> + { <nl> + return name ; <nl> + } <nl> + <nl> + size_t getNumberOfArguments ( ) const override <nl> + { <nl> + return 1 ; <nl> + } <nl> + <nl> + DataTypePtr getReturnTypeImpl ( const DataTypes & arguments ) const override <nl> + { <nl> + if ( ! isString ( arguments [ 0 ] ) ) <nl> + throw Exception ( " The only argument for function " + getName ( ) + " must be constant String " , ErrorCodes : : ILLEGAL_TYPE_OF_ARGUMENT ) ; <nl> + <nl> + return std : : make_shared < DataTypeUInt8 > ( ) ; <nl> + } <nl> + <nl> + void executeImpl ( Block & block , const ColumnNumbers & arguments , size_t result , size_t input_rows_count ) override <nl> + { <nl> + if ( const ColumnConst * column = checkAndGetColumnConst < ColumnString > ( block . getByPosition ( arguments [ 0 ] ) . column . 
get ( ) ) ) <nl> + { <nl> + String mode = column - > getValue < String > ( ) ; <nl> + <nl> + if ( mode = = " read nullptr c + + " ) <nl> + { <nl> + volatile int x = * reinterpret_cast < const volatile int * > ( 0 ) ; <nl> + ( void ) x ; <nl> + } <nl> + else if ( mode = = " read nullptr asm " ) <nl> + { <nl> + __asm__ volatile ( " movq $ 0 , % rax " ) ; <nl> + __asm__ volatile ( " movq ( % rax ) , % rax " ) ; <nl> + } <nl> + else if ( mode = = " illegal instruction " ) <nl> + { <nl> + __asm__ volatile ( " ud2a " ) ; <nl> + } <nl> + else if ( mode = = " abort " ) <nl> + { <nl> + abort ( ) ; <nl> + } <nl> + else if ( mode = = " use after free " ) <nl> + { <nl> + int * x_ptr ; <nl> + { <nl> + auto x = std : : make_unique < int > ( ) ; <nl> + x_ptr = x . get ( ) ; <nl> + } <nl> + * x_ptr = 1 ; <nl> + ( void ) x_ptr ; <nl> + } <nl> + else if ( mode = = " use after scope " ) <nl> + { <nl> + volatile int * x_ptr ; <nl> + [ & ] { <nl> + volatile int x = 0 ; <nl> + x_ptr = & x ; <nl> + ( void ) x ; <nl> + } ( ) ; <nl> + [ & ] { <nl> + volatile int y = 1 ; <nl> + * x_ptr = 2 ; <nl> + ( void ) y ; <nl> + } ( ) ; <nl> + ( void ) x_ptr ; <nl> + } <nl> + else if ( mode = = " uninitialized memory " ) <nl> + { <nl> + int x ; <nl> + ( void ) write ( 2 , & x , sizeof ( x ) ) ; <nl> + } <nl> + else if ( mode = = " data race " ) <nl> + { <nl> + int x = 0 ; <nl> + std : : thread t1 ( [ & ] { + + x ; } ) ; <nl> + std : : thread t2 ( [ & ] { + + x ; } ) ; <nl> + t1 . join ( ) ; <nl> + t2 . join ( ) ; <nl> + } <nl> + else <nl> + throw Exception ( " Unknown trap mode " , ErrorCodes : : BAD_ARGUMENTS ) ; <nl> + } <nl> + else <nl> + throw Exception ( " The only argument for function " + getName ( ) + " must be constant String " , ErrorCodes : : ILLEGAL_COLUMN ) ; <nl> + <nl> + block . getByPosition ( result ) . column = block . getByPosition ( result ) . type - > createColumnConst ( input_rows_count , 0ULL ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + void registerFunctionTrap ( FunctionFactory & factory ) <nl> + { <nl> + factory . registerFunction < FunctionTrap > ( ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + # else <nl> + <nl> + namespace DB <nl> + { <nl> + class FunctionFactory ; <nl> + void registerFunctionTrap ( FunctionFactory & ) { } <nl> + } <nl> + <nl> + # endif <nl> mmm a / dbms / src / IO / createReadBufferFromFileBase . cpp <nl> ppp b / dbms / src / IO / createReadBufferFromFileBase . cpp <nl> namespace ProfileEvents <nl> { <nl> extern const Event CreatedReadBufferOrdinary ; <nl> extern const Event CreatedReadBufferAIO ; <nl> + extern const Event CreatedReadBufferAIOFailed ; <nl> } <nl> <nl> namespace DB <nl> { <nl> - # if ! 
defined ( __linux__ ) <nl> - namespace ErrorCodes <nl> - { <nl> - extern const int NOT_IMPLEMENTED ; <nl> - } <nl> - # endif <nl> <nl> std : : unique_ptr < ReadBufferFromFileBase > createReadBufferFromFileBase ( const std : : string & filename_ , size_t estimated_size , <nl> size_t aio_threshold , size_t buffer_size_ , int flags_ , char * existing_memory_ , size_t alignment ) <nl> { <nl> - if ( ( aio_threshold = = 0 ) | | ( estimated_size < aio_threshold ) ) <nl> + # if defined ( __linux__ ) | | defined ( __FreeBSD__ ) <nl> + if ( aio_threshold & & estimated_size > = aio_threshold ) <nl> { <nl> - ProfileEvents : : increment ( ProfileEvents : : CreatedReadBufferOrdinary ) ; <nl> - return std : : make_unique < ReadBufferFromFile > ( filename_ , buffer_size_ , flags_ , existing_memory_ , alignment ) ; <nl> + / / / Attempt to open a file with O_DIRECT <nl> + try <nl> + { <nl> + auto res = std : : make_unique < ReadBufferAIO > ( filename_ , buffer_size_ , flags_ , existing_memory_ ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : CreatedReadBufferAIO ) ; <nl> + return res ; <nl> + } <nl> + catch ( const ErrnoException & ) <nl> + { <nl> + / / / Fallback to cached IO if O_DIRECT is not supported . <nl> + ProfileEvents : : increment ( ProfileEvents : : CreatedReadBufferAIOFailed ) ; <nl> + } <nl> } <nl> - else <nl> - { <nl> - # if defined ( __linux__ ) | | defined ( __FreeBSD__ ) <nl> - ProfileEvents : : increment ( ProfileEvents : : CreatedReadBufferAIO ) ; <nl> - return std : : make_unique < ReadBufferAIO > ( filename_ , buffer_size_ , flags_ , existing_memory_ ) ; <nl> # else <nl> - throw Exception ( " AIO is implemented only on Linux and FreeBSD " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + ( void ) aio_threshold ; <nl> + ( void ) estimated_size ; <nl> # endif <nl> - } <nl> + <nl> + ProfileEvents : : increment ( ProfileEvents : : CreatedReadBufferOrdinary ) ; <nl> + return std : : make_unique < ReadBufferFromFile > ( filename_ , buffer_size_ , flags_ , existing_memory_ , alignment ) ; <nl> } <nl> <nl> } <nl> mmm a / dbms / src / IO / createWriteBufferFromFileBase . cpp <nl> ppp b / dbms / src / IO / createWriteBufferFromFileBase . cpp <nl> namespace ProfileEvents <nl> { <nl> extern const Event CreatedWriteBufferOrdinary ; <nl> extern const Event CreatedWriteBufferAIO ; <nl> + extern const Event CreatedWriteBufferAIOFailed ; <nl> } <nl> <nl> namespace DB <nl> { <nl> <nl> - # if ! 
defined ( __linux__ ) <nl> - namespace ErrorCodes <nl> - { <nl> - extern const int NOT_IMPLEMENTED ; <nl> - } <nl> - # endif <nl> - <nl> std : : unique_ptr < WriteBufferFromFileBase > createWriteBufferFromFileBase ( const std : : string & filename_ , size_t estimated_size , <nl> size_t aio_threshold , size_t buffer_size_ , int flags_ , mode_t mode , char * existing_memory_ , <nl> size_t alignment ) <nl> { <nl> - if ( ( aio_threshold = = 0 ) | | ( estimated_size < aio_threshold ) ) <nl> + # if defined ( __linux__ ) | | defined ( __FreeBSD__ ) <nl> + if ( aio_threshold & & estimated_size > = aio_threshold ) <nl> { <nl> - ProfileEvents : : increment ( ProfileEvents : : CreatedWriteBufferOrdinary ) ; <nl> - return std : : make_unique < WriteBufferFromFile > ( filename_ , buffer_size_ , flags_ , mode , existing_memory_ , alignment ) ; <nl> + / / / Attempt to open a file with O_DIRECT <nl> + try <nl> + { <nl> + auto res = std : : make_unique < WriteBufferAIO > ( filename_ , buffer_size_ , flags_ , mode , existing_memory_ ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : CreatedWriteBufferAIO ) ; <nl> + return res ; <nl> + } <nl> + catch ( const ErrnoException & ) <nl> + { <nl> + / / / Fallback to cached IO if O_DIRECT is not supported . <nl> + ProfileEvents : : increment ( ProfileEvents : : CreatedWriteBufferAIOFailed ) ; <nl> + } <nl> } <nl> - else <nl> - { <nl> - # if defined ( __linux__ ) | | defined ( __FreeBSD__ ) <nl> - ProfileEvents : : increment ( ProfileEvents : : CreatedWriteBufferAIO ) ; <nl> - return std : : make_unique < WriteBufferAIO > ( filename_ , buffer_size_ , flags_ , mode , existing_memory_ ) ; <nl> # else <nl> - throw Exception ( " AIO is implemented only on Linux and FreeBSD " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + ( void ) aio_threshold ; <nl> + ( void ) estimated_size ; <nl> # endif <nl> - } <nl> + <nl> + ProfileEvents : : increment ( ProfileEvents : : CreatedWriteBufferOrdinary ) ; <nl> + return std : : make_unique < WriteBufferFromFile > ( filename_ , buffer_size_ , flags_ , mode , existing_memory_ , alignment ) ; <nl> } <nl> <nl> } <nl> mmm a / dbms / src / Interpreters / Context . cpp <nl> ppp b / dbms / src / Interpreters / Context . cpp <nl> void Context : : updateSettingsChanges ( const SettingsChanges & changes ) <nl> if ( change . name = = " profile " ) <nl> setProfile ( change . value . safeGet < String > ( ) ) ; <nl> else <nl> - settings . updateFromChange ( change ) ; <nl> + settings . applyChange ( change ) ; <nl> } <nl> } <nl> <nl> String Context : : getCurrentQueryId ( ) const <nl> } <nl> <nl> <nl> + String Context : : getInitialQueryId ( ) const <nl> + { <nl> + return client_info . initial_query_id ; <nl> + } <nl> + <nl> + <nl> void Context : : setCurrentDatabase ( const String & name ) <nl> { <nl> auto lock = getLock ( ) ; <nl> mmm a / dbms / src / Interpreters / Context . h <nl> ppp b / dbms / src / Interpreters / Context . h <nl> class Context <nl> <nl> String getCurrentDatabase ( ) const ; <nl> String getCurrentQueryId ( ) const ; <nl> + <nl> + / / / Id of initiating query for distributed queries ; or current query id if it ' s not a distributed query . <nl> + String getInitialQueryId ( ) const ; <nl> + <nl> void setCurrentDatabase ( const String & name ) ; <nl> void setCurrentQueryId ( const String & query_id ) ; <nl> <nl> mmm a / dbms / src / Interpreters / ExternalLoader . cpp <nl> ppp b / dbms / src / Interpreters / ExternalLoader . cpp <nl> <nl> # include " ExternalLoader . 
h " <nl> <nl> - # include < cmath > <nl> # include < mutex > <nl> # include < pcg_random . hpp > <nl> # include < common / DateLUT . h > <nl> class ExternalLoader : : LoadingDispatcher : private boost : : noncopyable <nl> class ExternalLoader : : PeriodicUpdater : private boost : : noncopyable <nl> { <nl> public : <nl> + static constexpr UInt64 check_period_sec = 5 ; <nl> + <nl> PeriodicUpdater ( ConfigFilesReader & config_files_reader_ , LoadingDispatcher & loading_dispatcher_ ) <nl> : config_files_reader ( config_files_reader_ ) , loading_dispatcher ( loading_dispatcher_ ) <nl> { <nl> class ExternalLoader : : PeriodicUpdater : private boost : : noncopyable <nl> <nl> ~ PeriodicUpdater ( ) { enable ( false ) ; } <nl> <nl> - void enable ( bool enable_ , const ExternalLoaderUpdateSettings & settings_ = { } ) <nl> + void enable ( bool enable_ ) <nl> { <nl> std : : unique_lock lock { mutex } ; <nl> enabled = enable_ ; <nl> - settings = settings_ ; <nl> <nl> if ( enable_ ) <nl> { <nl> class ExternalLoader : : PeriodicUpdater : private boost : : noncopyable <nl> return std : : chrono : : system_clock : : now ( ) + std : : chrono : : seconds { distribution ( rnd_engine ) } ; <nl> } <nl> <nl> - std : : uniform_int_distribution < UInt64 > distribution ( 0 , static_cast < UInt64 > ( std : : exp2 ( error_count - 1 ) ) ) ; <nl> - std : : chrono : : seconds delay ( std : : min < UInt64 > ( settings . backoff_max_sec , settings . backoff_initial_sec + distribution ( rnd_engine ) ) ) ; <nl> - return std : : chrono : : system_clock : : now ( ) + delay ; <nl> + return std : : chrono : : system_clock : : now ( ) + std : : chrono : : seconds ( ExternalLoadableBackoff { } . calculateDuration ( rnd_engine , error_count ) ) ; <nl> } <nl> <nl> private : <nl> class ExternalLoader : : PeriodicUpdater : private boost : : noncopyable <nl> setThreadName ( " ExterLdrReload " ) ; <nl> <nl> std : : unique_lock lock { mutex } ; <nl> - auto timeout = [ this ] { return std : : chrono : : seconds ( settings . check_period_sec ) ; } ; <nl> auto pred = [ this ] { return ! enabled ; } ; <nl> - while ( ! event . wait_for ( lock , timeout ( ) , pred ) ) <nl> + while ( ! event . wait_for ( lock , std : : chrono : : seconds ( check_period_sec ) , pred ) ) <nl> { <nl> lock . unlock ( ) ; <nl> loading_dispatcher . setConfiguration ( config_files_reader . read ( ) ) ; <nl> class ExternalLoader : : PeriodicUpdater : private boost : : noncopyable <nl> <nl> mutable std : : mutex mutex ; <nl> bool enabled = false ; <nl> - ExternalLoaderUpdateSettings settings ; <nl> ThreadFromGlobalPool thread ; <nl> std : : condition_variable event ; <nl> mutable pcg64 rnd_engine { randomSeed ( ) } ; <nl> void ExternalLoader : : enableAsyncLoading ( bool enable ) <nl> loading_dispatcher - > enableAsyncLoading ( enable ) ; <nl> } <nl> <nl> - void ExternalLoader : : enablePeriodicUpdates ( bool enable_ , const ExternalLoaderUpdateSettings & settings_ ) <nl> + void ExternalLoader : : enablePeriodicUpdates ( bool enable_ ) <nl> { <nl> - periodic_updater - > enable ( enable_ , settings_ ) ; <nl> + periodic_updater - > enable ( enable_ ) ; <nl> } <nl> <nl> bool ExternalLoader : : hasCurrentlyLoadedObjects ( ) const <nl> mmm a / dbms / src / Interpreters / ExternalLoader . h <nl> ppp b / dbms / src / Interpreters / ExternalLoader . 
h <nl> <nl> <nl> namespace DB <nl> { <nl> - struct ExternalLoaderUpdateSettings <nl> - { <nl> - UInt64 check_period_sec = 5 ; <nl> - UInt64 backoff_initial_sec = 5 ; <nl> - / / / 10 minutes <nl> - UInt64 backoff_max_sec = 10 * 60 ; <nl> - <nl> - ExternalLoaderUpdateSettings ( ) = default ; <nl> - ExternalLoaderUpdateSettings ( UInt64 check_period_sec_ , UInt64 backoff_initial_sec_ , UInt64 backoff_max_sec_ ) <nl> - : check_period_sec ( check_period_sec_ ) , backoff_initial_sec ( backoff_initial_sec_ ) , backoff_max_sec ( backoff_max_sec_ ) { } <nl> - } ; <nl> - <nl> - <nl> / * External configuration structure . <nl> * <nl> * < external_group > <nl> class ExternalLoader <nl> void enableAsyncLoading ( bool enable ) ; <nl> <nl> / / / Sets settings for periodic updates . <nl> - void enablePeriodicUpdates ( bool enable , const ExternalLoaderUpdateSettings & settings = { } ) ; <nl> + void enablePeriodicUpdates ( bool enable ) ; <nl> <nl> / / / Returns the status of the object . <nl> / / / If the object has not been loaded yet then the function returns Status : : NOT_LOADED . <nl> mmm a / dbms / src / Interpreters / IExternalLoadable . cpp <nl> ppp b / dbms / src / Interpreters / IExternalLoadable . cpp <nl> <nl> # include < Interpreters / IExternalLoadable . h > <nl> <nl> # include < Poco / Util / AbstractConfiguration . h > <nl> - <nl> + # include < cmath > <nl> <nl> namespace DB <nl> { <nl> ExternalLoadableLifetime : : ExternalLoadableLifetime ( const Poco : : Util : : AbstractCon <nl> max_sec = has_min ? config . getUInt64 ( config_prefix + " . max " ) : min_sec ; <nl> } <nl> <nl> + <nl> + UInt64 ExternalLoadableBackoff : : calculateDuration ( pcg64 & rnd_engine , size_t error_count ) const <nl> + { <nl> + if ( error_count < 1 ) <nl> + error_count = 1 ; <nl> + std : : uniform_int_distribution < UInt64 > distribution ( 0 , static_cast < UInt64 > ( std : : exp2 ( error_count - 1 ) ) ) ; <nl> + return std : : min < UInt64 > ( backoff_max_sec , backoff_initial_sec + distribution ( rnd_engine ) ) ; <nl> + } <nl> + <nl> } <nl> mmm a / dbms / src / Interpreters / IExternalLoadable . h <nl> ppp b / dbms / src / Interpreters / IExternalLoadable . h <nl> <nl> # include < string > <nl> # include < memory > <nl> # include < boost / noncopyable . hpp > <nl> + # include < pcg_random . hpp > <nl> # include < Core / Types . h > <nl> <nl> <nl> struct ExternalLoadableLifetime <nl> } ; <nl> <nl> <nl> + / / / Delay before trying to load again after error . <nl> + struct ExternalLoadableBackoff <nl> + { <nl> + UInt64 backoff_initial_sec = 5 ; <nl> + UInt64 backoff_max_sec = 10 * 60 ; / / / 10 minutes <nl> + <nl> + / / / Calculates time to try loading again after error . <nl> + UInt64 calculateDuration ( pcg64 & rnd_engine , size_t error_count = 1 ) const ; <nl> + } ; <nl> + <nl> + <nl> / / / Basic interface for external loadable objects . Is used in ExternalLoader . <nl> class IExternalLoadable : public std : : enable_shared_from_this < IExternalLoadable > , private boost : : noncopyable <nl> { <nl> mmm a / dbms / src / Interpreters / InterpreterDescribeQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterDescribeQuery . cpp <nl> BlockInputStreamPtr InterpreterDescribeQuery : : executeImpl ( ) <nl> table = context . getTable ( database_name , table_name ) ; <nl> } <nl> <nl> - auto table_lock = table - > lockStructureForShare ( false , context . getCurrentQueryId ( ) ) ; <nl> + auto table_lock = table - > lockStructureForShare ( false , context . 
getInitialQueryId ( ) ) ; <nl> columns = table - > getColumns ( ) ; <nl> } <nl> <nl> mmm a / dbms / src / Interpreters / InterpreterInsertQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterInsertQuery . cpp <nl> BlockIO InterpreterInsertQuery : : execute ( ) <nl> checkAccess ( query ) ; <nl> StoragePtr table = getTable ( query ) ; <nl> <nl> - auto table_lock = table - > lockStructureForShare ( true , context . getCurrentQueryId ( ) ) ; <nl> + auto table_lock = table - > lockStructureForShare ( true , context . getInitialQueryId ( ) ) ; <nl> <nl> / / / We create a pipeline of several streams , into which we will write data . <nl> BlockOutputStreamPtr out ; <nl> mmm a / dbms / src / Interpreters / InterpreterKillQueryQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterKillQueryQuery . cpp <nl> Block InterpreterKillQueryQuery : : getSelectResult ( const String & columns , const S <nl> if ( where_expression ) <nl> select_query + = " WHERE " + queryToString ( where_expression ) ; <nl> <nl> - auto use_processors = context . getSettingsRef ( ) . experimental_use_processors ; <nl> - context . getSettingsRef ( ) . experimental_use_processors = false ; <nl> - <nl> - SCOPE_EXIT ( context . getSettingsRef ( ) . experimental_use_processors = use_processors ) ; <nl> - <nl> BlockIO block_io = executeQuery ( select_query , context , true ) ; <nl> Block res = block_io . in - > read ( ) ; <nl> <nl> mmm a / dbms / src / Interpreters / InterpreterRenameQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterRenameQuery . cpp <nl> struct RenameDescription <nl> to_table_name ( elem . to . table ) <nl> { } <nl> <nl> - TableStructureWriteLockHolder from_table_lock ; <nl> - <nl> String from_database_name ; <nl> String from_table_name ; <nl> <nl> BlockIO InterpreterRenameQuery : : execute ( ) <nl> } <nl> } ; <nl> <nl> - std : : map < UniqueTableName , TableStructureWriteLockHolder > tables_from_locks ; <nl> - <nl> / / / Don ' t allow to drop tables ( that we are renaming ) ; don ' t allow to create tables in places where tables will be renamed . <nl> std : : map < UniqueTableName , std : : unique_ptr < DDLGuard > > table_guards ; <nl> <nl> BlockIO InterpreterRenameQuery : : execute ( ) <nl> UniqueTableName from ( descriptions . back ( ) . from_database_name , descriptions . back ( ) . from_table_name ) ; <nl> UniqueTableName to ( descriptions . back ( ) . to_database_name , descriptions . back ( ) . to_table_name ) ; <nl> <nl> - if ( ! tables_from_locks . count ( from ) ) <nl> - if ( auto table = context . tryGetTable ( from . database_name , from . table_name ) ) <nl> - tables_from_locks . emplace ( from , table - > lockExclusively ( context . getCurrentQueryId ( ) ) ) ; <nl> - <nl> - descriptions . back ( ) . from_table_lock = tables_from_locks [ from ] ; <nl> - <nl> - if ( ! table_guards . count ( from ) ) <nl> - table_guards . emplace ( from , context . getDDLGuard ( from . database_name , from . table_name ) ) ; <nl> - <nl> - if ( ! table_guards . count ( to ) ) <nl> - table_guards . emplace ( to , context . getDDLGuard ( to . database_name , to . table_name ) ) ; <nl> + table_guards [ from ] ; <nl> + table_guards [ to ] ; <nl> } <nl> <nl> - / * * All tables are locked . If there are more than one rename in chain , <nl> - * we need to hold global lock while doing all renames . Order matters to avoid deadlocks . 
<nl> - * It provides atomicity of all RENAME chain as a whole , from the point of view of DBMS client , <nl> - * but only in cases when there was no exceptions during this process and server does not fall . <nl> - * / <nl> - <nl> - decltype ( context . getLock ( ) ) lock ; <nl> - <nl> - if ( descriptions . size ( ) > 1 ) <nl> - lock = context . getLock ( ) ; <nl> + / / / Must do it in consistent order . <nl> + for ( auto & table_guard : table_guards ) <nl> + table_guard . second = context . getDDLGuard ( table_guard . first . database_name , table_guard . first . table_name ) ; <nl> <nl> for ( auto & elem : descriptions ) <nl> { <nl> context . assertTableDoesntExist ( elem . to_database_name , elem . to_table_name ) ; <nl> + auto from_table = context . getTable ( elem . from_database_name , elem . from_table_name ) ; <nl> + auto from_table_lock = from_table - > lockExclusively ( context . getCurrentQueryId ( ) ) ; <nl> <nl> context . getDatabase ( elem . from_database_name ) - > renameTable ( <nl> - context , elem . from_table_name , * context . getDatabase ( elem . to_database_name ) , elem . to_table_name , elem . from_table_lock ) ; <nl> + context , <nl> + elem . from_table_name , <nl> + * context . getDatabase ( elem . to_database_name ) , <nl> + elem . to_table_name , <nl> + from_table_lock ) ; <nl> } <nl> <nl> return { } ; <nl> mmm a / dbms / src / Interpreters / InterpreterSelectQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterSelectQuery . cpp <nl> InterpreterSelectQuery : : InterpreterSelectQuery ( <nl> } <nl> <nl> if ( storage ) <nl> - table_lock = storage - > lockStructureForShare ( false , context . getCurrentQueryId ( ) ) ; <nl> + table_lock = storage - > lockStructureForShare ( false , context . getInitialQueryId ( ) ) ; <nl> <nl> syntax_analyzer_result = SyntaxAnalyzer ( context , options ) . analyze ( <nl> query_ptr , source_header . getNamesAndTypesList ( ) , required_result_column_names , storage , NamesAndTypesList ( ) ) ; <nl> mmm a / dbms / src / Interpreters / ProcessList . cpp <nl> ppp b / dbms / src / Interpreters / ProcessList . cpp <nl> QueryStatusInfo QueryStatus : : getInfo ( bool get_thread_list , bool get_profile_even <nl> { <nl> std : : lock_guard lock ( thread_group - > mutex ) ; <nl> res . thread_numbers = thread_group - > thread_numbers ; <nl> + res . os_thread_ids = thread_group - > os_thread_ids ; <nl> } <nl> <nl> if ( get_profile_events ) <nl> mmm a / dbms / src / Interpreters / ProcessList . h <nl> ppp b / dbms / src / Interpreters / ProcessList . h <nl> struct QueryStatusInfo <nl> <nl> / / / Optional fields , filled by request <nl> std : : vector < UInt32 > thread_numbers ; <nl> + std : : vector < UInt32 > os_thread_ids ; <nl> std : : shared_ptr < ProfileEvents : : Counters > profile_counters ; <nl> std : : shared_ptr < Settings > query_settings ; <nl> } ; <nl> mmm a / dbms / src / Interpreters / QueryLog . cpp <nl> ppp b / dbms / src / Interpreters / QueryLog . cpp <nl> Block QueryLogElement : : createBlock ( ) <nl> { std : : make_shared < DataTypeUInt32 > ( ) , " revision " } , <nl> <nl> { std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt32 > ( ) ) , " thread_numbers " } , <nl> + { std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt32 > ( ) ) , " os_thread_ids " } , <nl> { std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeString > ( ) ) , " ProfileEvents . 
Names " } , <nl> { std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt64 > ( ) ) , " ProfileEvents . Values " } , <nl> { std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeString > ( ) ) , " Settings . Names " } , <nl> void QueryLogElement : : appendToBlock ( Block & block ) const <nl> columns [ i + + ] - > insert ( threads_array ) ; <nl> } <nl> <nl> + { <nl> + Array threads_array ; <nl> + threads_array . reserve ( os_thread_ids . size ( ) ) ; <nl> + for ( const UInt32 thread_number : os_thread_ids ) <nl> + threads_array . emplace_back ( UInt64 ( thread_number ) ) ; <nl> + columns [ i + + ] - > insert ( threads_array ) ; <nl> + } <nl> + <nl> if ( profile_counters ) <nl> { <nl> auto column_names = columns [ i + + ] . get ( ) ; <nl> mmm a / dbms / src / Interpreters / QueryLog . h <nl> ppp b / dbms / src / Interpreters / QueryLog . h <nl> struct QueryLogElement <nl> ClientInfo client_info ; <nl> <nl> std : : vector < UInt32 > thread_numbers ; <nl> + std : : vector < UInt32 > os_thread_ids ; <nl> std : : shared_ptr < ProfileEvents : : Counters > profile_counters ; <nl> std : : shared_ptr < Settings > query_settings ; <nl> <nl> mmm a / dbms / src / Interpreters / ThreadStatusExt . cpp <nl> ppp b / dbms / src / Interpreters / ThreadStatusExt . cpp <nl> void ThreadStatus : : initializeQuery ( ) <nl> thread_group - > memory_tracker . setDescription ( " ( for query ) " ) ; <nl> <nl> thread_group - > thread_numbers . emplace_back ( thread_number ) ; <nl> + thread_group - > os_thread_ids . emplace_back ( os_thread_id ) ; <nl> thread_group - > master_thread_number = thread_number ; <nl> thread_group - > master_thread_os_id = os_thread_id ; <nl> <nl> void ThreadStatus : : attachQuery ( const ThreadGroupStatusPtr & thread_group_ , bool <nl> <nl> / / / NOTE : A thread may be attached multiple times if it is reused from a thread pool . <nl> thread_group - > thread_numbers . emplace_back ( thread_number ) ; <nl> + thread_group - > os_thread_ids . emplace_back ( os_thread_id ) ; <nl> } <nl> <nl> if ( query_context ) <nl> mmm a / dbms / src / Interpreters / executeQuery . cpp <nl> ppp b / dbms / src / Interpreters / executeQuery . cpp <nl> static std : : tuple < ASTPtr , BlockIO > executeQueryImpl ( <nl> } <nl> <nl> elem . thread_numbers = std : : move ( info . thread_numbers ) ; <nl> + elem . os_thread_ids = std : : move ( info . os_thread_ids ) ; <nl> elem . profile_counters = std : : move ( info . profile_counters ) ; <nl> <nl> if ( log_queries ) <nl> static std : : tuple < ASTPtr , BlockIO > executeQueryImpl ( <nl> elem . memory_usage = info . peak_memory_usage > 0 ? info . peak_memory_usage : 0 ; <nl> <nl> elem . thread_numbers = std : : move ( info . thread_numbers ) ; <nl> + elem . os_thread_ids = std : : move ( info . os_thread_ids ) ; <nl> elem . profile_counters = std : : move ( info . profile_counters ) ; <nl> } <nl> <nl> mmm a / dbms / src / Storages / AlterCommands . cpp <nl> ppp b / dbms / src / Storages / AlterCommands . cpp <nl> void AlterCommands : : validate ( const IStorage & table , const Context & context ) <nl> } <nl> } <nl> else if ( command . type = = AlterCommand : : MODIFY_SETTING ) <nl> - { <nl> for ( const auto & change : command . settings_changes ) <nl> - { <nl> - if ( ! table . hasSetting ( change . name ) ) <nl> - { <nl> - throw Exception { " Storage ' " + table . getName ( ) + " ' doesn ' t have setting ' " + change . name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> - } <nl> - } <nl> - } <nl> + table . 
checkSettingCanBeChanged ( change . name ) ; <nl> } <nl> <nl> / * * Existing defaulted columns may require default expression extensions with a type conversion , <nl> mmm a / dbms / src / Storages / IStorage . cpp <nl> ppp b / dbms / src / Storages / IStorage . cpp <nl> bool IStorage : : isVirtualColumn ( const String & column_name ) const <nl> return getColumns ( ) . get ( column_name ) . is_virtual ; <nl> } <nl> <nl> - bool IStorage : : hasSetting ( const String & / * setting_name * / ) const <nl> + void IStorage : : checkSettingCanBeChanged ( const String & / * setting_name * / ) const <nl> { <nl> if ( ! supportsSettings ( ) ) <nl> throw Exception ( " Storage ' " + getName ( ) + " ' doesn ' t support settings . " , ErrorCodes : : SETTINGS_ARE_NOT_SUPPORTED ) ; <nl> - return false ; <nl> } <nl> <nl> TableStructureReadLockHolder IStorage : : lockStructureForShare ( bool will_add_new_data , const String & query_id ) <nl> IDatabase : : ASTModifier IStorage : : getSettingsModifier ( const SettingsChanges & new <nl> / / / Make storage settings unique <nl> for ( const auto & change : new_changes ) <nl> { <nl> - if ( hasSetting ( change . name ) ) <nl> - { <nl> - auto finder = [ & change ] ( const SettingChange & c ) { return c . name = = change . name ; } ; <nl> - if ( auto it = std : : find_if ( storage_changes . begin ( ) , storage_changes . end ( ) , finder ) ; it ! = storage_changes . end ( ) ) <nl> - it - > value = change . value ; <nl> - else <nl> - storage_changes . push_back ( change ) ; <nl> - } <nl> + checkSettingCanBeChanged ( change . name ) ; <nl> + <nl> + auto finder = [ & change ] ( const SettingChange & c ) { return c . name = = change . name ; } ; <nl> + if ( auto it = std : : find_if ( storage_changes . begin ( ) , storage_changes . end ( ) , finder ) ; it ! = storage_changes . end ( ) ) <nl> + it - > value = change . value ; <nl> else <nl> - throw Exception { " Storage ' " + getName ( ) + " ' doesn ' t have setting ' " + change . name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> + storage_changes . push_back ( change ) ; <nl> } <nl> } <nl> } ; <nl> mmm a / dbms / src / Storages / IStorage . h <nl> ppp b / dbms / src / Storages / IStorage . h <nl> class IStorage : public std : : enable_shared_from_this < IStorage > <nl> / / / If | need_all | is set , then checks that all the columns of the table are in the block . <nl> void check ( const Block & block , bool need_all = false ) const ; <nl> <nl> - / / / Check storage has setting . Exception will be thrown if it doesn ' t support settings at all . <nl> - virtual bool hasSetting ( const String & setting_name ) const ; <nl> + / / / Check storage has setting and setting can be modified . <nl> + virtual void checkSettingCanBeChanged ( const String & setting_name ) const ; <nl> <nl> protected : / / / still thread - unsafe part . <nl> void setIndices ( IndicesDescription indices_ ) ; <nl> class IStorage : public std : : enable_shared_from_this < IStorage > <nl> virtual bool isVirtualColumn ( const String & column_name ) const ; <nl> <nl> / / / Returns modifier of settings in storage definition <nl> - virtual IDatabase : : ASTModifier getSettingsModifier ( const SettingsChanges & new_changes ) const ; <nl> + IDatabase : : ASTModifier getSettingsModifier ( const SettingsChanges & new_changes ) const ; <nl> <nl> private : <nl> ColumnsDescription columns ; / / / combined real and virtual columns <nl> mmm a / dbms / src / Storages / Kafka / KafkaSettings . cpp <nl> ppp b / dbms / src / Storages / Kafka / KafkaSettings . 
cpp <nl> void KafkaSettings : : loadFromQuery ( ASTStorage & storage_def ) <nl> { <nl> try <nl> { <nl> - loadFromChanges ( storage_def . settings - > changes ) ; <nl> + applyChanges ( storage_def . settings - > changes ) ; <nl> } <nl> catch ( Exception & e ) <nl> { <nl> mmm a / dbms / src / Storages / Kafka / KafkaSettings . h <nl> ppp b / dbms / src / Storages / Kafka / KafkaSettings . h <nl> struct KafkaSettings : public SettingsCollection < KafkaSettings > <nl> { <nl> <nl> <nl> - / / / M ( mutable ) for normal settings , IM ( immutable ) for not updateable settings . <nl> - # define LIST_OF_KAFKA_SETTINGS ( M , IM ) \ <nl> - IM ( SettingString , kafka_broker_list , " " , " A comma - separated list of brokers for Kafka engine . " ) \ <nl> - IM ( SettingString , kafka_topic_list , " " , " A list of Kafka topics . " ) \ <nl> - IM ( SettingString , kafka_group_name , " " , " A group of Kafka consumers . " ) \ <nl> - IM ( SettingString , kafka_format , " " , " The message format for Kafka engine . " ) \ <nl> - IM ( SettingChar , kafka_row_delimiter , ' \ 0 ' , " The character to be considered as a delimiter in Kafka message . " ) \ <nl> - IM ( SettingString , kafka_schema , " " , " Schema identifier ( used by schema - based formats ) for Kafka engine " ) \ <nl> - IM ( SettingUInt64 , kafka_num_consumers , 1 , " The number of consumers per table for Kafka engine . " ) \ <nl> - IM ( SettingUInt64 , kafka_max_block_size , 0 , " The maximum block size per table for Kafka engine . " ) \ <nl> - IM ( SettingUInt64 , kafka_skip_broken_messages , 0 , " Skip at least this number of broken messages from Kafka topic per block " ) \ <nl> - IM ( SettingUInt64 , kafka_commit_every_batch , 0 , " Commit every consumed and handled batch instead of a single commit after writing a whole block " ) <nl> + # define LIST_OF_KAFKA_SETTINGS ( M ) \ <nl> + M ( SettingString , kafka_broker_list , " " , " A comma - separated list of brokers for Kafka engine . " ) \ <nl> + M ( SettingString , kafka_topic_list , " " , " A list of Kafka topics . " ) \ <nl> + M ( SettingString , kafka_group_name , " " , " A group of Kafka consumers . " ) \ <nl> + M ( SettingString , kafka_format , " " , " The message format for Kafka engine . " ) \ <nl> + M ( SettingChar , kafka_row_delimiter , ' \ 0 ' , " The character to be considered as a delimiter in Kafka message . " ) \ <nl> + M ( SettingString , kafka_schema , " " , " Schema identifier ( used by schema - based formats ) for Kafka engine " ) \ <nl> + M ( SettingUInt64 , kafka_num_consumers , 1 , " The number of consumers per table for Kafka engine . " ) \ <nl> + M ( SettingUInt64 , kafka_max_block_size , 0 , " The maximum block size per table for Kafka engine . " ) \ <nl> + M ( SettingUInt64 , kafka_skip_broken_messages , 0 , " Skip at least this number of broken messages from Kafka topic per block " ) \ <nl> + M ( SettingUInt64 , kafka_commit_every_batch , 0 , " Commit every consumed and handled batch instead of a single commit after writing a whole block " ) <nl> <nl> DECLARE_SETTINGS_COLLECTION ( LIST_OF_KAFKA_SETTINGS ) <nl> <nl> mmm a / dbms / src / Storages / Kafka / StorageKafka . cpp <nl> ppp b / dbms / src / Storages / Kafka / StorageKafka . 
cpp <nl> namespace ErrorCodes <nl> extern const int BAD_ARGUMENTS ; <nl> extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH ; <nl> extern const int UNSUPPORTED_METHOD ; <nl> + extern const int UNKNOWN_SETTING ; <nl> + extern const int READONLY_SETTING ; <nl> } <nl> <nl> namespace <nl> bool StorageKafka : : streamToViews ( ) <nl> } <nl> <nl> <nl> - bool StorageKafka : : hasSetting ( const String & setting_name ) const <nl> + void StorageKafka : : checkSettingCanBeChanged ( const String & setting_name ) const <nl> { <nl> - return KafkaSettings : : findIndex ( setting_name ) ! = KafkaSettings : : npos ; <nl> - } <nl> + if ( KafkaSettings : : findIndex ( setting_name ) = = KafkaSettings : : npos ) <nl> + throw Exception { " Storage ' " + getName ( ) + " ' doesn ' t have setting ' " + setting_name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> <nl> - IDatabase : : ASTModifier StorageKafka : : getSettingsModifier ( const SettingsChanges & / * new_changes * / ) const <nl> - { <nl> - throw Exception ( " Storage ' " + getName ( ) + " ' doesn ' t support settings alter " , ErrorCodes : : UNSUPPORTED_METHOD ) ; <nl> + throw Exception { " Setting ' " + setting_name + " ' is readonly for storage ' " + getName ( ) + " ' " , ErrorCodes : : READONLY_SETTING } ; <nl> } <nl> <nl> void registerStorageKafka ( StorageFactory & factory ) <nl> mmm a / dbms / src / Storages / Kafka / StorageKafka . h <nl> ppp b / dbms / src / Storages / Kafka / StorageKafka . h <nl> class StorageKafka : public ext : : shared_ptr_helper < StorageKafka > , public IStorag <nl> const auto & getSchemaName ( ) const { return schema_name ; } <nl> const auto & skipBroken ( ) const { return skip_broken ; } <nl> <nl> - bool hasSetting ( const String & setting_name ) const override ; <nl> - <nl> + void checkSettingCanBeChanged ( const String & setting_name ) const override ; <nl> <nl> protected : <nl> StorageKafka ( <nl> class StorageKafka : public ext : : shared_ptr_helper < StorageKafka > , public IStorag <nl> size_t num_consumers_ , UInt64 max_block_size_ , size_t skip_broken , <nl> bool intermediate_commit_ ) ; <nl> <nl> - IDatabase : : ASTModifier getSettingsModifier ( const SettingsChanges & new_changes ) const override ; <nl> private : <nl> / / Configuration and state <nl> String table_name ; <nl> mmm a / dbms / src / Storages / LiveView / StorageLiveView . cpp <nl> ppp b / dbms / src / Storages / LiveView / StorageLiveView . cpp <nl> void registerStorageLiveView ( StorageFactory & factory ) <nl> { <nl> factory . registerStorage ( " LiveView " , [ ] ( const StorageFactory : : Arguments & args ) <nl> { <nl> - if ( ! args . local_context . getSettingsRef ( ) . allow_experimental_live_view ) <nl> + if ( ! args . attach & & ! args . local_context . getSettingsRef ( ) . allow_experimental_live_view ) <nl> throw Exception ( " Experimental LIVE VIEW feature is not enabled ( the setting ' allow_experimental_live_view ' ) " , ErrorCodes : : SUPPORT_IS_DISABLED ) ; <nl> <nl> return StorageLiveView : : create ( args . table_name , args . database_name , args . local_context , args . query , args . columns ) ; <nl> mmm a / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . 
cpp <nl> void IMergedBlockOutputStream : : calculateAndSerializeSkipIndices ( <nl> { <nl> / / / Creating block for update <nl> Block indices_update_block ( skip_indexes_columns ) ; <nl> - size_t skip_index_current_mark = 0 ; <nl> + size_t skip_index_current_data_mark = 0 ; <nl> <nl> / / / Filling and writing skip indices like in IMergedBlockOutputStream : : writeColumn <nl> for ( size_t i = 0 ; i < skip_indices . size ( ) ; + + i ) <nl> void IMergedBlockOutputStream : : calculateAndSerializeSkipIndices ( <nl> const auto index = skip_indices [ i ] ; <nl> auto & stream = * skip_indices_streams [ i ] ; <nl> size_t prev_pos = 0 ; <nl> - skip_index_current_mark = skip_index_mark ; <nl> + skip_index_current_data_mark = skip_index_data_mark ; <nl> while ( prev_pos < rows ) <nl> { <nl> UInt64 limit = 0 ; <nl> void IMergedBlockOutputStream : : calculateAndSerializeSkipIndices ( <nl> } <nl> else <nl> { <nl> - limit = index_granularity . getMarkRows ( skip_index_current_mark ) ; <nl> + limit = index_granularity . getMarkRows ( skip_index_current_data_mark ) ; <nl> if ( skip_indices_aggregators [ i ] - > empty ( ) ) <nl> { <nl> skip_indices_aggregators [ i ] = index - > createIndexAggregator ( ) ; <nl> void IMergedBlockOutputStream : : calculateAndSerializeSkipIndices ( <nl> / / / to be compatible with normal . mrk2 file format <nl> if ( can_use_adaptive_granularity ) <nl> writeIntBinary ( 1UL , stream . marks ) ; <nl> - <nl> - + + skip_index_current_mark ; <nl> } <nl> + / / / This data mark is aggregated , go to the next one . <nl> + skip_index_current_data_mark + + ; <nl> } <nl> <nl> size_t pos = prev_pos ; <nl> void IMergedBlockOutputStream : : calculateAndSerializeSkipIndices ( <nl> prev_pos = pos ; <nl> } <nl> } <nl> - skip_index_mark = skip_index_current_mark ; <nl> + skip_index_data_mark = skip_index_current_data_mark ; <nl> } <nl> <nl> void IMergedBlockOutputStream : : finishSkipIndicesSerialization ( <nl> mmm a / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . h <nl> ppp b / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . h <nl> class IMergedBlockOutputStream : public IBlockOutputStream <nl> size_t aio_threshold ; <nl> <nl> size_t current_mark = 0 ; <nl> - size_t skip_index_mark = 0 ; <nl> + <nl> + / / / Number of the data mark from which skip indices have to start <nl> + / / / aggregation , i . e . a data mark number , not a skip - index mark . <nl> + size_t skip_index_data_mark = 0 ; <nl> <nl> const bool can_use_adaptive_granularity ; <nl> const std : : string marks_file_extension ; <nl>
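The rename from skip_index_mark to skip_index_data_mark in the two hunks above makes explicit that this cursor walks data marks, while a single skip-index granule may aggregate several of them. A minimal standalone sketch of that accounting, using hypothetical names and constants rather than the actual ClickHouse classes:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Rows per *data* mark, as index_granularity.getMarkRows() would report them.
    std::vector<std::size_t> mark_rows = {8192, 8192, 4096};
    const std::size_t marks_per_index_granule = 2;  // aggregate two data marks per skip-index granule

    std::size_t skip_index_data_mark = 0;   // cursor over data marks, not over index marks
    std::size_t index_granules_written = 0;
    std::size_t rows_pending = 0;           // rows aggregated but not yet flushed

    for (std::size_t rows : mark_rows)
    {
        rows_pending += rows;
        ++skip_index_data_mark;  // this data mark is aggregated, go to the next one
        if (skip_index_data_mark % marks_per_index_granule == 0)
        {
            ++index_granules_written;  // flush one skip-index granule
            rows_pending = 0;
        }
    }

    std::cout << "data marks: " << skip_index_data_mark
              << ", index granules: " << index_granules_written
              << ", rows pending: " << rows_pending << '\n';
}

Keeping the two counters distinct is what allows the adaptive-granularity case above to advance by whole data marks while the index stream emits marks at its own, coarser rate.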
mmm a / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> namespace ErrorCodes <nl> extern const int INCORRECT_FILE_NAME ; <nl> extern const int BAD_DATA_PART_NAME ; <nl> extern const int UNKNOWN_SETTING ; <nl> + extern const int READONLY_SETTING ; <nl> } <nl> <nl> <nl> void MergeTreeData : : checkAlter ( const AlterCommands & commands , const Context & c <nl> setTTLExpressions ( new_columns . getColumnTTLs ( ) , new_ttl_table_ast , / * only_check = * / true ) ; <nl> <nl> for ( const auto & setting : new_changes ) <nl> - { <nl> - if ( ! hasSetting ( setting . name ) ) <nl> - throw Exception { " Storage ' " + getName ( ) + " ' doesn ' t have setting ' " + setting . name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> - } <nl> + checkSettingCanBeChanged ( setting . name ) ; <nl> <nl> / / / Check that type conversions are possible . <nl> ExpressionActionsPtr unused_expression ; <nl> void MergeTreeData : : changeSettings ( <nl> if ( ! new_changes . empty ( ) ) <nl> { <nl> MergeTreeSettings copy = * getSettings ( ) ; <nl> - copy . updateFromChanges ( new_changes ) ; <nl> + copy . applyChanges ( new_changes ) ; <nl> storage_settings . set ( std : : make_unique < const MergeTreeSettings > ( copy ) ) ; <nl> } <nl> } <nl> <nl> - bool MergeTreeData : : hasSetting ( const String & setting_name ) const <nl> + void MergeTreeData : : checkSettingCanBeChanged ( const String & setting_name ) const <nl> { <nl> - return MergeTreeSettings : : findIndex ( setting_name ) ! = MergeTreeSettings : : npos ; <nl> + if ( MergeTreeSettings : : findIndex ( setting_name ) = = MergeTreeSettings : : npos ) <nl> + throw Exception { " Storage ' " + getName ( ) + " ' doesn ' t have setting ' " + setting_name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> + if ( MergeTreeSettings : : isReadonlySetting ( setting_name ) ) <nl> + throw Exception { " Setting ' " + setting_name + " ' is readonly for storage ' " + getName ( ) + " ' " , ErrorCodes : : READONLY_SETTING } ; <nl> } <nl> <nl> void MergeTreeData : : removeEmptyColumnsFromPart ( MergeTreeData : : MutableDataPartPtr & data_part ) <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeData . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . h <nl> class MergeTreeData : public IStorage <nl> TableStructureWriteLockHolder & table_lock_holder ) ; <nl> <nl> / / / All MergeTreeData children have settings . <nl> - bool hasSetting ( const String & setting_name ) const override ; <nl> + void checkSettingCanBeChanged ( const String & setting_name ) const override ; <nl> <nl> / / / Remove columns that have been marked as empty after zeroing values with expired TTL . <nl> void removeEmptyColumnsFromPart ( MergeTreeData : : MutableDataPartPtr & data_part ) ; <nl>
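Replacing the boolean hasSetting with a throwing checkSettingCanBeChanged, as the hunks above do, lets a single call site distinguish an unknown setting from one that exists but may only be set at table creation. A minimal sketch of the same pattern, with hypothetical setting lists and std::invalid_argument standing in for ClickHouse's Exception codes:

#include <stdexcept>
#include <string>
#include <vector>

static const std::vector<std::string> known_settings = {"index_granularity", "merge_with_ttl_timeout"};
static const std::vector<std::string> readonly_settings = {"index_granularity"};

static bool contains(const std::vector<std::string> & v, const std::string & s)
{
    for (const auto & x : v)
        if (x == s)
            return true;
    return false;
}

// Throwing check: unknown settings and readonly settings fail with distinct errors.
void checkSettingCanBeChanged(const std::string & name)
{
    if (!contains(known_settings, name))
        throw std::invalid_argument("Storage doesn't have setting '" + name + "'");  // cf. UNKNOWN_SETTING
    if (contains(readonly_settings, name))
        throw std::invalid_argument("Setting '" + name + "' is readonly");           // cf. READONLY_SETTING
}

int main()
{
    checkSettingCanBeChanged("merge_with_ttl_timeout");  // passes: known and mutable
    try
    {
        checkSettingCanBeChanged("index_granularity");   // throws: may only be set in CREATE TABLE
    }
    catch (const std::exception &)
    {
    }
}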
" ) \ <nl> struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > <nl> M ( SettingBool , use_minimalistic_part_header_in_zookeeper , false , " Store part header ( checksums and columns ) in a compact format and a single part znode instead of separate znodes ( < part > / columns and < part > / checksums ) . This can dramatically reduce snapshot size in ZooKeeper . Before enabling check that all replicas support new format . " ) \ <nl> M ( SettingUInt64 , finished_mutations_to_keep , 100 , " How many records about mutations that are done to keep . If zero , then keep all of them . " ) \ <nl> M ( SettingUInt64 , min_merge_bytes_to_use_direct_io , 10ULL * 1024 * 1024 * 1024 , " Minimal amount of bytes to enable O_DIRECT in merge ( 0 - disabled ) . " ) \ <nl> - IM ( SettingUInt64 , index_granularity_bytes , 10 * 1024 * 1024 , " Approximate amount of bytes in single granule ( 0 - disabled ) . " ) \ <nl> + M ( SettingUInt64 , index_granularity_bytes , 10 * 1024 * 1024 , " Approximate amount of bytes in single granule ( 0 - disabled ) . " ) \ <nl> M ( SettingInt64 , merge_with_ttl_timeout , 3600 * 24 , " Minimal time in seconds , when merge with TTL can be repeated . " ) \ <nl> M ( SettingBool , ttl_only_drop_parts , false , " Only drop altogether the expired parts and not partially prune them . " ) \ <nl> M ( SettingBool , write_final_mark , 1 , " Write final mark after end of column ( 0 - disabled , do nothing if index_granularity_bytes = 0 ) " ) \ <nl> struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > <nl> <nl> / / / NOTE : will rewrite the AST to add immutable settings . <nl> void loadFromQuery ( ASTStorage & storage_def ) ; <nl> + <nl> + / / / We check settings after storage creation <nl> + static bool isReadonlySetting ( const String & name ) <nl> + { <nl> + return name = = " index_granularity " | | name = = " index_granularity_bytes " ; <nl> + } <nl> } ; <nl> <nl> using MergeTreeSettingsPtr = std : : shared_ptr < const MergeTreeSettings > ; <nl> mmm a / dbms / src / Storages / MergeTree / MergedBlockOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergedBlockOutputStream . cpp <nl> void MergedBlockOutputStream : : writeImpl ( const Block & block , const IColumn : : Perm <nl> else if ( skip_indexes_column_name_to_position . end ( ) ! = skip_index_column_it ) <nl> { <nl> const auto & index_column = * skip_indexes_columns [ skip_index_column_it - > second ] . column ; <nl> - writeColumn ( column . name , * column . type , index_column , offset_columns , false , serialization_states [ i ] , current_mark ) ; <nl> + std : : tie ( std : : ignore , new_index_offset ) = writeColumn ( column . name , * column . type , index_column , offset_columns , false , serialization_states [ i ] , current_mark ) ; <nl> } <nl> else <nl> { <nl> void MergedBlockOutputStream : : writeImpl ( const Block & block , const IColumn : : Perm <nl> <nl> rows_count + = rows ; <nl> <nl> + / / / Should be written before index offset update , because we calculate , <nl> + / / / indices of currently written granules <nl> calculateAndSerializeSkipIndices ( skip_indexes_columns , rows ) ; <nl> <nl> { <nl> mmm a / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . cpp <nl> void MergedColumnOnlyOutputStream : : write ( const Block & block ) <nl> if ( ! 
mmm a / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . cpp <nl> void MergedColumnOnlyOutputStream : : write ( const Block & block ) <nl> if ( ! rows ) <nl> return ; <nl> <nl> - calculateAndSerializeSkipIndices ( skip_indexes_columns , rows ) ; <nl> <nl> size_t new_index_offset = 0 ; <nl> size_t new_current_mark = 0 ; <nl> void MergedColumnOnlyOutputStream : : write ( const Block & block ) <nl> std : : tie ( new_current_mark , new_index_offset ) = writeColumn ( column . name , * column . type , * column . column , offset_columns , skip_offsets , serialization_states [ i ] , current_mark ) ; <nl> } <nl> <nl> + / / / Must run before the index offset update , because we calculate <nl> + / / / indices of the currently written granules . <nl> + calculateAndSerializeSkipIndices ( skip_indexes_columns , rows ) ; <nl> + <nl> index_offset = new_index_offset ; <nl> current_mark = new_current_mark ; <nl> } <nl> mmm a / dbms / src / Storages / System / StorageSystemDictionaries . cpp <nl> ppp b / dbms / src / Storages / System / StorageSystemDictionaries . cpp <nl> void StorageSystemDictionaries : : fillData ( MutableColumns & res_columns , const Con <nl> res_columns [ i + + ] - > insert ( static_cast < Int8 > ( load_result . status ) ) ; <nl> res_columns [ i + + ] - > insert ( load_result . origin ) ; <nl> <nl> - if ( load_result . object ) <nl> - { <nl> - const auto dict_ptr = std : : static_pointer_cast < const IDictionaryBase > ( load_result . object ) ; <nl> <nl> + std : : exception_ptr last_exception = load_result . exception ; <nl> <nl> + const auto dict_ptr = std : : dynamic_pointer_cast < const IDictionaryBase > ( load_result . object ) ; <nl> + if ( dict_ptr ) <nl> + { <nl> res_columns [ i + + ] - > insert ( dict_ptr - > getTypeName ( ) ) ; <nl> <nl> const auto & dict_struct = dict_ptr - > getStructure ( ) ; <nl> void StorageSystemDictionaries : : fillData ( MutableColumns & res_columns , const Con <nl> res_columns [ i + + ] - > insert ( dict_ptr - > getElementCount ( ) ) ; <nl> res_columns [ i + + ] - > insert ( dict_ptr - > getLoadFactor ( ) ) ; <nl> res_columns [ i + + ] - > insert ( dict_ptr - > getSource ( ) - > toString ( ) ) ; <nl> + <nl> + if ( ! last_exception ) <nl> + last_exception = dict_ptr - > getLastException ( ) ; <nl> } <nl> else <nl> { <nl> void StorageSystemDictionaries : : fillData ( MutableColumns & res_columns , const Con <nl> res_columns [ i + + ] - > insert ( static_cast < UInt64 > ( std : : chrono : : system_clock : : to_time_t ( load_result . loading_start_time ) ) ) ; <nl> res_columns [ i + + ] - > insert ( std : : chrono : : duration_cast < std : : chrono : : duration < float > > ( load_result . loading_duration ) . count ( ) ) ; <nl> <nl> - if ( load_result . exception ) <nl> - res_columns [ i + + ] - > insert ( getExceptionMessage ( load_result . exception , false ) ) ; <nl> + if ( last_exception ) <nl> + res_columns [ i + + ] - > insert ( getExceptionMessage ( last_exception , false ) ) ; <nl> else <nl> res_columns [ i + + ] - > insertDefault ( ) ; <nl> } <nl> mmm a / dbms / src / Storages / System / StorageSystemProcesses . cpp <nl> ppp b / dbms / src / Storages / System / StorageSystemProcesses . cpp <nl> NamesAndTypesList StorageSystemProcesses : : getNamesAndTypes ( ) <nl> { " query " , std : : make_shared < DataTypeString > ( ) } , <nl> <nl> { " thread_numbers " , std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt32 > ( ) ) } , <nl> + { " os_thread_ids " , std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt32 > ( ) ) } , <nl> { " ProfileEvents . 
Names " , std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeString > ( ) ) } , <nl> { " ProfileEvents . Values " , std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt64 > ( ) ) } , <nl> { " Settings . Names " , std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeString > ( ) ) } , <nl> void StorageSystemProcesses : : fillData ( MutableColumns & res_columns , const Contex <nl> res_columns [ i + + ] - > insert ( threads_array ) ; <nl> } <nl> <nl> + { <nl> + Array threads_array ; <nl> + threads_array . reserve ( process . os_thread_ids . size ( ) ) ; <nl> + for ( const UInt32 thread_number : process . os_thread_ids ) <nl> + threads_array . emplace_back ( thread_number ) ; <nl> + res_columns [ i + + ] - > insert ( threads_array ) ; <nl> + } <nl> + <nl> { <nl> IColumn * column_profile_events_names = res_columns [ i + + ] . get ( ) ; <nl> IColumn * column_profile_events_values = res_columns [ i + + ] . get ( ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 4142706259a <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_dictionaries / configs / dictionaries / dictionary_preset_cache_xypairs . xml <nl> <nl> + < yandex > <nl> + < dictionary > <nl> + < name > cache_xypairs < / name > <nl> + < source > <nl> + < clickhouse > <nl> + < host > localhost < / host > <nl> + < port > 9000 < / port > <nl> + < user > default < / user > <nl> + < password > < / password > <nl> + < db > test < / db > <nl> + < table > xypairs < / table > <nl> + < / clickhouse > <nl> + < / source > <nl> + < lifetime > 1 < / lifetime > <nl> + < layout > <nl> + < cache > <nl> + < size_in_cells > 5 < / size_in_cells > <nl> + < / cache > <nl> + < / layout > <nl> + < structure > <nl> + < id > <nl> + < name > x < / name > <nl> + < / id > <nl> + < attribute > <nl> + < name > y < / name > <nl> + < type > UInt64 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < / structure > <nl> + < / dictionary > <nl> + < / yandex > <nl> mmm a / dbms / tests / integration / test_dictionaries / test . py <nl> ppp b / dbms / tests / integration / test_dictionaries / test . py <nl> def get_status ( dictionary_name ) : <nl> return instance . query ( " SELECT status FROM system . dictionaries WHERE name = ' " + dictionary_name + " ' " ) . rstrip ( " \ n " ) <nl> <nl> <nl> + def get_last_exception ( dictionary_name ) : <nl> + return instance . query ( " SELECT last_exception FROM system . dictionaries WHERE name = ' " + dictionary_name + " ' " ) . rstrip ( " \ n " ) . replace ( " \ \ ' " , " ' " ) <nl> + <nl> + <nl> def get_loading_start_time ( dictionary_name ) : <nl> s = instance . query ( " SELECT loading_start_time FROM system . dictionaries WHERE name = ' " + dictionary_name + " ' " ) . rstrip ( " \ n " ) <nl> if s = = " 0000 - 00 - 00 00 : 00 : 00 " : <nl> def test_reload_after_fail_by_timer ( started_cluster ) : <nl> time . sleep ( 6 ) ; <nl> query ( " SELECT dictGetInt32 ( ' no_file_2 ' , ' a ' , toUInt64 ( 9 ) ) " ) = = " 10 \ n " <nl> assert get_status ( " no_file_2 " ) = = " LOADED " <nl> + <nl> + <nl> + def test_reload_after_fail_in_cache_dictionary ( started_cluster ) : <nl> + query = instance . query <nl> + query_and_get_error = instance . query_and_get_error <nl> + <nl> + # Can ' t get a value from the cache dictionary because the source ( table ` test . xypairs ` ) doesn ' t respond . <nl> + expected_error = " Table test . 
xypairs doesn ' t exist " <nl> + assert expected_error in query_and_get_error ( " SELECT dictGetUInt64 ( ' cache_xypairs ' , ' y ' , toUInt64 ( 1 ) ) " ) <nl> + assert get_status ( " cache_xypairs " ) = = " LOADED " <nl> + assert expected_error in get_last_exception ( " cache_xypairs " ) <nl> + <nl> + # Create table ` test . xypairs ` . <nl> + query ( ' ' ' <nl> + drop table if exists test . xypairs ; <nl> + create table test . xypairs ( x UInt64 , y UInt64 ) engine = Log ; <nl> + insert into test . xypairs values ( 1 , 56 ) , ( 3 , 78 ) ; <nl> + ' ' ' ) <nl> + <nl> + # Cache dictionary now works . <nl> + assert_eq_with_retry ( instance , " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 1 ) ) " , " 56 " , ignore_error = True ) <nl> + assert query ( " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 2 ) ) " ) = = " 0 \ n " <nl> + assert get_last_exception ( " cache_xypairs " ) = = " " <nl> + <nl> + # Drop table ` test . xypairs ` . <nl> + query ( ' drop table if exists test . xypairs ' ) <nl> + <nl> + # Values are cached so we can get them . <nl> + assert query ( " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 1 ) ) " ) = = " 56 \ n " <nl> + assert query ( " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 2 ) ) " ) = = " 0 \ n " <nl> + assert get_last_exception ( " cache_xypairs " ) = = " " <nl> + <nl> + # But we can ' t get a value from the source table which isn ' t cached . <nl> + assert expected_error in query_and_get_error ( " SELECT dictGetUInt64 ( ' cache_xypairs ' , ' y ' , toUInt64 ( 3 ) ) " ) <nl> + assert expected_error in get_last_exception ( " cache_xypairs " ) <nl> + <nl> + # Elapsed time should not spoil the cache . <nl> + time . sleep ( 5 ) <nl> + assert query ( " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 1 ) ) " ) = = " 56 \ n " <nl> + assert query ( " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 2 ) ) " ) = = " 0 \ n " <nl> + assert expected_error in query_and_get_error ( " SELECT dictGetUInt64 ( ' cache_xypairs ' , ' y ' , toUInt64 ( 3 ) ) " ) <nl> + assert expected_error in get_last_exception ( " cache_xypairs " ) <nl> + <nl> + # Create table ` test . xypairs ` again with changed values . <nl> + query ( ' ' ' <nl> + drop table if exists test . xypairs ; <nl> + create table test . xypairs ( x UInt64 , y UInt64 ) engine = Log ; <nl> + insert into test . xypairs values ( 1 , 57 ) , ( 3 , 79 ) ; <nl> + ' ' ' ) <nl> + <nl> + # The cache dictionary returns new values now . <nl> + assert_eq_with_retry ( instance , " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 1 ) ) " , " 57 " ) <nl> + assert query ( " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 2 ) ) " ) = = " 0 \ n " <nl> + assert query ( " SELECT dictGet ( ' cache_xypairs ' , ' y ' , toUInt64 ( 3 ) ) " ) = = " 79 \ n " <nl> + assert get_last_exception ( " cache_xypairs " ) = = " " <nl> mmm a / dbms / tests / integration / test_storage_kafka / test . py <nl> ppp b / dbms / tests / integration / test_storage_kafka / test . py <nl> def insert ( ) : <nl> assert int ( result ) = = messages_num * threads_num , ' ClickHouse lost some messages : { } ' . format ( result ) <nl> <nl> <nl> - @ pytest . mark . timeout ( 120 ) <nl> + @ pytest . mark . timeout ( 300 ) <nl> def test_kafka_commit_on_block_write ( kafka_cluster ) : <nl> instance . query ( ' ' ' <nl> DROP TABLE IF EXISTS test . view ; <nl> mmm a / dbms / tests / queries / 0_stateless / 00933_test_fix_extra_seek_on_compressed_cache . sh <nl> ppp b / dbms / tests / queries / 0_stateless / 00933_test_fix_extra_seek_on_compressed_cache . 
sh <nl> $ CLICKHOUSE_CLIENT - - use_uncompressed_cache = 1 - - query_id = " test - query - uncompresse <nl> sleep 1 <nl> $ CLICKHOUSE_CLIENT - - query = " SYSTEM FLUSH LOGS " <nl> <nl> - $ CLICKHOUSE_CLIENT - - query = " SELECT ProfileEvents . Values [ indexOf ( ProfileEvents . Names , ' Seek ' ) ] , ProfileEvents . Values [ indexOf ( ProfileEvents . Names , ' ReadCompressedBytes ' ) ] , ProfileEvents . Values [ indexOf ( ProfileEvents . Names , ' UncompressedCacheHits ' ) ] AS hit FROM system . query_log WHERE ( query_id = ' test - query - uncompressed - cache ' ) AND ( type = 2 ) ORDER BY event_time DESC LIMIT 1 " <nl> + $ CLICKHOUSE_CLIENT - - query = " SELECT ProfileEvents . Values [ indexOf ( ProfileEvents . Names , ' Seek ' ) ] , ProfileEvents . Values [ indexOf ( ProfileEvents . Names , ' ReadCompressedBytes ' ) ] , ProfileEvents . Values [ indexOf ( ProfileEvents . Names , ' UncompressedCacheHits ' ) ] AS hit FROM system . query_log WHERE ( query_id = ' test - query - uncompressed - cache ' ) AND ( type = 2 ) AND event_date > = yesterday ( ) ORDER BY event_time DESC LIMIT 1 " <nl> <nl> $ CLICKHOUSE_CLIENT - - query = " DROP TABLE IF EXISTS small_table " <nl> <nl> deleted file mode 100755 <nl> index 2095683720e . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00960_live_view_watch_events_live . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 , client ( name = ' client2 > ' , log = log ) as client2 : <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' WATCH test . lv EVENTS ' ) <nl> - client1 . expect ( ' 1 . * ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( ' 2 . * ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ' ) <nl> - client1 . expect ( ' 3 . * ' + end_of_block ) <nl> - # send Ctrl - C <nl> - client1 . send ( ' \ x03 ' , eol = ' ' ) <nl> - match = client1 . expect ( ' ( % s ) | ( [ # \ $ ] ) ' % prompt ) <nl> - if match . groups ( ) [ 1 ] : <nl> - client1 . send ( client1 . command ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100644 <nl> index 6fbbedf1b21 . . 
00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00961_temporary_live_view_watch . reference <nl> ppp / dev / null <nl> <nl> - 0 1 <nl> - 6 2 <nl> - 21 3 <nl> deleted file mode 100644 <nl> index 7992da92f97 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00961_temporary_live_view_watch . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ; <nl> - <nl> - WATCH test . lv LIMIT 0 ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - WATCH test . lv LIMIT 0 ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ; <nl> - <nl> - WATCH test . lv LIMIT 0 ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100755 <nl> index 3dbec01b29a . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00962_temporary_live_view_watch_live . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 , client ( name = ' client2 > ' , log = log ) as client2 : <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' WATCH test . lv ' ) <nl> - client1 . expect ( r ' 0 . * 1 ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( r ' 6 . * 2 ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ' ) <nl> - client1 . expect ( r ' 21 . * 3 ' + end_of_block ) <nl> - # send Ctrl - C <nl> - client1 . send ( ' \ x03 ' , eol = ' ' ) <nl> - match = client1 . expect ( ' ( % s ) | ( [ # \ $ ] ) ' % prompt ) <nl> - if match . groups ( ) [ 1 ] : <nl> - client1 . send ( client1 . command ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100755 <nl> index b324c1b90cc . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00963_temporary_live_view_watch_live_timeout . py . disabled <nl> ppp / dev / null <nl> <nl> - # ! 
/ usr / bin / env python <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 , client ( name = ' client2 > ' , log = log ) as client2 : <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' SET temporary_live_view_timeout = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' WATCH test . lv ' ) <nl> - client1 . expect ( r ' 0 . * 1 ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client2 . expect ( prompt ) <nl> - client1 . expect ( r ' 6 . * 2 ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ' ) <nl> - client2 . expect ( prompt ) <nl> - client1 . expect ( r ' 21 . * 3 ' + end_of_block ) <nl> - # send Ctrl - C <nl> - client1 . send ( ' \ x03 ' , eol = ' ' ) <nl> - match = client1 . expect ( ' ( % s ) | ( [ # \ $ ] ) ' % prompt ) <nl> - if match . groups ( ) [ 1 ] : <nl> - client1 . send ( client1 . command ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' SELECT sleep ( 1 ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( ' Table test . lv doesn \ ' t exist ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100755 <nl> index 528f18839bb . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00964_live_view_watch_events_heartbeat . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 , client ( name = ' client2 > ' , log = log ) as client2 : <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . 
expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' SET live_view_heartbeat_interval = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' WATCH test . lv EVENTS ' ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( ' 2 . * ' + end_of_block ) <nl> - client1 . expect ( ' Progress : 2 . 00 rows . * \ ) ' ) <nl> - # wait for heartbeat <nl> - client1 . expect ( ' Progress : 2 . 00 rows . * \ ) ' ) <nl> - # send Ctrl - C <nl> - client1 . send ( ' \ x03 ' , eol = ' ' ) <nl> - match = client1 . expect ( ' ( % s ) | ( [ # \ $ ] ) ' % prompt ) <nl> - if match . groups ( ) [ 1 ] : <nl> - client1 . send ( client1 . command ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100755 <nl> index 2723936f876 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00965_live_view_watch_heartbeat . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 , client ( name = ' client2 > ' , log = log ) as client2 : <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' SET live_view_heartbeat_interval = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' WATCH test . lv ' ) <nl> - client1 . expect ( r ' 0 . * 1 ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( r ' 6 . * 2 ' + end_of_block ) <nl> - client1 . expect ( ' Progress : 2 . 00 rows . * \ ) ' ) <nl> - # wait for heartbeat <nl> - client1 . expect ( ' Progress : 2 . 00 rows . * \ ) ' ) <nl> - # send Ctrl - C <nl> - client1 . send ( ' \ x03 ' , eol = ' ' ) <nl> - match = client1 . expect ( ' ( % s ) | ( [ # \ $ ] ) ' % prompt ) <nl> - if match . groups ( ) [ 1 ] : <nl> - client1 . send ( client1 . command ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . 
lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100755 <nl> index 72ab3ea8818 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00966_live_view_watch_events_http . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - from httpclient import client as http_client <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 : <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - <nl> - with http_client ( { ' method ' : ' GET ' , ' url ' : ' / ? allow_experimental_live_view = 1 & query = WATCH % 20test . lv % 20EVENTS ' } , name = ' client2 > ' , log = log ) as client2 : <nl> - client2 . expect ( ' . * 1 \ n ' ) <nl> - client1 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( ' . * 2 \ n ' ) <nl> - <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> deleted file mode 100755 <nl> index e2f33971c3d . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00967_live_view_watch_http . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - from httpclient import client as http_client <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 : <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - <nl> - with http_client ( { ' method ' : ' GET ' , ' url ' : ' / ? allow_experimental_live_view = 1 & query = WATCH % 20test . 
lv ' } , name = ' client2 > ' , log = log ) as client2 : <nl> - client2 . expect ( ' . * 0 \ t1 \ n ' ) <nl> - client1 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( ' . * 6 \ t2 \ n ' ) <nl> - <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> deleted file mode 100644 <nl> index 5ae423d90d1 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00968_live_view_select_format_jsoneachrowwithprogress . reference <nl> ppp / dev / null <nl> <nl> - { " row " : { " a " : 1 } } <nl> - { " row " : { " a " : 2 } } <nl> - { " row " : { " a " : 3 } } <nl> - { " progress " : { " read_rows " : " 3 " , " read_bytes " : " 36 " , " written_rows " : " 0 " , " written_bytes " : " 0 " , " total_rows_to_read " : " 0 " } } <nl> deleted file mode 100644 <nl> index 1023cdf6b29 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00968_live_view_select_format_jsoneachrowwithprogress . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT * FROM test . mt ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - SELECT * FROM test . lv FORMAT JSONEachRowWithProgress ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100644 <nl> index 287a1ced92d . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00969_live_view_watch_format_jsoneachrowwithprogress . reference <nl> ppp / dev / null <nl> <nl> - { " row " : { " sum ( a ) " : " 0 " , " _version " : " 1 " } } <nl> - { " progress " : { " read_rows " : " 1 " , " read_bytes " : " 16 " , " written_rows " : " 0 " , " written_bytes " : " 0 " , " total_rows_to_read " : " 0 " } } <nl> - { " row " : { " sum ( a ) " : " 6 " , " _version " : " 2 " } } <nl> - { " progress " : { " read_rows " : " 1 " , " read_bytes " : " 16 " , " written_rows " : " 0 " , " written_bytes " : " 0 " , " total_rows_to_read " : " 0 " } } <nl> - { " row " : { " sum ( a ) " : " 21 " , " _version " : " 3 " } } <nl> - { " progress " : { " read_rows " : " 1 " , " read_bytes " : " 16 " , " written_rows " : " 0 " , " written_bytes " : " 0 " , " total_rows_to_read " : " 0 " } } <nl> deleted file mode 100644 <nl> index 3e46d55c014 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00969_live_view_watch_format_jsoneachrowwithprogress . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ; <nl> - <nl> - WATCH test . lv LIMIT 0 FORMAT JSONEachRowWithProgress ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - WATCH test . lv LIMIT 0 FORMAT JSONEachRowWithProgress ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ; <nl> - <nl> - WATCH test . lv LIMIT 0 FORMAT JSONEachRowWithProgress ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . 
mt ; <nl> deleted file mode 100755 <nl> index 8435cdc147a . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00970_live_view_watch_events_http_heartbeat . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - from httpclient import client as http_client <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 : <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - with http_client ( { ' method ' : ' GET ' , ' url ' : ' / ? allow_experimental_live_view = 1 & live_view_heartbeat_interval = 1 & query = WATCH % 20test . lv % 20EVENTS % 20FORMAT % 20JSONEachRowWithProgress ' } , name = ' client2 > ' , log = log ) as client2 : <nl> - client2 . expect ( ' { " progress " : { " read_rows " : " 1 " , " read_bytes " : " 8 " , " written_rows " : " 0 " , " written_bytes " : " 0 " , " total_rows_to_read " : " 0 " } } \ n ' , escape = True ) <nl> - client2 . expect ( ' { " row " : { " version " : " 1 " } ' , escape = True ) <nl> - client2 . expect ( ' { " progress " : { " read_rows " : " 1 " , " read_bytes " : " 8 " , " written_rows " : " 0 " , " written_bytes " : " 0 " , " total_rows_to_read " : " 0 " } } ' , escape = True ) <nl> - # heartbeat is provided by progress message <nl> - client2 . expect ( ' { " progress " : { " read_rows " : " 1 " , " read_bytes " : " 8 " , " written_rows " : " 0 " , " written_bytes " : " 0 " , " total_rows_to_read " : " 0 " } } ' , escape = True ) <nl> - <nl> - client1 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - client2 . expect ( ' { " row " : { " version " : " 2 " } } \ n ' , escape = True ) <nl> - <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> deleted file mode 100755 <nl> index 2317d705efe . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00971_live_view_watch_http_heartbeat . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - from httpclient import client as http_client <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . 
stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 : <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - with http_client ( { ' method ' : ' GET ' , ' url ' : ' / ? allow_experimental_live_view = 1 & live_view_heartbeat_interval = 1 & query = WATCH % 20test . lv % 20FORMAT % 20JSONEachRowWithProgress ' } , name = ' client2 > ' , log = log ) as client2 : <nl> - client2 . expect ( ' " progress " . * ' , ) <nl> - client2 . expect ( ' { " row " : { " sum ( a ) " : " 0 " , " _version " : " 1 " } } \ n ' , escape = True ) <nl> - client2 . expect ( ' " progress " . * \ n ' ) <nl> - # heartbeat is provided by progress message <nl> - client2 . expect ( ' " progress " . * \ n ' ) <nl> - <nl> - client1 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( prompt ) <nl> - <nl> - client2 . expect ( ' " progress " . * " read_rows " : " 2 " . * \ n ' ) <nl> - client2 . expect ( ' { " row " : { " sum ( a ) " : " 6 " , " _version " : " 2 " } } \ n ' , escape = True ) <nl> - <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> deleted file mode 100644 <nl> index 135516b0cd3 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00972_live_view_select_1 . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - <nl> - CREATE LIVE VIEW test . lv AS SELECT 1 ; <nl> - <nl> - SELECT * FROM test . lv ; <nl> - <nl> - DROP TABLE test . lv ; <nl> deleted file mode 100644 <nl> index 75236c0daf7 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00973_live_view_select . reference <nl> ppp / dev / null <nl> <nl> - 6 1 <nl> - 6 1 <nl> - 12 2 <nl> - 12 2 <nl> deleted file mode 100644 <nl> index 4b5ca0a2dd7 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00973_live_view_select . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - SELECT * , _version FROM test . lv ; <nl> - SELECT * , _version FROM test . lv ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - SELECT * , _version FROM test . lv ; <nl> - SELECT * , _version FROM test . lv ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100644 <nl> index 6d50f0e9c3a . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00974_live_view_select_with_aggregation . 
reference <nl> ppp / dev / null <nl> <nl> - 6 <nl> - 21 <nl> deleted file mode 100644 <nl> index 3faaec8f623 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00974_live_view_select_with_aggregation . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT * FROM test . mt ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - SELECT sum ( a ) FROM test . lv ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ; <nl> - <nl> - SELECT sum ( a ) FROM test . lv ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> mmm a / dbms / tests / queries / 0_stateless / 00974_text_log_table_not_empty . sh <nl> ppp b / dbms / tests / queries / 0_stateless / 00974_text_log_table_not_empty . sh <nl> do <nl> <nl> $ { CLICKHOUSE_CLIENT } - - query = " SYSTEM FLUSH LOGS " <nl> sleep 0 . 1 ; <nl> - if [ [ $ ( $ CLICKHOUSE_CURL - sS " $ CLICKHOUSE_URL " - d " SELECT count ( ) > 0 FROM system . text_log WHERE position ( system . text_log . message , ' SELECT 6103 ' ) > 0 " ) = = 1 ] ] ; then echo 1 ; exit ; fi ; <nl> + if [ [ $ ( $ CLICKHOUSE_CURL - sS " $ CLICKHOUSE_URL " - d " SELECT count ( ) > 0 FROM system . text_log WHERE position ( system . text_log . message , ' SELECT 6103 ' ) > 0 AND event_date > = yesterday ( ) " ) = = 1 ] ] ; then echo 1 ; exit ; fi ; <nl> <nl> done ; <nl> <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> deleted file mode 100644 <nl> index 02c1644d193 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00975_live_view_create . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT * FROM test . mt ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100644 <nl> index 453bd800469 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00976_live_view_select_version . reference <nl> ppp / dev / null <nl> <nl> - 1 1 <nl> - 2 1 <nl> - 3 1 <nl> deleted file mode 100644 <nl> index ae1c59a92d7 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00976_live_view_select_version . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT * FROM test . mt ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - SELECT * , _version FROM test . lv ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100644 <nl> index 01e79c32a8c . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00977_live_view_watch_events . reference <nl> ppp / dev / null <nl> <nl> - 1 <nl> - 2 <nl> - 3 <nl> deleted file mode 100644 <nl> index 3e0d066fb8d . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00977_live_view_watch_events . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . 
lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ; <nl> - <nl> - WATCH test . lv EVENTS LIMIT 0 ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - WATCH test . lv EVENTS LIMIT 0 ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ; <nl> - <nl> - WATCH test . lv EVENTS LIMIT 0 ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100644 <nl> index 6fbbedf1b21 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00978_live_view_watch . reference <nl> ppp / dev / null <nl> <nl> - 0 1 <nl> - 6 2 <nl> - 21 3 <nl> deleted file mode 100644 <nl> index b8d0d93ccab . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00978_live_view_watch . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ; <nl> - <nl> - WATCH test . lv LIMIT 0 ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ; <nl> - <nl> - WATCH test . lv LIMIT 0 ; <nl> - <nl> - INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ; <nl> - <nl> - WATCH test . lv LIMIT 0 ; <nl> - <nl> - DROP TABLE test . lv ; <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100755 <nl> index 8c5bc5b8eb2 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00979_live_view_watch_live . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - CURDIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> - sys . path . insert ( 0 , os . path . join ( CURDIR , ' helpers ' ) ) <nl> - <nl> - from client import client , prompt , end_of_block <nl> - <nl> - log = None <nl> - # uncomment the line below for debugging <nl> - # log = sys . stdout <nl> - <nl> - with client ( name = ' client1 > ' , log = log ) as client1 , client ( name = ' client2 > ' , log = log ) as client2 : <nl> - client1 . expect ( prompt ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client1 . expect ( prompt ) <nl> - client2 . send ( ' SET allow_experimental_live_view = 1 ' ) <nl> - client2 . expect ( prompt ) <nl> - <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE IF EXISTS test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' WATCH test . lv ' ) <nl> - client1 . expect ( r ' 0 . * 1 ' + end_of_block ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) <nl> - client1 . expect ( r ' 6 . * 2 ' + end_of_block ) <nl> - client2 . expect ( prompt ) <nl> - client2 . send ( ' INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ' ) <nl> - client1 . expect ( r ' 21 . * 3 ' + end_of_block ) <nl> - client2 . expect ( prompt ) <nl> - for i in range ( 1 , 129 ) : <nl> - client2 . 
send ( ' INSERT INTO test . mt VALUES ( 1 ) ' ) <nl> - client1 . expect ( r ' % d . * % d ' % ( 21 + i , 3 + i ) + end_of_block ) <nl> - client2 . expect ( prompt ) <nl> - # send Ctrl - C <nl> - client1 . send ( ' \ x03 ' , eol = ' ' ) <nl> - match = client1 . expect ( ' ( % s ) | ( [ # \ $ ] ) ' % prompt ) <nl> - if match . groups ( ) [ 1 ] : <nl> - client1 . send ( client1 . command ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . lv ' ) <nl> - client1 . expect ( prompt ) <nl> - client1 . send ( ' DROP TABLE test . mt ' ) <nl> - client1 . expect ( prompt ) <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> deleted file mode 100644 <nl> index 7f9fcbb2e9c . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00980_create_temporary_live_view . reference <nl> ppp / dev / null <nl> <nl> - temporary_live_view_timeout 5 <nl> - live_view_heartbeat_interval 15 <nl> - 0 <nl> deleted file mode 100644 <nl> index 037c2a9e587 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00980_create_temporary_live_view . sql <nl> ppp / dev / null <nl> <nl> - SET allow_experimental_live_view = 1 ; <nl> - <nl> - DROP TABLE IF EXISTS test . lv ; <nl> - DROP TABLE IF EXISTS test . mt ; <nl> - <nl> - SELECT name , value from system . settings WHERE name = ' temporary_live_view_timeout ' ; <nl> - SELECT name , value from system . settings WHERE name = ' live_view_heartbeat_interval ' ; <nl> - <nl> - SET temporary_live_view_timeout = 1 ; <nl> - CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ; <nl> - CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ; <nl> - <nl> - SHOW TABLES LIKE ' lv ' ; <nl> - SELECT sleep ( 2 ) ; <nl> - SHOW TABLES LIKE ' lv ' ; <nl> - <nl> - DROP TABLE test . mt ; <nl> deleted file mode 100644 <nl> index 782671cdfaf . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00991_live_view_watch_event_live . python <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - <nl> - import subprocess <nl> - import threading <nl> - import Queue as queue <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - <nl> - CLICKHOUSE_CLIENT = os . environ . get ( ' CLICKHOUSE_CLIENT ' ) <nl> - CLICKHOUSE_CURL = os . environ . get ( ' CLICKHOUSE_CURL ' ) <nl> - CLICKHOUSE_URL = os . environ . get ( ' CLICKHOUSE_URL ' ) <nl> - <nl> - <nl> - def send_query ( query ) : <nl> - cmd = list ( CLICKHOUSE_CLIENT . split ( ) ) <nl> - cmd + = [ ' - - query ' , query ] <nl> - # print ( cmd ) <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) . stdout <nl> - <nl> - <nl> - def send_query_in_process_group ( query ) : <nl> - cmd = list ( CLICKHOUSE_CLIENT . split ( ) ) <nl> - cmd + = [ ' - - query ' , query ] <nl> - # print ( cmd ) <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT , preexec_fn = os . setsid ) <nl> - <nl> - <nl> - def read_lines_and_push_to_queue ( pipe , queue ) : <nl> - try : <nl> - for line in iter ( pipe . readline , ' ' ) : <nl> - line = line . strip ( ) <nl> - print ( line ) <nl> - sys . stdout . flush ( ) <nl> - queue . put ( line ) <nl> - except KeyboardInterrupt : <nl> - pass <nl> - <nl> - queue . put ( None ) <nl> - <nl> - <nl> - def test ( ) : <nl> - send_query ( ' DROP TABLE IF EXISTS test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE IF EXISTS test . mt ' ) . read ( ) <nl> - send_query ( ' CREATE TABLE test . 
mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) . read ( ) <nl> - send_query ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) . read ( ) <nl> - <nl> - q = queue . Queue ( ) <nl> - p = send_query_in_process_group ( ' WATCH test . lv ' ) <nl> - thread = threading . Thread ( target = read_lines_and_push_to_queue , args = ( p . stdout , q ) ) <nl> - thread . start ( ) <nl> - <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 0 \ t1 ' ) <nl> - <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 6 \ t2 ' ) <nl> - <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 21 \ t3 ' ) <nl> - <nl> - # Send Ctrl + C to client . <nl> - os . killpg ( os . getpgid ( p . pid ) , signal . SIGINT ) <nl> - # This insert shouldn ' t affect lv . <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 7 ) , ( 8 ) , ( 9 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line is None ) <nl> - <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - <nl> - thread . join ( ) <nl> - <nl> - test ( ) <nl> deleted file mode 100644 <nl> index 1e94cdade41 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00991_live_view_watch_event_live . reference <nl> ppp / dev / null <nl> <nl> - 0 1 <nl> - 0 1 <nl> - 6 2 <nl> - 6 2 <nl> - 21 3 <nl> - 21 3 <nl> - None <nl> deleted file mode 100755 <nl> index 938547ca0cb . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00991_live_view_watch_http . python <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - <nl> - import subprocess <nl> - import threading <nl> - import Queue as queue <nl> - import os <nl> - import sys <nl> - <nl> - <nl> - CLICKHOUSE_CLIENT = os . environ . get ( ' CLICKHOUSE_CLIENT ' ) <nl> - CLICKHOUSE_CURL = os . environ . get ( ' CLICKHOUSE_CURL ' ) <nl> - CLICKHOUSE_URL = os . environ . get ( ' CLICKHOUSE_URL ' ) <nl> - <nl> - <nl> - def send_query ( query ) : <nl> - cmd = list ( CLICKHOUSE_CLIENT . split ( ) ) <nl> - cmd + = [ ' - - query ' , query ] <nl> - # print ( cmd ) <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) . stdout <nl> - <nl> - <nl> - def send_http_query ( query ) : <nl> - cmd = list ( CLICKHOUSE_CURL . split ( ) ) # list ( [ ' curl ' , ' - sSN ' , ' - - max - time ' , ' 10 ' ] ) <nl> - cmd + = [ ' - sSN ' , CLICKHOUSE_URL , ' - d ' , query ] <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) . stdout <nl> - <nl> - <nl> - def read_lines_and_push_to_queue ( pipe , queue ) : <nl> - for line in iter ( pipe . readline , ' ' ) : <nl> - line = line . strip ( ) <nl> - print ( line ) <nl> - sys . stdout . flush ( ) <nl> - queue . put ( line ) <nl> - <nl> - queue . put ( None ) <nl> - <nl> - <nl> - def test ( ) : <nl> - send_query ( ' DROP TABLE IF EXISTS test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE IF EXISTS test . mt ' ) . read ( ) <nl> - send_query ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) . read ( ) <nl> - send_query ( ' CREATE LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) . read ( ) <nl> - <nl> - q = queue . 
Queue ( ) <nl> - pipe = send_http_query ( ' WATCH test . lv ' ) <nl> - thread = threading . Thread ( target = read_lines_and_push_to_queue , args = ( pipe , q ) ) <nl> - thread . start ( ) <nl> - <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 0 \ t1 ' ) <nl> - <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 6 \ t2 ' ) <nl> - <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - <nl> - thread . join ( ) <nl> - <nl> - test ( ) <nl> deleted file mode 100644 <nl> index 489457d751b . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00991_live_view_watch_http . reference <nl> ppp / dev / null <nl> <nl> - 0 1 <nl> - 0 1 <nl> - 6 2 <nl> - 6 2 <nl> deleted file mode 100644 <nl> index 70063adc6e3 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00991_temporary_live_view_watch_events_heartbeat . python <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - <nl> - import subprocess <nl> - import threading <nl> - import Queue as queue <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - <nl> - CLICKHOUSE_CLIENT = os . environ . get ( ' CLICKHOUSE_CLIENT ' ) <nl> - CLICKHOUSE_CURL = os . environ . get ( ' CLICKHOUSE_CURL ' ) <nl> - CLICKHOUSE_URL = os . environ . get ( ' CLICKHOUSE_URL ' ) <nl> - <nl> - <nl> - def send_query ( query ) : <nl> - cmd = list ( CLICKHOUSE_CLIENT . split ( ) ) <nl> - cmd + = [ ' - - query ' , query ] <nl> - # print ( cmd ) <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) . stdout <nl> - <nl> - <nl> - def send_query_in_process_group ( query ) : <nl> - cmd = list ( CLICKHOUSE_CLIENT . split ( ) ) <nl> - cmd + = [ ' - - query ' , query , ' - - live_view_heartbeat_interval = 1 ' , ' - - progress ' ] <nl> - # print ( cmd ) <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT , preexec_fn = os . setsid ) <nl> - <nl> - <nl> - def read_lines_and_push_to_queue ( pipe , queue ) : <nl> - try : <nl> - for line in iter ( pipe . readline , ' ' ) : <nl> - line = line . strip ( ) <nl> - # print ( line ) <nl> - sys . stdout . flush ( ) <nl> - queue . put ( line ) <nl> - except KeyboardInterrupt : <nl> - pass <nl> - <nl> - queue . put ( None ) <nl> - <nl> - <nl> - def test ( ) : <nl> - send_query ( ' DROP TABLE IF EXISTS test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE IF EXISTS test . mt ' ) . read ( ) <nl> - send_query ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) . read ( ) <nl> - send_query ( ' CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) . read ( ) <nl> - <nl> - q = queue . Queue ( ) <nl> - p = send_query_in_process_group ( ' WATCH test . lv ' ) <nl> - thread = threading . Thread ( target = read_lines_and_push_to_queue , args = ( p . stdout , q ) ) <nl> - thread . start ( ) <nl> - <nl> - line = q . get ( ) <nl> - # print ( line ) <nl> - assert ( line . endswith ( ' 0 \ t1 ' ) ) <nl> - assert ( ' Progress : 0 . 00 rows ' in line ) <nl> - <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - assert ( line . endswith ( ' 6 \ t2 ' ) ) <nl> - assert ( ' Progress : 1 . 00 rows ' in line ) <nl> - <nl> - # send_query ( ' INSERT INTO test . 
mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ' ) . read ( ) <nl> - # line = q . get ( ) <nl> - # print ( line ) <nl> - # assert ( line . endswith ( ' 6 \ t2 ' ) ) <nl> - # assert ( ' Progress : 1 . 00 rows ' in line ) <nl> - <nl> - # Send Ctrl + C to client . <nl> - os . killpg ( os . getpgid ( p . pid ) , signal . SIGINT ) <nl> - # This insert shouldn ' t affect lv . <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 7 ) , ( 8 ) , ( 9 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - # print ( line ) <nl> - # assert ( line is None ) <nl> - <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - <nl> - thread . join ( ) <nl> - <nl> - test ( ) <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> deleted file mode 100644 <nl> index d290018a02c . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00991_temporary_live_view_watch_live . python <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - <nl> - import subprocess <nl> - import threading <nl> - import Queue as queue <nl> - import os <nl> - import sys <nl> - import signal <nl> - <nl> - <nl> - CLICKHOUSE_CLIENT = os . environ . get ( ' CLICKHOUSE_CLIENT ' ) <nl> - CLICKHOUSE_CURL = os . environ . get ( ' CLICKHOUSE_CURL ' ) <nl> - CLICKHOUSE_URL = os . environ . get ( ' CLICKHOUSE_URL ' ) <nl> - <nl> - <nl> - def send_query ( query ) : <nl> - cmd = list ( CLICKHOUSE_CLIENT . split ( ) ) <nl> - cmd + = [ ' - - query ' , query ] <nl> - # print ( cmd ) <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) . stdout <nl> - <nl> - <nl> - def send_query_in_process_group ( query ) : <nl> - cmd = list ( CLICKHOUSE_CLIENT . split ( ) ) <nl> - cmd + = [ ' - - query ' , query ] <nl> - # print ( cmd ) <nl> - return subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT , preexec_fn = os . setsid ) <nl> - <nl> - <nl> - def read_lines_and_push_to_queue ( pipe , queue ) : <nl> - try : <nl> - for line in iter ( pipe . readline , ' ' ) : <nl> - line = line . strip ( ) <nl> - print ( line ) <nl> - sys . stdout . flush ( ) <nl> - queue . put ( line ) <nl> - except KeyboardInterrupt : <nl> - pass <nl> - <nl> - queue . put ( None ) <nl> - <nl> - <nl> - def test ( ) : <nl> - send_query ( ' DROP TABLE IF EXISTS test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE IF EXISTS test . mt ' ) . read ( ) <nl> - send_query ( ' CREATE TABLE test . mt ( a Int32 ) Engine = MergeTree order by tuple ( ) ' ) . read ( ) <nl> - send_query ( ' CREATE TEMPORARY LIVE VIEW test . lv AS SELECT sum ( a ) FROM test . mt ' ) . read ( ) <nl> - <nl> - q = queue . Queue ( ) <nl> - p = send_query_in_process_group ( ' WATCH test . lv ' ) <nl> - thread = threading . Thread ( target = read_lines_and_push_to_queue , args = ( p . stdout , q ) ) <nl> - thread . start ( ) <nl> - <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 0 \ t1 ' ) <nl> - <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 1 ) , ( 2 ) , ( 3 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 6 \ t2 ' ) <nl> - <nl> - send_query ( ' INSERT INTO test . mt VALUES ( 4 ) , ( 5 ) , ( 6 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line = = ' 21 \ t3 ' ) <nl> - <nl> - # Send Ctrl + C to client . <nl> - os . killpg ( os . getpgid ( p . pid ) , signal . SIGINT ) <nl> - # This insert shouldn ' t affect lv . 
<nl> - send_query ( ' INSERT INTO test . mt VALUES ( 7 ) , ( 8 ) , ( 9 ) ' ) . read ( ) <nl> - line = q . get ( ) <nl> - print ( line ) <nl> - assert ( line is None ) <nl> - <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - send_query ( ' DROP TABLE if exists test . lv ' ) . read ( ) <nl> - <nl> - thread . join ( ) <nl> - <nl> - test ( ) <nl> deleted file mode 100644 <nl> index 1e94cdade41 . . 00000000000 <nl> mmm a / dbms / tests / queries / 0_stateless / 00991_temporary_live_view_watch_live . reference <nl> ppp / dev / null <nl> <nl> - 0 1 <nl> - 0 1 <nl> - 6 2 <nl> - 6 2 <nl> - 21 3 <nl> - 21 3 <nl> - None <nl> similarity index 50 % <nl> rename from dbms / tests / queries / 0_stateless / 00972_live_view_select_1 . reference <nl> rename to dbms / tests / queries / 0_stateless / 01000_bad_size_of_marks_skip_idx . reference <nl> mmm a / dbms / tests / queries / 0_stateless / 00972_live_view_select_1 . reference <nl> ppp b / dbms / tests / queries / 0_stateless / 01000_bad_size_of_marks_skip_idx . reference <nl> @ @ - 1 + 1 , 2 @ @ <nl> 1 <nl> + 1 <nl> new file mode 100644 <nl> index 00000000000 . . 7af19fec695 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01000_bad_size_of_marks_skip_idx . sql <nl> <nl> + SET allow_experimental_data_skipping_indices = 1 ; <nl> + <nl> + DROP TABLE IF EXISTS bad_skip_idx ; <nl> + <nl> + CREATE TABLE bad_skip_idx <nl> + ( <nl> + id UInt64 , <nl> + value String <nl> + ) ENGINE MergeTree ( ) <nl> + ORDER BY id SETTINGS index_granularity_bytes = 64 , vertical_merge_algorithm_min_rows_to_activate = 0 , vertical_merge_algorithm_min_columns_to_activate = 0 ; - - actually vertical merge is not required condition for this bug , but it ' s more easy to reproduce ( becuse we don ' t recalc granularities ) <nl> + <nl> + - - 7 rows per granule <nl> + INSERT INTO bad_skip_idx SELECT number , concat ( ' x ' , toString ( number ) ) FROM numbers ( 1000 ) ; <nl> + <nl> + - - 3 rows per granule <nl> + INSERT INTO bad_skip_idx SELECT number , concat ( ' xxxxxxxxxx ' , toString ( number ) ) FROM numbers ( 1000 , 1000 ) ; <nl> + <nl> + SELECT COUNT ( * ) from bad_skip_idx WHERE value = ' xxxxxxxxxx1015 ' ; - - check no exception <nl> + <nl> + INSERT INTO bad_skip_idx SELECT number , concat ( ' x ' , toString ( number ) ) FROM numbers ( 1000 ) ; <nl> + <nl> + ALTER TABLE bad_skip_idx ADD INDEX idx value TYPE bloom_filter ( 0 . 01 ) GRANULARITY 4 ; <nl> + <nl> + OPTIMIZE TABLE bad_skip_idx FINAL ; <nl> + <nl> + SELECT COUNT ( * ) from bad_skip_idx WHERE value = ' xxxxxxxxxx1015 ' ; - - check no exception <nl> + <nl> + DROP TABLE IF EXISTS bad_skip_idx ; <nl> similarity index 100 % <nl> rename from dbms / tests / queries / 0_stateless / 00960_live_view_watch_events_live . reference <nl> rename to dbms / tests / queries / 0_stateless / 01001_rename_merge_race_condition . reference <nl> new file mode 100755 <nl> index 00000000000 . . b0f1dda7c45 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01001_rename_merge_race_condition . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . 
sh <nl> + <nl> + set - e <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test1 " ; <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test2 " ; <nl> + $ CLICKHOUSE_CLIENT - - query " CREATE TABLE test1 ( x UInt64 ) ENGINE = Memory " ; <nl> + <nl> + <nl> + function thread1 ( ) <nl> + { <nl> + while true ; do <nl> + seq 1 1000 | sed - r - e ' s / . + / RENAME TABLE test1 TO test2 ; RENAME TABLE test2 TO test1 ; / ' | $ CLICKHOUSE_CLIENT - n <nl> + done <nl> + } <nl> + <nl> + function thread2 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " SELECT * FROM merge ( currentDatabase ( ) , ' ^ test [ 12 ] $ ' ) " <nl> + done <nl> + } <nl> + <nl> + # https : / / stackoverflow . com / questions / 9954794 / execute - a - shell - function - with - timeout <nl> + export - f thread1 ; <nl> + export - f thread2 ; <nl> + <nl> + TIMEOUT = 10 <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + <nl> + wait <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test1 " ; <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test2 " ; <nl> similarity index 100 % <nl> rename from dbms / tests / queries / 0_stateless / 00962_temporary_live_view_watch_live . reference <nl> rename to dbms / tests / queries / 0_stateless / 01002_alter_nullable_adaptive_granularity . reference <nl> new file mode 100755 <nl> index 00000000000 . . 85fc847f3f3 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01002_alter_nullable_adaptive_granularity . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . sh <nl> + <nl> + set - e <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test " ; <nl> + $ CLICKHOUSE_CLIENT - - query " CREATE TABLE test ( x UInt8 , s String MATERIALIZED toString ( rand64 ( ) ) ) ENGINE = MergeTree ORDER BY s " ; <nl> + <nl> + function thread1 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " INSERT INTO test SELECT rand ( ) FROM numbers ( 1000 ) " ; <nl> + done <nl> + } <nl> + <nl> + function thread2 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - n - - query " ALTER TABLE test MODIFY COLUMN x Nullable ( UInt8 ) ; " ; <nl> + sleep 0 . 0 $ RANDOM <nl> + $ CLICKHOUSE_CLIENT - n - - query " ALTER TABLE test MODIFY COLUMN x UInt8 ; " ; <nl> + sleep 0 . 0 $ RANDOM <nl> + done <nl> + } <nl> + <nl> + function thread3 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - n - - query " SELECT count ( ) FROM test FORMAT Null " ; <nl> + done <nl> + } <nl> + <nl> + function thread4 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - n - - query " OPTIMIZE TABLE test FINAL " ; <nl> + sleep 0 . 1 $ RANDOM <nl> + done <nl> + } <nl> + <nl> + # https : / / stackoverflow . 
com / questions / 9954794 / execute - a - shell - function - with - timeout <nl> + export - f thread1 ; <nl> + export - f thread2 ; <nl> + export - f thread3 ; <nl> + export - f thread4 ; <nl> + <nl> + TIMEOUT = 10 <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread4 2 > / dev / null & <nl> + <nl> + wait <nl> + <nl> + $ CLICKHOUSE_CLIENT - q " DROP TABLE test " <nl> similarity index 100 % <nl> rename from dbms / tests / queries / 0_stateless / 00963_temporary_live_view_watch_live_timeout . reference <nl> rename to dbms / tests / queries / 0_stateless / 01003_kill_query_race_condition . reference <nl> new file mode 100755 <nl> index 00000000000 . . d8a73ac24a4 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01003_kill_query_race_condition . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . sh <nl> + <nl> + set - e <nl> + <nl> + function thread1 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query_id = hello - - query " SELECT count ( ) FROM numbers ( 1000000000 ) " - - format Null ; <nl> + done <nl> + } <nl> + <nl> + function thread2 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " KILL QUERY WHERE query_id = ' hello ' " - - format Null ; <nl> + sleep 0 . $ RANDOM <nl> + done <nl> + } <nl> + <nl> + function thread3 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " SHOW PROCESSLIST " - - format Null ; <nl> + $ CLICKHOUSE_CLIENT - - query " SELECT * FROM system . processes " - - format Null ; <nl> + done <nl> + } <nl> + <nl> + # https : / / stackoverflow . com / questions / 9954794 / execute - a - shell - function - with - timeout <nl> + export - f thread1 ; <nl> + export - f thread2 ; <nl> + export - f thread3 ; <nl> + <nl> + TIMEOUT = 10 <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + <nl> + wait <nl> similarity index 100 % <nl> rename from dbms / tests / queries / 0_stateless / 00964_live_view_watch_events_heartbeat . reference <nl> rename to dbms / tests / queries / 0_stateless / 01004_rename_deadlock . reference <nl> new file mode 100755 <nl> index 00000000000 . . 5d5726bb001 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01004_rename_deadlock . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . 
sh <nl> + <nl> + set - e <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test1 " ; <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test2 " ; <nl> + $ CLICKHOUSE_CLIENT - - query " CREATE TABLE test1 ( x UInt8 ) ENGINE = MergeTree ORDER BY x " ; <nl> + $ CLICKHOUSE_CLIENT - - query " CREATE TABLE test2 ( x UInt8 ) ENGINE = MergeTree ORDER BY x " ; <nl> + <nl> + function thread1 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " RENAME TABLE test1 TO test_tmp , test2 TO test1 , test_tmp TO test2 " <nl> + done <nl> + } <nl> + <nl> + function thread2 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " SELECT * FROM test1 UNION ALL SELECT * FROM test2 " - - format Null <nl> + done <nl> + } <nl> + <nl> + function thread3 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " SELECT * FROM system . tables " - - format Null <nl> + done <nl> + } <nl> + <nl> + # https : / / stackoverflow . com / questions / 9954794 / execute - a - shell - function - with - timeout <nl> + export - f thread1 ; <nl> + export - f thread2 ; <nl> + export - f thread3 ; <nl> + <nl> + TIMEOUT = 10 <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread3 2 > / dev / null & <nl> + <nl> + wait <nl> + <nl> + $ CLICKHOUSE_CLIENT - q " DROP TABLE test1 " <nl> + $ CLICKHOUSE_CLIENT - q " DROP TABLE test2 " <nl> similarity index 100 % <nl> rename from dbms / tests / queries / 0_stateless / 00965_live_view_watch_heartbeat . reference <nl> rename to dbms / tests / queries / 0_stateless / 01005_rwr_shard_deadlock . reference <nl> new file mode 100755 <nl> index 00000000000 . . 1afd3acd324 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01005_rwr_shard_deadlock . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . sh <nl> + <nl> + set - e <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE IF EXISTS test1 " ; <nl> + $ CLICKHOUSE_CLIENT - - query " CREATE TABLE test1 ( x UInt8 ) ENGINE = MergeTree ORDER BY tuple ( ) " ; <nl> + <nl> + function thread1 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " ALTER TABLE test1 MODIFY COLUMN x Nullable ( UInt8 ) " <nl> + $ CLICKHOUSE_CLIENT - - query " ALTER TABLE test1 MODIFY COLUMN x UInt8 " <nl> + done <nl> + } <nl> + <nl> + function thread2 ( ) <nl> + { <nl> + while true ; do <nl> + $ CLICKHOUSE_CLIENT - - query " SELECT x FROM test1 WHERE x IN ( SELECT x FROM remote ( ' 127 . 0 . 0 . 2 ' , currentDatabase ( ) , test1 ) ) " - - format Null <nl> + done <nl> + } <nl> + <nl> + # https : / / stackoverflow . 
com / questions / 9954794 / execute - a - shell - function - with - timeout <nl> + export - f thread1 ; <nl> + export - f thread2 ; <nl> + <nl> + TIMEOUT = 10 <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + <nl> + timeout $ TIMEOUT bash - c thread1 2 > / dev / null & <nl> + timeout $ TIMEOUT bash - c thread2 2 > / dev / null & <nl> + <nl> + wait <nl> + <nl> + $ CLICKHOUSE_CLIENT - q " DROP TABLE test1 " <nl> new file mode 100755 <nl> index 00000000000 . . 4a6ad411298 <nl> mmm / dev / null <nl> ppp b / dbms / tests / stress <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + # https : / / stackoverflow . com / questions / 360201 / how - do - i - kill - background - processes - jobs - when - my - shell - script - exits <nl> + trap ' kill - 9 $ ( jobs - p ) ' EXIT <nl> + <nl> + function thread ( ) <nl> + { <nl> + while true ; do <nl> + . / clickhouse - test - - order random 2 > & 1 | awk ' / ^ \ w + : / { printf ( " \ 033 [ 0 ; % s % sm \ 033 [ 0m " , ( ' $ 1 ' % 2 ? " 4 " : " 10 " ) , ( int ( ' $ 1 ' / 2 ) % 8 ) ) } ' <nl> + done <nl> + } <nl> + <nl> + # https : / / stackoverflow . com / questions / 9954794 / execute - a - shell - function - with - timeout <nl> + export - f thread ; <nl> + <nl> + NUM_THREADS = $ { 1 : - " 16 " } <nl> + TIMEOUT = $ { 2 : - " 300 " } <nl> + <nl> + for i in $ ( seq 1 $ NUM_THREADS ) ; do <nl> + timeout $ TIMEOUT bash - c " thread $ i " 2 > / dev / null & <nl> + done <nl> + <nl> + wait <nl> mmm a / docker / packager / packager <nl> ppp b / docker / packager / packager <nl> def parse_env_variables ( build_type , compiler , sanitizer , package_type , cache , di <nl> result . append ( " ALIEN_PKGS = ' " + ' ' . join ( [ ' - - ' + pkg for pkg in alien_pkgs ] ) + " ' " ) <nl> <nl> if unbundled : <nl> - cmake_flags . append ( ' - DUNBUNDLED = 1 - DENABLE_MYSQL = 0 - DENABLE_POCO_ODBC = 0 - DENABLE_ODBC = 0 - DUSE_CAPNP = 0 ' ) <nl> + cmake_flags . append ( ' - DUNBUNDLED = 1 - DENABLE_MYSQL = 0 - DENABLE_POCO_ODBC = 0 - DENABLE_ODBC = 0 ' ) <nl> <nl> if split_binary : <nl> cmake_flags . append ( ' - DUSE_STATIC_LIBRARIES = 0 - DSPLIT_SHARED_LIBRARIES = 1 - DCLICKHOUSE_SPLIT_BINARY = 1 ' ) <nl>
Merge master
ClickHouse/ClickHouse
e7ee9b7a67403d0df630edcf5a5d74ac63158a2e
2019-09-02T08:46:00Z
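
The concurrency tests added here (01001_rename_merge_race_condition through 01005_rwr_shard_deadlock) and the new dbms/tests/stress driver all share one harness shape: each shell function loops a hostile query in the background, timeout $TIMEOUT bash -c threadN bounds every loop by wall-clock time, and a final wait joins them. A minimal C++ sketch of that shape, purely illustrative (run_stress and the two sample actions are invented names, not from the commit):

#include <chrono>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Run every action in its own worker until a shared wall-clock deadline,
// mirroring the `timeout $TIMEOUT bash -c threadN &` pattern in the tests.
void run_stress(const std::vector<std::function<void()>>& actions,
                std::chrono::seconds budget) {
    const auto deadline = std::chrono::steady_clock::now() + budget;
    std::vector<std::thread> workers;
    workers.reserve(actions.size());
    for (const auto& action : actions)
        workers.emplace_back([&action, deadline] {
            while (std::chrono::steady_clock::now() < deadline)
                action();                     // e.g. fire one query at the server
        });
    for (auto& w : workers) w.join();         // the trailing `wait`
}

int main() {
    run_stress({[] { std::puts("RENAME TABLE test1 TO test2"); },
                [] { std::puts("SELECT * FROM merge(...)"); }},
               std::chrono::seconds(1));
}

The shell version deliberately launches several copies of the same function (four thread1 workers in 01003) because these races only surface when identical operations overlap.
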
new file mode 100644 <nl> index 000000000000 . . 29fa3650734d <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28597 - first - char - of - sub - string - may - not - be - a - digit . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2016 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> + / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + / / REQUIRES : asserts <nl> + / / non - fuzz <nl> + let β1 = 0 <nl>
Merge pull request from practicalswift / swiftc - 28597 - first - char - of - sub - string - may - not - be - a - digit
apple/swift
146a537b0b3407ba6ac49933612706d319079381
2017-01-04T12:45:18Z
mmm a / include / swift / AST / DiagnosticsSIL . def <nl> ppp b / include / swift / AST / DiagnosticsSIL . def <nl> ERROR ( constexpr_imported_func_not_onone , none , " imported constant evaluable " <nl> <nl> ERROR ( non_physical_addressof , none , <nl> " addressof only works with purely physical lvalues ; " <nl> - " use ` withUnsafePointer ` or ` withUnsafeBytes ` unless you ' re implementing " <nl> - " ` withUnsafePointer ` or ` withUnsafeBytes ` " , ( ) ) <nl> + " use ' withUnsafePointer ' or ' withUnsafeBytes ' unless you ' re implementing " <nl> + " ' withUnsafePointer ' or ' withUnsafeBytes ' " , ( ) ) <nl> ERROR ( non_borrowed_indirect_addressof , none , <nl> " addressof only works with borrowable in - memory rvalues ; " <nl> - " use ` withUnsafePointer ` or ` withUnsafeBytes ` unless you ' re implementing " <nl> - " ` withUnsafePointer ` or ` withUnsafeBytes ` " , ( ) ) <nl> + " use ' withUnsafePointer ' or ' withUnsafeBytes ' unless you ' re implementing " <nl> + " ' withUnsafePointer ' or ' withUnsafeBytes ' " , ( ) ) <nl> <nl> REMARK ( opt_remark_passed , none , " % 0 " , ( StringRef ) ) <nl> REMARK ( opt_remark_missed , none , " % 0 " , ( StringRef ) ) <nl> mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> ERROR ( property_wrapper_type_not_usable_from_inline , none , <nl> " must be ' @ usableFromInline ' or public " , <nl> ( bool , bool ) ) <nl> WARNING ( property_wrapper_wrapperValue , none , <nl> - " property wrapper ' s ` wrapperValue ` property should be renamed to " <nl> + " property wrapper ' s ' wrapperValue ' property should be renamed to " <nl> " ' projectedValue ' ; use of ' wrapperValue ' is deprecated " , ( ) ) <nl> WARNING ( property_wrapper_init_initialValue , none , <nl> - " property wrapper ' s ` init ( initialValue : ) ` should be renamed " <nl> + " property wrapper ' s ' init ( initialValue : ) ' should be renamed " <nl> " to ' init ( wrappedValue : ) ' ; use of ' init ( initialValue : ) ' is deprecated " , <nl> ( ) ) <nl> ERROR ( property_wrapper_projection_value_missing , none , <nl> mmm a / test / decl / var / property_wrapper_aliases . swift <nl> ppp b / test / decl / var / property_wrapper_aliases . swift <nl> struct Wrapper < T > { <nl> struct OldWrapper < T > { <nl> var wrappedValue : T <nl> <nl> - var wrapperValue : Wrapper < T > { / / expected - warning { { property wrapper ' s ` wrapperValue ` property should be renamed to ' projectedValue ' ; use of ' wrapperValue ' is deprecated } } { { 7 - 19 = projectedValue } } <nl> + var wrapperValue : Wrapper < T > { / / expected - warning { { property wrapper ' s ' wrapperValue ' property should be renamed to ' projectedValue ' ; use of ' wrapperValue ' is deprecated } } { { 7 - 19 = projectedValue } } <nl> return Wrapper ( wrappedValue : wrappedValue ) <nl> } <nl> } <nl> struct OldWrapper < T > { <nl> struct OldWrapperWithInit < T > { <nl> var wrappedValue : T <nl> <nl> - init ( initialValue : T ) { / / expected - warning { { property wrapper ' s ` init ( initialValue : ) ` should be renamed to ' init ( wrappedValue : ) ' ; use of ' init ( initialValue : ) ' is deprecated } } { { 8 - 8 = wrappedValue } } <nl> + init ( initialValue : T ) { / / expected - warning { { property wrapper ' s ' init ( initialValue : ) ' should be renamed to ' init ( wrappedValue : ) ' ; use of ' init ( initialValue : ) ' is deprecated } } { { 8 - 8 = wrappedValue } } <nl> self . 
wrappedValue = initialValue <nl> } <nl> } <nl> struct OldWrapperWithInit < T > { <nl> struct OldWrapperWithInit2 < T > { <nl> var wrappedValue : T <nl> <nl> - init ( initialValue value : T ) { / / expected - warning { { property wrapper ' s ` init ( initialValue : ) ` should be renamed to ' init ( wrappedValue : ) ' ; use of ' init ( initialValue : ) ' is deprecated } } { { 8 - 20 = wrappedValue } } <nl> + init ( initialValue value : T ) { / / expected - warning { { property wrapper ' s ' init ( initialValue : ) ' should be renamed to ' init ( wrappedValue : ) ' ; use of ' init ( initialValue : ) ' is deprecated } } { { 8 - 20 = wrappedValue } } <nl> self . wrappedValue = value <nl> } <nl> } <nl>
Merge pull request from mdiep / remove - backticks - in - diagnostics
apple/swift
d8bc35bc0a1892016b3961c93bca7a7edfbe488c
2020-02-18T03:32:13Z
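
Both hunks edit X-macro tables: DiagnosticsSIL.def and DiagnosticsSema.def are lists of ERROR(...)/WARNING(...) invocations whose meaning is supplied by whoever includes the file, which is what makes a quoting-style change like this a pure text edit. A self-contained C++ sketch of that pattern, with two entries abbreviated from the diff (the expansion scheme below is a generic illustration, not Swift's actual diagnostic machinery):

#include <cstdio>

// X-macro table standing in for a .def file; the entries echo the commit's
// convention of quoting identifiers with '...' inside diagnostic text.
#define DIAG_TABLE(ERROR, WARNING)                                         \
  ERROR(non_physical_addressof,                                            \
        "addressof only works with purely physical lvalues; "              \
        "use 'withUnsafePointer' or 'withUnsafeBytes'")                    \
  WARNING(property_wrapper_wrapperValue,                                   \
          "property wrapper's 'wrapperValue' property should be renamed "  \
          "to 'projectedValue'")

enum class DiagID {
#define E(ID, TEXT) ID,
#define W(ID, TEXT) ID,
  DIAG_TABLE(E, W)
#undef E
#undef W
};

int main() {
#define E(ID, TEXT) std::puts("error: " TEXT);
#define W(ID, TEXT) std::puts("warning: " TEXT);
  DIAG_TABLE(E, W)
#undef E
#undef W
}
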
mmm a / tensorflow / tools / pip_package / setup . py <nl> ppp b / tensorflow / tools / pip_package / setup . py <nl> <nl> if ' tf_nightly ' in project_name : <nl> for i , pkg in enumerate ( REQUIRED_PACKAGES ) : <nl> if ' tensorboard ' in pkg : <nl> - REQUIRED_PACKAGES [ i ] = ' tb - nightly > = 1 . 11 . 0a0 , < 1 . 12 . 0a0 ' <nl> + REQUIRED_PACKAGES [ i ] = ' tb - nightly > = 1 . 12 . 0a0 , < 1 . 13 . 0a0 ' <nl> break <nl> <nl> # weakref . finalize and enum were introduced in Python 3 . 4 <nl>
Update tb - nightly dep to > = 1 . 12 . 0a0 , < 1 . 13 . 0a0
tensorflow/tensorflow
e62cd643839d264659285a273bcf34df1057136e
2018-09-25T23:09:29Z
mmm a / editor / plugins / script_editor_plugin . cpp <nl> ppp b / editor / plugins / script_editor_plugin . cpp <nl> void ScriptEditor : : _save_history ( ) { <nl> <nl> void ScriptEditor : : _go_to_tab ( int p_idx ) { <nl> <nl> + ScriptEditorBase * current = _get_current_editor ( ) ; <nl> + if ( current ) { <nl> + if ( current - > is_unsaved ( ) ) { <nl> + <nl> + current - > apply_code ( ) ; <nl> + } <nl> + } <nl> + <nl> Control * c = Object : : cast_to < Control > ( tab_container - > get_child ( p_idx ) ) ; <nl> if ( ! c ) <nl> return ; <nl>
FIX Script editor doesn ' t send unsaved script
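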
godotengine/godot
d33b4f4de824182464a7600d5645c250dfd84133
2019-05-05T04:56:19Z
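
The one-hunk fix reads as a guard: before another editor becomes current, flush the active editor's pending text into its backing script so the edits are not lost across the tab change. A toy distillation (types and behavior simplified; apply_code here is a stand-in for the real method):

#include <cstdio>

// Toy stand-in for ScriptEditorBase: apply_code() pushes the edited text
// back into the underlying Script resource and clears the dirty flag.
struct ScriptEditorBase {
    bool unsaved = false;
    void apply_code() {
        std::puts("flushing buffer into Script resource");
        unsaved = false;
    }
};

void go_to_tab(ScriptEditorBase* current, int /*p_idx*/) {
    if (current && current->unsaved)
        current->apply_code();   // the added guard: without it the pending
                                 // edits never reached the Script object
    // ... the original tab-switching logic follows ...
}

int main() {
    ScriptEditorBase editor;
    editor.unsaved = true;
    go_to_tab(&editor, 1);
}
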
mmm a / system / include / libc / grp . h <nl> ppp b / system / include / libc / grp . h <nl> void setgrfile ( const char * ) ; <nl> char * group_from_gid ( gid_t , int ) ; <nl> int setgroupent ( int ) ; <nl> # endif / * ! __CYGWIN__ * / <nl> - int initgroups ( const char * , gid_t ) ; <nl> # endif / * ! _XOPEN_SOURCE * / <nl> # endif / * ! _POSIX_SOURCE * / <nl> + int initgroups ( const char * , gid_t ) ; <nl> # endif / * ! __INSIDE_CYGWIN__ * / <nl> <nl> # ifdef __cplusplus <nl>
Move initgroups outside of ! _XOPEN_SOURCE .
emscripten-core/emscripten
15fb8c884e14f237050612d1e63ce259b23a98f6
2013-06-25T03:09:54Z
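
The prototype moves below the two feature-test #endif s, so defining _XOPEN_SOURCE (or _POSIX_SOURCE) no longer hides it; only the __INSIDE_CYGWIN__ guard still applies. A simplified reconstruction of the nesting (not the verbatim newlib header; gid_t is typedef'd locally just so the sketch compiles on its own):

// Simplified guard nesting. Before the change, initgroups() sat above both
// #endif s, so strict feature-test macros removed its prototype and callers
// hit implicit-declaration errors.
typedef unsigned int gid_t;               // assumption: the real header gets this elsewhere

#ifndef _POSIX_SOURCE
#ifndef _XOPEN_SOURCE
int setgroupent(int);                     // extensions stay hidden under strict modes
#endif /* !_XOPEN_SOURCE */
#endif /* !_POSIX_SOURCE */
int initgroups(const char*, gid_t);       // after the move: always declared

int main() { return 0; }                  // declarations only; nothing to run
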
mmm a / README . md <nl> ppp b / README . md <nl> npm install electron - - save - dev - - save - exact <nl> <nl> The ` - - save - exact ` flag is recommended as Electron does not follow semantic <nl> versioning . For info on how to manage Electron versions in your apps , see <nl> - [ Electron versioning ] ( https : / / electronjs . org / docs / tutorial / electron - versioning ) . <nl> + [ Electron versioning ] ( https : / / electronjs . org / docs / tutorial / versioning ) . <nl> <nl> For more installation options and troubleshooting tips , see <nl> [ installation ] ( https : / / electronjs . org / docs / tutorial / installation ) . <nl> mmm a / docs / README . md <nl> ppp b / docs / README . md <nl> an issue : <nl> * [ Glossary of Terms ] ( glossary . md ) <nl> * [ Supported Platforms ] ( tutorial / supported - platforms . md ) <nl> * [ Security ] ( tutorial / security . md ) <nl> - * [ Versioning ] ( versioning . md ) <nl> + * [ Versioning ] ( tutorial / versioning . md ) <nl> * [ Application Distribution ] ( tutorial / application - distribution . md ) <nl> * [ Mac App Store Submission Guide ] ( tutorial / mac - app - store - submission - guide . md ) <nl> * [ Windows Store Guide ] ( tutorial / windows - store - guide . md ) <nl> mmm a / docs / development / releasing . md <nl> ppp b / docs / development / releasing . md <nl> This document describes the process for releasing a new version of Electron . <nl> Run ` npm run prepare - release - - - - notesOnly ` to view auto generated release <nl> notes . The notes generated should help you determine if this is a major , minor , <nl> patch , or beta version change . Read the <nl> - [ Version Change Rules ] ( . . / versioning . md # semver ) for more information . <nl> + [ Version Change Rules ] ( . . / tutorial / versioning . md # semver ) for more information . <nl> <nl> # # Run the prepare - release script <nl> The prepare release script will do the following : <nl> release to npm . <nl> <nl> [ the releases page ] : https : / / github . com / electron / electron / releases <nl> [ this bump commit ] : https : / / github . com / electron / electron / commit / 78ec1b8f89b3886b856377a1756a51617bc33f5a <nl> - [ versioning ] : / docs / versioning . md <nl> + [ versioning ] : / docs / tutorial / versioning . md <nl> <nl> # # Fix missing binaries of a release manually <nl> <nl> mmm a / docs / tutorial / about . md <nl> ppp b / docs / tutorial / about . md <nl> In Electron , Node . js and Chromium share a single V8 instance — usually the versi <nl> <nl> # # # Versioning <nl> <nl> - Due to the hard dependency on Node . js and Chromium , Electron is in a tricky versioning position and [ does not follow ` semver ` ] ( http : / / semver . org ) . You should therefore always reference a specific version of Electron . [ Read more about Electron ' s versioning ] ( https : / / electronjs . org / docs / tutorial / electron - versioning ) or see the [ versions currently in use ] ( https : / / electronjs . org / # electron - versions ) . <nl> + Due to the hard dependency on Node . js and Chromium , Electron is in a tricky versioning position and [ does not follow ` semver ` ] ( http : / / semver . org ) . You should therefore always reference a specific version of Electron . [ Read more about Electron ' s versioning ] ( https : / / electronjs . org / docs / tutorial / versioning ) or see the [ versions currently in use ] ( https : / / electronjs . org / # electron - versions ) . <nl> <nl> # # # LTS <nl> <nl> new file mode 100644 <nl> index 000000000000 . . 
417105f360a2 <nl> mmm / dev / null <nl> ppp b / docs / tutorial / electron - versioning . md <nl> <nl> + Moved to [ Versioning ] ( versioning . md ) <nl> + <nl> + < ! - - <nl> + <nl> + This file will eventually be removed <nl> + <nl> + - - > <nl> \ No newline at end of file <nl> similarity index 92 % <nl> rename from docs / versioning . md <nl> rename to docs / tutorial / versioning . md <nl> mmm a / docs / versioning . md <nl> ppp b / docs / tutorial / versioning . md <nl> Electron versions * < 2 . 0 * have not conformed to the [ semver ] ( http : / / semver . org ) <nl> <nl> Here is an example of the 1 . x strategy : <nl> <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 0 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 0 . png ) <nl> <nl> An app developed with ` 1 . 8 . 1 ` cannot take the ` 1 . 8 . 3 ` bug fix without either absorbing the ` 1 . 8 . 2 ` feature , or by backporting the fix and maintaining a new release line . <nl> <nl> Note that most chromium updates will be considered breaking . Fixes that can be b <nl> <nl> Stabilization branches are branches that run parallel to master , taking in only cherry - picked commits that are related to security or stability . These branches are never merged back to master . <nl> <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 1 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 1 . png ) <nl> <nl> Stabilization branches are always either * * major * * or * * minor * * version lines , and named against the following template ` $ MAJOR - $ MINOR - x ` e . g . ` 2 - 0 - x ` . <nl> <nl> We allow for multiple stabilization branches to exist simultaneously , and intend to support at least two in parallel at all times , backporting security fixes as necessary . <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 2 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 2 . png ) <nl> <nl> Older lines will not be supported by GitHub , but other groups can take ownership and backport stability and security fixes on their own . We discourage this , but recognize that it makes life easier for many app developers . <nl> <nl> For each major and minor bump , you should expect too see something like the foll <nl> An example lifecycle in pictures : <nl> <nl> * A new release branch is created that includes the latest set of features . It is published as ` 2 . 0 . 0 - beta . 1 ` . <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 3 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 3 . png ) <nl> * A bug fix comes into master that can be pack - ported to the release branch . The patch is applied , and a new beta is published as ` 2 . 0 . 0 - beta . 2 ` . <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 4 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 4 . png ) <nl> * The beta is considered _generally stable_ and it is published again as a non - beta under ` 2 . 0 . 0 ` . <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 5 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 5 . png ) <nl> * Later , a zero - day exploit is revealed and a fix is applied to master . 
We pack - port the fix to the ` 2 - 0 - x ` line and release ` 2 . 0 . 1 ` . <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 6 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 6 . png ) <nl> <nl> A few examples of how various semver ranges will pick up new releases : <nl> <nl> - ! [ ] ( https : / / cdn . rawgit . com / electron / electron / version2 / docs / images / versioning - sketch - 7 . png ) <nl> + ! [ ] ( . . / images / versioning - sketch - 7 . png ) <nl> <nl> # Missing Features : Alphas , and Nightly <nl> Our strategy has a few tradeoffs , which for now we feel are appropriate . Most importantly that new features in master may take a while before reaching a stable release line . If you want to try a new feature immediately , you will have to build Electron yourself . <nl>
docs : changes based on review
electron/electron
1bce00545def684f2c7a8a6179c0c263978043cb
2017-12-05T19:43:59Z
mmm a / editor / scene_tree_dock . cpp <nl> ppp b / editor / scene_tree_dock . cpp <nl> void SceneTreeDock : : _tool_selected ( int p_tool , bool p_confirm_override ) { <nl> _delete_confirm ( ) ; <nl> <nl> } else { <nl> - if ( remove_list . size ( ) > = 2 ) { <nl> - delete_dialog - > set_text ( vformat ( TTR ( " Delete % d nodes ? " ) , remove_list . size ( ) ) ) ; <nl> - } else if ( remove_list . size ( ) = = 1 & & remove_list [ 0 ] = = editor_data - > get_edited_scene_root ( ) ) { <nl> - delete_dialog - > set_text ( vformat ( TTR ( " Delete the root node \ " % s \ " ? " ) , remove_list [ 0 ] - > get_name ( ) ) ) ; <nl> - } else if ( remove_list . size ( ) = = 1 & & remove_list [ 0 ] - > get_filename ( ) = = " " & & remove_list [ 0 ] - > get_child_count ( ) > = 1 ) { <nl> - / / Display this message only for non - instanced scenes <nl> - delete_dialog - > set_text ( vformat ( TTR ( " Delete node \ " % s \ " and its children ? " ) , remove_list [ 0 ] - > get_name ( ) ) ) ; <nl> + String msg ; <nl> + if ( remove_list . size ( ) > 1 ) { <nl> + bool any_children = false ; <nl> + for ( int i = 0 ; ! any_children & & i < remove_list . size ( ) ; i + + ) { <nl> + any_children = remove_list [ i ] - > get_child_count ( ) > 0 ; <nl> + } <nl> + <nl> + msg = vformat ( any_children ? TTR ( " Delete % d nodes and any children ? " ) : TTR ( " Delete % d nodes ? " ) , remove_list . size ( ) ) ; <nl> } else { <nl> - delete_dialog - > set_text ( vformat ( TTR ( " Delete node \ " % s \ " ? " ) , remove_list [ 0 ] - > get_name ( ) ) ) ; <nl> + Node * node = remove_list [ 0 ] ; <nl> + if ( node = = editor_data - > get_edited_scene_root ( ) ) { <nl> + msg = vformat ( TTR ( " Delete the root node \ " % s \ " ? " ) , node - > get_name ( ) ) ; <nl> + } else if ( node - > get_filename ( ) = = " " & & node - > get_child_count ( ) > 0 ) { <nl> + / / Display this message only for non - instanced scenes <nl> + msg = vformat ( TTR ( " Delete node \ " % s \ " and its children ? " ) , node - > get_name ( ) ) ; <nl> + } else { <nl> + msg = vformat ( TTR ( " Delete node \ " % s \ " ? " ) , node - > get_name ( ) ) ; <nl> + } <nl> } <nl> <nl> + delete_dialog - > set_text ( msg ) ; <nl> + <nl> / / Resize the dialog to its minimum size . <nl> / / This prevents the dialog from being too wide after displaying <nl> / / a deletion confirmation for a node with a long name . <nl>
Deleting multiple nodes displays correct message .
godotengine/godot
8b046ed4772bcbc8037398b76ead3f7876a338a4
2020-07-01T10:59:55Z
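
The rewrite replaces four flat special cases with a decision on (selection size, any-children, root, instanced), and for multi-selects it now scans the whole selection for children via a short-circuiting loop condition to pick the "and any children?" wording. A toy version of that selection logic (Node is simplified and the vformat/TTR calls are replaced with plain string building):

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct Node {
    std::string name;
    int child_count = 0;
    bool is_root = false;
    bool instanced = false;
};

std::string delete_prompt(const std::vector<Node>& sel) {   // assumes a non-empty selection
    if (sel.size() > 1) {
        bool any_children = false;
        for (std::size_t i = 0; !any_children && i < sel.size(); ++i)  // early exit, as in the patch
            any_children = sel[i].child_count > 0;
        return std::to_string(sel.size()) +
               (any_children ? " nodes and any children?" : " nodes?");
    }
    const Node& n = sel[0];
    if (n.is_root)                      return "the root node \"" + n.name + "\"?";
    if (!n.instanced && n.child_count)  return "node \"" + n.name + "\" and its children?";
    return "node \"" + n.name + "\"?";
}

int main() {
    std::printf("Delete %s\n",
                delete_prompt({{"Player", 2}, {"Enemy"}}).c_str());
    // prints: Delete 2 nodes and any children?
}
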
mmm a / include / mlir / IR / OpDefinition . h <nl> ppp b / include / mlir / IR / OpDefinition . h <nl> namespace OpTrait { <nl> template < typename ConcreteType > class OneResult ; <nl> } <nl> <nl> - / / / This type trait produces true if the specified type is in the specified <nl> - / / / type list . <nl> - template < typename same , typename first , typename . . . more > <nl> - struct typelist_contains { <nl> - static const bool value = std : : is_same < same , first > : : value | | <nl> - typelist_contains < same , more . . . > : : value ; <nl> - } ; <nl> - template < typename same , typename first > <nl> - struct typelist_contains < same , first > : std : : is_same < same , first > { } ; <nl> - <nl> / / / This class represents success / failure for operation parsing . It is <nl> / / / essentially a simple wrapper class around LogicalResult that allows for <nl> / / / explicit conversion to bool . This allows for the parser to chain together <nl> template < unsigned RegionIdx > class NthRegionIsIsolatedFromAbove { <nl> template < typename ConcreteType , template < typename T > class . . . Traits > <nl> class Op : public OpState , <nl> public Traits < ConcreteType > . . . , <nl> - public FoldingHook < <nl> - ConcreteType , <nl> - typelist_contains < OpTrait : : OneResult < ConcreteType > , OpState , <nl> - Traits < ConcreteType > . . . > : : value > { <nl> + public FoldingHook < ConcreteType , <nl> + llvm : : is_one_of < OpTrait : : OneResult < ConcreteType > , <nl> + Traits < ConcreteType > . . . > : : value > { <nl> public : <nl> / / / Return if this operation contains the provided trait . <nl> template < template < typename T > class Trait > <nl> static constexpr bool hasTrait ( ) { <nl> - return typelist_contains < Trait < ConcreteType > , OpState , <nl> - Traits < ConcreteType > . . . > : : value ; <nl> + return llvm : : is_one_of < Trait < ConcreteType > , Traits < ConcreteType > . . . > : : value ; <nl> } <nl> <nl> / / / Return the operation that this refers to . <nl>
NFC : Replace typelist_contains with llvm : : is_one_of . This should also fix weird build failures on MSVC related to typelist_contains for missing template arguments .
tensorflow/tensorflow
7eb5140a0c14e376cb3449b0aab673828835122a
2019-06-09T23:22:49Z
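
llvm::is_one_of<T, Ts...> is the variadic membership test that the removed typelist_contains hand-rolled, with one extra nicety: it is well-formed for an empty pack, which is why the OpState filler argument can disappear from the call sites. A standalone equivalent in plain C++17 (std::disjunction instead of LLVM's header, and ordinary types standing in for the Traits<ConcreteType> instantiations):

#include <type_traits>

// Plain-std equivalent of llvm::is_one_of<T, Ts...>: true iff T is the same
// type as at least one member of Ts...; an empty Ts... yields false.
template <typename T, typename... Ts>
using is_one_of = std::disjunction<std::is_same<T, Ts>...>;

struct OneResult {};
struct ZeroOperands {};
struct IsTerminator {};

template <typename... Traits>
struct Op {
    template <typename Trait>
    static constexpr bool hasTrait() { return is_one_of<Trait, Traits...>::value; }
};

using ReturnLikeOp = Op<OneResult, IsTerminator>;
static_assert(ReturnLikeOp::hasTrait<IsTerminator>(), "trait present");
static_assert(!ReturnLikeOp::hasTrait<ZeroOperands>(), "trait absent");
static_assert(!Op<>::hasTrait<OneResult>(), "empty trait list is well-formed");

int main() { return 0; }
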
mmm a / tensorflow / core / platform / file_system . cc <nl> ppp b / tensorflow / core / platform / file_system . cc <nl> void ForEach ( int first , int last , const std : : function < void ( int ) > & f ) { <nl> FileSystem : : ~ FileSystem ( ) { } <nl> <nl> string FileSystem : : TranslateName ( const string & name ) const { <nl> + / / If the name is empty , CleanPath returns " . " which is incorrect and <nl> + / / we should return the empty path instead . <nl> + if ( name . empty ( ) ) return name ; <nl> return io : : CleanPath ( name ) ; <nl> } <nl> <nl> mmm a / tensorflow / python / lib / io / file_io_test . py <nl> ppp b / tensorflow / python / lib / io / file_io_test . py <nl> def setUp ( self ) : <nl> def tearDown ( self ) : <nl> file_io . delete_recursively ( self . _base_dir ) <nl> <nl> + def testEmptyFilename ( self ) : <nl> + f = file_io . FileIO ( " " , mode = " r " ) <nl> + with self . assertRaises ( errors . NotFoundError ) : <nl> + _ = f . read ( ) <nl> + <nl> def testFileDoesntExist ( self ) : <nl> file_path = os . path . join ( self . _base_dir , " temp_file " ) <nl> self . assertFalse ( file_io . file_exists ( file_path ) ) <nl>
Handle empty path names by fixing how TranslateName ( ) works . Currently it calls out to io : : CleanPath , which converts " " to " . " , which isn ' t correct .
tensorflow/tensorflow
b650bf5a32432770a4c2b580464b0b736993a210
2017-09-22T20:37:07Z
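A minimal sketch of the guard this change adds; the CleanPath stub below is hypothetical, standing in for io::CleanPath (whose real behavior also includes collapsing duplicate separators and dot segments).

#include <string>

// Hypothetical stand-in for io::CleanPath: the piece that matters here is
// that an empty input comes back as ".".
std::string CleanPath(const std::string& name) {
  return name.empty() ? "." : name;
}

std::string TranslateName(const std::string& name) {
  // An empty path must stay empty; without this guard CleanPath would
  // incorrectly rewrite "" as ".".
  if (name.empty()) return name;
  return CleanPath(name);
}

With the guard, TranslateName("") yields "", so the read path can surface NotFoundError, which is what the added Python test expects.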
mmm a / Makefile <nl> ppp b / Makefile <nl> CNTKLIBRARY_COMMON_SRC = \ <nl> $ ( SOURCEDIR ) / CNTKv2LibraryDll / Serialization . cpp \ <nl> $ ( SOURCEDIR ) / CNTKv2LibraryDll / DistributedCommunicator . cpp \ <nl> $ ( SOURCEDIR ) / CNTKv2LibraryDll / DistributedLearnerBase . cpp \ <nl> + $ ( SOURCEDIR ) / CNTKv2LibraryDll / TrainingSession . cpp \ <nl> $ ( SOURCEDIR ) / CNTKv2LibraryDll / DataParallelDistributedLearner . cpp \ <nl> $ ( SOURCEDIR ) / CNTKv2LibraryDll / proto / CNTK . pb . cc \ <nl> <nl> mmm a / Source / CNTKv2LibraryDll / API / CNTKLibrary . h <nl> ppp b / Source / CNTKv2LibraryDll / API / CNTKLibrary . h <nl> namespace CNTK <nl> typedef TrainingParameterPerSampleSchedule < double > MomentumPerSampleSchedule ; <nl> typedef TrainingParameterPerMinibatchSchedule < double > MomentumPerMinibatchSchedule ; <nl> <nl> + typedef TrainingParameterSchedule < size_t > MinibatchSizeSchedule ; <nl> + <nl> / / / <nl> / / / This class allows to specify momentum as time constant in place of momentum per sample in <nl> / / / all of Learners factory methods . The specified values are then automatically converted into <nl> namespace CNTK <nl> m_learner - > ResetSmoothedGradients ( ) ; <nl> } <nl> <nl> + / / <nl> + / / Returns the total number of samples needed for warmup . <nl> + / / After reaching this number of samples the learner switches to the distributed mode . <nl> + / / Warm up is useful for <nl> + / / <nl> + virtual size_t ParallelizationAfter ( ) <nl> + { <nl> + return 0 ; <nl> + } <nl> + <nl> / / <nl> / / Method to update the parameters associated with this learner . By returning false , this method indicates that <nl> / / learning has stopped for all of the parameters associated with this learner <nl> namespace CNTK <nl> bool resetSGDMomentumAfterAggregation = true , <nl> double blockLearningRate = 1 . 0 ) ; <nl> <nl> + / / / <nl> + / / / Describes an input stream : its name , element type , storage , etc . <nl> + / / / <nl> + struct StreamInformation <nl> + { <nl> + std : : wstring m_name ; / / Unique name of the stream <nl> + size_t m_id ; / / Unique identifier of the stream <nl> + StorageFormat m_storageFormat ; / / Storage format of the stream <nl> + DataType m_elementType ; / / Element type of the stream <nl> + NDShape m_sampleLayout ; / / Layout of the sample for the stream <nl> + } ; <nl> + <nl> + inline bool operator = = ( const StreamInformation & left , const StreamInformation & right ) <nl> + { <nl> + return ( ( left . m_id = = right . m_id ) & & <nl> + ( left . m_name = = right . m_name ) & & <nl> + ( left . m_storageFormat = = right . m_storageFormat ) & & <nl> + ( left . m_elementType = = right . m_elementType ) & & <nl> + ( left . m_sampleLayout = = right . m_sampleLayout ) ) ; <nl> + } <nl> + <nl> / / / <nl> / / / Trainer is the top - level abstraction responsible for the orchestration of the training of a model <nl> / / / using the specified learners and training data either explicitly supplied as Value objects or from <nl> / / / a MinibatchSource object . <nl> / / / <nl> - class Trainer <nl> + class Trainer : public std : : enable_shared_from_this < Trainer > <nl> { <nl> public : <nl> - / / / <nl> - / / / Construct a Trainer to train the specified ' model ' with the specified ' trainingLoss ' Variable as the training criterion <nl> - / / / and using the specified set of ' parameterLearners ' for updating the model ' s parameters using computed gradients . 
<nl> - / / / <nl> - CNTK_API Trainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const std : : vector < LearnerPtr > & parameterLearners ) ; <nl> - <nl> - / / / <nl> - / / / Construct a Trainer to train the specified ' model ' with the specified ' trainingLoss ' as the training criterion , <nl> - / / / the specified ' evaluationFunction ' as the criterion for evaluating the trained model ' s quality , and using the specified set <nl> - / / / of ' parameterLearners ' for updating the model ' s parameters using computed gradients . <nl> - / / / <nl> - / / TODO : Add overload for multiple evaluation criterion <nl> - CNTK_API Trainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const FunctionPtr & evaluationFunction , const std : : vector < LearnerPtr > & parameterLearners ) ; <nl> - <nl> / / / <nl> / / / Optimize model parameters using the specified ' arguments ' minibatch of training samples . <nl> / / / Returns false if all parameter learners indicate end of learning ( through their Update method ' s return value ) . <nl> namespace CNTK <nl> CNTK_API size_t TotalNumberOfSamplesSeen ( ) const ; <nl> <nl> private : <nl> + template < typename T1 , typename . . . CtorArgTypes > <nl> + friend std : : shared_ptr < T1 > MakeSharedObject ( CtorArgTypes & & . . . ctorArgs ) ; <nl> + <nl> + Trainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const std : : vector < LearnerPtr > & parameterLearners ) ; <nl> + Trainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const FunctionPtr & evaluationFunction , const std : : vector < LearnerPtr > & parameterLearners ) ; <nl> + <nl> void ExecuteForwardBackward ( <nl> const std : : unordered_map < Variable , ValuePtr > & arguments , <nl> std : : unordered_map < Variable , ValuePtr > & outputsToFetch , <nl> namespace CNTK <nl> } ; <nl> <nl> / / / <nl> - / / / Describes an input stream : its name , element type , storage , etc . <nl> + / / / Construct a Trainer to train the specified ' model ' with the specified ' trainingLoss ' Variable as the training criterion <nl> + / / / and using the specified set of ' parameterLearners ' for updating the model ' s parameters using computed gradients . <nl> / / / <nl> - struct StreamInformation <nl> - { <nl> - std : : wstring m_name ; / / Unique name of the stream <nl> - size_t m_id ; / / Unique identifier of the stream <nl> - StorageFormat m_storageFormat ; / / Storage format of the stream <nl> - DataType m_elementType ; / / Element type of the stream <nl> - NDShape m_sampleLayout ; / / Layout of the sample for the stream <nl> - } ; <nl> + CNTK_API TrainerPtr CreateTrainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const std : : vector < LearnerPtr > & parameterLearners ) ; <nl> <nl> - inline bool operator = = ( const StreamInformation & left , const StreamInformation & right ) <nl> - { <nl> - return ( ( left . m_id = = right . m_id ) & & <nl> - ( left . m_name = = right . m_name ) & & <nl> - ( left . m_storageFormat = = right . m_storageFormat ) & & <nl> - ( left . m_elementType = = right . m_elementType ) & & <nl> - ( left . m_sampleLayout = = right . 
m_sampleLayout ) ) ; <nl> - } <nl> + / / / <nl> + / / / Construct a Trainer to train the specified ' model ' with the specified ' trainingLoss ' as the training criterion , <nl> + / / / the specified ' evaluationFunction ' as the criterion for evaluating the trained model ' s quality , and using the specified set <nl> + / / / of ' parameterLearners ' for updating the model ' s parameters using computed gradients . <nl> + / / / <nl> + CNTK_API TrainerPtr CreateTrainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const FunctionPtr & evaluationFunction , const std : : vector < LearnerPtr > & parameterLearners ) ; <nl> } <nl> <nl> namespace std { <nl> namespace CNTK <nl> / / / In case the size is specified in terms of both # sequences and # samples , the smaller of the 2 is taken . <nl> / / / An empty map is returned when the MinibatchSource has no more data to return . <nl> / / / <nl> - virtual const std : : unordered_map < StreamInformation , MinibatchData > & GetNextMinibatch ( size_t minibatchSizeInSamples , <nl> + CNTK_API const std : : unordered_map < StreamInformation , MinibatchData > & GetNextMinibatch ( <nl> size_t minibatchSizeInSequences , <nl> - const DeviceDescriptor & device = DeviceDescriptor : : UseDefaultDevice ( ) ) = 0 ; <nl> + size_t minibatchSizeInSamples , <nl> + const DeviceDescriptor & device = DeviceDescriptor : : UseDefaultDevice ( ) ) ; <nl> <nl> / / / <nl> - / / / Returns whether the MinibatchSource is running in distributed manner <nl> + / / / Same as above but allows specifying a partition of the data in a distributed environment . <nl> + / / / Depending on the number of workers the data is split into different partitions , <nl> + / / / and depending on the worker rank , only a particular partition is read . <nl> / / / <nl> - virtual bool IsDistributed ( ) const = 0 ; <nl> + CNTK_API virtual const std : : unordered_map < StreamInformation , MinibatchData > & GetNextMinibatch ( <nl> + size_t minibatchSizeInSequences , <nl> + size_t minibatchSizeInSamples , <nl> + size_t numberOfWorkers , <nl> + size_t workerRank , <nl> + const DeviceDescriptor & device = DeviceDescriptor : : UseDefaultDevice ( ) ) = 0 ; <nl> <nl> / / / <nl> / / / Destruct this MinibatchSource . 
<nl> namespace CNTK <nl> / / / <nl> / / / Instantiate the CNTK built - in test format minibatch source <nl> / / / <nl> - inline MinibatchSourcePtr TextFormatMinibatchSource ( const std : : wstring & dataFilePath , const std : : vector < StreamConfiguration > & streamConfigs , size_t epochSize = MinibatchSource : : InfinitelyRepeat , bool randomize = true , size_t distributedAfterSampleCount = MinibatchSource : : InfiniteSamples ) <nl> + inline MinibatchSourcePtr TextFormatMinibatchSource ( const std : : wstring & dataFilePath , const std : : vector < StreamConfiguration > & streamConfigs , size_t epochSize = MinibatchSource : : InfinitelyRepeat , bool randomize = true ) <nl> { <nl> : : CNTK : : Dictionary minibatchSourceConfiguration ; <nl> minibatchSourceConfiguration [ L " epochSize " ] = epochSize ; <nl> namespace CNTK <nl> <nl> deserializerConfiguration [ L " input " ] = inputStreamsConfig ; <nl> minibatchSourceConfiguration [ L " deserializers " ] = std : : vector < : : CNTK : : DictionaryValue > ( { deserializerConfiguration } ) ; <nl> - <nl> - / / TODO : change all these dictionary names to string constants <nl> - minibatchSourceConfiguration [ L " distributedAfterSampleCount " ] = distributedAfterSampleCount ; <nl> - <nl> return CreateCompositeMinibatchSource ( minibatchSourceConfiguration ) ; <nl> } <nl> <nl> namespace CNTK <nl> / / / Distributed communicator that allows quantized aggregations . <nl> / / / <nl> CNTK_API QuantizedDistributedCommunicatorPtr QuantizedMPICommunicator ( bool zeroThresholdFor1Bit , bool useQuantizationForSelfStripe , size_t numQuantizationBits ) ; <nl> + <nl> + / / / <nl> + / / / Base abstract class that represents a training session . <nl> + / / / Derived classes can redefine different aspects of training , overriding base virtual methods ( GetMinibatchSize , OnMinibatchStart , etc . ) <nl> + / / / <nl> + class TrainingSession <nl> + { <nl> + public : <nl> + CNTK_API TrainingSession ( const MinibatchSourcePtr & trainingSource , <nl> + const TrainerPtr & trainer , <nl> + const std : : unordered_map < Variable , StreamInformation > & modelInputToMinibatchSourceStream , <nl> + size_t checkpointFrequencyInSamples , <nl> + const std : : wstring & checkPointFileName ) ; <nl> + <nl> + / / / <nl> + / / / Runs the session . <nl> + / / / <nl> + CNTK_API virtual void Run ( const DeviceDescriptor & computeDevice ) ; <nl> + <nl> + / / / <nl> + / / / Restores a session from a checkpoint . <nl> + / / / <nl> + CNTK_API virtual void RestoreFromCheckpoint ( const std : : wstring & checkpointFileName ) ; <nl> + <nl> + virtual ~ TrainingSession ( ) { } <nl> + <nl> + protected : <nl> + / / <nl> + / / Called each time before a new minibatch is requested from the minibatch source <nl> + / / during training ( from Run method ) . <nl> + / / <nl> + virtual size_t GetMinibatchSize ( ) = 0 ; <nl> + <nl> + / / <nl> + / / Optionally overridable callback that is invoked before each minibatch . <nl> + / / <nl> + virtual void OnMinibatchStart ( ) { } ; <nl> + <nl> + / / <nl> + / / Accessors . 
<nl> + / / <nl> + TrainerPtr Trainer ( ) const { return m_trainer ; } <nl> + <nl> + MinibatchSourcePtr TrainingMinibatchSource ( ) const { return m_trainingSource ; } <nl> + <nl> + private : <nl> + / / Disallow copy and move construction and assignment <nl> + TrainingSession ( const TrainingSession & ) = delete ; TrainingSession & operator = ( const TrainingSession & ) = delete ; TrainingSession & operator = ( TrainingSession & & ) = delete ; TrainingSession ( TrainingSession & & ) = delete ; <nl> + <nl> + void SaveCheckpoint ( ) ; <nl> + <nl> + static const std : : wstring s_checkpointIndex ; <nl> + static const std : : wstring s_trainingMinibatchSource ; <nl> + <nl> + const size_t m_checkpointFrequencyinSamples ; <nl> + const std : : wstring m_checkPointFileName ; <nl> + size_t m_currentCheckpointIndex ; <nl> + <nl> + MinibatchSourcePtr m_trainingSource ; <nl> + TrainerPtr m_trainer ; <nl> + std : : unordered_map < Variable , StreamInformation > m_modelInputToMinibatchSourceStream ; <nl> + size_t m_parallelAfterSamples ; <nl> + size_t m_workerRank ; <nl> + size_t m_numberOfWorkers ; <nl> + } ; <nl> + <nl> + CNTK_API TrainingSessionPtr CreateBasicTrainingSession ( const MinibatchSourcePtr & trainingSource , <nl> + const TrainerPtr & trainer , <nl> + const std : : unordered_map < Variable , StreamInformation > & modelInputToMinibatchSourceStream , <nl> + const MinibatchSizeSchedule & minibatchSizeSchedule , <nl> + size_t checkpointFrequencyinSamples , <nl> + const std : : wstring & checkPointFileName ) ; <nl> } <nl> <nl> <nl> mmm a / Source / CNTKv2LibraryDll / API / CNTKLibraryInternals . h <nl> ppp b / Source / CNTKv2LibraryDll / API / CNTKLibraryInternals . h <nl> namespace CNTK <nl> struct VariableFields ; <nl> typedef std : : shared_ptr < VariableFields > VariableFieldsPtr ; <nl> <nl> + class TrainingSession ; <nl> + typedef std : : shared_ptr < TrainingSession > TrainingSessionPtr ; <nl> + <nl> + class Trainer ; <nl> + typedef std : : shared_ptr < Trainer > TrainerPtr ; <nl> + <nl> namespace Internal <nl> { <nl> CNTK_API FunctionPtr IsWithin ( const Variable & operand , int offset , const std : : wstring & name = L " " ) ; <nl> mmm a / Source / CNTKv2LibraryDll / CNTKv2LibraryDll . vcxproj <nl> ppp b / Source / CNTKv2LibraryDll / CNTKv2LibraryDll . vcxproj <nl> <nl> < ClInclude Include = " PrimitiveFunction . h " / > <nl> < ClInclude Include = " PrimitiveOpType . h " / > <nl> < ClInclude Include = " Serialization . h " / > <nl> + < ClInclude Include = " TrainingSession . h " / > <nl> < ClInclude Include = " Utils . h " / > <nl> < ClInclude Include = " stdafx . h " / > <nl> < ClInclude Include = " targetver . h " / > <nl> <nl> < PrecompiledHeader > Create < / PrecompiledHeader > <nl> < / ClCompile > <nl> < ClCompile Include = " Trainer . cpp " / > <nl> + < ClCompile Include = " TrainingSession . cpp " / > <nl> < ClCompile Include = " Utils . cpp " / > <nl> < ClCompile Include = " Value . cpp " / > <nl> < ClCompile Include = " Variable . cpp " / > <nl> mmm a / Source / CNTKv2LibraryDll / CNTKv2LibraryDll . vcxproj . filters <nl> ppp b / Source / CNTKv2LibraryDll / CNTKv2LibraryDll . vcxproj . filters <nl> <nl> < ClCompile Include = " PrimitiveFunction . cpp " / > <nl> < ClCompile Include = " DistributedLearnerBase . cpp " / > <nl> < ClCompile Include = " DataParallelDistributedLearner . cpp " / > <nl> + < ClCompile Include = " TrainingSession . cpp " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < ClInclude Include = " stdafx . 
h " / > <nl> <nl> < ClInclude Include = " DataParallelDistributedLearner . h " / > <nl> < ClInclude Include = " BlockFunction . h " / > <nl> < ClInclude Include = " Variable . h " / > <nl> + < ClInclude Include = " TrainingSession . h " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < Filter Include = " API " > <nl> <nl> < Filter > proto < / Filter > <nl> < / None > <nl> < / ItemGroup > <nl> - < / Project > <nl> \ No newline at end of file <nl> + < / Project > <nl> mmm a / Source / CNTKv2LibraryDll / DistributedLearnerBase . h <nl> ppp b / Source / CNTKv2LibraryDll / DistributedLearnerBase . h <nl> namespace CNTK <nl> <nl> void RestoreFromCheckpoint ( const Dictionary & checkpoint ) override ; <nl> <nl> + size_t ParallelizationAfter ( ) override <nl> + { <nl> + return m_distributeAfterSamples ; <nl> + } <nl> + <nl> protected : <nl> DistributedLearnerBase ( DistributedCommunicatorPtr communicator , LearnerPtr learner , size_t distributeAfterSamples ) ; <nl> <nl> mmm a / Source / CNTKv2LibraryDll / MinibatchSource . cpp <nl> ppp b / Source / CNTKv2LibraryDll / MinibatchSource . cpp <nl> namespace CNTK <nl> return GetNextMinibatch ( 0 , minibatchSizeInSamples , device ) ; <nl> } <nl> <nl> + const std : : unordered_map < StreamInformation , MinibatchData > & MinibatchSource : : GetNextMinibatch ( size_t minibatchSizeInSequences , size_t minibatchSizeInSamples , const DeviceDescriptor & device / * = DeviceDescriptor : : UseDefaultDevice ( ) * / ) <nl> + { <nl> + return GetNextMinibatch ( minibatchSizeInSequences , minibatchSizeInSamples , 1 , 0 , device ) ; <nl> + } <nl> + <nl> const StreamInformation & MinibatchSource : : StreamInfo ( const std : : wstring & streamName ) <nl> { <nl> std : : unordered_set < const StreamInformation * > matchingStreamInfos ; <nl> namespace CNTK <nl> } <nl> <nl> / * static * / const std : : wstring CompositeMinibatchSource : : PositionAttributeName = L " minibatchSourcePosition " ; <nl> - / * static * / const std : : wstring CompositeMinibatchSource : : DistributedAfterSampleCountAttributeName = L " minibatchDistributedAfterSampleCount " ; <nl> <nl> CompositeMinibatchSource : : CompositeMinibatchSource ( const Dictionary & configuration ) <nl> : m_epochEndReached ( false ) , <nl> namespace CNTK <nl> m_randomizedWindow ( MinibatchSource : : DefaultRandomizationWindow ) , <nl> m_truncationLength ( 0 ) , <nl> m_numWorkers ( 1 ) , <nl> - m_workerRank ( 0 ) , <nl> - m_distributed ( false ) , <nl> - m_distributedAfterSampleCount ( MinibatchSource : : InfiniteSamples ) <nl> + m_workerRank ( 0 ) <nl> { <nl> / / The CNTK reader implementation requires for each deserializer both the module and deserializer type be specified <nl> / / This is redundant and the V2 API users will just specify type from which the module is automatically inferred <nl> namespace CNTK <nl> m_truncationLength = augmentedConfiguration [ truncationLengthConfigurationKey ] . Value < size_t > ( ) ; <nl> } <nl> <nl> - / / TODO : change all the dictionary names to string constants <nl> - const wchar_t * distributedAfterSampleCountConfigurationKey = L " distributedAfterSampleCount " ; <nl> - if ( augmentedConfiguration . Contains ( distributedAfterSampleCountConfigurationKey ) ) <nl> - m_distributedAfterSampleCount = augmentedConfiguration [ distributedAfterSampleCountConfigurationKey ] . 
Value < size_t > ( ) ; <nl> - <nl> typedef Reader * ( * CreateCompositeDataReaderProc ) ( const ConfigParameters * parameters ) ; <nl> CreateCompositeDataReaderProc createReaderProc = ( CreateCompositeDataReaderProc ) Plugin ( ) . Load ( L " CompositeDataReader " , " CreateCompositeDataReader " ) ; <nl> std : : shared_ptr < Microsoft : : MSR : : CNTK : : Reader > compositeDataReader ( createReaderProc ( & config ) ) ; <nl> namespace CNTK <nl> / * virtual * / const std : : unordered_map < StreamInformation , MinibatchData > & <nl> CompositeMinibatchSource : : GetNextMinibatch ( size_t minibatchSizeInSequences , <nl> size_t minibatchSizeInSamples , <nl> + size_t numberOfWorkers , <nl> + size_t workerRank , <nl> const DeviceDescriptor & device / * = DeviceDescriptor : : UseDefaultDevice ( ) * / ) / * override * / <nl> { <nl> m_minibatchData . clear ( ) ; <nl> namespace CNTK <nl> if ( minibatchSizeInSamples = = 0 ) <nl> InvalidArgument ( " GetNextMinibatch : Requested minibatch sizes must be > 0 " ) ; <nl> <nl> - / / For the first number of m_distributedAfterSampleCount samples , minibatch source won ' t run distributed . <nl> - bool wasDistributed = m_distributed ; <nl> - if ( ! m_distributed & & IsDistributed ( ) ) <nl> - { <nl> - m_distributed = true ; <nl> - <nl> - if ( m_numWorkers = = 1 ) <nl> - { <nl> - MPIWrapperPtr mpi = MPIWrapper : : GetInstance ( ) ; <nl> - if ( mpi = = nullptr ) <nl> - { <nl> - / / create mpi instance if intended to be distributed <nl> - mpi = MPIWrapper : : GetInstance ( true ) ; <nl> - } <nl> - m_numWorkers = mpi - > NumNodesInUse ( ) ; <nl> - m_workerRank = mpi - > CurrentNodeRank ( ) ; <nl> - } <nl> - } <nl> - <nl> if ( m_prevMinibatchSize = = 0 ) <nl> { <nl> EpochConfiguration epochConfig ; <nl> - epochConfig . m_numberOfWorkers = m_distributed ? m_numWorkers : 1 ; <nl> - epochConfig . m_workerRank = m_distributed ? m_workerRank : 0 ; <nl> + epochConfig . m_numberOfWorkers = numberOfWorkers ; <nl> + epochConfig . m_workerRank = workerRank ; <nl> epochConfig . m_minibatchSizeInSamples = minibatchSizeInSamples ; <nl> epochConfig . m_truncationSize = m_truncationLength ; <nl> <nl> namespace CNTK <nl> <nl> m_shim - > StartEpoch ( epochConfig , inputs ) ; <nl> m_prevMinibatchSize = minibatchSizeInSamples ; <nl> - wasDistributed = m_distributed ; <nl> + m_workerRank = workerRank ; <nl> + m_numWorkers = numberOfWorkers ; <nl> } <nl> <nl> - if ( minibatchSizeInSamples ! = m_prevMinibatchSize | | wasDistributed ! = m_distributed ) <nl> + if ( minibatchSizeInSamples ! = m_prevMinibatchSize | | m_workerRank ! = workerRank | | m_numWorkers ! = numberOfWorkers ) <nl> { <nl> std : : map < std : : wstring , int > inputDescriptions ; <nl> for ( const auto & s : m_streamInfos ) <nl> inputDescriptions [ s . m_name ] = AsCNTKImplDeviceId ( device ) ; <nl> <nl> ReaderConfiguration newConfig ; <nl> - newConfig . m_numberOfWorkers = m_distributed ? m_numWorkers : 1 ; <nl> - newConfig . m_workerRank = m_distributed ? m_workerRank : 0 ; <nl> + newConfig . m_numberOfWorkers = numberOfWorkers ; <nl> + newConfig . m_workerRank = workerRank ; <nl> newConfig . m_minibatchSizeInSamples = minibatchSizeInSamples ; <nl> newConfig . 
m_truncationSize = m_truncationLength ; <nl> <nl> m_shim - > SetConfiguration ( newConfig , inputDescriptions ) ; <nl> <nl> m_prevMinibatchSize = minibatchSizeInSamples ; <nl> + m_workerRank = workerRank ; <nl> + m_numWorkers = numberOfWorkers ; <nl> } <nl> <nl> auto hasData = m_shim - > GetMinibatch ( m_matrices ) ; <nl> namespace CNTK <nl> { <nl> Dictionary checkpointState ; <nl> checkpointState [ PositionAttributeName ] = m_shim - > GetCurrentSamplePosition ( ) ; <nl> - checkpointState [ DistributedAfterSampleCountAttributeName ] = m_distributedAfterSampleCount ; <nl> return checkpointState ; <nl> } <nl> <nl> namespace CNTK <nl> { <nl> auto checkpointedMinibatchSourcePosition = checkpoint [ PositionAttributeName ] . Value < size_t > ( ) ; <nl> m_shim - > SetCurrentSamplePosition ( checkpointedMinibatchSourcePosition ) ; <nl> - m_distributedAfterSampleCount = checkpoint [ DistributedAfterSampleCountAttributeName ] . Value < size_t > ( ) ; <nl> } <nl> } <nl> mmm a / Source / CNTKv2LibraryDll / MinibatchSource . h <nl> ppp b / Source / CNTKv2LibraryDll / MinibatchSource . h <nl> namespace CNTK <nl> <nl> virtual const std : : unordered_set < StreamInformation > & StreamInfos ( ) override { return m_streamInfos ; } <nl> <nl> - virtual const std : : unordered_map < StreamInformation , MinibatchData > & GetNextMinibatch ( size_t minibatchSizeInSamples , <nl> - size_t minibatchSizeInSequences , <nl> - const DeviceDescriptor & device = DeviceDescriptor : : UseDefaultDevice ( ) ) override ; <nl> + const std : : unordered_map < StreamInformation , MinibatchData > & GetNextMinibatch ( <nl> + size_t minibatchSizeInSamples , <nl> + size_t minibatchSizeInSequences , <nl> + size_t numberOfWorkers , <nl> + size_t workerRank , <nl> + const DeviceDescriptor & device = DeviceDescriptor : : UseDefaultDevice ( ) ) override ; <nl> <nl> virtual Dictionary GetCheckpointState ( ) const override ; <nl> virtual void RestoreFromCheckpoint ( const Dictionary & checkpoint ) override ; <nl> <nl> - virtual bool IsDistributed ( ) const override <nl> - { <nl> - return m_shim - > GetCurrentSamplePosition ( ) > = m_distributedAfterSampleCount ; <nl> - } <nl> - <nl> private : <nl> static Microsoft : : MSR : : CNTK : : InputStreamDescription GetInputStreamDescription ( const StreamInformation & s , const DeviceDescriptor & device ) <nl> { <nl> namespace CNTK <nl> return Microsoft : : MSR : : CNTK : : InputStreamDescription ( s . m_name , CNTKdeviceId , CNTKMatrixType , CNTKMatrixFormat ) ; <nl> } <nl> <nl> - private : <nl> + private : <nl> std : : unordered_set < StreamInformation > m_streamInfos ; <nl> bool m_epochEndReached ; <nl> - bool m_distributed ; <nl> size_t m_numWorkers ; <nl> size_t m_workerRank ; <nl> - size_t m_distributedAfterSampleCount ; <nl> size_t m_prevMinibatchSize ; <nl> size_t m_epochSize ; <nl> size_t m_randomizedWindow ; <nl> mmm a / Source / CNTKv2LibraryDll / Trainer . cpp <nl> ppp b / Source / CNTKv2LibraryDll / Trainer . cpp <nl> namespace CNTK <nl> { <nl> return m_parameterLearners - > ParameterLearners ( ) . 
front ( ) - > TotalNumberOfSamplesSeen ( ) ; <nl> } <nl> + <nl> + TrainerPtr CreateTrainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const std : : vector < LearnerPtr > & parameterLearners ) <nl> + { <nl> + return MakeSharedObject < Trainer > ( model , lossFunction , parameterLearners ) ; <nl> + } <nl> + <nl> + TrainerPtr CreateTrainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const FunctionPtr & evaluationFunction , const std : : vector < LearnerPtr > & parameterLearners ) <nl> + { <nl> + return MakeSharedObject < Trainer > ( model , lossFunction , evaluationFunction , parameterLearners ) ; <nl> + } <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . b73291bb7db <nl> mmm / dev / null <nl> ppp b / Source / CNTKv2LibraryDll / TrainingSession . cpp <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # include " stdafx . h " <nl> + # include " CNTKLibrary . h " <nl> + # include " TrainingSession . h " <nl> + # include " fileutil . h " <nl> + <nl> + namespace CNTK <nl> + { <nl> + const std : : wstring TrainingSession : : s_checkpointIndex = L " CheckpointIndex " ; <nl> + const std : : wstring TrainingSession : : s_trainingMinibatchSource = L " TrainingMinibatchSource " ; <nl> + <nl> + TrainingSessionPtr CreateBasicTrainingSession ( const MinibatchSourcePtr & trainingSource , <nl> + const TrainerPtr & trainer , <nl> + const std : : unordered_map < Variable , StreamInformation > & modelInputToMinibatchSourceStream , <nl> + const MinibatchSizeSchedule & minibatchSizeSchedule , <nl> + size_t checkpointFrequencyinSamples , <nl> + const std : : wstring & checkPointFileName ) <nl> + { <nl> + return MakeSharedObject < BasicTrainingSession > ( trainingSource , <nl> + trainer , <nl> + modelInputToMinibatchSourceStream , <nl> + minibatchSizeSchedule , <nl> + checkpointFrequencyinSamples , <nl> + checkPointFileName ) ; <nl> + } <nl> + <nl> + TrainingSession : : TrainingSession ( <nl> + const MinibatchSourcePtr & trainingSource , <nl> + const TrainerPtr & trainer , <nl> + const std : : unordered_map < Variable , StreamInformation > & modelInputToMinibatchSourceStream , <nl> + size_t checkpointFrequencyInSamples , <nl> + const std : : wstring & checkPointFileName ) : <nl> + m_trainingSource ( trainingSource ) , <nl> + m_trainer ( trainer ) , <nl> + m_modelInputToMinibatchSourceStream ( modelInputToMinibatchSourceStream ) , <nl> + m_checkpointFrequencyinSamples ( checkpointFrequencyInSamples ) , <nl> + m_checkPointFileName ( checkPointFileName ) , <nl> + m_currentCheckpointIndex ( 0 ) , <nl> + m_parallelAfterSamples ( 0 ) , <nl> + m_workerRank ( 0 ) , <nl> + m_numberOfWorkers ( 1 ) <nl> + { <nl> + if ( ! trainingSource ) <nl> + InvalidArgument ( " Minibatch source is not allowed to be null . " ) ; <nl> + if ( ! trainer ) <nl> + InvalidArgument ( " Trainer is not allowed to be null . " ) ; <nl> + if ( modelInputToMinibatchSourceStream . empty ( ) ) <nl> + InvalidArgument ( " Input mapping is not allowed to be empty . " ) ; <nl> + if ( m_checkPointFileName . empty ( ) ) <nl> + InvalidArgument ( " Checkpoint file name is not allowed to be empty . " ) ; <nl> + <nl> + / / Let ' s calculate the warm up period the distributed learners may need . <nl> + / / We will take the maximum warm up period required . 
<nl> + auto learners = trainer - > ParameterLearners ( ) ; <nl> + m_parallelAfterSamples = 0 ; <nl> + for ( const auto & l : learners ) <nl> + { <nl> + auto distributed = std : : dynamic_pointer_cast < DistributedLearner > ( l ) ; <nl> + if ( distributed ) <nl> + { <nl> + m_parallelAfterSamples = std : : max ( m_parallelAfterSamples , distributed - > ParallelizationAfter ( ) ) ; <nl> + m_workerRank = distributed - > GetCommunicator ( ) - > CurrentWorker ( ) . m_globalRank ; <nl> + m_numberOfWorkers = distributed - > GetCommunicator ( ) - > Workers ( ) . size ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void TrainingSession : : Run ( const DeviceDescriptor & computeDevice ) <nl> + { <nl> + std : : unordered_map < Variable , ValuePtr > minibatch ; <nl> + bool shouldTrain = true ; <nl> + size_t numberOfWorkers = 1 ; <nl> + size_t workerRank = 0 ; <nl> + while ( shouldTrain ) <nl> + { <nl> + size_t mbSize = GetMinibatchSize ( ) ; <nl> + <nl> + if ( m_parallelAfterSamples > = m_trainer - > TotalNumberOfSamplesSeen ( ) ) <nl> + { <nl> + numberOfWorkers = m_numberOfWorkers ; <nl> + workerRank = m_workerRank ; <nl> + } <nl> + <nl> + auto minibatchData = m_trainingSource - > GetNextMinibatch ( 0 / * numberOfSequences * / , mbSize , numberOfWorkers , workerRank , computeDevice ) ; <nl> + <nl> + minibatch . clear ( ) ; <nl> + if ( ! minibatchData . empty ( ) ) <nl> + { <nl> + for ( auto v : m_modelInputToMinibatchSourceStream ) <nl> + minibatch . insert ( { v . first , minibatchData [ v . second ] . m_data } ) ; <nl> + } <nl> + <nl> + OnMinibatchStart ( ) ; <nl> + shouldTrain = m_trainer - > TrainMinibatch ( minibatch , computeDevice ) ; <nl> + <nl> + / / Check whether to create a checkpoint <nl> + size_t checkpointIndex = m_trainer - > TotalNumberOfSamplesSeen ( ) / m_checkpointFrequencyinSamples ; <nl> + if ( checkpointIndex > m_currentCheckpointIndex ) <nl> + { <nl> + m_currentCheckpointIndex = checkpointIndex ; <nl> + SaveCheckpoint ( ) ; <nl> + } <nl> + } <nl> + <nl> + SaveCheckpoint ( ) ; <nl> + } <nl> + <nl> + void TrainingSession : : RestoreFromCheckpoint ( const std : : wstring & checkpointFileName ) <nl> + { <nl> + Dictionary externalState = m_trainer - > RestoreFromCheckpoint ( checkpointFileName ) ; <nl> + m_currentCheckpointIndex = externalState [ s_checkpointIndex ] . Value < size_t > ( ) ; <nl> + m_trainingSource - > RestoreFromCheckpoint ( externalState [ s_trainingMinibatchSource ] . Value < Dictionary > ( ) ) ; <nl> + } <nl> + <nl> + void TrainingSession : : SaveCheckpoint ( ) <nl> + { <nl> + Dictionary externalState ; <nl> + externalState [ s_checkpointIndex ] = m_currentCheckpointIndex ; <nl> + externalState [ s_trainingMinibatchSource ] = m_trainingSource - > GetCheckpointState ( ) ; <nl> + <nl> + std : : wstring tempFileName = m_checkPointFileName + L " . tmp " ; <nl> + m_trainer - > SaveCheckpoint ( tempFileName , externalState ) ; <nl> + <nl> + / / Perform the actual renaming only on the main worker . <nl> + if ( m_workerRank = = 0 ) <nl> + { <nl> + _wunlink ( m_checkPointFileName . c_str ( ) ) ; <nl> + renameOrDie ( tempFileName , m_checkPointFileName ) ; <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f4888634f6a <nl> mmm / dev / null <nl> ppp b / Source / CNTKv2LibraryDll / TrainingSession . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . 
<nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include " CNTKLibrary . h " <nl> + <nl> + namespace CNTK <nl> + { <nl> + class BasicTrainingSession : public TrainingSession <nl> + { <nl> + public : <nl> + CNTK_API BasicTrainingSession ( <nl> + MinibatchSourcePtr trainingSource , <nl> + TrainerPtr trainer , <nl> + const std : : unordered_map < Variable , StreamInformation > & modelInputToMinibatchSourceStream , <nl> + const MinibatchSizeSchedule & minibatchSizeSchedule , <nl> + size_t checkpointFrequencyInSamples , <nl> + const std : : wstring & checkPointFileName ) <nl> + : TrainingSession ( trainingSource , trainer , modelInputToMinibatchSourceStream , checkpointFrequencyInSamples , checkPointFileName ) , <nl> + m_minibatchSizeSchedule ( minibatchSizeSchedule ) <nl> + { <nl> + if ( m_minibatchSizeSchedule . Unit ( ) = = MinibatchSizeSchedule : : UnitType : : Minibatch ) <nl> + LogicError ( " Currently CNTK only supports minibatch size schedule based on samples . " ) ; <nl> + } <nl> + <nl> + protected : <nl> + CNTK_API size_t GetMinibatchSize ( ) override <nl> + { <nl> + return m_minibatchSizeSchedule [ Trainer ( ) - > TotalNumberOfSamplesSeen ( ) ] ; <nl> + } <nl> + <nl> + private : <nl> + const MinibatchSizeSchedule m_minibatchSizeSchedule ; <nl> + } ; <nl> + } <nl> \ No newline at end of file <nl> mmm a / Source / CNTKv2LibraryDll / Utils . cpp <nl> ppp b / Source / CNTKv2LibraryDll / Utils . cpp <nl> namespace CNTK <nl> template void DictionaryValue : : FreePtrAsType < NDArrayView > ( ) ; <nl> <nl> template class TrainingParameterSchedule < double > ; <nl> + template class TrainingParameterSchedule < size_t > ; <nl> <nl> Learners : : Learners ( const std : : vector < LearnerPtr > & learners ) : <nl> m_learners ( learners ) , <nl> mmm a / Source / Readers / CNTKBinaryReader / BinaryChunkDeserializer . cpp <nl> ppp b / Source / Readers / CNTKBinaryReader / BinaryChunkDeserializer . cpp <nl> void BinaryChunkDeserializer : : Initialize ( const std : : map < std : : wstring , std : : wstri <nl> else if ( desType = = DeserializerType : : SparseBinaryDataDeserializer ) <nl> m_deserializers [ c ] = make_shared < SparseBinaryDataDeserializer > ( m_file ) ; <nl> else <nl> - RuntimeError ( " Unknown deserializer type % d requested . " , desType ) ; <nl> + RuntimeError ( " Unknown deserializer type % d requested . " , ( int ) desType ) ; <nl> <nl> streamDescription - > m_id = c ; <nl> streamDescription - > m_elementType = m_deserializers [ c ] - > GetElementType ( ) ; <nl> mmm a / Source / Readers / ReaderLib / NoRandomizer . cpp <nl> ppp b / Source / Readers / ReaderLib / NoRandomizer . cpp <nl> void NoRandomizer : : SetCurrentSamplePosition ( size_t samplePosition ) <nl> void NoRandomizer : : SetConfiguration ( const ReaderConfiguration & config ) <nl> { <nl> * ( ( ReaderConfiguration * ) & m_config ) = config ; <nl> - <nl> - / / TODO : should be removed . <nl> - / / Currently no restriction on the epoch size at all when SetConfiguration is used . <nl> - m_config . m_totalEpochSizeInSamples = std : : numeric_limits < size_t > ( ) . max ( ) / 2 ; / / Make sure we do not exceed size_t <nl> - m_config . m_epochIndex = 0 ; <nl> } <nl> <nl> } } } <nl> mmm a / Tests / UnitTests / V2LibraryDistributionTests / FrameModeTests . cpp <nl> ppp b / Tests / UnitTests / V2LibraryDistributionTests / FrameModeTests . cpp <nl> namespace <nl> auto minibatchSource = TextFormatMinibatchSource ( g_inputFile , <nl> { { g_featureStreamName , classifier . inputDim } , { g_labelsStreamName , classifier . 
ouputDim } } , <nl> totalNumberOfSamples , <nl> - true , <nl> - name = = L " blockmomentum " ? MinibatchSource : : InfiniteSamples : 0 ) ; <nl> + true ) ; <nl> <nl> auto featureStreamInfo = minibatchSource - > StreamInfo ( g_featureStreamName ) ; <nl> auto labelStreamInfo = minibatchSource - > StreamInfo ( g_labelsStreamName ) ; <nl> <nl> double learningRatePerSample = 0 . 02 ; <nl> <nl> - Trainer trainer ( classifier . output , classifier . trainingLoss , classifier . prediction , { factory ( { SGDLearner ( classifier . output - > Parameters ( ) , LearningRatePerSampleSchedule ( learningRatePerSample ) ) } ) } ) ; <nl> - size_t outputFrequencyInMinibatches = 20 ; <nl> - size_t currentCheckpointIndex = 0 ; <nl> + auto trainer = CreateTrainer ( classifier . output , classifier . trainingLoss , classifier . prediction , { factory ( { SGDLearner ( classifier . output - > Parameters ( ) , LearningRatePerSampleSchedule ( learningRatePerSample ) ) } ) } ) ; <nl> size_t checkpointFrequency = 7000 ; <nl> - size_t index = 0 ; <nl> - bool updated = true ; <nl> - while ( updated ) <nl> - { <nl> - auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> - if ( minibatchData . empty ( ) ) <nl> - updated = trainer . TrainMinibatch ( { } , device ) ; <nl> - else <nl> - updated = trainer . TrainMinibatch ( { { classifier . features , minibatchData [ featureStreamInfo ] . m_data } , { classifier . labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - <nl> - size_t checkpointIndex = trainer . TotalNumberOfSamplesSeen ( ) / checkpointFrequency ; <nl> - if ( checkpointIndex > currentCheckpointIndex ) <nl> - { <nl> - trainer . SaveCheckpoint ( L " test " ) ; <nl> - currentCheckpointIndex = checkpointIndex ; <nl> - } <nl> <nl> - PrintTrainingProgress ( trainer , index + + , outputFrequencyInMinibatches ) ; <nl> - } <nl> + TrainingSessionPtr session = CreateBasicTrainingSession ( <nl> + minibatchSource , <nl> + trainer , <nl> + { { classifier . features , featureStreamInfo } , { classifier . labels , labelStreamInfo } } , <nl> + MinibatchSizeSchedule ( minibatchSize , MinibatchSizeSchedule : : UnitType : : Sample ) , <nl> + checkpointFrequency , <nl> + L " test " ) ; <nl> + <nl> + session - > Run ( device ) ; <nl> } <nl> <nl> FeedForwardClassifier BuildFeedForwardClassifer ( const DeviceDescriptor & device ) <nl> namespace <nl> <nl> void TestFrameMode ( ) <nl> { <nl> - std : : this_thread : : sleep_for ( std : : chrono : : seconds ( 16 ) ) ; <nl> / / Create a set of trainers . <nl> std : : map < std : : wstring , std : : function < DistributedLearnerPtr ( LearnerPtr ) > > learners ; <nl> learners [ L " simple " ] = [ ] ( LearnerPtr l ) { return CreateDataParallelDistributedLearner ( MPICommunicator ( ) , l , 0 ) ; } ; <nl> mmm a / Tests / UnitTests / V2LibraryDistributionTests / Main . cpp <nl> ppp b / Tests / UnitTests / V2LibraryDistributionTests / Main . cpp <nl> int main ( int argc , char * argv [ ] ) <nl> } <nl> # endif <nl> <nl> - std : : this_thread : : sleep_for ( std : : chrono : : seconds ( 15 ) ) ; <nl> - <nl> if ( argc ! = 2 ) <nl> { <nl> fprintf ( stderr , " Expecting a log file parameter . \ n " ) ; <nl> mmm a / Tests / UnitTests / V2LibraryTests / CifarResNet . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / CifarResNet . cpp <nl> void TrainResNetCifarClassifer ( const DeviceDescriptor & device , bool testSaveAndR <nl> } <nl> <nl> LearningRatePerSampleSchedule learningRatePerSample = 0 . 
0078125 ; <nl> - Trainer trainer ( classifierOutput , trainingLoss , prediction , { SGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample ) } ) ; <nl> + auto trainer = CreateTrainer ( classifierOutput , trainingLoss , prediction , { SGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample ) } ) ; <nl> <nl> const size_t minibatchSize = 32 ; <nl> size_t numMinibatchesToTrain = 2000 ; <nl> void TrainResNetCifarClassifer ( const DeviceDescriptor & device , bool testSaveAndR <nl> for ( size_t i = 0 ; i < numMinibatchesToTrain ; + + i ) <nl> { <nl> auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> - trainer . TrainMinibatch ( { { imageInput , minibatchData [ imageStreamInfo ] . m_data } , { labelsVar , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { imageInput , minibatchData [ imageStreamInfo ] . m_data } , { labelsVar , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> PrintTrainingProgress ( trainer , i , outputFrequencyInMinibatches ) ; <nl> } <nl> } <nl> mmm a / Tests / UnitTests / V2LibraryTests / Common . h <nl> ppp b / Tests / UnitTests / V2LibraryTests / Common . h <nl> inline void OpenStream ( std : : fstream & stream , const std : : wstring & filename , bool <nl> stream . exceptions ( std : : ios_base : : badbit ) ; <nl> } <nl> <nl> - inline void PrintTrainingProgress ( const CNTK : : Trainer & trainer , size_t minibatchIdx , size_t outputFrequencyInMinibatches ) <nl> + inline void PrintTrainingProgress ( const CNTK : : TrainerPtr trainer , size_t minibatchIdx , size_t outputFrequencyInMinibatches ) <nl> { <nl> - if ( ( minibatchIdx % outputFrequencyInMinibatches ) = = 0 & & trainer . PreviousMinibatchSampleCount ( ) ! = 0 ) <nl> + if ( ( minibatchIdx % outputFrequencyInMinibatches ) = = 0 & & trainer - > PreviousMinibatchSampleCount ( ) ! = 0 ) <nl> { <nl> - double trainLossValue = trainer . PreviousMinibatchLossAverage ( ) ; <nl> - double evaluationValue = trainer . PreviousMinibatchEvaluationAverage ( ) ; <nl> + double trainLossValue = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> + double evaluationValue = trainer - > PreviousMinibatchEvaluationAverage ( ) ; <nl> printf ( " Minibatch % d : CrossEntropy loss = % . 8g , Evaluation criterion = % . 8g \ n " , ( int ) minibatchIdx , trainLossValue , evaluationValue ) ; <nl> } <nl> } <nl> mmm a / Tests / UnitTests / V2LibraryTests / MinibatchSourceTest . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / MinibatchSourceTest . 
cpp <nl> class MockCommunicator : public DistributedCommunicator <nl> } <nl> } ; <nl> <nl> - MinibatchSourcePtr TextFormatMinibatchSource ( const std : : wstring & dataFilePath , const std : : vector < StreamConfiguration > & streamConfigs , size_t epochSize , bool randomize , size_t distributedAfterSampleCount , size_t numWorkers , size_t workerRank , size_t chunkSizeInBytes ) <nl> + MinibatchSourcePtr TextFormatMinibatchSource ( const std : : wstring & dataFilePath , const std : : vector < StreamConfiguration > & streamConfigs , size_t epochSize , bool randomize , size_t chunkSizeInBytes ) <nl> { <nl> : : CNTK : : Dictionary minibatchSourceConfiguration ; <nl> minibatchSourceConfiguration [ L " epochSize " ] = epochSize ; <nl> MinibatchSourcePtr TextFormatMinibatchSource ( const std : : wstring & dataFilePath , c <nl> deserializerConfiguration [ L " input " ] = inputStreamsConfig ; <nl> deserializerConfiguration [ L " chunkSizeInBytes " ] = chunkSizeInBytes ; <nl> minibatchSourceConfiguration [ L " deserializers " ] = std : : vector < : : CNTK : : DictionaryValue > ( { deserializerConfiguration } ) ; <nl> - minibatchSourceConfiguration [ L " distributedAfterSampleCount " ] = distributedAfterSampleCount ; <nl> - minibatchSourceConfiguration [ L " numWorkers " ] = numWorkers ; <nl> - minibatchSourceConfiguration [ L " workerRank " ] = workerRank ; <nl> return CreateCompositeMinibatchSource ( minibatchSourceConfiguration ) ; <nl> } <nl> <nl> - void TestMinibatchSourceWarmStart ( size_t numMBs , size_t minibatchSize , size_t warmStartSamples , bool randomize , size_t chunkSizeInBytes , bool expectNoData = false ) <nl> + void TestMinibatchSourceWarmStart ( size_t minibatchSize , size_t warmStartSamples , bool randomize , size_t chunkSizeInBytes , bool expectNoData = false ) <nl> { <nl> + / / TODO : Currently this test is based on the number of samples . <nl> + / / We should switch to the real data instead . <nl> + <nl> const size_t inputDim = 2 ; <nl> const size_t numOutputClasses = 2 ; <nl> auto featureStreamName = L " features " ; <nl> auto labelsStreamName = L " labels " ; <nl> - const size_t numWorkers = 2 ; <nl> <nl> + const size_t numberOfSamplesInSweep = 10000 ; <nl> + <nl> + / / Let ' s create two workers . <nl> auto minibatchSource = TextFormatMinibatchSource ( <nl> L " SimpleDataTrain_cntk_text . txt " , <nl> { { featureStreamName , inputDim } , { labelsStreamName , numOutputClasses } } , <nl> - MinibatchSource : : InfinitelyRepeat , <nl> + numberOfSamplesInSweep , <nl> randomize , <nl> - warmStartSamples , <nl> - numWorkers , <nl> - 0 , <nl> chunkSizeInBytes ) ; <nl> <nl> + auto featureStreamInfo = minibatchSource - > StreamInfo ( featureStreamName ) ; <nl> + auto labelStreamInfo = minibatchSource - > StreamInfo ( labelsStreamName ) ; <nl> + <nl> auto minibatchSource2 = TextFormatMinibatchSource ( <nl> L " SimpleDataTrain_cntk_text . 
txt " , <nl> { { featureStreamName , inputDim } , { labelsStreamName , numOutputClasses } } , <nl> - MinibatchSource : : InfinitelyRepeat , <nl> + numberOfSamplesInSweep , <nl> randomize , <nl> - warmStartSamples , <nl> - numWorkers , <nl> - 1 , <nl> chunkSizeInBytes ) ; <nl> <nl> - auto featureStreamInfo = minibatchSource - > StreamInfo ( featureStreamName ) ; <nl> - auto labelStreamInfo = minibatchSource - > StreamInfo ( labelsStreamName ) ; <nl> - <nl> - auto featureStreamInfo2 = minibatchSource2 - > StreamInfo ( featureStreamName ) ; <nl> - auto labelStreamInfo2 = minibatchSource2 - > StreamInfo ( labelsStreamName ) ; <nl> - <nl> size_t totalSamples = 0 ; <nl> - for ( size_t i = 0 ; i < numMBs ; + + i ) <nl> + bool hasData = true ; <nl> + while ( hasData ) <nl> { <nl> - bool distributed = minibatchSource - > IsDistributed ( ) ; <nl> - bool distributed2 = minibatchSource2 - > IsDistributed ( ) ; <nl> - if ( distributed ! = ( totalSamples > = warmStartSamples ) | | distributed ! = distributed2 ) <nl> + if ( totalSamples < warmStartSamples ) <nl> { <nl> - ReportFailure ( " TestMinibatchSourceWarmStart failed in distributed state : expected % d , actual % d " , <nl> - totalSamples > = warmStartSamples , distributed ) ; <nl> - } <nl> - <nl> - auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize ) ; <nl> - auto minibatchData2 = minibatchSource2 - > GetNextMinibatch ( minibatchSize ) ; <nl> + auto minibatchData = minibatchSource - > GetNextMinibatch ( 0 , minibatchSize , 1 , 0 ) ; <nl> + auto minibatchData2 = minibatchSource2 - > GetNextMinibatch ( 0 , minibatchSize , 1 , 0 ) ; <nl> <nl> - size_t expectedNumSamples = minibatchSize ; <nl> - size_t numSamples = minibatchData [ featureStreamInfo ] . m_numSamples ; <nl> + if ( minibatchData [ featureStreamInfo ] . m_numSamples ! = minibatchData2 [ featureStreamInfo ] . m_numSamples ) <nl> + ReportFailure ( " Data does not match , reads are not deterministic ! ! ! " ) ; <nl> <nl> - if ( expectNoData & & distributed2 ) <nl> - { <nl> - if ( numSamples ! = expectedNumSamples / 2 | | ! minibatchData2 . empty ( ) ) <nl> - ReportFailure ( " TestMinibatchSourceWarmStart failed in sample count : expected % lu , distributed % d ( 0 : % lu ) " , expectedNumSamples , distributed , numSamples ) ; <nl> + / / Because they are supposed to read the same data - adding it only once . <nl> + totalSamples + = minibatchData [ featureStreamInfo ] . m_numSamples ; <nl> } <nl> else <nl> { <nl> - size_t numSamples2 = minibatchData2 [ featureStreamInfo ] . m_numSamples ; <nl> - if ( numSamples ! = numSamples2 ) <nl> - ReportFailure ( " TestMinibatchSourceWarmStart failed in sample count : expected % lu , distributed % d ( 0 : % lu , 1 : % lu ) " , expectedNumSamples , distributed , numSamples , numSamples2 ) ; <nl> + / / We are in distributed mode , the sum should be equal to the minibatch size <nl> + / / or less at the end of the sweep . <nl> + auto minibatchData = minibatchSource - > GetNextMinibatch ( 0 , minibatchSize , 2 , 0 ) ; <nl> + auto minibatchData2 = minibatchSource2 - > GetNextMinibatch ( 0 , minibatchSize , 2 , 1 ) ; <nl> + <nl> + hasData = ! minibatchData . empty ( ) | | ! minibatchData2 . empty ( ) ; <nl> + if ( ! hasData ) <nl> + break ; <nl> + <nl> + / / Update the counter <nl> + size_t accumulative = 0 ; <nl> + if ( ! minibatchData . empty ( ) ) <nl> + accumulative + = minibatchData [ featureStreamInfo ] . m_numSamples ; <nl> + if ( ! minibatchData2 . empty ( ) ) <nl> + accumulative + = minibatchData2 [ featureStreamInfo ] . 
m_numSamples ; <nl> + <nl> + totalSamples + = accumulative ; <nl> + <nl> + if ( expectNoData ) / / second worker does not have any data . <nl> + { <nl> + if ( minibatchData [ featureStreamInfo ] . m_numSamples ! = minibatchSize / 2 & & totalSamples ! = numberOfSamplesInSweep ) <nl> + ReportFailure ( " TestMinibatchSourceWarmStart failed because data did not match . " <nl> + " Expected minibatch size ' % d ' , actual ' % d ' . Total number of samples ' % d ' , sweep ' % d ' . " , <nl> + ( int ) minibatchSize , <nl> + ( int ) minibatchData [ featureStreamInfo ] . m_numSamples , <nl> + ( int ) totalSamples , <nl> + ( int ) numberOfSamplesInSweep ) ; <nl> + } <nl> + else <nl> + { <nl> + if ( accumulative ! = minibatchSize & & <nl> + minibatchData [ featureStreamInfo ] . m_numSamples ! = minibatchSize / 2 & & <nl> + minibatchData2 [ featureStreamInfo ] . m_numSamples ! = minibatchSize / 2 & & <nl> + totalSamples ! = numberOfSamplesInSweep ) <nl> + ReportFailure ( " TestMinibatchSourceWarmStart failed because data did not match . " <nl> + " Expected minibatch size ' % d ' , actual ' % d ' . Total number of samples ' % d ' , sweep ' % d ' . " , <nl> + ( int ) minibatchSize , <nl> + ( int ) accumulative , <nl> + ( int ) totalSamples , <nl> + ( int ) numberOfSamplesInSweep ) ; <nl> + } <nl> + } <nl> - <nl> - totalSamples + = expectedNumSamples ; <nl> } <nl> + <nl> + if ( totalSamples ! = numberOfSamplesInSweep ) <nl> + ReportFailure ( " Expected sweep number ' % d ' did not match the actual ' % d ' . " , <nl> + ( int ) numberOfSamplesInSweep , <nl> + ( int ) totalSamples ) ; <nl> } <nl> <nl> void MinibatchSourceTests ( ) <nl> { <nl> / / Test no - randomize minibatch source with small data chunks <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 128 , false , 1024 ) ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 0 , false , 1024 ) ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 100 , false , 1024 ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 128 , false , 1024 ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 0 , false , 1024 ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 100 , false , 1024 ) ; <nl> <nl> / / Test no - randomized minibatch source with a single chunk <nl> size_t chunk32MB = 1024 * 1024 * 32 ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 128 , false , chunk32MB ) ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 0 , false , chunk32MB ) ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 100 , false , chunk32MB ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 128 , false , chunk32MB ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 0 , false , chunk32MB ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 100 , false , chunk32MB ) ; <nl> <nl> / / Test randomized minibatch source with small data chunks <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 0 , true , 1024 ) ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 128 , true , 1024 ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 0 , true , 1024 ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 128 , true , 1024 ) ; <nl> <nl> / / Test randomized minibatch source with no data for one of the workers <nl> / / due to decimation based on chunks <nl> bool expectNoData = true ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 0 , true , chunk32MB , expectNoData ) ; <nl> - TestMinibatchSourceWarmStart ( 10 , 64 , 128 , true , chunk32MB , expectNoData ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 0 , true , chunk32MB , expectNoData ) ; <nl> + TestMinibatchSourceWarmStart ( 64 , 128 , true , chunk32MB , expectNoData ) ; <nl> } <nl> \ No newline at end of file 
<nl> mmm a / Tests / UnitTests / V2LibraryTests / Seq2Seq . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / Seq2Seq . cpp <nl> void TrainSequenceToSequenceTranslator ( const DeviceDescriptor & device , bool useS <nl> AdditionalLearningOptions additionalOptions ; <nl> additionalOptions . gradientClippingThresholdPerSample = 2 . 3 ; <nl> additionalOptions . gradientClippingWithTruncation = true ; <nl> - Trainer trainer ( z , ce , errs , { MomentumSGDLearner ( z - > Parameters ( ) , learningRatePerSample , momentumTimeConstant , / * unitGainMomentum = * / true , additionalOptions ) } ) ; <nl> + <nl> + auto trainer = CreateTrainer ( z , ce , errs , { MomentumSGDLearner ( z - > Parameters ( ) , learningRatePerSample , momentumTimeConstant , / * unitGainMomentum = * / true , additionalOptions ) } ) ; <nl> <nl> size_t outputFrequencyInMinibatches = 1 ; <nl> size_t minibatchSize1 = 72 ; <nl> void TrainSequenceToSequenceTranslator ( const DeviceDescriptor & device , bool useS <nl> if ( ! restorationDone & & ( i = = numMinibatchesToRestoreFromCheckpointAfter ) ) <nl> { <nl> printf ( " Trainer restoring from checkpoint at path % S \ n " , modelFile ) ; <nl> - trainer . RestoreFromCheckpoint ( modelFile ) ; <nl> + trainer - > RestoreFromCheckpoint ( modelFile ) ; <nl> minibatchSource - > RestoreFromCheckpoint ( minibatchSourceCheckpoint ) ; <nl> i = numMinibatchesToCheckpointAfter ; <nl> restorationDone = true ; <nl> void TrainSequenceToSequenceTranslator ( const DeviceDescriptor & device , bool useS <nl> if ( minibatchData . empty ( ) ) <nl> break ; <nl> <nl> - trainer . TrainMinibatch ( { { rawInput , minibatchData [ rawInputStreamInfo ] . m_data } , { rawLabels , minibatchData [ rawLabelsStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { rawInput , minibatchData [ rawInputStreamInfo ] . m_data } , { rawLabels , minibatchData [ rawLabelsStreamInfo ] . m_data } } , device ) ; <nl> PrintTrainingProgress ( trainer , i , outputFrequencyInMinibatches ) ; <nl> <nl> if ( ( i + 1 ) = = numMinibatchesToCheckpointAfter ) <nl> { <nl> printf ( " Trainer checkpointing to path % S \ n " , modelFile ) ; <nl> - trainer . SaveCheckpoint ( modelFile ) ; <nl> + trainer - > SaveCheckpoint ( modelFile ) ; <nl> minibatchSourceCheckpoint = minibatchSource - > GetCheckpointState ( ) ; <nl> } <nl> <nl> mmm a / Tests / UnitTests / V2LibraryTests / SequenceClassification . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / SequenceClassification . cpp <nl> void TrainLSTMSequenceClassifer ( const DeviceDescriptor & device , bool useSparseLa <nl> <nl> LearningRatePerSampleSchedule learningRatePerSample = 0 . 0005 ; <nl> MomentumAsTimeConstantSchedule momentumTimeConstant = 256 ; <nl> - Trainer trainer ( classifierOutput , trainingLoss , prediction , <nl> - { MomentumSGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample , <nl> - momentumTimeConstant , / * unitGainMomentum = * / true ) } ) ; <nl> + auto trainer = CreateTrainer ( classifierOutput , trainingLoss , prediction , <nl> + { MomentumSGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample , <nl> + momentumTimeConstant , / * unitGainMomentum = * / true ) } ) ; <nl> <nl> size_t outputFrequencyInMinibatches = 1 ; <nl> for ( size_t i = 0 ; true ; i + + ) <nl> void TrainLSTMSequenceClassifer ( const DeviceDescriptor & device , bool useSparseLa <nl> if ( minibatchData . empty ( ) ) <nl> break ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . 
m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> PrintTrainingProgress ( trainer , i , outputFrequencyInMinibatches ) ; <nl> } <nl> } <nl> void TestLearningRateControl ( const DeviceDescriptor & device ) <nl> <nl> LearningRatePerSampleSchedule learningRateSchedule ( { { 2 , 0 . 0005 } , { 2 , 0 . 00025 } } , actualMBSize ) ; <nl> auto learner = SGDLearner ( classifierOutput - > Parameters ( ) , learningRateSchedule ) ; <nl> - Trainer trainer ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> + auto trainer = CreateTrainer ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0005 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0005 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> const wchar_t * modelFile = L " seq2seq . model " ; <nl> - trainer . SaveCheckpoint ( modelFile ) ; <nl> + trainer - > SaveCheckpoint ( modelFile ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - auto MB2Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + auto MB2Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 00025 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - auto MB3Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + auto MB3Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 00025 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . RestoreFromCheckpoint ( modelFile ) ; <nl> + trainer - > RestoreFromCheckpoint ( modelFile ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0005 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - auto postRestoreMB2Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . 
m_data } } , device ) ; <nl> + auto postRestoreMB2Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB2Loss , MB2Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 00025 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - auto postRestoreMB3Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + auto postRestoreMB3Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB3Loss , MB3Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> - trainer . RestoreFromCheckpoint ( modelFile ) ; <nl> + trainer - > RestoreFromCheckpoint ( modelFile ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0005 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> learner - > ResetLearningRate ( LearningRatePerSampleSchedule ( 0 . 0004 ) ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0004 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . SaveCheckpoint ( modelFile ) ; <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - postRestoreMB2Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > SaveCheckpoint ( modelFile ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + postRestoreMB2Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB2Loss , MB2Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0004 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - postRestoreMB3Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + postRestoreMB3Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB3Loss , MB3Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0004 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . RestoreFromCheckpoint ( modelFile ) ; <nl> + trainer - > RestoreFromCheckpoint ( modelFile ) ; <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0004 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . 
m_data } } , device ) ; <nl> - postRestoreMB2Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + postRestoreMB2Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB2Loss , MB2Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0004 , " Learner : : LearningRate does not match expectation " ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - postRestoreMB3Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + postRestoreMB3Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB3Loss , MB3Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> FloatingPointCompare ( learner - > LearningRate ( ) , 0 . 0004 , " Learner : : LearningRate does not match expectation " ) ; <nl> mmm a / Tests / UnitTests / V2LibraryTests / SerializationTests . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / SerializationTests . cpp <nl> void TestFunctionSerialization ( const DeviceDescriptor & device ) <nl> TestFunctionSaveAndLoad ( BuildLSTMClassifierNet ( inputVar , 5 , device ) , device ) ; <nl> } <nl> <nl> - Trainer BuildTrainer ( const FunctionPtr & function , const Variable & labels , <nl> + TrainerPtr BuildTrainer ( const FunctionPtr & function , const Variable & labels , <nl> LearningRateSchedule lr = LearningRatePerSampleSchedule ( 0 . 005 ) , <nl> MomentumSchedule m = MomentumAsTimeConstantSchedule ( 0 . 0 ) ) <nl> { <nl> auto trainingLoss = CNTK : : CrossEntropyWithSoftmax ( function , labels , L " lossFunction " ) ; <nl> auto prediction = CNTK : : ClassificationError ( function , labels , L " classificationError " ) ; <nl> auto learner = MomentumSGDLearner ( function - > Parameters ( ) , lr , m , / * unitGainMomentum = * / true ) ; <nl> - return Trainer ( function , trainingLoss , prediction , { learner } ) ; <nl> + return CreateTrainer ( function , trainingLoss , prediction , { learner } ) ; <nl> } <nl> <nl> void TestFunctionSerializationDuringTraining ( const FunctionPtr & function , const Variable & labels , const MinibatchSourcePtr & minibatchSource , const DeviceDescriptor & device ) <nl> void TestFunctionSerializationDuringTraining ( const FunctionPtr & function , const <nl> <nl> Dictionary model = classifierOutput1 - > Serialize ( ) ; <nl> <nl> - trainer1 . TrainMinibatch ( { { classifierOutput1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer1 - > TrainMinibatch ( { { classifierOutput1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . 
m_data } } , device ) ; <nl> <nl> auto classifierOutput2 = Function : : Deserialize ( model , device ) ; <nl> <nl> void TestFunctionSerializationDuringTraining ( const FunctionPtr & function , const <nl> throw std : : runtime_error ( " TestModelSerialization : original and reloaded functions are not identical . " ) ; <nl> } <nl> <nl> - Trainer trainer2 = BuildTrainer ( classifierOutput3 , labels ) ; <nl> + auto trainer2 = BuildTrainer ( classifierOutput3 , labels ) ; <nl> <nl> for ( int j = 0 ; j < 3 ; + + j ) <nl> { <nl> - trainer1 . TrainMinibatch ( { { classifierOutput1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - trainer2 . TrainMinibatch ( { { classifierOutput3 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer1 - > TrainMinibatch ( { { classifierOutput1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer2 - > TrainMinibatch ( { { classifierOutput3 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> - double mbLoss1 = trainer1 . PreviousMinibatchLossAverage ( ) ; <nl> - double mbLoss2 = trainer2 . PreviousMinibatchLossAverage ( ) ; <nl> + double mbLoss1 = trainer1 - > PreviousMinibatchLossAverage ( ) ; <nl> + double mbLoss2 = trainer2 - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( mbLoss1 , mbLoss2 , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> } <nl> } <nl> void TestTrainingWithCheckpointing ( const FunctionPtr & function1 , const FunctionP <nl> <nl> assert ( AreEqual ( function1 , function2 ) ) ; <nl> <nl> - trainer2 . SaveCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> - trainer2 . RestoreFromCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> + trainer2 - > SaveCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> + trainer2 - > RestoreFromCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> <nl> if ( ! AreEqual ( function1 , function2 ) ) <nl> { <nl> throw std : : runtime_error ( " TestModelSerialization : reloaded function is not identical to the original . " ) ; <nl> } <nl> <nl> - trainer1 . TrainMinibatch ( { { function1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer1 - > TrainMinibatch ( { { function1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> if ( AreEqual ( function1 , function2 ) ) <nl> { <nl> throw std : : runtime_error ( " TestModelSerialization : reloaded function is still identical to the original after it was trained . " ) ; <nl> } <nl> <nl> - trainer2 . TrainMinibatch ( { { function2 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer2 - > TrainMinibatch ( { { function2 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> if ( ! 
AreEqual ( function1 , function2 ) ) <nl> { <nl> void TestTrainingWithCheckpointing ( const FunctionPtr & function1 , const FunctionP <nl> <nl> for ( int i = 0 ; i < 3 ; + + i ) <nl> { <nl> - trainer2 . SaveCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> - trainer2 . RestoreFromCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> + trainer2 - > SaveCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> + trainer2 - > RestoreFromCheckpoint ( L " trainer . v2 . checkpoint " ) ; <nl> <nl> if ( ! AreEqual ( function1 , function2 ) ) <nl> { <nl> void TestTrainingWithCheckpointing ( const FunctionPtr & function1 , const FunctionP <nl> <nl> for ( int j = 0 ; j < 3 ; + + j ) <nl> { <nl> - trainer1 . TrainMinibatch ( { { function1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - trainer2 . TrainMinibatch ( { { function2 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer1 - > TrainMinibatch ( { { function1 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer2 - > TrainMinibatch ( { { function2 - > Arguments ( ) [ 0 ] , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> - double mbLoss1 = trainer1 . PreviousMinibatchLossAverage ( ) ; <nl> - double mbLoss2 = trainer2 . PreviousMinibatchLossAverage ( ) ; <nl> + double mbLoss1 = trainer1 - > PreviousMinibatchLossAverage ( ) ; <nl> + double mbLoss2 = trainer2 - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( mbLoss1 , mbLoss2 , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> } <nl> } <nl> void TestLegacyModelSaving ( const DeviceDescriptor & device ) <nl> <nl> LearningRatePerSampleSchedule learningRateSchedule ( { { 2 , 0 . 0005 } , { 2 , 0 . 00025 } } , actualMBSize ) ; <nl> auto learner = SGDLearner ( classifierOutput - > Parameters ( ) , learningRateSchedule ) ; <nl> - Trainer trainer ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> + auto trainer = CreateTrainer ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> const wchar_t * modelFile = L " seq2seq . legacy . model " ; <nl> Internal : : SaveAsLegacyModel ( classifierOutput , modelFile ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - auto MB2Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . 
m_data } } , device ) ; <nl> + auto MB2Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> classifierOutput - > RestoreModel ( modelFile ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - auto postRestoreMB2Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + auto postRestoreMB2Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB2Loss , MB2Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> classifierOutput - > RestoreModel ( modelFile ) ; <nl> Internal : : SaveAsLegacyModel ( classifierOutput , modelFile ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> classifierOutput - > RestoreModel ( modelFile ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - postRestoreMB2Loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + postRestoreMB2Loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( postRestoreMB2Loss , MB2Loss , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> <nl> <nl> LearningRatePerSampleSchedule learningRateSchedule2 ( { { 0 . 04 , 0 . 02 , 0 . 01 , 0 . 008 , 0 . 004 , 0 . 002 , 0 . 001 } } , actualMBSize ) ; <nl> MomentumAsTimeConstantSchedule momentumSchedule ( { { 900 , 800 , 700 , 600 , 500 } } , actualMBSize ) ; <nl> auto learner2 = AdamLearner ( classifierOutput - > Parameters ( ) , learningRateSchedule , momentumSchedule , / * unitGainMomentum = * / true ) ; <nl> - Trainer trainer2 ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> - <nl> + auto trainer2 = CreateTrainer ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> <nl> classifierOutput - > RestoreModel ( modelFile ) ; <nl> <nl> vector < double > expectedLoss ; <nl> for ( int i = 0 ; i < 10 ; i + + ) <nl> { <nl> - trainer . SaveCheckpoint ( L " trainer . checkpoint " + std : : to_wstring ( i ) ) ; <nl> + trainer - > SaveCheckpoint ( L " trainer . checkpoint " + std : : to_wstring ( i ) ) ; <nl> Internal : : SaveAsLegacyModel ( classifierOutput , modelFile + std : : to_wstring ( i ) ) ; <nl> - trainer . 
TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - expectedLoss . push_back ( trainer . PreviousMinibatchLossAverage ( ) ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + expectedLoss . push_back ( trainer - > PreviousMinibatchLossAverage ( ) ) ; <nl> } <nl> <nl> for ( int i = 0 ; i < 10 ; i + + ) <nl> { <nl> - trainer . RestoreFromCheckpoint ( L " trainer . checkpoint " + std : : to_wstring ( i ) ) ; <nl> + trainer - > RestoreFromCheckpoint ( L " trainer . checkpoint " + std : : to_wstring ( i ) ) ; <nl> classifierOutput - > RestoreModel ( modelFile + std : : to_wstring ( i ) ) ; <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - double loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + double loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( loss , expectedLoss [ i ] , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> } <nl> } <nl> void TestCheckpointingWithStatefulNodes ( const DeviceDescriptor & device ) <nl> auto featureStreamInfo = minibatchSource - > StreamInfo ( features ) ; <nl> auto labelStreamInfo = minibatchSource - > StreamInfo ( labels ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> <nl> vector < double > expectedLoss ; <nl> for ( int i = 0 ; i < epochSize / minibatchSize ; i + + ) <nl> { <nl> - trainer . SaveCheckpoint ( L " stateful_nodes . model " + std : : to_wstring ( i ) ) ; <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - expectedLoss . push_back ( trainer . PreviousMinibatchLossAverage ( ) ) ; <nl> + trainer - > SaveCheckpoint ( L " stateful_nodes . model " + std : : to_wstring ( i ) ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + expectedLoss . push_back ( trainer - > PreviousMinibatchLossAverage ( ) ) ; <nl> } <nl> <nl> for ( int i = 0 ; i < epochSize / minibatchSize ; i + + ) <nl> { <nl> - trainer . RestoreFromCheckpoint ( L " stateful_nodes . model " + std : : to_wstring ( i ) ) ; <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> - double loss = trainer . PreviousMinibatchLossAverage ( ) ; <nl> + trainer - > RestoreFromCheckpoint ( L " stateful_nodes . model " + std : : to_wstring ( i ) ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . 
m_data } } , device ) ; <nl> + double loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> FloatingPointCompare ( loss , expectedLoss [ i ] , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> } <nl> } <nl> mmm a / Tests / UnitTests / V2LibraryTests / TrainerTests . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / TrainerTests . cpp <nl> void TrainSimpleFeedForwardClassifer ( const DeviceDescriptor & device ) <nl> <nl> LearningRatePerSampleSchedule learningRatePerSample = 0 . 02 ; <nl> minibatchSource = TextFormatMinibatchSource ( L " SimpleDataTrain_cntk_text . txt " , { { L " features " , inputDim } , { L " labels " , numOutputClasses } } ) ; <nl> - Trainer trainer ( classifierOutput , trainingLoss , prediction , { SGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample ) } ) ; <nl> + auto trainer = CreateTrainer ( classifierOutput , trainingLoss , prediction , { SGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample ) } ) ; <nl> size_t outputFrequencyInMinibatches = 20 ; <nl> size_t trainingCheckpointFrequency = 100 ; <nl> for ( size_t i = 0 ; i < numMinibatchesToTrain ; + + i ) <nl> { <nl> auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> - trainer . TrainMinibatch ( { { input , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { input , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> PrintTrainingProgress ( trainer , i , outputFrequencyInMinibatches ) ; <nl> <nl> if ( ( i % trainingCheckpointFrequency ) = = ( trainingCheckpointFrequency - 1 ) ) <nl> { <nl> const wchar_t * ckpName = L " feedForward . net " ; <nl> - trainer . SaveCheckpoint ( ckpName ) ; <nl> - trainer . RestoreFromCheckpoint ( ckpName ) ; <nl> + trainer - > SaveCheckpoint ( ckpName ) ; <nl> + trainer - > RestoreFromCheckpoint ( ckpName ) ; <nl> } <nl> } <nl> } <nl> void TrainMNISTClassifier ( const DeviceDescriptor & device ) <nl> auto labelStreamInfo = minibatchSource - > StreamInfo ( labelsStreamName ) ; <nl> <nl> LearningRatePerSampleSchedule learningRatePerSample = 0 . 003125 ; <nl> - Trainer trainer ( classifierOutput , trainingLoss , prediction , { SGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample ) } ) ; <nl> + auto trainer = CreateTrainer ( classifierOutput , trainingLoss , prediction , { SGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample ) } ) ; <nl> <nl> size_t outputFrequencyInMinibatches = 20 ; <nl> for ( size_t i = 0 ; i < numMinibatchesToTrain ; + + i ) <nl> { <nl> auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> - trainer . TrainMinibatch ( { { input , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { input , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> PrintTrainingProgress ( trainer , i , outputFrequencyInMinibatches ) ; <nl> } <nl> } <nl> mmm a / Tests / UnitTests / V2LibraryTests / TruncatedLSTMAcousticModel . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / TruncatedLSTMAcousticModel . 
cpp <nl> void TrainTruncatedLSTMAcousticModelClassifer ( const DeviceDescriptor & device , bo <nl> LearningRatePerSampleSchedule learningRatePerSample = 0 . 000781 ; <nl> MomentumAsTimeConstantSchedule momentumTimeConstant = 6074 ; <nl> auto learner = MomentumSGDLearner ( classifierOutput - > Parameters ( ) , learningRatePerSample , momentumTimeConstant , / * unitGainMomentum = * / true ) ; <nl> - Trainer trainer ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> + auto trainer = CreateTrainer ( classifierOutput , trainingLoss , prediction , { learner } ) ; <nl> <nl> size_t outputFrequencyInMinibatches = 1 ; <nl> for ( size_t i = 0 ; true ; i + + ) <nl> void TrainTruncatedLSTMAcousticModelClassifer ( const DeviceDescriptor & device , bo <nl> if ( actualMaxSequenceLength ! = truncationLength ) <nl> ReportFailure ( " Actual max sequence length ( % d ) in minibatch data does not equal specified truncation length ( % d ) " , ( int ) actualMaxSequenceLength , ( int ) truncationLength ) ; <nl> <nl> - trainer . TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> + trainer - > TrainMinibatch ( { { features , minibatchData [ featureStreamInfo ] . m_data } , { labels , minibatchData [ labelStreamInfo ] . m_data } } , device ) ; <nl> PrintTrainingProgress ( trainer , i , outputFrequencyInMinibatches ) ; <nl> } <nl> } <nl> mmm a / bindings / csharp / Swig / cntk_cs . i <nl> ppp b / bindings / csharp / Swig / cntk_cs . i <nl> <nl> double blockLearningRate = 1 . 0 ) ; <nl> <nl> % ignore CNTK : : Trainer ; <nl> + % ignore CNTK : : CreateTrainer ; <nl> % ignore CNTK : : StreamInformation ; <nl> % ignore std : : hash < : : CNTK : : StreamInformation > ; <nl> <nl> <nl> % ignore CNTK : : QuantizedMPICommunicator ( bool zeroThresholdFor1Bit , bool useQuantizationForSelfStripe , size_t numQuantizationBits ) ; <nl> % ignore CNTK : : MinibatchInfo ; <nl> % ignore CNTK : : DistributedTrainer ; <nl> + % ignore CNTK : : TrainingSession ; <nl> + % ignore CNTK : : CreateBasicTrainingSession ; <nl> + % ignore CNTK : : Create ; <nl> % ignore CNTK : : CreateDataParallelDistributedTrainer ( DistributedCommunicatorPtr communicator , bool useAsyncBufferedParameterUpdate , size_t distributedAfterSampleCount = 0 ) ; <nl> % ignore CNTK : : CreateQuantizedDataParallelDistributedTrainer ( QuantizedDistributedCommunicatorPtr communicator , <nl> bool useAsyncBufferedParameterUpdate , <nl> mmm a / bindings / python / cntk / cntk_py . i <nl> ppp b / bindings / python / cntk / cntk_py . i <nl> <nl> % template ( ) std : : vector < std : : shared_ptr < CNTK : : Function > > ; <nl> % template ( ) std : : vector < std : : shared_ptr < CNTK : : Learner > > ; <nl> % template ( ) std : : vector < std : : shared_ptr < CNTK : : DistributedLearner > > ; <nl> + % template ( ) std : : vector < std : : shared_ptr < CNTK : : Trainer > > ; <nl> % template ( ) std : : pair < size_t , double > ; <nl> % template ( ) std : : vector < std : : pair < size_t , double > > ; <nl> % template ( ) std : : vector < std : : pair < CNTK : : Variable , CNTK : : Variable > > ; <nl> <nl> % { <nl> # define SWIG_FILE_WITH_INIT <nl> % } <nl> + <nl> % init % { <nl> import_array ( ) ; <nl> % } <nl> public : <nl> / / Because SWIG cannot properly handle smart pointers to derived classes ( causes memory leak during the check ) , <nl> / / we need custom constructors . 
<nl> <nl> - % extend CNTK : : Trainer <nl> + % extend Trainer <nl> { <nl> Trainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const FunctionPtr & evaluationFunction , const std : : vector < DistributedLearnerPtr > & parameterLearners ) <nl> { <nl> public : <nl> learners . reserve ( parameterLearners . size ( ) ) ; <nl> for ( const auto & l : parameterLearners ) <nl> learners . push_back ( l ) ; <nl> - return new CNTK : : Trainer ( model , lossFunction , evaluationFunction , learners ) ; <nl> + return CreateTrainer ( model , lossFunction , evaluationFunction , learners ) ; <nl> } <nl> <nl> Trainer ( const FunctionPtr & model , const FunctionPtr & lossFunction , const FunctionPtr & evaluationFunction , const std : : vector < LearnerPtr > & parameterLearners ) <nl> { <nl> - return new CNTK : : Trainer ( model , lossFunction , evaluationFunction , parameterLearners ) ; <nl> + return CreateTrainer ( model , lossFunction , evaluationFunction , parameterLearners ) ; <nl> } <nl> } <nl> <nl> public : <nl> % unordered_map_ref_conversion ( CNTK : : Parameter , SWIGTYPE_p_CNTK__Parameter , CNTK : : NDArrayViewPtr , SWIGTYPE_p_std__shared_ptrT_CNTK__NDArrayView ) ; <nl> % unordered_map_ref_conversion ( CNTK : : Variable , SWIGTYPE_p_CNTK__Variable , CNTK : : Variable , SWIGTYPE_p_CNTK__Variable ) ; <nl> <nl> + % shared_ptr ( CNTK : : Trainer ) <nl> % shared_ptr ( CNTK : : Function ) <nl> % shared_ptr ( CNTK : : NDArrayView ) <nl> % shared_ptr ( CNTK : : Value ) <nl> mmm a / bindings / python / cntk / trainer . py <nl> ppp b / bindings / python / cntk / trainer . py <nl> def __init__ ( self , model , loss_function , eval_function , parameter_learners ) : <nl> if not isinstance ( parameter_learners , list ) : <nl> parameter_learners = [ parameter_learners ] <nl> <nl> - super ( Trainer , self ) . __init__ ( model , loss_function , eval_function , parameter_learners ) <nl> + trainer = cntk_py . create_trainer ( model , loss_function , eval_function , parameter_learners ) <nl> + # transplant into this class instance <nl> + self . __dict__ = trainer . __dict__ <nl> <nl> def train_minibatch ( self , arguments , outputs = None , device = None ) : <nl> ' ' ' <nl> mmm a / bindings / python / cntk / utils / swig_helper . py <nl> ppp b / bindings / python / cntk / utils / swig_helper . py <nl> def map_if_possible ( obj ) : <nl> from cntk . ops . variables import Variable , Parameter , Constant <nl> from cntk . ops . functions import Function <nl> from cntk . learner import Learner <nl> + from cntk . trainer import Trainer <nl> from cntk . io import MinibatchSource , MinibatchData , StreamConfiguration <nl> from cntk . axis import Axis <nl> from cntk . distributed import WorkerDescriptor , Communicator , DistributedLearner <nl> def map_if_possible ( obj ) : <nl> cntk_py . Learner : Learner , <nl> cntk_py . Value : Value , <nl> cntk_py . MinibatchSource : MinibatchSource , <nl> + cntk_py . Trainer : Trainer , <nl> cntk_py . MinibatchData : MinibatchData , <nl> cntk_py . StreamConfiguration : StreamConfiguration , <nl> cntk_py . Axis : Axis , <nl>
Introducing training session object
microsoft/CNTK
6a2249a99ce96c7628bb5c64375c763e64f0a53a
2017-01-17T09:22:41Z
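A minimal sketch of the API shift this commit makes, assuming the Python bindings built from it: Trainer instances are no longer stack-constructed; the SWIG layer calls the new CreateTrainer factory (exposed to Python as cntk_py.create_trainer) and adopts the returned shared_ptr-backed TrainerPtr, which is why the C++ tests above move from "Trainer trainer(...)" with "trainer.TrainMinibatch" to "auto trainer = CreateTrainer(...)" with "trainer->TrainMinibatch". The wrapper function below is illustrative only, not part of the commit.

    # Illustrative sketch; cntk_py is the SWIG-generated module touched above.
    import cntk.cntk_py as cntk_py

    def make_trainer(model, loss_function, eval_function, parameter_learners):
        if not isinstance(parameter_learners, list):
            parameter_learners = [parameter_learners]
        # create_trainer returns a TrainerPtr (std::shared_ptr<Trainer>)
        # instead of a by-value Trainer, matching the new C++ factory above.
        return cntk_py.create_trainer(model, loss_function,
                                      eval_function, parameter_learners)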
mmm a / python / pywatchman / __init__ . py <nl> ppp b / python / pywatchman / __init__ . py <nl> def receive ( self ) : <nl> if not buf [ 0 ] : <nl> raise WatchmanError ( ' empty watchman response ' ) <nl> <nl> - elen , _1 , _2 = bser . pdu_info ( buf [ 0 ] ) <nl> + _1 , _2 , elen = bser . pdu_info ( buf [ 0 ] ) <nl> <nl> rlen = len ( buf [ 0 ] ) <nl> while elen > rlen : <nl> def receive ( self ) : <nl> if not buf [ 0 ] : <nl> raise WatchmanError ( ' empty watchman response ' ) <nl> <nl> - elen , recv_bser_version , recv_bser_capabilities = bser . pdu_info ( buf [ 0 ] ) <nl> + recv_bser_version , recv_bser_capabilities , elen = bser . pdu_info ( buf [ 0 ] ) <nl> <nl> if hasattr ( self , ' bser_version ' ) : <nl> # Readjust BSER version and capabilities if necessary <nl> mmm a / python / pywatchman / bser . c <nl> ppp b / python / pywatchman / bser . c <nl> static PyObject * bser_pdu_info ( PyObject * self , PyObject * args ) <nl> if ( ! pdu_info_helper ( self , args , & version , & capabilities , & total_len ) ) { <nl> return NULL ; <nl> } <nl> - return Py_BuildValue ( " Lkk " , total_len , version , capabilities ) ; <nl> + return Py_BuildValue ( " kkL " , version , capabilities , total_len ) ; <nl> } <nl> <nl> static PyObject * bser_pdu_len ( PyObject * self , PyObject * args ) <nl> mmm a / python / pywatchman / pybser . py <nl> ppp b / python / pywatchman / pybser . py <nl> def pdu_info ( buf ) : <nl> else : <nl> raise ValueError ( ' Invalid BSER header ' ) <nl> <nl> - return expected_len + pos2 , bser_version , bser_capabilities <nl> + return bser_version , bser_capabilities , expected_len + pos2 <nl> <nl> def pdu_len ( buf ) : <nl> - return pdu_info ( buf ) [ 0 ] <nl> + return pdu_info ( buf ) [ 2 ] <nl> <nl> def loads ( buf , mutable = True , value_encoding = None , value_errors = None ) : <nl> " " " Deserialize a BSER - encoded blob . <nl> mmm a / python / tests / tests . py <nl> ppp b / python / tests / tests . py <nl> def test_pdu_info ( self ) : <nl> enc = self . bser_mod . dumps ( 1 ) <nl> DEFAULT_BSER_VERSION = 1 <nl> DEFAULT_BSER_CAPABILITIES = 0 <nl> - self . assertEqual ( ( len ( enc ) , DEFAULT_BSER_VERSION , <nl> - DEFAULT_BSER_CAPABILITIES ) , <nl> - self . bser_mod . pdu_info ( enc ) ) <nl> + self . assertEqual ( ( DEFAULT_BSER_VERSION , DEFAULT_BSER_CAPABILITIES , <nl> + len ( enc ) ) , self . bser_mod . pdu_info ( enc ) ) <nl> <nl> # try a bigger one ; prove that we get the correct length <nl> # even though we receive just a portion of the complete <nl> # data <nl> enc = self . bser_mod . dumps ( [ 1 , 2 , 3 , " hello there , much larger " ] ) <nl> - self . assertEqual ( ( len ( enc ) , DEFAULT_BSER_VERSION , <nl> - DEFAULT_BSER_CAPABILITIES ) , <nl> - self . bser_mod . pdu_info ( enc [ 0 : 7 ] ) ) <nl> + self . assertEqual ( ( DEFAULT_BSER_VERSION , DEFAULT_BSER_CAPABILITIES , <nl> + len ( enc ) ) , self . bser_mod . pdu_info ( enc [ 0 : 7 ] ) ) <nl> + <nl> + def test_pdu_len ( self ) : <nl> + enc = self . bser_mod . dumps ( 1 ) <nl> + self . assertEqual ( len ( enc ) , self . bser_mod . pdu_len ( enc ) ) <nl> + <nl> + # try a bigger one ; prove that we get the correct length <nl> + # even though we receive just a portion of the complete <nl> + # data <nl> + enc = self . bser_mod . dumps ( [ 1 , 2 , 3 , " hello there , much larger " ] ) <nl> + self . assertEqual ( len ( enc ) , self . bser_mod . pdu_len ( enc [ 0 : 7 ] ) ) <nl> <nl> def test_garbage ( self ) : <nl> # can ' t use the with form here because Python 2 . 6 <nl>
Reorders return value of bser . pdu_info to match the order in the BSER header . Fixes tests .
facebook/watchman
b9121307e0d1e24f2be6ba65b38337f1e0a9bc3c
2016-07-18T18:31:26Z
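The change is a pure reordering of the pdu_info return tuple, from (length, version, capabilities) to (version, capabilities, length), applied in lockstep to the C extension (bser.c), the pure-Python fallback (pybser.py), and the caller in __init__.py. A short sketch of the new convention, assuming a pywatchman build that includes this commit and its C extension:

    from pywatchman import bser

    enc = bser.dumps([1, 2, 3, "hello there, much larger"])

    # New order: version and capabilities first, total PDU length last.
    version, capabilities, total_len = bser.pdu_info(enc)
    assert total_len == len(enc)

    # pdu_len is now pdu_info(buf)[2]; a header-sized prefix of the PDU is
    # enough to recover the full length, as the updated tests check.
    assert bser.pdu_len(enc[0:7]) == len(enc)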
mmm a / caffe2 / operators / dataset_ops . cc <nl> ppp b / caffe2 / operators / dataset_ops . cc <nl> class CreateTensorVectorOp final : public Operator < Context > { <nl> OUTPUT_TAGS ( TENSOR_VECTOR ) ; <nl> } ; <nl> <nl> + template < class Context > <nl> + class TensorVectorSizeOp final : public Operator < Context > { <nl> + public : <nl> + USE_OPERATOR_CONTEXT_FUNCTIONS ; <nl> + USE_SIMPLE_CTOR_DTOR ( TensorVectorSizeOp ) ; <nl> + <nl> + bool RunOnDevice ( ) override { <nl> + auto & vector_ptr = <nl> + OperatorBase : : Input < TensorVectorPtr < Context > > ( TENSOR_VECTOR ) ; <nl> + auto * size = Output ( SIZE ) ; <nl> + size - > Resize ( ) ; <nl> + / / 32 - bit should be enough here <nl> + * size - > template mutable_data < int32_t > ( ) = vector_ptr - > size ( ) ; <nl> + return true ; <nl> + } <nl> + <nl> + private : <nl> + INPUT_TAGS ( TENSOR_VECTOR ) ; <nl> + OUTPUT_TAGS ( SIZE ) ; <nl> + } ; <nl> + <nl> template < class Context > <nl> class ConcatTensorVectorOp final : public Operator < Context > { <nl> public : <nl> REGISTER_CPU_OPERATOR ( CheckDatasetConsistency , CheckDatasetConsistencyOp ) ; <nl> REGISTER_CPU_OPERATOR ( Append , AppendOp < CPUContext > ) ; <nl> REGISTER_CPU_OPERATOR ( AtomicAppend , AtomicAppendOp < CPUContext > ) ; <nl> REGISTER_CPU_OPERATOR ( CreateTensorVector , CreateTensorVectorOp < CPUContext > ) ; <nl> + REGISTER_CPU_OPERATOR ( TensorVectorSize , TensorVectorSizeOp < CPUContext > ) ; <nl> REGISTER_CPU_OPERATOR ( ConcatTensorVector , ConcatTensorVectorOp < CPUContext > ) ; <nl> REGISTER_CPU_OPERATOR ( CollectTensor , CollectTensorOp < CPUContext > ) ; <nl> <nl> OPERATOR_SCHEMA ( CreateTensorVector ) <nl> . NumOutputs ( 1 ) <nl> . SetDoc ( " Create a std : : unique_ptr < std : : vector < Tensor > > " ) ; <nl> <nl> + OPERATOR_SCHEMA ( TensorVectorSize ) <nl> + . NumInputs ( 1 ) <nl> + . NumOutputs ( 1 ) <nl> + . SetDoc ( " Get the size of the input vector " ) <nl> + . Input ( 0 , " tensor vector " , " std : : unique_ptr < std : : vector < Tensor > > " ) <nl> + . Output ( 0 , " size " , " int32_t size " ) ; <nl> + <nl> OPERATOR_SCHEMA ( ConcatTensorVector ) <nl> . NumInputs ( 1 ) <nl> . NumOutputs ( 1 ) <nl> SHOULD_NOT_DO_GRADIENT ( CheckDatasetConsistency ) ; <nl> SHOULD_NOT_DO_GRADIENT ( Append ) ; <nl> SHOULD_NOT_DO_GRADIENT ( AtomicAppend ) ; <nl> SHOULD_NOT_DO_GRADIENT ( CreateTensorVector ) ; <nl> + SHOULD_NOT_DO_GRADIENT ( TensorVectorSize ) ; <nl> SHOULD_NOT_DO_GRADIENT ( ConcatTensorVector ) ; <nl> SHOULD_NOT_DO_GRADIENT ( CollectTensor ) ; <nl> } / / namespace <nl> mmm a / caffe2 / python / operator_test / dataset_ops_test . py <nl> ppp b / caffe2 / python / operator_test / dataset_ops_test . py <nl> def test_dataset_ops ( self ) : <nl> workspace . RunNetOnce ( read_init_net ) <nl> workspace . CreateNet ( read_next_net ) <nl> <nl> - for i , entry in enumerate ( entries ) : <nl> + for entry in entries : <nl> workspace . RunNet ( str ( read_next_net ) ) <nl> actual = FetchRecord ( batch ) <nl> _assert_records_equal ( actual , entry ) <nl> def test_dataset_ops ( self ) : <nl> workspace . RunNetOnce ( read_init_net ) <nl> <nl> workspace . CreateNet ( read_next_net ) <nl> - read_next_net_name = str ( read_next_net ) <nl> <nl> for i in range ( len ( entries ) ) : <nl> k = idx [ i ] if i in idx else i <nl> def test_collect_tensor_ops ( self ) : <nl> # concat the collected tensors <nl> concat_net = core . 
Net ( ' concat_net ' ) <nl> bconcated_map = { } <nl> + bsize_map = { } <nl> for b in blobs : <nl> bconcated_map [ b ] = b + ' _concated ' <nl> + bsize_map [ b ] = b + ' _size ' <nl> concat_net . ConcatTensorVector ( [ bvec_map [ b ] ] , [ bconcated_map [ b ] ] ) <nl> + concat_net . TensorVectorSize ( [ bvec_map [ b ] ] , [ bsize_map [ b ] ] ) <nl> <nl> workspace . RunNetOnce ( concat_net ) <nl> <nl> def test_collect_tensor_ops ( self ) : <nl> reference_result = workspace . FetchBlob ( bconcated_map [ blobs [ 0 ] ] ) <nl> self . assertEqual ( reference_result . shape , <nl> ( min ( num_to_collect , max_example_to_cover ) , 2 ) ) <nl> + size = workspace . FetchBlob ( bsize_map [ blobs [ 0 ] ] ) <nl> + self . assertEqual ( tuple ( ) , size . shape ) <nl> + self . assertEqual ( min ( num_to_collect , max_example_to_cover ) , size . item ( ) ) <nl> <nl> hist , _ = np . histogram ( reference_result [ : , 0 ] , bins = 10 , <nl> range = ( 1 , max_example_to_cover ) ) <nl>
TensorVectorSizeOp
pytorch/pytorch
8482cf982360769adc5dd9c12df4c6350dc83865
2017-04-07T21:46:19Z
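A hedged usage sketch for the new TensorVectorSize operator, following the pattern in dataset_ops_test.py above. The blob names are illustrative, and calling CreateTensorVector with no inputs to obtain an empty vector is an assumption; the test above only sizes vectors filled by CollectTensor.

    from caffe2.python import core, workspace

    net = core.Net('tensor_vector_size_example')
    # Assumed: CreateTensorVector takes no inputs and yields an empty
    # std::unique_ptr<std::vector<Tensor>> blob.
    vec = net.CreateTensorVector([], ['vec'])
    # The new op reads the vector blob and writes a scalar int32 holding its
    # length; the output shape is (), per the assertEqual(tuple(), size.shape)
    # check in the test above.
    net.TensorVectorSize([vec], ['vec_size'])

    workspace.RunNetOnce(net)
    print(workspace.FetchBlob('vec_size'))  # 0 for an empty vector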
mmm a / templates / cocos2dx_files . json <nl> ppp b / templates / cocos2dx_files . json <nl> <nl> " external / curl / include / android / curl / multi . h " , <nl> " external / curl / include / android / curl / stdcheaders . h " , <nl> " external / curl / include / android / curl / typecheck - gcc . h " , <nl> + " external / curl / include / android / curl / types . h " , <nl> " external / curl / include / ios / curl / curl . h " , <nl> " external / curl / include / ios / curl / curlbuild - 32 . h " , <nl> " external / curl / include / ios / curl / curlbuild - 64 . h " , <nl> <nl> " external / websockets / include / mac / lws_config . h " , <nl> " external / websockets / include / win10 / libwebsockets . h " , <nl> " external / websockets / include / win10 / lws_config . h " , <nl> + " external / websockets / include / win10 / private - libwebsockets . h " , <nl> " external / websockets / include / win32 / libwebsockets . h " , <nl> " external / websockets / include / win32 / lws_config . h " , <nl> " external / websockets / include / win32 / win32helpers / getopt . h " , <nl> <nl> " external / websockets / include / winrt_8 . 1 / libwebsockets . h " , <nl> " external / websockets / include / winrt_8 . 1 / lws_config . h " , <nl> " external / websockets / include / winrt_8 . 1 / private - libwebsockets . h " , <nl> - " external / websockets / include / winrt_8 . 1 / win32helpers / getopt . h " , <nl> - " external / websockets / include / winrt_8 . 1 / win32helpers / gettimeofday . h " , <nl> " external / websockets / include / wp_8 . 1 / libwebsockets . h " , <nl> " external / websockets / include / wp_8 . 1 / lws_config . h " , <nl> " external / websockets / include / wp_8 . 1 / private - libwebsockets . h " , <nl> - " external / websockets / include / wp_8 . 1 / win32helpers / getopt . h " , <nl> - " external / websockets / include / wp_8 . 1 / win32helpers / gettimeofday . h " , <nl> " external / websockets / prebuilt / android / Android . mk " , <nl> " external / websockets / prebuilt / android / armeabi - v7a / libwebsockets . a " , <nl> " external / websockets / prebuilt / android / armeabi / libwebsockets . a " , <nl>
Merge pull request from CocosRobot / update_cocosfiles_1460338764
cocos2d/cocos2d-x
4bd6834ad65c983c20e799d1997dc9cc54ad0582
2016-04-11T02:17:22Z
mmm a / test / rql_test / drivers / driver . js <nl> ppp b / test / rql_test / drivers / driver . js <nl> var Promise = require ( Path . resolve ( __dirname , ' . . ' , ' . . ' , ' . . ' , ' build ' , ' packag <nl> <nl> var r = require ( Path . resolve ( __dirname , ' . . ' , ' importRethinkDB . js ' ) ) . r ; <nl> <nl> + / / - - global variables <nl> + <nl> + / / Tests are stored in list until they can be sequentially evaluated <nl> + var tests = [ r . dbCreate ( ' test ' ) ] <nl> + <nl> + var failure_count = 0 ; <nl> + var tests_run = 0 ; <nl> + <nl> + / / Provides a context for variables <nl> + var defines = { } <nl> + <nl> + var reqlConn = null ; / / set as testing begins <nl> + <nl> + var tables_to_cleanup = [ ] / / pre - existing tables <nl> + var tables_to_delete = [ ] / / created by this script <nl> + <nl> / / - - input validation <nl> <nl> / / all argument numbers are + 1 because the script is # 1 <nl> if ( process . argv [ 3 ] | | process . env . TEST_DB_AND_TABLE_NAME ) { <nl> } <nl> } <nl> } <nl> - required_external_tables . reverse ( ) <nl> + required_external_tables . reverse ( ) / / so pop ' ing them gets the right order <nl> <nl> var TRACE_ENABLED = process . env . VERBOSE | | false ; <nl> <nl> / / - - utilities - - <nl> <nl> - failure_count = 0 ; <nl> - <nl> function printTestFailure ( name , src , messages ) { <nl> failure_count + = 1 ; <nl> console . log . apply ( console , [ " \ nTEST FAILURE : " + name + " \ nTEST BODY : " + src + " \ n " ] . concat ( messages ) . concat ( [ " \ n " ] ) ) ; <nl> } <nl> <nl> - function clone ( source ) { <nl> - result = { } <nl> - for ( var key in source ) { <nl> - result [ key ] = source [ key ] ; <nl> - } <nl> - return result ; <nl> - } <nl> - <nl> function eq_test ( expected , result , compOpts , partial ) { <nl> TRACE ( " eq_test : " + JSON . stringify ( expected ) + " | = = | " + JSON . stringify ( result ) + " | = = | " + partial ) ; <nl> + <nl> if ( expected instanceof Function ) { <nl> return expected ( result ) ; <nl> + <nl> } else if ( result instanceof Function ) { <nl> return result ( expected ) ; <nl> <nl> function returnTrue ( ) { <nl> return fun ; <nl> } <nl> <nl> - / / Tests are stored in list until they can be sequentially evaluated <nl> - var tests = [ ] <nl> - <nl> - / / Any variables defined by the tests are stored here <nl> - var defines = { } <nl> - <nl> function TRACE ( ) { <nl> if ( TRACE_ENABLED ) { <nl> console . log . apply ( console , [ " TRACE " ] . concat ( [ ] . splice . call ( arguments , 0 ) ) ) ; <nl> function TRACE ( ) { <nl> <nl> / / - - Setup atexit handler for cleanup <nl> <nl> - var connection = null ; / / set as testing begins <nl> - <nl> - var tables_to_cleanup = [ ] / / pre - existing tables <nl> - var tables_to_delete = [ ] / / created by this script <nl> - <nl> function atexitCleanup ( exitCode ) { <nl> <nl> promisesToKeep = [ ] ; <nl> <nl> - if ( connection ) { <nl> - <nl> + if ( ! reqlConn ) { <nl> + console . warn ( ' Unable to clean up as there is no open ReQL connection ' ) <nl> + } else { <nl> console . log ( ' Cleaning up ' ) <nl> <nl> / / - cleanup tables <nl> <nl> cleanTable = function ( dbName , tableName , fullName ) { <nl> - return r . db ( dbName ) . tableList ( ) . count ( tableName ) . eq ( 1 ) . run ( connection , function ( err , value ) { <nl> + return r . db ( dbName ) . tableList ( ) . count ( tableName ) . eq ( 1 ) . run ( reqlConn , function ( err , value ) { <nl> if ( err ) { throw ' In cleanup there was no table " ' + fullName + ' " to clean ' ; } <nl> } ) . 
then ( function ( ) { <nl> - return r . db ( dbName ) . table ( tableName ) . indexList ( ) . forEach ( r . db ( dbName ) . table ( tableName ) . indexDrop ( r . row ) ) . run ( connection , function ( err , value ) { <nl> + return r . db ( dbName ) . table ( tableName ) . indexList ( ) . forEach ( r . db ( dbName ) . table ( tableName ) . indexDrop ( r . row ) ) . run ( reqlConn , function ( err , value ) { <nl> throw ' In cleanup failure to remove indexes from table " ' + fullName + ' " : ' + err ; <nl> } ) ; <nl> } ) . then ( function ( ) { <nl> - return r . db ( dbName ) . table ( tableName ) . delete ( ) . run ( connection , function ( err , value ) { <nl> + return r . db ( dbName ) . table ( tableName ) . delete ( ) . run ( reqlConn , function ( err , value ) { <nl> throw ' In cleanup failure to remove data from table " ' + fullName + ' " : ' + err ; <nl> } ) ; <nl> } ) . error ( function ( e ) { <nl> function atexitCleanup ( exitCode ) { <nl> / / - remove tables <nl> <nl> deleteTable = function ( dbName , tableName , fullName ) { <nl> - return r . db ( dbName ) . tableDrop ( tableName ) . run ( connection , function ( err , value ) { <nl> + return r . db ( dbName ) . tableDrop ( tableName ) . run ( reqlConn , function ( err , value ) { <nl> if ( err ) { <nl> if ( exitCode = = 0 ) { exitCode = 1 ; } <nl> console . error ( ' In cleanup there was no table " ' + fullName + ' " to delete : ' + err ) ; <nl> function atexitCleanup ( exitCode ) { <nl> <nl> promisesToKeep . push ( deleteTable ( dbName , tableName , fullName ) ) ; <nl> } <nl> - } <nl> - <nl> - if ( promisesToKeep . length > 0 ) { <nl> - Promise . all ( promisesToKeep ) . then ( function ( ) { <nl> + <nl> + if ( promisesToKeep . length > 0 ) { <nl> + Promise . all ( promisesToKeep ) . then ( function ( ) { <nl> + process . exit ( exitCode ) ; <nl> + } ) ; <nl> + } else { <nl> process . exit ( exitCode ) ; <nl> - } ) ; <nl> - } else { <nl> - process . exit ( exitCode ) ; <nl> + } <nl> } <nl> } <nl> <nl> process . on ( ' unexpectedException ' , <nl> ) <nl> <nl> / / Connect first to cpp server <nl> - r . connect ( { port : DRIVER_PORT } , function ( cpp_conn_err , cpp_conn ) { <nl> + r . connect ( { port : DRIVER_PORT } , function ( err , conn ) { <nl> <nl> - if ( cpp_conn_err ) { <nl> - console . error ( " Failed to connect to server : " , cpp_conn_err ) ; <nl> + if ( err ) { <nl> + console . error ( " Failed to connect to server : " , err ) ; <nl> process . exit ( 1 ) ; <nl> } <nl> - connection = cpp_conn ; <nl> - <nl> - / / Pull a test off the queue and run it <nl> - function runTest ( ) { try { <nl> - var testPair = tests . shift ( ) ; <nl> - if ( testPair ) { <nl> - if ( testPair instanceof Function ) { <nl> - TRACE ( " = = = = runTest = = function " ) ; <nl> - testPair ( runTest , cpp_conn ) ; <nl> + reqlConn = conn ; <nl> + <nl> + / / Start the chain of tests <nl> + runTest ( ) ; <nl> + } ) ; <nl> + <nl> + / / Pull a test off the queue and run it <nl> + function runTest ( ) { <nl> + try { <nl> + var test = tests . shift ( ) ; <nl> + if ( test ) { <nl> + if ( test instanceof Function ) { <nl> + / / - - function such as setup_table <nl> + TRACE ( " = = = = runTest = = = = function : " + test . name ) ; <nl> + test ( test ) ; <nl> return ; <nl> + <nl> } else { <nl> - var src = testPair [ 0 ] <nl> - var exp_val = testPair [ 1 ] <nl> - var testName = testPair [ 2 ] ; <nl> - var runopts = testPair [ 3 ] ; <nl> - var testopts = testPair [ 4 ] ; <nl> - TRACE ( " = = = = runTest = = non - function : " + src ) <nl> - <nl> - if ( ! 
runopts ) { <nl> - runopts = { maxBatchRows : 3 } <nl> + / / - - regular test <nl> + TRACE ( " = = = = runTest = = = = non - function : " + test . src ) <nl> + <nl> + if ( ! test . runopts ) { <nl> + test . runopts = { maxBatchRows : 3 } <nl> } else { <nl> - for ( var opt in runopts ) { <nl> - runopts [ opt ] = eval ( runopts [ opt ] ) <nl> + for ( var opt in test . runopts ) { <nl> + test . runopts [ opt ] = eval ( test . runopts [ opt ] ) <nl> } <nl> - if ( ! ( " maxBatchRows " in runopts ) ) { <nl> - runopts . maxBatchRows = 3 <nl> + if ( ! ( " maxBatchRows " in test . runopts ) ) { <nl> + test . runopts . maxBatchRows = 3 <nl> } <nl> } <nl> - if ( ! testopts ) { <nl> - testopts = { } <nl> + if ( ! test . testopts ) { <nl> + test . testopts = { } <nl> } <nl> compOpts = { } <nl> - if ( ' precision ' in testopts ) { <nl> - compOpts [ ' precision ' ] = testopts [ ' precision ' ] <nl> + if ( ' precision ' in test . testopts ) { <nl> + compOpts [ ' precision ' ] = test . testopts [ ' precision ' ] <nl> } <nl> - <nl> + <nl> / / - convert expected value into a function for comparison <nl> + var exp_fun = null ; <nl> try { <nl> with ( defines ) { <nl> - var exp_fun = eval ( exp_val ) ; <nl> + exp_fun = eval ( test . expectedSrc ) ; <nl> } <nl> } catch ( err ) { <nl> / / Oops , this shouldn ' t have happened <nl> - console . log ( testName ) ; <nl> - console . log ( exp_val ) ; <nl> + console . log ( test . name ) ; <nl> + console . log ( test . expectedSrc ) ; <nl> throw err ; <nl> } <nl> if ( ! exp_fun ) exp_fun = returnTrue ( ) ; <nl> if ( ! ( exp_fun instanceof Function ) ) exp_fun = eq ( exp_fun , compOpts ) ; <nl> - <nl> - TRACE ( ' expected value : ' + exp_fun . toString ( ) + ' from ' + exp_val ) <nl> - <nl> - / / - build the test <nl> - var test = null ; <nl> + <nl> + test . exp_fun = exp_fun ; <nl> + <nl> + TRACE ( ' expected value : ' + test . exp_fun + ' from ' + test . expectedSrc ) <nl> + <nl> + / / - evaluate the test <nl> + <nl> + var result = null ; <nl> try { <nl> with ( defines ) { <nl> TRACE ( " before eval " ) ; <nl> - test = eval ( src ) ; <nl> + result = eval ( test . src ) ; <nl> TRACE ( " after eval " ) ; <nl> - } <nl> - } catch ( bld_err ) { <nl> - TRACE ( " build error " ) <nl> - if ( exp_fun . isErr ) { <nl> - if ( ! exp_fun ( bld_err ) ) { <nl> - printTestFailure ( testName , src , <nl> - [ " Error eval ' ing test src not equal to expected err : \ n \ tERROR : " , bld_err , " \ n \ tExpected : " , exp_fun ] ) ; <nl> - } <nl> - } else { <nl> - printTestFailure ( testName , src , [ " Error eval ' ing test src : \ n \ t " , bld_err ] ) ; <nl> - } <nl> - <nl> - / / continue to next test <nl> - runTest ( ) ; <nl> - return ; <nl> - } <nl> - <nl> - / / Run test first on cpp server <nl> - if ( testopts & & testopts [ ' reql - query ' ] = = false ) { <nl> - TRACE ( " non reql - query result : " + test ) <nl> - if ( test instanceof Object & & test . each ) { <nl> - process_iterable ( test , testopts ) ; <nl> - } else { <nl> - afterArray ( null , test , testopts ) ; <nl> - } <nl> - } else { <nl> - TRACE ( " processing query : " + test ) <nl> - try { <nl> - var clone_runopts = runopts ? clone ( runopts ) : { } ; <nl> - var clone_testopts = testopts ? clone ( testopts ) : { } ; <nl> - with ( defines ) { <nl> - test . run ( cpp_conn , clone_runopts , function ( err , cursor ) { run_callback ( err , cursor , clone_testopts ) } ) ; <nl> - } <nl> - <nl> - } catch ( err ) { <nl> - TRACE ( " querry error - " + err ) <nl> - if ( exp_fun . isErr ) { <nl> - if ( ! 
exp_fun ( err ) ) { <nl> - printTestFailure ( testName , src , [ " Error running test not equal to expected err : \ n \ tERROR : " , err , " \ n \ tEXPECTED : " , exp_val ] ) ; <nl> + <nl> + if ( result instanceof r . table ( ' ' ) . __proto__ . __proto__ . __proto__ . constructor ) { <nl> + TRACE ( " processing query : " + result ) <nl> + with ( defines ) { <nl> + result . run ( reqlConn , test . runopts , function ( err , value ) { processResult ( err , value , test ) } ) ; <nl> + return ; <nl> } <nl> } else { <nl> - printTestFailure ( testName , src , [ " Error running test : \ n \ t " , err ] ) ; <nl> + resultText = result <nl> + try { <nl> + resultText = result . toString ( ) ; <nl> + } catch ( err ) { } <nl> + TRACE ( " non reql - query result : " + resultText ) <nl> + processResult ( null , result , test ) ; / / will go on to next test <nl> + return ; <nl> } <nl> - <nl> - / / Continue to next test <nl> - runTest ( ) ; <nl> - return ; <nl> } <nl> - } <nl> - <nl> - function afterArray ( cpp_err , cpp_res ) { try { <nl> - TRACE ( " afterArray - src : " + src + " , err : " + cpp_err + " , result : " + JSON . stringify ( cpp_res ) + " expected function : " + exp_fun . toString ( ) ) ; <nl> - if ( cpp_err ) { <nl> - if ( exp_fun . isErr ) { <nl> - if ( ! exp_fun ( cpp_err ) ) { <nl> - printTestFailure ( testName , src , [ " Error running test on server not equal to expected err : " , " \ n \ tERROR : " , JSON . stringify ( cpp_err ) , " \ n \ tEXPECTED " , exp_val ] ) ; <nl> - } <nl> - } else { <nl> - var info ; <nl> - if ( cpp_err . msg ) { <nl> - info = cpp_err . msg ; <nl> - } else if ( cpp_err . message ) { <nl> - info = cpp_err . message ; <nl> - } else { <nl> - info = JSON . stringify ( cpp_res ) ; <nl> - } <nl> - <nl> - if ( cpp_err . stack ) { <nl> - info + = " \ n \ nStack : \ n " + cpp_err . stack . toString ( ) ; <nl> - } <nl> - printTestFailure ( testName , src , [ " Error running test on server : " , " \ n \ tERROR : " , info ] ) ; <nl> - } <nl> - } else if ( ! exp_fun ( cpp_res ) ) { <nl> - printTestFailure ( testName , src , [ " CPP result is not equal to expected result : " , " \ n \ tVALUE : " , JSON . stringify ( cpp_res ) , " \ n \ tEXPECTED : " , exp_val ] ) ; <nl> - } <nl> - <nl> - / / Continue to next test . Tests are fully sequential <nl> - / / so you can rely on previous queries results in <nl> - / / subsequent tests . <nl> - runTest ( ) ; <nl> + } catch ( result ) { <nl> + TRACE ( " querry error - " + result . toString ( ) ) <nl> + TRACE ( " stack : " + String ( result . stack ) ) ; <nl> + processResult ( null , result , test ) ; / / will go on to next test <nl> return ; <nl> - } catch ( err ) { <nl> - console . log ( " stack : " + String ( err . stack ) ) <nl> - unexpectedException ( " afterArray " , testName , err ) ; <nl> - } } <nl> - <nl> - function process_iterable ( feed , test_options ) { <nl> - TRACE ( ' process_iterable ' ) <nl> - var accumulator = [ ] ; <nl> - <nl> - feed . each ( <nl> - function ( err , _row ) { <nl> - TRACE ( ' process_iterable_internal ' ) <nl> - if ( err ) { <nl> - console . log ( " stack : " + String ( err . stack ) ) ; <nl> - unexpectedException ( " run_callback " , testName , err ) ; <nl> - } else { <nl> - try { <nl> - if ( test_options & & test_options . rowfilter ) { <nl> - filterFunction = new Function ( ' input ' , test_options . rowfilter ) ; <nl> - accumulator . push ( filterFunction ( _row ) ) ; <nl> - } else { <nl> - accumulator . push ( _row ) ; <nl> - } <nl> - } catch ( err ) { <nl> - console . 
log ( " stack : " + String ( err . stack ) ) ; <nl> - unexpectedException ( " run_callback : < < " + test_options . filter + " > > " , testName , err ) ; <nl> - } <nl> - } <nl> - } , <nl> - function ( ) { <nl> - if ( test_options & & test_options . arrayfilter ) { <nl> - arrayFunction = new Function ( ' input ' , test_options . arrayfilter ) ; <nl> - accumulator = arrayFunction ( accumulator ) <nl> - } <nl> - afterArray ( null , accumulator , test_options ) ; <nl> - } <nl> - ) ; <nl> } <nl> - <nl> - function run_callback ( cpp_err , cpp_res_cursor , test_options ) { try { <nl> - TRACE ( " run_callback src : " + src + " , err : " + cpp_err + " , result : " + cpp_res_cursor ) ; <nl> - <nl> - if ( test_options & & test_options [ ' variable ' ] ) { <nl> - defines [ test_options [ ' variable ' ] ] = cpp_res_cursor ; <nl> - } <nl> - <nl> - if ( cpp_err ) { <nl> - afterArray ( cpp_err , null , test_options ) ; <nl> - } else if ( cpp_res_cursor instanceof Object & & cpp_res_cursor . toArray ) { <nl> - try { <nl> - cpp_res_cursor . toArray ( function ( err , result ) { afterArray ( err , result , test_options ) } ) ; <nl> - } catch ( err ) { <nl> - if ( err instanceof r . Error . RqlDriverError ) { <nl> - / / probably a Feed <nl> - afterArray ( null , null , test_options ) ; <nl> - } else { <nl> - throw err ; <nl> - } <nl> - } <nl> - } else { <nl> - afterArray ( null , cpp_res_cursor , test_options ) ; <nl> - } <nl> - } catch ( err ) { <nl> - console . log ( " stack : " + String ( err . stack ) ) <nl> - unexpectedException ( " run_callback " , testName , err ) ; <nl> - } } <nl> + <nl> } <nl> } else { <nl> / / We ' ve hit the end of our test list <nl> r . connect ( { port : DRIVER_PORT } , function ( cpp_conn_err , cpp_conn ) { <nl> } <nl> } catch ( err ) { <nl> console . log ( " stack : " + String ( err . stack ) ) <nl> - unexpectedException ( " runTest " , testName , testPair [ 1 ] , err ) ; <nl> - } } <nl> + unexpectedException ( " runTest " , test . name , test [ 1 ] , err ) ; <nl> + } <nl> + } <nl> <nl> - / / Start the recursion though all the tests <nl> - r . dbCreate ( ' test ' ) . run ( cpp_conn , runTest ) ; <nl> - } ) ; <nl> + function processResult ( err , result , test ) { <nl> + / / prepare the result to be compared ( e . g . : collect feeds and cursor results ) <nl> + TRACE ( ' processResult result : ' + result + ' , err : ' + err + ' , testopts : ' + JSON . stringify ( test . testopts ) ) <nl> + var accumulator = [ ] ; <nl> + <nl> + try { <nl> + / / - if an error go straight to compare <nl> + <nl> + if ( err ) { <nl> + TRACE ( ' processResult ' ) ; <nl> + compareResult ( err , null , test ) ; <nl> + } <nl> + <nl> + / / - store variable if called for <nl> + <nl> + else if ( test . testopts & & test . testopts . variable ) { <nl> + TRACE ( ' processResult string variable ' ) ; <nl> + defines [ test . testopts . variable ] = result ; <nl> + runTest ( ) ; / / Continue to next test . <nl> + } <nl> + <nl> + / / - pull out feeds and cursors to arrays <nl> + <nl> + else if ( result instanceof Object & & result . each ) { <nl> + if ( ! isNaN ( testopts . result_limit ) ) { <nl> + if ( testopts . result_limit > 0 ) { <nl> + result . next ( <nl> + } <nl> + } else { <nl> + TRACE ( ' processResult collecting full cursor ' ) ; <nl> + result . each ( <nl> + function ( err , row ) { <nl> + TRACE ( ' processResult_iter ' ) <nl> + if ( err ) { <nl> + console . log ( " stack : " + String ( err . stack ) ) ; <nl> + unexpectedException ( " processResult " , test . 
name , err ) ; <nl> + } else { <nl> + try { <nl> + if ( test . testopts & & test . testopts . rowfilter ) { <nl> + filterFunction = new Function ( ' input ' , test . testopts . rowfilter ) ; <nl> + row = filterFunction ( row ) <nl> + if ( row ) { <nl> + accumulator . push ( row ) ; <nl> + } <nl> + } else { <nl> + accumulator . push ( row ) ; <nl> + } <nl> + if ( test . testopts & & test . testopts . result_limit & & accumulator . length > = test . testopts . result_limit ) { <nl> + return false ; / / stop iterating <nl> + } <nl> + } catch ( err ) { <nl> + console . log ( " stack : " + String ( err . stack ) ) ; <nl> + unexpectedException ( " processResult_iter < < " + test . testopts . rowfilter + " > > " , test . name , err ) ; <nl> + } <nl> + } <nl> + } , <nl> + function ( ) { <nl> + TRACE ( ' processResult_final ' + test ) <nl> + if ( test . testopts & & test . testopts . arrayfilter ) { <nl> + arrayFunction = new Function ( ' input ' , test . testopts . arrayfilter ) ; <nl> + accumulator = arrayFunction ( accumulator ) <nl> + } <nl> + compareResult ( null , accumulator , test ) ; <nl> + } <nl> + ) ; <nl> + } <nl> + <nl> + / / - otherwise go to compare <nl> + <nl> + else { <nl> + compareResult ( null , result , test ) ; <nl> + } <nl> + } catch ( err ) { <nl> + console . log ( " stack : " + String ( err . stack ) ) <nl> + unexpectedException ( " processResult " , test . name , err ) ; <nl> + } <nl> + } <nl> + <nl> + function compareResult ( error , value , test ) { <nl> + try { <nl> + expectedText = null <nl> + TRACE ( " compareResult - err : " + JSON . stringify ( error ) + " , result : " + JSON . stringify ( value ) + " expected function : " + test . exp_fun . toString ( ) ) ; <nl> + if ( error ) { <nl> + if ( test . exp_fun . isErr ) { <nl> + if ( ! test . exp_fun ( error ) ) { <nl> + printTestFailure ( test . name , test . src , [ " Error running test on server not equal to expected err : " , " \ n \ tERROR : " , JSON . stringify ( error ) , " \ n \ tEXPECTED " , test . expectedSrc ] ) ; <nl> + } <nl> + } else { <nl> + var info ; <nl> + if ( error . msg ) { <nl> + info = error . msg ; <nl> + } else if ( error . message ) { <nl> + info = error . message ; <nl> + } else { <nl> + info = JSON . stringify ( value ) ; <nl> + } <nl> + <nl> + if ( error . stack ) { <nl> + info + = " \ n \ nStack : \ n " + error . stack . toString ( ) ; <nl> + } <nl> + printTestFailure ( test . name , test . src , [ " Error running test on server : " , " \ n \ tERROR : " , info ] ) ; <nl> + } <nl> + } else if ( ! test . exp_fun ( value ) ) { <nl> + printTestFailure ( test . name , test . src , [ " Result is not equal to expected result : " , " \ n \ tVALUE : " , JSON . stringify ( value ) , " \ n \ tEXPECTED : " , test . expectedSrc ] ) ; <nl> + } <nl> + <nl> + runTest ( ) ; / / Continue to next test . <nl> + } catch ( err ) { <nl> + console . log ( " stack : " + String ( err . stack ) ) <nl> + unexpectedException ( " compareResult " , test . name , err ) ; <nl> + } <nl> + } <nl> <nl> function unexpectedException ( ) { <nl> console . log ( " Oops , this shouldn ' t have happened : " ) ; <nl> function unexpectedException ( ) { <nl> <nl> / / Invoked by generated code to add test and expected result <nl> / / Really constructs list of tests to be sequentially evaluated <nl> - function test ( testSrc , resSrc , name , runopts , testopts ) { <nl> - tests . push ( [ testSrc , resSrc , name , runopts , testopts ] ) <nl> + function test ( testSrc , expectedSrc , name , runopts , testopts ) { <nl> + tests . 
push ( { <nl> + ' src ' : testSrc , <nl> + ' expectedSrc ' : expectedSrc , <nl> + ' name ' : name , <nl> + ' runopts ' : runopts , <nl> + ' testopts ' : testopts <nl> + } ) <nl> } <nl> <nl> function setup_table ( table_variable_name , table_name , db_name ) { <nl> - tests . push ( function ( next , cpp_conn ) { <nl> + tests . push ( function ( test ) { <nl> try { <nl> if ( required_external_tables . length > 0 ) { <nl> / / use an external table <nl> function setup_table ( table_variable_name , table_name , db_name ) { <nl> table = required_external_tables . pop ( ) ; <nl> defines [ table_variable_name ] = r . db ( table [ 0 ] ) . table ( table [ 1 ] ) ; <nl> tables_to_cleanup . push ( [ table [ 0 ] , table [ 1 ] ] ) <nl> - next ( ) ; <nl> + runTest ( ) ; <nl> } else { <nl> / / create the table as provided <nl> <nl> - r . db ( db_name ) . tableCreate ( table_name ) . run ( cpp_conn , { } , function ( err , res ) { <nl> + r . db ( db_name ) . tableCreate ( table_name ) . run ( reqlConn , { } , function ( err , res ) { <nl> if ( err ) { <nl> unexpectedException ( " setup_table " , err ) ; <nl> } <nl> function setup_table ( table_variable_name , table_name , db_name ) { <nl> } <nl> defines [ table_variable_name ] = r . db ( " test " ) . table ( table_name ) ; <nl> tables_to_delete . push ( [ db_name , table_name ] ) <nl> - next ( ) ; <nl> + runTest ( ) ; <nl> } ) ; <nl> } <nl> } catch ( err ) { <nl> function setup_table ( table_variable_name , table_name , db_name ) { <nl> } ) ; <nl> } <nl> <nl> + / / Invoked by generated code to fetch from a cursor <nl> + function fetch ( cursor , limit ) { <nl> + var fun = function fetch_inner ( test ) { <nl> + try { <nl> + if ( limit ) { <nl> + limit = parseInt ( limit ) ; <nl> + if ( isNaN ( limit ) ) { <nl> + unexpectedException ( " The limit value of fetch must be null or a number , got : " + limit ) <nl> + } <nl> + } <nl> + if ( ! test . testopts ) { <nl> + test . testopts = { } ; <nl> + } <nl> + test . testopts . result_limit = limit ; <nl> + TRACE ( ' fetching ' + ( limit | | " all " ) + ' items ' ) <nl> + processResult ( null , cursor , test ) <nl> + } catch ( err ) { <nl> + console . log ( " stack : " + String ( err . stack ) ) <nl> + unexpectedException ( " fetch_inner " , test . name , err ) ; <nl> + } <nl> + } <nl> + fun . toString = function ( ) { <nl> + return ' fetch_inner ( ) limit = ' + limit ; <nl> + } ; <nl> + return fun ; <nl> + } <nl> + <nl> / / Invoked by generated code to define variables to used within <nl> / / subsequent tests <nl> - function define ( expr ) { <nl> - tests . push ( function ( next , cpp_conn ) { <nl> + function define ( expr , variable ) { <nl> + tests . push ( function ( test ) { <nl> + TRACE ( ' setting define : ' + variable + ' = ' + expr ) ; <nl> with ( defines ) { <nl> - eval ( " defines . " + expr ) ; <nl> + eval ( " defines . " + variable + " = " + expr ) ; <nl> } <nl> - next ( ) ; <nl> + runTest ( ) ; <nl> } ) ; <nl> } <nl> <nl> function bag ( expected , compOpts , partial ) { <nl> } ; <nl> } <nl> <nl> + / / Invoked by generated code to ensure at least these contents are in the result <nl> function partial ( expected , compOpts ) { <nl> if ( Array . isArray ( expected ) ) { <nl> return bag ( expected , compOpts , true ) ; <nl> function err_regex ( err_name , err_pat , err_frames ) { <nl> <nl> function err_predicate ( err_name , err_pred , err_frames , desc ) { <nl> var err_frames = null ; / / TODO : test for frames <nl> - var fun = function ( other ) { <nl> + var fun = function err_predicate_return ( other ) { <nl> if ( ! 
( other instanceof Error ) ) return false ; <nl> if ( err_name & & ! ( other . name = = = err_name ) ) return false ; <nl> <nl> function err_predicate ( err_name , err_pred , err_frames , desc ) { <nl> } <nl> <nl> function builtin_err ( err_name , err_msg ) { <nl> - var fun = function ( other ) { <nl> + var fun = function builtin_err_test ( other ) { <nl> if ( ! ( other . name = = = err_name ) ) return false ; <nl> if ( ! ( other . message = = = err_msg ) ) return false ; <nl> return true ; <nl> function builtin_err ( err_name , err_msg ) { <nl> } <nl> <nl> function arrlen ( length , eq_fun ) { <nl> - var fun = function ( thing ) { <nl> + var fun = function arrlen_test ( thing ) { <nl> if ( ! thing . length | | thing . length ! = = length ) return false ; <nl> return ! eq_fun | | thing . every ( eq_fun ) ; <nl> } ; <nl> function arrlen ( length , eq_fun ) { <nl> } <nl> <nl> function uuid ( ) { <nl> - var fun = function ( thing ) { <nl> + var fun = function uuid_test ( thing ) { <nl> return thing . match & & thing . match ( / [ a - z0 - 9 ] { 8 } - [ a - z0 - 9 ] { 4 } - [ a - z0 - 9 ] { 4 } - [ a - z0 - 9 ] { 4 } - [ a - z0 - 9 ] { 12 } / ) ; <nl> } ; <nl> fun . toString = function ( ) { <nl> mmm a / test / rql_test / drivers / driver . py <nl> ppp b / test / rql_test / drivers / driver . py <nl> class PyTestDriver : <nl> def __init__ ( self ) : <nl> print ( ' Creating default connection to server on port % d \ n ' % DRIVER_PORT ) <nl> self . cpp_conn = self . connect ( ) <nl> - self . scope = { } <nl> - <nl> - if ' test ' not in r . db_list ( ) . run ( self . cpp_conn ) : <nl> - r . db_create ( ' test ' ) . run ( self . cpp_conn ) <nl> + self . scope = globals ( ) <nl> <nl> def connect ( self ) : <nl> return r . connect ( host = ' localhost ' , port = DRIVER_PORT ) <nl> <nl> - def define ( self , expr ) : <nl> + def define ( self , expr , variable ) : <nl> + <nl> try : <nl> - exec ( expr , globals ( ) , self . scope ) <nl> + exec compile ( ' % s = % s ' % ( variable , expr ) , ' < string > ' , ' single ' ) in self . scope # handle things like : a [ ' b ' ] = b <nl> except Exception as e : <nl> print_test_failure ( ' Exception while processing define ' , expr , str ( e ) ) <nl> - <nl> + <nl> def run ( self , src , expected , name , runopts , testopts ) : <nl> if runopts : <nl> runopts [ " profile " ] = True <nl> def run ( self , src , expected , name , runopts , testopts ) : <nl> else : <nl> conn = self . cpp_conn <nl> <nl> - # Try to build the expected result <nl> + # - - build the expected result <nl> + <nl> if expected : <nl> - exp_val = eval ( expected , dict ( list ( globals ( ) . items ( ) ) + list ( self . scope . items ( ) ) ) ) <nl> + exp_val = eval ( expected , self . scope ) <nl> else : <nl> # This test might not have come with an expected result , we ' ll just ensure it doesn ' t fail <nl> exp_val = ( ) <nl> <nl> - # Run the test <nl> - if ' reql - query ' in testopts and str ( testopts [ ' reql - query ' ] ) . lower ( ) = = ' false ' : <nl> - try : <nl> - result = eval ( src , globals ( ) , self . scope ) <nl> - except Exception as err : <nl> - result = err <nl> - else : <nl> - # Try to build the test <nl> - try : <nl> - query = eval ( src , dict ( list ( globals ( ) . items ( ) ) + list ( self . scope . 
items ( ) ) ) <nl> - except Exception as err : <nl> - if not isinstance ( exp_val , Err ) : <nl> - print_test_failure ( name , src , " Error eval ' ing test src : \ n \ t % s " % repr ( err ) ) <nl> - elif not eq ( exp_val , * * compOptions ) ( err ) : <nl> - print_test_failure ( name , src , " Error eval ' ing test src not equal to expected err : \ n \ tERROR : % s \ n \ tEXPECTED : % s " % ( repr ( err ) , repr ( exp_val ) ) ) <nl> - <nl> - return # Can ' t continue with this test if there is no test query <nl> - <nl> - # Check pretty - printing <nl> - check_pp ( src , query ) <nl> - <nl> - # Run the test <nl> - result = None <nl> - try : <nl> - result = query . run ( conn , * * runopts ) <nl> + # - - evaluate the command <nl> + <nl> + try : <nl> + result = eval ( src , self . scope ) <nl> + <nl> + # - run as a query if it is one <nl> + <nl> + if isinstance ( result , r . RqlQuery ) : <nl> + <nl> + # Check pretty - printing <nl> + <nl> + check_pp ( src , result ) <nl> + <nl> + # run the query <nl> + <nl> + result = result . run ( conn , * * runopts ) <nl> if result and " profile " in runopts and runopts [ " profile " ] and " value " in result : <nl> result = result [ " value " ] <nl> - except Exception as err : <nl> - result = err <nl> - <nl> - # Save variable if requested <nl> + # ToDo : do something reasonable with the profile <nl> + <nl> + # - Save variable if requested <nl> + <nl> + if ' variable ' in testopts : <nl> + # ToDo : handle complex variables like : a [ 2 ] <nl> + self . scope [ testopts [ ' variable ' ] ] = result <nl> <nl> - if ' variable ' in testopts : <nl> - self . scope [ testopts [ ' variable ' ] ] = result <nl> + except Exception as err : <nl> + result = err <nl> <nl> # Compare to the expected result <nl> <nl> def run ( self , src , expected , name , runopts , testopts ) : <nl> elif not eq ( exp_val , * * compOptions ) ( result ) : <nl> print_test_failure ( name , src , " Error running test on server not equal to expected err : \ n \ tERROR : % s \ n \ tEXPECTED : % s " % ( repr ( result ) , repr ( exp_val ) ) ) <nl> elif not eq ( exp_val , * * compOptions ) ( result ) : <nl> - print_test_failure ( name , src , " CPP result is not equal to expected result : \ n \ tVALUE : % s \ n \ tEXPECTED : % s " % ( repr ( result ) , repr ( exp_val ) ) ) <nl> + print_test_failure ( name , src , " Result is not equal to expected result : \ n \ tVALUE : % s \ n \ tEXPECTED : % s " % ( repr ( result ) , repr ( exp_val ) ) ) <nl> <nl> driver = PyTestDriver ( ) <nl> <nl> def check_no_table_specified ( ) : <nl> if DB_AND_TABLE_NAME ! = " no_table_specified " : <nl> raise ValueError ( " This test isn ' t meant to be run against a specific table " ) <nl> <nl> - def define ( expr ) : <nl> - driver . define ( expr ) <nl> + def define ( expr , variable = None ) : <nl> + driver . define ( expr , variable = variable ) <nl> <nl> def bag ( lst ) : <nl> return Bag ( lst ) <nl> def partial ( expected ) : <nl> else : <nl> raise ValueError ( ' partial can only work on dicts or iterables , got : % s ( % s ) ' % ( type ( expected ) . __name__ , repr ( expected ) ) ) <nl> <nl> + def fetch ( cursor , limit = None ) : <nl> + ' ' ' Pull items from a cursor ' ' ' <nl> + if limit is not None : <nl> + try : <nl> + limit = int ( limit ) <nl> + assert limit > 0 <nl> + except Exception : <nl> + raise ValueError ( " On fetch limit must be None or > 0 , got : % s " % repr ( limit ) ) <nl> + result = [ ] <nl> + for i , value in enumerate ( cursor , start = 1 ) : <nl> + result . 
append ( value ) <nl> + if limit is not None and i > = limit : <nl> + break <nl> + return result <nl> + <nl> def err ( err_type , err_msg = None , frames = None ) : <nl> return Err ( err_type , err_msg , frames ) <nl> <nl> mmm a / test / rql_test / drivers / driver . rb <nl> ppp b / test / rql_test / drivers / driver . rb <nl> <nl> $ success_count = 0 <nl> <nl> DRIVER_PORT = ( ARGV [ 0 ] | | ENV [ ' RDB_DRIVER_PORT ' ] | | raise ( ' driver port not supplied ' ) ) . to_i <nl> - puts ( ' Using driver port # { DRIVER_PORT } ' ) <nl> + puts ( " Using driver port # { DRIVER_PORT } " ) <nl> <nl> $ required_external_tables = [ ] <nl> if ARGV [ 1 ] | | ENV [ ' TEST_DB_AND_TABLE_NAME ' ] <nl> <nl> <nl> # - - <nl> <nl> - def show x <nl> + $ reql_conn = RethinkDB : : Connection . new ( : host = > ' localhost ' , : port = > DRIVER_PORT ) <nl> + begin <nl> + r . db_create ( ' test ' ) . run ( $ reql_conn ) <nl> + rescue <nl> + end <nl> + <nl> + $ defines = binding <nl> + <nl> + # - - <nl> + <nl> + def show ( x ) <nl> if x . class = = Err <nl> name = x . type . sub ( / ^ RethinkDB : : / , " " ) <nl> return " < # { name } # { ' ~ ' if x . regex } # { show x . message } > " <nl> def bag ( list , partial = false ) <nl> end <nl> <nl> def partial ( expected ) <nl> - if expected . kind_of ? ( Array ) <nl> - bag ( expected , true ) <nl> - elsif expected . kind_of ? ( Bag ) <nl> - bag ( expected . items , true ) <nl> - elsif expected . kind_of ? ( Hash ) <nl> - PartitalHash . new ( expected ) <nl> - else <nl> - raise ( " partial can only handle Hashs , Arrays , or Bags . Got : # { expected . class } " ) <nl> + if expected . kind_of ? ( Array ) <nl> + bag ( expected , true ) <nl> + elsif expected . kind_of ? ( Bag ) <nl> + bag ( expected . items , true ) <nl> + elsif expected . kind_of ? ( Hash ) <nl> + PartitalHash . new ( expected ) <nl> + else <nl> + raise ( " partial can only handle Hashs , Arrays , or Bags . Got : # { expected . class } " ) <nl> + end <nl> + end <nl> + <nl> + def fetch ( cursor , limit = nil ) <nl> + raise " The limit value of fetch must be nil or > 0 , got : # { limit } " unless limit . nil ? or limit > 0 <nl> + if limit . nil ? <nl> + return cursor . to_a <nl> + else <nl> + result = [ ] <nl> + limit . times do <nl> + result . push ( cursor . next ) <nl> end <nl> + return result <nl> + end <nl> end <nl> <nl> def arrlen ( len , x ) <nl> def err_regex ( type , message , backtrace = [ ] ) <nl> Err . new ( type , message , backtrace , true ) <nl> end <nl> <nl> - def eq_test ( expected , result , testopts = { } ) <nl> - return cmp_test ( expected , result , testopts ) = = 0 <nl> - end <nl> - <nl> class Number <nl> def initialize ( value ) <nl> @ value = value <nl> def float_cmp ( value ) <nl> <nl> def cmp_test ( expected , result , testopts = { } , partial = false ) <nl> if expected . object_id = = NoError . object_id <nl> - return - 1 if result . class = = Err <nl> + if result . class = = Err <nl> + puts result <nl> + puts result . backtrace <nl> + return - 1 <nl> + end <nl> return 0 <nl> end <nl> <nl> def cmp_test ( expected , result , testopts = { } , partial = false ) <nl> end <nl> end <nl> <nl> - def eval_env ; binding ; end <nl> - $ defines = eval_env <nl> - <nl> - $ cpp_conn = RethinkDB : : Connection . new ( : host = > ' localhost ' , : port = > DRIVER_PORT ) <nl> - begin <nl> - r . db_create ( ' test ' ) . run ( $ cpp_conn ) <nl> - rescue <nl> - end <nl> - <nl> def test ( src , expected , name , opthash = nil , testopts = nil ) <nl> if opthash <nl> $ opthash = Hash [ opthash . map { | k , v | [ k , v . is_a ? ( String ) ? 
eval ( v , $ defines ) : v ] } ] <nl> def test ( src , expected , name , opthash = nil , testopts = nil ) <nl> end <nl> $ test_count + = 1 <nl> <nl> - if not ( testopts and testopts . key ? ( : ' reql - query ' ) and testopts [ : ' reql - query ' ] . to_s ( ) . downcase = = ' false ' ) <nl> - # check that it evaluates without running it <nl> - begin <nl> - eval ( src , $ defines ) <nl> - rescue Exception = > e <nl> - result = err ( e . class . name . sub ( / ^ RethinkDB : : / , " " ) , e . message . split ( " \ n " ) [ 0 ] , " TODO " ) <nl> - return check_result ( name , src , result , expected , testopts ) <nl> - end <nl> - end <nl> - <nl> - # construct the query <nl> - queryString = ' ' <nl> - if testopts and testopts . key ? ( : ' variable ' ) <nl> - queryString + = testopts [ : ' variable ' ] + " = " <nl> - end <nl> <nl> + # - - run the command <nl> <nl> - if not ( testopts and testopts . key ? ( : ' reql - query ' ) and testopts [ : ' reql - query ' ] . to_s ( ) . downcase = = ' false ' ) <nl> - queryString + = ' ( ' + src + ' ) ' # handle cases like : r ( 1 ) + 3 <nl> - if opthash <nl> - opthash . each { | key , value | opthash [ key ] = eval ( value . to_s ) } <nl> - queryString + = ' . run ( $ cpp_conn , ' + opthash . to_s + ' ) ' <nl> + result = nil <nl> + begin <nl> + <nl> + # - save variable if requested <nl> + <nl> + if testopts & & testopts . key ? ( : ' variable ' ) <nl> + queryString = " # { testopts [ : variable ] } = # { src } " # handle cases like : r ( 1 ) + 3 <nl> else <nl> - queryString + = ' . run ( $ cpp_conn ) ' <nl> + queryString = " # { src } " # handle cases like : r ( 1 ) + 3 <nl> end <nl> - else <nl> - queryString + = src <nl> - end <nl> - <nl> - # run the query <nl> - begin <nl> - result = eval queryString , $ defines <nl> + <nl> + # - run the command <nl> + <nl> + result = $ defines . eval ( queryString ) <nl> + <nl> + # - run as a query if it is one <nl> + <nl> + if result . kind_of ? ( RethinkDB : : RQL ) <nl> + <nl> + if testopts and testopts . key ? ( : ' variable ' ) <nl> + queryString = " # { testopts [ : variable ] } = ( # { src } ) " # handle cases like : r ( 1 ) + 3 <nl> + else <nl> + queryString = " ( # { src } ) " # handle cases like : r ( 1 ) + 3 <nl> + end <nl> + <nl> + if opthash and opthash . length > 0 <nl> + opthash . each { | key , value | opthash [ key ] = eval ( value . to_s ) } <nl> + queryString + = ' . run ( $ reql_conn , ' + opthash . to_s + ' ) ' <nl> + else <nl> + queryString + = ' . run ( $ reql_conn ) ' <nl> + end <nl> + <nl> + result = $ defines . eval ( queryString ) <nl> + <nl> + # - convert cursors from an Enumerator to an Enumerable , see issue # 3682 <nl> + <nl> + if result . kind_of ? ( RethinkDB : : Cursor ) & & testopts & & testopts . key ? ( : ' variable ' ) <nl> + result = $ defines . eval ( " # { testopts [ : variable ] } = # { testopts [ : variable ] } . each " ) <nl> + end <nl> + <nl> + end <nl> + <nl> rescue Exception = > e <nl> - result = err ( e . class . name . sub ( / ^ RethinkDB : : / , " " ) , e . message . split ( " \ n " ) [ 0 ] , " TODO " ) <nl> + result = err ( e . class . name . sub ( / ^ RethinkDB : : / , " " ) , e . message . split ( " \ n " ) [ 0 ] , e . 
backtrace ) <nl> end <nl> - return check_result ( name , src , result , expected , testopts ) <nl> <nl> + # - - return the result <nl> + <nl> + return check_result ( name , src , result , expected , testopts ) <nl> end <nl> <nl> def setup_table ( table_variable_name , table_name , db_name = " test " ) <nl> def setup_table ( table_variable_name , table_name , db_name = " test " ) <nl> if $ required_external_tables . count > 0 <nl> # use one of the required tables <nl> table_name , db_name = $ required_external_tables . pop <nl> - raise " External table # { db_name } . # { table_name } did not exist " unless r . db ( db_name ) . table_list ( ) . set_intersection ( [ table_name ] ) . count ( ) . eq ( 1 ) . run ( $ cpp_conn ) <nl> + raise " External table # { db_name } . # { table_name } did not exist " unless r . db ( db_name ) . table_list ( ) . set_intersection ( [ table_name ] ) . count ( ) . eq ( 1 ) . run ( $ reql_conn ) <nl> <nl> puts ( " Using existing table : # { db_name } . # { table_name } , will be : # { table_variable_name } " ) <nl> <nl> at_exit do <nl> - res = r . db ( db_name ) . table ( table_name ) . delete ( ) . run ( $ cpp_conn ) <nl> + res = r . db ( db_name ) . table ( table_name ) . delete ( ) . run ( $ reql_conn ) <nl> raise " Failed to clean out contents from table # { db_name } . # { table_name } : # { res } " unless res [ " errors " ] = = 0 <nl> - res = r . db ( db_name ) . table ( table_name ) . index_list ( ) . for_each ( r . db ( db_name ) . table ( table_name ) . index_drop ( r . row ) ) . run ( $ cpp_conn ) <nl> + res = r . db ( db_name ) . table ( table_name ) . index_list ( ) . for_each ( r . db ( db_name ) . table ( table_name ) . index_drop ( r . row ) ) . run ( $ reql_conn ) <nl> raise " Failed to remove table indexes from # { db_name } . # { table_name } : # { res } " unless res [ " errors " ] = = 0 <nl> end <nl> else <nl> # create a new table <nl> - if r . db ( db_name ) . table_list ( ) . set_intersection ( [ table_name ] ) . count ( ) . eq ( 1 ) . run ( $ cpp_conn ) <nl> - res = r . db ( db_name ) . table_drop ( table_name ) . run ( $ cpp_conn ) <nl> + if r . db ( db_name ) . table_list ( ) . set_intersection ( [ table_name ] ) . count ( ) . eq ( 1 ) . run ( $ reql_conn ) <nl> + res = r . db ( db_name ) . table_drop ( table_name ) . run ( $ reql_conn ) <nl> raise " Unable to delete table before use # { db_name } . # { table_name } : # { res } " unless res [ ' errors ' ] = = 0 <nl> end <nl> - res = r . db ( db_name ) . table_create ( table_name ) . run ( $ cpp_conn ) <nl> + res = r . db ( db_name ) . table_create ( table_name ) . run ( $ reql_conn ) <nl> raise " Unable to create table # { db_name } . # { table_name } : # { res } " unless res [ " tables_created " ] = = 1 <nl> <nl> puts ( " Created table : # { db_name } . # { table_name } , will be : # { table_variable_name } " ) <nl> - $ stdout . flush <nl> <nl> at_exit do <nl> - res = r . db ( db_name ) . table_drop ( table_name ) . run ( $ cpp_conn ) <nl> + res = r . db ( db_name ) . table_drop ( table_name ) . run ( $ reql_conn ) <nl> raise " Failed to delete table # { db_name } . # { table_name } : # { res } " unless res [ " tables_dropped " ] = = 1 <nl> end <nl> end <nl> def check_result ( name , src , result , expected , testopts = { } ) <nl> end <nl> if successfulTest <nl> begin <nl> - if ! eq_test ( expected , result , testopts ) <nl> + if cmp_test ( expected , result , testopts ) ! 
= 0 <nl> fail_test ( name , src , result , expected ) <nl> successfulTest = false <nl> end <nl> def check_result ( name , src , result , expected , testopts = { } ) <nl> end <nl> end <nl> <nl> - def fail_test ( name , src , res , expected ) <nl> + def fail_test ( name , src , result , expected ) <nl> $ stderr . puts " TEST FAILURE : # { name } " <nl> $ stderr . puts " \ tBODY : # { src } " <nl> - $ stderr . puts " \ tVALUE : # { show res } " <nl> + $ stderr . puts " \ tVALUE : # { show result } " <nl> $ stderr . puts " \ tEXPECTED : # { show expected } " <nl> + if result . nil ? = = false & & result . class = = Err <nl> + $ stderr . puts " \ tEXCEPTION : # { result . message } " <nl> + $ stderr . puts result . backtrace . join ( " \ n " ) <nl> + end <nl> $ stderr . puts " " <nl> end <nl> <nl> def the_end <nl> end <nl> end <nl> <nl> - def define expr <nl> - eval expr , $ defines <nl> + def define ( expr , variable = nil ) <nl> + if variable <nl> + $ defines . eval ( " # { variable } = # { expr } " ) <nl> + else <nl> + $ defines . eval ( expr ) <nl> + end <nl> end <nl> <nl> True = true <nl> mmm a / test / rql_test / src / changefeeds / basic . yaml <nl> ppp b / test / rql_test / src / changefeeds / basic . yaml <nl> <nl> desc : Test basic changefeed operations <nl> table_variable_name : tbl <nl> tests : <nl> - <nl> + <nl> + # Start an initial group of changefeeds <nl> + - py : even_inserts = tbl . changes ( ) . filter ( ( r . row [ ' new_val ' ] [ ' id ' ] % 2 ) . eq ( 0 ) ) [ ' new_val ' ] [ ' id ' ] . limit ( 3 ) <nl> + rb : even_inserts = tbl . changes ( ) . filter { | row | ( row [ ' new_val ' ] [ ' id ' ] % 2 ) . eq ( 0 ) } [ ' new_val ' ] [ ' id ' ] . limit ( 3 ) <nl> + js : even_inserts = tbl . changes ( ) . filter ( r . row ( ' new_ ' + ' val ' ) ( ' id ' ) . mod ( 2 ) . eq ( 0 ) ) ( ' new_ ' + ' val ' ) ( ' id ' ) . limit ( 3 ) <nl> + <nl> # Fill in some data <nl> - cd : tbl . insert ( [ { ' id ' : 1 } , { ' id ' : 2 } , { ' id ' : 3 } , { ' id ' : 4 } , { ' id ' : 5 } , { ' id ' : 6 } ] ) <nl> - rb : tbl . insert ( [ { ' id ' = > 1 } , { ' id ' = > 2 } , { ' id ' = > 3 } , { ' id ' = > 4 } , { ' id ' = > 5 } , { ' id ' = > 6 } ] ) <nl> ot : ( { ' skipped ' : 0 , ' deleted ' : 0 , ' unchanged ' : 0 , ' errors ' : 0 , ' replaced ' : 0 , ' inserted ' : 6 } ) <nl> <nl> # Test that point changefeeds support return - initial <nl> tests : <nl> - py : even_changes = tbl . changes ( squash = False ) . filter ( ( r . row [ ' new_val ' ] [ ' id ' ] % 2 ) . eq ( 0 ) ) . limit ( 2 ) <nl> rb : even_changes = tbl . changes ( squash : false ) . filter { | row | ( row [ ' new_val ' ] [ ' id ' ] % 2 ) . eq ( 0 ) } . limit ( 2 ) <nl> js : even_changes = tbl . changes ( { squash : false } ) . filter ( r . row ( ' new ' + ' _ ' + ' val ' ) ( ' id ' ) . mod ( 2 ) . eq ( 0 ) ) . limit ( 2 ) <nl> + <nl> + # Check the insert changefeeds <nl> + - cd : even_inserts <nl> + ot : bag ( [ 2 , 4 , 6 ] ) <nl> + <nl> + # Start a second round of changefeed watchers <nl> + - py : even_changes = tbl . changes ( ) . filter ( ( r . row [ ' new_val ' ] [ ' id ' ] % 2 ) . eq ( 0 ) ) [ ' new_val ' ] [ ' id ' ] . limit ( 2 ) <nl> + rb : even_changes = tbl . changes ( ) . filter { | row | ( row [ ' new_val ' ] [ ' id ' ] % 2 ) . eq ( 0 ) } [ ' new_val ' ] [ ' id ' ] . limit ( 2 ) <nl> + js : even_changes = tbl . changes ( ) . filter ( r . row ( ' new_ ' + ' val ' ) ( ' id ' ) . mod ( 2 ) . eq ( 0 ) ) ( ' new_ ' + ' val ' ) ( ' id ' ) . limit ( 2 ) <nl> <nl> # Insert more than the watchers are waiting for <nl> - cd : tbl . 
insert ( [ { ' id ' : 7 } , { ' id ' : 8 } , { ' id ' : 9 } , { ' id ' : 10 } ] ) <nl> - rb : tbl . insert ( [ { ' id ' = > 7 } , { ' id ' = > 8 } , { ' id ' = > 9 } , { ' id ' = > 10 } ] ) <nl> ot : ( { ' skipped ' : 0 , ' deleted ' : 0 , ' unchanged ' : 0 , ' errors ' : 0 , ' replaced ' : 0 , ' inserted ' : 4 } ) <nl> <nl> # Check that our limited watchers have been satified <nl> - - ot : [ 7 , 8 , 9 , 10 ] <nl> - py : " sorted ( [ x [ ' new_val ' ] [ ' id ' ] for x in all_changes ] ) " <nl> - rb : ( all_changes . map { | x | x [ ' new_val ' ] [ ' id ' ] } ) . sort <nl> - js : <nl> - cd : all_changes <nl> - testopts : <nl> - rowfilter : return input [ ' new ' + ' _ ' + ' val ' ] . id <nl> - # note : the underscore causes our re - writer to kick in <nl> - arrayfilter : return input . sort ( function ( a , b ) { return a - b } ) <nl> - testopts : <nl> - reql - query : False <nl> - <nl> - - ot : [ 8 , 10 ] <nl> - py : " sorted ( [ x [ ' new_val ' ] [ ' id ' ] for x in even_changes ] ) " <nl> - rb : ( even_changes . map { | x | x [ ' new_val ' ] [ ' id ' ] } ) . sort <nl> - js : <nl> - cd : even_changes <nl> - testopts : <nl> - rowfilter : return input [ ' new ' + ' _ ' + ' val ' ] . id <nl> - # note : the underscore causes our re - writer to kick in <nl> - arrayfilter : return input . sort ( function ( a , b ) { return a - b } ) <nl> - testopts : <nl> - reql - query : False <nl> - <nl> + <nl> + - cd : even_changes <nl> + ot : bag ( [ 8 , 10 ] ) <nl> + <nl> + # Full format insert <nl> + - cd : format_insert = tbl . changes ( ) . limit ( 1 ) <nl> + - cd : tbl . insert ( [ { ' id ' : 11 } ] ) <nl> + - cd : <nl> + py : list ( format_insert ) <nl> + rb : Array ( format_insert ) <nl> + js : format_insert <nl> + ot : [ { ' old_val ' : null , ' new_val ' : { ' id ' : 11 } } ] <nl> + <nl> + # Full format update <nl> + - cd : format_update = tbl . changes ( ) . limit ( 1 ) <nl> + - cd : tbl . get ( 11 ) . update ( { ' update ' : ' a ' } ) <nl> + ot : ( { ' skipped ' : 0 , ' deleted ' : 0 , ' unchanged ' : 0 , ' errors ' : 0 , ' replaced ' : 1 , ' inserted ' : 0 } ) <nl> + - cd : <nl> + py : list ( format_update ) <nl> + rb : Array ( format_update ) <nl> + js : format_update <nl> + ot : " [ { ' old_val ' : { ' id ' : 11 } , ' new_val ' : { ' id ' : 11 , ' update ' : ' a ' } } ] " <nl> <nl> # Check that our point watcher works correctly <nl> - rb : tbl . get ( 7 ) . update ( { a : 1 } ) [ ' replaced ' ] <nl> tests : <nl> ot : ( [ { " new_val " = > null } , <nl> { " new_val " = > { " id " = > 7 } , " old_val " = > null } , <nl> { " new_val " = > { " a " = > 1 , " id " = > 7 } , " old_val " = > { " id " = > 7 } } ] ) <nl> - testopts : <nl> - reql - query : False <nl> - <nl> - # Squash Territory <nl> - <nl> - # Bad squash value <nl> - - py : tbl . changes ( squash = None ) <nl> - rb : tbl . changes ( squash : nil ) <nl> - js : tbl . changes ( { squash : null } ) <nl> - ot : err ( ' RqlRuntimeError ' , ' Expected BOOL or NUMBER but found NULL ' ) <nl> - <nl> - - py : tbl . changes ( squash = - 10 ) <nl> - rb : tbl . changes ( squash : - 10 ) <nl> - js : tbl . changes ( { squash : - 10 } ) <nl> - ot : err ( ' RqlRuntimeError ' , ' Expected BOOL or a positive NUMBER but found a negative NUMBER ' ) <nl> - <nl> - - py : tbl . changes ( squash = true ) . type_of ( ) <nl> - rb : tbl . changes ( squash : true ) . type_of ( ) <nl> - js : tbl . changes ( { squash : true } ) . typeOf ( ) <nl> - ot : ( " STREAM " ) <nl> - <nl> - - cd : normal_changes = tbl . changes ( ) . 
limit ( 1 ) <nl> - <nl> - py : false_squash_changes = tbl . changes ( squash = False ) . limit ( 2 ) <nl> - js : false_squash_changes = tbl . changes ( { squash : false } ) . limit ( 2 ) <nl> - rb : false_squash_changes = tbl . changes ( squash : false ) . limit ( 2 ) <nl> - <nl> - py : long_squash_changes = tbl . changes ( squash = 0 . 5 ) . limit ( 1 ) <nl> - js : long_squash_changes = tbl . changes ( { squash : 0 . 5 } ) . limit ( 1 ) <nl> - rb : long_squash_changes = tbl . changes ( squash : 0 . 5 ) . limit ( 1 ) <nl> - <nl> - py : squash_changes = tbl . changes ( squash = true ) . limit ( 1 ) <nl> - js : squash_changes = tbl . changes ( { squash : true } ) . limit ( 1 ) <nl> - rb : squash_changes = tbl . changes ( squash : true ) . limit ( 1 ) <nl> - <nl> - cd : tbl . insert ( { ' id ' : 100 } ) [ ' inserted ' ] <nl> - js : tbl . insert ( { ' id ' : 100 } ) ( ' inserted ' ) <nl> - ot : 1 <nl> - <nl> - cd : tbl . get ( 100 ) . update ( { ' a ' : 1 } ) [ ' replaced ' ] <nl> - js : tbl . get ( 100 ) . update ( { ' a ' : 1 } ) ( ' replaced ' ) <nl> - ot : 1 <nl> - <nl> - cd : normal_changes <nl> - ot : ( [ { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : null } ] ) <nl> - testopts : <nl> - reql - query : False <nl> - <nl> - cd : false_squash_changes <nl> - ot : ( [ { ' new_val ' : { ' id ' : 100 } , ' old_val ' : null } , <nl> - { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : { ' id ' : 100 } } ] ) <nl> - testopts : <nl> - reql - query : False <nl> - <nl> - cd : long_squash_changes <nl> - ot : ( [ { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : null } ] ) <nl> - testopts : <nl> - reql - query : False <nl> - <nl> - cd : squash_changes <nl> - ot : ( [ { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : null } ] ) <nl> - testopts : <nl> - reql - query : False <nl> mmm a / test / rql_test / src / changefeeds / idxcopy . yaml <nl> ppp b / test / rql_test / src / changefeeds / idxcopy . yaml <nl> table_variable_name : tbl <nl> tests : <nl> - rb : tbl . index_create ( ' a ' ) [ ' created ' ] <nl> ot : 1 <nl> + <nl> - rb : changes = tbl . orderby ( index : ' a ' ) . limit ( 10 ) . changes . limit ( 9 ) <nl> + <nl> - rb : tbl . insert ( ( 0 . . . 12 ) . map { | i | { id : i , a : 5 } } ) [ ' inserted ' ] <nl> ot : 12 <nl> + <nl> - rb : tbl . get_all ( 0 , 1 , 8 , index : ' id ' ) . delete [ ' deleted ' ] <nl> ot : 3 <nl> - - rb : changes . to_a . sort { | a , b | a [ ' new_val ' ] [ ' id ' ] < = > b [ ' new_val ' ] [ ' id ' ] } <nl> - ot : ( [ { " new_val " = > { " a " = > 5 , " id " = > 2 } , " old_val " = > nil } , <nl> + - rb : fetch ( changes ) <nl> + ot : bag ( [ { " new_val " = > { " a " = > 5 , " id " = > 2 } , " old_val " = > nil } , <nl> { " new_val " = > { " a " = > 5 , " id " = > 3 } , " old_val " = > nil } , <nl> { " new_val " = > { " a " = > 5 , " id " = > 4 } , " old_val " = > nil } , <nl> { " new_val " = > { " a " = > 5 , " id " = > 5 } , " old_val " = > nil } , <nl> new file mode 100644 <nl> index 00000000000 . . 680eec89a14 <nl> mmm / dev / null <nl> ppp b / test / rql_test / src / changefeeds / point . yaml <nl> <nl> + desc : Test point changefeed basics <nl> + table_variable_name : tbl <nl> + tests : <nl> + <nl> + # - - basic <nl> + <nl> + # start a feed <nl> + <nl> + - cd : basic = tbl . get ( 1 ) . changes ( ) <nl> + <nl> + # - initial return <nl> + <nl> + - cd : fetch ( basic , 1 ) <nl> + ot : [ { ' new_val ' : null } ] <nl> + <nl> + # - inserts <nl> + <nl> + - cd : tbl . 
insert ( { ' id ' : 1 } ) <nl> + ot : ( { ' skipped ' : 0 , ' deleted ' : 0 , ' unchanged ' : 0 , ' errors ' : 0 , ' replaced ' : 0 , ' inserted ' : 1 } ) <nl> + <nl> + - cd : fetch ( basic , 1 ) <nl> + ot : [ { ' old_val ' : null , ' new_val ' : { ' id ' : 1 } } ] <nl> + <nl> + # - updates <nl> + <nl> + - cd : tbl . get ( 1 ) . update ( { ' update ' : 1 } ) <nl> + ot : ( { ' skipped ' : 0 , ' deleted ' : 0 , ' unchanged ' : 0 , ' errors ' : 0 , ' replaced ' : 1 , ' inserted ' : 0 } ) <nl> + <nl> + - cd : fetch ( basic , 1 ) <nl> + ot : [ { ' old_val ' : { ' id ' : 1 } , ' new_val ' : { ' id ' : 1 , ' update ' : 1 } } ] <nl> + <nl> + # - pluck on values <nl> + <nl> + <nl> + # - subquery <nl> + <nl> + # - deletions <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + # - - virtual tables <nl> \ No newline at end of file <nl> mmm a / test / rql_test / src / changefeeds / squash . yaml <nl> ppp b / test / rql_test / src / changefeeds / squash . yaml <nl> <nl> desc : Test changefeed squashing <nl> table_variable_name : tbl <nl> tests : <nl> - <nl> + <nl> + # Check type <nl> + <nl> - py : tbl . changes ( squash = true ) . type_of ( ) <nl> rb : tbl . changes ( squash : true ) . type_of ( ) <nl> js : tbl . changes ( { squash : true } ) . typeOf ( ) <nl> ot : ( " STREAM " ) <nl> - <nl> + <nl> + # comparison changes <nl> + <nl> - cd : normal_changes = tbl . changes ( ) . limit ( 1 ) <nl> - <nl> + <nl> - py : false_squash_changes = tbl . changes ( squash = False ) . limit ( 2 ) <nl> js : false_squash_changes = tbl . changes ( { squash : false } ) . limit ( 2 ) <nl> rb : false_squash_changes = tbl . changes ( squash : false ) . limit ( 2 ) <nl> tests : <nl> js : tbl . get ( 100 ) . update ( { ' a ' : 1 } ) ( ' replaced ' ) <nl> ot : 1 <nl> <nl> - - cd : normal_changes <nl> - ot : ( [ { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : null } ] ) <nl> - testopts : <nl> - reql - query : False <nl> - <nl> - cd : false_squash_changes <nl> ot : ( [ { ' new_val ' : { ' id ' : 100 } , ' old_val ' : null } , <nl> { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : { ' id ' : 100 } } ] ) <nl> - testopts : <nl> - reql - query : False <nl> <nl> - cd : long_squash_changes <nl> ot : ( [ { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : null } ] ) <nl> - testopts : <nl> - reql - query : False <nl> <nl> - cd : squash_changes <nl> ot : ( [ { ' new_val ' : { ' a ' : 1 , ' id ' : 100 } , ' old_val ' : null } ] ) <nl> - testopts : <nl> - reql - query : False <nl> + <nl> + # Bad squash values <nl> + <nl> + - py : tbl . changes ( squash = None ) <nl> + rb : tbl . changes ( squash : nil ) <nl> + js : tbl . changes ( { squash : null } ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Expected BOOL or NUMBER but found NULL . ' ) <nl> + <nl> + - py : tbl . changes ( squash = - 10 ) <nl> + rb : tbl . changes ( squash : - 10 ) <nl> + js : tbl . changes ( { squash : - 10 } ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Expected BOOL or a positive NUMBER but found a negative NUMBER . ' ) <nl> new file mode 100644 <nl> index 00000000000 . . ff7a43f1d6a <nl> mmm / dev / null <nl> ppp b / test / rql_test / src / changefeeds / table . yaml <nl> <nl> + desc : Test changefeeds on a table <nl> + table_variable_name : tbl <nl> + tests : <nl> + <nl> + # - inserts <nl> + <nl> + - cd : all_inserts = tbl . changes ( ) . limit ( 2 ) <nl> + - cd : tbl . 
insert ( [ { ' id ' : 1 } , { ' id ' : 2 } ] ) <nl> + ot : partial ( { ' errors ' : 0 , ' inserted ' : 2 } ) <nl> + - cd : all_inserts <nl> + ot : " bag ( [ { ' old_val ' : null , ' new_val ' : { ' id ' : 1 } } , { ' old_val ' : null , ' new_val ' : { ' id ' : 2 } } ] ) " <nl> + <nl> + # - updates <nl> + <nl> + - cd : updates = tbl . changes ( ) . limit ( 1 ) <nl> + - cd : tbl . get ( 1 ) . update ( { ' version ' : 1 } ) <nl> + ot : partial ( { ' errors ' : 0 , ' replaced ' : 1 } ) <nl> + - cd : updates <nl> + ot : [ { ' old_val ' : { ' id ' : 1 } , ' new_val ' : { ' id ' : 1 , ' version ' : 1 } } ] <nl> + <nl> + # - pluck on values <nl> + <nl> + <nl> + <nl> + # - changes overflow <nl> + # <nl> + # ToDo : enable this when we get <nl> + # <nl> + # - cd : overflow = tbl . changes ( ) <nl> + # # add more than 100 , 000 entries to make sure we get the overflow error <nl> + # - cd : r . between ( 1 , 100002 ) . for_each ( tbl . insert ( { } ) ) <nl> + # - cd : overflow <nl> + # ot : error ( RqlDriverError , ' Changefeed cache over array size limit , skipped 1 elements . ' ) <nl> + # <nl> + <nl> + <nl> \ No newline at end of file <nl> mmm a / test / rql_test / src / datum / binary . yaml <nl> ppp b / test / rql_test / src / datum / binary . yaml <nl> tests : <nl> # Short binary data from 0 to 12 characters <nl> # Not fully implemented for JS as comparing Buffer objects is non - trivial <nl> - def : <nl> - cd : s = " " <nl> + rb : s = " " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 0 <nl> <nl> - def : <nl> - cd : s = " \ x00 " <nl> + rb : s = " \ x00 " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x00 ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x00 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 1 <nl> <nl> - def : <nl> - cd : s = " \ x00 \ x42 " <nl> + rb : s = " \ x00 \ x42 " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x00 \ x42 ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x00 \ x42 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 2 <nl> <nl> - def : <nl> - cd : s = " \ x00 \ xfe \ x7a " <nl> + rb : s = " \ x00 \ xfe \ x7a " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x00 \ xfe \ x7a ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x00 \ xfe \ x7a " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 3 <nl> <nl> - def : <nl> - cd : s = " \ xed \ xfe \ x00 \ xba " <nl> + rb : s = " \ xed \ xfe \ x00 \ xba " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ xed \ xfe \ x00 \ xba ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ xed \ xfe \ x00 \ xba " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 4 <nl> <nl> - def : <nl> - cd : s = " \ x50 \ xf9 \ x00 \ x77 \ xf9 " <nl> + rb : s = " \ x50 \ xf9 \ x00 \ x77 \ xf9 " . 
force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x50 \ xf9 \ x00 \ x77 \ xf9 ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x50 \ xf9 \ x00 \ x77 \ xf9 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 5 <nl> <nl> - def : <nl> - cd : s = " \ x2f \ xe3 \ xb5 \ x57 \ x00 \ x92 " <nl> + rb : s = " \ x2f \ xe3 \ xb5 \ x57 \ x00 \ x92 " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x2f \ xe3 \ xb5 \ x57 \ x00 \ x92 ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x2f \ xe3 \ xb5 \ x57 \ x00 \ x92 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 6 <nl> <nl> - def : <nl> - cd : s = " \ xa9 \ x43 \ x54 \ xe9 \ x00 \ xf8 \ xfb " <nl> + rb : s = " \ xa9 \ x43 \ x54 \ xe9 \ x00 \ xf8 \ xfb " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ xa9 \ x43 \ x54 \ xe9 \ x00 \ xf8 \ xfb ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ xa9 \ x43 \ x54 \ xe9 \ x00 \ xf8 \ xfb " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 7 <nl> <nl> - def : <nl> - cd : s = " \ x57 \ xbb \ xe5 \ x82 \ x8b \ xd3 \ x00 \ xf9 " <nl> + rb : s = " \ x57 \ xbb \ xe5 \ x82 \ x8b \ xd3 \ x00 \ xf9 " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x57 \ xbb \ xe5 \ x82 \ x8b \ xd3 \ x00 \ xf9 ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x57 \ xbb \ xe5 \ x82 \ x8b \ xd3 \ x00 \ xf9 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 8 <nl> <nl> - def : <nl> - cd : s = " \ x44 \ x1b \ x3e \ x00 \ x13 \ x19 \ x29 \ x2a \ xbf " <nl> + rb : s = " \ x44 \ x1b \ x3e \ x00 \ x13 \ x19 \ x29 \ x2a \ xbf " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x44 \ x1b \ x3e \ x00 \ x13 \ x19 \ x29 \ x2a \ xbf ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x44 \ x1b \ x3e \ x00 \ x13 \ x19 \ x29 \ x2a \ xbf " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 9 <nl> <nl> - def : <nl> - cd : s = " \ x8a \ x1d \ x09 \ x00 \ x5d \ x60 \ x6b \ x2e \ x70 \ xd9 " <nl> + rb : s = " \ x8a \ x1d \ x09 \ x00 \ x5d \ x60 \ x6b \ x2e \ x70 \ xd9 " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x8a \ x1d \ x09 \ x00 \ x5d \ x60 \ x6b \ x2e \ x70 \ xd9 ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x8a \ x1d \ x09 \ x00 \ x5d \ x60 \ x6b \ x2e \ x70 \ xd9 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 10 <nl> <nl> - def : <nl> - cd : s = " \ x00 \ xaf \ x47 \ x4b \ x38 \ x99 \ x14 \ x8d \ x8f \ x10 \ x51 " <nl> + rb : s = " \ x00 \ xaf \ x47 \ x4b \ x38 \ x99 \ x14 \ x8d \ x8f \ x10 \ x51 " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x00 \ xaf \ x47 \ x4b \ x38 \ x99 \ x14 \ x8d \ x8f \ x10 \ x51 ' <nl> - - def : <nl> - rb : s . 
force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x00 \ xaf \ x47 \ x4b \ x38 \ x99 \ x14 \ x8d \ x8f \ x10 \ x51 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> ot : 11 <nl> <nl> - def : <nl> - cd : s = " \ x45 \ x39 \ x00 \ xf7 \ xc2 \ x37 \ xfd \ xe0 \ x38 \ x82 \ x40 \ xa9 " <nl> + cd : s = " \ x45 \ x39 \ x00 \ xf7 \ xc2 \ x37 \ xfd \ xe0 \ x38 \ x82 \ x40 \ xa9 " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x45 \ x39 \ x00 \ xf7 \ xc2 \ x37 \ xfd \ xe0 \ x38 \ x82 \ x40 \ xa9 ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x45 \ x39 \ x00 \ xf7 \ xc2 \ x37 \ xfd \ xe0 \ x38 \ x82 \ x40 \ xa9 " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : s <nl> - cd : r . binary ( s ) . count ( ) <nl> tests : <nl> # Test comparisons <nl> # Binary objects to use , in order of increasing value <nl> - def : <nl> - cd : a = " \ x00 " <nl> + js : a = Buffer ( " \ x00 " , ' binary ' ) <nl> + rb : a = " \ x00 " . force_encoding ( ' BINARY ' ) <nl> py : a = b ' \ x00 ' <nl> - def : <nl> - cd : b = " \ x00 \ x01 " <nl> + js : b = Buffer ( " \ x00 \ x01 " , ' binary ' ) <nl> + rb : b = " \ x00 \ x01 " . force_encoding ( ' BINARY ' ) <nl> py : b = b ' \ x00 \ x01 ' <nl> - def : <nl> - cd : c = " \ x01 " <nl> + js : c = Buffer ( " \ x01 " , ' binary ' ) <nl> + rb : c = " \ x01 " . force_encoding ( ' BINARY ' ) <nl> py : c = b ' \ x01 ' <nl> - def : <nl> - cd : d = " \ x70 \ x22 " <nl> + js : d = Buffer ( " \ x70 \ x22 " , ' binary ' ) <nl> + rb : d = " \ x70 \ x22 " . force_encoding ( ' BINARY ' ) <nl> py : d = b ' \ x70 \ x22 ' <nl> - def : <nl> - cd : e = " \ x80 " <nl> + js : e = Buffer ( " \ x80 " , ' binary ' ) <nl> + rb : e = " \ x80 " . force_encoding ( ' BINARY ' ) <nl> py : e = b ' \ x80 ' <nl> - def : <nl> - cd : f = " \ xFE " <nl> + js : f = Buffer ( " \ xFE " , ' binary ' ) <nl> + rb : f = " \ xFE " . force_encoding ( ' BINARY ' ) <nl> py : f = b ' \ xFE ' <nl> - - def : <nl> - rb : a . force_encoding ( ' BINARY ' ) <nl> - js : a = Buffer ( a , ' binary ' ) <nl> - - def : <nl> - rb : b . force_encoding ( ' BINARY ' ) <nl> - js : b = Buffer ( b , ' binary ' ) <nl> - - def : <nl> - rb : c . force_encoding ( ' BINARY ' ) <nl> - js : c = Buffer ( c , ' binary ' ) <nl> - - def : <nl> - rb : d . force_encoding ( ' BINARY ' ) <nl> - js : d = Buffer ( d , ' binary ' ) <nl> - - def : <nl> - rb : e . force_encoding ( ' BINARY ' ) <nl> - js : e = Buffer ( e , ' binary ' ) <nl> - - def : <nl> - rb : f . force_encoding ( ' BINARY ' ) <nl> - js : f = Buffer ( f , ' binary ' ) <nl> <nl> # a - > a <nl> - cd : r . binary ( a ) . eq ( r . binary ( a ) ) <nl> tests : <nl> ot : b ' fg ' <nl> <nl> # Test binary_format optarg <nl> - - rb : r . binary ( b ) <nl> - py : r . binary ( b ) <nl> + - cd : r . binary ( b ) <nl> runopts : <nl> binary_format : ' " native " ' <nl> ot : b <nl> tests : <nl> ot : err ( ' RqlRuntimeError ' , ' Expected type STRING but found ARRAY . ' , [ ] ) <nl> <nl> # Test errors <nl> + <nl> # Missing ' data ' field <nl> - py : r . expr ( { ' $ reql_type $ ' : ' BINARY ' } ) <nl> + rb : r . expr ( { ' $ reql_type $ ' : ' BINARY ' } ) <nl> ot : err ( ' RqlRuntimeError ' , ' Invalid binary pseudotype : ' + ' lacking ` data ` key . ' , [ ] ) <nl> + <nl> # Invalid base64 format <nl> - py : r . 
expr ( { ' $ reql_type $ ' : ' BINARY ' , ' data ' : ' ABCDEFGH = = AA ' } ) <nl> ot : err ( ' RqlRuntimeError ' , ' Invalid base64 format , data found after padding character \ ' = \ ' . ' , [ ] ) <nl> tests : <nl> ot : err ( ' RqlRuntimeError ' , ' Invalid base64 character found : ' + ' \ ' ^ \ ' . ' , [ ] ) <nl> - py : r . expr ( { ' $ reql_type $ ' : ' BINARY ' , ' data ' : ' ABCDE ' } ) <nl> ot : err ( ' RqlRuntimeError ' , ' Invalid base64 length : ' + ' 1 character remaining , cannot decode a full byte . ' , [ ] ) <nl> + <nl> # Invalid coercions <nl> - - rb : r . binary ( ' a ' ) . coerce_to ( ' array ' ) <nl> + - cd : r . binary ( a ) . coerce_to ( ' array ' ) <nl> ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to ARRAY . ' , [ ] ) <nl> - - rb : r . binary ( ' a ' ) . coerce_to ( ' object ' ) <nl> + - cd : r . binary ( a ) . coerce_to ( ' object ' ) <nl> ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to OBJECT . ' , [ ] ) <nl> - - rb : r . binary ( ' a ' ) . coerce_to ( ' bool ' ) <nl> + - cd : r . binary ( a ) . coerce_to ( ' bool ' ) <nl> ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to BOOL . ' , [ ] ) <nl> - - rb : r . binary ( ' a ' ) . coerce_to ( ' number ' ) <nl> + - cd : r . binary ( a ) . coerce_to ( ' number ' ) <nl> ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to NUMBER . ' , [ ] ) <nl> - - rb : r . binary ( ' a ' ) . coerce_to ( ' nu ' + ' ll ' ) <nl> + - cd : r . binary ( a ) . coerce_to ( ' nu ' + ' ll ' ) <nl> ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to NULL . ' , [ ] ) <nl> mmm a / test / rql_test / src / json . yaml <nl> ppp b / test / rql_test / src / json . yaml <nl> tests : <nl> ot : ( ' { " $ reql_type $ " : " GEOMETRY " , " coordinates " : [ 0 , 0 ] , " type " : " Point " } ' ) <nl> <nl> - def : <nl> - cd : s = " \ x66 \ x6f \ x6f " <nl> + rb : s = " \ x66 \ x6f \ x6f " . force_encoding ( ' BINARY ' ) <nl> py : s = b ' \ x66 \ x6f \ x6f ' <nl> - - def : <nl> - rb : s . force_encoding ( ' BINARY ' ) <nl> - js : s = Buffer ( s , ' binary ' ) <nl> + js : s = Buffer ( " \ x66 \ x6f \ x6f " , ' binary ' ) <nl> - cd : r . binary ( s ) <nl> ot : ( s ) <nl> <nl> mmm a / test / rql_test / src / regression / 1133 . yaml <nl> ppp b / test / rql_test / src / regression / 1133 . yaml <nl> <nl> desc : Regression tests for issue # 1133 , which concerns circular references in the drivers . <nl> <nl> tests : <nl> - - def : a = { } ; b = { ' a ' : a } ; a [ ' b ' ] = b ; <nl> + - def : a = { } <nl> + - def : b = { ' a ' : a } <nl> + - def : a [ ' b ' ] = b <nl> <nl> - cd : r . expr ( a ) <nl> ot : <nl> mmm a / test / rql_test / src / sindex / truncation . yaml <nl> ppp b / test / rql_test / src / sindex / truncation . yaml <nl> tests : <nl> - def : <nl> rb : f = ( 0 . . . 1 ) . map { | i | " d " * 1000 + sprintf ( " 6 % 02d " , i ) } <nl> - def : <nl> - rb : | <nl> - vals = ( a + b + c + d + e + f ) . map { | x | { a : x , num : x . match ( / [ 0 - 9 ] + / ) . to_s . to_i } } <nl> + rb : " vals = ( a + b + c + d + e + f ) . map { | x | { a : x , num : x . match ( / [ 0 - 9 ] + / ) . to_s . to_i } } " <nl> <nl> - rb : tbl . insert ( vals ) [ ' inserted ' ] <nl> ot : 303 <nl> tests : <nl> runopts : <nl> max_batch_rows : " 10 " <nl> ot : vals . map { | x | x [ : num ] } . select { | x | x > = 150 } <nl> - <nl> - # Test half - indexed ordering . THIS IS INCREDIBLY SENSITIVE TO ALL <nl> - # KINDS OF THINGS DON ' T TOUCH IT . 
<nl> - - def : <nl> - rb : runner = tbl <nl> - - def : <nl> - rb : subrunner = class < < runner ; def run ( * args ) ; args [ - 1 ] = { max_batch_rows : 100 , first_batch_scaledown_factor : 1 , max_batch_bytes : 1000000000000 , max_batch_seconds : 10000000000 } ; self . orderby ( ' a ' , index : ' idi ' ) . run ( * args ) . to_a . count ; end ; end <nl> - - rb : runner <nl> + <nl> + - rb : result = tbl . orderby ( ' a ' , index : ' idi ' ) <nl> + runopts : <nl> + max_batch_rows : 100 <nl> + first_batch_scaledown_factor : 1 <nl> + max_batch_bytes : 1000000000000 <nl> + max_batch_seconds : 10000000000 <nl> + - rb : fetch ( result ) . length <nl> ot : 303 <nl> mmm a / test / rql_test / src / times / constructors . yaml <nl> ppp b / test / rql_test / src / times / constructors . yaml <nl> tests : <nl> - def : ctz = " + 00 : 00 " <nl> - def : tzs = [ " Z " , " + 00 " , " + 0000 " , " + 00 : 00 " ] <nl> - def : cdt = [ cdate + " T " + ctime + ctz ] <nl> - - def : <nl> - rb : dts = dates . map { | d | times . map { | t | tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> + - rb : dts = dates . map { | d | times . map { | t | tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> - rb : r ( dts ) . map { | x | r . iso8601 ( x ) . to_iso8601 } . distinct <nl> ot : ( cdt ) <nl> <nl> tests : <nl> - def : bad_dates = [ " 201301 " , " 2013 - 0101 " , " 2a13 " , " 2013 + 01 " , " 2013 - 01 - 01 . 1 " ] <nl> - def : bad_times = [ " a3 " , " 13 : 0000 " , " 13 : 000 " , " 13 : 00 . 00 " , " 130000 . 00000000a " ] <nl> - def : bad_tzs = [ " X " , " - 7 " , " - 07 : - 1 " , " + 07 + 01 " , " PST " , " UTC " , " Z + 00 " ] <nl> - - def : <nl> - rb : bad_dts1 = bad_dates . map { | d | times . map { | t | tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> - - def : <nl> - rb : bad_dts2 = dates . map { | d | bad_times . map { | t | tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> - - def : <nl> - rb : bad_dts3 = dates . map { | d | times . map { | t | bad_tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> - - def : <nl> - rb : bad_dts = bad_dts1 + bad_dts2 + bad_dts3 <nl> - # We have to hack this because we have no way to handle exceptions <nl> - # inside of ReQL , and the easiest way to access the connection object <nl> - # is by stealing the arguments the test framework provides to ` run ` . <nl> - - def : <nl> - rb : subrunner = class < < bad_dts ; def run ( * args ) ; self . map { | x | begin ; RethinkDB : : RQL . new . expr ( { : s = > x , : d = > RethinkDB : : RQL . new . iso8601 ( x ) } ) . run ( * args ) ; rescue RethinkDB : : RqlRuntimeError = > e ; null ; end } . select { | x | x ! = null } ; end ; end <nl> - - rb : bad_dts <nl> + <nl> + - rb : bad_dts1 = bad_dates . map { | d | times . map { | t | tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> + - rb : bad_dts2 = dates . map { | d | bad_times . map { | t | tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> + - rb : bad_dts3 = dates . map { | d | times . map { | t | bad_tzs . map { | tz | d + " T " + t + tz } } } . flatten <nl> + - rb : bad_dts = bad_dts1 + bad_dts2 + bad_dts3 <nl> + <nl> + - rb : bad_dts . map { | x | begin ; r . expr ( { : s = > x , : d = > r . iso8601 ( x ) } ) . run ( $ reql_conn ) ; rescue RethinkDB : : RqlRuntimeError = > e ; nil ; end } . select { | x | x ! = nil } ; <nl> ot : ( [ ] ) <nl> <nl> # Check that we can ' t put a valid date into any invalid timezone . <nl> - - def : <nl> - rb : bad_dts_in_tz = dts . map { | dt | bad_tzs . 
map { | tz | { : dt = > dt , : tz = > tz } } } . flatten <nl> - # We have to hack this because we have no way to handle exceptions <nl> - # inside of ReQL , and the easiest way to access the connection object <nl> - # is by stealing the arguments the test framework provides to ` run ` . <nl> - - def : <nl> - rb : subrunner = class < < bad_dts_in_tz ; def run ( * args ) ; self . map { | x | begin ; RethinkDB : : RQL . new . expr ( { : dt = > x [ : dt ] , : tz = > x [ : tz ] , : s = > RethinkDB : : RQL . new . iso8601 ( x [ : dt ] ) . to_epoch_time . epoch_time . in_timezone ( x [ : tz ] ) . to_iso8601 . run ( * args ) } ) ; rescue RethinkDB : : RqlRuntimeError = > e ; null ; end } . select { | x | x ! = null } ; end ; end <nl> - - rb : bad_dts_in_tz <nl> + - rb : bad_dts_in_tz = r . expr ( dts . map { | dt | bad_tzs . map { | tz | { : dt = > dt , : tz = > tz } } } . flatten ) <nl> + - rb : bad_dts_in_tz . map { | x | begin ; r . expr ( { : dt = > x [ : dt ] , : tz = > x [ : tz ] , : s = > r . iso8601 ( x [ : dt ] ) . to_epoch_time . epoch_time . in_timezone ( x [ : tz ] ) . to_iso8601 . run ( $ reql_conn ) } ) ; rescue RethinkDB : : RqlRuntimeError = > e ; nil ; end } . select { | x | x ! = nil } <nl> ot : ( [ ] ) <nl> mmm a / test / rql_test / src / times / timezones . yaml <nl> ppp b / test / rql_test / src / times / timezones . yaml <nl> tests : <nl> js : tps . map ( [ r . row . timezone ( ) , r . row . day ( ) ] ) <nl> ot : ( [ [ " + 00 : 59 " , 30 ] , [ " + 01 : 00 " , 30 ] , [ " + 01 : 01 " , 30 ] ] ) <nl> <nl> - # They ' re all the same time , just in different timezones . <nl> + # They ' re all the same time , just in different timezones . <nl> + <nl> - rb : ts . concat_map { | x | ts . map { | y | x - y } } . distinct <nl> py : ts . concat_map ( lambda x : ts . map ( lambda y : x - y ) ) . distinct ( ) <nl> js : ts . concatMap ( function ( x ) { return ts . map ( function ( y ) { return x . sub ( y ) ; } ) ; } ) . distinct ( ) <nl> ot : ( [ 0 ] ) <nl> <nl> - - def : bad_tzs = [ " - 00 " , " - 00 : 00 " , " " , " UTC + 00 " , " + 00 : 60 " , " + 25 : 00 " ] <nl> - <nl> - - def : <nl> - rb : subrunner = class < < bad_tzs ; def run ( * args ) ; self . map { | x | begin ; RethinkDB : : RQL . new . expr ( [ x , RethinkDB : : RQL . new . now . in_timezone ( x ) ] ) . run ( * args ) ; rescue RethinkDB : : RqlRuntimeError = > e ; null ; end } . select { | x | x ! = null } ; end ; end <nl> - <nl> - - rb : bad_tzs <nl> - ot : ( [ ] ) <nl> - <nl> - - def : <nl> - rb : subrunner = class < < bad_tzs ; def run ( * args ) ; self . map { | x | begin ; RethinkDB : : RQL . new . expr ( [ x , RethinkDB : : RQL . new . time ( 2013 , 1 , 1 , x ) ] ) . run ( * args ) ; rescue RethinkDB : : RqlRuntimeError = > e ; null ; end } . select { | x | x ! = null } ; end ; end <nl> - <nl> - - rb : bad_tzs <nl> - ot : ( [ ] ) <nl> + # Invalid timezones <nl> + <nl> + - cd : r . now ( ) . in_timezone ( " " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Timezone ` ` does not start with ` - ` or ` + ` . ' ) <nl> + <nl> + - cd : r . now ( ) . in_timezone ( " - 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' ` - 00 ` is not a valid time offset . ' ) <nl> + <nl> + - cd : r . now ( ) . in_timezone ( " - 00 : 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' ` - 00 : 00 ` is not a valid time offset . ' ) <nl> + <nl> + - cd : r . now ( ) . in_timezone ( " UTC + 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Timezone ` UTC + 00 ` does not start with ` - ` or ` + ` . ' ) <nl> + <nl> + - cd : r . now ( ) . 
in_timezone ( " + 00 : 60 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Minutes out of range in ` + 00 : 60 ` . ' ) <nl> + <nl> + - cd : r . now ( ) . in_timezone ( " + 25 : 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Hours out of range in ` + 25 : 00 ` . ' ) <nl> <nl> + <nl> + - cd : r . time ( 2013 , 1 , 1 , " " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Timezone ` ` does not start with ` - ` or ` + ` . ' ) <nl> + <nl> + - cd : r . time ( 2013 , 1 , 1 , " - 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' ` - 00 ` is not a valid time offset . ' ) <nl> + <nl> + - cd : r . time ( 2013 , 1 , 1 , " - 00 : 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' ` - 00 : 00 ` is not a valid time offset . ' ) <nl> + <nl> + - cd : r . time ( 2013 , 1 , 1 , " UTC + 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Timezone ` UTC + 00 ` does not start with ` - ` or ` + ` . ' ) <nl> + <nl> + - cd : r . time ( 2013 , 1 , 1 , " + 00 : 60 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Minutes out of range in ` + 00 : 60 ` . ' ) <nl> + <nl> + - cd : r . time ( 2013 , 1 , 1 , " + 25 : 00 " ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Hours out of range in ` + 25 : 00 ` . ' ) <nl> mmm a / test / rql_test / test - runner <nl> ppp b / test / rql_test / test - runner <nl> interpreters = { <nl> class TestGroup ( object ) : <nl> <nl> testLanguageEntry = collections . namedtuple ( ' testLanguageEntry ' , [ ' command ' , ' expected ' , ' definition ' , ' runopts ' , ' testopts ' ] ) <nl> - variableRegex = re . compile ( ' ^ \ s * ( ? P < quoteString > [ \ ' \ " ] * ) \ s * ( ? P < variableName > [ a - zA - Z ] [ \ w \ [ \ ] \ { \ } \ ' \ " ] * ) \ s * = \ s * ( ? P < expression > [ ^ = ] . + ) $ ' ) <nl> + variableRegex = re . compile ( ' ^ \ s * ( ? P < quoteChar > [ \ ' \ " ] * ) \ s * ( ? P < variableName > [ a - zA - Z ] [ \ w \ [ \ ] \ { \ } \ ' \ " ] * ) \ s * = \ s * ( ? P < expression > [ ^ = ] . + ) $ ' ) <nl> <nl> @ classmethod <nl> def buildYamlTest ( cls , testName , sourceFile , language , outputPath , shards = 1 , useSpecificTable = False ) : <nl> class TestGroup ( object ) : <nl> <nl> # Does this test define a variable ? <nl> if testCode . definition : <nl> - cls . write_def ( out , testCode . definition ) <nl> + cls . write_def ( testName , out , testCode . definition ) <nl> <nl> # Write the commands <nl> if testCode . command : <nl> class TestGroup ( object ) : <nl> variableMatch = cls . variableRegex . match ( test_case ) <nl> if variableMatch : <nl> testCode . testopts [ ' variable ' ] = variableMatch . group ( ' variableName ' ) <nl> - test_case = variableMatch . group ( ' quoteString ' ) + variableMatch . group ( ' expression ' ) <nl> + test_case = variableMatch . group ( ' quoteChar ' ) + variableMatch . group ( ' expression ' ) <nl> <nl> try : <nl> out . write ( " test ( % ( code ) s , % ( expected ) s , ' % ( name ) s ' , % ( driveropts ) s , % ( testopts ) s ) \ n " % { <nl> class TestGroup ( object ) : <nl> except Exception as e : <nl> raise Exception ( " Error while processing test : % s \ n % s \ n % s " % ( testName , str ( testCode . command ) , str ( e ) ) ) <nl> <nl> - @ staticmethod <nl> - def write_def ( out , defs ) : <nl> + @ classmethod <nl> + def write_def ( cls , testName , out , defs ) : <nl> for definition in defs : <nl> - out . write ( ' define ( % s ) \ n ' % definition ) <nl> + variableString = ' ' <nl> + <nl> + # parse the variable name <nl> + <nl> + variableMatch = cls . variableRegex . 
match ( definition ) <nl> + if not variableMatch : <nl> + raise Exception ( ' Error while processing test % s : def entry missing variable name : % s ' % ( testName , str ( definition ) ) ) <nl> + <nl> + variableString = variableMatch . group ( ' variableName ' ) <nl> + definition = variableMatch . group ( ' quoteChar ' ) + variableMatch . group ( ' expression ' ) <nl> + <nl> + # write the output <nl> + <nl> + out . write ( ' define ( % s , " % s " ) \ n ' % ( definition , variableString ) ) <nl> <nl> # Tests may specify generic test strings valid for all languages or language specific versions <nl> @ classmethod <nl>
First commit for test point changefeeds
rethinkdb/rethinkdb
f2e8cccfabc857b286f07b893ed6e9c03fd6151e
2015-01-30T19:14:08Z
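The timezone strings exercised in the record above (both the constructors and in_timezone cases) all funnel into a handful of RqlRuntimeError messages: a missing `+`/`-` prefix, an invalid offset shape, minutes out of range, and hours out of range. Below is a rough C++ sketch of the offset validation those expectations pin down; the function name, the optional-colon grammar, and the hour cap of 24 are illustrative assumptions, not RethinkDB's actual implementation.

```cpp
#include <cctype>
#include <stdexcept>
#include <string>

// Throws std::runtime_error with messages shaped like the RqlRuntimeError
// strings asserted in the yaml tests above. Acceptance rules (two- or
// four-digit offsets, optional colon, hour cap of 24) are assumptions.
void validate_utc_offset(const std::string& tz) {
    if (tz.empty() || (tz[0] != '+' && tz[0] != '-')) {
        throw std::runtime_error(
            "Timezone `" + tz + "` does not start with `-` or `+`.");
    }
    std::string digits;
    for (std::size_t i = 1; i < tz.size(); ++i) {
        if (tz[i] == ':' && i == 3) continue;  // allow the "+HH:MM" form
        if (!std::isdigit(static_cast<unsigned char>(tz[i]))) {
            throw std::runtime_error("`" + tz + "` is not a valid time offset.");
        }
        digits += tz[i];
    }
    if (digits.size() != 2 && digits.size() != 4) {
        throw std::runtime_error("`" + tz + "` is not a valid time offset.");
    }
    const int hours = (digits[0] - '0') * 10 + (digits[1] - '0');
    const int minutes =
        digits.size() == 4 ? (digits[2] - '0') * 10 + (digits[3] - '0') : 0;
    if (minutes > 59)
        throw std::runtime_error("Minutes out of range in `" + tz + "`.");
    if (hours > 24)
        throw std::runtime_error("Hours out of range in `" + tz + "`.");
    if (tz[0] == '-' && hours == 0 && minutes == 0) {
        // "-00" / "-00:00" denote an unknown offset and are rejected.
        throw std::runtime_error("`" + tz + "` is not a valid time offset.");
    }
}

int main() {
    validate_utc_offset("+05:30");           // accepted
    try { validate_utc_offset("UTC+00"); }   // rejected: bad prefix
    catch (const std::runtime_error&) {}
    return 0;
}
```

Feeding the bad_tzs values from the tests through such a checker would hit each of the message shapes the yaml asserts, which is why the cases are listed exhaustively per error class.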
mmm a / src / mips64 / macro - assembler - mips64 . cc <nl> ppp b / src / mips64 / macro - assembler - mips64 . cc <nl> void MacroAssembler : : Subu ( Register rd , Register rs , const Operand & rt ) { <nl> addiu ( rd , rs , <nl> static_cast < int32_t > ( <nl> - rt . imm64_ ) ) ; / / No subiu instr , use addiu ( x , y , - imm ) . <nl> - } else if ( - rt . imm64_ > > 16 = = 0 & & ! MustUseReg ( rt . rmode_ ) ) { <nl> - / / Use load - imm and addu when loading - imm generates one instruction . <nl> - DCHECK ( ! rs . is ( at ) ) ; <nl> - li ( at , - rt . imm64_ ) ; <nl> - addu ( rd , rs , at ) ; <nl> } else { <nl> - / / li handles the relocation . <nl> DCHECK ( ! rs . is ( at ) ) ; <nl> - li ( at , rt ) ; <nl> - subu ( rd , rs , at ) ; <nl> + if ( - rt . imm64_ > > 16 = = 0 & & ! MustUseReg ( rt . rmode_ ) ) { <nl> + / / Use load - imm and addu when loading - imm generates one instruction . <nl> + li ( at , - rt . imm64_ ) ; <nl> + addu ( rd , rs , at ) ; <nl> + } else { <nl> + / / li handles the relocation . <nl> + li ( at , rt ) ; <nl> + subu ( rd , rs , at ) ; <nl> + } <nl> } <nl> } <nl> } <nl> void MacroAssembler : : Subu ( Register rd , Register rs , const Operand & rt ) { <nl> void MacroAssembler : : Dsubu ( Register rd , Register rs , const Operand & rt ) { <nl> if ( rt . is_reg ( ) ) { <nl> dsubu ( rd , rs , rt . rm ( ) ) ; <nl> + } else if ( is_int16 ( - rt . imm64_ ) & & ! MustUseReg ( rt . rmode_ ) ) { <nl> + daddiu ( rd , rs , <nl> + static_cast < int32_t > ( <nl> + - rt . imm64_ ) ) ; / / No dsubiu instr , use daddiu ( x , y , - imm ) . <nl> } else { <nl> - if ( is_int16 ( - rt . imm64_ ) & & ! MustUseReg ( rt . rmode_ ) ) { <nl> - daddiu ( rd , rs , <nl> - static_cast < int32_t > ( <nl> - - rt . imm64_ ) ) ; / / No dsubiu instr , use daddiu ( x , y , - imm ) . <nl> - } else if ( - rt . imm64_ > > 16 = = 0 & & ! MustUseReg ( rt . rmode_ ) ) { <nl> + DCHECK ( ! rs . is ( at ) ) ; <nl> + int li_count = InstrCountForLi64Bit ( rt . imm64_ ) ; <nl> + int li_neg_count = InstrCountForLi64Bit ( - rt . imm64_ ) ; <nl> + if ( li_neg_count < li_count & & ! MustUseReg ( rt . rmode_ ) ) { <nl> / / Use load - imm and daddu when loading - imm generates one instruction . <nl> - DCHECK ( ! rs . is ( at ) ) ; <nl> - li ( at , - rt . imm64_ ) ; <nl> - daddu ( rd , rs , at ) ; <nl> + DCHECK ( rt . imm64_ ! = std : : numeric_limits < int32_t > : : min ( ) ) ; <nl> + li ( at , Operand ( - rt . imm64_ ) ) ; <nl> + Daddu ( rd , rs , at ) ; <nl> } else { <nl> / / li handles the relocation . <nl> - DCHECK ( ! rs . is ( at ) ) ; <nl> li ( at , rt ) ; <nl> dsubu ( rd , rs , at ) ; <nl> } <nl> void MacroAssembler : : li ( Register dst , Handle < Object > value , LiFlags mode ) { <nl> li ( dst , Operand ( value ) , mode ) ; <nl> } <nl> <nl> + static inline int InstrCountForLiLower32Bit ( int64_t value ) { <nl> + if ( ! is_int16 ( static_cast < int32_t > ( value ) ) & & ( value & kUpper16MaskOf64 ) & & <nl> + ( value & kImm16Mask ) ) { <nl> + return 2 ; <nl> + } else { <nl> + return 1 ; <nl> + } <nl> + } <nl> + <nl> void MacroAssembler : : LiLower32BitHelper ( Register rd , Operand j ) { <nl> if ( is_int16 ( static_cast < int32_t > ( j . imm64_ ) ) ) { <nl> daddiu ( rd , zero_reg , ( j . 
imm64_ & kImm16Mask ) ) ; <nl> static inline int InstrCountForLoadReplicatedConst32 ( int64_t value ) { <nl> return INT_MAX ; <nl> } <nl> <nl> - void MacroAssembler : : li ( Register rd , Operand j , LiFlags mode ) { <nl> + int MacroAssembler : : InstrCountForLi64Bit ( int64_t value ) { <nl> + if ( is_int32 ( value ) ) { <nl> + return InstrCountForLiLower32Bit ( value ) ; <nl> + } else { <nl> + int bit31 = value > > 31 & 0x1 ; <nl> + if ( ( value & kUpper16MaskOf64 ) = = 0 & & is_int16 ( value > > 32 ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + return 2 ; <nl> + } else if ( ( value & ( kHigher16MaskOf64 | kUpper16MaskOf64 ) ) = = 0 & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + return 2 ; <nl> + } else if ( ( value & kImm16Mask ) = = 0 & & is_int16 ( ( value > > 32 ) + bit31 ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + return 2 ; <nl> + } else if ( ( value & kImm16Mask ) = = 0 & & <nl> + ( ( value > > 31 ) & 0x1ffff ) = = ( ( 0x20000 - bit31 ) & 0x1ffff ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + return 2 ; <nl> + } else if ( is_int16 ( static_cast < int32_t > ( value ) ) & & <nl> + is_int16 ( ( value > > 32 ) + bit31 ) & & kArchVariant = = kMips64r6 ) { <nl> + return 2 ; <nl> + } else if ( is_int16 ( static_cast < int32_t > ( value ) ) & & <nl> + ( ( value > > 31 ) & 0x1ffff ) = = ( ( 0x20000 - bit31 ) & 0x1ffff ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + return 2 ; <nl> + } else if ( base : : bits : : IsPowerOfTwo64 ( value + 1 ) ) { <nl> + return 2 ; <nl> + } else { <nl> + int shift_cnt = base : : bits : : CountTrailingZeros64 ( value ) ; <nl> + int rep32_count = InstrCountForLoadReplicatedConst32 ( value ) ; <nl> + int64_t tmp = value > > shift_cnt ; <nl> + if ( is_uint16 ( tmp ) ) { <nl> + return 2 ; <nl> + } else if ( is_int16 ( tmp ) ) { <nl> + return 2 ; <nl> + } else if ( rep32_count < 3 ) { <nl> + return 2 ; <nl> + } else if ( is_int32 ( tmp ) ) { <nl> + return 3 ; <nl> + } else { <nl> + shift_cnt = 16 + base : : bits : : CountTrailingZeros64 ( value > > 16 ) ; <nl> + tmp = value > > shift_cnt ; <nl> + if ( is_uint16 ( tmp ) ) { <nl> + return 3 ; <nl> + } else if ( is_int16 ( tmp ) ) { <nl> + return 3 ; <nl> + } else if ( rep32_count < 4 ) { <nl> + return 3 ; <nl> + } else if ( kArchVariant = = kMips64r6 ) { <nl> + int64_t imm = value ; <nl> + int count = InstrCountForLiLower32Bit ( imm ) ; <nl> + imm = ( imm > > 32 ) + bit31 ; <nl> + if ( imm & kImm16Mask ) { <nl> + count + + ; <nl> + } <nl> + imm = ( imm > > 16 ) + ( imm > > 15 & 0x1 ) ; <nl> + if ( imm & kImm16Mask ) { <nl> + count + + ; <nl> + } <nl> + return count ; <nl> + } else { <nl> + if ( is_int48 ( value ) ) { <nl> + int64_t k = value > > 16 ; <nl> + int count = InstrCountForLiLower32Bit ( k ) + 1 ; <nl> + if ( value & kImm16Mask ) { <nl> + count + + ; <nl> + } <nl> + return count ; <nl> + } else { <nl> + int64_t k = value > > 32 ; <nl> + int count = InstrCountForLiLower32Bit ( k ) ; <nl> + if ( ( value > > 16 ) & kImm16Mask ) { <nl> + count + = 3 ; <nl> + if ( value & kImm16Mask ) { <nl> + count + + ; <nl> + } <nl> + } else { <nl> + count + + ; <nl> + if ( value & kImm16Mask ) { <nl> + count + + ; <nl> + } <nl> + } <nl> + return count ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + UNREACHABLE ( ) ; <nl> + return INT_MAX ; <nl> + } <nl> + <nl> + void MacroAssembler : : li_optimized ( Register rd , Operand j , LiFlags mode ) { <nl> DCHECK ( ! j . is_reg ( ) ) ; <nl> + DCHECK ( ! MustUseReg ( j . 
rmode_ ) ) ; <nl> + DCHECK ( mode = = OPTIMIZE_SIZE ) ; <nl> BlockTrampolinePoolScope block_trampoline_pool ( this ) ; <nl> - if ( ! MustUseReg ( j . rmode_ ) & & mode = = OPTIMIZE_SIZE ) { <nl> - / / Normal load of an immediate value which does not need Relocation Info . <nl> - if ( is_int32 ( j . imm64_ ) ) { <nl> - LiLower32BitHelper ( rd , j ) ; <nl> + / / Normal load of an immediate value which does not need Relocation Info . <nl> + if ( is_int32 ( j . imm64_ ) ) { <nl> + LiLower32BitHelper ( rd , j ) ; <nl> + } else { <nl> + int bit31 = j . imm64_ > > 31 & 0x1 ; <nl> + if ( ( j . imm64_ & kUpper16MaskOf64 ) = = 0 & & is_int16 ( j . imm64_ > > 32 ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + / / 64 - bit value which consists of an unsigned 16 - bit value in its <nl> + / / least significant 32 - bits , and a signed 16 - bit value in its <nl> + / / most significant 32 - bits . <nl> + ori ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> + dahi ( rd , j . imm64_ > > 32 & kImm16Mask ) ; <nl> + } else if ( ( j . imm64_ & ( kHigher16MaskOf64 | kUpper16MaskOf64 ) ) = = 0 & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + / / 64 - bit value which consists of an unsigned 16 - bit value in its <nl> + / / least significant 48 - bits , and a signed 16 - bit value in its <nl> + / / most significant 16 - bits . <nl> + ori ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> + dati ( rd , j . imm64_ > > 48 & kImm16Mask ) ; <nl> + } else if ( ( j . imm64_ & kImm16Mask ) = = 0 & & <nl> + is_int16 ( ( j . imm64_ > > 32 ) + bit31 ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + / / 16 LSBs ( Least Significant Bits ) all set to zero . <nl> + / / 48 MSBs ( Most Significant Bits ) hold a signed 32 - bit value . <nl> + lui ( rd , j . imm64_ > > kLuiShift & kImm16Mask ) ; <nl> + dahi ( rd , ( ( j . imm64_ > > 32 ) + bit31 ) & kImm16Mask ) ; <nl> + } else if ( ( j . imm64_ & kImm16Mask ) = = 0 & & <nl> + ( ( j . imm64_ > > 31 ) & 0x1ffff ) = = ( ( 0x20000 - bit31 ) & 0x1ffff ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + / / 16 LSBs all set to zero . <nl> + / / 48 MSBs hold a signed value which can ' t be represented by signed <nl> + / / 32 - bit number , and the middle 16 bits are all zero , or all one . <nl> + lui ( rd , j . imm64_ > > kLuiShift & kImm16Mask ) ; <nl> + dati ( rd , ( ( j . imm64_ > > 48 ) + bit31 ) & kImm16Mask ) ; <nl> + } else if ( is_int16 ( static_cast < int32_t > ( j . imm64_ ) ) & & <nl> + is_int16 ( ( j . imm64_ > > 32 ) + bit31 ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + / / 32 LSBs contain a signed 16 - bit number . <nl> + / / 32 MSBs contain a signed 16 - bit number . <nl> + daddiu ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> + dahi ( rd , ( ( j . imm64_ > > 32 ) + bit31 ) & kImm16Mask ) ; <nl> + } else if ( is_int16 ( static_cast < int32_t > ( j . imm64_ ) ) & & <nl> + ( ( j . imm64_ > > 31 ) & 0x1ffff ) = = ( ( 0x20000 - bit31 ) & 0x1ffff ) & & <nl> + kArchVariant = = kMips64r6 ) { <nl> + / / 48 LSBs contain an unsigned 16 - bit number . <nl> + / / 16 MSBs contain a signed 16 - bit number . <nl> + daddiu ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> + dati ( rd , ( ( j . imm64_ > > 48 ) + bit31 ) & kImm16Mask ) ; <nl> + } else if ( base : : bits : : IsPowerOfTwo64 ( j . imm64_ + 1 ) ) { <nl> + / / 64 - bit values which have their " n " MSBs set to one , and their <nl> + / / " 64 - n " LSBs set to zero . " n " must meet the restrictions 0 < n < 64 . <nl> + int shift_cnt = 64 - base : : bits : : CountTrailingZeros64 ( j . 
imm64_ + 1 ) ; <nl> + daddiu ( rd , zero_reg , - 1 ) ; <nl> + if ( shift_cnt < 32 ) { <nl> + dsrl ( rd , rd , shift_cnt ) ; <nl> + } else { <nl> + dsrl32 ( rd , rd , shift_cnt & 31 ) ; <nl> + } <nl> } else { <nl> - int bit31 = j . imm64_ > > 31 & 0x1 ; <nl> + int shift_cnt = base : : bits : : CountTrailingZeros64 ( j . imm64_ ) ; <nl> int rep32_count = InstrCountForLoadReplicatedConst32 ( j . imm64_ ) ; <nl> - if ( ( j . imm64_ & kUpper16MaskOf64 ) = = 0 & & is_int16 ( j . imm64_ > > 32 ) & & <nl> - kArchVariant = = kMips64r6 ) { <nl> - / / 64 - bit value which consists of an unsigned 16 - bit value in its <nl> - / / least significant 32 - bits , and a signed 16 - bit value in its <nl> - / / most significant 32 - bits . <nl> - ori ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> - dahi ( rd , j . imm64_ > > 32 & kImm16Mask ) ; <nl> - } else if ( ( j . imm64_ & ( kHigher16MaskOf64 | kUpper16MaskOf64 ) ) = = 0 & & <nl> - kArchVariant = = kMips64r6 ) { <nl> - / / 64 - bit value which consists of an unsigned 16 - bit value in its <nl> - / / least significant 48 - bits , and a signed 16 - bit value in its <nl> - / / most significant 16 - bits . <nl> - ori ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> - dati ( rd , j . imm64_ > > 48 & kImm16Mask ) ; <nl> - } else if ( ( j . imm64_ & kImm16Mask ) = = 0 & & <nl> - is_int16 ( ( j . imm64_ > > 32 ) + bit31 ) & & <nl> - kArchVariant = = kMips64r6 ) { <nl> - / / 16 LSBs ( Least Significant Bits ) all set to zero . <nl> - / / 48 MSBs ( Most Significant Bits ) hold a signed 32 - bit value . <nl> - lui ( rd , j . imm64_ > > kLuiShift & kImm16Mask ) ; <nl> - dahi ( rd , ( ( j . imm64_ > > 32 ) + bit31 ) & kImm16Mask ) ; <nl> - } else if ( ( j . imm64_ & kImm16Mask ) = = 0 & & <nl> - ( ( j . imm64_ > > 31 ) & 0x1ffff ) = = <nl> - ( ( 0x20000 - bit31 ) & 0x1ffff ) & & <nl> - kArchVariant = = kMips64r6 ) { <nl> - / / 16 LSBs all set to zero . <nl> - / / 48 MSBs hold a signed value which can ' t be represented by signed <nl> - / / 32 - bit number , and the middle 16 bits are all zero , or all one . <nl> - lui ( rd , j . imm64_ > > kLuiShift & kImm16Mask ) ; <nl> - dati ( rd , ( ( j . imm64_ > > 48 ) + bit31 ) & kImm16Mask ) ; <nl> - } else if ( is_int16 ( static_cast < int32_t > ( j . imm64_ ) ) & & <nl> - is_int16 ( ( j . imm64_ > > 32 ) + bit31 ) & & <nl> - kArchVariant = = kMips64r6 ) { <nl> - / / 32 LSBs contain a signed 16 - bit number . <nl> - / / 32 MSBs contain a signed 16 - bit number . <nl> - daddiu ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> - dahi ( rd , ( ( j . imm64_ > > 32 ) + bit31 ) & kImm16Mask ) ; <nl> - } else if ( is_int16 ( static_cast < int32_t > ( j . imm64_ ) ) & & <nl> - ( ( j . imm64_ > > 31 ) & 0x1ffff ) = = <nl> - ( ( 0x20000 - bit31 ) & 0x1ffff ) & & <nl> - kArchVariant = = kMips64r6 ) { <nl> - / / 48 LSBs contain an unsigned 16 - bit number . <nl> - / / 16 MSBs contain a signed 16 - bit number . <nl> - daddiu ( rd , zero_reg , j . imm64_ & kImm16Mask ) ; <nl> - dati ( rd , ( ( j . imm64_ > > 48 ) + bit31 ) & kImm16Mask ) ; <nl> - } else if ( base : : bits : : IsPowerOfTwo64 ( j . imm64_ + 1 ) ) { <nl> - / / 64 - bit values which have their " n " MSBs set to one , and their <nl> - / / " 64 - n " LSBs set to zero . " n " must meet the restrictions 0 < n < 64 . <nl> - int shift_cnt = 64 - base : : bits : : CountTrailingZeros64 ( j . imm64_ + 1 ) ; <nl> - daddiu ( rd , zero_reg , - 1 ) ; <nl> + int64_t tmp = j . 
imm64_ > > shift_cnt ; <nl> + if ( is_uint16 ( tmp ) ) { <nl> + / / Value can be computed by loading a 16 - bit unsigned value , and <nl> + / / then shifting left . <nl> + ori ( rd , zero_reg , tmp & kImm16Mask ) ; <nl> + if ( shift_cnt < 32 ) { <nl> + dsll ( rd , rd , shift_cnt ) ; <nl> + } else { <nl> + dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> + } <nl> + } else if ( is_int16 ( tmp ) ) { <nl> + / / Value can be computed by loading a 16 - bit signed value , and <nl> + / / then shifting left . <nl> + daddiu ( rd , zero_reg , static_cast < int32_t > ( tmp ) ) ; <nl> if ( shift_cnt < 32 ) { <nl> - dsrl ( rd , rd , shift_cnt ) ; <nl> + dsll ( rd , rd , shift_cnt ) ; <nl> } else { <nl> - dsrl32 ( rd , rd , shift_cnt & 31 ) ; <nl> + dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> + } <nl> + } else if ( rep32_count < 3 ) { <nl> + / / Value being loaded has 32 LSBs equal to the 32 MSBs , and the <nl> + / / value loaded into the 32 LSBs can be loaded with a single <nl> + / / MIPS instruction . <nl> + LiLower32BitHelper ( rd , j ) ; <nl> + Dins ( rd , rd , 32 , 32 ) ; <nl> + } else if ( is_int32 ( tmp ) ) { <nl> + / / Loads with 3 instructions . <nl> + / / Value can be computed by loading a 32 - bit signed value , and <nl> + / / then shifting left . <nl> + lui ( rd , tmp > > kLuiShift & kImm16Mask ) ; <nl> + ori ( rd , rd , tmp & kImm16Mask ) ; <nl> + if ( shift_cnt < 32 ) { <nl> + dsll ( rd , rd , shift_cnt ) ; <nl> + } else { <nl> + dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> } <nl> } else { <nl> - int shift_cnt = base : : bits : : CountTrailingZeros64 ( j . imm64_ ) ; <nl> - int64_t tmp = j . imm64_ > > shift_cnt ; <nl> + shift_cnt = 16 + base : : bits : : CountTrailingZeros64 ( j . imm64_ > > 16 ) ; <nl> + tmp = j . imm64_ > > shift_cnt ; <nl> if ( is_uint16 ( tmp ) ) { <nl> - / / Value can be computed by loading a 16 - bit unsigned value , and <nl> - / / then shifting left . <nl> + / / Value can be computed by loading a 16 - bit unsigned value , <nl> + / / shifting left , and " or " ing in another 16 - bit unsigned value . <nl> ori ( rd , zero_reg , tmp & kImm16Mask ) ; <nl> if ( shift_cnt < 32 ) { <nl> dsll ( rd , rd , shift_cnt ) ; <nl> } else { <nl> dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> } <nl> + ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> } else if ( is_int16 ( tmp ) ) { <nl> - / / Value can be computed by loading a 16 - bit signed value , and <nl> - / / then shifting left . <nl> + / / Value can be computed by loading a 16 - bit signed value , <nl> + / / shifting left , and " or " ing in a 16 - bit unsigned value . <nl> daddiu ( rd , zero_reg , static_cast < int32_t > ( tmp ) ) ; <nl> if ( shift_cnt < 32 ) { <nl> dsll ( rd , rd , shift_cnt ) ; <nl> } else { <nl> dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> } <nl> - } else if ( rep32_count < 3 ) { <nl> + ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> + } else if ( rep32_count < 4 ) { <nl> / / Value being loaded has 32 LSBs equal to the 32 MSBs , and the <nl> - / / value loaded into the 32 LSBs can be loaded with a single <nl> - / / MIPS instruction . <nl> + / / value in the 32 LSBs requires 2 MIPS instructions to load . <nl> LiLower32BitHelper ( rd , j ) ; <nl> Dins ( rd , rd , 32 , 32 ) ; <nl> - } else if ( is_int32 ( tmp ) ) { <nl> - / / Loads with 3 instructions . <nl> - / / Value can be computed by loading a 32 - bit signed value , and <nl> - / / then shifting left . 
<nl> - lui ( rd , tmp > > kLuiShift & kImm16Mask ) ; <nl> - ori ( rd , rd , tmp & kImm16Mask ) ; <nl> - if ( shift_cnt < 32 ) { <nl> - dsll ( rd , rd , shift_cnt ) ; <nl> - } else { <nl> - dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> + } else if ( kArchVariant = = kMips64r6 ) { <nl> + / / Loads with 3 - 4 instructions . <nl> + / / Catch - all case to get any other 64 - bit values which aren ' t <nl> + / / handled by special cases above . <nl> + int64_t imm = j . imm64_ ; <nl> + LiLower32BitHelper ( rd , j ) ; <nl> + imm = ( imm > > 32 ) + bit31 ; <nl> + if ( imm & kImm16Mask ) { <nl> + dahi ( rd , imm & kImm16Mask ) ; <nl> + } <nl> + imm = ( imm > > 16 ) + ( imm > > 15 & 0x1 ) ; <nl> + if ( imm & kImm16Mask ) { <nl> + dati ( rd , imm & kImm16Mask ) ; <nl> } <nl> } else { <nl> - shift_cnt = 16 + base : : bits : : CountTrailingZeros64 ( j . imm64_ > > 16 ) ; <nl> - tmp = j . imm64_ > > shift_cnt ; <nl> - if ( is_uint16 ( tmp ) ) { <nl> - / / Value can be computed by loading a 16 - bit unsigned value , <nl> - / / shifting left , and " or " ing in another 16 - bit unsigned value . <nl> - ori ( rd , zero_reg , tmp & kImm16Mask ) ; <nl> - if ( shift_cnt < 32 ) { <nl> - dsll ( rd , rd , shift_cnt ) ; <nl> - } else { <nl> - dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> - } <nl> - ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> - } else if ( is_int16 ( tmp ) ) { <nl> - / / Value can be computed by loading a 16 - bit signed value , <nl> - / / shifting left , and " or " ing in a 16 - bit unsigned value . <nl> - daddiu ( rd , zero_reg , static_cast < int32_t > ( tmp ) ) ; <nl> - if ( shift_cnt < 32 ) { <nl> - dsll ( rd , rd , shift_cnt ) ; <nl> - } else { <nl> - dsll32 ( rd , rd , shift_cnt & 31 ) ; <nl> - } <nl> - ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> - } else if ( rep32_count < 4 ) { <nl> - / / Value being loaded has 32 LSBs equal to the 32 MSBs , and the <nl> - / / value in the 32 LSBs requires 2 MIPS instructions to load . <nl> - LiLower32BitHelper ( rd , j ) ; <nl> - Dins ( rd , rd , 32 , 32 ) ; <nl> - } else if ( kArchVariant = = kMips64r6 ) { <nl> - / / Loads with 3 - 4 instructions . <nl> - / / Catch - all case to get any other 64 - bit values which aren ' t <nl> - / / handled by special cases above . <nl> - int64_t imm = j . imm64_ ; <nl> - LiLower32BitHelper ( rd , j ) ; <nl> - imm = ( imm > > 32 ) + bit31 ; <nl> - if ( imm & kImm16Mask ) { <nl> - dahi ( rd , imm & kImm16Mask ) ; <nl> - } <nl> - imm = ( imm > > 16 ) + ( imm > > 15 & 0x1 ) ; <nl> - if ( imm & kImm16Mask ) { <nl> - dati ( rd , imm & kImm16Mask ) ; <nl> + if ( is_int48 ( j . imm64_ ) ) { <nl> + Operand k = Operand ( j . imm64_ > > 16 ) ; <nl> + LiLower32BitHelper ( rd , k ) ; <nl> + dsll ( rd , rd , 16 ) ; <nl> + if ( j . imm64_ & kImm16Mask ) { <nl> + ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> } <nl> } else { <nl> - if ( is_int48 ( j . imm64_ ) ) { <nl> - Operand k = Operand ( j . imm64_ > > 16 ) ; <nl> - LiLower32BitHelper ( rd , k ) ; <nl> + Operand k = Operand ( j . imm64_ > > 32 ) ; <nl> + LiLower32BitHelper ( rd , k ) ; <nl> + if ( ( j . imm64_ > > 16 ) & kImm16Mask ) { <nl> + dsll ( rd , rd , 16 ) ; <nl> + ori ( rd , rd , ( j . imm64_ > > 16 ) & kImm16Mask ) ; <nl> dsll ( rd , rd , 16 ) ; <nl> if ( j . imm64_ & kImm16Mask ) { <nl> ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> } <nl> } else { <nl> - Operand k = Operand ( j . imm64_ > > 32 ) ; <nl> - LiLower32BitHelper ( rd , k ) ; <nl> - if ( ( j . imm64_ > > 16 ) & kImm16Mask ) { <nl> - dsll ( rd , rd , 16 ) ; <nl> - ori ( rd , rd , ( j . 
imm64_ > > 16 ) & kImm16Mask ) ; <nl> - dsll ( rd , rd , 16 ) ; <nl> - if ( j . imm64_ & kImm16Mask ) { <nl> - ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> - } <nl> - } else { <nl> - dsll32 ( rd , rd , 0 ) ; <nl> - if ( j . imm64_ & kImm16Mask ) { <nl> - ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> - } <nl> + dsll32 ( rd , rd , 0 ) ; <nl> + if ( j . imm64_ & kImm16Mask ) { <nl> + ori ( rd , rd , j . imm64_ & kImm16Mask ) ; <nl> } <nl> } <nl> } <nl> } <nl> } <nl> } <nl> + } <nl> + } <nl> + <nl> + void MacroAssembler : : li ( Register rd , Operand j , LiFlags mode ) { <nl> + DCHECK ( ! j . is_reg ( ) ) ; <nl> + BlockTrampolinePoolScope block_trampoline_pool ( this ) ; <nl> + if ( ! MustUseReg ( j . rmode_ ) & & mode = = OPTIMIZE_SIZE ) { <nl> + int li_count = InstrCountForLi64Bit ( j . imm64_ ) ; <nl> + int li_neg_count = InstrCountForLi64Bit ( - j . imm64_ ) ; <nl> + int li_not_count = InstrCountForLi64Bit ( ~ j . imm64_ ) ; <nl> + / / Loading - MIN_INT64 could cause problems , but loading MIN_INT64 takes only <nl> + / / two instructions so no need to check for this . <nl> + if ( li_neg_count < = li_not_count & & li_neg_count < li_count - 1 ) { <nl> + DCHECK ( j . imm64_ ! = std : : numeric_limits < int64_t > : : min ( ) ) ; <nl> + li_optimized ( rd , Operand ( - j . imm64_ ) , mode ) ; <nl> + Dsubu ( rd , zero_reg , rd ) ; <nl> + } else if ( li_neg_count > li_not_count & & li_not_count < li_count - 1 ) { <nl> + DCHECK ( j . imm64_ ! = std : : numeric_limits < int64_t > : : min ( ) ) ; <nl> + li_optimized ( rd , Operand ( ~ j . imm64_ ) , mode ) ; <nl> + nor ( rd , rd , rd ) ; <nl> + } else { <nl> + li_optimized ( rd , j , mode ) ; <nl> + } <nl> } else if ( MustUseReg ( j . rmode_ ) ) { <nl> RecordRelocInfo ( j . rmode_ , j . imm64_ ) ; <nl> lui ( rd , ( j . imm64_ > > 32 ) & kImm16Mask ) ; <nl> mmm a / src / mips64 / macro - assembler - mips64 . h <nl> ppp b / src / mips64 / macro - assembler - mips64 . h <nl> class MacroAssembler : public Assembler { <nl> <nl> / / Load int32 in the rd register . <nl> void li ( Register rd , Operand j , LiFlags mode = OPTIMIZE_SIZE ) ; <nl> + void li_optimized ( Register rd , Operand j , LiFlags mode = OPTIMIZE_SIZE ) ; <nl> + static int InstrCountForLi64Bit ( int64_t value ) ; <nl> inline void LiLower32BitHelper ( Register rd , Operand j ) ; <nl> inline void li ( Register rd , int64_t j , LiFlags mode = OPTIMIZE_SIZE ) { <nl> li ( rd , Operand ( j ) , mode ) ; <nl> mmm a / test / cctest / test - assembler - mips64 . cc <nl> ppp b / test / cctest / test - assembler - mips64 . cc <nl> uint64_t run_li_macro ( uint64_t imm , LiFlags mode , int32_t num_instr = 0 ) { <nl> assm . GetCode ( isolate , & desc ) ; <nl> Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + # ifdef OBJECT_PRINT <nl> + code - > Print ( std : : cout ) ; <nl> + # endif <nl> F2 f = FUNCTION_CAST < F2 > ( code - > entry ( ) ) ; <nl> <nl> uint64_t res = reinterpret_cast < uint64_t > ( <nl> TEST ( li_macro ) { <nl> { 0x00000001fffffffe , 4 , 2 } , / / max_uint32 < < 1 <nl> / / r2 - lui + ori + dsll + ori <nl> / / r6 - daddiu + dahi <nl> - { 0x0000fffffffffffe , 5 , 2 } , / / max_uint48 - 1 <nl> - / / r2 - ori + dsll + ori + dsll + ori <nl> + { 0x0000fffffffffffe , 4 , 2 } , / / max_uint48 - 1 <nl> + / / r2 - daddiu + dsll32 + ori + dsubu <nl> + / / Loading imm directly would require ori + dsll + ori + dsll + ori . <nl> + / / Optimized by loading - imm and using dsubu to get imm . 
<nl> / / r6 - daddiu + dati <nl> { 0xffffffff00000000 , 2 , 2 } , / / max_uint32 < < 32 <nl> / / r2 - daddiu + dsll32 <nl> TEST ( li_macro ) { <nl> { 0xffff8000ffff0000 , 3 , 2 } , <nl> / / r2 - lui + ori + dsll <nl> / / r6 - lui + dahi <nl> + { 0x0000ffffffff0000 , 4 , 2 } , <nl> + / / r2 - ori + dsll + ori + dsll <nl> + / / r6 - lui + dati <nl> { 0x1234ffff80000000 , 3 , 2 } , <nl> / / r2 - lui + ori + dsll <nl> / / r6 - lui + dati <nl> TEST ( li_macro ) { <nl> { 0xffff8000ffff8000 , 2 , 2 } , <nl> / / r2 - daddiu + dinsu <nl> / / r6 - daddiu + dahi <nl> - { 0xffff0000ffff8000 , 5 , 3 } , <nl> - / / r2 - lui + dsll + ori + dsll + ori <nl> + { 0xffff0000ffff8000 , 4 , 3 } , <nl> + / / r2 - ori + dsll32 + ori + dsubu <nl> + / / Loading imm directly would require lui + dsll + ori + dsll + ori . <nl> + / / Optimized by loading - imm and using dsubu to get imm . <nl> / / r6 - daddiu + dahi + dati <nl> { 0x8000000080000000 , 2 , 2 } , <nl> / / lui + dinsu <nl> TEST ( li_macro ) { <nl> { 0x1ffffabcd , 4 , 2 } , <nl> / / r2 - lui + ori + dsll + ori <nl> / / r6 - daddiu + dahi <nl> - { 0xffffffffabcd , 5 , 2 } , <nl> - / / r2 - ori + dsll + ori + dsll + ori <nl> + { 0xffffffffabcd , 4 , 2 } , <nl> + / / r2 - daddiu + dsll32 + ori + dsubu <nl> + / / Loading imm directly would require ori + dsll + ori + dsll + ori . <nl> + / / Optimized by loading - imm and using dsubu to get imm . <nl> / / r6 - daddiu + dati <nl> - { 0x1ffffffffabcd , 6 , 2 } , <nl> - / / r2 - lui + ori + dsll + ori + dsll + ori <nl> + { 0x1ffffffffabcd , 4 , 2 } , <nl> + / / r2 - daddiu + dsll32 + ori + dsubu <nl> + / / Loading imm directly would require lui + ori + dsll + ori + dsll + ori . <nl> + / / Optimized by loading - imm and using dsubu to get imm . <nl> / / r6 - daddiu + dati <nl> { 0xffff7fff80010000 , 5 , 2 } , <nl> / / r2 - lui + ori + dsll + ori + dsll <nl> TEST ( li_macro ) { <nl> / / r2 - lui + ori + dsll + ori + dsll + ori instruction sequence , <nl> / / r6 - lui + ori + dahi + dati . <nl> / / Load using full instruction sequence . <nl> + { 0xffff0000ffffffff , 3 , 3 } , <nl> + / / r2 - ori + dsll32 + nor <nl> + / / Loading imm directly would require lui + dsll + ori + dsll + ori . <nl> + / / Optimized by loading ~ imm and using nor to get imm . Loading - imm would <nl> + / / require one instruction more . <nl> + / / r6 - daddiu + dahi + dati <nl> } ; <nl> <nl> size_t nr_test_cases = sizeof ( tc ) / sizeof ( TestCase_li ) ; <nl> uint64_t run_Subu ( uint64_t imm , int32_t num_instr ) { <nl> assm . GetCode ( isolate , & desc ) ; <nl> Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + # ifdef OBJECT_PRINT <nl> + code - > Print ( std : : cout ) ; <nl> + # endif <nl> F2 f = FUNCTION_CAST < F2 > ( code - > entry ( ) ) ; <nl> <nl> uint64_t res = reinterpret_cast < uint64_t > ( <nl> uint64_t run_Dsubu ( uint64_t imm , int32_t num_instr ) { <nl> assm . GetCode ( isolate , & desc ) ; <nl> Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + # ifdef OBJECT_PRINT <nl> + code - > Print ( std : : cout ) ; <nl> + # endif <nl> F2 f = FUNCTION_CAST < F2 > ( code - > entry ( ) ) ; <nl> <nl> uint64_t res = reinterpret_cast < uint64_t > ( <nl> TEST ( Dsubu ) { <nl> / / r6 - ori + dati + dsubu . <nl> / / The result of 0 - min_int64 equals max_int64 + 1 , which wraps around to <nl> / / min_int64 again .
<nl> + { 0xffff0000ffffffff , 0x0000ffff00000001 , 4 } , <nl> + / / The test case above generates : <nl> + / / r2 - ori + dsrl32 + ori + daddu instruction sequence , <nl> + / / r6 - daddiu + dahi + dati + dsubu . <nl> + / / For r2 loading imm would take more instructions than loading - imm so we <nl> + / / can load - imm and add with daddu . <nl> } ; <nl> <nl> size_t nr_test_cases = sizeof ( tc ) / sizeof ( TestCaseDsubu ) ; <nl>
MIPS64 : Add optimizations to li and Dsubu macros .
v8/v8
37b461a932362779dbe3dd8b9b0bc8adddfc0b29
2017-06-19T12:20:17Z
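The core of the v8 patch above is a cost comparison: before materializing a 64-bit immediate, li() now also prices the negated and inverted values, and pays one extra dsubu or nor when that wins by more than one instruction. Below is a minimal standalone sketch of that selection rule; toy_instr_count is a deliberately simplified stand-in assumption, not v8's InstrCountForLi64Bit, so its counts differ from the ones in the test comments.

```cpp
#include <cstdint>
#include <cstdio>

// Simplified cost model: one instruction if the value is a sign-extended
// 16-bit immediate, otherwise one instruction per non-zero 16-bit chunk.
int toy_instr_count(uint64_t v) {
    int64_t s = static_cast<int64_t>(v);
    if (s >= -32768 && s < 32768) return 1;  // a single daddiu reaches it
    int chunks = 0;
    for (int shift = 0; shift < 64; shift += 16)
        if (((v >> shift) & 0xffff) != 0) ++chunks;
    return chunks;
}

// Mirrors the selection the patch adds to MacroAssembler::li: materialize
// -imm or ~imm only when the saving beats the extra dsubu/nor instruction.
const char* pick_strategy(uint64_t imm) {
    int direct = toy_instr_count(imm);
    int neg = toy_instr_count(0 - imm);  // would need one extra dsubu
    int inv = toy_instr_count(~imm);     // would need one extra nor
    if (neg <= inv && neg < direct - 1) return "load -imm, then dsubu";
    if (neg > inv && inv < direct - 1) return "load ~imm, then nor";
    return "load imm directly";
}

int main() {
    uint64_t samples[] = {0x7fffull, 0xffffffffffff0000ull,
                          0xffff0000ffffffffull};
    for (uint64_t s : samples)
        std::printf("%016llx -> %s\n",
                    static_cast<unsigned long long>(s), pick_strategy(s));
    return 0;
}
```

The `- 1` in both comparisons accounts for the extra arithmetic instruction, mirroring the `li_neg_count < li_count - 1` checks in the diff.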
mmm a / include / spdlog / details / os . h <nl> ppp b / include / spdlog / details / os . h <nl> SPDLOG_API bool is_color_terminal ( ) SPDLOG_NOEXCEPT ; <nl> SPDLOG_API bool in_terminal ( FILE * file ) SPDLOG_NOEXCEPT ; <nl> <nl> # if ( defined ( SPDLOG_WCHAR_TO_UTF8_SUPPORT ) | | defined ( SPDLOG_WCHAR_FILENAMES ) ) & & defined ( _WIN32 ) <nl> - void wstr_to_utf8buf ( wstring_view_t wstr , memory_buf_t & target ) ; <nl> + SPDLOG_API void wstr_to_utf8buf ( wstring_view_t wstr , memory_buf_t & target ) ; <nl> # endif <nl> <nl> / / Return directory name from given path or empty string <nl>
Fix : add missing SPDLOG_API export to wstr_to_utf8buf declaration
gabime/spdlog
0317731dc91c951a075ff48e2939144a2a1a634b
2020-05-26T20:34:17Z
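For context on why the one-token spdlog change above matters: SPDLOG_API is an export-annotation macro, and a public function declared without it compiles everywhere but fails to link for shared-library consumers on Windows. A generic sketch of the idiom follows, with placeholder MYLIB_* names; spdlog's real macro definitions may differ in detail.

```cpp
#if defined(_WIN32) && defined(MYLIB_SHARED)
  #ifdef MYLIB_EXPORTS               // set while compiling the DLL itself
    #define MYLIB_API __declspec(dllexport)
  #else                              // consumers of the DLL import instead
    #define MYLIB_API __declspec(dllimport)
  #endif
#else
  #define MYLIB_API                  // static or non-Windows build: no-op
#endif

// Omitting the macro on a public declaration (the bug fixed above for
// wstr_to_utf8buf) still compiles, but DLL consumers hit an unresolved
// external symbol at link time.
MYLIB_API int mylib_add(int a, int b);

// In a real library the definition lives in the .cpp compiled with
// MYLIB_EXPORTS; here it is inline so the sketch builds standalone.
MYLIB_API int mylib_add(int a, int b) { return a + b; }

int main() { return mylib_add(2, 2) == 4 ? 0 : 1; }
```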
mmm a / BUILD <nl> ppp b / BUILD <nl> grpc_cc_library ( <nl> " src / core / lib / support / log_windows . cc " , <nl> " src / core / lib / support / mpscq . cc " , <nl> " src / core / lib / support / murmur_hash . cc " , <nl> - " src / core / lib / support / stack_lockfree . cc " , <nl> " src / core / lib / support / string . cc " , <nl> " src / core / lib / support / string_posix . cc " , <nl> " src / core / lib / support / string_util_windows . cc " , <nl> grpc_cc_library ( <nl> " src / core / lib / support / atomic_with_atm . h " , <nl> " src / core / lib / support / atomic_with_std . h " , <nl> " src / core / lib / support / env . h " , <nl> - " src / core / lib / support / memory . h " , <nl> - " src / core / lib / support / vector . h " , <nl> " src / core / lib / support / manual_constructor . h " , <nl> + " src / core / lib / support / memory . h " , <nl> " src / core / lib / support / mpscq . h " , <nl> " src / core / lib / support / murmur_hash . h " , <nl> " src / core / lib / support / spinlock . h " , <nl> - " src / core / lib / support / stack_lockfree . h " , <nl> " src / core / lib / support / string . h " , <nl> " src / core / lib / support / string_windows . h " , <nl> " src / core / lib / support / time_precise . h " , <nl> " src / core / lib / support / tmpfile . h " , <nl> + " src / core / lib / support / vector . h " , <nl> ] , <nl> language = " c + + " , <nl> public_hdrs = GPR_PUBLIC_HDRS , <nl> deps = [ <nl> " gpr_codegen " , <nl> - " @ com_google_absl / / absl / container : inlined_vector " <nl> + " @ com_google_absl / / absl / container : inlined_vector " , <nl> ] , <nl> ) <nl> <nl> grpc_cc_library ( <nl> " src / core / lib / transport / transport_op_string . cc " , <nl> ] , <nl> hdrs = [ <nl> + " src / core / lib / backoff / backoff . h " , <nl> " src / core / lib / channel / channel_args . h " , <nl> " src / core / lib / channel / channel_stack . h " , <nl> " src / core / lib / channel / channel_stack_builder . h " , <nl> grpc_cc_library ( <nl> " src / core / lib / http / format_request . h " , <nl> " src / core / lib / http / httpcli . h " , <nl> " src / core / lib / http / parser . h " , <nl> + " src / core / lib / iomgr / block_annotate . h " , <nl> " src / core / lib / iomgr / call_combiner . h " , <nl> " src / core / lib / iomgr / closure . h " , <nl> " src / core / lib / iomgr / combiner . h " , <nl> grpc_cc_library ( <nl> " src / core / lib / iomgr / socket_utils_posix . h " , <nl> " src / core / lib / iomgr / socket_windows . h " , <nl> " src / core / lib / iomgr / sys_epoll_wrapper . h " , <nl> - " src / core / lib / iomgr / block_annotate . h " , <nl> " src / core / lib / iomgr / tcp_client . h " , <nl> " src / core / lib / iomgr / tcp_client_posix . h " , <nl> " src / core / lib / iomgr / tcp_posix . h " , <nl> grpc_cc_library ( <nl> " src / core / lib / transport / timeout_encoding . h " , <nl> " src / core / lib / transport / transport . h " , <nl> " src / core / lib / transport / transport_impl . h " , <nl> - " src / core / lib / backoff / backoff . h " , <nl> ] , <nl> external_deps = [ <nl> " zlib " , <nl> grpc_cc_library ( <nl> " src / core / ext / transport / chttp2 / transport / bin_decoder . h " , <nl> " src / core / ext / transport / chttp2 / transport / bin_encoder . h " , <nl> " src / core / ext / transport / chttp2 / transport / chttp2_transport . h " , <nl> - " src / core / ext / transport / chttp2 / transport / frame . h " , <nl> " src / core / ext / transport / chttp2 / transport / flow_control . 
h " , <nl> + " src / core / ext / transport / chttp2 / transport / frame . h " , <nl> " src / core / ext / transport / chttp2 / transport / frame_data . h " , <nl> " src / core / ext / transport / chttp2 / transport / frame_goaway . h " , <nl> " src / core / ext / transport / chttp2 / transport / frame_ping . h " , <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> add_dependencies ( buildtests_c gpr_log_test ) <nl> add_dependencies ( buildtests_c gpr_manual_constructor_test ) <nl> add_dependencies ( buildtests_c gpr_mpscq_test ) <nl> add_dependencies ( buildtests_c gpr_spinlock_test ) <nl> - add_dependencies ( buildtests_c gpr_stack_lockfree_test ) <nl> add_dependencies ( buildtests_c gpr_string_test ) <nl> add_dependencies ( buildtests_c gpr_sync_test ) <nl> add_dependencies ( buildtests_c gpr_thd_test ) <nl> add_library ( gpr <nl> src / core / lib / support / log_windows . cc <nl> src / core / lib / support / mpscq . cc <nl> src / core / lib / support / murmur_hash . cc <nl> - src / core / lib / support / stack_lockfree . cc <nl> src / core / lib / support / string . cc <nl> src / core / lib / support / string_posix . cc <nl> src / core / lib / support / string_util_windows . cc <nl> target_link_libraries ( gpr_spinlock_test <nl> endif ( gRPC_BUILD_TESTS ) <nl> if ( gRPC_BUILD_TESTS ) <nl> <nl> - add_executable ( gpr_stack_lockfree_test <nl> - test / core / support / stack_lockfree_test . cc <nl> - ) <nl> - <nl> - <nl> - target_include_directories ( gpr_stack_lockfree_test <nl> - PRIVATE $ { CMAKE_CURRENT_SOURCE_DIR } <nl> - PRIVATE $ { CMAKE_CURRENT_SOURCE_DIR } / include <nl> - PRIVATE $ { BORINGSSL_ROOT_DIR } / include <nl> - PRIVATE $ { PROTOBUF_ROOT_DIR } / src <nl> - PRIVATE $ { BENCHMARK_ROOT_DIR } / include <nl> - PRIVATE $ { ZLIB_ROOT_DIR } <nl> - PRIVATE $ { CMAKE_CURRENT_BINARY_DIR } / third_party / zlib <nl> - PRIVATE $ { CARES_INCLUDE_DIR } <nl> - PRIVATE $ { CMAKE_CURRENT_BINARY_DIR } / third_party / cares / cares <nl> - PRIVATE $ { CMAKE_CURRENT_BINARY_DIR } / third_party / gflags / include <nl> - PRIVATE $ { CMAKE_CURRENT_SOURCE_DIR } / third_party / abseil - cpp <nl> - ) <nl> - <nl> - target_link_libraries ( gpr_stack_lockfree_test <nl> - $ { _gRPC_ALLTARGETS_LIBRARIES } <nl> - gpr_test_util <nl> - gpr <nl> - ) <nl> - <nl> - endif ( gRPC_BUILD_TESTS ) <nl> - if ( gRPC_BUILD_TESTS ) <nl> - <nl> add_executable ( gpr_string_test <nl> test / core / support / string_test . 
cc <nl> ) <nl> mmm a / Makefile <nl> ppp b / Makefile <nl> gpr_log_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_log_test <nl> gpr_manual_constructor_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_manual_constructor_test <nl> gpr_mpscq_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_mpscq_test <nl> gpr_spinlock_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_spinlock_test <nl> - gpr_stack_lockfree_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_stack_lockfree_test <nl> gpr_string_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_string_test <nl> gpr_sync_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_sync_test <nl> gpr_thd_test : $ ( BINDIR ) / $ ( CONFIG ) / gpr_thd_test <nl> buildtests_c : privatelibs_c \ <nl> $ ( BINDIR ) / $ ( CONFIG ) / gpr_manual_constructor_test \ <nl> $ ( BINDIR ) / $ ( CONFIG ) / gpr_mpscq_test \ <nl> $ ( BINDIR ) / $ ( CONFIG ) / gpr_spinlock_test \ <nl> - $ ( BINDIR ) / $ ( CONFIG ) / gpr_stack_lockfree_test \ <nl> $ ( BINDIR ) / $ ( CONFIG ) / gpr_string_test \ <nl> $ ( BINDIR ) / $ ( CONFIG ) / gpr_sync_test \ <nl> $ ( BINDIR ) / $ ( CONFIG ) / gpr_thd_test \ <nl> test_c : buildtests_c <nl> $ ( Q ) $ ( BINDIR ) / $ ( CONFIG ) / gpr_mpscq_test | | ( echo test gpr_mpscq_test failed ; exit 1 ) <nl> $ ( E ) " [ RUN ] Testing gpr_spinlock_test " <nl> $ ( Q ) $ ( BINDIR ) / $ ( CONFIG ) / gpr_spinlock_test | | ( echo test gpr_spinlock_test failed ; exit 1 ) <nl> - $ ( E ) " [ RUN ] Testing gpr_stack_lockfree_test " <nl> - $ ( Q ) $ ( BINDIR ) / $ ( CONFIG ) / gpr_stack_lockfree_test | | ( echo test gpr_stack_lockfree_test failed ; exit 1 ) <nl> $ ( E ) " [ RUN ] Testing gpr_string_test " <nl> $ ( Q ) $ ( BINDIR ) / $ ( CONFIG ) / gpr_string_test | | ( echo test gpr_string_test failed ; exit 1 ) <nl> $ ( E ) " [ RUN ] Testing gpr_sync_test " <nl> LIBGPR_SRC = \ <nl> src / core / lib / support / log_windows . cc \ <nl> src / core / lib / support / mpscq . cc \ <nl> src / core / lib / support / murmur_hash . cc \ <nl> - src / core / lib / support / stack_lockfree . cc \ <nl> src / core / lib / support / string . cc \ <nl> src / core / lib / support / string_posix . cc \ <nl> src / core / lib / support / string_util_windows . cc \ <nl> endif <nl> endif <nl> <nl> <nl> - GPR_STACK_LOCKFREE_TEST_SRC = \ <nl> - test / core / support / stack_lockfree_test . cc \ <nl> - <nl> - GPR_STACK_LOCKFREE_TEST_OBJS = $ ( addprefix $ ( OBJDIR ) / $ ( CONFIG ) / , $ ( addsuffix . o , $ ( basename $ ( GPR_STACK_LOCKFREE_TEST_SRC ) ) ) ) <nl> - ifeq ( $ ( NO_SECURE ) , true ) <nl> - <nl> - # You can ' t build secure targets if you don ' t have OpenSSL . <nl> - <nl> - $ ( BINDIR ) / $ ( CONFIG ) / gpr_stack_lockfree_test : openssl_dep_error <nl> - <nl> - else <nl> - <nl> - <nl> - <nl> - $ ( BINDIR ) / $ ( CONFIG ) / gpr_stack_lockfree_test : $ ( GPR_STACK_LOCKFREE_TEST_OBJS ) $ ( LIBDIR ) / $ ( CONFIG ) / libgpr_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a <nl> - $ ( E ) " [ LD ] Linking $ @ " <nl> - $ ( Q ) mkdir - p ` dirname $ @ ` <nl> - $ ( Q ) $ ( LD ) $ ( LDFLAGS ) $ ( GPR_STACK_LOCKFREE_TEST_OBJS ) $ ( LIBDIR ) / $ ( CONFIG ) / libgpr_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LDLIBS ) $ ( LDLIBS_SECURE ) - o $ ( BINDIR ) / $ ( CONFIG ) / gpr_stack_lockfree_test <nl> - <nl> - endif <nl> - <nl> - $ ( OBJDIR ) / $ ( CONFIG ) / test / core / support / stack_lockfree_test . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgpr_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a <nl> - <nl> - deps_gpr_stack_lockfree_test : $ ( GPR_STACK_LOCKFREE_TEST_OBJS : . o = . 
dep ) <nl> - <nl> - ifneq ( $ ( NO_SECURE ) , true ) <nl> - ifneq ( $ ( NO_DEPS ) , true ) <nl> - - include $ ( GPR_STACK_LOCKFREE_TEST_OBJS : . o = . dep ) <nl> - endif <nl> - endif <nl> - <nl> - <nl> GPR_STRING_TEST_SRC = \ <nl> test / core / support / string_test . cc \ <nl> <nl> mmm a / build . yaml <nl> ppp b / build . yaml <nl> filegroups : <nl> - src / core / lib / support / log_windows . cc <nl> - src / core / lib / support / mpscq . cc <nl> - src / core / lib / support / murmur_hash . cc <nl> - - src / core / lib / support / stack_lockfree . cc <nl> - src / core / lib / support / string . cc <nl> - src / core / lib / support / string_posix . cc <nl> - src / core / lib / support / string_util_windows . cc <nl> filegroups : <nl> - src / core / lib / support / mpscq . h <nl> - src / core / lib / support / murmur_hash . h <nl> - src / core / lib / support / spinlock . h <nl> - - src / core / lib / support / stack_lockfree . h <nl> - src / core / lib / support / string . h <nl> - src / core / lib / support / string_windows . h <nl> - src / core / lib / support / time_precise . h <nl> targets : <nl> - gpr_test_util <nl> - gpr <nl> uses_polling : false <nl> - - name : gpr_stack_lockfree_test <nl> - cpu_cost : 7 <nl> - build : test <nl> - language : c <nl> - src : <nl> - - test / core / support / stack_lockfree_test . cc <nl> - deps : <nl> - - gpr_test_util <nl> - - gpr <nl> - uses_polling : false <nl> - name : gpr_string_test <nl> build : test <nl> language : c <nl> mmm a / config . m4 <nl> ppp b / config . m4 <nl> if test " $ PHP_GRPC " ! = " no " ; then <nl> src / core / lib / support / log_windows . cc \ <nl> src / core / lib / support / mpscq . cc \ <nl> src / core / lib / support / murmur_hash . cc \ <nl> - src / core / lib / support / stack_lockfree . cc \ <nl> src / core / lib / support / string . cc \ <nl> src / core / lib / support / string_posix . cc \ <nl> src / core / lib / support / string_util_windows . cc \ <nl> mmm a / config . w32 <nl> ppp b / config . w32 <nl> if ( PHP_GRPC ! = " no " ) { <nl> " src \ \ core \ \ lib \ \ support \ \ log_windows . cc " + <nl> " src \ \ core \ \ lib \ \ support \ \ mpscq . cc " + <nl> " src \ \ core \ \ lib \ \ support \ \ murmur_hash . cc " + <nl> - " src \ \ core \ \ lib \ \ support \ \ stack_lockfree . cc " + <nl> " src \ \ core \ \ lib \ \ support \ \ string . cc " + <nl> " src \ \ core \ \ lib \ \ support \ \ string_posix . cc " + <nl> " src \ \ core \ \ lib \ \ support \ \ string_util_windows . cc " + <nl> mmm a / gRPC - Core . podspec <nl> ppp b / gRPC - Core . podspec <nl> Pod : : Spec . new do | s | <nl> ' src / core / lib / support / mpscq . h ' , <nl> ' src / core / lib / support / murmur_hash . h ' , <nl> ' src / core / lib / support / spinlock . h ' , <nl> - ' src / core / lib / support / stack_lockfree . h ' , <nl> ' src / core / lib / support / string . h ' , <nl> ' src / core / lib / support / string_windows . h ' , <nl> ' src / core / lib / support / time_precise . h ' , <nl> Pod : : Spec . new do | s | <nl> ' src / core / lib / support / log_windows . cc ' , <nl> ' src / core / lib / support / mpscq . cc ' , <nl> ' src / core / lib / support / murmur_hash . cc ' , <nl> - ' src / core / lib / support / stack_lockfree . cc ' , <nl> ' src / core / lib / support / string . cc ' , <nl> ' src / core / lib / support / string_posix . cc ' , <nl> ' src / core / lib / support / string_util_windows . cc ' , <nl> Pod : : Spec . new do | s | <nl> ' src / core / lib / support / mpscq . 
h ' , <nl> ' src / core / lib / support / murmur_hash . h ' , <nl> ' src / core / lib / support / spinlock . h ' , <nl> - ' src / core / lib / support / stack_lockfree . h ' , <nl> ' src / core / lib / support / string . h ' , <nl> ' src / core / lib / support / string_windows . h ' , <nl> ' src / core / lib / support / time_precise . h ' , <nl> mmm a / grpc . gemspec <nl> ppp b / grpc . gemspec <nl> Gem : : Specification . new do | s | <nl> s . files + = % w ( src / core / lib / support / mpscq . h ) <nl> s . files + = % w ( src / core / lib / support / murmur_hash . h ) <nl> s . files + = % w ( src / core / lib / support / spinlock . h ) <nl> - s . files + = % w ( src / core / lib / support / stack_lockfree . h ) <nl> s . files + = % w ( src / core / lib / support / string . h ) <nl> s . files + = % w ( src / core / lib / support / string_windows . h ) <nl> s . files + = % w ( src / core / lib / support / time_precise . h ) <nl> Gem : : Specification . new do | s | <nl> s . files + = % w ( src / core / lib / support / log_windows . cc ) <nl> s . files + = % w ( src / core / lib / support / mpscq . cc ) <nl> s . files + = % w ( src / core / lib / support / murmur_hash . cc ) <nl> - s . files + = % w ( src / core / lib / support / stack_lockfree . cc ) <nl> s . files + = % w ( src / core / lib / support / string . cc ) <nl> s . files + = % w ( src / core / lib / support / string_posix . cc ) <nl> s . files + = % w ( src / core / lib / support / string_util_windows . cc ) <nl> mmm a / grpc . gyp <nl> ppp b / grpc . gyp <nl> <nl> ' src / core / lib / support / log_windows . cc ' , <nl> ' src / core / lib / support / mpscq . cc ' , <nl> ' src / core / lib / support / murmur_hash . cc ' , <nl> - ' src / core / lib / support / stack_lockfree . cc ' , <nl> ' src / core / lib / support / string . cc ' , <nl> ' src / core / lib / support / string_posix . cc ' , <nl> ' src / core / lib / support / string_util_windows . cc ' , <nl> mmm a / package . xml <nl> ppp b / package . xml <nl> <nl> < file baseinstalldir = " / " name = " src / core / lib / support / mpscq . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / murmur_hash . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / spinlock . h " role = " src " / > <nl> - < file baseinstalldir = " / " name = " src / core / lib / support / stack_lockfree . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / string . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / string_windows . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / time_precise . h " role = " src " / > <nl> <nl> < file baseinstalldir = " / " name = " src / core / lib / support / log_windows . cc " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / mpscq . cc " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / murmur_hash . cc " role = " src " / > <nl> - < file baseinstalldir = " / " name = " src / core / lib / support / stack_lockfree . cc " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / string . cc " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / string_posix . cc " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / string_util_windows . 
cc " role = " src " / > <nl> deleted file mode 100644 <nl> index 7a4ede3b92d . . 00000000000 <nl> mmm a / src / core / lib / support / stack_lockfree . cc <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2015 gRPC authors . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * <nl> - * / <nl> - <nl> - # include " src / core / lib / support / stack_lockfree . h " <nl> - <nl> - # include < stdlib . h > <nl> - # include < string . h > <nl> - <nl> - # include < grpc / support / alloc . h > <nl> - # include < grpc / support / atm . h > <nl> - # include < grpc / support / log . h > <nl> - # include < grpc / support / port_platform . h > <nl> - <nl> - / * The lockfree node structure is a single architecture - level <nl> - word that allows for an atomic CAS to set it up . * / <nl> - struct lockfree_node_contents { <nl> - / * next thing to look at . Actual index for head , next index otherwise * / <nl> - uint16_t index ; <nl> - # ifdef GPR_ARCH_64 <nl> - uint16_t pad ; <nl> - uint32_t aba_ctr ; <nl> - # else <nl> - # ifdef GPR_ARCH_32 <nl> - uint16_t aba_ctr ; <nl> - # else <nl> - # error Unsupported bit width architecture <nl> - # endif <nl> - # endif <nl> - } ; <nl> - <nl> - / * Use a union to make sure that these are in the same bits as an atm word * / <nl> - typedef union lockfree_node { <nl> - gpr_atm atm ; <nl> - struct lockfree_node_contents contents ; <nl> - } lockfree_node ; <nl> - <nl> - / * make sure that entries aligned to 8 - bytes * / <nl> - # define ENTRY_ALIGNMENT_BITS 3 <nl> - / * reserve this entry as invalid * / <nl> - # define INVALID_ENTRY_INDEX ( ( 1 < < 16 ) - 1 ) <nl> - <nl> - struct gpr_stack_lockfree { <nl> - lockfree_node * entries ; <nl> - lockfree_node head ; / * An atomic entry describing curr head * / <nl> - } ; <nl> - <nl> - gpr_stack_lockfree * gpr_stack_lockfree_create ( size_t entries ) { <nl> - gpr_stack_lockfree * stack ; <nl> - stack = ( gpr_stack_lockfree * ) gpr_malloc ( sizeof ( * stack ) ) ; <nl> - / * Since we only allocate 16 bits to represent an entry number , <nl> - * make sure that we are within the desired range * / <nl> - / * Reserve the highest entry number as a dummy * / <nl> - GPR_ASSERT ( entries < INVALID_ENTRY_INDEX ) ; <nl> - stack - > entries = ( lockfree_node * ) gpr_malloc_aligned ( <nl> - entries * sizeof ( stack - > entries [ 0 ] ) , ENTRY_ALIGNMENT_BITS ) ; <nl> - / * Clear out all entries * / <nl> - memset ( stack - > entries , 0 , entries * sizeof ( stack - > entries [ 0 ] ) ) ; <nl> - memset ( & stack - > head , 0 , sizeof ( stack - > head ) ) ; <nl> - <nl> - GPR_ASSERT ( sizeof ( stack - > entries - > atm ) = = sizeof ( stack - > entries - > contents ) ) ; <nl> - <nl> - / * Point the head at reserved dummy entry * / <nl> - stack - > head . contents . index = INVALID_ENTRY_INDEX ; <nl> - / * Fill in the pad and aba_ctr to avoid confusing memcheck tools * / <nl> - # ifdef GPR_ARCH_64 <nl> - stack - > head . contents . 
pad = 0 ; <nl> - # endif <nl> - stack - > head . contents . aba_ctr = 0 ; <nl> - return stack ; <nl> - } <nl> - <nl> - void gpr_stack_lockfree_destroy ( gpr_stack_lockfree * stack ) { <nl> - gpr_free_aligned ( stack - > entries ) ; <nl> - gpr_free ( stack ) ; <nl> - } <nl> - <nl> - int gpr_stack_lockfree_push ( gpr_stack_lockfree * stack , int entry ) { <nl> - lockfree_node head ; <nl> - lockfree_node newhead ; <nl> - lockfree_node curent ; <nl> - lockfree_node newent ; <nl> - <nl> - / * First fill in the entry ' s index and aba ctr for new head * / <nl> - newhead . contents . index = ( uint16_t ) entry ; <nl> - # ifdef GPR_ARCH_64 <nl> - / * Fill in the pad to avoid confusing memcheck tools * / <nl> - newhead . contents . pad = 0 ; <nl> - # endif <nl> - <nl> - / * Also post - increment the aba_ctr * / <nl> - curent . atm = gpr_atm_no_barrier_load ( & stack - > entries [ entry ] . atm ) ; <nl> - newhead . contents . aba_ctr = + + curent . contents . aba_ctr ; <nl> - gpr_atm_no_barrier_store ( & stack - > entries [ entry ] . atm , curent . atm ) ; <nl> - <nl> - do { <nl> - / * Atomically get the existing head value for use * / <nl> - head . atm = gpr_atm_no_barrier_load ( & ( stack - > head . atm ) ) ; <nl> - / * Point to it * / <nl> - newent . atm = gpr_atm_no_barrier_load ( & stack - > entries [ entry ] . atm ) ; <nl> - newent . contents . index = head . contents . index ; <nl> - gpr_atm_no_barrier_store ( & stack - > entries [ entry ] . atm , newent . atm ) ; <nl> - } while ( ! gpr_atm_rel_cas ( & ( stack - > head . atm ) , head . atm , newhead . atm ) ) ; <nl> - / * Use rel_cas above to make sure that entry index is set properly * / <nl> - return head . contents . index = = INVALID_ENTRY_INDEX ; <nl> - } <nl> - <nl> - int gpr_stack_lockfree_pop ( gpr_stack_lockfree * stack ) { <nl> - lockfree_node head ; <nl> - lockfree_node newhead ; <nl> - <nl> - do { <nl> - head . atm = gpr_atm_acq_load ( & ( stack - > head . atm ) ) ; <nl> - if ( head . contents . index = = INVALID_ENTRY_INDEX ) { <nl> - return - 1 ; <nl> - } <nl> - newhead . atm = <nl> - gpr_atm_no_barrier_load ( & ( stack - > entries [ head . contents . index ] . atm ) ) ; <nl> - <nl> - } while ( ! gpr_atm_no_barrier_cas ( & ( stack - > head . atm ) , head . atm , newhead . atm ) ) ; <nl> - <nl> - return head . contents . index ; <nl> - } <nl> deleted file mode 100644 <nl> index 337ecc2b17a . . 00000000000 <nl> mmm a / src / core / lib / support / stack_lockfree . h <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2015 gRPC authors . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * <nl> - * / <nl> - <nl> - # ifndef GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H <nl> - # define GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H <nl> - <nl> - # include < stddef . 
h > <nl> - <nl> - # ifdef __cplusplus <nl> - extern " C " { <nl> - # endif <nl> - <nl> - typedef struct gpr_stack_lockfree gpr_stack_lockfree ; <nl> - <nl> - / * This stack must specify the maximum number of entries to track . <nl> - The current implementation only allows up to 65534 entries * / <nl> - gpr_stack_lockfree * gpr_stack_lockfree_create ( size_t entries ) ; <nl> - void gpr_stack_lockfree_destroy ( gpr_stack_lockfree * stack ) ; <nl> - <nl> - / * Pass in a valid entry number for the next stack entry * / <nl> - / * Returns 1 if this is the first element on the stack , 0 otherwise * / <nl> - int gpr_stack_lockfree_push ( gpr_stack_lockfree * , int entry ) ; <nl> - <nl> - / * Returns - 1 on empty or the actual entry number * / <nl> - int gpr_stack_lockfree_pop ( gpr_stack_lockfree * stack ) ; <nl> - <nl> - # ifdef __cplusplus <nl> - } <nl> - # endif <nl> - <nl> - # endif / * GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H * / <nl> mmm a / src / python / grpcio / grpc_core_dependencies . py <nl> ppp b / src / python / grpcio / grpc_core_dependencies . py <nl> <nl> ' src / core / lib / support / log_windows . cc ' , <nl> ' src / core / lib / support / mpscq . cc ' , <nl> ' src / core / lib / support / murmur_hash . cc ' , <nl> - ' src / core / lib / support / stack_lockfree . cc ' , <nl> ' src / core / lib / support / string . cc ' , <nl> ' src / core / lib / support / string_posix . cc ' , <nl> ' src / core / lib / support / string_util_windows . cc ' , <nl> mmm a / test / core / support / BUILD <nl> ppp b / test / core / support / BUILD <nl> grpc_cc_test ( <nl> ] , <nl> ) <nl> <nl> - grpc_cc_test ( <nl> - name = " stack_lockfree_test " , <nl> - srcs = [ " stack_lockfree_test . cc " ] , <nl> - language = " C + + " , <nl> - deps = [ <nl> - " / / : gpr " , <nl> - " / / test / core / util : gpr_test_util " , <nl> - ] , <nl> - ) <nl> - <nl> grpc_cc_test ( <nl> name = " string_test " , <nl> srcs = [ " string_test . cc " ] , <nl> grpc_cc_test ( <nl> grpc_cc_test ( <nl> name = " memory_test " , <nl> srcs = [ " memory_test . cc " ] , <nl> + external_deps = [ <nl> + " gtest " , <nl> + ] , <nl> language = " C + + " , <nl> deps = [ <nl> " / / : grpc " , <nl> " / / test / core / util : gpr_test_util " , <nl> ] , <nl> - external_deps = [ <nl> - " gtest " , <nl> - ] , <nl> ) <nl> <nl> grpc_cc_test ( <nl> name = " vector_test " , <nl> srcs = [ " vector_test . cc " ] , <nl> + external_deps = [ <nl> + " gtest " , <nl> + ] , <nl> language = " C + + " , <nl> deps = [ <nl> " / / : grpc " , <nl> " / / test / core / util : gpr_test_util " , <nl> ] , <nl> - external_deps = [ <nl> - " gtest " , <nl> - ] , <nl> ) <nl> deleted file mode 100644 <nl> index e6d0c9b795c . . 00000000000 <nl> mmm a / test / core / support / stack_lockfree_test . cc <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2015 gRPC authors . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . 
<nl> - * <nl> - * / <nl> - <nl> - # include " src / core / lib / support / stack_lockfree . h " <nl> - <nl> - # include < stdlib . h > <nl> - <nl> - # include < grpc / support / alloc . h > <nl> - # include < grpc / support / log . h > <nl> - # include < grpc / support / sync . h > <nl> - # include < grpc / support / thd . h > <nl> - # include " test / core / util / test_config . h " <nl> - <nl> - / * max stack size supported * / <nl> - # define MAX_STACK_SIZE 65534 <nl> - <nl> - # define MAX_THREADS 32 <nl> - <nl> - static void test_serial_sized ( size_t size ) { <nl> - gpr_stack_lockfree * stack = gpr_stack_lockfree_create ( size ) ; <nl> - size_t i ; <nl> - size_t j ; <nl> - <nl> - / * First try popping empty * / <nl> - GPR_ASSERT ( gpr_stack_lockfree_pop ( stack ) = = - 1 ) ; <nl> - <nl> - / * Now add one item and check it * / <nl> - gpr_stack_lockfree_push ( stack , 3 ) ; <nl> - GPR_ASSERT ( gpr_stack_lockfree_pop ( stack ) = = 3 ) ; <nl> - GPR_ASSERT ( gpr_stack_lockfree_pop ( stack ) = = - 1 ) ; <nl> - <nl> - / * Now add repeatedly more items and check them * / <nl> - for ( i = 1 ; i < size ; i * = 2 ) { <nl> - for ( j = 0 ; j < = i ; j + + ) { <nl> - GPR_ASSERT ( gpr_stack_lockfree_push ( stack , ( int ) j ) = = ( j = = 0 ) ) ; <nl> - } <nl> - for ( j = 0 ; j < = i ; j + + ) { <nl> - GPR_ASSERT ( gpr_stack_lockfree_pop ( stack ) = = ( int ) ( i - j ) ) ; <nl> - } <nl> - GPR_ASSERT ( gpr_stack_lockfree_pop ( stack ) = = - 1 ) ; <nl> - } <nl> - <nl> - gpr_stack_lockfree_destroy ( stack ) ; <nl> - } <nl> - <nl> - static void test_serial ( ) { <nl> - size_t i ; <nl> - for ( i = 128 ; i < MAX_STACK_SIZE ; i * = 2 ) { <nl> - test_serial_sized ( i ) ; <nl> - } <nl> - test_serial_sized ( MAX_STACK_SIZE ) ; <nl> - } <nl> - <nl> - struct test_arg { <nl> - gpr_stack_lockfree * stack ; <nl> - int stack_size ; <nl> - int nthreads ; <nl> - int rank ; <nl> - int sum ; <nl> - } ; <nl> - <nl> - static void test_mt_body ( void * v ) { <nl> - struct test_arg * arg = ( struct test_arg * ) v ; <nl> - int lo , hi ; <nl> - int i ; <nl> - int res ; <nl> - lo = arg - > rank * arg - > stack_size / arg - > nthreads ; <nl> - hi = ( arg - > rank + 1 ) * arg - > stack_size / arg - > nthreads ; <nl> - for ( i = lo ; i < hi ; i + + ) { <nl> - gpr_stack_lockfree_push ( arg - > stack , i ) ; <nl> - if ( ( res = gpr_stack_lockfree_pop ( arg - > stack ) ) ! = - 1 ) { <nl> - arg - > sum + = res ; <nl> - } <nl> - } <nl> - while ( ( res = gpr_stack_lockfree_pop ( arg - > stack ) ) ! = - 1 ) { <nl> - arg - > sum + = res ; <nl> - } <nl> - } <nl> - <nl> - static void test_mt_sized ( size_t size , int nth ) { <nl> - gpr_stack_lockfree * stack ; <nl> - struct test_arg args [ MAX_THREADS ] ; <nl> - gpr_thd_id thds [ MAX_THREADS ] ; <nl> - int sum ; <nl> - int i ; <nl> - gpr_thd_options options = gpr_thd_options_default ( ) ; <nl> - <nl> - stack = gpr_stack_lockfree_create ( size ) ; <nl> - for ( i = 0 ; i < nth ; i + + ) { <nl> - args [ i ] . stack = stack ; <nl> - args [ i ] . stack_size = ( int ) size ; <nl> - args [ i ] . nthreads = nth ; <nl> - args [ i ] . rank = i ; <nl> - args [ i ] . sum = 0 ; <nl> - } <nl> - gpr_thd_options_set_joinable ( & options ) ; <nl> - for ( i = 0 ; i < nth ; i + + ) { <nl> - GPR_ASSERT ( gpr_thd_new ( & thds [ i ] , test_mt_body , & args [ i ] , & options ) ) ; <nl> - } <nl> - sum = 0 ; <nl> - for ( i = 0 ; i < nth ; i + + ) { <nl> - gpr_thd_join ( thds [ i ] ) ; <nl> - sum = sum + args [ i ] . 
sum ; <nl> - } <nl> - GPR_ASSERT ( ( unsigned ) sum = = ( ( unsigned ) size * ( size - 1 ) ) / 2 ) ; <nl> - gpr_stack_lockfree_destroy ( stack ) ; <nl> - } <nl> - <nl> - static void test_mt ( ) { <nl> - size_t size ; <nl> - int nth ; <nl> - for ( nth = 1 ; nth < MAX_THREADS ; nth + + ) { <nl> - for ( size = 128 ; size < MAX_STACK_SIZE ; size * = 2 ) { <nl> - test_mt_sized ( size , nth ) ; <nl> - } <nl> - test_mt_sized ( MAX_STACK_SIZE , nth ) ; <nl> - } <nl> - } <nl> - <nl> - int main ( int argc , char * * argv ) { <nl> - grpc_test_init ( argc , argv ) ; <nl> - test_serial ( ) ; <nl> - test_mt ( ) ; <nl> - return 0 ; <nl> - } <nl> mmm a / tools / doxygen / Doxyfile . c + + . internal <nl> ppp b / tools / doxygen / Doxyfile . c + + . internal <nl> src / core / lib / support / memory . h \ <nl> src / core / lib / support / mpscq . h \ <nl> src / core / lib / support / murmur_hash . h \ <nl> src / core / lib / support / spinlock . h \ <nl> - src / core / lib / support / stack_lockfree . h \ <nl> src / core / lib / support / string . h \ <nl> src / core / lib / support / string_windows . h \ <nl> src / core / lib / support / time_precise . h \ <nl> mmm a / tools / doxygen / Doxyfile . core . internal <nl> ppp b / tools / doxygen / Doxyfile . core . internal <nl> src / core / lib / support / mpscq . h \ <nl> src / core / lib / support / murmur_hash . cc \ <nl> src / core / lib / support / murmur_hash . h \ <nl> src / core / lib / support / spinlock . h \ <nl> - src / core / lib / support / stack_lockfree . cc \ <nl> - src / core / lib / support / stack_lockfree . h \ <nl> src / core / lib / support / string . cc \ <nl> src / core / lib / support / string . h \ <nl> src / core / lib / support / string_posix . cc \ <nl> mmm a / tools / run_tests / generated / sources_and_headers . json <nl> ppp b / tools / run_tests / generated / sources_and_headers . json <nl> <nl> " third_party " : false , <nl> " type " : " target " <nl> } , <nl> - { <nl> - " deps " : [ <nl> - " gpr " , <nl> - " gpr_test_util " <nl> - ] , <nl> - " headers " : [ ] , <nl> - " is_filegroup " : false , <nl> - " language " : " c " , <nl> - " name " : " gpr_stack_lockfree_test " , <nl> - " src " : [ <nl> - " test / core / support / stack_lockfree_test . cc " <nl> - ] , <nl> - " third_party " : false , <nl> - " type " : " target " <nl> - } , <nl> { <nl> " deps " : [ <nl> " gpr " , <nl> <nl> " src / core / lib / support / log_windows . cc " , <nl> " src / core / lib / support / mpscq . cc " , <nl> " src / core / lib / support / murmur_hash . cc " , <nl> - " src / core / lib / support / stack_lockfree . cc " , <nl> " src / core / lib / support / string . cc " , <nl> " src / core / lib / support / string_posix . cc " , <nl> " src / core / lib / support / string_util_windows . cc " , <nl> <nl> " src / core / lib / support / mpscq . h " , <nl> " src / core / lib / support / murmur_hash . h " , <nl> " src / core / lib / support / spinlock . h " , <nl> - " src / core / lib / support / stack_lockfree . h " , <nl> " src / core / lib / support / string . h " , <nl> " src / core / lib / support / string_windows . h " , <nl> " src / core / lib / support / time_precise . h " , <nl> <nl> " src / core / lib / support / mpscq . h " , <nl> " src / core / lib / support / murmur_hash . h " , <nl> " src / core / lib / support / spinlock . h " , <nl> - " src / core / lib / support / stack_lockfree . h " , <nl> " src / core / lib / support / string . h " , <nl> " src / core / lib / support / string_windows . 
h " , <nl> " src / core / lib / support / time_precise . h " , <nl> mmm a / tools / run_tests / generated / tests . json <nl> ppp b / tools / run_tests / generated / tests . json <nl> <nl> ] , <nl> " uses_polling " : false <nl> } , <nl> - { <nl> - " args " : [ ] , <nl> - " benchmark " : false , <nl> - " ci_platforms " : [ <nl> - " linux " , <nl> - " mac " , <nl> - " posix " , <nl> - " windows " <nl> - ] , <nl> - " cpu_cost " : 7 , <nl> - " exclude_configs " : [ ] , <nl> - " exclude_iomgrs " : [ ] , <nl> - " flaky " : false , <nl> - " gtest " : false , <nl> - " language " : " c " , <nl> - " name " : " gpr_stack_lockfree_test " , <nl> - " platforms " : [ <nl> - " linux " , <nl> - " mac " , <nl> - " posix " , <nl> - " windows " <nl> - ] , <nl> - " uses_polling " : false <nl> - } , <nl> { <nl> " args " : [ ] , <nl> " benchmark " : false , <nl>
Merge pull request from vjpai / destack
grpc/grpc
bc61c538a96df6881ade63a29eee1126f735342f
2017-11-28T21:05:21Z
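For context on the code this commit deletes: gpr_stack_lockfree implemented a Treiber stack over pre-allocated entry indices, storing an ABA counter alongside the head index so that a compare-and-swap against a stale head fails rather than corrupting the list. Below is a minimal standalone C++ sketch of that pattern, not the grpc implementation: it packs a 32-bit counter next to a 32-bit index in one 64-bit atomic, whereas the deleted code used 16-bit indices (hence its 65534-entry limit) and per-entry counters; all names and memory orders here are illustrative choices.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <memory>

class IndexStack {
 public:
  static constexpr uint32_t kInvalid = 0xFFFFFFFFu;  // empty-stack marker

  explicit IndexStack(size_t entries)
      : next_(new std::atomic<uint32_t>[entries]) {
    head_.store(Pack(kInvalid, 0), std::memory_order_relaxed);
  }

  void Push(uint32_t entry) {
    uint64_t head = head_.load(std::memory_order_relaxed);
    for (;;) {
      // Link the new top to the current top, then try to swing the head.
      next_[entry].store(Index(head), std::memory_order_relaxed);
      uint64_t new_head = Pack(entry, Aba(head) + 1);
      if (head_.compare_exchange_weak(head, new_head,
                                      std::memory_order_release,
                                      std::memory_order_relaxed)) {
        return;  // a failed CAS reloads `head` and the loop retries
      }
    }
  }

  // Returns kInvalid when empty (the removed C code returned -1).
  uint32_t Pop() {
    uint64_t head = head_.load(std::memory_order_acquire);
    for (;;) {
      uint32_t top = Index(head);
      if (top == kInvalid) return kInvalid;
      // The bumped ABA counter makes this CAS fail if another thread
      // popped `top` and pushed it back in the meantime.
      uint64_t new_head =
          Pack(next_[top].load(std::memory_order_relaxed), Aba(head) + 1);
      if (head_.compare_exchange_weak(head, new_head,
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire)) {
        return top;
      }
    }
  }

 private:
  // Layout: {aba counter : high 32 bits | top index : low 32 bits}.
  static uint64_t Pack(uint32_t index, uint32_t aba) {
    return (static_cast<uint64_t>(aba) << 32) | index;
  }
  static uint32_t Index(uint64_t head) { return static_cast<uint32_t>(head); }
  static uint32_t Aba(uint64_t head) { return static_cast<uint32_t>(head >> 32); }

  std::atomic<uint64_t> head_;
  std::unique_ptr<std::atomic<uint32_t>[]> next_;
};

int main() {
  IndexStack stack(8);
  stack.Push(3);
  stack.Push(5);
  assert(stack.Pop() == 5);
  assert(stack.Pop() == 3);
  assert(stack.Pop() == IndexStack::kInvalid);
}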
mmm a / src / share / configuration_monitor . hpp <nl> ppp b / src / share / configuration_monitor . hpp <nl> class configuration_monitor final { <nl> / / Methods <nl> <nl> configuration_monitor ( const std : : string & user_core_configuration_file_path , <nl> - const std : : string & system_core_configuration_file_path = constants : : get_system_core_configuration_file_path ( ) ) : user_core_configuration_file_path_ ( user_core_configuration_file_path ) , <nl> - system_core_configuration_file_path_ ( system_core_configuration_file_path ) { <nl> + const std : : string & system_core_configuration_file_path ) { <nl> std : : vector < std : : string > targets = { <nl> - user_core_configuration_file_path_ , <nl> - system_core_configuration_file_path_ , <nl> + user_core_configuration_file_path , <nl> + system_core_configuration_file_path , <nl> } ; <nl> <nl> file_monitor_ = std : : make_unique < file_monitor > ( targets ) ; <nl> <nl> - file_monitor_ - > register_stream_finished . connect ( [ this ] { <nl> - / / ` core_configuration_updated ` is enqueued by ` file_monitor : : start ` only if the file exists . <nl> - / / We have to enqueue it manually for when the file does not exist . <nl> + file_monitor_ - > file_changed . connect ( [ this , <nl> + user_core_configuration_file_path , <nl> + system_core_configuration_file_path ] ( auto & & changed_file_path , <nl> + auto & & file_body ) { <nl> + auto file_path = changed_file_path ; <nl> + <nl> + if ( filesystem : : exists ( user_core_configuration_file_path ) ) { <nl> + if ( changed_file_path = = system_core_configuration_file_path ) { <nl> + return ; <nl> + } <nl> + } else { <nl> + if ( changed_file_path = = user_core_configuration_file_path ) { <nl> + / / user_core_configuration_file_path is removed . <nl> + <nl> + if ( filesystem : : exists ( system_core_configuration_file_path ) ) { <nl> + file_path = system_core_configuration_file_path ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + logger : : get_logger ( ) . info ( " Load { 0 } . . . " , file_path ) ; <nl> + <nl> + auto c = std : : make_shared < core_configuration > ( file_path ) ; <nl> + <nl> + if ( core_configuration_ & & ! c - > is_loaded ( ) ) { <nl> + return ; <nl> + } <nl> + <nl> + if ( core_configuration_ & & core_configuration_ - > to_json ( ) = = c - > to_json ( ) ) { <nl> + return ; <nl> + } <nl> <nl> { <nl> std : : lock_guard < std : : mutex > lock ( core_configuration_mutex_ ) ; <nl> <nl> - if ( core_configuration_ ) { <nl> - / / ` core_configuration_updated ` is already called . <nl> - / / We do not need call the method again . <nl> - return ; <nl> - } <nl> + core_configuration_ = c ; <nl> } <nl> <nl> - core_configuration_file_changed ( ) ; <nl> - } ) ; <nl> + logger : : get_logger ( ) . info ( " core_configuration is updated . " ) ; <nl> <nl> - file_monitor_ - > file_changed . 
connect ( [ this ] ( auto & & file_path ) { <nl> - core_configuration_file_changed ( ) ; <nl> + core_configuration_updated ( c ) ; <nl> } ) ; <nl> } <nl> <nl> class configuration_monitor final { <nl> file_monitor_ = nullptr ; <nl> } <nl> <nl> - void start ( ) { <nl> - file_monitor_ - > start ( ) ; <nl> + void async_start ( ) { <nl> + file_monitor_ - > async_start ( ) ; <nl> } <nl> <nl> std : : shared_ptr < const core_configuration > get_core_configuration ( void ) const { <nl> class configuration_monitor final { <nl> } <nl> <nl> private : <nl> - void core_configuration_file_changed ( void ) { <nl> - std : : string file_path = system_core_configuration_file_path_ ; <nl> - if ( filesystem : : exists ( user_core_configuration_file_path_ ) ) { <nl> - file_path = user_core_configuration_file_path_ ; <nl> - } <nl> - <nl> - logger : : get_logger ( ) . info ( " Load { 0 } . . . " , file_path ) ; <nl> - <nl> - auto c = std : : make_shared < core_configuration > ( file_path ) ; <nl> - <nl> - if ( core_configuration_ & & ! c - > is_loaded ( ) ) { <nl> - return ; <nl> - } <nl> - <nl> - if ( core_configuration_ & & core_configuration_ - > to_json ( ) = = c - > to_json ( ) ) { <nl> - return ; <nl> - } <nl> - <nl> - { <nl> - std : : lock_guard < std : : mutex > lock ( core_configuration_mutex_ ) ; <nl> - <nl> - core_configuration_ = c ; <nl> - } <nl> - <nl> - logger : : get_logger ( ) . info ( " core_configuration is updated . " ) ; <nl> - <nl> - core_configuration_updated ( c ) ; <nl> - } <nl> - <nl> - std : : string user_core_configuration_file_path_ ; <nl> - std : : string system_core_configuration_file_path_ ; <nl> - <nl> std : : unique_ptr < file_monitor > file_monitor_ ; <nl> <nl> std : : shared_ptr < const core_configuration > core_configuration_ ; <nl> mmm a / tests / src / configuration_monitor / test . cpp <nl> ppp b / tests / src / configuration_monitor / test . cpp <nl> class test_configuration_monitor final { <nl> last_core_configuration_ = core_configuration ; <nl> } ) ; <nl> <nl> - configuration_monitor_ - > start ( ) ; <nl> + configuration_monitor_ - > async_start ( ) ; <nl> <nl> wait ( ) ; <nl> } <nl> TEST_CASE ( " configuration_monitor " ) { <nl> <nl> monitor . wait ( ) ; <nl> <nl> - REQUIRE ( monitor . get_count ( ) = = 2 ) ; <nl> - REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . get_show_in_menu_bar ( ) = = false ) ; <nl> + REQUIRE ( monitor . get_count ( ) = = 3 ) ; <nl> + REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . get_show_in_menu_bar ( ) = = true ) ; <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / Update system . json <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> - system ( " echo ' { \ " global \ " : { \ " show_in_menu_bar \ " : true } } ' > target / system . json " ) ; <nl> + system ( " echo ' { \ " global \ " : { \ " show_in_menu_bar \ " : false } } ' > target / system . json " ) ; <nl> <nl> monitor . wait ( ) ; <nl> <nl> - REQUIRE ( monitor . get_count ( ) = = 3 ) ; <nl> - REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . get_show_in_menu_bar ( ) = = true ) ; <nl> + REQUIRE ( monitor . get_count ( ) = = 4 ) ; <nl> + REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . 
get_show_in_menu_bar ( ) = = false ) ; <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / Update user . json <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> - system ( " echo ' { \ " global \ " : { \ " show_in_menu_bar \ " : false } } ' > target / user . json " ) ; <nl> + system ( " echo ' { \ " global \ " : { \ " show_in_menu_bar \ " : true } } ' > target / user . json " ) ; <nl> <nl> monitor . wait ( ) ; <nl> <nl> - REQUIRE ( monitor . get_count ( ) = = 4 ) ; <nl> - REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . get_show_in_menu_bar ( ) = = false ) ; <nl> + REQUIRE ( monitor . get_count ( ) = = 5 ) ; <nl> + REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . get_show_in_menu_bar ( ) = = true ) ; <nl> } <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> TEST_CASE ( " configuration_monitor " ) { <nl> REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . get_show_in_menu_bar ( ) = = false ) ; <nl> } <nl> <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / There are both user . json and system . json at start <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + { <nl> + system ( " rm - rf target " ) ; <nl> + system ( " mkdir - p target " ) ; <nl> + system ( " echo ' { \ " global \ " : { \ " show_in_menu_bar \ " : false } } ' > target / system . json " ) ; <nl> + system ( " echo ' { \ " global \ " : { \ " show_in_menu_bar \ " : false } } ' > target / user . json " ) ; <nl> + <nl> + test_configuration_monitor monitor ; <nl> + <nl> + REQUIRE ( monitor . get_count ( ) = = 1 ) ; <nl> + REQUIRE ( monitor . get_last_core_configuration ( ) - > get_global_configuration ( ) . get_show_in_menu_bar ( ) = = false ) ; <nl> + } <nl> + <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / Broken json <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl>
update for file_monitor changes
pqrs-org/Karabiner-Elements
4bc54e99e3f91b362e896e3de3945844545bc4f6
2018-08-22T11:09:26Z
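The rewritten file_changed handler above encodes a small precedence rule: the user configuration file wins while it exists, changes to the system file are ignored during that time, and only when the user file disappears does the monitor fall back to the system file. A standalone C++17 sketch of just that decision, with a function name of my own choosing (resolve_configuration_path is not a Karabiner API):

#include <filesystem>
#include <iostream>
#include <optional>
#include <string>

// Returns the path that should be (re)loaded after `changed` was touched,
// or nullopt when the change can be ignored outright.
std::optional<std::string> resolve_configuration_path(
    const std::string& changed,
    const std::string& user_path,
    const std::string& system_path) {
  if (std::filesystem::exists(user_path)) {
    if (changed == system_path) {
      return std::nullopt;  // system churn is irrelevant while the user file exists
    }
    return user_path;
  }
  if (changed == user_path && std::filesystem::exists(system_path)) {
    return system_path;  // the user file was just removed: fall back
  }
  return changed;
}

int main() {
  // With user.json present on disk, a change to system.json is ignored.
  auto path = resolve_configuration_path("system.json", "user.json", "system.json");
  std::cout << (path ? *path : std::string("(ignored)")) << "\n";
}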
mmm a / ChangeLog . rst <nl> ppp b / ChangeLog . rst <nl> <nl> <nl> Error ( ` godbolt < https : / / godbolt . org / z / GoxM4e > ` __ ) : <nl> <nl> - ` fmt / core . h : 1438 : 3 : error : static_assert failed due to requirement <nl> + ` ` fmt / core . h : 1438 : 3 : error : static_assert failed due to requirement <nl> ' fmt : : v7 : : formattable < how_about_no > ( ) ' " Cannot format an argument . <nl> To make type T formattable provide a formatter < T > specialization : <nl> https : / / fmt . dev / latest / api . html # udt " <nl> - . . . ` <nl> + . . . ` ` <nl> <nl> * Added the ` make_args_checked < https : / / fmt . dev / 7 . 1 . 0 / api . html # argument - lists > ` _ <nl> function template that allows you to write formatting functions with <nl>
Tweak markup
fmtlib/fmt
39bde329bd5edf05f3eef57bf679fd9aa70f7a39
2020-10-25T16:19:38Z
mmm a / tools / internal_ci / helper_scripts / prepare_build_macos_rc <nl> ppp b / tools / internal_ci / helper_scripts / prepare_build_macos_rc <nl> pip install virtualenv - - user python <nl> pip install - U Mako six tox setuptools twisted pyyaml - - user python <nl> export PYTHONPATH = / Library / Python / 3 . 4 / site - packages <nl> <nl> + # Install Python 3 . 7 <nl> + curl - O https : / / www . python . org / ftp / python / 3 . 7 . 0 / python - 3 . 7 . 0 - macosx10 . 9 . pkg <nl> + sudo installer - pkg . / python - 3 . 7 . 0 - macosx10 . 9 . pkg - target / <nl> + <nl> # set xcode version for Obj - C tests <nl> sudo xcode - select - switch / Applications / Xcode_9 . 2 . app / Contents / Developer / <nl> <nl> mmm a / tools / internal_ci / helper_scripts / prepare_build_windows . bat <nl> ppp b / tools / internal_ci / helper_scripts / prepare_build_windows . bat <nl> <nl> <nl> @ rem make sure msys binaries are preferred over cygwin binaries <nl> @ rem set path to python 2 . 7 <nl> - set PATH = C : \ tools \ msys64 \ usr \ bin ; C : \ Python27 ; % PATH % <nl> + set PATH = C : \ tools \ msys64 \ usr \ bin ; C : \ Python27 ; C : \ Python37 ; % PATH % <nl> <nl> @ rem If this is a PR using RUN_TESTS_FLAGS var , then add flags to filter tests <nl> if defined KOKORO_GITHUB_PULL_REQUEST_NUMBER if defined RUN_TESTS_FLAGS ( <nl> netsh interface ip add dnsservers " Local Area Connection 8 " 8 . 8 . 4 . 4 index = 3 <nl> @ rem Needed for big_query_utils <nl> python - m pip install google - api - python - client <nl> <nl> + @ rem Install Python 3 . 7 <nl> + chocolatey install - y - r python3 - - version 3 . 7 <nl> + <nl> @ rem Disable some unwanted dotnet options <nl> set NUGET_XMLDOC_MODE = skip <nl> set DOTNET_SKIP_FIRST_TIME_EXPERIENCE = true <nl>
Install Python3 . 7 on Windows and MacOS
grpc/grpc
3dce7cb69d935ee9c3ba031195e1bf291231d947
2018-07-09T18:34:12Z
mmm a / test / functional / wallet_bumpfee . py <nl> ppp b / test / functional / wallet_bumpfee . py <nl> def test_unconfirmed_not_spendable ( rbf_node , rbf_node_address ) : <nl> <nl> <nl> def test_bumpfee_metadata ( rbf_node , dest_address ) : <nl> - rbfid = rbf_node . sendtoaddress ( dest_address , Decimal ( " 0 . 00100000 " ) , " comment value " , " to value " ) <nl> + assert ( rbf_node . getbalance ( ) < 49 ) <nl> + rbf_node . generatetoaddress ( 101 , rbf_node . getnewaddress ( ) ) <nl> + rbfid = rbf_node . sendtoaddress ( dest_address , 49 , " comment value " , " to value " ) <nl> bumped_tx = rbf_node . bumpfee ( rbfid ) <nl> bumped_wtx = rbf_node . gettransaction ( bumped_tx [ " txid " ] ) <nl> assert_equal ( bumped_wtx [ " comment " ] , " comment value " ) <nl>
Merge : wallet_bumpfee . py : Make sure coin selection produces change
bitcoin/bitcoin
6c21a801f3df4942d09d5a33d3dab04807f7bb37
2019-07-02T12:49:26Z
mmm a / protobuf . bzl <nl> ppp b / protobuf . bzl <nl> def internal_gen_well_known_protos_java ( srcs ) : <nl> Args : <nl> srcs : the well known protos <nl> " " " <nl> - root = Label ( " % s / / protobuf_java " % ( REPOSITORY_NAME ) ) . workspace_root <nl> - pkg = PACKAGE_NAME + " / " if PACKAGE_NAME else " " <nl> + root = Label ( " % s / / protobuf_java " % ( native . repository_name ( ) ) ) . workspace_root <nl> + pkg = native . package_name ( ) + " / " if native . package_name ( ) else " " <nl> if root = = " " : <nl> include = " - I % ssrc " % pkg <nl> else : <nl>
Remove deprecated PACKAGE_NAME and REPOSITORY_NAME usage ( )
protocolbuffers/protobuf
4fcb36c51c1c0355c2a23b1f06ec583f2428cd30
2018-05-21T20:48:10Z
mmm a / include / swift / AST / FileUnit . h <nl> ppp b / include / swift / AST / FileUnit . h <nl> class FileUnit : public DeclContext { <nl> const ModuleDecl * importedModule , <nl> SmallSetVector < Identifier , 4 > & spiGroups ) const { } ; <nl> <nl> + virtual Optional < Fingerprint > <nl> + loadFingerprint ( const IterableDeclContext * IDC ) const { return None ; } <nl> + <nl> protected : <nl> / / / Look up an operator declaration . Do not call directly , use <nl> / / / \ c DirectOperatorLookupRequest instead . <nl> mmm a / include / swift / AST / LazyResolver . h <nl> ppp b / include / swift / AST / LazyResolver . h <nl> class alignas ( void * ) LazyMemberLoader { <nl> loadNamedMembers ( const IterableDeclContext * IDC , DeclBaseName N , <nl> uint64_t contextData ) = 0 ; <nl> <nl> - / / / Returns the fingerprint associated with the given iterable decl context , <nl> - / / / or \ c None if no such fingerprint is available . <nl> - virtual Optional < Fingerprint > <nl> - loadFingerprint ( const IterableDeclContext * IDC ) = 0 ; <nl> - <nl> / / / Populates the given vector with all conformances for \ p D . <nl> / / / <nl> / / / The implementation should \ em not call setConformances on \ p D . <nl> mmm a / include / swift / AST / Module . h <nl> ppp b / include / swift / AST / Module . h <nl> class ModuleDecl : public DeclContext , public TypeDecl { <nl> ObjCSelector selector , <nl> SmallVectorImpl < AbstractFunctionDecl * > & results ) const ; <nl> <nl> + Optional < Fingerprint > <nl> + loadFingerprint ( const IterableDeclContext * IDC ) const ; <nl> + <nl> / / / Find all SPI names imported from \ p importedModule by this module , <nl> / / / collecting the identifiers in \ p spiGroups . <nl> void lookupImportedSPIGroups ( <nl> mmm a / include / swift / Serialization / SerializedModuleLoader . h <nl> ppp b / include / swift / Serialization / SerializedModuleLoader . h <nl> class SerializedASTFile final : public LoadedFile { <nl> ObjCSelector selector , <nl> SmallVectorImpl < AbstractFunctionDecl * > & results ) const override ; <nl> <nl> + Optional < Fingerprint > <nl> + loadFingerprint ( const IterableDeclContext * IDC ) const override ; <nl> + <nl> virtual void <nl> lookupImportedSPIGroups ( <nl> const ModuleDecl * importedModule , <nl> mmm a / lib / AST / DeclContext . cpp <nl> ppp b / lib / AST / DeclContext . cpp <nl> IterableDeclContext : : castDeclToIterableDeclContext ( const Decl * D ) { <nl> <nl> Optional < Fingerprint > IterableDeclContext : : getBodyFingerprint ( ) const { <nl> auto & ctx = getASTContext ( ) ; <nl> - / / If this decl comes from a serialized module , grab its fingerprint from <nl> - / / the file . <nl> - if ( ! getAsGenericContext ( ) - > getParentSourceFile ( ) ) { <nl> - auto ci = ctx . getOrCreateLazyIterableContextData ( this , <nl> - / * lazyLoader = * / nullptr ) ; <nl> - return ci - > loader - > loadFingerprint ( this ) ; <nl> - } <nl> auto mutableThis = const_cast < IterableDeclContext * > ( this ) ; <nl> return evaluateOrDefault ( ctx . evaluator , ParseMembersRequest { mutableThis } , <nl> FingerprintAndMembers ( ) ) <nl> mmm a / lib / AST / FrontendSourceFileDepGraphFactory . cpp <nl> ppp b / lib / AST / FrontendSourceFileDepGraphFactory . cpp <nl> ModuleDepGraphFactory : : ModuleDepGraphFactory ( const ModuleDecl * Mod , <nl> bool emitDot ) <nl> : AbstractSourceFileDepGraphFactory ( Mod - > getASTContext ( ) . 
hadError ( ) , <nl> Mod - > getNameStr ( ) , <nl> - Fingerprint ( std : : string { Fingerprint : : DIGEST_LENGTH , ' 0 ' } ) , <nl> + Fingerprint ( " 00000000000000000000000000000000 " ) , <nl> emitDot , <nl> Mod - > getASTContext ( ) . Diags ) , <nl> Mod ( Mod ) { } <nl> mmm a / lib / AST / Module . cpp <nl> ppp b / lib / AST / Module . cpp <nl> void ModuleDecl : : lookupObjCMethods ( <nl> FORWARD ( lookupObjCMethods , ( selector , results ) ) ; <nl> } <nl> <nl> + Optional < Fingerprint > <nl> + ModuleDecl : : loadFingerprint ( const IterableDeclContext * IDC ) const { <nl> + for ( auto file : getFiles ( ) ) { <nl> + if ( auto FP = file - > loadFingerprint ( IDC ) ) <nl> + return FP ; <nl> + } <nl> + return None ; <nl> + } <nl> + <nl> void ModuleDecl : : lookupImportedSPIGroups ( <nl> const ModuleDecl * importedModule , <nl> llvm : : SmallSetVector < Identifier , 4 > & spiGroups ) const { <nl> mmm a / lib / ClangImporter / ClangImporter . cpp <nl> ppp b / lib / ClangImporter / ClangImporter . cpp <nl> void ClangImporter : : Implementation : : lookupAllObjCMembers ( <nl> } <nl> } <nl> <nl> - Optional < Fingerprint > <nl> - ClangImporter : : Implementation : : loadFingerprint ( const IterableDeclContext * ) { <nl> - / / Clang decls are not fingerprinted in Swift . <nl> - return None ; <nl> - } <nl> - <nl> TinyPtrVector < ValueDecl * > <nl> ClangImporter : : Implementation : : loadNamedMembers ( <nl> const IterableDeclContext * IDC , DeclBaseName N , uint64_t contextData ) { <nl> mmm a / lib / ClangImporter / ImporterImpl . h <nl> ppp b / lib / ClangImporter / ImporterImpl . h <nl> class LLVM_LIBRARY_VISIBILITY ClangImporter : : Implementation <nl> loadNamedMembers ( const IterableDeclContext * IDC , DeclBaseName N , <nl> uint64_t contextData ) override ; <nl> <nl> - virtual Optional < Fingerprint > <nl> - loadFingerprint ( const IterableDeclContext * IDC ) override ; <nl> - <nl> private : <nl> void <nl> loadAllMembersOfObjcContainer ( Decl * D , <nl> mmm a / lib / Parse / ParseRequests . cpp <nl> ppp b / lib / Parse / ParseRequests . cpp <nl> ParseMembersRequest : : evaluate ( Evaluator & evaluator , <nl> } <nl> } <nl> <nl> - return FingerprintAndMembers { None , ctx . AllocateCopy ( members ) } ; <nl> + Optional < Fingerprint > fp = None ; <nl> + if ( ! idc - > getDecl ( ) - > isImplicit ( ) ) { <nl> + fp = idc - > getDecl ( ) - > getModuleContext ( ) - > loadFingerprint ( idc ) ; <nl> + } <nl> + return FingerprintAndMembers { fp , ctx . AllocateCopy ( members ) } ; <nl> } <nl> <nl> unsigned bufferID = * sf - > getBufferID ( ) ; <nl> mmm a / lib / Serialization / ModuleFile . cpp <nl> ppp b / lib / Serialization / ModuleFile . cpp <nl> void ModuleFile : : loadDerivativeFunctionConfigurations ( <nl> } <nl> <nl> Optional < Fingerprint > <nl> - ModuleFile : : loadFingerprint ( const IterableDeclContext * IDC ) { <nl> + ModuleFile : : loadFingerprint ( const IterableDeclContext * IDC ) const { <nl> PrettyStackTraceDecl trace ( " loading fingerprints for " , IDC - > getDecl ( ) ) ; <nl> <nl> assert ( IDC - > wasDeserialized ( ) ) ; <nl> mmm a / lib / Serialization / ModuleFile . h <nl> ppp b / lib / Serialization / ModuleFile . 
h <nl> class ModuleFile <nl> loadNamedMembers ( const IterableDeclContext * IDC , DeclBaseName N , <nl> uint64_t contextData ) override ; <nl> <nl> - virtual Optional < Fingerprint > <nl> - loadFingerprint ( const IterableDeclContext * IDC ) override ; <nl> - <nl> virtual void <nl> loadAllConformances ( const Decl * D , uint64_t contextData , <nl> SmallVectorImpl < ProtocolConformance * > & Conforms ) override ; <nl> class ModuleFile <nl> Optional < StringRef > getGroupNameByUSR ( StringRef USR ) const ; <nl> Optional < BasicDeclLocs > getBasicDeclLocsForDecl ( const Decl * D ) const ; <nl> Identifier getDiscriminatorForPrivateValue ( const ValueDecl * D ) ; <nl> + Optional < Fingerprint > loadFingerprint ( const IterableDeclContext * IDC ) const ; <nl> + <nl> <nl> / / MARK : Deserialization interface <nl> <nl> mmm a / lib / Serialization / SerializedModuleLoader . cpp <nl> ppp b / lib / Serialization / SerializedModuleLoader . cpp <nl> void SerializedASTFile : : lookupObjCMethods ( <nl> File . lookupObjCMethods ( selector , results ) ; <nl> } <nl> <nl> + Optional < Fingerprint > <nl> + SerializedASTFile : : loadFingerprint ( const IterableDeclContext * IDC ) const { <nl> + return File . loadFingerprint ( IDC ) ; <nl> + } <nl> + <nl> void SerializedASTFile : : lookupImportedSPIGroups ( <nl> const ModuleDecl * importedModule , <nl> llvm : : SmallSetVector < Identifier , 4 > & spiGroups ) const { <nl>
Migrate loadFingerprint onto ModuleDecl and Friends
apple/swift
883902411b285568e17f98545729b925ea1254d7
2020-11-18T20:20:14Z
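One hunk here rewards a second look: Fingerprint(std::string{Fingerprint::DIGEST_LENGTH, '0'}) became a literal of 32 zero characters. The commit message does not say why, but the likely culprit is a classic C++ pitfall: braced std::string{32, '0'} selects the initializer_list<char> constructor and yields the two-character string " 0" (char 32 is a space), while only parenthesized std::string(32, '0') selects the (count, char) constructor. A minimal demonstration, with kDigestLength standing in for Fingerprint::DIGEST_LENGTH:

#include <cassert>
#include <string>

int main() {
  constexpr int kDigestLength = 32;  // stand-in for Fingerprint::DIGEST_LENGTH

  // Braces pick initializer_list<char>{char(32), '0'}: the string " 0".
  std::string braced{kDigestLength, '0'};
  assert(braced.size() == 2);
  assert(braced == " 0");

  // Parentheses pick the (count, char) constructor: 32 '0' characters.
  std::string counted(kDigestLength, '0');
  assert(counted.size() == 32);
  assert(counted == "00000000000000000000000000000000");
}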
new file mode 100644 <nl> index 00000000000 . . 804d94753a2 <nl> mmm / dev / null <nl> ppp b / src / hydrogen - deoptimizing - mark . cc <nl> <nl> + / / Copyright 2013 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + # include " hydrogen - deoptimizing - mark . h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + <nl> + void HPropagateDeoptimizingMarkPhase : : MarkAsDeoptimizing ( ) { <nl> + HBasicBlock * block = graph ( ) - > entry_block ( ) ; <nl> + ZoneList < HBasicBlock * > stack ( graph ( ) - > blocks ( ) - > length ( ) , zone ( ) ) ; <nl> + while ( block ! = NULL ) { <nl> + const ZoneList < HBasicBlock * > * dominated_blocks ( block - > dominated_blocks ( ) ) ; <nl> + if ( ! dominated_blocks - > is_empty ( ) ) { <nl> + if ( block - > IsDeoptimizing ( ) ) { <nl> + for ( int i = 0 ; i < dominated_blocks - > length ( ) ; + + i ) { <nl> + dominated_blocks - > at ( i ) - > MarkAsDeoptimizing ( ) ; <nl> + } <nl> + } <nl> + for ( int i = 1 ; i < dominated_blocks - > length ( ) ; + + i ) { <nl> + stack . Add ( dominated_blocks - > at ( i ) , zone ( ) ) ; <nl> + } <nl> + block = dominated_blocks - > at ( 0 ) ; <nl> + } else if ( ! stack . is_empty ( ) ) { <nl> + / / Pop next block from stack . <nl> + block = stack . RemoveLast ( ) ; <nl> + } else { <nl> + / / All blocks processed . <nl> + block = NULL ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + void HPropagateDeoptimizingMarkPhase : : NullifyUnreachableInstructions ( ) { <nl> + if ( ! 
FLAG_unreachable_code_elimination ) return ; <nl> + for ( int i = 0 ; i < graph ( ) - > blocks ( ) - > length ( ) ; + + i ) { <nl> + HBasicBlock * block = graph ( ) - > blocks ( ) - > at ( i ) ; <nl> + bool nullify = false ; <nl> + const ZoneList < HBasicBlock * > * predecessors = block - > predecessors ( ) ; <nl> + int predecessors_length = predecessors - > length ( ) ; <nl> + bool all_predecessors_deoptimizing = ( predecessors_length > 0 ) ; <nl> + for ( int j = 0 ; j < predecessors_length ; + + j ) { <nl> + if ( ! predecessors - > at ( j ) - > IsDeoptimizing ( ) ) { <nl> + all_predecessors_deoptimizing = false ; <nl> + break ; <nl> + } <nl> + } <nl> + if ( all_predecessors_deoptimizing ) nullify = true ; <nl> + for ( HInstructionIterator it ( block ) ; ! it . Done ( ) ; it . Advance ( ) ) { <nl> + HInstruction * instr = it . Current ( ) ; <nl> + / / Leave the basic structure of the graph intact . <nl> + if ( instr - > IsBlockEntry ( ) ) continue ; <nl> + if ( instr - > IsControlInstruction ( ) ) continue ; <nl> + if ( instr - > IsSimulate ( ) ) continue ; <nl> + if ( instr - > IsEnterInlined ( ) ) continue ; <nl> + if ( instr - > IsLeaveInlined ( ) ) continue ; <nl> + if ( nullify ) { <nl> + HInstruction * last_dummy = NULL ; <nl> + for ( int j = 0 ; j < instr - > OperandCount ( ) ; + + j ) { <nl> + HValue * operand = instr - > OperandAt ( j ) ; <nl> + / / Insert an HDummyUse for each operand , unless the operand <nl> + / / is an HDummyUse itself . If it ' s even from the same block , <nl> + / / remember it as a potential replacement for the instruction . <nl> + if ( operand - > IsDummyUse ( ) ) { <nl> + if ( operand - > block ( ) = = instr - > block ( ) & & <nl> + last_dummy = = NULL ) { <nl> + last_dummy = HInstruction : : cast ( operand ) ; <nl> + } <nl> + continue ; <nl> + } <nl> + if ( operand - > IsControlInstruction ( ) ) { <nl> + / / Inserting a dummy use for a value that ' s not defined anywhere <nl> + / / will fail . Some instructions define fake inputs on such <nl> + / / values as control flow dependencies . <nl> + continue ; <nl> + } <nl> + HDummyUse * dummy = new ( graph ( ) - > zone ( ) ) HDummyUse ( operand ) ; <nl> + dummy - > InsertBefore ( instr ) ; <nl> + last_dummy = dummy ; <nl> + } <nl> + if ( last_dummy = = NULL ) last_dummy = graph ( ) - > GetConstant1 ( ) ; <nl> + instr - > DeleteAndReplaceWith ( last_dummy ) ; <nl> + continue ; <nl> + } <nl> + if ( instr - > IsSoftDeoptimize ( ) ) { <nl> + ASSERT ( block - > IsDeoptimizing ( ) ) ; <nl> + nullify = true ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + void HPropagateDeoptimizingMarkPhase : : Run ( ) { <nl> + / / Skip this phase if there is nothing to be done anyway . <nl> + if ( ! graph ( ) - > has_soft_deoptimize ( ) ) return ; <nl> + MarkAsDeoptimizing ( ) ; <nl> + NullifyUnreachableInstructions ( ) ; <nl> + } <nl> + <nl> + } } / / namespace v8 : : internal <nl> new file mode 100644 <nl> index 00000000000 . . 7d6e6e4bda3 <nl> mmm / dev / null <nl> ppp b / src / hydrogen - deoptimizing - mark . h <nl> <nl> + / / Copyright 2013 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . 
<nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + # ifndef V8_HYDROGEN_DEOPTIMIZING_MARK_H_ <nl> + # define V8_HYDROGEN_DEOPTIMIZING_MARK_H_ <nl> + <nl> + # include " hydrogen . h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + <nl> + <nl> + / / Mark all blocks that are dominated by an unconditional soft deoptimize to <nl> + / / prevent code motion across those blocks . <nl> + class HPropagateDeoptimizingMarkPhase : public HPhase { <nl> + public : <nl> + explicit HPropagateDeoptimizingMarkPhase ( HGraph * graph ) <nl> + : HPhase ( " H_Propagate deoptimizing mark " , graph ) { } <nl> + <nl> + void Run ( ) ; <nl> + <nl> + private : <nl> + void MarkAsDeoptimizing ( ) ; <nl> + void NullifyUnreachableInstructions ( ) ; <nl> + <nl> + DISALLOW_COPY_AND_ASSIGN ( HPropagateDeoptimizingMarkPhase ) ; <nl> + } ; <nl> + <nl> + <nl> + } } / / namespace v8 : : internal <nl> + <nl> + # endif / / V8_HYDROGEN_DEOPTIMIZING_MARK_H_ <nl> mmm a / src / hydrogen . cc <nl> ppp b / src / hydrogen . cc <nl> <nl> # include " hydrogen - canonicalize . h " <nl> # include " hydrogen - dce . h " <nl> # include " hydrogen - dehoist . h " <nl> + # include " hydrogen - deoptimizing - mark . h " <nl> # include " hydrogen - environment - liveness . h " <nl> # include " hydrogen - escape - analysis . h " <nl> # include " hydrogen - infer - representation . h " <nl> void HGraph : : AssignDominators ( ) { <nl> } <nl> <nl> <nl> - / / Mark all blocks that are dominated by an unconditional soft deoptimize to <nl> - / / prevent code motion across those blocks . <nl> - void HGraph : : PropagateDeoptimizingMark ( ) { <nl> - HPhase phase ( " H_Propagate deoptimizing mark " , this ) ; <nl> - / / Skip this phase if there is nothing to be done anyway . <nl> - if ( ! 
has_soft_deoptimize ( ) ) return ; <nl> - MarkAsDeoptimizingRecursively ( entry_block ( ) ) ; <nl> - NullifyUnreachableInstructions ( ) ; <nl> - } <nl> - <nl> - <nl> - void HGraph : : MarkAsDeoptimizingRecursively ( HBasicBlock * block ) { <nl> - for ( int i = 0 ; i < block - > dominated_blocks ( ) - > length ( ) ; + + i ) { <nl> - HBasicBlock * dominated = block - > dominated_blocks ( ) - > at ( i ) ; <nl> - if ( block - > IsDeoptimizing ( ) ) dominated - > MarkAsDeoptimizing ( ) ; <nl> - MarkAsDeoptimizingRecursively ( dominated ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> - void HGraph : : NullifyUnreachableInstructions ( ) { <nl> - if ( ! FLAG_unreachable_code_elimination ) return ; <nl> - int block_count = blocks_ . length ( ) ; <nl> - for ( int i = 0 ; i < block_count ; + + i ) { <nl> - HBasicBlock * block = blocks_ . at ( i ) ; <nl> - bool nullify = false ; <nl> - const ZoneList < HBasicBlock * > * predecessors = block - > predecessors ( ) ; <nl> - int predecessors_length = predecessors - > length ( ) ; <nl> - bool all_predecessors_deoptimizing = ( predecessors_length > 0 ) ; <nl> - for ( int j = 0 ; j < predecessors_length ; + + j ) { <nl> - if ( ! predecessors - > at ( j ) - > IsDeoptimizing ( ) ) { <nl> - all_predecessors_deoptimizing = false ; <nl> - break ; <nl> - } <nl> - } <nl> - if ( all_predecessors_deoptimizing ) nullify = true ; <nl> - for ( HInstructionIterator it ( block ) ; ! it . Done ( ) ; it . Advance ( ) ) { <nl> - HInstruction * instr = it . Current ( ) ; <nl> - / / Leave the basic structure of the graph intact . <nl> - if ( instr - > IsBlockEntry ( ) ) continue ; <nl> - if ( instr - > IsControlInstruction ( ) ) continue ; <nl> - if ( instr - > IsSimulate ( ) ) continue ; <nl> - if ( instr - > IsEnterInlined ( ) ) continue ; <nl> - if ( instr - > IsLeaveInlined ( ) ) continue ; <nl> - if ( nullify ) { <nl> - HInstruction * last_dummy = NULL ; <nl> - for ( int j = 0 ; j < instr - > OperandCount ( ) ; + + j ) { <nl> - HValue * operand = instr - > OperandAt ( j ) ; <nl> - / / Insert an HDummyUse for each operand , unless the operand <nl> - / / is an HDummyUse itself . If it ' s even from the same block , <nl> - / / remember it as a potential replacement for the instruction . <nl> - if ( operand - > IsDummyUse ( ) ) { <nl> - if ( operand - > block ( ) = = instr - > block ( ) & & <nl> - last_dummy = = NULL ) { <nl> - last_dummy = HInstruction : : cast ( operand ) ; <nl> - } <nl> - continue ; <nl> - } <nl> - if ( operand - > IsControlInstruction ( ) ) { <nl> - / / Inserting a dummy use for a value that ' s not defined anywhere <nl> - / / will fail . Some instructions define fake inputs on such <nl> - / / values as control flow dependencies . <nl> - continue ; <nl> - } <nl> - HDummyUse * dummy = new ( zone ( ) ) HDummyUse ( operand ) ; <nl> - dummy - > InsertBefore ( instr ) ; <nl> - last_dummy = dummy ; <nl> - } <nl> - if ( last_dummy = = NULL ) last_dummy = GetConstant1 ( ) ; <nl> - instr - > DeleteAndReplaceWith ( last_dummy ) ; <nl> - continue ; <nl> - } <nl> - if ( instr - > IsSoftDeoptimize ( ) ) { <nl> - ASSERT ( block - > IsDeoptimizing ( ) ) ; <nl> - nullify = true ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - <nl> - <nl> bool HGraph : : CheckArgumentsPhiUses ( ) { <nl> int block_count = blocks_ . 
length ( ) ; <nl> for ( int i = 0 ; i < block_count ; + + i ) { <nl> bool HGraph : : Optimize ( SmartArrayPointer < char > * bailout_reason ) { <nl> Run < HEnvironmentLivenessAnalysisPhase > ( ) ; <nl> } <nl> <nl> - PropagateDeoptimizingMark ( ) ; <nl> + Run < HPropagateDeoptimizingMarkPhase > ( ) ; <nl> if ( ! CheckConstPhiUses ( ) ) { <nl> * bailout_reason = SmartArrayPointer < char > ( StrDup ( <nl> " Unsupported phi use of const variable " ) ) ; <nl> mmm a / src / hydrogen . h <nl> ppp b / src / hydrogen . h <nl> class HGraph : public ZoneObject { <nl> void AssignDominators ( ) ; <nl> void SetupInformativeDefinitions ( ) ; <nl> void RestoreActualValues ( ) ; <nl> - void PropagateDeoptimizingMark ( ) ; <nl> <nl> / / Returns false if there are phi - uses of the arguments - object <nl> / / which are not supported by the optimizing compiler . <nl> class HGraph : public ZoneObject { <nl> phase . Run ( ) ; <nl> } <nl> <nl> - void MarkAsDeoptimizingRecursively ( HBasicBlock * block ) ; <nl> - void NullifyUnreachableInstructions ( ) ; <nl> void RecursivelyMarkPhiDeoptimizeOnUndefined ( HPhi * phi ) ; <nl> void CheckForBackEdge ( HBasicBlock * block , HBasicBlock * successor ) ; <nl> void SetupInformativeDefinitionsInBlock ( HBasicBlock * block ) ; <nl> mmm a / tools / gyp / v8 . gyp <nl> ppp b / tools / gyp / v8 . gyp <nl> <nl> ' . . / . . / src / hydrogen - dce . h ' , <nl> ' . . / . . / src / hydrogen - dehoist . cc ' , <nl> ' . . / . . / src / hydrogen - dehoist . h ' , <nl> + ' . . / . . / src / hydrogen - deoptimizing - mark . cc ' , <nl> + ' . . / . . / src / hydrogen - deoptimizing - mark . h ' , <nl> ' . . / . . / src / hydrogen - environment - liveness . cc ' , <nl> ' . . / . . / src / hydrogen - environment - liveness . h ' , <nl> ' . . / . . / src / hydrogen - escape - analysis . cc ' , <nl>
Turn propagate deoptimizing mark into a proper HPhase .
v8/v8
fb77805ab7aeab09785c0185b75a439fcdd33ad0
2013-07-15T09:53:00Z
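Besides moving the pass into the HPhase framework, this commit quietly replaces the recursive MarkAsDeoptimizingRecursively with an explicit-stack walk of the dominator tree, removing the risk of native stack overflow on very deep graphs. A simplified standalone sketch of that traversal, using plain std types in place of v8's HBasicBlock and ZoneList:

#include <vector>

struct Block {
  bool deoptimizing = false;
  std::vector<Block*> dominated;  // blocks immediately dominated by this one
};

// Propagate the deoptimizing mark down the dominator tree without recursion.
void MarkAsDeoptimizing(Block* entry) {
  std::vector<Block*> stack;
  Block* block = entry;
  while (block != nullptr) {
    if (!block->dominated.empty()) {
      if (block->deoptimizing) {
        for (Block* dominated : block->dominated) {
          dominated->deoptimizing = true;
        }
      }
      // Descend into the first dominated block; queue the rest for later.
      for (size_t i = 1; i < block->dominated.size(); ++i) {
        stack.push_back(block->dominated[i]);
      }
      block = block->dominated[0];
    } else if (!stack.empty()) {
      block = stack.back();  // pop the next pending block
      stack.pop_back();
    } else {
      block = nullptr;  // all blocks processed
    }
  }
}

int main() {
  Block root, left, right;
  root.deoptimizing = true;
  root.dominated = {&left, &right};
  MarkAsDeoptimizing(&root);
  return (left.deoptimizing && right.deoptimizing) ? 0 : 1;
}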
mmm a / cmake / Utils . cmake <nl> ppp b / cmake / Utils . cmake <nl> endfunction ( set_default_configuration_release ) <nl> <nl> function ( format_gencode_flags flags out ) <nl> foreach ( ver $ { flags } ) <nl> - set ( $ { out } " $ { $ { out } } - gencode arch = compute_ $ { ver } , code = sm_ $ { ver } ; " PARENT_SCOPE ) <nl> + set ( $ { out } " $ { $ { out } } - gencode arch = compute_ $ { ver } , code = sm_ $ { ver } ; " ) <nl> endforeach ( ) <nl> + set ( $ { out } " $ { $ { out } } " PARENT_SCOPE ) <nl> endfunction ( format_gencode_flags flags ) <nl> \ No newline at end of file <nl> mmm a / plugin / updater_gpu / README . md <nl> ppp b / plugin / updater_gpu / README . md <nl> $ make PLUGIN_UPDATER_GPU = ON GTEST_PATH = $ { CACHE_PREFIX } test <nl> ` ` ` <nl> <nl> # # Changelog <nl> - # # # # # 2017 / 6 / 26 <nl> + # # # # # 2017 / 7 / 10 <nl> + * Memory performance improved 4x for gpu_hist <nl> <nl> + # # # # # 2017 / 6 / 26 <nl> * Change API to use tree_method parameter <nl> * Increase required cmake version to 3 . 5 <nl> * Add compute arch 3 . 5 to default archs <nl> mmm a / plugin / updater_gpu / benchmark / benchmark . py <nl> ppp b / plugin / updater_gpu / benchmark / benchmark . py <nl> def run_benchmark ( args , gpu_algorithm , cpu_algorithm ) : <nl> <nl> param = { ' objective ' : ' binary : logistic ' , <nl> ' max_depth ' : 6 , <nl> - ' silent ' : 1 , <nl> + ' silent ' : 0 , <nl> ' n_gpus ' : 1 , <nl> ' gpu_id ' : 0 , <nl> ' eval_metric ' : ' auc ' } <nl> def run_benchmark ( args , gpu_algorithm , cpu_algorithm ) : <nl> xgb . train ( param , dtrain , args . iterations ) <nl> print ( " Time : % s seconds " % ( str ( time . time ( ) - tmp ) ) ) <nl> <nl> + param [ ' silent ' ] = 1 <nl> param [ ' tree_method ' ] = cpu_algorithm <nl> print ( " Training with ' % s ' " % param [ ' tree_method ' ] ) <nl> tmp = time . time ( ) <nl> mmm a / plugin / updater_gpu / src / device_helpers . cuh <nl> ppp b / plugin / updater_gpu / src / device_helpers . cuh <nl> <nl> * Copyright 2017 XGBoost contributors <nl> * / <nl> # pragma once <nl> + # include < dmlc / logging . h > <nl> + # include < thrust / binary_search . h > <nl> # include < thrust / device_vector . h > <nl> # include < thrust / random . h > <nl> # include < thrust / system / cuda / error . h > <nl> + # include < thrust / system / cuda / execution_policy . h > <nl> # include < thrust / system_error . h > <nl> - # include " nccl . h " <nl> # include < algorithm > <nl> # include < chrono > <nl> # include < ctime > <nl> <nl> # include < sstream > <nl> # include < string > <nl> # include < vector > <nl> - <nl> + # include " nccl . h " <nl> <nl> / / Uncomment to enable <nl> / / # define DEVICE_TIMER <nl> inline int get_device_idx ( int gpu_id ) { <nl> * Timers <nl> * / <nl> <nl> - # define MAX_WARPS 32 / / Maximum number of warps to time <nl> - # define MAX_SLOTS 10 <nl> - # define TIMER_BLOCKID 0 / / Block to time <nl> - struct DeviceTimerGlobal { <nl> - # ifdef DEVICE_TIMER <nl> - <nl> - clock_t total_clocks [ MAX_SLOTS ] [ MAX_WARPS ] ; <nl> - int64_t count [ MAX_SLOTS ] [ MAX_WARPS ] ; <nl> - <nl> - # endif <nl> - <nl> - / / Clear device memory . Call at start of kernel . <nl> - __device__ void Init ( ) { <nl> - # ifdef DEVICE_TIMER <nl> - if ( blockIdx . x = = TIMER_BLOCKID & & threadIdx . x < MAX_WARPS ) { <nl> - for ( int SLOT = 0 ; SLOT < MAX_SLOTS ; SLOT + + ) { <nl> - total_clocks [ SLOT ] [ threadIdx . x ] = 0 ; <nl> - count [ SLOT ] [ threadIdx . 
x ] = 0 ; <nl> - } <nl> - } <nl> - # endif <nl> - } <nl> - <nl> - void HostPrint ( ) { <nl> - # ifdef DEVICE_TIMER <nl> - DeviceTimerGlobal h_timer ; <nl> - safe_cuda ( <nl> - cudaMemcpyFromSymbol ( & h_timer , ( * this ) , sizeof ( DeviceTimerGlobal ) ) ) ; <nl> - <nl> - for ( int SLOT = 0 ; SLOT < MAX_SLOTS ; SLOT + + ) { <nl> - if ( h_timer . count [ SLOT ] [ 0 ] = = 0 ) { <nl> - continue ; <nl> - } <nl> - <nl> - clock_t sum_clocks = 0 ; <nl> - int64_t sum_count = 0 ; <nl> - <nl> - for ( int WARP = 0 ; WARP < MAX_WARPS ; WARP + + ) { <nl> - if ( h_timer . count [ SLOT ] [ WARP ] = = 0 ) { <nl> - continue ; <nl> - } <nl> - <nl> - sum_clocks + = h_timer . total_clocks [ SLOT ] [ WARP ] ; <nl> - sum_count + = h_timer . count [ SLOT ] [ WARP ] ; <nl> - } <nl> - <nl> - printf ( " Slot % d : % d clocks per call , called % d times . \ n " , SLOT , <nl> - sum_clocks / sum_count , h_timer . count [ SLOT ] [ 0 ] ) ; <nl> - } <nl> - # endif <nl> - } <nl> - } ; <nl> - <nl> - struct DeviceTimer { <nl> - # ifdef DEVICE_TIMER <nl> - clock_t start ; <nl> - int slot ; <nl> - DeviceTimerGlobal & GTimer ; <nl> - # endif <nl> - <nl> - # ifdef DEVICE_TIMER <nl> - __device__ DeviceTimer ( DeviceTimerGlobal & GTimer , int slot ) / / NOLINT <nl> - : GTimer ( GTimer ) , <nl> - start ( clock ( ) ) , <nl> - slot ( slot ) { } <nl> - # else <nl> - __device__ DeviceTimer ( DeviceTimerGlobal & GTimer , int slot ) { } / / NOLINT <nl> - # endif <nl> - <nl> - __device__ void End ( ) { <nl> - # ifdef DEVICE_TIMER <nl> - int warp_id = threadIdx . x / 32 ; <nl> - int lane_id = threadIdx . x % 32 ; <nl> - if ( blockIdx . x = = TIMER_BLOCKID & & lane_id = = 0 ) { <nl> - GTimer . count [ slot ] [ warp_id ] + = 1 ; <nl> - GTimer . total_clocks [ slot ] [ warp_id ] + = clock ( ) - start ; <nl> - } <nl> - # endif <nl> - } <nl> - } ; <nl> - <nl> struct Timer { <nl> typedef std : : chrono : : high_resolution_clock ClockT ; <nl> <nl> struct CubMemory { <nl> void * d_temp_storage ; <nl> size_t temp_storage_bytes ; <nl> <nl> + / / Thrust <nl> + typedef char value_type ; <nl> + <nl> CubMemory ( ) : d_temp_storage ( NULL ) , temp_storage_bytes ( 0 ) { } <nl> <nl> ~ CubMemory ( ) { Free ( ) ; } <nl> <nl> void Free ( ) { <nl> - if ( d_temp_storage ! = NULL ) { <nl> + if ( this - > IsAllocated ( ) ) { <nl> safe_cuda ( cudaFree ( d_temp_storage ) ) ; <nl> } <nl> } <nl> <nl> - void LazyAllocate ( size_t n_bytes ) { <nl> - if ( n_bytes > temp_storage_bytes ) { <nl> + void LazyAllocate ( size_t num_bytes ) { <nl> + if ( num_bytes > temp_storage_bytes ) { <nl> Free ( ) ; <nl> - safe_cuda ( cudaMalloc ( & d_temp_storage , n_bytes ) ) ; <nl> - temp_storage_bytes = n_bytes ; <nl> + safe_cuda ( cudaMalloc ( & d_temp_storage , num_bytes ) ) ; <nl> + temp_storage_bytes = num_bytes ; <nl> } <nl> } <nl> + / / Thrust <nl> + char * allocate ( std : : ptrdiff_t num_bytes ) { <nl> + LazyAllocate ( num_bytes ) ; <nl> + return reinterpret_cast < char * > ( d_temp_storage ) ; <nl> + } <nl> + <nl> + / / Thrust <nl> + void deallocate ( char * ptr , size_t n ) { <nl> + / / Do nothing <nl> + } <nl> <nl> bool IsAllocated ( ) { return d_temp_storage ! = NULL ; } <nl> } ; <nl> void print ( const thrust : : device_vector < T > & v , size_t max_items = 10 ) { <nl> std : : cout < < " \ n " ; <nl> } <nl> <nl> - template < typename T , memory_type MemoryT > <nl> + template < typename T > <nl> void print ( const dvec < T > & v , size_t max_items = 10 ) { <nl> std : : vector < T > h = v . as_vector ( ) ; <nl> for ( int i = 0 ; i < std : : min ( max_items , h . 
size ( ) ) ; i + + ) { <nl> struct BernoulliRng { <nl> t1234 . printElapsed ( name ) ; \ <nl> } while ( 0 ) <nl> <nl> + / / Load balancing search <nl> + <nl> + template < typename func_t > <nl> + class LauncherItr { <nl> + public : <nl> + int idx ; <nl> + func_t f ; <nl> + XGBOOST_DEVICE LauncherItr ( ) : idx ( 0 ) { } <nl> + XGBOOST_DEVICE LauncherItr ( int idx , func_t f ) : idx ( idx ) , f ( f ) { } <nl> + XGBOOST_DEVICE LauncherItr & operator = ( int output ) { <nl> + f ( idx , output ) ; <nl> + return * this ; <nl> + } <nl> + } ; <nl> + <nl> + template < typename func_t > <nl> + <nl> + / * * <nl> + * \ class DiscardLambdaItr <nl> + * <nl> + * \ brief Thrust compatible iterator type - discards algorithm output and <nl> + * launches device lambda with the index of the output and the algorithm output as arguments . <nl> + * <nl> + * \ author Rory <nl> + * \ date 7 / 9 / 2017 <nl> + * / <nl> + <nl> + class DiscardLambdaItr { <nl> + public : <nl> + / / Required iterator traits <nl> + typedef DiscardLambdaItr self_type ; / / / < My own type <nl> + typedef ptrdiff_t <nl> + difference_type ; / / / < Type to express the result of subtracting <nl> + / / / one iterator from another <nl> + typedef LauncherItr < func_t > <nl> + value_type ; / / / < The type of the element the iterator can point to <nl> + typedef value_type * pointer ; / / / < The type of a pointer to an element the <nl> + / / / iterator can point to <nl> + typedef value_type reference ; / / / < The type of a reference to an element the <nl> + / / / iterator can point to <nl> + typedef typename thrust : : detail : : iterator_facade_category < <nl> + thrust : : any_system_tag , thrust : : random_access_traversal_tag , value_type , <nl> + reference > : : type iterator_category ; / / / < The iterator category <nl> + private : <nl> + difference_type offset ; <nl> + func_t f ; <nl> + <nl> + public : <nl> + XGBOOST_DEVICE DiscardLambdaItr ( func_t f ) : offset ( 0 ) , f ( f ) { } <nl> + XGBOOST_DEVICE DiscardLambdaItr ( difference_type offset , func_t f ) <nl> + : offset ( offset ) , f ( f ) { } <nl> + <nl> + XGBOOST_DEVICE self_type operator + ( const int & b ) const { <nl> + return DiscardLambdaItr ( offset + b , f ) ; <nl> + } <nl> + XGBOOST_DEVICE self_type operator + + ( ) { <nl> + offset + + ; <nl> + return * this ; <nl> + } <nl> + XGBOOST_DEVICE self_type operator + + ( int ) { <nl> + self_type retval = * this ; <nl> + offset + + ; <nl> + return retval ; <nl> + } <nl> + XGBOOST_DEVICE self_type & operator + = ( const int & b ) { <nl> + offset + = b ; <nl> + return * this ; <nl> + } <nl> + XGBOOST_DEVICE reference operator * ( ) const { <nl> + return LauncherItr < func_t > ( offset , f ) ; <nl> + } <nl> + <nl> + XGBOOST_DEVICE reference operator [ ] ( int idx ) { <nl> + self_type offset = ( * this ) + idx ; <nl> + return * offset ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * \ fn template < typename func_t , typename segments_t > void TransformLbs ( int device_idx , dh : : CubMemory * temp_memory , int count , thrust : : device_ptr < segments_t > segments , int num_segments , func_t f ) <nl> + * <nl> + * \ brief Load balancing search function . Reads a CSR type matrix description and allows a function <nl> + * to be executed on each element . Search ' modern GPU load balancing search for more <nl> + * information ' . <nl> + * <nl> + * \ author Rory <nl> + * \ date 7 / 9 / 2017 <nl> + * <nl> + * \ tparam segments_t Type of the segments t . <nl> + * \ param device_idx Zero - based index of the device . 
<nl> + * \ param [ in , out ] temp_memory Temporary memory allocator . <nl> + * \ param count Number of elements . <nl> + * \ param segments Device pointed to segments . <nl> + * \ param num_segments Number of segments . <nl> + * \ param f Lambda to be executed on matrix elements . <nl> + * / <nl> + <nl> + template < typename func_t , typename segments_t > <nl> + void TransformLbs ( int device_idx , dh : : CubMemory * temp_memory , int count , <nl> + thrust : : device_ptr < segments_t > segments , int num_segments , <nl> + func_t f ) { <nl> + safe_cuda ( cudaSetDevice ( device_idx ) ) ; <nl> + auto counting = thrust : : make_counting_iterator ( 0 ) ; <nl> + <nl> + auto f_wrapper = [ = ] __device__ ( int idx , int upper_bound ) { <nl> + f ( idx , upper_bound - 1 ) ; <nl> + } ; <nl> + <nl> + DiscardLambdaItr < decltype ( f_wrapper ) > itr ( f_wrapper ) ; <nl> + <nl> + thrust : : upper_bound ( thrust : : cuda : : par ( * temp_memory ) , segments , <nl> + segments + num_segments , counting , counting + count , itr ) ; <nl> + } <nl> + <nl> } / / namespace dh <nl> mmm a / plugin / updater_gpu / src / gpu_hist_builder . cu <nl> ppp b / plugin / updater_gpu / src / gpu_hist_builder . cu <nl> <nl> / * ! <nl> * Copyright 2017 Rory mitchell <nl> * / <nl> - # include < cub / cub . cuh > <nl> # include < thrust / binary_search . h > <nl> # include < thrust / count . h > <nl> # include < thrust / sequence . h > <nl> # include < thrust / sort . h > <nl> + # include < cub / cub . cuh > <nl> # include < algorithm > <nl> # include < functional > <nl> # include < future > <nl> # include < numeric > <nl> # include " common . cuh " <nl> # include " device_helpers . cuh " <nl> + # include " dmlc / timer . h " <nl> # include " gpu_hist_builder . cuh " <nl> <nl> namespace xgboost { <nl> namespace tree { <nl> <nl> void DeviceGMat : : Init ( int device_idx , const common : : GHistIndexMatrix & gmat , <nl> - bst_uint begin , bst_uint end ) { <nl> + bst_uint element_begin , bst_uint element_end , <nl> + bst_uint row_begin , bst_uint row_end , int n_bins ) { <nl> dh : : safe_cuda ( cudaSetDevice ( device_idx ) ) ; <nl> - CHECK_EQ ( gidx . size ( ) , end - begin ) < < " gidx must be externally allocated " ; <nl> - CHECK_EQ ( ridx . size ( ) , end - begin ) < < " ridx must be externally allocated " ; <nl> - <nl> - thrust : : copy ( gmat . index . data ( ) + begin , gmat . index . data ( ) + end , gidx . tbegin ( ) ) ; <nl> - thrust : : device_vector < int > row_ptr = gmat . row_ptr ; <nl> - <nl> - auto counting = thrust : : make_counting_iterator ( begin ) ; <nl> - thrust : : upper_bound ( row_ptr . begin ( ) , row_ptr . end ( ) , counting , <nl> - counting + gidx . size ( ) , ridx . tbegin ( ) ) ; <nl> - thrust : : transform ( ridx . tbegin ( ) , ridx . tend ( ) , ridx . tbegin ( ) , <nl> - [ = ] __device__ ( int val ) { return val - 1 ; } ) ; <nl> + CHECK ( gidx_buffer . size ( ) ) < < " gidx_buffer must be externally allocated " ; <nl> + CHECK_EQ ( row_ptr . size ( ) , ( row_end - row_begin ) + 1 ) <nl> + < < " row_ptr must be externally allocated " ; <nl> + <nl> + common : : CompressedBufferWriter cbw ( n_bins ) ; <nl> + std : : vector < common : : compressed_byte_t > host_buffer ( gidx_buffer . size ( ) ) ; <nl> + cbw . Write ( host_buffer . data ( ) , gmat . index . begin ( ) + element_begin , <nl> + gmat . index . begin ( ) + element_end ) ; <nl> + gidx_buffer = host_buffer ; <nl> + gidx = common : : CompressedIterator < int > ( gidx_buffer . 
data ( ) , n_bins ) ; <nl> + <nl> + / / row_ptr <nl> + thrust : : copy ( gmat . row_ptr . data ( ) + row_begin , <nl> + gmat . row_ptr . data ( ) + row_end + 1 , row_ptr . tbegin ( ) ) ; <nl> + / / normalise row_ptr <nl> + bst_uint start = gmat . row_ptr [ row_begin ] ; <nl> + thrust : : transform ( row_ptr . tbegin ( ) , row_ptr . tend ( ) , row_ptr . tbegin ( ) , <nl> + [ = ] __device__ ( int val ) { return val - start ; } ) ; <nl> } <nl> <nl> void DeviceHist : : Init ( int n_bins_in ) { <nl> HistBuilder : : HistBuilder ( bst_gpair * ptr , int n_bins ) <nl> __device__ void HistBuilder : : Add ( bst_gpair gpair , int gidx , int nidx ) const { <nl> int hist_idx = nidx * n_bins + gidx ; <nl> atomicAdd ( & ( d_hist [ hist_idx ] . grad ) , gpair . grad ) ; / / OPTMARK : This and below <nl> - / / line lead to about 3X <nl> - / / slowdown due to memory <nl> - / / dependency and access <nl> - / / pattern issues . <nl> + / / line lead to about 3X <nl> + / / slowdown due to memory <nl> + / / dependency and access <nl> + / / pattern issues . <nl> atomicAdd ( & ( d_hist [ hist_idx ] . hess ) , gpair . hess ) ; <nl> } <nl> <nl> void GPUHistBuilder : : InitData ( const std : : vector < bst_gpair > & gpair , <nl> / / process ) <nl> } <nl> <nl> - <nl> CHECK ( fmat . SingleColBlock ( ) ) < < " grow_gpu_hist : must have single column " <nl> " block . Try setting ' tree_method ' " <nl> " parameter to ' exact ' " ; <nl> void GPUHistBuilder : : InitData ( const std : : vector < bst_gpair > & gpair , <nl> / / ba . allocate ( master_device , ) ; <nl> <nl> / / allocate vectors across all devices <nl> + temp_memory . resize ( n_devices ) ; <nl> hist_vec . resize ( n_devices ) ; <nl> nodes . resize ( n_devices ) ; <nl> nodes_temp . resize ( n_devices ) ; <nl> void GPUHistBuilder : : InitData ( const std : : vector < bst_gpair > & gpair , <nl> h_feature_segments . size ( ) , / / constant and same on all devices <nl> & prediction_cache [ d_idx ] , num_rows_segment , & position [ d_idx ] , <nl> num_rows_segment , & position_tmp [ d_idx ] , num_rows_segment , <nl> - & device_gpair [ d_idx ] , num_rows_segment , & device_matrix [ d_idx ] . gidx , <nl> - num_elements_segment , / / constant and same on all devices <nl> - & device_matrix [ d_idx ] . ridx , <nl> - num_elements_segment , / / constant and same on all devices <nl> + & device_gpair [ d_idx ] , num_rows_segment , <nl> + & device_matrix [ d_idx ] . gidx_buffer , <nl> + common : : CompressedBufferWriter : : CalculateBufferSize ( <nl> + num_elements_segment , <nl> + n_bins ) , / / constant and same on all devices <nl> + & device_matrix [ d_idx ] . row_ptr , num_rows_segment + 1 , <nl> & gidx_feature_map [ d_idx ] , n_bins , / / constant and same on all devices <nl> & gidx_fvalue_map [ d_idx ] , <nl> hmat_ . cut . size ( ) ) ; / / constant and same on all devices <nl> <nl> / / Copy Host to Device ( assumes comes after ba . allocate that sets device ) <nl> - device_matrix [ d_idx ] . Init ( device_idx , gmat_ , <nl> - device_element_segments [ d_idx ] , <nl> - device_element_segments [ d_idx + 1 ] ) ; <nl> + device_matrix [ d_idx ] . Init ( <nl> + device_idx , gmat_ , device_element_segments [ d_idx ] , <nl> + device_element_segments [ d_idx + 1 ] , device_row_segments [ d_idx ] , <nl> + device_row_segments [ d_idx + 1 ] , n_bins ) ; <nl> gidx_feature_map [ d_idx ] = h_gidx_feature_map ; <nl> gidx_fvalue_map [ d_idx ] = hmat_ . 
cut ; <nl> feature_segments [ d_idx ] = h_feature_segments ; <nl> void GPUHistBuilder : : BuildHist ( int depth ) { <nl> size_t begin = device_element_segments [ d_idx ] ; <nl> size_t end = device_element_segments [ d_idx + 1 ] ; <nl> size_t row_begin = device_row_segments [ d_idx ] ; <nl> + size_t row_end = device_row_segments [ d_idx + 1 ] ; <nl> <nl> - auto d_ridx = device_matrix [ d_idx ] . ridx . data ( ) ; <nl> - auto d_gidx = device_matrix [ d_idx ] . gidx . data ( ) ; <nl> + auto d_gidx = device_matrix [ d_idx ] . gidx ; <nl> + auto d_row_ptr = device_matrix [ d_idx ] . row_ptr . tbegin ( ) ; <nl> auto d_position = position [ d_idx ] . data ( ) ; <nl> auto d_gpair = device_gpair [ d_idx ] . data ( ) ; <nl> auto d_left_child_smallest = left_child_smallest [ d_idx ] . data ( ) ; <nl> auto hist_builder = hist_vec [ d_idx ] . GetBuilder ( ) ; <nl> + dh : : TransformLbs ( <nl> + device_idx , & temp_memory [ d_idx ] , end - begin , d_row_ptr , <nl> + row_end - row_begin , [ = ] __device__ ( int local_idx , int local_ridx ) { <nl> + int nidx = d_position [ local_ridx ] ; / / OPTMARK : latency <nl> + if ( ! is_active ( nidx , depth ) ) return ; <nl> + <nl> + / / Only increment smallest node <nl> + bool is_smallest = ( d_left_child_smallest [ parent_nidx ( nidx ) ] & & <nl> + is_left_child ( nidx ) ) | | <nl> + ( ! d_left_child_smallest [ parent_nidx ( nidx ) ] & & <nl> + ! is_left_child ( nidx ) ) ; <nl> + if ( ! is_smallest & & depth > 0 ) return ; <nl> <nl> - dh : : launch_n ( device_idx , end - begin , [ = ] __device__ ( int local_idx ) { <nl> - int ridx = d_ridx [ local_idx ] ; / / OPTMARK : latency <nl> - int nidx = d_position [ ridx - row_begin ] ; / / OPTMARK : latency <nl> - if ( ! is_active ( nidx , depth ) ) return ; <nl> - <nl> - / / Only increment smallest node <nl> - bool is_smallest = <nl> - ( d_left_child_smallest [ parent_nidx ( nidx ) ] & & is_left_child ( nidx ) ) | | <nl> - ( ! d_left_child_smallest [ parent_nidx ( nidx ) ] & & ! is_left_child ( nidx ) ) ; <nl> - if ( ! is_smallest & & depth > 0 ) return ; <nl> - <nl> - int gidx = d_gidx [ local_idx ] ; <nl> - bst_gpair gpair = d_gpair [ ridx - row_begin ] ; <nl> - <nl> - hist_builder . Add ( gpair , gidx , nidx ) ; / / OPTMARK : This is slow , could use <nl> - / / shared memory or cache results <nl> - / / intead of writing to global <nl> - / / memory every time in atomic way . <nl> - } ) ; <nl> + int gidx = d_gidx [ local_idx ] ; <nl> + bst_gpair gpair = d_gpair [ local_ridx ] ; <nl> + <nl> + hist_builder . Add ( gpair , gidx , <nl> + nidx ) ; / / OPTMARK : This is slow , could use <nl> + / / shared memory or cache results <nl> + / / instead of writing to global <nl> + / / memory every time in an atomic way . <nl> + } ) ; <nl> } <nl> <nl> - / / dh : : safe_cuda ( cudaDeviceSynchronize ( ) ) ; <nl> dh : : synchronize_n_devices ( n_devices , dList ) ; <nl> <nl> - / / time . printElapsed ( " Add Time " ) ; <nl> + / / time . printElapsed ( " Add Time " ) ; <nl> <nl> / / ( in - place ) reduce each element of histogram ( for only current level ) across <nl> / / multiple gpus <nl> void GPUHistBuilder : : BuildHist ( int depth ) { <nl> dh : : safe_cuda ( cudaSetDevice ( device_idx ) ) ; <nl> dh : : safe_cuda ( cudaStreamSynchronize ( * ( streams [ d_idx ] ) ) ) ; <nl> } <nl> - / / if no NCCL , then presume only 1 GPU , then already correct <nl> + / / if no NCCL , then presume only 1 GPU , then already correct <nl> <nl> / / time . 
printElapsed ( " Reduce - Add Time " ) ; <nl> <nl> __global__ void find_split_kernel ( <nl> left_child_smallest = & d_left_child_smallest_temp [ blockIdx . x ] ; <nl> } <nl> <nl> - * Nodeleft = Node ( <nl> - split . left_sum , <nl> - CalcGain ( gpu_param , split . left_sum . grad , split . left_sum . hess ) , <nl> - CalcWeight ( gpu_param , split . left_sum . grad , split . left_sum . hess ) ) ; <nl> + * Nodeleft = <nl> + Node ( split . left_sum , <nl> + CalcGain ( gpu_param , split . left_sum . grad , split . left_sum . hess ) , <nl> + CalcWeight ( gpu_param , split . left_sum . grad , split . left_sum . hess ) ) ; <nl> <nl> - * Noderight = Node ( <nl> - split . right_sum , <nl> - CalcGain ( gpu_param , split . right_sum . grad , split . right_sum . hess ) , <nl> - CalcWeight ( gpu_param , split . right_sum . grad , split . right_sum . hess ) ) ; <nl> + * Noderight = <nl> + Node ( split . right_sum , <nl> + CalcGain ( gpu_param , split . right_sum . grad , split . right_sum . hess ) , <nl> + CalcWeight ( gpu_param , split . right_sum . grad , split . right_sum . hess ) ) ; <nl> <nl> / / Record smallest node <nl> if ( split . left_sum . hess < = split . right_sum . hess ) { <nl> void GPUHistBuilder : : LaunchFindSplit ( int depth ) { <nl> feature_segments [ d_idx ] . data ( ) , depth , ( info - > num_col ) , <nl> ( hmat_ . row_ptr . back ( ) ) , nodes [ d_idx ] . data ( ) , nodes_temp [ d_idx ] . data ( ) , <nl> nodes_child_temp [ d_idx ] . data ( ) , nodes_offset_device , <nl> - fidx_min_map [ d_idx ] . data ( ) , gidx_fvalue_map [ d_idx ] . data ( ) , GPUTrainingParam ( param ) , <nl> - left_child_smallest_temp [ d_idx ] . data ( ) , colsample , <nl> - feature_flags [ d_idx ] . data ( ) ) ; <nl> + fidx_min_map [ d_idx ] . data ( ) , gidx_fvalue_map [ d_idx ] . data ( ) , <nl> + GPUTrainingParam ( param ) , left_child_smallest_temp [ d_idx ] . data ( ) , <nl> + colsample , feature_flags [ d_idx ] . data ( ) ) ; <nl> } <nl> <nl> / / nccl only on devices that did split <nl> void GPUHistBuilder : : LaunchFindSplit ( int depth ) { <nl> feature_segments [ d_idx ] . data ( ) , depth , ( info - > num_col ) , <nl> ( hmat_ . row_ptr . back ( ) ) , nodes [ d_idx ] . data ( ) , NULL , NULL , <nl> nodes_offset_device , fidx_min_map [ d_idx ] . data ( ) , <nl> - gidx_fvalue_map [ d_idx ] . data ( ) , GPUTrainingParam ( param ) , <nl> + gidx_fvalue_map [ d_idx ] . data ( ) , GPUTrainingParam ( param ) , <nl> left_child_smallest [ d_idx ] . data ( ) , colsample , <nl> feature_flags [ d_idx ] . data ( ) ) ; <nl> <nl> void GPUHistBuilder : : LaunchFindSplit ( int depth ) { <nl> feature_segments [ d_idx ] . data ( ) , depth , ( info - > num_col ) , <nl> ( hmat_ . row_ptr . back ( ) ) , nodes [ d_idx ] . data ( ) , NULL , NULL , <nl> nodes_offset_device , fidx_min_map [ d_idx ] . data ( ) , <nl> - gidx_fvalue_map [ d_idx ] . data ( ) , GPUTrainingParam ( param ) , <nl> + gidx_fvalue_map [ d_idx ] . data ( ) , GPUTrainingParam ( param ) , <nl> left_child_smallest [ d_idx ] . data ( ) , colsample , <nl> feature_flags [ d_idx ] . data ( ) ) ; <nl> } <nl> void GPUHistBuilder : : LaunchFindSplit ( int depth ) { <nl> } <nl> <nl> void GPUHistBuilder : : InitFirstNode ( const std : : vector < bst_gpair > & gpair ) { <nl> - # ifdef _WIN32 <nl> - / / Visual studio complains about C : / Program Files ( x86 ) / Microsoft Visual <nl> - / / Studio 14 . 0 / VC / bin / . . / . . 
/ VC / INCLUDE \ utility ( 445 ) : error : static assertion <nl> - / / failed with " tuple index out of bounds " <nl> - / / and C : / Program Files ( x86 ) / Microsoft Visual Studio <nl> - / / 14 . 0 / VC / bin / . . / . . / VC / INCLUDE \ future ( 1888 ) : error : no instance of function <nl> - / / template " std : : _Invoke_stored " matches the argument list <nl> - std : : vector < bst_gpair > future_results ( n_devices ) ; <nl> + / / Perform asynchronous reduction on each gpu <nl> + std : : vector < bst_gpair > device_sums ( n_devices ) ; <nl> + # pragma omp parallel for num_threads ( n_devices ) <nl> for ( int d_idx = 0 ; d_idx < n_devices ; d_idx + + ) { <nl> int device_idx = dList [ d_idx ] ; <nl> - <nl> + dh : : safe_cuda ( cudaSetDevice ( device_idx ) ) ; <nl> auto begin = device_gpair [ d_idx ] . tbegin ( ) ; <nl> auto end = device_gpair [ d_idx ] . tend ( ) ; <nl> bst_gpair init = bst_gpair ( ) ; <nl> auto binary_op = thrust : : plus < bst_gpair > ( ) ; <nl> - <nl> - dh : : safe_cuda ( cudaSetDevice ( device_idx ) ) ; <nl> - future_results [ d_idx ] = thrust : : reduce ( begin , end , init , binary_op ) ; <nl> - } <nl> - <nl> - / / sum over devices on host ( with blocking get ( ) ) <nl> - bst_gpair sum = bst_gpair ( ) ; <nl> - for ( int d_idx = 0 ; d_idx < n_devices ; d_idx + + ) { <nl> - int device_idx = dList [ d_idx ] ; <nl> - sum + = future_results [ d_idx ] ; <nl> - } <nl> - # else <nl> - / / asynch reduce per device <nl> - <nl> - std : : vector < std : : future < bst_gpair > > future_results ( n_devices ) ; <nl> - for ( int d_idx = 0 ; d_idx < n_devices ; d_idx + + ) { <nl> - / / std : : async captures the algorithm parameters by value <nl> - / / use std : : launch : : async to ensure the creation of a new thread <nl> - future_results [ d_idx ] = std : : async ( std : : launch : : async , [ = ] { <nl> - int device_idx = dList [ d_idx ] ; <nl> - dh : : safe_cuda ( cudaSetDevice ( device_idx ) ) ; <nl> - auto begin = device_gpair [ d_idx ] . tbegin ( ) ; <nl> - auto end = device_gpair [ d_idx ] . tend ( ) ; <nl> - bst_gpair init = bst_gpair ( ) ; <nl> - auto binary_op = thrust : : plus < bst_gpair > ( ) ; <nl> - return thrust : : reduce ( begin , end , init , binary_op ) ; <nl> - } ) ; <nl> + device_sums [ d_idx ] = thrust : : reduce ( begin , end , init , binary_op ) ; <nl> } <nl> <nl> - / / sum over devices on host ( with blocking get ( ) ) <nl> bst_gpair sum = bst_gpair ( ) ; <nl> for ( int d_idx = 0 ; d_idx < n_devices ; d_idx + + ) { <nl> - int device_idx = dList [ d_idx ] ; <nl> - sum + = future_results [ d_idx ] . get ( ) ; <nl> + sum + = device_sums [ d_idx ] ; <nl> } <nl> - # endif <nl> <nl> / / Setup first node so all devices have same first node ( here done same on all <nl> / / devices , or could have done one device and Bcast if worried about exact <nl> void GPUHistBuilder : : InitFirstNode ( const std : : vector < bst_gpair > & gpair ) { <nl> <nl> dh : : launch_n ( device_idx , 1 , [ = ] __device__ ( int idx ) { <nl> bst_gpair sum_gradients = sum ; <nl> - d_nodes [ idx ] = Node ( <nl> - sum_gradients , <nl> - CalcGain ( gpu_param , sum_gradients . grad , sum_gradients . hess ) , <nl> - CalcWeight ( gpu_param , sum_gradients . grad , <nl> - sum_gradients . hess ) ) ; <nl> + d_nodes [ idx ] = <nl> + Node ( sum_gradients , <nl> + CalcGain ( gpu_param , sum_gradients . grad , sum_gradients . hess ) , <nl> + CalcWeight ( gpu_param , sum_gradients . grad , sum_gradients . 
hess ) ) ; <nl> } ) ; <nl> } <nl> / / synch all devices to host before moving on ( No , can avoid because BuildHist <nl> void GPUHistBuilder : : UpdatePositionDense ( int depth ) { <nl> auto d_position = position [ d_idx ] . data ( ) ; <nl> Node * d_nodes = nodes [ d_idx ] . data ( ) ; <nl> auto d_gidx_fvalue_map = gidx_fvalue_map [ d_idx ] . data ( ) ; <nl> - auto d_gidx = device_matrix [ d_idx ] . gidx . data ( ) ; <nl> + auto d_gidx = device_matrix [ d_idx ] . gidx ; <nl> int n_columns = info - > num_col ; <nl> size_t begin = device_row_segments [ d_idx ] ; <nl> size_t end = device_row_segments [ d_idx + 1 ] ; <nl> void GPUHistBuilder : : UpdatePositionSparse ( int depth ) { <nl> Node * d_nodes = nodes [ d_idx ] . data ( ) ; <nl> auto d_gidx_feature_map = gidx_feature_map [ d_idx ] . data ( ) ; <nl> auto d_gidx_fvalue_map = gidx_fvalue_map [ d_idx ] . data ( ) ; <nl> - auto d_gidx = device_matrix [ d_idx ] . gidx . data ( ) ; <nl> - auto d_ridx = device_matrix [ d_idx ] . ridx . data ( ) ; <nl> + auto d_gidx = device_matrix [ d_idx ] . gidx ; <nl> + auto d_row_ptr = device_matrix [ d_idx ] . row_ptr . tbegin ( ) ; <nl> <nl> size_t row_begin = device_row_segments [ d_idx ] ; <nl> size_t row_end = device_row_segments [ d_idx + 1 ] ; <nl> void GPUHistBuilder : : UpdatePositionSparse ( int depth ) { <nl> / / Update node based on fvalue where exists <nl> / / OPTMARK : This kernel is very inefficient for both compute and memory , <nl> / / dominated by memory dependency / access patterns <nl> - dh : : launch_n ( <nl> - device_idx , element_end - element_begin , [ = ] __device__ ( int local_idx ) { <nl> - int ridx = d_ridx [ local_idx ] ; <nl> - int pos = d_position [ ridx - row_begin ] ; <nl> + <nl> + dh : : TransformLbs ( <nl> + device_idx , & temp_memory [ d_idx ] , element_end - element_begin , d_row_ptr , <nl> + row_end - row_begin , [ = ] __device__ ( int local_idx , int local_ridx ) { <nl> + int pos = d_position [ local_ridx ] ; <nl> if ( ! is_active ( pos , depth ) ) { <nl> return ; <nl> } <nl> void GPUHistBuilder : : UpdatePositionSparse ( int depth ) { <nl> float fvalue = d_gidx_fvalue_map [ gidx ] ; <nl> <nl> if ( fvalue < = node . split . fvalue ) { <nl> - d_position_tmp [ ridx - row_begin ] = left_child_nidx ( pos ) ; <nl> + d_position_tmp [ local_ridx ] = left_child_nidx ( pos ) ; <nl> } else { <nl> - d_position_tmp [ ridx - row_begin ] = right_child_nidx ( pos ) ; <nl> + d_position_tmp [ local_ridx ] = right_child_nidx ( pos ) ; <nl> } <nl> } <nl> } ) ; <nl> void GPUHistBuilder : : ColSampleLevel ( ) { <nl> h_feature_flags [ fidx ] = 1 ; <nl> } <nl> <nl> - / / copy from Host to Device for all devices <nl> - / / for ( auto & f : feature_flags ) { / / this doesn ' t set device as should <nl> - / / f = h_feature_flags ; <nl> - / / } <nl> for ( int d_idx = 0 ; d_idx < n_devices ; d_idx + + ) { <nl> int device_idx = dList [ d_idx ] ; <nl> dh : : safe_cuda ( cudaSetDevice ( device_idx ) ) ; <nl> mmm a / plugin / updater_gpu / src / gpu_hist_builder . cuh <nl> ppp b / plugin / updater_gpu / src / gpu_hist_builder . cuh <nl> <nl> # include < vector > <nl> # include " . . / . . / src / common / hist_util . h " <nl> # include " . . / . . / src / tree / param . h " <nl> + # include " . . / . . / src / common / compressed_iterator . h " <nl> # include " device_helpers . cuh " <nl> # include " types . cuh " <nl> - <nl> - # ifndef NCCL <nl> - # define NCCL 1 <nl> - # endif <nl> - <nl> - # if ( NCCL ) <nl> # include " nccl . 
h " <nl> - # endif <nl> <nl> namespace xgboost { <nl> - <nl> namespace tree { <nl> <nl> struct DeviceGMat { <nl> - dh : : dvec < int > gidx ; <nl> - dh : : dvec < int > ridx ; <nl> + dh : : dvec < common : : compressed_byte_t > gidx_buffer ; <nl> + common : : CompressedIterator < int > gidx ; <nl> + dh : : dvec < int > row_ptr ; <nl> void Init ( int device_idx , const common : : GHistIndexMatrix & gmat , <nl> - bst_uint begin , bst_uint end ) ; <nl> + bst_uint begin , bst_uint end , bst_uint row_begin , bst_uint row_end , int n_bins ) ; <nl> } ; <nl> <nl> struct HistBuilder { <nl> class GPUHistBuilder { <nl> dh : : bulk_allocator < dh : : memory_type : : DEVICE > ba ; <nl> / / dh : : bulk_allocator < dh : : memory_type : : DEVICE_MANAGED > ba ; / / can ' t be used <nl> / / with NCCL <nl> - dh : : CubMemory cub_mem ; <nl> <nl> std : : vector < int > feature_set_tree ; <nl> std : : vector < int > feature_set_level ; <nl> class GPUHistBuilder { <nl> std : : vector < int > device_row_segments ; <nl> std : : vector < int > device_element_segments ; <nl> <nl> + std : : vector < dh : : CubMemory > temp_memory ; <nl> std : : vector < DeviceHist > hist_vec ; <nl> std : : vector < dh : : dvec < Node > > nodes ; <nl> std : : vector < dh : : dvec < Node > > nodes_temp ; <nl> class GPUHistBuilder { <nl> std : : vector < dh : : dvec < float > > gidx_fvalue_map ; <nl> <nl> std : : vector < cudaStream_t * > streams ; <nl> - # if ( NCCL ) <nl> std : : vector < ncclComm_t > comms ; <nl> std : : vector < std : : vector < ncclComm_t > > find_split_comms ; <nl> - # endif <nl> } ; <nl> } / / namespace tree <nl> } / / namespace xgboost <nl> new file mode 100644 <nl> index 0000000000 . . 910f668bc1 <nl> mmm / dev / null <nl> ppp b / plugin / updater_gpu / test / cpp / test_device_helpers . cu <nl> <nl> + <nl> + / * ! <nl> + * Copyright 2017 XGBoost contributors <nl> + * / <nl> + # include < thrust / device_vector . h > <nl> + # include < xgboost / base . h > <nl> + # include " . . / . . / src / device_helpers . cuh " <nl> + # include " gtest / gtest . h " <nl> + <nl> + static const std : : vector < int > gidx = { 0 , 2 , 5 , 1 , 3 , 6 , 0 , 2 , 0 , 7 } ; <nl> + static const std : : vector < int > row_ptr = { 0 , 3 , 6 , 8 , 10 } ; <nl> + static const std : : vector < int > lbs_seg_output = { 0 , 0 , 0 , 1 , 1 , 1 , 2 , 2 , 3 , 3 } ; <nl> + <nl> + thrust : : device_vector < int > test_lbs ( ) { <nl> + thrust : : device_vector < int > device_gidx = gidx ; <nl> + thrust : : device_vector < int > device_row_ptr = row_ptr ; <nl> + thrust : : device_vector < int > device_output_row ( gidx . size ( ) , 0 ) ; <nl> + auto d_output_row = device_output_row . data ( ) ; <nl> + dh : : CubMemory temp_memory ; <nl> + dh : : TransformLbs ( <nl> + 0 , & temp_memory , gidx . size ( ) , device_row_ptr . data ( ) , row_ptr . size ( ) - 1 , <nl> + [ = ] __device__ ( int idx , int ridx ) { d_output_row [ idx ] = ridx ; } ) ; <nl> + <nl> + dh : : safe_cuda ( cudaDeviceSynchronize ( ) ) ; <nl> + return device_output_row ; <nl> + } <nl> + <nl> + TEST ( lbs , Test ) { ASSERT_TRUE ( test_lbs ( ) = = lbs_seg_output ) ; } <nl> new file mode 100644 <nl> index 0000000000 . . 794c933987 <nl> mmm / dev / null <nl> ppp b / src / common / compressed_iterator . h <nl> <nl> + / * ! <nl> + * Copyright 2017 by Contributors <nl> + * \ file compressed_iterator . h <nl> + * / <nl> + # pragma once <nl> + # include < xgboost / base . h > <nl> + # include < cmath > <nl> + # include < cstddef > <nl> + # include " dmlc / logging . 
h " <nl> + <nl> + namespace xgboost { <nl> + namespace common { <nl> + <nl> + typedef unsigned char compressed_byte_t ; <nl> + <nl> + namespace detail { <nl> + inline void SetBit ( compressed_byte_t * byte , int bit_idx ) { <nl> + * byte | = 1 < < bit_idx ; <nl> + } <nl> + template < typename T > <nl> + inline T CheckBit ( const T & byte , int bit_idx ) { <nl> + return byte & ( 1 < < bit_idx ) ; <nl> + } <nl> + inline void ClearBit ( compressed_byte_t * byte , int bit_idx ) { <nl> + * byte & = ~ ( 1 < < bit_idx ) ; <nl> + } <nl> + static const int padding = 4 ; / / Assign padding so we can read slightly off <nl> + / / the beginning of the array <nl> + <nl> + / / The number of bits required to represent a given unsigned range <nl> + static int SymbolBits ( int num_symbols ) { <nl> + return std : : ceil ( std : : log2 ( num_symbols ) ) ; <nl> + } <nl> + } / / namespace detail <nl> + <nl> + / * * <nl> + * \ class CompressedBufferWriter <nl> + * <nl> + * \ brief Writes bit compressed symbols to a memory buffer . Use <nl> + * CompressedIterator to read symbols back from buffer . Currently limited to a <nl> + * maximum symbol size of 28 bits . <nl> + * <nl> + * \ author Rory <nl> + * \ date 7 / 9 / 2017 <nl> + * / <nl> + <nl> + class CompressedBufferWriter { <nl> + private : <nl> + int symbol_bits_ ; <nl> + size_t offset_ ; <nl> + <nl> + public : <nl> + explicit CompressedBufferWriter ( int num_symbols ) : offset_ ( 0 ) { <nl> + symbol_bits_ = detail : : SymbolBits ( num_symbols ) ; <nl> + } <nl> + <nl> + / * * <nl> + * \ fn static size_t CompressedBufferWriter : : CalculateBufferSize ( int <nl> + * num_elements , int num_symbols ) <nl> + * <nl> + * \ brief Calculates number of bytes requiredm for a given number of elements <nl> + * and a symbol range . <nl> + * <nl> + * \ author Rory <nl> + * \ date 7 / 9 / 2017 <nl> + * <nl> + * \ param num_elements Number of elements . <nl> + * \ param num_symbols Max number of symbols ( alphabet size ) <nl> + * <nl> + * \ return The calculated buffer size . 
<nl> + * / <nl> + <nl> + static size_t CalculateBufferSize ( int num_elements , int num_symbols ) { <nl> + const int bits_per_byte = 8 ; <nl> + int compressed_size = std : : ceil ( <nl> + static_cast < double > ( detail : : SymbolBits ( num_symbols ) * num_elements ) / <nl> + bits_per_byte ) ; <nl> + return compressed_size + detail : : padding ; <nl> + } <nl> + <nl> + template < typename T > <nl> + void WriteSymbol ( compressed_byte_t * buffer , T symbol , size_t offset ) { <nl> + const int bits_per_byte = 8 ; <nl> + <nl> + for ( int i = 0 ; i < symbol_bits_ ; i + + ) { <nl> + size_t byte_idx = ( ( offset + 1 ) * symbol_bits_ - ( i + 1 ) ) / bits_per_byte ; <nl> + byte_idx + = detail : : padding ; <nl> + int bit_idx = <nl> + ( ( bits_per_byte + i ) - ( ( offset + 1 ) * symbol_bits_ ) ) % bits_per_byte ; <nl> + <nl> + if ( detail : : CheckBit ( symbol , i ) ) { <nl> + detail : : SetBit ( & buffer [ byte_idx ] , bit_idx ) ; <nl> + } else { <nl> + detail : : ClearBit ( & buffer [ byte_idx ] , bit_idx ) ; <nl> + } <nl> + } <nl> + } <nl> + template < typename iter_t > <nl> + void Write ( compressed_byte_t * buffer , iter_t input_begin , iter_t input_end ) { <nl> + uint64_t tmp = 0 ; <nl> + int stored_bits = 0 ; <nl> + const int max_stored_bits = 64 - symbol_bits_ ; <nl> + int buffer_position = detail : : padding ; <nl> + const int num_symbols = input_end - input_begin ; <nl> + for ( int i = 0 ; i < num_symbols ; i + + ) { <nl> + typename std : : iterator_traits < iter_t > : : value_type symbol = input_begin [ i ] ; <nl> + if ( stored_bits > max_stored_bits ) { <nl> + / / Eject only full bytes <nl> + int tmp_bytes = stored_bits / 8 ; <nl> + for ( int j = 0 ; j < tmp_bytes ; j + + ) { <nl> + buffer [ buffer_position ] = tmp > > ( stored_bits - ( j + 1 ) * 8 ) ; <nl> + buffer_position + + ; <nl> + } <nl> + stored_bits - = tmp_bytes * 8 ; <nl> + tmp & = ( 1 < < stored_bits ) - 1 ; <nl> + } <nl> + / / Store symbol <nl> + tmp < < = symbol_bits_ ; <nl> + tmp | = symbol ; <nl> + stored_bits + = symbol_bits_ ; <nl> + } <nl> + <nl> + / / Eject all bytes <nl> + int tmp_bytes = std : : ceil ( static_cast < float > ( stored_bits ) / 8 ) ; <nl> + for ( int j = 0 ; j < tmp_bytes ; j + + ) { <nl> + int shift_bits = stored_bits - ( j + 1 ) * 8 ; <nl> + if ( shift_bits > = 0 ) { <nl> + buffer [ buffer_position ] = tmp > > shift_bits ; <nl> + } else { <nl> + buffer [ buffer_position ] = tmp < < std : : abs ( shift_bits ) ; <nl> + } <nl> + buffer_position + + ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + template < typename T > <nl> + <nl> + / * * <nl> + * \ class CompressedIterator <nl> + * <nl> + * \ brief Read symbols from a bit compressed memory buffer . Usable on device and <nl> + * host . 
<nl> + * <nl> + * \ author Rory <nl> + * \ date 7 / 9 / 2017 <nl> + * / <nl> + <nl> + class CompressedIterator { <nl> + public : <nl> + typedef CompressedIterator < T > self_type ; / / / < My own type <nl> + typedef ptrdiff_t <nl> + difference_type ; / / / < Type to express the result of subtracting <nl> + / / / one iterator from another <nl> + typedef T value_type ; / / / < The type of the element the iterator can point to <nl> + typedef value_type * pointer ; / / / < The type of a pointer to an element the <nl> + / / / iterator can point to <nl> + typedef value_type reference ; / / / < The type of a reference to an element the <nl> + / / / iterator can point to <nl> + private : <nl> + compressed_byte_t * buffer_ ; <nl> + int symbol_bits_ ; <nl> + size_t offset_ ; <nl> + <nl> + public : <nl> + CompressedIterator ( ) : buffer_ ( nullptr ) , symbol_bits_ ( 0 ) , offset_ ( 0 ) { } <nl> + CompressedIterator ( compressed_byte_t * buffer , int num_symbols ) <nl> + : buffer_ ( buffer ) , offset_ ( 0 ) { <nl> + symbol_bits_ = detail : : SymbolBits ( num_symbols ) ; <nl> + } <nl> + <nl> + XGBOOST_DEVICE reference operator * ( ) const { <nl> + const int bits_per_byte = 8 ; <nl> + size_t start_bit_idx = ( ( offset_ + 1 ) * symbol_bits_ - 1 ) ; <nl> + size_t start_byte_idx = start_bit_idx / bits_per_byte ; <nl> + start_byte_idx + = detail : : padding ; <nl> + <nl> + / / Read 5 bytes - the maximum we will need <nl> + uint64_t tmp = static_cast < uint64_t > ( buffer_ [ start_byte_idx - 4 ] ) < < 32 | <nl> + static_cast < uint64_t > ( buffer_ [ start_byte_idx - 3 ] ) < < 24 | <nl> + static_cast < uint64_t > ( buffer_ [ start_byte_idx - 2 ] ) < < 16 | <nl> + static_cast < uint64_t > ( buffer_ [ start_byte_idx - 1 ] ) < < 8 | <nl> + buffer_ [ start_byte_idx ] ; <nl> + int bit_shift = <nl> + ( bits_per_byte - ( ( offset_ + 1 ) * symbol_bits_ ) ) % bits_per_byte ; <nl> + tmp > > = bit_shift ; <nl> + / / Mask off unneeded bits <nl> + uint64_t mask = ( 1 < < symbol_bits_ ) - 1 ; <nl> + return static_cast < T > ( tmp & mask ) ; <nl> + } <nl> + <nl> + XGBOOST_DEVICE reference operator [ ] ( int idx ) const { <nl> + self_type offset = ( * this ) ; <nl> + offset . offset_ + = idx ; <nl> + return * offset ; <nl> + } <nl> + } ; <nl> + } / / namespace common <nl> + } / / namespace xgboost <nl> new file mode 100644 <nl> index 0000000000 . . 4ea1fc8e15 <nl> mmm / dev / null <nl> ppp b / tests / cpp / common / test_compressed_iterator . cc <nl> <nl> + # include " . . / . . / . . / src / common / compressed_iterator . h " <nl> + # include " gtest / gtest . h " <nl> + <nl> + namespace xgboost { <nl> + namespace common { <nl> + TEST ( CompressedIterator , Test ) { <nl> + ASSERT_TRUE ( detail : : SymbolBits ( 256 ) = = 8 ) ; <nl> + ASSERT_TRUE ( detail : : SymbolBits ( 150 ) = = 8 ) ; <nl> + std : : vector < int > test_cases = { 3 , 426 , 21 , 64 , 256 , 100000 , INT32_MAX } ; <nl> + int num_elements = 1000 ; <nl> + int repetitions = 1000 ; <nl> + srand ( 9 ) ; <nl> + <nl> + for ( auto alphabet_size : test_cases ) { <nl> + for ( int i = 0 ; i < repetitions ; i + + ) { <nl> + std : : vector < int > input ( num_elements ) ; <nl> + std : : generate ( input . begin ( ) , input . end ( ) , <nl> + [ = ] ( ) { return rand ( ) % alphabet_size ; } ) ; <nl> + CompressedBufferWriter cbw ( alphabet_size ) ; <nl> + <nl> + / / Test write entire array <nl> + std : : vector < unsigned char > buffer ( <nl> + CompressedBufferWriter : : CalculateBufferSize ( input . size ( ) , <nl> + alphabet_size ) ) ; <nl> + <nl> + cbw . Write ( buffer . 
data ( ) , input . begin ( ) , input . end ( ) ) ; <nl> + <nl> + CompressedIterator < int > ci ( buffer . data ( ) , alphabet_size ) ; <nl> + std : : vector < int > output ( input . size ( ) ) ; <nl> + for ( int i = 0 ; i < input . size ( ) ; i + + ) { <nl> + output [ i ] = ci [ i ] ; <nl> + } <nl> + <nl> + ASSERT_TRUE ( input = = output ) ; <nl> + <nl> + / / Test write Symbol <nl> + std : : vector < unsigned char > buffer2 ( <nl> + CompressedBufferWriter : : CalculateBufferSize ( input . size ( ) , <nl> + alphabet_size ) ) ; <nl> + for ( int i = 0 ; i < input . size ( ) ; i + + ) { <nl> + cbw . WriteSymbol ( buffer2 . data ( ) , input [ i ] , i ) ; <nl> + } <nl> + CompressedIterator < int > ci2 ( buffer2 . data ( ) , alphabet_size ) ; <nl> + std : : vector < int > output2 ( input . size ( ) ) ; <nl> + for ( int i = 0 ; i < input . size ( ) ; i + + ) { <nl> + output2 [ i ] = ci2 [ i ] ; <nl> + } <nl> + ASSERT_TRUE ( input = = output2 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + } / / namespace common <nl> + } / / namespace xgboost <nl> \ No newline at end of file <nl>
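The new `dh::TransformLbs` above is easiest to understand through its host-side equivalent: for each element index `idx` of a CSR matrix, the owning row is `upper_bound(row_ptr, idx) - 1`, and the user lambda is invoked as `f(idx, row)`. The following standalone CPU sketch is only an illustration (not part of the commit; `TransformLbsHost` is a hypothetical name), reproducing the fixture from the new `test_device_helpers.cu`:

```C++
// Illustrative host-side sketch of the mapping computed by dh::TransformLbs.
// This is a CPU reference, not the commit's CUDA implementation.
#include <algorithm>
#include <cassert>
#include <vector>

template <typename F>
void TransformLbsHost(const std::vector<int>& row_ptr, int count, F f) {
  for (int idx = 0; idx < count; idx++) {
    // upper_bound finds the first segment offset greater than idx;
    // subtracting one yields the row that owns element idx.
    auto it = std::upper_bound(row_ptr.begin(), row_ptr.end(), idx);
    int row = static_cast<int>(it - row_ptr.begin()) - 1;
    f(idx, row);
  }
}

int main() {
  // Same fixture as the new test_device_helpers.cu above.
  std::vector<int> row_ptr = {0, 3, 6, 8, 10};
  std::vector<int> expected = {0, 0, 0, 1, 1, 1, 2, 2, 3, 3};
  std::vector<int> out(10, -1);
  TransformLbsHost(row_ptr, 10, [&](int idx, int row) { out[idx] = row; });
  assert(out == expected);
  return 0;
}
```

The device version obtains the same mapping in bulk by running `thrust::upper_bound` with a counting iterator as the search keys and `DiscardLambdaItr` as the output iterator, which is why the `- 1` adjustment happens inside `f_wrapper`.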
[ GPU - Plugin ] Add load balancing search to gpu_hist . Add compressed iterator . ( )
dmlc/xgboost
530f01e21c005e28caebc1236067c23e20326b16
2017-07-11T10:36:39Z
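A worked example of the buffer sizing behind the `CompressedBufferWriter` introduced in the commit above: symbols are packed at `ceil(log2(num_symbols))` bits each, rounded up to whole bytes, plus a fixed 4-byte padding that lets `CompressedIterator::operator*` read up to four bytes before the first symbol. The sketch below is a standalone host-only illustration, not the committed code:

```C++
// Standalone sketch of the arithmetic in
// CompressedBufferWriter::CalculateBufferSize from the commit above:
// bits-per-symbol times element count, rounded up to whole bytes, plus a
// fixed 4-byte padding so reads may start slightly before the first symbol.
#include <cmath>
#include <cstdio>

static int SymbolBits(int num_symbols) {
  return static_cast<int>(std::ceil(std::log2(num_symbols)));
}

static size_t CalculateBufferSize(int num_elements, int num_symbols) {
  const int bits_per_byte = 8;
  const int padding = 4;
  size_t compressed_size = static_cast<size_t>(std::ceil(
      static_cast<double>(SymbolBits(num_symbols) * num_elements) /
      bits_per_byte));
  return compressed_size + padding;
}

int main() {
  // 1000 symbols from an alphabet of 256 need 8 bits each:
  // 1000 payload bytes + 4 padding bytes.
  std::printf("%zu\n", CalculateBufferSize(1000, 256));  // 1004
  // An alphabet of 3 needs ceil(log2(3)) = 2 bits per symbol:
  // ceil(1000 * 2 / 8) = 250 payload bytes + 4 padding bytes.
  std::printf("%zu\n", CalculateBufferSize(1000, 3));    // 254
  return 0;
}
```

This is why the updated `ba.allocate` call in the diff sizes `gidx_buffer` with `CompressedBufferWriter::CalculateBufferSize(num_elements_segment, n_bins)` rather than with one full int per element.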
mmm a / benchmark / linux / linux - perf - events . h <nl> ppp b / benchmark / linux / linux - perf - events . h <nl> <nl> # include < unistd . h > / / for syscall <nl> <nl> # include < cerrno > / / for errno <nl> - # include < cstring > / / for memset <nl> + # include < cstring > / / for std : : memset <nl> # include < stdexcept > <nl> <nl> # include < iostream > <nl> template < int TYPE = PERF_TYPE_HARDWARE > class LinuxEvents { <nl> <nl> public : <nl> explicit LinuxEvents ( std : : vector < int > config_vec ) : fd ( 0 ) , working ( true ) { <nl> - memset ( & attribs , 0 , sizeof ( attribs ) ) ; <nl> + std : : memset ( & attribs , 0 , sizeof ( attribs ) ) ; <nl> attribs . type = TYPE ; <nl> attribs . size = sizeof ( attribs ) ; <nl> attribs . disabled = 1 ; <nl> mmm a / benchmark / twitter / sax_tweet_reader_visitor . h <nl> ppp b / benchmark / twitter / sax_tweet_reader_visitor . h <nl> simdjson_really_inline void sax_tweet_reader_visitor : : field_lookup : : neg ( const ch <nl> } <nl> <nl> sax_tweet_reader_visitor : : field_lookup : : field_lookup ( ) { <nl> - add ( " \ " statuses \ " " , strlen ( " \ " statuses \ " " ) , containers : : top_object , field_type : : array , 0 ) ; / / { " statuses " : [ . . . ] <nl> - # define TWEET_FIELD ( KEY , TYPE ) add ( " \ " " # KEY " \ " " , strlen ( " \ " " # KEY " \ " " ) , containers : : tweet , TYPE , offsetof ( tweet , KEY ) ) ; <nl> + add ( " \ " statuses \ " " , std : : strlen ( " \ " statuses \ " " ) , containers : : top_object , field_type : : array , 0 ) ; / / { " statuses " : [ . . . ] <nl> + # define TWEET_FIELD ( KEY , TYPE ) add ( " \ " " # KEY " \ " " , std : : strlen ( " \ " " # KEY " \ " " ) , containers : : tweet , TYPE , offsetof ( tweet , KEY ) ) ; <nl> TWEET_FIELD ( id , field_type : : unsigned_integer ) ; <nl> TWEET_FIELD ( in_reply_to_status_id , field_type : : nullable_unsigned_integer ) ; <nl> TWEET_FIELD ( retweet_count , field_type : : unsigned_integer ) ; <nl> sax_tweet_reader_visitor : : field_lookup : : field_lookup ( ) { <nl> TWEET_FIELD ( created_at , field_type : : string ) ; <nl> TWEET_FIELD ( user , field_type : : object ) <nl> # undef TWEET_FIELD <nl> - # define USER_FIELD ( KEY , TYPE ) add ( " \ " " # KEY " \ " " , strlen ( " \ " " # KEY " \ " " ) , containers : : user , TYPE , offsetof ( tweet , user ) + offsetof ( twitter_user , KEY ) ) ; <nl> + # define USER_FIELD ( KEY , TYPE ) add ( " \ " " # KEY " \ " " , std : : strlen ( " \ " " # KEY " \ " " ) , containers : : user , TYPE , offsetof ( tweet , user ) + offsetof ( twitter_user , KEY ) ) ; <nl> USER_FIELD ( id , field_type : : unsigned_integer ) ; <nl> USER_FIELD ( screen_name , field_type : : string ) ; <nl> # undef USER_FIELD <nl> mmm a / doc / basics . md <nl> ppp b / doc / basics . md <nl> In some cases , you may have valid JSON strings that you do not wish to parse but <nl> / / Starts with a valid JSON document as a string . <nl> / / It does not have to be null - terminated . <nl> const char * some_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t length = strlen ( some_string ) ; <nl> + size_t length = std : : strlen ( some_string ) ; <nl> / / Create a buffer to receive the minified string . Make sure that there is enough room ( length bytes ) . <nl> std : : unique_ptr < char [ ] > buffer { new char [ length ] } ; <nl> size_t new_length { } ; / / It will receive the minified length . <nl> The simdjson library has fast functions to validate UTF - 8 strings . 
They are many <nl> <nl> ` ` ` C + + <nl> const char * some_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t length = strlen ( some_string ) ; <nl> + size_t length = std : : strlen ( some_string ) ; <nl> bool is_ok = simdjson : : validate_utf8 ( some_string , length ) ; <nl> ` ` ` <nl> <nl> mmm a / doc / basics_doxygen . md <nl> ppp b / doc / basics_doxygen . md <nl> In some cases , you may have valid JSON strings that you do not wish to parse but <nl> / / Starts with a valid JSON document as a string . <nl> / / It does not have to be null - terminated . <nl> const char * some_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t length = strlen ( some_string ) ; <nl> + size_t length = std : : strlen ( some_string ) ; <nl> / / Create a buffer to receive the minified string . Make sure that there is enough room ( length bytes ) . <nl> std : : unique_ptr < char [ ] > buffer { new char [ length ] } ; <nl> size_t new_length { } ; / / It will receive the minified length . <nl> The simdjson library has fast functions to validate UTF - 8 strings . They are many <nl> <nl> ` ` ` <nl> const char * some_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t length = strlen ( some_string ) ; <nl> + size_t length = std : : strlen ( some_string ) ; <nl> bool is_ok = simdjson : : validate_utf8 ( some_string , length ) ; <nl> ` ` ` <nl> <nl> mmm a / doc / performance . md <nl> ppp b / doc / performance . md <nl> are still some scenarios where tuning can enhance performance . <nl> * [ Visual Studio ] ( # visual - studio ) <nl> * [ Downclocking ] ( # downclocking ) <nl> * [ Best Use of the DOM API ] ( # best - use - of - the - dom - api ) <nl> + * [ Padding and Temporary Copies ] ( # padding - and - temporary - copies ) <nl> + <nl> <nl> Reusing the parser for maximum efficiency <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> Best Use of the DOM API <nl> <nl> The simdjson API provides access to the JSON DOM ( document - object - model ) content as a tree of ` dom : : element ` instances , each representing an object , an array or an atomic type ( null , true , false , number ) . These ` dom : : element ` instances are lightweight objects ( e . g . , spanning 16 bytes ) and it might be advantageous to pass them by value , as opposed to passing them by reference or by pointer . <nl> <nl> + Padding and Temporary Copies <nl> + mmmmmmmmmmmm - - <nl> + <nl> + The simdjson function ` parser . parse ` reads data from a padded buffer , containing SIMDJSON_PADDING extra bytes added at the end . <nl> + If you are passing a ` padded_string ` to ` parser . parse ` or loading the JSON directly from <nl> + disk ( ` parser . load ` ) , padding is automatically handled . <nl> + When calling ` parser . parse ` on a pointer ( e . g . , ` parser . parse ( mystring , mylength ) ` ) , a temporary copy is made by default with adequate padding and you , again , do not need to be concerned with padding . <nl> + <nl> + Some users may not be able to use our ` padded_string ` class or to load the data directly from disk ( ` parser . load ` ) . They may need to pass data pointers to the library . If these users wish to avoid temporary copies and corresponding temporary memory allocations , they may want to call ` parser . parse ` with the ` realloc_if_needed ` parameter set to false ( e . g . , ` parser . parse ( mystring , mylength , false ) ` ) . In such cases , they need to ensure that there are at least SIMDJSON_PADDING extra bytes at the end that can be safely accessed and read . 
They do not need to initialize the padded bytes to any value in particular . The following example is safe : <nl> + <nl> + <nl> + ` ` ` C + + <nl> + const char * json = R " ( { " key " : " value " } ) " ; <nl> + const size_t json_len = std : : strlen ( json ) ; <nl> + std : : unique_ptr < char [ ] > padded_json_copy { new char [ json_len + SIMDJSON_PADDING ] } ; <nl> + std : : memcpy ( padded_json_copy . get ( ) , json , json_len ) ; <nl> + std : : memset ( padded_json_copy . get ( ) + json_len , 0 , SIMDJSON_PADDING ) ; <nl> + simdjson : : dom : : parser parser ; <nl> + simdjson : : dom : : element element = parser . parse ( padded_json_copy . get ( ) , json_len , false ) ; <nl> + ` ` ` <nl> + <nl> + Setting the ` realloc_if_needed ` parameter to false in this manner may lead to better performance , but it requires that the user take on more responsibility : the simdjson library cannot verify that the input buffer was padded . <nl> \ No newline at end of file <nl> mmm a / include / simdjson / dom / document - inl . h <nl> ppp b / include / simdjson / dom / document - inl . h <nl> inline bool document : : dump_raw_tape ( std : : ostream & os ) const noexcept { <nl> switch ( type ) { <nl> case ' " ' : / / we have a string <nl> os < < " string \ " " ; <nl> - memcpy ( & string_length , string_buf . get ( ) + payload , sizeof ( uint32_t ) ) ; <nl> + std : : memcpy ( & string_length , string_buf . get ( ) + payload , sizeof ( uint32_t ) ) ; <nl> os < < internal : : escape_json_string ( std : : string_view ( <nl> ( const char * ) ( string_buf . get ( ) + payload + sizeof ( uint32_t ) ) , <nl> string_length <nl> inline bool document : : dump_raw_tape ( std : : ostream & os ) const noexcept { <nl> return false ; <nl> } <nl> double answer ; <nl> - memcpy ( & answer , & tape [ + + tape_idx ] , sizeof ( answer ) ) ; <nl> + std : : memcpy ( & answer , & tape [ + + tape_idx ] , sizeof ( answer ) ) ; <nl> os < < answer < < ' \ n ' ; <nl> break ; <nl> case ' n ' : / / we have a null <nl> mmm a / include / simdjson / dom / parsedjson_iterator - inl . h <nl> ppp b / include / simdjson / dom / parsedjson_iterator - inl . h <nl> dom : : parser : : Iterator : : Iterator ( <nl> current_val ( o . current_val ) <nl> { <nl> depth_index = new scopeindex_t [ max_depth + 1 ] ; <nl> - memcpy ( depth_index , o . depth_index , ( depth + 1 ) * sizeof ( depth_index [ 0 ] ) ) ; <nl> + std : : memcpy ( depth_index , o . depth_index , ( depth + 1 ) * sizeof ( depth_index [ 0 ] ) ) ; <nl> } <nl> <nl> dom : : parser : : Iterator : : ~ Iterator ( ) noexcept { <nl> mmm a / include / simdjson / dom / parsedjson_iterator . h <nl> ppp b / include / simdjson / dom / parsedjson_iterator . h <nl> class [ [ deprecated ( " Use the new DOM navigation API instead ( see doc / basics . md ) " ) <nl> / / return the length of the string in bytes <nl> inline uint32_t get_string_length ( ) const { <nl> uint32_t answer ; <nl> - memcpy ( & answer , <nl> + std : : memcpy ( & answer , <nl> reinterpret_cast < const char * > ( doc . string_buf . get ( ) + <nl> ( current_val & internal : : JSON_VALUE_MASK ) ) , <nl> sizeof ( uint32_t ) ) ; <nl> class [ [ deprecated ( " Use the new DOM navigation API instead ( see doc / basics . md ) " ) <nl> / / case of error <nl> } <nl> double answer ; <nl> - memcpy ( & answer , & doc . tape [ location + 1 ] , sizeof ( answer ) ) ; <nl> + std : : memcpy ( & answer , & doc . tape [ location + 1 ] , sizeof ( answer ) ) ; <nl> return answer ; <nl> } <nl> <nl> mmm a / include / simdjson / dom / parser - inl . 
h <nl> ppp b / include / simdjson / dom / parser - inl . h <nl> inline simdjson_result < element > parser : : parse ( const uint8_t * buf , size_t len , bo <nl> if ( realloc_if_needed ) { <nl> tmp_buf . reset ( ( uint8_t * ) internal : : allocate_padded_buffer ( len ) ) ; <nl> if ( tmp_buf . get ( ) = = nullptr ) { return MEMALLOC ; } <nl> - memcpy ( ( void * ) tmp_buf . get ( ) , buf , len ) ; <nl> + std : : memcpy ( ( void * ) tmp_buf . get ( ) , buf , len ) ; <nl> } <nl> _error = implementation - > parse ( realloc_if_needed ? tmp_buf . get ( ) : buf , len , doc ) ; <nl> if ( _error ) { return _error ; } <nl> mmm a / include / simdjson / dom / parser . h <nl> ppp b / include / simdjson / dom / parser . h <nl> class parser { <nl> * The buffer must have at least SIMDJSON_PADDING extra allocated bytes . It does not matter what <nl> * those bytes are initialized to , as long as they are allocated . <nl> * <nl> - * If realloc_if_needed is true , it is assumed that the buffer does * not * have enough padding , <nl> - * and it is copied into an enlarged temporary buffer before parsing . <nl> + * If realloc_if_needed is true ( the default ) , it is assumed that the buffer does * not * have enough padding , <nl> + * and it is copied into an enlarged temporary buffer before parsing . Thus the following is safe : <nl> + * <nl> + * const char * json = R " ( { " key " : " value " } ) " ; <nl> + * const size_t json_len = std : : strlen ( json ) ; <nl> + * simdjson : : dom : : parser parser ; <nl> + * simdjson : : dom : : element element = parser . parse ( json , json_len ) ; <nl> + * <nl> + * If you set realloc_if_needed to false ( e . g . , parser . parse ( json , json_len , false ) ) , <nl> + * you must provide a buffer with at least SIMDJSON_PADDING extra bytes at the end . <nl> + * The benefit of setting realloc_if_needed to false is that you avoid a temporary <nl> + * memory allocation and a copy . <nl> + * <nl> + * The padded bytes may be read . It is not important how you initialize <nl> + * these bytes , though we recommend a sensible default like null character values or spaces . <nl> + * For example , the following low - level code is safe : <nl> + * <nl> + * const char * json = R " ( { " key " : " value " } ) " ; <nl> + * const size_t json_len = std : : strlen ( json ) ; <nl> + * std : : unique_ptr < char [ ] > padded_json_copy { new char [ json_len + SIMDJSON_PADDING ] } ; <nl> + * std : : memcpy ( padded_json_copy . get ( ) , json , json_len ) ; <nl> + * std : : memset ( padded_json_copy . get ( ) + json_len , ' \ 0 ' , SIMDJSON_PADDING ) ; <nl> + * simdjson : : dom : : parser parser ; <nl> + * simdjson : : dom : : element element = parser . parse ( padded_json_copy . get ( ) , json_len , false ) ; <nl> * <nl> * # # # Parser Capacity <nl> * <nl> mmm a / include / simdjson / error . h <nl> ppp b / include / simdjson / error . h <nl> enum error_code { <nl> * <nl> * dom : : parser parser ; <nl> * dom : : element doc ; <nl> - * auto error = parser . parse ( " foo " ) . get ( doc ) ; <nl> + * auto error = parser . parse ( " foo " , 3 ) . get ( doc ) ; <nl> * if ( error ) { printf ( " Error : % s \ n " , error_message ( error ) ) ; } <nl> * <nl> * @ return The error message . <nl> mmm a / include / simdjson / internal / tape_ref - inl . h <nl> ppp b / include / simdjson / internal / tape_ref - inl . h <nl> simdjson_really_inline T tape_ref : : next_tape_value ( ) const noexcept { <nl> / / It is not generally safe . It is safer , and often faster to rely <nl> / / on memcpy . 
Yes , it is uglier , but it is also encapsulated . <nl> T x ; <nl> - memcpy ( & x , & doc - > tape [ json_index + 1 ] , sizeof ( uint64_t ) ) ; <nl> + std : : memcpy ( & x , & doc - > tape [ json_index + 1 ] , sizeof ( uint64_t ) ) ; <nl> return x ; <nl> } <nl> <nl> simdjson_really_inline uint32_t internal : : tape_ref : : get_string_length ( ) const noexcept { <nl> size_t string_buf_index = size_t ( tape_value ( ) ) ; <nl> uint32_t len ; <nl> - memcpy ( & len , & doc - > string_buf [ string_buf_index ] , sizeof ( len ) ) ; <nl> + std : : memcpy ( & len , & doc - > string_buf [ string_buf_index ] , sizeof ( len ) ) ; <nl> return len ; <nl> } <nl> <nl> mmm a / include / simdjson / padded_string - inl . h <nl> ppp b / include / simdjson / padded_string - inl . h <nl> inline char * allocate_padded_buffer ( size_t length ) noexcept { <nl> / / We write zeroes in the padded region to avoid having uninitized <nl> / / garbage . If nothing else , garbage getting read might trigger a <nl> / / warning in a memory checking . <nl> - memset ( padded_buffer + length , 0 , totalpaddedlength - length ) ; <nl> + std : : memset ( padded_buffer + length , 0 , totalpaddedlength - length ) ; <nl> return padded_buffer ; <nl> } / / allocate_padded_buffer ( ) <nl> <nl> inline padded_string : : padded_string ( size_t length ) noexcept <nl> inline padded_string : : padded_string ( const char * data , size_t length ) noexcept <nl> : viable_size ( length ) , data_ptr ( internal : : allocate_padded_buffer ( length ) ) { <nl> if ( ( data ! = nullptr ) and ( data_ptr ! = nullptr ) ) { <nl> - memcpy ( data_ptr , data , length ) ; <nl> + std : : memcpy ( data_ptr , data , length ) ; <nl> data_ptr [ length ] = ' \ 0 ' ; / / easier when you need a c_str <nl> } <nl> } <nl> inline padded_string : : padded_string ( const char * data , size_t length ) noexcept <nl> inline padded_string : : padded_string ( const std : : string & str_ ) noexcept <nl> : viable_size ( str_ . size ( ) ) , data_ptr ( internal : : allocate_padded_buffer ( str_ . size ( ) ) ) { <nl> if ( data_ptr ! = nullptr ) { <nl> - memcpy ( data_ptr , str_ . data ( ) , str_ . size ( ) ) ; <nl> + std : : memcpy ( data_ptr , str_ . data ( ) , str_ . size ( ) ) ; <nl> data_ptr [ str_ . size ( ) ] = ' \ 0 ' ; / / easier when you need a c_str <nl> } <nl> } <nl> inline padded_string : : padded_string ( const std : : string & str_ ) noexcept <nl> inline padded_string : : padded_string ( std : : string_view sv_ ) noexcept <nl> : viable_size ( sv_ . size ( ) ) , data_ptr ( internal : : allocate_padded_buffer ( sv_ . size ( ) ) ) { <nl> if ( data_ptr ! = nullptr ) { <nl> - memcpy ( data_ptr , sv_ . data ( ) , sv_ . size ( ) ) ; <nl> + std : : memcpy ( data_ptr , sv_ . data ( ) , sv_ . size ( ) ) ; <nl> data_ptr [ sv_ . size ( ) ] = ' \ 0 ' ; / / easier when you need a c_str <nl> } <nl> } <nl> mmm a / src / generic / stage1 / buf_block_reader . h <nl> ppp b / src / generic / stage1 / buf_block_reader . h <nl> simdjson_really_inline const uint8_t * buf_block_reader < STEP_SIZE > : : full_block ( ) <nl> template < size_t STEP_SIZE > <nl> simdjson_really_inline size_t buf_block_reader < STEP_SIZE > : : get_remainder ( uint8_t * dst ) const { <nl> if ( len = = idx ) { return 0 ; } / / memcpy ( dst , null , 0 ) will trigger an error with some sanitizers <nl> - memset ( dst , 0x20 , STEP_SIZE ) ; / / memset STEP_SIZE because it ' s more efficient to write out 8 or 16 bytes at once . 
<nl> - memcpy ( dst , buf + idx , len - idx ) ; <nl> + std : : memset ( dst , 0x20 , STEP_SIZE ) ; / / std : : memset STEP_SIZE because it ' s more efficient to write out 8 or 16 bytes at once . <nl> + std : : memcpy ( dst , buf + idx , len - idx ) ; <nl> return len - idx ; <nl> } <nl> <nl> mmm a / src / generic / stage2 / tape_builder . h <nl> ppp b / src / generic / stage2 / tape_builder . h <nl> SIMDJSON_WARN_UNUSED simdjson_really_inline error_code tape_builder : : visit_root_ <nl> / / <nl> uint8_t * copy = static_cast < uint8_t * > ( malloc ( iter . remaining_len ( ) + SIMDJSON_PADDING ) ) ; <nl> if ( copy = = nullptr ) { return MEMALLOC ; } <nl> - memcpy ( copy , value , iter . remaining_len ( ) ) ; <nl> - memset ( copy + iter . remaining_len ( ) , ' ' , SIMDJSON_PADDING ) ; <nl> + std : : memcpy ( copy , value , iter . remaining_len ( ) ) ; <nl> + std : : memset ( copy + iter . remaining_len ( ) , ' ' , SIMDJSON_PADDING ) ; <nl> error_code error = visit_number ( iter , copy ) ; <nl> free ( copy ) ; <nl> return error ; <nl> mmm a / tests / basictests . cpp <nl> ppp b / tests / basictests . cpp <nl> namespace parse_api_tests { <nl> uint64_t count = 0 ; <nl> constexpr const int BATCH_SIZE = 128 ; <nl> uint8_t empty_batches_ndjson [ BATCH_SIZE * 16 + SIMDJSON_PADDING ] ; <nl> - memset ( & empty_batches_ndjson [ 0 ] , ' ' , BATCH_SIZE * 16 + SIMDJSON_PADDING ) ; <nl> - memcpy ( & empty_batches_ndjson [ BATCH_SIZE * 3 + 2 ] , " 1 " , 1 ) ; <nl> - memcpy ( & empty_batches_ndjson [ BATCH_SIZE * 10 + 4 ] , " 2 " , 1 ) ; <nl> - memcpy ( & empty_batches_ndjson [ BATCH_SIZE * 11 + 6 ] , " 3 " , 1 ) ; <nl> + std : : memset ( & empty_batches_ndjson [ 0 ] , ' ' , BATCH_SIZE * 16 + SIMDJSON_PADDING ) ; <nl> + std : : memcpy ( & empty_batches_ndjson [ BATCH_SIZE * 3 + 2 ] , " 1 " , 1 ) ; <nl> + std : : memcpy ( & empty_batches_ndjson [ BATCH_SIZE * 10 + 4 ] , " 2 " , 1 ) ; <nl> + std : : memcpy ( & empty_batches_ndjson [ BATCH_SIZE * 11 + 6 ] , " 3 " , 1 ) ; <nl> simdjson : : dom : : document_stream stream ; <nl> ASSERT_SUCCESS ( parser . parse_many ( empty_batches_ndjson , BATCH_SIZE * 16 ) . get ( stream ) ) ; <nl> for ( auto doc : stream ) { <nl> mmm a / tests / errortests . cpp <nl> ppp b / tests / errortests . 
cpp <nl> namespace adversarial { <nl> bool number_overrun_at_root ( ) { <nl> TEST_START ( ) ; <nl> constexpr const char * json = " 1 " PADDING_FILLED_WITH_NUMBERS " , " ; <nl> - constexpr size_t len = 1 ; / / strlen ( " 1 " ) ; <nl> + constexpr size_t len = 1 ; / / std : : strlen ( " 1 " ) ; <nl> <nl> dom : : parser parser ; <nl> uint64_t foo ; <nl> namespace adversarial { <nl> bool number_overrun_in_array ( ) { <nl> TEST_START ( ) ; <nl> constexpr const char * json = " [ 1 " PADDING_FILLED_WITH_NUMBERS " ] " ; <nl> - constexpr size_t len = 2 ; / / strlen ( " [ 1 " ) ; <nl> + constexpr size_t len = 2 ; / / std : : strlen ( " [ 1 " ) ; <nl> <nl> dom : : parser parser ; <nl> uint64_t foo ; <nl> namespace adversarial { <nl> bool number_overrun_in_object ( ) { <nl> TEST_START ( ) ; <nl> constexpr const char * json = " { \ " key \ " : 1 " PADDING_FILLED_WITH_NUMBERS " } " ; <nl> - constexpr size_t len = 8 ; / / strlen ( " { \ " key \ " : 1 " ) ; <nl> + constexpr size_t len = 8 ; / / std : : strlen ( " { \ " key \ " : 1 " ) ; <nl> <nl> dom : : parser parser ; <nl> uint64_t foo ; <nl> namespace adversarial { <nl> TEST_SUCCEED ( ) ; <nl> } <nl> bool run ( ) { <nl> - static_assert ( 33 > SIMDJSON_PADDING , " corruption test doesn ' t have enough padding " ) ; / / 33 = strlen ( PADDING_FILLED_WITH_NUMBERS ) <nl> + static_assert ( 33 > SIMDJSON_PADDING , " corruption test doesn ' t have enough padding " ) ; / / 33 = std : : strlen ( PADDING_FILLED_WITH_NUMBERS ) <nl> return true <nl> & & number_overrun_at_root ( ) <nl> & & number_overrun_in_array ( ) <nl> mmm a / tests / jsoncheck . cpp <nl> ppp b / tests / jsoncheck . cpp <nl> static bool has_extension ( const char * filename , const char * extension ) { <nl> } <nl> <nl> bool starts_with ( const char * pre , const char * str ) { <nl> - size_t len_pre = strlen ( pre ) , len_str = strlen ( str ) ; <nl> + size_t len_pre = std : : strlen ( pre ) , len_str = std : : strlen ( str ) ; <nl> return len_str < len_pre ? false : strncmp ( pre , str , len_pre ) = = 0 ; <nl> } <nl> <nl> bool contains ( const char * pre , const char * str ) { <nl> bool validate ( const char * dirname ) { <nl> bool everything_fine = true ; <nl> const char * extension = " . json " ; <nl> - size_t dirlen = strlen ( dirname ) ; <nl> + size_t dirlen = std : : strlen ( dirname ) ; <nl> struct dirent * * entry_list ; <nl> int c = scandir ( dirname , & entry_list , nullptr , alphasort ) ; <nl> if ( c < 0 ) { <nl> bool validate ( const char * dirname ) { <nl> if ( has_extension ( name , extension ) ) { <nl> printf ( " validating : file % s " , name ) ; <nl> fflush ( nullptr ) ; <nl> - size_t namelen = strlen ( name ) ; <nl> + size_t namelen = std : : strlen ( name ) ; <nl> size_t fullpathlen = dirlen + 1 + namelen + 1 ; <nl> char * fullpath = static_cast < char * > ( malloc ( fullpathlen ) ) ; <nl> snprintf ( fullpath , fullpathlen , " % s % s % s " , dirname , needsep ? " / " : " " , name ) ; <nl> mmm a / tests / minefieldcheck . cpp <nl> ppp b / tests / minefieldcheck . cpp <nl> static bool has_extension ( const char * filename , const char * extension ) { <nl> } <nl> <nl> bool starts_with ( const char * pre , const char * str ) { <nl> - size_t len_pre = strlen ( pre ) , len_str = strlen ( str ) ; <nl> + size_t len_pre = std : : strlen ( pre ) , len_str = std : : strlen ( str ) ; <nl> return len_str < len_pre ? 
false : strncmp ( pre , str , len_pre ) = = 0 ; <nl> } <nl> <nl> bool contains ( const char * pre , const char * str ) { <nl> bool validate_minefield ( const char * dirname ) { <nl> bool everything_fine = true ; <nl> const char * extension = " . json " ; <nl> - size_t dirlen = strlen ( dirname ) ; <nl> + size_t dirlen = std : : strlen ( dirname ) ; <nl> struct dirent * * entry_list ; <nl> int c = scandir ( dirname , & entry_list , nullptr , alphasort ) ; <nl> if ( c < 0 ) { <nl> bool validate_minefield ( const char * dirname ) { <nl> if ( has_extension ( name , extension ) ) { <nl> printf ( " validating : file % s " , name ) ; <nl> fflush ( nullptr ) ; <nl> - size_t namelen = strlen ( name ) ; <nl> + size_t namelen = std : : strlen ( name ) ; <nl> size_t fullpathlen = dirlen + 1 + namelen + 1 ; <nl> char * fullpath = static_cast < char * > ( malloc ( fullpathlen ) ) ; <nl> snprintf ( fullpath , fullpathlen , " % s % s % s " , dirname , needsep ? " / " : " " , name ) ; <nl> mmm a / tests / numberparsingcheck . cpp <nl> ppp b / tests / numberparsingcheck . cpp <nl> size_t invalid_count ; <nl> const char * really_bad [ ] = { " 013 } " , " 0x14 " , " 0e ] " , " 0e + ] " , " 0e + - 1 ] " } ; <nl> <nl> bool starts_with ( const char * pre , const char * str ) { <nl> - size_t lenpre = strlen ( pre ) ; <nl> + size_t lenpre = std : : strlen ( pre ) ; <nl> return strncmp ( pre , str , lenpre ) = = 0 ; <nl> } <nl> <nl> bool validate ( const char * dirname ) { <nl> parse_error = 0 ; <nl> size_t total_count = 0 ; <nl> const char * extension = " . json " ; <nl> - size_t dirlen = strlen ( dirname ) ; <nl> + size_t dirlen = std : : strlen ( dirname ) ; <nl> struct dirent * * entry_list ; <nl> int c = scandir ( dirname , & entry_list , 0 , alphasort ) ; <nl> if ( c < 0 ) { <nl> bool validate ( const char * dirname ) { <nl> for ( int i = 0 ; i < c ; i + + ) { <nl> const char * name = entry_list [ i ] - > d_name ; <nl> if ( has_extension ( name , extension ) ) { <nl> - size_t filelen = strlen ( name ) ; <nl> + size_t filelen = std : : strlen ( name ) ; <nl> fullpath = ( char * ) malloc ( dirlen + filelen + 1 + 1 ) ; <nl> strcpy ( fullpath , dirname ) ; <nl> if ( needsep ) { <nl> mmm a / tests / parse_many_test . cpp <nl> ppp b / tests / parse_many_test . cpp <nl> static bool has_extension ( const char * filename , const char * extension ) { <nl> } <nl> <nl> bool starts_with ( const char * pre , const char * str ) { <nl> - size_t len_pre = strlen ( pre ) , len_str = strlen ( str ) ; <nl> + size_t len_pre = std : : strlen ( pre ) , len_str = std : : strlen ( str ) ; <nl> return len_str < len_pre ? false : strncmp ( pre , str , len_pre ) = = 0 ; <nl> } <nl> <nl> bool validate ( const char * dirname ) { <nl> const char * extension2 = " . jsonl " ; <nl> const char * extension3 = " . json " ; / / bad json files shoud fail <nl> <nl> - size_t dirlen = strlen ( dirname ) ; <nl> + size_t dirlen = std : : strlen ( dirname ) ; <nl> struct dirent * * entry_list ; <nl> int c = scandir ( dirname , & entry_list , nullptr , alphasort ) ; <nl> if ( c < 0 ) { <nl> bool validate ( const char * dirname ) { <nl> / * Finding the file path * / <nl> printf ( " validating : file % s " , name ) ; <nl> fflush ( nullptr ) ; <nl> - size_t namelen = strlen ( name ) ; <nl> + size_t namelen = std : : strlen ( name ) ; <nl> size_t fullpathlen = dirlen + 1 + namelen + 1 ; <nl> char * fullpath = static_cast < char * > ( malloc ( fullpathlen ) ) ; <nl> snprintf ( fullpath , fullpathlen , " % s % s % s " , dirname , needsep ? 
" / " : " " , name ) ; <nl> mmm a / tests / pointercheck . cpp <nl> ppp b / tests / pointercheck . cpp <nl> bool issue1142 ( ) { <nl> ASSERT_EQUAL ( std : : string ( R " ( [ ] ) " ) , simdjson : : minify ( example3 ) ) ; <nl> <nl> const char * input_array = " [ ] " ; <nl> - size_t input_length = strlen ( input_array ) ; <nl> + size_t input_length = std : : strlen ( input_array ) ; <nl> auto element4 = parser . parse ( input_array , input_length ) . at_pointer ( " " ) ; ; <nl> ASSERT_EQUAL ( std : : string ( R " ( [ ] ) " ) , simdjson : : minify ( element4 ) ) ; <nl> <nl> mmm a / tests / readme_examples . cpp <nl> ppp b / tests / readme_examples . cpp <nl> SIMDJSON_POP_DISABLE_WARNINGS <nl> <nl> void minify ( ) { <nl> const char * some_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t length = strlen ( some_string ) ; <nl> + size_t length = std : : strlen ( some_string ) ; <nl> std : : unique_ptr < char [ ] > buffer { new char [ length ] } ; <nl> size_t new_length { } ; <nl> auto error = simdjson : : minify ( some_string , length , buffer . get ( ) , new_length ) ; <nl> void minify ( ) { <nl> abort ( ) ; <nl> } else { <nl> const char * expected_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t expected_length = strlen ( expected_string ) ; <nl> + size_t expected_length = std : : strlen ( expected_string ) ; <nl> if ( expected_length ! = new_length ) { <nl> std : : cerr < < " mismatched length ( error ) " < < std : : endl ; <nl> abort ( ) ; <nl> void minify ( ) { <nl> <nl> bool is_correct ( ) { <nl> const char * some_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t length = strlen ( some_string ) ; <nl> + size_t length = std : : strlen ( some_string ) ; <nl> bool is_ok = simdjson : : validate_utf8 ( some_string , length ) ; <nl> return is_ok ; <nl> } <nl> <nl> bool is_correct_string_view ( ) { <nl> const char * some_string = " [ 1 , 2 , 3 , 4 ] " ; <nl> - size_t length = strlen ( some_string ) ; <nl> + size_t length = std : : strlen ( some_string ) ; <nl> std : : string_view v ( some_string , length ) ; <nl> bool is_ok = simdjson : : validate_utf8 ( v ) ; <nl> return is_ok ; <nl> bool is_correct_string ( ) { <nl> return is_ok ; <nl> } <nl> <nl> + void parse_documentation ( ) { <nl> + const char * json = R " ( { " key " : " value " } ) " ; <nl> + const size_t json_len = std : : strlen ( json ) ; <nl> + simdjson : : dom : : parser parser ; <nl> + simdjson : : dom : : element element = parser . parse ( json , json_len ) ; <nl> + / / Next line is to avoid unused warning . <nl> + ( void ) element ; <nl> + } <nl> + <nl> + <nl> + void parse_documentation_lowlevel ( ) { <nl> + / / Such low - level code is not generally recommended . Please <nl> + / / see parse_documentation ( ) instead . <nl> + / / Motivation : https : / / github . com / simdjson / simdjson / issues / 1175 <nl> + const char * json = R " ( { " key " : " value " } ) " ; <nl> + const size_t json_len = std : : strlen ( json ) ; <nl> + std : : unique_ptr < char [ ] > padded_json_copy { new char [ json_len + SIMDJSON_PADDING ] } ; <nl> + std : : memcpy ( padded_json_copy . get ( ) , json , json_len ) ; <nl> + std : : memset ( padded_json_copy . get ( ) + json_len , ' \ 0 ' , SIMDJSON_PADDING ) ; <nl> + simdjson : : dom : : parser parser ; <nl> + simdjson : : dom : : element element = parser . parse ( padded_json_copy . get ( ) , json_len , false ) ; <nl> + / / Next line is to avoid unused warning . 
<nl> + ( void ) element ; <nl> + } <nl> + <nl> int main ( ) { <nl> basics_dom_1 ( ) ; <nl> basics_dom_2 ( ) ; <nl> mmm a / tests / stringparsingcheck . cpp <nl> ppp b / tests / stringparsingcheck . cpp <nl> static bool has_extension ( const char * filename , const char * extension ) { <nl> } <nl> <nl> bool starts_with ( const char * pre , const char * str ) { <nl> - size_t lenpre = strlen ( pre ) , lenstr = strlen ( str ) ; <nl> + size_t lenpre = std : : strlen ( pre ) , lenstr = std : : strlen ( str ) ; <nl> return lenstr < lenpre ? false : strncmp ( pre , str , lenpre ) = = 0 ; <nl> } <nl> <nl> bool validate ( const char * dirname ) { <nl> size_t total_strings = 0 ; <nl> probable_bug = false ; <nl> const char * extension = " . json " ; <nl> - size_t dirlen = strlen ( dirname ) ; <nl> + size_t dirlen = std : : strlen ( dirname ) ; <nl> struct dirent * * entry_list ; <nl> int c = scandir ( dirname , & entry_list , 0 , alphasort ) ; <nl> if ( c < 0 ) { <nl> bool validate ( const char * dirname ) { <nl> for ( int i = 0 ; i < c ; i + + ) { <nl> const char * name = entry_list [ i ] - > d_name ; <nl> if ( has_extension ( name , extension ) ) { <nl> - size_t filelen = strlen ( name ) ; <nl> + size_t filelen = std : : strlen ( name ) ; <nl> fullpath = ( char * ) malloc ( dirlen + filelen + 1 + 1 ) ; <nl> strcpy ( fullpath , dirname ) ; <nl> if ( needsep ) { <nl> mmm a / tests / unicode_tests . cpp <nl> ppp b / tests / unicode_tests . cpp <nl> void test ( ) { <nl> " \ x91 \ x85 \ x95 \ x9e " , <nl> " \ x6c \ x02 \ x8e \ x18 " } ; <nl> for ( size_t i = 0 ; i < 8 ; i + + ) { <nl> - size_t len = strlen ( goodsequences [ i ] ) ; <nl> + size_t len = std : : strlen ( goodsequences [ i ] ) ; <nl> if ( ! simdjson : : validate_utf8 ( goodsequences [ i ] , len ) ) { <nl> printf ( " bug goodsequences [ % zu ] \ n " , i ) ; <nl> abort ( ) ; <nl> } <nl> } <nl> for ( size_t i = 0 ; i < 26 ; i + + ) { <nl> - size_t len = strlen ( badsequences [ i ] ) ; <nl> + size_t len = std : : strlen ( badsequences [ i ] ) ; <nl> if ( simdjson : : validate_utf8 ( badsequences [ i ] , len ) ) { <nl> printf ( " bug lookup2 badsequences [ % zu ] \ n " , i ) ; <nl> abort ( ) ; <nl>
Improve documentation on padding
simdjson/simdjson
f410213003b378d6f7edf17a9a44c94e75bc36ba
2020-09-23T07:07:14Z
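The low-level readme example added above encodes the parser's padding contract and is worth isolating. A minimal sketch mirroring parse_documentation_lowlevel from the diff: when realloc_if_needed is false, the caller must supply SIMDJSON_PADDING writable bytes past the end of the document.

#include <cstring>
#include <memory>
#include "simdjson.h"

int main() {
  const char *json = R"({"key":"value"})";
  const size_t json_len = std::strlen(json);
  // Copy into a buffer with SIMDJSON_PADDING spare bytes: the parser may
  // read, but never depends on, memory just past the document.
  std::unique_ptr<char[]> padded{new char[json_len + simdjson::SIMDJSON_PADDING]};
  std::memcpy(padded.get(), json, json_len);
  std::memset(padded.get() + json_len, '\0', simdjson::SIMDJSON_PADDING);
  simdjson::dom::parser parser;
  // false = do not reallocate and copy internally; we promise padding exists.
  simdjson::dom::element element = parser.parse(padded.get(), json_len, false);
  (void)element;  // avoid an unused-variable warning, as in the diff
  return 0;
}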
mmm a / tensorflow / lite / core / api / flatbuffer_conversions . cc <nl> ppp b / tensorflow / lite / core / api / flatbuffer_conversions . cc <nl> TfLiteStatus ParseRsqrt ( const Operator * , ErrorReporter * , BuiltinDataAllocator * , <nl> return kTfLiteOk ; <nl> } <nl> <nl> - / / We have this parse function instead of directly returning kTfLiteOk from the <nl> - / / switch - case in ParseOpData because this function is used as part of the <nl> - / / selective registration for the OpResolver implementation in micro . <nl> - TfLiteStatus ParseShape ( const Operator * , ErrorReporter * error_reporter , <nl> + TfLiteStatus ParseShape ( const Operator * op , ErrorReporter * error_reporter , <nl> BuiltinDataAllocator * allocator , void * * builtin_data ) { <nl> SafeBuiltinDataAllocator safe_allocator ( allocator ) ; <nl> std : : unique_ptr < TfLiteShapeParams , <nl> SafeBuiltinDataAllocator : : BuiltinDataDeleter > <nl> params = safe_allocator . Allocate < TfLiteShapeParams > ( ) ; <nl> TF_LITE_ENSURE ( error_reporter , params ! = nullptr ) ; <nl> + <nl> + const ShapeOptions * schema_params = op - > builtin_options_as_ShapeOptions ( ) ; <nl> + <nl> + if ( schema_params ! = nullptr ) { <nl> + TF_LITE_ENSURE_STATUS ( ConvertTensorType ( schema_params - > out_type ( ) , <nl> + & params - > out_type , error_reporter ) ) ; <nl> + } else { <nl> + / / TODO ( b / 157480169 ) : We should either return kTfLiteError or fill in some <nl> + / / reasonable defaults in the params struct . We are not doing so until we <nl> + / / better understand the ramifications of changing the legacy behavior . <nl> + } <nl> + <nl> * builtin_data = params . release ( ) ; <nl> return kTfLiteOk ; <nl> } <nl>
fixed flatbuffer_conversions . cc
tensorflow/tensorflow
af5a74df40bfe47a85544ae02d8853f3596a86e4
2020-09-21T17:15:04Z
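The fix follows a recurring flatbuffer-parsing pattern: the per-op options table is optional in the schema, so the parser reads it through the builtin_options_as_* accessor, null-checks it, and decides what absence means. A self-contained sketch of that shape; every type below is a stand-in, not the real TFLite API.

#include <cstdio>

// Stand-ins for the flatbuffer-generated and TFLite types.
enum class TensorType { kInt32, kInt64 };
struct ShapeOptions { TensorType out_type; };
struct Operator {
  const ShapeOptions *options;  // may legitimately be null in a model
  const ShapeOptions *builtin_options_as_ShapeOptions() const { return options; }
};
struct TfLiteShapeParams { TensorType out_type = TensorType::kInt32; };

bool ParseShape(const Operator &op, TfLiteShapeParams *params) {
  const ShapeOptions *schema_params = op.builtin_options_as_ShapeOptions();
  if (schema_params != nullptr) {
    // Options present: convert the declared output type into the params.
    params->out_type = schema_params->out_type;
  } else {
    // Options absent (legacy models): keep a conservative default rather
    // than failing outright, mirroring the TODO in the commit above.
  }
  return true;
}

int main() {
  Operator op{nullptr};
  TfLiteShapeParams params;
  ParseShape(op, &params);
  std::printf("out_type=%d\n", static_cast<int>(params.out_type));
  return 0;
}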
mmm a / js / common / tests / shell / shell - database . js <nl> ppp b / js / common / tests / shell / shell - database . js <nl> function DatabaseSuite ( ) { <nl> <nl> assertTrue ( internal . db . _createDatabase ( " UnitTestsDatabase0 " , { } , users ) ) ; <nl> <nl> - internal . db . _useDatabase ( " UnitTestsDatabase0 " ) ; <nl> var userManager = require ( " @ arangodb / users " ) ; <nl> var user = userManager . document ( " admin " ) ; <nl> <nl> function DatabaseSuite ( ) { <nl> assertFalse ( user . active ) ; <nl> assertEqual ( " f " , user . extra . gender ) ; <nl> <nl> - internal . db . _useDatabase ( " _system " ) ; <nl> - <nl> assertTrue ( internal . db . _dropDatabase ( " UnitTestsDatabase0 " ) ) ; <nl> } , <nl> <nl> function DatabaseSuite ( ) { <nl> ] ; <nl> assertTrue ( internal . db . _createDatabase ( " UnitTestsDatabase0 " , { } , users ) ) ; <nl> <nl> - internal . db . _useDatabase ( " UnitTestsDatabase0 " ) ; <nl> var userManager = require ( " @ arangodb / users " ) ; <nl> var user = userManager . document ( " admin " ) ; <nl> assertEqual ( " admin " , user . user ) ; <nl> assertTrue ( user . active ) ; <nl> assertEqual ( " m " , user . extra . gender ) ; <nl> <nl> - internal . db . _useDatabase ( " _system " ) ; <nl> - <nl> assertTrue ( internal . db . _dropDatabase ( " UnitTestsDatabase0 " ) ) ; <nl> } , <nl> <nl>
fixed test
arangodb/arangodb
ab5234ec9f4fac291257581cecb4aaea68b0db4a
2016-06-03T13:28:35Z
mmm a / hphp / runtime / vm / jit / translator - x64 . cpp <nl> ppp b / hphp / runtime / vm / jit / translator - x64 . cpp <nl> TranslatorX64 : : enterTC ( TCA start , void * data ) { <nl> [ ] ( vixl : : Simulator * s ) { <nl> if ( tl_regState = = VMRegState : : DIRTY ) { <nl> / / This is a pseudo - copy of the logic in sync_regstate . <nl> - <nl> - ActRec fakeAr ; <nl> - fakeAr . m_savedRbp = s - > xreg ( JIT : : ARM : : rVmFp . code ( ) ) ; <nl> - fakeAr . m_savedRip = reinterpret_cast < uint64_t > ( s - > pc ( ) ) ; <nl> - <nl> - tx64 - > fixupMap ( ) . fixupWork ( g_vmContext , & fakeAr ) ; <nl> + tx64 - > fixupMap ( ) . fixupWorkSimulated ( g_vmContext ) ; <nl> tl_regState = VMRegState : : CLEAN ; <nl> } <nl> } <nl> mmm a / hphp / test / run <nl> ppp b / hphp / test / run <nl> function get_options ( $ argv ) { <nl> ' args : ' = > ' a : ' , <nl> ' log ' = > ' l ' , <nl> ' failure - file : ' = > ' ' , <nl> + ' arm ' = > ' ' , <nl> ) ; <nl> $ options = array ( ) ; <nl> $ files = array ( ) ; <nl> function hhvm_cmd ( $ options , $ test , $ test_run = null ) { <nl> mode_cmd ( $ options ) , <nl> ' - vEval . EnableArgsInBacktraces = true ' , <nl> read_file ( find_test_ext ( $ test , ' opts ' ) ) , <nl> + isset ( $ options [ ' arm ' ] ) ? ' - vEval . SimulateARM = 1 ' : ' ' , <nl> extra_args ( $ options ) , <nl> ' - vResourceLimit . CoreFileSize = 0 ' , <nl> ' - - file ' , <nl>
Fix ARM exception crash , add - - arm option to test / run
facebook/hhvm
c7d175ec1de074e94f13901252b2c200b1322e38
2013-11-27T02:23:36Z
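The crash fix replaces a hand-built fake ActRec with a fixup path that consults the simulated machine state directly. The hook shape, a callback run after simulation that resyncs only when the VM registers are marked dirty, looks roughly like this sketch; all types here are stand-ins for vixl::Simulator and HHVM's FixupMap, not the real interfaces.

#include <cstdint>
#include <cstdio>
#include <functional>

enum class VMRegState { CLEAN, DIRTY };
thread_local VMRegState tl_regState = VMRegState::DIRTY;

// Stand-in for the simulator: it exposes its own register file, so fixup
// must read simulated state, not the host's.
struct Simulator {
  std::uint64_t fp = 0xabcd;
  std::function<void(Simulator *)> post_run_hook;
  void run() { if (post_run_hook) post_run_hook(this); }
};

void fixupWorkSimulated(Simulator *s) {
  // The real work (walking the simulated stack) lives in HHVM's FixupMap.
  std::printf("fixup from simulated fp=%llx\n",
              static_cast<unsigned long long>(s->fp));
}

int main() {
  Simulator sim;
  sim.post_run_hook = [](Simulator *s) {
    if (tl_regState == VMRegState::DIRTY) {
      fixupWorkSimulated(s);  // no fake ActRec built from host registers
      tl_regState = VMRegState::CLEAN;
    }
  };
  sim.run();
  return 0;
}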
mmm a / test / Profiler / coverage_relative_path . swift <nl> ppp b / test / Profiler / coverage_relative_path . swift <nl> <nl> / / clean directory , put the source there , and cd into it . <nl> / / RUN : rm - rf % t <nl> / / RUN : mkdir - p % t / foo / bar / baz <nl> - / / RUN : cp % s % t / foo / bar / baz / coverage_relative_path . swift <nl> + / / RUN : echo " func coverage ( ) { } " > % t / foo / bar / baz / coverage_relative_path . swift <nl> / / RUN : cd % t / foo / bar <nl> <nl> - / / RUN : % target - swift - frontend - profile - generate - profile - coverage - mapping - Xllvm - enable - name - compression = false - emit - ir baz / coverage_relative_path . swift | % FileCheck - check - prefix = ABSOLUTE % s <nl> + / / RUN : % target - swift - frontend - profile - generate - profile - coverage - mapping - Xllvm - enable - name - compression = false - emit - ir % / t / foo / bar / baz / coverage_relative_path . swift | % FileCheck - check - prefix = ABSOLUTE % s <nl> / / <nl> / / ABSOLUTE : @ __llvm_coverage_mapping = { { . * " \ \ 01 . * foo . * bar . * baz . * coverage_relative_path \ . swift } } <nl> <nl> - / / RUN : % target - swift - frontend - profile - generate - profile - coverage - mapping - Xllvm - enable - name - compression = false - coverage - prefix - map $ PWD = . - emit - ir baz / coverage_relative_path . swift | % FileCheck - check - prefix = RELATIVE % s <nl> + / / RUN : % target - swift - frontend - profile - generate - profile - coverage - mapping - Xllvm - enable - name - compression = false - coverage - prefix - map % / t / foo / bar = . - emit - ir % / t / foo / bar / baz / coverage_relative_path . swift | % FileCheck - check - prefix = RELATIVE % s <nl> / / <nl> / / RELATIVE : @ __llvm_coverage_mapping = { { . * " \ \ 01 [ ^ / ] * } } . { { / | \ \ } } baz { { . * coverage_relative_path \ . swift } } <nl> <nl>
Merge remote - tracking branch ' origin / master ' into master - next
apple/swift
09ba761f7fe6078bebed86918e930d06e61b2251
2020-06-17T21:18:50Z
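The test now passes the frontend an explicit pair (-coverage-prefix-map %/t/foo/bar=.) instead of relying on $PWD. An old=new mapping applied to an emitted path is a plain prefix substitution; the helper below is hypothetical, written only to pin down that semantics.

#include <cassert>
#include <string>

// Hypothetical helper: apply one "old=new" coverage-prefix-map entry.
std::string remap_prefix(const std::string &path,
                         const std::string &old_prefix,
                         const std::string &new_prefix) {
  if (path.compare(0, old_prefix.size(), old_prefix) == 0)
    return new_prefix + path.substr(old_prefix.size());
  return path;  // no match: the path is left untouched
}

int main() {
  assert(remap_prefix("/t/foo/bar/baz/coverage_relative_path.swift",
                      "/t/foo/bar", ".")
         == "./baz/coverage_relative_path.swift");
  return 0;
}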
mmm a / extra_fonts / README . txt <nl> ppp b / extra_fonts / README . txt <nl> <nl> Typefaces for source code beautification <nl> https : / / github . com / chrissimpkins / codeface <nl> + <nl> + Programming fonts <nl> + http : / / s9w . github . io / font_compare / <nl> <nl> Proggy Programming Fonts <nl> http : / / upperbounds . net <nl>
Links
ocornut/imgui
4630815fb068a800cb46989ecb1a221a8dea9526
2015-08-11T20:45:42Z
mmm a / src / runtime / vm / translator / asm - x64 . h <nl> ppp b / src / runtime / vm / translator / asm - x64 . h <nl> struct X64Assembler { <nl> ALWAYS_INLINE { <nl> ASSERT ( ( op . flags & IF_JCC ) = = 0 ) ; <nl> ssize_t delta = imm - ( ( ssize_t ) code . frontier + 2 ) ; <nl> - ASSERT ( imm = = 0 | | deltaFits ( delta , sz : : byte ) ) ; <nl> / / Emit opcode and 8 - bit immediate <nl> byte ( 0xEB ) ; <nl> - byte ( delta ) ; <nl> + byte ( safe_cast < int8_t > ( delta ) ) ; <nl> } <nl> <nl> void emitCJ8 ( X64Instr op , int jcond , ssize_t imm ) <nl> struct X64Assembler { <nl> / / this is for jcc only <nl> ASSERT ( op . flags & IF_JCC ) ; <nl> ssize_t delta = imm - ( ( ssize_t ) code . frontier + 2 ) ; <nl> - ASSERT ( imm = = 0 | | deltaFits ( delta , sz : : byte ) ) ; <nl> / / Emit opcode <nl> byte ( jcond | 0x70 ) ; <nl> / / Emit 8 - bit offset <nl> - byte ( delta ) ; <nl> + byte ( safe_cast < int8_t > ( delta ) ) ; <nl> } <nl> <nl> void emitJ32 ( X64Instr op , ssize_t imm ) ALWAYS_INLINE { <nl> / / call and jmp are supported , jcc is not supported <nl> ASSERT ( ( op . flags & IF_JCC ) = = 0 ) ; <nl> - ssize_t delta = imm - ( ( ssize_t ) code . frontier + 5 ) ; <nl> - ASSERT ( imm = = 0 | | deltaFits ( delta , sz : : dword ) ) ; <nl> + int32_t delta = safe_cast < int32_t > ( imm - ( ( ssize_t ) code . frontier + 5 ) ) ; <nl> uint8_t * bdelta = ( uint8_t * ) & delta ; <nl> uint8_t instr [ ] = { op . table [ 2 ] , <nl> bdelta [ 0 ] , bdelta [ 1 ] , bdelta [ 2 ] , bdelta [ 3 ] } ; <nl> struct X64Assembler { <nl> ALWAYS_INLINE { <nl> / / jcc is supported , call and jmp are not supported <nl> ASSERT ( op . flags & IF_JCC ) ; <nl> - ssize_t delta = imm - ( ( ssize_t ) code . frontier + 6 ) ; <nl> - ASSERT ( imm = = 0 | | deltaFits ( delta , sz : : dword ) ) ; <nl> + int32_t delta = safe_cast < int32_t > ( imm - ( ( ssize_t ) code . 
frontier + 6 ) ) ; <nl> uint8_t * bdelta = ( uint8_t * ) & delta ; <nl> uint8_t instr [ 6 ] = { 0x0f , uint8_t ( 0x80 | jcond ) , <nl> bdelta [ 0 ] , bdelta [ 1 ] , bdelta [ 2 ] , bdelta [ 3 ] } ; <nl> struct X64Assembler { <nl> inline void patchJcc ( CodeAddress jmp , CodeAddress dest ) { <nl> ASSERT ( jmp [ 0 ] = = 0x0F & & ( jmp [ 1 ] & 0xF0 ) = = 0x80 ) ; <nl> ssize_t diff = dest - ( jmp + 6 ) ; <nl> - ASSERT ( deltaFits ( diff , sz : : dword ) ) ; <nl> - * ( int32_t * ) ( jmp + 2 ) = ( int32_t ) diff ; <nl> + * ( int32_t * ) ( jmp + 2 ) = safe_cast < int32_t > ( diff ) ; <nl> } <nl> <nl> inline void patchJcc8 ( CodeAddress jmp , CodeAddress dest ) { <nl> ASSERT ( ( jmp [ 0 ] & 0xF0 ) = = 0x70 ) ; <nl> ssize_t diff = dest - ( jmp + 2 ) ; / / one for opcode , one for offset <nl> - ASSERT ( deltaFits ( diff , sz : : byte ) ) ; <nl> - * ( int8_t * ) ( jmp + 1 ) = ( int8_t ) diff ; <nl> + * ( int8_t * ) ( jmp + 1 ) = safe_cast < int8_t > ( diff ) ; <nl> } <nl> <nl> inline void patchJmp ( CodeAddress jmp , CodeAddress dest ) { <nl> ASSERT ( jmp [ 0 ] = = 0xE9 ) ; <nl> ssize_t diff = dest - ( jmp + 5 ) ; <nl> - ASSERT ( deltaFits ( diff , sz : : dword ) ) ; <nl> - * ( int32_t * ) ( jmp + 1 ) = ( int32_t ) diff ; <nl> + * ( int32_t * ) ( jmp + 1 ) = safe_cast < int32_t > ( diff ) ; <nl> } <nl> <nl> inline void patchJmp8 ( CodeAddress jmp , CodeAddress dest ) { <nl> ASSERT ( jmp [ 0 ] = = 0xEB ) ; <nl> ssize_t diff = dest - ( jmp + 2 ) ; / / one for opcode , one for offset <nl> - ASSERT ( deltaFits ( diff , sz : : byte ) ) ; <nl> - * ( int8_t * ) ( jmp + 1 ) = ( int8_t ) diff ; <nl> + * ( int8_t * ) ( jmp + 1 ) = safe_cast < int8_t > ( diff ) ; <nl> } <nl> <nl> / * <nl> struct X64Assembler { <nl> } \ <nl> / * op imm32 , rdest * / \ <nl> inline void name # # _imm32_reg64 ( int64_t imm , register_name_t rdest ) { \ <nl> - ASSERT ( deltaFits ( imm , sz : : dword ) ) ; \ <nl> - emitIR ( instr_ # # name , rdest , imm ) ; \ <nl> + emitIR ( instr_ # # name , rdest , safe_cast < int32_t > ( imm ) ) ; \ <nl> } \ <nl> / * op imm32 , edest * / \ <nl> inline void name # # _imm32_reg32 ( int64_t imm , register_name_t rdest ) { \ <nl> - ASSERT ( deltaFits ( imm , sz : : dword ) ) ; \ <nl> - emitIR32 ( instr_ # # name , rdest , imm ) ; \ <nl> + emitIR32 ( instr_ # # name , rdest , safe_cast < int32_t > ( imm ) ) ; \ <nl> } \ <nl> / * opl imm , disp ( rdest ) * / \ <nl> inline void name # # _imm32_disp_reg32 ( int64_t imm , int disp , \ <nl> register_name_t rdest ) { \ <nl> - ASSERT ( deltaFits ( imm , sz : : dword ) ) ; \ <nl> emitIM32 ( instr_ # # name , rdest , reg : : noreg , \ <nl> - sz : : byte , disp , imm ) ; \ <nl> + sz : : byte , disp , safe_cast < int32_t > ( imm ) ) ; \ <nl> } \ <nl> / * opq imm , disp ( rdest ) * / \ <nl> inline void name # # _imm64_disp_reg64 ( int64_t imm , int disp , \ <nl> mmm a / src / runtime / vm / translator / translator - x64 . cpp <nl> ppp b / src / runtime / vm / translator / translator - x64 . cpp <nl> struct JccBlock { <nl> : m_a ( & a ) , <nl> m_jcc8 ( a . code . frontier ) , <nl> m_dg ( new DiamondGuard ( a ) ) { <nl> - a . jcc8 ( Jcc , 0 ) ; <nl> + a . jcc8 ( Jcc , m_a - > code . frontier ) ; <nl> } <nl> <nl> ~ JccBlock ( ) { <nl> class IfElseBlock : boost : : noncopyable { <nl> explicit IfElseBlock ( X64Assembler & a ) : <nl> m_a ( a ) , m_jcc8 ( a . code . frontier ) , m_jmp8 ( NULL ) { <nl> tx64 - > m_regMap . freeze ( ) ; <nl> - m_a . jcc8 ( Jcc , m_jmp8 ) ; / / 1f <nl> + m_a . jcc8 ( Jcc , m_a . code . 
frontier ) ; / / 1f <nl> } <nl> void Else ( ) { <nl> ASSERT ( m_jmp8 = = NULL ) ; <nl> TranslatorX64 : : translateIterInit ( const Tracelet & t , <nl> / / returns 1 <nl> a . test_reg64_reg64 ( rax , rax ) ; <nl> TCA toPatch = a . code . frontier ; <nl> - a . jz ( 0 ) ; / / 1f <nl> + a . jz ( a . code . frontier ) ; / / 1f <nl> emitBindJmp ( notTaken ) ; <nl> / / 1 : <nl> a . patchJcc ( toPatch , a . code . frontier ) ; <nl> mmm a / src / util / base . h <nl> ppp b / src / util / base . h <nl> <nl> # include < boost / tuple / tuple . hpp > <nl> # include < boost / filesystem / operations . hpp > <nl> # include < boost / type_traits . hpp > <nl> + # include < boost / numeric / conversion / cast . hpp > <nl> <nl> # include " util / hash . h " <nl> # include " util / assert . h " <nl> struct file_closer { <nl> # endif <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + / * <nl> + * DEBUG - only wrapper around boost : : numeric_cast that converts any <nl> + * thrown exceptions to a failed assertion . <nl> + * / <nl> + template < typename To , typename From > <nl> + To safe_cast ( From val ) { <nl> + if ( debug ) { <nl> + try { <nl> + return boost : : numeric_cast < To > ( val ) ; <nl> + } catch ( std : : bad_cast & bce ) { <nl> + std : : cerr < < " conversion of " < < val < < " failed in " <nl> + < < __PRETTY_FUNCTION__ < < " : " <nl> + < < bce . what ( ) < < std : : endl ; <nl> + not_reached ( ) ; <nl> + } <nl> + } else { <nl> + return static_cast < To > ( val ) ; <nl> + } <nl> + } <nl> } <nl> <nl> namespace boost { <nl>
Add HPHP : : safe_cast and use it in the assembler
facebook/hhvm
93f7a60128e3642b3ab386cc15cfd376422bde26
2012-09-13T21:12:42Z
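safe_cast is worth a usage note: in debug builds it routes through boost::numeric_cast, which throws on any value-changing conversion, and the wrapper above turns that into a failed assertion after printing the offending value and function; in release builds it degrades to a plain static_cast. A trimmed sketch of the same idea, assuming Boost is available:

#include <boost/numeric/conversion/cast.hpp>
#include <cstdint>
#include <cstdio>

#ifndef NDEBUG
constexpr bool debug = true;
#else
constexpr bool debug = false;
#endif

template <typename To, typename From>
To safe_cast(From val) {
  if (debug) {
    // Throws boost::numeric::bad_numeric_cast when val does not fit in To;
    // the HHVM version converts the throw into a failed assertion.
    return boost::numeric_cast<To>(val);
  }
  return static_cast<To>(val);
}

int main() {
  std::int64_t small_delta = 42;
  std::printf("%d\n", safe_cast<std::int8_t>(small_delta));  // fits: fine
  // safe_cast<std::int8_t>(std::int64_t{1000}) would assert in debug builds.
  return 0;
}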
mmm a / osquery / tables / CMakeLists . txt <nl> ppp b / osquery / tables / CMakeLists . txt <nl> else ( ) <nl> ADD_OSQUERY_LINK ( FALSE " cryptsetup " ) <nl> ADD_OSQUERY_LINK ( FALSE " udev " ) <nl> ADD_OSQUERY_LINK ( FALSE " uuid " ) <nl> + ADD_OSQUERY_LINK ( FALSE " ip4tc " ) <nl> endif ( ) <nl> <nl> file ( GLOB OSQUERY_CROSS_TABLES " [ ! ue ] * / * . cpp " ) <nl> new file mode 100644 <nl> index 0000000000 . . 64b0faae46 <nl> mmm / dev / null <nl> ppp b / osquery / tables / networking / linux / iptables . cpp <nl> <nl> + / * <nl> + * Copyright ( c ) 2014 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the root directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * <nl> + * / <nl> + <nl> + # include < sstream > <nl> + <nl> + # include < arpa / inet . h > <nl> + # include < libiptc / libiptc . h > <nl> + <nl> + # include < boost / algorithm / string / split . hpp > <nl> + # include < boost / algorithm / string / trim . hpp > <nl> + <nl> + # include < osquery / tables . h > <nl> + # include < osquery / filesystem . h > <nl> + # include < osquery / logger . h > <nl> + <nl> + namespace osquery { <nl> + namespace tables { <nl> + <nl> + const std : : string kLinuxIpTablesNames = " / proc / net / ip_tables_names " ; <nl> + const char MAP [ ] = { ' 0 ' , ' 1 ' , ' 2 ' , ' 3 ' , ' 4 ' , ' 5 ' , ' 6 ' , ' 7 ' , ' 8 ' , ' 9 ' , ' A ' , ' B ' , ' C ' , ' D ' , ' E ' , ' F ' } ; <nl> + const int HIGH_BITS = 4 ; <nl> + const int LOW_BITS = 15 ; <nl> + <nl> + void parseIpEntry ( ipt_ip * ip , Row & r ) { <nl> + r [ " protocol " ] = INTEGER ( ip - > proto ) ; <nl> + if ( strlen ( ip - > iniface ) ) { <nl> + r [ " iniface " ] = TEXT ( ip - > iniface ) ; <nl> + } else { <nl> + r [ " iniface " ] = " all " ; <nl> + } <nl> + if ( strlen ( ip - > outiface ) ) { <nl> + r [ " outiface " ] = TEXT ( ip - > outiface ) ; <nl> + } else { <nl> + r [ " outiface " ] = " all " ; <nl> + } <nl> + char src_ip_string [ INET6_ADDRSTRLEN ] = { 0 } ; <nl> + if ( inet_ntop ( AF_INET , ( struct in_addr * ) & ip - > src , src_ip_string , INET6_ADDRSTRLEN ) ! = NULL ) { <nl> + r [ " src_ip " ] = TEXT ( src_ip_string ) ; <nl> + } <nl> + char dst_ip_string [ INET6_ADDRSTRLEN ] = { 0 } ; <nl> + if ( inet_ntop ( AF_INET , ( struct in_addr * ) & ip - > dst , dst_ip_string , INET6_ADDRSTRLEN ) ! = NULL ) { <nl> + r [ " dst_ip " ] = TEXT ( dst_ip_string ) ; <nl> + } <nl> + char src_ip_mask [ INET6_ADDRSTRLEN ] = { 0 } ; <nl> + if ( inet_ntop ( AF_INET , ( struct in_addr * ) & ip - > smsk , src_ip_mask , INET6_ADDRSTRLEN ) ! = NULL ) { <nl> + r [ " src_mask " ] = TEXT ( src_ip_mask ) ; <nl> + } <nl> + char dst_ip_mask [ INET6_ADDRSTRLEN ] = { 0 } ; <nl> + if ( inet_ntop ( AF_INET , ( struct in_addr * ) & ip - > dmsk , dst_ip_mask , INET6_ADDRSTRLEN ) ! = NULL ) { <nl> + r [ " dst_mask " ] = TEXT ( dst_ip_mask ) ; <nl> + } <nl> + <nl> + char aux_char [ 2 ] ; <nl> + std : : string iniface_mask = " " ; <nl> + for ( int i = 0 ; ip - > iniface_mask [ i ] ! 
= 0x00 & & i < IFNAMSIZ ; i + + ) { <nl> + aux_char [ 0 ] = MAP [ ( int ) ip - > iniface_mask [ i ] > > HIGH_BITS ] ; <nl> + aux_char [ 1 ] = MAP [ ( int ) ip - > iniface_mask [ i ] & LOW_BITS ] ; <nl> + iniface_mask + = aux_char [ 0 ] ; <nl> + iniface_mask + = aux_char [ 1 ] ; <nl> + } <nl> + <nl> + r [ " iniface_mask " ] = TEXT ( iniface_mask ) ; <nl> + std : : string outiface_mask = " " ; <nl> + for ( int i = 0 ; ip - > outiface_mask [ i ] ! = 0x00 & & i < IFNAMSIZ ; i + + ) { <nl> + aux_char [ 0 ] = MAP [ ( int ) ip - > outiface_mask [ i ] > > HIGH_BITS ] ; <nl> + aux_char [ 1 ] = MAP [ ( int ) ip - > outiface_mask [ i ] & LOW_BITS ] ; <nl> + outiface_mask + = aux_char [ 0 ] ; <nl> + outiface_mask + = aux_char [ 1 ] ; <nl> + } <nl> + r [ " outiface_mask " ] = TEXT ( outiface_mask ) ; <nl> + } <nl> + <nl> + QueryData getIptablesRules ( const std : : string & content ) { <nl> + QueryData results ; <nl> + <nl> + for ( auto & line : split ( content , " \ n " ) ) { <nl> + if ( line . size ( ) = = 0 ) { <nl> + continue ; <nl> + } <nl> + <nl> + / / Inline trim each line . <nl> + boost : : trim ( line ) ; <nl> + <nl> + Row r ; <nl> + <nl> + r [ " filter_name " ] = TEXT ( line ) ; <nl> + <nl> + / / Initialize the access to iptc <nl> + auto handle = ( struct iptc_handle * ) iptc_init ( line . c_str ( ) ) ; <nl> + <nl> + if ( handle ) { <nl> + / / Iterate through chains <nl> + for ( auto chain = iptc_first_chain ( ( struct iptc_handle * ) handle ) ; chain ; chain = iptc_next_chain ( ( struct iptc_handle * ) handle ) ) { <nl> + r [ " chain " ] = TEXT ( chain ) ; <nl> + <nl> + struct ipt_counters counters ; <nl> + const char * policy ; <nl> + <nl> + if ( ( policy = iptc_get_policy ( chain , & counters , ( struct iptc_handle * ) handle ) ) ) { <nl> + r [ " policy " ] = TEXT ( policy ) ; <nl> + r [ " packets " ] = INTEGER ( counters . pcnt ) ; <nl> + r [ " bytes " ] = INTEGER ( counters . bcnt ) ; <nl> + } <nl> + <nl> + struct ipt_entry * prev_rule ; <nl> + <nl> + / / Iterating through all the rules per chain <nl> + for ( auto chain_rule = iptc_first_rule ( chain , ( struct iptc_handle * ) handle ) ; chain_rule ; chain_rule = iptc_next_rule ( prev_rule , ( struct iptc_handle * ) handle ) ) { <nl> + prev_rule = ( struct ipt_entry * ) chain_rule ; <nl> + <nl> + auto target = iptc_get_target ( chain_rule , ( struct iptc_handle * ) handle ) ; <nl> + if ( target ) { <nl> + r [ " target " ] = TEXT ( target ) ; <nl> + } <nl> + <nl> + if ( chain_rule - > target_offset ) { <nl> + r [ " match " ] = " yes " ; <nl> + } else { <nl> + r [ " match " ] = " no " ; <nl> + } <nl> + <nl> + struct ipt_ip * ip = ( struct ipt_ip * ) & chain_rule - > ip ; <nl> + parseIpEntry ( ip , r ) ; <nl> + <nl> + results . push_back ( r ) ; <nl> + } / / Rule iteration <nl> + results . push_back ( r ) ; <nl> + } / / Chain iteration <nl> + <nl> + iptc_free ( ( struct iptc_handle * ) handle ) ; <nl> + <nl> + } <nl> + } / / Filter table iteration <nl> + <nl> + return results ; <nl> + } <nl> + <nl> + QueryData genIptables ( QueryContext & context ) { <nl> + std : : string content ; <nl> + QueryData results ; <nl> + <nl> + auto s = osquery : : readFile ( kLinuxIpTablesNames , content ) ; <nl> + <nl> + if ( s . ok ( ) ) { <nl> + return getIptablesRules ( content ) ; <nl> + } else { <nl> + LOG ( ERROR ) < < " Error reading " < < kLinuxIpTablesNames < < " : " < < s . toString ( ) ; <nl> + return { } ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . 
3d5eb07b2c <nl> mmm / dev / null <nl> ppp b / osquery / tables / networking / linux / tests / iptables_tests . cpp <nl> <nl> + / * <nl> + * Copyright ( c ) 2014 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the root directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * <nl> + * / <nl> + <nl> + # include < gtest / gtest . h > <nl> + <nl> + # include < osquery / logger . h > <nl> + # include < osquery / database . h > <nl> + <nl> + # include < libiptc / libiptc . h > <nl> + # include < arpa / inet . h > <nl> + <nl> + # include " osquery / core / test_util . h " <nl> + <nl> + namespace osquery { <nl> + namespace tables { <nl> + <nl> + void parseIpEntry ( ipt_ip * ip , Row & row ) ; <nl> + <nl> + ipt_ip * getIpEntryContent ( ) { <nl> + static ipt_ip ip_entry ; <nl> + <nl> + ip_entry . proto = 6 ; <nl> + memset ( ip_entry . iniface , 0 , IFNAMSIZ ) ; <nl> + strcpy ( ip_entry . outiface , " eth0 " ) ; <nl> + inet_aton ( " 123 . 123 . 123 . 123 " , & ip_entry . src ) ; <nl> + inet_aton ( " 45 . 45 . 45 . 45 " , & ip_entry . dst ) ; <nl> + inet_aton ( " 250 . 251 . 252 . 253 " , & ip_entry . smsk ) ; <nl> + inet_aton ( " 253 . 252 . 251 . 250 " , & ip_entry . dmsk ) ; <nl> + memset ( ip_entry . iniface_mask , 0xfe , IFNAMSIZ ) ; <nl> + memset ( ip_entry . outiface_mask , 0xfa , IFNAMSIZ ) ; <nl> + <nl> + return & ip_entry ; <nl> + } <nl> + <nl> + Row getIpEntryExpectedResults ( ) { <nl> + Row row ; <nl> + <nl> + row [ " protocol " ] = " 6 " ; <nl> + row [ " iniface " ] = " all " ; <nl> + row [ " outiface " ] = " eth0 " ; <nl> + row [ " src_ip " ] = " 123 . 123 . 123 . 123 " ; <nl> + row [ " dst_ip " ] = " 45 . 45 . 45 . 45 " ; <nl> + row [ " src_mask " ] = " 250 . 251 . 252 . 253 " ; <nl> + row [ " dst_mask " ] = " 253 . 252 . 251 . 250 " ; <nl> + row [ " iniface_mask " ] = " FEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFE " ; <nl> + row [ " outiface_mask " ] = " FAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFA " ; <nl> + <nl> + return row ; <nl> + } <nl> + <nl> + class IptablesTests : public testing : : Test { } ; <nl> + <nl> + TEST_F ( IptablesTests , test_iptables_ip_entry ) { <nl> + Row row ; <nl> + parseIpEntry ( getIpEntryContent ( ) , row ) ; <nl> + EXPECT_EQ ( row , getIpEntryExpectedResults ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . f88216a37e <nl> mmm / dev / null <nl> ppp b / osquery / tables / specs / linux / iptables . table <nl> <nl> + table_name ( " iptables " ) <nl> + description ( " Linux IP packet filtering and NAT tool . " ) <nl> + schema ( [ <nl> + Column ( " filter_name " , TEXT , " Packet matching filter table name . " ) , <nl> + Column ( " chain " , TEXT , " Size of module content . " ) , <nl> + Column ( " policy " , TEXT , " Policy that applies for this rule . " ) , <nl> + Column ( " target " , TEXT , " Target that applies for this rule . " ) , <nl> + Column ( " protocol " , INTEGER , " Protocol number identification . " ) , <nl> + Column ( " src_ip " , TEXT , " Source IP address . " ) , <nl> + Column ( " src_mask " , TEXT , " Source IP address mask . " ) , <nl> + Column ( " iniface " , TEXT , " Input interface for the rule . " ) , <nl> + Column ( " iniface_mask " , TEXT , " Input interface mask for the rule . " ) , <nl> + Column ( " dst_ip " , TEXT , " Destination IP address . " ) , <nl> + Column ( " dst_mask " , TEXT , " Destination IP address mask . 
" ) , <nl> + Column ( " outiface " , TEXT , " Output interface for the rule . " ) , <nl> + Column ( " outiface_mask " , TEXT , " Output interface mask for the rule . " ) , <nl> + Column ( " match " , TEXT , " Matching rule that applies . " ) , <nl> + Column ( " packets " , INTEGER , " Number of matching packets for this rule . " ) , <nl> + Column ( " bytes " , INTEGER , " Number of matching bytes for this rule . " ) , <nl> + ] ) <nl> + implementation ( " iptables @ genIptables " ) <nl> mmm a / tools / provision / centos . sh <nl> ppp b / tools / provision / centos . sh <nl> function main_centos ( ) { <nl> package rpm - devel <nl> package rpm - build <nl> package libblkid - devel <nl> + package iptables <nl> + package iptables - devel <nl> + patch_iptables_headers <nl> <nl> install_cmake <nl> <nl> mmm a / tools / provision / lib . sh <nl> ppp b / tools / provision / lib . sh <nl> function install_boost ( ) { <nl> fi <nl> } <nl> <nl> + function patch_iptables_headers ( ) { <nl> + IPV4FILE = " / usr / include / linux / netfilter_ipv4 / ip_tables . h " <nl> + IPV6FILE = " / usr / include / linux / netfilter_ipv6 / ip6_tables . h " <nl> + CODE_TO_PATCH = " return ( void \ * ) e + e - > target_offset ; " <nl> + if [ [ - f " $ IPV4FILE " ] ] ; then <nl> + if [ [ - n ` grep " $ CODE_TO_PATCH " " $ IPV4FILE " ` ] ] ; then <nl> + log " IPv4 code to patch found , backing up first " <nl> + sudo cp " $ IPV4FILE " " $ IPV4FILE . osquery " <nl> + PATCH = " return ( struct ipt_entry_target * ) ( ( char * ) e + e - > target_offset ) ; " <nl> + cat " $ IPV4FILE " | sudo bash - c " sed \ " s / $ CODE_TO_PATCH / $ PATCH / g \ " > \ " $ IPV4FILE \ " " <nl> + log " IPv4 headers patched succesfully " <nl> + else <nl> + log " IPv4 code to patch not found , skipping . " <nl> + fi <nl> + else <nl> + log " IPv4 iptables headers not found , skipping . " <nl> + fi <nl> + if [ [ - f " $ IPV6FILE " ] ] ; then <nl> + if [ [ - n ` grep " $ CODE_TO_PATCH " " $ IPV6FILE " ` ] ] ; then <nl> + log " IPv6 code to patch found , backing up first " <nl> + sudo cp " $ IPV6FILE " " $ IPV6FILE . osquery " <nl> + PATCH = " return ( struct ip6t_entry_target * ) ( ( char * ) e + e - > target_offset ) ; " <nl> + cat " $ IPV6FILE " | sudo bash - c " sed \ " s / $ CODE_TO_PATCH / $ PATCH / g \ " > \ " $ IPV6FILE \ " " <nl> + log " IPv6 headers patched succesfully " <nl> + else <nl> + log " IPv6 code to patch not found , skipping . " <nl> + fi <nl> + else <nl> + log " IPv6 iptables headers not found , skipping . " <nl> + fi <nl> + } <nl> + <nl> function install_gflags ( ) { <nl> if [ [ ! - f / usr / local / lib / libgflags . a ] ] ; then <nl> if [ [ ! - f v2 . 1 . 1 . tar . gz ] ] ; then <nl> mmm a / tools / provision / rhel . sh <nl> ppp b / tools / provision / rhel . sh <nl> function main_rhel ( ) { <nl> package xz <nl> package xz - devel <nl> package subscription - manager <nl> + package iptables <nl> + package iptables - devel <nl> + patch_iptables_headers <nl> <nl> if [ [ - z ` rpm - qa epel - release ` ] ] ; then <nl> if [ [ $ DISTRO = " rhel6 " ] ] ; then <nl> mmm a / tools / provision / ubuntu . sh <nl> ppp b / tools / provision / ubuntu . sh <nl> function main_ubuntu ( ) { <nl> package libbz2 - dev <nl> package devscripts <nl> package debhelper <nl> + package iptables <nl> + package iptables - dev <nl> + patch_iptables_headers <nl> <nl> if [ [ $ DISTRO = " precise " ] ] ; then <nl> package clang - 3 . 4 <nl>
Adding new table to display iptables filters , chains and rules
osquery/osquery
4f21090fb8a71b8e8e400fcf7b9125e2e35ef9ec
2015-05-09T02:11:49Z
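One detail worth isolating from parseIpEntry: the interface masks are raw byte arrays rendered as uppercase hex, two nibbles per byte via the MAP table, stopping at the first zero byte. The same transformation on its own, in standard C++ with no libiptc dependency:

#include <cstdio>
#include <string>

// Render a byte buffer as uppercase hex, two nibbles per byte, stopping at
// the first 0x00 byte, as the iptables table does for interface masks.
std::string mask_to_hex(const unsigned char *mask, int max_len) {
  static const char MAP[] = "0123456789ABCDEF";
  std::string out;
  for (int i = 0; i < max_len && mask[i] != 0x00; ++i) {
    out += MAP[mask[i] >> 4];    // high nibble
    out += MAP[mask[i] & 0x0F];  // low nibble
  }
  return out;
}

int main() {
  unsigned char mask[16];
  for (auto &b : mask) b = 0xFE;
  // Prints 32 chars of FE..., matching the expectation in iptables_tests.
  std::printf("%s\n", mask_to_hex(mask, 16).c_str());
  return 0;
}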
mmm a / editor / editor_settings . cpp <nl> ppp b / editor / editor_settings . cpp <nl> void EditorSettings : : _load_defaults ( Ref < ConfigFile > p_extra_config ) { <nl> hints [ " interface / editor / code_font " ] = PropertyInfo ( Variant : : STRING , " interface / editor / code_font " , PROPERTY_HINT_GLOBAL_FILE , " * . ttf , * . otf " , PROPERTY_USAGE_DEFAULT ) ; <nl> _initial_set ( " interface / editor / dim_editor_on_dialog_popup " , true ) ; <nl> _initial_set ( " interface / editor / low_processor_mode_sleep_usec " , 6900 ) ; / / ~ 144 FPS <nl> - hints [ " interface / editor / low_processor_mode_sleep_usec " ] = PropertyInfo ( Variant : : REAL , " interface / editor / low_processor_mode_sleep_usec " , PROPERTY_HINT_RANGE , " 1 , 100000 , 1 " , PROPERTY_USAGE_DEFAULT ) ; <nl> + hints [ " interface / editor / low_processor_mode_sleep_usec " ] = PropertyInfo ( Variant : : REAL , " interface / editor / low_processor_mode_sleep_usec " , PROPERTY_HINT_RANGE , " 1 , 100000 , 1 " , PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED ) ; <nl> _initial_set ( " interface / editor / unfocused_low_processor_mode_sleep_usec " , 50000 ) ; / / 20 FPS <nl> - hints [ " interface / editor / unfocused_low_processor_mode_sleep_usec " ] = PropertyInfo ( Variant : : REAL , " interface / editor / unfocused_low_processor_mode_sleep_usec " , PROPERTY_HINT_RANGE , " 1 , 100000 , 1 " , PROPERTY_USAGE_DEFAULT ) ; <nl> + hints [ " interface / editor / unfocused_low_processor_mode_sleep_usec " ] = PropertyInfo ( Variant : : REAL , " interface / editor / unfocused_low_processor_mode_sleep_usec " , PROPERTY_HINT_RANGE , " 1 , 100000 , 1 " , PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED ) ; <nl> _initial_set ( " interface / editor / separate_distraction_mode " , false ) ; <nl> _initial_set ( " interface / editor / automatically_open_screenshots " , true ) ; <nl> _initial_set ( " interface / editor / hide_console_window " , false ) ; <nl>
Merge pull request from Calinou / low - processor - mode - editor - restart
godotengine/godot
9ab02761d7ee793f194b833c3d337de08a0f9f4f
2019-08-17T09:23:27Z
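PROPERTY_USAGE_RESTART_IF_CHANGED is simply another bit OR-ed into the usage mask alongside PROPERTY_USAGE_DEFAULT, which lets the editor prompt for a restart when either sleep setting changes. The flag mechanics in isolation; the numeric values here are illustrative, not Godot's actual constants.

#include <cstdio>

// Illustrative bit flags in the style of Godot's PropertyUsageFlags.
enum PropertyUsage : unsigned {
  PROPERTY_USAGE_DEFAULT            = 1u << 0,
  PROPERTY_USAGE_RESTART_IF_CHANGED = 1u << 1,
};

int main() {
  unsigned usage = PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED;
  if (usage & PROPERTY_USAGE_RESTART_IF_CHANGED)
    std::printf("setting takes effect after an editor restart\n");
  return 0;
}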
mmm a / Telegram / Resources / lang . txt <nl> ppp b / Telegram / Resources / lang . txt <nl> lng_maintitle : " Telegram D " ; <nl> lng_menu_contacts : " Contacts " ; <nl> lng_menu_settings : " Settings " ; <nl> lng_menu_about : " About " ; <nl> + lng_menu_update : " Update " ; <nl> <nl> lng_open_from_tray : " Open Telegram " ; <nl> lng_minimize_to_tray : " Minimize to tray " ; <nl> mmm a / Telegram / Resources / style . txt <nl> ppp b / Telegram / Resources / style . txt <nl> sysUpd : sysButton { <nl> overColor : white ; <nl> duration : 150 ; <nl> } <nl> + updateBlinkDuration : 500 ; <nl> sysMin : sysButton ( sysUpd ) { <nl> img : sprite ( 207px , 1px , 19px , 19px ) ; <nl> } <nl> mediaviewLoaderSkip : 9px ; <nl> <nl> minPhotoWidth : 90px ; <nl> minPhotoHeight : 90px ; <nl> + maxMediaSize : 400px ; <nl> <nl> usernameFont : font ( 14px ) ; <nl> usernameColor : # 777 ; <nl> mmm a / Telegram / SourceFiles / app . cpp <nl> ppp b / Telegram / SourceFiles / app . cpp <nl> namespace App { <nl> return ( peer_id & 0x100000000L ) ? int32 ( peer_id & 0xFFFFFFFFL ) : 0 ; <nl> } <nl> <nl> + int32 onlineForSort ( int32 online , int32 now ) { <nl> + if ( online < = 0 ) { <nl> + switch ( online ) { <nl> + case - 2 : { <nl> + QDate yesterday ( date ( now ) . date ( ) ) ; <nl> + yesterday . addDays ( - 1 ) ; <nl> + return int32 ( QDateTime ( yesterday ) . toTime_t ( ) ) ; <nl> + } break ; <nl> + <nl> + case - 3 : { <nl> + QDate weekago ( date ( now ) . date ( ) ) ; <nl> + weekago . addDays ( - 7 ) ; <nl> + return int32 ( QDateTime ( weekago ) . toTime_t ( ) ) ; <nl> + } break ; <nl> + <nl> + case - 4 : { <nl> + QDate monthago ( date ( now ) . date ( ) ) ; <nl> + monthago . addDays ( - 30 ) ; <nl> + return int32 ( QDateTime ( monthago ) . toTime_t ( ) ) ; <nl> + } break ; <nl> + } <nl> + } <nl> + return online ; <nl> + } <nl> + <nl> int32 onlineWillChangeIn ( int32 online , int32 now ) { <nl> if ( online < = 0 ) return 86400 ; <nl> if ( online > now ) { <nl> mmm a / Telegram / SourceFiles / app . h <nl> ppp b / Telegram / SourceFiles / app . h <nl> namespace App { <nl> int32 userFromPeer ( const PeerId & peer_id ) ; <nl> int32 chatFromPeer ( const PeerId & peer_id ) ; <nl> <nl> + int32 onlineForSort ( int32 online , int32 now ) ; <nl> int32 onlineWillChangeIn ( int32 onlineOnServer , int32 nowOnServer ) ; <nl> QString onlineText ( int32 onlineOnServer , int32 nowOnServer , bool precise = false ) ; <nl> <nl> mmm a / Telegram / SourceFiles / config . h <nl> ppp b / Telegram / SourceFiles / config . h <nl> enum { <nl> MTPAckSendWaiting = 10000 , / / how much time to wait for some more requests , when sending msg acks <nl> MTPResendThreshold = 1 , / / how much ints should message contain for us not to resend , but to check it ' s state <nl> MTPContainerLives = 600 , / / container lives 10 minutes in haveSent map <nl> + MTPMinReceiveDelay = 4000 , / / 4 seconds <nl> MTPMaxReceiveDelay = 64000 , / / 64 seconds <nl> MTPConnectionOldTimeout = 192000 , / / 192 seconds <nl> MTPTcpConnectionWaitTimeout = 3000 , / / 3 seconds waiting for tcp , until we accept http <nl> enum { <nl> <nl> MTPDebugBufferSize = 1024 * 1024 , / / 1 mb start size <nl> <nl> - MinReceiveDelay = 1000 , / / 1 seconds <nl> MaxSelectedItems = 100 , <nl> <nl> MaxPhoneTailLength = 18 , / / rest of the phone number , without country code ( seen 12 at least ) <nl> mmm a / Telegram / SourceFiles / history . cpp <nl> ppp b / Telegram / SourceFiles / history . 
cpp <nl> HistoryItem * regItem ( HistoryItem * item , bool returnExisting ) { <nl> return item ; <nl> } <nl> <nl> - HistoryPhoto : : HistoryPhoto ( const MTPDphoto & photo , int32 width ) : data ( App : : feedPhoto ( photo ) ) <nl> - , openl ( new PhotoLink ( data ) ) <nl> - , w ( width ) { <nl> + HistoryPhoto : : HistoryPhoto ( const MTPDphoto & photo , int32 width ) : HistoryMedia ( width ) <nl> + , data ( App : : feedPhoto ( photo ) ) <nl> + , openl ( new PhotoLink ( data ) ) { <nl> init ( ) ; <nl> } <nl> <nl> - HistoryPhoto : : HistoryPhoto ( PeerData * chat , const MTPDphoto & photo , int32 width ) : data ( App : : feedPhoto ( photo ) ) <nl> - , openl ( new PhotoLink ( data , chat ) ) <nl> - , w ( width ) { <nl> + HistoryPhoto : : HistoryPhoto ( PeerData * chat , const MTPDphoto & photo , int32 width ) : HistoryMedia ( width ) <nl> + , data ( App : : feedPhoto ( photo ) ) <nl> + , openl ( new PhotoLink ( data , chat ) ) { <nl> init ( ) ; <nl> } <nl> <nl> void HistoryPhoto : : initDimensions ( const HistoryItem * parent ) { <nl> if ( ! tw | | ! th ) { <nl> tw = th = 1 ; <nl> } <nl> - int32 thumbw = qMax ( tw , int32 ( st : : minPhotoWidth ) ) , maxthumbh = thumbw ; <nl> - int32 thumbh = qRound ( th * float64 ( thumbw ) / tw ) ; <nl> - if ( thumbh > maxthumbh ) { <nl> - thumbw = qRound ( thumbw * float64 ( maxthumbh ) / thumbh ) ; <nl> - thumbh = maxthumbh ; <nl> - if ( thumbw < st : : minPhotoWidth ) { <nl> - thumbw = st : : minPhotoWidth ; <nl> - } <nl> - } <nl> - if ( thumbh < st : : minPhotoHeight ) { <nl> - thumbh = st : : minPhotoHeight ; <nl> - } <nl> + int32 thumbw = tw ; <nl> + int32 thumbh = th ; <nl> if ( ! w ) { <nl> w = thumbw ; <nl> + } else { <nl> + thumbh = w ; / / square chat photo updates <nl> } <nl> _maxw = w ; <nl> _height = _minh = thumbh ; <nl> + if ( _maxw < st : : minPhotoWidth ) { <nl> + _maxw = st : : minPhotoWidth ; <nl> + } <nl> + if ( _height < st : : minPhotoHeight ) { <nl> + _height = st : : minPhotoHeight ; <nl> + } <nl> } <nl> <nl> int32 HistoryPhoto : : resize ( int32 width , bool dontRecountText , const HistoryItem * parent ) { <nl> - w = width ; <nl> + w = qMin ( width , _maxw ) ; <nl> <nl> int32 tw = convertScale ( data - > full - > width ( ) ) , th = convertScale ( data - > full - > height ( ) ) ; <nl> + if ( tw > st : : maxMediaSize ) { <nl> + th = ( st : : maxMediaSize * th ) / tw ; <nl> + tw = st : : maxMediaSize ; <nl> + } <nl> _height = th ; <nl> if ( tw > w ) { <nl> _height = ( w * _height / tw ) ; <nl> QString formatDurationAndSizeText ( qint64 duration , qint64 size ) { <nl> <nl> int32 _downloadWidth = 0 , _openWithWidth = 0 , _cancelWidth = 0 , _buttonWidth = 0 ; <nl> <nl> - HistoryVideo : : HistoryVideo ( const MTPDvideo & video , int32 width ) : data ( App : : feedVideo ( video ) ) <nl> + HistoryVideo : : HistoryVideo ( const MTPDvideo & video , int32 width ) : HistoryMedia ( width ) <nl> + , data ( App : : feedVideo ( video ) ) <nl> , _openl ( new VideoOpenLink ( data ) ) <nl> , _savel ( new VideoSaveLink ( data ) ) <nl> , _cancell ( new VideoCancelLink ( data ) ) <nl> - , w ( width ) <nl> , _dldDone ( 0 ) <nl> , _uplDone ( 0 ) <nl> { <nl> void HistoryVideo : : unregItem ( HistoryItem * item ) { <nl> App : : unregVideoItem ( data , item ) ; <nl> } <nl> <nl> - int32 HistoryVideo : : resize ( int32 width , bool dontRecountText , const HistoryItem * parent ) { <nl> - w = width ; <nl> - return _height ; <nl> - } <nl> - <nl> const QString HistoryVideo : : inDialogsText ( ) const { <nl> return lang ( lng_in_dlg_video ) ; <nl> } <nl> void 
HistoryVideo : : draw ( QPainter & p , const HistoryItem * parent , bool selected , i <nl> } <nl> } <nl> <nl> - HistoryAudio : : HistoryAudio ( const MTPDaudio & audio , int32 width ) : data ( App : : feedAudio ( audio ) ) <nl> + HistoryAudio : : HistoryAudio ( const MTPDaudio & audio , int32 width ) : HistoryMedia ( width ) <nl> + , data ( App : : feedAudio ( audio ) ) <nl> , _openl ( new AudioOpenLink ( data ) ) <nl> , _savel ( new AudioSaveLink ( data ) ) <nl> , _cancell ( new AudioCancelLink ( data ) ) <nl> - , w ( width ) <nl> , _dldDone ( 0 ) <nl> , _uplDone ( 0 ) <nl> { <nl> void HistoryAudio : : unregItem ( HistoryItem * item ) { <nl> App : : unregAudioItem ( data , item ) ; <nl> } <nl> <nl> - int32 HistoryAudio : : resize ( int32 width , bool dontRecountText , const HistoryItem * parent ) { <nl> - w = width ; <nl> - return _height ; <nl> - } <nl> - <nl> const QString HistoryAudio : : inDialogsText ( ) const { <nl> return lang ( lng_in_dlg_audio ) ; <nl> } <nl> HistoryMedia * HistoryAudio : : clone ( ) const { <nl> return new HistoryAudio ( * this ) ; <nl> } <nl> <nl> - HistoryDocument : : HistoryDocument ( const MTPDdocument & document , int32 width ) : data ( App : : feedDocument ( document ) ) <nl> + HistoryDocument : : HistoryDocument ( const MTPDdocument & document , int32 width ) : HistoryMedia ( width ) <nl> + , data ( App : : feedDocument ( document ) ) <nl> , _openl ( new DocumentOpenLink ( data ) ) <nl> , _savel ( new DocumentSaveLink ( data ) ) <nl> , _cancell ( new DocumentCancelLink ( data ) ) <nl> - , w ( width ) <nl> , _name ( data - > name ) <nl> , _dldDone ( 0 ) <nl> , _uplDone ( 0 ) <nl> void HistoryDocument : : updateFrom ( const MTPMessageMedia & media ) { <nl> } <nl> <nl> int32 HistoryDocument : : resize ( int32 width , bool dontRecountText , const HistoryItem * parent ) { <nl> - w = width ; <nl> + w = qMin ( width , _maxw ) ; <nl> if ( parent = = animated . msg ) { <nl> + if ( w > st : : maxMediaSize ) { <nl> + w = st : : maxMediaSize ; <nl> + } <nl> _height = animated . h ; <nl> if ( animated . w > w ) { <nl> _height = ( w * _height / animated . w ) ; <nl> HistoryMedia * HistoryDocument : : clone ( ) const { <nl> return new HistoryDocument ( * this ) ; <nl> } <nl> <nl> - HistoryContact : : HistoryContact ( int32 userId , const QString & first , const QString & last , const QString & phone ) : userId ( userId ) <nl> - , w ( 0 ) <nl> + HistoryContact : : HistoryContact ( int32 userId , const QString & first , const QString & last , const QString & phone ) : HistoryMedia ( 0 ) <nl> + , userId ( userId ) <nl> , phone ( App : : formatPhone ( phone ) ) <nl> , contact ( App : : userLoaded ( userId ) ) <nl> { <nl> void HistoryContact : : initDimensions ( const HistoryItem * parent ) { <nl> } <nl> <nl> int32 HistoryContact : : resize ( int32 width , bool dontRecountText , const HistoryItem * parent ) { <nl> - w = width ; <nl> + w = qMin ( width , _maxw ) ; <nl> return _height ; <nl> } <nl> <nl> void ImageLinkData : : load ( ) { <nl> manager . getData ( this ) ; <nl> } <nl> <nl> - HistoryImageLink : : HistoryImageLink ( const QString & url , int32 width ) : w ( width ) { <nl> + HistoryImageLink : : HistoryImageLink ( const QString & url , int32 width ) : HistoryMedia ( width ) { <nl> if ( url . startsWith ( qsl ( " location : " ) ) ) { <nl> data = App : : imageLink ( url , GoogleMapsLink , qsl ( " https : / / maps . google . com / maps ? q = " ) + url . mid ( 9 ) + qsl ( " & ll = " ) + url . 
mid ( 9 ) + qsl ( " & z = 17 " ) ) ; <nl> } else { <nl> HistoryImageLink : : HistoryImageLink ( const QString & url , int32 width ) : w ( width ) { <nl> m = reInstagram . match ( url ) ; <nl> if ( m . hasMatch ( ) ) { <nl> data = App : : imageLink ( qsl ( " instagram : " ) + m . captured ( 3 ) , InstagramLink , url ) ; <nl> + data - > title = qsl ( " instagram . com / p / " ) + m . captured ( 3 ) ; <nl> } else { <nl> data = 0 ; <nl> } <nl> void HistoryImageLink : : draw ( QPainter & p , const HistoryItem * parent , bool selecte <nl> if ( data ) { <nl> switch ( data - > type ) { <nl> case YouTubeLink : p . drawPixmap ( QPoint ( ( width - st : : youtubeIcon . pxWidth ( ) ) / 2 , ( _height - st : : youtubeIcon . pxHeight ( ) ) / 2 ) , App : : sprite ( ) , st : : youtubeIcon ) ; break ; <nl> - case InstagramLink : p . drawPixmap ( QPoint ( ( width - st : : instagramIcon . pxWidth ( ) ) / 2 , ( _height - st : : instagramIcon . pxHeight ( ) ) / 2 ) , App : : sprite ( ) , st : : instagramIcon ) ; break ; <nl> + / / case InstagramLink : p . drawPixmap ( QPoint ( ( width - st : : instagramIcon . pxWidth ( ) ) / 2 , ( _height - st : : instagramIcon . pxHeight ( ) ) / 2 ) , App : : sprite ( ) , st : : instagramIcon ) ; break ; <nl> } <nl> if ( ! data - > title . isEmpty ( ) | | ! data - > duration . isEmpty ( ) ) { <nl> p . fillRect ( 0 , 0 , width , st : : msgDateFont - > height + 2 * st : : msgDateImgPadding . y ( ) , st : : msgDateImgBg - > b ) ; <nl> void HistoryImageLink : : draw ( QPainter & p , const HistoryItem * parent , bool selecte <nl> } <nl> <nl> int32 HistoryImageLink : : resize ( int32 width , bool dontRecountText , const HistoryItem * parent ) { <nl> - w = width ; <nl> + w = qMin ( width , _maxw ) ; <nl> <nl> int32 tw = convertScale ( fullWidth ( ) ) , th = convertScale ( fullHeight ( ) ) ; <nl> + if ( tw > st : : maxMediaSize ) { <nl> + th = ( st : : maxMediaSize * th ) / tw ; <nl> + tw = st : : maxMediaSize ; <nl> + } <nl> _height = th ; <nl> if ( tw > w ) { <nl> _height = ( w * _height / tw ) ; <nl> void HistoryMessage : : draw ( QPainter & p , uint32 selection ) const { <nl> _fromVersion = _from - > nameVersion ; <nl> } <nl> int32 left = _out ? st : : msgMargin . right ( ) : st : : msgMargin . left ( ) , width = _history - > width - st : : msgMargin . left ( ) - st : : msgMargin . right ( ) , mwidth = st : : msgMaxWidth ; <nl> - if ( _media & & _media - > maxWidth ( ) > mwidth ) mwidth = _media - > maxWidth ( ) ; <nl> + if ( _media ) { <nl> + if ( _media - > maxWidth ( ) > mwidth ) mwidth = _media - > maxWidth ( ) ; <nl> + if ( _media - > currentWidth ( ) < mwidth ) mwidth = _media - > currentWidth ( ) ; <nl> + } <nl> if ( width > mwidth ) { <nl> if ( _out ) left + = width - mwidth ; <nl> width = mwidth ; <nl> mmm a / Telegram / SourceFiles / history . h <nl> ppp b / Telegram / SourceFiles / history . 
h <nl> HistoryItem * regItem ( HistoryItem * item , bool returnExisting = false ) ; <nl> class HistoryMedia : public HistoryElem { <nl> public : <nl> <nl> + HistoryMedia ( int32 width = 0 ) : w ( width ) { <nl> + } <nl> + <nl> virtual HistoryMediaType type ( ) const = 0 ; <nl> virtual const QString inDialogsText ( ) const = 0 ; <nl> virtual bool hasPoint ( int32 x , int32 y , const HistoryItem * parent , int32 width = - 1 ) const = 0 ; <nl> virtual int32 countHeight ( const HistoryItem * parent , int32 width = - 1 ) const { <nl> return height ( ) ; <nl> } <nl> + virtual int32 resize ( int32 width , bool dontRecountText = false , const HistoryItem * parent = 0 ) { <nl> + w = qMin ( width , _maxw ) ; <nl> + return _height ; <nl> + } <nl> virtual TextLinkPtr getLink ( int32 x , int32 y , const HistoryItem * parent , int32 width = - 1 ) const = 0 ; <nl> virtual void draw ( QPainter & p , const HistoryItem * parent , bool selected , int32 width = - 1 ) const = 0 ; <nl> virtual bool uploading ( ) const { <nl> class HistoryMedia : public HistoryElem { <nl> return false ; <nl> } <nl> <nl> + int32 currentWidth ( ) const { <nl> + return w ; <nl> + } <nl> + <nl> + protected : <nl> + <nl> + int32 w ; <nl> + <nl> } ; <nl> <nl> class HistoryPhoto : public HistoryMedia { <nl> class HistoryPhoto : public HistoryMedia { <nl> private : <nl> PhotoData * data ; <nl> TextLinkPtr openl ; <nl> - int32 w ; <nl> + <nl> } ; <nl> <nl> QString formatSizeText ( qint64 size ) ; <nl> class HistoryVideo : public HistoryMedia { <nl> void initDimensions ( const HistoryItem * parent ) ; <nl> <nl> void draw ( QPainter & p , const HistoryItem * parent , bool selected , int32 width = - 1 ) const ; <nl> - int32 resize ( int32 width , bool dontRecountText = false , const HistoryItem * parent = 0 ) ; <nl> HistoryMediaType type ( ) const { <nl> return MediaTypeVideo ; <nl> } <nl> class HistoryVideo : public HistoryMedia { <nl> private : <nl> VideoData * data ; <nl> TextLinkPtr _openl , _savel , _cancell ; <nl> - int32 w ; <nl> - <nl> + <nl> QString _size ; <nl> int32 _thumbw , _thumbx , _thumby ; <nl> <nl> class HistoryAudio : public HistoryMedia { <nl> void initDimensions ( const HistoryItem * parent ) ; <nl> <nl> void draw ( QPainter & p , const HistoryItem * parent , bool selected , int32 width = - 1 ) const ; <nl> - int32 resize ( int32 width , bool dontRecountText = false , const HistoryItem * parent = 0 ) ; <nl> HistoryMediaType type ( ) const { <nl> return MediaTypeAudio ; <nl> } <nl> class HistoryAudio : public HistoryMedia { <nl> private : <nl> AudioData * data ; <nl> TextLinkPtr _openl , _savel , _cancell ; <nl> - int32 w ; <nl> <nl> QString _size ; <nl> <nl> class HistoryDocument : public HistoryMedia { <nl> <nl> DocumentData * data ; <nl> TextLinkPtr _openl , _savel , _cancell ; <nl> - int32 w ; <nl> <nl> int32 _namew ; <nl> QString _name , _size ; <nl> class HistoryImageLink : public HistoryMedia { <nl> <nl> private : <nl> ImageLinkData * data ; <nl> - int32 w ; <nl> + <nl> } ; <nl> <nl> class HistoryMessage : public HistoryItem { <nl> mmm a / Telegram / SourceFiles / mtproto / mtpConnection . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / mtpConnection . 
cpp <nl> void MTProtoConnection : : restart ( ) { <nl> } <nl> <nl> void MTProtoConnection : : stop ( ) { <nl> - data - > stop ( ) ; <nl> - thread - > quit ( ) ; <nl> + if ( data ) data - > stop ( ) ; <nl> + if ( thread ) thread - > quit ( ) ; <nl> } <nl> <nl> void MTProtoConnection : : stopped ( ) { <nl> void MTPabstractTcpConnection : : socketRead ( ) { <nl> } <nl> <nl> MTPautoConnection : : MTPautoConnection ( QThread * thread ) : status ( WaitingBoth ) , <nl> - tcpNonce ( MTP : : nonce < MTPint128 > ( ) ) , httpNonce ( MTP : : nonce < MTPint128 > ( ) ) , _tcpTimeout ( 1 ) { <nl> + tcpNonce ( MTP : : nonce < MTPint128 > ( ) ) , httpNonce ( MTP : : nonce < MTPint128 > ( ) ) , _tcpTimeout ( MTPMinReceiveDelay ) { <nl> moveToThread ( thread ) ; <nl> <nl> manager . moveToThread ( thread ) ; <nl> void MTPautoConnection : : onSocketConnected ( ) { <nl> DEBUG_LOG ( ( " Connection Info : sending fake req_pq through tcp transport " ) ) ; <nl> <nl> if ( _tcpTimeout < 0 ) _tcpTimeout = - _tcpTimeout ; <nl> - tcpTimeoutTimer . start ( _tcpTimeout * 1000 ) ; <nl> + tcpTimeoutTimer . start ( _tcpTimeout ) ; <nl> <nl> tcpSend ( buffer ) ; <nl> } else if ( status = = WaitingHttp | | status = = UsingHttp ) { <nl> void MTPautoConnection : : onSocketConnected ( ) { <nl> <nl> void MTPautoConnection : : onTcpTimeoutTimer ( ) { <nl> if ( status = = HttpReady | | status = = WaitingBoth | | status = = WaitingTcp ) { <nl> - if ( _tcpTimeout < 64 ) _tcpTimeout * = 2 ; <nl> + if ( _tcpTimeout < MTPMaxReceiveDelay ) _tcpTimeout * = 2 ; <nl> _tcpTimeout = - _tcpTimeout ; <nl> <nl> QAbstractSocket : : SocketState state = sock . state ( ) ; <nl> MTProtoConnectionPrivate : : MTProtoConnectionPrivate ( QThread * thread , MTProtoConne <nl> , conn ( 0 ) <nl> , retryTimeout ( 1 ) <nl> , oldConnection ( true ) <nl> - , receiveDelay ( MinReceiveDelay ) <nl> + , receiveDelay ( MTPMinReceiveDelay ) <nl> , firstSentAt ( - 1 ) <nl> , pingId ( 0 ) <nl> , toSendPingId ( 0 ) <nl> void MTProtoConnectionPrivate : : onReceivedSome ( ) { <nl> int32 ms = getms ( true ) - firstSentAt ; <nl> DEBUG_LOG ( ( " MTP Info : response in % 1ms , receiveDelay : % 2ms " ) . arg ( ms ) . arg ( receiveDelay ) ) ; <nl> <nl> - if ( ms > 0 & & ms * 2 < int32 ( receiveDelay ) ) receiveDelay = qMax ( ms * 2 , int32 ( MinReceiveDelay ) ) ; <nl> + if ( ms > 0 & & ms * 2 < int32 ( receiveDelay ) ) receiveDelay = qMax ( ms * 2 , int32 ( MTPMinReceiveDelay ) ) ; <nl> firstSentAt = - 1 ; <nl> } <nl> } <nl> <nl> void MTProtoConnectionPrivate : : onOldConnection ( ) { <nl> oldConnection = true ; <nl> - receiveDelay = MinReceiveDelay ; <nl> + receiveDelay = MTPMinReceiveDelay ; <nl> DEBUG_LOG ( ( " This connection marked as old ! delay now % 1ms " ) . arg ( receiveDelay ) ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / mtproto / mtpSession . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / mtpSession . cpp <nl> void MTProtoSession : : restart ( ) { <nl> } <nl> <nl> void MTProtoSession : : stop ( ) { <nl> - while ( connections . size ( ) ) { <nl> + while ( ! connections . isEmpty ( ) ) { <nl> connections . back ( ) - > stop ( ) ; <nl> connections . pop_back ( ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / profilewidget . cpp <nl> ppp b / Telegram / SourceFiles / profilewidget . cpp <nl> void ProfileInner : : reorderParticipants ( ) { <nl> UserData * self = App : : self ( ) ; <nl> for ( ChatData : : Participants : : const_iterator i = _peerChat - > participants . cbegin ( ) , e = _peerChat - > participants . cend ( ) ; i ! 
= e ; + + i ) { <nl> UserData * user = i . key ( ) ; <nl> - int32 until = user - > onlineTill ; <nl> + int32 until = App : : onlineForSort ( user - > onlineTill , t ) ; <nl> Participants : : iterator before = _participants . begin ( ) ; <nl> if ( user ! = self ) { <nl> if ( before ! = _participants . end ( ) & & ( * before ) = = self ) { <nl> + + before ; <nl> } <nl> - while ( before ! = _participants . end ( ) & & ( * before ) - > onlineTill > = until ) { <nl> + while ( before ! = _participants . end ( ) & & App : : onlineForSort ( ( * before ) - > onlineTill , t ) > = until ) { <nl> + + before ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / sysbuttons . cpp <nl> ppp b / Telegram / SourceFiles / sysbuttons . cpp <nl> Copyright ( c ) 2014 John Preston , https : / / tdesktop . com <nl> # include " window . h " <nl> # include " application . h " <nl> <nl> - SysBtn : : SysBtn ( QWidget * parent , const style : : sysButton & st ) : Button ( parent ) , <nl> - _st ( st ) , a_color ( _st . color - > c ) { <nl> - resize ( _st . size ) ; <nl> + SysBtn : : SysBtn ( QWidget * parent , const style : : sysButton & st , const QString & text ) : Button ( parent ) , <nl> + _st ( st ) , a_color ( _st . color - > c ) , _text ( text ) , _overLevel ( 0 ) { <nl> + int32 w = _st . size . width ( ) + ( _text . isEmpty ( ) ? 0 : ( ( _st . size . width ( ) - _st . img . pxWidth ( ) ) / 2 + st : : titleTextButton . font - > m . width ( _text ) ) ) ; <nl> + resize ( w , _st . size . height ( ) ) ; <nl> setCursor ( style : : cur_default ) ; <nl> connect ( this , SIGNAL ( stateChanged ( int , ButtonStateChangeSource ) ) , this , SLOT ( onStateChange ( int , ButtonStateChangeSource ) ) ) ; <nl> } <nl> <nl> + void SysBtn : : setOverLevel ( float64 level ) { <nl> + _overLevel = level ; <nl> + update ( ) ; <nl> + } <nl> + <nl> void SysBtn : : onStateChange ( int oldState , ButtonStateChangeSource source ) { <nl> a_color . start ( ( _state & StateOver ? _st . overColor : _st . color ) - > c ) ; <nl> <nl> void SysBtn : : onStateChange ( int oldState , ButtonStateChangeSource source ) { <nl> void SysBtn : : paintEvent ( QPaintEvent * e ) { <nl> QPainter p ( this ) ; <nl> <nl> - int x = ( width ( ) - _st . img . pxWidth ( ) ) / 2 , y = ( height ( ) - _st . img . pxHeight ( ) ) / 2 ; <nl> - p . fillRect ( x , y , _st . img . pxWidth ( ) , _st . img . pxHeight ( ) , a_color . current ( ) ) ; <nl> + int x = width ( ) - ( ( _st . size . width ( ) + _st . img . pxWidth ( ) ) / 2 ) , y = ( height ( ) - _st . img . pxHeight ( ) ) / 2 ; <nl> + QColor c = a_color . current ( ) ; <nl> + if ( _overLevel > 0 ) { <nl> + if ( _overLevel > = 1 ) { <nl> + c = _st . overColor - > c ; <nl> + } else { <nl> + c . setRedF ( c . redF ( ) * ( 1 - _overLevel ) + _st . overColor - > c . redF ( ) * _overLevel ) ; <nl> + c . setGreenF ( c . greenF ( ) * ( 1 - _overLevel ) + _st . overColor - > c . greenF ( ) * _overLevel ) ; <nl> + c . setBlueF ( c . blueF ( ) * ( 1 - _overLevel ) + _st . overColor - > c . blueF ( ) * _overLevel ) ; <nl> + } <nl> + } <nl> + p . fillRect ( x , y , _st . img . pxWidth ( ) , _st . img . pxHeight ( ) , c ) ; <nl> p . drawPixmap ( QPoint ( x , y ) , App : : sprite ( ) , _st . img ) ; <nl> + <nl> + if ( ! _text . isEmpty ( ) ) { <nl> + p . setFont ( st : : titleTextButton . font - > f ) ; <nl> + p . setPen ( c ) ; <nl> + p . drawText ( ( _st . size . width ( ) - _st . img . pxWidth ( ) ) / 2 , st : : titleTextButton . textTop + st : : titleTextButton . 
font - > ascent , _text ) ; <nl> + } <nl> } <nl> <nl> HitTestType SysBtn : : hitTest ( const QPoint & p ) const { <nl> void CloseBtn : : onClick ( ) { <nl> wnd - > close ( ) ; <nl> } <nl> <nl> - UpdateBtn : : UpdateBtn ( QWidget * parent , Window * window ) : SysBtn ( parent , st : : sysUpd ) , wnd ( window ) { <nl> + UpdateBtn : : UpdateBtn ( QWidget * parent , Window * window , const QString & text ) : SysBtn ( parent , st : : sysUpd , text ) , wnd ( window ) { <nl> connect ( this , SIGNAL ( clicked ( ) ) , this , SLOT ( onClick ( ) ) ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / sysbuttons . h <nl> ppp b / Telegram / SourceFiles / sysbuttons . h <nl> class SysBtn : public Button , public Animated { <nl> <nl> public : <nl> <nl> - SysBtn ( QWidget * parent , const style : : sysButton & st ) ; <nl> + SysBtn ( QWidget * parent , const style : : sysButton & st , const QString & text = QString ( ) ) ; <nl> <nl> void paintEvent ( QPaintEvent * e ) ; <nl> <nl> HitTestType hitTest ( const QPoint & p ) const ; <nl> <nl> + void setOverLevel ( float64 level ) ; <nl> + <nl> bool animStep ( float64 ms ) ; <nl> <nl> public slots : <nl> public slots : <nl> <nl> style : : sysButton _st ; <nl> anim : : cvalue a_color ; <nl> + float64 _overLevel ; <nl> + QString _text ; <nl> <nl> } ; <nl> <nl> class UpdateBtn : public SysBtn { <nl> <nl> public : <nl> <nl> - UpdateBtn ( QWidget * parent , Window * window ) ; <nl> + UpdateBtn ( QWidget * parent , Window * window , const QString & text = QString ( ) ) ; <nl> <nl> public slots : <nl> <nl> mmm a / Telegram / SourceFiles / title . cpp <nl> ppp b / Telegram / SourceFiles / title . cpp <nl> TitleWidget : : TitleWidget ( Window * window ) <nl> , _settings ( this , lang ( lng_menu_settings ) , st : : titleTextButton ) <nl> , _contacts ( this , lang ( lng_menu_contacts ) , st : : titleTextButton ) <nl> , _about ( this , lang ( lng_menu_about ) , st : : titleTextButton ) <nl> - , _update ( this , window ) <nl> + , _update ( this , window , lang ( lng_menu_update ) ) <nl> , _minimize ( this , window ) <nl> , _maximize ( this , window ) <nl> , _restore ( this , window ) <nl> TitleWidget : : TitleWidget ( Window * window ) <nl> setGeometry ( 0 , 0 , wnd - > width ( ) , st : : titleHeight ) ; <nl> stateChanged ( ) ; <nl> <nl> + _update . hide ( ) ; <nl> if ( App : : app ( ) - > updatingState ( ) = = Application : : UpdatingReady ) { <nl> - _update . show ( ) ; <nl> - } else { <nl> - _update . hide ( ) ; <nl> + showUpdateBtn ( ) ; <nl> } <nl> <nl> connect ( & _settings , SIGNAL ( clicked ( ) ) , window , SLOT ( showSettings ( ) ) ) ; <nl> void TitleWidget : : paintEvent ( QPaintEvent * e ) { <nl> p . drawPixmap ( st : : titleIconPos , App : : sprite ( ) , st : : titleIconRect ) ; <nl> } <nl> <nl> + bool TitleWidget : : animStep ( float64 ms ) { <nl> + float64 phase = sin ( M_PI_2 * ( ms / st : : updateBlinkDuration ) ) ; <nl> + if ( phase < 0 ) phase = - phase ; <nl> + _update . setOverLevel ( phase ) ; <nl> + return true ; <nl> + } <nl> + <nl> void TitleWidget : : setHideLevel ( float64 level ) { <nl> if ( level ! = hideLevel ) { <nl> hideLevel = level ; <nl> TitleWidget : : ~ TitleWidget ( ) { <nl> <nl> void TitleWidget : : resizeEvent ( QResizeEvent * e ) { <nl> QPoint p ( width ( ) - ( ( cPlatform ( ) = = dbipWindows & & lastMaximized ) ? 0 : st : : sysBtnDelta ) , 0 ) ; <nl> - <nl> + <nl> + if ( ! _update . isHidden ( ) ) { <nl> + p . setX ( p . x ( ) - _update . width ( ) ) ; <nl> + _update . move ( p ) ; <nl> + p . setX ( p . x ( ) + _update . 
width ( ) ) ; <nl> + } <nl> + <nl> if ( cPlatform ( ) = = dbipWindows ) { <nl> p . setX ( p . x ( ) - _close . width ( ) ) ; <nl> _close . move ( p ) ; <nl> void TitleWidget : : resizeEvent ( QResizeEvent * e ) { <nl> _minimize . move ( p ) ; <nl> } <nl> <nl> - if ( ! _update . isHidden ( ) ) { <nl> - p . setX ( p . x ( ) - _update . width ( ) ) ; <nl> - _update . move ( p ) ; <nl> - } <nl> - <nl> _settings . move ( st : : titleMenuOffset , 0 ) ; <nl> if ( MTP : : authedId ( ) ) { <nl> _contacts . show ( ) ; <nl> void TitleWidget : : stateChanged ( Qt : : WindowState state ) { <nl> void TitleWidget : : showUpdateBtn ( ) { <nl> if ( App : : app ( ) - > updatingState ( ) = = Application : : UpdatingReady | | cEvalScale ( cConfigScale ( ) ) ! = cEvalScale ( cRealScale ( ) ) ) { <nl> _update . show ( ) ; <nl> + _minimize . hide ( ) ; <nl> + _restore . hide ( ) ; <nl> + _maximize . hide ( ) ; <nl> + _close . hide ( ) ; <nl> + anim : : start ( this ) ; <nl> } else { <nl> _update . hide ( ) ; <nl> + if ( cPlatform ( ) = = dbipWindows ) { <nl> + _minimize . show ( ) ; <nl> + maximizedChanged ( wnd - > windowState ( ) . testFlag ( Qt : : WindowMaximized ) , true ) ; <nl> + _close . show ( ) ; <nl> + } <nl> + anim : : stop ( this ) ; <nl> } <nl> resizeEvent ( 0 ) ; <nl> update ( ) ; <nl> } <nl> <nl> - void TitleWidget : : maximizedChanged ( bool maximized ) { <nl> - if ( lastMaximized = = maximized ) return ; <nl> + void TitleWidget : : maximizedChanged ( bool maximized , bool force ) { <nl> + if ( lastMaximized = = maximized & & ! force ) return ; <nl> <nl> lastMaximized = maximized ; <nl> <nl> - if ( cPlatform ( ) ! = dbipWindows ) return ; <nl> + if ( cPlatform ( ) ! = dbipWindows | | ! _update . isHidden ( ) ) return ; <nl> if ( maximized ) { <nl> _maximize . clearState ( ) ; <nl> } else { <nl> mmm a / Telegram / SourceFiles / title . h <nl> ppp b / Telegram / SourceFiles / title . h <nl> class TitleHider : public QWidget { <nl> <nl> } ; <nl> <nl> - class TitleWidget : public QWidget { <nl> + class TitleWidget : public QWidget , public Animated { <nl> Q_OBJECT <nl> <nl> public : <nl> class TitleWidget : public QWidget { <nl> void mousePressEvent ( QMouseEvent * e ) ; <nl> void mouseDoubleClickEvent ( QMouseEvent * e ) ; <nl> <nl> - void maximizedChanged ( bool maximized ) ; <nl> + void maximizedChanged ( bool maximized , bool force = false ) ; <nl> <nl> HitTestType hitTest ( const QPoint & p ) ; <nl> <nl> void setHideLevel ( float64 level ) ; <nl> <nl> + bool animStep ( float64 ms ) ; <nl> + <nl> ~ TitleWidget ( ) ; <nl> <nl> public slots : <nl> public slots : <nl> float64 hideLevel ; <nl> TitleHider * hider ; <nl> <nl> + float64 _lastUpdateMs ; <nl> + <nl> FlatButton _settings , _contacts , _about ; <nl> <nl> UpdateBtn _update ; <nl>
min connection timeout 4 secs , fixed photo display , thumbs are smaller now , added update button text and animation
telegramdesktop/tdesktop
799a81966ae19b6ba55bcc41a8d3457aec55241c
2014-11-18T12:41:33Z
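The update-button blink added in this commit boils down to two pieces: a rectified sine phase driven by elapsed milliseconds (TitleWidget::animStep) and a per-channel linear blend between the idle and hover colors (SysBtn::paintEvent). A minimal self-contained C++ sketch of that math, with illustrative names standing in for the real Qt types:

#include <cmath>

struct Rgb { double r, g, b; };          // stand-in for QColor, illustration only

// Per-channel linear interpolation, as in SysBtn::paintEvent.
Rgb blend(const Rgb &base, const Rgb &over, double level) {
    if (level <= 0.0) return base;
    if (level >= 1.0) return over;
    return { base.r * (1.0 - level) + over.r * level,
             base.g * (1.0 - level) + over.g * level,
             base.b * (1.0 - level) + over.b * level };
}

// Rectified sine phase, as in TitleWidget::animStep: rises and falls
// smoothly inside [0, 1] so the button pulses instead of snapping.
double blinkPhase(double ms, double durationMs) {
    const double kHalfPi = 1.5707963267948966;
    double phase = std::sin(kHalfPi * (ms / durationMs));
    return phase < 0.0 ? -phase : phase;
}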
mmm a / src / CalcManager / CEngine / History . cpp <nl> ppp b / src / CalcManager / CEngine / History . cpp <nl> void CHistoryCollector : : PopLastOpndStart ( ) <nl> <nl> void CHistoryCollector : : AddOpenBraceToHistory ( ) <nl> { <nl> - int iCommandEnd = AddCommand ( std : : make_shared < CParentheses > ( IDC_OPENP ) ) ; <nl> + AddCommand ( std : : make_shared < CParentheses > ( IDC_OPENP ) ) ; <nl> int ichOpndStart = IchAddSzToEquationSz ( CCalcEngine : : OpCodeToString ( IDC_OPENP ) , - 1 ) ; <nl> PushLastOpndStart ( ichOpndStart ) ; <nl> <nl> void CHistoryCollector : : AddOpenBraceToHistory ( ) <nl> <nl> void CHistoryCollector : : AddCloseBraceToHistory ( ) <nl> { <nl> - int iCommandEnd = AddCommand ( std : : make_shared < CParentheses > ( IDC_CLOSEP ) ) ; <nl> + AddCommand ( std : : make_shared < CParentheses > ( IDC_CLOSEP ) ) ; <nl> IchAddSzToEquationSz ( CCalcEngine : : OpCodeToString ( IDC_CLOSEP ) , - 1 ) ; <nl> SetExpressionDisplay ( ) ; <nl> PopLastOpndStart ( ) ; <nl> std : : shared_ptr < CalculatorVector < int > > CHistoryCollector : : GetOperandCommandsFrom <nl> std : : shared_ptr < CalculatorVector < int > > commands = std : : make_shared < CalculatorVector < int > > ( ) ; <nl> / / Check for negate <nl> bool fNegative = ( numStr [ 0 ] = = L ' - ' ) ; <nl> - bool fSciFmt = false ; <nl> - bool fDecimal = false ; <nl> <nl> for ( size_t i = ( fNegative ? 1 : 0 ) ; i < numStr . length ( ) ; i + + ) <nl> { <nl> if ( numStr [ i ] = = m_decimalSymbol ) <nl> { <nl> IFT ( commands - > Append ( IDC_PNT ) ) ; <nl> - fDecimal = true ; <nl> } <nl> else if ( numStr [ i ] = = L ' e ' ) <nl> { <nl> IFT ( commands - > Append ( IDC_EXP ) ) ; <nl> - fSciFmt = true ; <nl> } <nl> else if ( numStr [ i ] = = L ' - ' ) <nl> { <nl>
Remove unused values
microsoft/calculator
6c0e4e81e744186107a5ca60b398b7efa357e232
2019-03-07T00:47:02Z
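This commit is the classic dead-store cleanup: locals assigned from a call whose result is never read. A small sketch of the pattern and the warnings that catch it (GCC/Clang -Wunused-variable / -Wunused-but-set-variable, MSVC /W4 C4189); addCommand here is a stand-in helper, not the engine's AddCommand:

int addCommand(int code) { return code + 1; }   // stand-in helper, illustration only

void before(int code) {
    int iCommandEnd = addCommand(code);   // warning: variable set but never used
    // ... iCommandEnd is never read below, so the store is dead
}

void after(int code) {
    addCommand(code);                     // call kept for its side effect only
}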
mmm a / tensorflow / python / kernel_tests / ctc_loss_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / ctc_loss_op_test . py <nl> def testBasic ( self ) : <nl> <nl> self . _testCTCLoss ( inputs , seq_lens , labels , loss_truth , grad_truth ) <nl> <nl> + <nl> + def test_time_major ( self ) : <nl> + " " " Testing time_major param . <nl> + <nl> + testing if transposing and setting time_major = False will result in the same loss <nl> + " " " <nl> + # [ max_time x batch_size x depth tensor ] <nl> + inputs = np . random . randn ( 2 , 2 , 3 ) . astype ( np . float32 ) <nl> + labels = SimpleSparseTensorFrom ( [ [ 0 , 1 ] , [ 1 , 0 ] ] ) <nl> + seq_lens = np . array ( [ 2 , 2 ] , dtype = np . int32 ) <nl> + <nl> + <nl> + inputs_t = tf . constant ( inputs ) <nl> + <nl> + # Transposing tensor to [ batch_size x max_time x depth tensor ] <nl> + inputs_t_transposed = tf . constant ( inputs . transpose ( 1 , 0 , 2 ) ) <nl> + <nl> + <nl> + with self . test_session ( use_gpu = False ) as sess : <nl> + loss = tf . nn . ctc_loss ( inputs = inputs_t , <nl> + labels = labels , <nl> + sequence_length = seq_lens ) <nl> + loss_transposed = tf . nn . ctc_loss ( inputs = inputs_t_transposed , <nl> + labels = labels , <nl> + sequence_length = seq_lens , time_major = False ) <nl> + <nl> + ( tf_loss , tf_loss_transposed ) = sess . run ( [ loss , loss_transposed ] ) <nl> + self . assertAllEqual ( tf_loss , tf_loss_transposed ) <nl> + <nl> + <nl> if __name__ = = " __main__ " : <nl> tf . test . main ( ) <nl> mmm a / tensorflow / python / ops / ctc_ops . py <nl> ppp b / tensorflow / python / ops / ctc_ops . py <nl> <nl> from tensorflow . python . framework import ops <nl> <nl> from tensorflow . python . ops import gen_ctc_ops <nl> + from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops . nn_grad import _BroadcastMul <nl> <nl> <nl> # pylint : disable = protected - access , invalid - name <nl> def ctc_loss ( inputs , labels , sequence_length , <nl> - preprocess_collapse_repeated = False , ctc_merge_repeated = True ) : <nl> + preprocess_collapse_repeated = False , ctc_merge_repeated = True , time_major = True ) : <nl> " " " Computes the CTC ( Connectionist Temporal Classification ) Loss . <nl> <nl> This op implements the CTC loss as presented in the article : <nl> def ctc_loss ( inputs , labels , sequence_length , <nl> Untested . Very likely will not learn to output repeated classes . <nl> <nl> Args : <nl> - inputs : 3 - D ` float ` ` Tensor ` sized <nl> - ` [ max_time x batch_size x num_classes ] ` . The logits . <nl> + inputs : 3 - D ` float ` ` Tensor ` . <nl> + If time_major = = False , this will be a ` Tensor ` shaped : <nl> + ` [ batch_size x max_time x num_classes ] ` . <nl> + If time_major = = True ( default ) , this will be a ` Tensor ` shaped : <nl> + ` [ max_time x batch_size x num_classes ] ` . <nl> + The logits . <nl> labels : An ` int32 ` ` SparseTensor ` . <nl> ` labels . indices [ i , : ] = = [ b , t ] ` means ` labels . values [ i ] ` stores <nl> the id for ( batch b , time t ) . <nl> def ctc_loss ( inputs , labels , sequence_length , <nl> preprocess_collapse_repeated : Boolean . Default : False . <nl> If True , repeated labels are collapsed prior to the CTC calculation . <nl> ctc_merge_repeated : Boolean . Default : True . <nl> + time_major : The shape format of the ` inputs ` Tensors . <nl> + If True , these ` Tensors ` must be shaped ` [ max_time , batch_size , num_classes ] ` . 
<nl> + If False , these ` Tensors ` must be shaped ` [ batch_size , max_time , num_classes ] ` . <nl> + Using ` time_major = True ` ( default ) is a bit more efficient because it avoids <nl> + transposes at the beginning of the ctc_loss calculation . However , most <nl> + TensorFlow data is batch - major , so by this function also accepts inputs <nl> + in batch - major form . <nl> <nl> Returns : <nl> A 1 - D ` float ` ` Tensor ` , size ` [ batch ] ` , containing the negative log probabilities . <nl> def ctc_loss ( inputs , labels , sequence_length , <nl> if not isinstance ( labels , ops . SparseTensor ) : <nl> raise TypeError ( " Expected labels to be a SparseTensor " ) <nl> <nl> + # For internal calculations , we transpose to [ time , batch , num_classes ] <nl> + if not time_major : <nl> + inputs = array_ops . transpose ( inputs , [ 1 , 0 , 2 ] ) # ( B , T , N ) = > ( T , B , N ) <nl> + <nl> loss , _ = gen_ctc_ops . _ctc_loss ( <nl> inputs , <nl> labels . indices , <nl>
Add time_major parameter for ctc_loss op ( )
tensorflow/tensorflow
cc10ef178129876fcd30538f5af33565f09987f6
2016-09-14T01:02:07Z
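The new time_major=False path differs only by the transpose(inputs, [1, 0, 2]) shown in the diff, mapping (batch, time, depth) onto the internally expected (time, batch, depth). A sketch of that permutation on a flat buffer, written in C++ purely for illustration:

#include <cstddef>
#include <vector>

// Equivalent of transpose(inputs, perm=[1, 0, 2]) on a contiguous buffer:
// element in[b][t][d] moves to out[t][b][d].
std::vector<float> toTimeMajor(const std::vector<float> &in,
                               std::size_t batch, std::size_t time,
                               std::size_t depth) {
    std::vector<float> out(in.size());
    for (std::size_t b = 0; b < batch; ++b)
        for (std::size_t t = 0; t < time; ++t)
            for (std::size_t d = 0; d < depth; ++d)
                out[(t * batch + b) * depth + d] =
                    in[(b * time + t) * depth + d];
    return out;
}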
mmm a / examples / example_allegro5 / main . cpp <nl> ppp b / examples / example_allegro5 / main . cpp <nl> int main ( int , char * * ) <nl> al_register_event_source ( queue , al_get_keyboard_event_source ( ) ) ; <nl> al_register_event_source ( queue , al_get_mouse_event_source ( ) ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> + <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplAllegro5_Init ( display ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_apple_opengl2 / main . mm <nl> ppp b / examples / example_apple_opengl2 / main . mm <nl> - ( void ) applicationDidFinishLaunching : ( NSNotification * ) aNotification <nl> if ( [ view openGLContext ] = = nil ) <nl> NSLog ( @ " No OpenGL Context ! " ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplOSX_Init ( ) ; <nl> ImGui_ImplOpenGL2_Init ( ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_freeglut_opengl2 / main . cpp <nl> ppp b / examples / example_freeglut_opengl2 / main . cpp <nl> int main ( int argc , char * * argv ) <nl> / / otherwise it is possible to install our own functions and call the imgui_impl_freeglut . h functions ourselves . <nl> glutDisplayFunc ( glut_display_func ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplFreeGLUT_Init ( ) ; <nl> ImGui_ImplFreeGLUT_InstallFuncs ( ) ; <nl> ImGui_ImplOpenGL2_Init ( ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_glfw_opengl2 / main . cpp <nl> ppp b / examples / example_glfw_opengl2 / main . cpp <nl> int main ( int , char * * ) <nl> glfwMakeContextCurrent ( window ) ; <nl> glfwSwapInterval ( 1 ) ; / / Enable vsync <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableGamepad ; / / Enable Gamepad Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> + / / If you have already installed GLFW callbacks in your app , call ImGui_ImplGlfw_InitForOpenGL ( ) with install_callbacks = false and call them yourself . 
<nl> ImGui_ImplGlfw_InitForOpenGL ( window , true ) ; <nl> ImGui_ImplOpenGL2_Init ( ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_glfw_opengl3 / main . cpp <nl> ppp b / examples / example_glfw_opengl3 / main . cpp <nl> int main ( int , char * * ) <nl> return 1 ; <nl> } <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableGamepad ; / / Enable Gamepad Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> + / / If you have already installed GLFW callbacks in your app , call ImGui_ImplGlfw_InitForOpenGL ( ) with install_callbacks = false and call them yourself . <nl> ImGui_ImplGlfw_InitForOpenGL ( window , true ) ; <nl> ImGui_ImplOpenGL3_Init ( glsl_version ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_glfw_vulkan / main . cpp <nl> ppp b / examples / example_glfw_vulkan / main . cpp <nl> int main ( int , char * * ) <nl> ImGui_ImplVulkanH_WindowData * wd = & g_WindowData ; <nl> SetupVulkanWindowData ( wd , surface , w , h ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableGamepad ; / / Enable Gamepad Controls <nl> <nl> - / / Setup GLFW binding <nl> + / / Setup Platform / Renderer bindings <nl> + / / If you have already installed GLFW callbacks in your app , call ImGui_ImplGlfw_InitForVulkan ( ) with install_callbacks = false and call the functions yourself . <nl> ImGui_ImplGlfw_InitForVulkan ( window , true ) ; <nl> - <nl> - / / Setup Vulkan binding <nl> ImGui_ImplVulkan_InitInfo init_info = { } ; <nl> init_info . Instance = g_Instance ; <nl> init_info . PhysicalDevice = g_PhysicalDevice ; <nl> int main ( int , char * * ) <nl> init_info . CheckVkResultFn = check_vk_result ; <nl> ImGui_ImplVulkan_Init ( & init_info , wd - > RenderPass ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_marmalade / main . cpp <nl> ppp b / examples / example_marmalade / main . cpp <nl> int main ( int , char * * ) <nl> { <nl> IwGxInit ( ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> + <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_Marmalade_Init ( true ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_sdl_opengl2 / main . cpp <nl> ppp b / examples / example_sdl_opengl2 / main . 
cpp <nl> int main ( int , char * * ) <nl> SDL_GLContext gl_context = SDL_GL_CreateContext ( window ) ; <nl> SDL_GL_SetSwapInterval ( 1 ) ; / / Enable vsync <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplSDL2_InitForOpenGL ( window , gl_context ) ; <nl> ImGui_ImplOpenGL2_Init ( ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_sdl_opengl3 / main . cpp <nl> ppp b / examples / example_sdl_opengl3 / main . cpp <nl> int main ( int , char * * ) <nl> return 1 ; <nl> } <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplSDL2_InitForOpenGL ( window , gl_context ) ; <nl> ImGui_ImplOpenGL3_Init ( glsl_version ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_sdl_vulkan / main . cpp <nl> ppp b / examples / example_sdl_vulkan / main . cpp <nl> int main ( int , char * * ) <nl> ImGui_ImplVulkanH_WindowData * wd = & g_WindowData ; <nl> SetupVulkanWindowData ( wd , surface , w , h ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> - / / Setup SDL binding <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplSDL2_InitForVulkan ( window ) ; <nl> - <nl> - / / Setup Vulkan binding <nl> ImGui_ImplVulkan_InitInfo init_info = { } ; <nl> init_info . Instance = g_Instance ; <nl> init_info . PhysicalDevice = g_PhysicalDevice ; <nl> int main ( int , char * * ) <nl> init_info . CheckVkResultFn = check_vk_result ; <nl> ImGui_ImplVulkan_Init ( & init_info , wd - > RenderPass ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_win32_directx10 / main . cpp <nl> ppp b / examples / example_win32_directx10 / main . cpp <nl> int main ( int , char * * ) <nl> ShowWindow ( hwnd , SW_SHOWDEFAULT ) ; <nl> UpdateWindow ( hwnd ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplWin32_Init ( hwnd ) ; <nl> ImGui_ImplDX10_Init ( g_pd3dDevice ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_win32_directx11 / main . cpp <nl> ppp b / examples / example_win32_directx11 / main . 
cpp <nl> int main ( int , char * * ) <nl> ShowWindow ( hwnd , SW_SHOWDEFAULT ) ; <nl> UpdateWindow ( hwnd ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplWin32_Init ( hwnd ) ; <nl> ImGui_ImplDX11_Init ( g_pd3dDevice , g_pd3dDeviceContext ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_win32_directx12 / main . cpp <nl> ppp b / examples / example_win32_directx12 / main . cpp <nl> int main ( int , char * * ) <nl> ShowWindow ( hwnd , SW_SHOWDEFAULT ) ; <nl> UpdateWindow ( hwnd ) ; <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplWin32_Init ( hwnd ) ; <nl> ImGui_ImplDX12_Init ( g_pd3dDevice , NUM_FRAMES_IN_FLIGHT , <nl> DXGI_FORMAT_R8G8B8A8_UNORM , <nl> g_pd3dSrvDescHeap - > GetCPUDescriptorHandleForHeapStart ( ) , <nl> g_pd3dSrvDescHeap - > GetGPUDescriptorHandleForHeapStart ( ) ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / example_win32_directx9 / main . cpp <nl> ppp b / examples / example_win32_directx9 / main . cpp <nl> int main ( int , char * * ) <nl> return 0 ; <nl> } <nl> <nl> - / / Setup Dear ImGui binding <nl> + / / Setup Dear ImGui context <nl> IMGUI_CHECKVERSION ( ) ; <nl> ImGui : : CreateContext ( ) ; <nl> ImGuiIO & io = ImGui : : GetIO ( ) ; ( void ) io ; <nl> / / io . ConfigFlags | = ImGuiConfigFlags_NavEnableKeyboard ; / / Enable Keyboard Controls <nl> + <nl> + / / Setup Platform / Renderer bindings <nl> ImGui_ImplWin32_Init ( hwnd ) ; <nl> ImGui_ImplDX9_Init ( g_pd3dDevice ) ; <nl> <nl> - / / Setup style <nl> + / / Setup Style <nl> ImGui : : StyleColorsDark ( ) ; <nl> / / ImGui : : StyleColorsClassic ( ) ; <nl> <nl> mmm a / examples / imgui_impl_glfw . cpp <nl> ppp b / examples / imgui_impl_glfw . cpp <nl> void ImGui_ImplGlfw_CharCallback ( GLFWwindow * , unsigned int c ) <nl> io . 
AddInputCharacter ( ( unsigned short ) c ) ; <nl> } <nl> <nl> - void ImGui_ImplGlfw_InstallCallbacks ( GLFWwindow * window ) <nl> - { <nl> - glfwSetMouseButtonCallback ( window , ImGui_ImplGlfw_MouseButtonCallback ) ; <nl> - glfwSetScrollCallback ( window , ImGui_ImplGlfw_ScrollCallback ) ; <nl> - glfwSetKeyCallback ( window , ImGui_ImplGlfw_KeyCallback ) ; <nl> - glfwSetCharCallback ( window , ImGui_ImplGlfw_CharCallback ) ; <nl> - } <nl> - <nl> static bool ImGui_ImplGlfw_Init ( GLFWwindow * window , bool install_callbacks , GlfwClientApi client_api ) <nl> { <nl> g_Window = window ; <nl> static bool ImGui_ImplGlfw_Init ( GLFWwindow * window , bool install_callbacks , Glfw <nl> g_MouseCursors [ ImGuiMouseCursor_Hand ] = glfwCreateStandardCursor ( GLFW_HAND_CURSOR ) ; <nl> <nl> if ( install_callbacks ) <nl> - ImGui_ImplGlfw_InstallCallbacks ( window ) ; <nl> + { <nl> + glfwSetMouseButtonCallback ( window , ImGui_ImplGlfw_MouseButtonCallback ) ; <nl> + glfwSetScrollCallback ( window , ImGui_ImplGlfw_ScrollCallback ) ; <nl> + glfwSetKeyCallback ( window , ImGui_ImplGlfw_KeyCallback ) ; <nl> + glfwSetCharCallback ( window , ImGui_ImplGlfw_CharCallback ) ; <nl> + } <nl> <nl> g_ClientApi = client_api ; <nl> return true ; <nl> mmm a / examples / imgui_impl_glfw . h <nl> ppp b / examples / imgui_impl_glfw . h <nl> IMGUI_IMPL_API bool ImGui_ImplGlfw_InitForVulkan ( GLFWwindow * window , bool in <nl> IMGUI_IMPL_API void ImGui_ImplGlfw_Shutdown ( ) ; <nl> IMGUI_IMPL_API void ImGui_ImplGlfw_NewFrame ( ) ; <nl> <nl> - / / GLFW callbacks ( installed by default if you enable ' install_callbacks ' during initialization ) <nl> - / / Provided here if you want to chain callbacks . <nl> - / / You can also handle inputs yourself and use those as a reference . <nl> + / / GLFW callbacks are installed by default if you call the InitXXX function with ' install_callbacks = true ' . <nl> + / / If you already have GLFW callbacks installed by your application , call the InitXXX function with install_callbacks = false , <nl> + / / then call the functions yourselves from your own GLFW callbacks . <nl> + / / You may also handle inputs yourself and use those as a reference . <nl> IMGUI_IMPL_API void ImGui_ImplGlfw_MouseButtonCallback ( GLFWwindow * window , int button , int action , int mods ) ; <nl> IMGUI_IMPL_API void ImGui_ImplGlfw_ScrollCallback ( GLFWwindow * window , double xoffset , double yoffset ) ; <nl> IMGUI_IMPL_API void ImGui_ImplGlfw_KeyCallback ( GLFWwindow * window , int key , int scancode , int action , int mods ) ; <nl>
Examples : Misc comments mainly related to GLFW callbacks . ( )
ocornut/imgui
9d155c73bcd4c99e0bd3e7c6c135e31c790d9759
2018-11-08T15:06:22Z
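The reworded header comment in this commit describes the chaining pattern for applications that already own the GLFW callbacks: initialize with install_callbacks=false and forward events to the ImGui_ImplGlfw_* handlers yourself. A sketch of one such chained callback, where MyAppHandleMouseButton is a placeholder for application code:

#include "imgui.h"
#include "imgui_impl_glfw.h"
#include <GLFW/glfw3.h>

void MyAppHandleMouseButton(int button, int action, int mods);   // placeholder for app logic

static void MyMouseButtonCallback(GLFWwindow* window, int button, int action, int mods)
{
    ImGui_ImplGlfw_MouseButtonCallback(window, button, action, mods);  // let the binding see it first
    if (!ImGui::GetIO().WantCaptureMouse)
        MyAppHandleMouseButton(button, action, mods);                  // only if ImGui didn't claim the mouse
}

// At init time:
//   ImGui_ImplGlfw_InitForOpenGL(window, /*install_callbacks=*/false);
//   glfwSetMouseButtonCallback(window, MyMouseButtonCallback);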
mmm a / swoole_client . c <nl> ppp b / swoole_client . c <nl> static void client_onClose ( swClient * cli ) <nl> php_swoole_client_free ( zobject , cli TSRMLS_CC ) ; <nl> } <nl> client_execute_callback ( zobject , SW_CLIENT_CB_onClose ) ; <nl> + sw_zval_ptr_dtor ( & zobject ) ; <nl> } <nl> <nl> static void client_onError ( swClient * cli ) <nl> static void client_onError ( swClient * cli ) <nl> # endif <nl> zval * zobject = cli - > object ; <nl> zend_update_property_long ( swoole_client_class_entry_ptr , zobject , ZEND_STRL ( " errCode " ) , SwooleG . error TSRMLS_CC ) ; <nl> - php_swoole_client_free ( zobject , cli TSRMLS_CC ) ; <nl> + if ( ! cli - > released ) <nl> + { <nl> + php_swoole_client_free ( zobject , cli TSRMLS_CC ) ; <nl> + } <nl> client_execute_callback ( zobject , SW_CLIENT_CB_onError ) ; <nl> + sw_zval_ptr_dtor ( & zobject ) ; <nl> } <nl> <nl> static void client_check_setting ( swClient * cli , zval * zset TSRMLS_DC ) <nl> void php_swoole_check_reactor ( ) <nl> <nl> void php_swoole_client_free ( zval * zobject , swClient * cli TSRMLS_DC ) <nl> { <nl> - if ( cli - > async ) <nl> - { <nl> - sw_zval_ptr_dtor ( & zobject ) ; <nl> - } <nl> / / long tcp connection , delete from php_sw_long_connections <nl> if ( cli - > keep ) <nl> { <nl> mmm a / swoole_http_client . c <nl> ppp b / swoole_http_client . c <nl> static void http_client_onClose ( swClient * cli ) <nl> http_client_free ( zobject TSRMLS_CC ) ; <nl> } <nl> http_client_execute_callback ( zobject , SW_CLIENT_CB_onClose ) ; <nl> + sw_zval_ptr_dtor ( & zobject ) ; <nl> } <nl> <nl> / * * <nl> static void http_client_onError ( swClient * cli ) <nl> # endif <nl> zval * zobject = cli - > object ; <nl> zend_update_property_long ( swoole_http_client_class_entry_ptr , zobject , ZEND_STRL ( " errCode " ) , SwooleG . error TSRMLS_CC ) ; <nl> - http_client_free ( zobject TSRMLS_CC ) ; <nl> + if ( ! cli - > released ) <nl> + { <nl> + http_client_free ( zobject TSRMLS_CC ) ; <nl> + } <nl> http_client_execute_callback ( zobject , SW_CLIENT_CB_onError ) ; <nl> + sw_zval_ptr_dtor ( & zobject ) ; <nl> } <nl> <nl> static void http_client_onReceive ( swClient * cli , char * data , uint32_t length ) <nl>
fix client crash when executing onError .
swoole/swoole-src
ec1d313498a8e13d601022240278ffcf07af6e2f
2016-06-03T02:45:21Z
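The crash fix above is a lifetime rule: the client object must outlive its onError/onClose callback, so the reference drop (sw_zval_ptr_dtor) moves to after the callback runs and a released flag prevents freeing internals twice. The same idea expressed with C++ ownership, as a hedged sketch rather than swoole's actual API:

#include <functional>
#include <memory>

struct Client {
    bool released = false;              // mirrors cli->released: free internals at most once
    std::function<void()> onError;      // user callback
};

void handleError(const std::shared_ptr<Client> &cli) {
    std::shared_ptr<Client> keepAlive = cli;   // pin the object for the whole handler
    if (!keepAlive->released) {
        keepAlive->released = true;            // release internal resources exactly once
    }
    if (keepAlive->onError)
        keepAlive->onError();                  // callback may drop other references safely
}                                              // our own reference is released only here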
mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / include / catch2 / catch . hpp <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / include / catch2 / catch . hpp <nl> <nl> / * <nl> - * Catch v2 . 7 . 0 <nl> - * Generated : 2019 - 03 - 07 21 : 34 : 30 . 252164 <nl> + * Catch v2 . 7 . 2 <nl> + * Generated : 2019 - 04 - 22 23 : 13 : 14 . 687465 <nl> * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> * This file has been merged from multiple headers . Please don ' t edit it directly <nl> * Copyright ( c ) 2019 Two Blue Cubes Ltd . All rights reserved . <nl> <nl> <nl> # define CATCH_VERSION_MAJOR 2 <nl> # define CATCH_VERSION_MINOR 7 <nl> - # define CATCH_VERSION_PATCH 0 <nl> + # define CATCH_VERSION_PATCH 2 <nl> <nl> # ifdef __clang__ <nl> # pragma clang system_header <nl> class PredicateMatcher : public MatcherBase < T > { <nl> <nl> / / The following functions create the actual matcher objects . <nl> / / The user has to explicitly specify type to the function , because <nl> - / / infering std : : function < bool ( T const & ) > is hard ( but possible ) and <nl> + / / inferring std : : function < bool ( T const & ) > is hard ( but possible ) and <nl> / / requires a lot of TMP . <nl> template < typename T > <nl> Generic : : PredicateMatcher < T > Predicate ( std : : function < bool ( T const & ) > const & predicate , std : : string const & description = " " ) { <nl> namespace Catch { <nl> namespace Matchers { <nl> <nl> namespace Vector { <nl> - namespace Detail { <nl> - template < typename InputIterator , typename T > <nl> - size_t count ( InputIterator first , InputIterator last , T const & item ) { <nl> - size_t cnt = 0 ; <nl> - for ( ; first ! = last ; + + first ) { <nl> - if ( * first = = item ) { <nl> - + + cnt ; <nl> - } <nl> - } <nl> - return cnt ; <nl> - } <nl> - template < typename InputIterator , typename T > <nl> - bool contains ( InputIterator first , InputIterator last , T const & item ) { <nl> - for ( ; first ! = last ; + + first ) { <nl> - if ( * first = = item ) { <nl> - return true ; <nl> - } <nl> - } <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> template < typename T > <nl> struct ContainsElementMatcher : MatcherBase < std : : vector < T > > { <nl> <nl> namespace Matchers { <nl> std : : vector < T > const & m_comparator ; <nl> } ; <nl> <nl> + template < typename T > <nl> + struct ApproxMatcher : MatcherBase < std : : vector < T > > { <nl> + <nl> + ApproxMatcher ( std : : vector < T > const & comparator ) : m_comparator ( comparator ) { } <nl> + <nl> + bool match ( std : : vector < T > const & v ) const override { <nl> + if ( m_comparator . size ( ) ! = v . size ( ) ) <nl> + return false ; <nl> + for ( std : : size_t i = 0 ; i < v . size ( ) ; + + i ) <nl> + if ( m_comparator [ i ] ! = approx ( v [ i ] ) ) <nl> + return false ; <nl> + return true ; <nl> + } <nl> + std : : string describe ( ) const override { <nl> + return " is approx : " + : : Catch : : Detail : : stringify ( m_comparator ) ; <nl> + } <nl> + template < typename = typename std : : enable_if < std : : is_constructible < double , T > : : value > : : type > <nl> + ApproxMatcher & epsilon ( T const & newEpsilon ) { <nl> + approx . epsilon ( newEpsilon ) ; <nl> + return * this ; <nl> + } <nl> + template < typename = typename std : : enable_if < std : : is_constructible < double , T > : : value > : : type > <nl> + ApproxMatcher & margin ( T const & newMargin ) { <nl> + approx . 
margin ( newMargin ) ; <nl> + return * this ; <nl> + } <nl> + template < typename = typename std : : enable_if < std : : is_constructible < double , T > : : value > : : type > <nl> + ApproxMatcher & scale ( T const & newScale ) { <nl> + approx . scale ( newScale ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + std : : vector < T > const & m_comparator ; <nl> + mutable Catch : : Detail : : Approx approx = Catch : : Detail : : Approx : : custom ( ) ; <nl> + } ; <nl> + <nl> template < typename T > <nl> struct UnorderedEqualsMatcher : MatcherBase < std : : vector < T > > { <nl> UnorderedEqualsMatcher ( std : : vector < T > const & target ) : m_target ( target ) { } <nl> namespace Matchers { <nl> if ( m_target . size ( ) ! = vec . size ( ) ) { <nl> return false ; <nl> } <nl> - auto lfirst = m_target . begin ( ) , llast = m_target . end ( ) ; <nl> - auto rfirst = vec . begin ( ) , rlast = vec . end ( ) ; <nl> - / / Cut common prefix to optimize checking of permuted parts <nl> - while ( lfirst ! = llast & & * lfirst = = * rfirst ) { <nl> - + + lfirst ; + + rfirst ; <nl> - } <nl> - if ( lfirst = = llast ) { <nl> - return true ; <nl> - } <nl> - <nl> - for ( auto mid = lfirst ; mid ! = llast ; + + mid ) { <nl> - / / Skip already counted items <nl> - if ( Detail : : contains ( lfirst , mid , * mid ) ) { <nl> - continue ; <nl> - } <nl> - size_t num_vec = Detail : : count ( rfirst , rlast , * mid ) ; <nl> - if ( num_vec = = 0 | | Detail : : count ( lfirst , llast , * mid ) ! = num_vec ) { <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> - return true ; <nl> + return std : : is_permutation ( m_target . begin ( ) , m_target . end ( ) , vec . begin ( ) ) ; <nl> } <nl> <nl> std : : string describe ( ) const override { <nl> namespace Matchers { <nl> return Vector : : EqualsMatcher < T > ( comparator ) ; <nl> } <nl> <nl> + template < typename T > <nl> + Vector : : ApproxMatcher < T > Approx ( std : : vector < T > const & comparator ) { <nl> + return Vector : : ApproxMatcher < T > ( comparator ) ; <nl> + } <nl> + <nl> template < typename T > <nl> Vector : : UnorderedEqualsMatcher < T > UnorderedEquals ( std : : vector < T > const & target ) { <nl> return Vector : : UnorderedEqualsMatcher < T > ( target ) ; <nl> namespace Generators { <nl> } / / namespace Catch <nl> <nl> # define GENERATE ( . . . ) \ <nl> - Catch : : Generators : : generate ( CATCH_INTERNAL_LINEINFO , [ ] { using namespace Catch : : Generators ; return makeGenerators ( __VA_ARGS__ ) ; } ) <nl> + Catch : : Generators : : generate ( CATCH_INTERNAL_LINEINFO , [ ] { using namespace Catch : : Generators ; return makeGenerators ( __VA_ARGS__ ) ; } ) <nl> + # define GENERATE_COPY ( . . . ) \ <nl> + Catch : : Generators : : generate ( CATCH_INTERNAL_LINEINFO , [ = ] { using namespace Catch : : Generators ; return makeGenerators ( __VA_ARGS__ ) ; } ) <nl> + # define GENERATE_REF ( . . . ) \ <nl> + Catch : : Generators : : generate ( CATCH_INTERNAL_LINEINFO , [ & ] { using namespace Catch : : Generators ; return makeGenerators ( __VA_ARGS__ ) ; } ) <nl> <nl> / / end catch_generators . hpp <nl> / / start catch_generators_generic . hpp <nl> namespace Generators { <nl> } <nl> } ; <nl> <nl> - template < typename T , typename U , typename Func > <nl> + # if defined ( __cpp_lib_is_invocable ) & & __cpp_lib_is_invocable > = 201703 <nl> + / / std : : result_of is deprecated in C + + 17 and removed in C + + 20 . Hence , it is <nl> + / / replaced with std : : invoke_result here . Also * _t format is preferred over <nl> + / / typename * : : type format . 
<nl> + template < typename Func , typename U > <nl> + using MapFunctionReturnType = std : : remove_reference_t < std : : remove_cv_t < std : : invoke_result_t < Func , U > > > ; <nl> + # else <nl> + template < typename Func , typename U > <nl> + using MapFunctionReturnType = typename std : : remove_reference < typename std : : remove_cv < typename std : : result_of < Func ( U ) > : : type > : : type > : : type ; <nl> + # endif <nl> + <nl> + template < typename Func , typename U , typename T = MapFunctionReturnType < Func , U > > <nl> GeneratorWrapper < T > map ( Func & & function , GeneratorWrapper < U > & & generator ) { <nl> return GeneratorWrapper < T > ( <nl> pf : : make_unique < MapGenerator < T , U , Func > > ( std : : forward < Func > ( function ) , std : : move ( generator ) ) <nl> ) ; <nl> } <nl> - template < typename T , typename Func > <nl> - GeneratorWrapper < T > map ( Func & & function , GeneratorWrapper < T > & & generator ) { <nl> + <nl> + template < typename T , typename U , typename Func > <nl> + GeneratorWrapper < T > map ( Func & & function , GeneratorWrapper < U > & & generator ) { <nl> return GeneratorWrapper < T > ( <nl> - pf : : make_unique < MapGenerator < T , T , Func > > ( std : : forward < Func > ( function ) , std : : move ( generator ) ) <nl> + pf : : make_unique < MapGenerator < T , U , Func > > ( std : : forward < Func > ( function ) , std : : move ( generator ) ) <nl> ) ; <nl> } <nl> <nl> namespace Catch { <nl> virtual ShowDurations : : OrNot showDurations ( ) const = 0 ; <nl> virtual TestSpec const & testSpec ( ) const = 0 ; <nl> virtual bool hasTestFilters ( ) const = 0 ; <nl> + virtual std : : vector < std : : string > const & getTestsOrTags ( ) const = 0 ; <nl> virtual RunTests : : InWhatOrder runOrder ( ) const = 0 ; <nl> virtual unsigned int rngSeed ( ) const = 0 ; <nl> virtual int benchmarkResolutionMultiple ( ) const = 0 ; <nl> namespace Catch { <nl> arcSafeRelease ( m_substr ) ; <nl> } <nl> <nl> - bool match ( NSString * arg ) const override { <nl> + bool match ( NSString * const & str ) const override { <nl> return false ; <nl> } <nl> <nl> namespace Catch { <nl> struct Equals : StringHolder { <nl> Equals ( NSString * substr ) : StringHolder ( substr ) { } <nl> <nl> - bool match ( NSString * str ) const override { <nl> + bool match ( NSString * const & str ) const override { <nl> return ( str ! = nil | | m_substr = = nil ) & & <nl> [ str isEqualToString : m_substr ] ; <nl> } <nl> namespace Catch { <nl> struct Contains : StringHolder { <nl> Contains ( NSString * substr ) : StringHolder ( substr ) { } <nl> <nl> - bool match ( NSString * str ) const { <nl> + bool match ( NSString * const & str ) const override { <nl> return ( str ! = nil | | m_substr = = nil ) & & <nl> [ str rangeOfString : m_substr ] . location ! = NSNotFound ; <nl> } <nl> namespace Catch { <nl> struct StartsWith : StringHolder { <nl> StartsWith ( NSString * substr ) : StringHolder ( substr ) { } <nl> <nl> - bool match ( NSString * str ) const override { <nl> + bool match ( NSString * const & str ) const override { <nl> return ( str ! = nil | | m_substr = = nil ) & & <nl> [ str rangeOfString : m_substr ] . location = = 0 ; <nl> } <nl> namespace Catch { <nl> struct EndsWith : StringHolder { <nl> EndsWith ( NSString * substr ) : StringHolder ( substr ) { } <nl> <nl> - bool match ( NSString * str ) const override { <nl> + bool match ( NSString * const & str ) const override { <nl> return ( str ! = nil | | m_substr = = nil ) & & <nl> [ str rangeOfString : m_substr ] . 
location = = [ str length ] - [ m_substr length ] ; <nl> } <nl> namespace Catch { <nl> public : <nl> NamePattern ( std : : string const & name ) ; <nl> virtual ~ NamePattern ( ) ; <nl> - virtual bool matches ( TestCaseInfo const & testCase ) const override ; <nl> + bool matches ( TestCaseInfo const & testCase ) const override ; <nl> private : <nl> WildcardPattern m_wildcardPattern ; <nl> } ; <nl> namespace Catch { <nl> public : <nl> TagPattern ( std : : string const & tag ) ; <nl> virtual ~ TagPattern ( ) ; <nl> - virtual bool matches ( TestCaseInfo const & testCase ) const override ; <nl> + bool matches ( TestCaseInfo const & testCase ) const override ; <nl> private : <nl> std : : string m_tag ; <nl> } ; <nl> namespace Catch { <nl> public : <nl> ExcludedPattern ( PatternPtr const & underlyingPattern ) ; <nl> virtual ~ ExcludedPattern ( ) ; <nl> - virtual bool matches ( TestCaseInfo const & testCase ) const override ; <nl> + bool matches ( TestCaseInfo const & testCase ) const override ; <nl> private : <nl> PatternPtr m_underlyingPattern ; <nl> } ; <nl> namespace Catch { <nl> std : : string getProcessName ( ) const ; <nl> std : : string const & getReporterName ( ) const ; <nl> <nl> - std : : vector < std : : string > const & getTestsOrTags ( ) const ; <nl> + std : : vector < std : : string > const & getTestsOrTags ( ) const override ; <nl> std : : vector < std : : string > const & getSectionsToRun ( ) const override ; <nl> <nl> - virtual TestSpec const & testSpec ( ) const override ; <nl> + TestSpec const & testSpec ( ) const override ; <nl> bool hasTestFilters ( ) const override ; <nl> <nl> bool showHelp ( ) const ; <nl> namespace Catch { <nl> / / Returns double formatted as % . 3f ( format expected on output ) <nl> std : : string getFormattedDuration ( double duration ) ; <nl> <nl> + std : : string serializeFilters ( std : : vector < std : : string > const & container ) ; <nl> + <nl> template < typename DerivedT > <nl> struct StreamingReporterBase : IStreamingReporter { <nl> <nl> namespace Catch { <nl> void testRunStarting ( TestRunInfo const & _testRunInfo ) override { <nl> currentTestRunInfo = _testRunInfo ; <nl> } <nl> + <nl> void testGroupStarting ( GroupInfo const & _groupInfo ) override { <nl> currentGroupInfo = _groupInfo ; <nl> } <nl> namespace Catch { <nl> <nl> class ReporterFactory : public IReporterFactory { <nl> <nl> - virtual IStreamingReporterPtr create ( ReporterConfig const & config ) const override { <nl> + IStreamingReporterPtr create ( ReporterConfig const & config ) const override { <nl> return std : : unique_ptr < T > ( new T ( config ) ) ; <nl> } <nl> <nl> - virtual std : : string getDescription ( ) const override { <nl> + std : : string getDescription ( ) const override { <nl> return T : : getDescription ( ) ; <nl> } <nl> } ; <nl> namespace Catch { <nl> <nl> class ListenerFactory : public IReporterFactory { <nl> <nl> - virtual IStreamingReporterPtr create ( ReporterConfig const & config ) const override { <nl> + IStreamingReporterPtr create ( ReporterConfig const & config ) const override { <nl> return std : : unique_ptr < T > ( new T ( config ) ) ; <nl> } <nl> - virtual std : : string getDescription ( ) const override { <nl> + std : : string getDescription ( ) const override { <nl> return std : : string ( ) ; <nl> } <nl> } ; <nl> namespace Catch { <nl> void testCaseEnded ( TestCaseStats const & _testCaseStats ) override ; <nl> void testGroupEnded ( TestGroupStats const & _testGroupStats ) override ; <nl> void testRunEnded ( TestRunStats const & _testRunStats ) 
override ; <nl> - <nl> + void testRunStarting ( TestRunInfo const & _testRunInfo ) override ; <nl> private : <nl> <nl> void lazyPrint ( ) ; <nl> namespace Catch { <nl> <nl> void printTotalsDivider ( Totals const & totals ) ; <nl> void printSummaryDivider ( ) ; <nl> + void printTestFilters ( ) ; <nl> <nl> private : <nl> bool m_headerPrinted = false ; <nl> namespace TestCaseTracking { <nl> <nl> public : <nl> <nl> - static TrackerContext & instance ( ) ; <nl> - <nl> ITracker & startRun ( ) ; <nl> void endRun ( ) ; <nl> <nl> namespace Detail { <nl> return marginComparison ( m_value , other , m_margin ) | | marginComparison ( m_value , other , m_epsilon * ( m_scale + std : : fabs ( m_value ) ) ) ; <nl> } <nl> <nl> - void Approx : : setMargin ( double margin ) { <nl> - CATCH_ENFORCE ( margin > = 0 , <nl> - " Invalid Approx : : margin : " < < margin < < ' . ' <nl> + void Approx : : setMargin ( double newMargin ) { <nl> + CATCH_ENFORCE ( newMargin > = 0 , <nl> + " Invalid Approx : : margin : " < < newMargin < < ' . ' <nl> < < " Approx : : Margin has to be non - negative . " ) ; <nl> - m_margin = margin ; <nl> + m_margin = newMargin ; <nl> } <nl> <nl> - void Approx : : setEpsilon ( double epsilon ) { <nl> - CATCH_ENFORCE ( epsilon > = 0 & & epsilon < = 1 . 0 , <nl> - " Invalid Approx : : epsilon : " < < epsilon < < ' . ' <nl> + void Approx : : setEpsilon ( double newEpsilon ) { <nl> + CATCH_ENFORCE ( newEpsilon > = 0 & & newEpsilon < = 1 . 0 , <nl> + " Invalid Approx : : epsilon : " < < newEpsilon < < ' . ' <nl> < < " Approx : : epsilon has to be in [ 0 , 1 ] " ) ; <nl> - m_epsilon = epsilon ; <nl> + m_epsilon = newEpsilon ; <nl> } <nl> <nl> } / / end namespace Detail <nl> class Column { <nl> m_suffix = false ; <nl> auto width = m_column . m_width - indent ( ) ; <nl> m_end = m_pos ; <nl> + if ( line ( ) [ m_pos ] = = ' \ n ' ) { <nl> + + + m_end ; <nl> + } <nl> while ( m_end < line ( ) . size ( ) & & line ( ) [ m_end ] ! = ' \ n ' ) <nl> + + m_end ; <nl> <nl> namespace Catch { <nl> m_stream ( openStream ( ) ) <nl> { <nl> TestSpecParser parser ( ITagAliasRegistry : : get ( ) ) ; <nl> - if ( data . testsOrTags . empty ( ) ) { <nl> - parser . parse ( " ~ [ . ] " ) ; / / All not hidden tests <nl> - } <nl> - else { <nl> + if ( ! data . testsOrTags . empty ( ) ) { <nl> m_hasTestFilters = true ; <nl> for ( auto const & testOrTags : data . testsOrTags ) <nl> parser . parse ( testOrTags ) ; <nl> namespace { <nl> originalBackgroundAttributes = csbiInfo . wAttributes & ~ ( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_INTENSITY ) ; <nl> } <nl> <nl> - virtual void use ( Colour : : Code _colourCode ) override { <nl> + void use ( Colour : : Code _colourCode ) override { <nl> switch ( _colourCode ) { <nl> case Colour : : None : return setTextAttribute ( originalForegroundAttributes ) ; <nl> case Colour : : White : return setTextAttribute ( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ) ; <nl> namespace { <nl> / / https : / / github . 
com / philsquared / Catch / pull / 131 <nl> class PosixColourImpl : public IColourImpl { <nl> public : <nl> - virtual void use ( Colour : : Code _colourCode ) override { <nl> + void use ( Colour : : Code _colourCode ) override { <nl> switch ( _colourCode ) { <nl> case Colour : : None : <nl> case Colour : : White : return setColour ( " [ 0m " ) ; <nl> namespace Catch { <nl> class Context : public IMutableContext , NonCopyable { <nl> <nl> public : / / IContext <nl> - virtual IResultCapture * getResultCapture ( ) override { <nl> + IResultCapture * getResultCapture ( ) override { <nl> return m_resultCapture ; <nl> } <nl> - virtual IRunner * getRunner ( ) override { <nl> + IRunner * getRunner ( ) override { <nl> return m_runner ; <nl> } <nl> <nl> - virtual IConfigPtr const & getConfig ( ) const override { <nl> + IConfigPtr const & getConfig ( ) const override { <nl> return m_config ; <nl> } <nl> <nl> - virtual ~ Context ( ) override ; <nl> + ~ Context ( ) override ; <nl> <nl> public : / / IMutableContext <nl> - virtual void setResultCapture ( IResultCapture * resultCapture ) override { <nl> + void setResultCapture ( IResultCapture * resultCapture ) override { <nl> m_resultCapture = resultCapture ; <nl> } <nl> - virtual void setRunner ( IRunner * runner ) override { <nl> + void setRunner ( IRunner * runner ) override { <nl> m_runner = runner ; <nl> } <nl> - virtual void setConfig ( IConfigPtr const & config ) override { <nl> + void setConfig ( IConfigPtr const & config ) override { <nl> m_config = config ; <nl> } <nl> <nl> namespace Catch { <nl> # include < stdbool . h > <nl> # include < sys / types . h > <nl> # include < unistd . h > <nl> - # include < sys / sysctl . h > <nl> # include < cstddef > <nl> # include < ostream > <nl> <nl> - namespace Catch { <nl> + # ifdef __apple_build_version__ <nl> + / / These headers will only compile with AppleClang ( XCode ) <nl> + / / For other compilers ( Clang , GCC , . . . ) we need to exclude them <nl> + # include < sys / sysctl . h > <nl> + # endif <nl> <nl> + namespace Catch { <nl> + # ifdef __apple_build_version__ <nl> / / The following function is taken directly from the following technical note : <nl> - / / http : / / developer . apple . com / library / mac / # qa / qa2004 / qa1361 . html <nl> + / / https : / / developer . apple . com / library / archive / qa / qa1361 / _index . html <nl> <nl> / / Returns true if the current process is being debugged ( either <nl> / / running under the debugger or has a debugger attached post facto ) . <nl> bool isDebuggerActive ( ) { <nl> - <nl> int mib [ 4 ] ; <nl> struct kinfo_proc info ; <nl> std : : size_t size ; <nl> namespace Catch { <nl> <nl> return ( ( info . kp_proc . p_flag & P_TRACED ) ! = 0 ) ; <nl> } <nl> + # else <nl> + bool isDebuggerActive ( ) { <nl> + / / We need to find another way to determine this for non - appleclang compilers on macOS <nl> + return false ; <nl> + } <nl> + # endif <nl> } / / namespace Catch <nl> <nl> # elif defined ( CATCH_PLATFORM_LINUX ) <nl> namespace Catch { <nl> public : <nl> ~ ExceptionTranslatorRegistry ( ) ; <nl> virtual void registerTranslator ( const IExceptionTranslator * translator ) ; <nl> - virtual std : : string translateActiveException ( ) const override ; <nl> + std : : string translateActiveException ( ) const override ; <nl> std : : string tryTranslators ( ) const ; <nl> <nl> private : <nl> namespace Catch { <nl> / / Windows can easily distinguish between SO and SigSegV , <nl> / / but SigInt , SigTerm , etc are handled differently . 
<nl> static SignalDefs signalDefs [ ] = { <nl> - { EXCEPTION_ILLEGAL_INSTRUCTION , " SIGILL - Illegal instruction signal " } , <nl> - { EXCEPTION_STACK_OVERFLOW , " SIGSEGV - Stack overflow " } , <nl> - { EXCEPTION_ACCESS_VIOLATION , " SIGSEGV - Segmentation violation signal " } , <nl> - { EXCEPTION_INT_DIVIDE_BY_ZERO , " Divide by zero error " } , <nl> + { static_cast < DWORD > ( EXCEPTION_ILLEGAL_INSTRUCTION ) , " SIGILL - Illegal instruction signal " } , <nl> + { static_cast < DWORD > ( EXCEPTION_STACK_OVERFLOW ) , " SIGSEGV - Stack overflow " } , <nl> + { static_cast < DWORD > ( EXCEPTION_ACCESS_VIOLATION ) , " SIGSEGV - Segmentation violation signal " } , <nl> + { static_cast < DWORD > ( EXCEPTION_INT_DIVIDE_BY_ZERO ) , " Divide by zero error " } , <nl> } ; <nl> <nl> LONG CALLBACK FatalConditionHandler : : handleVectoredException ( PEXCEPTION_POINTERS ExceptionInfo ) { <nl> namespace Catch { <nl> if ( strerror_s ( buffer , errno ) ) { <nl> CATCH_RUNTIME_ERROR ( " Could not translate errno to a string " ) ; <nl> } <nl> - CATCH_RUNTIME_ERROR ( " Coul dnot open the temp file : ' " < < m_buffer < < " ' because : " < < buffer ) ; <nl> + CATCH_RUNTIME_ERROR ( " Could not open the temp file : ' " < < m_buffer < < " ' because : " < < buffer ) ; <nl> } <nl> } <nl> # else <nl> namespace Catch { <nl> <nl> auto const & allTestCases = getAllTestCasesSorted ( * config ) ; <nl> for ( auto const & testCase : allTestCases ) { <nl> - if ( ! context . aborting ( ) & & matchTest ( testCase , testSpec , * config ) ) <nl> + bool matching = ( ! testSpec . hasFilters ( ) & & ! testCase . isHidden ( ) ) | | <nl> + ( testSpec . hasFilters ( ) & & matchTest ( testCase , testSpec , * config ) ) ; <nl> + <nl> + if ( ! context . aborting ( ) & & matching ) <nl> totals + = context . runTest ( testCase ) ; <nl> else <nl> context . reporter ( ) . skipTest ( testCase ) ; <nl> namespace Catch { <nl> void enforceNotReservedTag ( std : : string const & tag , SourceLineInfo const & _lineInfo ) { <nl> CATCH_ENFORCE ( ! isReservedTag ( tag ) , <nl> " Tag name : [ " < < tag < < " ] is not allowed . \ n " <nl> - < < " Tag names starting with non alpha - numeric characters are reserved \ n " <nl> + < < " Tag names starting with non alphanumeric characters are reserved \ n " <nl> < < _lineInfo ) ; <nl> } <nl> } <nl> namespace Catch { <nl> else if ( prop = = TestCaseInfo : : None ) <nl> enforceNotReservedTag ( tag , _lineInfo ) ; <nl> <nl> + / / Merged hide tags like ` [ . approvals ] ` should be added as <nl> + / / ` [ . ] [ approvals ] ` . The ` [ . ] ` is added at later point , so <nl> + / / we only strip the prefix <nl> + if ( startsWith ( tag , ' . ' ) & & tag . size ( ) > 1 ) { <nl> + tag . erase ( 0 , 1 ) ; <nl> + } <nl> tags . push_back ( tag ) ; <nl> tag . clear ( ) ; <nl> inTag = false ; <nl> namespace Catch { <nl> std : : vector < TestCase > filterTests ( std : : vector < TestCase > const & testCases , TestSpec const & testSpec , IConfig const & config ) { <nl> std : : vector < TestCase > filtered ; <nl> filtered . reserve ( testCases . size ( ) ) ; <nl> - for ( auto const & testCase : testCases ) <nl> - if ( matchTest ( testCase , testSpec , config ) ) <nl> - filtered . push_back ( testCase ) ; <nl> + for ( auto const & testCase : testCases ) { <nl> + if ( ( ! testSpec . hasFilters ( ) & & ! testCase . isHidden ( ) ) | | <nl> + ( testSpec . hasFilters ( ) & & matchTest ( testCase , testSpec , config ) ) ) { <nl> + filtered . 
push_back ( testCase ) ; <nl> + } <nl> + } <nl> return filtered ; <nl> } <nl> std : : vector < TestCase > const & getAllTestCasesSorted ( IConfig const & config ) { <nl> namespace TestCaseTracking { <nl> <nl> ITracker : : ~ ITracker ( ) = default ; <nl> <nl> - TrackerContext & TrackerContext : : instance ( ) { <nl> - static TrackerContext s_instance ; <nl> - return s_instance ; <nl> - } <nl> - <nl> ITracker & TrackerContext : : startRun ( ) { <nl> m_rootTracker = std : : make_shared < SectionTracker > ( NameAndLocation ( " { root } " , CATCH_INTERNAL_LINEINFO ) , * this , nullptr ) ; <nl> m_currentTracker = nullptr ; <nl> namespace Catch { <nl> } <nl> <nl> Version const & libraryVersion ( ) { <nl> - static Version version ( 2 , 7 , 0 , " " , 0 ) ; <nl> + static Version version ( 2 , 7 , 2 , " " , 0 ) ; <nl> return version ; <nl> } <nl> <nl> namespace Catch { <nl> return std : : string ( buffer ) ; <nl> } <nl> <nl> + std : : string serializeFilters ( std : : vector < std : : string > const & container ) { <nl> + ReusableStringStream oss ; <nl> + bool first = true ; <nl> + for ( auto & & filter : container ) <nl> + { <nl> + if ( ! first ) <nl> + oss < < ' ' ; <nl> + else <nl> + first = false ; <nl> + <nl> + oss < < filter ; <nl> + } <nl> + return oss . str ( ) ; <nl> + } <nl> + <nl> TestEventListenerBase : : TestEventListenerBase ( ReporterConfig const & _config ) <nl> : StreamingReporterBase ( _config ) { } <nl> <nl> void ConsoleReporter : : testRunEnded ( TestRunStats const & _testRunStats ) { <nl> stream < < std : : endl ; <nl> StreamingReporterBase : : testRunEnded ( _testRunStats ) ; <nl> } <nl> + void ConsoleReporter : : testRunStarting ( TestRunInfo const & _testInfo ) { <nl> + StreamingReporterBase : : testRunStarting ( _testInfo ) ; <nl> + printTestFilters ( ) ; <nl> + } <nl> <nl> void ConsoleReporter : : lazyPrint ( ) { <nl> <nl> void ConsoleReporter : : printSummaryDivider ( ) { <nl> stream < < getLineOfChars < ' - ' > ( ) < < ' \ n ' ; <nl> } <nl> <nl> + void ConsoleReporter : : printTestFilters ( ) { <nl> + if ( m_config - > testSpec ( ) . hasFilters ( ) ) <nl> + stream < < Colour ( Colour : : BrightYellow ) < < " Filters : " < < serializeFilters ( m_config - > getTestsOrTags ( ) ) < < ' \ n ' ; <nl> + } <nl> + <nl> CATCH_REGISTER_REPORTER ( " console " , ConsoleReporter ) <nl> <nl> } / / end namespace Catch <nl> namespace Catch { <nl> void JunitReporter : : testRunStarting ( TestRunInfo const & runInfo ) { <nl> CumulativeReporterBase : : testRunStarting ( runInfo ) ; <nl> xml . startElement ( " testsuites " ) ; <nl> - if ( m_config - > rngSeed ( ) ! = 0 ) { <nl> - xml . startElement ( " properties " ) ; <nl> - xml . scopedElement ( " property " ) <nl> - . writeAttribute ( " name " , " random - seed " ) <nl> - . writeAttribute ( " value " , m_config - > rngSeed ( ) ) ; <nl> - xml . endElement ( ) ; <nl> - } <nl> } <nl> <nl> void JunitReporter : : testGroupStarting ( GroupInfo const & groupInfo ) { <nl> namespace Catch { <nl> <nl> void JunitReporter : : writeGroup ( TestGroupNode const & groupNode , double suiteTime ) { <nl> XmlWriter : : ScopedElement e = xml . scopedElement ( " testsuite " ) ; <nl> + <nl> TestGroupStats const & stats = groupNode . value ; <nl> xml . writeAttribute ( " name " , stats . groupInfo . name ) ; <nl> xml . writeAttribute ( " errors " , unexpectedExceptions ) ; <nl> namespace Catch { <nl> xml . writeAttribute ( " time " , suiteTime ) ; <nl> xml . 
writeAttribute ( " timestamp " , getCurrentTimestamp ( ) ) ; <nl> <nl> + / / Write properties if there are any <nl> + if ( m_config - > hasTestFilters ( ) | | m_config - > rngSeed ( ) ! = 0 ) { <nl> + auto properties = xml . scopedElement ( " properties " ) ; <nl> + if ( m_config - > hasTestFilters ( ) ) { <nl> + xml . scopedElement ( " property " ) <nl> + . writeAttribute ( " name " , " filters " ) <nl> + . writeAttribute ( " value " , serializeFilters ( m_config - > getTestsOrTags ( ) ) ) ; <nl> + } <nl> + if ( m_config - > rngSeed ( ) ! = 0 ) { <nl> + xml . scopedElement ( " property " ) <nl> + . writeAttribute ( " name " , " random - seed " ) <nl> + . writeAttribute ( " value " , m_config - > rngSeed ( ) ) ; <nl> + } <nl> + } <nl> + <nl> / / Write test cases <nl> for ( auto const & child : groupNode . children ) <nl> writeTestCase ( * child ) ; <nl> namespace Catch { <nl> m_xml . startElement ( " Catch " ) ; <nl> if ( ! m_config - > name ( ) . empty ( ) ) <nl> m_xml . writeAttribute ( " name " , m_config - > name ( ) ) ; <nl> + if ( m_config - > testSpec ( ) . hasFilters ( ) ) <nl> + m_xml . writeAttribute ( " filters " , serializeFilters ( m_config - > getTestsOrTags ( ) ) ) ; <nl> if ( m_config - > rngSeed ( ) ! = 0 ) <nl> m_xml . scopedElement ( " Randomness " ) <nl> . writeAttribute ( " seed " , m_config - > rngSeed ( ) ) ; <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / lib / cmake / Catch2 / Catch2ConfigVersion . cmake <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / lib / cmake / Catch2 / Catch2ConfigVersion . cmake <nl> <nl> # The variable CVF_VERSION must be set before calling configure_file ( ) . <nl> <nl> <nl> - set ( PACKAGE_VERSION " 2 . 7 . 0 " ) <nl> + set ( PACKAGE_VERSION " 2 . 7 . 2 " ) <nl> <nl> if ( PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION ) <nl> set ( PACKAGE_VERSION_COMPATIBLE FALSE ) <nl> else ( ) <nl> <nl> - if ( " 2 . 7 . 0 " MATCHES " ^ ( [ 0 - 9 ] + ) \ \ . " ) <nl> + if ( " 2 . 7 . 2 " MATCHES " ^ ( [ 0 - 9 ] + ) \ \ . " ) <nl> set ( CVF_VERSION_MAJOR " $ { CMAKE_MATCH_1 } " ) <nl> else ( ) <nl> - set ( CVF_VERSION_MAJOR " 2 . 7 . 0 " ) <nl> + set ( CVF_VERSION_MAJOR " 2 . 7 . 2 " ) <nl> endif ( ) <nl> <nl> if ( PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR ) <nl> else ( ) <nl> endif ( ) <nl> <nl> <nl> + # if the installed project requested no architecture check , don ' t perform the check <nl> + if ( " FALSE " ) <nl> + return ( ) <nl> + endif ( ) <nl> + <nl> # if the installed or the using project don ' t have CMAKE_SIZEOF_VOID_P set , ignore it : <nl> if ( " $ { CMAKE_SIZEOF_VOID_P } " STREQUAL " " OR " " STREQUAL " " ) <nl> return ( ) <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / lib / cmake / Catch2 / CatchAddTests . cmake <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / lib / cmake / Catch2 / CatchAddTests . 
cmake <nl> function ( add_command NAME ) <nl> set ( script " $ { script } $ { NAME } ( $ { _args } ) \ n " PARENT_SCOPE ) <nl> endfunction ( ) <nl> <nl> + macro ( _add_catch_test_labels LINE ) <nl> + # convert to list of tags <nl> + string ( REPLACE " ] [ " " ] \ \ ; [ " tags $ { line } ) <nl> + <nl> + add_command ( <nl> + set_tests_properties " $ { prefix } $ { test } $ { suffix } " <nl> + PROPERTIES <nl> + LABELS " $ { tags } " <nl> + ) <nl> + endmacro ( ) <nl> + <nl> + macro ( _add_catch_test LINE ) <nl> + set ( test $ { line } ) <nl> + # use escape commas to handle properly test cases with commans inside the name <nl> + string ( REPLACE " , " " \ \ , " test_name $ { test } ) <nl> + # . . . and add to script <nl> + add_command ( <nl> + add_test " $ { prefix } $ { test } $ { suffix } " <nl> + $ { TEST_EXECUTOR } <nl> + " $ { TEST_EXECUTABLE } " <nl> + " $ { test_name } " <nl> + $ { extra_args } <nl> + ) <nl> + <nl> + add_command ( <nl> + set_tests_properties " $ { prefix } $ { test } $ { suffix } " <nl> + PROPERTIES <nl> + WORKING_DIRECTORY " $ { TEST_WORKING_DIR } " <nl> + $ { properties } <nl> + ) <nl> + list ( APPEND tests " $ { prefix } $ { test } $ { suffix } " ) <nl> + endmacro ( ) <nl> + <nl> # Run test executable to get list of available tests <nl> if ( NOT EXISTS " $ { TEST_EXECUTABLE } " ) <nl> message ( FATAL_ERROR <nl> if ( NOT EXISTS " $ { TEST_EXECUTABLE } " ) <nl> ) <nl> endif ( ) <nl> execute_process ( <nl> - COMMAND $ { TEST_EXECUTOR } " $ { TEST_EXECUTABLE } " $ { spec } - - list - test - names - only <nl> + COMMAND $ { TEST_EXECUTOR } " $ { TEST_EXECUTABLE } " $ { spec } - - list - tests <nl> OUTPUT_VARIABLE output <nl> RESULT_VARIABLE result <nl> ) <nl> elseif ( $ { result } LESS 0 ) <nl> endif ( ) <nl> <nl> string ( REPLACE " \ n " " ; " output " $ { output } " ) <nl> + set ( test ) <nl> + set ( tags_regex " ( \ \ [ ( [ ^ \ \ [ ] * ) \ \ ] ) + $ " ) <nl> <nl> # Parse output <nl> foreach ( line $ { output } ) <nl> - set ( test $ { line } ) <nl> - # use escape commas to handle properly test cases with commans inside the name <nl> - string ( REPLACE " , " " \ \ , " test_name $ { test } ) <nl> - # . . . and add to script <nl> - add_command ( add_test <nl> - " $ { prefix } $ { test } $ { suffix } " <nl> - $ { TEST_EXECUTOR } <nl> - " $ { TEST_EXECUTABLE } " <nl> - " $ { test_name } " <nl> - $ { extra_args } <nl> - ) <nl> - add_command ( set_tests_properties <nl> - " $ { prefix } $ { test } $ { suffix } " <nl> - PROPERTIES <nl> - WORKING_DIRECTORY " $ { TEST_WORKING_DIR } " <nl> - $ { properties } <nl> - ) <nl> - list ( APPEND tests " $ { prefix } $ { test } $ { suffix } " ) <nl> + # lines without leading whitespaces are catch output not tests <nl> + if ( $ { line } MATCHES " ^ [ \ t ] + " ) <nl> + # strip leading spaces and tabs <nl> + string ( REGEX REPLACE " ^ [ \ t ] + " " " line $ { line } ) <nl> + <nl> + if ( $ { line } MATCHES " $ { tags_regex } " ) <nl> + _add_catch_test_labels ( $ { line } ) <nl> + else ( ) <nl> + _add_catch_test ( $ { line } ) <nl> + endif ( ) <nl> + endif ( ) <nl> endforeach ( ) <nl> <nl> # Create a list of all discovered tests , which users may use to e . g . set <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / lib / cmake / Catch2 / ParseAndAddCatchTests . cmake <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / lib / cmake / Catch2 / ParseAndAddCatchTests . 
cmake <nl> <nl> # set ( OptionalCatchTestLauncher $ { MPIEXEC } $ { MPIEXEC_NUMPROC_FLAG } $ { NUMPROC } ) # <nl> # just before calling this ParseAndAddCatchTests function # <nl> # # <nl> + # The AdditionalCatchParameters optional variable can be used to pass extra argument to the test # <nl> + # command . For example , to include successful tests in the output , one can write # <nl> + # set ( AdditionalCatchParameters - - success ) # <nl> + # # <nl> + # After the script , the ParseAndAddCatchTests_TESTS property for the target , and for each source # <nl> + # file in the target is set , and contains the list of the tests extracted from that target , or # <nl> + # from that file . This is useful , for example to add further labels or properties to the tests . # <nl> + # # <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # <nl> <nl> - cmake_minimum_required ( VERSION 2 . 8 . 8 ) <nl> + if ( CMAKE_MINIMUM_REQUIRED_VERSION VERSION_LESS 2 . 8 . 8 ) <nl> + message ( FATAL_ERROR " ParseAndAddCatchTests requires CMake 2 . 8 . 8 or newer " ) <nl> + endif ( ) <nl> <nl> option ( PARSE_CATCH_TESTS_VERBOSE " Print Catch to CTest parser debug messages " OFF ) <nl> option ( PARSE_CATCH_TESTS_NO_HIDDEN_TESTS " Exclude tests with [ ! hide ] , [ . ] or [ . foo ] tags " OFF ) <nl> option ( PARSE_CATCH_TESTS_ADD_FIXTURE_IN_TEST_NAME " Add fixture class name to the <nl> option ( PARSE_CATCH_TESTS_ADD_TARGET_IN_TEST_NAME " Add target name to the test name " ON ) <nl> option ( PARSE_CATCH_TESTS_ADD_TO_CONFIGURE_DEPENDS " Add test file to CMAKE_CONFIGURE_DEPENDS property " OFF ) <nl> <nl> - function ( PrintDebugMessage ) <nl> + function ( ParseAndAddCatchTests_PrintDebugMessage ) <nl> if ( PARSE_CATCH_TESTS_VERBOSE ) <nl> message ( STATUS " ParseAndAddCatchTests : $ { ARGV } " ) <nl> endif ( ) <nl> endfunction ( ) <nl> # - full line comments ( i . e . / / . . . ) <nl> # contents have been read into ' $ { CppCode } ' . <nl> # ! keep partial line comments <nl> - function ( RemoveComments CppCode ) <nl> + function ( ParseAndAddCatchTests_RemoveComments CppCode ) <nl> string ( ASCII 2 CMakeBeginBlockComment ) <nl> string ( ASCII 3 CMakeEndBlockComment ) <nl> string ( REGEX REPLACE " / \ \ * " " $ { CMakeBeginBlockComment } " $ { CppCode } " $ { $ { CppCode } } " ) <nl> function ( RemoveComments CppCode ) <nl> endfunction ( ) <nl> <nl> # Worker function <nl> - function ( ParseFile SourceFile TestTarget ) <nl> + function ( ParseAndAddCatchTests_ParseFile SourceFile TestTarget ) <nl> + # If SourceFile is an object library , do not scan it ( as it is not a file ) . Exit without giving a warning about a missing file . <nl> + if ( SourceFile MATCHES " \ \ \ $ < TARGET_OBJECTS : . + > " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Detected OBJECT library : $ { SourceFile } this will not be scanned for tests . " ) <nl> + return ( ) <nl> + endif ( ) <nl> # According to CMake docs EXISTS behavior is well - defined only for full paths . 
<nl> get_filename_component ( SourceFile $ { SourceFile } ABSOLUTE ) <nl> if ( NOT EXISTS $ { SourceFile } ) <nl> message ( WARNING " Cannot find source file : $ { SourceFile } " ) <nl> return ( ) <nl> endif ( ) <nl> - PrintDebugMessage ( " parsing $ { SourceFile } " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " parsing $ { SourceFile } " ) <nl> file ( STRINGS $ { SourceFile } Contents NEWLINE_CONSUME ) <nl> <nl> # Remove block and fullline comments <nl> - RemoveComments ( Contents ) <nl> + ParseAndAddCatchTests_RemoveComments ( Contents ) <nl> <nl> # Find definition of test names <nl> string ( REGEX MATCHALL " [ \ t ] * ( CATCH_ ) ? ( TEST_CASE_METHOD | SCENARIO | TEST_CASE ) [ \ t ] * \ \ ( [ ^ \ ) ] + \ \ ) + [ \ t \ n ] * { + [ \ t ] * ( / / [ ^ \ n ] * [ Tt ] [ Ii ] [ Mm ] [ Ee ] [ Oo ] [ Uu ] [ Tt ] [ \ t ] * [ 0 - 9 ] + ) * " Tests " $ { Contents } " ) <nl> <nl> if ( PARSE_CATCH_TESTS_ADD_TO_CONFIGURE_DEPENDS AND Tests ) <nl> - PrintDebugMessage ( " Adding $ { SourceFile } to CMAKE_CONFIGURE_DEPENDS property " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Adding $ { SourceFile } to CMAKE_CONFIGURE_DEPENDS property " ) <nl> set_property ( <nl> DIRECTORY <nl> APPEND <nl> function ( ParseFile SourceFile TestTarget ) <nl> <nl> list ( APPEND Labels $ { Tags } ) <nl> <nl> - list ( FIND Labels " ! hide " IndexOfHideLabel ) <nl> set ( HiddenTagFound OFF ) <nl> foreach ( label $ { Labels } ) <nl> string ( REGEX MATCH " ^ ! hide | ^ \ \ . " result $ { label } ) <nl> function ( ParseFile SourceFile TestTarget ) <nl> endif ( result ) <nl> endforeach ( label ) <nl> if ( PARSE_CATCH_TESTS_NO_HIDDEN_TESTS AND $ { HiddenTagFound } AND $ { CMAKE_VERSION } VERSION_LESS " 3 . 9 " ) <nl> - PrintDebugMessage ( " Skipping test \ " $ { CTestName } \ " as it has [ ! hide ] , [ . ] or [ . foo ] label " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Skipping test \ " $ { CTestName } \ " as it has [ ! hide ] , [ . ] or [ . foo ] label " ) <nl> else ( ) <nl> - PrintDebugMessage ( " Adding test \ " $ { CTestName } \ " " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Adding test \ " $ { CTestName } \ " " ) <nl> if ( Labels ) <nl> - PrintDebugMessage ( " Setting labels to $ { Labels } " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Setting labels to $ { Labels } " ) <nl> endif ( ) <nl> <nl> # Escape commas in the test spec <nl> string ( REPLACE " , " " \ \ , " Name $ { Name } ) <nl> <nl> # Add the test and set its properties <nl> - add_test ( NAME " \ " $ { CTestName } \ " " COMMAND $ { OptionalCatchTestLauncher } $ { TestTarget } $ { Name } $ { AdditionalCatchParameters } ) <nl> + add_test ( NAME " \ " $ { CTestName } \ " " COMMAND $ { OptionalCatchTestLauncher } $ < TARGET_FILE : $ { TestTarget } > $ { Name } $ { AdditionalCatchParameters } ) <nl> # Old CMake versions do not document VERSION_GREATER_EQUAL , so we use VERSION_GREATER with 3 . 8 instead <nl> if ( PARSE_CATCH_TESTS_NO_HIDDEN_TESTS AND $ { HiddenTagFound } AND $ { CMAKE_VERSION } VERSION_GREATER " 3 . 
8 " ) <nl> - PrintDebugMessage ( " Setting DISABLED test property " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Setting DISABLED test property " ) <nl> set_tests_properties ( " \ " $ { CTestName } \ " " PROPERTIES DISABLED ON ) <nl> else ( ) <nl> set_tests_properties ( " \ " $ { CTestName } \ " " PROPERTIES FAIL_REGULAR_EXPRESSION " No tests ran " <nl> LABELS " $ { Labels } " ) <nl> endif ( ) <nl> + set_property ( <nl> + TARGET $ { TestTarget } <nl> + APPEND <nl> + PROPERTY ParseAndAddCatchTests_TESTS " \ " $ { CTestName } \ " " ) <nl> + set_property ( <nl> + SOURCE $ { SourceFile } <nl> + APPEND <nl> + PROPERTY ParseAndAddCatchTests_TESTS " \ " $ { CTestName } \ " " ) <nl> endif ( ) <nl> <nl> <nl> endfunction ( ) <nl> <nl> # entry point <nl> function ( ParseAndAddCatchTests TestTarget ) <nl> - PrintDebugMessage ( " Started parsing $ { TestTarget } " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Started parsing $ { TestTarget } " ) <nl> get_target_property ( SourceFiles $ { TestTarget } SOURCES ) <nl> - PrintDebugMessage ( " Found the following sources : $ { SourceFiles } " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Found the following sources : $ { SourceFiles } " ) <nl> foreach ( SourceFile $ { SourceFiles } ) <nl> - ParseFile ( $ { SourceFile } $ { TestTarget } ) <nl> + ParseAndAddCatchTests_ParseFile ( $ { SourceFile } $ { TestTarget } ) <nl> endforeach ( ) <nl> - PrintDebugMessage ( " Finished parsing $ { TestTarget } " ) <nl> + ParseAndAddCatchTests_PrintDebugMessage ( " Finished parsing $ { TestTarget } " ) <nl> endfunction ( ) <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / assertions . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / assertions . md <nl> <nl> <nl> Most test frameworks have a large collection of assertion macros to capture all possible conditional forms ( ` ` ` _EQUALS ` ` ` , ` ` ` _NOTEQUALS ` ` ` , ` ` ` _GREATER_THAN ` ` ` etc ) . <nl> <nl> - Catch is different . Because it decomposes natural C - style conditional expressions most of these forms are reduced to one or two that you will use all the time . That said there are a rich set of auxiliary macros as well . We ' ll describe all of these here . <nl> + Catch is different . Because it decomposes natural C - style conditional expressions most of these forms are reduced to one or two that you will use all the time . That said there is a rich set of auxiliary macros as well . We ' ll describe all of these here . <nl> <nl> Most of these macros come in two forms : <nl> <nl> Catch provides a way to perform tolerant comparisons of floating point values th <nl> REQUIRE ( performComputation ( ) = = Approx ( 2 . 1 ) ) ; <nl> ` ` ` <nl> <nl> - Catch also provides a UDL for ` Approx ` ; ` _a ` . It resides in <nl> + Catch also provides a user - defined literal for ` Approx ` ; ` _a ` . It resides in <nl> the ` Catch : : literals ` namespace and can be used like so : <nl> ` ` ` cpp <nl> using namespace Catch : : literals ; <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / cmake - integration . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / cmake - integration . md <nl> If Catch2 has been installed in system , both of these can be used after <nl> doing ` find_package ( Catch2 REQUIRED ) ` . Otherwise you need to add them <nl> to your CMake module path . <nl> <nl> - # # # ` Catch . 
cmake ` and ` AddCatchTests . cmake ` <nl> + # # # ` Catch . cmake ` and ` CatchAddTests . cmake ` <nl> <nl> ` Catch . cmake ` provides function ` catch_discover_tests ` to get tests from <nl> a target . This function works by running the resulting executable with <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / command - line . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / command - line . md <nl> <nl> [ Override output colouring ] ( # override - output - colouring ) < br > <nl> <nl> Catch works quite nicely without any command line options at all - but for those times when you want greater control the following options are available . <nl> - Click one of the followings links to take you straight to that option - or scroll on to browse the available options . <nl> + Click one of the following links to take you straight to that option - or scroll on to browse the available options . <nl> <nl> < a href = " # specifying - which - tests - to - run " > ` < test - spec > . . . ` < / a > < br / > <nl> < a href = " # usage " > ` - h , - ? , - - help ` < / a > < br / > <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / configuration . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / configuration . md <nl> them yourself , their signatures are : <nl> By default , when Catch ' s stringification machinery has to stringify <nl> a type that does not specialize ` StringMaker ` , does not overload ` operator < < ` , <nl> is not an enumeration and is not a range , it uses ` " { ? } " ` . This can be <nl> - overriden by defining ` CATCH_CONFIG_FALLBACK_STRINGIFIER ` to name of a <nl> + overridden by defining ` CATCH_CONFIG_FALLBACK_STRINGIFIER ` to name of a <nl> function that should perform the stringification instead . <nl> <nl> All types that do not provide ` StringMaker ` specialization or ` operator < < ` <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / contributing . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / contributing . md <nl> before you do so , you need to check that the introduced changes are indeed <nl> intentional . <nl> <nl> <nl> + # # Code constructs to watch out for <nl> <nl> - * this document is still in - progress . . . * <nl> + This section is a ( sadly incomplete ) listing of various constructs that <nl> + are problematic and are not always caught by our CI infrastructure . <nl> + <nl> + # # # Naked exceptions and exceptions - related function <nl> + <nl> + If you are throwing an exception , it should be done via ` CATCH_ERROR ` <nl> + or ` CATCH_RUNTIME_ERROR ` in ` catch_enforce . h ` . These macros will handle <nl> + the differences between compilation with or without exceptions for you . <nl> + However , some platforms ( IAR ) also have problems with exceptions - related <nl> + functions , such as ` std : : current_exceptions ` . We do not have IAR in our <nl> + CI , but luckily there should not be too many reasons to use these . <nl> + However , if you do , they should be kept behind a <nl> + ` CATCH_CONFIG_DISABLE_EXCEPTIONS ` macro . <nl> + <nl> + # # # Unqualified usage of functions from C ' s stdlib <nl> + <nl> + If you are using a function from C ' s stdlib , please include the header <nl> + as ` < cfoo > ` and call the function qualified . 
The common knowledge that <nl> + there is no difference is wrong , QNX and VxWorks won ' t compile if you <nl> + include the header as ` < cfoo > ` and call the function unqualified . <nl> + <nl> + <nl> + mmm - <nl> + <nl> + _This documentation will always be in - progress as new information comes <nl> + up , but we are trying to keep it as up to date as possible . _ <nl> <nl> mmm <nl> <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / event - listeners . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / event - listeners . md <nl> struct MyListener : Catch : : TestEventListenerBase { <nl> <nl> using TestEventListenerBase : : TestEventListenerBase ; / / inherit constructor <nl> <nl> - virtual void testCaseStarting ( Catch : : TestCaseInfo const & testInfo ) override { <nl> + void testCaseStarting ( Catch : : TestCaseInfo const & testInfo ) override { <nl> / / Perform some setup before a test case is run <nl> } <nl> <nl> - virtual void testCaseEnded ( Catch : : TestCaseStats const & testCaseStats ) override { <nl> + void testCaseEnded ( Catch : : TestCaseStats const & testCaseStats ) override { <nl> / / Tear - down after a test case is run <nl> - } <nl> + } <nl> } ; <nl> CATCH_REGISTER_LISTENER ( MyListener ) <nl> ` ` ` <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / generators . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / generators . md <nl> type , making their usage much nicer . These are <nl> * ` filter ( predicate , GeneratorWrapper < T > & & ) ` for ` FilterGenerator < T , Predicate > ` <nl> * ` take ( count , GeneratorWrapper < T > & & ) ` for ` TakeGenerator < T > ` <nl> * ` repeat ( repeats , GeneratorWrapper < T > & & ) ` for ` RepeatGenerator < T > ` <nl> - * ` map ( func , GeneratorWrapper < T > & & ) ` for ` MapGenerator < T , T , Func > ` ( map ` T ` to ` T ` ) <nl> + * ` map ( func , GeneratorWrapper < T > & & ) ` for ` MapGenerator < T , U , Func > ` ( map ` U ` to ` T ` , deduced from ` Func ` ) <nl> * ` map < T > ( func , GeneratorWrapper < U > & & ) ` for ` MapGenerator < T , U , Func > ` ( map ` U ` to ` T ` ) <nl> * ` chunk ( chunk - size , GeneratorWrapper < T > & & ) ` for ` ChunkGenerator < T > ` <nl> * ` random ( IntegerOrFloat a , IntegerOrFloat b ) ` for ` RandomIntegerGenerator ` or ` RandomFloatGenerator ` <nl> - * ` range ( start , end ) ` for ` RangeGenerator < T > ` with a step size of ` 1 ` <nl> + * ` range ( start , end ) ` for ` RangeGenerator < T > ` with a step size of ` 1 ` <nl> * ` range ( start , end , step ) ` for ` RangeGenerator < T > ` with a custom step size <nl> <nl> <nl> used with other generators as arguments , such as ` auto i = GENERATE ( 0 , 2 , <nl> take ( 100 , random ( 300 , 3000 ) ) ) ; ` . This is useful e . g . if you know that <nl> specific inputs are problematic and want to test them separately / first . <nl> <nl> - * * For safety reasons , you cannot use variables inside the ` GENERATE ` macro . * * <nl> + * * For safety reasons , you cannot use variables inside the ` GENERATE ` macro . <nl> + This is done because the generator expression _will_ outlive the outside <nl> + scope and thus capturing references is dangerous . If you need to use <nl> + variables inside the generator expression , make sure you thought through <nl> + the lifetime implications and use ` GENERATE_COPY ` or ` GENERATE_REF ` . 
* * <nl> <nl> You can also override the inferred type by using ` as < type > ` as the first <nl> argument to the macro . This can be useful when dealing with string literals , <nl> if you want them to come out as ` std : : string ` : <nl> <nl> ` ` ` cpp <nl> TEST_CASE ( " type conversion " , " [ generators ] " ) { <nl> - auto str = GENERATE ( as < std : : string > { } , " a " , " bb " , " ccc " ) ; ` <nl> + auto str = GENERATE ( as < std : : string > { } , " a " , " bb " , " ccc " ) ; <nl> REQUIRE ( str . size ( ) > 0 ) ; <nl> } <nl> ` ` ` <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / list - of - examples . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / list - of - examples . md <nl> <nl> - Generators : [ Create your own generator ] ( . . / examples / 300 - Gen - OwnGenerator . cpp ) <nl> - Generators : [ Use map to convert types in GENERATE expression ] ( . . / examples / 301 - Gen - MapTypeConversion . cpp ) <nl> - Generators : [ Use variables in generator expressions ] ( . . / examples / 310 - Gen - VariablesInGenerators . cpp ) <nl> + - Generators : [ Use custom variable capture in generator expressions ] ( . . / examples / 311 - Gen - CustomCapture . cpp ) <nl> <nl> <nl> # # Planned <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / logging . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / logging . md <nl> Second unscoped info <nl> <nl> # # Streaming macros <nl> <nl> - All these macros allow heterogenous sequences of values to be streaming using the insertion operator ( ` ` ` < < ` ` ` ) in the same way that std : : ostream , std : : cout , etc support it . <nl> + All these macros allow heterogeneous sequences of values to be streaming using the insertion operator ( ` ` ` < < ` ` ` ) in the same way that std : : ostream , std : : cout , etc support it . <nl> <nl> E . g . : <nl> ` ` ` c + + <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / matchers . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / matchers . md <nl> public : <nl> IntRange ( int begin , int end ) : m_begin ( begin ) , m_end ( end ) { } <nl> <nl> / / Performs the test for this matcher <nl> - virtual bool match ( int const & i ) const override { <nl> + bool match ( int const & i ) const override { <nl> return i > = m_begin & & i < = m_end ; <nl> } <nl> <nl> public : <nl> / / include any provided data ( the begin / end in this case ) and <nl> / / be written as if it were stating a fact ( in the output it will be <nl> / / preceded by the value under test ) . <nl> - virtual std : : string describe ( ) const { <nl> + virtual std : : string describe ( ) const override { <nl> std : : ostringstream ss ; <nl> ss < < " is between " < < m_begin < < " and " < < m_end ; <nl> return ss . str ( ) ; <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / opensource - users . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / opensource - users . md <nl> Listing a project here does not imply endorsement and the plan is to keep these <nl> <nl> # # Libraries & Frameworks <nl> <nl> + # # # [ ApprovalTests . cpp ] ( https : / / github . com / approvals / ApprovalTests . cpp ) <nl> + C + + 11 implementation of Approval Tests , for quick , convenient testing of legacy code . 
<nl> + <nl> # # # [ Azmq ] ( https : / / github . com / zeromq / azmq ) <nl> Boost Asio style bindings for ZeroMQ . <nl> <nl> MAME originally stood for Multiple Arcade Machine Emulator . <nl> # # # [ Newsbeuter ] ( https : / / github . com / akrennmair / newsbeuter ) <nl> Newsbeuter is an open - source RSS / Atom feed reader for text terminals . <nl> <nl> + # # # [ raspigcd ] ( https : / / github . com / pantadeusz / raspigcd ) <nl> + Low level CLI app and library for execution of GCODE on Raspberry Pi without any additional microcontrolers ( just RPi + Stepsticks ) . <nl> + <nl> # # # [ SpECTRE ] ( https : / / github . com / sxs - collaboration / spectre ) <nl> SpECTRE is a code for multi - scale , multi - physics problems in astrophysics and gravitational physics . <nl> <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / other - macros . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / other - macros . md <nl> CHECKED_IF ( a = = b ) { <nl> <nl> ` CHECK_NOFAIL ( expr ) ` is a variant of ` CHECK ` that does not fail the test <nl> case if _expr_ evaluates to ` false ` . This can be useful for checking some <nl> - assumption , that might be violated without the test neccessarily failing . <nl> + assumption , that might be violated without the test necessarily failing . <nl> <nl> Example output : <nl> ` ` ` <nl> constructor , or before Catch2 ' s session is created in user ' s own main . _ <nl> ` ANON_TEST_CASE ` is a ` TEST_CASE ` replacement that will autogenerate <nl> unique name . The advantage of this is that you do not have to think <nl> of a name for the test case , ` the disadvantage is that the name doesn ' t <nl> - neccessarily remain stable across different links , and thus it might be <nl> + necessarily remain stable across different links , and thus it might be <nl> hard to run directly . <nl> <nl> Example : <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / release - notes . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / release - notes . md <nl> <nl> <nl> # Release notes <nl> * * Contents * * < br > <nl> + [ 2 . 7 . 2 ] ( # 272 ) < br > <nl> + [ 2 . 7 . 1 ] ( # 271 ) < br > <nl> [ 2 . 7 . 0 ] ( # 270 ) < br > <nl> [ 2 . 6 . 1 ] ( # 261 ) < br > <nl> [ 2 . 6 . 0 ] ( # 260 ) < br > <nl> <nl> [ Older versions ] ( # older - versions ) < br > <nl> [ Even Older versions ] ( # even - older - versions ) < br > <nl> <nl> + <nl> + # # 2 . 7 . 
2 <nl> + <nl> + # # # Improvements <nl> + * Added an approximate vector matcher ( # 1499 ) <nl> + <nl> + # # # Fixes <nl> + * Filters will no longer be shown if there were none <nl> + * Fixed compilation error when using Homebrew GCC on OS X ( # 1588 , # 1589 ) <nl> + * Fixed the console reporter not showing messages that start with a newline ( # 1455 , # 1470 ) <nl> + * Modified JUnit reporter ' s output so that rng seed and filters are reported according to the JUnit schema ( # 1598 ) <nl> + * Fixed some obscure warnings and static analysis passes <nl> + <nl> + # # # Miscellaneous <nl> + * Various improvements to ` ParseAndAddCatchTests ` ( # 1559 , # 1601 ) <nl> + * When a target is parsed , it receives ` ParseAndAddCatchTests_TESTS ` property which summarizes found tests <nl> + * Fixed problem with tests not being found if the ` OptionalCatchTestLauncher ` variables is used <nl> + * Including the script will no longer forcefully modify ` CMAKE_MINIMUM_REQUIRED_VERSION ` <nl> + * CMake object libraries are ignored when parsing to avoid needless warnings <nl> + * ` CatchAddTests ` now adds test ' s tags to their CTest labels ( # 1600 ) <nl> + * Added basic CPack support to our build <nl> + <nl> + # # 2 . 7 . 1 <nl> + <nl> + # # # Improvements <nl> + * Reporters now print out the filters applied to test cases ( # 1550 , # 1585 ) <nl> + * Added ` GENERATE_COPY ` and ` GENERATE_VAR ` macros that can use variables inside the generator expression <nl> + * Because of the significant danger of lifetime issues , the default ` GENERATE ` macro still does not allow variables <nl> + * The ` map ` generator helper now deduces the mapped return type ( # 1576 ) <nl> + <nl> + # # # Fixes <nl> + * Fixed ObjC + + compilation ( # 1571 ) <nl> + * Fixed test tag parsing so that ` [ . foo ] ` is now parsed as ` [ . ] [ foo ] ` . <nl> + * Suppressed warning caused by the Windows headers defining SE codes in different manners ( # 1575 ) <nl> + <nl> # # 2 . 7 . 0 <nl> <nl> # # # Improvements <nl> <nl> * Running tests will no longer open the specified output file twice ( # 1545 ) <nl> * This would cause trouble when the file was not a file , but rather a named pipe <nl> * Fixes the CLion / Resharper integration with Catch <nl> - * Fixed ` - Wunreachable - code ` occuring with ( old ) ccache + cmake + clang combination ( # 1540 ) <nl> + * Fixed ` - Wunreachable - code ` occurring with ( old ) ccache + cmake + clang combination ( # 1540 ) <nl> * Fixed ` - Wdefaulted - function - deleted ` warning with Clang 8 ( # 1537 ) <nl> * Catch2 ' s type traits and helpers are now properly namespaced inside ` Catch : : ` ( # 1548 ) <nl> * Fixed std { out , err } redirection for failing test ( # 1514 , # 1525 ) <nl> Cygwin issue with ` gettimeofday ` - ` # define ` was not early enough <nl> * Usage of ` gettimeofday ` inside Catch should no longer cause compilation errors . <nl> * Improved ` - Wparentheses ` suppression for gcc ( # 674 ) <nl> * When compiled with gcc 4 . 8 or newer , the suppression is localized to assertions only <nl> - * Otherwise it is supressed for the whole TU <nl> + * Otherwise it is suppressed for the whole TU <nl> * Fixed test spec parser issue ( with escapes in multiple names ) <nl> <nl> # # # # # Other <nl> Other : <nl> <nl> # # # # # Other : <nl> * Types with overloaded ` & & ` operator are no longer evaluated twice when used in an assertion macro . 
<nl> - * The use of ` __COUNTER__ ` is supressed when Catch is parsed by CLion <nl> + * The use of ` __COUNTER__ ` is suppressed when Catch is parsed by CLion <nl> * This change is not active when compiling a binary <nl> * Approval tests can now be run on Windows <nl> * CMake will now warn if a file is present in the ` include ` folder but not is not enumerated as part of the project <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / release - process . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / release - process . md <nl> Once a release is ready , release notes need to be written . They should summarize <nl> <nl> # # # Commit and push update to GitHub <nl> <nl> - After version number is incremented , single - include header is regenerated and release notes are updated , changes should be commited and pushed to GitHub . <nl> + After version number is incremented , single - include header is regenerated and release notes are updated , changes should be committed and pushed to GitHub . <nl> <nl> <nl> # # # Release on GitHub <nl> mmm a / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / tutorial . md <nl> ppp b / tests / vendor / cget / pkg / catchorg__Catch2 / install / share / doc / Catch2 / tutorial . md <nl> The full source for Catch2 , including test projects , documentation , and other th <nl> <nl> # # Where to put it ? <nl> <nl> - Catch2 is header only . All you need to do is drop the file somewhere reachable from your project - either in some central location you can set your header search path to find , or directly into your project tree itself ! This is a particularly good option for other Open - Source projects that want to use Catch for their test suite . See [ this blog entry for more on that ] ( http : / / www . levelofindirection . com / journal / 2011 / 5 / 27 / unit - testing - in - c - and - objective - c - just - got - ridiculously - easi . html ) . <nl> + Catch2 is header only . All you need to do is drop the file somewhere reachable from your project - either in some central location you can set your header search path to find , or directly into your project tree itself ! This is a particularly good option for other Open - Source projects that want to use Catch for their test suite . See [ this blog entry for more on that ] ( https : / / levelofindirection . com / blog / unit - testing - in - cpp - and - objective - c - just - got - ridiculously - easier - still . html ) . <nl> <nl> The rest of this tutorial will assume that the Catch2 single - include header ( or the include folder ) is available unqualified - but you may need to prefix it with a folder name if necessary . <nl> <nl> Of course there are still more issues to deal with . For example we ' ll hit proble <nl> <nl> # # # What did we do here ? <nl> <nl> - Although this was a simple test it ' s been enough to demonstrate a few things about how Catch is used . Let ' s take moment to consider those before we move on . <nl> + Although this was a simple test it ' s been enough to demonstrate a few things about how Catch is used . Let ' s take a moment to consider those before we move on . <nl> <nl> 1 . All we did was ` ` ` # define ` ` ` one identifier and ` ` ` # include ` ` ` one header and we got everything - even an implementation of ` ` ` main ( ) ` ` ` that will [ respond to command line arguments ] ( command - line . md # top ) . 
You can only use that ` ` ` # define ` ` ` in one implementation file , for ( hopefully ) obvious reasons . Once you have more than one file with unit tests in you ' ll just ` ` ` # include " catch . hpp " ` ` ` and go . Usually it ' s a good idea to have a dedicated implementation file that just has ` ` ` # define CATCH_CONFIG_MAIN ` ` ` and ` ` ` # include " catch . hpp " ` ` ` . You can also provide your own implementation of main and drive Catch yourself ( see [ Supplying - your - own - main ( ) ] ( own - main . md # top ) ) . <nl> 2 . We introduce test cases with the ` ` ` TEST_CASE ` ` ` macro . This macro takes one or two arguments - a free form test name and , optionally , one or more tags ( for more see < a href = " # test - cases - and - sections " > Test cases and Sections < / a > , ) . The test name must be unique . You can run sets of tests by specifying a wildcarded test name or a tag expression . See the [ command line docs ] ( command - line . md # top ) for more information on running tests . <nl> This works because the ` ` ` SECTION ` ` ` macro contains an if statement that calls b <nl> <nl> So far so good - this is already an improvement on the setup / teardown approach because now we see our setup code inline and use the stack . <nl> <nl> - The power of sections really shows , however , when we need to execute a sequence of , checked , operations . Continuing the vector example , we might want to verify that attempting to reserve a capacity smaller than the current capacity of the vector changes nothing . We can do that , naturally , like so : <nl> + The power of sections really shows , however , when we need to execute a sequence of checked operations . Continuing the vector example , we might want to verify that attempting to reserve a capacity smaller than the current capacity of the vector changes nothing . We can do that , naturally , like so : <nl> <nl> ` ` ` c + + <nl> SECTION ( " reserving bigger changes capacity but not size " ) { <nl>
update tests / vendor
pqrs-org/Karabiner-Elements
72a1aa894b83dd9fc83f67246fd1b35bde2997a0
2019-05-25T23:53:20Z
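The behavioral core of this Catch2 bump (2.7.0 → 2.7.2) is the new selection predicate applied both when running tests and in `filterTests`: a test runs if no filters were supplied and it is not hidden, or if filters were supplied and the spec matches it. Below is a minimal standalone sketch of that predicate; `TestCase`, `TestSpec`, `shouldRun`, and the sample data are hypothetical stand-ins for illustration, not Catch2's real types.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for Catch2's TestCase/TestSpec, reduced to the
// fields needed to demonstrate the selection predicate from the diff above.
struct TestCase {
    std::string name;
    bool hidden;            // true for tests tagged [.] or [.foo]
};

struct TestSpec {
    std::vector<std::string> patterns;   // user-supplied name filters
    bool hasFilters() const { return !patterns.empty(); }
    bool matches(const TestCase& tc) const {
        for (const auto& p : patterns)
            if (tc.name.find(p) != std::string::npos) return true;
        return false;
    }
};

// Mirrors the new logic: with no filters, run every non-hidden test;
// with filters, run exactly the tests the spec matches (hidden or not).
bool shouldRun(const TestCase& tc, const TestSpec& spec) {
    return (!spec.hasFilters() && !tc.hidden) ||
           (spec.hasFilters() && spec.matches(tc));
}

int main() {
    std::vector<TestCase> tests = {
        {"vector grows", false},
        {"approvals", true},   // hidden: runs only when explicitly selected
    };
    TestSpec noFilters{};
    TestSpec approvalsOnly{{"approvals"}};
    for (const auto& tc : tests)
        std::cout << tc.name << ": default=" << shouldRun(tc, noFilters)
                  << " filtered=" << shouldRun(tc, approvalsOnly) << '\n';
}
```

Run with no patterns, the hidden test is skipped; name it explicitly and it runs. This pairs with the `[.foo]` → `[.][foo]` tag-splitting change earlier in the same diff, which is what makes such merged hide tags selectable by their plain name.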
mmm a / src / share / gcd_utility . hpp <nl> ppp b / src / share / gcd_utility . hpp <nl> class gcd_utility final { <nl> uint64_t interval = 100 . 0 * NSEC_PER_SEC ; / * dummy value * / <nl> dispatch_source_set_timer ( timer_ , when , interval , 0 ) ; <nl> dispatch_source_set_event_handler ( timer_ , ^ { <nl> - block ( ) ; <nl> cancel ( ) ; <nl> + block ( ) ; <nl> } ) ; <nl> dispatch_resume ( timer_ ) ; <nl> } <nl> class gcd_utility final { <nl> cancel ( ) ; <nl> } <nl> <nl> + bool fired ( void ) const { <nl> + bool __block r ; <nl> + gcd_utility : : dispatch_sync_in_main_queue ( ^ { <nl> + r = ! timer_ ; <nl> + } ) ; <nl> + return r ; <nl> + } <nl> + <nl> private : <nl> void cancel ( void ) { <nl> / / Release timer_ in main thread to avoid callback invocations after object has been destroyed . <nl> - dispatch_sync_in_main_queue ( ^ { <nl> + gcd_utility : : dispatch_sync_in_main_queue ( ^ { <nl> if ( timer_ ) { <nl> dispatch_source_cancel ( timer_ ) ; <nl> dispatch_release ( timer_ ) ; <nl> mmm a / tests / src / gcd_utility / test . cpp <nl> ppp b / tests / src / gcd_utility / test . cpp <nl> TEST_CASE ( " main_queue_after_timer " ) { <nl> ^ { <nl> + + value ; <nl> } ) ; <nl> + REQUIRE ( ! timer . fired ( ) ) ; <nl> + <nl> std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 200 ) ) ; <nl> REQUIRE ( value = = 1 ) ; <nl> + <nl> + REQUIRE ( timer . fired ( ) ) ; <nl> } <nl> <nl> { <nl> - krbn : : gcd_utility : : main_queue_after_timer timer ( dispatch_time ( DISPATCH_TIME_NOW , 100 * NSEC_PER_MSEC ) , <nl> - ^ { <nl> - + + value ; <nl> - } ) ; <nl> + class wrapper final { <nl> + public : <nl> + std : : shared_ptr < krbn : : gcd_utility : : main_queue_after_timer > timer ; <nl> + } ; <nl> + <nl> + wrapper w ; <nl> + wrapper * p = & w ; <nl> + w . timer = std : : make_shared < krbn : : gcd_utility : : main_queue_after_timer > ( dispatch_time ( DISPATCH_TIME_NOW , 100 * NSEC_PER_MSEC ) , <nl> + ^ { <nl> + / / w is copied before timer was constructed . <nl> + REQUIRE ( w . timer . get ( ) = = nullptr ) ; <nl> + <nl> + / / p refers ` w ` in the timer . <nl> + REQUIRE ( p - > timer ) ; <nl> + REQUIRE ( p - > timer - > fired ( ) ) ; <nl> + <nl> + + + value ; <nl> + } ) ; <nl> std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 200 ) ) ; <nl> REQUIRE ( value = = 2 ) ; <nl> } <nl> <nl> { <nl> { <nl> - krbn : : gcd_utility : : main_queue_after_timer main_queue_after_timer ( dispatch_time ( DISPATCH_TIME_NOW , 100 * NSEC_PER_MSEC ) , <nl> - ^ { <nl> - + + value ; <nl> - } ) ; <nl> + krbn : : gcd_utility : : main_queue_after_timer timer ( dispatch_time ( DISPATCH_TIME_NOW , 100 * NSEC_PER_MSEC ) , <nl> + ^ { <nl> + + + value ; <nl> + } ) ; <nl> std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 50 ) ) ; <nl> + <nl> + / / timer will be canceled . <nl> } <nl> REQUIRE ( value = = 2 ) ; <nl> <nl>
add gcd_utility : : main_queue_after_timer : : fired
pqrs-org/Karabiner-Elements
84fd98de49dd62a6b2e28d6c58e11c2b5ef7823a
2017-05-20T20:56:28Z
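This gcd_utility commit swaps the order of `block()` and `cancel()` in the timer's event handler and adds a `fired()` query, so code running inside the callback (as the new test does through `p->timer->fired()`) observes the timer as already finished. Below is a portable sketch of that ordering, assuming a plain `std::thread` plus `std::atomic<bool>` in place of the real GCD dispatch source, and omitting the cancellation-on-destruction the real class performs.

```cpp
#include <atomic>
#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

// Portable analog of main_queue_after_timer (the real class uses a GCD
// dispatch_source timer and synchronizes fired() via the main queue).
// The point mirrored here is the ordering fix from the diff: the timer is
// marked finished *before* the user block runs, so fired() returns true
// when queried from inside the callback.
class after_timer {
public:
    after_timer(std::chrono::milliseconds delay, std::function<void()> block)
        : fired_(false),
          thread_([this, delay, block = std::move(block)] {
              std::this_thread::sleep_for(delay);
              fired_.store(true);   // tear down / mark finished first ...
              block();              // ... then invoke the block
          }) {}

    ~after_timer() {
        if (thread_.joinable()) thread_.join();
    }

    bool fired() const { return fired_.load(); }

private:
    std::atomic<bool> fired_;
    std::thread thread_;
};

int main() {
    after_timer* p = nullptr;
    after_timer t(std::chrono::milliseconds(100), [&] {
        // Matches the test's expectation: the timer reports fired()
        // from within its own callback.
        std::cout << "fired inside callback: " << p->fired() << '\n';
    });
    p = &t;
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
    std::cout << "fired after wait: " << t.fired() << '\n';
}
```

With the old `block(); cancel();` ordering, the real class's `fired()` (which reports `!timer_`) would still return false inside the callback because the dispatch source had not yet been released — exactly the case the added `REQUIRE(p->timer->fired())` in the updated test exercises.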
mmm a / Marlin / Configuration . h <nl> ppp b / Marlin / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER 10 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER 0 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / Marlin_main . cpp <nl> ppp b / Marlin / Marlin_main . cpp <nl> static uint8_t target_extruder ; <nl> int xy_probe_speed = XY_PROBE_SPEED ; <nl> bool bed_leveling_in_progress = false ; <nl> # define XY_PROBE_FEEDRATE xy_probe_speed <nl> + # elif defined ( XY_PROBE_SPEED ) <nl> + # define XY_PROBE_FEEDRATE XY_PROBE_SPEED <nl> # else <nl> # define XY_PROBE_FEEDRATE ( min ( planner . max_feedrate [ X_AXIS ] , planner . max_feedrate [ Y_AXIS ] ) * 60 ) <nl> # endif <nl> mmm a / Marlin / example_configurations / Cartesio / Configuration . h <nl> ppp b / Marlin / example_configurations / Cartesio / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER 10 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER 0 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / Felix / Configuration . h <nl> ppp b / Marlin / example_configurations / Felix / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! 
AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / Felix / DUAL / Configuration . h <nl> ppp b / Marlin / example_configurations / Felix / DUAL / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / Hephestos / Configuration . h <nl> ppp b / Marlin / example_configurations / Hephestos / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / Hephestos_2 / Configuration . h <nl> ppp b / Marlin / example_configurations / Hephestos_2 / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER 15 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER 0 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! 
AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 2 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / K8200 / Configuration . h <nl> ppp b / Marlin / example_configurations / K8200 / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / RepRapWorld / Megatronics / Configuration . h <nl> ppp b / Marlin / example_configurations / RepRapWorld / Megatronics / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / RigidBot / Configuration . h <nl> ppp b / Marlin / example_configurations / RigidBot / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! 
AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / SCARA / Configuration . h <nl> ppp b / Marlin / example_configurations / SCARA / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / TAZ4 / Configuration . h <nl> ppp b / Marlin / example_configurations / TAZ4 / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / WITBOX / Configuration . h <nl> ppp b / Marlin / example_configurations / WITBOX / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> <nl> # endif / / ! 
AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / adafruit / ST7565 / Configuration . h <nl> ppp b / Marlin / example_configurations / adafruit / ST7565 / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / delta / biv2 . 5 / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / biv2 . 5 / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 10 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 3 . 5 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 4000 <nl> + <nl> / / Allen key retractable z - probe as seen on many Kossel delta printers - http : / / reprap . org / wiki / Kossel # Automatic_bed_leveling_probe <nl> / / Deploys by touching z - axis belt . Retracts by pushing the probe down . Uses Z_MIN_PIN . <nl> / / # define Z_PROBE_ALLEN_KEY <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 4000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / delta / generic / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / generic / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 10 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 3 . 
5 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 4000 <nl> + <nl> / / Allen key retractable z - probe as seen on many Kossel delta printers - http : / / reprap . org / wiki / Kossel # Automatic_bed_leveling_probe <nl> / / Deploys by touching z - axis belt . Retracts by pushing the probe down . Uses Z_MIN_PIN . <nl> / / # define Z_PROBE_ALLEN_KEY <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 4000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / delta / kossel_mini / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / kossel_mini / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 10 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 3 . 5 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 4000 <nl> + <nl> / / Allen key retractable z - probe as seen on many Kossel delta printers - http : / / reprap . org / wiki / Kossel # Automatic_bed_leveling_probe <nl> / / Deploys by touching z - axis belt . Retracts by pushing the probe down . Uses Z_MIN_PIN . <nl> # define Z_PROBE_ALLEN_KEY <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 4000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / delta / kossel_pro / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / kossel_pro / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> / / not giving someone a head crash . Use something like G29 Z - 0 . 2 to adjust as needed . <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 17 . 25 / / Increase this if the first layer is too thin ( remember : it ' s a negative number so increase means closer to zero ) . <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / Allen key retractable z - probe as seen on many Kossel delta printers - http : / / reprap . org / wiki / Kossel # Automatic_bed_leveling_probe <nl> / / Deploys by touching z - axis belt . Retracts by pushing the probe down . Uses Z_MIN_PIN . <nl> # define Z_PROBE_ALLEN_KEY <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . 
<nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / delta / kossel_xl / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / kossel_xl / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER 0 . 0 / / Z probe to nozzle Y offset : - front + behind <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER 0 . 3 / / Z probe to nozzle Z offset : - below ( always ! ) <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / Allen key retractable z - probe as seen on many Kossel delta printers - http : / / reprap . org / wiki / Kossel # Automatic_bed_leveling_probe <nl> / / Deploys by touching z - axis belt . Retracts by pushing the probe down . Uses Z_MIN_PIN . <nl> / / # define Z_PROBE_ALLEN_KEY <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 10 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / makibox / Configuration . h <nl> ppp b / Marlin / example_configurations / makibox / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . <nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = false ; / / set to true to invert the l <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl> mmm a / Marlin / example_configurations / tvrrug / Round2 / Configuration . h <nl> ppp b / Marlin / example_configurations / tvrrug / Round2 / Configuration . h <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 / / Y offset : - front + behind [ the nozzle ] <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 / / Z offset : - below + above [ the nozzle ] <nl> <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> / / <nl> / / Allen Key Probe is defined in the Delta example configurations . 
<nl> / / <nl> const bool Z_MIN_PROBE_ENDSTOP_INVERTING = true ; / / set to true to invert the lo <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # define XY_PROBE_SPEED 8000 / / X and Y axis travel speed between probes , in mm / min . <nl> - <nl> # define Z_RAISE_BETWEEN_PROBINGS 5 / / How much the Z axis will be raised when traveling from between next probing points . <nl> <nl> / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " / / These commands will be executed in the end of G29 routine . <nl>
Make XY_PROBE_SPEED a general probe option
MarlinFirmware/Marlin
d50539583a087c45cab006255b655f2cebf27144
2016-06-22T10:04:47Z
mmm a / stdlib / public / SwiftShims / LibcShims . h <nl> ppp b / stdlib / public / SwiftShims / LibcShims . h <nl> int _stdlib_ioctlPtr ( int fd , unsigned long int request , void * ptr ) ; <nl> / / Environment <nl> # if defined ( __APPLE__ ) | | defined ( __FreeBSD__ ) <nl> SWIFT_RUNTIME_STDLIB_INTERNAL <nl> - char * _Nullable * _stdlib_getEnviron ( ) ; <nl> + char * _Nullable * _Null_unspecified _stdlib_getEnviron ( ) ; <nl> # endif <nl> <nl> / / System error numbers < errno . h > <nl> mmm a / stdlib / public / stubs / LibcShims . cpp <nl> ppp b / stdlib / public / stubs / LibcShims . cpp <nl> int swift : : _stdlib_ioctlPtr ( int fd , unsigned long int request , void * ptr ) { <nl> <nl> # if defined ( __FreeBSD__ ) <nl> SWIFT_RUNTIME_STDLIB_INTERNAL <nl> - char * _Nullable * swift : : _stdlib_getEnviron ( ) { <nl> + char * _Nullable * _Null_unspecified swift : : _stdlib_getEnviron ( ) { <nl> extern char * * environ ; <nl> return environ ; <nl> } <nl>
Merge pull request from mikeash / warning - fix - libcshims - metadata
apple/swift
0a6ea9733c47738868478e52fdc95b89dbf8595a
2018-05-10T20:13:02Z
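A note on the commit above: clang's nullability qualifiers bind to the pointer declarator on their left, so the annotated declaration reads inside-out. A minimal sketch, assuming clang's nullability extension (the function name is taken from the diff):

// The inner pointer (each environment entry) is _Nullable; the outer
// pointer (the array itself) is _Null_unspecified, which silences the
// missing-annotation warning without promising a non-null result.
char * _Nullable * _Null_unspecified _stdlib_getEnviron();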
mmm a / validation - test / compiler_crashers / 28180 - rawrepresentable - extension - with - initializer . swift <nl> ppp b / validation - test / compiler_crashers / 28180 - rawrepresentable - extension - with - initializer . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2016 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See http : / / swift . org / LICENSE . txt for license information <nl> + / / See http : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> / / RUN : not - - crash % target - swift - frontend % s - parse <nl> <nl> / / ASAN Output : stack - overflow on address 0x7fff31bf3ff8 ( pc 0x0000022f8f44 bp 0x7fff31bf49d0 sp 0x7fff31bf4000 T0 ) <nl>
[ crashers ] Add license headers for crash case
apple/swift
2443bb593f27050ce2b8aa7a33ce8b1adc99554d
2016-03-31T20:23:46Z
mmm a / xbmc / ThumbLoader . cpp <nl> ppp b / xbmc / ThumbLoader . cpp <nl> bool CVideoThumbLoader : : LoadItem ( CFileItem * pItem ) <nl> pItem - > SetThumbnailImage ( cachedThumb ) ; <nl> } <nl> } <nl> - else if ( ! item . m_bIsFolder & & item . IsVideo ( ) & & g_guiSettings . GetBool ( " myvideos . extractthumb " ) ) <nl> + else if ( ! item . m_bIsFolder & & item . IsVideo ( ) & & g_guiSettings . GetBool ( " myvideos . extractflags " ) ) <nl> { <nl> CThumbExtractor * extract = new CThumbExtractor ( item , pItem - > m_strPath , true , cachedThumb ) ; <nl> AddJob ( extract ) ; <nl>
fixed : wrong setting was being read
xbmc/xbmc
fafc078379b0509876d24047183e95af0e2b9dd5
2010-04-14T17:59:08Z
mmm a / hphp / runtime / base / object - data . cpp <nl> ppp b / hphp / runtime / base / object - data . cpp <nl> TypedValue * ObjectData : : setOpProp ( TypedValue & tvRef , <nl> <nl> if ( prop & & lookup . accessible ) { <nl> if ( prop - > m_type = = KindOfUninit & & getAttribute ( UseGet ) ) { <nl> - auto tvResult = make_tv < KindOfUninit > ( ) ; <nl> - if ( invokeGet ( & tvResult , key ) ) { <nl> - setopBody ( tvToCell ( & tvResult ) , op , val ) ; <nl> + auto get_result = make_tv < KindOfUninit > ( ) ; <nl> + if ( invokeGet ( & get_result , key ) ) { <nl> + SCOPE_EXIT { tvRefcountedDecRef ( get_result ) ; } ; <nl> + setopBody ( tvToCell ( & get_result ) , op , val ) ; <nl> if ( getAttribute ( UseSet ) ) { <nl> assert ( tvRef . m_type = = KindOfUninit ) ; <nl> - cellDup ( * tvToCell ( & tvResult ) , tvRef ) ; <nl> + cellDup ( * tvToCell ( & get_result ) , tvRef ) ; <nl> if ( invokeSet ( key , & tvRef ) ) { <nl> return & tvRef ; <nl> } <nl> tvRef . m_type = KindOfUninit ; <nl> } <nl> - cellDup ( * tvToCell ( & tvResult ) , * prop ) ; <nl> + cellDup ( * tvToCell ( & get_result ) , * prop ) ; <nl> return prop ; <nl> } <nl> } <nl> - <nl> prop = tvToCell ( prop ) ; <nl> setopBody ( prop , op , val ) ; <nl> return prop ; <nl> TypedValue * ObjectData : : setOpProp ( TypedValue & tvRef , <nl> auto const useGet = getAttribute ( UseGet ) ; <nl> <nl> if ( useGet & & ! useSet ) { <nl> - auto tvResult = make_tv < KindOfNull > ( ) ; <nl> + auto get_result = make_tv < KindOfNull > ( ) ; <nl> / / If invokeGet fails due to recursion , it leaves the KindOfNull . <nl> - invokeGet ( & tvResult , key ) ; <nl> + invokeGet ( & get_result , key ) ; <nl> + SCOPE_EXIT { tvRefcountedDecRef ( get_result ) ; } ; <nl> <nl> / / Note : the tvUnboxIfNeeded comes * after * the setop on purpose <nl> / / here , even though it comes before the IncDecOp in the analogous <nl> / / situation in incDecProp . This is to match zend 5 . 5 behavior . <nl> - setopBody ( tvToCell ( & tvResult ) , op , val ) ; <nl> - tvUnboxIfNeeded ( & tvResult ) ; <nl> + setopBody ( tvToCell ( & get_result ) , op , val ) ; <nl> + tvUnboxIfNeeded ( & get_result ) ; <nl> <nl> if ( prop ) raise_error ( " Cannot access protected property " ) ; <nl> prop = reinterpret_cast < TypedValue * > ( <nl> TypedValue * ObjectData : : setOpProp ( TypedValue & tvRef , <nl> / / unlike the non - magic case below , we may have already created it <nl> / / under the recursion into invokeGet above , so we need to do a <nl> / / tvSet here . <nl> - tvSet ( tvResult , * prop ) ; <nl> + tvSet ( get_result , * prop ) ; <nl> return prop ; <nl> } <nl> <nl>
Fix refcounting ObjectData : : setOpProp
facebook/hhvm
a5c4100b522b6b32688893b3a5c1912b8c78ce08
2015-09-18T14:00:29Z
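The fix above works because a scope guard runs its cleanup on every exit path, so the value produced by invokeGet is decref'd no matter which of the several return statements is taken. A minimal sketch of the idiom in plain C++ — not HHVM's actual SCOPE_EXIT macro (folly provides the real one); the function and the puts call are hypothetical stand-ins:

#include <cstdio>
#include <utility>

// Minimal scope guard sketching the SCOPE_EXIT idiom the fix relies on:
// the cleanup runs in the destructor, so it fires on every exit path.
template <typename F>
struct ScopeGuard {
  explicit ScopeGuard(F fn) : fn_(std::move(fn)) {}
  ~ScopeGuard() { fn_(); }
  ScopeGuard(const ScopeGuard&) = delete;
  ScopeGuard& operator=(const ScopeGuard&) = delete;
  F fn_;
};

int setOpLike(bool magicSetWorks) {
  // Hypothetical placeholder for tvRefcountedDecRef(get_result).
  ScopeGuard guard([] { std::puts("decref get_result"); });
  if (magicSetWorks) return 1;  // early return: guard still fires
  return 0;                     // normal return: guard fires too
}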
mmm a / xbmc / utils / log . cpp <nl> ppp b / xbmc / utils / log . cpp <nl> void CLog : : Log ( int loglevel , const char * format , . . . ) <nl> } <nl> } <nl> <nl> + void CLog : : LogFunction ( int loglevel , const char * functionName , const char * format , . . . ) <nl> + { <nl> + if ( IsLogLevelLogged ( loglevel ) ) <nl> + { <nl> + std : : string fNameStr ; <nl> + if ( functionName & & functionName [ 0 ] ) <nl> + fNameStr . assign ( functionName ) . append ( " : " ) ; <nl> + va_list va ; <nl> + va_start ( va , format ) ; <nl> + LogString ( loglevel , fNameStr + StringUtils : : FormatV ( format , va ) ) ; <nl> + va_end ( va ) ; <nl> + } <nl> + } <nl> + <nl> void CLog : : LogString ( int logLevel , const std : : string & logString ) <nl> { <nl> CSingleLock waitLock ( s_globals . critSec ) ; <nl> mmm a / xbmc / utils / log . h <nl> ppp b / xbmc / utils / log . h <nl> class CLog <nl> ~ CLog ( void ) ; <nl> static void Close ( ) ; <nl> static void Log ( int loglevel , PRINTF_FORMAT_STRING const char * format , . . . ) PARAM2_PRINTF_FORMAT ; <nl> + static void LogFunction ( int loglevel , IN_OPT_STRING const char * functionName , PRINTF_FORMAT_STRING const char * format , . . . ) PARAM3_PRINTF_FORMAT ; <nl> + # define LogF ( loglevel , format , . . . ) LogFunction ( ( loglevel ) , __FUNCTION__ , ( format ) , # # __VA_ARGS__ ) <nl> static void MemDump ( char * pData , int length ) ; <nl> static bool Init ( const std : : string & path ) ; <nl> static void PrintDebugString ( const std : : string & line ) ; / / universal interface for printing debug strings <nl>
CLog : add LogFunction ( ) and macro LogF ( ) for convenient logging with function name
xbmc/xbmc
b3e97a74829494d61270b253b3ddbf2cd057cb9a
2014-08-22T11:57:09Z
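A sketch of how the new macro is meant to be called, based on the LogF definition in the diff; the class and message here are hypothetical:

class CExample
{
  void Process()
  {
    // Textually expands to CLog::LogFunction((LOGDEBUG), __FUNCTION__, ...),
    // so the output is prefixed with the caller's name, e.g.
    // "Process: failed to open stream".
    CLog::LogF(LOGDEBUG, "failed to open %s", "stream");
  }
};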
mmm a / src / mongo / shell / replsettest . js <nl> ppp b / src / mongo / shell / replsettest . js <nl> var ReplSetTest = function ( opts ) { <nl> self . name = conf . _id ; <nl> } <nl> <nl> + / * * <nl> + * Constructor , which instantiates the ReplSetTest object from existing nodes . <nl> + * / <nl> + function _constructFromExistingNodes ( <nl> + { name , nodeHosts , nodeOptions , keyFile , host , waitForKeys } ) { <nl> + print ( ' Recreating replica set from existing nodes ' + tojson ( nodeHosts ) ) ; <nl> + <nl> + self . name = name ; <nl> + self . ports = nodeHosts . map ( node = > node . split ( ' : ' ) [ 1 ] ) ; <nl> + self . nodes = nodeHosts . map ( ( node ) = > { <nl> + const conn = Mongo ( node ) ; <nl> + conn . name = conn . host ; <nl> + return conn ; <nl> + } ) ; <nl> + self . host = host ; <nl> + self . waitForKeys = waitForKeys ; <nl> + self . keyFile = keyFile ; <nl> + self . nodeOptions = nodeOptions ; <nl> + } <nl> + <nl> if ( typeof opts = = = ' string ' | | opts instanceof String ) { <nl> retryOnNetworkError ( function ( ) { <nl> / / The primary may unexpectedly step down during startup if under heavy load <nl> var ReplSetTest = function ( opts ) { <nl> / / its connections . <nl> _constructFromExistingSeedNode ( opts ) ; <nl> } , 60 ) ; <nl> + } else if ( typeof opts . rstArgs = = = " object " ) { <nl> + _constructFromExistingNodes ( opts . rstArgs ) ; <nl> } else { <nl> _constructStartNewInstances ( opts ) ; <nl> } <nl> mmm a / src / mongo / shell / shardingtest . js <nl> ppp b / src / mongo / shell / shardingtest . js <nl> var ShardingTest = function ( params ) { <nl> <nl> / / Publicly exposed variables <nl> <nl> + / * * <nl> + * Tries to load the ' jstests / libs / parallelTester . js ' dependency . Returns true if the file is <nl> + * loaded successfully , and false otherwise . <nl> + * / <nl> + function tryLoadParallelTester ( ) { <nl> + try { <nl> + load ( " jstests / libs / parallelTester . js " ) ; / / For Thread . <nl> + return true ; <nl> + } catch ( e ) { <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> / * * <nl> * Attempts to open a connection to the specified connection string or throws if unable to <nl> * connect . <nl> var ShardingTest = function ( params ) { <nl> totalNumShardNodes ( shardsAsReplSets ) + " total shard nodes . " ) ; <nl> <nl> / / <nl> - / / Initiate each shard replica set . <nl> + / / Initiate each shard replica set and wait for replication . Also initiate the config replica <nl> + / / set . Whenever possible , in parallel . <nl> / / <nl> - if ( shardsAsReplSets ) { <nl> - for ( var i = 0 ; i < numShards ; i + + ) { <nl> - print ( " ShardingTest initiating replica set for shard : " + this . _rs [ i ] . setName ) ; <nl> + const shardsRS = shardsAsReplSets ? this . _rs . map ( obj = > obj . test ) : [ ] ; <nl> + const replicaSetsToInitiate = [ . . . shardsRS , this . configRS ] . map ( rst = > { <nl> + const rstConfig = rst . getReplSetConfig ( ) ; <nl> + if ( rst = = = this . configRS ) { <nl> + rstConfig . configsvr = true ; <nl> + rstConfig . writeConcernMajorityJournalDefault = true ; <nl> + } <nl> + <nl> + return { <nl> + rst , <nl> + / / Arguments for creating instances of each replica set within parallel threads . <nl> + rstArgs : { <nl> + name : rst . name , <nl> + nodeHosts : rst . nodes . map ( node = > ` 127 . 0 . 0 . 1 : $ { node . port } ` ) , <nl> + nodeOptions : rst . nodeOptions , <nl> + keyFile : this . keyFile , <nl> + host : otherParams . useHostname ? 
hostName : " localhost " , <nl> + waitForKeys : false , <nl> + } , <nl> + / / Replica set configuration for initiating the replica set . <nl> + rstConfig , <nl> + } ; <nl> + } ) ; <nl> <nl> - / / ReplSetTest . initiate ( ) requires all nodes to be to be authorized to run <nl> - / / replSetGetStatus . <nl> - / / TODO ( SERVER - 14017 ) : Remove this in favor of using initiate ( ) everywhere . <nl> - this . _rs [ i ] . test . initiateWithAnyNodeAsPrimary ( ) ; <nl> + const initiateReplicaSet = ( rst , rstConfig ) = > { <nl> + rst . initiateWithAnyNodeAsPrimary ( rstConfig ) ; <nl> <nl> - this [ " rs " + i ] = this . _rs [ i ] . test ; <nl> - this . _rsObjects [ i ] = this . _rs [ i ] . test ; <nl> + / / Do replication . <nl> + rst . awaitNodesAgreeOnPrimary ( ) ; <nl> + rst . getPrimary ( ) . getDB ( " admin " ) . foo . save ( { x : 1 } ) ; <nl> + if ( rst . keyFile ) { <nl> + authutil . asCluster ( rst . nodes , rst . keyFile , function ( ) { <nl> + rst . awaitReplication ( ) ; <nl> + } ) ; <nl> + } <nl> + rst . awaitSecondaryNodes ( ) ; <nl> + } ; <nl> <nl> - _alldbpaths . push ( null ) ; <nl> - this . _connections . push ( null ) ; <nl> + const isParallelSupported = ( ( ) = > { <nl> + if ( ! tryLoadParallelTester ( ) ) { <nl> + return false ; <nl> + } <nl> <nl> - if ( otherParams . useBridge ) { <nl> - unbridgedConnections . push ( null ) ; <nl> + for ( let { rst } of replicaSetsToInitiate ) { <nl> + if ( rst . startOptions & & rst . startOptions . clusterAuthMode = = = " x509 " ) { <nl> + / / The mongo shell performing X . 509 authentication as a cluster member requires <nl> + / / starting a parallel shell and using the server ' s ( not the client ' s ) certificate . <nl> + / / The ReplSetTest instance constructed in a Thread wouldn ' t have copied the path to <nl> + / / the server ' s certificate . We therefore fall back to initiating the CSRS and <nl> + / / replica set shards sequentially when X . 509 authentication is being used . <nl> + return false ; <nl> } <nl> - } <nl> - } <nl> <nl> - / / Do replication on replica sets if required <nl> - for ( var i = 0 ; i < numShards ; i + + ) { <nl> - if ( ! shardsAsReplSets ) { <nl> - continue ; <nl> + for ( let n of Object . keys ( rst . nodeOptions ) ) { <nl> + const nodeOptions = rst . nodeOptions [ n ] ; <nl> + if ( nodeOptions & & nodeOptions . clusterAuthMode = = = " x509 " ) { <nl> + return false ; <nl> + } <nl> + } <nl> } <nl> <nl> - var rs = this . _rs [ i ] . test ; <nl> - rs . awaitNodesAgreeOnPrimary ( ) ; <nl> - rs . getPrimary ( ) . getDB ( " admin " ) . foo . save ( { x : 1 } ) ; <nl> + return true ; <nl> + } ) ( ) ; <nl> + <nl> + if ( isParallelSupported ) { <nl> + const threads = [ ] ; <nl> + try { <nl> + for ( let { rstArgs , rstConfig } of replicaSetsToInitiate ) { <nl> + const thread = new Thread ( ( rstArgs , rstConfig , initiateReplicaSet ) = > { <nl> + try { <nl> + const rst = new ReplSetTest ( { rstArgs } ) ; <nl> + initiateReplicaSet ( rst , rstConfig ) ; <nl> + return { ok : 1 } ; <nl> + } catch ( e ) { <nl> + return { <nl> + ok : 0 , <nl> + hosts : rstArgs . nodeHosts , <nl> + name : rstArgs . name , <nl> + error : e . toString ( ) , <nl> + stack : e . stack , <nl> + } ; <nl> + } <nl> + } , rstArgs , rstConfig , initiateReplicaSet ) ; <nl> + thread . start ( ) ; <nl> + threads . push ( thread ) ; <nl> + } <nl> + } finally { <nl> + / / Wait for each thread to finish . Throw an error if any thread fails . <nl> + const returnData = threads . map ( thread = > { <nl> + thread . join ( ) ; <nl> + return thread . 
returnData ( ) ; <nl> + } ) ; <nl> <nl> - if ( this . keyFile ) { <nl> - authutil . asCluster ( rs . nodes , this . keyFile , function ( ) { <nl> - rs . awaitReplication ( ) ; <nl> + returnData . forEach ( res = > { <nl> + assert . commandWorked ( res , <nl> + ' Initiating shard or config servers as a replica set failed ' ) ; <nl> } ) ; <nl> } <nl> + } else { <nl> + for ( let { rst , rstConfig } of replicaSetsToInitiate ) { <nl> + initiateReplicaSet ( rst , rstConfig ) ; <nl> + } <nl> + } <nl> <nl> - rs . awaitSecondaryNodes ( ) ; <nl> - var rsConn = new Mongo ( rs . getURL ( ) ) ; <nl> - rsConn . name = rs . getURL ( ) ; <nl> + if ( shardsAsReplSets ) { <nl> + for ( let i = 0 ; i < numShards ; i + + ) { <nl> + let rs = this . _rs [ i ] . test ; <nl> <nl> - this . _connections [ i ] = rsConn ; <nl> - this [ " shard " + i ] = rsConn ; <nl> - rsConn . rs = rs ; <nl> - } <nl> + this [ " rs " + i ] = rs ; <nl> + this . _rsObjects [ i ] = rs ; <nl> + <nl> + _alldbpaths . push ( null ) ; <nl> + this . _connections . push ( null ) ; <nl> <nl> - / / ReplSetTest . initiate ( ) requires all nodes to be to be authorized to run replSetGetStatus . <nl> - / / TODO ( SERVER - 14017 ) : Remove this in favor of using initiate ( ) everywhere . <nl> - this . configRS . initiateWithAnyNodeAsPrimary ( config ) ; <nl> + if ( otherParams . useBridge ) { <nl> + unbridgedConnections . push ( null ) ; <nl> + } <nl> + let rsConn = new Mongo ( rs . getURL ( ) ) ; <nl> + rsConn . name = rs . getURL ( ) ; <nl> + <nl> + this . _connections [ i ] = rsConn ; <nl> + this [ " shard " + i ] = rsConn ; <nl> + rsConn . rs = rs ; <nl> + } <nl> + } <nl> <nl> / / Wait for master to be elected before starting mongos <nl> this . configRS . awaitNodesAgreeOnPrimary ( ) ; <nl>
SERVER - 43774 ShardingTest should initiate all of its ReplSetTest shard instances in parallel
mongodb/mongo
e07c2d29aded5a30ff08b5ce6a436b6ef6f44014
2020-08-03T22:24:44Z
mmm a / platformio . ini <nl> ppp b / platformio . ini <nl> default_src_filter = + < src / * > - < src / config > - < src / HAL > + < src / HAL / shared > <nl> extra_scripts = pre : buildroot / share / PlatformIO / scripts / common - cxxflags . py <nl> build_flags = - fmax - errors = 5 - g - D__MARLIN_FIRMWARE__ - fmerge - all - constants <nl> lib_deps = <nl> - LiquidCrystal <nl> - TMCStepper @ > = 0 . 6 . 2 , < 1 . 0 . 0 <nl> - Adafruit NeoPixel <nl> - U8glib - HAL = https : / / github . com / MarlinFirmware / U8glib - HAL / archive / bugfix . zip <nl> - Adafruit_MAX31865 = https : / / github . com / adafruit / Adafruit_MAX31865 / archive / master . zip <nl> - LiquidTWI2 = https : / / github . com / lincomatic / LiquidTWI2 / archive / master . zip <nl> - Arduino - L6470 = https : / / github . com / ameyer / Arduino - L6470 / archive / 0 . 8 . 0 . zip <nl> + LiquidCrystal @ 1 . 5 . 0 <nl> + TMCStepper @ ~ 0 . 7 . 1 <nl> + Adafruit NeoPixel @ 1 . 5 . 0 <nl> + U8glib - HAL @ 0 . 4 . 1 <nl> + Adafruit MAX31865 library @ ~ 1 . 1 . 0 <nl> + LiquidTWI2 @ 1 . 2 . 7 <nl> + Arduino - L6470 @ 0 . 8 . 0 <nl> + SlowSoftI2CMaster <nl> SailfishLCD = https : / / github . com / mikeshub / SailfishLCD / archive / master . zip <nl> SailfishRGB_LED = https : / / github . com / mikeshub / SailfishRGB_LED / archive / master . zip <nl> - SlowSoftI2CMaster = https : / / github . com / mikeshub / SlowSoftI2CMaster / archive / master . zip <nl> <nl> # Globally defined properties <nl> # inherited by all environments <nl> lib_compat_mode = strict <nl> extra_scripts = Marlin / src / HAL / LPC1768 / upload_extra_script . py <nl> src_filter = $ { common . default_src_filter } + < src / HAL / LPC1768 > <nl> lib_deps = Servo <nl> - LiquidCrystal <nl> - U8glib - HAL = https : / / github . com / MarlinFirmware / U8glib - HAL / archive / bugfix . zip <nl> - TMCStepper @ > = 0 . 6 . 1 , < 1 . 0 . 0 <nl> - Adafruit NeoPixel = https : / / github . com / p3p / Adafruit_NeoPixel / archive / release . zip <nl> + LiquidCrystal @ 1 . 5 . 0 <nl> + U8glib - HAL @ 0 . 4 . 1 <nl> + TMCStepper @ ~ 0 . 7 . 1 <nl> + Adafruit NeoPixel @ 1 . 5 . 0 <nl> SailfishLCD = https : / / github . com / mikeshub / SailfishLCD / archive / master . zip <nl> <nl> [ env : LPC1769 ] <nl> lib_compat_mode = strict <nl> extra_scripts = Marlin / src / HAL / LPC1768 / upload_extra_script . py <nl> src_filter = $ { common . default_src_filter } + < src / HAL / LPC1768 > <nl> lib_deps = Servo <nl> - LiquidCrystal <nl> - U8glib - HAL = https : / / github . com / MarlinFirmware / U8glib - HAL / archive / bugfix . zip <nl> - TMCStepper @ > = 0 . 6 . 1 , < 1 . 0 . 0 <nl> - Adafruit NeoPixel = https : / / github . com / p3p / Adafruit_NeoPixel / archive / release . zip <nl> + LiquidCrystal @ 1 . 5 . 0 <nl> + U8glib - HAL @ 0 . 4 . 1 <nl> + TMCStepper @ ~ 0 . 7 . 1 <nl> + Adafruit NeoPixel @ 1 . 5 . 0 <nl> SailfishLCD = https : / / github . com / mikeshub / SailfishLCD / archive / master . zip <nl> <nl> # <nl> build_flags = $ { common . build_flags } <nl> - DTARGET_STM32F4 - DSTM32F407IX - DVECT_TAB_OFFSET = 0x8000 <nl> - IMarlin / src / HAL / STM32 <nl> lib_deps = <nl> - U8glib - HAL = https : / / github . com / MarlinFirmware / U8glib - HAL / archive / bugfix . zip <nl> - LiquidCrystal <nl> - TMCStepper @ > = 0 . 5 . 2 , < 1 . 0 . 0 <nl> - Adafruit NeoPixel <nl> - LiquidTWI2 = https : / / github . com / lincomatic / LiquidTWI2 / archive / master . zip <nl> - Arduino - L6470 = https : / / github . com / ameyer / Arduino - L6470 / archive / 0 . 7 . 0 . 
zip <nl> + U8glib - HAL @ 0 . 4 . 1 <nl> + LiquidCrystal @ 1 . 5 . 0 <nl> + TMCStepper @ ~ 0 . 7 . 1 <nl> + Adafruit NeoPixel @ 1 . 5 . 0 <nl> + LiquidTWI2 @ 1 . 2 . 7 <nl> + Arduino - L6470 @ 0 . 8 . 0 <nl> lib_ignore = SoftwareSerial , SoftwareSerialM <nl> src_filter = $ { common . default_src_filter } + < src / HAL / STM32 > <nl> monitor_speed = 250000 <nl>
Specify supported library versions
MarlinFirmware/Marlin
c1b237920430c0944df9db6c9050977211745820
2020-07-11T12:42:35Z
mmm a / folly / io / async / AsyncSocket . h <nl> ppp b / folly / io / async / AsyncSocket . h <nl> namespace folly { <nl> # define SO_NO_TSOCKS 201 <nl> # endif <nl> <nl> - # ifdef _MSC_VER <nl> - / / We do a dynamic_cast on this , in <nl> - / / AsyncTransportWrapper : : getUnderlyingTransport so be safe and <nl> - / / force displacements for it . See : <nl> - / / https : / / msdn . microsoft . com / en - us / library / 7sf3txa8 . aspx <nl> - # pragma vtordisp ( push , 2 ) <nl> - # endif <nl> - class AsyncSocket : virtual public AsyncTransportWrapper { <nl> + class AsyncSocket : public AsyncTransportWrapper { <nl> public : <nl> using UniquePtr = std : : unique_ptr < AsyncSocket , Destructor > ; <nl> <nl> class AsyncSocket : virtual public AsyncTransportWrapper { <nl> <nl> bool closeOnFailedWrite_ { true } ; <nl> } ; <nl> - # ifdef _MSC_VER <nl> - # pragma vtordisp ( pop ) <nl> - # endif <nl> <nl> } / / namespace folly <nl> mmm a / folly / io / async / AsyncTransport . h <nl> ppp b / folly / io / async / AsyncTransport . h <nl> class AsyncWriter { <nl> <nl> / / Transitional intermediate interface . This is deprecated . <nl> / / Wrapper around folly : : AsyncTransport , that includes read / write callbacks <nl> - class AsyncTransportWrapper : virtual public AsyncTransport , <nl> - virtual public AsyncReader , <nl> - virtual public AsyncWriter { <nl> + class AsyncTransportWrapper : public AsyncTransport , <nl> + public AsyncReader , <nl> + public AsyncWriter { <nl> public : <nl> using UniquePtr = std : : unique_ptr < AsyncTransportWrapper , Destructor > ; <nl> <nl>
Remove virtual inheritance in AsyncSocket
facebook/folly
97abe3c96a4061322b6bb4da8042540657e1a3f7
2020-04-23T03:44:21Z
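For context on the commit above: the removed #pragma vtordisp existed because MSVC may need displacement fields to keep dynamic_cast safe across virtually inherited bases; once the hierarchy uses ordinary single inheritance, the downcast needs no such fixups. A minimal sketch with illustrative names, not folly's actual hierarchy:

#include <cassert>

struct AsyncBase { virtual ~AsyncBase() = default; };
struct Wrapper : AsyncBase {};  // plain (non-virtual) inheritance, as after the patch
struct Socket : Wrapper {};

int main() {
  AsyncBase* t = new Socket();
  // With ordinary inheritance this downcast needs no MSVC vtordisp fixups.
  Socket* s = dynamic_cast<Socket*>(t);
  assert(s != nullptr);
  delete t;
}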
mmm a / addons / skin . estuary / xml / DialogBusy . xml <nl> ppp b / addons / skin . estuary / xml / DialogBusy . xml <nl> <nl> < texture > colors / black . png < / texture > <nl> < include > FullScreenDimensions < / include > <nl> < animation effect = " fade " start = " 100 " end = " 70 " time = " 0 " condition = " true " > Conditional < / animation > <nl> - < animation effect = " fade " start = " 100 " end = " 0 " time = " 240 " condition = " Window . IsVisible ( fullscreenvideo ) " > Conditional < / animation > <nl> + < animation effect = " fade " start = " 100 " end = " 0 " time = " 240 " condition = " Window . IsVisible ( fullscreenvideo ) | Window . IsVisible ( FullscreenGame ) " > Conditional < / animation > <nl> < / control > <nl> < control type = " group " > <nl> < depth > DepthMax < / depth > <nl> mmm a / addons / skin . estuary / xml / DialogExtendedProgressBar . xml <nl> ppp b / addons / skin . estuary / xml / DialogExtendedProgressBar . xml <nl> <nl> < width > 80 < / width > <nl> < top > 0 < / top > <nl> < animation effect = " slide " end = " 0 , - 90 " time = " 300 " tween = " sine " easing = " inout " condition = " $ EXP [ infodialog_active ] " > conditional < / animation > <nl> - < animation effect = " slide " end = " 0 , - 80 " time = " 150 " condition = " Window . IsVisible ( FullscreenVideo ) " > conditional < / animation > <nl> + < animation effect = " slide " end = " 0 , - 80 " time = " 150 " condition = " Window . IsVisible ( FullscreenVideo ) | Window . IsVisible ( FullscreenGame ) " > conditional < / animation > <nl> < control type = " image " > <nl> < left > 20 < / left > <nl> < top > 10 < / top > <nl> mmm a / addons / skin . estuary / xml / DialogSeekBar . xml <nl> ppp b / addons / skin . estuary / xml / DialogSeekBar . xml <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> < window > <nl> - < visible > Player . Seeking | Player . DisplayAfterSeek | [ Player . Paused + ! Player . Caching ] | Player . Forwarding | Player . Rewinding | Player . ShowInfo | Window . IsActive ( videoosd ) | Window . IsActive ( musicosd ) | Window . IsActive ( GameOSD ) | Window . IsActive ( playerprocessinfo ) | ! String . IsEmpty ( Player . SeekNumeric ) | ! String . IsEmpty ( PVR . ChannelNumberInput ) < / visible > <nl> + < visible > Player . Seeking | Player . DisplayAfterSeek | [ Player . Paused + ! Player . Caching ] | Player . Forwarding | Player . Rewinding | Player . ShowInfo | Window . IsActive ( videoosd ) | Window . IsActive ( musicosd ) | Window . IsActive ( playerprocessinfo ) | ! String . IsEmpty ( Player . SeekNumeric ) | ! String . IsEmpty ( PVR . ChannelNumberInput ) < / visible > <nl> < visible > ! [ Window . IsActive ( sliderdialog ) | Window . IsActive ( pvrosdchannels ) ] < / visible > <nl> < visible > Window . IsActive ( fullscreenvideo ) | Window . IsActive ( visualisation ) < / visible > <nl> < include > Animation_BottomSlide < / include > <nl> mmm a / addons / skin . estuary / xml / DialogVolumeBar . xml <nl> ppp b / addons / skin . estuary / xml / DialogVolumeBar . xml <nl> <nl> < width > 88 < / width > <nl> < height > 88 < / height > <nl> < texture > osd / buffer - bg . png < / texture > <nl> - < visible > Window . IsActive ( fullscreenvideo ) | Window . IsActive ( slideshow ) < / visible > <nl> + < visible > Window . IsActive ( fullscreenvideo ) | Window . IsActive ( FullscreenGame ) | Window . 
IsActive ( slideshow ) < / visible > <nl> < / control > <nl> < control type = " image " > <nl> < left > 22 < / left > <nl> mmm a / addons / skin . estuary / xml / Includes . xml <nl> ppp b / addons / skin . estuary / xml / Includes . xml <nl> <nl> < animation effect = " zoom " center = " auto " end = " 102 , 102 " time = " 0 " condition = " Integer . IsGreater ( System . StereoscopicMode , 0 ) " > conditional < / animation > <nl> < control type = " group " > <nl> < animation effect = " fade " start = " 100 " end = " bg_alpha " time = " 0 " condition = " ! Control . IsVisible ( 31111 ) " > Conditional < / animation > <nl> - < animation effect = " fade " start = " 0 " end = " 100 " time = " 300 " condition = " Window . Previous ( fullscreenvideo ) | Window . Previous ( startup ) " > WindowOpen < / animation > <nl> + < animation effect = " fade " start = " 0 " end = " 100 " time = " 300 " condition = " Window . Previous ( fullscreenvideo ) | Window . Previous ( FullscreenGame ) | Window . Previous ( startup ) " > WindowOpen < / animation > <nl> < include > ColoredBackgroundImages < / include > <nl> < / control > <nl> < control type = " group " id = " 31111 " > <nl> mmm a / addons / skin . estuary / xml / MyWeather . xml <nl> ppp b / addons / skin . estuary / xml / MyWeather . xml <nl> <nl> < control type = " group " > <nl> < animation effect = " zoom " center = " auto " end = " 102 , 102 " time = " 0 " condition = " Integer . IsGreater ( System . StereoscopicMode , 0 ) " > conditional < / animation > <nl> < animation effect = " fade " start = " 100 " end = " bg_alpha " time = " 0 " condition = " Player . HasMedia " > Conditional < / animation > <nl> - < animation effect = " fade " start = " 0 " end = " 100 " time = " 300 " condition = " Window . Previous ( fullscreenvideo ) | Window . Previous ( startup ) " > WindowOpen < / animation > <nl> + < animation effect = " fade " start = " 0 " end = " 100 " time = " 300 " condition = " Window . Previous ( fullscreenvideo ) | Window . Previous ( FullscreenGame ) | Window . Previous ( startup ) " > WindowOpen < / animation > <nl> < include > ColoredBackgroundImages < / include > <nl> < / control > <nl> < control type = " multiimage " > <nl> mmm a / addons / skin . estuary / xml / PlayerControls . xml <nl> ppp b / addons / skin . estuary / xml / PlayerControls . xml <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> < window > <nl> < defaultcontrol always = " true " > 201 < / defaultcontrol > <nl> - < visible > Player . HasMedia + Window . IsActive ( PlayerControls ) + ! Window . IsActive ( FullscreenVideo ) + ! Window . IsActive ( Visualisation ) < / visible > <nl> + < visible > Player . HasMedia + Window . IsActive ( PlayerControls ) + ! Window . IsActive ( FullscreenVideo ) + ! Window . IsActive ( FullscreenGame ) + ! Window . IsActive ( Visualisation ) < / visible > <nl> < include > Animation_DialogPopupOpenClose < / include > <nl> < controls > <nl> < control type = " group " > <nl> mmm a / cmake / treedata / common / retroplayer . txt <nl> ppp b / cmake / treedata / common / retroplayer . txt <nl> <nl> xbmc / cores / RetroPlayer cores / RetroPlayer <nl> xbmc / cores / RetroPlayer / guicontrols cores / RetroPlayer / guicontrols <nl> xbmc / cores / RetroPlayer / rendering cores / RetroPlayer / rendering <nl> + xbmc / cores / RetroPlayer / windows cores / RetroPlayer / windows <nl> mmm a / xbmc / Application . cpp <nl> ppp b / xbmc / Application . 
cpp <nl> bool CApplication : : LoadSkin ( const std : : string & skinID ) <nl> <nl> / / store player and rendering state <nl> bool bPreviousPlayingState = false ; <nl> - bool bPreviousRenderingState = false ; <nl> + <nl> + enum class RENDERING_STATE <nl> + { <nl> + NONE , <nl> + VIDEO , <nl> + GAME , <nl> + } previousRenderingState = RENDERING_STATE : : NONE ; <nl> + <nl> if ( m_pPlayer - > IsPlayingVideo ( ) ) <nl> { <nl> bPreviousPlayingState = ! m_pPlayer - > IsPausedPlayback ( ) ; <nl> bool CApplication : : LoadSkin ( const std : : string & skinID ) <nl> if ( g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO ) <nl> { <nl> g_windowManager . ActivateWindow ( WINDOW_HOME ) ; <nl> - bPreviousRenderingState = true ; <nl> + previousRenderingState = RENDERING_STATE : : VIDEO ; <nl> + } <nl> + else if ( g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME ) <nl> + { <nl> + g_windowManager . ActivateWindow ( WINDOW_HOME ) ; <nl> + previousRenderingState = RENDERING_STATE : : GAME ; <nl> } <nl> + <nl> } <nl> <nl> CSingleLock lock ( g_graphicsContext ) ; <nl> bool CApplication : : LoadSkin ( const std : : string & skinID ) <nl> { <nl> if ( bPreviousPlayingState ) <nl> m_pPlayer - > Pause ( ) ; <nl> - if ( bPreviousRenderingState ) <nl> + <nl> + switch ( previousRenderingState ) <nl> + { <nl> + case RENDERING_STATE : : VIDEO : <nl> g_windowManager . ActivateWindow ( WINDOW_FULLSCREEN_VIDEO ) ; <nl> + break ; <nl> + case RENDERING_STATE : : GAME : <nl> + g_windowManager . ActivateWindow ( WINDOW_FULLSCREEN_GAME ) ; <nl> + break ; <nl> + default : <nl> + break ; <nl> + } <nl> } <nl> <nl> return true ; <nl> bool CApplication : : OnAction ( const CAction & action ) <nl> / / Now check with the player if action can be handled . <nl> bool bIsPlayingPVRChannel = ( CServiceBroker : : GetPVRManager ( ) . IsStarted ( ) & & g_application . CurrentFileItem ( ) . IsPVRChannel ( ) ) ; <nl> if ( g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO | | <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME | | <nl> ( g_windowManager . GetActiveWindow ( ) = = WINDOW_VISUALISATION & & bIsPlayingPVRChannel ) | | <nl> ( ( g_windowManager . GetActiveWindow ( ) = = WINDOW_DIALOG_VIDEO_OSD | | ( g_windowManager . GetActiveWindow ( ) = = WINDOW_DIALOG_MUSIC_OSD & & bIsPlayingPVRChannel ) ) & & <nl> ( action . GetID ( ) = = ACTION_NEXT_ITEM | | action . GetID ( ) = = ACTION_PREV_ITEM | | action . GetID ( ) = = ACTION_CHANNEL_UP | | action . GetID ( ) = = ACTION_CHANNEL_DOWN ) ) | | <nl> void CApplication : : OnApplicationMessage ( ThreadMessage * pMsg ) <nl> <nl> <nl> case TMSG_SWITCHTOFULLSCREEN : <nl> - if ( g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_VIDEO ) <nl> + if ( g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_VIDEO & & <nl> + g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_GAME ) <nl> SwitchToFullScreen ( true ) ; <nl> break ; <nl> <nl> PlayBackRet CApplication : : PlayFile ( CFileItem item , const std : : string & player , bo <nl> { <nl> / / if player didn ' t manage to switch to fullscreen by itself do it here <nl> if ( options . fullscreen & & m_pPlayer - > IsRenderingVideo ( ) & & <nl> - g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_VIDEO ) <nl> + g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_VIDEO & & <nl> + g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_GAME ) <nl> SwitchToFullScreen ( true ) ; <nl> } <nl> else <nl> { <nl> if ( g_windowManager . 
GetActiveWindow ( ) = = WINDOW_VISUALISATION | | <nl> - g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO ) <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO | | <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME ) <nl> g_windowManager . PreviousWindow ( ) ; <nl> } <nl> <nl> void CApplication : : StopPlaying ( ) <nl> <nl> / / turn off visualisation window when stopping <nl> if ( ( iWin = = WINDOW_VISUALISATION <nl> - | | iWin = = WINDOW_FULLSCREEN_VIDEO ) <nl> + | | iWin = = WINDOW_FULLSCREEN_VIDEO <nl> + | | iWin = = WINDOW_FULLSCREEN_GAME ) <nl> & & ! m_bStop ) <nl> g_windowManager . PreviousWindow ( ) ; <nl> <nl> bool CApplication : : OnMessage ( CGUIMessage & message ) <nl> <nl> if ( ! m_pPlayer - > IsPlayingVideo ( ) ) <nl> { <nl> - if ( g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO ) <nl> + if ( g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO | | <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME ) <nl> { <nl> g_windowManager . PreviousWindow ( ) ; <nl> } <nl> void CApplication : : ProcessSlow ( ) <nl> <nl> / / Temporarily pause pausable jobs when viewing video / picture <nl> int currentWindow = g_windowManager . GetActiveWindow ( ) ; <nl> - if ( CurrentFileItem ( ) . IsVideo ( ) | | CurrentFileItem ( ) . IsPicture ( ) | | currentWindow = = WINDOW_FULLSCREEN_VIDEO | | currentWindow = = WINDOW_SLIDESHOW ) <nl> + if ( CurrentFileItem ( ) . IsVideo ( ) | | <nl> + CurrentFileItem ( ) . IsPicture ( ) | | <nl> + currentWindow = = WINDOW_FULLSCREEN_VIDEO | | <nl> + currentWindow = = WINDOW_FULLSCREEN_GAME | | <nl> + currentWindow = = WINDOW_SLIDESHOW ) <nl> { <nl> CJobManager : : GetInstance ( ) . PauseJobs ( ) ; <nl> } <nl> bool CApplication : : SwitchToFullScreen ( bool force / * = false * / ) <nl> } <nl> <nl> int windowID = WINDOW_INVALID ; <nl> + <nl> + / / See if we ' re playing a game , and are in GUI mode <nl> + if ( m_pPlayer - > IsPlayingGame ( ) & & g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_GAME ) <nl> + windowID = WINDOW_FULLSCREEN_GAME ; <nl> + <nl> / / See if we ' re playing a video , and are in GUI mode <nl> - if ( m_pPlayer - > IsPlayingVideo ( ) & & g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_VIDEO ) <nl> + else if ( m_pPlayer - > IsPlayingVideo ( ) & & g_windowManager . GetActiveWindow ( ) ! = WINDOW_FULLSCREEN_VIDEO ) <nl> windowID = WINDOW_FULLSCREEN_VIDEO ; <nl> <nl> / / special case for switching between GUI & visualisation mode . ( only if we ' re playing an audio song ) <nl> mmm a / xbmc / GUIInfoManager . cpp <nl> ppp b / xbmc / GUIInfoManager . cpp <nl> bool CGUIInfoManager : : GetBool ( int condition1 , int contextWindow , const CGUIListI <nl> bReturn = ( CServiceBroker : : GetSettings ( ) . GetInt ( CSettings : : SETTING_VIDEOPLAYER_RENDERMETHOD ) = = RENDER_OVERLAYS ) ; <nl> break ; <nl> case VIDEOPLAYER_ISFULLSCREEN : <nl> - bReturn = g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO ; <nl> + bReturn = g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO | | <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME ; <nl> break ; <nl> case VIDEOPLAYER_HASMENU : <nl> bReturn = g_application . m_pPlayer - > HasMenu ( ) ; <nl> mmm a / xbmc / PlayListPlayer . cpp <nl> ppp b / xbmc / PlayListPlayer . cpp <nl> void PLAYLIST : : CPlayListPlayer : : OnApplicationMessage ( KODI : : MESSAGING : : ThreadMess <nl> / / restore to previous window if needed <nl> if ( g_windowManager . 
GetActiveWindow ( ) = = WINDOW_SLIDESHOW | | <nl> g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO | | <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME | | <nl> g_windowManager . GetActiveWindow ( ) = = WINDOW_VISUALISATION ) <nl> g_windowManager . PreviousWindow ( ) ; <nl> <nl> void PLAYLIST : : CPlayListPlayer : : OnApplicationMessage ( KODI : : MESSAGING : : ThreadMess <nl> <nl> if ( ( stopSlideshow & & g_windowManager . GetActiveWindow ( ) = = WINDOW_SLIDESHOW ) | | <nl> ( stopVideo & & g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO ) | | <nl> + ( stopVideo & & g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME ) | | <nl> ( stopMusic & & g_windowManager . GetActiveWindow ( ) = = WINDOW_VISUALISATION ) ) <nl> g_windowManager . PreviousWindow ( ) ; <nl> <nl> mmm a / xbmc / addons / RepositoryUpdater . cpp <nl> ppp b / xbmc / addons / RepositoryUpdater . cpp <nl> void CRepositoryUpdater : : OnTimeout ( ) <nl> { <nl> / / workaround <nl> if ( g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO | | <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME | | <nl> g_windowManager . GetActiveWindow ( ) = = WINDOW_SLIDESHOW ) <nl> { <nl> CLog : : Log ( LOGDEBUG , " CRepositoryUpdater : busy playing . postponing scheduled update " ) ; <nl> new file mode 100644 <nl> index 000000000000 . . b1c470978948 <nl> mmm / dev / null <nl> ppp b / xbmc / cores / RetroPlayer / windows / CMakeLists . txt <nl> <nl> + set ( SOURCES GameWindowFullScreen . cpp <nl> + ) <nl> + <nl> + set ( HEADERS GameWindowFullScreen . h <nl> + ) <nl> + <nl> + core_add_library ( retroplayer_windows ) <nl> new file mode 100644 <nl> index 000000000000 . . 851a70b4f7fe <nl> mmm / dev / null <nl> ppp b / xbmc / cores / RetroPlayer / windows / GameWindowFullScreen . cpp <nl> <nl> + / * <nl> + * Copyright ( C ) 2017 Team Kodi <nl> + * http : / / kodi . tv <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this Program ; see the file COPYING . If not , see <nl> + * < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + # include " GameWindowFullScreen . h " <nl> + # include " guilib / WindowIDs . h " <nl> + <nl> + using namespace KODI ; <nl> + using namespace RETRO ; <nl> + <nl> + CGameWindowFullScreen : : CGameWindowFullScreen ( void ) : <nl> + CGUIWindowFullScreen ( WINDOW_FULLSCREEN_GAME ) <nl> + { <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 95db3ef51ae2 <nl> mmm / dev / null <nl> ppp b / xbmc / cores / RetroPlayer / windows / GameWindowFullScreen . h <nl> <nl> + / * <nl> + * Copyright ( C ) 2017 Team Kodi <nl> + * http : / / kodi . 
tv <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this Program ; see the file COPYING . If not , see <nl> + * < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + # pragma once <nl> + <nl> + # include " video / windows / GUIWindowFullScreen . h " / / ! @ todo <nl> + <nl> + namespace KODI <nl> + { <nl> + namespace RETRO <nl> + { <nl> + class CGameWindowFullScreen : public CGUIWindowFullScreen <nl> + { <nl> + public : <nl> + CGameWindowFullScreen ( ) ; <nl> + ~ CGameWindowFullScreen ( ) override = default ; <nl> + } ; <nl> + } <nl> + } <nl> mmm a / xbmc / guilib / GUIWindow . cpp <nl> ppp b / xbmc / guilib / GUIWindow . cpp <nl> void CGUIWindow : : Close_Internal ( bool forceClose / * = false * / , int nextWindowID / * <nl> if ( ! m_active ) <nl> return ; <nl> <nl> - forceClose | = ( nextWindowID = = WINDOW_FULLSCREEN_VIDEO ) ; <nl> + forceClose | = ( nextWindowID = = WINDOW_FULLSCREEN_VIDEO | | <nl> + nextWindowID = = WINDOW_FULLSCREEN_GAME ) ; <nl> + <nl> if ( ! forceClose & & HasAnimation ( ANIM_TYPE_WINDOW_CLOSE ) ) <nl> { <nl> if ( ! m_closing ) <nl> mmm a / xbmc / guilib / GUIWindowManager . cpp <nl> ppp b / xbmc / guilib / GUIWindowManager . cpp <nl> <nl> # include " addons / interfaces / AddonInterfaces . h " <nl> <nl> / * Game related include files * / <nl> + # include " cores / RetroPlayer / windows / GameWindowFullScreen . h " <nl> # include " games / controllers / windows / GUIControllerWindow . h " <nl> # include " games / windows / GUIWindowGames . h " <nl> # include " games / dialogs / osd / DialogGameOSD . h " <nl> void CGUIWindowManager : : CreateWindows ( ) <nl> Add ( new GAME : : CDialogGameOSD ) ; <nl> Add ( new GAME : : CDialogGameVideoFilter ) ; <nl> Add ( new GAME : : CDialogGameViewMode ) ; <nl> + Add ( new RETRO : : CGameWindowFullScreen ) ; <nl> } <nl> <nl> bool CGUIWindowManager : : DestroyWindows ( ) <nl> bool CGUIWindowManager : : DestroyWindows ( ) <nl> DestroyWindow ( WINDOW_DIALOG_GAME_OSD ) ; <nl> DestroyWindow ( WINDOW_DIALOG_GAME_VIDEO_FILTER ) ; <nl> DestroyWindow ( WINDOW_DIALOG_GAME_VIEW_MODE ) ; <nl> + DestroyWindow ( WINDOW_FULLSCREEN_GAME ) ; <nl> <nl> Remove ( WINDOW_SETTINGS_SERVICE ) ; <nl> Remove ( WINDOW_SETTINGS_MYPVR ) ; <nl> void CGUIWindowManager : : ActivateWindow_Internal ( int iWindowID , const std : : vector <nl> / / pause game when leaving fullscreen or resume game when entering fullscreen <nl> if ( g_application . m_pPlayer - > IsPlayingGame ( ) ) <nl> { <nl> - if ( GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO & & ! g_application . m_pPlayer - > IsPaused ( ) ) <nl> + if ( GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME & & ! g_application . m_pPlayer - > IsPaused ( ) ) <nl> g_application . OnAction ( ACTION_PAUSE ) ; <nl> - else if ( iWindowID = = WINDOW_FULLSCREEN_VIDEO & & g_application . m_pPlayer - > IsPaused ( ) ) <nl> + else if ( iWindowID = = WINDOW_FULLSCREEN_GAME & & g_application . 
m_pPlayer - > IsPaused ( ) ) <nl> g_application . OnAction ( ACTION_PAUSE ) ; <nl> } <nl> <nl> int CGUIWindowManager : : GetActiveWindowID ( ) const <nl> / / special casing for numeric seek <nl> else if ( CSeekHandler : : GetInstance ( ) . HasTimeCode ( ) ) <nl> iWin = WINDOW_VIDEO_TIME_SEEK ; <nl> - / / check if a game is playing <nl> - else if ( g_application . m_pPlayer - > IsPlayingGame ( ) ) <nl> - iWin = WINDOW_FULLSCREEN_GAME ; <nl> } <nl> if ( iWin = = WINDOW_VISUALISATION ) <nl> { <nl> mmm a / xbmc / input / InputManager . cpp <nl> ppp b / xbmc / input / InputManager . cpp <nl> bool CInputManager : : OnKey ( const CKey & key ) <nl> return true ; <nl> } <nl> <nl> - if ( iWin ! = WINDOW_FULLSCREEN_VIDEO ) <nl> + if ( iWin ! = WINDOW_FULLSCREEN_VIDEO | | <nl> + iWin ! = WINDOW_FULLSCREEN_GAME ) <nl> { <nl> / / current active window isnt the fullscreen window <nl> / / just use corresponding section from keymap . xml <nl> mmm a / xbmc / input / WindowTranslator . cpp <nl> ppp b / xbmc / input / WindowTranslator . cpp <nl> const CWindowTranslator : : WindowMapByName CWindowTranslator : : WindowMappingByName <nl> { " fullscreenvideo " , WINDOW_FULLSCREEN_VIDEO } , <nl> { " fullscreenlivetv " , WINDOW_FULLSCREEN_LIVETV } , / / virtual window / keymap section for PVR specific bindings in fullscreen playback ( which internally uses WINDOW_FULLSCREEN_VIDEO ) <nl> { " fullscreenradio " , WINDOW_FULLSCREEN_RADIO } , / / virtual window for fullscreen radio , uses WINDOW_VISUALISATION as fallback <nl> - { " fullscreengame " , WINDOW_FULLSCREEN_GAME } , / / virtual window for fullscreen games , uses WINDOW_FULLSCREEN_VIDEO as fallback <nl> + { " fullscreengame " , WINDOW_FULLSCREEN_GAME } , <nl> { " visualisation " , WINDOW_VISUALISATION } , <nl> { " slideshow " , WINDOW_SLIDESHOW } , <nl> { " weather " , WINDOW_WEATHER } , <nl> mmm a / xbmc / interfaces / builtins / PlayerBuiltins . cpp <nl> ppp b / xbmc / interfaces / builtins / PlayerBuiltins . cpp <nl> static int PlayMedia ( const std : : vector < std : : string > & params ) <nl> / / restore to previous window if needed <nl> if ( g_windowManager . GetActiveWindow ( ) = = WINDOW_SLIDESHOW | | <nl> g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_VIDEO | | <nl> + g_windowManager . GetActiveWindow ( ) = = WINDOW_FULLSCREEN_GAME | | <nl> g_windowManager . GetActiveWindow ( ) = = WINDOW_VISUALISATION ) <nl> g_windowManager . PreviousWindow ( ) ; <nl> <nl> mmm a / xbmc / pictures / GUIWindowSlideShow . cpp <nl> ppp b / xbmc / pictures / GUIWindowSlideShow . cpp <nl> void CGUIWindowSlideShow : : OnDeinitWindow ( int nextWindowID ) <nl> / / g_graphicsContext . SetVideoResolution ( CDisplaySettings : : GetInstance ( ) . GetCurrentResolution ( ) , TRUE ) ; <nl> } <nl> <nl> - if ( nextWindowID ! = WINDOW_FULLSCREEN_VIDEO ) <nl> + if ( nextWindowID ! = WINDOW_FULLSCREEN_VIDEO | | <nl> + nextWindowID ! = WINDOW_FULLSCREEN_GAME ) <nl> { <nl> / / wait for any outstanding picture loads <nl> if ( m_pBackgroundLoader ) <nl> mmm a / xbmc / video / windows / GUIWindowFullScreen . cpp <nl> ppp b / xbmc / video / windows / GUIWindowFullScreen . cpp <nl> using namespace KODI : : MESSAGING ; <nl> static CLinuxResourceCounter m_resourceCounter ; <nl> # endif <nl> <nl> - CGUIWindowFullScreen : : CGUIWindowFullScreen ( void ) <nl> - : CGUIWindow ( WINDOW_FULLSCREEN_VIDEO , " VideoFullScreen . xml " ) <nl> + CGUIWindowFullScreen : : CGUIWindowFullScreen ( int windowId ) <nl> + : CGUIWindow ( windowId , " VideoFullScreen . 
xml " ) <nl> { <nl> m_viewModeChanged = true ; <nl> m_dwShowViewModeTimeout = 0 ; <nl> mmm a / xbmc / video / windows / GUIWindowFullScreen . h <nl> ppp b / xbmc / video / windows / GUIWindowFullScreen . h <nl> <nl> * / <nl> <nl> # include " guilib / GUIWindow . h " <nl> + # include " guilib / WindowIDs . h " / / ! @ todo <nl> <nl> class CGUIDialog ; <nl> <nl> class CGUIWindowFullScreen : public CGUIWindow <nl> { <nl> public : <nl> - CGUIWindowFullScreen ( void ) ; <nl> + CGUIWindowFullScreen ( int windowId = WINDOW_FULLSCREEN_VIDEO ) ; / / ! @ todo <nl> ~ CGUIWindowFullScreen ( void ) override ; <nl> bool OnMessage ( CGUIMessage & message ) override ; <nl> bool OnAction ( const CAction & action ) override ; <nl>
Implement FullscreenGame window
xbmc/xbmc
acd30c1bf3298aa858dfb2648b42a9e52f557a5e
2017-09-07T16:07:40Z
mmm a / hphp / hack / src / utils / hh_json / hh_json . ml <nl> ppp b / hphp / hack / src / utils / hh_json / hh_json . ml <nl> let string_of_file filename = <nl> <nl> ( * Writing JSON * ) <nl> <nl> + let sort_object obj_entries = <nl> + List . sort ~ cmp : ( fun ( k1 , _ ) ( k2 , _ ) - > Pervasives . compare k1 k2 ) obj_entries <nl> + <nl> module type Output_stream_intf = sig <nl> type t <nl> val add_char : t - > char - > unit <nl> module Make_streamer ( Out : Output_stream_intf ) = struct <nl> Out . add_substring b s ! pos ( String . length s - ! pos ) ; <nl> Out . add_char b ' " ' <nl> <nl> - let rec add_json ( buf : Out . t ) ( json : json ) : unit = <nl> + let rec add_json ~ sort_keys ( buf : Out . t ) ( json : json ) : unit = <nl> match json with <nl> | JSON_Object l - > <nl> - concat ~ lb : " { " ~ rb : " } " ~ sep : " , " ~ concat_elt : add_assoc buf l <nl> + ( * Make the pretty output deterministic by sorting the keys * ) <nl> + let l = if sort_keys then sort_object l else l in <nl> + concat ~ lb : " { " ~ rb : " } " ~ sep : " , " ~ concat_elt : ( add_assoc ~ sort_keys ) buf l <nl> | JSON_Array l - > <nl> - concat ~ lb : " [ " ~ rb : " ] " ~ sep : " , " ~ concat_elt : add_json buf l <nl> + concat ~ lb : " [ " ~ rb : " ] " ~ sep : " , " ~ concat_elt : ( add_json ~ sort_keys ) buf l <nl> | JSON_String s - > escape buf s <nl> | JSON_Number n - > Out . add_string buf n <nl> | JSON_Bool b - > Out . add_string buf ( if b then " true " else " false " ) <nl> | JSON_Null - > Out . add_string buf " null " <nl> <nl> - and add_assoc ( buf : Out . t ) ( k , v ) = <nl> + and add_assoc ~ sort_keys ( buf : Out . t ) ( k , v ) = <nl> escape buf k ; <nl> Out . add_char buf ' : ' ; <nl> - add_json buf v <nl> + add_json ~ sort_keys buf v <nl> end <nl> <nl> module Out_buffer = Make_streamer ( Buffer_stream ) <nl> module Out_channel = Make_streamer ( Channel_stream ) <nl> <nl> - let rec json_to_string ? ( pretty = false ) ( json : json ) : string = <nl> + let rec json_to_string ? ( sort_keys = false ) ? ( pretty = false ) ( json : json ) : string = <nl> if pretty <nl> - then json_to_multiline json <nl> + then json_to_multiline ~ sort_keys json <nl> else <nl> let buf = Buffer . create 1024 in ( * need a better estimate ! * ) <nl> - Out_buffer . add_json buf json ; <nl> + Out_buffer . add_json ~ sort_keys buf json ; <nl> Buffer . contents buf <nl> <nl> and json_to_multiline ? ( sort_keys = false ) json = <nl> let rec loop indent json = <nl> - let single = json_to_string json in <nl> + let single = json_to_string ~ sort_keys json in <nl> if String . length single < 80 then single else <nl> match json with <nl> | JSON_Array l - > <nl> and json_to_multiline ? ( sort_keys = false ) json = <nl> " \ n " ^ indent ^ " ] " <nl> | JSON_Object l - > <nl> ( * Make the pretty output deterministic by sorting the keys * ) <nl> - let l = <nl> - if sort_keys then <nl> - List . sort ~ cmp : ( fun ( k1 , _ ) ( k2 , _ ) - > Pervasives . compare k1 k2 ) l <nl> - else l <nl> - in <nl> + let l = if sort_keys then sort_object l else l in <nl> let nl = <nl> List . map l <nl> ( fun ( k , v ) - > <nl> - indent ^ " " ^ ( json_to_string ( JSON_String k ) ) ^ " : " ^ <nl> + indent ^ " " ^ ( json_to_string ~ sort_keys ( JSON_String k ) ) ^ " : " ^ <nl> ( loop ( indent ^ " " ) v ) ) <nl> in <nl> " { \ n " ^ ( String . concat " , \ n " nl ) ^ " \ n " ^ indent ^ " } " <nl> and json_to_multiline ? ( sort_keys = false ) json = <nl> loop " " json <nl> <nl> let json_to_output oc ( json : json ) : unit = <nl> - Out_channel . 
add_json oc json <nl> + Out_channel . add_json ~ sort_keys : false oc json <nl> <nl> let rec json_to_multiline_output oc ( json : json ) : unit = <nl> let json_assoc_to_output oc ( k , v ) : unit = <nl> mmm a / hphp / hack / src / utils / hh_json / hh_json . mli <nl> ppp b / hphp / hack / src / utils / hh_json / hh_json . mli <nl> type json = <nl> <nl> exception Syntax_error of string <nl> <nl> - val json_to_string : ? pretty : bool - > json - > string <nl> + val json_to_string : ? sort_keys : bool - > ? pretty : bool - > json - > string <nl> val json_to_multiline : ? sort_keys : bool - > json - > string <nl> val json_to_output : out_channel - > json - > unit <nl> val json_to_multiline_output : out_channel - > json - > unit <nl> mmm a / hphp / test / slow / dv_array / ext_factparse . php . expectf <nl> ppp b / hphp / test / slow / dv_array / ext_factparse . php . expectf <nl> array ( 3 ) { <nl> array ( 10 ) { <nl> [ 0 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 3 ) " CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 3 ) " CL0 " <nl> } <nl> [ 1 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 3 ) " CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 3 ) " CL1 " <nl> } <nl> [ 2 ] = > <nl> array ( 4 ) { <nl> array ( 3 ) { <nl> } <nl> [ 3 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> array ( 1 ) { <nl> [ 0 ] = > <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 2 ) " E0 " <nl> } <nl> [ 4 ] = > <nl> array ( 5 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " I0 " <nl> - [ " kindOf " ] = > <nl> - string ( 9 ) " interface " <nl> - [ " flags " ] = > <nl> - int ( 1 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 1 ) <nl> + [ " kindOf " ] = > <nl> + string ( 9 ) " interface " <nl> + [ " name " ] = > <nl> + string ( 2 ) " I0 " <nl> [ " requireExtends " ] = > <nl> array ( 0 ) { <nl> } <nl> } <nl> [ 5 ] = > <nl> array ( 5 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " I1 " <nl> - [ " kindOf " ] = > <nl> - string ( 9 ) " interface " <nl> - [ " flags " ] = > <nl> - int ( 1 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 1 ) <nl> + [ " kindOf " ] = > <nl> + string ( 9 ) " interface " <nl> + [ " name " ] = > <nl> + string ( 2 ) " I1 " <nl> [ " requireExtends " ] = > <nl> array ( 0 ) { <nl> } <nl> array ( 3 ) { <nl> array ( 10 ) { <nl> [ 0 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 6 ) " NS \ CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 6 ) " NS \ 
CL0 " <nl> } <nl> [ 1 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 6 ) " NS \ CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 6 ) " NS \ CL1 " <nl> } <nl> [ 2 ] = > <nl> array ( 4 ) { <nl> array ( 3 ) { <nl> } <nl> [ 3 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 5 ) " NS \ E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> array ( 1 ) { <nl> [ 0 ] = > <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 5 ) " NS \ E0 " <nl> } <nl> [ 4 ] = > <nl> array ( 5 ) { <nl> array ( 3 ) { <nl> array ( 10 ) { <nl> [ 0 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 9 ) " NS \ NS \ CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 9 ) " NS \ NS \ CL0 " <nl> } <nl> [ 1 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 9 ) " NS \ NS \ CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 9 ) " NS \ NS \ CL1 " <nl> } <nl> [ 2 ] = > <nl> array ( 4 ) { <nl> array ( 3 ) { <nl> } <nl> [ 3 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 8 ) " NS \ NS \ E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> array ( 1 ) { <nl> [ 0 ] = > <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 8 ) " NS \ NS \ E0 " <nl> } <nl> [ 4 ] = > <nl> array ( 5 ) { <nl> mmm a / hphp / test / slow / dv_array_hack_arr / ext_factparse . php . expectf <nl> ppp b / hphp / test / slow / dv_array_hack_arr / ext_factparse . php . 
expectf <nl> dict ( 3 ) { <nl> [ " types " ] = > <nl> vec ( 10 ) { <nl> dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 3 ) " CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> - } <nl> - dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 3 ) " CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> [ " flags " ] = > <nl> int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 3 ) " CL0 " <nl> + } <nl> + dict ( 4 ) { <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 3 ) " CL1 " <nl> } <nl> dict ( 4 ) { <nl> [ " baseTypes " ] = > <nl> dict ( 3 ) { <nl> string ( 3 ) " CL2 " <nl> } <nl> dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> vec ( 1 ) { <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 2 ) " E0 " <nl> } <nl> dict ( 5 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " I0 " <nl> - [ " kindOf " ] = > <nl> - string ( 9 ) " interface " <nl> - [ " flags " ] = > <nl> - int ( 1 ) <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 1 ) <nl> + [ " kindOf " ] = > <nl> + string ( 9 ) " interface " <nl> + [ " name " ] = > <nl> + string ( 2 ) " I0 " <nl> [ " requireExtends " ] = > <nl> vec ( 0 ) { <nl> } <nl> } <nl> dict ( 5 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " I1 " <nl> - [ " kindOf " ] = > <nl> - string ( 9 ) " interface " <nl> - [ " flags " ] = > <nl> - int ( 1 ) <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 1 ) <nl> + [ " kindOf " ] = > <nl> + string ( 9 ) " interface " <nl> + [ " name " ] = > <nl> + string ( 2 ) " I1 " <nl> [ " requireExtends " ] = > <nl> vec ( 0 ) { <nl> } <nl> dict ( 3 ) { <nl> [ " types " ] = > <nl> vec ( 10 ) { <nl> dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 6 ) " NS \ CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> - } <nl> - dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 6 ) " NS \ CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> [ " flags " ] = > <nl> int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 6 ) " NS \ CL0 " <nl> + } <nl> + dict ( 4 ) { <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 6 ) " NS \ CL1 " <nl> } <nl> dict ( 4 ) { <nl> [ " baseTypes " ] = > <nl> dict ( 3 ) { <nl> string ( 6 ) " NS \ CL2 " <nl> } <nl> dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 5 ) " NS \ E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> vec ( 1 ) { <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 5 ) " NS \ E0 " <nl> } <nl> dict ( 5 ) { <nl> [ " 
baseTypes " ] = > <nl> dict ( 3 ) { <nl> [ " types " ] = > <nl> vec ( 10 ) { <nl> dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 9 ) " NS \ NS \ CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> - } <nl> - dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 9 ) " NS \ NS \ CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> [ " flags " ] = > <nl> int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 9 ) " NS \ NS \ CL0 " <nl> + } <nl> + dict ( 4 ) { <nl> [ " baseTypes " ] = > <nl> vec ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 9 ) " NS \ NS \ CL1 " <nl> } <nl> dict ( 4 ) { <nl> [ " baseTypes " ] = > <nl> dict ( 3 ) { <nl> string ( 9 ) " NS \ NS \ CL2 " <nl> } <nl> dict ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 8 ) " NS \ NS \ E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> vec ( 1 ) { <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 8 ) " NS \ NS \ E0 " <nl> } <nl> dict ( 5 ) { <nl> [ " baseTypes " ] = > <nl> mmm a / hphp / test / slow / ext_factparse / ext_factparse . php . expectf <nl> ppp b / hphp / test / slow / ext_factparse / ext_factparse . php . expectf <nl> array ( 3 ) { <nl> array ( 10 ) { <nl> [ 0 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 3 ) " CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 3 ) " CL0 " <nl> } <nl> [ 1 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 3 ) " CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 3 ) " CL1 " <nl> } <nl> [ 2 ] = > <nl> array ( 4 ) { <nl> array ( 3 ) { <nl> } <nl> [ 3 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> array ( 1 ) { <nl> [ 0 ] = > <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 2 ) " E0 " <nl> } <nl> [ 4 ] = > <nl> array ( 5 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " I0 " <nl> - [ " kindOf " ] = > <nl> - string ( 9 ) " interface " <nl> - [ " flags " ] = > <nl> - int ( 1 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 1 ) <nl> + [ " kindOf " ] = > <nl> + string ( 9 ) " interface " <nl> + [ " name " ] = > <nl> + string ( 2 ) " I0 " <nl> [ " requireExtends " ] = > <nl> array ( 0 ) { <nl> } <nl> } <nl> [ 5 ] = > <nl> array ( 5 ) { <nl> - [ " name " ] = > <nl> - string ( 2 ) " I1 " <nl> - [ " kindOf " ] = > <nl> - string ( 9 ) " interface " <nl> - [ " flags " ] 
= > <nl> - int ( 1 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 1 ) <nl> + [ " kindOf " ] = > <nl> + string ( 9 ) " interface " <nl> + [ " name " ] = > <nl> + string ( 2 ) " I1 " <nl> [ " requireExtends " ] = > <nl> array ( 0 ) { <nl> } <nl> array ( 3 ) { <nl> array ( 10 ) { <nl> [ 0 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 6 ) " NS \ CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 6 ) " NS \ CL0 " <nl> } <nl> [ 1 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 6 ) " NS \ CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 6 ) " NS \ CL1 " <nl> } <nl> [ 2 ] = > <nl> array ( 4 ) { <nl> array ( 3 ) { <nl> } <nl> [ 3 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 5 ) " NS \ E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> array ( 1 ) { <nl> [ 0 ] = > <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 5 ) " NS \ E0 " <nl> } <nl> [ 4 ] = > <nl> array ( 5 ) { <nl> array ( 3 ) { <nl> array ( 10 ) { <nl> [ 0 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 9 ) " NS \ NS \ CL0 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 9 ) " NS \ NS \ CL0 " <nl> } <nl> [ 1 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 9 ) " NS \ NS \ CL1 " <nl> - [ " kindOf " ] = > <nl> - string ( 5 ) " class " <nl> - [ " flags " ] = > <nl> - int ( 0 ) <nl> [ " baseTypes " ] = > <nl> array ( 0 ) { <nl> } <nl> + [ " flags " ] = > <nl> + int ( 0 ) <nl> + [ " kindOf " ] = > <nl> + string ( 5 ) " class " <nl> + [ " name " ] = > <nl> + string ( 9 ) " NS \ NS \ CL1 " <nl> } <nl> [ 2 ] = > <nl> array ( 4 ) { <nl> array ( 3 ) { <nl> } <nl> [ 3 ] = > <nl> array ( 4 ) { <nl> - [ " name " ] = > <nl> - string ( 8 ) " NS \ NS \ E0 " <nl> - [ " kindOf " ] = > <nl> - string ( 4 ) " enum " <nl> - [ " flags " ] = > <nl> - int ( 2 ) <nl> [ " baseTypes " ] = > <nl> array ( 1 ) { <nl> [ 0 ] = > <nl> string ( 14 ) " HH \ BuiltinEnum " <nl> } <nl> + [ " flags " ] = > <nl> + int ( 2 ) <nl> + [ " kindOf " ] = > <nl> + string ( 4 ) " enum " <nl> + [ " name " ] = > <nl> + string ( 8 ) " NS \ NS \ E0 " <nl> } <nl> [ 4 ] = > <nl> array ( 5 ) { <nl>
Recursively sort keys when producing Facts JSON
facebook/hhvm
99fd10a0a332a2b3558c68601e689df6e73e233f
2019-06-29T05:41:33Z
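The hhvm commit above threads a sort_keys flag through hh_json's OCaml streamer so object keys come out in a stable order, which is why the .expectf files reorder their entries alphabetically. A minimal C++ sketch of the same idea — JsonObject and serialize are simplified stand-ins, not the hh_json API:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Minimal stand-in for a JSON object: an ordered list of key/value pairs.
using JsonObject = std::vector<std::pair<std::string, std::string>>;

// Serialize the object, optionally sorting keys first so two logically
// equal objects always produce byte-identical output (useful for diffs,
// caching, and golden-file tests like the .expectf files above).
std::string serialize(JsonObject obj, bool sort_keys) {
  if (sort_keys) {
    std::sort(obj.begin(), obj.end(),
              [](const auto& a, const auto& b) { return a.first < b.first; });
  }
  std::string out = "{";
  for (size_t i = 0; i < obj.size(); ++i) {
    if (i > 0) out += ",";
    out += "\"" + obj[i].first + "\":\"" + obj[i].second + "\"";
  }
  return out + "}";
}

int main() {
  JsonObject o = {{"name", "CL0"}, {"kindOf", "class"}, {"flags", "0"}};
  std::cout << serialize(o, /*sort_keys=*/false) << "\n";  // insertion order
  std::cout << serialize(o, /*sort_keys=*/true) << "\n";   // deterministic
}
```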
mmm a / hphp / hack / src / server / serverMain . ml <nl> ppp b / hphp / hack / src / server / serverMain . ml <nl> module Program = <nl> ignore @ @ <nl> Sys . signal Sys . sigusr1 ( Sys . Signal_handle Typing . debug_print_last_pos ) <nl> <nl> - let stamp_file = Filename . concat GlobalConfig . tmp_dir " stamp " <nl> - let touch_stamp ( ) = <nl> - Sys_utils . mkdir_no_fail ( Filename . dirname stamp_file ) ; <nl> - Sys_utils . with_umask <nl> - 0o111 <nl> - ( fun ( ) - > <nl> - ( * Open and close the file to set its mtime . Don ' t use the Unix . utimes <nl> - * function since that will fail if the stamp file doesn ' t exist . * ) <nl> - close_out ( open_out stamp_file ) <nl> - ) <nl> - let touch_stamp_errors l1 l2 = <nl> - ( * We don ' t want to needlessly touch the stamp file if the error list is <nl> - * the same and nothing has changed , but we also don ' t want to spend a ton <nl> - * of time comparing huge lists of errors over and over ( i . e . , grind to a <nl> - * halt in the cases when there are thousands of errors ) . So we cut off <nl> - * the comparison at an arbitrary point . * ) <nl> - let rec length_greater_than n = function <nl> - | [ ] - > false <nl> - | _ when n = 0 - > true <nl> - | _ : : l - > length_greater_than ( n - 1 ) l in <nl> - if length_greater_than 5 l1 | | length_greater_than 5 l2 | | l1 < > l2 <nl> - then touch_stamp ( ) <nl> - <nl> - let init ? load_mini_script genv = <nl> - let env = ServerInit . init ? load_mini_script genv in <nl> - touch_stamp ( ) ; <nl> - env <nl> - <nl> let run_once_and_exit genv env = <nl> ServerError . print_errorl <nl> ( ServerArgs . json_mode genv . options ) <nl> module Program = <nl> Relative_path . Set . union typecheck_updates old_env . failed_parsing in <nl> let check_env = { old_env with failed_parsing = failed_parsing } in <nl> let new_env , total_rechecked = ServerTypeCheck . check genv check_env in <nl> - touch_stamp_errors old_env . errorl new_env . errorl ; <nl> + ServerStamp . touch_stamp_errors old_env . errorl new_env . errorl ; <nl> new_env , total_rechecked <nl> end <nl> <nl> let run_load_script genv cmd = <nl> " load_error " <nl> in <nl> let env = HackEventLogger . with_init_type init_type begin fun ( ) - > <nl> - Program . init genv <nl> + ServerInit . init genv <nl> end in <nl> env , init_type <nl> end <nl> let program_init genv = <nl> ServerArgs . save_filename genv . options = None then <nl> match ServerConfig . load_mini_script genv . config with <nl> | None - > <nl> - let env = Program . init genv in <nl> + let env = ServerInit . init genv in <nl> env , " fresh " <nl> | Some load_mini_script - > <nl> - let env = Program . init ~ load_mini_script genv in <nl> + let env = ServerInit . init ~ load_mini_script genv in <nl> env , " mini_load " <nl> else <nl> match ServerConfig . load_script genv . config with <nl> | None - > <nl> - let env = Program . init genv in <nl> + let env = ServerInit . init genv in <nl> env , " fresh " <nl> | Some load_script - > <nl> run_load_script genv load_script <nl> let program_init genv = <nl> HackEventLogger . init_end init_type ; <nl> Hh_logger . log " Waiting for daemon ( s ) to be ready . . . " ; <nl> genv . wait_until_ready ( ) ; <nl> + ServerStamp . touch_stamp ( ) ; <nl> HackEventLogger . init_really_end init_type ; <nl> env <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 5e6909de289 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / server / serverStamp . ml <nl> <nl> + ( * * <nl> + * Copyright ( c ) 2015 , Facebook , Inc . <nl> + * All rights reserved . 
<nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * <nl> + * ) <nl> + <nl> + ( * Used for integration with HHVM * ) <nl> + <nl> + let stamp_file = Filename . concat GlobalConfig . tmp_dir " stamp " <nl> + <nl> + let touch_stamp ( ) = <nl> + Sys_utils . mkdir_no_fail ( Filename . dirname stamp_file ) ; <nl> + Sys_utils . with_umask <nl> + 0o111 <nl> + ( fun ( ) - > <nl> + ( * Open and close the file to set its mtime . Don ' t use the Unix . utimes <nl> + * function since that will fail if the stamp file doesn ' t exist . * ) <nl> + close_out ( open_out stamp_file ) <nl> + ) <nl> + <nl> + let touch_stamp_errors l1 l2 = <nl> + ( * We don ' t want to needlessly touch the stamp file if the error list is <nl> + * the same and nothing has changed , but we also don ' t want to spend a ton <nl> + * of time comparing huge lists of errors over and over ( i . e . , grind to a <nl> + * halt in the cases when there are thousands of errors ) . So we cut off <nl> + * the comparison at an arbitrary point . * ) <nl> + let rec length_greater_than n = function <nl> + | [ ] - > false <nl> + | _ when n = 0 - > true <nl> + | _ : : l - > length_greater_than ( n - 1 ) l in <nl> + if length_greater_than 5 l1 | | length_greater_than 5 l2 | | l1 < > l2 <nl> + then touch_stamp ( ) <nl>
Move stamp code to its own module
facebook/hhvm
d8e4496c00b9ef7cfea0f5881fb644a3662acf35
2015-10-17T23:48:17Z
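The extracted ServerStamp module keeps an interesting heuristic: it compares error lists only up to an arbitrary cutoff before deciding whether to touch the stamp, so huge lists never force a full O(n) comparison. A C++ sketch of that logic, assuming a plain std::list and a caller-supplied path (both illustrative):

```cpp
#include <cstdio>
#include <list>
#include <string>

// Touch a stamp file by opening (truncating) and closing it, which updates
// its mtime and creates it if missing -- the OCaml code avoids utimes() for
// the same reason: utimes fails when the file does not exist yet.
void touch_stamp(const std::string& path) {
  if (std::FILE* f = std::fopen(path.c_str(), "w")) std::fclose(f);
}

// True if the list has more than n elements, without walking the whole list.
template <typename T>
bool length_greater_than(const std::list<T>& l, size_t n) {
  size_t seen = 0;
  for (auto it = l.begin(); it != l.end(); ++it)
    if (++seen > n) return true;
  return false;
}

// Touch the stamp when either list is past the cutoff (skip the expensive
// comparison entirely) or when the short lists actually differ.
void touch_stamp_errors(const std::list<std::string>& l1,
                        const std::list<std::string>& l2,
                        const std::string& path) {
  if (length_greater_than(l1, 5) || length_greater_than(l2, 5) || l1 != l2)
    touch_stamp(path);
}
```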
mmm a / src / regexp / jsregexp - inl . h <nl> ppp b / src / regexp / jsregexp - inl . h <nl> int32_t * RegExpImpl : : GlobalCache : : LastSuccessfulMatch ( ) { <nl> return & register_array_ [ index ] ; <nl> } <nl> <nl> + RegExpEngine : : CompilationResult : : CompilationResult ( Isolate * isolate , <nl> + const char * error_message ) <nl> + : error_message ( error_message ) , <nl> + code ( ReadOnlyRoots ( isolate ) . the_hole_value ( ) ) { } <nl> <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / regexp / jsregexp . h <nl> ppp b / src / regexp / jsregexp . h <nl> struct RegExpCompileData { <nl> class RegExpEngine : public AllStatic { <nl> public : <nl> struct CompilationResult { <nl> - CompilationResult ( Isolate * isolate , const char * error_message ) <nl> - : error_message ( error_message ) , <nl> - code ( ReadOnlyRoots ( isolate ) . the_hole_value ( ) ) , <nl> - num_registers ( 0 ) { } <nl> + inline CompilationResult ( Isolate * isolate , const char * error_message ) ; <nl> CompilationResult ( Object * code , int registers ) <nl> - : error_message ( nullptr ) , code ( code ) , num_registers ( registers ) { } <nl> - const char * error_message ; <nl> - Object * code ; <nl> - int num_registers ; <nl> + : code ( code ) , num_registers ( registers ) { } <nl> + const char * const error_message = nullptr ; <nl> + Object * const code ; <nl> + int const num_registers = 0 ; <nl> } ; <nl> <nl> static CompilationResult Compile ( Isolate * isolate , Zone * zone , <nl> mmm a / tools / generate - header - include - checks . py <nl> ppp b / tools / generate - header - include - checks . py <nl> <nl> # flag - definitions . h needs a mode set for being included . <nl> ' src / flag - definitions . h ' , <nl> # blacklist of headers we need to fix ( https : / / crbug . com / v8 / 7965 ) . <nl> - ' src / regexp / jsregexp . h ' , <nl> ' src / transitions . h ' , <nl> ] <nl> AUTO_EXCLUDE_PATTERNS = [ <nl>
[ iwyu ] Fix jsregexp . h
v8/v8
0d9ed95041064602e45cfd8fbcca3a2337e5d223
2018-10-29T12:20:47Z
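The iwyu fix works by declaring the constructor inline in the header and defining it in the -inl.h, so jsregexp.h no longer needs the header that ReadOnlyRoots comes from and can be removed from the include-check blacklist. A condensed, single-file sketch of the general pattern with made-up names (Widget, heavy_dep_transform):

```cpp
// The pattern, condensed into one file for illustration; in a real tree the
// sections below would be widget.h, heavy_dep.h, widget-inl.h, and widget.cc.

// ---- widget.h: declaration only, no heavy includes needed here ----
struct Widget {
  inline Widget(int id);  // declared inline, defined in the -inl.h
  int id_;
};

// ---- heavy_dep.h: stands in for the expensive dependency ----
inline int heavy_dep_transform(int id) { return id * 2; }

// ---- widget-inl.h: pulls in the dependency and defines the ctor ----
inline Widget::Widget(int id) : id_(heavy_dep_transform(id)) {}

// ---- widget.cc: only translation units that construct Widget pay the cost
#include <iostream>
int main() { std::cout << Widget(21).id_ << "\n"; }  // prints 42
```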
mmm a / Marlin / ultralcd . pde <nl> ppp b / Marlin / ultralcd . pde <nl> void lcd_init ( ) <nl> B10001 , <nl> B01110 <nl> } ; <nl> - byte uplevel [ 8 ] = { 0x04 , 0x0e , 0x1f , 0x04 , 0x1c , 0x00 , 0x00 , 0x00 } ; / / thanks joris <nl> - byte refresh [ 8 ] = { 0x00 , 0x06 , 0x19 , 0x18 , 0x03 , 0x13 , 0x0c , 0x00 } ; / / thanks joris <nl> - byte folder [ 8 ] = { 0x00 , 0x1c , 0x1f , 0x11 , 0x11 , 0x1f , 0x00 , 0x00 } ; / / thanks joris <nl> + byte uplevel [ 8 ] = { <nl> + B00100 , <nl> + B01110 , <nl> + B11111 , <nl> + B00100 , <nl> + B11100 , <nl> + B00000 , <nl> + B00000 , <nl> + B00000 <nl> + } ; / / thanks joris <nl> + byte refresh [ 8 ] = { <nl> + B00000 , <nl> + B00110 , <nl> + B11001 , <nl> + B11000 , <nl> + B00011 , <nl> + B10011 , <nl> + B01100 , <nl> + B00000 , <nl> + } ; / / thanks joris <nl> + byte folder [ 8 ] = { <nl> + B00000 , <nl> + B11100 , <nl> + B11111 , <nl> + B10001 , <nl> + B10001 , <nl> + B11111 , <nl> + B00000 , <nl> + B00000 <nl> + } ; / / thanks joris <nl> lcd . begin ( LCD_WIDTH , LCD_HEIGHT ) ; <nl> lcd . createChar ( 1 , Degree ) ; <nl> lcd . createChar ( 2 , Thermometer ) ; <nl>
Merge pull request from gwelchc / patch - 1
MarlinFirmware/Marlin
0152ee71a53cc5526bc252539e7c7fd5a2210676
2012-09-14T19:10:28Z
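The Marlin patch rewrites the hex glyph rows as binary so each bit reads as a pixel. A sketch of the same glyph using C++14 0b literals in place of Arduino's B-macros; the ASCII renderer below stands in for lcd.createChar() and is only for illustration:

```cpp
#include <cstdint>
#include <iostream>

// A 5x8 LCD glyph, one byte per row, low 5 bits used. Written in binary,
// the bitmap is readable as pixel art (the "uplevel" arrow from the patch).
constexpr uint8_t uplevel[8] = {
  0b00100,  // ..#..
  0b01110,  // .###.
  0b11111,  // #####
  0b00100,  // ..#..
  0b11100,  // ###..
  0b00000,
  0b00000,
  0b00000,
};

int main() {
  // Render the glyph in ASCII; on hardware this array would be handed to
  // the LCD driver's custom-character upload instead.
  for (uint8_t row : uplevel) {
    for (int bit = 4; bit >= 0; --bit)
      std::cout << (((row >> bit) & 1) ? '#' : '.');
    std::cout << '\n';
  }
}
```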
mmm a / xbmc / linux / OMXCore . cpp <nl> ppp b / xbmc / linux / OMXCore . cpp <nl> COMXCoreComponent : : COMXCoreComponent ( ) <nl> <nl> m_exit = false ; <nl> <nl> - m_omx_input_use_buffers = false ; <nl> - m_omx_output_use_buffers = false ; <nl> - <nl> m_omx_events . clear ( ) ; <nl> m_ignore_error = OMX_ErrorNone ; <nl> <nl> OMX_ERRORTYPE COMXCoreComponent : : WaitForOutputDone ( long timeout / * = 200 * / ) <nl> } <nl> <nl> <nl> - OMX_ERRORTYPE COMXCoreComponent : : AllocInputBuffers ( bool use_buffers / * = false * * / ) <nl> + OMX_ERRORTYPE COMXCoreComponent : : AllocInputBuffers ( ) <nl> { <nl> OMX_ERRORTYPE omx_err = OMX_ErrorNone ; <nl> <nl> - m_omx_input_use_buffers = use_buffers ; <nl> - <nl> if ( ! m_handle ) <nl> return OMX_ErrorUndefined ; <nl> <nl> OMX_ERRORTYPE COMXCoreComponent : : AllocInputBuffers ( bool use_buffers / * = false * <nl> for ( size_t i = 0 ; i < portFormat . nBufferCountActual ; i + + ) <nl> { <nl> OMX_BUFFERHEADERTYPE * buffer = NULL ; <nl> - OMX_U8 * data = NULL ; <nl> <nl> - if ( m_omx_input_use_buffers ) <nl> - { <nl> - data = ( OMX_U8 * ) _aligned_malloc ( portFormat . nBufferSize , m_input_alignment ) ; <nl> - omx_err = OMX_UseBuffer ( m_handle , & buffer , m_input_port , NULL , portFormat . nBufferSize , data ) ; <nl> - } <nl> - else <nl> - { <nl> - omx_err = OMX_AllocateBuffer ( m_handle , & buffer , m_input_port , NULL , portFormat . nBufferSize ) ; <nl> - } <nl> + omx_err = OMX_AllocateBuffer ( m_handle , & buffer , m_input_port , NULL , portFormat . nBufferSize ) ; <nl> if ( omx_err ! = OMX_ErrorNone ) <nl> { <nl> CLog : : Log ( LOGERROR , " COMXCoreComponent : : AllocInputBuffers component ( % s ) - OMX_UseBuffer failed with omx_err ( 0x % x ) \ n " , <nl> m_componentName . c_str ( ) , omx_err ) ; <nl> - <nl> - if ( m_omx_input_use_buffers & & data ) <nl> - _aligned_free ( data ) ; <nl> - <nl> return omx_err ; <nl> } <nl> buffer - > nInputPortIndex = m_input_port ; <nl> OMX_ERRORTYPE COMXCoreComponent : : AllocInputBuffers ( bool use_buffers / * = false * <nl> return omx_err ; <nl> } <nl> <nl> - OMX_ERRORTYPE COMXCoreComponent : : AllocOutputBuffers ( bool use_buffers / * = false * / ) <nl> + OMX_ERRORTYPE COMXCoreComponent : : AllocOutputBuffers ( ) <nl> { <nl> OMX_ERRORTYPE omx_err = OMX_ErrorNone ; <nl> <nl> if ( ! m_handle ) <nl> return OMX_ErrorUndefined ; <nl> <nl> - m_omx_output_use_buffers = use_buffers ; <nl> - <nl> OMX_PARAM_PORTDEFINITIONTYPE portFormat ; <nl> OMX_INIT_STRUCTURE ( portFormat ) ; <nl> portFormat . nPortIndex = m_output_port ; <nl> OMX_ERRORTYPE COMXCoreComponent : : AllocOutputBuffers ( bool use_buffers / * = false <nl> for ( size_t i = 0 ; i < portFormat . nBufferCountActual ; i + + ) <nl> { <nl> OMX_BUFFERHEADERTYPE * buffer = NULL ; <nl> - OMX_U8 * data = NULL ; <nl> <nl> - if ( m_omx_output_use_buffers ) <nl> - { <nl> - data = ( OMX_U8 * ) _aligned_malloc ( portFormat . nBufferSize , m_output_alignment ) ; <nl> - omx_err = OMX_UseBuffer ( m_handle , & buffer , m_output_port , NULL , portFormat . nBufferSize , data ) ; <nl> - } <nl> - else <nl> - { <nl> - omx_err = OMX_AllocateBuffer ( m_handle , & buffer , m_output_port , NULL , portFormat . nBufferSize ) ; <nl> - } <nl> + omx_err = OMX_AllocateBuffer ( m_handle , & buffer , m_output_port , NULL , portFormat . nBufferSize ) ; <nl> if ( omx_err ! = OMX_ErrorNone ) <nl> { <nl> CLog : : Log ( LOGERROR , " COMXCoreComponent : : AllocOutputBuffers component ( % s ) - OMX_UseBuffer failed with omx_err ( 0x % x ) \ n " , <nl> m_componentName . 
c_str ( ) , omx_err ) ; <nl> - <nl> - if ( m_omx_output_use_buffers & & data ) <nl> - _aligned_free ( data ) ; <nl> - <nl> return omx_err ; <nl> } <nl> buffer - > nOutputPortIndex = m_output_port ; <nl> OMX_ERRORTYPE COMXCoreComponent : : FreeInputBuffers ( ) <nl> <nl> for ( size_t i = 0 ; i < m_omx_input_buffers . size ( ) ; i + + ) <nl> { <nl> - uint8_t * buf = m_omx_input_buffers [ i ] - > pBuffer ; <nl> - <nl> omx_err = OMX_FreeBuffer ( m_handle , m_input_port , m_omx_input_buffers [ i ] ) ; <nl> - <nl> - if ( m_omx_input_use_buffers & & buf ) <nl> - _aligned_free ( buf ) ; <nl> - <nl> if ( omx_err ! = OMX_ErrorNone ) <nl> { <nl> CLog : : Log ( LOGERROR , " COMXCoreComponent : : FreeInputBuffers error deallocate omx input buffer on component % s omx_err ( 0x % 08x ) \ n " , m_componentName . c_str ( ) , omx_err ) ; <nl> OMX_ERRORTYPE COMXCoreComponent : : FreeOutputBuffers ( ) <nl> <nl> for ( size_t i = 0 ; i < m_omx_output_buffers . size ( ) ; i + + ) <nl> { <nl> - uint8_t * buf = m_omx_output_buffers [ i ] - > pBuffer ; <nl> - <nl> omx_err = OMX_FreeBuffer ( m_handle , m_output_port , m_omx_output_buffers [ i ] ) ; <nl> - <nl> - if ( m_omx_output_use_buffers & & buf ) <nl> - _aligned_free ( buf ) ; <nl> - <nl> if ( omx_err ! = OMX_ErrorNone ) <nl> { <nl> CLog : : Log ( LOGERROR , " COMXCoreComponent : : FreeOutputBuffers error deallocate omx output buffer on component % s omx_err ( 0x % 08x ) \ n " , m_componentName . c_str ( ) , omx_err ) ; <nl> OMX_ERRORTYPE COMXCoreComponent : : UseEGLImage ( OMX_BUFFERHEADERTYPE * * ppBufferHdr , <nl> if ( ! m_handle ) <nl> return OMX_ErrorUndefined ; <nl> <nl> - m_omx_output_use_buffers = false ; <nl> - <nl> OMX_PARAM_PORTDEFINITIONTYPE portFormat ; <nl> OMX_INIT_STRUCTURE ( portFormat ) ; <nl> portFormat . nPortIndex = m_output_port ; <nl> bool COMXCoreComponent : : Initialize ( const std : : string & component_name , OMX_INDEX <nl> <nl> m_exit = false ; <nl> <nl> - m_omx_input_use_buffers = false ; <nl> - m_omx_output_use_buffers = false ; <nl> - <nl> m_omx_events . clear ( ) ; <nl> m_ignore_error = OMX_ErrorNone ; <nl> <nl> mmm a / xbmc / linux / OMXCore . h <nl> ppp b / xbmc / linux / OMXCore . h <nl> class COMXCoreComponent <nl> OMX_BUFFERHEADERTYPE * GetInputBuffer ( long timeout = 200 ) ; <nl> OMX_BUFFERHEADERTYPE * GetOutputBuffer ( long timeout = 200 ) ; <nl> <nl> - OMX_ERRORTYPE AllocInputBuffers ( bool use_buffers = false ) ; <nl> - OMX_ERRORTYPE AllocOutputBuffers ( bool use_buffers = false ) ; <nl> + OMX_ERRORTYPE AllocInputBuffers ( ) ; <nl> + OMX_ERRORTYPE AllocOutputBuffers ( ) ; <nl> <nl> OMX_ERRORTYPE FreeInputBuffers ( ) ; <nl> OMX_ERRORTYPE FreeOutputBuffers ( ) ; <nl> class COMXCoreComponent <nl> unsigned int m_input_alignment ; <nl> unsigned int m_input_buffer_size ; <nl> unsigned int m_input_buffer_count ; <nl> - bool m_omx_input_use_buffers ; <nl> <nl> / / OMXCore output buffers ( video frames ) <nl> pthread_mutex_t m_omx_output_mutex ; <nl> class COMXCoreComponent <nl> unsigned int m_output_alignment ; <nl> unsigned int m_output_buffer_size ; <nl> unsigned int m_output_buffer_count ; <nl> - bool m_omx_output_use_buffers ; <nl> <nl> bool m_exit ; <nl> DllOMX * m_DllOMX ; <nl>
Merge pull request from popcornmix / omxdeadcode
xbmc/xbmc
49c395c28960b05065ec1ab88b1b37ac0b7646f4
2016-05-01T18:00:51Z
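With the use_buffers path gone, OMXCore has exactly one allocation path and one owner for every buffer, so teardown needs no "did the caller supply this memory?" bookkeeping. A rough sketch of that simplification; Buffer, AllocateBuffer, and FreeBuffer are stand-ins, not OpenMAX IL calls:

```cpp
#include <cstdlib>
#include <vector>

// Illustrative stand-ins for the component's buffer API.
struct Buffer { void* data; };
bool AllocateBuffer(Buffer** out, size_t size) {
  *out = new Buffer{std::malloc(size)};
  return *out && (*out)->data;
}
void FreeBuffer(Buffer* b) { std::free(b->data); delete b; }

// A single allocation path means a single free path: no per-buffer flag
// deciding whether the component or the caller owns the backing memory.
class Component {
 public:
  bool AllocBuffers(size_t count, size_t size) {
    for (size_t i = 0; i < count; ++i) {
      Buffer* b = nullptr;
      if (!AllocateBuffer(&b, size)) return false;  // nothing extra to unwind
      buffers_.push_back(b);
    }
    return true;
  }
  ~Component() {
    for (Buffer* b : buffers_) FreeBuffer(b);  // one path, one free
  }
 private:
  std::vector<Buffer*> buffers_;
};

int main() { Component c; return c.AllocBuffers(4, 1024) ? 0 : 1; }
```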
mmm a / tensorflow / python / kernel_tests / variables_test . py <nl> ppp b / tensorflow / python / kernel_tests / variables_test . py <nl> def testInitialization ( self ) : <nl> self . assertEqual ( " Variable : 0 " , var0 . name ) <nl> self . assertEqual ( [ ] , var0 . get_shape ( ) ) <nl> self . assertEqual ( [ ] , var0 . get_shape ( ) ) <nl> + self . assertEqual ( [ ] , var0 . shape ) <nl> <nl> var1 = variables . Variable ( 1 . 1 ) <nl> self . assertEqual ( " Variable_1 : 0 " , var1 . name ) <nl> self . assertEqual ( [ ] , var1 . get_shape ( ) ) <nl> self . assertEqual ( [ ] , var1 . get_shape ( ) ) <nl> + self . assertEqual ( [ ] , var1 . shape ) <nl> <nl> with self . assertRaisesOpError ( " Attempting to use uninitialized value " ) : <nl> var0 . eval ( ) <nl> def testInitializationOrder ( self ) : <nl> self . assertEqual ( " rnd : 0 " , rnd . name ) <nl> self . assertEqual ( [ 3 , 6 ] , rnd . get_shape ( ) ) <nl> self . assertEqual ( [ 3 , 6 ] , rnd . get_shape ( ) ) <nl> + self . assertEqual ( [ 3 , 6 ] , rnd . shape ) <nl> <nl> dep = variables . Variable ( rnd . initialized_value ( ) , name = " dep " ) <nl> self . assertEqual ( " dep : 0 " , dep . name ) <nl> self . assertEqual ( [ 3 , 6 ] , dep . get_shape ( ) ) <nl> self . assertEqual ( [ 3 , 6 ] , dep . get_shape ( ) ) <nl> + self . assertEqual ( [ 3 , 6 ] , dep . shape ) <nl> <nl> # Currently have to set the shape manually for Add . <nl> added_val = rnd . initialized_value ( ) + dep . initialized_value ( ) + 2 . 0 <nl> def testInitializationOrder ( self ) : <nl> self . assertEqual ( " depdep : 0 " , depdep . name ) <nl> self . assertEqual ( [ 3 , 6 ] , depdep . get_shape ( ) ) <nl> self . assertEqual ( [ 3 , 6 ] , depdep . get_shape ( ) ) <nl> + self . assertEqual ( [ 3 , 6 ] , depdep . shape ) <nl> <nl> variables . global_variables_initializer ( ) . run ( ) <nl> <nl> def testInitializerFunction ( self ) : <nl> <nl> v1 = variables . Variable ( initializer , dtype = dtypes . float32 ) <nl> self . assertEqual ( shape , v1 . get_shape ( ) ) <nl> + self . assertEqual ( shape , v1 . shape ) <nl> self . assertAllClose ( value , v1 . initial_value . eval ( ) ) <nl> with self . assertRaises ( errors_impl . FailedPreconditionError ) : <nl> v1 . eval ( ) <nl> def testInitializerFunction ( self ) : <nl> v2 = variables . Variable ( <nl> math_ops . negative ( v1 . initialized_value ( ) ) , dtype = dtypes . float32 ) <nl> self . assertEqual ( v1 . get_shape ( ) , v2 . get_shape ( ) ) <nl> + self . assertEqual ( v1 . shape , v2 . shape ) <nl> self . assertAllClose ( np . negative ( value ) , v2 . initial_value . eval ( ) ) <nl> <nl> # Once v2 . initial_value . eval ( ) has been called , v1 has effectively been <nl> def testPartitionedVariable ( self ) : <nl> self . assertEqual ( 2 , num_partitions ) <nl> self . assertEqual ( [ v0 , v1 ] , iterated_partitions ) <nl> self . assertEqual ( [ 2 ] , concatenated . get_shape ( ) ) <nl> + self . assertEqual ( [ 2 ] , concatenated . shape ) <nl> <nl> def testPartitionedVariableFailures ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> mmm a / tensorflow / python / ops / variables . py <nl> ppp b / tensorflow / python / ops / variables . py <nl> def graph ( self ) : <nl> " " " The ` Graph ` of this variable . " " " <nl> return self . _variable . graph <nl> <nl> - def get_shape ( self ) : <nl> + @ property <nl> + def shape ( self ) : <nl> " " " The ` TensorShape ` of this variable . <nl> <nl> Returns : <nl> def get_shape ( self ) : <nl> " " " <nl> return self . _variable . 
get_shape ( ) <nl> <nl> + def get_shape ( self ) : <nl> + " " " Alias of Variable . shape . " " " <nl> + return self . shape <nl> + <nl> def to_proto ( self , export_scope = None ) : <nl> " " " Converts a ` Variable ` to a ` VariableDef ` protocol buffer . <nl> <nl>
Merge pull request from yaroslavvb / variable_shape_fix
tensorflow/tensorflow
69638e5f09e3854d50b44de5ea72dd28ccaf795e
2017-02-22T14:16:43Z
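The TensorFlow change promotes shape to a property and keeps get_shape as a thin alias, so both spellings stay valid for existing callers. The same backward-compatibility pattern in C++ — Variable here is a toy, not the TF class:

```cpp
#include <iostream>
#include <vector>

// `shape()` is the canonical accessor; the older spelling is kept as a
// one-line alias so existing call sites keep compiling unchanged.
class Variable {
 public:
  explicit Variable(std::vector<int> shape) : shape_(std::move(shape)) {}
  const std::vector<int>& shape() const { return shape_; }
  const std::vector<int>& get_shape() const { return shape(); }  // alias
 private:
  std::vector<int> shape_;
};

int main() {
  Variable v({3, 6});
  // Both spellings agree, mirroring the assertions added in the test above.
  std::cout << (v.shape() == v.get_shape()) << "\n";  // prints 1
}
```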
mmm a / tensorflow / compiler / xla / client / lib / math . cc <nl> ppp b / tensorflow / compiler / xla / client / lib / math . cc <nl> XlaOp ErfInv ( XlaOp x ) { <nl> 0 . 00943887047f , 1 . 00167406f , 2 . 83297682f } ; <nl> <nl> auto one = ScalarLike ( x , 1 . 0 ) ; <nl> - / / Compute logarithm of ( 1 + arg ) . ( This function is more precise than <nl> - / / the expression std : : log ( 1 + arg ) if arg is close to zero . ) <nl> + / / Compute logarithm of ( 1 + arg ) using log1p ( arg ) which is more <nl> + / / precise than log ( 1 + arg ) when arg is close to zero . <nl> / / See ref . https : / / en . cppreference . com / w / cpp / numeric / math / log1p <nl> + <nl> auto w = - Log1p ( - x * x ) ; <nl> <nl> auto lt = Lt ( w , ScalarLike ( x , 5 . 0 ) ) ; <nl>
desp
tensorflow/tensorflow
1c9218b91589e6144e83d85402f40b32e3e87bfa
2019-10-08T16:41:10Z
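The comment being reworded above is about numerics: forming 1 + x in floating point rounds away a tiny x before the logarithm ever runs, while log1p evaluates log(1 + x) without forming the sum. A quick self-contained demonstration:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // For |x| far below float epsilon (~1.2e-7), 1.0f + x rounds to exactly
  // 1.0f, so log(1 + x) collapses to 0 while log1p(x) keeps the answer.
  float x = 1e-10f;
  std::printf("log(1+x) = %.6e\n", std::log(1.0f + x));  // 0.000000e+00
  std::printf("log1p(x) = %.6e\n", std::log1p(x));       // ~1.000000e-10
}
```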
mmm a / test / test_jit_fuser_te . py <nl> ppp b / test / test_jit_fuser_te . py <nl> <nl> import torch . nn . functional as F <nl> from torch . testing import FileCheck <nl> <nl> + # these needs to be set before ` common_utils ` <nl> + # infers ` GRAPH_EXECUTOR ` . <nl> + # this file * * requires * * these settings <nl> + # and setting them after ` GRAPH_EXECUTOR ` is <nl> + # inferred erroneously runs or skips <nl> + # some tests <nl> + torch . _C . _jit_set_profiling_executor ( True ) <nl> + torch . _C . _jit_set_profiling_mode ( True ) <nl> + <nl> from torch . testing . _internal . common_utils import run_tests , IS_SANDCASTLE , ProfilingMode , GRAPH_EXECUTOR , \ <nl> enable_profiling_mode_for_profiling_tests , skipIfRocm <nl> from torch . testing . _internal . jit_utils import JitTestCase , _inline_everything , \ <nl> <nl> <nl> from te_utils import CudaCodeGenExecuted <nl> <nl> - torch . _C . _jit_set_profiling_executor ( True ) <nl> - torch . _C . _jit_set_profiling_mode ( True ) <nl> - <nl> FUSION_GROUP = ' tensorexpr : : Group ' <nl> <nl> def strip_profiling_nodes ( nodes ) : <nl>
properly skip legacy tests regardless of the default executor ( )
pytorch/pytorch
5036c94a6e868963e0354fc04c92e204d8d77677
2020-06-26T18:13:50Z
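The pytorch fix works because common_utils infers GRAPH_EXECUTOR once, at import time, so the profiling flags must be set before that import runs. A C++ analogue of the same read-once pitfall, with static construction standing in for import side effects (profiling_mode and Executor are illustrative):

```cpp
#include <iostream>

// A process-wide mode flag, and a component that snapshots it once when it
// is constructed -- analogous to a Python module reading global state at
// import time.
bool profiling_mode = false;

struct Executor {
  bool profiling;
  Executor() : profiling(profiling_mode) {}  // value is captured here, once
};

int main() {
  Executor early;         // constructed before the flag is set
  profiling_mode = true;  // too late for `early`...
  Executor late;          // ...but seen by anything constructed afterwards
  std::cout << early.profiling << " " << late.profiling << "\n";  // 0 1
}
```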
mmm a / arangod / IResearch / IResearchViewBlock . cpp <nl> ppp b / arangod / IResearch / IResearchViewBlock . cpp <nl> bool IResearchViewBlock : : resetIterator ( ) { <nl> <nl> if ( _scr ) { <nl> _scrVal = _scr - > value ( ) ; <nl> + # ifdef ARANGODB_ENABLE_MAINTAINER_MODE <nl> + auto const numScores = static_cast < size_t > ( <nl> + std : : distance ( _scrVal . begin ( ) , _scrVal . end ( ) ) <nl> + ) / sizeof ( float_t ) ; <nl> + <nl> + auto const & viewNode = * ExecutionNode : : castTo < IResearchViewNode const * > ( <nl> + getPlanNode ( ) <nl> + ) ; <nl> + <nl> + TRI_ASSERT ( numScores = = viewNode . sortCondition ( ) . size ( ) ) ; <nl> + # endif <nl> } else { <nl> _scr = & irs : : score : : no_score ( ) ; <nl> _scrVal = irs : : bytes_ref : : NIL ; <nl> bool IResearchViewBlock : : resetIterator ( ) { <nl> bool IResearchViewBlock : : next ( <nl> ReadContext & ctx , <nl> size_t limit ) { <nl> - TRI_ASSERT ( _filter ) ; <nl> - auto const & viewNode = * ExecutionNode : : castTo < IResearchViewNode const * > ( getPlanNode ( ) ) ; <nl> - auto const numSorts = viewNode . sortCondition ( ) . size ( ) ; <nl> - <nl> for ( size_t count = _reader . size ( ) ; _readerOffset < count ; ) { <nl> if ( ! _itr & & ! resetIterator ( ) ) { <nl> continue ; <nl> bool IResearchViewBlock : : next ( <nl> } <nl> <nl> / / evaluate scores <nl> - TRI_ASSERT ( ! viewNode . sortCondition ( ) . empty ( ) ) ; <nl> _scr - > evaluate ( ) ; <nl> <nl> - / / copy scores , registerId ' s are sequential <nl> - auto scoreRegs = ctx . curRegs ; <nl> - <nl> - for ( size_t i = 0 ; i < numSorts ; + + i ) { <nl> - / / in 3 . 4 we assume all scorers return float_t <nl> - auto const score = _order . get < float_t > ( _scrVal . c_str ( ) , i ) ; <nl> + / / in arangodb we assume all scorers return float_t <nl> + auto begin = reinterpret_cast < const float_t * > ( _scrVal . begin ( ) ) ; <nl> + auto end = reinterpret_cast < const float_t * > ( _scrVal . end ( ) ) ; <nl> <nl> + / / copy scores , registerId ' s are sequential <nl> + for ( auto scoreRegs = ctx . curRegs ; begin ! = end ; + + begin ) { <nl> ctx . res - > setValue ( <nl> ctx . pos , <nl> + + scoreRegs , <nl> - # if 0 <nl> - _order . to_string < AqlValue , std : : char_traits < char > > ( _scrVal . c_str ( ) , i ) <nl> - # else <nl> - AqlValue ( AqlValueHintDouble ( double_t ( score ) ) ) <nl> - # endif <nl> + AqlValue ( AqlValueHintDouble ( double_t ( * begin ) ) ) <nl> ) ; <nl> } <nl> <nl> IResearchViewUnorderedBlock : : IResearchViewUnorderedBlock ( <nl> } <nl> <nl> bool IResearchViewUnorderedBlock : : resetIterator ( ) { <nl> + TRI_ASSERT ( _filter ) ; <nl> TRI_ASSERT ( ! _itr ) ; <nl> <nl> auto & segmentReader = _reader [ _readerOffset ] ; <nl> mmm a / tests / IResearch / IResearchQueryStartsWith - test . cpp <nl> ppp b / tests / IResearch / IResearchQueryStartsWith - test . cpp <nl> TEST_CASE ( " IResearchQueryTestStartsWith " , " [ iresearch ] [ iresearch - query ] " ) { <nl> CHECK ( expectedDocs . empty ( ) ) ; <nl> } <nl> <nl> + / / exact term , ordered <nl> + { <nl> + std : : map < irs : : string_ref , arangodb : : ManagedDocumentResult const * > expectedDocs { <nl> + { " A " , & insertedDocs [ 0 ] } <nl> + } ; <nl> + <nl> + auto queryResult = arangodb : : tests : : executeQuery ( <nl> + vocbase , <nl> + " FOR d IN testView SEARCH starts_with ( d . name , ' A ' , 0 ) SORT TFIDF ( d ) DESC RETURN d " <nl> + ) ; <nl> + REQUIRE ( TRI_ERROR_NO_ERROR = = queryResult . code ) ; <nl> + <nl> + auto result = queryResult . result - > slice ( ) ; <nl> + CHECK ( result . 
isArray ( ) ) ; <nl> + <nl> + arangodb : : velocypack : : ArrayIterator resultIt ( result ) ; <nl> + CHECK ( expectedDocs . size ( ) = = resultIt . size ( ) ) ; <nl> + <nl> + for ( auto const actualDoc : resultIt ) { <nl> + auto const resolved = actualDoc . resolveExternals ( ) ; <nl> + auto const keySlice = resolved . get ( " name " ) ; <nl> + auto const key = arangodb : : iresearch : : getStringRef ( keySlice ) ; <nl> + <nl> + auto expectedDoc = expectedDocs . find ( key ) ; <nl> + REQUIRE ( expectedDoc ! = expectedDocs . end ( ) ) ; <nl> + CHECK ( 0 = = arangodb : : basics : : VelocyPackHelper : : compare ( arangodb : : velocypack : : Slice ( expectedDoc - > second - > vpack ( ) ) , resolved , true ) ) ; <nl> + expectedDocs . erase ( expectedDoc ) ; <nl> + } <nl> + CHECK ( expectedDocs . empty ( ) ) ; <nl> + } <nl> + <nl> / / d . prefix = abc * , d . seq DESC <nl> { <nl> std : : map < ptrdiff_t , arangodb : : ManagedDocumentResult const * > expectedDocs ; <nl> TEST_CASE ( " IResearchQueryTestStartsWith " , " [ iresearch ] [ iresearch - query ] " ) { <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - END - OF - FILE <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> \ No newline at end of file <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl>
fix issue and add test ( )
arangodb/arangodb
0cff2bd8f65a7bf084f5639bfb74a4d9696d61cf
2018-12-18T14:27:41Z
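The rewritten loop above walks the raw score payload as a packed sequence of floats instead of indexing through the order object. A sketch of unpacking such a byte range; a memcpy per element sidesteps the alignment and aliasing questions that the diff's bare reinterpret_cast raises, so treat this as one safe way to do it, not a claim about iresearch internals:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Interpret [begin, end) as tightly packed float scores.
std::vector<float> unpackScores(const uint8_t* begin, const uint8_t* end) {
  std::vector<float> scores;
  for (; begin + sizeof(float) <= end; begin += sizeof(float)) {
    float s;
    std::memcpy(&s, begin, sizeof(float));  // well-defined for any alignment
    scores.push_back(s);
  }
  return scores;
}

int main() {
  float raw[3] = {0.5f, 1.25f, 2.0f};
  const auto* bytes = reinterpret_cast<const uint8_t*>(raw);
  for (float s : unpackScores(bytes, bytes + sizeof(raw)))
    std::cout << s << "\n";
}
```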
mmm a / src / core / hle / service / audio / audout_u . cpp <nl> ppp b / src / core / hle / service / audio / audout_u . cpp <nl> <nl> / / Licensed under GPLv2 or any later version <nl> / / Refer to the license . txt file included . <nl> <nl> + # include < array > <nl> # include < vector > <nl> # include " common / logging / log . h " <nl> # include " core / core_timing . h " <nl> void AudOutU : : ListAudioOutsImpl ( Kernel : : HLERequestContext & ctx ) { <nl> LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> IPC : : RequestParser rp { ctx } ; <nl> <nl> - const std : : string audio_interface = " AudioInterface " ; <nl> + constexpr std : : array < char , 15 > audio_interface { { " AudioInterface " } } ; <nl> ctx . WriteBuffer ( audio_interface ) ; <nl> <nl> IPC : : ResponseBuilder rb = rp . MakeBuilder ( 3 , 0 , 0 ) ; <nl>
audout_u : Use a std : : array instead of std : : string for holding the audio interface name
yuzu-emu/yuzu
c20cea118b2f045cd1b057aac766ac8c0c64eb6c
2018-07-20T03:15:00Z
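A note on the sizing in the yuzu change: "AudioInterface" is 14 characters, so std::array<char, 15> leaves room for the implicit NUL terminator, and unlike a std::string temporary the array allocates nothing at runtime. A small self-contained check:

```cpp
#include <array>
#include <iostream>

// The 15th slot holds the NUL of the 14-character literal; the whole array
// is a constant-initialized object whose size is part of its type, which
// suits fixed-layout IPC buffers.
constexpr std::array<char, 15> audio_interface{{"AudioInterface"}};
static_assert(audio_interface.size() == sizeof("AudioInterface"),
              "array must be sized to include the terminator");

int main() {
  std::cout << audio_interface.data() << " (" << audio_interface.size()
            << " bytes)\n";
}
```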
mmm a / fdbserver / RestoreLoader . actor . cpp <nl> ppp b / fdbserver / RestoreLoader . actor . cpp <nl> void splitMutation ( Reference < RestoreLoaderData > self , MutationRef m , Arena & mve <nl> printf ( " SPLITMUTATION : orignal mutation : % s \ n " , m . toString ( ) . c_str ( ) ) ; <nl> std : : map < Standalone < KeyRef > , UID > : : iterator itlow , itup ; / / we will return [ itlow , itup ) <nl> itlow = self - > range2Applier . lower_bound ( m . param1 ) ; / / lower_bound returns the iterator that is > = m . param1 <nl> - if ( itlow ! = self - > range2Applier . begin ( ) & & itlow - > first > m . param1 ) { / / m . param1 is not the smallest key \ 00 <nl> - / / ( itlow - 1 ) is the node whose key range includes m . param1 <nl> - - - itlow ; <nl> - } else { <nl> - if ( m . param1 ! = LiteralStringRef ( " \ 00 " ) | | itlow - > first ! = m . param1 ) { / / MX : This is useless <nl> - printf ( " [ ERROR ] splitMutation has bug on range mutation : % s \ n " , m . toString ( ) . c_str ( ) ) ; <nl> + if ( itlow - > first > m . param1 ) { <nl> + if ( itlow ! = self - > range2Applier . begin ( ) ) { <nl> + - - itlow ; <nl> } <nl> } <nl> <nl> + / / if ( itlow ! = self - > range2Applier . begin ( ) & & itlow - > first > m . param1 ) { / / m . param1 is not the smallest key \ 00 <nl> + / / / / ( itlow - 1 ) is the node whose key range includes m . param1 <nl> + / / - - itlow ; <nl> + / / } else { <nl> + / / if ( m . param1 ! = LiteralStringRef ( " \ 00 " ) | | itlow - > first ! = m . param1 ) { / / MX : This is useless <nl> + / / printf ( " [ ERROR ] splitMutation has bug on range mutation : % s \ n " , m . toString ( ) . c_str ( ) ) ; <nl> + / / } <nl> + / / } <nl> + <nl> itup = self - > range2Applier . upper_bound ( m . param2 ) ; / / upper_bound returns the iterator that is > m . param2 ; return rmap : : end if no keys are considered to go after m . param2 . <nl> printf ( " SPLITMUTATION : itlow_key : % s itup_key : % s \ n " , itlow - > first . toString ( ) . c_str ( ) , itup = = self - > range2Applier . end ( ) ? " [ end ] " : itup - > first . toString ( ) . c_str ( ) ) ; <nl> ASSERT ( itup = = self - > range2Applier . end ( ) | | itup - > first > = m . param2 ) ; <nl> void splitMutation ( Reference < RestoreLoaderData > self , MutationRef m , Arena & mve <nl> while ( itlow ! = itup ) { <nl> Standalone < MutationRef > curm ; / / current mutation <nl> curm . type = m . type ; <nl> - curm . param1 = itlow - > first ; <nl> + / / the first split mutation should starts with m . first . The later onces should start with the range2Applier boundary <nl> + if ( m . param1 > itlow - > first ) { <nl> + curm . param1 = m . param1 ; <nl> + } else { <nl> + curm . param1 = itlow - > first ; <nl> + } <nl> + / / curm . param1 = ( ( m . param1 > itlow - > first ) ? m . param1 : itlow - > first ) ; <nl> itlow + + ; <nl> if ( itlow = = itup ) { <nl> ASSERT ( m . param2 < = normalKeys . end ) ; <nl>
FastRestore : Fix splitMutation bug
apple/foundationdb
f8c654cd8645b7de447cc80b3bc38216b6899408
2019-05-15T00:05:50Z
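The FastRestore fix clamps the first split piece to start at the mutation's own begin key instead of at the covering applier's boundary. A sketch of the lower_bound/upper_bound split over a boundary map — splitRange and the sample boundaries are invented for illustration, not FoundationDB code:

```cpp
#include <algorithm>
#include <iostream>
#include <iterator>
#include <map>
#include <string>

// Boundaries map the start key of each applier's range to an applier id.
// To split mutation range [p1, p2): find the applier whose range covers p1,
// then walk boundaries up to p2, clamping the first piece to start at p1
// rather than at the applier boundary (the bug fixed above).
void splitRange(const std::map<std::string, int>& boundaries,
                const std::string& p1, const std::string& p2) {
  auto itlow = boundaries.lower_bound(p1);  // first key >= p1
  if (itlow != boundaries.begin() &&
      (itlow == boundaries.end() || itlow->first > p1))
    --itlow;                                // back up to the covering range
  auto itup = boundaries.upper_bound(p2);   // first key > p2
  for (auto it = itlow; it != itup; ++it) {
    std::string start = std::max(p1, it->first);  // first piece starts at p1
    auto next = std::next(it);
    std::string end = (next == itup || next == boundaries.end())
                          ? p2
                          : std::min(p2, next->first);
    std::cout << "applier " << it->second << ": [" << start << ", " << end
              << ")\n";
  }
}

int main() {
  std::map<std::string, int> boundaries{{"", 0}, {"g", 1}, {"p", 2}};
  splitRange(boundaries, "c", "s");
  // applier 0: [c, g)   applier 1: [g, p)   applier 2: [p, s)
}
```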
mmm a / samples / Cpp / TestCpp / Classes / RenderTextureTest / RenderTextureTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / RenderTextureTest / RenderTextureTest . cpp <nl> RenderTextureSave : : RenderTextureSave ( ) <nl> / / note that the render texture is a Node , and contains a sprite of its texture for convience , <nl> / / so we can just parent it to the scene like any other Node <nl> this - > addChild ( _target , - 1 ) ; <nl> - <nl> - / / create a brush image to draw into the texture with <nl> - _brush = Sprite : : create ( " Images / fire . png " ) ; <nl> - _brush - > retain ( ) ; <nl> - _brush - > setColor ( Color3B : : RED ) ; <nl> - _brush - > setOpacity ( 20 ) ; <nl> <nl> auto listener = EventListenerTouchAllAtOnce : : create ( ) ; <nl> listener - > onTouchesMoved = CC_CALLBACK_2 ( RenderTextureSave : : onTouchesMoved , this ) ; <nl> void RenderTextureSave : : saveImage ( cocos2d : : Object * sender ) <nl> <nl> RenderTextureSave : : ~ RenderTextureSave ( ) <nl> { <nl> - _brush - > release ( ) ; <nl> _target - > release ( ) ; <nl> Director : : getInstance ( ) - > getTextureCache ( ) - > removeUnusedTextures ( ) ; <nl> } <nl> void RenderTextureSave : : onTouchesMoved ( const std : : vector < Touch * > & touches , Event <nl> if ( distance > 1 ) <nl> { <nl> int d = ( int ) distance ; <nl> + _brushs . clear ( ) ; <nl> + for ( int i = 0 ; i < d ; + + i ) <nl> + { <nl> + Sprite * sprite = Sprite : : create ( " Images / fire . png " ) ; <nl> + sprite - > setColor ( Color3B : : RED ) ; <nl> + sprite - > setOpacity ( 20 ) ; <nl> + _brushs . pushBack ( sprite ) ; <nl> + } <nl> for ( int i = 0 ; i < d ; i + + ) <nl> { <nl> float difx = end . x - start . x ; <nl> float dify = end . y - start . y ; <nl> float delta = ( float ) i / distance ; <nl> - _brush - > setPosition ( Point ( start . x + ( difx * delta ) , start . y + ( dify * delta ) ) ) ; <nl> - _brush - > setRotation ( rand ( ) % 360 ) ; <nl> + _brushs . at ( i ) - > setPosition ( Point ( start . x + ( difx * delta ) , start . y + ( dify * delta ) ) ) ; <nl> + _brushs . at ( i ) - > setRotation ( rand ( ) % 360 ) ; <nl> float r = ( float ) ( rand ( ) % 50 / 50 . f ) + 0 . 25f ; <nl> - _brush - > setScale ( r ) ; <nl> + _brushs . at ( i ) - > setScale ( r ) ; <nl> / * _brush - > setColor ( Color3B ( CCRANDOM_0_1 ( ) * 127 + 128 , 255 , 255 ) ) ; * / <nl> / / Use CCRANDOM_0_1 ( ) will cause error when loading libtests . so on android , I don ' t know why . <nl> - _brush - > setColor ( Color3B ( rand ( ) % 127 + 128 , 255 , 255 ) ) ; <nl> + _brushs . at ( i ) - > setColor ( Color3B ( rand ( ) % 127 + 128 , 255 , 255 ) ) ; <nl> / / Call visit to draw the brush , don ' t call draw . . <nl> - _brush - > visit ( ) ; <nl> + _brushs . at ( i ) - > visit ( ) ; <nl> } <nl> } <nl> <nl> mmm a / samples / Cpp / TestCpp / Classes / RenderTextureTest / RenderTextureTest . h <nl> ppp b / samples / Cpp / TestCpp / Classes / RenderTextureTest / RenderTextureTest . h <nl> class RenderTextureSave : public RenderTextureTest <nl> <nl> private : <nl> RenderTexture * _target ; <nl> - Sprite * _brush ; <nl> + Vector < Sprite * > _brushs ; <nl> } ; <nl> <nl> class RenderTextureIssue937 : public RenderTextureTest <nl>
remove node reuse in renderTextureSave
cocos2d/cocos2d-x
a274d350253b9157acf3a34cb6e3c2e7205568a4
2014-01-06T01:49:04Z
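One plausible reading of the cocos2d fix: with a deferred renderer, visit() queues draw commands that are executed after the touch handler returns, so mutating a single brush sprite and re-visiting it can render every stamp with the brush's final transform. A sketch of the one-node-per-stamp ownership the patch adopts — Sprite here is a stub, not the cocos2d class:

```cpp
#include <memory>
#include <vector>

// Minimal stand-in: visit() queues a draw command that reads this sprite's
// state later, when the frame is flushed.
struct Sprite {
  float x = 0, y = 0, rotation = 0, scale = 1;
  void visit() { /* enqueue a render command referencing this node */ }
};

int main() {
  std::vector<std::unique_ptr<Sprite>> brushes;
  int d = 8;  // stamps to place along the stroke this frame
  for (int i = 0; i < d; ++i) {
    auto s = std::make_unique<Sprite>();
    s->x = i * 4.0f;       // each stamp keeps its own transform...
    s->rotation = i * 45.0f;
    s->visit();            // ...so its queued command stays valid
    brushes.push_back(std::move(s));
  }
  // `brushes` must outlive the frame flush; it can be cleared next frame,
  // which is what rebuilding the Vector<Sprite*> per move event achieves.
}
```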
mmm a / src / script . cpp <nl> ppp b / src / script . cpp <nl> bool IsMine ( const CKeyStore & keystore , const CScript & scriptPubKey ) <nl> return true ; <nl> } <nl> <nl> - bool static ExtractAddressInner ( const CScript & scriptPubKey , const CKeyStore * keystore , CBitcoinAddress & addressRet ) <nl> + bool ExtractAddress ( const CScript & scriptPubKey , const CKeyStore * keystore , CBitcoinAddress & addressRet ) <nl> { <nl> vector < pair < opcodetype , valtype > > vSolution ; <nl> if ( ! Solver ( scriptPubKey , vSolution ) ) <nl> bool static ExtractAddressInner ( const CScript & scriptPubKey , const CKeyStore * ke <nl> } <nl> <nl> <nl> - bool ExtractAddress ( const CScript & scriptPubKey , const CKeyStore * keystore , CBitcoinAddress & addressRet ) <nl> - { <nl> - if ( keystore ) <nl> - return ExtractAddressInner ( scriptPubKey , keystore , addressRet ) ; <nl> - else <nl> - return ExtractAddressInner ( scriptPubKey , NULL , addressRet ) ; <nl> - return false ; <nl> - } <nl> - <nl> <nl> bool VerifyScript ( const CScript & scriptSig , const CScript & scriptPubKey , const CTransaction & txTo , unsigned int nIn , int nHashType ) <nl> { <nl>
Collapse no - op ExtractAddress / ExtractAddressInner
bitcoin/bitcoin
7e55c1ab650163011a690daf74cb1cc89bdd563b
2011-12-19T17:39:47Z
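The removed bitcoin wrapper branched on keystore only to forward it either way, then fell through to unreachable code — a pure no-op once the callee already handles null. A distilled illustration (ExtractAddressInner's body here is invented):

```cpp
#include <iostream>

struct KeyStore {};

// The callee already accepts a null keystore...
bool ExtractAddressInner(int script, const KeyStore* keystore) {
  return keystore != nullptr || script > 0;  // illustrative body only
}

// ...so a wrapper that branches on null just to forward an equivalent
// argument adds nothing and can be collapsed into the callee directly.
bool ExtractAddress(int script, const KeyStore* keystore) {
  if (keystore)
    return ExtractAddressInner(script, keystore);
  else
    return ExtractAddressInner(script, nullptr);  // same as passing keystore
  // (the original also had a dead `return false;` here -- another giveaway)
}

int main() { std::cout << ExtractAddress(1, nullptr) << "\n"; }
```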
mmm a / root . c <nl> ppp b / root . c <nl> static void * kqueue_thread ( void * arg ) <nl> w_ht_t * batch = NULL ; <nl> <nl> for ( ; ; ) { <nl> - time_t now ; <nl> + struct timeval now ; <nl> int n ; <nl> <nl> if ( ! batch ) { <nl> static void * kqueue_thread ( void * arg ) <nl> <nl> w_root_lock ( root ) ; <nl> root - > ticks + + ; <nl> - time ( & now ) ; <nl> + gettimeofday ( & now , NULL ) ; <nl> if ( w_ht_first ( batch , & iter ) ) do { <nl> w_string_t * name = ( w_string_t * ) iter . key ; <nl> <nl> w_log ( W_LOG_DBG , " kq - > % s \ n " , name - > buf ) ; <nl> - w_root_add_pending ( root , name , true , now ) ; <nl> + w_root_add_pending ( root , name , true , now , true ) ; <nl> <nl> } while ( w_ht_next ( batch , & iter ) ) ; <nl> <nl>
fixup kqueue build for settle time changes
facebook/watchman
1100a701072fa952275c8e7d44964f815b83c8ce
2012-12-07T16:51:24Z
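Switching from time()/time_t to gettimeofday()/struct timeval buys microsecond resolution for the pending-change timestamps, which a sub-second settle period requires. Minimal POSIX usage:

```cpp
#include <cstdio>
#include <sys/time.h>

int main() {
  // time() resolves to whole seconds only; gettimeofday() fills in
  // tv_usec as well, so two events inside the same second stay ordered.
  struct timeval now;
  gettimeofday(&now, nullptr);
  std::printf("%ld.%06ld\n",
              static_cast<long>(now.tv_sec),
              static_cast<long>(now.tv_usec));
}
```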
mmm a / aten / src / ATen / core / Dict . h <nl> ppp b / aten / src / ATen / core / Dict . h <nl> class Dict final { <nl> / / instead of optional < TypePtr > once types are mandatory . <nl> TypePtr keyType ( ) const ; <nl> TypePtr valueType ( ) const ; <nl> + <nl> + / / [ unsafe set type ] <nl> + / / These functions mutate the tagged type of this dictionary in place . <nl> + / / There is no checking that the members of the dictionary are instances <nl> + / / of the new types , nor is there a check that other IValues which <nl> + / / hold references to this dictionary have the right static type . <nl> + / / This functionality is used only in the unpickler , where at <nl> + / / creation type the real type of the dictionary is unknown , but <nl> + / / then later recovered from the static type information of the <nl> + / / unpickled object . <nl> + void unsafeSetKeyType ( TypePtr t ) ; <nl> + void unsafeSetValueType ( TypePtr t ) ; <nl> } ; <nl> <nl> namespace impl { <nl> mmm a / aten / src / ATen / core / Dict_inl . h <nl> ppp b / aten / src / ATen / core / Dict_inl . h <nl> template < class Key , class Value > <nl> TypePtr Dict < Key , Value > : : valueType ( ) const { <nl> return impl_ - > elementTypes . valueType ; <nl> } <nl> + template < class Key , class Value > <nl> + void Dict < Key , Value > : : unsafeSetKeyType ( TypePtr t ) { <nl> + impl_ - > elementTypes . keyType = std : : move ( t ) ; <nl> + } <nl> + <nl> + template < class Key , class Value > <nl> + void Dict < Key , Value > : : unsafeSetValueType ( TypePtr t ) { <nl> + impl_ - > elementTypes . valueType = std : : move ( t ) ; <nl> + } <nl> <nl> } <nl> mmm a / aten / src / ATen / core / List . h <nl> ppp b / aten / src / ATen / core / List . h <nl> class List final { <nl> <nl> TypePtr elementType ( ) const ; <nl> <nl> + / / See [ unsafe set type ] for why this exists . <nl> + void unsafeSetElementType ( TypePtr t ) ; <nl> + <nl> private : <nl> explicit List ( c10 : : intrusive_ptr < detail : : ListImpl < StorageT > > & & elements ) ; <nl> friend struct IValue ; <nl> mmm a / aten / src / ATen / core / List_inl . h <nl> ppp b / aten / src / ATen / core / List_inl . h <nl> size_t List < T > : : use_count ( ) const { <nl> return impl_ . use_count ( ) ; <nl> } <nl> <nl> - template < class T > <nl> + template < class T > <nl> TypePtr List < T > : : elementType ( ) const { <nl> return impl_ - > elementType ; <nl> } <nl> <nl> + template < class T > <nl> + void List < T > : : unsafeSetElementType ( TypePtr t ) { <nl> + impl_ - > elementType = std : : move ( t ) ; <nl> + } <nl> } <nl> mmm a / test / jit_utils . py <nl> ppp b / test / jit_utils . py <nl> def extract_files ( buffer ) : <nl> for a , b in zip ( code_files , code_files_2 ) : <nl> self . assertMultiLineEqual ( a , b ) <nl> <nl> + if isinstance ( m , torch . _C . ScriptModule ) : <nl> + self . assertTrue ( torch . _C . _ivalue_tags_match ( m , imported . _c ) ) <nl> + <nl> <nl> def emitFunctionHook ( self , func ) : <nl> # func has invalid names for export , skip the jitter check <nl> mmm a / torch / csrc / jit / pickler . cpp <nl> ppp b / torch / csrc / jit / pickler . cpp <nl> void Pickler : : pushTuple ( const IValue & ivalue ) { <nl> } <nl> } <nl> <nl> + / / Pickled objects are stored in a form compatible with Python pickling . <nl> + / / In torchscript List [ T ] / Dict [ K , V ] are statically typed and contain <nl> + / / dynamic type tags allow T , K , and V to be recovered . But this info <nl> + / / is not stored in the Python pickling information . 
However , we <nl> + / / can recover this information from the static type of the top - level <nl> + / / object being unpickled , because we have a record of the type of the <nl> + / / objects it contains as attributes . <nl> + / / ` IfPossible ` - we can only do this recovery when we have an object as <nl> + / / the top - level unpickled thing ( which is guarenteed for Modules , but <nl> + / / not for torch . load / torch , save ) . Otherwise we do not know the types <nl> + / / of the contained objects and cannot restore the tags . <nl> + static void restoreAccurateTypeTagsIfPossible ( const IValue & root ) { <nl> + if ( ! root . isObject ( ) ) { <nl> + return ; <nl> + } <nl> + struct Work { <nl> + TypePtr static_type ; <nl> + IValue value ; <nl> + } ; <nl> + std : : vector < Work > to_process = { { root . type ( ) , root } } ; <nl> + std : : unordered_set < const void * > scanned ; <nl> + while ( ! to_process . empty ( ) ) { <nl> + Work w = std : : move ( to_process . back ( ) ) ; <nl> + to_process . pop_back ( ) ; <nl> + / / ensure we only scan each pointer value once , otherwise this <nl> + / / can become exponential ( and if we allow recursive data in the future , <nl> + / / it would not terminiate ) . <nl> + if ( w . value . isPtrType ( ) ) { <nl> + const void * key = w . value . internalToPointer ( ) ; <nl> + auto it = scanned . find ( key ) ; <nl> + if ( it ! = scanned . end ( ) ) { <nl> + continue ; <nl> + } <nl> + scanned . emplace_hint ( it , key ) ; <nl> + } <nl> + switch ( w . static_type - > kind ( ) ) { <nl> + case TensorType : : Kind : <nl> + case NumberType : : Kind : <nl> + case FloatType : : Kind : <nl> + case IntType : : Kind : <nl> + case NoneType : : Kind : <nl> + case GeneratorType : : Kind : <nl> + case BoolType : : Kind : <nl> + case VarType : : Kind : <nl> + case CapsuleType : : Kind : <nl> + case StringType : : Kind : <nl> + case FunctionType : : Kind : <nl> + case DeviceObjType : : Kind : <nl> + / / no op , there is nothing to tag <nl> + break ; <nl> + case AnyType : : Kind : <nl> + / / if Any type does show up , we no longer have a way to precisely <nl> + / / recover the type information since the w . value may be an untagged <nl> + / / List / Dict . We should prevent objects being serialized from having the <nl> + / / Any type and if we do allow it in functions limit it to non - heap <nl> + / / locations . <nl> + TORCH_INTERNAL_ASSERT ( <nl> + false , " AnyType should not show up in the static type of objects " ) ; <nl> + case TupleType : : Kind : { <nl> + auto t = w . value . toTuple ( ) ; <nl> + auto ttype = w . static_type - > expect < TupleType > ( ) ; <nl> + for ( size_t i = 0 ; i < ttype - > containedTypes ( ) . size ( ) ; + + i ) { <nl> + Work elem = { ttype - > containedTypes ( ) . at ( i ) , t - > elements ( ) . at ( i ) } ; <nl> + to_process . emplace_back ( std : : move ( elem ) ) ; <nl> + } <nl> + } break ; <nl> + case FutureType : : Kind : { <nl> + auto f = w . value . toFuture ( ) ; <nl> + auto t = w . static_type - > expect < FutureType > ( ) ; <nl> + if ( f - > completed ( ) ) { <nl> + Work elem = { t - > getElementType ( ) , f - > value ( ) } ; <nl> + to_process . emplace_back ( std : : move ( elem ) ) ; <nl> + } <nl> + } break ; <nl> + case OptionalType : : Kind : { <nl> + if ( ! w . value . isNone ( ) ) { <nl> + auto t = w . static_type - > expect < OptionalType > ( ) ; <nl> + Work elem = { t - > getElementType ( ) , w . value } ; <nl> + to_process . 
emplace_back ( std : : move ( elem ) ) ; <nl> + } <nl> + } break ; <nl> + case ListType : : Kind : { <nl> + / / specialized lists do not need their type refined , so we can exit <nl> + / / early here <nl> + if ( ! w . value . isGenericList ( ) ) { <nl> + break ; <nl> + } <nl> + auto elem_type = w . static_type - > cast < ListType > ( ) - > getElementType ( ) ; <nl> + auto lst = w . value . toGenericList ( ) ; <nl> + lst . unsafeSetElementType ( elem_type ) ; <nl> + for ( const IValue & item : lst ) { <nl> + Work elem = { elem_type , item } ; <nl> + to_process . emplace_back ( std : : move ( elem ) ) ; <nl> + } <nl> + } break ; <nl> + case DictType : : Kind : { <nl> + auto dt = w . static_type - > cast < DictType > ( ) ; <nl> + auto d = w . value . toGenericDict ( ) ; <nl> + d . unsafeSetKeyType ( dt - > getKeyType ( ) ) ; <nl> + d . unsafeSetValueType ( dt - > getValueType ( ) ) ; <nl> + for ( const auto & item : d ) { <nl> + Work kelem = { dt - > getKeyType ( ) , item . key ( ) } ; <nl> + Work velem = { dt - > getValueType ( ) , item . value ( ) } ; <nl> + to_process . emplace_back ( std : : move ( kelem ) ) ; <nl> + to_process . emplace_back ( std : : move ( velem ) ) ; <nl> + } <nl> + } break ; <nl> + / / in both cases the dynamic type is a class , and we are going to tag with <nl> + / / the dynamic type <nl> + case InterfaceType : : Kind : <nl> + case ClassType : : Kind : { <nl> + auto obj = w . value . toObject ( ) ; <nl> + auto typ = obj - > type ( ) ; / / note : intentionally using the dynamic type , <nl> + / / the static type is potentially less accurate <nl> + for ( size_t i = 0 ; i < typ - > numAttributes ( ) ; + + i ) { <nl> + Work elem = { typ - > getAttribute ( i ) , obj - > getSlot ( i ) } ; <nl> + to_process . emplace_back ( std : : move ( elem ) ) ; <nl> + } <nl> + } ; <nl> + } <nl> + } <nl> + } <nl> + <nl> IValue Unpickler : : parse_ivalue ( ) { <nl> run ( ) ; <nl> TORCH_CHECK ( <nl> stack_ . size ( ) = = 1 , <nl> " Unpickler expected 1 element on the stack , but found " , <nl> stack_ . size ( ) ) ; <nl> + restoreAccurateTypeTagsIfPossible ( stack_ [ 0 ] ) ; <nl> <nl> return stack_ [ 0 ] ; <nl> } <nl> mmm a / torch / csrc / jit / register_prim_ops . cpp <nl> ppp b / torch / csrc / jit / register_prim_ops . cpp <nl> RegisterOperators reg ( <nl> throw std : : runtime_error ( <nl> " DictConstruct must have an even number of inputs " ) ; <nl> } <nl> - TORCH_INTERNAL_ASSERT ( node - > outputs ( ) . size ( ) = = 1 , " DictConstruct must have exactly one output " ) ; <nl> + TORCH_INTERNAL_ASSERT ( <nl> + node - > outputs ( ) . size ( ) = = 1 , <nl> + " DictConstruct must have exactly one output " ) ; <nl> TypePtr output_type = node - > outputs ( ) [ 0 ] - > type ( ) ; <nl> - TORCH_INTERNAL_ASSERT ( output_type - > kind ( ) = = TypeKind : : DictType , " DictConstruct output must be of Dict type . " ) ; <nl> - TypePtr key_type = static_cast < const DictType * > ( output_type . get ( ) ) - > getKeyType ( ) ; <nl> - TypePtr value_type = static_cast < const DictType * > ( output_type . get ( ) ) - > getValueType ( ) ; <nl> + auto dt = output_type - > expect < DictType > ( ) ; <nl> + TypePtr key_type = dt - > getKeyType ( ) ; <nl> + TypePtr value_type = dt - > getValueType ( ) ; <nl> return [ = ] ( Stack & stack ) { <nl> auto vals = c10 : : impl : : GenericDict ( key_type , value_type ) ; <nl> for ( size_t i = 0 ; i < num_inputs ; i + = 2 ) { <nl> mmm a / torch / csrc / jit / script / init . cpp <nl> ppp b / torch / csrc / jit / script / init . 
cpp <nl> void addFunctionToModule ( Module & module , const StrongFunctionPtr & func ) { <nl> module . type ( ) - > addMethod ( method ) ; <nl> } <nl> <nl> + / / this is used in our test suite to check that we correctly preserved type tags <nl> + bool ivalue_tags_match ( const Module & lhs , const Module & rhs ) { <nl> + struct Work { <nl> + IValue a ; <nl> + IValue b ; <nl> + } ; <nl> + std : : unordered_set < const void * > visited ; <nl> + std : : vector < Work > work = { { lhs . module_object ( ) , rhs . module_object ( ) } } ; <nl> + while ( ! work . empty ( ) ) { <nl> + Work item = work . back ( ) ; <nl> + work . pop_back ( ) ; <nl> + if ( item . a . isPtrType ( ) ) { <nl> + / / uncomment to debug type matching errors <nl> + / / std : : cout < < " MATCHING " < < / * item . a < < * / " ( " < < * item . a . type ( ) < < " ) " <nl> + / / < < item . a . internalToPointer ( ) < < " " < < / * item . b < < * / " ( " <nl> + / / < < * item . b . type ( ) < < " ) " < < item . b . internalToPointer ( ) < < <nl> + / / " \ n " ; <nl> + <nl> + if ( visited . count ( item . a . internalToPointer ( ) ) ) { <nl> + continue ; <nl> + } <nl> + visited . emplace ( item . a . internalToPointer ( ) ) ; <nl> + } <nl> + if ( * unshapedType ( item . a . type ( ) ) ! = * unshapedType ( item . b . type ( ) ) ) { <nl> + return false ; <nl> + } <nl> + / / check tags for objects that contain subobjects <nl> + if ( item . a . isObject ( ) ) { <nl> + auto ao = item . a . toObject ( ) ; <nl> + auto bo = item . b . toObject ( ) ; <nl> + for ( size_t i = 0 ; i < ao - > slots ( ) . size ( ) ; + + i ) { <nl> + work . emplace_back ( Work { ao - > slots ( ) . at ( i ) , bo - > slots ( ) . at ( i ) } ) ; <nl> + } <nl> + } else if ( item . a . isTuple ( ) ) { <nl> + auto at = item . a . toTuple ( ) ; <nl> + auto bt = item . b . toTuple ( ) ; <nl> + for ( size_t i = 0 ; i < at - > elements ( ) . size ( ) ; + + i ) { <nl> + work . emplace_back ( Work { at - > elements ( ) . at ( i ) , bt - > elements ( ) . at ( i ) } ) ; <nl> + } <nl> + } else if ( item . a . isGenericList ( ) ) { <nl> + auto al = item . a . toGenericList ( ) ; <nl> + auto bl = item . b . toGenericList ( ) ; <nl> + for ( size_t i = 0 ; i < al . size ( ) ; + + i ) { <nl> + work . emplace_back ( Work { al . get ( i ) , bl . get ( i ) } ) ; <nl> + } <nl> + } else if ( item . a . isGenericDict ( ) ) { <nl> + auto ad = item . a . toGenericDict ( ) ; <nl> + auto bd = item . b . toGenericDict ( ) ; <nl> + for ( auto & item : ad ) { <nl> + / / Dictionaory keys cannot contain List / Dicts that require tags <nl> + / / so we do not have to check them . <nl> + / / Furthermore without ordered dicts it is expensive to find the <nl> + / / equivalent key <nl> + work . emplace_back ( Work { item . value ( ) , bd . at ( item . key ( ) ) } ) ; <nl> + } <nl> + } else if ( item . a . isFuture ( ) ) { <nl> + auto af = item . a . toFuture ( ) ; <nl> + auto bf = item . b . toFuture ( ) ; <nl> + af - > wait ( ) ; <nl> + bf - > wait ( ) ; <nl> + work . emplace_back ( Work { af - > value ( ) , bf - > value ( ) } ) ; <nl> + } <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> void initJitScriptBindings ( PyObject * module ) { <nl> auto m = py : : handle ( module ) . cast < py : : module > ( ) ; <nl> <nl> void initJitScriptBindings ( PyObject * module ) { <nl> auto fn = cu - > create_function ( std : : move ( name ) , graph ) ; <nl> return StrongFunctionPtr ( std : : move ( cu ) , fn ) ; <nl> } ) ; <nl> + m . 
def ( " _ivalue_tags_match " , ivalue_tags_match ) ; <nl> <nl> py : : class_ < testing : : FileCheck > ( m , " FileCheck " ) <nl> . def ( py : : init < > ( ) ) <nl>
Use static type information to restore type tags ( )
pytorch/pytorch
12762cd586f2197b30a7c3ba670ff86b9726194b
2019-09-18T23:07:01Z
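The pickler change above restores lost container tags by walking every reachable IValue with an explicit worklist, copying the static types recorded on the enclosing object onto untagged Lists and Dicts, and remembering visited pointers so shared values are scanned only once. Below is a minimal self-contained sketch of that traversal pattern; the Type and Value structs are illustrative stand-ins, not PyTorch's actual IValue / Type API.

#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

struct Type {
    std::string name;
    std::vector<std::shared_ptr<Type>> contained;  // e.g. List[T] carries {T}
};

struct Value {
    std::shared_ptr<Type> tag;                     // runtime tag, possibly stale
    std::vector<std::shared_ptr<Value>> elems;     // children, if a container
};

// Depth-first worklist: overwrite each reachable value's tag with the static
// type, visiting every pointer once so shared subtrees are not re-scanned.
void restoreTags(const std::shared_ptr<Value>& root,
                 const std::shared_ptr<Type>& staticType) {
    struct Work {
        std::shared_ptr<Type> type;
        std::shared_ptr<Value> value;
    };
    std::vector<Work> todo{{staticType, root}};
    std::unordered_set<const Value*> seen;
    while (!todo.empty()) {
        Work w = std::move(todo.back());
        todo.pop_back();
        if (!seen.insert(w.value.get()).second)
            continue;                              // already retagged
        w.value->tag = w.type;                     // replace the stale tag
        const std::size_t n =
            std::min(w.value->elems.size(), w.type->contained.size());
        for (std::size_t i = 0; i < n; ++i)        // schedule the children
            todo.push_back({w.type->contained[i], w.value->elems[i]});
    }
}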
mmm a / src / video_core / texture_cache / surface_base . cpp <nl> ppp b / src / video_core / texture_cache / surface_base . cpp <nl> std : : vector < CopyParams > SurfaceBaseImpl : : BreakDownLayered ( const SurfaceParams & i <nl> for ( u32 level = 0 ; level < mipmaps ; level + + ) { <nl> const u32 width = SurfaceParams : : IntersectWidth ( params , in_params , level , level ) ; <nl> const u32 height = SurfaceParams : : IntersectHeight ( params , in_params , level , level ) ; <nl> - result . emplace_back ( width , height , layer , level ) ; <nl> + result . emplace_back ( 0 , 0 , layer , 0 , 0 , layer , level , level , width , height , 1 ) ; <nl> } <nl> } <nl> return result ; <nl>
texture_cache / surface_base : Fix layered breakdown
yuzu-emu/yuzu
f55f6ff9bbe32696da0886570833ca46c54ebd65
2020-01-27T00:48:07Z
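The one-line fix above is an overload-resolution trap: CopyParams also has a short convenience constructor, so the old emplace_back ( width , height , layer , level ) compiled cleanly but bound layer and level to the wrong parameters. A reduced illustration follows; the constructor shapes mirror the call sites in the diff, but the field names are assumptions rather than yuzu's exact definition.

#include <cstdint>
#include <vector>

using u32 = std::uint32_t;

struct CopyParams {
    // Full form: explicit source/destination coordinates, mip levels, extent.
    CopyParams(u32 sx, u32 sy, u32 sz, u32 dx, u32 dy, u32 dz,
               u32 slevel, u32 dlevel, u32 w, u32 h, u32 d)
        : source_x{sx}, source_y{sy}, source_z{sz}, dest_x{dx}, dest_y{dy},
          dest_z{dz}, source_level{slevel}, dest_level{dlevel},
          width{w}, height{h}, depth{d} {}

    // Short form: whole-level copy in place; note the third argument means
    // depth here, not a layer index.
    CopyParams(u32 w, u32 h, u32 d, u32 level)
        : source_x{0}, source_y{0}, source_z{0}, dest_x{0}, dest_y{0},
          dest_z{0}, source_level{level}, dest_level{level},
          width{w}, height{h}, depth{d} {}

    u32 source_x, source_y, source_z;
    u32 dest_x, dest_y, dest_z;
    u32 source_level, dest_level;
    u32 width, height, depth;
};

void breakDownLayer(std::vector<CopyParams>& result,
                    u32 width, u32 height, u32 layer, u32 level) {
    // Bug: silently matched the 4-argument form, so `layer` became depth.
    // result.emplace_back(width, height, layer, level);

    // Fix: spell out the full form; `layer` lands in the z coordinates and
    // the copy is one texel deep.
    result.emplace_back(0, 0, layer, 0, 0, layer, level, level,
                        width, height, 1);
}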
mmm a / doc / building - xcode - old . md <nl> ppp b / doc / building - xcode - old . md <nl> From * * / Users / user / TBuild / Libraries / qt5_3_2 / qtimageformats * * , run : <nl> <nl> Go to * * / Users / user / TBuild / Libraries / qt5_3_2 * * and run : <nl> <nl> - . / configure - prefix " / usr / local / tdesktop / Qt - 5 . 3 . 2 " - debug - and - release - force - debug - info - opensource - confirm - license - static - opengl desktop - nomake examples - nomake tests - platform macx - g + + <nl> + OPENSSL_LIBS = " / Users / user / TBuild / Libraries / openssl - xcode_oldmac / libssl . a / Users / user / TBuild / Libraries / openssl - xcode_oldmac / libcrypto . a " . / configure - prefix " / usr / local / tdesktop / Qt - 5 . 3 . 2 " - debug - and - release - force - debug - info - opensource - confirm - license - static - opengl desktop - openssl - linked - I " / Users / user / TBuild / Libraries / openssl - xcode_oldmac / include " - nomake examples - nomake tests - platform macx - g + + <nl> make - j4 <nl> sudo make - j4 install <nl> <nl>
Fix OS X 10 . 6 - 10 . 7 linking with openssl so that https : / / requests work .
telegramdesktop/tdesktop
983db3a682561e153f0929f7e3688de8493b0fd4
2017-01-16T09:40:05Z
mmm a / fdbclient / Knobs . cpp <nl> ppp b / fdbclient / Knobs . cpp <nl> <nl> # include " fdbclient / SystemData . h " <nl> # include " flow / UnitTest . h " <nl> <nl> - std : : unique_ptr < ClientKnobs > globalClientKnobs { } ; <nl> - ClientKnobs const * CLIENT_KNOBS = nullptr ; <nl> + std : : unique_ptr < ClientKnobs > globalClientKnobs = std : : make_unique < ClientKnobs > ( ) ; <nl> + ClientKnobs const * CLIENT_KNOBS = globalClientKnobs . get ( ) ; <nl> <nl> # define init ( knob , value ) initKnob ( knob , value , # knob ) <nl> <nl> mmm a / flow / Knobs . cpp <nl> ppp b / flow / Knobs . cpp <nl> <nl> # include < cmath > <nl> # include < cinttypes > <nl> <nl> - std : : unique_ptr < FlowKnobs > globalFlowKnobs { } ; <nl> - FlowKnobs const * FLOW_KNOBS = nullptr ; <nl> + std : : unique_ptr < FlowKnobs > globalFlowKnobs = std : : make_unique < FlowKnobs > ( ) ; <nl> + FlowKnobs const * FLOW_KNOBS = globalFlowKnobs . get ( ) ; <nl> <nl> # define init ( knob , value ) initKnob ( knob , value , # knob ) <nl> <nl>
Initialize client and flow knobs
apple/foundationdb
b46384e1b8dd385bc85fd7b0e8530f7b10b42393
2020-11-22T17:56:18Z
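The knobs change above swaps a null-until-configured global for initialization at the point of definition, so CLIENT_KNOBS and FLOW_KNOBS are valid before any other code runs. A stripped-down sketch of the pattern, with an illustrative knob value standing in for the real knob sets:

#include <memory>

// A knobs struct normally carries dozens of tunables; one stands in here
// (the name and value are illustrative, not FoundationDB's actual knob).
struct FlowKnobs {
    double SLOW_LOOP_CUTOFF = 15.0;
};

// Initializing both globals at their definitions (top to bottom within one
// translation unit) means FLOW_KNOBS already points at a constructed object
// when main() starts, instead of beginning life as nullptr.
std::unique_ptr<FlowKnobs> globalFlowKnobs = std::make_unique<FlowKnobs>();
FlowKnobs const* FLOW_KNOBS = globalFlowKnobs.get();

// Readers no longer need a null check; a later reconfiguration step can
// still reset globalFlowKnobs and refresh the raw view.
double slowLoopCutoff() {
    return FLOW_KNOBS->SLOW_LOOP_CUTOFF;
}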
mmm a / configure . ac <nl> ppp b / configure . ac <nl> case $ host in <nl> AC_MSG_ERROR ( " windres not found " ) <nl> fi <nl> <nl> - CPPFLAGS = " $ CPPFLAGS - D_MT - DWIN32 - D_WINDOWS - DBOOST_THREAD_USE_LIB - D__USE_MINGW_ANSI_STDIO " <nl> + CPPFLAGS = " $ CPPFLAGS - D_MT - DWIN32 - D_WINDOWS - DBOOST_THREAD_USE_LIB " <nl> LEVELDB_TARGET_FLAGS = " TARGET_OS = OS_WINDOWS_CROSSCOMPILE " <nl> if test " x $ CXXFLAGS_overridden " = " xno " ; then <nl> CXXFLAGS = " $ CXXFLAGS - w " <nl> mmm a / src / Makefile . am <nl> ppp b / src / Makefile . am <nl> BITCOIN_CORE_H = \ <nl> serialize . h \ <nl> sync . h \ <nl> threadsafety . h \ <nl> + tinyformat . h \ <nl> txdb . h \ <nl> txmempool . h \ <nl> ui_interface . h \ <nl> mmm a / src / main . cpp <nl> ppp b / src / main . cpp <nl> bool SetBestChain ( CValidationState & state , CBlockIndex * pindexNew ) <nl> pindex = pindex - > pprev ; <nl> } <nl> if ( nUpgraded > 0 ) <nl> - LogPrintf ( " SetBestChain : % d of last 100 blocks above version % d \ n " , nUpgraded , CBlock : : CURRENT_VERSION ) ; <nl> + LogPrintf ( " SetBestChain : % d of last 100 blocks above version % d \ n " , nUpgraded , ( int ) CBlock : : CURRENT_VERSION ) ; <nl> if ( nUpgraded > 100 / 2 ) <nl> / / strMiscWarning is read by GetWarnings ( ) , called by Qt and the JSON - RPC code to warn the user : <nl> strMiscWarning = _ ( " Warning : This version is obsolete , upgrade required ! " ) ; <nl> new file mode 100644 <nl> index 000000000000 . . 04b51f0adcd4 <nl> mmm / dev / null <nl> ppp b / src / tinyformat . h <nl> <nl> + / / tinyformat . h <nl> + / / Copyright ( C ) 2011 , Chris Foster [ chris42f ( at ) gmail ( d0t ) com ] <nl> + / / <nl> + / / Boost Software License - Version 1 . 0 <nl> + / / <nl> + / / Permission is hereby granted , free of charge , to any person or organization <nl> + / / obtaining a copy of the software and accompanying documentation covered by <nl> + / / this license ( the " Software " ) to use , reproduce , display , distribute , <nl> + / / execute , and transmit the Software , and to prepare derivative works of the <nl> + / / Software , and to permit third - parties to whom the Software is furnished to <nl> + / / do so , all subject to the following : <nl> + / / <nl> + / / The copyright notices in the Software and this entire statement , including <nl> + / / the above license grant , this restriction and the following disclaimer , <nl> + / / must be included in all copies of the Software , in whole or in part , and <nl> + / / all derivative works of the Software , unless such copies or derivative <nl> + / / works are solely in the form of machine - executable object code generated by <nl> + / / a source language processor . <nl> + / / <nl> + / / THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + / / IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + / / FITNESS FOR A PARTICULAR PURPOSE , TITLE AND NON - INFRINGEMENT . IN NO EVENT <nl> + / / SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE <nl> + / / FOR ANY DAMAGES OR OTHER LIABILITY , WHETHER IN CONTRACT , TORT OR OTHERWISE , <nl> + / / ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER <nl> + / / DEALINGS IN THE SOFTWARE . <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Tinyformat : A minimal type safe printf replacement <nl> + / / <nl> + / / tinyformat . 
h is a type safe printf replacement library in a single C + + <nl> + / / header file . Design goals include : <nl> + / / <nl> + / / * Type safety and extensibility for user defined types . <nl> + / / * C99 printf ( ) compatibility , to the extent possible using std : : ostream <nl> + / / * Simplicity and minimalism . A single header file to include and distribute <nl> + / / with your projects . <nl> + / / * Augment rather than replace the standard stream formatting mechanism <nl> + / / * C + + 98 support , with optional C + + 11 niceties <nl> + / / <nl> + / / <nl> + / / Main interface example usage <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + / / <nl> + / / To print a date to std : : cout : <nl> + / / <nl> + / / std : : string weekday = " Wednesday " ; <nl> + / / const char * month = " July " ; <nl> + / / size_t day = 27 ; <nl> + / / long hour = 14 ; <nl> + / / int min = 44 ; <nl> + / / <nl> + / / tfm : : printf ( " % s , % s % d , % . 2d : % . 2d \ n " , weekday , month , day , hour , min ) ; <nl> + / / <nl> + / / The strange types here emphasize the type safety of the interface ; it is <nl> + / / possible to print a std : : string using the " % s " conversion , and a <nl> + / / size_t using the " % d " conversion . A similar result could be achieved <nl> + / / using either of the tfm : : format ( ) functions . One prints on a user provided <nl> + / / stream : <nl> + / / <nl> + / / tfm : : format ( std : : cerr , " % s , % s % d , % . 2d : % . 2d \ n " , <nl> + / / weekday , month , day , hour , min ) ; <nl> + / / <nl> + / / The other returns a std : : string : <nl> + / / <nl> + / / std : : string date = tfm : : format ( " % s , % s % d , % . 2d : % . 2d \ n " , <nl> + / / weekday , month , day , hour , min ) ; <nl> + / / std : : cout < < date ; <nl> + / / <nl> + / / These are the three primary interface functions . <nl> + / / <nl> + / / <nl> + / / User defined format functions <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / <nl> + / / Simulating variadic templates in C + + 98 is pretty painful since it requires <nl> + / / writing out the same function for each desired number of arguments . To make <nl> + / / this bearable tinyformat comes with a set of macros which are used <nl> + / / internally to generate the API , but which may also be used in user code . <nl> + / / <nl> + / / The three macros TINYFORMAT_ARGTYPES ( n ) , TINYFORMAT_VARARGS ( n ) and <nl> + / / TINYFORMAT_PASSARGS ( n ) will generate a list of n argument types , <nl> + / / type / name pairs and argument names respectively when called with an integer <nl> + / / n between 1 and 16 . We can use these to define a macro which generates the <nl> + / / desired user defined function with n arguments . To generate all 16 user <nl> + / / defined function bodies , use the macro TINYFORMAT_FOREACH_ARGNUM . For an <nl> + / / example , see the implementation of printf ( ) at the end of the source file . <nl> + / / <nl> + / / <nl> + / / Additional API information <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / <nl> + / / Error handling : Define TINYFORMAT_ERROR to customize the error handling for <nl> + / / format strings which are unsupported or have the wrong number of format <nl> + / / specifiers ( calls assert ( ) by default ) . <nl> + / / <nl> + / / User defined types : Uses operator < < for user defined types by default . <nl> + / / Overload formatValue ( ) for more control . 
<nl> + <nl> + <nl> + # ifndef TINYFORMAT_H_INCLUDED <nl> + # define TINYFORMAT_H_INCLUDED <nl> + <nl> + namespace tinyformat { } <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Config section . Customize to your liking ! <nl> + <nl> + / / Namespace alias to encourage brevity <nl> + namespace tfm = tinyformat ; <nl> + <nl> + / / Error handling ; calls assert ( ) by default . <nl> + / / # define TINYFORMAT_ERROR ( reasonString ) your_error_handler ( reasonString ) <nl> + <nl> + / / Define for C + + 11 variadic templates which make the code shorter & more <nl> + / / general . If you don ' t define this , C + + 11 support is autodetected below . <nl> + / / # define TINYFORMAT_USE_VARIADIC_TEMPLATES <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Implementation details . <nl> + # include < cassert > <nl> + # include < iostream > <nl> + # include < sstream > <nl> + <nl> + # ifndef TINYFORMAT_ERROR <nl> + # define TINYFORMAT_ERROR ( reason ) assert ( 0 & & reason ) <nl> + # endif <nl> + <nl> + # if ! defined ( TINYFORMAT_USE_VARIADIC_TEMPLATES ) & & ! defined ( TINYFORMAT_NO_VARIADIC_TEMPLATES ) <nl> + # ifdef __GXX_EXPERIMENTAL_CXX0X__ <nl> + # define TINYFORMAT_USE_VARIADIC_TEMPLATES <nl> + # endif <nl> + # endif <nl> + <nl> + # ifdef __GNUC__ <nl> + # define TINYFORMAT_NOINLINE __attribute__ ( ( noinline ) ) <nl> + # elif defined ( _MSC_VER ) <nl> + # define TINYFORMAT_NOINLINE __declspec ( noinline ) <nl> + # else <nl> + # define TINYFORMAT_NOINLINE <nl> + # endif <nl> + <nl> + # if defined ( __GLIBCXX__ ) & & __GLIBCXX__ < 20080201 <nl> + / / std : : showpos is broken on old libstdc + + as provided with OSX . See <nl> + / / http : / / gcc . gnu . org / ml / libstdc + + / 2007 - 11 / msg00075 . html <nl> + # define TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND <nl> + # endif <nl> + <nl> + namespace tinyformat { <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + namespace detail { <nl> + <nl> + / / Test whether type T1 is convertible to type T2 <nl> + template < typename T1 , typename T2 > <nl> + struct is_convertible <nl> + { <nl> + private : <nl> + / / two types of different size <nl> + struct fail { char dummy [ 2 ] ; } ; <nl> + struct succeed { char dummy ; } ; <nl> + / / Try to convert a T1 to a T2 by plugging into tryConvert <nl> + static fail tryConvert ( . . . ) ; <nl> + static succeed tryConvert ( const T2 & ) ; <nl> + static const T1 & makeT1 ( ) ; <nl> + public : <nl> + # ifdef _MSC_VER <nl> + / / Disable spurious loss of precision warnings in tryConvert ( makeT1 ( ) ) <nl> + # pragma warning ( push ) <nl> + # pragma warning ( disable : 4244 ) <nl> + # pragma warning ( disable : 4267 ) <nl> + # endif <nl> + / / Standard trick : the ( . . . ) version of tryConvert will be chosen from <nl> + / / the overload set only if the version taking a T2 doesn ' t match . <nl> + / / Then we compare the sizes of the return types to check which <nl> + / / function matched . 
Very neat , in a disgusting kind of way : ) <nl> + static const bool value = <nl> + sizeof ( tryConvert ( makeT1 ( ) ) ) = = sizeof ( succeed ) ; <nl> + # ifdef _MSC_VER <nl> + # pragma warning ( pop ) <nl> + # endif <nl> + } ; <nl> + <nl> + <nl> + / / Detect when a type is not a wchar_t string <nl> + template < typename T > struct is_wchar { typedef int tinyformat_wchar_is_not_supported ; } ; <nl> + template < > struct is_wchar < wchar_t * > { } ; <nl> + template < > struct is_wchar < const wchar_t * > { } ; <nl> + template < int n > struct is_wchar < const wchar_t [ n ] > { } ; <nl> + template < int n > struct is_wchar < wchar_t [ n ] > { } ; <nl> + <nl> + <nl> + / / Format the value by casting to type fmtT . This default implementation <nl> + / / should never be called . <nl> + template < typename T , typename fmtT , bool convertible = is_convertible < T , fmtT > : : value > <nl> + struct formatValueAsType <nl> + { <nl> + static void invoke ( std : : ostream & / * out * / , const T & / * value * / ) { assert ( 0 ) ; } <nl> + } ; <nl> + / / Specialized version for types that can actually be converted to fmtT , as <nl> + / / indicated by the " convertible " template parameter . <nl> + template < typename T , typename fmtT > <nl> + struct formatValueAsType < T , fmtT , true > <nl> + { <nl> + static void invoke ( std : : ostream & out , const T & value ) <nl> + { out < < static_cast < fmtT > ( value ) ; } <nl> + } ; <nl> + <nl> + # ifdef TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND <nl> + template < typename T , bool convertible = is_convertible < T , int > : : value > <nl> + struct formatZeroIntegerWorkaround <nl> + { <nl> + static bool invoke ( std : : ostream & / * * / , const T & / * * / ) { return false ; } <nl> + } ; <nl> + template < typename T > <nl> + struct formatZeroIntegerWorkaround < T , true > <nl> + { <nl> + static bool invoke ( std : : ostream & out , const T & value ) <nl> + { <nl> + if ( static_cast < int > ( value ) = = 0 & & out . flags ( ) & std : : ios : : showpos ) <nl> + { <nl> + out < < " + 0 " ; <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + } ; <nl> + # endif / / TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND <nl> + <nl> + / / Convert an arbitrary type to integer . The version with convertible = false <nl> + / / throws an error . <nl> + template < typename T , bool convertible = is_convertible < T , int > : : value > <nl> + struct convertToInt <nl> + { <nl> + static int invoke ( const T & / * value * / ) <nl> + { <nl> + TINYFORMAT_ERROR ( " tinyformat : Cannot convert from argument type to " <nl> + " integer for use as variable width or precision " ) ; <nl> + return 0 ; <nl> + } <nl> + } ; <nl> + / / Specialization for convertToInt when conversion is possible <nl> + template < typename T > <nl> + struct convertToInt < T , true > <nl> + { <nl> + static int invoke ( const T & value ) { return static_cast < int > ( value ) ; } <nl> + } ; <nl> + <nl> + } / / namespace detail <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Variable formatting functions . May be overridden for user - defined types if <nl> + / / desired . <nl> + <nl> + <nl> + / / Format a value into a stream . Called from format ( ) for all types by default . <nl> + / / <nl> + / / Users may override this for their own types . When this function is called , <nl> + / / the stream flags will have been modified according to the format string . <nl> + / / The format specification is provided in the range [ fmtBegin , fmtEnd ) . 
<nl> + / / <nl> + / / By default , formatValue ( ) uses the usual stream insertion operator <nl> + / / operator < < to format the type T , with special cases for the % c and % p <nl> + / / conversions . <nl> + template < typename T > <nl> + inline void formatValue ( std : : ostream & out , const char * / * fmtBegin * / , <nl> + const char * fmtEnd , const T & value ) <nl> + { <nl> + # ifndef TINYFORMAT_ALLOW_WCHAR_STRINGS <nl> + / / Since we don ' t support printing of wchar_t using " % ls " , make it fail at <nl> + / / compile time in preference to printing as a void * at runtime . <nl> + typedef typename detail : : is_wchar < T > : : tinyformat_wchar_is_not_supported DummyType ; <nl> + ( void ) DummyType ( ) ; / / avoid unused type warning with gcc - 4 . 8 <nl> + # endif <nl> + / / The mess here is to support the % c and % p conversions : if these <nl> + / / conversions are active we try to convert the type to a char or const <nl> + / / void * respectively and format that instead of the value itself . For the <nl> + / / % p conversion it ' s important to avoid dereferencing the pointer , which <nl> + / / could otherwise lead to a crash when printing a dangling ( const char * ) . <nl> + const bool canConvertToChar = detail : : is_convertible < T , char > : : value ; <nl> + const bool canConvertToVoidPtr = detail : : is_convertible < T , const void * > : : value ; <nl> + if ( canConvertToChar & & * ( fmtEnd - 1 ) = = ' c ' ) <nl> + detail : : formatValueAsType < T , char > : : invoke ( out , value ) ; <nl> + else if ( canConvertToVoidPtr & & * ( fmtEnd - 1 ) = = ' p ' ) <nl> + detail : : formatValueAsType < T , const void * > : : invoke ( out , value ) ; <nl> + # ifdef TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND <nl> + else if ( detail : : formatZeroIntegerWorkaround < T > : : invoke ( out , value ) ) / * * / ; <nl> + # endif <nl> + else <nl> + out < < value ; <nl> + } <nl> + <nl> + <nl> + / / Overloaded version for char types to support printing as an integer <nl> + # define TINYFORMAT_DEFINE_FORMATVALUE_CHAR ( charType ) \ <nl> + inline void formatValue ( std : : ostream & out , const char * / * fmtBegin * / , \ <nl> + const char * fmtEnd , charType value ) \ <nl> + { \ <nl> + switch ( * ( fmtEnd - 1 ) ) \ <nl> + { \ <nl> + case ' u ' : case ' d ' : case ' i ' : case ' o ' : case ' X ' : case ' x ' : \ <nl> + out < < static_cast < int > ( value ) ; break ; \ <nl> + default : \ <nl> + out < < value ; break ; \ <nl> + } \ <nl> + } <nl> + / / per 3 . 9 . 1 : char , signed char and unsigned char are all distinct types <nl> + TINYFORMAT_DEFINE_FORMATVALUE_CHAR ( char ) <nl> + TINYFORMAT_DEFINE_FORMATVALUE_CHAR ( signed char ) <nl> + TINYFORMAT_DEFINE_FORMATVALUE_CHAR ( unsigned char ) <nl> + # undef TINYFORMAT_DEFINE_FORMATVALUE_CHAR <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Tools for emulating variadic templates in C + + 98 . The basic idea here is <nl> + / / stolen from the boost preprocessor metaprogramming library and cut down to <nl> + / / be just general enough for what we need . <nl> + <nl> + # define TINYFORMAT_ARGTYPES ( n ) TINYFORMAT_ARGTYPES_ # # n <nl> + # define TINYFORMAT_VARARGS ( n ) TINYFORMAT_VARARGS_ # # n <nl> + # define TINYFORMAT_PASSARGS ( n ) TINYFORMAT_PASSARGS_ # # n <nl> + # define TINYFORMAT_PASSARGS_TAIL ( n ) TINYFORMAT_PASSARGS_TAIL_ # # n <nl> + <nl> + / / To keep it as transparent as possible , the macros below have been generated <nl> + / / using python via the excellent cog . 
py code generation script . This avoids <nl> + / / the need for a bunch of complex ( but more general ) preprocessor tricks as <nl> + / / used in boost . preprocessor . <nl> + / / <nl> + / / To rerun the code generation in place , use ` cog . py - r tinyformat . h ` <nl> + / / ( see http : / / nedbatchelder . com / code / cog ) . Alternatively you can just create <nl> + / / extra versions by hand . <nl> + <nl> + / * [ [ [ cog <nl> + maxParams = 16 <nl> + <nl> + def makeCommaSepLists ( lineTemplate , elemTemplate , startInd = 1 ) : <nl> + for j in range ( startInd , maxParams + 1 ) : <nl> + list = ' , ' . join ( [ elemTemplate % { ' i ' : i } for i in range ( startInd , j + 1 ) ] ) <nl> + cog . outl ( lineTemplate % { ' j ' : j , ' list ' : list } ) <nl> + <nl> + makeCommaSepLists ( ' # define TINYFORMAT_ARGTYPES_ % ( j ) d % ( list ) s ' , <nl> + ' class T % ( i ) d ' ) <nl> + <nl> + cog . outl ( ) <nl> + makeCommaSepLists ( ' # define TINYFORMAT_VARARGS_ % ( j ) d % ( list ) s ' , <nl> + ' const T % ( i ) d & v % ( i ) d ' ) <nl> + <nl> + cog . outl ( ) <nl> + makeCommaSepLists ( ' # define TINYFORMAT_PASSARGS_ % ( j ) d % ( list ) s ' , ' v % ( i ) d ' ) <nl> + <nl> + cog . outl ( ) <nl> + cog . outl ( ' # define TINYFORMAT_PASSARGS_TAIL_1 ' ) <nl> + makeCommaSepLists ( ' # define TINYFORMAT_PASSARGS_TAIL_ % ( j ) d , % ( list ) s ' , <nl> + ' v % ( i ) d ' , startInd = 2 ) <nl> + <nl> + cog . outl ( ) <nl> + cog . outl ( ' # define TINYFORMAT_FOREACH_ARGNUM ( m ) \ \ \ n ' + <nl> + ' ' . join ( [ ' m ( % d ) ' % ( j , ) for j in range ( 1 , maxParams + 1 ) ] ) ) <nl> + ] ] ] * / <nl> + # define TINYFORMAT_ARGTYPES_1 class T1 <nl> + # define TINYFORMAT_ARGTYPES_2 class T1 , class T2 <nl> + # define TINYFORMAT_ARGTYPES_3 class T1 , class T2 , class T3 <nl> + # define TINYFORMAT_ARGTYPES_4 class T1 , class T2 , class T3 , class T4 <nl> + # define TINYFORMAT_ARGTYPES_5 class T1 , class T2 , class T3 , class T4 , class T5 <nl> + # define TINYFORMAT_ARGTYPES_6 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 <nl> + # define TINYFORMAT_ARGTYPES_7 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 <nl> + # define TINYFORMAT_ARGTYPES_8 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 <nl> + # define TINYFORMAT_ARGTYPES_9 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 <nl> + # define TINYFORMAT_ARGTYPES_10 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 <nl> + # define TINYFORMAT_ARGTYPES_11 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 <nl> + # define TINYFORMAT_ARGTYPES_12 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 <nl> + # define TINYFORMAT_ARGTYPES_13 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 <nl> + # define TINYFORMAT_ARGTYPES_14 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 <nl> + # define TINYFORMAT_ARGTYPES_15 class T1 , class T2 , class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 <nl> + # define TINYFORMAT_ARGTYPES_16 class T1 , class T2 
, class T3 , class T4 , class T5 , class T6 , class T7 , class T8 , class T9 , class T10 , class T11 , class T12 , class T13 , class T14 , class T15 , class T16 <nl> + <nl> + # define TINYFORMAT_VARARGS_1 const T1 & v1 <nl> + # define TINYFORMAT_VARARGS_2 const T1 & v1 , const T2 & v2 <nl> + # define TINYFORMAT_VARARGS_3 const T1 & v1 , const T2 & v2 , const T3 & v3 <nl> + # define TINYFORMAT_VARARGS_4 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 <nl> + # define TINYFORMAT_VARARGS_5 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 <nl> + # define TINYFORMAT_VARARGS_6 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 <nl> + # define TINYFORMAT_VARARGS_7 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 <nl> + # define TINYFORMAT_VARARGS_8 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 <nl> + # define TINYFORMAT_VARARGS_9 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 <nl> + # define TINYFORMAT_VARARGS_10 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 , const T10 & v10 <nl> + # define TINYFORMAT_VARARGS_11 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 , const T10 & v10 , const T11 & v11 <nl> + # define TINYFORMAT_VARARGS_12 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 , const T10 & v10 , const T11 & v11 , const T12 & v12 <nl> + # define TINYFORMAT_VARARGS_13 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 , const T10 & v10 , const T11 & v11 , const T12 & v12 , const T13 & v13 <nl> + # define TINYFORMAT_VARARGS_14 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 , const T10 & v10 , const T11 & v11 , const T12 & v12 , const T13 & v13 , const T14 & v14 <nl> + # define TINYFORMAT_VARARGS_15 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 , const T10 & v10 , const T11 & v11 , const T12 & v12 , const T13 & v13 , const T14 & v14 , const T15 & v15 <nl> + # define TINYFORMAT_VARARGS_16 const T1 & v1 , const T2 & v2 , const T3 & v3 , const T4 & v4 , const T5 & v5 , const T6 & v6 , const T7 & v7 , const T8 & v8 , const T9 & v9 , const T10 & v10 , const T11 & v11 , const T12 & v12 , const T13 & v13 , const T14 & v14 , const T15 & v15 , const T16 & v16 <nl> + <nl> + # define TINYFORMAT_PASSARGS_1 v1 <nl> + # define TINYFORMAT_PASSARGS_2 v1 , v2 <nl> + # define TINYFORMAT_PASSARGS_3 v1 , v2 , v3 <nl> + # define TINYFORMAT_PASSARGS_4 v1 , v2 , v3 , v4 <nl> + # define TINYFORMAT_PASSARGS_5 v1 , v2 , v3 , v4 , v5 <nl> + # define TINYFORMAT_PASSARGS_6 v1 , v2 , v3 , v4 , v5 , v6 <nl> + # define TINYFORMAT_PASSARGS_7 v1 , v2 , v3 , v4 , v5 , v6 , v7 <nl> + # define TINYFORMAT_PASSARGS_8 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 <nl> + # define TINYFORMAT_PASSARGS_9 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 <nl> + # define 
TINYFORMAT_PASSARGS_10 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 <nl> + # define TINYFORMAT_PASSARGS_11 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 <nl> + # define TINYFORMAT_PASSARGS_12 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 <nl> + # define TINYFORMAT_PASSARGS_13 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 <nl> + # define TINYFORMAT_PASSARGS_14 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 , v14 <nl> + # define TINYFORMAT_PASSARGS_15 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 , v14 , v15 <nl> + # define TINYFORMAT_PASSARGS_16 v1 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 , v14 , v15 , v16 <nl> + <nl> + # define TINYFORMAT_PASSARGS_TAIL_1 <nl> + # define TINYFORMAT_PASSARGS_TAIL_2 , v2 <nl> + # define TINYFORMAT_PASSARGS_TAIL_3 , v2 , v3 <nl> + # define TINYFORMAT_PASSARGS_TAIL_4 , v2 , v3 , v4 <nl> + # define TINYFORMAT_PASSARGS_TAIL_5 , v2 , v3 , v4 , v5 <nl> + # define TINYFORMAT_PASSARGS_TAIL_6 , v2 , v3 , v4 , v5 , v6 <nl> + # define TINYFORMAT_PASSARGS_TAIL_7 , v2 , v3 , v4 , v5 , v6 , v7 <nl> + # define TINYFORMAT_PASSARGS_TAIL_8 , v2 , v3 , v4 , v5 , v6 , v7 , v8 <nl> + # define TINYFORMAT_PASSARGS_TAIL_9 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 <nl> + # define TINYFORMAT_PASSARGS_TAIL_10 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 <nl> + # define TINYFORMAT_PASSARGS_TAIL_11 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 <nl> + # define TINYFORMAT_PASSARGS_TAIL_12 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 <nl> + # define TINYFORMAT_PASSARGS_TAIL_13 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 <nl> + # define TINYFORMAT_PASSARGS_TAIL_14 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 , v14 <nl> + # define TINYFORMAT_PASSARGS_TAIL_15 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 , v14 , v15 <nl> + # define TINYFORMAT_PASSARGS_TAIL_16 , v2 , v3 , v4 , v5 , v6 , v7 , v8 , v9 , v10 , v11 , v12 , v13 , v14 , v15 , v16 <nl> + <nl> + # define TINYFORMAT_FOREACH_ARGNUM ( m ) \ <nl> + m ( 1 ) m ( 2 ) m ( 3 ) m ( 4 ) m ( 5 ) m ( 6 ) m ( 7 ) m ( 8 ) m ( 9 ) m ( 10 ) m ( 11 ) m ( 12 ) m ( 13 ) m ( 14 ) m ( 15 ) m ( 16 ) <nl> + / / [ [ [ end ] ] ] <nl> + <nl> + <nl> + <nl> + namespace detail { <nl> + <nl> + / / Class holding current position in format string and an output stream into <nl> + / / which arguments are formatted . <nl> + class FormatIterator <nl> + { <nl> + public : <nl> + / / Flags for features not representable with standard stream state <nl> + enum ExtraFormatFlags <nl> + { <nl> + Flag_None = 0 , <nl> + Flag_TruncateToPrecision = 1 < < 0 , / / truncate length to stream precision ( ) <nl> + Flag_SpacePadPositive = 1 < < 1 , / / pad positive values with spaces <nl> + Flag_VariableWidth = 1 < < 2 , / / variable field width in arg list <nl> + Flag_VariablePrecision = 1 < < 3 / / variable field precision in arg list <nl> + } ; <nl> + <nl> + / / out is the output stream , fmt is the full format string <nl> + FormatIterator ( std : : ostream & out , const char * fmt ) <nl> + : m_out ( out ) , <nl> + m_fmt ( fmt ) , <nl> + m_extraFlags ( Flag_None ) , <nl> + m_wantWidth ( false ) , <nl> + m_wantPrecision ( false ) , <nl> + m_variableWidth ( 0 ) , <nl> + m_variablePrecision ( 0 ) , <nl> + m_origWidth ( out . width ( ) ) , <nl> + m_origPrecision ( out . precision ( ) ) , <nl> + m_origFlags ( out . flags ( ) ) , <nl> + m_origFill ( out . 
fill ( ) ) <nl> + { } <nl> + <nl> + / / Print remaining part of format string . <nl> + void finish ( ) <nl> + { <nl> + / / It would be nice if we could do this from the destructor , but we <nl> + / / can ' t if TINYFORMAT_ERROR is used to throw an exception ! <nl> + m_fmt = printFormatStringLiteral ( m_out , m_fmt ) ; <nl> + if ( * m_fmt ! = ' \ 0 ' ) <nl> + TINYFORMAT_ERROR ( " tinyformat : Too many conversion specifiers in format string " ) ; <nl> + } <nl> + <nl> + ~ FormatIterator ( ) <nl> + { <nl> + / / Restore stream state <nl> + m_out . width ( m_origWidth ) ; <nl> + m_out . precision ( m_origPrecision ) ; <nl> + m_out . flags ( m_origFlags ) ; <nl> + m_out . fill ( m_origFill ) ; <nl> + } <nl> + <nl> + template < typename T > <nl> + void accept ( const T & value ) ; <nl> + <nl> + private : <nl> + / / Parse and return an integer from the string c , as atoi ( ) <nl> + / / On return , c is set to one past the end of the integer . <nl> + static int parseIntAndAdvance ( const char * & c ) <nl> + { <nl> + int i = 0 ; <nl> + for ( ; * c > = ' 0 ' & & * c < = ' 9 ' ; + + c ) <nl> + i = 10 * i + ( * c - ' 0 ' ) ; <nl> + return i ; <nl> + } <nl> + <nl> + / / Format at most truncLen characters of a C string to the given <nl> + / / stream . Return true if formatting proceeded ( generic version always <nl> + / / returns false ) <nl> + template < typename T > <nl> + static bool formatCStringTruncate ( std : : ostream & / * out * / , const T & / * value * / , <nl> + std : : streamsize / * truncLen * / ) <nl> + { <nl> + return false ; <nl> + } <nl> + # define TINYFORMAT_DEFINE_FORMAT_C_STRING_TRUNCATE ( type ) \ <nl> + static bool formatCStringTruncate ( std : : ostream & out , type * value , \ <nl> + std : : streamsize truncLen ) \ <nl> + { \ <nl> + std : : streamsize len = 0 ; \ <nl> + while ( len < truncLen & & value [ len ] ! = 0 ) \ <nl> + + + len ; \ <nl> + out . write ( value , len ) ; \ <nl> + return true ; \ <nl> + } <nl> + / / Overload for const char * and char * . Could overload for signed & <nl> + / / unsigned char too , but these are technically unneeded for printf <nl> + / / compatibility . <nl> + TINYFORMAT_DEFINE_FORMAT_C_STRING_TRUNCATE ( const char ) <nl> + TINYFORMAT_DEFINE_FORMAT_C_STRING_TRUNCATE ( char ) <nl> + # undef TINYFORMAT_DEFINE_FORMAT_C_STRING_TRUNCATE <nl> + <nl> + / / Print literal part of format string and return next format spec <nl> + / / position . <nl> + / / <nl> + / / Skips over any occurrences of ' % % ' , printing a literal ' % ' to the <nl> + / / output . The position of the first % character of the next <nl> + / / nontrivial format spec is returned , or the end of string . <nl> + static const char * printFormatStringLiteral ( std : : ostream & out , <nl> + const char * fmt ) <nl> + { <nl> + const char * c = fmt ; <nl> + for ( ; true ; + + c ) <nl> + { <nl> + switch ( * c ) <nl> + { <nl> + case ' \ 0 ' : <nl> + out . write ( fmt , static_cast < std : : streamsize > ( c - fmt ) ) ; <nl> + return c ; <nl> + case ' % ' : <nl> + out . write ( fmt , static_cast < std : : streamsize > ( c - fmt ) ) ; <nl> + if ( * ( c + 1 ) ! = ' % ' ) <nl> + return c ; <nl> + / / for " % % " , tack trailing % onto next literal section .
<nl> + fmt = + + c ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + static const char * streamStateFromFormat ( std : : ostream & out , <nl> + unsigned int & extraFlags , <nl> + const char * fmtStart , <nl> + int variableWidth , <nl> + int variablePrecision ) ; <nl> + <nl> + / / Private copy & assign : Kill gcc warnings with - Weffc + + <nl> + FormatIterator ( const FormatIterator & ) ; <nl> + FormatIterator & operator = ( const FormatIterator & ) ; <nl> + <nl> + / / Stream , current format string & state <nl> + std : : ostream & m_out ; <nl> + const char * m_fmt ; <nl> + unsigned int m_extraFlags ; <nl> + / / State machine info for handling of variable width & precision <nl> + bool m_wantWidth ; <nl> + bool m_wantPrecision ; <nl> + int m_variableWidth ; <nl> + int m_variablePrecision ; <nl> + / / Saved stream state <nl> + std : : streamsize m_origWidth ; <nl> + std : : streamsize m_origPrecision ; <nl> + std : : ios : : fmtflags m_origFlags ; <nl> + char m_origFill ; <nl> + } ; <nl> + <nl> + <nl> + / / Accept a value for formatting into the internal stream . <nl> + template < typename T > <nl> + TINYFORMAT_NOINLINE / / < greatly reduces bloat in optimized builds <nl> + void FormatIterator : : accept ( const T & value ) <nl> + { <nl> + / / Parse the format string <nl> + const char * fmtEnd = 0 ; <nl> + if ( m_extraFlags = = Flag_None & & ! m_wantWidth & & ! m_wantPrecision ) <nl> + { <nl> + m_fmt = printFormatStringLiteral ( m_out , m_fmt ) ; <nl> + fmtEnd = streamStateFromFormat ( m_out , m_extraFlags , m_fmt , 0 , 0 ) ; <nl> + m_wantWidth = ( m_extraFlags & Flag_VariableWidth ) ! = 0 ; <nl> + m_wantPrecision = ( m_extraFlags & Flag_VariablePrecision ) ! = 0 ; <nl> + } <nl> + / / Consume value as variable width and precision specifier if necessary <nl> + if ( m_extraFlags & ( Flag_VariableWidth | Flag_VariablePrecision ) ) <nl> + { <nl> + if ( m_wantWidth | | m_wantPrecision ) <nl> + { <nl> + int v = convertToInt < T > : : invoke ( value ) ; <nl> + if ( m_wantWidth ) <nl> + { <nl> + m_variableWidth = v ; <nl> + m_wantWidth = false ; <nl> + } <nl> + else if ( m_wantPrecision ) <nl> + { <nl> + m_variablePrecision = v ; <nl> + m_wantPrecision = false ; <nl> + } <nl> + return ; <nl> + } <nl> + / / If we get here , we ' ve set both the variable precision and width as <nl> + / / required and we need to rerun the stream state setup to insert these . <nl> + fmtEnd = streamStateFromFormat ( m_out , m_extraFlags , m_fmt , <nl> + m_variableWidth , m_variablePrecision ) ; <nl> + } <nl> + <nl> + / / Format the value into the stream . <nl> + if ( ! ( m_extraFlags & ( Flag_SpacePadPositive | Flag_TruncateToPrecision ) ) ) <nl> + formatValue ( m_out , m_fmt , fmtEnd , value ) ; <nl> + else <nl> + { <nl> + / / The following are special cases where there ' s no direct <nl> + / / correspondence between stream formatting and the printf ( ) behaviour . <nl> + / / Instead , we simulate the behaviour crudely by formatting into a <nl> + / / temporary string stream and munging the resulting string . <nl> + std : : ostringstream tmpStream ; <nl> + tmpStream . copyfmt ( m_out ) ; <nl> + if ( m_extraFlags & Flag_SpacePadPositive ) <nl> + tmpStream . setf ( std : : ios : : showpos ) ; <nl> + / / formatCStringTruncate is required for truncating conversions like <nl> + / / " % . 4s " where at most 4 characters of the c - string should be read . <nl> + / / If we didn ' t include this special case , we might read off the end . <nl> + if ( ! 
( ( m_extraFlags & Flag_TruncateToPrecision ) & & <nl> + formatCStringTruncate ( tmpStream , value , m_out . precision ( ) ) ) ) <nl> + { <nl> + / / Not a truncated c - string ; just format normally . <nl> + formatValue ( tmpStream , m_fmt , fmtEnd , value ) ; <nl> + } <nl> + std : : string result = tmpStream . str ( ) ; / / allocates . . . yuck . <nl> + if ( m_extraFlags & Flag_SpacePadPositive ) <nl> + { <nl> + for ( size_t i = 0 , iend = result . size ( ) ; i < iend ; + + i ) <nl> + if ( result [ i ] = = ' + ' ) <nl> + result [ i ] = ' ' ; <nl> + } <nl> + if ( ( m_extraFlags & Flag_TruncateToPrecision ) & & <nl> + ( int ) result . size ( ) > ( int ) m_out . precision ( ) ) <nl> + m_out . write ( result . c_str ( ) , m_out . precision ( ) ) ; <nl> + else <nl> + m_out < < result ; <nl> + } <nl> + m_extraFlags = Flag_None ; <nl> + m_fmt = fmtEnd ; <nl> + } <nl> + <nl> + <nl> + / / Parse a format string and set the stream state accordingly . <nl> + / / <nl> + / / The format mini - language recognized here is meant to be the one from C99 , <nl> + / / with the form " % [ flags ] [ width ] [ . precision ] [ length ] type " . <nl> + / / <nl> + / / Formatting options which can ' t be natively represented using the ostream <nl> + / / state are returned in the extraFlags parameter which is a bitwise <nl> + / / combination of values from the ExtraFormatFlags enum . <nl> + inline const char * FormatIterator : : streamStateFromFormat ( std : : ostream & out , <nl> + unsigned int & extraFlags , <nl> + const char * fmtStart , <nl> + int variableWidth , <nl> + int variablePrecision ) <nl> + { <nl> + if ( * fmtStart ! = ' % ' ) <nl> + { <nl> + TINYFORMAT_ERROR ( " tinyformat : Not enough conversion specifiers in format string " ) ; <nl> + return fmtStart ; <nl> + } <nl> + / / Reset stream state to defaults . <nl> + out . width ( 0 ) ; <nl> + out . precision ( 6 ) ; <nl> + out . fill ( ' ' ) ; <nl> + / / Reset most flags ; ignore irrelevant unitbuf & skipws . <nl> + out . unsetf ( std : : ios : : adjustfield | std : : ios : : basefield | <nl> + std : : ios : : floatfield | std : : ios : : showbase | std : : ios : : boolalpha | <nl> + std : : ios : : showpoint | std : : ios : : showpos | std : : ios : : uppercase ) ; <nl> + extraFlags = Flag_None ; <nl> + bool precisionSet = false ; <nl> + bool widthSet = false ; <nl> + const char * c = fmtStart + 1 ; <nl> + / / 1 ) Parse flags <nl> + for ( ; ; + + c ) <nl> + { <nl> + switch ( * c ) <nl> + { <nl> + case ' # ' : <nl> + out . setf ( std : : ios : : showpoint | std : : ios : : showbase ) ; <nl> + continue ; <nl> + case ' 0 ' : <nl> + / / overridden by left alignment ( ' - ' flag ) <nl> + if ( ! ( out . flags ( ) & std : : ios : : left ) ) <nl> + { <nl> + / / Use internal padding so that numeric values are <nl> + / / formatted correctly , eg - 00010 rather than 000 - 10 <nl> + out . fill ( ' 0 ' ) ; <nl> + out . setf ( std : : ios : : internal , std : : ios : : adjustfield ) ; <nl> + } <nl> + continue ; <nl> + case ' - ' : <nl> + out . fill ( ' ' ) ; <nl> + out . setf ( std : : ios : : left , std : : ios : : adjustfield ) ; <nl> + continue ; <nl> + case ' ' : <nl> + / / overridden by show positive sign , ' + ' flag . <nl> + if ( ! ( out . flags ( ) & std : : ios : : showpos ) ) <nl> + extraFlags | = Flag_SpacePadPositive ; <nl> + continue ; <nl> + case ' + ' : <nl> + out . 
setf ( std : : ios : : showpos ) ; <nl> + extraFlags & = ~ Flag_SpacePadPositive ; <nl> + continue ; <nl> + } <nl> + break ; <nl> + } <nl> + / / 2 ) Parse width <nl> + if ( * c > = ' 0 ' & & * c < = ' 9 ' ) <nl> + { <nl> + widthSet = true ; <nl> + out . width ( parseIntAndAdvance ( c ) ) ; <nl> + } <nl> + if ( * c = = ' * ' ) <nl> + { <nl> + widthSet = true ; <nl> + if ( variableWidth < 0 ) <nl> + { <nl> + / / negative widths correspond to ' - ' flag set <nl> + out . fill ( ' ' ) ; <nl> + out . setf ( std : : ios : : left , std : : ios : : adjustfield ) ; <nl> + variableWidth = - variableWidth ; <nl> + } <nl> + out . width ( variableWidth ) ; <nl> + extraFlags | = Flag_VariableWidth ; <nl> + + + c ; <nl> + } <nl> + / / 3 ) Parse precision <nl> + if ( * c = = ' . ' ) <nl> + { <nl> + + + c ; <nl> + int precision = 0 ; <nl> + if ( * c = = ' * ' ) <nl> + { <nl> + + + c ; <nl> + extraFlags | = Flag_VariablePrecision ; <nl> + precision = variablePrecision ; <nl> + } <nl> + else <nl> + { <nl> + if ( * c > = ' 0 ' & & * c < = ' 9 ' ) <nl> + precision = parseIntAndAdvance ( c ) ; <nl> + else if ( * c = = ' - ' ) / / negative precisions ignored , treated as zero . <nl> + parseIntAndAdvance ( + + c ) ; <nl> + } <nl> + out . precision ( precision ) ; <nl> + precisionSet = true ; <nl> + } <nl> + / / 4 ) Ignore any C99 length modifier <nl> + while ( * c = = ' l ' | | * c = = ' h ' | | * c = = ' L ' | | <nl> + * c = = ' j ' | | * c = = ' z ' | | * c = = ' t ' ) <nl> + + + c ; <nl> + / / 5 ) We ' re up to the conversion specifier character . <nl> + / / Set stream flags based on conversion specifier ( thanks to the <nl> + / / boost : : format class for forging the way here ) . <nl> + bool intConversion = false ; <nl> + switch ( * c ) <nl> + { <nl> + case ' u ' : case ' d ' : case ' i ' : <nl> + out . setf ( std : : ios : : dec , std : : ios : : basefield ) ; <nl> + intConversion = true ; <nl> + break ; <nl> + case ' o ' : <nl> + out . setf ( std : : ios : : oct , std : : ios : : basefield ) ; <nl> + intConversion = true ; <nl> + break ; <nl> + case ' X ' : <nl> + out . setf ( std : : ios : : uppercase ) ; <nl> + case ' x ' : case ' p ' : <nl> + out . setf ( std : : ios : : hex , std : : ios : : basefield ) ; <nl> + intConversion = true ; <nl> + break ; <nl> + case ' E ' : <nl> + out . setf ( std : : ios : : uppercase ) ; <nl> + case ' e ' : <nl> + out . setf ( std : : ios : : scientific , std : : ios : : floatfield ) ; <nl> + out . setf ( std : : ios : : dec , std : : ios : : basefield ) ; <nl> + break ; <nl> + case ' F ' : <nl> + out . setf ( std : : ios : : uppercase ) ; <nl> + case ' f ' : <nl> + out . setf ( std : : ios : : fixed , std : : ios : : floatfield ) ; <nl> + break ; <nl> + case ' G ' : <nl> + out . setf ( std : : ios : : uppercase ) ; <nl> + case ' g ' : <nl> + out . setf ( std : : ios : : dec , std : : ios : : basefield ) ; <nl> + / / As in boost : : format , let stream decide float format . <nl> + out . flags ( out . flags ( ) & ~ std : : ios : : floatfield ) ; <nl> + break ; <nl> + case ' a ' : case ' A ' : <nl> + TINYFORMAT_ERROR ( " tinyformat : the % a and % A conversion specs " <nl> + " are not supported " ) ; <nl> + break ; <nl> + case ' c ' : <nl> + / / Handled as special case inside formatValue ( ) <nl> + break ; <nl> + case ' s ' : <nl> + if ( precisionSet ) <nl> + extraFlags | = Flag_TruncateToPrecision ; <nl> + / / Make % s print booleans as " true " and " false " <nl> + out . 
setf ( std : : ios : : boolalpha ) ; <nl> + break ; <nl> + case ' n ' : <nl> + / / Not supported - will cause problems ! <nl> + TINYFORMAT_ERROR ( " tinyformat : % n conversion spec not supported " ) ; <nl> + break ; <nl> + case ' \ 0 ' : <nl> + TINYFORMAT_ERROR ( " tinyformat : Conversion spec incorrectly " <nl> + " terminated by end of string " ) ; <nl> + return c ; <nl> + } <nl> + if ( intConversion & & precisionSet & & ! widthSet ) <nl> + { <nl> + / / " precision " for integers gives the minimum number of digits ( to be <nl> + / / padded with zeros on the left ) . This isn ' t really supported by the <nl> + / / iostreams , but we can approximately simulate it with the width if <nl> + / / the width isn ' t otherwise used . <nl> + out . width ( out . precision ( ) ) ; <nl> + out . setf ( std : : ios : : internal , std : : ios : : adjustfield ) ; <nl> + out . fill ( ' 0 ' ) ; <nl> + } <nl> + return c + 1 ; <nl> + } <nl> + <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Private format function on top of which the public interface is implemented . <nl> + / / We enforce a mimimum of one value to be formatted to prevent bugs looking like <nl> + / / <nl> + / / const char * myStr = " 100 % broken " ; <nl> + / / printf ( myStr ) ; / / Parses % as a format specifier <nl> + # ifdef TINYFORMAT_USE_VARIADIC_TEMPLATES <nl> + <nl> + template < typename T1 > <nl> + void format ( FormatIterator & fmtIter , const T1 & value1 ) <nl> + { <nl> + fmtIter . accept ( value1 ) ; <nl> + fmtIter . finish ( ) ; <nl> + } <nl> + <nl> + / / General version for C + + 11 <nl> + template < typename T1 , typename . . . Args > <nl> + void format ( FormatIterator & fmtIter , const T1 & value1 , const Args & . . . args ) <nl> + { <nl> + fmtIter . accept ( value1 ) ; <nl> + format ( fmtIter , args . . . ) ; <nl> + } <nl> + <nl> + # else <nl> + <nl> + inline void format ( FormatIterator & fmtIter ) <nl> + { <nl> + fmtIter . finish ( ) ; <nl> + } <nl> + <nl> + / / General version for C + + 98 <nl> + # define TINYFORMAT_MAKE_FORMAT_DETAIL ( n ) \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + void format ( detail : : FormatIterator & fmtIter , TINYFORMAT_VARARGS ( n ) ) \ <nl> + { \ <nl> + fmtIter . accept ( v1 ) ; \ <nl> + format ( fmtIter TINYFORMAT_PASSARGS_TAIL ( n ) ) ; \ <nl> + } <nl> + <nl> + TINYFORMAT_FOREACH_ARGNUM ( TINYFORMAT_MAKE_FORMAT_DETAIL ) <nl> + # undef TINYFORMAT_MAKE_FORMAT_DETAIL <nl> + <nl> + # endif / / End C + + 98 variadic template emulation for format ( ) <nl> + <nl> + } / / namespace detail <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Implement all the main interface functions here in terms of detail : : format ( ) <nl> + <nl> + # ifdef TINYFORMAT_USE_VARIADIC_TEMPLATES <nl> + <nl> + / / C + + 11 - the simple case <nl> + template < typename T1 , typename . . . Args > <nl> + void format ( std : : ostream & out , const char * fmt , const T1 & v1 , const Args & . . . args ) <nl> + { <nl> + detail : : FormatIterator fmtIter ( out , fmt ) ; <nl> + format ( fmtIter , v1 , args . . . ) ; <nl> + } <nl> + <nl> + template < typename T1 , typename . . . Args > <nl> + std : : string format ( const char * fmt , const T1 & v1 , const Args & . . . args ) <nl> + { <nl> + std : : ostringstream oss ; <nl> + format ( oss , fmt , v1 , args . . . ) ; <nl> + return oss . str ( ) ; <nl> + } <nl> + <nl> + template < typename T1 , typename . . . 
Args > <nl> + std : : string format ( const std : : string & fmt , const T1 & v1 , const Args & . . . args ) <nl> + { <nl> + std : : ostringstream oss ; <nl> + format ( oss , fmt . c_str ( ) , v1 , args . . . ) ; <nl> + return oss . str ( ) ; <nl> + } <nl> + <nl> + template < typename T1 , typename . . . Args > <nl> + void printf ( const char * fmt , const T1 & v1 , const Args & . . . args ) <nl> + { <nl> + format ( std : : cout , fmt , v1 , args . . . ) ; <nl> + } <nl> + <nl> + # else <nl> + <nl> + / / C + + 98 - define the interface functions using the wrapping macros <nl> + # define TINYFORMAT_MAKE_FORMAT_FUNCS ( n ) \ <nl> + \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + void format ( std : : ostream & out , const char * fmt , TINYFORMAT_VARARGS ( n ) ) \ <nl> + { \ <nl> + tinyformat : : detail : : FormatIterator fmtIter ( out , fmt ) ; \ <nl> + tinyformat : : detail : : format ( fmtIter , TINYFORMAT_PASSARGS ( n ) ) ; \ <nl> + } \ <nl> + \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + std : : string format ( const char * fmt , TINYFORMAT_VARARGS ( n ) ) \ <nl> + { \ <nl> + std : : ostringstream oss ; \ <nl> + tinyformat : : format ( oss , fmt , TINYFORMAT_PASSARGS ( n ) ) ; \ <nl> + return oss . str ( ) ; \ <nl> + } \ <nl> + \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + std : : string format ( const std : : string & fmt , TINYFORMAT_VARARGS ( n ) ) \ <nl> + { \ <nl> + std : : ostringstream oss ; \ <nl> + tinyformat : : format ( oss , fmt . c_str ( ) , TINYFORMAT_PASSARGS ( n ) ) ; \ <nl> + return oss . str ( ) ; \ <nl> + } \ <nl> + \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + void printf ( const char * fmt , TINYFORMAT_VARARGS ( n ) ) \ <nl> + { \ <nl> + tinyformat : : format ( std : : cout , fmt , TINYFORMAT_PASSARGS ( n ) ) ; \ <nl> + } <nl> + <nl> + TINYFORMAT_FOREACH_ARGNUM ( TINYFORMAT_MAKE_FORMAT_FUNCS ) <nl> + # undef TINYFORMAT_MAKE_FORMAT_FUNCS <nl> + # endif <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + / / Define deprecated wrapping macro for backward compatibility in tinyformat <nl> + / / 1 . x . Will be removed in version 2 ! <nl> + # define TINYFORMAT_WRAP_FORMAT_EXTRA_ARGS <nl> + # define TINYFORMAT_WRAP_FORMAT_N ( n , returnType , funcName , funcDeclSuffix , \ <nl> + bodyPrefix , streamName , bodySuffix ) \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + returnType funcName ( TINYFORMAT_WRAP_FORMAT_EXTRA_ARGS const char * fmt , \ <nl> + TINYFORMAT_VARARGS ( n ) ) funcDeclSuffix \ <nl> + { \ <nl> + bodyPrefix \ <nl> + tinyformat : : format ( streamName , fmt , TINYFORMAT_PASSARGS ( n ) ) ; \ <nl> + bodySuffix \ <nl> + } \ <nl> + <nl> + # define TINYFORMAT_WRAP_FORMAT ( returnType , funcName , funcDeclSuffix , \ <nl> + bodyPrefix , streamName , bodySuffix ) \ <nl> + inline \ <nl> + returnType funcName ( TINYFORMAT_WRAP_FORMAT_EXTRA_ARGS const char * fmt \ <nl> + ) funcDeclSuffix \ <nl> + { \ <nl> + bodyPrefix \ <nl> + tinyformat : : detail : : FormatIterator ( streamName , fmt ) . 
finish ( ) ; \ <nl> + bodySuffix \ <nl> + } \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 1 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 2 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 3 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 4 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 5 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 6 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 7 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 8 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 9 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 10 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 11 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 12 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 13 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 14 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 15 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + TINYFORMAT_WRAP_FORMAT_N ( 16 , returnType , funcName , funcDeclSuffix , bodyPrefix , streamName , bodySuffix ) \ <nl> + <nl> + <nl> + } / / namespace tinyformat <nl> + <nl> + # endif / / TINYFORMAT_H_INCLUDED <nl> mmm a / src / util . cpp <nl> ppp b / src / util . cpp <nl> static void DebugPrintInit ( ) <nl> mutexDebugLog = new boost : : mutex ( ) ; <nl> } <nl> <nl> - int LogPrint ( const char * category , const char * pszFormat , . . . ) <nl> + bool LogAcceptCategory ( const char * category ) <nl> { <nl> if ( category ! = NULL ) <nl> { <nl> if ( ! fDebug ) <nl> - return 0 ; <nl> + return false ; <nl> <nl> / / Give each thread quick access to - debug settings . <nl> / / This helps prevent issues debugging global destructors , <nl> int LogPrint ( const char * category , const char * pszFormat , . . . ) <nl> / / if not debugging everything and not debugging specific category , LogPrint does nothing . <nl> if ( setCategories . count ( string ( " " ) ) = = 0 & & <nl> setCategories . count ( string ( category ) ) = = 0 ) <nl> - return 0 ; <nl> + return false ; <nl> } <nl> + return true ; <nl> + } <nl> <nl> + int LogPrintStr ( const std : : string & str ) <nl> + { <nl> int ret = 0 ; / / Returns total number of characters written <nl> if ( fPrintToConsole ) <nl> { <nl> / / print to console <nl> - va_list arg_ptr ; <nl> - va_start ( arg_ptr , pszFormat ) ; <nl> - ret + = vprintf ( pszFormat , arg_ptr ) ; <nl> - va_end ( arg_ptr ) ; <nl> + ret = fwrite ( str . data ( ) , 1 , str . size ( ) , stdout ) ; <nl> } <nl> else if ( fPrintToDebugLog ) <nl> { <nl> int LogPrint ( const char * category , const char * pszFormat , . . . 
) <nl> / / Debug print useful for profiling <nl> if ( fLogTimestamps & & fStartedNewLine ) <nl> ret + = fprintf ( fileout , " % s " , DateTimeStrFormat ( " % Y - % m - % d % H : % M : % S " , GetTime ( ) ) . c_str ( ) ) ; <nl> - if ( pszFormat [ strlen ( pszFormat ) - 1 ] = = ' \ n ' ) <nl> + if ( ! str . empty ( ) & & str [ str . size ( ) - 1 ] = = ' \ n ' ) <nl> fStartedNewLine = true ; <nl> else <nl> fStartedNewLine = false ; <nl> <nl> - va_list arg_ptr ; <nl> - va_start ( arg_ptr , pszFormat ) ; <nl> - ret + = vfprintf ( fileout , pszFormat , arg_ptr ) ; <nl> - va_end ( arg_ptr ) ; <nl> + ret = fwrite ( str . data ( ) , 1 , str . size ( ) , fileout ) ; <nl> } <nl> <nl> return ret ; <nl> } <nl> <nl> - string vstrprintf ( const char * format , va_list ap ) <nl> - { <nl> - char buffer [ 50000 ] ; <nl> - char * p = buffer ; <nl> - int limit = sizeof ( buffer ) ; <nl> - int ret ; <nl> - while ( true ) <nl> - { <nl> - va_list arg_ptr ; <nl> - va_copy ( arg_ptr , ap ) ; <nl> - ret = vsnprintf ( p , limit , format , arg_ptr ) ; <nl> - va_end ( arg_ptr ) ; <nl> - if ( ret > = 0 & & ret < limit ) <nl> - break ; <nl> - if ( p ! = buffer ) <nl> - delete [ ] p ; <nl> - limit * = 2 ; <nl> - p = new char [ limit ] ; <nl> - if ( p = = NULL ) <nl> - throw std : : bad_alloc ( ) ; <nl> - } <nl> - string str ( p , p + ret ) ; <nl> - if ( p ! = buffer ) <nl> - delete [ ] p ; <nl> - return str ; <nl> - } <nl> - <nl> - string real_strprintf ( const char * format , int dummy , . . . ) <nl> - { <nl> - va_list arg_ptr ; <nl> - va_start ( arg_ptr , dummy ) ; <nl> - string str = vstrprintf ( format , arg_ptr ) ; <nl> - va_end ( arg_ptr ) ; <nl> - return str ; <nl> - } <nl> - <nl> - string real_strprintf ( const std : : string & format , int dummy , . . . ) <nl> - { <nl> - va_list arg_ptr ; <nl> - va_start ( arg_ptr , dummy ) ; <nl> - string str = vstrprintf ( format . c_str ( ) , arg_ptr ) ; <nl> - va_end ( arg_ptr ) ; <nl> - return str ; <nl> - } <nl> - <nl> - bool error ( const char * format , . . . ) <nl> - { <nl> - va_list arg_ptr ; <nl> - va_start ( arg_ptr , format ) ; <nl> - std : : string str = vstrprintf ( format , arg_ptr ) ; <nl> - va_end ( arg_ptr ) ; <nl> - LogPrintf ( " ERROR : % s \ n " , str . c_str ( ) ) ; <nl> - return false ; <nl> - } <nl> - <nl> - <nl> void ParseString ( const string & str , char c , vector < string > & v ) <nl> { <nl> if ( str . empty ( ) ) <nl> mmm a / src / util . h <nl> ppp b / src / util . h <nl> <nl> <nl> # include " compat . h " <nl> # include " serialize . h " <nl> + # include " tinyformat . h " <nl> <nl> # include < cstdio > <nl> # include < exception > <nl> inline void MilliSleep ( int64_t n ) <nl> # endif <nl> } <nl> <nl> - / * This GNU C extension enables the compiler to check the format string against the parameters provided . <nl> - * X is the number of the " format string " parameter , and Y is the number of the first variadic parameter . <nl> - * Parameters count from 1 . <nl> - * / <nl> - # ifdef __GNUC__ <nl> - # define ATTR_WARN_PRINTF ( X , Y ) __attribute__ ( ( format ( gnu_printf , X , Y ) ) ) <nl> - # else <nl> - # define ATTR_WARN_PRINTF ( X , Y ) <nl> - # endif <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> <nl> <nl> extern std : : map < std : : string , std : : string > mapArgs ; <nl> extern volatile bool fReopenDebugLog ; <nl> void RandAddSeed ( ) ; <nl> void RandAddSeedPerfmon ( ) ; <nl> <nl> - / / Print to debug . log if - debug = category switch is given OR category is NULL . 
<nl> - int ATTR_WARN_PRINTF ( 2 , 3 ) LogPrint ( const char * category , const char * pszFormat , . . . ) ; <nl> + / * Return true if log accepts specified category * / <nl> + bool LogAcceptCategory ( const char * category ) ; <nl> + / * Send a string to the log output * / <nl> + int LogPrintStr ( const std : : string & str ) ; <nl> + <nl> + # define strprintf tfm : : format <nl> # define LogPrintf ( . . . ) LogPrint ( NULL , __VA_ARGS__ ) <nl> <nl> - / * <nl> - Rationale for the real_strprintf / strprintf construction : <nl> - It is not allowed to use va_start with a pass - by - reference argument . <nl> - ( C + + standard , 18 . 7 , paragraph 3 ) . Use a dummy argument to work around this , and use a <nl> - macro to keep similar semantics . <nl> - * / <nl> - <nl> - / * * Overload strprintf for char * , so that GCC format type warnings can be given * / <nl> - std : : string ATTR_WARN_PRINTF ( 1 , 3 ) real_strprintf ( const char * format , int dummy , . . . ) ; <nl> - / * * Overload strprintf for std : : string , to be able to use it with _ ( translation ) . <nl> - * This will not support GCC format type warnings ( - Wformat ) so be careful . <nl> + / * When we switch to C + + 11 , this can be switched to variadic templates instead <nl> + * of this macro - based construction ( see tinyformat . h ) . <nl> + * / <nl> + # define MAKE_ERROR_AND_LOG_FUNC ( n ) \ <nl> + / * Print to debug . log if - debug = category switch is given OR category is NULL . * / \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + static inline int LogPrint ( const char * category , const char * format , TINYFORMAT_VARARGS ( n ) ) \ <nl> + { \ <nl> + if ( ! LogAcceptCategory ( category ) ) return 0 ; \ <nl> + return LogPrintStr ( tfm : : format ( format , TINYFORMAT_PASSARGS ( n ) ) ) ; \ <nl> + } \ <nl> + / * Log error and return false * / \ <nl> + template < TINYFORMAT_ARGTYPES ( n ) > \ <nl> + static inline bool error ( const char * format , TINYFORMAT_VARARGS ( n ) ) \ <nl> + { \ <nl> + LogPrintStr ( " ERROR : " + tfm : : format ( format , TINYFORMAT_PASSARGS ( n ) ) ) ; \ <nl> + return false ; \ <nl> + } <nl> + <nl> + TINYFORMAT_FOREACH_ARGNUM ( MAKE_ERROR_AND_LOG_FUNC ) <nl> + <nl> + / * Zero - arg versions of logging and error , these are not covered by <nl> + * TINYFORMAT_FOREACH_ARGNUM <nl> * / <nl> - std : : string real_strprintf ( const std : : string & format , int dummy , . . . ) ; <nl> - # define strprintf ( format , . . . ) real_strprintf ( format , 0 , __VA_ARGS__ ) <nl> - std : : string vstrprintf ( const char * format , va_list ap ) ; <nl> + static inline int LogPrint ( const char * category , const char * format ) <nl> + { <nl> + if ( ! LogAcceptCategory ( category ) ) return 0 ; <nl> + return LogPrintStr ( format ) ; <nl> + } <nl> + static inline bool error ( const char * format ) <nl> + { <nl> + LogPrintStr ( std : : string ( " ERROR : " ) + format ) ; <nl> + return false ; <nl> + } <nl> <nl> - bool ATTR_WARN_PRINTF ( 1 , 2 ) error ( const char * format , . . . ) ; <nl> <nl> void LogException ( std : : exception * pex , const char * pszThread ) ; <nl> void PrintException ( std : : exception * pex , const char * pszThread ) ; <nl>
Typesafe strprintf / error / LogPrint functions
bitcoin/bitcoin
b77dfdc9e36e308aa806d63aa3b5628971789d5a
2014-01-23T15:05:00Z
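Editor's note on the record above: the commit swaps the old vsnprintf-based strprintf / LogPrint for tinyformat's stream-based formatting, which is type-safe because every argument is rendered with operator<< instead of being reinterpreted through a C varargs list. The snippet below is a minimal self-contained sketch of the recursive argument-peeling idea that both the C++11 variadic-template path and the TINYFORMAT_FOREACH_ARGNUM-generated C++98 overloads rely on. MiniFormatter, formatImpl and miniFormat are names invented for this sketch (they are not tinyformat's API), and the "%s"-only parsing is a deliberate simplification of tinyformat's full printf-spec handling.

```cpp
#include <iostream>
#include <sstream>
#include <string>

// Hypothetical, simplified "format iterator": it copies literal text and
// substitutes each "%s" with the next value via operator<<. Real tinyformat
// parses full printf-style conversion specs and raises TINYFORMAT_ERROR on
// mismatches; this sketch silently drops surplus values instead.
class MiniFormatter {
public:
    MiniFormatter(std::ostream& out, const char* fmt) : out_(out), fmt_(fmt) {}
    template<typename T>
    void accept(const T& value) {
        while (*fmt_) {
            if (fmt_[0] == '%' && fmt_[1] == 's') { fmt_ += 2; out_ << value; return; }
            out_ << *fmt_++;
        }
    }
    void finish() { out_ << fmt_; }   // flush the trailing literal text
private:
    std::ostream& out_;
    const char* fmt_;
};

// Recursive peeling: each overload consumes one argument and forwards the
// rest, which is the shape TINYFORMAT_MAKE_FORMAT_DETAIL stamps out once per
// argument count for compilers without variadic templates.
template<typename T1>
void formatImpl(MiniFormatter& f, const T1& v1) { f.accept(v1); f.finish(); }

template<typename T1, typename T2>
void formatImpl(MiniFormatter& f, const T1& v1, const T2& v2) { f.accept(v1); formatImpl(f, v2); }

template<typename T1, typename T2>
std::string miniFormat(const char* fmt, const T1& v1, const T2& v2) {
    std::ostringstream oss;
    MiniFormatter f(oss, fmt);
    formatImpl(f, v1, v2);
    return oss.str();
}

int main() {
    // Type-safe: both arguments go through operator<<, so an int in a
    // "%s" slot is perfectly well defined, unlike C printf.
    std::cout << miniFormat("height=%s hash=%s\n", 42, std::string("f00"));
    return 0;
}
```

In the real header the N-argument overloads are generated by macros rather than written by hand, which is what lets util.h build LogPrint and error as templates via MAKE_ERROR_AND_LOG_FUNC without requiring C++11.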
mmm a / src / mongo / dbtests / d_chunk_manager_tests . cpp <nl> ppp b / src / mongo / dbtests / d_chunk_manager_tests . cpp <nl> <nl> # include " mongo / s / cluster_constants . h " <nl> # include " mongo / s / d_chunk_manager . h " <nl> # include " mongo / s / type_chunk . h " <nl> + # include " mongo / s / type_collection . h " <nl> <nl> namespace { <nl> <nl> class BasicTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / single - chunk collection <nl> BSONArray chunks = BSON_ARRAY ( BSON ( ChunkType : : name ( " test . foo - a_MinKey " ) < < <nl> namespace { <nl> class BasicCompoundTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / single - chunk collection <nl> BSONArray chunks = BSON_ARRAY ( BSON ( ChunkType : : name ( " test . foo - a_MinKeyb_MinKey " ) < < <nl> namespace { <nl> class RangeTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " x . y " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " x . y " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 3 - chunk collection , 2 of them being contiguous <nl> / / [ min - > 10 ) , [ 10 - > 20 ) , < gap > , [ 30 - > max ) <nl> namespace { <nl> class GetNextTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " x . y " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " x . y " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / empty collection <nl> BSONArray chunks1 = BSONArray ( ) ; <nl> namespace { <nl> class DeletedTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( true ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . 
foo " ) < < <nl> + CollectionType : : dropped ( true ) ) ; <nl> <nl> BSONArray chunks = BSONArray ( ) ; <nl> <nl> namespace { <nl> class ClonePlusTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 1 - chunk collection <nl> / / [ 10 , 0 - 20 , 0 ) <nl> namespace { <nl> class ClonePlusExceptionTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 1 - chunk collection <nl> / / [ 10 , 0 - 20 , 0 ) <nl> namespace { <nl> class CloneMinusTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " x . y " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " x . y " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 2 - chunk collection <nl> / / [ 10 , 0 - > 20 , 0 ) , < gap > , [ 30 , 0 - > 40 , 0 ) <nl> namespace { <nl> class CloneMinusExceptionTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " x . y " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " x . y " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 2 - chunk collection <nl> / / [ 10 , 0 - > 20 , 0 ) , < gap > , [ 30 , 0 - > 40 , 0 ) <nl> namespace { <nl> class CloneSplitTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . 
foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 1 - chunk collection <nl> / / [ 10 , 0 - 20 , 0 ) <nl> namespace { <nl> class CloneSplitExceptionTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 < < " b " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 1 - chunk collection <nl> / / [ 10 , 0 - 20 , 0 ) <nl> namespace { <nl> class EmptyShardTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / no chunks on this shard <nl> BSONArray chunks ; <nl> namespace { <nl> class LastChunkTests { <nl> public : <nl> void run ( ) { <nl> - BSONObj collection = BSON ( CollectionFields : : name ( " test . foo " ) < < <nl> - CollectionFields : : dropped ( false ) < < <nl> - CollectionFields : : key ( BSON ( " a " < < 1 ) ) < < <nl> - CollectionFields : : unique ( false ) ) ; <nl> + BSONObj collection = BSON ( CollectionType : : ns ( " test . foo " ) < < <nl> + CollectionType : : dropped ( false ) < < <nl> + CollectionType : : keyPattern ( BSON ( " a " < < 1 ) ) < < <nl> + CollectionType : : unique ( false ) ) ; <nl> <nl> / / 1 - chunk collection <nl> / / [ 10 - > 20 ) <nl> mmm a / src / mongo / s / balance . cpp <nl> ppp b / src / mongo / s / balance . cpp <nl> <nl> # include " mongo / s / server . h " <nl> # include " mongo / s / shard . h " <nl> # include " mongo / s / type_chunk . h " <nl> + # include " mongo / s / type_collection . h " <nl> <nl> namespace mongo { <nl> <nl> namespace mongo { <nl> / / the ShardsNS : : collections collection <nl> / / <nl> <nl> - auto_ptr < DBClientCursor > cursor = conn . query ( ConfigNS : : collection , BSONObj ( ) ) ; <nl> + auto_ptr < DBClientCursor > cursor = conn . query ( CollectionType : : ConfigNS , BSONObj ( ) ) ; <nl> vector < string > collections ; <nl> while ( cursor - > more ( ) ) { <nl> BSONObj col = cursor - > nextSafe ( ) ; <nl> <nl> / / sharded collections will have a shard " key " . <nl> - if ( ! col [ CollectionFields : : key ( ) ] . eoo ( ) & & <nl> - ! col [ CollectionFields : : noBalance ( ) ] . trueValue ( ) ) { <nl> - collections . push_back ( col [ CollectionFields : : name ( ) ] . String ( ) ) ; <nl> + if ( ! col [ CollectionType : : keyPattern ( ) ] . eoo ( ) & & <nl> + ! col [ CollectionType : : noBalance ( ) ] . trueValue ( ) ) { <nl> + collections . push_back ( col [ CollectionType : : ns ( ) ] . String ( ) ) ; <nl> } <nl> - else if ( col [ CollectionFields : : noBalance ( ) ] . 
trueValue ( ) ) { <nl> - LOG ( 1 ) < < " not balancing collection " < < col [ CollectionFields : : name ( ) ] . String ( ) <nl> + else if ( col [ CollectionType : : noBalance ( ) ] . trueValue ( ) ) { <nl> + LOG ( 1 ) < < " not balancing collection " < < col [ CollectionType : : ns ( ) ] . String ( ) <nl> < < " , explicitly disabled " < < endl ; <nl> } <nl> <nl> mmm a / src / mongo / s / chunk . cpp <nl> ppp b / src / mongo / s / chunk . cpp <nl> <nl> <nl> # include " pch . h " <nl> <nl> + # include " mongo / s / chunk . h " <nl> + <nl> # include " mongo / client / connpool . h " <nl> # include " mongo / client / dbclientcursor . h " <nl> # include " mongo / db / queryutil . h " <nl> # include " mongo / platform / random . h " <nl> - # include " mongo / s / chunk . h " <nl> # include " mongo / s / chunk_diff . h " <nl> # include " mongo / s / client_info . h " <nl> # include " mongo / s / config . h " <nl> # include " mongo / s / cursors . h " <nl> # include " mongo / s / grid . h " <nl> - # include " mongo / util / concurrency / ticketholder . h " <nl> # include " mongo / s / strategy . h " <nl> + # include " mongo / s / type_collection . h " <nl> + # include " mongo / util / concurrency / ticketholder . h " <nl> # include " mongo / util / startup_test . h " <nl> # include " mongo / util / timer . h " <nl> <nl> namespace mongo { <nl> ChunkManager : : ChunkManager ( const BSONObj & collDoc ) : <nl> / / Need the ns early , to construct the lock <nl> / / TODO : Construct lock on demand ? Not sure why we need to keep it around <nl> - _ns ( collDoc [ CollectionFields : : name ( ) ] . type ( ) = = String ? <nl> - collDoc [ CollectionFields : : name ( ) ] . String ( ) : <nl> + _ns ( collDoc [ CollectionType : : ns ( ) ] . type ( ) = = String ? <nl> + collDoc [ CollectionType : : ns ( ) ] . String ( ) : <nl> " " ) , <nl> - _key ( collDoc [ CollectionFields : : key ( ) ] . type ( ) = = Object ? <nl> - collDoc [ CollectionFields : : key ( ) ] . Obj ( ) . getOwned ( ) : <nl> + _key ( collDoc [ CollectionType : : keyPattern ( ) ] . type ( ) = = Object ? <nl> + collDoc [ CollectionType : : keyPattern ( ) ] . Obj ( ) . getOwned ( ) : <nl> BSONObj ( ) ) , <nl> - _unique ( collDoc [ CollectionFields : : unique ( ) ] . trueValue ( ) ) , <nl> + _unique ( collDoc [ CollectionType : : unique ( ) ] . trueValue ( ) ) , <nl> _chunkRanges ( ) , <nl> _mutex ( " ChunkManager " ) , <nl> / / The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager ' s . <nl> namespace mongo { <nl> return _version ; <nl> } <nl> <nl> + void ChunkManager : : getInfo ( BSONObjBuilder & b ) const { <nl> + b . append ( CollectionType : : keyPattern ( ) , _key . key ( ) ) ; <nl> + b . appendBool ( CollectionType : : unique ( ) , _unique ) ; <nl> + _version . addEpochToBSON ( b , CollectionType : : DEPRECATED_lastmod ( ) ) ; <nl> + } <nl> + <nl> string ChunkManager : : toString ( ) const { <nl> stringstream ss ; <nl> ss < < " ChunkManager : " < < _ns < < " key : " < < _key . toString ( ) < < ' \ n ' ; <nl> mmm a / src / mongo / s / chunk . h <nl> ppp b / src / mongo / s / chunk . h <nl> namespace mongo { <nl> ShardChunkVersion getVersion ( const Shard & shard ) const ; <nl> ShardChunkVersion getVersion ( ) const ; <nl> <nl> - void getInfo ( BSONObjBuilder & b ) const { <nl> - b . append ( CollectionFields : : key ( ) , _key . key ( ) ) ; <nl> - b . appendBool ( CollectionFields : : unique ( ) , _unique ) ; <nl> - _version . 
addEpochToBSON ( b , CollectionFields : : lastmod ( ) ) ; <nl> - } <nl> + void getInfo ( BSONObjBuilder & b ) const ; <nl> <nl> / * * <nl> * @ param me - so i don ' t get deleted before i ' m done <nl> mmm a / src / mongo / s / cluster_constants . cpp <nl> ppp b / src / mongo / s / cluster_constants . cpp <nl> namespace mongo { <nl> BSONField < bool > DatabaseFields : : NEW_draining ( " draining " ) ; <nl> BSONField < bool > DatabaseFields : : NEW_scatterCollections ( " scatterCollections " ) ; <nl> <nl> - const string ConfigNS : : collection = " config . collections " ; <nl> - BSONField < string > CollectionFields : : name ( " _id " ) ; <nl> - BSONField < string > CollectionFields : : shard ( " shard " ) ; <nl> - BSONField < BSONObj > CollectionFields : : key ( " key " ) ; <nl> - BSONField < bool > CollectionFields : : unique ( " unique " ) ; <nl> - BSONField < Date_t > CollectionFields : : lastmod ( " lastmod " ) ; <nl> - BSONField < bool > CollectionFields : : dropped ( " dropped " ) ; <nl> - BSONField < bool > CollectionFields : : noBalance ( " noBalance " ) ; <nl> - BSONField < OID > CollectionFields : : epoch ( " lastmodEpoch " ) ; <nl> - <nl> const string ConfigNS : : tag = " config . tags " ; <nl> BSONField < string > TagFields : : ns ( " ns " ) ; <nl> BSONField < string > TagFields : : tag ( " tag " ) ; <nl> mmm a / src / mongo / s / cluster_constants . h <nl> ppp b / src / mongo / s / cluster_constants . h <nl> namespace mongo { <nl> * ConfigNS holds the names for all the metadata collections stored in a config server . <nl> * / <nl> struct ConfigNS { <nl> - static const string shard ; <nl> static const string database ; <nl> - static const string collection ; <nl> - static const string chunk ; <nl> static const string tag ; <nl> static const string mongos ; <nl> static const string settings ; <nl> namespace mongo { <nl> static BSONField < bool > NEW_scatterCollections ; / / distribute collection among shards <nl> } ; <nl> <nl> - / * * <nl> - * CollectionFields holds all the field names and types for the collections collection . <nl> - * / <nl> - struct CollectionFields { <nl> - static BSONField < string > name ; / / collection ' s name <nl> - static BSONField < string > shard ; / / primary , if not sharded <nl> - static BSONField < BSONObj > key ; / / sharding key , if sharded <nl> - static BSONField < bool > unique ; / / sharding key unique ? <nl> - static BSONField < Date_t > lastmod ; / / when collecation was created <nl> - static BSONField < bool > dropped ; / / logical deletion <nl> - static BSONField < bool > noBalance ; / / true if balancing is disabled <nl> - static BSONField < OID > epoch ; / / Epoch of collection <nl> - } ; <nl> - <nl> / * * <nl> * TagFields holds all the field names and types for the tags collection . <nl> * / <nl> mmm a / src / mongo / s / config . cpp <nl> ppp b / src / mongo / s / config . cpp <nl> <nl> # include " mongo / s / grid . h " <nl> # include " mongo / s / server . h " <nl> # include " mongo / s / type_chunk . h " <nl> + # include " mongo / s / type_collection . h " <nl> # include " mongo / s / type_shard . h " <nl> # include " mongo / util / net / message . h " <nl> # include " mongo / util / stringutils . h " <nl> namespace mongo { <nl> <nl> DBConfig : : CollectionInfo : : CollectionInfo ( const BSONObj & in ) { <nl> _dirty = false ; <nl> - _dropped = in [ CollectionFields : : dropped ( ) ] . trueValue ( ) ; <nl> + _dropped = in [ CollectionType : : dropped ( ) ] . trueValue ( ) ; <nl> <nl> - if ( in [ CollectionFields : : key ( ) ] . 
isABSONObj ( ) ) { <nl> + if ( in [ CollectionType : : keyPattern ( ) ] . isABSONObj ( ) ) { <nl> shard ( new ChunkManager ( in ) ) ; <nl> } <nl> <nl> namespace mongo { <nl> BSONObj key = BSON ( " _id " < < ns ) ; <nl> <nl> BSONObjBuilder val ; <nl> - val . append ( CollectionFields : : name ( ) , ns ) ; <nl> - val . appendDate ( CollectionFields : : lastmod ( ) , time ( 0 ) ) ; <nl> - val . appendBool ( CollectionFields : : dropped ( ) , _dropped ) ; <nl> + val . append ( CollectionType : : ns ( ) , ns ) ; <nl> + val . appendDate ( CollectionType : : DEPRECATED_lastmod ( ) , time ( 0 ) ) ; <nl> + val . appendBool ( CollectionType : : dropped ( ) , _dropped ) ; <nl> if ( _cm ) <nl> _cm - > getInfo ( val ) ; <nl> <nl> - conn - > update ( ConfigNS : : collection , key , val . obj ( ) , true ) ; <nl> + conn - > update ( CollectionType : : ConfigNS , key , val . obj ( ) , true ) ; <nl> string err = conn - > getLastError ( ) ; <nl> uassert ( 13473 , ( string ) " failed to save collection ( " + ns + " ) : " + err , err . size ( ) = = 0 ) ; <nl> <nl> namespace mongo { <nl> unserialize ( dbObj ) ; <nl> <nl> BSONObjBuilder b ; <nl> - b . appendRegex ( CollectionFields : : name ( ) , <nl> + b . appendRegex ( CollectionType : : ns ( ) , <nl> ( string ) " ^ " + pcrecpp : : RE : : QuoteMeta ( _name ) + " \ \ . " ) ; <nl> <nl> int numCollsErased = 0 ; <nl> int numCollsSharded = 0 ; <nl> <nl> - auto_ptr < DBClientCursor > cursor = conn - > get ( ) - > query ( ConfigNS : : collection , b . obj ( ) ) ; <nl> + auto_ptr < DBClientCursor > cursor = conn - > get ( ) - > query ( CollectionType : : ConfigNS , b . obj ( ) ) ; <nl> verify ( cursor . get ( ) ) ; <nl> while ( cursor - > more ( ) ) { <nl> <nl> BSONObj collObj = cursor - > next ( ) ; <nl> - string collName = collObj [ CollectionFields : : name ( ) ] . String ( ) ; <nl> + string collName = collObj [ CollectionType : : ns ( ) ] . String ( ) ; <nl> <nl> - if ( collObj [ CollectionFields : : dropped ( ) ] . trueValue ( ) ) { <nl> + if ( collObj [ CollectionType : : dropped ( ) ] . trueValue ( ) ) { <nl> _collections . erase ( collName ) ; <nl> numCollsErased + + ; <nl> } <nl> mmm a / src / mongo / s / d_chunk_manager . cpp <nl> ppp b / src / mongo / s / d_chunk_manager . cpp <nl> <nl> # include " mongo / db / instance . h " <nl> # include " mongo / s / chunk_diff . h " <nl> # include " mongo / s / type_chunk . h " <nl> + # include " mongo / s / type_collection . h " <nl> <nl> namespace mongo { <nl> <nl> namespace mongo { <nl> } <nl> <nl> / / get this collection ' s sharding key <nl> - BSONObj collectionDoc = conn - > findOne ( ConfigNS : : collection , BSON ( CollectionFields : : name ( ns ) ) ) ; <nl> + BSONObj collectionDoc = conn - > findOne ( CollectionType : : ConfigNS , BSON ( CollectionType : : ns ( ns ) ) ) ; <nl> <nl> if ( collectionDoc . isEmpty ( ) ) { <nl> warning ( ) < < ns < < " does not exist as a sharded collection " < < endl ; <nl> return ; <nl> } <nl> <nl> - if ( collectionDoc [ CollectionFields : : dropped ( ) ] . Bool ( ) ) { <nl> + if ( collectionDoc [ CollectionType : : dropped ( ) ] . Bool ( ) ) { <nl> warning ( ) < < ns < < " was dropped . Re - shard collection first . " < < endl ; <nl> return ; <nl> } <nl> mmm a / src / mongo / s / grid . cpp <nl> ppp b / src / mongo / s / grid . cpp <nl> <nl> # include " mongo / s / cluster_constants . h " <nl> # include " mongo / s / grid . h " <nl> # include " mongo / s / shard . h " <nl> + # include " mongo / s / type_collection . h " <nl> # include " mongo / s / type_shard . 
h " <nl> # include " mongo / util / startup_test . h " <nl> # include " mongo / util / stringutils . h " <nl> namespace mongo { <nl> / / look for the stop balancer marker <nl> balancerDoc = conn - > get ( ) - > findOne ( ConfigNS : : settings , <nl> BSON ( SettingsFields : : key ( " balancer " ) ) ) ; <nl> - if ( ns . size ( ) > 0 ) collDoc = conn - > get ( ) - > findOne ( ConfigNS : : collection , <nl> - BSON ( CollectionFields : : name ( ns ) ) ) ; <nl> + if ( ns . size ( ) > 0 ) collDoc = conn - > get ( ) - > findOne ( CollectionType : : ConfigNS , <nl> + BSON ( CollectionType : : ns ( ns ) ) ) ; <nl> conn - > done ( ) ; <nl> } <nl> catch ( DBException & e ) { <nl>
SERVER - 939 Changed CollectionFields to CollectionType
mongodb/mongo
9c07a059fedfe534c890a59d13e0fc6536fd8ee8
2012-12-18T16:39:20Z
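Editor's note on the record above: the change retires the loose CollectionFields constants (and the separate ConfigNS::collection string) in favour of a CollectionType that carries both the config namespace and its field names, so call sites like conn->findOne(CollectionType::ConfigNS, BSON(CollectionType::ns(ns))) name everything through one type. Below is a rough standalone sketch of that pattern; Doc, MiniCollectionType and findOne are invented stand-ins for this illustration, not MongoDB's actual BSONObj / BSONField machinery.

```cpp
#include <iostream>
#include <map>
#include <string>

typedef std::map<std::string, std::string> Doc;  // crude stand-in for BSONObj

// One type per config collection: the collection's namespace string lives
// next to its field names, so a caller cannot pair these fields with the
// wrong collection by accident.
struct MiniCollectionType {
    static const std::string ConfigNS;
    static std::string ns()         { return "_id"; }
    static std::string dropped()    { return "dropped"; }
    static std::string keyPattern() { return "key"; }
    static std::string unique()     { return "unique"; }
};
const std::string MiniCollectionType::ConfigNS = "config.collections";

// A toy query helper that takes the namespace from the type itself,
// mirroring conn->findOne(CollectionType::ConfigNS, BSON(CollectionType::ns(n))).
Doc findOne(const std::string& configNS, const Doc& query) {
    std::cout << "findOne(" << configNS << ", _id="
              << query.at(MiniCollectionType::ns()) << ")\n";
    Doc result;
    result[MiniCollectionType::ns()] = query.at(MiniCollectionType::ns());
    result[MiniCollectionType::dropped()] = "false";
    return result;
}

int main() {
    Doc query;
    query[MiniCollectionType::ns()] = "test.foo";
    Doc collectionDoc = findOne(MiniCollectionType::ConfigNS, query);
    if (collectionDoc[MiniCollectionType::dropped()] == "true")
        std::cout << "collection was dropped\n";
    else
        std::cout << "sharded collection: "
                  << collectionDoc[MiniCollectionType::ns()] << "\n";
    return 0;
}
```

The design payoff is the same as in the diff: renaming a field (key becoming keyPattern, _id exposed as ns()) is a one-line change in the type, and the compiler then finds every caller.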
mmm a / src / net . cpp <nl> ppp b / src / net . cpp <nl> void CConnman : : ThreadSocketHandler ( ) <nl> <nl> void CConnman : : WakeMessageHandler ( ) <nl> { <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mutexMsgProc ) ; <nl> + fMsgProcWake = true ; <nl> + } <nl> condMsgProc . notify_one ( ) ; <nl> } <nl> <nl> void CConnman : : ThreadMessageHandler ( ) <nl> } <nl> } <nl> <nl> - bool fSleep = true ; <nl> + bool fMoreWork = false ; <nl> <nl> BOOST_FOREACH ( CNode * pnode , vNodesCopy ) <nl> { <nl> void CConnman : : ThreadMessageHandler ( ) <nl> TRY_LOCK ( pnode - > cs_vRecvMsg , lockRecv ) ; <nl> if ( lockRecv ) <nl> { <nl> - if ( ! GetNodeSignals ( ) . ProcessMessages ( pnode , * this , flagInterruptMsgProc ) ) <nl> - pnode - > CloseSocketDisconnect ( ) ; <nl> - <nl> - if ( pnode - > nSendSize < GetSendBufferSize ( ) ) <nl> - { <nl> - if ( ! pnode - > vRecvGetData . empty ( ) | | ( ! pnode - > vRecvMsg . empty ( ) & & pnode - > vRecvMsg . front ( ) . complete ( ) ) ) <nl> - { <nl> - fSleep = false ; <nl> - } <nl> - } <nl> + bool fMoreNodeWork = GetNodeSignals ( ) . ProcessMessages ( pnode , * this , flagInterruptMsgProc ) ; <nl> + fMoreWork | = ( fMoreNodeWork & & pnode - > nSendSize < GetSendBufferSize ( ) ) ; <nl> } <nl> } <nl> if ( flagInterruptMsgProc ) <nl> void CConnman : : ThreadMessageHandler ( ) <nl> pnode - > Release ( ) ; <nl> } <nl> <nl> - if ( fSleep ) { <nl> - std : : unique_lock < std : : mutex > lock ( mutexMsgProc ) ; <nl> - condMsgProc . wait_until ( lock , std : : chrono : : steady_clock : : now ( ) + std : : chrono : : milliseconds ( 100 ) ) ; <nl> + std : : unique_lock < std : : mutex > lock ( mutexMsgProc ) ; <nl> + if ( ! fMoreWork ) { <nl> + condMsgProc . wait_until ( lock , std : : chrono : : steady_clock : : now ( ) + std : : chrono : : milliseconds ( 100 ) , [ this ] { return fMsgProcWake ; } ) ; <nl> } <nl> + fMsgProcWake = false ; <nl> } <nl> } <nl> <nl> bool CConnman : : Start ( CScheduler & scheduler , std : : string & strNodeError , Options c <nl> interruptNet . reset ( ) ; <nl> flagInterruptMsgProc = false ; <nl> <nl> + { <nl> + std : : unique_lock < std : : mutex > lock ( mutexMsgProc ) ; <nl> + fMsgProcWake = false ; <nl> + } <nl> + <nl> / / Send and receive from sockets , accept connections <nl> threadSocketHandler = std : : thread ( & TraceThread < std : : function < void ( ) > > , " net " , std : : function < void ( ) > ( std : : bind ( & CConnman : : ThreadSocketHandler , this ) ) ) ; <nl> <nl> mmm a / src / net . h <nl> ppp b / src / net . h <nl> class CConnman <nl> / * * SipHasher seeds for deterministic randomness * / <nl> const uint64_t nSeed0 , nSeed1 ; <nl> <nl> + / * * flag for waking the message processor . * / <nl> + bool fMsgProcWake ; <nl> + <nl> std : : condition_variable condMsgProc ; <nl> std : : mutex mutexMsgProc ; <nl> std : : atomic < bool > flagInterruptMsgProc ; <nl> mmm a / src / net_processing . cpp <nl> ppp b / src / net_processing . cpp <nl> bool ProcessMessages ( CNode * pfrom , CConnman & connman , std : : atomic < bool > & interru <nl> / / ( 4 ) checksum <nl> / / ( x ) data <nl> / / <nl> - bool fOk = true ; <nl> + bool fMoreWork = false ; <nl> <nl> if ( ! pfrom - > vRecvGetData . empty ( ) ) <nl> ProcessGetData ( pfrom , chainparams . GetConsensus ( ) , connman , interruptMsgProc ) ; <nl> <nl> + if ( pfrom - > fDisconnect ) <nl> + return false ; <nl> + <nl> / / this maintains the order of responses <nl> - if ( ! pfrom - > vRecvGetData . empty ( ) ) return fOk ; <nl> + if ( ! pfrom - > vRecvGetData . 
empty ( ) ) return true ; <nl> <nl> - auto it = pfrom - > vRecvMsg . begin ( ) ; <nl> - while ( ! pfrom - > fDisconnect & & it ! = pfrom - > vRecvMsg . end ( ) ) { <nl> / / Don ' t bother if send buffer is too full to respond anyway <nl> if ( pfrom - > nSendSize > = nMaxSendBufferSize ) <nl> - break ; <nl> + return false ; <nl> <nl> - / / get next message <nl> - CNetMessage & msg = * it ; <nl> + auto it = pfrom - > vRecvMsg . begin ( ) ; <nl> + if ( it = = pfrom - > vRecvMsg . end ( ) ) <nl> + return false ; <nl> <nl> / / end , if an incomplete message is found <nl> - if ( ! msg . complete ( ) ) <nl> - break ; <nl> + if ( ! it - > complete ( ) ) <nl> + return false ; <nl> + <nl> + / / get next message <nl> + CNetMessage msg = std : : move ( * it ) ; <nl> <nl> / / at this point , any failure means we can delete the current message <nl> - it + + ; <nl> + pfrom - > vRecvMsg . erase ( pfrom - > vRecvMsg . begin ( ) ) ; <nl> + <nl> + fMoreWork = ! pfrom - > vRecvMsg . empty ( ) & & pfrom - > vRecvMsg . front ( ) . complete ( ) ; <nl> <nl> msg . SetVersion ( pfrom - > GetRecvVersion ( ) ) ; <nl> / / Scan for message start <nl> if ( memcmp ( msg . hdr . pchMessageStart , chainparams . MessageStart ( ) , CMessageHeader : : MESSAGE_START_SIZE ) ! = 0 ) { <nl> LogPrintf ( " PROCESSMESSAGE : INVALID MESSAGESTART % s peer = % d \ n " , SanitizeString ( msg . hdr . GetCommand ( ) ) , pfrom - > id ) ; <nl> - fOk = false ; <nl> - break ; <nl> + pfrom - > fDisconnect = true ; <nl> + return false ; <nl> } <nl> <nl> / / Read header <nl> bool ProcessMessages ( CNode * pfrom , CConnman & connman , std : : atomic < bool > & interru <nl> if ( ! hdr . IsValid ( chainparams . MessageStart ( ) ) ) <nl> { <nl> LogPrintf ( " PROCESSMESSAGE : ERRORS IN HEADER % s peer = % d \ n " , SanitizeString ( hdr . GetCommand ( ) ) , pfrom - > id ) ; <nl> - continue ; <nl> + return fMoreWork ; <nl> } <nl> string strCommand = hdr . GetCommand ( ) ; <nl> <nl> bool ProcessMessages ( CNode * pfrom , CConnman & connman , std : : atomic < bool > & interru <nl> SanitizeString ( strCommand ) , nMessageSize , <nl> HexStr ( hash . begin ( ) , hash . begin ( ) + CMessageHeader : : CHECKSUM_SIZE ) , <nl> HexStr ( hdr . pchChecksum , hdr . pchChecksum + CMessageHeader : : CHECKSUM_SIZE ) ) ; <nl> - continue ; <nl> + return fMoreWork ; <nl> } <nl> <nl> / / Process message <nl> bool ProcessMessages ( CNode * pfrom , CConnman & connman , std : : atomic < bool > & interru <nl> { <nl> fRet = ProcessMessage ( pfrom , strCommand , vRecv , msg . nTime , chainparams , connman , interruptMsgProc ) ; <nl> if ( interruptMsgProc ) <nl> - return true ; <nl> + return false ; <nl> + if ( ! pfrom - > vRecvGetData . empty ( ) ) <nl> + fMoreWork = true ; <nl> } <nl> catch ( const std : : ios_base : : failure & e ) <nl> { <nl> bool ProcessMessages ( CNode * pfrom , CConnman & connman , std : : atomic < bool > & interru <nl> if ( ! fRet ) <nl> LogPrintf ( " % s ( % s , % u bytes ) FAILED peer = % d \ n " , __func__ , SanitizeString ( strCommand ) , nMessageSize , pfrom - > id ) ; <nl> <nl> - break ; <nl> - } <nl> - <nl> - / / In case the connection got shut down , its receive buffer was wiped <nl> - if ( ! pfrom - > fDisconnect ) <nl> - pfrom - > vRecvMsg . erase ( pfrom - > vRecvMsg . begin ( ) , it ) ; <nl> - <nl> - return fOk ; <nl> + return fMoreWork ; <nl> } <nl> <nl> class CompareInvMempoolOrder <nl> mmm a / src / net_processing . h <nl> ppp b / src / net_processing . 
h <nl> bool ProcessMessages ( CNode * pfrom , CConnman & connman , std : : atomic < bool > & interru <nl> * @ param [ in ] pto The node which we are sending messages to . <nl> * @ param [ in ] connman The connection manager for that node . <nl> * @ param [ in ] interrupt Interrupt condition for processing threads <nl> + * @ return True if there is more work to be done <nl> * / <nl> bool SendMessages ( CNode * pto , CConnman & connman , std : : atomic < bool > & interrupt ) ; <nl> <nl>
net : rework the way that the message handler sleeps
bitcoin/bitcoin
c5a8b1b946b1ab0bb82bd4270b2a40f5731abcff
2017-01-13T04:05:24Z
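Editor's note on the record above: the rework replaces the old "peek at the receive queue and maybe sleep" logic with a wake flag ( fMsgProcWake ) set under mutexMsgProc plus a predicated wait_until, so a WakeMessageHandler() call that lands while the handler thread is busy can no longer be lost; ProcessMessages also changes meaning, returning "there is more work queued" instead of "connection is ok", with disconnection now signalled through pfrom->fDisconnect. The following is a minimal runnable sketch of just the wake-flag pattern, with the real loop's per-node ProcessMessages / SendMessages work stubbed out; variable names mirror the diff but everything else is simplified.

```cpp
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex mutexMsgProc;
std::condition_variable condMsgProc;
bool fMsgProcWake = false;   // guarded by mutexMsgProc

// Mirrors CConnman::WakeMessageHandler(): record the wake under the lock
// *before* notifying, so a wake that races with the handler's busy phase
// is remembered rather than lost.
void WakeMessageHandler() {
    {
        std::lock_guard<std::mutex> lock(mutexMsgProc);
        fMsgProcWake = true;
    }
    condMsgProc.notify_one();
}

// Mirrors the tail of CConnman::ThreadMessageHandler(); the per-node
// message-processing work is stubbed out here.
void ThreadMessageHandler() {
    for (int i = 0; i < 3; ++i) {
        bool fMoreWork = false;  // would be ORed from ProcessMessages() results
        std::unique_lock<std::mutex> lock(mutexMsgProc);
        if (!fMoreWork) {
            // Sleep at most 100 ms, but return at once if a wake is pending.
            condMsgProc.wait_until(lock,
                std::chrono::steady_clock::now() + std::chrono::milliseconds(100),
                [] { return fMsgProcWake; });
        }
        fMsgProcWake = false;    // consume the wake
        std::cout << "handler iteration " << i << "\n";
    }
}

int main() {
    std::thread handler(ThreadMessageHandler);
    WakeMessageHandler();        // e.g. called when a socket delivers a message
    handler.join();
    return 0;
}
```

Setting the flag before notify_one and clearing it only after waking is what closes the race: even if the notification fires between the handler's work phase and its wait, the predicate sees fMsgProcWake == true and the wait returns immediately.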