| diff (string, 41-2.03M chars) | msg (string, 1-1.5k chars, nullable ⌀) | repo (string, 5-40 chars) | sha (string, 40 chars) | time (string, 20 chars) |
---|---|---|---|---|
mmm a / modules / planning / planner / lattice / lattice_planner . cc <nl> ppp b / modules / planning / planner / lattice / lattice_planner . cc <nl> void ComputeInitFrenetState ( const PathPoint & matched_point , <nl> Status LatticePlanner : : Plan ( const TrajectoryPoint & planning_start_point , <nl> Frame * frame ) { <nl> std : : size_t success_line_count = 0 ; <nl> - double priority_cost = 0 . 0 ; <nl> std : : size_t index = 0 ; <nl> for ( auto & reference_line_info : frame - > reference_line_info ( ) ) { <nl> - reference_line_info . SetPriorityCost ( priority_cost ) ; <nl> + if ( index ! = 0 ) { <nl> + reference_line_info . SetPriorityCost ( FLAGS_priority_cost_gap ) ; <nl> + } else { <nl> + reference_line_info . SetPriorityCost ( 0 . 0 ) ; <nl> + } <nl> auto status = PlanOnReferenceLine ( planning_start_point , <nl> frame , & reference_line_info ) ; <nl> <nl> Status LatticePlanner : : Plan ( const TrajectoryPoint & planning_start_point , <nl> } else { <nl> success_line_count + = 1 ; <nl> } <nl> - priority_cost + = FLAGS_priority_cost_gap * ( + + index ) ; <nl> + + + index ; <nl> } <nl> <nl> if ( success_line_count > 0 ) { <nl> | Planning : fix a bug on reference_line priority cost | ApolloAuto/apollo | fa4f86a0baa827c65e9fbac34fe4ca300993637c | 2018-02-27T04:02:24Z |
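The Apollo row above replaces an accumulating priority cost with a flat gap applied to every reference line except the first. Under the old code, `priority_cost += FLAGS_priority_cost_gap * (++index)` ran each iteration, so successive lines paid 0, gap, 3·gap, 6·gap, ... instead of one flat penalty. A minimal C++ sketch of the fixed assignment, using hypothetical stand-ins for `ReferenceLineInfo` and the `FLAGS_priority_cost_gap` gflag (names illustrative only):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical stand-in for Apollo's ReferenceLineInfo.
struct ReferenceLineInfo {
  void SetPriorityCost(double c) { priority_cost = c; }
  double priority_cost = 0.0;
};

// After the fix: the first (preferred) reference line gets zero priority
// cost; every other line gets exactly one flat gap.
void AssignPriorityCosts(std::vector<ReferenceLineInfo>& lines, double gap) {
  std::size_t index = 0;
  for (auto& line : lines) {
    line.SetPriorityCost(index != 0 ? gap : 0.0);
    ++index;
  }
}
```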
mmm a / src / AggregateFunctions / AggregateFunctionCount . cpp <nl> ppp b / src / AggregateFunctions / AggregateFunctionCount . cpp <nl> namespace DB <nl> { <nl> <nl> AggregateFunctionPtr AggregateFunctionCount : : getOwnNullAdapter ( <nl> - const AggregateFunctionPtr & , const DataTypes & types , const Array & params ) const <nl> + const AggregateFunctionPtr & , const DataTypes & types , const Array & params , const AggregateFunctionProperties & / * properties * / ) const <nl> { <nl> return std : : make_shared < AggregateFunctionCountNotNullUnary > ( types [ 0 ] , params ) ; <nl> } <nl> mmm a / src / AggregateFunctions / AggregateFunctionCount . h <nl> ppp b / src / AggregateFunctions / AggregateFunctionCount . h <nl> class AggregateFunctionCount final : public IAggregateFunctionDataHelper < Aggrega <nl> } <nl> <nl> AggregateFunctionPtr getOwnNullAdapter ( <nl> - const AggregateFunctionPtr & , const DataTypes & types , const Array & params ) const override ; <nl> + const AggregateFunctionPtr & , const DataTypes & types , const Array & params , const AggregateFunctionProperties & / * properties * / ) const override ; <nl> } ; <nl> <nl> <nl> mmm a / src / AggregateFunctions / AggregateFunctionIf . cpp <nl> ppp b / src / AggregateFunctions / AggregateFunctionIf . cpp <nl> <nl> # include < AggregateFunctions / AggregateFunctionIf . h > <nl> # include < AggregateFunctions / AggregateFunctionCombinatorFactory . h > <nl> # include " registerAggregateFunctions . h " <nl> + # include " AggregateFunctionNull . h " <nl> <nl> <nl> namespace DB <nl> namespace DB <nl> <nl> namespace ErrorCodes <nl> { <nl> + extern const int LOGICAL_ERROR ; <nl> extern const int ILLEGAL_TYPE_OF_ARGUMENT ; <nl> extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH ; <nl> } <nl> class AggregateFunctionCombinatorIf final : public IAggregateFunctionCombinator <nl> } <nl> } ; <nl> <nl> + / * * There are two cases : for single argument and variadic . <nl> + * Code for single argument is much more efficient . <nl> + * / <nl> + template < bool result_is_nullable , bool serialize_flag > <nl> + class AggregateFunctionIfNullUnary final <nl> + : public AggregateFunctionNullBase < result_is_nullable , serialize_flag , <nl> + AggregateFunctionIfNullUnary < result_is_nullable , serialize_flag > > <nl> + { <nl> + private : <nl> + size_t num_arguments ; <nl> + <nl> + using Base = AggregateFunctionNullBase < result_is_nullable , serialize_flag , <nl> + AggregateFunctionIfNullUnary < result_is_nullable , serialize_flag > > ; <nl> + public : <nl> + <nl> + String getName ( ) const override <nl> + { <nl> + return Base : : getName ( ) + " If " ; <nl> + } <nl> + <nl> + AggregateFunctionIfNullUnary ( AggregateFunctionPtr nested_function_ , const DataTypes & arguments , const Array & params ) <nl> + : Base ( std : : move ( nested_function_ ) , arguments , params ) , num_arguments ( arguments . size ( ) ) <nl> + { <nl> + if ( num_arguments = = 0 ) <nl> + throw Exception ( " Aggregate function " + getName ( ) + " require at least one argument " , <nl> + ErrorCodes : : NUMBER_OF_ARGUMENTS_DOESNT_MATCH ) ; <nl> + } <nl> + <nl> + static inline bool singleFilter ( const IColumn * * columns , size_t row_num , size_t num_arguments ) <nl> + { <nl> + const IColumn * filter_column = columns [ num_arguments - 1 ] ; <nl> + if ( const ColumnNullable * nullable_column = typeid_cast < const ColumnNullable * > ( filter_column ) ) <nl> + filter_column = nullable_column - > getNestedColumnPtr ( ) . 
get ( ) ; <nl> + <nl> + return assert_cast < const ColumnUInt8 & > ( * filter_column ) . getData ( ) [ row_num ] ; <nl> + } <nl> + <nl> + void add ( AggregateDataPtr place , const IColumn * * columns , size_t row_num , Arena * arena ) const override <nl> + { <nl> + const ColumnNullable * column = assert_cast < const ColumnNullable * > ( columns [ 0 ] ) ; <nl> + const IColumn * nested_column = & column - > getNestedColumn ( ) ; <nl> + if ( ! column - > isNullAt ( row_num ) & & singleFilter ( columns , row_num , num_arguments ) ) <nl> + { <nl> + this - > setFlag ( place ) ; <nl> + this - > nested_function - > add ( this - > nestedPlace ( place ) , & nested_column , row_num , arena ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + template < bool result_is_nullable , bool serialize_flag , bool null_is_skipped > <nl> + class AggregateFunctionIfNullVariadic final <nl> + : public AggregateFunctionNullBase < result_is_nullable , serialize_flag , <nl> + AggregateFunctionIfNullVariadic < result_is_nullable , serialize_flag , null_is_skipped > > <nl> + { <nl> + public : <nl> + <nl> + String getName ( ) const override <nl> + { <nl> + return Base : : getName ( ) + " If " ; <nl> + } <nl> + <nl> + AggregateFunctionIfNullVariadic ( AggregateFunctionPtr nested_function_ , const DataTypes & arguments , const Array & params ) <nl> + : Base ( std : : move ( nested_function_ ) , arguments , params ) , number_of_arguments ( arguments . size ( ) ) <nl> + { <nl> + if ( number_of_arguments = = 1 ) <nl> + throw Exception ( " Logical error : single argument is passed to AggregateFunctionIfNullVariadic " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + if ( number_of_arguments > MAX_ARGS ) <nl> + throw Exception ( " Maximum number of arguments for aggregate function with Nullable types is " + toString ( size_t ( MAX_ARGS ) ) , <nl> + ErrorCodes : : NUMBER_OF_ARGUMENTS_DOESNT_MATCH ) ; <nl> + <nl> + for ( size_t i = 0 ; i < number_of_arguments ; + + i ) <nl> + is_nullable [ i ] = arguments [ i ] - > isNullable ( ) ; <nl> + } <nl> + <nl> + static inline bool singleFilter ( const IColumn * * columns , size_t row_num , size_t num_arguments ) <nl> + { <nl> + return assert_cast < const ColumnUInt8 & > ( * columns [ num_arguments - 1 ] ) . getData ( ) [ row_num ] ; <nl> + } <nl> + <nl> + void add ( AggregateDataPtr place , const IColumn * * columns , size_t row_num , Arena * arena ) const override <nl> + { <nl> + / / / This container stores the columns we really pass to the nested function . <nl> + const IColumn * nested_columns [ number_of_arguments ] ; <nl> + <nl> + for ( size_t i = 0 ; i < number_of_arguments ; + + i ) <nl> + { <nl> + if ( is_nullable [ i ] ) <nl> + { <nl> + const ColumnNullable & nullable_col = assert_cast < const ColumnNullable & > ( * columns [ i ] ) ; <nl> + if ( null_is_skipped & & nullable_col . isNullAt ( row_num ) ) <nl> + { <nl> + / / / If at least one column has a null value in the current row , <nl> + / / / we don ' t process this row . <nl> + return ; <nl> + } <nl> + nested_columns [ i ] = & nullable_col . 
getNestedColumn ( ) ; <nl> + } <nl> + else <nl> + nested_columns [ i ] = columns [ i ] ; <nl> + } <nl> + <nl> + if ( singleFilter ( nested_columns , row_num , number_of_arguments ) ) <nl> + { <nl> + this - > setFlag ( place ) ; <nl> + this - > nested_function - > add ( this - > nestedPlace ( place ) , nested_columns , row_num , arena ) ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + using Base = AggregateFunctionNullBase < result_is_nullable , serialize_flag , <nl> + AggregateFunctionIfNullVariadic < result_is_nullable , serialize_flag , null_is_skipped > > ; <nl> + <nl> + enum { MAX_ARGS = 8 } ; <nl> + size_t number_of_arguments = 0 ; <nl> + std : : array < char , MAX_ARGS > is_nullable ; / / / Plain array is better than std : : vector due to one indirection less . <nl> + } ; <nl> + <nl> + <nl> + AggregateFunctionPtr AggregateFunctionIf : : getOwnNullAdapter ( <nl> + const AggregateFunctionPtr & nested_function , const DataTypes & arguments , <nl> + const Array & params , const AggregateFunctionProperties & properties ) const <nl> + { <nl> + bool return_type_is_nullable = ! properties . returns_default_when_only_null & & getReturnType ( ) - > canBeInsideNullable ( ) ; <nl> + size_t nullable_size = std : : count_if ( arguments . begin ( ) , arguments . end ( ) , [ ] ( const auto & element ) { return element - > isNullable ( ) ; } ) ; <nl> + return_type_is_nullable & = nullable_size ! = 1 | | ! arguments . back ( ) - > isNullable ( ) ; / / / If only condition is nullable . we should non - nullable type . <nl> + bool serialize_flag = return_type_is_nullable | | properties . returns_default_when_only_null ; <nl> + <nl> + if ( arguments . size ( ) < = 2 & & arguments . front ( ) - > isNullable ( ) ) <nl> + { <nl> + if ( return_type_is_nullable ) <nl> + { <nl> + return std : : make_shared < AggregateFunctionIfNullUnary < true , true > > ( nested_func , arguments , params ) ; <nl> + } <nl> + else <nl> + { <nl> + if ( serialize_flag ) <nl> + return std : : make_shared < AggregateFunctionIfNullUnary < false , true > > ( nested_func , arguments , params ) ; <nl> + else <nl> + return std : : make_shared < AggregateFunctionIfNullUnary < false , false > > ( nested_func , arguments , params ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + if ( return_type_is_nullable ) <nl> + { <nl> + return std : : make_shared < AggregateFunctionIfNullVariadic < true , true , true > > ( nested_function , arguments , params ) ; <nl> + } <nl> + else <nl> + { <nl> + if ( serialize_flag ) <nl> + return std : : make_shared < AggregateFunctionIfNullVariadic < false , true , true > > ( nested_function , arguments , params ) ; <nl> + else <nl> + return std : : make_shared < AggregateFunctionIfNullVariadic < false , true , false > > ( nested_function , arguments , params ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> void registerAggregateFunctionCombinatorIf ( AggregateFunctionCombinatorFactory & factory ) <nl> { <nl> factory . registerCombinator ( std : : make_shared < AggregateFunctionCombinatorIf > ( ) ) ; <nl> mmm a / src / AggregateFunctions / AggregateFunctionIf . h <nl> ppp b / src / AggregateFunctions / AggregateFunctionIf . 
h <nl> class AggregateFunctionIf final : public IAggregateFunctionHelper < AggregateFunct <nl> { <nl> return nested_func - > isState ( ) ; <nl> } <nl> + <nl> + AggregateFunctionPtr getOwnNullAdapter ( <nl> + const AggregateFunctionPtr & nested_function , const DataTypes & arguments , <nl> + const Array & params , const AggregateFunctionProperties & properties ) const override ; <nl> } ; <nl> <nl> } <nl> mmm a / src / AggregateFunctions / AggregateFunctionNull . cpp <nl> ppp b / src / AggregateFunctions / AggregateFunctionNull . cpp <nl> class AggregateFunctionCombinatorNull final : public IAggregateFunctionCombinato <nl> <nl> assert ( nested_function ) ; <nl> <nl> - if ( auto adapter = nested_function - > getOwnNullAdapter ( nested_function , arguments , params ) ) <nl> + if ( auto adapter = nested_function - > getOwnNullAdapter ( nested_function , arguments , params , properties ) ) <nl> return adapter ; <nl> <nl> / / / If applied to aggregate function with - State combinator , we apply - Null combinator to it ' s nested_function instead of itself . <nl> mmm a / src / AggregateFunctions / AggregateFunctionWindowFunnel . h <nl> ppp b / src / AggregateFunctions / AggregateFunctionWindowFunnel . h <nl> class AggregateFunctionWindowFunnel final <nl> } <nl> <nl> AggregateFunctionPtr getOwnNullAdapter ( <nl> - const AggregateFunctionPtr & nested_function , const DataTypes & arguments , const Array & params ) const override <nl> + const AggregateFunctionPtr & nested_function , const DataTypes & arguments , const Array & params , <nl> + const AggregateFunctionProperties & / * properties * / ) const override <nl> { <nl> return std : : make_shared < AggregateFunctionNullVariadic < false , false , false > > ( nested_function , arguments , params ) ; <nl> } <nl> mmm a / src / AggregateFunctions / IAggregateFunction . h <nl> ppp b / src / AggregateFunctions / IAggregateFunction . h <nl> using ConstAggregateDataPtr = const char * ; <nl> <nl> class IAggregateFunction ; <nl> using AggregateFunctionPtr = std : : shared_ptr < IAggregateFunction > ; <nl> + struct AggregateFunctionProperties ; <nl> <nl> / * * Aggregate functions interface . <nl> * Instances of classes with this interface do not contain the data itself for aggregation , <nl> class IAggregateFunction <nl> * arguments and params are for nested_function . <nl> * / <nl> virtual AggregateFunctionPtr getOwnNullAdapter ( <nl> - const AggregateFunctionPtr & / * nested_function * / , const DataTypes & / * arguments * / , const Array & / * params * / ) const <nl> + const AggregateFunctionPtr & / * nested_function * / , const DataTypes & / * arguments * / , <nl> + const Array & / * params * / , const AggregateFunctionProperties & / * properties * / ) const <nl> { <nl> return nullptr ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 77f38b722ce <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01455_nullable_type_with_if_agg_combinator . reference <nl> <nl> + \ N Nullable ( UInt8 ) <nl> + \ N Nullable ( UInt8 ) <nl> + 0 UInt8 <nl> new file mode 100644 <nl> index 00000000000 . . 852660117f5 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01455_nullable_type_with_if_agg_combinator . 
sql <nl> <nl> + - - Value nullable <nl> + SELECT anyIf ( CAST ( number , ' Nullable ( UInt8 ) ' ) , number = 3 ) AS a , toTypeName ( a ) FROM numbers ( 2 ) ; <nl> + - - Value and condition nullable <nl> + SELECT anyIf ( number , number = 3 ) AS a , toTypeName ( a ) FROM ( SELECT CAST ( number , ' Nullable ( UInt8 ) ' ) AS number FROM numbers ( 2 ) ) ; <nl> + - - Condition nullable <nl> + SELECT anyIf ( CAST ( number , ' UInt8 ' ) , number = 3 ) AS a , toTypeName ( a ) FROM ( SELECT CAST ( number , ' Nullable ( UInt8 ) ' ) AS number FROM numbers ( 2 ) ) ; <nl> | Merge pull request from zhang2014 / fix / agg_combinator | ClickHouse/ClickHouse | f6f8dc9b8a8c536b005e3ec95e4437bed5967b18 | 2020-08-26T20:47:29Z |
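The ClickHouse row adds an `AggregateFunctionProperties` parameter to `getOwnNullAdapter` and gives the `-If` combinator its own null adapter, selecting a template instantiation from two flags computed in the patch. A compressed sketch of that dispatch, with hypothetical stand-in types in place of the real adapter classes:

```cpp
#include <memory>

struct IAdapter { virtual ~IAdapter() = default; };

// Stand-in for AggregateFunctionIfNullUnary<result_is_nullable, serialize_flag>.
template <bool result_is_nullable, bool serialize_flag>
struct IfNullUnary : IAdapter {};

// Mirrors the branch structure of getOwnNullAdapter: a nullable result always
// serializes the "seen a value" flag; a non-nullable result still needs the
// flag when the function returns a default value for only-null inputs.
std::unique_ptr<IAdapter> MakeUnaryAdapter(bool return_type_is_nullable,
                                           bool serialize_flag) {
  if (return_type_is_nullable)
    return std::make_unique<IfNullUnary<true, true>>();
  if (serialize_flag)
    return std::make_unique<IfNullUnary<false, true>>();
  return std::make_unique<IfNullUnary<false, false>>();
}
```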
mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> ERROR ( availability_query_repeated_platform , sema_avail , none , <nl> ERROR ( availability_query_required_for_platform , sema_avail , none , <nl> " condition required for target platform ' % 0 ' " , ( StringRef ) ) <nl> <nl> - WARNING ( availability_query_useless , sema_avail , none , <nl> - " unnecessary check for ' % 0 ' ; guard will always pass " , ( StringRef ) ) <nl> + WARNING ( availability_query_useless_min_deployment , sema_avail , none , <nl> + " unnecessary check for ' % 0 ' ; minimum deployment target ensures guard " <nl> + " will always pass " , ( StringRef ) ) <nl> + <nl> + WARNING ( availability_query_useless_enclosing_scope , sema_avail , none , <nl> + " unnecessary check for ' % 0 ' ; enclosing scope ensures guard " <nl> + " will always pass " , ( StringRef ) ) <nl> + <nl> + NOTE ( availability_query_useless_enclosing_scope_here , sema_avail , none , <nl> + " enclosing scope here " , ( ) ) <nl> <nl> ERROR ( availability_query_outside_if_stmt_guard , sema_avail , none , <nl> " check can only be used as guard of if statement " , ( ) ) <nl> mmm a / include / swift / AST / TypeRefinementContext . h <nl> ppp b / include / swift / AST / TypeRefinementContext . h <nl> namespace swift { <nl> / / / to refine . <nl> class TypeRefinementContext { <nl> <nl> + public : <nl> / / / Describes the reason a type refinement context was introduced . <nl> enum class Reason { <nl> / / / The root refinement context . <nl> class TypeRefinementContext { <nl> <nl> using IntroNode = llvm : : PointerUnion3 < SourceFile * , Decl * , IfStmt * > ; <nl> <nl> + private : <nl> / / / The AST node that introduced this context . <nl> IntroNode Node ; <nl> <nl> class TypeRefinementContext { <nl> / / / but its source range will cover the Then branch . <nl> IntroNode getIntroductionNode ( ) const { return Node ; } <nl> <nl> + / / / Returns the location of the node that introduced this refinement context <nl> + / / / or an invalid location if the context reflects the minimum deployment <nl> + / / target . <nl> + SourceLoc getIntroductionLoc ( ) const ; <nl> + <nl> / / / Returns the source range on which this context refines types . <nl> SourceRange getSourceRange ( ) const { return SrcRange ; } <nl> <nl> mmm a / lib / AST / TypeRefinementContext . cpp <nl> ppp b / lib / AST / TypeRefinementContext . cpp <nl> void TypeRefinementContext : : dump ( raw_ostream & OS , SourceManager & SrcMgr ) const { <nl> OS < < ' \ n ' ; <nl> } <nl> <nl> + SourceLoc TypeRefinementContext : : getIntroductionLoc ( ) const { <nl> + switch ( getReason ( ) ) { <nl> + case Reason : : Decl : <nl> + return Node . get < Decl * > ( ) - > getLoc ( ) ; <nl> + <nl> + case Reason : : IfStmtThenBranch : <nl> + return Node . get < IfStmt * > ( ) - > getIfLoc ( ) ; <nl> + <nl> + case Reason : : Root : <nl> + return SourceLoc ( ) ; <nl> + } <nl> + } <nl> + <nl> void TypeRefinementContext : : print ( raw_ostream & OS , SourceManager & SrcMgr , <nl> unsigned Indent ) const { <nl> OS . indent ( Indent ) ; <nl> mmm a / lib / Sema / TypeChecker . cpp <nl> ppp b / lib / Sema / TypeChecker . cpp <nl> class TypeRefinementContextBuilder : private ASTWalker { <nl> / / / availability query . 
<nl> TypeRefinementContext * refinedThenContextForQuery ( AvailabilityQueryExpr * E , <nl> IfStmt * IS ) { <nl> + TypeRefinementContext * CurTRC = getCurrentTRC ( ) ; <nl> + <nl> VersionConstraintAvailabilitySpec * Spec = bestActiveSpecForQuery ( E ) ; <nl> if ( ! Spec ) { <nl> / / We couldn ' t find an appropriate spec for the current platform , <nl> class TypeRefinementContextBuilder : private ASTWalker { <nl> AC . Diags . diagnose ( E - > getLoc ( ) , <nl> diag : : availability_query_required_for_platform , <nl> platformString ( targetPlatform ( AC . LangOpts ) ) ) ; <nl> - return getCurrentTRC ( ) ; <nl> + return CurTRC ; <nl> } <nl> <nl> <nl> class TypeRefinementContextBuilder : private ASTWalker { <nl> / / If the version range for the current TRC is completely contained in <nl> / / the range for the spec , then the query can never be false , so the <nl> / / spec is useless . If so , report this . <nl> - if ( getCurrentTRC ( ) - > getPotentialVersions ( ) . isContainedIn ( range ) ) { <nl> - AC . Diags . diagnose ( E - > getLoc ( ) , <nl> - diag : : availability_query_useless , <nl> - platformString ( targetPlatform ( AC . LangOpts ) ) ) ; <nl> + if ( CurTRC - > getPotentialVersions ( ) . isContainedIn ( range ) ) { <nl> + DiagnosticEngine & Diags = AC . Diags ; <nl> + if ( CurTRC - > getReason ( ) = = TypeRefinementContext : : Reason : : Root ) { <nl> + Diags . diagnose ( E - > getLoc ( ) , <nl> + diag : : availability_query_useless_min_deployment , <nl> + platformString ( targetPlatform ( AC . LangOpts ) ) ) ; <nl> + } else { <nl> + Diags . diagnose ( E - > getLoc ( ) , <nl> + diag : : availability_query_useless_enclosing_scope , <nl> + platformString ( targetPlatform ( AC . LangOpts ) ) ) ; <nl> + Diags . diagnose ( CurTRC - > getIntroductionLoc ( ) , <nl> + diag : : availability_query_useless_enclosing_scope_here ) ; <nl> + <nl> + } <nl> } <nl> <nl> return TypeRefinementContext : : createForIfStmtThen ( AC , IS , getCurrentTRC ( ) , <nl> mmm a / test / Sema / availability_versions . swift <nl> ppp b / test / Sema / availability_versions . swift <nl> class ClassAvailableOn10_9AdoptingProtocolAvailableOn10_10 : ProtocolAvailableOn <nl> func functionWithDefaultAvailabilityAndUselessCheck ( ) { <nl> / / Default availability reflects minimum deployment : 10 . 9 and up <nl> <nl> - if # os ( OSX > = 10 . 9 ) { / / expected - warning { { unnecessary check for ' OSX ' ; guard will always pass } } <nl> + if # os ( OSX > = 10 . 9 ) { / / expected - warning { { unnecessary check for ' OSX ' ; minimum deployment target ensures guard will always pass } } <nl> let _ = globalAvailableOn10_9 <nl> } <nl> <nl> - if # os ( OSX > = 10 . 10 ) { <nl> + if # os ( OSX > = 10 . 10 ) { / / expected - note { { enclosing scope here } } <nl> let _ = globalAvailableOn10_10 <nl> <nl> - if # os ( OSX > = 10 . 10 ) { / / expected - warning { { unnecessary check for ' OSX ' ; guard will always pass } } <nl> + if # os ( OSX > = 10 . 10 ) { / / expected - warning { { unnecessary check for ' OSX ' ; enclosing scope ensures guard will always pass } } <nl> let _ = globalAvailableOn10_10 <nl> } <nl> } <nl> } <nl> <nl> @ availability ( OSX , introduced = 10 . 10 ) <nl> - func functionWithSpecifiedAvailabilityAndUselessCheck ( ) { <nl> - if # os ( OSX > = 10 . 9 ) { / / expected - warning { { unnecessary check for ' OSX ' ; guard will always pass } } <nl> + func functionWithSpecifiedAvailabilityAndUselessCheck ( ) { / / expected - note 2 { { enclosing scope here } } <nl> + if # os ( OSX > = 10 . 
9 ) { / / expected - warning { { unnecessary check for ' OSX ' ; enclosing scope ensures guard will always pass } } <nl> let _ = globalAvailableOn10_9 <nl> } <nl> <nl> - if # os ( OSX > = 10 . 10 ) { / / expected - warning { { unnecessary check for ' OSX ' ; guard will always pass } } <nl> + if # os ( OSX > = 10 . 10 ) { / / expected - warning { { unnecessary check for ' OSX ' ; enclosing scope ensures guard will always pass } } <nl> let _ = globalAvailableOn10_10 <nl> } <nl> } <nl> | [ Sema ] Change diagnostic of useless # os ( ) check to indicate why it is useless . | apple/swift | d562acb2230bc7432996bf4cf8568524a4b3fcc8 | 2014-10-14T18:38:48Z |
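The Swift row splits a single "guard will always pass" warning into two diagnostics chosen by *why* the enclosing `TypeRefinementContext` exists, and attaches a note at the scope's introduction location. The selection logic, restated as a self-contained C++ sketch (the `Reason` values come from the diff; the print calls are stand-ins for the compiler's diagnostic engine):

```cpp
#include <cstdio>

// Reasons a refinement context exists, as in the diff: the Root context
// reflects the minimum deployment target; the others are real scopes.
enum class Reason { Root, Decl, IfStmtThenBranch };

void DiagnoseUselessAvailabilityQuery(Reason enclosing, const char* platform) {
  if (enclosing == Reason::Root) {
    std::printf("warning: unnecessary check for '%s'; minimum deployment "
                "target ensures guard will always pass\n", platform);
  } else {
    std::printf("warning: unnecessary check for '%s'; enclosing scope "
                "ensures guard will always pass\n", platform);
    // The real patch anchors this note at CurTRC->getIntroductionLoc().
    std::printf("note: enclosing scope here\n");
  }
}
```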
mmm a / src / core / hle / service / ldr / ldr . cpp <nl> ppp b / src / core / hle / service / ldr / ldr . cpp <nl> class RelocatableObject final : public ServiceFramework < RelocatableObject > { <nl> } <nl> <nl> static bool IsValidNRO ( const NROHeader & header , u64 nro_size , u64 bss_size ) { <nl> + return header . magic = = Common : : MakeMagic ( ' N ' , ' R ' , ' O ' , ' 0 ' ) & & <nl> + header . nro_size = = nro_size & & header . bss_size = = bss_size & & <nl> <nl> - const bool valid_magic = header . magic = = Common : : MakeMagic ( ' N ' , ' R ' , ' O ' , ' 0 ' ) ; <nl> + header . segment_headers [ RO_INDEX ] . memory_offset = = <nl> + header . segment_headers [ TEXT_INDEX ] . memory_offset + <nl> + header . segment_headers [ TEXT_INDEX ] . memory_size & & <nl> <nl> - const bool valid_nro_size = header . nro_size = = nro_size ; <nl> + header . segment_headers [ DATA_INDEX ] . memory_offset = = <nl> + header . segment_headers [ RO_INDEX ] . memory_offset + <nl> + header . segment_headers [ RO_INDEX ] . memory_size & & <nl> <nl> - const bool valid_bss_size = header . bss_size = = bss_size ; <nl> + nro_size = = header . segment_headers [ DATA_INDEX ] . memory_offset + <nl> + header . segment_headers [ DATA_INDEX ] . memory_size & & <nl> <nl> - const bool valid_ro_offset = header . segment_headers [ RO_INDEX ] . memory_offset = = <nl> - header . segment_headers [ TEXT_INDEX ] . memory_offset + <nl> - header . segment_headers [ TEXT_INDEX ] . memory_size ; <nl> - <nl> - const bool valid_data_offset = header . segment_headers [ DATA_INDEX ] . memory_offset = = <nl> - header . segment_headers [ RO_INDEX ] . memory_offset + <nl> - header . segment_headers [ RO_INDEX ] . memory_size ; <nl> - <nl> - const bool valid_nro_calculated_size = <nl> - nro_size = = header . segment_headers [ DATA_INDEX ] . memory_offset + <nl> - header . segment_headers [ DATA_INDEX ] . memory_size ; <nl> - <nl> - const bool text_aligned = <nl> - Common : : Is4KBAligned ( header . segment_headers [ TEXT_INDEX ] . memory_size ) ; <nl> - <nl> - const bool ro_aligned = Common : : Is4KBAligned ( header . segment_headers [ RO_INDEX ] . memory_size ) ; <nl> - <nl> - const bool data_aligned = <nl> - Common : : Is4KBAligned ( header . segment_headers [ DATA_INDEX ] . memory_size ) ; <nl> - <nl> - return valid_magic & & valid_nro_size & & valid_bss_size & & valid_ro_offset & & <nl> - valid_data_offset & & valid_nro_calculated_size & & text_aligned & & ro_aligned & & <nl> - data_aligned ; <nl> + Common : : Is4KBAligned ( header . segment_headers [ TEXT_INDEX ] . memory_size ) & & <nl> + Common : : Is4KBAligned ( header . segment_headers [ RO_INDEX ] . memory_size ) & & <nl> + Common : : Is4KBAligned ( header . segment_headers [ DATA_INDEX ] . memory_size ) ; <nl> } <nl> Core : : System & system ; <nl> } ; <nl> | Revert IsValidNRO refactor but make it more readable | yuzu-emu/yuzu | c0d61620506ef4d70b1aa60bc9961f08ce9a939f | 2020-06-16T18:24:58Z |
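The yuzu row reverts a set of named booleans back to one chained expression. The checks themselves are unchanged: valid magic, declared sizes match, segments laid out contiguously (text, then ro, then data) and exactly filling the image, and every segment size page-aligned. A simplified, compilable restatement of those same checks (the struct layout below is a stand-in, not the real NRO header):

```cpp
#include <cstdint>

struct SegmentHeader {
  std::uint32_t memory_offset;
  std::uint32_t memory_size;
};

struct NROHeader {
  std::uint32_t magic;     // 'NRO0' little-endian
  std::uint32_t nro_size;
  std::uint32_t bss_size;
  SegmentHeader text, ro, data;
};

constexpr std::uint32_t kNRO0 = 0x304F524E;  // MakeMagic('N','R','O','0')
constexpr bool Is4KBAligned(std::uint32_t v) { return (v & 0xFFFu) == 0; }

bool IsValidNRO(const NROHeader& h, std::uint32_t nro_size,
                std::uint32_t bss_size) {
  return h.magic == kNRO0 && h.nro_size == nro_size &&
         h.bss_size == bss_size &&
         h.ro.memory_offset == h.text.memory_offset + h.text.memory_size &&
         h.data.memory_offset == h.ro.memory_offset + h.ro.memory_size &&
         nro_size == h.data.memory_offset + h.data.memory_size &&
         Is4KBAligned(h.text.memory_size) && Is4KBAligned(h.ro.memory_size) &&
         Is4KBAligned(h.data.memory_size);
}
```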
mmm a / tests / js / server / aql / aql - arithmetic . js <nl> ppp b / tests / js / server / aql / aql - arithmetic . js <nl> function ahuacatlArithmeticTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> testConstAttributeAccess : function ( ) { <nl> var actual = getQueryResults ( " LET it = { a : 4 , b : 5 , c : 6 } RETURN [ it . a * 3 , it . b * 4 , it . c * 5 ] " ) ; <nl> assertEqual ( [ [ 4 * 3 , 5 * 4 , 6 * 5 ] ] , actual ) ; <nl> mmm a / tests / js / server / aql / aql - array - access . js <nl> ppp b / tests / js / server / aql / aql - array - access . js <nl> function arrayAccessTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test non - array access <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - attribute - access . js <nl> ppp b / tests / js / server / aql / aql - attribute - access . 
js <nl> function attributeAccessTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test direct access <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - bind . js <nl> ppp b / tests / js / server / aql / aql - bind . js <nl> function ahuacatlBindTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlNumbers " ) ; <nl> numbers = internal . db . _create ( " UnitTestsAhuacatlNumbers " ) ; <nl> <nl> function ahuacatlBindTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlNumbers " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - complex . js <nl> ppp b / tests / js / server / aql / aql - complex . js <nl> function ahuacatlComplexTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlNumbers " ) ; <nl> numbers = internal . db . _create ( " UnitTestsAhuacatlNumbers " ) ; <nl> <nl> function ahuacatlComplexTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlNumbers " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - distinct . js <nl> ppp b / tests / js / server / aql / aql - distinct . js <nl> function ahuacatlDistinct ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . 
_create ( " UnitTestsCollection " ) ; <nl> <nl> function ahuacatlDistinct ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> function ahuacatlDistinct ( ) { <nl> try { <nl> AQL_EXECUTE ( query ) ; <nl> fail ( ) ; <nl> - } <nl> - catch ( e ) { <nl> + } catch ( e ) { <nl> assertEqual ( errors . ERROR_QUERY_PARSE . code , e . errorNum ) ; <nl> } <nl> } ) ; <nl> function ahuacatlCollect ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function ahuacatlCollect ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - dynamic - attributes . js <nl> ppp b / tests / js / server / aql / aql - dynamic - attributes . js <nl> function ahuacatlDynamicAttributesTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test dynamic with non - string values <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - edges - cluster . js <nl> ppp b / tests / js / server / aql / aql - edges - cluster . js <nl> function ahuacatlQueryEdgesTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlUsers " ) ; <nl> internal . db . _drop ( " UnitTestsAhuacatlUserRelations " ) ; <nl> <nl> function ahuacatlQueryEdgesTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlUsers " ) ; <nl> internal . db . _drop ( " UnitTestsAhuacatlUserRelations " ) ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - edges - noncluster . js <nl> ppp b / tests / js / server / aql / aql - edges - noncluster . 
js <nl> function ahuacatlQueryEdgesTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlUsers " ) ; <nl> internal . db . _drop ( " UnitTestsAhuacatlUserRelations " ) ; <nl> <nl> function ahuacatlQueryEdgesTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlUsers " ) ; <nl> internal . db . _drop ( " UnitTestsAhuacatlUserRelations " ) ; <nl> } , <nl> function ahuacatlQueryEdgesTestSuite ( ) { <nl> ] , AQL_EXECUTE ( query , bindParams ) . json ) ; <nl> } <nl> <nl> - <nl> - <nl> } ; <nl> } <nl> <nl> mmm a / tests / js / server / aql / aql - escaping . js <nl> ppp b / tests / js / server / aql / aql - escaping . js <nl> var getQueryResults = helper . getQueryResults ; <nl> function ahuacatlEscapingTestSuite ( ) { <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test comment length <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - explain - cluster . js <nl> ppp b / tests / js / server / aql / aql - explain - cluster . js <nl> function explainSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> c = db . _create ( cn ) ; <nl> } , <nl> function explainSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> } , <nl> <nl> function explainSuite ( ) { <nl> try { <nl> actual = AQL_EXPLAIN ( query ) ; <nl> fail ( ) ; <nl> - } <nl> - catch ( err ) { <nl> + } catch ( err ) { <nl> assertEqual ( err . errorNum , errors . 
ERROR_QUERY_BIND_PARAMETER_MISSING . code ) ; <nl> } <nl> } , <nl> mmm a / tests / js / server / aql / aql - explain - noncluster . js <nl> ppp b / tests / js / server / aql / aql - explain - noncluster . js <nl> function explainSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> c = db . _create ( cn ) ; <nl> } , <nl> function explainSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> } , <nl> <nl> function explainSuite ( ) { <nl> try { <nl> actual = AQL_EXPLAIN ( query ) ; <nl> fail ( ) ; <nl> - } <nl> - catch ( err ) { <nl> + } catch ( err ) { <nl> assertEqual ( err . errorNum , errors . ERROR_QUERY_BIND_PARAMETER_MISSING . code ) ; <nl> } <nl> } , <nl> mmm a / tests / js / server / aql / aql - fail - on - warning . js <nl> ppp b / tests / js / server / aql / aql - fail - on - warning . js <nl> function ahuacatlFailOnWarningTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - fullcount . js <nl> ppp b / tests / js / server / aql / aql - fullcount . js <nl> function optimizerFullcountTestSuite ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerFullcountTestSuite ( ) { <nl> c . save ( { values : [ " baz " ] } ) ; <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - functions - string . js <nl> ppp b / tests / js / server / aql / aql - functions - string . js <nl> var assertQueryWarningAndNull = helper . 
assertQueryWarningAndNull ; <nl> function ahuacatlStringFunctionsTestSuite ( ) { <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test tobase64 <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - hash - cluster . js <nl> ppp b / tests / js / server / aql / aql - hash - cluster . js <nl> function ahuacatlHashTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlHash " ) ; <nl> <nl> hash = internal . db . _create ( " UnitTestsAhuacatlHash " ) ; <nl> function ahuacatlHashTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlHash " ) ; <nl> hash = null ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - hash - noncluster . js <nl> ppp b / tests / js / server / aql / aql - hash - noncluster . js <nl> function ahuacatlHashTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlHash " ) ; <nl> <nl> hash = internal . db . _create ( " UnitTestsAhuacatlHash " ) ; <nl> function ahuacatlHashTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlHash " ) ; <nl> hash = null ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - index - hints . js <nl> ppp b / tests / js / server / aql / aql - index - hints . 
js <nl> function ahuacatlSkiplistOverlappingTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( cn ) ; <nl> collection = internal . db . _create ( cn ) ; <nl> <nl> function ahuacatlSkiplistOverlappingTestSuite ( ) { <nl> alternateSortingIndex = ' skip_a_b ' ; <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( cn ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - is - in - polygon . js <nl> ppp b / tests / js / server / aql / aql - is - in - polygon . js <nl> function isInPolygonSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - <nl> - <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test WITHIN_RECTANGLE as result <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - join - cluster . js <nl> ppp b / tests / js / server / aql / aql - join - cluster . js <nl> function ahuacatlClusterJoinKeySuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> c1 = db . _create ( cn1 , { numberOfShards : 5 } ) ; <nl> function ahuacatlClusterJoinKeySuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> c1 = null ; <nl> function ahuacatlClusterJoinNonKeySuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> c1 = db . 
_create ( cn1 , { numberOfShards : 5 , shardKeys : [ " value " ] } ) ; <nl> function ahuacatlClusterJoinNonKeySuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> c1 = null ; <nl> mmm a / tests / js / server / aql / aql - logical . js <nl> ppp b / tests / js / server / aql / aql - logical . js <nl> function ahuacatlLogicalTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> / / this . tearDown ( ) ; should actually work as well <nl> db . _drop ( vn ) ; <nl> <nl> function ahuacatlLogicalTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( vn ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - memory - limit . js <nl> ppp b / tests / js / server / aql / aql - memory - limit . js <nl> function ahuacatlMemoryLimitTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test unlimited memory <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - operators . js <nl> ppp b / tests / js / server / aql / aql - operators . 
js <nl> var RELATIONAL_IN = function ( a , b ) { <nl> function ahuacatlOperatorsTestSuite ( ) { <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test aql . IS_NULL function <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - collect - into . js <nl> ppp b / tests / js / server / aql / aql - optimizer - collect - into . js <nl> function optimizerCollectExpressionTestSuite ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerCollectExpressionTestSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - condition . js <nl> ppp b / tests / js / server / aql / aql - optimizer - condition . js <nl> function optimizerConditionsTestSuite ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - costs . js <nl> ppp b / tests / js / server / aql / aql - optimizer - costs . js <nl> function optimizerCostsTestSuite ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerCostsTestSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - dynamic - bounds . js <nl> ppp b / tests / js / server / aql / aql - optimizer - dynamic - bounds . js <nl> function singleAttributeTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> c = db . 
_create ( cn ) ; <nl> <nl> function singleAttributeTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - edge - index . js <nl> ppp b / tests / js / server / aql / aql - optimizer - edge - index . js <nl> function optimizerEdgeIndexTestSuite ( ) { <nl> var e ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( ' UnitTestsCollection ' ) ; <nl> db . _drop ( ' UnitTestsEdgeCollection ' ) ; <nl> db . _create ( ' UnitTestsCollection ' ) ; <nl> function optimizerEdgeIndexTestSuite ( ) { <nl> internal . waitForEstimatorSync ( ) ; <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( ' UnitTestsCollection ' ) ; <nl> db . _drop ( ' UnitTestsEdgeCollection ' ) ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - optimizer - geoindex . js <nl> ppp b / tests / js / server / aql / aql - optimizer - geoindex . js <nl> function legacyOptimizerRuleTestSuite ( ) { <nl> } , <nl> <nl> testLegacyRuleBasics : function ( ) { <nl> - if ( enabled . basics ) { <nl> + if ( enabled . basics ) { <nl> geocol . ensureIndex ( { type : " hash " , fields : [ " y " , " z " ] , unique : false } ) ; <nl> <nl> var queries = [ <nl> function legacyOptimizerRuleTestSuite ( ) { <nl> } , / / testRuleBasics <nl> <nl> testLegacyRuleRemoveNodes : function ( ) { <nl> - if ( enabled . removeNodes ) { <nl> + if ( enabled . removeNodes ) { <nl> var queries = [ <nl> [ " FOR d IN " + colName + " SORT distance ( d . lat , d . lon , 0 , 0 ) ASC LIMIT 5 RETURN d " , false , false , false ] , <nl> [ " FOR d IN " + colName + " SORT distance ( 0 , 0 , d . lat , d . lon ) ASC LIMIT 5 RETURN d " , false , false , false ] , <nl> function legacyOptimizerRuleTestSuite ( ) { <nl> } <nl> } , / / testRuleSort <nl> <nl> - testLegacyRuleSorted : function ( ) { <nl> - if ( enabled . sorted ) { <nl> + testLegacyRuleSorted : function ( ) { <nl> + if ( enabled . sorted ) { <nl> var old = 0 ; <nl> var query = " FOR d IN " + colName + " SORT distance ( d . lat , d . lon , 0 , 0 ) RETURN distance ( d . lat , d . lon , 0 , 0 ) " ; <nl> var result = AQL_EXECUTE ( query ) ; <nl> mmm a / tests / js / server / aql / aql - optimizer - index - only - rocksdb . js <nl> ppp b / tests / js / server / aql / aql - optimizer - index - only - rocksdb . js <nl> function optimizerIndexOnlyPrimaryTestSuite ( ) { <nl> let c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerIndexOnlyPrimaryTestSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> function optimizerIndexOnlyEdgeTestSuite ( ) { <nl> let c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _createEdgeCollection ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerIndexOnlyEdgeTestSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . 
_drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - keep . js <nl> ppp b / tests / js / server / aql / aql - optimizer - keep . js <nl> function optimizerKeepTestSuite ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerKeepTestSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - plans . js <nl> ppp b / tests / js / server / aql / aql - optimizer - plans . js <nl> function optimizerPlansTestSuite ( ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> testCreatePlansOom : function ( ) { <nl> - if ( ! internal . debugCanUseFailAt ( ) ) { <nl> + if ( ! internal . debugCanUseFailAt ( ) ) { <nl> return ; <nl> } <nl> internal . debugSetFailAt ( " Optimizer : : createPlansOom " ) ; <nl> mmm a / tests / js / server / aql / aql - optimizer - produces - result . js <nl> ppp b / tests / js / server / aql / aql - optimizer - produces - result . js <nl> function optimizerProducesResultTestSuite ( ) { <nl> let c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerProducesResultTestSuite ( ) { <nl> c . ensureIndex ( { type : " skiplist " , fields : [ " x " ] } ) ; <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - quantifiers . js <nl> ppp b / tests / js / server / aql / aql - optimizer - quantifiers . js <nl> <nl> / * jshint globalstrict : false , strict : false , maxlen : 500 * / <nl> - / * global assertEqual , AQL_EXECUTE * / <nl> + / * global assertEqual , assertTrue , assertFalse , AQL_EXECUTE * / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief tests for ANY | ALL | NONE <nl> function optimizerQuantifiersTestSuite ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> <nl> function optimizerQuantifiersTestSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> function optimizerQuantifiersTestSuite ( ) { <nl> var query = " [ ] ALL = = ' 1 ' " , result ; <nl> <nl> result = AQL_EXECUTE ( " RETURN ( " + query + " ) " ) . json [ 0 ] ; <nl> - assertEqual ( true , result ) ; <nl> + assertTrue ( result ) ; <nl> <nl> result = AQL_EXECUTE ( " RETURN NOOPT ( " + query + " ) " ) . json [ 0 ] ; <nl> - assertEqual ( true , result ) ; <nl> + assertTrue ( result ) ; <nl> } , <nl> <nl> testAnyEmpty : function ( ) { <nl> var query = " [ ] ANY = = ' 1 ' " , result ; <nl> <nl> result = AQL_EXECUTE ( " RETURN ( " + query + " ) " ) . 
json [ 0 ] ; <nl> - assertEqual ( false , result ) ; <nl> + assertFalse ( result ) ; <nl> <nl> result = AQL_EXECUTE ( " RETURN NOOPT ( " + query + " ) " ) . json [ 0 ] ; <nl> - assertEqual ( false , result ) ; <nl> + assertFalse ( result ) ; <nl> } , <nl> <nl> testNoneEmpty : function ( ) { <nl> var query = " [ ] NONE = = ' 1 ' " , result ; <nl> <nl> result = AQL_EXECUTE ( " RETURN ( " + query + " ) " ) . json [ 0 ] ; <nl> - assertEqual ( true , result ) ; <nl> + assertTrue ( result ) ; <nl> <nl> result = AQL_EXECUTE ( " RETURN NOOPT ( " + query + " ) " ) . json [ 0 ] ; <nl> - assertEqual ( true , result ) ; <nl> + assertTrue ( result ) ; <nl> } , <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - collect - in - cluster . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - collect - in - cluster . js <nl> function optimizerCollectInClusterSuite ( ) { <nl> let c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " , { numberOfShards : 3 } ) ; <nl> <nl> function optimizerCollectInClusterSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> function optimizerCollectInClusterSingleShardSuite ( ) { <nl> let c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " , { numberOfShards : 1 } ) ; <nl> <nl> function optimizerCollectInClusterSingleShardSuite ( ) { <nl> } <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - distribute - in - cluster . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - distribute - in - cluster . js <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> var i ; <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - fuse - filters . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - fuse - filters . js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - setUp : function ( ) { } , <nl> - tearDown : function ( ) { } , <nl> - <nl> testResults : function ( ) { <nl> let queries = [ <nl> [ " FOR i IN 1 . . 10 FILTER i > 1 FILTER i < 4 SORT i RETURN i " , [ 2 , 3 ] ] , <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - inline - subqueries . 
js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - inline - subqueries . js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - interchange - adjacent - enumerations - cluster . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - interchange - adjacent - enumerations - cluster . js <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( collectionName ) ; <nl> collection = db . _create ( collectionName ) ; <nl> <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( collectionName ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - interchange - adjacent - enumerations - noncluster . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - interchange - adjacent - enumerations - noncluster . js <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( collectionName ) ; <nl> collection = db . _create ( collectionName ) ; <nl> <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( collectionName ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - move - calculations - down . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - move - calculations - down . js <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> c = db . _create ( cn ) ; <nl> + for ( let i = 0 ; i < 100 ; + + i ) { <nl> + c . 
save ( { _key : " test " + i , value : i } ) ; <nl> + } <nl> } , <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> c = null ; <nl> } , <nl> function optimizerRuleTestSuite ( ) { <nl> testCollection1 : function ( ) { <nl> var expected = [ ] ; <nl> for ( var i = 0 ; i < 100 ; + + i ) { <nl> - c . save ( { _key : " test " + i , value : i } ) ; <nl> expected . push ( " test " + i + " - " + i ) ; <nl> } <nl> <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> testCollection2 : function ( ) { <nl> var expected = [ " test43 - 43 " , " test44 - 44 " ] ; <nl> - for ( var i = 0 ; i < 100 ; + + i ) { <nl> - c . save ( { _key : " test " + i , value : i } ) ; <nl> - } <nl> <nl> var query = " FOR i IN " + cn + " LET result = CONCAT ( i . _key , ' - ' , i . value ) FILTER i . value > 42 SORT i . value LIMIT 2 RETURN result " ; <nl> var planDisabled = AQL_EXPLAIN ( query , { } , paramDisabled ) ; <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> testCollection3 : function ( ) { <nl> var expected = [ " test0 - 0 " , " test1 - 1 " ] ; <nl> - for ( var i = 0 ; i < 100 ; + + i ) { <nl> - c . save ( { _key : " test " + i , value : i } ) ; <nl> - } <nl> <nl> var query = " FOR i IN " + cn + " LET result = CONCAT ( i . _key , ' - ' , i . value ) SORT i . value LIMIT 2 RETURN result " ; <nl> var planDisabled = AQL_EXPLAIN ( query , { } , paramDisabled ) ; <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - move - calculations - up . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - move - calculations - up . js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - move - filters - up . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - move - filters - up . 
js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - remove - collect - variables . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - remove - collect - variables . js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - remove - redundant - calculations . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - remove - redundant - calculations . 
js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - remove - redundant - or . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - remove - redundant - or . js <nl> function NewAqlRemoveRedundantORTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test the rule fires for actual values <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - remove - redundant - sorts . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - remove - redundant - sorts . 
js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - remove - unnecessary - calculations . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - remove - unnecessary - calculations . js <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsOptimizerTest " ) ; <nl> db . _create ( " UnitTestsOptimizerTest " ) ; <nl> } , <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsOptimizerTest " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - remove - unnecessary - filters . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - remove - unnecessary - filters . 
js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - remove - unnecessary - remote - scatter - cluster . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - remove - unnecessary - remote - scatter - cluster . js <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> var i ; <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - replace - or - with - in . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - replace - or - with - in . js <nl> function NewAqlReplaceORWithINTestSuite ( ) { <nl> replace . save ( { " value " : i , x : [ i ] } ) ; <nl> replace . save ( { " a " : { " b " : i } } ) ; <nl> replace . save ( { " value " : i + 10 , " bb " : i , " cc " : 10 - i } ) ; <nl> - <nl> } <nl> - <nl> } , <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> function NewAqlReplaceORWithINTestSuite ( ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> testOom : function ( ) { <nl> - if ( ! internal . debugCanUseFailAt ( ) ) { <nl> + if ( ! internal . debugCanUseFailAt ( ) ) { <nl> return ; <nl> } <nl> internal . debugSetFailAt ( " OptimizerRules : : replaceOrWithInRuleOom " ) ; <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - simplify - conditions . 
js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - simplify - conditions . js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - setUp : function ( ) { } , <nl> - tearDown : function ( ) { } , <nl> - <nl> testRuleDisabled : function ( ) { <nl> let queries = [ <nl> " LET data = [ 1 , 2 , 3 , NOEVAL ( 4 ) ] RETURN data [ 0 ] " , <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - sort - in - values . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - sort - in - values . js <nl> function optimizerRuleTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test that rule has no effect when explicitly disabled <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - undistribute - remove - after - enum - coll - cluster . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - undistribute - remove - after - enum - coll - cluster . js <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> var i ; <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> function optimizerRuleTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn1 ) ; <nl> db . _drop ( cn2 ) ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - optimizer - rule - use - index - range . js <nl> ppp b / tests / js / server / aql / aql - optimizer - rule - use - index - range . js <nl> function optimizerRuleUseIndexRangeTester ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> var n = collNames . map ( function ( x ) { return collBaseName + x ; } ) ; <nl> var colls = [ ] ; <nl> for ( var i = 0 ; i < n . 
length ; i + + ) { <nl> function optimizerRuleUseIndexRangeTester ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> var n = collNames . map ( function ( x ) { return collBaseName + x ; } ) ; <nl> for ( var i = 0 ; i < n . length ; i + + ) { <nl> internal . db . _drop ( n [ i ] ) ; <nl> mmm a / tests / js / server / aql / aql - optimizer - stats - noncluster . js <nl> ppp b / tests / js / server / aql / aql - optimizer - stats - noncluster . js <nl> function optimizerStatsTestSuite ( ) { <nl> var c ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> c = db . _create ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - optimizer - useindexes . js <nl> ppp b / tests / js / server / aql / aql - optimizer - useindexes . js <nl> function useIndexesTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( colName1 ) ; <nl> internal . db . _drop ( colName2 ) ; <nl> collection1 = internal . db . _create ( colName1 ) ; <nl> function useIndexesTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( colName1 ) ; <nl> internal . db . _drop ( colName2 ) ; <nl> } , <nl> <nl> testRuleBasics : function ( ) { <nl> - if ( enabled . basics ) { <nl> + if ( enabled . basics ) { <nl> let query1 = ' for doc in @ @ col FILTER 25 < doc . value & & doc . value < 75 return doc ' ; <nl> let query2 = ' for doc in @ @ col FILTER 25 < doc . value & & 75 > doc . value return doc ' ; <nl> <nl> function useIndexesTestSuite ( ) { <nl> var last_rv ; <nl> <nl> let loop = 2 ; <nl> - for ( var i = 0 ; i < loop ; i + + ) { <nl> + for ( var i = 0 ; i < loop ; i + + ) { <nl> rv = db . _query ( t . query , bindvars ) ; <nl> rv = rv . toArray ( ) . map ( doc = > { return doc . value ; } ) ; <nl> <nl> mmm a / tests / js / server / aql / aql - parse . js <nl> ppp b / tests / js / server / aql / aql - parse . 
js <nl> function ahuacatlParseTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test empty query <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - primary - index - cluster . js <nl> ppp b / tests / js / server / aql / aql - primary - index - cluster . js <nl> function explainSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> c = db . _create ( cn ) ; <nl> <nl> function explainSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - primary - index - noncluster . js <nl> ppp b / tests / js / server / aql / aql - primary - index - noncluster . js <nl> function explainSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> c = db . _create ( cn ) ; <nl> <nl> function explainSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( cn ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - profiler . js <nl> ppp b / tests / js / server / aql / aql - profiler . 
js <nl> function ahuacatlProfilerTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test { profile : 0 } <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - queries - geo . js <nl> ppp b / tests / js / server / aql / aql - queries - geo . js <nl> function ahuacatlLegacyGeoTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsAhuacatlLocations " ) ; <nl> db . _drop ( " UnitTestsAhuacatlLocationsNon " ) ; <nl> } , <nl> function legacyGeoTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> var lat , lon ; <nl> db . _drop ( " UnitTestsAhuacatlLocations " ) ; <nl> db . _drop ( " UnitTestsAhuacatlLocationsNon " ) ; <nl> function legacyGeoTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsAhuacatlLocations " ) ; <nl> db . _drop ( " UnitTestsAhuacatlLocationsNon " ) ; <nl> } , <nl> function pointsTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> var lat , lon ; <nl> db . _drop ( " UnitTestsPointsTestSuite " ) ; <nl> <nl> function pointsTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . 
_drop ( " UnitTestsPointsTestSuite " ) ; <nl> } , <nl> <nl> function geoJsonTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> var lat , lon ; <nl> db . _drop ( " UnitTestsGeoJsonTestSuite " ) ; <nl> <nl> function geoJsonTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsGeoJsonTestSuite " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - queries - noncollection . js <nl> ppp b / tests / js / server / aql / aql - queries - noncollection . js <nl> function ahuacatlQueryNonCollectionTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief multiple subqueries <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - queries - optimizer - sort - cluster . js <nl> ppp b / tests / js / server / aql / aql - queries - optimizer - sort - cluster . js <nl> function ahuacatlQueryOptimizerSortTestSuite ( ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> tearDown : function ( ) { <nl> - internal . db . _drop ( cn ) ; <nl> + internal . db . _drop ( cn ) ; <nl> } , <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - queries - optimizer . js <nl> ppp b / tests / js / server / aql / aql - queries - optimizer . js <nl> var getQueryResults = helper . 
getQueryResults ; <nl> function ahuacatlOptimizerTestSuite ( ) { <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> testAttributeAccessOptimization : function ( ) { <nl> let query = " LET what = { a : [ ' foo ' ] , b : [ ' bar ' ] } FOR doc IN what . a RETURN doc " ; <nl> let actual = getQueryResults ( query ) ; <nl> mmm a / tests / js / server / aql / aql - queries - simple . js <nl> ppp b / tests / js / server / aql / aql - queries - simple . js <nl> var assertQueryError = helper . assertQueryError ; <nl> function ahuacatlQuerySimpleTestSuite ( ) { <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> testNoArraySorting1 : function ( ) { <nl> let query = " LET values = [ 9 , 16 , 8 , 15 , 7 , 14 , 6 , 13 , 5 , 12 , 4 , 11 , 3 , 10 , 2 , 1 ] RETURN values " ; <nl> assertEqual ( [ 9 , 16 , 8 , 15 , 7 , 14 , 6 , 13 , 5 , 12 , 4 , 11 , 3 , 10 , 2 , 1 ] , AQL_EXECUTE ( query ) . json [ 0 ] ) ; <nl> mmm a / tests / js / server / aql / aql - query - cache - noncluster . js <nl> ppp b / tests / js / server / aql / aql - query - cache - noncluster . js <nl> function ahuacatlQueryCacheTestSuite ( ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> testRenameCollection1 : function ( ) { <nl> - if ( require ( " @ arangodb / cluster " ) . isCluster ( ) ) { <nl> - / / renaming collections not supported in cluster <nl> - return ; <nl> - } <nl> - <nl> var query = " FOR doc IN @ @ collection SORT doc . value RETURN doc . value " ; <nl> var result , i ; <nl> <nl> function ahuacatlQueryCacheTestSuite ( ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> testRenameCollection2 : function ( ) { <nl> - if ( require ( " @ arangodb / cluster " ) . 
isCluster ( ) ) { <nl> - / / renaming collections not supported in cluster <nl> - return ; <nl> - } <nl> - <nl> var query = " FOR doc IN @ @ collection SORT doc . value RETURN doc . value " ; <nl> var result , i ; <nl> <nl> mmm a / tests / js / server / aql / aql - range . js <nl> ppp b / tests / js / server / aql / aql - range . js <nl> function rangesSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test range as result <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - ranges . js <nl> ppp b / tests / js / server / aql / aql - ranges . js <nl> function ahuacatlRangesTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test merging of IN <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - refaccess - attribute . js <nl> ppp b / tests / js / server / aql / aql - refaccess - attribute . js <nl> function ahuacatlRefAccessAttributeTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlRefAccess " ) ; <nl> collection = internal . 
db . _create ( " UnitTestsAhuacatlRefAccess " ) ; <nl> <nl> function ahuacatlRefAccessAttributeTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( " UnitTestsAhuacatlRefAccess " ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - refaccess - variable . js <nl> ppp b / tests / js / server / aql / aql - refaccess - variable . js <nl> function ahuacatlRefAccessVariableTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test ref access <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - regex . js <nl> ppp b / tests / js / server / aql / aql - regex . js <nl> function ahuacatlRegexTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsAhuacatlRegex " ) ; <nl> c = db . _create ( " UnitTestsAhuacatlRegex " ) ; <nl> <nl> function ahuacatlRegexTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsAhuacatlRegex " ) ; <nl> c = null ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - relational . js <nl> ppp b / tests / js / server / aql / aql - relational . js <nl> var getQueryResults = helper . 
getQueryResults ; <nl> function ahuacatlRelationalTestSuite ( ) { <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test compare order <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - shardids - cluster . js <nl> ppp b / tests / js / server / aql / aql - shardids - cluster . js <nl> function ahuacatlShardIdsTestSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> internal . db . _drop ( cn ) ; <nl> collection = internal . db . _create ( cn , { numberOfShards : 4 } ) ; <nl> <nl> function ahuacatlShardIdsTestSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> internal . db . _drop ( cn ) ; <nl> } , <nl> <nl> mmm a / tests / js / server / aql / aql - simple - attributes . js <nl> ppp b / tests / js / server / aql / aql - simple - attributes . 
js <nl> function ahuacatlSimpleAttributesTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test simple attribute names <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - subquery . js <nl> ppp b / tests / js / server / aql / aql - subquery . js <nl> function ahuacatlSubqueryTestSuite ( ) { <nl> <nl> return { <nl> <nl> - setUp : function ( ) { } , <nl> - <nl> - tearDown : function ( ) { } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test subquery evaluation <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - ternary . js <nl> ppp b / tests / js / server / aql / aql - ternary . js <nl> function ahuacatlTernaryTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test ternary operator <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - variables . js <nl> ppp b / tests / js / server / aql / aql - variables . 
js <nl> function ahuacatlVariablesTestSuite ( ) { <nl> <nl> return { <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief set up <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - setUp : function ( ) { <nl> - } , <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief tear down <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - tearDown : function ( ) { <nl> - } , <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test valid declaration <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / js / server / aql / aql - with - collections . js <nl> ppp b / tests / js / server / aql / aql - with - collections . js <nl> function queryWithCollectionsTestSuite ( ) { <nl> var errors = internal . errors ; <nl> <nl> return { <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection1 " ) ; <nl> db . _drop ( " UnitTestsCollection2 " ) ; <nl> c1 = db . _create ( " UnitTestsCollection1 " ) ; <nl> c2 = db . _create ( " UnitTestsCollection2 " ) ; <nl> } , <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " UnitTestsCollection2 " ) ; <nl> db . _drop ( " UnitTestsCollection1 " ) ; <nl> } , <nl> mmm a / tests / js / server / aql / aql - within - rectangle . js <nl> ppp b / tests / js / server / aql / aql - within - rectangle . js <nl> function withinRectangleSuite ( ) { <nl> / / / @ brief set up <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - setUp : function ( ) { <nl> + setUpAll : function ( ) { <nl> db . _drop ( " geo " ) ; <nl> db . _drop ( " geo2 " ) ; <nl> <nl> function withinRectangleSuite ( ) { <nl> / / / @ brief tear down <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - tearDown : function ( ) { <nl> + tearDownAll : function ( ) { <nl> db . _drop ( " geo " ) ; <nl> db . _drop ( " geo2 " ) ; <nl> } , <nl> function withinRectangleSuite ( ) { <nl> } catch ( e ) { <nl> assertTrue ( e . errorNum = = = errors . ERROR_QUERY_GEO_INDEX_MISSING . code ) ; <nl> } <nl> - <nl> } , <nl> <nl> testWithinRectangleAsResultWithPositionBasedGeoIndex : function ( ) { <nl> | use setUpAll and tearDownAll to reduce test durations ( ) | arangodb/arangodb | 1d84dd861de2ec0ae90ae4ff503b54aa330e2a2d | 2019-09-30T15:05:20Z |
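The commit above replaces per-test `setUp`/`tearDown` hooks with per-suite `setUpAll`/`tearDownAll` in ArangoDB's JS test framework, so collections are created and dropped once per suite instead of once per test, and empty hooks are deleted outright. The same trade-off exists in other frameworks; below is a minimal sketch of the per-suite variant using newer googletest's C++ API as an illustration (the fixture and data names are hypothetical, and this is an analogy, not ArangoDB code):

```cpp
#include <gtest/gtest.h>
#include <vector>

// Hypothetical fixture: an expensive resource built once per suite,
// mirroring setUpAll/tearDownAll in the ArangoDB JS test framework.
class CollectionSuite : public ::testing::Test {
 protected:
  // Runs once before the first test in the suite (cf. setUpAll).
  static void SetUpTestSuite() {
    shared_collection_ = new std::vector<int>{1, 2, 3};
  }
  // Runs once after the last test in the suite (cf. tearDownAll).
  static void TearDownTestSuite() {
    delete shared_collection_;
    shared_collection_ = nullptr;
  }
  static std::vector<int>* shared_collection_;
};

std::vector<int>* CollectionSuite::shared_collection_ = nullptr;

TEST_F(CollectionSuite, SeesSharedData) {
  EXPECT_EQ(shared_collection_->size(), 3u);
}
TEST_F(CollectionSuite, ReusesSameInstance) {
  EXPECT_FALSE(shared_collection_->empty());
}
```

The caveat is the usual one: per-suite state is only safe when individual tests do not mutate it, which is why the commit keeps per-test hooks where tests write to the collections.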
mmm a / src / wallet / rpcwallet . cpp <nl> ppp b / src / wallet / rpcwallet . cpp <nl> UniValue rescanblockchain ( const JSONRPCRequest & request ) <nl> " } \ n " <nl> " \ nExamples : \ n " <nl> + HelpExampleCli ( " rescanblockchain " , " 100000 120000 " ) <nl> - + HelpExampleRpc ( " rescanblockchain " , " 100000 120000 " ) <nl> + + HelpExampleRpc ( " rescanblockchain " , " 100000 , 120000 " ) <nl> ) ; <nl> } <nl> <nl> | Add missing comma from rescanblockchain | bitcoin/bitcoin | 43f76f6acdef3504c072ef7ff8cb92221a92b158 | 2017-10-13T23:34:04Z |
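The one-character fix above matters because `HelpExampleCli` renders a shell invocation, where arguments are space-separated, while `HelpExampleRpc` renders a JSON-RPC call, where the params array is comma-separated. A hedged C++ sketch of why the two renderings differ follows; the helper bodies are simplified stand-ins, not Bitcoin Core's actual implementations:

```cpp
#include <iostream>
#include <string>

// Simplified stand-ins for Bitcoin Core's help-example helpers; the real
// functions live in the RPC utility code and format slightly differently.
std::string HelpExampleCli(const std::string& method, const std::string& args) {
  // Shell form: positional, space-separated arguments.
  return "> bitcoin-cli " + method + " " + args + "\n";
}
std::string HelpExampleRpc(const std::string& method, const std::string& args) {
  // JSON-RPC form: params is a JSON array, so arguments must be comma-separated.
  return "> curl --data '{\"method\": \"" + method +
         "\", \"params\": [" + args + "]}'\n";
}

int main() {
  std::cout << HelpExampleCli("rescanblockchain", "100000 120000")
            << HelpExampleRpc("rescanblockchain", "100000, 120000");
}
```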
mmm a / tensorflow / core / profiler / README . md <nl> ppp b / tensorflow / core / profiler / README . md <nl> Open a Chrome browser , enter URL chrome : / / tracing and load the timeline file . <nl> # nodes created by the Python call stack . <nl> # Nevertheless , it pops critical Python code path for us . <nl> # <nl> - # ` - trim_name_regexes ` trims the python call stack , which are always the same <nl> - # for the leaves . <nl> + # ` - trim_name_regexes ` trims the some traces that have no valuable information . <nl> # ` - select accelerator_micros ` pick accelerator time for pprof graph . User <nl> # can also generate memory profile using ` - select bytes ` <nl> - tfprof > code - max_depth 100 - trim_name_regexes ' ^ ops . py . * ' - select accelerator_micros - output pprof : outfile = < filename > <nl> + tfprof > code - select accelerator_micros - max_depth 100000 - output pprof : outfile = < filename > - trim_name_regexes . * apply_op . * <nl> <nl> # Use pprof to visualize the generated file . <nl> - pprof - png - - nodecount = 20 - - sample_index = 1 < filename > <nl> + pprof - png - - nodecount = 100 - - sample_index = 1 < filename > <nl> ` ` ` <nl> <nl> < left > <nl> Binary files a / tensorflow / core / profiler / g3doc / pprof . jpg and b / tensorflow / core / profiler / g3doc / pprof . jpg differ <nl> mmm a / tensorflow / core / profiler / internal / tfprof_code . cc <nl> ppp b / tensorflow / core / profiler / internal / tfprof_code . cc <nl> limitations under the License . <nl> namespace tensorflow { <nl> namespace tfprof { <nl> namespace { <nl> + <nl> + const char * const kGradientSuffix = " ( gradient ) " ; <nl> + <nl> / / Convert to Trace proto into a short readable string . <nl> string GetTraceString ( const CodeDef : : Trace & trace ) { <nl> string ntrace = io : : Basename ( trace . file ( ) ) . ToString ( ) ; <nl> string GetTraceString ( const CodeDef : : Trace & trace ) { <nl> return ntrace ; <nl> } <nl> <nl> + bool IsGradNode ( const string & name , string * forward_name ) { <nl> + / / Given a forward operation with name op , its gradient op has the following <nl> + / / name : . . . gradients / op_grad / . . . <nl> + / / TODO ( xpan ) : This is hacky . <nl> + auto grad_prefix = name . find ( " gradients / " ) ; <nl> + auto grad_suffix = name . find ( " _grad / " ) ; <nl> + if ( grad_prefix = = name . npos | | grad_suffix = = name . npos ) { <nl> + return false ; <nl> + } <nl> + auto start = grad_prefix + string ( " gradients / " ) . length ( ) ; <nl> + auto len = grad_suffix - start ; <nl> + if ( len < = 0 ) { <nl> + return false ; <nl> + } <nl> + * forward_name = name . substr ( start , len ) ; <nl> + return true ; <nl> + } <nl> + <nl> / / StringTable maps each string to an id . <nl> class StringTable { <nl> public : <nl> class Samples { <nl> for ( const CodeNode * cn : all_leaf ) { <nl> for ( auto gn_it : cn - > node - > graph_nodes ( ) ) { <nl> const TFGraphNode * gn = gn_it . second ; <nl> - pprof : : Sample * sample_pb = & sample_table_ [ gn - > name ( ) ] ; <nl> + string name = gn - > name ( ) ; <nl> + / / Generate a new trace name , in case the name is taken . <nl> + while ( sample_table_ . find ( name ) ! = sample_table_ . 
end ( ) ) { <nl> + name + = ' @ ' ; <nl> + } <nl> + pprof : : Sample * sample_pb = & sample_table_ [ name ] ; <nl> for ( uint64 id : location_ids ) { <nl> sample_pb - > mutable_location_id ( ) - > Add ( id ) ; <nl> } <nl> pprof : : Label * label_pb = sample_pb - > mutable_label ( ) - > Add ( ) ; <nl> - label_pb - > set_key ( string_table_ - > GetIndex ( " node_name " ) ) ; <nl> + label_pb - > set_key ( string_table_ - > GetIndex ( " graph node : " ) ) ; <nl> label_pb - > set_str ( string_table_ - > GetIndex ( gn - > name ( ) ) ) ; <nl> <nl> sample_pb - > mutable_value ( ) - > Add ( 1 ) ; <nl> class PprofProfileImpl : public PprofProfile { <nl> samples_ ( new Samples ( & string_table_ , opts ) ) { } <nl> <nl> uint64 AddLocation ( const CodeNode * callee , const CodeNode * caller ) override { <nl> - const string & file_path = caller - > trace - > file ( ) ; <nl> - uint64 lineno = caller - > trace - > lineno ( ) ; <nl> - const string & callee_file_path = callee - > trace - > file ( ) ; <nl> - const string & callee_function = callee - > trace - > function ( ) ; <nl> - uint64 callee_func_start_line = callee - > trace - > func_start_line ( ) ; <nl> + const string & file_path = caller - > file ( ) ; <nl> + uint64 lineno = caller - > lineno ( ) ; <nl> + const string & callee_file_path = callee - > file ( ) ; <nl> + const string & callee_function = callee - > function ( ) ; <nl> + uint64 callee_func_start_line = callee - > func_start_line ( ) ; <nl> <nl> return loc_table_ - > GetIndex ( file_path , lineno , callee_function , <nl> callee_file_path , callee_func_start_line ) ; <nl> class PprofProfileImpl : public PprofProfile { <nl> if ( ! s . ok ( ) ) return s ; <nl> s = zlib_output_buffer - > Close ( ) ; <nl> if ( ! s . ok ( ) ) return s ; <nl> - fprintf ( stdout , " \ nRun pprof - png - - nodecount = 20 - - sample_index = 1 < % s > \ n " , <nl> + fprintf ( stdout , " \ nRun pprof - png - - nodecount = 100 - - sample_index = 1 < % s > \ n " , <nl> filename . c_str ( ) ) ; <nl> return s ; <nl> } <nl> class PprofProfileImpl : public PprofProfile { <nl> string_table_ . GetIndex ( " CPU execution time . " ) ) ; <nl> } <nl> } else if ( type = = kShown [ 0 ] ) { <nl> - sample_type - > set_unit ( string_table_ . GetIndex ( " requested bytes " ) ) ; <nl> + sample_type - > set_unit ( string_table_ . GetIndex ( " bytes " ) ) ; <nl> profile_pb - > mutable_comment ( ) - > Add ( <nl> - string_table_ . GetIndex ( " Sum of operation total requested memory . " ) ) ; <nl> + string_table_ . GetIndex ( " Sum of operation total memory requests , " <nl> + " excluding deallocations . " ) ) ; <nl> } else if ( type = = kShown [ 11 ] ) { <nl> - sample_type - > set_unit ( string_table_ . GetIndex ( " peak bytes " ) ) ; <nl> + sample_type - > set_unit ( string_table_ . GetIndex ( " bytes " ) ) ; <nl> profile_pb - > mutable_comment ( ) - > Add ( <nl> string_table_ . GetIndex ( " Sum of operation peak memory usage . " ) ) ; <nl> } else if ( type = = kShown [ 12 ] ) { <nl> - sample_type - > set_unit ( string_table_ . GetIndex ( " residual bytes " ) ) ; <nl> + sample_type - > set_unit ( string_table_ . GetIndex ( " bytes " ) ) ; <nl> profile_pb - > mutable_comment ( ) - > Add ( string_table_ . GetIndex ( <nl> " Sum of operation allocated memory after finish . " ) ) ; <nl> } else if ( type = = kShown [ 13 ] ) { <nl> - sample_type - > set_unit ( string_table_ . GetIndex ( " output bytes " ) ) ; <nl> + sample_type - > set_unit ( string_table_ . 
GetIndex ( " bytes " ) ) ; <nl> profile_pb - > mutable_comment ( ) - > Add ( <nl> string_table_ . GetIndex ( " Sum of operation output size . " ) ) ; <nl> } else if ( type = = kShown [ 2 ] ) { <nl> void TFCode : : AddNode ( TFGraphNode * node ) { <nl> if ( node - > code ( ) . traces_size ( ) = = 0 ) { <nl> return ; <nl> } <nl> + <nl> + / / We infer the forward operation name from gradient op name . So , we can <nl> + / / map gradient op traces to forward op traces . <nl> + / / E . g . gradient node of ' inp_1 / Conv2D ' would be ' gradients / inp_1 / Conv2D_grad . <nl> + string forward_name ; <nl> + if ( IsGradNode ( node - > name ( ) , & forward_name ) ) { <nl> + auto grad_nodes_it = grad_nodes_ . find ( forward_name ) ; <nl> + if ( grad_nodes_it ! = grad_nodes_ . end ( ) ) { <nl> + grad_nodes_it - > second . push_back ( node ) ; <nl> + } else { <nl> + grad_nodes_ . insert ( <nl> + std : : pair < string , std : : vector < TFGraphNode * > > ( forward_name , { node } ) ) ; <nl> + } <nl> + return ; <nl> + } else { <nl> + forward_nodes_ [ node - > name ( ) ] = node ; <nl> + } <nl> + <nl> + / / Track if this is the first trace ( first node ) . If true , add all <nl> + / / traces to common_traces_ . Otherwise , remove uncommon traces from <nl> + / / common traces_ . <nl> + bool first_trace = false ; <nl> if ( ! root_ ) { <nl> graph_root_ . reset ( new TFMultiGraphNode ( kTFProfRoot ) ) ; <nl> - root_ . reset ( new CodeNode ( graph_root_ . get ( ) , nullptr ) ) ; <nl> + root_ . reset ( new CodeNode ( graph_root_ . get ( ) , nullptr , " " ) ) ; <nl> + first_trace = true ; <nl> } <nl> <nl> CodeNode * pre_code_node = root_ . get ( ) ; <nl> / / TODO ( xpan ) : Consider to release CodeDef after TFCode is built . It <nl> / / takes a lot of memory . <nl> + std : : set < string > traces ; <nl> for ( int i = 0 ; i < node - > code ( ) . traces_size ( ) ; + + i ) { <nl> / / Unlike op name , which is globally unique , trace name is only unique <nl> / / w . r . t . it ' s parent . <nl> const string & trace = GetTraceString ( node - > code ( ) . traces ( i ) ) ; <nl> - pre_code_node = pre_code_node - > AddChildren ( trace , & node - > code ( ) . traces ( i ) ) ; <nl> + traces . insert ( trace ) ; <nl> + pre_code_node = <nl> + pre_code_node - > AddChildren ( trace , & node - > code ( ) . traces ( i ) , " " ) ; <nl> if ( i = = node - > code ( ) . traces_size ( ) - 1 ) { <nl> pre_code_node - > node - > AddGraphNode ( node ) ; <nl> } <nl> } <nl> + if ( first_trace ) { <nl> + common_traces_ . insert ( traces . begin ( ) , traces . end ( ) ) ; <nl> + } else { <nl> + for ( auto it = common_traces_ . begin ( ) ; it ! = common_traces_ . end ( ) ; ) { <nl> + if ( traces . find ( * it ) = = traces . end ( ) ) { <nl> + common_traces_ . erase ( it + + ) ; <nl> + } else { <nl> + + + it ; <nl> + } <nl> + } <nl> + } <nl> } <nl> <nl> void TFCode : : Build ( ) { <nl> + int64 unaccounted_nodes = 0 ; <nl> + for ( auto it : grad_nodes_ ) { <nl> + const string & forward_name = it . first ; <nl> + auto forward_it = forward_nodes_ . find ( forward_name ) ; <nl> + if ( forward_it = = forward_nodes_ . end ( ) ) { <nl> + unaccounted_nodes + = 1 ; <nl> + continue ; <nl> + } <nl> + TFGraphNode * fn = forward_it - > second ; <nl> + CodeNode * leaf = nullptr ; <nl> + CodeNode * pre_code_node = root_ . get ( ) ; <nl> + for ( int i = 0 ; i < fn - > code ( ) . traces_size ( ) ; + + i ) { <nl> + const string & trace = <nl> + GetTraceString ( fn - > code ( ) . 
traces ( i ) ) + kGradientSuffix ; <nl> + pre_code_node = pre_code_node - > AddChildren ( trace , & fn - > code ( ) . traces ( i ) , <nl> + kGradientSuffix ) ; <nl> + if ( i = = fn - > code ( ) . traces_size ( ) - 1 ) { <nl> + leaf = pre_code_node ; <nl> + } <nl> + } <nl> + for ( TFGraphNode * gn : it . second ) { <nl> + leaf - > node - > AddGraphNode ( gn ) ; <nl> + } <nl> + } <nl> + if ( unaccounted_nodes > 0 ) { <nl> + fprintf ( stderr , " % lld gradient nodes not accounted \ n " , unaccounted_nodes ) ; <nl> + } <nl> + <nl> + / / For trace that all traces share , such as " main " , " apply_op " , people <nl> + / / are unlikely inerested . We track them and hide them from display . <nl> + if ( forward_nodes_ . size ( ) > 100 ) { <nl> + std : : set < string > tmp = common_traces_ ; <nl> + for ( const string & t : tmp ) { <nl> + common_traces_ . insert ( t + kGradientSuffix ) ; <nl> + } <nl> + } else { <nl> + common_traces_ . clear ( ) ; <nl> + } <nl> } <nl> <nl> const ShowMultiNode * TFCode : : ShowInternal ( const Options & opts , <nl> const ShowMultiNode * TFCode : : ShowInternal ( const Options & opts , <nl> void TFCode : : Format ( const CodeNode * root , const std : : vector < CodeNode * > & nodes , <nl> const Options & opts , string * display_str , <nl> MultiGraphNodeProto * proto , std : : vector < uint64 > * call_ids ) { <nl> - if ( nodes . empty ( ) & & root - > trace & & opts . output_type = = kOutput [ 3 ] ) { <nl> + if ( nodes . empty ( ) & & root - > has_trace ( ) & & opts . output_type = = kOutput [ 3 ] ) { <nl> pprof_profile_ - > AddSample ( root , call_ids ) ; <nl> } <nl> <nl> for ( CodeNode * node : nodes ) { <nl> - if ( root - > trace & & opts . output_type = = kOutput [ 3 ] ) { <nl> + if ( root - > has_trace ( ) & & opts . output_type = = kOutput [ 3 ] ) { <nl> uint64 loc_id = pprof_profile_ - > AddLocation ( node , root ) ; <nl> call_ids - > push_back ( loc_id ) ; <nl> } <nl> void TFCode : : Format ( const CodeNode * root , const std : : vector < CodeNode * > & nodes , <nl> MultiGraphNodeProto * child = proto - > add_children ( ) ; <nl> child - > MergeFrom ( node - > proto ( ) ) ; <nl> Format ( node , node - > show_children , opts , display_str , child , call_ids ) ; <nl> - if ( root - > trace & & opts . output_type = = kOutput [ 3 ] ) { <nl> + if ( root - > has_trace ( ) & & opts . output_type = = kOutput [ 3 ] ) { <nl> call_ids - > pop_back ( ) ; <nl> } <nl> } <nl> std : : vector < CodeNode * > TFCode : : PrintScope ( const std : : vector < CodeNode * > roots , <nl> continue ; <nl> } <nl> int ident = last_ident ; <nl> - bool show = ShouldShow ( node , opts , depth ) ; <nl> + bool show = ShouldShow ( node , opts , depth ) & & <nl> + common_traces_ . find ( node - > name ( ) ) = = common_traces_ . end ( ) ; <nl> if ( show ) ident + = 2 ; <nl> <nl> std : : vector < CodeNode * > show_cnodes = <nl> mmm a / tensorflow / core / profiler / internal / tfprof_code . h <nl> ppp b / tensorflow / core / profiler / internal / tfprof_code . h <nl> class TFCode : public TFMultiShow { <nl> TFCode ( ) { } <nl> ~ TFCode ( ) override { } <nl> <nl> + / / Add nodes to the code view . Called before Build ( ) <nl> void AddNode ( TFGraphNode * node ) override ; <nl> <nl> + / / Build the code view structure . Called after all nodes <nl> + / / are added via AddNode ( ) . 
<nl> void Build ( ) override ; <nl> <nl> private : <nl> class TFCode : public TFMultiShow { <nl> string FormatNode ( CodeNode * node , const Options & opts , int64 indent ) const ; <nl> string FormatNodeMemory ( CodeNode * node , int64 bytes , int64 total_bytes ) const ; <nl> <nl> + / / Common traces track the code path that all traces share . Such as <nl> + / / " main ( ) " , " create_op " , etc . <nl> + std : : set < string > common_traces_ ; <nl> std : : unique_ptr < CodeNode > root_ ; <nl> std : : unique_ptr < TFMultiGraphNode > graph_root_ ; <nl> std : : unique_ptr < PprofProfile > pprof_profile_ ; <nl> + std : : map < string , std : : vector < TFGraphNode * > > grad_nodes_ ; <nl> + std : : map < string , TFGraphNode * > forward_nodes_ ; <nl> } ; <nl> } / / namespace tfprof <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / profiler / internal / tfprof_node_show . h <nl> ppp b / tensorflow / core / profiler / internal / tfprof_node_show . h <nl> class ShowMultiNode { <nl> <nl> class CodeNode : public ShowMultiNode { <nl> public : <nl> - explicit CodeNode ( TFMultiGraphNode * node , const CodeDef : : Trace * trace ) <nl> - : ShowMultiNode ( node ) , trace ( trace ) { } <nl> + CodeNode ( TFMultiGraphNode * node , const CodeDef : : Trace * trace , <nl> + const string & suffix ) <nl> + : ShowMultiNode ( node ) , trace_ ( trace ) , suffix_ ( suffix ) { } <nl> ~ CodeNode ( ) override { } <nl> <nl> - CodeNode * AddChildren ( const string & name , const CodeDef : : Trace * trace ) { <nl> + CodeNode * AddChildren ( const string & name , const CodeDef : : Trace * trace , <nl> + const string suffix ) { <nl> auto it = children_ . find ( name ) ; <nl> if ( it ! = children_ . end ( ) ) { <nl> return it - > second . get ( ) ; <nl> class CodeNode : public ShowMultiNode { <nl> graph_children_ . push_back ( <nl> std : : unique_ptr < TFMultiGraphNode > ( new TFMultiGraphNode ( name ) ) ) ; <nl> auto child = & children_ [ name ] ; <nl> - child - > reset ( new CodeNode ( graph_children_ . back ( ) . get ( ) , trace ) ) ; <nl> + child - > reset ( new CodeNode ( graph_children_ . back ( ) . get ( ) , trace , suffix ) ) ; <nl> children . push_back ( child - > get ( ) ) ; <nl> return child - > get ( ) ; <nl> } <nl> <nl> - const CodeDef : : Trace * trace ; <nl> + bool has_trace ( ) const { return trace_ ! = nullptr ; } <nl> + const int32 lineno ( ) const { return trace_ - > lineno ( ) ; } <nl> + string file ( ) const { return trace_ - > file ( ) + suffix_ ; } <nl> + string function ( ) const { return trace_ - > function ( ) + suffix_ ; } <nl> + int32 func_start_line ( ) const { return trace_ - > func_start_line ( ) ; } <nl> + <nl> std : : vector < CodeNode * > children ; <nl> std : : vector < CodeNode * > show_children ; <nl> <nl> private : <nl> + const CodeDef : : Trace * trace_ ; <nl> + string suffix_ ; <nl> std : : vector < std : : unique_ptr < TFMultiGraphNode > > graph_children_ ; <nl> std : : map < string , std : : unique_ptr < CodeNode > > children_ ; <nl> } ; <nl> mmm a / tensorflow / core / profiler / internal / tfprof_stats . cc <nl> ppp b / tensorflow / core / profiler / internal / tfprof_stats . cc <nl> void TFStats : : AddRunMeta ( int64 step , std : : unique_ptr < RunMetadata > run_meta ) { <nl> fprintf ( stderr , " Invalid RunMetadata for step % lld \ n " , step ) ; <nl> return ; <nl> } <nl> - if ( steps_ . find ( step ) ! = steps_ . end ( ) ) { <nl> - fprintf ( stderr , " The same step % lld has been added before . \ n " , step ) ; <nl> - return ; <nl> + if ( steps_ . 
find ( step ) = = steps_ . end ( ) ) { <nl> + steps_ . insert ( step ) ; <nl> } <nl> steps_ . insert ( step ) ; <nl> <nl> mmm a / tensorflow / python / profiler / internal / run_metadata_test . py <nl> ppp b / tensorflow / python / profiler / internal / run_metadata_test . py <nl> <nl> builder = option_builder . ProfileOptionBuilder <nl> <nl> <nl> - def _extract_node ( run_meta , node_names ) : <nl> - if not isinstance ( node_names , list ) : <nl> - node_names = [ node_names ] <nl> + def _extract_node ( run_meta , node_name ) : <nl> ret = defaultdict ( list ) <nl> for dev_stat in run_meta . step_stats . dev_stats : <nl> - dev = dev_stat . device <nl> + dev = dev_stat . device . lower ( ) <nl> + if dev . find ( ' cpu : ' ) > 0 : <nl> + dev = dev [ dev . find ( ' cpu : ' ) : ] <nl> + elif dev . find ( ' gpu : ' ) > 0 : <nl> + dev = dev [ dev . find ( ' gpu : ' ) : ] <nl> + else : <nl> + assert False , ' Unrecognized device name : % s ' % dev <nl> + <nl> for node_stat in dev_stat . node_stats : <nl> - if node_stat . node_name in node_names : <nl> + nname = node_stat . node_name <nl> + if nname . find ( ' : ' ) > 0 : <nl> + nname = nname [ : nname . find ( ' : ' ) ] <nl> + if nname = = node_name : <nl> ret [ dev ] . append ( node_stat ) <nl> return ret <nl> <nl> def _run_model ( ) : <nl> opts = builder . time_and_memory ( ) <nl> opts [ ' min_micros ' ] = 0 <nl> opts [ ' min_bytes ' ] = 0 <nl> + opts [ ' output ' ] = ' none ' <nl> _ = sess . run ( y , <nl> options = config_pb2 . RunOptions ( <nl> trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> def _run_loop_model ( ) : <nl> trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> run_metadata = run_meta ) <nl> <nl> + opts = builder . time_and_memory ( ) <nl> + opts [ ' output ' ] = ' none ' <nl> + <nl> tfprof_node = model_analyzer . profile ( <nl> - sess . graph , run_meta , <nl> - options = builder . time_and_memory ( ) ) <nl> + sess . graph , run_meta , options = opts ) <nl> return tfprof_node , run_meta <nl> <nl> <nl> def testGPU ( self ) : <nl> self . assertEqual ( tfprof_node . children [ 0 ] . name , ' MatMul ' ) <nl> self . assertGreater ( tfprof_node . children [ 0 ] . exec_micros , 10 ) <nl> <nl> - ret = _extract_node ( run_meta , [ ' MatMul ' , ' MatMul : MatMul ' ] ) <nl> - self . assertEqual ( len ( ret ) , 3 ) <nl> - self . assertTrue ( ' / job : localhost / replica : 0 / task : 0 ' + gpu_dev in ret ) <nl> - del ret [ ' / job : localhost / replica : 0 / task : 0 ' + gpu_dev ] <nl> - <nl> - has_all_stream = False <nl> - for k , _ in six . iteritems ( ret ) : <nl> - self . assertTrue ( gpu_dev + ' / stream ' in k ) <nl> - if gpu_dev + ' / stream : all ' in k : <nl> - has_all_stream = True <nl> - self . assertTrue ( has_all_stream ) <nl> + ret = _extract_node ( run_meta , ' MatMul ' ) <nl> + self . assertEqual ( len ( ret [ ' gpu : 0 ' ] ) , 1 ) <nl> + self . assertEqual ( len ( ret [ ' gpu : 0 / stream : all ' ] ) , 1 , ' % s ' % run_meta ) <nl> <nl> def testCPU ( self ) : <nl> ops . reset_default_graph ( ) <nl> def testCPU ( self ) : <nl> self . assertGreater ( tfprof_node . children [ 0 ] . exec_micros , 0 ) <nl> <nl> ret = _extract_node ( run_meta , ' MatMul ' ) <nl> - self . assertEqual ( len ( ret ) , 1 ) <nl> - self . assertTrue ( ' / job : localhost / replica : 0 / task : 0 / cpu : 0 ' in ret ) <nl> + self . assertEqual ( len ( ret [ ' cpu : 0 ' ] ) , 1 ) <nl> <nl> ret = _extract_node ( run_meta , ' MatMul : MatMul ' ) <nl> self . 
assertEqual ( len ( ret ) , 0 ) <nl> def testLoopCPU ( self ) : <nl> # The while - loop caused a node to appear 4 times in scheduling . <nl> ret = _extract_node ( run_meta , <nl> ' rnn / while / rnn / basic_rnn_cell / basic_rnn_cell / MatMul ' ) <nl> - self . assertEqual ( len ( ret [ ' / job : localhost / replica : 0 / task : 0 / cpu : 0 ' ] ) , 4 ) <nl> + self . assertEqual ( len ( ret [ ' cpu : 0 ' ] ) , 4 ) <nl> <nl> total_cpu_execs = 0 <nl> - for node in ret [ ' / job : localhost / replica : 0 / task : 0 / cpu : 0 ' ] : <nl> + for node in ret [ ' cpu : 0 ' ] : <nl> total_cpu_execs + = node . op_end_rel_micros <nl> <nl> mm_node = lib . SearchTFProfNode ( <nl> def testLoopCPU ( self ) : <nl> self . assertEqual ( mm_node . cpu_exec_micros , total_cpu_execs ) <nl> self . assertEqual ( mm_node . exec_micros , total_cpu_execs ) <nl> <nl> + def testGradientGraph ( self ) : <nl> + # Note : Please don ' t just adjust the test to make it pass . <nl> + # The code view logic depends on it . <nl> + ops . reset_default_graph ( ) <nl> + _ , _ = _run_loop_model ( ) <nl> + graph = ops . get_default_graph ( ) <nl> + forward_op = set ( ) <nl> + backward_op = set ( ) <nl> + back_to_forward = dict ( ) <nl> + for op in graph . get_operations ( ) : <nl> + if op . name . find ( ' gradients / ' ) > 0 and op . name . find ( ' _grad / ' ) > 0 : <nl> + backward_op . add ( op . name ) <nl> + idx1 = op . name . find ( ' gradients / ' ) + 10 <nl> + idx2 = op . name . find ( ' _grad / ' ) <nl> + back_to_forward [ op . name ] = op . name [ idx1 : idx2 ] <nl> + else : <nl> + forward_op . add ( op . name ) <nl> + <nl> + for _ , f in six . iteritems ( back_to_forward ) : <nl> + self . assertTrue ( f in forward_op ) <nl> + <nl> # pylint : disable = pointless - string - statement <nl> " " " <nl> - TODO ( xpan ) : This test is flaky because RunMetadata returned from TensorFlow <nl> - is random . Still being investigated . <nl> + # TODO ( xpan ) : This test is flaky because RunMetadata returned from TensorFlow <nl> + # is random . Still being investigated . <nl> def testLoopGPU ( self ) : <nl> if not test . is_gpu_available ( ) : <nl> return <nl> def testLoopGPU ( self ) : <nl> # The while - loop caused a node to appear 4 times in scheduling . <nl> ret = _extract_node ( run_meta , <nl> ' rnn / while / rnn / basic_rnn_cell / basic_rnn_cell / MatMul ' ) <nl> - self . assertEqual ( len ( ret [ ' / job : localhost / replica : 0 / task : 0 / device : GPU : 0 ' ] ) , 4 ) <nl> + self . assertEqual ( len ( ret [ ' gpu : 0 ' ] ) , 4 , ' % s ' % run_meta ) <nl> <nl> total_cpu_execs = 0 <nl> - for node in ret [ ' / job : localhost / replica : 0 / task : 0 / device : GPU : 0 ' ] : <nl> + for node in ret [ ' gpu : 0 ' ] : <nl> total_cpu_execs + = node . op_end_rel_micros <nl> <nl> - ret = _extract_node ( <nl> - run_meta , <nl> - ' rnn / while / rnn / basic_rnn_cell / basic_rnn_cell / MatMul : MatMul ' ) <nl> - self . assertGreaterEqual ( len ( ret [ ' / device : GPU : 0 / stream : all ' ] ) , 4 ) <nl> + self . assertGreaterEqual ( len ( ret [ ' gpu : 0 / stream : all ' ] ) , 4 , ' % s ' % run_meta ) <nl> <nl> total_accelerator_execs = 0 <nl> - for node in ret [ ' / device : GPU : 0 / stream : all ' ] : <nl> + for node in ret [ ' gpu : 0 / stream : all ' ] : <nl> total_accelerator_execs + = node . op_end_rel_micros <nl> - <nl> - mm_node = lib . SearchTFProfNode ( <nl> - tfprof_node , <nl> - ' rnn / while / rnn / basic_rnn_cell / basic_rnn_cell / MatMul ' ) <nl> - <nl> - self . assertEqual ( mm_node . run_count , 4 ) <nl> - self . 
assertEqual ( mm_node . accelerator_exec_micros , total_accelerator_execs ) <nl> - self . assertEqual ( mm_node . cpu_exec_micros , total_cpu_execs ) <nl> - self . assertEqual ( mm_node . exec_micros , <nl> - total_cpu_execs + total_accelerator_execs ) <nl> " " " <nl> <nl> <nl> mmm a / tensorflow / python / profiler / model_analyzer_test . py <nl> ppp b / tensorflow / python / profiler / model_analyzer_test . py <nl> <nl> import io <nl> import os <nl> import random <nl> + import re <nl> <nl> from tensorflow . core . profiler import profile_pb2 <nl> from tensorflow . core . protobuf import config_pb2 <nl> def testDumpToFile ( self ) : <nl> ' ScalarW ( 1 , 1 / 1 params ) \ n ' , <nl> f . read ( ) ) <nl> <nl> + def testSelectEverthingDetail ( self ) : <nl> + ops . reset_default_graph ( ) <nl> + dev = ' / gpu : 0 ' if test . is_gpu_available ( ) else ' / cpu : 0 ' <nl> + outfile = os . path . join ( test . get_temp_dir ( ) , ' dump ' ) <nl> + opts = ( builder ( builder . trainable_variables_parameter ( ) ) <nl> + . with_file_output ( outfile ) <nl> + . with_accounted_types ( [ ' . * ' ] ) <nl> + . select ( [ ' micros ' , ' bytes ' , ' params ' , ' float_ops ' , ' occurrence ' , <nl> + ' device ' , ' op_types ' , ' input_shapes ' ] ) . build ( ) ) <nl> + <nl> + config = config_pb2 . ConfigProto ( ) <nl> + with session . Session ( config = config ) as sess , ops . device ( dev ) : <nl> + x = lib . BuildSmallModel ( ) <nl> + <nl> + sess . run ( variables . global_variables_initializer ( ) ) <nl> + run_meta = config_pb2 . RunMetadata ( ) <nl> + _ = sess . run ( x , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> + run_metadata = run_meta ) <nl> + <nl> + model_analyzer . profile ( <nl> + sess . graph , run_meta , options = opts ) <nl> + <nl> + with gfile . Open ( outfile , ' r ' ) as f : <nl> + # pylint : disable = line - too - long <nl> + outputs = f . read ( ) . split ( ' \ n ' ) <nl> + <nl> + self . assertEqual ( outputs [ 0 ] , <nl> + ' node name | # parameters | # float_ops | requested bytes | total execution time | accelerator execution time | cpu execution time | assigned devices | op types | op count ( run | defined ) | input shapes ' ) <nl> + for o in outputs [ 1 : ] : <nl> + if o . find ( ' Conv2D ' ) > 0 : <nl> + metrics = o [ o . find ( ' ( ' ) + 1 : o . find ( ' ) ' ) ] . split ( ' , ' ) <nl> + # Make sure time is profiled . <nl> + gap = 1 if test . is_gpu_available ( ) else 2 <nl> + for i in range ( 3 , 6 , gap ) : <nl> + mat = re . search ( ' ( . * ) us / ( . * ) us ' , metrics [ i ] ) <nl> + self . assertGreater ( float ( mat . group ( 1 ) ) , 0 . 0 ) <nl> + self . assertGreater ( float ( mat . group ( 2 ) ) , 0 . 0 ) <nl> + # Make sure device is profiled . <nl> + if test . is_gpu_available ( ) : <nl> + self . assertTrue ( metrics [ 6 ] . find ( ' gpu ' ) > 0 ) <nl> + self . assertFalse ( metrics [ 6 ] . find ( ' cpu ' ) > 0 ) <nl> + else : <nl> + self . assertFalse ( metrics [ 6 ] . find ( ' gpu ' ) > 0 ) <nl> + self . assertTrue ( metrics [ 6 ] . find ( ' cpu ' ) > 0 ) <nl> + # Make sure float_ops is profiled . <nl> + mat = re . search ( ' ( . * ) k / ( . * ) k flops ' , metrics [ 1 ] . strip ( ) ) <nl> + self . assertGreater ( float ( mat . group ( 1 ) ) , 0 . 0 ) <nl> + self . assertGreater ( float ( mat . group ( 2 ) ) , 0 . 0 ) <nl> + # Make sure op_count is profiled . <nl> + self . assertEqual ( metrics [ 8 ] . strip ( ) , ' 1 / 1 | 1 / 1 ' ) <nl> + # Make sure input_shapes is profiled . <nl> + self . 
assertEqual ( metrics [ 9 ] . strip ( ) , ' 0 : 2x6x6x3 | 1 : 3x3x3x6 ' ) <nl> + <nl> + if o . find ( ' DW ( 3x3x3x6 ' ) > 0 : <nl> + metrics = o [ o . find ( ' ( ' ) + 1 : o . find ( ' ) ' ) ] . split ( ' , ' ) <nl> + mat = re . search ( ' ( . * ) / ( . * ) params ' , metrics [ 1 ] . strip ( ) ) <nl> + self . assertGreater ( float ( mat . group ( 1 ) ) , 0 . 0 ) <nl> + self . assertGreater ( float ( mat . group ( 2 ) ) , 0 . 0 ) <nl> + # pylint : enable = line - too - long <nl> + <nl> def testSelectEverything ( self ) : <nl> ops . reset_default_graph ( ) <nl> outfile = os . path . join ( test . get_temp_dir ( ) , ' dump ' ) <nl> def testComplexCodeView ( self ) : <nl> with gfile . Open ( outfile , ' r ' ) as f : <nl> lines = f . read ( ) . split ( ' \ n ' ) <nl> result = ' \ n ' . join ( [ l [ : min ( len ( l ) , 80 ) ] for l in lines ] ) <nl> - self . assertEqual ( ' node name | # parameters | # float_ops \ n_TFProfRoot ( - - / 2 . 84k params , - - / 91 . 04k flops ) \ n model_analyzer_testlib . py : 58 : BuildFullModel : seq . append ( array_ . . . ( 0 / 1 . 80k para \ n model_analyzer_testlib . py : 35 : BuildSmallModel : image = array_ops . . . ( 0 / 0 param \ n model_analyzer_testlib . py : 39 : BuildSmallModel : initializer = init_ . . . ( 0 / 4 param \ n model_analyzer_testlib . py : 43 : BuildSmallModel : initializer = init_ . . . ( 0 / 648 par \ n model_analyzer_testlib . py : 44 : BuildSmallModel : x = nn_ops . conv2d . . . ( 0 / 0 param \ n model_analyzer_testlib . py : 48 : BuildSmallModel : initializer = init_ . . . ( 0 / 1 . 15k p \ n model_analyzer_testlib . py : 49 : BuildSmallModel : x = nn_ops . conv2d . . . ( 0 / 0 param \ n model_analyzer_testlib . py : 62 : BuildFullModel : cell , array_ops . c . . . ( 0 / 1 . 04k para \ n model_analyzer_testlib . py : 64 : BuildFullModel : target = array_op . . . ( 0 / 0 params , \ n model_analyzer_testlib . py : 65 : BuildFullModel : loss = nn_ops . l2_ . . . ( 0 / 0 params , \ n model_analyzer_testlib . py : 67 : BuildFullModel : return sgd_op . min . . . ( 0 / 0 params , \ n ' , <nl> + self . assertEqual ( ' node name | # parameters | # float_ops \ n_TFProfRoot ( - - / 2 . 84k params , - - / 91 . 04k flops ) \ n model_analyzer_testlib . py : 58 : BuildFullModel : seq . append ( array_ . . . ( 0 / 1 . 80k para \ n model_analyzer_testlib . py : 35 : BuildSmallModel : image = array_ops . . . ( 0 / 0 param \ n model_analyzer_testlib . py : 39 : BuildSmallModel : initializer = init_ . . . ( 0 / 4 param \ n model_analyzer_testlib . py : 43 : BuildSmallModel : initializer = init_ . . . ( 0 / 648 par \ n model_analyzer_testlib . py : 44 : BuildSmallModel : x = nn_ops . conv2d . . . ( 0 / 0 param \ n model_analyzer_testlib . py : 48 : BuildSmallModel : initializer = init_ . . . ( 0 / 1 . 15k p \ n model_analyzer_testlib . py : 49 : BuildSmallModel : x = nn_ops . conv2d . . . ( 0 / 0 param \ n model_analyzer_testlib . py : 58 : BuildFullModel : seq . append ( array_ . . . ( gradient ) ( 0 \ n model_analyzer_testlib . py : 44 : BuildSmallModel : x = nn_ops . conv2d . . . ( gradient ) \ n model_analyzer_testlib . py : 49 : BuildSmallModel : x = nn_ops . conv2d . . . ( gradient ) \ n model_analyzer_testlib . py : 62 : BuildFullModel : cell , array_ops . c . . . ( 0 / 1 . 04k para \ n model_analyzer_testlib . py : 62 : BuildFullModel : cell , array_ops . c . . . ( gradient ) ( 0 \ n model_analyzer_testlib . py : 64 : BuildFullModel : target = array_op . . . ( 0 / 0 params , \ n model_analyzer_testlib . py : 65 : BuildFullModel : loss = nn_ops . 
l2_ . . . ( 0 / 0 params , \ n model_analyzer_testlib . py : 65 : BuildFullModel : loss = nn_ops . l2_ . . . ( gradient ) ( 0 \ n model_analyzer_testlib . py : 67 : BuildFullModel : return sgd_op . min . . . ( 0 / 0 params , \ n ' , <nl> result ) <nl> <nl> self . assertLess ( 0 , tfprof_node . total_exec_micros ) <nl> self . assertEqual ( 2844 , tfprof_node . total_parameters ) <nl> self . assertEqual ( 91040 , tfprof_node . total_float_ops ) <nl> - self . assertEqual ( 5 , len ( tfprof_node . children ) ) <nl> + self . assertEqual ( 8 , len ( tfprof_node . children ) ) <nl> self . assertEqual ( ' _TFProfRoot ' , tfprof_node . name ) <nl> self . assertEqual ( <nl> ' model_analyzer_testlib . py : 58 : BuildFullModel : seq . append ( array_ . . . ' , <nl> tfprof_node . children [ 0 ] . name ) <nl> self . assertEqual ( <nl> - ' model_analyzer_testlib . py : 62 : BuildFullModel : cell , array_ops . c . . . ' , <nl> + ' model_analyzer_testlib . py : 58 : BuildFullModel : seq . append ( array_ . . . ( gradient ) ' , <nl> tfprof_node . children [ 1 ] . name ) <nl> self . assertEqual ( <nl> - ' model_analyzer_testlib . py : 64 : BuildFullModel : target = array_op . . . ' , <nl> + ' model_analyzer_testlib . py : 62 : BuildFullModel : cell , array_ops . c . . . ' , <nl> tfprof_node . children [ 2 ] . name ) <nl> self . assertEqual ( <nl> - ' model_analyzer_testlib . py : 65 : BuildFullModel : loss = nn_ops . l2_ . . . ' , <nl> + ' model_analyzer_testlib . py : 62 : BuildFullModel : cell , array_ops . c . . . ( gradient ) ' , <nl> tfprof_node . children [ 3 ] . name ) <nl> self . assertEqual ( <nl> - ' model_analyzer_testlib . py : 67 : BuildFullModel : return sgd_op . min . . . ' , <nl> + ' model_analyzer_testlib . py : 64 : BuildFullModel : target = array_op . . . ' , <nl> tfprof_node . children [ 4 ] . name ) <nl> + self . assertEqual ( <nl> + ' model_analyzer_testlib . py : 65 : BuildFullModel : loss = nn_ops . l2_ . . . ' , <nl> + tfprof_node . children [ 5 ] . name ) <nl> + self . assertEqual ( <nl> + ' model_analyzer_testlib . py : 65 : BuildFullModel : loss = nn_ops . l2_ . . . ( gradient ) ' , <nl> + tfprof_node . children [ 6 ] . name ) <nl> + self . assertEqual ( <nl> + ' model_analyzer_testlib . py : 67 : BuildFullModel : return sgd_op . min . . . ' , <nl> + tfprof_node . children [ 7 ] . name ) <nl> # pylint : enable = line - too - long <nl> <nl> def testCodeViewLeafGraphNode ( self ) : <nl> mmm a / tensorflow / python / profiler / option_builder . py <nl> ppp b / tensorflow / python / profiler / option_builder . py <nl> def with_pprof_output ( self , pprof_file ) : <nl> " " " Generate a pprof profile gzip file . <nl> <nl> To use the pprof file : <nl> - pprof - png - - nodecount = 20 - - sample_index = 1 < pprof_file > <nl> + pprof - png - - nodecount = 100 - - sample_index = 1 < pprof_file > <nl> <nl> Args : <nl> pprof_file : filename for output , usually suffixed with . pb . gz . <nl> mmm a / tensorflow / python / profiler / profiler_test . py <nl> ppp b / tensorflow / python / profiler / profiler_test . py <nl> def testMultiStepProfile ( self ) : <nl> checker = advice_pb . checkers [ ' ExpensiveOperationChecker ' ] <nl> self . assertGreater ( len ( checker . reports ) , 0 ) <nl> <nl> + def testMultipleProfilePerStep ( self ) : <nl> + ops . reset_default_graph ( ) <nl> + opts = ( builder ( builder . trainable_variables_parameter ( ) ) <nl> + . with_empty_output ( ) <nl> + . with_accounted_types ( [ ' . * ' ] ) <nl> + . 
select ( [ ' micros ' , ' bytes ' , ' peak_bytes ' , <nl> + ' residual_bytes ' , ' output_bytes ' ] ) . build ( ) ) <nl> + <nl> + r = lib . BuildSmallModel ( ) <nl> + sess = session . Session ( ) <nl> + profiler = model_analyzer . Profiler ( sess . graph ) <nl> + <nl> + init_var_run_meta = config_pb2 . RunMetadata ( ) <nl> + sess . run ( variables . global_variables_initializer ( ) , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> + run_metadata = init_var_run_meta ) <nl> + <nl> + train_run_meta = config_pb2 . RunMetadata ( ) <nl> + sess . run ( r , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> + run_metadata = train_run_meta ) <nl> + <nl> + profiler . add_step ( 0 , train_run_meta ) <nl> + ret1 = profiler . profile_name_scope ( opts ) <nl> + n1 = lib . SearchTFProfNode ( <nl> + ret1 , ' DW / Initializer / random_normal / RandomStandardNormal ' ) <nl> + # Without the var initialization run_meta , it doesn ' t have the <nl> + # information of var_initialization . <nl> + self . assertEqual ( n1 . exec_micros , 0 ) <nl> + self . assertEqual ( n1 . requested_bytes , 0 ) <nl> + self . assertEqual ( n1 . peak_bytes , 0 ) <nl> + self . assertEqual ( n1 . residual_bytes , 0 ) <nl> + <nl> + profiler . add_step ( 0 , init_var_run_meta ) <nl> + ret2 = profiler . profile_name_scope ( opts ) <nl> + n2 = lib . SearchTFProfNode ( <nl> + ret2 , ' DW / Initializer / random_normal / RandomStandardNormal ' ) <nl> + # After adding the var initialization run_meta . <nl> + self . assertGreater ( n2 . exec_micros , 0 ) <nl> + self . assertGreater ( n2 . requested_bytes , 0 ) <nl> + self . assertGreater ( n2 . peak_bytes , 0 ) <nl> + self . assertGreater ( n2 . residual_bytes , 0 ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl> | 1 . Adjust code view pprof image to better visualize backprop . | tensorflow/tensorflow | 93b21f7b1fa725299f86058436f034b15350de52 | 2017-08-16T00:52:23Z |
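The key mechanism in the profiler change above is the name heuristic in `IsGradNode`: a gradient op named `.../gradients/<fwd>_grad/...` is mapped back to its forward op `<fwd>`, so gradient traces can be attached to the forward op's Python code traces with a `(gradient)` suffix. A self-contained re-implementation of that parsing step, following the diff's logic:

```cpp
#include <iostream>
#include <string>

// Mirrors the heuristic from the diff: extract the forward op name from a
// gradient op name of the form ".../gradients/<fwd>_grad/...". Returns false
// for names that do not match the pattern.
bool IsGradNode(const std::string& name, std::string* forward_name) {
  auto grad_prefix = name.find("gradients/");
  auto grad_suffix = name.find("_grad/");
  if (grad_prefix == std::string::npos || grad_suffix == std::string::npos)
    return false;
  auto start = grad_prefix + std::string("gradients/").length();
  if (grad_suffix <= start) return false;  // empty or inverted span
  *forward_name = name.substr(start, grad_suffix - start);
  return true;
}

int main() {
  std::string fwd;
  if (IsGradNode("gradients/inp_1/Conv2D_grad/Conv2DBackpropInput", &fwd))
    std::cout << fwd << "\n";  // prints "inp_1/Conv2D"
}
```

As the comment in the diff admits, this is hacky: it depends on TensorFlow's gradient naming convention, which is why the code view counts and reports gradient nodes it cannot account for.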
mmm a / lib / cximage - 6 . 0 / raw / libdcr . c <nl> ppp b / lib / cximage - 6 . 0 / raw / libdcr . c <nl> dcr_getbits ( p , n ) where 0 < = n < = 25 returns an n - bit integer <nl> * / <nl> unsigned DCR_CLASS dcr_getbits ( DCRAW * p , int nbits ) <nl> { <nl> - static unsigned bitbuf = 0 ; <nl> - static int vbits = 0 , reset = 0 ; <nl> unsigned c ; <nl> <nl> if ( nbits = = - 1 ) <nl> - return bitbuf = vbits = reset = 0 ; <nl> - if ( nbits = = 0 | | reset ) return 0 ; <nl> - while ( vbits < nbits ) { <nl> + return p - > getbits_bitbuf = p - > getbits_vbits = p - > getbits_reset = 0 ; <nl> + if ( nbits = = 0 | | p - > getbits_reset ) return 0 ; <nl> + while ( p - > getbits_vbits < nbits ) { <nl> if ( ( c = dcr_fgetc ( p - > obj_ ) ) = = EOF ) dcr_derror ( p ) ; <nl> - if ( ( reset = p - > zero_after_ff & & c = = 0xff & & dcr_fgetc ( p - > obj_ ) ) ) return 0 ; <nl> - bitbuf = ( bitbuf < < 8 ) + ( uchar ) c ; <nl> - vbits + = 8 ; <nl> + if ( ( p - > getbits_reset = p - > zero_after_ff & & c = = 0xff & & dcr_fgetc ( p - > obj_ ) ) ) return 0 ; <nl> + p - > getbits_bitbuf = ( p - > getbits_bitbuf < < 8 ) + ( uchar ) c ; <nl> + p - > getbits_vbits + = 8 ; <nl> } <nl> - vbits - = nbits ; <nl> - return bitbuf < < ( 32 - nbits - vbits ) > > ( 32 - nbits ) ; <nl> + p - > getbits_vbits - = nbits ; <nl> + return p - > getbits_bitbuf < < ( 32 - nbits - p - > getbits_vbits ) > > ( 32 - nbits ) ; <nl> } <nl> <nl> void DCR_CLASS dcr_init_decoder ( DCRAW * p ) <nl> void DCR_CLASS dcr_init_decoder ( DCRAW * p ) <nl> uchar * DCR_CLASS dcr_make_decoder ( DCRAW * p , const uchar * source , int level ) <nl> { <nl> struct dcr_decode * cur ; <nl> - static int leaf ; <nl> int i , next ; <nl> <nl> - if ( level = = 0 ) leaf = 0 ; <nl> + if ( level = = 0 ) p - > make_decoder_leaf = 0 ; <nl> cur = p - > free_decode + + ; <nl> if ( p - > free_decode > p - > first_decode + 2048 ) { <nl> fprintf ( stderr , _ ( " % s : decoder table overflow \ n " ) , p - > ifname ) ; <nl> longjmp ( p - > failure , 2 ) ; <nl> } <nl> - for ( i = next = 0 ; i < = leaf & & next < 16 ; ) <nl> + for ( i = next = 0 ; i < = p - > make_decoder_leaf & & next < 16 ; ) <nl> i + = source [ next + + ] ; <nl> - if ( i > leaf ) { <nl> + if ( i > p - > make_decoder_leaf ) { <nl> if ( level < next ) { <nl> cur - > branch [ 0 ] = p - > free_decode ; <nl> dcr_make_decoder ( p , source , level + 1 ) ; <nl> cur - > branch [ 1 ] = p - > free_decode ; <nl> dcr_make_decoder ( p , source , level + 1 ) ; <nl> } else <nl> - cur - > leaf = source [ 16 + leaf + + ] ; <nl> + cur - > leaf = source [ 16 + p - > make_decoder_leaf + + ] ; <nl> } <nl> - return ( uchar * ) source + 16 + leaf ; <nl> + return ( uchar * ) source + 16 + p - > make_decoder_leaf ; <nl> } <nl> <nl> void DCR_CLASS dcr_crw_init_tables ( DCRAW * p , unsigned table ) <nl> void DCR_CLASS dcr_phase_one_load_raw ( DCRAW * p ) <nl> <nl> unsigned DCR_CLASS dcr_ph1_bits ( DCRAW * p , int nbits ) <nl> { <nl> - static UINT64 bitbuf = 0 ; <nl> - static int vbits = 0 ; <nl> - <nl> if ( nbits = = - 1 ) <nl> - return ( unsigned int ) ( bitbuf = vbits = 0 ) ; <nl> + return ( unsigned int ) ( p - > ph1_bits_bitbuf = p - > ph1_bits_vbits = 0 ) ; <nl> if ( nbits = = 0 ) return 0 ; <nl> - if ( ( vbits - = nbits ) < 0 ) { <nl> - bitbuf = bitbuf < < 32 | dcr_get4 ( p ) ; <nl> - vbits + = 32 ; <nl> + if ( ( p - > ph1_bits_vbits - = nbits ) < 0 ) { <nl> + p - > ph1_bits_bitbuf = p - > ph1_bits_bitbuf < < 32 | dcr_get4 ( p ) ; <nl> + p - > ph1_bits_vbits + = 32 ; <nl> } <nl> - return ( unsigned int ) ( bitbuf < < ( 64 - 
nbits - vbits ) > > ( 64 - nbits ) ) ; <nl> + return ( unsigned int ) ( p - > ph1_bits_bitbuf < < ( 64 - nbits - p - > ph1_bits_vbits ) > > ( 64 - nbits ) ) ; <nl> } <nl> <nl> void DCR_CLASS dcr_phase_one_load_raw_c ( DCRAW * p ) <nl> void DCR_CLASS nokia_load_raw ( DCRAW * p ) <nl> <nl> unsigned DCR_CLASS dcr_pana_bits ( DCRAW * p , int nbits ) <nl> { <nl> - static uchar buf [ 0x4000 ] ; <nl> - static int vbits ; <nl> int byte ; <nl> <nl> - if ( ! nbits ) return vbits = 0 ; <nl> - if ( ! vbits ) { <nl> - dcr_fread ( p - > obj_ , buf + p - > load_flags , 1 , 0x4000 - p - > load_flags ) ; <nl> - dcr_fread ( p - > obj_ , buf , 1 , p - > load_flags ) ; <nl> + if ( ! nbits ) return p - > pana_bits_vbits = 0 ; <nl> + if ( ! p - > pana_bits_vbits ) { <nl> + dcr_fread ( p - > obj_ , p - > pana_bits_buf + p - > load_flags , 1 , 0x4000 - p - > load_flags ) ; <nl> + dcr_fread ( p - > obj_ , p - > pana_bits_buf , 1 , p - > load_flags ) ; <nl> } <nl> - vbits = ( vbits - nbits ) & 0x1ffff ; <nl> - byte = vbits > > 3 ^ 0x3ff0 ; <nl> - return ( buf [ byte ] | buf [ byte + 1 ] < < 8 ) > > ( vbits & 7 ) & ~ ( - 1 < < nbits ) ; <nl> + p - > pana_bits_vbits = ( p - > pana_bits_vbits - nbits ) & 0x1ffff ; <nl> + byte = p - > pana_bits_vbits > > 3 ^ 0x3ff0 ; <nl> + return ( p - > pana_bits_buf [ byte ] | p - > pana_bits_buf [ byte + 1 ] < < 8 ) > > ( p - > pana_bits_vbits & 7 ) & ~ ( - 1 < < nbits ) ; <nl> } <nl> <nl> void DCR_CLASS dcr_panasonic_load_raw ( DCRAW * p ) <nl> const int * DCR_CLASS dcr_make_decoder_int ( DCRAW * p , const int * source , int lev <nl> int DCR_CLASS dcr_radc_token ( DCRAW * p , int tree ) <nl> { <nl> int t ; <nl> - static struct dcr_decode * dstart [ 18 ] , * dindex ; <nl> static const int * s , source [ ] = { <nl> 1 , 1 , 2 , 3 , 3 , 4 , 4 , 2 , 5 , 7 , 6 , 5 , 7 , 6 , 7 , 8 , <nl> 1 , 0 , 2 , 1 , 3 , 3 , 4 , 4 , 5 , 2 , 6 , 7 , 7 , 6 , 8 , 5 , 8 , 8 , <nl> int DCR_CLASS dcr_radc_token ( DCRAW * p , int tree ) <nl> <nl> if ( p - > free_decode = = p - > first_decode ) <nl> for ( s = source , t = 0 ; t < 18 ; t + + ) { <nl> - dstart [ t ] = p - > free_decode ; <nl> + p - > radc_token_dstart [ t ] = p - > free_decode ; <nl> s = dcr_make_decoder_int ( p , s , 0 ) ; <nl> } <nl> if ( tree = = 18 ) { <nl> int DCR_CLASS dcr_radc_token ( DCRAW * p , int tree ) <nl> else <nl> return ( dcr_getbits ( p , 5 ) < < 3 ) + 4 ; / * DC40 , Fotoman Pixtura * / <nl> } <nl> - for ( dindex = dstart [ tree ] ; dindex - > branch [ 0 ] ; ) <nl> - dindex = dindex - > branch [ dcr_getbits ( p , 1 ) ] ; <nl> - return dindex - > leaf ; <nl> + for ( p - > radc_token_dindex = p - > radc_token_dstart [ tree ] ; p - > radc_token_dindex - > branch [ 0 ] ; ) <nl> + p - > radc_token_dindex = p - > radc_token_dindex - > branch [ dcr_getbits ( p , 1 ) ] ; <nl> + return p - > radc_token_dindex - > leaf ; <nl> } <nl> <nl> # define FORYX for ( y = 1 ; y < 3 ; y + + ) for ( x = col + 1 ; x > = col ; x - - ) <nl> void DCR_CLASS dcr_kodak_jpeg_load_raw ( DCRAW * p ) { } <nl> METHODDEF ( boolean ) <nl> fill_input_buffer ( j_decompress_ptr cinfo ) <nl> { <nl> - static uchar jpeg_buffer [ 4096 ] ; <nl> + static uchar jpeg_buffer [ 4096 ] ; / / NOTE : This static not used as NO_JPEG is defined <nl> size_t nbytes ; <nl> <nl> / / nbytes = dcr_fread ( p - > obj_ , jpeg_buffer , 1 , 4096 ) ; <nl> void DCR_CLASS dcr_kodak_thumb_load_raw ( DCRAW * p ) <nl> p - > maximum = ( 1 < < ( p - > thumb_misc & 31 ) ) - 1 ; <nl> } <nl> <nl> - void DCR_CLASS dcr_sony_decrypt ( unsigned * data , int len , int start , int key ) <nl> + void DCR_CLASS 
dcr_sony_decrypt ( DCRAW * p , unsigned * data , int len , int start , int key ) <nl> { <nl> - static unsigned pad [ 128 ] , p ; <nl> - <nl> if ( start ) { <nl> - for ( p = 0 ; p < 4 ; p + + ) <nl> - pad [ p ] = key = key * 48828125 + 1 ; <nl> - pad [ 3 ] = pad [ 3 ] < < 1 | ( pad [ 0 ] ^ pad [ 2 ] ) > > 31 ; <nl> - for ( p = 4 ; p < 127 ; p + + ) <nl> - pad [ p ] = ( pad [ p - 4 ] ^ pad [ p - 2 ] ) < < 1 | ( pad [ p - 3 ] ^ pad [ p - 1 ] ) > > 31 ; <nl> - for ( p = 0 ; p < 127 ; p + + ) <nl> - pad [ p ] = htonl ( pad [ p ] ) ; <nl> + for ( p - > sony_decrypt_p = 0 ; p - > sony_decrypt_p < 4 ; p - > sony_decrypt_p + + ) <nl> + p - > sony_decrypt_pad [ p - > sony_decrypt_p ] = key = key * 48828125 + 1 ; <nl> + p - > sony_decrypt_pad [ 3 ] = p - > sony_decrypt_pad [ 3 ] < < 1 | ( p - > sony_decrypt_pad [ 0 ] ^ p - > sony_decrypt_pad [ 2 ] ) > > 31 ; <nl> + for ( p - > sony_decrypt_p = 4 ; p - > sony_decrypt_p < 127 ; p - > sony_decrypt_p + + ) <nl> + p - > sony_decrypt_pad [ p - > sony_decrypt_p ] = ( p - > sony_decrypt_pad [ p - > sony_decrypt_p - 4 ] ^ p - > sony_decrypt_pad [ p - > sony_decrypt_p - 2 ] ) < < 1 | ( p - > sony_decrypt_pad [ p - > sony_decrypt_p - 3 ] ^ p - > sony_decrypt_pad [ p - > sony_decrypt_p - 1 ] ) > > 31 ; <nl> + for ( p - > sony_decrypt_p = 0 ; p - > sony_decrypt_p < 127 ; p - > sony_decrypt_p + + ) <nl> + p - > sony_decrypt_pad [ p - > sony_decrypt_p ] = htonl ( p - > sony_decrypt_pad [ p - > sony_decrypt_p ] ) ; <nl> } <nl> while ( len - - ) <nl> - * data + + ^ = pad [ p + + & 127 ] = pad [ ( p + 1 ) & 127 ] ^ pad [ ( p + 65 ) & 127 ] ; <nl> + * data + + ^ = p - > sony_decrypt_pad [ p - > sony_decrypt_p + + & 127 ] = p - > sony_decrypt_pad [ ( p - > sony_decrypt_p + 1 ) & 127 ] ^ p - > sony_decrypt_pad [ ( p - > sony_decrypt_p + 65 ) & 127 ] ; <nl> } <nl> <nl> void DCR_CLASS dcr_sony_load_raw ( DCRAW * p ) <nl> void DCR_CLASS dcr_sony_load_raw ( DCRAW * p ) <nl> key = dcr_get4 ( p ) ; <nl> dcr_fseek ( p - > obj_ , 164600 , SEEK_SET ) ; <nl> dcr_fread ( p - > obj_ , head , 1 , 40 ) ; <nl> - dcr_sony_decrypt ( ( unsigned int * ) head , 10 , 1 , key ) ; <nl> + dcr_sony_decrypt ( p , ( unsigned int * ) head , 10 , 1 , key ) ; <nl> for ( i = 26 ; i - - > 22 ; ) <nl> key = key < < 8 | head [ i ] ; <nl> dcr_fseek ( p - > obj_ , p - > data_offset , SEEK_SET ) ; <nl> void DCR_CLASS dcr_sony_load_raw ( DCRAW * p ) <nl> dcr_merror ( p , pixel , " sony_load_raw ( ) " ) ; <nl> for ( row = 0 ; row < p - > height ; row + + ) { <nl> if ( dcr_fread ( p - > obj_ , pixel , 2 , p - > raw_width ) < p - > raw_width ) dcr_derror ( p ) ; <nl> - dcr_sony_decrypt ( ( unsigned int * ) pixel , p - > raw_width / 2 , ! row , key ) ; <nl> + dcr_sony_decrypt ( p , ( unsigned int * ) pixel , p - > raw_width / 2 , ! row , key ) ; <nl> for ( col = 9 ; col < p - > left_margin ; col + + ) <nl> p - > black + = ntohs ( pixel [ col ] ) ; <nl> for ( col = 0 ; col < p - > width ; col + + ) <nl> void DCR_CLASS dcr_smal_v9_load_raw ( DCRAW * p ) <nl> <nl> void DCR_CLASS dcr_foveon_decoder ( DCRAW * p , unsigned size , unsigned code ) <nl> { <nl> - static unsigned huff [ 1024 ] ; <nl> struct dcr_decode * cur ; <nl> int i , len ; <nl> <nl> if ( ! 
code ) { <nl> for ( i = 0 ; i < ( int ) size ; i + + ) <nl> - huff [ i ] = dcr_get4 ( p ) ; <nl> + p - > foveon_decoder_huff [ i ] = dcr_get4 ( p ) ; <nl> dcr_init_decoder ( p ) ; <nl> } <nl> cur = p - > free_decode + + ; <nl> void DCR_CLASS dcr_foveon_decoder ( DCRAW * p , unsigned size , unsigned code ) <nl> } <nl> if ( code ) <nl> for ( i = 0 ; i < ( int ) size ; i + + ) <nl> - if ( huff [ i ] = = code ) { <nl> + if ( p - > foveon_decoder_huff [ i ] = = code ) { <nl> cur - > leaf = i ; <nl> return ; <nl> } <nl> int DCR_CLASS dcr_parse_tiff_ifd ( DCRAW * p , int base ) <nl> dcr_stream_obj * sobj_ ; <nl> dcr_fseek ( p - > obj_ , sony_offset , SEEK_SET ) ; <nl> dcr_fread ( p - > obj_ , buf , sony_length , 1 ) ; <nl> - dcr_sony_decrypt ( buf , sony_length / 4 , 1 , sony_key ) ; <nl> + dcr_sony_decrypt ( p , buf , sony_length / 4 , 1 , sony_key ) ; <nl> <nl> sops_ = p - > ops_ ; <nl> sobj_ = p - > obj_ ; <nl> mmm a / lib / cximage - 6 . 0 / raw / libdcr . h <nl> ppp b / lib / cximage - 6 . 0 / raw / libdcr . h <nl> struct dcr_DCRAW { <nl> void ( * thumb_load_raw ) ( DCRAW * ) ; <nl> jmp_buf failure ; <nl> char * sz_error ; <nl> + / * local statics below here * / <nl> + unsigned getbits_bitbuf ; <nl> + int getbits_vbits ; <nl> + int getbits_reset ; <nl> + <nl> + int make_decoder_leaf ; <nl> + <nl> + unsigned long long ph1_bits_bitbuf ; <nl> + int ph1_bits_vbits ; <nl> + <nl> + uchar pana_bits_buf [ 0x4000 ] ; <nl> + int pana_bits_vbits ; <nl> + <nl> + struct dcr_decode * radc_token_dstart [ 18 ] , * radc_token_dindex ; <nl> + <nl> + unsigned sony_decrypt_pad [ 128 ] , sony_decrypt_p ; <nl> + <nl> + unsigned foveon_decoder_huff [ 1024 ] ; <nl> } ; <nl> <nl> <nl> | [ libdcr ] fix thread safety | xbmc/xbmc | e39b79fc0046364d4825954383570521ff8bdcb6 | 2012-12-16T07:53:56Z |
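The thread-safety fix above follows one pattern throughout: every function-local `static` buffer (`bitbuf`, `vbits`, `pad`, `huff`, and so on) is moved into per-instance fields on the `DCRAW` struct, because function-local statics are shared by every caller in the process and two threads decoding different images would corrupt each other's state. A minimal sketch of the pattern, loosely modeled on `dcr_getbits()` (simplified, not the libdcr code itself):

```cpp
#include <cstdint>
#include <iostream>

struct Decoder {
  uint64_t bitbuf = 0;  // previously 'static unsigned bitbuf' inside the function
  int vbits = 0;        // previously 'static int vbits'

  // Each Decoder owns its own bit buffer, so two threads decoding two
  // different inputs no longer interleave state.
  uint32_t getbits(int nbits, const uint8_t*& src) {
    while (vbits < nbits) {
      bitbuf = (bitbuf << 8) | *src++;
      vbits += 8;
    }
    vbits -= nbits;
    return uint32_t(bitbuf << (64 - nbits - vbits) >> (64 - nbits));
  }
};

int main() {
  uint8_t data[] = {0xAB, 0xCD};
  const uint8_t* p = data;
  Decoder d;
  std::cout << std::hex << d.getbits(8, p) << "\n";  // prints "ab"
}
```

Note the diff also threads the `DCRAW * p` context into `dcr_sony_decrypt`, which is the C equivalent of the same move: state that used to be hidden in statics now travels with an explicit context parameter.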
new file mode 100644 <nl> index 000000000000 . . 130b077d28f7 <nl> mmm / dev / null <nl> ppp b / jstests / auth / repl_auth_shell_mechanism . js <nl> <nl> + / / Start a replica set with auth using SCRAM - SHA - 256 exclusively , <nl> + / / then connect via shell . <nl> + <nl> + ( function ( ) { <nl> + <nl> + const rsTest = new ReplSetTest ( { nodes : 3 } ) ; <nl> + rsTest . startSet ( { <nl> + oplogSize : 10 , <nl> + keyFile : ' jstests / libs / key1 ' , <nl> + setParameter : { authenticationMechanisms : ' SCRAM - SHA - 256 ' } <nl> + } ) ; <nl> + rsTest . initiate ( ) ; <nl> + rsTest . awaitSecondaryNodes ( ) ; <nl> + <nl> + / / Setup initial data . <nl> + const primary = rsTest . getPrimary ( ) ; <nl> + const admin = primary . getDB ( ' admin ' ) ; <nl> + admin . createUser ( { user : ' admin ' , pwd : ' password ' , roles : jsTest . adminUserRoles } ) ; <nl> + admin . auth ( ' admin ' , ' password ' ) ; <nl> + admin . logout ( ) ; <nl> + <nl> + / / Fetch and rearrange connection string . <nl> + const connString = rsTest . getURL ( ) ; <nl> + const slash = connString . indexOf ( ' / ' ) ; <nl> + const rsName = connString . substr ( 0 , slash ) ; <nl> + const rsHosts = connString . substr ( slash + 1 ) ; <nl> + <nl> + / / Connect with shell using connString . <nl> + const csShell = runMongoProgram ( ' . / mongo ' , <nl> + ' - - host ' , <nl> + connString , <nl> + ' - u ' , <nl> + ' admin ' , <nl> + ' - - password ' , <nl> + ' password ' , <nl> + ' - - authenticationDatabase ' , <nl> + ' admin ' , <nl> + ' - - eval ' , <nl> + ' ; ' ) ; <nl> + assert . eq ( csShell , 0 , ' Failed to connect using connection string ' ) ; <nl> + <nl> + / / Connect with shell explicitly specifying mechanism . <nl> + const csShellMech = runMongoProgram ( ' . / mongo ' , <nl> + ' - - host ' , <nl> + connString , <nl> + ' - u ' , <nl> + ' admin ' , <nl> + ' - - password ' , <nl> + ' password ' , <nl> + ' - - authenticationDatabase ' , <nl> + ' admin ' , <nl> + ' - - authenticationMechanism ' , <nl> + ' SCRAM - SHA - 256 ' , <nl> + ' - - eval ' , <nl> + ' ; ' ) ; <nl> + assert . eq ( csShellMech , 0 , ' Failed to connect using connection string ' ) ; <nl> + <nl> + / / Connect with shell using URI . <nl> + const uriString = ' mongodb : / / admin : password @ ' + rsHosts + ' / admin ? replicaSet = ' + rsName ; <nl> + const uriShell = runMongoProgram ( ' . / mongo ' , uriString , ' - - eval ' , ' ; ' ) ; <nl> + assert . eq ( uriShell , 0 , ' Failed to connect using URI ' ) ; <nl> + <nl> + / / Connect with shell using URI and explcit mechanism . <nl> + const uriShellMech = <nl> + runMongoProgram ( ' . / mongo ' , uriString + ' & authMechanism = SCRAM - SHA - 256 ' , ' - - eval ' , ' ; ' ) ; <nl> + assert . eq ( uriShellMech , 0 , ' Failed to connect using URI ' ) ; <nl> + <nl> + rsTest . stopSet ( ) ; <nl> + } ) ( ) ; <nl> mmm a / src / mongo / client / dbclient_rs . cpp <nl> ppp b / src / mongo / client / dbclient_rs . cpp <nl> DBClientReplicaSet : : DBClientReplicaSet ( const string & name , <nl> _applicationName ( applicationName . toString ( ) ) , <nl> _so_timeout ( so_timeout ) , <nl> _uri ( std : : move ( uri ) ) { <nl> - if ( uri . isValid ( ) ) { <nl> + if ( _uri . isValid ( ) ) { <nl> _rsm = ReplicaSetMonitor : : createIfNeeded ( _uri ) ; <nl> } else { <nl> _rsm = ReplicaSetMonitor : : createIfNeeded ( name , <nl> mmm a / src / mongo / client / mongo_uri_connect . cpp <nl> ppp b / src / mongo / client / mongo_uri_connect . 
cpp <nl> DBClientBase * MongoURI : : connect ( StringData applicationName , <nl> return nullptr ; <nl> } <nl> <nl> + if ( ! getSetName ( ) . empty ( ) ) { <nl> + / / When performing initial topology discovery , don ' t bother authenticating <nl> + / / since we will be immediately restarting our connect loop to a single node . <nl> + return ret . release ( ) ; <nl> + } <nl> + <nl> auto optAuthObj = <nl> _makeAuthObjFromOptions ( ret - > getMaxWireVersion ( ) , ret - > getIsMasterSaslMechanisms ( ) ) ; <nl> if ( optAuthObj ) { <nl> | SERVER - 43582 Do not auth ReplicaSet Monitor | mongodb/mongo | b5b3517afcab6efd034db87715dcefc5557b1099 | 2019-09-24T23:49:49Z |
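Two things happen in the MongoDB change above: `MongoURI::connect` now skips authentication when the URI names a replica set, since the discovery connection is immediately discarded in favor of a direct single-node connection, and the `DBClientReplicaSet` constructor fix replaces `uri.isValid()` with `_uri.isValid()`. The latter is a classic move pitfall: the member initializer `_uri(std::move(uri))` runs before the constructor body, so the body was inspecting a moved-from parameter. A simplified C++ sketch of that bug shape (types reduced to the minimum, not the real driver classes):

```cpp
#include <string>
#include <utility>

// Minimal stand-in for the real MongoURI.
struct MongoURI {
  std::string connect_string;
  bool isValid() const { return !connect_string.empty(); }
};

struct DBClientReplicaSet {
  MongoURI _uri;
  bool monitor_created = false;

  explicit DBClientReplicaSet(MongoURI uri) : _uri(std::move(uri)) {
    // Wrong: 'uri' was moved from in the initializer list above, so its
    // state here is unspecified.
    //   if (uri.isValid()) { ... }
    if (_uri.isValid()) monitor_created = true;  // correct: query the member
  }
};

int main() {
  DBClientReplicaSet client(MongoURI{"mongodb://host1,host2/?replicaSet=rs0"});
  return client.monitor_created ? 0 : 1;
}
```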
mmm a / test / full_test . py <nl> ppp b / test / full_test . py <nl> def run_all_tests ( mode , checker , protocol , cores , slices ) : <nl> " protocol " : protocol , <nl> " cores " : cores , <nl> " slices " : slices , <nl> - " duration " : 120 , <nl> " suite - test " : suite_test } , <nl> repeat = 3 , timeout = 240 ) <nl> <nl> mmm a / test / integration / memcached_suite . py <nl> ppp b / test / integration / memcached_suite . py <nl> def test ( opts , port ) : <nl> if __name__ = = " __main__ " : <nl> op = make_option_parser ( ) <nl> op [ " suite - test " ] = StringFlag ( " - - suite - test " ) <nl> - auto_server_test_main ( test , op . parse ( sys . argv ) ) <nl> + auto_server_test_main ( test , op . parse ( sys . argv ) , timeout = 120 ) <nl> | Actually set memcached suite timeout . | rethinkdb/rethinkdb | ebc81e538fae9de18f39117b0ef61296368ec298 | 2010-12-23T02:08:36Z |
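The RethinkDB change above is small but has a recognizable shape: instead of threading a `duration` value through a generic options dict that the runner never honored, the suite entry point (`auto_server_test_main`) is given the timeout explicitly at the call site. A loose C++ illustration of that shape follows; the names and the 240-second default are hypothetical, taken only to echo the values visible in the diff:

```cpp
#include <chrono>
#include <iostream>
#include <string>

// The harness entry point takes an explicit timeout with a default,
// rather than fishing a 'duration' key out of a generic options map.
void run_suite(const std::string& name,
               std::chrono::seconds timeout = std::chrono::seconds(240)) {
  std::cout << "running " << name << " with timeout "
            << timeout.count() << "s\n";
}

int main() {
  // The suite that needs a different budget states it where it is run.
  run_suite("memcached_suite", std::chrono::seconds(120));
}
```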
mmm a / src / share / human_interface_device . hpp <nl> ppp b / src / share / human_interface_device . hpp <nl> class human_interface_device final { <nl> grabbed_ ( false ) , <nl> disabled_ ( false ) , <nl> is_built_in_keyboard_ ( false ) , <nl> - keyboard_type_ ( krbn : : keyboard_type : : none ) , <nl> disable_built_in_keyboard_if_exists_ ( false ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> / / retain device_ <nl> class human_interface_device final { <nl> return r ; <nl> } <nl> <nl> - krbn : : keyboard_type get_keyboard_type ( void ) const { <nl> - krbn : : keyboard_type __block value ; <nl> - gcd_utility : : dispatch_sync_in_main_queue ( ^ { <nl> - value = keyboard_type_ ; <nl> - } ) ; <nl> - return value ; <nl> - } <nl> - <nl> - void set_keyboard_type ( krbn : : keyboard_type keyboard_type ) { <nl> - gcd_utility : : dispatch_sync_in_main_queue ( ^ { <nl> - keyboard_type_ = keyboard_type ; <nl> - } ) ; <nl> - } <nl> - <nl> bool get_disable_built_in_keyboard_if_exists ( void ) const { <nl> bool __block value ; <nl> gcd_utility : : dispatch_sync_in_main_queue ( ^ { <nl> class human_interface_device final { <nl> bool disabled_ ; <nl> <nl> bool is_built_in_keyboard_ ; <nl> - krbn : : keyboard_type keyboard_type_ ; <nl> bool disable_built_in_keyboard_if_exists_ ; <nl> } ; <nl> | remove keyboard_type from human_interface_device | pqrs-org/Karabiner-Elements | 898e3b306496377df0740818bcdd4e75c1a8910b | 2016-12-25T08:17:12Z |
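The accessors removed above follow a pattern worth noting: every read and write of the device's state is funneled through `gcd_utility::dispatch_sync_in_main_queue`, serializing all access on one queue. A rough portable analogue of that pattern, sketched with a `std::mutex` in place of GCD (the class and field here are invented for illustration):

```
#include <mutex>

class device_state {
public:
    bool grabbed() const {
        std::lock_guard<std::mutex> lock(mutex_);  // serialize reads
        return grabbed_;
    }
    void set_grabbed(bool value) {
        std::lock_guard<std::mutex> lock(mutex_);  // serialize writes
        grabbed_ = value;
    }
private:
    mutable std::mutex mutex_;  // mutable so const getters can lock
    bool grabbed_ = false;
};
```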
mmm a / googlemock / CMakeLists . txt <nl> ppp b / googlemock / CMakeLists . txt <nl> endif ( ) <nl> # as $ { gmock_SOURCE_DIR } and to the root binary directory as <nl> # $ { gmock_BINARY_DIR } . <nl> # Language " C " is required for find_package ( Threads ) . <nl> - project ( gmock CXX C ) <nl> + if ( CMAKE_VERSION VERSION_LESS 3 . 0 ) <nl> + project ( gmock CXX C ) <nl> + else ( ) <nl> + cmake_policy ( SET CMP0048 NEW ) <nl> + project ( gmock VERSION 1 . 9 . 0 LANGUAGES CXX C ) <nl> + endif ( ) <nl> cmake_minimum_required ( VERSION 2 . 6 . 4 ) <nl> <nl> if ( COMMAND set_up_hermetic_build ) <nl> if ( INSTALL_GMOCK ) <nl> ARCHIVE DESTINATION $ { CMAKE_INSTALL_LIBDIR } ) <nl> install ( DIRECTORY $ { gmock_SOURCE_DIR } / include / gmock <nl> DESTINATION $ { CMAKE_INSTALL_INCLUDEDIR } ) <nl> + <nl> + # configure and install pkgconfig files <nl> + configure_file ( <nl> + cmake / gmock . pc . in <nl> + " $ { CMAKE_BINARY_DIR } / gmock . pc " <nl> + @ ONLY ) <nl> + configure_file ( <nl> + cmake / gmock_main . pc . in <nl> + " $ { CMAKE_BINARY_DIR } / gmock_main . pc " <nl> + @ ONLY ) <nl> + install ( FILES " $ { CMAKE_BINARY_DIR } / gmock . pc " " $ { CMAKE_BINARY_DIR } / gmock_main . pc " <nl> + DESTINATION " $ { CMAKE_INSTALL_LIBDIR } / pkgconfig " ) <nl> endif ( ) <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> new file mode 100644 <nl> index 000000000 . . c44164264 <nl> mmm / dev / null <nl> ppp b / googlemock / cmake / gmock . pc . in <nl> <nl> + libdir = @ CMAKE_INSTALL_FULL_LIBDIR @ <nl> + includedir = @ CMAKE_INSTALL_FULL_INCLUDEDIR @ <nl> + <nl> + Name : gmock <nl> + Description : GoogleMock ( without main ( ) function ) <nl> + Version : @ PROJECT_VERSION @ <nl> + URL : https : / / github . com / google / googletest <nl> + Libs : - L $ { libdir } - lgmock @ CMAKE_THREAD_LIBS_INIT @ <nl> + Cflags : - I $ { includedir } @ GTEST_HAS_PTHREAD_MACRO @ @ CMAKE_THREAD_LIBS_INIT @ <nl> new file mode 100644 <nl> index 000000000 . . c377dba1e <nl> mmm / dev / null <nl> ppp b / googlemock / cmake / gmock_main . pc . in <nl> <nl> + libdir = @ CMAKE_INSTALL_FULL_LIBDIR @ <nl> + includedir = @ CMAKE_INSTALL_FULL_INCLUDEDIR @ <nl> + <nl> + Name : gmock_main <nl> + Description : GoogleMock ( with main ( ) function ) <nl> + Version : @ PROJECT_VERSION @ <nl> + URL : https : / / github . com / google / googletest <nl> + Libs : - L $ { libdir } - lgmock_main @ CMAKE_THREAD_LIBS_INIT @ <nl> + Cflags : - I $ { includedir } @ GTEST_HAS_PTHREAD_MACRO @ @ CMAKE_THREAD_LIBS_INIT @ <nl> mmm a / googletest / CMakeLists . txt <nl> ppp b / googletest / CMakeLists . txt <nl> endif ( ) <nl> # as $ { gtest_SOURCE_DIR } and to the root binary directory as <nl> # $ { gtest_BINARY_DIR } . <nl> # Language " C " is required for find_package ( Threads ) . <nl> - project ( gtest CXX C ) <nl> + if ( CMAKE_VERSION VERSION_LESS 3 . 0 ) <nl> + project ( gtest CXX C ) <nl> + else ( ) <nl> + cmake_policy ( SET CMP0048 NEW ) <nl> + project ( gtest VERSION 1 . 9 . 0 LANGUAGES CXX C ) <nl> + endif ( ) <nl> cmake_minimum_required ( VERSION 2 . 6 . 4 ) <nl> <nl> if ( COMMAND set_up_hermetic_build ) <nl> if ( INSTALL_GTEST ) <nl> LIBRARY DESTINATION $ { CMAKE_INSTALL_LIBDIR } ) <nl> install ( DIRECTORY $ { gtest_SOURCE_DIR } / include / gtest <nl> DESTINATION $ { CMAKE_INSTALL_INCLUDEDIR } ) <nl> + <nl> + # configure and install pkgconfig files <nl> + configure_file ( <nl> + cmake / gtest . pc . 
in <nl> + " $ { CMAKE_BINARY_DIR } / gtest . pc " <nl> + @ ONLY ) <nl> + configure_file ( <nl> + cmake / gtest_main . pc . in <nl> + " $ { CMAKE_BINARY_DIR } / gtest_main . pc " <nl> + @ ONLY ) <nl> + install ( FILES " $ { CMAKE_BINARY_DIR } / gtest . pc " " $ { CMAKE_BINARY_DIR } / gtest_main . pc " <nl> + DESTINATION " $ { CMAKE_INSTALL_LIBDIR } / pkgconfig " ) <nl> endif ( ) <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> new file mode 100644 <nl> index 000000000 . . e7967ad56 <nl> mmm / dev / null <nl> ppp b / googletest / cmake / gtest . pc . in <nl> <nl> + libdir = @ CMAKE_INSTALL_FULL_LIBDIR @ <nl> + includedir = @ CMAKE_INSTALL_FULL_INCLUDEDIR @ <nl> + <nl> + Name : gtest <nl> + Description : GoogleTest ( without main ( ) function ) <nl> + Version : @ PROJECT_VERSION @ <nl> + URL : https : / / github . com / google / googletest <nl> + Libs : - L $ { libdir } - lgtest @ CMAKE_THREAD_LIBS_INIT @ <nl> + Cflags : - I $ { includedir } @ GTEST_HAS_PTHREAD_MACRO @ @ CMAKE_THREAD_LIBS_INIT @ <nl> new file mode 100644 <nl> index 000000000 . . fe25d9c73 <nl> mmm / dev / null <nl> ppp b / googletest / cmake / gtest_main . pc . in <nl> <nl> + libdir = @ CMAKE_INSTALL_FULL_LIBDIR @ <nl> + includedir = @ CMAKE_INSTALL_FULL_INCLUDEDIR @ <nl> + <nl> + Name : gtest_main <nl> + Description : GoogleTest ( with main ( ) function ) <nl> + Version : @ PROJECT_VERSION @ <nl> + URL : https : / / github . com / google / googletest <nl> + Requires : gtest <nl> + Libs : - L $ { libdir } - lgtest_main @ CMAKE_THREAD_LIBS_INIT @ <nl> + Cflags : - I $ { includedir } @ GTEST_HAS_PTHREAD_MACRO @ @ CMAKE_THREAD_LIBS_INIT @ <nl> mmm a / googletest / cmake / internal_utils . cmake <nl> ppp b / googletest / cmake / internal_utils . cmake <nl> macro ( config_compiler_and_linker ) <nl> # instead , we use windows threading primitives <nl> if ( NOT gtest_disable_pthreads AND NOT MINGW ) <nl> # Defines CMAKE_USE_PTHREADS_INIT and CMAKE_THREAD_LIBS_INIT . <nl> + set ( THREADS_PREFER_PTHREAD_FLAG ON ) <nl> find_package ( Threads ) <nl> endif ( ) <nl> <nl> macro ( config_compiler_and_linker ) <nl> endif ( ) <nl> <nl> if ( CMAKE_USE_PTHREADS_INIT ) # The pthreads library is available and allowed . <nl> - set ( cxx_base_flags " $ { cxx_base_flags } - DGTEST_HAS_PTHREAD = 1 " ) <nl> + set ( GTEST_HAS_PTHREAD_MACRO " - DGTEST_HAS_PTHREAD = 1 " ) <nl> else ( ) <nl> - set ( cxx_base_flags " $ { cxx_base_flags } - DGTEST_HAS_PTHREAD = 0 " ) <nl> + set ( GTEST_HAS_PTHREAD_MACRO " - DGTEST_HAS_PTHREAD = 0 " ) <nl> endif ( ) <nl> + set ( cxx_base_flags " $ { cxx_base_flags } $ { GTEST_HAS_PTHREAD_MACRO } " ) <nl> <nl> # For building gtest ' s own tests and samples . <nl> set ( cxx_exception " $ { CMAKE_CXX_FLAGS } $ { cxx_base_flags } $ { cxx_exception_flags } " ) <nl> mmm a / googletest / docs / FAQ . md <nl> ppp b / googletest / docs / FAQ . md <nl> TEST_F ( CoolTest , DoSomething ) { <nl> If you try to build Google Test ' s Xcode project with Xcode 4 . 0 or later , you may encounter an error message that looks like <nl> " Missing SDK in target gtest \ _framework : / Developer / SDKs / MacOSX10 . 4u . sdk " . That means that Xcode does not support the SDK the project is targeting . See the Xcode section in the [ README ] ( . . / README . md ) file on how to resolve this . <nl> <nl> + # # How do I easily discover the flags needed for GoogleTest ? 
# # <nl> + <nl> + GoogleTest ( and GoogleMock ) now support discovering all necessary flags using pkg - config . <nl> + See the [ pkg - config guide ] ( Pkgconfig . md ) on how you can easily discover all compiler and <nl> + linker flags using pkg - config . <nl> + <nl> # # My question is not covered in your FAQ ! # # <nl> <nl> If you cannot find the answer to your question in this FAQ , there are <nl> new file mode 100644 <nl> index 000000000 . . 97612894d <nl> mmm / dev / null <nl> ppp b / googletest / docs / Pkgconfig . md <nl> <nl> + # # Using GoogleTest from various build systems # # <nl> + <nl> + GoogleTest comes with pkg - config files that can be used to determine all <nl> + necessary flags for compiling and linking to GoogleTest ( and GoogleMock ) . <nl> + Pkg - config is a standardised plain - text format containing <nl> + <nl> + * the includedir ( - I ) path <nl> + * necessary macro ( - D ) definitions <nl> + * further required flags ( - pthread ) <nl> + * the library ( - L ) path <nl> + * the library ( - l ) to link to <nl> + <nl> + All current build systems support pkg - config in one way or another . For <nl> + all examples here we assume you want to compile the sample <nl> + ` samples / sample3_unittest . cc ` . <nl> + <nl> + <nl> + # # # CMake # # # <nl> + <nl> + Using ` pkg - config ` in CMake is fairly easy : <nl> + <nl> + ` ` ` <nl> + cmake_minimum_required ( VERSION 3 . 0 ) <nl> + <nl> + cmake_policy ( SET CMP0048 NEW ) <nl> + project ( my_gtest_pkgconfig VERSION 0 . 0 . 1 LANGUAGES CXX ) <nl> + <nl> + find_package ( PkgConfig ) <nl> + pkg_search_module ( GTEST REQUIRED gtest_main ) <nl> + <nl> + add_executable ( testapp samples / sample3_unittest . cc ) <nl> + target_link_libraries ( testapp $ { GTEST_LDFLAGS } ) <nl> + target_compile_options ( testapp PUBLIC $ { GTEST_CFLAGS } ) <nl> + <nl> + include ( CTest ) <nl> + add_test ( first_and_only_test testapp ) <nl> + ` ` ` <nl> + <nl> + It is generally recommended that you use ` target_compile_options ` + ` _CFLAGS ` <nl> + over ` target_include_directories ` + ` _INCLUDE_DIRS ` , as the former carries more <nl> + than just - I flags : GoogleTest might require a macro telling its internal headers <nl> + that all libraries have been compiled with threading enabled , and it might also <nl> + require ` - pthread ` in the compile step , so splitting the pkg - config ` Cflags ` <nl> + variable into include dirs and macros for ` target_compile_definitions ( ) ` might <nl> + still miss this . The same recommendation goes for using ` _LDFLAGS ` over the <nl> + more commonplace ` _LIBRARIES ` , which happens to discard ` - L ` flags and ` - pthread ` . <nl> + <nl> + <nl> + # # # Autotools # # # <nl> + <nl> + Finding GoogleTest in Autoconf and using it from Automake is also fairly easy : <nl> + <nl> + In your ` configure . ac ` : <nl> + <nl> + ` ` ` <nl> + AC_PREREQ ( [ 2 . 69 ] ) <nl> + AC_INIT ( [ my_gtest_pkgconfig ] , [ 0 . 0 . 1 ] ) <nl> + AC_CONFIG_SRCDIR ( [ samples / sample3_unittest . cc ] ) <nl> + AC_PROG_CXX <nl> + <nl> + PKG_CHECK_MODULES ( [ GTEST ] , [ gtest_main ] ) <nl> + <nl> + AM_INIT_AUTOMAKE ( [ foreign subdir - objects ] ) <nl> + AC_CONFIG_FILES ( [ Makefile ] ) <nl> + AC_OUTPUT <nl> + ` ` ` <nl> + <nl> + and in your ` Makefile . am ` : <nl> + <nl> + ` ` ` <nl> + check_PROGRAMS = testapp <nl> + TESTS = $ ( check_PROGRAMS ) <nl> + <nl> + testapp_SOURCES = samples / sample3_unittest .
cc <nl> + testapp_CXXFLAGS = $ ( GTEST_CFLAGS ) <nl> + testapp_LDADD = $ ( GTEST_LIBS ) <nl> + ` ` ` <nl> + <nl> + <nl> + # # # Meson # # # <nl> + <nl> + Meson natively uses pkgconfig to query dependencies : <nl> + <nl> + ` ` ` <nl> + project ( ' my_gtest_pkgconfig ' , ' cpp ' , version : ' 0 . 0 . 1 ' ) <nl> + <nl> + gtest_dep = dependency ( ' gtest_main ' ) <nl> + <nl> + testapp = executable ( <nl> + ' testapp ' , <nl> + files ( [ ' samples / sample3_unittest . cc ' ] ) , <nl> + dependencies : gtest_dep , <nl> + install : false ) <nl> + <nl> + test ( ' first_and_only_test ' , testapp ) <nl> + ` ` ` <nl> + <nl> + <nl> + # # # Plain Makefiles # # # <nl> + <nl> + Since ` pkg - config ` is a small Unix command - line utility , it can be used <nl> + in handwritten ` Makefile ` s too : <nl> + <nl> + ` ` ` <nl> + GTEST_CFLAGS = ` pkg - config - - cflags gtest_main ` <nl> + GTEST_LIBS = ` pkg - config - - libs gtest_main ` <nl> + <nl> + . PHONY : tests all <nl> + <nl> + tests : all <nl> + . / testapp <nl> + <nl> + all : testapp <nl> + <nl> + testapp : testapp . o <nl> + $ ( CXX ) $ ( CXXFLAGS ) $ ( LDFLAGS ) $ < - o $ @ $ ( GTEST_LIBS ) <nl> + <nl> + testapp . o : samples / sample3_unittest . cc <nl> + $ ( CXX ) $ ( CPPFLAGS ) $ ( CXXFLAGS ) $ < - c - o $ @ $ ( GTEST_CFLAGS ) <nl> + ` ` ` <nl> + <nl> + <nl> + # # # Help ! pkg - config can ' t find GoogleTest ! # # # <nl> + <nl> + Let ' s say you have a ` CMakeLists . txt ` along the lines of the one in this <nl> + tutorial and you try to run ` cmake ` . It is very possible that you get a <nl> + failure along the lines of : <nl> + <nl> + ` ` ` <nl> + - - Checking for one of the modules ' gtest_main ' <nl> + CMake Error at / usr / share / cmake / Modules / FindPkgConfig . cmake : 640 ( message ) : <nl> + None of the required ' gtest_main ' found <nl> + ` ` ` <nl> + <nl> + These failures are common if you installed GoogleTest yourself and have not <nl> + sourced it from a distro or other package manager . If so , you need to tell <nl> + pkg - config where it can find the ` . pc ` files containing the information . <nl> + Say you installed GoogleTest to ` / usr / local ` , then it might be that the <nl> + ` . pc ` files are installed under ` / usr / local / lib64 / pkgconfig ` . If you set <nl> + <nl> + ` ` ` <nl> + export PKG_CONFIG_PATH = / usr / local / lib64 / pkgconfig <nl> + ` ` ` <nl> + <nl> + pkg - config will also try to look in ` PKG_CONFIG_PATH ` to find ` gtest_main . pc ` . <nl> | Merge pull request from SoapGentoo / pkgconfig | google/googletest | e0fc65c5fbfe4e50a0369d032e9b2811b4b7db77 | 2017-08-14T18:33:41Z |
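The build snippets in the new `Pkgconfig.md` above all compile `samples/sample3_unittest.cc`, but any test source works the same way. A minimal stand-in test file, assuming only that `gtest_main` is resolvable via pkg-config as the guide shows:

```
// testapp.cc -- built with, e.g.:
//   g++ testapp.cc $(pkg-config --cflags --libs gtest_main)
#include <gtest/gtest.h>

TEST(PkgConfigSmoke, Arithmetic) {
    EXPECT_EQ(2 + 2, 4);
}
// No main() is needed: linking against gtest_main supplies one.
```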
mmm a / pipeline . jsonc <nl> ppp b / pipeline . jsonc <nl> <nl> { <nl> - " eosio " : <nl> - { <nl> - " pipeline - branch " : " master " , <nl> - " environment " : <nl> - { <nl> - " IMAGE_TAG " : " _2 - 5 " <nl> - } <nl> - } , <nl> " eosio - base - images " : <nl> { <nl> " pipeline - branch " : " develop " <nl> } , <nl> - " eosio - build - unpinned " : <nl> - { <nl> - " pipeline - branch " : " master " <nl> - } , <nl> " eosio - lrt " : <nl> { <nl> - " pipeline - branch " : " master " , <nl> " environment " : <nl> { <nl> " BUILD_FLAGS " : " - y - P - m " , <nl> <nl> } , <nl> " eos - multiversion - tests " : <nl> { <nl> - " pipeline - branch " : " master " , <nl> " environment " : <nl> { <nl> " IMAGE_TAG " : " _1 - 8 - 0 - rc2 " <nl> | Delete defaults so pipeline . jsonc only contains modifications to pipeline defaults | EOSIO/eos | aa65f80966328b2b9e6ea573e9184a051191267b | 2019-06-06T00:06:27Z |
mmm a / libraries / chain / include / eosio / chain / resource_limits_private . hpp <nl> ppp b / libraries / chain / include / eosio / chain / resource_limits_private . hpp <nl> namespace eosio { namespace chain { namespace resource_limits { <nl> { <nl> const GreaterIntType max = std : : numeric_limits < LesserIntType > : : max ( ) ; <nl> const GreaterIntType min = 0 ; <nl> + if ( ! ( val > = min & & val < = max ) ) { <nl> + wlog ( " downgrade_cast higher to lower " ) ; <nl> + } <nl> EOS_ASSERT ( val > = min & & val < = max , rate_limiting_state_inconsistent , " Casting a higher bit integer value $ { v } to a lower bit integer value which cannot contain the value , valid range is [ $ { min } , $ { max } ] " , ( " v " , val ) ( " min " , min ) ( " max " , max ) ) ; <nl> return LesserIntType ( val ) ; <nl> } ; <nl> mmm a / plugins / chain_plugin / chain_plugin . cpp <nl> ppp b / plugins / chain_plugin / chain_plugin . cpp <nl> read_only : : get_account_results read_only : : get_account ( const get_account_params & <nl> } <nl> } <nl> <nl> + t_id = d . find < chain : : table_id_object , chain : : by_code_scope_table > ( boost : : make_tuple ( config : : system_account_name , params . account_name , N ( refunds ) ) ) ; <nl> + if ( t_id ! = nullptr ) { <nl> + const auto & idx = d . get_index < key_value_index , by_scope_primary > ( ) ; <nl> + auto it = idx . find ( boost : : make_tuple ( t_id - > id , params . account_name ) ) ; <nl> + if ( it ! = idx . end ( ) ) { <nl> + vector < char > data ; <nl> + copy_inline_row ( * it , data ) ; <nl> + result . refund_request = abis . binary_to_variant ( " refund_request " , data ) ; <nl> + } <nl> + } <nl> + <nl> t_id = d . find < chain : : table_id_object , chain : : by_code_scope_table > ( boost : : make_tuple ( config : : system_account_name , config : : system_account_name , N ( voters ) ) ) ; <nl> if ( t_id ! = nullptr ) { <nl> const auto & idx = d . get_index < key_value_index , by_scope_primary > ( ) ; <nl> mmm a / plugins / chain_plugin / include / eosio / chain_plugin / chain_plugin . hpp <nl> ppp b / plugins / chain_plugin / include / eosio / chain_plugin / chain_plugin . 
hpp <nl> class read_only { <nl> <nl> fc : : variant total_resources ; <nl> fc : : variant self_delegated_bandwidth ; <nl> + fc : : variant refund_request ; <nl> fc : : variant voter_info ; <nl> } ; <nl> <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_currency_stats_result , ( supply ) ( ma <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_producers_params , ( json ) ( lower_bound ) ( limit ) ) <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_producers_result , ( rows ) ( total_producer_vote_weight ) ( more ) ) ; <nl> <nl> - FC_REFLECT ( eosio : : chain_apis : : read_only : : get_account_results , ( account_name ) ( privileged ) ( last_code_update ) ( created ) ( ram_quota ) ( net_weight ) ( cpu_weight ) ( net_limit ) ( cpu_limit ) ( ram_usage ) ( permissions ) ( total_resources ) ( self_delegated_bandwidth ) ( voter_info ) ) <nl> + FC_REFLECT ( eosio : : chain_apis : : read_only : : get_account_results , ( account_name ) ( privileged ) ( last_code_update ) ( created ) ( ram_quota ) ( net_weight ) ( cpu_weight ) ( net_limit ) ( cpu_limit ) ( ram_usage ) ( permissions ) ( total_resources ) ( self_delegated_bandwidth ) ( refund_request ) ( voter_info ) ) <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_code_results , ( account_name ) ( code_hash ) ( wast ) ( wasm ) ( abi ) ) <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_abi_results , ( account_name ) ( abi ) ) <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_account_params , ( account_name ) ) <nl> mmm a / programs / cleos / main . cpp <nl> ppp b / programs / cleos / main . cpp <nl> void get_account ( const string & accountName , bool json_format ) { <nl> dfs_print ( r , 0 ) ; <nl> } <nl> <nl> - auto to_pretty_net = [ ] ( int64_t nbytes ) { <nl> + auto to_pretty_net = [ ] ( int64_t nbytes , uint8_t width_for_units = 5 ) { <nl> if ( nbytes = = - 1 ) { <nl> / / special case . Treat it as unlimited <nl> return std : : string ( " unlimited " ) ; <nl> void get_account ( const string & accountName , bool json_format ) { <nl> } <nl> std : : stringstream ss ; <nl> ss < < setprecision ( 4 ) ; <nl> - ss < < bytes < < " " < < std : : left < < setw ( 5 ) < < unit ; <nl> + ss < < bytes < < " " ; <nl> + if ( width_for_units > 0 ) <nl> + ss < < std : : left < < setw ( width_for_units ) ; <nl> + ss < < unit ; <nl> return ss . str ( ) ; <nl> } ; <nl> <nl> void get_account ( const string & accountName , bool json_format ) { <nl> } <nl> <nl> <nl> - auto to_pretty_time = [ ] ( int64_t nmicro ) { <nl> + auto to_pretty_time = [ ] ( int64_t nmicro , uint8_t width_for_units = 5 ) { <nl> if ( nmicro = = - 1 ) { <nl> / / special case . Treat it as unlimited <nl> return std : : string ( " unlimited " ) ; <nl> void get_account ( const string & accountName , bool json_format ) { <nl> string unit = " us " ; <nl> double micro = static_cast < double > ( nmicro ) ; <nl> <nl> - if ( micro > 1000000 * 60 ) { <nl> + if ( micro > 1000000 * 60 * 60ll ) { <nl> micro / = 1000000 * 60 * 60ll ; <nl> unit = " hr " ; <nl> } <nl> void get_account ( const string & accountName , bool json_format ) { <nl> } <nl> std : : stringstream ss ; <nl> ss < < setprecision ( 4 ) ; <nl> - ss < < micro < < " " < < std : : left < < setw ( 5 ) < < unit ; <nl> + ss < < micro < < " " ; <nl> + if ( width_for_units > 0 ) <nl> + ss < < std : : left < < setw ( width_for_units ) ; <nl> + ss < < unit ; <nl> return ss . 
str ( ) ; <nl> } ; <nl> <nl> void get_account ( const string & accountName , bool json_format ) { <nl> std : : cout < < indent < < std : : left < < std : : setw ( 11 ) < < " limit : " < < std : : right < < std : : setw ( 18 ) < < to_pretty_time ( res . cpu_limit . max ) < < " \ n " ; <nl> std : : cout < < std : : endl ; <nl> <nl> + if ( res . refund_request . is_object ( ) ) { <nl> + auto obj = res . refund_request . get_object ( ) ; <nl> + auto request_time = fc : : time_point_sec : : from_iso_string ( obj [ " request_time " ] . as_string ( ) ) ; <nl> + fc : : time_point refund_time = request_time + fc : : days ( 3 ) ; <nl> + auto now = fc : : time_point : : now ( ) ; <nl> + std : : cout < < std : : fixed < < setprecision ( 3 ) ; <nl> + std : : cout < < " unstaked tokens : " < < std : : endl ; <nl> + std : : cout < < indent < < std : : left < < std : : setw ( 25 ) < < " time of unstake request : " < < std : : right < < std : : setw ( 20 ) < < string ( request_time ) ; <nl> + if ( now > = refund_time ) { <nl> + std : : cout < < " ( available to claim now with ' eosio : : refund ' action ) \ n " ; <nl> + } else { <nl> + std : : cout < < " ( funds will be available in " < < to_pretty_time ( ( refund_time - now ) . count ( ) , 0 ) < < " ) \ n " ; <nl> + <nl> + asset net = asset : : from_string ( obj [ " net_amount " ] . as_string ( ) ) ; <nl> + asset cpu = asset : : from_string ( obj [ " cpu_amount " ] . as_string ( ) ) ; <nl> + std : : cout < < indent < < std : : left < < std : : setw ( 25 ) < < " from net bandwidth : " < < std : : right < < std : : setw ( 18 ) < < net < < std : : endl ; <nl> + std : : cout < < indent < < std : : left < < std : : setw ( 25 ) < < " from cpu bandwidth : " < < std : : right < < std : : setw ( 18 ) < < cpu < < std : : endl ; <nl> + std : : cout < < indent < < std : : left < < std : : setw ( 25 ) < < " total : " < < std : : right < < std : : setw ( 18 ) < < ( cpu + net ) < < std : : endl ; <nl> + } <nl> + std : : cout < < std : : endl ; <nl> + } <nl> <nl> if ( res . voter_info . is_object ( ) ) { <nl> auto & obj = res . voter_info . get_object ( ) ; <nl> | add unstaking info to get account | EOSIO/eos | 36d8692daca80f38cf5d5c54409e74c4a90ea1c1 | 2018-06-12T22:10:52Z |
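The `to_pretty_net` / `to_pretty_time` helpers above both scale a raw count into the largest unit that fits before printing. A self-contained sketch of that idea for byte counts; the divide-by-1024 loop mirrors the pattern, but the unit labels are illustrative, not cleos' exact strings:

```
#include <cstdint>
#include <iomanip>
#include <sstream>
#include <string>

std::string pretty_bytes(int64_t nbytes) {
    if (nbytes == -1) return "unlimited";  // special case, as in cleos
    const char* units[] = {"bytes", "KiB", "MiB", "GiB"};
    double v = static_cast<double>(nbytes);
    int u = 0;
    while (v >= 1024 && u < 3) { v /= 1024; ++u; }  // pick largest fitting unit
    std::ostringstream ss;
    ss << std::setprecision(4) << v << ' ' << units[u];
    return ss.str();
}
```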
mmm a / db / db_range_del_test . cc <nl> ppp b / db / db_range_del_test . cc <nl> TEST_F ( DBRangeDelTest , RangeTombstoneWrittenToMinimalSsts ) { <nl> ASSERT_EQ ( 1 , num_range_deletions ) ; <nl> } <nl> <nl> + TEST_F ( DBRangeDelTest , OverlappedTombstones ) { <nl> + const int kNumPerFile = 4 , kNumFiles = 2 ; <nl> + Options options = CurrentOptions ( ) ; <nl> + options . disable_auto_compactions = true ; <nl> + options . max_compaction_bytes = 9 * 1024 ; <nl> + DestroyAndReopen ( options ) ; <nl> + Random rnd ( 301 ) ; <nl> + for ( int i = 0 ; i < kNumFiles ; + + i ) { <nl> + std : : vector < std : : string > values ; <nl> + / / Write 12K ( 4 values , each 3K ) <nl> + for ( int j = 0 ; j < kNumPerFile ; j + + ) { <nl> + values . push_back ( RandomString ( & rnd , 3 < < 10 ) ) ; <nl> + ASSERT_OK ( Put ( Key ( i * kNumPerFile + j ) , values [ j ] ) ) ; <nl> + } <nl> + } <nl> + ASSERT_OK ( db_ - > Flush ( FlushOptions ( ) ) ) ; <nl> + ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 0 ) ) ; <nl> + MoveFilesToLevel ( 2 ) ; <nl> + ASSERT_EQ ( 2 , NumTableFilesAtLevel ( 2 ) ) ; <nl> + <nl> + ASSERT_OK ( db_ - > DeleteRange ( WriteOptions ( ) , db_ - > DefaultColumnFamily ( ) , Key ( 1 ) , <nl> + Key ( ( kNumFiles ) * kNumPerFile + 1 ) ) ) ; <nl> + ASSERT_OK ( db_ - > Flush ( FlushOptions ( ) ) ) ; <nl> + <nl> + ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 0 ) ) ; <nl> + <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , nullptr , nullptr , nullptr , <nl> + true / * disallow_trivial_move * / ) ; <nl> + <nl> + / / The tombstone range is not broken up into multiple SSTs which may incur a <nl> + / / large compaction with L2 . <nl> + ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 1 ) ) ; <nl> + std : : vector < std : : vector < FileMetaData > > files ; <nl> + dbfull ( ) - > TEST_CompactRange ( 1 , nullptr , nullptr , nullptr , <nl> + true / * disallow_trivial_move * / ) ; <nl> + ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 2 ) ) ; <nl> + ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 1 ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DBRangeDelTest , OverlappedKeys ) { <nl> + const int kNumPerFile = 4 , kNumFiles = 2 ; <nl> + Options options = CurrentOptions ( ) ; <nl> + options . disable_auto_compactions = true ; <nl> + options . max_compaction_bytes = 9 * 1024 ; <nl> + DestroyAndReopen ( options ) ; <nl> + Random rnd ( 301 ) ; <nl> + for ( int i = 0 ; i < kNumFiles ; + + i ) { <nl> + std : : vector < std : : string > values ; <nl> + / / Write 12K ( 4 values , each 3K ) <nl> + for ( int j = 0 ; j < kNumPerFile ; j + + ) { <nl> + values . 
push_back ( RandomString ( & rnd , 3 < < 10 ) ) ; <nl> + ASSERT_OK ( Put ( Key ( i * kNumPerFile + j ) , values [ j ] ) ) ; <nl> + } <nl> + } <nl> + ASSERT_OK ( db_ - > Flush ( FlushOptions ( ) ) ) ; <nl> + ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 0 ) ) ; <nl> + MoveFilesToLevel ( 2 ) ; <nl> + ASSERT_EQ ( 2 , NumTableFilesAtLevel ( 2 ) ) ; <nl> + <nl> + for ( int i = 1 ; i < kNumFiles * kNumPerFile + 1 ; i + + ) { <nl> + ASSERT_OK ( Put ( Key ( i ) , " 0x123 " ) ) ; <nl> + } <nl> + ASSERT_OK ( db_ - > Flush ( FlushOptions ( ) ) ) ; <nl> + ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 0 ) ) ; <nl> + <nl> + / / The key range is broken up into three SSTs to avoid a future big compaction <nl> + / / with the grandparent <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , nullptr , nullptr , nullptr , <nl> + true / * disallow_trivial_move * / ) ; <nl> + ASSERT_EQ ( 3 , NumTableFilesAtLevel ( 1 ) ) ; <nl> + <nl> + std : : vector < std : : vector < FileMetaData > > files ; <nl> + dbfull ( ) - > TEST_CompactRange ( 1 , nullptr , nullptr , nullptr , <nl> + true / * disallow_trivial_move * / ) ; <nl> + ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 2 ) ) ; <nl> + ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 1 ) ) ; <nl> + } <nl> + <nl> # endif / / ROCKSDB_LITE <nl> <nl> } / / namespace rocksdb <nl> | Add test showing range tombstones can create excessively large compactions ( ) | facebook/rocksdb | 250953112348d39a3f29c37834041ae611358264 | 2019-10-24T18:08:44Z |
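The tests above exercise RocksDB's `DeleteRange`, which writes a single range tombstone covering a half-open key range; the assertions then check how compaction splits (or deliberately refuses to split) the resulting files. A minimal usage sketch of the API itself, assuming a scratch database path:

```
#include <cassert>
#include <string>
#include "rocksdb/db.h"

int main() {
    rocksdb::DB* db = nullptr;
    rocksdb::Options options;
    options.create_if_missing = true;
    rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/range_del_demo", &db);
    assert(s.ok());

    db->Put(rocksdb::WriteOptions(), "key1", "v1");
    // One tombstone covers the half-open range [key0, key9).
    db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(),
                    "key0", "key9");

    std::string value;
    s = db->Get(rocksdb::ReadOptions(), "key1", &value);
    assert(s.IsNotFound());  // key1 is covered by the range tombstone

    delete db;
}
```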
mmm a / doc / unit_testing . md <nl> ppp b / doc / unit_testing . md <nl> Unary RPC : <nl> MockEchoTestServiceStub stub ; <nl> EchoResponse resp ; <nl> resp . set_message ( " hello world " ) ; <nl> - Expect_CALL ( stub , Echo ( _ , _ , _ ) ) . Times ( Atleast ( 1 ) ) . WillOnce ( DoAll ( SetArgPointee < 2 > ( resp ) , Return ( Status : : OK ) ) ) ; <nl> + EXPECT_CALL ( stub , Echo ( _ , _ , _ ) ) . Times ( AtLeast ( 1 ) ) . WillOnce ( DoAll ( SetArgPointee < 2 > ( resp ) , Return ( Status : : OK ) ) ) ; <nl> FakeClient client ( stub ) ; <nl> client . DoEcho ( ) ; <nl> ` ` ` <nl> | Correct gMock syntax in example code . | grpc/grpc | 280fcdb95fd893b00e401c0ea62a5d94e07b127a | 2020-05-25T23:06:53Z |
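The corrected line above only compiles with gMock's exact spellings (`EXPECT_CALL`, `AtLeast`). A complete, self-contained variant of the same expectation, using a plain virtual interface in place of a generated gRPC mock stub (all names here are illustrative):

```
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

class EchoService {
public:
    virtual ~EchoService() = default;
    virtual bool Echo(const std::string& ctx, const std::string& req,
                      std::string* resp) = 0;
};

class MockEchoService : public EchoService {
public:
    MOCK_METHOD3(Echo, bool(const std::string&, const std::string&, std::string*));
};

TEST(EchoTest, FillsResponse) {
    MockEchoService stub;
    EXPECT_CALL(stub, Echo(_, _, _))
        .Times(AtLeast(1))
        .WillOnce(DoAll(SetArgPointee<2>(std::string("hello world")),
                        Return(true)));  // write *resp, then report success

    std::string resp;
    EXPECT_TRUE(stub.Echo("ctx", "req", &resp));
    EXPECT_EQ("hello world", resp);
}
// Link against gmock_main (or call InitGoogleMock yourself) for main().
```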
mmm a / dbms / include / DB / DataStreams / SummingSortedBlockInputStream . h <nl> ppp b / dbms / include / DB / DataStreams / SummingSortedBlockInputStream . h <nl> class SummingSortedBlockInputStream : public MergingSortedBlockInputStream <nl> Names column_names_to_sum ; / / / If set , it is converted into column_numbers_to_sum during initialization . <nl> ColumnNumbers column_numbers_to_sum ; <nl> - / * * The table can have nested tables that are processed in a special way . <nl> - * If the name of a nested table ends in ` Map ` and it contains exactly two columns , <nl> + / * * The table can have nested tables that are processed in a special way . <nl> + * If the name of a nested table ends in ` Map ` and it contains at least two columns , <nl> * satisfying the following criteria : <nl> * - the first column is numeric ( ( U ) IntN , Date , DateTime ) , let us call it key , <nl> - * - the second column is arithmetic ( ( U ) IntN , Float32 / 64 ) , call it value . <nl> - * Such a nested table is treated as a mapping key = > value , and when <nl> - * its rows are merged , the elements of the two sets are merged by key with summation of value . <nl> + * - the remaining columns are arithmetic ( ( U ) IntN , Float32 / 64 ) , call them ( values . . . ) . <nl> + * Such a nested table is treated as a mapping key = > ( values . . . ) , and when <nl> + * its rows are merged , the elements of the two sets are merged by key with summation of the corresponding ( values . . . ) . <nl> * Example : <nl> * [ ( 1 , 100 ) ] + [ ( 2 , 150 ) ] - > [ ( 1 , 100 ) , ( 2 , 150 ) ] <nl> * [ ( 1 , 100 ) ] + [ ( 1 , 150 ) ] - > [ ( 1 , 250 ) ] <nl> class SummingSortedBlockInputStream : public MergingSortedBlockInputStream <nl> * [ ( 1 , 100 ) , ( 2 , 150 ) ] + [ ( 1 , - 100 ) ] - > [ ( 2 , 150 ) ] <nl> * / <nl> <nl> - / / / Stores the numbers of the key column and the value column <nl> + / / / Stores the numbers of the key column and the value columns <nl> struct map_description <nl> { <nl> std : : size_t key_col_num ; <nl> mmm a / dbms / src / DataStreams / SummingSortedBlockInputStream . cpp <nl> ppp b / dbms / src / DataStreams / SummingSortedBlockInputStream . cpp <nl> Block SummingSortedBlockInputStream : : readImpl ( ) <nl> { <nl> / / / select actual nested Maps from list of candidates <nl> for ( const auto & map : discovered_maps ) <nl> { <nl> - / / / map can only contain a pair of elements ( key - > value ) <nl> + / / / map should contain at least two elements ( key - > value ) <nl> if ( map . second . size ( ) < 2 ) <nl> continue ; <nl> <nl> - / / / check types of key and value <nl> + / / / check type of key <nl> const auto key_num = map . second . front ( ) ; <nl> auto & key_col = merged_block . getByPosition ( key_num ) ; <nl> / / / skip maps , whose members are part of primary key <nl> mmm a / dbms / tests / queries / 0_stateless / 00148_summing_merge_tree_nested_map_multiple_values . reference <nl> ppp b / dbms / tests / queries / 0_stateless / 00148_summing_merge_tree_nested_map_multiple_values . reference <nl> <nl> + 0 [ 1 , 2 ] [ 100 , 150 ] [ 1 , - 2 . 5 ] <nl> + 1 [ 1 ] [ 250 ] [ 0 ] <nl> + 2 [ 1 , 2 ] [ 250 , 150 ] [ 3 . 5 , 3 . 5 ] <nl> + 3 [ 2 ] [ 150 ] [ 1 . 5 ] <nl> + 0 [ 1 ] [ 1 ] [ ' 2015 - 04 - 09 ' ] [ 1 ] [ 100 ] <nl> + 0 [ 1 ] [ 1 ] [ ' 2015 - 04 - 08 ' ] [ 1 ] [ 200 ] <nl> + 0 [ 1 ] [ 1 ] [ ' 2015 - 04 - 09 ' ] [ 1 ] [ 100 ] <nl> | dbms : fix test reference and bring commentaries up - to - date [ # METR - 15913 ] .
| ClickHouse/ClickHouse | 226b11b8877be2aa1cc75edcc77267c7fa404eb7 | 2015-04-10T10:02:57Z |
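The header comment above (translated from Russian) defines the merge rule for nested `...Map` tables: rows form a mapping key => (values...), merging sums the value tuples per key, and keys whose values all sum to zero are dropped, as the comment's own examples show. A small standalone sketch of exactly that rule, independent of ClickHouse's column machinery:

```
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

using Values = std::vector<double>;
using MapRows = std::map<int64_t, Values>;

MapRows merge_maps(const MapRows& a, const MapRows& b) {
    MapRows result = a;
    for (const auto& [key, vals] : b) {
        Values& acc = result[key];
        if (acc.size() < vals.size()) acc.resize(vals.size(), 0.0);
        for (size_t i = 0; i < vals.size(); ++i) acc[i] += vals[i];  // sum per key
    }
    // Drop rows whose every value summed to zero.
    for (auto it = result.begin(); it != result.end();) {
        bool all_zero = true;
        for (double v : it->second) all_zero = all_zero && (v == 0.0);
        it = all_zero ? result.erase(it) : std::next(it);
    }
    return result;
}

int main() {
    // Mirrors the comment's example: [(1, 100)] + [(1, -100)] -> []
    MapRows merged = merge_maps({{1, {100.0}}}, {{1, {-100.0}}});
    std::cout << merged.size() << "\n";  // prints 0
}
```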
mmm a / example / bench . cpp <nl> ppp b / example / bench . cpp <nl> using namespace utils ; <nl> <nl> void bench ( int howmany , std : : shared_ptr < spdlog : : logger > log ) ; <nl> void bench_mt ( int howmany , std : : shared_ptr < spdlog : : logger > log , int thread_count ) ; <nl> - void f ( ) ; <nl> + <nl> int main ( int argc , char * argv [ ] ) <nl> { <nl> <nl> - <nl> int howmany = 1048576 ; <nl> int threads = 10 ; <nl> bool auto_flush = false ; <nl> int file_size = 30 * 1024 * 1024 ; <nl> int rotating_files = 5 ; <nl> <nl> - / / spdlog : : set_pattern ( " % x % X . % e % v " ) ; <nl> - auto console = spdlog : : stdout_logger_mt ( " console " ) ; <nl> - <nl> - f ( ) ; <nl> - console - > info ( " Welcome to spdlog ! " ) ; <nl> - console - > info ( " An info message example { } . . " , 1 ) ; <nl> - console - > info ( ) < < " Streams are supported too " < < 1 ; <nl> - <nl> - <nl> - console - > info ( " Easy padding in numbers like { : 08d } " , 12 ) ; <nl> - console - > info ( " Support for int : { 0 : d } ; hex : { 0 : 08x } ; oct : { 0 : o } ; bin : { 0 : b } " , 42 ) ; <nl> - console - > info ( " Support for floats { : 03 . 2f } " , 1 . 23456 ) ; <nl> - console - > info ( " Positional args are { 1 } { 0 } . . " , " too " , " supported " ) ; <nl> - <nl> - console - > info ( " { : < 30 } " , " left aligned " ) ; <nl> - console - > info ( " { : > 30 } " , " right aligned " ) ; <nl> - console - > info ( " { : ^ 30 } " , " centered " ) ; <nl> - <nl> - <nl> - / / return 0 ; <nl> - <nl> - <nl> try <nl> { <nl> + <nl> if ( argc > 1 ) <nl> howmany = atoi ( argv [ 1 ] ) ; <nl> if ( argc > 2 ) <nl> threads = atoi ( argv [ 2 ] ) ; <nl> <nl> - / * cout < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> - cout < < " Single thread , " < < format ( howmany ) < < " iterations , flush every " < < auto_flush < < " lines " < < endl ; <nl> - cout < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> <nl> - auto rotating_st = spdlog : : rotating_logger_st ( " rotating_st " , " logs / rotating_st " , file_size , rotating_files , auto_flush ) ; <nl> - bench ( howmany , rotating_st ) ; <nl> - auto daily_st = spdlog : : daily_logger_st ( " daily_st " , " logs / daily_st " , auto_flush ) ; <nl> - bench ( howmany , daily_st ) ; <nl> - bench ( howmany , spdlog : : create < null_sink_st > ( " null_st " ) ) ; <nl> + cout < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> + cout < < " Single thread , " < < format ( howmany ) < < " iterations , auto flush = " < < auto_flush < < endl ; <nl> + cout < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> <nl> - cout < < " \ n * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> - cout < < threads < < " threads sharing same logger , " < < format ( howmany ) < < " iterations , flush every " < < auto_flush < < " lines " < < endl ; <nl> - cout < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 
* * * * * * * * * * * * * * * * * * \ n " ; <nl> + auto rotating_st = spdlog : : rotating_logger_st ( " rotating_st " , " logs / rotating_st " , file_size , rotating_files , auto_flush ) ; <nl> + bench ( howmany , rotating_st ) ; <nl> + auto daily_st = spdlog : : daily_logger_st ( " daily_st " , " logs / daily_st " , auto_flush ) ; <nl> + bench ( howmany , daily_st ) ; <nl> + bench ( howmany , spdlog : : create < null_sink_st > ( " null_st " ) ) ; <nl> <nl> - auto rotating_mt = spdlog : : rotating_logger_mt ( " rotating_mt " , " logs / rotating_mt " , file_size , rotating_files , auto_flush ) ; <nl> - bench_mt ( howmany , rotating_mt , threads ) ; <nl> - * / <nl> + cout < < " \ n * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> + cout < < threads < < " threads sharing same logger , " < < format ( howmany ) < < " iterations , auto_flush = " < < auto_flush < < endl ; <nl> + cout < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> <nl> - / / auto daily_mt = spdlog : : daily_logger_mt ( " daily_mt " , " logs / daily_mt " , auto_flush ) ; <nl> - / / bench_mt ( howmany , daily_mt , threads ) ; <nl> + auto rotating_mt = spdlog : : rotating_logger_mt ( " rotating_mt " , " logs / rotating_mt " , file_size , rotating_files , auto_flush ) ; <nl> + bench_mt ( howmany , rotating_mt , threads ) ; <nl> <nl> <nl> - / / spdlog : : set_pattern ( " % T % z " ) ; <nl> - / / while ( true ) <nl> - / / bench_mt ( howmany , spdlog : : create < null_sink_st > ( " null_st " ) , 8 ) ; <nl> + auto daily_mt = spdlog : : daily_logger_mt ( " daily_mt " , " logs / daily_mt " , auto_flush ) ; <nl> + bench_mt ( howmany , daily_mt , threads ) ; <nl> + bench ( howmany , spdlog : : create < null_sink_st > ( " null_mt " ) ) ; <nl> <nl> cout < < " \ n * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> - cout < < " async logging . . " < < threads < < " threads sharing same logger , " < < format ( howmany ) < < " iterations , flush every " < < auto_flush < < " lines " < < endl ; <nl> + cout < < " async logging . . " < < threads < < " threads sharing same logger , " < < format ( howmany ) < < " iterations , auto_flush = " < < auto_flush < < endl ; <nl> cout < < " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ n " ; <nl> <nl> - / / spdlog : : set_async_mode ( howmany ) ; <nl> <nl> - / / auto as = spdlog : : rotating_logger_st ( " as " , " logs / rotating_async " , file_size , rotating_files , auto_flush ) ; <nl> + spdlog : : set_async_mode ( howmany ) ; <nl> <nl> - while ( true ) <nl> + for ( int i = 0 ; i < 5 ; + + i ) <nl> { <nl> - auto as = std : : make_shared < spdlog : : async_logger > ( " as " , std : : make_shared < sinks : : null_sink_st > ( ) , howmany ) ; <nl> + auto as = spdlog : : daily_logger_st ( " as " , " logs / daily_async " , auto_flush ) ; <nl> bench_mt ( howmany , as , threads ) ; <nl> + spdlog : : drop ( " as " ) ; <nl> } <nl> - <nl> - <nl> - / / cin . 
ignore ( ) ; <nl> - <nl> - <nl> } <nl> catch ( std : : exception & ex ) <nl> { <nl> void bench ( int howmany , std : : shared_ptr < spdlog : : logger > log ) <nl> auto start = system_clock : : now ( ) ; <nl> for ( auto i = 0 ; i < howmany ; + + i ) <nl> { <nl> - log - > info ( " Hello logger : msg number " , i ) ; <nl> + log - > info ( " Hello logger : msg number { } " , i ) ; <nl> } <nl> <nl> <nl> void bench ( int howmany , std : : shared_ptr < spdlog : : logger > log ) <nl> void bench_mt ( int howmany , std : : shared_ptr < spdlog : : logger > log , int thread_count ) <nl> { <nl> <nl> - <nl> cout < < log - > name ( ) < < " . . . \ t \ t " < < flush ; <nl> std : : atomic < int > msg_counter { 0 } ; <nl> vector < thread > threads ; <nl> auto start = system_clock : : now ( ) ; <nl> - <nl> for ( int t = 0 ; t < thread_count ; + + t ) <nl> { <nl> threads . push_back ( std : : thread ( [ & ] ( ) <nl> void bench_mt ( int howmany , std : : shared_ptr < spdlog : : logger > log , int thread_count <nl> auto delta_d = duration_cast < duration < double > > ( delta ) . count ( ) ; <nl> cout < < format ( int ( howmany / delta_d ) ) < < " / sec " < < endl ; <nl> } <nl> - <nl> | Undo commit | gabime/spdlog | 961d5b947df5ad61260782406789720db1920e29 | 2014-12-07T16:49:34Z |
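The benchmarks above time a tight logging loop; note they use `std::chrono::system_clock`, while `steady_clock` is the safer choice for measuring intervals since it never jumps. The timing skeleton, reduced to its essentials with a trivial workload standing in for `log->info(...)`:

```
#include <chrono>
#include <cstdio>

int main() {
    using namespace std::chrono;
    const int howmany = 1000000;
    auto start = steady_clock::now();
    volatile long sink = 0;
    for (int i = 0; i < howmany; ++i) sink = sink + i;  // stand-in for logging
    auto secs = duration_cast<duration<double>>(steady_clock::now() - start);
    std::printf("%d ops in %.3f s -> %.0f ops/sec\n",
                howmany, secs.count(), howmany / secs.count());
}
```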
mmm a / cocos / scripting / lua - bindings / proj . android / Android . mk <nl> ppp b / cocos / scripting / lua - bindings / proj . android / Android . mk <nl> LOCAL_SRC_FILES : = . . / manual / CCLuaBridge . cpp \ <nl> . . / . . / . . / . . / external / lua / tolua / tolua_to . c \ <nl> . . / . . / . . / . . / external / xxtea / xxtea . cpp <nl> <nl> - <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / tolua \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / luajit / include \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / 2d \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / 3d \ <nl> - $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / manual \ <nl> - $ ( LOCAL_PATH ) / . . / manual / extension \ <nl> - $ ( LOCAL_PATH ) / . . / manual / cocostudio \ <nl> - $ ( LOCAL_PATH ) / . . / manual / ui \ <nl> - $ ( LOCAL_PATH ) / . . / manual / cocos2d \ <nl> - $ ( LOCAL_PATH ) / . . / manual / platform / android \ <nl> - $ ( LOCAL_PATH ) / . . / manual / platform / android / jni \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / xxtea \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / . . \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / . . / extensions <nl> - <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / tolua \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / luajit / include \ <nl> - $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / manual \ <nl> - $ ( LOCAL_PATH ) / . . / manual / cocos2d <nl> - <nl> - <nl> - LOCAL_STATIC_LIBRARIES : = luajit_static <nl> - LOCAL_STATIC_LIBRARIES + = cocos2dx_static <nl> - <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - # libluacocos3d <nl> - include $ ( CLEAR_VARS ) <nl> - <nl> - LOCAL_MODULE : = cocos3d_lua_static <nl> - <nl> - LOCAL_MODULE_FILENAME : = libluacocos3d <nl> - <nl> - LOCAL_SRC_FILES : = . . / manual / 3d / lua_cocos2dx_3d_manual . cpp \ <nl> + # 3d <nl> + LOCAL_SRC_FILES + = . . / manual / 3d / lua_cocos2dx_3d_manual . cpp \ <nl> . . / auto / lua_cocos2dx_3d_auto . cpp <nl> <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / manual / 3d \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / 3d \ <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / 3d <nl> - <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocos3d_static <nl> - <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - # libluacocosdenshion <nl> - include $ ( CLEAR_VARS ) <nl> - <nl> - LOCAL_MODULE : = cocosdenshion_lua_static <nl> - <nl> - LOCAL_MODULE_FILENAME : = libluacocosdenshion <nl> - <nl> - LOCAL_SRC_FILES : = . . / manual / cocosdenshion / lua_cocos2dx_cocosdenshion_manual . cpp \ <nl> + # cocosdenshion <nl> + LOCAL_SRC_FILES + = . . / manual / cocosdenshion / lua_cocos2dx_cocosdenshion_manual . cpp \ <nl> . . / auto / lua_cocos2dx_cocosdenshion_auto . cpp <nl> <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / manual / cocosdenshion \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / audio / include \ <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . 
/ manual / cocosdenshion <nl> - <nl> - LOCAL_STATIC_LIBRARIES : = cocosdenshion_static <nl> - LOCAL_STATIC_LIBRARIES + = cocos2d_lua_static <nl> - <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - include $ ( CLEAR_VARS ) <nl> - <nl> - LOCAL_MODULE : = network_lua_static <nl> - <nl> - LOCAL_MODULE_FILENAME : = libluanetwork <nl> - <nl> - LOCAL_SRC_FILES : = . . / manual / network / lua_cocos2dx_network_manual . cpp \ <nl> + # network <nl> + LOCAL_SRC_FILES + = . . / manual / network / lua_cocos2dx_network_manual . cpp \ <nl> . . / manual / network / lua_extensions . c \ <nl> . . / manual / network / Lua_web_socket . cpp \ <nl> . . / manual / network / lua_xml_http_request . cpp \ <nl> LOCAL_SRC_FILES : = . . / manual / network / lua_cocos2dx_network_manual . cpp \ <nl> . . / . . / . . / . . / external / lua / luasocket / timeout . c \ <nl> . . / . . / . . / . . / external / lua / luasocket / udp . c \ <nl> . . / . . / . . / . . / external / lua / luasocket / unix . c \ <nl> - . . / . . / . . / . . / external / lua / luasocket / usocket . c \ <nl> - <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / network \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / network <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / network <nl> - <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocos_network_static <nl> - <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - include $ ( CLEAR_VARS ) <nl> - <nl> - LOCAL_MODULE : = cocosbuilder_lua_static <nl> + . . / . . / . . / . . / external / lua / luasocket / usocket . c <nl> <nl> - LOCAL_MODULE_FILENAME : = libluacocosbuilder <nl> - <nl> - LOCAL_SRC_FILES : = . . / manual / cocosbuilder / lua_cocos2dx_cocosbuilder_manual . cpp \ <nl> + # cocosbuilder <nl> + LOCAL_SRC_FILES + = . . / manual / cocosbuilder / lua_cocos2dx_cocosbuilder_manual . cpp \ <nl> . . / manual / cocosbuilder / CCBProxy . cpp \ <nl> - . . / auto / lua_cocos2dx_cocosbuilder_auto . cpp \ <nl> - <nl> - <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / cocosbuilder \ <nl> - $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / editor - support / cocosbuilder <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / cocosbuilder <nl> - <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocosbuilder_static <nl> + . . / auto / lua_cocos2dx_cocosbuilder_auto . cpp <nl> <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - <nl> - # <nl> - include $ ( CLEAR_VARS ) <nl> - <nl> - LOCAL_MODULE : = cocostudio_lua_static <nl> - <nl> - LOCAL_MODULE_FILENAME : = libluacocostudio <nl> - <nl> - LOCAL_SRC_FILES : = . . / manual / cocostudio / lua_cocos2dx_coco_studio_manual . cpp \ <nl> - . . / auto / lua_cocos2dx_studio_auto . cpp \ <nl> - <nl> - <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / cocostudio \ <nl> - $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / editor - support / cocostudio <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / cocostudio <nl> - <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocostudio_static <nl> - <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - include $ ( CLEAR_VARS ) <nl> - <nl> - LOCAL_MODULE : = spine_lua_static <nl> + # cocostudio <nl> + LOCAL_SRC_FILES + = . . / manual / cocostudio / lua_cocos2dx_coco_studio_manual . 
cpp \ <nl> + . . / auto / lua_cocos2dx_studio_auto . cpp <nl> <nl> - LOCAL_MODULE_FILENAME : = libluaspine <nl> - <nl> - LOCAL_SRC_FILES : = . . / manual / spine / lua_cocos2dx_spine_manual . cpp \ <nl> + # spine <nl> + LOCAL_SRC_FILES + = . . / manual / spine / lua_cocos2dx_spine_manual . cpp \ <nl> . . / manual / spine / LuaSkeletonAnimation . cpp \ <nl> - . . / auto / lua_cocos2dx_spine_auto . cpp \ <nl> - <nl> - <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / spine \ <nl> - $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / editor - support / spine <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / spine <nl> + . . / auto / lua_cocos2dx_spine_auto . cpp <nl> <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = spine_static <nl> - <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - include $ ( CLEAR_VARS ) <nl> <nl> - LOCAL_MODULE : = extension_lua_static <nl> - <nl> - LOCAL_MODULE_FILENAME : = libluaextension <nl> + # ui <nl> + LOCAL_SRC_FILES + = . . / manual / ui / lua_cocos2dx_experimental_video_manual . cpp \ <nl> + . . / manual / ui / lua_cocos2dx_ui_manual . cpp \ <nl> + . . / auto / lua_cocos2dx_experimental_video_auto . cpp \ <nl> + . . / auto / lua_cocos2dx_ui_auto . cpp <nl> <nl> - LOCAL_SRC_FILES : = . . / manual / extension / lua_cocos2dx_extension_manual . cpp \ <nl> + # extension <nl> + LOCAL_SRC_FILES + = . . / manual / extension / lua_cocos2dx_extension_manual . cpp \ <nl> . . / auto / lua_cocos2dx_extension_auto . cpp \ <nl> <nl> - <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / extension \ <nl> + LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / tolua \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / luajit / include \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / 2d \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / 3d \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / network \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / editor - support / cocosbuilder \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / editor - support / cocostudio \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / editor - support / spine \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / ui \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . / extensions \ <nl> $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / . . / extensions <nl> - <nl> - <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / extension <nl> - <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocos_extension_static <nl> - <nl> - include $ ( BUILD_STATIC_LIBRARY ) <nl> - <nl> - include $ ( CLEAR_VARS ) <nl> - <nl> - LOCAL_MODULE : = ui_lua_static <nl> + $ ( LOCAL_PATH ) / . . / manual \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocos2d \ <nl> + $ ( LOCAL_PATH ) / . . / manual / 3d \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocosdenshion \ <nl> + $ ( LOCAL_PATH ) / . . / manual / network \ <nl> + $ ( LOCAL_PATH ) / . . / manual / extension \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocostudio \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocosbuilder \ <nl> + $ ( LOCAL_PATH ) / . . / manual / spine \ <nl> + $ ( LOCAL_PATH ) / . . / manual / ui \ <nl> + $ ( LOCAL_PATH ) / . . / manual / platform / android \ <nl> + $ ( LOCAL_PATH ) / . . / manual / platform / android / jni \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / xxtea \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . \ <nl> + $ ( LOCAL_PATH ) / . . / . . 
/ . . / . . / external / lua <nl> <nl> - LOCAL_MODULE_FILENAME : = libluaui <nl> <nl> - LOCAL_SRC_FILES : = . . / manual / ui / lua_cocos2dx_experimental_video_manual . cpp \ <nl> - . . / manual / ui / lua_cocos2dx_ui_manual . cpp \ <nl> - . . / auto / lua_cocos2dx_experimental_video_auto . cpp \ <nl> - . . / auto / lua_cocos2dx_ui_auto . cpp <nl> <nl> - LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / ui \ <nl> - $ ( LOCAL_PATH ) / . . / auto \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / ui <nl> - <nl> + LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / tolua \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . / external / lua / luajit / include \ <nl> + $ ( LOCAL_PATH ) / . . / auto \ <nl> + $ ( LOCAL_PATH ) / . . / manual \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocos2d \ <nl> + $ ( LOCAL_PATH ) / . . / manual / 3d \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocosdenshion \ <nl> + $ ( LOCAL_PATH ) / . . / manual / network \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocosbuilder \ <nl> + $ ( LOCAL_PATH ) / . . / manual / cocostudio \ <nl> + $ ( LOCAL_PATH ) / . . / manual / spine \ <nl> + $ ( LOCAL_PATH ) / . . / manual / extension \ <nl> + $ ( LOCAL_PATH ) / . . / manual / ui \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . <nl> <nl> - LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / manual / ui <nl> <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocos_ui_static <nl> + LOCAL_STATIC_LIBRARIES : = luajit_static <nl> + LOCAL_STATIC_LIBRARIES + = cocos2dx_static <nl> <nl> include $ ( BUILD_STATIC_LIBRARY ) <nl> <nl> + <nl> $ ( call import - module , lua / luajit / prebuilt / android ) <nl> $ ( call import - module , . ) <nl> - $ ( call import - module , audio / android ) <nl> - $ ( call import - module , network ) <nl> - $ ( call import - module , editor - support / cocosbuilder ) <nl> - $ ( call import - module , editor - support / cocostudio ) <nl> - $ ( call import - module , editor - support / spine ) <nl> - $ ( call import - module , ui ) <nl> - $ ( call import - module , extensions ) <nl> - $ ( call import - module , 3d ) <nl> - <nl> - <nl> mmm a / tests / lua - empty - test / project / proj . android / jni / Android . mk <nl> ppp b / tests / lua - empty - test / project / proj . android / jni / Android . mk <nl> LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / . . / Classes \ <nl> $ ( LOCAL_PATH ) / . . / . . / . . / . . / . . / external / lua / tolua \ <nl> <nl> LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocosdenshion_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = network_lua_static <nl> - <nl> <nl> include $ ( BUILD_SHARED_LIBRARY ) <nl> <nl> mmm a / tests / lua - game - controller - test / project / proj . android / jni / Android . mk <nl> ppp b / tests / lua - game - controller - test / project / proj . android / jni / Android . mk <nl> LOCAL_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / . . / Classes \ <nl> $ ( LOCAL_PATH ) / . . / . . / . . / . . / . . / cocos / base <nl> <nl> LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> - LOCAL_STATIC_LIBRARIES + = cocosdenshion_static <nl> - <nl> <nl> include $ ( BUILD_SHARED_LIBRARY ) <nl> <nl> | Integrate libluacocosdenshion , libluacocos3d , libluanetwork , libluacocostudio and libluaextension into libluacocos2d on the android platform | cocos2d/cocos2d-x | 25aab05b1f45ea6fbd3363676b19aeef159f6441 | 2014-08-25T01:48:14Z |
mmm a / python / taichi / lang / __init__ . py <nl> ppp b / python / taichi / lang / __init__ . py <nl> <nl> l = indices ( 3 ) <nl> ij = indices ( 0 , 1 ) <nl> ijk = indices ( 0 , 1 , 2 ) <nl> + ijkl = indices ( 0 , 1 , 2 , 3 ) <nl> Vector = Matrix <nl> outer_product = Matrix . outer_product <nl> cfg = default_cfg ( ) <nl> mmm a / python / taichi / lang / impl . py <nl> ppp b / python / taichi / lang / impl . py <nl> def min ( a , b ) : <nl> return Expr ( taichi_lang_core . expr_min ( a . ptr , b . ptr ) ) <nl> <nl> def append ( l , indices , val ) : <nl> - taichi_lang_core . insert_append ( l . ptr , make_expr_group ( indices ) , val . ptr ) <nl> + taichi_lang_core . insert_append ( l . ptr , make_expr_group ( indices ) , Expr ( val ) . ptr ) <nl> <nl> def length ( l , indices ) : <nl> return taichi_lang_core . insert_len ( l . ptr , make_expr_group ( indices ) ) <nl> mmm a / tests / python / test_cond_grad . py <nl> ppp b / tests / python / test_cond_grad . py <nl> <nl> <nl> def test_cond_grad ( ) : <nl> ti . reset ( ) <nl> + ti . cfg . print_ir = True <nl> x = ti . var ( ti . f32 ) <nl> y = ti . var ( ti . f32 ) <nl> <nl> def func ( ) : <nl> for i in range ( 2 ) : <nl> t = 0 . 0 <nl> if x [ i ] > 0 : <nl> - t = 1 / x [ i ] <nl> + t = 1 / ( x [ i ] + 1e - 10 ) <nl> y [ i ] = t <nl> <nl> x [ 0 ] = 0 <nl> | ti . ijkl , fix ti . append with int | taichi-dev/taichi | 9797e9621d33b1ae43d69503fce071f610313fb4 | 2019-10-17T22:12:17Z |
mmm a / tools / file_packager . py <nl> ppp b / tools / file_packager . py <nl> <nl> <nl> Usage : <nl> <nl> - file_packager . py TARGET [ - - preload A [ B . . ] ] [ - - embed C [ D . . ] ] [ - - compress COMPRESSION_DATA ] [ - - pre - run ] [ - - crunch [ = X ] ] <nl> + file_packager . py TARGET [ - - preload A [ B . . ] ] [ - - embed C [ D . . ] ] [ - - compress COMPRESSION_DATA ] [ - - pre - run ] [ - - crunch [ = X ] ] [ - - js - output = OUTPUT . js ] <nl> <nl> - - pre - run Will generate wrapper code that does preloading in Module . preRun . This is necessary if you add this <nl> code before the main file has been loading , which includes necessary components like addRunDependency . <nl> <nl> from shared import Compression , execute , suffix , unsuffixed <nl> from subprocess import Popen , PIPE , STDOUT <nl> <nl> + if len ( sys . argv ) = = 1 : <nl> + print ' ' ' Usage : file_packager . py TARGET [ - - preload A . . . ] [ - - embed C . . . ] [ - - compress COMPRESSION_DATA ] [ - - pre - run ] [ - - crunch [ = X ] ] [ - - js - output = OUTPUT . js ] <nl> + See the source for more details . ' ' ' <nl> + sys . exit ( 0 ) <nl> + <nl> data_target = sys . argv [ 1 ] <nl> <nl> IMAGE_SUFFIXES = ( ' . jpg ' , ' . png ' , ' . bmp ' ) <nl> <nl> pre_run = False <nl> crunch = 0 <nl> plugins = [ ] <nl> + jsoutput = None <nl> <nl> for arg in sys . argv [ 1 : ] : <nl> if arg = = ' - - preload ' : <nl> <nl> in_preload = False <nl> in_embed = False <nl> in_compress = 0 <nl> + elif arg . startswith ( ' - - js - output ' ) : <nl> + jsoutput = arg . split ( ' = ' ) [ 1 ] if ' = ' in arg else None <nl> elif arg . startswith ( ' - - crunch ' ) : <nl> from shared import CRUNCH <nl> crunch = arg . split ( ' = ' ) [ 1 ] if ' = ' in arg else ' 128 ' <nl> <nl> in_embed = False <nl> in_compress = 0 <nl> elif in_preload : <nl> - data_files . append ( { ' name ' : arg , ' mode ' : ' preload ' } ) <nl> + if os . path . isfile ( arg ) : <nl> + data_files . append ( { ' name ' : arg , ' mode ' : ' preload ' } ) <nl> elif in_embed : <nl> - data_files . append ( { ' name ' : arg , ' mode ' : ' embed ' } ) <nl> + if os . path . isfile ( arg ) : <nl> + data_files . append ( { ' name ' : arg , ' mode ' : ' embed ' } ) <nl> elif in_compress : <nl> if in_compress = = 1 : <nl> Compression . encoder = arg <nl> <nl> Compression . js_name = arg <nl> in_compress = 0 <nl> <nl> - print ' ' ' <nl> + ret = ' ' ' <nl> ( function ( ) { <nl> ' ' ' <nl> <nl> def was_seen ( name ) : <nl> # Crunch files <nl> if crunch : <nl> shutil . copyfile ( shared . path_from_root ( ' tools ' , ' crunch - worker . js ' ) , ' crunch - worker . js ' ) <nl> - print ' ' ' <nl> + ret + = ' ' ' <nl> var decrunchWorker = new Worker ( ' crunch - worker . js ' ) ; <nl> var decrunchCallbacks = [ ] ; <nl> decrunchWorker . onmessage = function ( msg ) { <nl> def was_seen ( name ) : <nl> ' ' ' % ( data_target , os . path . basename ( Compression . compressed_name ( data_target ) if Compression . on else data_target ) , use_data , data_target ) # use basename because from the browser ' s point of view , we need to find the datafile in the same dir as the html file <nl> <nl> if pre_run : <nl> - print ' ' ' <nl> + ret + = ' ' ' <nl> if ( typeof Module = = ' undefined ' ) Module = { } ; <nl> if ( ! Module [ ' preRun ' ] ) Module [ ' preRun ' ] = [ ] ; <nl> Module [ " preRun " ] . 
push ( function ( ) { <nl> ' ' ' <nl> - <nl> - print code <nl> + ret + = code <nl> <nl> if pre_run : <nl> - print ' } ) ; \ n ' <nl> + ret + = ' } ) ; \ n ' <nl> <nl> if crunch : <nl> - print ' ' ' <nl> + ret + = ' ' ' <nl> if ( ! Module [ ' postRun ' ] ) Module [ ' postRun ' ] = [ ] ; <nl> Module [ " postRun " ] . push ( function ( ) { <nl> decrunchWorker . terminate ( ) ; <nl> } ) ; <nl> ' ' ' <nl> <nl> - print ' ' ' <nl> + ret + = ' ' ' <nl> } ) ( ) ; <nl> ' ' ' <nl> <nl> + if jsoutput = = None : <nl> + print ret <nl> + else : <nl> + f = open ( jsoutput , ' w ' ) <nl> + f . write ( ret ) <nl> | * Little improvements of file_packager . py | emscripten-core/emscripten | 06876b9f0ef559553afb708462042fbade0a33c4 | 2013-04-13T14:06:57Z |
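The `--js-output` change recorded above switches file_packager.py from printing its generated JavaScript piecewise to accumulating it in `ret` and then either printing it or writing it to the path given by the flag. A minimal sketch of that flag-parsing and output pattern follows, reusing the same `split('=')` logic as the diff; the surrounding script scaffolding is assumed for illustration.

import sys

jsoutput = None
for arg in sys.argv[1:]:
    if arg.startswith('--js-output'):
        # '--js-output=out.js' yields 'out.js'; a bare '--js-output'
        # leaves jsoutput as None, matching the diff's behavior.
        jsoutput = arg.split('=')[1] if '=' in arg else None

ret = "(function() { /* generated preload code */ })();\n"

if jsoutput is None:
    print(ret)            # old behavior: dump the code to stdout
else:
    with open(jsoutput, 'w') as f:
        f.write(ret)      # new behavior: write it to the requested file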
mmm a / benchmark / parse . cpp <nl> ppp b / benchmark / parse . cpp <nl> int main ( int argc , char * argv [ ] ) { <nl> std : : cout < < " [ verbose ] allocated memory for parsed JSON " < < std : : endl ; <nl> } <nl> unified . start ( ) ; <nl> - isok = ( find_structural_bits ( p . data ( ) , p . size ( ) , pj ) = = simdjson : : SUCCESS ) ; <nl> + / / The default template is simdjson : : instruction_set : : native . <nl> + isok = ( find_structural_bits < > ( p . data ( ) , p . size ( ) , pj ) = = simdjson : : SUCCESS ) ; <nl> unified . end ( results ) ; <nl> cy1 + = results [ 0 ] ; <nl> cl1 + = results [ 1 ] ; <nl> int main ( int argc , char * argv [ ] ) { <nl> } <nl> <nl> auto start = std : : chrono : : steady_clock : : now ( ) ; <nl> - isok = ( find_structural_bits ( p . data ( ) , p . size ( ) , pj ) = = simdjson : : SUCCESS ) ; <nl> + / / The default template is simdjson : : instruction_set : : native . <nl> + isok = ( find_structural_bits < > ( p . data ( ) , p . size ( ) , pj ) = = simdjson : : SUCCESS ) ; <nl> isok = isok & & ( simdjson : : SUCCESS = = unified_machine ( p . data ( ) , p . size ( ) , pj ) ) ; <nl> auto end = std : : chrono : : steady_clock : : now ( ) ; <nl> std : : chrono : : duration < double > secs = end - start ; <nl> mmm a / benchmark / statisticalmodel . cpp <nl> ppp b / benchmark / statisticalmodel . cpp <nl> int main ( int argc , char * argv [ ] ) { <nl> results . resize ( evts . size ( ) ) ; <nl> for ( uint32_t i = 0 ; i < iterations ; i + + ) { <nl> unified . start ( ) ; <nl> - bool isok = ( find_structural_bits ( p . data ( ) , p . size ( ) , pj ) = = simdjson : : SUCCESS ) ; <nl> + / / The default template is simdjson : : instruction_set : : native . <nl> + bool isok = ( find_structural_bits < > ( p . data ( ) , p . size ( ) , pj ) = = simdjson : : SUCCESS ) ; <nl> unified . end ( results ) ; <nl> <nl> cy1 + = results [ 0 ] ; <nl> mmm a / include / simdjson / jsonparser . h <nl> ppp b / include / simdjson / jsonparser . h <nl> <nl> # include " simdjson / stage1_find_marks . h " <nl> # include " simdjson / stage2_build_tape . h " <nl> # include " simdjson / simdjson . h " <nl> + # ifdef _MSC_VER <nl> + # include < windows . h > <nl> + # include < sysinfoapi . h > <nl> + # else <nl> + # include < unistd . h > <nl> + # endif <nl> + <nl> + / / The function that users are expected to call is json_parse . <nl> + / / We have more than one such function because we want to support several <nl> + / / instruction sets . <nl> + <nl> + / / function pointer type for json_parse <nl> + using json_parse_functype = int ( const uint8_t * buf , size_t len , ParsedJson & pj , bool reallocifneeded ) ; <nl> + <nl> + / / Pointer that holds the json_parse implementation corresponding to the available SIMD instruction set <nl> + extern json_parse_functype * json_parse_ptr ; <nl> + <nl> + <nl> + / / json_parse_implementation is the generic function , it is specialized for various <nl> + / / SIMD instruction sets , e . g . , as json_parse_implementation < simdjson : : instruction_set : : avx2 > <nl> + / / or json_parse_implementation < simdjson : : instruction_set : : neon > <nl> + template < simdjson : : instruction_set T > <nl> + int json_parse_implementation ( const uint8_t * buf , size_t len , ParsedJson & pj , bool reallocifneeded = true ) { <nl> + if ( pj . 
bytecapacity < len ) { <nl> + return simdjson : : CAPACITY ; <nl> + } <nl> + bool reallocated = false ; <nl> + if ( reallocifneeded ) { <nl> + # ifdef ALLOW_SAME_PAGE_BUFFER_OVERRUN <nl> + / / realloc is needed if the end of the memory crosses a page <nl> + # ifdef _MSC_VER <nl> + SYSTEM_INFO sysInfo ; <nl> + GetSystemInfo ( & sysInfo ) ; <nl> + long pagesize = sysInfo . dwPageSize ; <nl> + # else <nl> + long pagesize = sysconf ( _SC_PAGESIZE ) ; <nl> + # endif <nl> + / / / / / / / / / / / / / / <nl> + / / We want to check that buf + len - 1 and buf + len - 1 + SIMDJSON_PADDING <nl> + / / are in the same page . <nl> + / / That is , we want to check that <nl> + / / ( buf + len - 1 ) / pagesize = = ( buf + len - 1 + SIMDJSON_PADDING ) / pagesize <nl> + / / That ' s true if ( buf + len - 1 ) % pagesize + SIMDJSON_PADDING < pagesize . <nl> + / / / / / / / / / / / <nl> + if ( ( reinterpret_cast < uintptr_t > ( buf + len - 1 ) % pagesize ) + SIMDJSON_PADDING < static_cast < uintptr_t > ( pagesize ) ) { <nl> + # else / / SIMDJSON_SAFE_SAME_PAGE_READ_OVERRUN <nl> + if ( true ) { / / if not SIMDJSON_SAFE_SAME_PAGE_READ_OVERRUN , we always reallocate <nl> + # endif <nl> + const uint8_t * tmpbuf = buf ; <nl> + buf = ( uint8_t * ) allocate_padded_buffer ( len ) ; <nl> + if ( buf = = NULL ) return simdjson : : MEMALLOC ; <nl> + memcpy ( ( void * ) buf , tmpbuf , len ) ; <nl> + reallocated = true ; <nl> + } / / if ( true ) OR if ( ( reinterpret_cast < uintptr_t > ( buf + len - 1 ) % pagesize ) + SIMDJSON_PADDING < static_cast < uintptr_t > ( pagesize ) ) { <nl> + } / / if ( reallocifneeded ) { <nl> + int stage1_is_ok = find_structural_bits < T > ( buf , len , pj ) ; <nl> + if ( stage1_is_ok ! = simdjson : : SUCCESS ) { <nl> + pj . errorcode = stage1_is_ok ; <nl> + return pj . errorcode ; <nl> + } <nl> + int res = unified_machine ( buf , len , pj ) ; <nl> + if ( reallocated ) { aligned_free ( ( void * ) buf ) ; } <nl> + return res ; <nl> + } <nl> <nl> / / Parse a document found in buf . <nl> / / You need to preallocate ParsedJson with a capacity of len ( e . g . , pj . allocateCapacity ( len ) ) . <nl> <nl> / / The input buf should be readable up to buf + len + SIMDJSON_PADDING if reallocifneeded is false , <nl> / / all bytes at and after buf + len are ignored ( can be garbage ) . <nl> / / The ParsedJson object can be reused . <nl> - WARN_UNUSED <nl> - int json_parse ( const uint8_t * buf , size_t len , ParsedJson & pj , bool reallocifneeded = true ) ; <nl> + <nl> + inline int json_parse ( const uint8_t * buf , size_t len , ParsedJson & pj , bool reallocifneeded = true ) { <nl> + return json_parse_ptr ( buf , len , pj , reallocifneeded ) ; <nl> + } <nl> <nl> / / Parse a document found in buf . <nl> / / You need to preallocate ParsedJson with a capacity of len ( e . g . , pj . allocateCapacity ( len ) ) . <nl> int json_parse ( const uint8_t * buf , size_t len , ParsedJson & pj , bool reallocifnee <nl> / / The input buf should be readable up to buf + len + SIMDJSON_PADDING if reallocifneeded is false , <nl> / / all bytes at and after buf + len are ignored ( can be garbage ) . <nl> / / The ParsedJson object can be reused . 
<nl> - WARN_UNUSED <nl> inline int json_parse ( const char * buf , size_t len , ParsedJson & pj , bool reallocifneeded = true ) { <nl> - return json_parse ( reinterpret_cast < const uint8_t * > ( buf ) , len , pj , reallocifneeded ) ; <nl> + return json_parse_ptr ( reinterpret_cast < const uint8_t * > ( buf ) , len , pj , reallocifneeded ) ; <nl> } <nl> <nl> / / We do not want to allow implicit conversion from C string to std : : string . <nl> int json_parse ( const char * buf , ParsedJson & pj ) = delete ; <nl> / / <nl> / / A temporary buffer is created when needed during processing <nl> / / ( a copy of the input string is made ) . <nl> - WARN_UNUSED <nl> inline int json_parse ( const std : : string & s , ParsedJson & pj ) { <nl> return json_parse ( s . data ( ) , s . length ( ) , pj , true ) ; <nl> } <nl> inline int json_parse ( const std : : string & s , ParsedJson & pj ) { <nl> / / <nl> / / You can also check validity <nl> / / by calling pj . isValid ( ) . The same ParsedJson can be reused for other documents . <nl> - WARN_UNUSED <nl> inline int json_parse ( const padded_string & s , ParsedJson & pj ) { <nl> return json_parse ( s . data ( ) , s . length ( ) , pj , false ) ; <nl> } <nl> mmm a / include / simdjson / simdjson . h <nl> ppp b / include / simdjson / simdjson . h <nl> <nl> # include < string > <nl> <nl> struct simdjson { <nl> + enum class instruction_set { <nl> + avx2 , <nl> + sse4_2 , <nl> + neon , <nl> + none , <nl> + / / the ' native ' enum class value should point at a good default on the current machine <nl> + # ifdef __AVX2__ <nl> + native = avx2 <nl> + # elif defined ( __ARM_NEON ) <nl> + native = neon <nl> + # else <nl> + / / Let us assume that we have an old x64 processor , but one that has SSE ( i . e . , something <nl> + / / that came out in the second decade of the XXIst century . <nl> + / / It would be nicer to check explicitly , but there many not be a good way to do so <nl> + / / that is cross - platform . <nl> + / / Under Visual Studio , there is no way to check for SSE4 . 2 support at compile - time . <nl> + native = sse4_2 <nl> + # endif <nl> + } ; <nl> + <nl> enum errorValues { <nl> SUCCESS = 0 , <nl> CAPACITY , / / This ParsedJson can ' t support a document that big <nl> mmm a / include / simdjson / stage1_find_marks . h <nl> ppp b / include / simdjson / stage1_find_marks . h <nl> <nl> # ifndef SIMDJSON_STAGE1_FIND_MARKS_H <nl> # define SIMDJSON_STAGE1_FIND_MARKS_H <nl> <nl> + # include < cassert > <nl> # include " simdjson / common_defs . h " <nl> + # include " simdjson / parsedjson . h " <nl> + # include " simdjson / portability . h " <nl> <nl> - struct ParsedJson ; <nl> + # ifdef __AVX2__ <nl> <nl> + # ifndef SIMDJSON_SKIPUTF8VALIDATION <nl> + # define SIMDJSON_UTF8VALIDATE <nl> + <nl> + # endif <nl> + # else <nl> + / / currently we don ' t UTF8 validate for ARM <nl> + / / also we assume that if you ' re not __AVX2__ <nl> + / / you ' re ARM , which is a bit dumb . TODO : Fix . . . <nl> + # ifdef __ARM_NEON <nl> + # include < arm_neon . h > <nl> + # else <nl> + # warning It appears that neither ARM NEON nor AVX2 are detected . <nl> + # endif / / __ARM_NEON <nl> + # endif / / __AVX2__ <nl> + <nl> + / / It seems that many parsers do UTF - 8 validation . <nl> + / / RapidJSON does not do it by default , but a flag <nl> + / / allows it . <nl> + # ifdef SIMDJSON_UTF8VALIDATE <nl> + # include " simdjson / simdutf8check . 
h " <nl> + # endif <nl> + <nl> + # define TRANSPOSE <nl> + <nl> + template < simdjson : : instruction_set > <nl> + struct simd_input ; <nl> + # ifdef __AVX2__ <nl> + template < > <nl> + struct simd_input < simdjson : : instruction_set : : avx2 > <nl> + { <nl> + __m256i lo ; <nl> + __m256i hi ; <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef __ARM_NEON <nl> + template < > struct simd_input < simdjson : : instruction_set : : neon > <nl> + { <nl> + # ifndef TRANSPOSE <nl> + uint8x16_t i0 ; <nl> + uint8x16_t i1 ; <nl> + uint8x16_t i2 ; <nl> + uint8x16_t i3 ; <nl> + # else <nl> + uint8x16x4_t i ; <nl> + # endif <nl> + } ; <nl> + # endif <nl> + <nl> + # ifdef __ARM_NEON <nl> + really_inline <nl> + uint16_t neonmovemask ( uint8x16_t input ) { <nl> + const uint8x16_t bitmask = { 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 , <nl> + 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 } ; <nl> + uint8x16_t minput = vandq_u8 ( input , bitmask ) ; <nl> + uint8x16_t tmp = vpaddq_u8 ( minput , minput ) ; <nl> + tmp = vpaddq_u8 ( tmp , tmp ) ; <nl> + tmp = vpaddq_u8 ( tmp , tmp ) ; <nl> + return vgetq_lane_u16 ( vreinterpretq_u16_u8 ( tmp ) , 0 ) ; <nl> + } <nl> + <nl> + really_inline <nl> + uint64_t neonmovemask_bulk ( uint8x16_t p0 , uint8x16_t p1 , uint8x16_t p2 , uint8x16_t p3 ) { <nl> + # ifndef TRANSPOSE <nl> + const uint8x16_t bitmask = { 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 , <nl> + 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 } ; <nl> + uint8x16_t t0 = vandq_u8 ( p0 , bitmask ) ; <nl> + uint8x16_t t1 = vandq_u8 ( p1 , bitmask ) ; <nl> + uint8x16_t t2 = vandq_u8 ( p2 , bitmask ) ; <nl> + uint8x16_t t3 = vandq_u8 ( p3 , bitmask ) ; <nl> + uint8x16_t sum0 = vpaddq_u8 ( t0 , t1 ) ; <nl> + uint8x16_t sum1 = vpaddq_u8 ( t2 , t3 ) ; <nl> + sum0 = vpaddq_u8 ( sum0 , sum1 ) ; <nl> + sum0 = vpaddq_u8 ( sum0 , sum0 ) ; <nl> + return vgetq_lane_u64 ( vreinterpretq_u64_u8 ( sum0 ) , 0 ) ; <nl> + # else <nl> + const uint8x16_t bitmask1 = { 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 , <nl> + 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 } ; <nl> + const uint8x16_t bitmask2 = { 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 , <nl> + 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 } ; <nl> + const uint8x16_t bitmask3 = { 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 , <nl> + 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 } ; <nl> + const uint8x16_t bitmask4 = { 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 , <nl> + 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 } ; <nl> + # if 0 <nl> + uint8x16_t t0 = vandq_u8 ( p0 , bitmask1 ) ; <nl> + uint8x16_t t1 = vandq_u8 ( p1 , bitmask2 ) ; <nl> + uint8x16_t t2 = vandq_u8 ( p2 , bitmask3 ) ; <nl> + uint8x16_t t3 = vandq_u8 ( p3 , bitmask4 ) ; <nl> + uint8x16_t tmp = vorrq_u8 ( vorrq_u8 ( t0 , t1 ) , vorrq_u8 ( t2 , t3 ) ) ; <nl> + # else <nl> + uint8x16_t t0 = vandq_u8 ( p0 , bitmask1 ) ; <nl> + uint8x16_t t1 = vbslq_u8 ( bitmask2 , p1 , t0 ) ; <nl> + uint8x16_t t2 = vbslq_u8 ( bitmask3 , p2 , t1 ) ; <nl> + uint8x16_t tmp = vbslq_u8 ( bitmask4 , p3 , t2 ) ; <nl> + # endif <nl> + uint8x16_t sum = vpaddq_u8 ( tmp , tmp ) ; <nl> + return vgetq_lane_u64 ( vreinterpretq_u64_u8 ( sum ) , 0 ) ; <nl> + # endif <nl> + } <nl> + # endif <nl> + <nl> + template < simdjson : : instruction_set T > <nl> + uint64_t compute_quote_mask ( uint64_t quote_bits ) ; <nl> + <nl> + / / In practice , if you have NEON or __PCLMUL__ , you would <nl> + / / always want to use them , but it might be useful , for 
research <nl> + / / purposes , to disable it willingly , that ' s what SIMDJSON_AVOID_CLMUL <nl> + / / does . <nl> + / / Also : we don ' t know of an instance where AVX2 is supported but <nl> + / / where clmul is not supported , so check for both , to be sure . <nl> + # ifdef SIMDJSON_AVOID_CLMUL <nl> + template < simdjson : : instruction_set T > really_inline <nl> + uint64_t compute_quote_mask ( uint64_t quote_bits ) <nl> + { <nl> + uint64_t quote_mask = quote_bits ^ ( quote_bits < < 1 ) ; <nl> + quote_mask = quote_mask ^ ( quote_mask < < 2 ) ; <nl> + quote_mask = quote_mask ^ ( quote_mask < < 4 ) ; <nl> + quote_mask = quote_mask ^ ( quote_mask < < 8 ) ; <nl> + quote_mask = quote_mask ^ ( quote_mask < < 16 ) ; <nl> + quote_mask = quote_mask ^ ( quote_mask < < 32 ) ; <nl> + return quote_mask ; <nl> + } <nl> + # else <nl> + template < simdjson : : instruction_set > <nl> + uint64_t compute_quote_mask ( uint64_t quote_bits ) ; <nl> + <nl> + # ifdef __AVX2__ <nl> + template < > really_inline <nl> + uint64_t compute_quote_mask < simdjson : : instruction_set : : avx2 > ( uint64_t quote_bits ) { <nl> + uint64_t quote_mask = _mm_cvtsi128_si64 ( _mm_clmulepi64_si128 ( <nl> + _mm_set_epi64x ( 0ULL , quote_bits ) , _mm_set1_epi8 ( 0xFF ) , 0 ) ) ; <nl> + return quote_mask ; <nl> + } <nl> + # endif <nl> + <nl> + # ifdef __ARM_NEON <nl> + template < > really_inline <nl> + uint64_t compute_quote_mask < simdjson : : instruction_set : : neon > ( uint64_t quote_bits ) { <nl> + # ifdef __PCLMUL__ / / Might cause problems on runtime dispatch <nl> + uint64_t quote_mask = _mm_cvtsi128_si64 ( _mm_clmulepi64_si128 ( <nl> + _mm_set_epi64x ( 0ULL , quote_bits ) , <nl> + _mm_set1_epi8 ( 0xFF ) , 0 ) ) ; <nl> + # else <nl> + uint64_t quote_mask = vmull_p64 ( - 1ULL , quote_bits ) ; <nl> + # endif <nl> + return quote_mask ; <nl> + } <nl> + # endif <nl> + # endif <nl> + <nl> + # ifdef SIMDJSON_UTF8VALIDATE <nl> + template < simdjson : : instruction_set T > really_inline <nl> + void check_utf8 ( simd_input < T > in , <nl> + __m256i & has_error , <nl> + struct avx_processed_utf_bytes & previous ) { <nl> + __m256i highbit = _mm256_set1_epi8 ( 0x80 ) ; <nl> + if ( ( _mm256_testz_si256 ( _mm256_or_si256 ( in . lo , in . hi ) , highbit ) ) = = 1 ) { <nl> + / / it is ascii , we just check continuation <nl> + has_error = _mm256_or_si256 ( <nl> + _mm256_cmpgt_epi8 ( <nl> + previous . carried_continuations , <nl> + _mm256_setr_epi8 ( 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , <nl> + 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 1 ) ) , <nl> + has_error ) ; <nl> + } else { <nl> + / / it is not ascii so we have to do heavy work <nl> + previous = avxcheckUTF8Bytes ( in . lo , & previous , & has_error ) ; <nl> + previous = avxcheckUTF8Bytes ( in . hi , & previous , & has_error ) ; <nl> + } <nl> + } <nl> + # endif <nl> + <nl> + template < simdjson : : instruction_set T > <nl> + simd_input < T > fill_input ( const uint8_t * ptr ) ; <nl> + <nl> + # ifdef __AVX2__ <nl> + template < > really_inline <nl> + simd_input < simdjson : : instruction_set : : avx2 > fill_input < simdjson : : instruction_set : : avx2 > ( const uint8_t * ptr ) { <nl> + struct simd_input < simdjson : : instruction_set : : avx2 > in ; <nl> + in . lo = _mm256_loadu_si256 ( reinterpret_cast < const __m256i * > ( ptr + 0 ) ) ; <nl> + in . 
hi = _mm256_loadu_si256 ( reinterpret_cast < const __m256i * > ( ptr + 32 ) ) ; <nl> + return in ; <nl> + } <nl> + # endif <nl> + <nl> + # ifdef __ARM_NEON <nl> + template < > really_inline <nl> + simd_input < simdjson : : instruction_set : : neon > fill_input < simdjson : : instruction_set : : neon > ( const uint8_t * ptr ) { <nl> + struct simd_input < simdjson : : instruction_set : : neon > in ; <nl> + # ifndef TRANSPOSE <nl> + in . i0 = vld1q_u8 ( ptr + 0 ) ; <nl> + in . i1 = vld1q_u8 ( ptr + 16 ) ; <nl> + in . i2 = vld1q_u8 ( ptr + 32 ) ; <nl> + in . i3 = vld1q_u8 ( ptr + 48 ) ; <nl> + # else <nl> + in . i = vld4q_u8 ( ptr ) ; <nl> + # endif <nl> + return in ; <nl> + } <nl> + # endif <nl> + <nl> + / / a straightforward comparison of a mask against input . 5 uops ; would be <nl> + / / cheaper in AVX512 . <nl> + template < simdjson : : instruction_set T > <nl> + uint64_t cmp_mask_against_input ( simd_input < T > in , uint8_t m ) ; <nl> + <nl> + # ifdef __AVX2__ <nl> + template < > really_inline <nl> + uint64_t cmp_mask_against_input < simdjson : : instruction_set : : avx2 > ( simd_input < simdjson : : instruction_set : : avx2 > in , uint8_t m ) { <nl> + <nl> + const __m256i mask = _mm256_set1_epi8 ( m ) ; <nl> + __m256i cmp_res_0 = _mm256_cmpeq_epi8 ( in . lo , mask ) ; <nl> + uint64_t res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( cmp_res_0 ) ) ; <nl> + __m256i cmp_res_1 = _mm256_cmpeq_epi8 ( in . hi , mask ) ; <nl> + uint64_t res_1 = _mm256_movemask_epi8 ( cmp_res_1 ) ; <nl> + return res_0 | ( res_1 < < 32 ) ; <nl> + } <nl> + # endif <nl> + <nl> + # ifdef __ARM_NEON <nl> + template < > really_inline <nl> + uint64_t cmp_mask_against_input < simdjson : : instruction_set : : neon > ( simd_input < simdjson : : instruction_set : : neon > in , uint8_t m ) { <nl> + const uint8x16_t mask = vmovq_n_u8 ( m ) ; <nl> + uint8x16_t cmp_res_0 = vceqq_u8 ( in . i . val [ 0 ] , mask ) ; <nl> + uint8x16_t cmp_res_1 = vceqq_u8 ( in . i . val [ 1 ] , mask ) ; <nl> + uint8x16_t cmp_res_2 = vceqq_u8 ( in . i . val [ 2 ] , mask ) ; <nl> + uint8x16_t cmp_res_3 = vceqq_u8 ( in . i . val [ 3 ] , mask ) ; <nl> + return neonmovemask_bulk ( cmp_res_0 , cmp_res_1 , cmp_res_2 , cmp_res_3 ) ; <nl> + } <nl> + # endif <nl> + <nl> + / / find all values less than or equal than the content of maxval ( using unsigned arithmetic ) <nl> + template < simdjson : : instruction_set T > <nl> + uint64_t unsigned_lteq_against_input ( simd_input < T > in , uint8_t m ) ; <nl> + <nl> + # ifdef __AVX2__ <nl> + template < > really_inline <nl> + uint64_t unsigned_lteq_against_input < simdjson : : instruction_set : : avx2 > ( simd_input < simdjson : : instruction_set : : avx2 > in , uint8_t m ) { <nl> + const __m256i maxval = _mm256_set1_epi8 ( m ) ; <nl> + __m256i cmp_res_0 = _mm256_cmpeq_epi8 ( _mm256_max_epu8 ( maxval , in . lo ) , maxval ) ; <nl> + uint64_t res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( cmp_res_0 ) ) ; <nl> + __m256i cmp_res_1 = _mm256_cmpeq_epi8 ( _mm256_max_epu8 ( maxval , in . hi ) , maxval ) ; <nl> + uint64_t res_1 = _mm256_movemask_epi8 ( cmp_res_1 ) ; <nl> + return res_0 | ( res_1 < < 32 ) ; <nl> + } <nl> + # endif <nl> + <nl> + # ifdef __ARM_NEON <nl> + template < > really_inline <nl> + uint64_t unsigned_lteq_against_input < simdjson : : instruction_set : : neon > ( simd_input < simdjson : : instruction_set : : neon > in , uint8_t m ) { <nl> + const uint8x16_t mask = vmovq_n_u8 ( m ) ; <nl> + uint8x16_t cmp_res_0 = vcleq_u8 ( in . i . 
val [ 0 ] , mask ) ; <nl> + uint8x16_t cmp_res_1 = vcleq_u8 ( in . i . val [ 1 ] , mask ) ; <nl> + uint8x16_t cmp_res_2 = vcleq_u8 ( in . i . val [ 2 ] , mask ) ; <nl> + uint8x16_t cmp_res_3 = vcleq_u8 ( in . i . val [ 3 ] , mask ) ; <nl> + return neonmovemask_bulk ( cmp_res_0 , cmp_res_1 , cmp_res_2 , cmp_res_3 ) ; <nl> + } <nl> + # endif <nl> + <nl> + / / return a bitvector indicating where we have characters that end an odd - length <nl> + / / sequence of backslashes ( and thus change the behavior of the next character <nl> + / / to follow ) . A even - length sequence of backslashes , and , for that matter , the <nl> + / / largest even - length prefix of our odd - length sequence of backslashes , simply <nl> + / / modify the behavior of the backslashes themselves . <nl> + / / We also update the prev_iter_ends_odd_backslash reference parameter to <nl> + / / indicate whether we end an iteration on an odd - length sequence of <nl> + / / backslashes , which modifies our subsequent search for odd - length <nl> + / / sequences of backslashes in an obvious way . <nl> + template < simdjson : : instruction_set T > really_inline <nl> + uint64_t find_odd_backslash_sequences ( simd_input < T > in , uint64_t & prev_iter_ends_odd_backslash ) { <nl> + const uint64_t even_bits = 0x5555555555555555ULL ; <nl> + const uint64_t odd_bits = ~ even_bits ; <nl> + uint64_t bs_bits = cmp_mask_against_input ( in , ' \ \ ' ) ; <nl> + uint64_t start_edges = bs_bits & ~ ( bs_bits < < 1 ) ; <nl> + / / flip lowest if we have an odd - length run at the end of the prior <nl> + / / iteration <nl> + uint64_t even_start_mask = even_bits ^ prev_iter_ends_odd_backslash ; <nl> + uint64_t even_starts = start_edges & even_start_mask ; <nl> + uint64_t odd_starts = start_edges & ~ even_start_mask ; <nl> + uint64_t even_carries = bs_bits + even_starts ; <nl> + <nl> + uint64_t odd_carries ; <nl> + / / must record the carry - out of our odd - carries out of bit 63 ; this <nl> + / / indicates whether the sense of any edge going to the next iteration <nl> + / / should be flipped <nl> + bool iter_ends_odd_backslash = <nl> + add_overflow ( bs_bits , odd_starts , & odd_carries ) ; <nl> + <nl> + odd_carries | = <nl> + prev_iter_ends_odd_backslash ; / / push in bit zero as a potential end <nl> + / / if we had an odd - numbered run at the <nl> + / / end of the previous iteration <nl> + prev_iter_ends_odd_backslash = iter_ends_odd_backslash ? 0x1ULL : 0x0ULL ; <nl> + uint64_t even_carry_ends = even_carries & ~ bs_bits ; <nl> + uint64_t odd_carry_ends = odd_carries & ~ bs_bits ; <nl> + uint64_t even_start_odd_end = even_carry_ends & odd_bits ; <nl> + uint64_t odd_start_even_end = odd_carry_ends & even_bits ; <nl> + uint64_t odd_ends = even_start_odd_end | odd_start_even_end ; <nl> + return odd_ends ; <nl> + } <nl> + <nl> + / / return both the quote mask ( which is a half - open mask that covers the first <nl> + / / quote <nl> + / / in an unescaped quote pair and everything in the quote pair ) and the quote <nl> + / / bits , which are the simple <nl> + / / unescaped quoted bits . We also update the prev_iter_inside_quote value to <nl> + / / tell the next iteration <nl> + / / whether we finished the final iteration inside a quote pair ; if so , this <nl> + / / inverts our behavior of <nl> + / / whether we ' re inside quotes for the next iteration . 
<nl> + / / Note that we don ' t do any error checking to see if we have backslash <nl> + / / sequences outside quotes ; these <nl> + / / backslash sequences ( of any length ) will be detected elsewhere . <nl> + template < simdjson : : instruction_set T > really_inline <nl> + uint64_t find_quote_mask_and_bits ( simd_input < T > in , uint64_t odd_ends , <nl> + uint64_t & prev_iter_inside_quote , uint64_t & quote_bits , uint64_t & error_mask ) { <nl> + quote_bits = cmp_mask_against_input < T > ( in , ' " ' ) ; <nl> + quote_bits = quote_bits & ~ odd_ends ; <nl> + uint64_t quote_mask = compute_quote_mask < T > ( quote_bits ) ; <nl> + quote_mask ^ = prev_iter_inside_quote ; <nl> + / / All Unicode characters may be placed within the <nl> + / / quotation marks , except for the characters that MUST be escaped : <nl> + / / quotation mark , reverse solidus , and the control characters ( U + 0000 <nl> + / / through U + 001F ) . <nl> + / / https : / / tools . ietf . org / html / rfc8259 <nl> + uint64_t unescaped = unsigned_lteq_against_input < T > ( in , 0x1F ) ; <nl> + error_mask | = quote_mask & unescaped ; <nl> + / / right shift of a signed value expected to be well - defined and standard <nl> + / / compliant as of C + + 20 , <nl> + / / John Regher from Utah U . says this is fine code <nl> + prev_iter_inside_quote = <nl> + static_cast < uint64_t > ( static_cast < int64_t > ( quote_mask ) > > 63 ) ; <nl> + return quote_mask ; <nl> + } <nl> + <nl> + / / do a ' shufti ' to detect structural JSON characters <nl> + / / they are { 0x7b } 0x7d : 0x3a [ 0x5b ] 0x5d , 0x2c <nl> + / / these go into the first 3 buckets of the comparison ( 1 / 2 / 4 ) <nl> + <nl> + / / we are also interested in the four whitespace characters <nl> + / / space 0x20 , linefeed 0x0a , horizontal tab 0x09 and carriage return 0x0d <nl> + / / these go into the next 2 buckets of the comparison ( 8 / 16 ) <nl> + template < simdjson : : instruction_set T > <nl> + void find_whitespace_and_structurals ( simd_input < T > in , <nl> + uint64_t & whitespace , <nl> + uint64_t & structurals ) ; <nl> + <nl> + # ifdef __AVX2__ <nl> + template < > really_inline <nl> + void find_whitespace_and_structurals < simdjson : : instruction_set : : avx2 > ( simd_input < simdjson : : instruction_set : : avx2 > in , <nl> + uint64_t & whitespace , <nl> + uint64_t & structurals ) { <nl> + # ifdef SIMDJSON_NAIVE_STRUCTURAL <nl> + / / You should never need this naive approach , but it can be useful <nl> + / / for research purposes <nl> + const __m256i mask_open_brace = _mm256_set1_epi8 ( 0x7b ) ; <nl> + __m256i struct_lo = _mm256_cmpeq_epi8 ( in . lo , mask_open_brace ) ; <nl> + __m256i struct_hi = _mm256_cmpeq_epi8 ( in . hi , mask_open_brace ) ; <nl> + const __m256i mask_close_brace = _mm256_set1_epi8 ( 0x7d ) ; <nl> + struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_close_brace ) ) ; <nl> + struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_close_brace ) ) ; <nl> + const __m256i mask_open_bracket = _mm256_set1_epi8 ( 0x5b ) ; <nl> + struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_open_bracket ) ) ; <nl> + struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_open_bracket ) ) ; <nl> + const __m256i mask_close_bracket = _mm256_set1_epi8 ( 0x5d ) ; <nl> + struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_close_bracket ) ) ; <nl> + struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . 
hi , mask_close_bracket ) ) ; <nl> + const __m256i mask_column = _mm256_set1_epi8 ( 0x3a ) ; <nl> + struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_column ) ) ; <nl> + struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_column ) ) ; <nl> + const __m256i mask_comma = _mm256_set1_epi8 ( 0x2c ) ; <nl> + struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_comma ) ) ; <nl> + struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_comma ) ) ; <nl> + uint64_t structural_res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( struct_lo ) ) ; <nl> + uint64_t structural_res_1 = _mm256_movemask_epi8 ( struct_hi ) ; <nl> + structurals = ( structural_res_0 | ( structural_res_1 < < 32 ) ) ; <nl> + <nl> + const __m256i mask_space = _mm256_set1_epi8 ( 0x20 ) ; <nl> + __m256i space_lo = _mm256_cmpeq_epi8 ( in . lo , mask_space ) ; <nl> + __m256i space_hi = _mm256_cmpeq_epi8 ( in . hi , mask_space ) ; <nl> + const __m256i mask_linefeed = _mm256_set1_epi8 ( 0x0a ) ; <nl> + space_lo = _mm256_or_si256 ( space_lo , _mm256_cmpeq_epi8 ( in . lo , mask_linefeed ) ) ; <nl> + space_hi = _mm256_or_si256 ( space_hi , _mm256_cmpeq_epi8 ( in . hi , mask_linefeed ) ) ; <nl> + const __m256i mask_tab = _mm256_set1_epi8 ( 0x09 ) ; <nl> + space_lo = _mm256_or_si256 ( space_lo , _mm256_cmpeq_epi8 ( in . lo , mask_tab ) ) ; <nl> + space_hi = _mm256_or_si256 ( space_hi , _mm256_cmpeq_epi8 ( in . hi , mask_tab ) ) ; <nl> + const __m256i mask_carriage = _mm256_set1_epi8 ( 0x0d ) ; <nl> + space_lo = _mm256_or_si256 ( space_lo , _mm256_cmpeq_epi8 ( in . lo , mask_carriage ) ) ; <nl> + space_hi = _mm256_or_si256 ( space_hi , _mm256_cmpeq_epi8 ( in . hi , mask_carriage ) ) ; <nl> + <nl> + uint64_t ws_res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( space_lo ) ) ; <nl> + uint64_t ws_res_1 = _mm256_movemask_epi8 ( space_hi ) ; <nl> + whitespace = ( ws_res_0 | ( ws_res_1 < < 32 ) ) ; <nl> + / / end of naive approach <nl> + <nl> + # else / / SIMDJSON_NAIVE_STRUCTURAL <nl> + const __m256i low_nibble_mask = _mm256_setr_epi8 ( <nl> + 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 8 , 12 , 1 , 2 , 9 , 0 , 0 , <nl> + 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 8 , 12 , 1 , 2 , 9 , 0 , 0 ) ; <nl> + const __m256i high_nibble_mask = _mm256_setr_epi8 ( <nl> + 8 , 0 , 18 , 4 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 3 , 2 , 1 , 0 , 0 , <nl> + 8 , 0 , 18 , 4 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 3 , 2 , 1 , 0 , 0 ) ; <nl> + <nl> + __m256i structural_shufti_mask = _mm256_set1_epi8 ( 0x7 ) ; <nl> + __m256i whitespace_shufti_mask = _mm256_set1_epi8 ( 0x18 ) ; <nl> + <nl> + __m256i v_lo = _mm256_and_si256 ( <nl> + _mm256_shuffle_epi8 ( low_nibble_mask , in . lo ) , <nl> + _mm256_shuffle_epi8 ( high_nibble_mask , <nl> + _mm256_and_si256 ( _mm256_srli_epi32 ( in . lo , 4 ) , <nl> + _mm256_set1_epi8 ( 0x7f ) ) ) ) ; <nl> + <nl> + __m256i v_hi = _mm256_and_si256 ( <nl> + _mm256_shuffle_epi8 ( low_nibble_mask , in . hi ) , <nl> + _mm256_shuffle_epi8 ( high_nibble_mask , <nl> + _mm256_and_si256 ( _mm256_srli_epi32 ( in . 
hi , 4 ) , <nl> + _mm256_set1_epi8 ( 0x7f ) ) ) ) ; <nl> + __m256i tmp_lo = _mm256_cmpeq_epi8 ( <nl> + _mm256_and_si256 ( v_lo , structural_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> + __m256i tmp_hi = _mm256_cmpeq_epi8 ( <nl> + _mm256_and_si256 ( v_hi , structural_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> + <nl> + uint64_t structural_res_0 = <nl> + static_cast < uint32_t > ( _mm256_movemask_epi8 ( tmp_lo ) ) ; <nl> + uint64_t structural_res_1 = _mm256_movemask_epi8 ( tmp_hi ) ; <nl> + structurals = ~ ( structural_res_0 | ( structural_res_1 < < 32 ) ) ; <nl> + <nl> + __m256i tmp_ws_lo = _mm256_cmpeq_epi8 ( <nl> + _mm256_and_si256 ( v_lo , whitespace_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> + __m256i tmp_ws_hi = _mm256_cmpeq_epi8 ( <nl> + _mm256_and_si256 ( v_hi , whitespace_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> + <nl> + uint64_t ws_res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( tmp_ws_lo ) ) ; <nl> + uint64_t ws_res_1 = _mm256_movemask_epi8 ( tmp_ws_hi ) ; <nl> + whitespace = ~ ( ws_res_0 | ( ws_res_1 < < 32 ) ) ; <nl> + # endif / / SIMDJSON_NAIVE_STRUCTURAL <nl> + } <nl> + # endif <nl> + <nl> + # ifdef __ARM_NEON <nl> + template < > really_inline <nl> + void find_whitespace_and_structurals < simdjson : : instruction_set : : neon > ( <nl> + simd_input < simdjson : : instruction_set : : neon > in , <nl> + uint64_t & whitespace , <nl> + uint64_t & structurals ) { <nl> + # ifndef FUNKY_BAD_TABLE <nl> + const uint8x16_t low_nibble_mask = ( uint8x16_t ) { <nl> + 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 8 , 12 , 1 , 2 , 9 , 0 , 0 } ; <nl> + const uint8x16_t high_nibble_mask = ( uint8x16_t ) { <nl> + 8 , 0 , 18 , 4 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 3 , 2 , 1 , 0 , 0 } ; <nl> + const uint8x16_t structural_shufti_mask = vmovq_n_u8 ( 0x7 ) ; <nl> + const uint8x16_t whitespace_shufti_mask = vmovq_n_u8 ( 0x18 ) ; <nl> + const uint8x16_t low_nib_and_mask = vmovq_n_u8 ( 0xf ) ; <nl> + <nl> + uint8x16_t nib_0_lo = vandq_u8 ( in . i . val [ 0 ] , low_nib_and_mask ) ; <nl> + uint8x16_t nib_0_hi = vshrq_n_u8 ( in . i . val [ 0 ] , 4 ) ; <nl> + uint8x16_t shuf_0_lo = vqtbl1q_u8 ( low_nibble_mask , nib_0_lo ) ; <nl> + uint8x16_t shuf_0_hi = vqtbl1q_u8 ( high_nibble_mask , nib_0_hi ) ; <nl> + uint8x16_t v_0 = vandq_u8 ( shuf_0_lo , shuf_0_hi ) ; <nl> + <nl> + uint8x16_t nib_1_lo = vandq_u8 ( in . i . val [ 1 ] , low_nib_and_mask ) ; <nl> + uint8x16_t nib_1_hi = vshrq_n_u8 ( in . i . val [ 1 ] , 4 ) ; <nl> + uint8x16_t shuf_1_lo = vqtbl1q_u8 ( low_nibble_mask , nib_1_lo ) ; <nl> + uint8x16_t shuf_1_hi = vqtbl1q_u8 ( high_nibble_mask , nib_1_hi ) ; <nl> + uint8x16_t v_1 = vandq_u8 ( shuf_1_lo , shuf_1_hi ) ; <nl> + <nl> + uint8x16_t nib_2_lo = vandq_u8 ( in . i . val [ 2 ] , low_nib_and_mask ) ; <nl> + uint8x16_t nib_2_hi = vshrq_n_u8 ( in . i . val [ 2 ] , 4 ) ; <nl> + uint8x16_t shuf_2_lo = vqtbl1q_u8 ( low_nibble_mask , nib_2_lo ) ; <nl> + uint8x16_t shuf_2_hi = vqtbl1q_u8 ( high_nibble_mask , nib_2_hi ) ; <nl> + uint8x16_t v_2 = vandq_u8 ( shuf_2_lo , shuf_2_hi ) ; <nl> + <nl> + uint8x16_t nib_3_lo = vandq_u8 ( in . i . val [ 3 ] , low_nib_and_mask ) ; <nl> + uint8x16_t nib_3_hi = vshrq_n_u8 ( in . i . 
val [ 3 ] , 4 ) ; <nl> + uint8x16_t shuf_3_lo = vqtbl1q_u8 ( low_nibble_mask , nib_3_lo ) ; <nl> + uint8x16_t shuf_3_hi = vqtbl1q_u8 ( high_nibble_mask , nib_3_hi ) ; <nl> + uint8x16_t v_3 = vandq_u8 ( shuf_3_lo , shuf_3_hi ) ; <nl> + <nl> + uint8x16_t tmp_0 = vtstq_u8 ( v_0 , structural_shufti_mask ) ; <nl> + uint8x16_t tmp_1 = vtstq_u8 ( v_1 , structural_shufti_mask ) ; <nl> + uint8x16_t tmp_2 = vtstq_u8 ( v_2 , structural_shufti_mask ) ; <nl> + uint8x16_t tmp_3 = vtstq_u8 ( v_3 , structural_shufti_mask ) ; <nl> + structurals = neonmovemask_bulk ( tmp_0 , tmp_1 , tmp_2 , tmp_3 ) ; <nl> + <nl> + uint8x16_t tmp_ws_0 = vtstq_u8 ( v_0 , whitespace_shufti_mask ) ; <nl> + uint8x16_t tmp_ws_1 = vtstq_u8 ( v_1 , whitespace_shufti_mask ) ; <nl> + uint8x16_t tmp_ws_2 = vtstq_u8 ( v_2 , whitespace_shufti_mask ) ; <nl> + uint8x16_t tmp_ws_3 = vtstq_u8 ( v_3 , whitespace_shufti_mask ) ; <nl> + whitespace = neonmovemask_bulk ( tmp_ws_0 , tmp_ws_1 , tmp_ws_2 , tmp_ws_3 ) ; <nl> + # else <nl> + / / I think this one is garbage . In order to save the expense <nl> + / / of another shuffle , I use an equally expensive shift , and <nl> + / / this gets glued to the end of the dependency chain . Seems a bit <nl> + / / slower for no good reason . <nl> + / / <nl> + / / need to use a weird arrangement . Bytes in this bitvector <nl> + / / are in conventional order , but bits are reversed as we are <nl> + / / using a signed left shift ( that is a + ve value from 0 . . 7 ) to <nl> + / / shift upwards to 0x80 in the bit . So we need to reverse bits . <nl> + <nl> + / / note no structural / whitespace has the high bit on <nl> + / / so it ' s OK to put the high 5 bits into our TBL shuffle <nl> + / / <nl> + <nl> + / / structurals are { 0x7b } 0x7d : 0x3a [ 0x5b ] 0x5d , 0x2c <nl> + / / or in 5 bit , 3 bit form thats <nl> + / / ( 15 , 3 ) ( 15 , 5 ) ( 7 , 2 ) ( 11 , 3 ) ( 11 , 5 ) ( 5 , 4 ) <nl> + / / bit - reversing ( subtract low 3 bits from 7 ) yields : <nl> + / / ( 15 , 4 ) ( 15 , 2 ) ( 7 , 5 ) ( 11 , 4 ) ( 11 , 2 ) ( 5 , 3 ) <nl> + <nl> + const uint8x16_t structural_bitvec = ( uint8x16_t ) { <nl> + 0 , 0 , 0 , 0 , <nl> + 0 , 8 , 0 , 32 , <nl> + 0 , 0 , 0 , 20 , <nl> + 0 , 0 , 0 , 20 } ; <nl> + / / we are also interested in the four whitespace characters <nl> + / / space 0x20 , linefeed 0x0a , horizontal tab 0x09 and carriage return 0x0d <nl> + / / ( 4 , 0 ) ( 1 , 2 ) ( 1 , 1 ) ( 1 , 5 ) <nl> + / / bit - reversing ( subtract low 3 bits from 7 ) yields : <nl> + / / ( 4 , 7 ) ( 1 , 5 ) ( 1 , 6 ) ( 1 , 2 ) <nl> + <nl> + const uint8x16_t whitespace_bitvec = ( uint8x16_t ) { <nl> + 0 , 100 , 0 , 0 , <nl> + 128 , 0 , 0 , 0 , <nl> + 0 , 0 , 0 , 0 , <nl> + 0 , 0 , 0 , 0 } ; <nl> + const uint8x16_t low_3bits_and_mask = vmovq_n_u8 ( 0x7 ) ; <nl> + const uint8x16_t high_1bit_tst_mask = vmovq_n_u8 ( 0x80 ) ; <nl> + <nl> + int8x16_t low_3bits_0 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . val [ 0 ] , low_3bits_and_mask ) ) ; <nl> + uint8x16_t high_5bits_0 = vshrq_n_u8 ( in . i . val [ 0 ] , 3 ) ; <nl> + uint8x16_t shuffle_structural_0 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_0 ) , low_3bits_0 ) ; <nl> + uint8x16_t shuffle_ws_0 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_0 ) , low_3bits_0 ) ; <nl> + uint8x16_t tmp_0 = vtstq_u8 ( shuffle_structural_0 , high_1bit_tst_mask ) ; <nl> + uint8x16_t tmp_ws_0 = vtstq_u8 ( shuffle_ws_0 , high_1bit_tst_mask ) ; <nl> + <nl> + int8x16_t low_3bits_1 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . 
val [ 1 ] , low_3bits_and_mask ) ) ; <nl> + uint8x16_t high_5bits_1 = vshrq_n_u8 ( in . i . val [ 1 ] , 3 ) ; <nl> + uint8x16_t shuffle_structural_1 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_1 ) , low_3bits_1 ) ; <nl> + uint8x16_t shuffle_ws_1 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_1 ) , low_3bits_1 ) ; <nl> + uint8x16_t tmp_1 = vtstq_u8 ( shuffle_structural_1 , high_1bit_tst_mask ) ; <nl> + uint8x16_t tmp_ws_1 = vtstq_u8 ( shuffle_ws_1 , high_1bit_tst_mask ) ; <nl> + <nl> + int8x16_t low_3bits_2 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . val [ 2 ] , low_3bits_and_mask ) ) ; <nl> + uint8x16_t high_5bits_2 = vshrq_n_u8 ( in . i . val [ 2 ] , 3 ) ; <nl> + uint8x16_t shuffle_structural_2 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_2 ) , low_3bits_2 ) ; <nl> + uint8x16_t shuffle_ws_2 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_2 ) , low_3bits_2 ) ; <nl> + uint8x16_t tmp_2 = vtstq_u8 ( shuffle_structural_2 , high_1bit_tst_mask ) ; <nl> + uint8x16_t tmp_ws_2 = vtstq_u8 ( shuffle_ws_2 , high_1bit_tst_mask ) ; <nl> + <nl> + int8x16_t low_3bits_3 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . val [ 3 ] , low_3bits_and_mask ) ) ; <nl> + uint8x16_t high_5bits_3 = vshrq_n_u8 ( in . i . val [ 3 ] , 3 ) ; <nl> + uint8x16_t shuffle_structural_3 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_3 ) , low_3bits_3 ) ; <nl> + uint8x16_t shuffle_ws_3 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_3 ) , low_3bits_3 ) ; <nl> + uint8x16_t tmp_3 = vtstq_u8 ( shuffle_structural_3 , high_1bit_tst_mask ) ; <nl> + uint8x16_t tmp_ws_3 = vtstq_u8 ( shuffle_ws_3 , high_1bit_tst_mask ) ; <nl> + <nl> + structurals = neonmovemask_bulk ( tmp_0 , tmp_1 , tmp_2 , tmp_3 ) ; <nl> + whitespace = neonmovemask_bulk ( tmp_ws_0 , tmp_ws_1 , tmp_ws_2 , tmp_ws_3 ) ; <nl> + # endif <nl> + } <nl> + # endif <nl> + <nl> + <nl> + # ifdef SIMDJSON_NAIVE_FLATTEN / / useful for benchmarking <nl> + / / <nl> + / / This is just a naive implementation . It should be normally <nl> + / / disable , but can be used for research purposes to compare <nl> + / / again our optimized version . <nl> + really_inline void flatten_bits ( uint32_t * base_ptr , uint32_t & base , <nl> + uint32_t idx , uint64_t bits ) { <nl> + uint32_t * out_ptr = base_ptr + base ; <nl> + idx - = 64 ; <nl> + while ( bits ! = 0 ) { <nl> + out_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + out_ptr + + ; <nl> + } <nl> + base = ( out_ptr - base_ptr ) ; <nl> + } <nl> + <nl> + # else <nl> + / / flatten out values in ' bits ' assuming that they are are to have values of idx <nl> + / / plus their position in the bitvector , and store these indexes at <nl> + / / base_ptr [ base ] incrementing base as we go <nl> + / / will potentially store extra values beyond end of valid bits , so base_ptr <nl> + / / needs to be large enough to handle this <nl> + really_inline void flatten_bits ( uint32_t * base_ptr , uint32_t & base , <nl> + uint32_t idx , uint64_t bits ) { <nl> + / / In some instances , the next branch is expensive because it is mispredicted . <nl> + / / Unfortunately , in other cases , <nl> + / / it helps tremendously . 
<nl> + if ( bits = = 0 ) return ; <nl> + uint32_t cnt = hamming ( bits ) ; <nl> + uint32_t next_base = base + cnt ; <nl> + idx - = 64 ; <nl> + base_ptr + = base ; <nl> + { <nl> + base_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 1 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 2 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 3 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 4 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 5 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 6 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 7 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr + = 8 ; <nl> + } <nl> + / / We hope that the next branch is easily predicted . <nl> + if ( cnt > 8 ) { <nl> + base_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 1 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 2 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 3 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 4 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 5 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 6 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr [ 7 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr + = 8 ; <nl> + } <nl> + if ( cnt > 16 ) { / / unluckly : we rarely get here <nl> + / / since it means having one structural or pseudo - structral element <nl> + / / every 4 characters ( possible with inputs like " " , " " , " " , . . . ) . <nl> + do { <nl> + base_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> + bits = bits & ( bits - 1 ) ; <nl> + base_ptr + + ; <nl> + } while ( bits ! = 0 ) ; <nl> + } <nl> + base = next_base ; <nl> + } <nl> + # endif <nl> + <nl> + / / return a updated structural bit vector with quoted contents cleared out and <nl> + / / pseudo - structural characters added to the mask <nl> + / / updates prev_iter_ends_pseudo_pred which tells us whether the previous <nl> + / / iteration ended on a whitespace or a structural character ( which means that <nl> + / / the next iteration <nl> + / / will have a pseudo - structural character at its start ) <nl> + really_inline uint64_t finalize_structurals ( <nl> + uint64_t structurals , uint64_t whitespace , uint64_t quote_mask , <nl> + uint64_t quote_bits , uint64_t & prev_iter_ends_pseudo_pred ) { <nl> + / / mask off anything inside quotes <nl> + structurals & = ~ quote_mask ; <nl> + / / add the real quote bits back into our bitmask as well , so we can <nl> + / / quickly traverse the strings we ' ve spent all this trouble gathering <nl> + structurals | = quote_bits ; <nl> + / / Now , establish " pseudo - structural characters " . These are non - whitespace <nl> + / / characters that are ( a ) outside quotes and ( b ) have a predecessor that ' s <nl> + / / either whitespace or a structural character . 
This means that subsequent <nl> + / / passes will get a chance to encounter the first character of every string <nl> + / / of non - whitespace and , if we ' re parsing an atom like true / false / null or a <nl> + / / number we can stop at the first whitespace or structural character <nl> + / / following it . <nl> + <nl> + / / a qualified predecessor is something that can happen 1 position before an <nl> + / / pseudo - structural character <nl> + uint64_t pseudo_pred = structurals | whitespace ; <nl> + <nl> + uint64_t shifted_pseudo_pred = <nl> + ( pseudo_pred < < 1 ) | prev_iter_ends_pseudo_pred ; <nl> + prev_iter_ends_pseudo_pred = pseudo_pred > > 63 ; <nl> + uint64_t pseudo_structurals = <nl> + shifted_pseudo_pred & ( ~ whitespace ) & ( ~ quote_mask ) ; <nl> + structurals | = pseudo_structurals ; <nl> + <nl> + / / now , we ' ve used our close quotes all we need to . So let ' s switch them off <nl> + / / they will be off in the quote mask and on in quote bits . <nl> + structurals & = ~ ( quote_bits & ~ quote_mask ) ; <nl> + return structurals ; <nl> + } <nl> + <nl> + template < simdjson : : instruction_set T = simdjson : : instruction_set : : native > <nl> WARN_UNUSED <nl> - int find_structural_bits ( const uint8_t * buf , size_t len , ParsedJson & pj ) ; <nl> + / * never_inline * / int find_structural_bits ( const uint8_t * buf , size_t len , <nl> + ParsedJson & pj ) { <nl> + if ( len > pj . bytecapacity ) { <nl> + std : : cerr < < " Your ParsedJson object only supports documents up to " <nl> + < < pj . bytecapacity < < " bytes but you are trying to process " < < len <nl> + < < " bytes " < < std : : endl ; <nl> + return simdjson : : CAPACITY ; <nl> + } <nl> + uint32_t * base_ptr = pj . structural_indexes ; <nl> + uint32_t base = 0 ; <nl> + # ifdef SIMDJSON_UTF8VALIDATE <nl> + __m256i has_error = _mm256_setzero_si256 ( ) ; <nl> + struct avx_processed_utf_bytes previous { } ; <nl> + previous . rawbytes = _mm256_setzero_si256 ( ) ; <nl> + previous . high_nibbles = _mm256_setzero_si256 ( ) ; <nl> + previous . carried_continuations = _mm256_setzero_si256 ( ) ; <nl> + # endif <nl> + <nl> + / / we have padded the input out to 64 byte multiple with the remainder being <nl> + / / zeros <nl> + <nl> + / / persistent state across loop <nl> + / / does the last iteration end with an odd - length sequence of backslashes ? <nl> + / / either 0 or 1 , but a 64 - bit value <nl> + uint64_t prev_iter_ends_odd_backslash = 0ULL ; <nl> + / / does the previous iteration end inside a double - quote pair ? <nl> + uint64_t prev_iter_inside_quote = 0ULL ; / / either all zeros or all ones <nl> + / / does the previous iteration end on something that is a predecessor of a <nl> + / / pseudo - structural character - i . e . whitespace or a structural character <nl> + / / effectively the very first char is considered to follow " whitespace " for <nl> + / / the <nl> + / / purposes of pseudo - structural character detection so we initialize to 1 <nl> + uint64_t prev_iter_ends_pseudo_pred = 1ULL ; <nl> + <nl> + / / structurals are persistent state across loop as we flatten them on the <nl> + / / subsequent iteration into our array pointed to be base_ptr . <nl> + / / This is harmless on the first iteration as structurals = = 0 <nl> + / / and is done for performance reasons ; we can hide some of the latency of the <nl> + / / expensive carryless multiply in the previous step with this work <nl> + uint64_t structurals = 0 ; <nl> + <nl> + size_t lenminus64 = len < 64 ? 
0 : len - 64 ; <nl> + size_t idx = 0 ; <nl> + uint64_t error_mask = 0 ; / / for unescaped characters within strings ( ASCII code points < 0x20 ) <nl> + <nl> + for ( ; idx < lenminus64 ; idx + = 64 ) { <nl> + # ifndef _MSC_VER <nl> + __builtin_prefetch ( buf + idx + 128 ) ; <nl> + # endif <nl> + simd_input < T > in = fill_input < T > ( buf + idx ) ; <nl> + # ifdef SIMDJSON_UTF8VALIDATE <nl> + check_utf8 ( in , has_error , previous ) ; <nl> + # endif <nl> + / / detect odd sequences of backslashes <nl> + uint64_t odd_ends = find_odd_backslash_sequences < T > ( <nl> + in , prev_iter_ends_odd_backslash ) ; <nl> + <nl> + / / detect insides of quote pairs ( " quote_mask " ) and also our quote_bits <nl> + / / themselves <nl> + uint64_t quote_bits ; <nl> + uint64_t quote_mask = find_quote_mask_and_bits < T > ( <nl> + in , odd_ends , prev_iter_inside_quote , quote_bits , error_mask ) ; <nl> + <nl> + / / take the previous iterations structural bits , not our current iteration , <nl> + / / and flatten <nl> + flatten_bits ( base_ptr , base , idx , structurals ) ; <nl> + <nl> + uint64_t whitespace ; <nl> + find_whitespace_and_structurals < T > ( in , whitespace , structurals ) ; <nl> + <nl> + / / fixup structurals to reflect quotes and add pseudo - structural characters <nl> + structurals = finalize_structurals ( structurals , whitespace , quote_mask , <nl> + quote_bits , prev_iter_ends_pseudo_pred ) ; <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / <nl> + / / / we use a giant copy - paste which is ugly . <nl> + / / / but otherwise the string needs to be properly padded or else we <nl> + / / / risk invalidating the UTF - 8 checks . <nl> + / / / / / / / / / / / / <nl> + if ( idx < len ) { <nl> + uint8_t tmpbuf [ 64 ] ; <nl> + memset ( tmpbuf , 0x20 , 64 ) ; <nl> + memcpy ( tmpbuf , buf + idx , len - idx ) ; <nl> + simd_input < T > in = fill_input < T > ( tmpbuf ) ; <nl> + # ifdef SIMDJSON_UTF8VALIDATE <nl> + check_utf8 ( in , has_error , previous ) ; <nl> + # endif <nl> + <nl> + / / detect odd sequences of backslashes <nl> + uint64_t odd_ends = find_odd_backslash_sequences < T > ( <nl> + in , prev_iter_ends_odd_backslash ) ; <nl> + <nl> + / / detect insides of quote pairs ( " quote_mask " ) and also our quote_bits <nl> + / / themselves <nl> + uint64_t quote_bits ; <nl> + uint64_t quote_mask = find_quote_mask_and_bits < T > ( <nl> + in , odd_ends , prev_iter_inside_quote , quote_bits , error_mask ) ; <nl> + <nl> + / / take the previous iterations structural bits , not our current iteration , <nl> + / / and flatten <nl> + flatten_bits ( base_ptr , base , idx , structurals ) ; <nl> + <nl> + uint64_t whitespace ; <nl> + find_whitespace_and_structurals < T > ( in , whitespace , structurals ) ; <nl> + <nl> + / / fixup structurals to reflect quotes and add pseudo - structural characters <nl> + structurals = finalize_structurals ( structurals , whitespace , quote_mask , <nl> + quote_bits , prev_iter_ends_pseudo_pred ) ; <nl> + idx + = 64 ; <nl> + } <nl> + <nl> + / / is last string quote closed ? <nl> + if ( prev_iter_inside_quote ) { <nl> + return simdjson : : UNCLOSED_STRING ; <nl> + } <nl> + <nl> + / / finally , flatten out the remaining structurals from the last iteration <nl> + flatten_bits ( base_ptr , base , idx , structurals ) ; <nl> + <nl> + pj . n_structural_indexes = base ; <nl> + / / a valid JSON file cannot have zero structural indexes - we should have <nl> + / / found something <nl> + if ( pj . n_structural_indexes = = 0u ) { <nl> + fprintf ( stderr , " Empty document ? 
\ n " ) ; <nl> + return simdjson : : EMPTY ; <nl> + } <nl> + if ( base_ptr [ pj . n_structural_indexes - 1 ] > len ) { <nl> + fprintf ( stderr , " Internal bug \ n " ) ; <nl> + return simdjson : : UNEXPECTED_ERROR ; <nl> + } <nl> + if ( len ! = base_ptr [ pj . n_structural_indexes - 1 ] ) { <nl> + / / the string might not be NULL terminated , but we add a virtual NULL ending <nl> + / / character . <nl> + base_ptr [ pj . n_structural_indexes + + ] = len ; <nl> + } <nl> + / / make it safe to dereference one beyond this array <nl> + base_ptr [ pj . n_structural_indexes ] = 0 ; <nl> + if ( error_mask ) { <nl> + fprintf ( stderr , " Unescaped characters \ n " ) ; <nl> + return simdjson : : UNESCAPED_CHARS ; <nl> + } <nl> + # ifdef SIMDJSON_UTF8VALIDATE <nl> + return _mm256_testz_si256 ( has_error , has_error ) = = 0 ? simdjson : : UTF8_ERROR : simdjson : : SUCCESS ; <nl> + # else <nl> + return simdjson : : SUCCESS ; <nl> + # endif <nl> + } <nl> <nl> + template < simdjson : : instruction_set T = simdjson : : instruction_set : : native > <nl> WARN_UNUSED <nl> - int find_structural_bits ( const char * buf , size_t len , ParsedJson & pj ) ; <nl> + int find_structural_bits ( const char * buf , size_t len , ParsedJson & pj ) { <nl> + return find_structural_bits < T > ( reinterpret_cast < const uint8_t * > ( buf ) , len , pj ) ; <nl> + } <nl> <nl> # endif <nl> mmm a / src / jsonparser . cpp <nl> ppp b / src / jsonparser . cpp <nl> <nl> # endif <nl> # include " simdjson / simdjson . h " <nl> <nl> - / / parse a document found in buf , need to preallocate ParsedJson . <nl> - WARN_UNUSED <nl> - int json_parse ( const uint8_t * buf , size_t len , ParsedJson & pj , bool reallocifneeded ) { <nl> - if ( pj . bytecapacity < len ) { <nl> - return simdjson : : CAPACITY ; <nl> - } <nl> - bool reallocated = false ; <nl> - if ( reallocifneeded ) { <nl> - # ifdef ALLOW_SAME_PAGE_BUFFER_OVERRUN <nl> - / / realloc is needed if the end of the memory crosses a page <nl> - # ifdef _MSC_VER <nl> - SYSTEM_INFO sysInfo ; <nl> - GetSystemInfo ( & sysInfo ) ; <nl> - long pagesize = sysInfo . dwPageSize ; <nl> + <nl> + / / Responsible to select the best json_parse implementation <nl> + int json_parse_dispatch ( const uint8_t * buf , size_t len , ParsedJson & pj , bool reallocifneeded ) { <nl> + / / Versions for each implementation <nl> + # ifdef __AVX2__ <nl> + json_parse_functype * avx_implementation = & json_parse_implementation < simdjson : : instruction_set : : avx2 > ; <nl> + # endif <nl> + # ifdef __SSE4_2__ <nl> + / / json_parse_functype * sse4_2_implementation = & json_parse_implementation < simdjson : : instruction_set : : sse4_2 > ; / / not implemented yet <nl> + # endif <nl> + # ifdef __ARM_NEON <nl> + json_parse_functype * neon_implementation = & json_parse_implementation < simdjson : : instruction_set : : neon > ; <nl> + # endif <nl> + <nl> + / / Determining which implementation is the more suitable <nl> + / / Should be done at runtime . Does not make any sense on preprocessor . 
<nl> + # ifdef __AVX2__ <nl> + simdjson : : instruction_set best_implementation = simdjson : : instruction_set : : avx2 ; <nl> + # elif defined ( __SSE4_2__ ) <nl> + simdjson : : instruction_set best_implementation = simdjson : : instruction_set : : sse4_2 ; <nl> + # elif defined ( __ARM_NEON ) <nl> + simdjson : : instruction_set best_implementation = simdjson : : instruction_set : : neon ; <nl> # else <nl> - long pagesize = sysconf ( _SC_PAGESIZE ) ; <nl> + simdjson : : instruction_set best_implementation = simdjson : : instruction_set : : none ; <nl> # endif <nl> - / / / / / / / / / / / / / / <nl> - / / We want to check that buf + len - 1 and buf + len - 1 + SIMDJSON_PADDING <nl> - / / are in the same page . <nl> - / / That is , we want to check that <nl> - / / ( buf + len - 1 ) / pagesize = = ( buf + len - 1 + SIMDJSON_PADDING ) / pagesize <nl> - / / That ' s true if ( buf + len - 1 ) % pagesize + SIMDJSON_PADDING < pagesize . <nl> - / / / / / / / / / / / <nl> - if ( ( reinterpret_cast < uintptr_t > ( buf + len - 1 ) % pagesize ) + SIMDJSON_PADDING < static_cast < uintptr_t > ( pagesize ) ) { <nl> - # else / / SIMDJSON_SAFE_SAME_PAGE_READ_OVERRUN <nl> - if ( true ) { / / if not SIMDJSON_SAFE_SAME_PAGE_READ_OVERRUN , we always reallocate <nl> + <nl> + / / Selecting the best implementation <nl> + switch ( best_implementation ) { <nl> + # ifdef __AVX2__ <nl> + case simdjson : : instruction_set : : avx2 : <nl> + json_parse_ptr = avx_implementation ; <nl> + break ; <nl> + # elif defined ( __SSE4_2__ ) <nl> + / * case simdjson : : instruction_set : : sse4_2 : <nl> + json_parse_ptr = sse4_2_implementation ; <nl> + break ; * / <nl> + # elif defined ( __ARM_NEON ) <nl> + case simdjson : : instruction_set : : neon : <nl> + json_parse_ptr = neon_implementation ; <nl> + break ; <nl> # endif <nl> - const uint8_t * tmpbuf = buf ; <nl> - buf = ( uint8_t * ) allocate_padded_buffer ( len ) ; <nl> - if ( buf = = NULL ) return simdjson : : MEMALLOC ; <nl> - memcpy ( ( void * ) buf , tmpbuf , len ) ; <nl> - reallocated = true ; <nl> - } <nl> + default : <nl> + std : : cerr < < " No implemented simd instruction set supported " < < std : : endl ; <nl> + return simdjson : : UNEXPECTED_ERROR ; <nl> } <nl> - int stage1_is_ok = find_structural_bits ( buf , len , pj ) ; <nl> - if ( stage1_is_ok ! = simdjson : : SUCCESS ) { <nl> - pj . errorcode = stage1_is_ok ; <nl> - return pj . errorcode ; <nl> - } <nl> - int res = unified_machine ( buf , len , pj ) ; <nl> - if ( reallocated ) { aligned_free ( ( void * ) buf ) ; } <nl> - return res ; <nl> + <nl> + return json_parse_ptr ( buf , len , pj , reallocifneeded ) ; <nl> } <nl> <nl> + json_parse_functype * json_parse_ptr = & json_parse_dispatch ; <nl> + <nl> WARN_UNUSED <nl> ParsedJson build_parsed_json ( const uint8_t * buf , size_t len , bool reallocifneeded ) { <nl> ParsedJson pj ; <nl> bool ok = pj . allocateCapacity ( len ) ; <nl> if ( ok ) { <nl> - ( void ) json_parse ( buf , len , pj , reallocifneeded ) ; <nl> + json_parse ( buf , len , pj , reallocifneeded ) ; <nl> } else { <nl> std : : cerr < < " failure during memory allocation " < < std : : endl ; <nl> } <nl> mmm a / src / stage1_find_marks . cpp <nl> ppp b / src / stage1_find_marks . cpp <nl> @ @ - 1 , 780 + 1 @ @ <nl> - # include < cassert > <nl> - # include " simdjson / common_defs . h " <nl> - # include " simdjson / parsedjson . h " <nl> - # include " simdjson / portability . 
h " <nl> - <nl> - <nl> - # ifdef __AVX2__ <nl> - <nl> - # ifndef SIMDJSON_SKIPUTF8VALIDATION <nl> - # define SIMDJSON_UTF8VALIDATE <nl> - <nl> - # endif <nl> - # else <nl> - / / currently we don ' t UTF8 validate for ARM <nl> - / / also we assume that if you ' re not __AVX2__ <nl> - / / you ' re ARM , which is a bit dumb . TODO : Fix . . . <nl> - # ifdef __ARM_NEON <nl> - # include < arm_neon . h > <nl> - # else <nl> - # warning It appears that neither ARM NEON nor AVX2 are detected . <nl> - # endif / / __ARM_NEON <nl> - # endif / / __AVX2__ <nl> - <nl> - / / It seems that many parsers do UTF - 8 validation . <nl> - / / RapidJSON does not do it by default , but a flag <nl> - / / allows it . <nl> - # ifdef SIMDJSON_UTF8VALIDATE <nl> - # include " simdjson / simdutf8check . h " <nl> - # endif <nl> - <nl> - # define TRANSPOSE <nl> - <nl> - struct simd_input { <nl> - # ifdef __AVX2__ <nl> - __m256i lo ; <nl> - __m256i hi ; <nl> - # elif defined ( __ARM_NEON ) <nl> - # ifndef TRANSPOSE <nl> - uint8x16_t i0 ; <nl> - uint8x16_t i1 ; <nl> - uint8x16_t i2 ; <nl> - uint8x16_t i3 ; <nl> - # else <nl> - uint8x16x4_t i ; <nl> - # endif <nl> - # else <nl> - # warning It appears that neither ARM NEON nor AVX2 are detected . <nl> - # endif <nl> - } ; <nl> - <nl> - really_inline uint64_t compute_quote_mask ( uint64_t quote_bits ) { <nl> - / / In practice , if you have NEON or __PCLMUL__ , you would <nl> - / / always want to use them , but it might be useful , for research <nl> - / / purposes , to disable it willingly , that ' s what SIMDJSON_AVOID_CLMUL <nl> - / / does . <nl> - / / Also : we don ' t know of an instance where AVX2 is supported but <nl> - / / where clmul is not supported , so check for both , to be sure . <nl> - # if ( defined ( __PCLMUL__ ) | | defined ( __AVX2__ ) ) & & ! defined ( SIMDJSON_AVOID_CLMUL ) <nl> - uint64_t quote_mask = _mm_cvtsi128_si64 ( _mm_clmulepi64_si128 ( <nl> - _mm_set_epi64x ( 0ULL , quote_bits ) , _mm_set1_epi8 ( 0xFF ) , 0 ) ) ; <nl> - # elif defined ( __ARM_NEON ) & & ! defined ( SIMDJSON_AVOID_CLMUL ) <nl> - uint64_t quote_mask = vmull_p64 ( - 1ULL , quote_bits ) ; <nl> - # else <nl> - / / this code should always be used if SIMDJSON_AVOID_CLMUL is defined . <nl> - uint64_t quote_mask = quote_bits ^ ( quote_bits < < 1 ) ; <nl> - quote_mask = quote_mask ^ ( quote_mask < < 2 ) ; <nl> - quote_mask = quote_mask ^ ( quote_mask < < 4 ) ; <nl> - quote_mask = quote_mask ^ ( quote_mask < < 8 ) ; <nl> - quote_mask = quote_mask ^ ( quote_mask < < 16 ) ; <nl> - quote_mask = quote_mask ^ ( quote_mask < < 32 ) ; <nl> - # endif <nl> - return quote_mask ; <nl> - } <nl> - <nl> - really_inline simd_input fill_input ( const uint8_t * ptr ) { <nl> - struct simd_input in ; <nl> - # ifdef __AVX2__ <nl> - in . lo = _mm256_loadu_si256 ( reinterpret_cast < const __m256i * > ( ptr + 0 ) ) ; <nl> - in . hi = _mm256_loadu_si256 ( reinterpret_cast < const __m256i * > ( ptr + 32 ) ) ; <nl> - # elif defined ( __ARM_NEON ) <nl> - # ifndef TRANSPOSE <nl> - in . i0 = vld1q_u8 ( ptr + 0 ) ; <nl> - in . i1 = vld1q_u8 ( ptr + 16 ) ; <nl> - in . i2 = vld1q_u8 ( ptr + 32 ) ; <nl> - in . i3 = vld1q_u8 ( ptr + 48 ) ; <nl> - # else <nl> - in . i = vld4q_u8 ( ptr ) ; <nl> - # endif <nl> - # else <nl> - # warning It appears that neither ARM NEON nor AVX2 are detected . 
<nl> - # endif <nl> - return in ; <nl> - } <nl> - <nl> - # ifdef SIMDJSON_UTF8VALIDATE <nl> - really_inline void check_utf8 ( simd_input in , <nl> - __m256i & has_error , <nl> - struct avx_processed_utf_bytes & previous ) { <nl> - __m256i highbit = _mm256_set1_epi8 ( 0x80 ) ; <nl> - if ( ( _mm256_testz_si256 ( _mm256_or_si256 ( in . lo , in . hi ) , highbit ) ) = = 1 ) { <nl> - / / it is ascii , we just check continuation <nl> - has_error = _mm256_or_si256 ( <nl> - _mm256_cmpgt_epi8 ( <nl> - previous . carried_continuations , <nl> - _mm256_setr_epi8 ( 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , <nl> - 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 1 ) ) , <nl> - has_error ) ; <nl> - } else { <nl> - / / it is not ascii so we have to do heavy work <nl> - previous = avxcheckUTF8Bytes ( in . lo , & previous , & has_error ) ; <nl> - previous = avxcheckUTF8Bytes ( in . hi , & previous , & has_error ) ; <nl> - } <nl> - } <nl> - # endif <nl> - <nl> - # ifdef __ARM_NEON <nl> - uint16_t neonmovemask ( uint8x16_t input ) { <nl> - const uint8x16_t bitmask = { 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 , <nl> - 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 } ; <nl> - uint8x16_t minput = vandq_u8 ( input , bitmask ) ; <nl> - uint8x16_t tmp = vpaddq_u8 ( minput , minput ) ; <nl> - tmp = vpaddq_u8 ( tmp , tmp ) ; <nl> - tmp = vpaddq_u8 ( tmp , tmp ) ; <nl> - return vgetq_lane_u16 ( vreinterpretq_u16_u8 ( tmp ) , 0 ) ; <nl> - } <nl> - <nl> - really_inline <nl> - uint64_t neonmovemask_bulk ( uint8x16_t p0 , uint8x16_t p1 , uint8x16_t p2 , uint8x16_t p3 ) { <nl> - # ifndef TRANSPOSE <nl> - const uint8x16_t bitmask = { 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 , <nl> - 0x01 , 0x02 , 0x4 , 0x8 , 0x10 , 0x20 , 0x40 , 0x80 } ; <nl> - uint8x16_t t0 = vandq_u8 ( p0 , bitmask ) ; <nl> - uint8x16_t t1 = vandq_u8 ( p1 , bitmask ) ; <nl> - uint8x16_t t2 = vandq_u8 ( p2 , bitmask ) ; <nl> - uint8x16_t t3 = vandq_u8 ( p3 , bitmask ) ; <nl> - uint8x16_t sum0 = vpaddq_u8 ( t0 , t1 ) ; <nl> - uint8x16_t sum1 = vpaddq_u8 ( t2 , t3 ) ; <nl> - sum0 = vpaddq_u8 ( sum0 , sum1 ) ; <nl> - sum0 = vpaddq_u8 ( sum0 , sum0 ) ; <nl> - return vgetq_lane_u64 ( vreinterpretq_u64_u8 ( sum0 ) , 0 ) ; <nl> - # else <nl> - const uint8x16_t bitmask1 = { 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 , <nl> - 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 , 0x01 , 0x10 } ; <nl> - const uint8x16_t bitmask2 = { 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 , <nl> - 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 , 0x02 , 0x20 } ; <nl> - const uint8x16_t bitmask3 = { 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 , <nl> - 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 , 0x04 , 0x40 } ; <nl> - const uint8x16_t bitmask4 = { 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 , <nl> - 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 , 0x08 , 0x80 } ; <nl> - # if 0 <nl> - uint8x16_t t0 = vandq_u8 ( p0 , bitmask1 ) ; <nl> - uint8x16_t t1 = vandq_u8 ( p1 , bitmask2 ) ; <nl> - uint8x16_t t2 = vandq_u8 ( p2 , bitmask3 ) ; <nl> - uint8x16_t t3 = vandq_u8 ( p3 , bitmask4 ) ; <nl> - uint8x16_t tmp = vorrq_u8 ( vorrq_u8 ( t0 , t1 ) , vorrq_u8 ( t2 , t3 ) ) ; <nl> - # else <nl> - uint8x16_t t0 = vandq_u8 ( p0 , bitmask1 ) ; <nl> - uint8x16_t t1 = vbslq_u8 ( bitmask2 , p1 , t0 ) ; <nl> - uint8x16_t t2 = vbslq_u8 ( bitmask3 , p2 , t1 ) ; <nl> - uint8x16_t tmp = vbslq_u8 ( bitmask4 , p3 , t2 ) ; <nl> - # endif <nl> - uint8x16_t sum = vpaddq_u8 ( tmp , tmp ) ; <nl> - return vgetq_lane_u64 ( vreinterpretq_u64_u8 ( 
sum ) , 0 ) ; <nl> - # endif <nl> - } <nl> - # endif <nl> - <nl> - / / a straightforward comparison of a mask against input . 5 uops ; would be <nl> - / / cheaper in AVX512 . <nl> - really_inline uint64_t cmp_mask_against_input ( simd_input in , uint8_t m ) { <nl> - # ifdef __AVX2__ <nl> - const __m256i mask = _mm256_set1_epi8 ( m ) ; <nl> - __m256i cmp_res_0 = _mm256_cmpeq_epi8 ( in . lo , mask ) ; <nl> - uint64_t res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( cmp_res_0 ) ) ; <nl> - __m256i cmp_res_1 = _mm256_cmpeq_epi8 ( in . hi , mask ) ; <nl> - uint64_t res_1 = _mm256_movemask_epi8 ( cmp_res_1 ) ; <nl> - return res_0 | ( res_1 < < 32 ) ; <nl> - # elif defined ( __ARM_NEON ) <nl> - const uint8x16_t mask = vmovq_n_u8 ( m ) ; <nl> - uint8x16_t cmp_res_0 = vceqq_u8 ( in . i . val [ 0 ] , mask ) ; <nl> - uint8x16_t cmp_res_1 = vceqq_u8 ( in . i . val [ 1 ] , mask ) ; <nl> - uint8x16_t cmp_res_2 = vceqq_u8 ( in . i . val [ 2 ] , mask ) ; <nl> - uint8x16_t cmp_res_3 = vceqq_u8 ( in . i . val [ 3 ] , mask ) ; <nl> - return neonmovemask_bulk ( cmp_res_0 , cmp_res_1 , cmp_res_2 , cmp_res_3 ) ; <nl> - # else <nl> - # warning It appears that neither ARM NEON nor AVX2 are detected . <nl> - # endif <nl> - } <nl> - <nl> - / / find all values less than or equal than the content of maxval ( using unsigned arithmetic ) <nl> - really_inline uint64_t unsigned_lteq_against_input ( simd_input in , uint8_t m ) { <nl> - # ifdef __AVX2__ <nl> - const __m256i maxval = _mm256_set1_epi8 ( m ) ; <nl> - __m256i cmp_res_0 = _mm256_cmpeq_epi8 ( _mm256_max_epu8 ( maxval , in . lo ) , maxval ) ; <nl> - uint64_t res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( cmp_res_0 ) ) ; <nl> - __m256i cmp_res_1 = _mm256_cmpeq_epi8 ( _mm256_max_epu8 ( maxval , in . hi ) , maxval ) ; <nl> - uint64_t res_1 = _mm256_movemask_epi8 ( cmp_res_1 ) ; <nl> - return res_0 | ( res_1 < < 32 ) ; <nl> - # elif defined ( __ARM_NEON ) <nl> - const uint8x16_t mask = vmovq_n_u8 ( m ) ; <nl> - uint8x16_t cmp_res_0 = vcleq_u8 ( in . i . val [ 0 ] , mask ) ; <nl> - uint8x16_t cmp_res_1 = vcleq_u8 ( in . i . val [ 1 ] , mask ) ; <nl> - uint8x16_t cmp_res_2 = vcleq_u8 ( in . i . val [ 2 ] , mask ) ; <nl> - uint8x16_t cmp_res_3 = vcleq_u8 ( in . i . val [ 3 ] , mask ) ; <nl> - return neonmovemask_bulk ( cmp_res_0 , cmp_res_1 , cmp_res_2 , cmp_res_3 ) ; <nl> - # else <nl> - # warning It appears that neither ARM NEON nor AVX2 are detected . <nl> - # endif <nl> - } <nl> - <nl> - / / return a bitvector indicating where we have characters that end an odd - length <nl> - / / sequence of backslashes ( and thus change the behavior of the next character <nl> - / / to follow ) . A even - length sequence of backslashes , and , for that matter , the <nl> - / / largest even - length prefix of our odd - length sequence of backslashes , simply <nl> - / / modify the behavior of the backslashes themselves . <nl> - / / We also update the prev_iter_ends_odd_backslash reference parameter to <nl> - / / indicate whether we end an iteration on an odd - length sequence of <nl> - / / backslashes , which modifies our subsequent search for odd - length <nl> - / / sequences of backslashes in an obvious way . 
<nl> - really_inline uint64_t <nl> - find_odd_backslash_sequences ( simd_input in , <nl> - uint64_t & prev_iter_ends_odd_backslash ) { <nl> - const uint64_t even_bits = 0x5555555555555555ULL ; <nl> - const uint64_t odd_bits = ~ even_bits ; <nl> - uint64_t bs_bits = cmp_mask_against_input ( in , ' \ \ ' ) ; <nl> - uint64_t start_edges = bs_bits & ~ ( bs_bits < < 1 ) ; <nl> - / / flip lowest if we have an odd - length run at the end of the prior <nl> - / / iteration <nl> - uint64_t even_start_mask = even_bits ^ prev_iter_ends_odd_backslash ; <nl> - uint64_t even_starts = start_edges & even_start_mask ; <nl> - uint64_t odd_starts = start_edges & ~ even_start_mask ; <nl> - uint64_t even_carries = bs_bits + even_starts ; <nl> - <nl> - uint64_t odd_carries ; <nl> - / / must record the carry - out of our odd - carries out of bit 63 ; this <nl> - / / indicates whether the sense of any edge going to the next iteration <nl> - / / should be flipped <nl> - bool iter_ends_odd_backslash = <nl> - add_overflow ( bs_bits , odd_starts , & odd_carries ) ; <nl> - <nl> - odd_carries | = <nl> - prev_iter_ends_odd_backslash ; / / push in bit zero as a potential end <nl> - / / if we had an odd - numbered run at the <nl> - / / end of the previous iteration <nl> - prev_iter_ends_odd_backslash = iter_ends_odd_backslash ? 0x1ULL : 0x0ULL ; <nl> - uint64_t even_carry_ends = even_carries & ~ bs_bits ; <nl> - uint64_t odd_carry_ends = odd_carries & ~ bs_bits ; <nl> - uint64_t even_start_odd_end = even_carry_ends & odd_bits ; <nl> - uint64_t odd_start_even_end = odd_carry_ends & even_bits ; <nl> - uint64_t odd_ends = even_start_odd_end | odd_start_even_end ; <nl> - return odd_ends ; <nl> - } <nl> - <nl> - / / return both the quote mask ( which is a half - open mask that covers the first <nl> - / / quote <nl> - / / in an unescaped quote pair and everything in the quote pair ) and the quote <nl> - / / bits , which are the simple <nl> - / / unescaped quoted bits . We also update the prev_iter_inside_quote value to <nl> - / / tell the next iteration <nl> - / / whether we finished the final iteration inside a quote pair ; if so , this <nl> - / / inverts our behavior of <nl> - / / whether we ' re inside quotes for the next iteration . <nl> - / / Note that we don ' t do any error checking to see if we have backslash <nl> - / / sequences outside quotes ; these <nl> - / / backslash sequences ( of any length ) will be detected elsewhere . <nl> - really_inline uint64_t find_quote_mask_and_bits ( simd_input in , uint64_t odd_ends , <nl> - uint64_t & prev_iter_inside_quote , uint64_t & quote_bits , uint64_t & error_mask ) { <nl> - quote_bits = cmp_mask_against_input ( in , ' " ' ) ; <nl> - quote_bits = quote_bits & ~ odd_ends ; <nl> - uint64_t quote_mask = compute_quote_mask ( quote_bits ) ; <nl> - quote_mask ^ = prev_iter_inside_quote ; <nl> - / / All Unicode characters may be placed within the <nl> - / / quotation marks , except for the characters that MUST be escaped : <nl> - / / quotation mark , reverse solidus , and the control characters ( U + 0000 <nl> - / / through U + 001F ) . <nl> - / / https : / / tools . ietf . org / html / rfc8259 <nl> - uint64_t unescaped = unsigned_lteq_against_input ( in , 0x1F ) ; <nl> - error_mask | = quote_mask & unescaped ; <nl> - / / right shift of a signed value expected to be well - defined and standard <nl> - / / compliant as of C + + 20 , <nl> - / / John Regher from Utah U . 
says this is fine code <nl> - prev_iter_inside_quote = <nl> - static_cast < uint64_t > ( static_cast < int64_t > ( quote_mask ) > > 63 ) ; <nl> - return quote_mask ; <nl> - } <nl> - <nl> - really_inline void find_whitespace_and_structurals ( simd_input in , <nl> - uint64_t & whitespace , <nl> - uint64_t & structurals ) { <nl> - / / do a ' shufti ' to detect structural JSON characters <nl> - / / they are { 0x7b } 0x7d : 0x3a [ 0x5b ] 0x5d , 0x2c <nl> - / / these go into the first 3 buckets of the comparison ( 1 / 2 / 4 ) <nl> - <nl> - / / we are also interested in the four whitespace characters <nl> - / / space 0x20 , linefeed 0x0a , horizontal tab 0x09 and carriage return 0x0d <nl> - / / these go into the next 2 buckets of the comparison ( 8 / 16 ) <nl> - # ifdef __AVX2__ <nl> - # ifdef SIMDJSON_NAIVE_STRUCTURAL <nl> - / / You should never need this naive approach , but it can be useful <nl> - / / for research purposes <nl> - const __m256i mask_open_brace = _mm256_set1_epi8 ( 0x7b ) ; <nl> - __m256i struct_lo = _mm256_cmpeq_epi8 ( in . lo , mask_open_brace ) ; <nl> - __m256i struct_hi = _mm256_cmpeq_epi8 ( in . hi , mask_open_brace ) ; <nl> - const __m256i mask_close_brace = _mm256_set1_epi8 ( 0x7d ) ; <nl> - struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_close_brace ) ) ; <nl> - struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_close_brace ) ) ; <nl> - const __m256i mask_open_bracket = _mm256_set1_epi8 ( 0x5b ) ; <nl> - struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_open_bracket ) ) ; <nl> - struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_open_bracket ) ) ; <nl> - const __m256i mask_close_bracket = _mm256_set1_epi8 ( 0x5d ) ; <nl> - struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_close_bracket ) ) ; <nl> - struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_close_bracket ) ) ; <nl> - const __m256i mask_column = _mm256_set1_epi8 ( 0x3a ) ; <nl> - struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_column ) ) ; <nl> - struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_column ) ) ; <nl> - const __m256i mask_comma = _mm256_set1_epi8 ( 0x2c ) ; <nl> - struct_lo = _mm256_or_si256 ( struct_lo , _mm256_cmpeq_epi8 ( in . lo , mask_comma ) ) ; <nl> - struct_hi = _mm256_or_si256 ( struct_hi , _mm256_cmpeq_epi8 ( in . hi , mask_comma ) ) ; <nl> - uint64_t structural_res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( struct_lo ) ) ; <nl> - uint64_t structural_res_1 = _mm256_movemask_epi8 ( struct_hi ) ; <nl> - structurals = ( structural_res_0 | ( structural_res_1 < < 32 ) ) ; <nl> - <nl> - const __m256i mask_space = _mm256_set1_epi8 ( 0x20 ) ; <nl> - __m256i space_lo = _mm256_cmpeq_epi8 ( in . lo , mask_space ) ; <nl> - __m256i space_hi = _mm256_cmpeq_epi8 ( in . hi , mask_space ) ; <nl> - const __m256i mask_linefeed = _mm256_set1_epi8 ( 0x0a ) ; <nl> - space_lo = _mm256_or_si256 ( space_lo , _mm256_cmpeq_epi8 ( in . lo , mask_linefeed ) ) ; <nl> - space_hi = _mm256_or_si256 ( space_hi , _mm256_cmpeq_epi8 ( in . hi , mask_linefeed ) ) ; <nl> - const __m256i mask_tab = _mm256_set1_epi8 ( 0x09 ) ; <nl> - space_lo = _mm256_or_si256 ( space_lo , _mm256_cmpeq_epi8 ( in . lo , mask_tab ) ) ; <nl> - space_hi = _mm256_or_si256 ( space_hi , _mm256_cmpeq_epi8 ( in . 
hi , mask_tab ) ) ; <nl> - const __m256i mask_carriage = _mm256_set1_epi8 ( 0x0d ) ; <nl> - space_lo = _mm256_or_si256 ( space_lo , _mm256_cmpeq_epi8 ( in . lo , mask_carriage ) ) ; <nl> - space_hi = _mm256_or_si256 ( space_hi , _mm256_cmpeq_epi8 ( in . hi , mask_carriage ) ) ; <nl> - <nl> - uint64_t ws_res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( space_lo ) ) ; <nl> - uint64_t ws_res_1 = _mm256_movemask_epi8 ( space_hi ) ; <nl> - whitespace = ( ws_res_0 | ( ws_res_1 < < 32 ) ) ; <nl> - / / end of naive approach <nl> - <nl> - # else / / SIMDJSON_NAIVE_STRUCTURAL <nl> - const __m256i low_nibble_mask = _mm256_setr_epi8 ( <nl> - 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 8 , 12 , 1 , 2 , 9 , 0 , 0 , <nl> - 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 8 , 12 , 1 , 2 , 9 , 0 , 0 ) ; <nl> - const __m256i high_nibble_mask = _mm256_setr_epi8 ( <nl> - 8 , 0 , 18 , 4 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 3 , 2 , 1 , 0 , 0 , <nl> - 8 , 0 , 18 , 4 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 3 , 2 , 1 , 0 , 0 ) ; <nl> - <nl> - __m256i structural_shufti_mask = _mm256_set1_epi8 ( 0x7 ) ; <nl> - __m256i whitespace_shufti_mask = _mm256_set1_epi8 ( 0x18 ) ; <nl> - <nl> - __m256i v_lo = _mm256_and_si256 ( <nl> - _mm256_shuffle_epi8 ( low_nibble_mask , in . lo ) , <nl> - _mm256_shuffle_epi8 ( high_nibble_mask , <nl> - _mm256_and_si256 ( _mm256_srli_epi32 ( in . lo , 4 ) , <nl> - _mm256_set1_epi8 ( 0x7f ) ) ) ) ; <nl> - <nl> - __m256i v_hi = _mm256_and_si256 ( <nl> - _mm256_shuffle_epi8 ( low_nibble_mask , in . hi ) , <nl> - _mm256_shuffle_epi8 ( high_nibble_mask , <nl> - _mm256_and_si256 ( _mm256_srli_epi32 ( in . hi , 4 ) , <nl> - _mm256_set1_epi8 ( 0x7f ) ) ) ) ; <nl> - __m256i tmp_lo = _mm256_cmpeq_epi8 ( <nl> - _mm256_and_si256 ( v_lo , structural_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> - __m256i tmp_hi = _mm256_cmpeq_epi8 ( <nl> - _mm256_and_si256 ( v_hi , structural_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> - <nl> - uint64_t structural_res_0 = <nl> - static_cast < uint32_t > ( _mm256_movemask_epi8 ( tmp_lo ) ) ; <nl> - uint64_t structural_res_1 = _mm256_movemask_epi8 ( tmp_hi ) ; <nl> - structurals = ~ ( structural_res_0 | ( structural_res_1 < < 32 ) ) ; <nl> - <nl> - __m256i tmp_ws_lo = _mm256_cmpeq_epi8 ( <nl> - _mm256_and_si256 ( v_lo , whitespace_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> - __m256i tmp_ws_hi = _mm256_cmpeq_epi8 ( <nl> - _mm256_and_si256 ( v_hi , whitespace_shufti_mask ) , _mm256_set1_epi8 ( 0 ) ) ; <nl> - <nl> - uint64_t ws_res_0 = static_cast < uint32_t > ( _mm256_movemask_epi8 ( tmp_ws_lo ) ) ; <nl> - uint64_t ws_res_1 = _mm256_movemask_epi8 ( tmp_ws_hi ) ; <nl> - whitespace = ~ ( ws_res_0 | ( ws_res_1 < < 32 ) ) ; <nl> - # endif / / SIMDJSON_NAIVE_STRUCTURAL <nl> - # elif defined ( __ARM_NEON ) <nl> - # ifndef FUNKY_BAD_TABLE <nl> - const uint8x16_t low_nibble_mask = ( uint8x16_t ) { <nl> - 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 8 , 12 , 1 , 2 , 9 , 0 , 0 } ; <nl> - const uint8x16_t high_nibble_mask = ( uint8x16_t ) { <nl> - 8 , 0 , 18 , 4 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 3 , 2 , 1 , 0 , 0 } ; <nl> - const uint8x16_t structural_shufti_mask = vmovq_n_u8 ( 0x7 ) ; <nl> - const uint8x16_t whitespace_shufti_mask = vmovq_n_u8 ( 0x18 ) ; <nl> - const uint8x16_t low_nib_and_mask = vmovq_n_u8 ( 0xf ) ; <nl> - <nl> - uint8x16_t nib_0_lo = vandq_u8 ( in . i . val [ 0 ] , low_nib_and_mask ) ; <nl> - uint8x16_t nib_0_hi = vshrq_n_u8 ( in . i . 
val [ 0 ] , 4 ) ; <nl> - uint8x16_t shuf_0_lo = vqtbl1q_u8 ( low_nibble_mask , nib_0_lo ) ; <nl> - uint8x16_t shuf_0_hi = vqtbl1q_u8 ( high_nibble_mask , nib_0_hi ) ; <nl> - uint8x16_t v_0 = vandq_u8 ( shuf_0_lo , shuf_0_hi ) ; <nl> - <nl> - uint8x16_t nib_1_lo = vandq_u8 ( in . i . val [ 1 ] , low_nib_and_mask ) ; <nl> - uint8x16_t nib_1_hi = vshrq_n_u8 ( in . i . val [ 1 ] , 4 ) ; <nl> - uint8x16_t shuf_1_lo = vqtbl1q_u8 ( low_nibble_mask , nib_1_lo ) ; <nl> - uint8x16_t shuf_1_hi = vqtbl1q_u8 ( high_nibble_mask , nib_1_hi ) ; <nl> - uint8x16_t v_1 = vandq_u8 ( shuf_1_lo , shuf_1_hi ) ; <nl> - <nl> - uint8x16_t nib_2_lo = vandq_u8 ( in . i . val [ 2 ] , low_nib_and_mask ) ; <nl> - uint8x16_t nib_2_hi = vshrq_n_u8 ( in . i . val [ 2 ] , 4 ) ; <nl> - uint8x16_t shuf_2_lo = vqtbl1q_u8 ( low_nibble_mask , nib_2_lo ) ; <nl> - uint8x16_t shuf_2_hi = vqtbl1q_u8 ( high_nibble_mask , nib_2_hi ) ; <nl> - uint8x16_t v_2 = vandq_u8 ( shuf_2_lo , shuf_2_hi ) ; <nl> - <nl> - uint8x16_t nib_3_lo = vandq_u8 ( in . i . val [ 3 ] , low_nib_and_mask ) ; <nl> - uint8x16_t nib_3_hi = vshrq_n_u8 ( in . i . val [ 3 ] , 4 ) ; <nl> - uint8x16_t shuf_3_lo = vqtbl1q_u8 ( low_nibble_mask , nib_3_lo ) ; <nl> - uint8x16_t shuf_3_hi = vqtbl1q_u8 ( high_nibble_mask , nib_3_hi ) ; <nl> - uint8x16_t v_3 = vandq_u8 ( shuf_3_lo , shuf_3_hi ) ; <nl> - <nl> - uint8x16_t tmp_0 = vtstq_u8 ( v_0 , structural_shufti_mask ) ; <nl> - uint8x16_t tmp_1 = vtstq_u8 ( v_1 , structural_shufti_mask ) ; <nl> - uint8x16_t tmp_2 = vtstq_u8 ( v_2 , structural_shufti_mask ) ; <nl> - uint8x16_t tmp_3 = vtstq_u8 ( v_3 , structural_shufti_mask ) ; <nl> - structurals = neonmovemask_bulk ( tmp_0 , tmp_1 , tmp_2 , tmp_3 ) ; <nl> - <nl> - uint8x16_t tmp_ws_0 = vtstq_u8 ( v_0 , whitespace_shufti_mask ) ; <nl> - uint8x16_t tmp_ws_1 = vtstq_u8 ( v_1 , whitespace_shufti_mask ) ; <nl> - uint8x16_t tmp_ws_2 = vtstq_u8 ( v_2 , whitespace_shufti_mask ) ; <nl> - uint8x16_t tmp_ws_3 = vtstq_u8 ( v_3 , whitespace_shufti_mask ) ; <nl> - whitespace = neonmovemask_bulk ( tmp_ws_0 , tmp_ws_1 , tmp_ws_2 , tmp_ws_3 ) ; <nl> - # else <nl> - / / I think this one is garbage . In order to save the expense <nl> - / / of another shuffle , I use an equally expensive shift , and <nl> - / / this gets glued to the end of the dependency chain . Seems a bit <nl> - / / slower for no good reason . <nl> - / / <nl> - / / need to use a weird arrangement . Bytes in this bitvector <nl> - / / are in conventional order , but bits are reversed as we are <nl> - / / using a signed left shift ( that is a + ve value from 0 . . 7 ) to <nl> - / / shift upwards to 0x80 in the bit . So we need to reverse bits . 
<nl> - <nl> - / / note no structural / whitespace has the high bit on <nl> - / / so it ' s OK to put the high 5 bits into our TBL shuffle <nl> - / / <nl> - <nl> - / / structurals are { 0x7b } 0x7d : 0x3a [ 0x5b ] 0x5d , 0x2c <nl> - / / or in 5 bit , 3 bit form thats <nl> - / / ( 15 , 3 ) ( 15 , 5 ) ( 7 , 2 ) ( 11 , 3 ) ( 11 , 5 ) ( 5 , 4 ) <nl> - / / bit - reversing ( subtract low 3 bits from 7 ) yields : <nl> - / / ( 15 , 4 ) ( 15 , 2 ) ( 7 , 5 ) ( 11 , 4 ) ( 11 , 2 ) ( 5 , 3 ) <nl> - <nl> - const uint8x16_t structural_bitvec = ( uint8x16_t ) { <nl> - 0 , 0 , 0 , 0 , <nl> - 0 , 8 , 0 , 32 , <nl> - 0 , 0 , 0 , 20 , <nl> - 0 , 0 , 0 , 20 } ; <nl> - / / we are also interested in the four whitespace characters <nl> - / / space 0x20 , linefeed 0x0a , horizontal tab 0x09 and carriage return 0x0d <nl> - / / ( 4 , 0 ) ( 1 , 2 ) ( 1 , 1 ) ( 1 , 5 ) <nl> - / / bit - reversing ( subtract low 3 bits from 7 ) yields : <nl> - / / ( 4 , 7 ) ( 1 , 5 ) ( 1 , 6 ) ( 1 , 2 ) <nl> - <nl> - const uint8x16_t whitespace_bitvec = ( uint8x16_t ) { <nl> - 0 , 100 , 0 , 0 , <nl> - 128 , 0 , 0 , 0 , <nl> - 0 , 0 , 0 , 0 , <nl> - 0 , 0 , 0 , 0 } ; <nl> - const uint8x16_t low_3bits_and_mask = vmovq_n_u8 ( 0x7 ) ; <nl> - const uint8x16_t high_1bit_tst_mask = vmovq_n_u8 ( 0x80 ) ; <nl> - <nl> - int8x16_t low_3bits_0 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . val [ 0 ] , low_3bits_and_mask ) ) ; <nl> - uint8x16_t high_5bits_0 = vshrq_n_u8 ( in . i . val [ 0 ] , 3 ) ; <nl> - uint8x16_t shuffle_structural_0 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_0 ) , low_3bits_0 ) ; <nl> - uint8x16_t shuffle_ws_0 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_0 ) , low_3bits_0 ) ; <nl> - uint8x16_t tmp_0 = vtstq_u8 ( shuffle_structural_0 , high_1bit_tst_mask ) ; <nl> - uint8x16_t tmp_ws_0 = vtstq_u8 ( shuffle_ws_0 , high_1bit_tst_mask ) ; <nl> - <nl> - int8x16_t low_3bits_1 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . val [ 1 ] , low_3bits_and_mask ) ) ; <nl> - uint8x16_t high_5bits_1 = vshrq_n_u8 ( in . i . val [ 1 ] , 3 ) ; <nl> - uint8x16_t shuffle_structural_1 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_1 ) , low_3bits_1 ) ; <nl> - uint8x16_t shuffle_ws_1 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_1 ) , low_3bits_1 ) ; <nl> - uint8x16_t tmp_1 = vtstq_u8 ( shuffle_structural_1 , high_1bit_tst_mask ) ; <nl> - uint8x16_t tmp_ws_1 = vtstq_u8 ( shuffle_ws_1 , high_1bit_tst_mask ) ; <nl> - <nl> - int8x16_t low_3bits_2 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . val [ 2 ] , low_3bits_and_mask ) ) ; <nl> - uint8x16_t high_5bits_2 = vshrq_n_u8 ( in . i . val [ 2 ] , 3 ) ; <nl> - uint8x16_t shuffle_structural_2 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_2 ) , low_3bits_2 ) ; <nl> - uint8x16_t shuffle_ws_2 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_2 ) , low_3bits_2 ) ; <nl> - uint8x16_t tmp_2 = vtstq_u8 ( shuffle_structural_2 , high_1bit_tst_mask ) ; <nl> - uint8x16_t tmp_ws_2 = vtstq_u8 ( shuffle_ws_2 , high_1bit_tst_mask ) ; <nl> - <nl> - int8x16_t low_3bits_3 = vreinterpretq_s8_u8 ( vandq_u8 ( in . i . val [ 3 ] , low_3bits_and_mask ) ) ; <nl> - uint8x16_t high_5bits_3 = vshrq_n_u8 ( in . i . 
val [ 3 ] , 3 ) ; <nl> - uint8x16_t shuffle_structural_3 = vshlq_u8 ( vqtbl1q_u8 ( structural_bitvec , high_5bits_3 ) , low_3bits_3 ) ; <nl> - uint8x16_t shuffle_ws_3 = vshlq_u8 ( vqtbl1q_u8 ( whitespace_bitvec , high_5bits_3 ) , low_3bits_3 ) ; <nl> - uint8x16_t tmp_3 = vtstq_u8 ( shuffle_structural_3 , high_1bit_tst_mask ) ; <nl> - uint8x16_t tmp_ws_3 = vtstq_u8 ( shuffle_ws_3 , high_1bit_tst_mask ) ; <nl> - <nl> - structurals = neonmovemask_bulk ( tmp_0 , tmp_1 , tmp_2 , tmp_3 ) ; <nl> - whitespace = neonmovemask_bulk ( tmp_ws_0 , tmp_ws_1 , tmp_ws_2 , tmp_ws_3 ) ; <nl> - # endif <nl> - # else <nl> - # warning It appears that neither ARM NEON nor AVX2 are detected . <nl> - # endif <nl> - } <nl> - <nl> - <nl> - # ifdef SIMDJSON_NAIVE_FLATTEN / / useful for benchmarking <nl> - / / <nl> - / / This is just a naive implementation . It should be normally <nl> - / / disable , but can be used for research purposes to compare <nl> - / / again our optimized version . <nl> - really_inline void flatten_bits ( uint32_t * base_ptr , uint32_t & base , <nl> - uint32_t idx , uint64_t bits ) { <nl> - uint32_t * out_ptr = base_ptr + base ; <nl> - idx - = 64 ; <nl> - while ( bits ! = 0 ) { <nl> - out_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - out_ptr + + ; <nl> - } <nl> - base = ( out_ptr - base_ptr ) ; <nl> - } <nl> - <nl> - # else <nl> - / / flatten out values in ' bits ' assuming that they are are to have values of idx <nl> - / / plus their position in the bitvector , and store these indexes at <nl> - / / base_ptr [ base ] incrementing base as we go <nl> - / / will potentially store extra values beyond end of valid bits , so base_ptr <nl> - / / needs to be large enough to handle this <nl> - really_inline void flatten_bits ( uint32_t * base_ptr , uint32_t & base , <nl> - uint32_t idx , uint64_t bits ) { <nl> - / / In some instances , the next branch is expensive because it is mispredicted . <nl> - / / Unfortunately , in other cases , <nl> - / / it helps tremendously . <nl> - if ( bits = = 0 ) return ; <nl> - uint32_t cnt = hamming ( bits ) ; <nl> - uint32_t next_base = base + cnt ; <nl> - idx - = 64 ; <nl> - base_ptr + = base ; <nl> - { <nl> - base_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 1 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 2 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 3 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 4 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 5 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 6 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 7 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr + = 8 ; <nl> - } <nl> - / / We hope that the next branch is easily predicted . 
<nl> - if ( cnt > 8 ) { <nl> - base_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 1 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 2 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 3 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 4 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 5 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 6 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr [ 7 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr + = 8 ; <nl> - } <nl> - if ( cnt > 16 ) { / / unluckly : we rarely get here <nl> - / / since it means having one structural or pseudo - structral element <nl> - / / every 4 characters ( possible with inputs like " " , " " , " " , . . . ) . <nl> - do { <nl> - base_ptr [ 0 ] = idx + trailingzeroes ( bits ) ; <nl> - bits = bits & ( bits - 1 ) ; <nl> - base_ptr + + ; <nl> - } while ( bits ! = 0 ) ; <nl> - } <nl> - base = next_base ; <nl> - } <nl> - # endif <nl> - <nl> - / / return a updated structural bit vector with quoted contents cleared out and <nl> - / / pseudo - structural characters added to the mask <nl> - / / updates prev_iter_ends_pseudo_pred which tells us whether the previous <nl> - / / iteration ended on a whitespace or a structural character ( which means that <nl> - / / the next iteration <nl> - / / will have a pseudo - structural character at its start ) <nl> - really_inline uint64_t finalize_structurals ( <nl> - uint64_t structurals , uint64_t whitespace , uint64_t quote_mask , <nl> - uint64_t quote_bits , uint64_t & prev_iter_ends_pseudo_pred ) { <nl> - / / mask off anything inside quotes <nl> - structurals & = ~ quote_mask ; <nl> - / / add the real quote bits back into our bitmask as well , so we can <nl> - / / quickly traverse the strings we ' ve spent all this trouble gathering <nl> - structurals | = quote_bits ; <nl> - / / Now , establish " pseudo - structural characters " . These are non - whitespace <nl> - / / characters that are ( a ) outside quotes and ( b ) have a predecessor that ' s <nl> - / / either whitespace or a structural character . This means that subsequent <nl> - / / passes will get a chance to encounter the first character of every string <nl> - / / of non - whitespace and , if we ' re parsing an atom like true / false / null or a <nl> - / / number we can stop at the first whitespace or structural character <nl> - / / following it . <nl> - <nl> - / / a qualified predecessor is something that can happen 1 position before an <nl> - / / pseudo - structural character <nl> - uint64_t pseudo_pred = structurals | whitespace ; <nl> - <nl> - uint64_t shifted_pseudo_pred = <nl> - ( pseudo_pred < < 1 ) | prev_iter_ends_pseudo_pred ; <nl> - prev_iter_ends_pseudo_pred = pseudo_pred > > 63 ; <nl> - uint64_t pseudo_structurals = <nl> - shifted_pseudo_pred & ( ~ whitespace ) & ( ~ quote_mask ) ; <nl> - structurals | = pseudo_structurals ; <nl> - <nl> - / / now , we ' ve used our close quotes all we need to . So let ' s switch them off <nl> - / / they will be off in the quote mask and on in quote bits . 
<nl> - structurals & = ~ ( quote_bits & ~ quote_mask ) ; <nl> - return structurals ; <nl> - } <nl> - <nl> - WARN_UNUSED <nl> - / * never_inline * / int find_structural_bits ( const uint8_t * buf , size_t len , <nl> - ParsedJson & pj ) { <nl> - if ( len > pj . bytecapacity ) { <nl> - std : : cerr < < " Your ParsedJson object only supports documents up to " <nl> - < < pj . bytecapacity < < " bytes but you are trying to process " < < len <nl> - < < " bytes " < < std : : endl ; <nl> - return simdjson : : CAPACITY ; <nl> - } <nl> - uint32_t * base_ptr = pj . structural_indexes ; <nl> - uint32_t base = 0 ; <nl> - # ifdef SIMDJSON_UTF8VALIDATE <nl> - __m256i has_error = _mm256_setzero_si256 ( ) ; <nl> - struct avx_processed_utf_bytes previous { } ; <nl> - previous . rawbytes = _mm256_setzero_si256 ( ) ; <nl> - previous . high_nibbles = _mm256_setzero_si256 ( ) ; <nl> - previous . carried_continuations = _mm256_setzero_si256 ( ) ; <nl> - # endif <nl> - <nl> - / / we have padded the input out to 64 byte multiple with the remainder being <nl> - / / zeros <nl> - <nl> - / / persistent state across loop <nl> - / / does the last iteration end with an odd - length sequence of backslashes ? <nl> - / / either 0 or 1 , but a 64 - bit value <nl> - uint64_t prev_iter_ends_odd_backslash = 0ULL ; <nl> - / / does the previous iteration end inside a double - quote pair ? <nl> - uint64_t prev_iter_inside_quote = 0ULL ; / / either all zeros or all ones <nl> - / / does the previous iteration end on something that is a predecessor of a <nl> - / / pseudo - structural character - i . e . whitespace or a structural character <nl> - / / effectively the very first char is considered to follow " whitespace " for <nl> - / / the <nl> - / / purposes of pseudo - structural character detection so we initialize to 1 <nl> - uint64_t prev_iter_ends_pseudo_pred = 1ULL ; <nl> - <nl> - / / structurals are persistent state across loop as we flatten them on the <nl> - / / subsequent iteration into our array pointed to be base_ptr . <nl> - / / This is harmless on the first iteration as structurals = = 0 <nl> - / / and is done for performance reasons ; we can hide some of the latency of the <nl> - / / expensive carryless multiply in the previous step with this work <nl> - uint64_t structurals = 0 ; <nl> - <nl> - size_t lenminus64 = len < 64 ? 
0 : len - 64 ; <nl> - size_t idx = 0 ; <nl> - uint64_t error_mask = 0 ; / / for unescaped characters within strings ( ASCII code points < 0x20 ) <nl> - <nl> - for ( ; idx < lenminus64 ; idx + = 64 ) { <nl> - # ifndef _MSC_VER <nl> - __builtin_prefetch ( buf + idx + 128 ) ; <nl> - # endif <nl> - simd_input in = fill_input ( buf + idx ) ; <nl> - # ifdef SIMDJSON_UTF8VALIDATE <nl> - check_utf8 ( in , has_error , previous ) ; <nl> - # endif <nl> - / / detect odd sequences of backslashes <nl> - uint64_t odd_ends = find_odd_backslash_sequences ( <nl> - in , prev_iter_ends_odd_backslash ) ; <nl> - <nl> - / / detect insides of quote pairs ( " quote_mask " ) and also our quote_bits <nl> - / / themselves <nl> - uint64_t quote_bits ; <nl> - uint64_t quote_mask = find_quote_mask_and_bits ( <nl> - in , odd_ends , prev_iter_inside_quote , quote_bits , error_mask ) ; <nl> - <nl> - / / take the previous iterations structural bits , not our current iteration , <nl> - / / and flatten <nl> - flatten_bits ( base_ptr , base , idx , structurals ) ; <nl> - <nl> - uint64_t whitespace ; <nl> - find_whitespace_and_structurals ( in , whitespace , structurals ) ; <nl> - <nl> - / / fixup structurals to reflect quotes and add pseudo - structural characters <nl> - structurals = finalize_structurals ( structurals , whitespace , quote_mask , <nl> - quote_bits , prev_iter_ends_pseudo_pred ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / <nl> - / / / we use a giant copy - paste which is ugly . <nl> - / / / but otherwise the string needs to be properly padded or else we <nl> - / / / risk invalidating the UTF - 8 checks . <nl> - / / / / / / / / / / / / <nl> - if ( idx < len ) { <nl> - uint8_t tmpbuf [ 64 ] ; <nl> - memset ( tmpbuf , 0x20 , 64 ) ; <nl> - memcpy ( tmpbuf , buf + idx , len - idx ) ; <nl> - simd_input in = fill_input ( tmpbuf ) ; <nl> - # ifdef SIMDJSON_UTF8VALIDATE <nl> - check_utf8 ( in , has_error , previous ) ; <nl> - # endif <nl> - <nl> - / / detect odd sequences of backslashes <nl> - uint64_t odd_ends = find_odd_backslash_sequences ( <nl> - in , prev_iter_ends_odd_backslash ) ; <nl> - <nl> - / / detect insides of quote pairs ( " quote_mask " ) and also our quote_bits <nl> - / / themselves <nl> - uint64_t quote_bits ; <nl> - uint64_t quote_mask = find_quote_mask_and_bits ( <nl> - in , odd_ends , prev_iter_inside_quote , quote_bits , error_mask ) ; <nl> - <nl> - / / take the previous iterations structural bits , not our current iteration , <nl> - / / and flatten <nl> - flatten_bits ( base_ptr , base , idx , structurals ) ; <nl> - <nl> - uint64_t whitespace ; <nl> - find_whitespace_and_structurals ( in , whitespace , structurals ) ; <nl> - <nl> - / / fixup structurals to reflect quotes and add pseudo - structural characters <nl> - structurals = finalize_structurals ( structurals , whitespace , quote_mask , <nl> - quote_bits , prev_iter_ends_pseudo_pred ) ; <nl> - idx + = 64 ; <nl> - } <nl> - <nl> - / / is last string quote closed ? <nl> - if ( prev_iter_inside_quote ) { <nl> - return simdjson : : UNCLOSED_STRING ; <nl> - } <nl> - <nl> - / / finally , flatten out the remaining structurals from the last iteration <nl> - flatten_bits ( base_ptr , base , idx , structurals ) ; <nl> - <nl> - pj . n_structural_indexes = base ; <nl> - / / a valid JSON file cannot have zero structural indexes - we should have <nl> - / / found something <nl> - if ( pj . n_structural_indexes = = 0u ) { <nl> - fprintf ( stderr , " Empty document ? \ n " ) ; <nl> - return simdjson : : EMPTY ; <nl> - } <nl> - if ( base_ptr [ pj . 
n_structural_indexes - 1 ] > len ) { <nl> - fprintf ( stderr , " Internal bug \ n " ) ; <nl> - return simdjson : : UNEXPECTED_ERROR ; <nl> - } <nl> - if ( len ! = base_ptr [ pj . n_structural_indexes - 1 ] ) { <nl> - / / the string might not be NULL terminated , but we add a virtual NULL ending <nl> - / / character . <nl> - base_ptr [ pj . n_structural_indexes + + ] = len ; <nl> - } <nl> - / / make it safe to dereference one beyond this array <nl> - base_ptr [ pj . n_structural_indexes ] = 0 ; <nl> - if ( error_mask ) { <nl> - fprintf ( stderr , " Unescaped characters \ n " ) ; <nl> - return simdjson : : UNESCAPED_CHARS ; <nl> - } <nl> - # ifdef SIMDJSON_UTF8VALIDATE <nl> - return _mm256_testz_si256 ( has_error , has_error ) = = 0 ? simdjson : : UTF8_ERROR : simdjson : : SUCCESS ; <nl> - # else <nl> - return simdjson : : SUCCESS ; <nl> - # endif <nl> - } <nl> - <nl> - int find_structural_bits ( const char * buf , size_t len , ParsedJson & pj ) { <nl> - return find_structural_bits ( reinterpret_cast < const uint8_t * > ( buf ) , len , pj ) ; <nl> - } <nl> + / / File kept in case we want to reuse it soon . ( many configuration files to edit ) <nl> mmm a / tools / jsonstats . cpp <nl> ppp b / tools / jsonstats . cpp <nl> stat_t simdjson_computestats ( const padded_string & p ) { <nl> <nl> <nl> int main ( int argc , char * argv [ ] ) { <nl> - int optind = 1 ; <nl> - if ( optind > = argc ) { <nl> + int myoptind = 1 ; <nl> + if ( myoptind > = argc ) { <nl> std : : cerr < < " Reads json , prints stats . " < < std : : endl ; <nl> std : : cerr < < " Usage : " < < argv [ 0 ] < < " < jsonfile > " < < std : : endl ; <nl> exit ( 1 ) ; <nl> } <nl> - const char * filename = argv [ optind ] ; <nl> - if ( optind + 1 < argc ) { <nl> - std : : cerr < < " warning : ignoring everything after " < < argv [ optind + 1 ] < < std : : endl ; <nl> + const char * filename = argv [ myoptind ] ; <nl> + if ( myoptind + 1 < argc ) { <nl> + std : : cerr < < " warning : ignoring everything after " < < argv [ myoptind + 1 ] < < std : : endl ; <nl> } <nl> padded_string p ; <nl> try { <nl> | Merge pull request from lemire / multiple_implementation_refactoring | simdjson/simdjson | 78406ba954016e4cd192d053fa1dde2e062b7bd2 | 2019-07-02T14:42:51Z |
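Editorial note: the refactoring above routes all parsing through a global function pointer, json_parse_ptr, that initially targets json_parse_dispatch; the dispatcher picks an implementation, rebinds the pointer, and forwards the call, so subsequent calls skip the selection logic entirely. Below is a minimal sketch of that self-rebinding trampoline pattern. The names (parse_fast, parse_fallback, parse_ptr) are illustrative stand-ins, not simdjson's, and the CPU-feature probe is hard-coded.

#include <cstdio>

using parse_fn = int(const char *buf, int len);

static int parse_fast(const char *buf, int len) {
  (void)buf;
  std::printf("fast implementation handled %d bytes\n", len);
  return 0;
}

static int parse_fallback(const char *buf, int len) {
  (void)buf;
  std::printf("fallback implementation handled %d bytes\n", len);
  return 0;
}

static int parse_dispatch(const char *buf, int len);

// All parsing goes through this pointer; it starts at the dispatcher.
static parse_fn *parse_ptr = &parse_dispatch;

static int parse_dispatch(const char *buf, int len) {
  // A real dispatcher would probe CPU features here (cpuid, getauxval);
  // we hard-code the answer for illustration.
  const bool fast_supported = true;
  parse_ptr = fast_supported ? &parse_fast : &parse_fallback;
  return parse_ptr(buf, len);  // forward the very first call
}

int main() {
  const char doc[] = "{\"key\":1}";
  parse_ptr(doc, static_cast<int>(sizeof(doc) - 1));  // routed via dispatcher once
  parse_ptr(doc, static_cast<int>(sizeof(doc) - 1));  // now hits parse_fast directly
  return 0;
}

One caveat worth noting: a plain global pointer rebound on first use is not thread-safe; concurrent first calls race on the write, which a production dispatcher would address with std::atomic or a std::once_flag.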
mmm a / src / qtlibtorrent / qtorrenthandle . cpp <nl> ppp b / src / qtlibtorrent / qtorrenthandle . cpp <nl> float QTorrentHandle : : progress ( ) const { <nl> <nl> bitfield QTorrentHandle : : pieces ( ) const { <nl> # if LIBTORRENT_VERSION_MINOR > 15 <nl> - return torrent_handle : : status ( 0x0 ) . pieces ; <nl> + return torrent_handle : : status ( torrent_handle : : query_pieces ) . pieces ; <nl> # else <nl> return torrent_handle : : status ( ) . pieces ; <nl> # endif <nl> | Pass the correct flag in torrent_handle : : status for libtorrent > 0 . 15 | qbittorrent/qBittorrent | 0fa0be2d0ee7b08e9c33d73b5cb35758d72d88ac | 2013-03-10T15:08:40Z |
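Editorial note: the one-line fix above matters because, in libtorrent 0.16 and later, torrent_handle::status() takes a bitmask of query flags and only populates the optional, more expensive status fields whose bits are set; passing 0x0 asked for none of them, so the returned pieces bitfield stayed empty. A small self-contained sketch of that query-flags idiom follows; the names (status_flags, torrent_status, get_status) are illustrative stand-ins, not libtorrent's actual types.

#include <cstdint>
#include <iostream>
#include <vector>

enum status_flags : std::uint32_t {
  query_pieces = 1u << 0,          // caller wants the per-piece bitfield
  query_verified_pieces = 1u << 1, // another hypothetical expensive field
};

struct torrent_status {
  std::vector<bool> pieces;  // left empty unless query_pieces is set
};

torrent_status get_status(std::uint32_t flags) {
  torrent_status s;
  if (flags & query_pieces) {
    s.pieces = {true, false, true, true};  // pretend piece availability
  }
  return s;
}

int main() {
  torrent_status buggy = get_status(0x0);           // the old call: asks for nothing
  torrent_status fixed = get_status(query_pieces);  // the fixed call
  std::cout << "status(0x0) pieces: " << buggy.pieces.size() << "\n";
  std::cout << "status(query_pieces) pieces: " << fixed.pieces.size() << "\n";
  return 0;
}

The design lets callers that poll status frequently avoid paying for fields they never read, which is exactly why the flag must be spelled out when the field is wanted.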
mmm a / src / worker . js <nl> ppp b / src / worker . js <nl> <nl> var threadInfoStruct = 0 ; / / Info area for this thread in Emscripten HEAP ( shared ) . If zero , this worker is not currently hosting an executing pthread . <nl> var selfThreadId = 0 ; / / The ID of this thread . 0 if not hosting a pthread . <nl> var parentThreadId = 0 ; / / The ID of the parent pthread that launched this thread . <nl> - # if ! WASM_BACKEND <nl> + # if ! WASM_BACKEND & & ! MODULARIZE <nl> var tempDoublePtr = 0 ; / / A temporary memory area for global float and double marshalling operations . <nl> # endif <nl> <nl> this . addEventListener ( ' error ' , function ( e ) { <nl> console . error ( e . error ) ; <nl> } ) ; <nl> <nl> - function threadPrint ( ) { <nl> - var text = Array . prototype . slice . call ( arguments ) . join ( ' ' ) ; <nl> - console . log ( text ) ; <nl> - } <nl> function threadPrintErr ( ) { <nl> var text = Array . prototype . slice . call ( arguments ) . join ( ' ' ) ; <nl> console . error ( text ) ; <nl> function threadAlert ( ) { <nl> var text = Array . prototype . slice . call ( arguments ) . join ( ' ' ) ; <nl> postMessage ( { cmd : ' alert ' , text : text , threadId : selfThreadId } ) ; <nl> } <nl> - out = threadPrint ; <nl> - err = threadPrintErr ; <nl> + var err = threadPrintErr ; <nl> this . alert = threadAlert ; <nl> <nl> # if WASM <nl> Module [ ' instantiateWasm ' ] = function ( info , receiveInstance ) { <nl> / / Instantiate from the module posted from the main thread . <nl> / / We can just use sync instantiation in the worker . <nl> - instance = new WebAssembly . Instance ( wasmModule , info ) ; <nl> + var instance = new WebAssembly . Instance ( wasmModule , info ) ; <nl> / / We don ' t need the module anymore ; new threads will be spawned from the main thread . <nl> wasmModule = null ; <nl> receiveInstance ( instance ) ; / / The second ' module ' parameter is intentionally null here , we don ' t need to keep a ref to the Module object from here . <nl> return instance . exports ; <nl> - } <nl> + } ; <nl> # endif <nl> <nl> var wasmModule ; <nl> this . onmessage = function ( e ) { <nl> console . error ( e . stack ) ; <nl> throw e ; <nl> } <nl> - } <nl> + } ; <nl> | Clean up worker . js a bit ( ) | emscripten-core/emscripten | 92a971b1630960005747530b1ff61151e8616f72 | 2019-07-25T19:18:41Z |
mmm a / tensorflow / core / lib / wav / wav_io . cc <nl> ppp b / tensorflow / core / lib / wav / wav_io . cc <nl> inline float Int16SampleToFloat ( int16 data ) { <nl> return data * kMultiplier ; <nl> } <nl> <nl> + } / / namespace <nl> + <nl> + / / Handles moving the data index forward , validating the arguments , and avoiding <nl> + / / overflow or underflow . <nl> + Status IncrementOffset ( int old_offset , size_t increment , size_t max_size , <nl> + int * new_offset ) { <nl> + if ( old_offset < 0 ) { <nl> + return errors : : InvalidArgument ( " Negative offsets are not allowed : " , <nl> + old_offset ) ; <nl> + } <nl> + if ( old_offset > max_size ) { <nl> + return errors : : InvalidArgument ( " Initial offset is outside data range : " , <nl> + old_offset ) ; <nl> + } <nl> + * new_offset = old_offset + increment ; <nl> + if ( * new_offset > max_size ) { <nl> + return errors : : InvalidArgument ( " Data too short when trying to read string " ) ; <nl> + } <nl> + / / See above for the check that the input offset is positive . If it ' s negative <nl> + / / here then it means that there ' s been an overflow in the arithmetic . <nl> + if ( * new_offset < 0 ) { <nl> + return errors : : InvalidArgument ( " Offset too large , overflowed : " , <nl> + * new_offset ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status ExpectText ( const string & data , const string & expected_text , <nl> int * offset ) { <nl> - const int new_offset = * offset + expected_text . size ( ) ; <nl> - if ( new_offset > data . size ( ) ) { <nl> - return errors : : InvalidArgument ( " Data too short when trying to read " , <nl> - expected_text ) ; <nl> - } <nl> + int new_offset ; <nl> + TF_RETURN_IF_ERROR ( <nl> + IncrementOffset ( * offset , expected_text . size ( ) , data . size ( ) , & new_offset ) ) ; <nl> const string found_text ( data . begin ( ) + * offset , data . begin ( ) + new_offset ) ; <nl> if ( found_text ! = expected_text ) { <nl> return errors : : InvalidArgument ( " Header mismatch : Expected " , expected_text , <nl> Status ExpectText ( const string & data , const string & expected_text , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - template < class T > <nl> - Status ReadValue ( const string & data , T * value , int * offset ) { <nl> - const int new_offset = * offset + sizeof ( T ) ; <nl> - if ( new_offset > data . size ( ) ) { <nl> - return errors : : InvalidArgument ( " Data too short when trying to read value " ) ; <nl> - } <nl> - if ( port : : kLittleEndian ) { <nl> - memcpy ( value , data . data ( ) + * offset , sizeof ( T ) ) ; <nl> - } else { <nl> - * value = 0 ; <nl> - const uint8 * data_buf = <nl> - reinterpret_cast < const uint8 * > ( data . data ( ) + * offset ) ; <nl> - int shift = 0 ; <nl> - for ( int i = 0 ; i < sizeof ( T ) ; + + i , shift + = 8 ) { <nl> - * value = * value | ( data_buf [ i ] < < shift ) ; <nl> - } <nl> - } <nl> - * offset = new_offset ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> Status ReadString ( const string & data , int expected_length , string * value , <nl> int * offset ) { <nl> - const int new_offset = * offset + expected_length ; <nl> - if ( new_offset > data . size ( ) ) { <nl> - return errors : : InvalidArgument ( " Data too short when trying to read string " ) ; <nl> - } <nl> + int new_offset ; <nl> + TF_RETURN_IF_ERROR ( <nl> + IncrementOffset ( * offset , expected_length , data . size ( ) , & new_offset ) ) ; <nl> * value = string ( data . begin ( ) + * offset , data . 
begin ( ) + new_offset ) ; <nl> * offset = new_offset ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - } / / namespace <nl> - <nl> Status EncodeAudioAsS16LEWav ( const float * audio , size_t sample_rate , <nl> size_t num_channels , size_t num_frames , <nl> string * wav_string ) { <nl> Status DecodeLin16WaveAsFloatVector ( const string & wav_string , <nl> TF_RETURN_IF_ERROR ( ReadString ( wav_string , 4 , & chunk_id , & offset ) ) ; <nl> uint32 chunk_size ; <nl> TF_RETURN_IF_ERROR ( ReadValue < uint32 > ( wav_string , & chunk_size , & offset ) ) ; <nl> + if ( chunk_size > std : : numeric_limits < int32 > : : max ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " WAV data chunk ' " , chunk_id , " ' is too large : " , chunk_size , <nl> + " bytes , but the limit is " , std : : numeric_limits < int32 > : : max ( ) ) ; <nl> + } <nl> if ( chunk_id = = kDataChunkId ) { <nl> if ( was_data_found ) { <nl> return errors : : InvalidArgument ( " More than one data chunk found in WAV " ) ; <nl> mmm a / tensorflow / core / lib / wav / wav_io . h <nl> ppp b / tensorflow / core / lib / wav / wav_io . h <nl> limitations under the License . <nl> # include < string > <nl> # include < vector > <nl> <nl> + # include " tensorflow / core / lib / core / casts . h " <nl> + # include " tensorflow / core / lib / core / coding . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> Status DecodeLin16WaveAsFloatVector ( const string & wav_string , <nl> uint32 * sample_count , uint16 * channel_count , <nl> uint32 * sample_rate ) ; <nl> <nl> + / / Everything below here is only exposed publicly for testing purposes . <nl> + <nl> + / / Handles moving the data index forward , validating the arguments , and avoiding <nl> + / / overflow or underflow . <nl> + Status IncrementOffset ( int old_offset , size_t increment , size_t max_size , <nl> + int * new_offset ) ; <nl> + <nl> + / / This function is only exposed in the header for testing purposes , as a <nl> + / / template that needs to be instantiated . Reads a typed numeric value from a <nl> + / / stream of data . <nl> + template < class T > <nl> + Status ReadValue ( const string & data , T * value , int * offset ) { <nl> + int new_offset ; <nl> + TF_RETURN_IF_ERROR ( <nl> + IncrementOffset ( * offset , sizeof ( T ) , data . size ( ) , & new_offset ) ) ; <nl> + if ( port : : kLittleEndian ) { <nl> + memcpy ( value , data . data ( ) + * offset , sizeof ( T ) ) ; <nl> + } else { <nl> + * value = 0 ; <nl> + const uint8 * data_buf = <nl> + reinterpret_cast < const uint8 * > ( data . data ( ) + * offset ) ; <nl> + int shift = 0 ; <nl> + for ( int i = 0 ; i < sizeof ( T ) ; + + i , shift + = 8 ) { <nl> + * value = * value | ( data_buf [ i ] < < shift ) ; <nl> + } <nl> + } <nl> + * offset = new_offset ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> } / / namespace wav <nl> } / / namespace tensorflow <nl> <nl> mmm a / tensorflow / core / lib / wav / wav_io_test . cc <nl> ppp b / tensorflow / core / lib / wav / wav_io_test . cc <nl> limitations under the License . <nl> namespace tensorflow { <nl> namespace wav { <nl> <nl> + / / These are defined in wav_io . cc , and the signatures are here so we don ' t have <nl> + / / to expose them in the public header . 
<nl> + Status ExpectText ( const string & data , const string & expected_text , int * offset ) ; <nl> + Status ReadString ( const string & data , int expected_length , string * value , <nl> + int * offset ) ; <nl> + <nl> TEST ( WavIO , BadArguments ) { <nl> float audio [ ] = { 0 . 0f , 0 . 1f , 0 . 2f , 0 . 3f , 0 . 4f , 0 . 5f } ; <nl> string result ; <nl> TEST ( WavIO , BasicStereo ) { <nl> EXPECT_EQ ( expected , result ) ; <nl> } <nl> <nl> + / / Test how chunk sizes larger than 2GB are handled , since they ' re stored as <nl> + / / unsigned int32s , so there are lots of ways for conversions to confuse the <nl> + / / decoding logic . The expected behavior is to fail with an error , since such <nl> + / / large WAV files are not common , and are unsupported by many readers . <nl> + / / See b / 72655902 . <nl> + TEST ( WavIO , ChunkSizeOverflow ) { <nl> + std : : vector < uint8 > wav_data = { <nl> + ' R ' , ' I ' , ' F ' , ' F ' , / / ChunkID <nl> + 60 , 0 , 0 , 0 , / / ChunkSize : 36 + SubChunk2Size <nl> + ' W ' , ' A ' , ' V ' , ' E ' , / / Format <nl> + ' f ' , ' m ' , ' t ' , ' ' , / / Subchunk1ID <nl> + 16 , 0 , 0 , 0 , / / Subchunk1Size <nl> + 1 , 0 , / / AudioFormat : 1 = PCM <nl> + 1 , 0 , / / NumChannels <nl> + 0x44 , 0xac , 0 , 0 , / / SampleRate : 44100 <nl> + 0x88 , 0x58 , 0x1 , 0 , / / BytesPerSecond : SampleRate * NumChannels * <nl> + / / BitsPerSample / 8 <nl> + 2 , 0 , / / BytesPerSample : NumChannels * BitsPerSample / 8 <nl> + 16 , 0 , / / BitsPerSample <nl> + ' d ' , ' a ' , ' t ' , ' a ' , / / Subchunk2ID <nl> + 8 , 0 , 0 , 0 , / / Subchunk2Size : NumSamples * NumChannels * <nl> + / / BitsPerSample / 8 <nl> + 0 , 0 , / / Sample 1 : 0 <nl> + 0xff , 0x7f , / / Sample 2 : 32767 ( saturated ) <nl> + 0 , 0 , / / Sample 3 : 0 <nl> + 0x00 , 0x80 , / / Sample 4 : - 32768 ( saturated ) <nl> + ' f ' , ' o ' , ' o ' , ' o ' , / / Subchunk2ID <nl> + 0xff , 0xff , 0xff , 0xf8 , / / Chunk size that could cause an infinite loop . <nl> + 0 , 0 , / / Sample 1 : 0 <nl> + 0xff , 0x7f , / / Sample 2 : 32767 ( saturated ) <nl> + 0 , 0 , / / Sample 3 : 0 <nl> + 0x00 , 0x80 , / / Sample 4 : - 32768 ( saturated ) <nl> + } ; <nl> + string wav_data_string ( wav_data . begin ( ) , wav_data . end ( ) ) ; <nl> + std : : vector < float > decoded_audio ; <nl> + uint32 decoded_sample_count ; <nl> + uint16 decoded_channel_count ; <nl> + uint32 decoded_sample_rate ; <nl> + Status decode_status = DecodeLin16WaveAsFloatVector ( <nl> + wav_data_string , & decoded_audio , & decoded_sample_count , <nl> + & decoded_channel_count , & decoded_sample_rate ) ; <nl> + EXPECT_FALSE ( decode_status . ok ( ) ) ; <nl> + EXPECT_TRUE ( StringPiece ( decode_status . error_message ( ) ) . contains ( " too large " ) ) <nl> + < < decode_status . error_message ( ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , IncrementOffset ) { <nl> + int new_offset = - 1 ; <nl> + TF_EXPECT_OK ( IncrementOffset ( 0 , 10 , 20 , & new_offset ) ) ; <nl> + EXPECT_EQ ( 10 , new_offset ) ; <nl> + <nl> + new_offset = - 1 ; <nl> + TF_EXPECT_OK ( IncrementOffset ( 10 , 4 , 20 , & new_offset ) ) ; <nl> + EXPECT_EQ ( 14 , new_offset ) ; <nl> + <nl> + new_offset = - 1 ; <nl> + TF_EXPECT_OK ( IncrementOffset ( 99 , 1 , 100 , & new_offset ) ) ; <nl> + EXPECT_EQ ( 100 , new_offset ) ; <nl> + <nl> + new_offset = - 1 ; <nl> + EXPECT_FALSE ( IncrementOffset ( - 1 , 1 , 100 , & new_offset ) . ok ( ) ) ; <nl> + <nl> + new_offset = - 1 ; <nl> + EXPECT_FALSE ( IncrementOffset ( 0 , - 1 , 100 , & new_offset ) . 
ok ( ) ) ; <nl> + <nl> + new_offset = - 1 ; <nl> + EXPECT_FALSE ( IncrementOffset ( std : : numeric_limits < int > : : max ( ) , 1 , <nl> + std : : numeric_limits < int > : : max ( ) , & new_offset ) <nl> + . ok ( ) ) ; <nl> + <nl> + new_offset = - 1 ; <nl> + EXPECT_FALSE ( IncrementOffset ( 101 , 1 , 100 , & new_offset ) . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ExpectText ) { <nl> + std : : vector < uint8 > test_data = { <nl> + ' E ' , ' x ' , ' p ' , ' e ' , ' c ' , ' t ' , ' e ' , ' d ' , <nl> + } ; <nl> + string test_string ( test_data . begin ( ) , test_data . end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + TF_EXPECT_OK ( ExpectText ( test_string , " Expected " , & offset ) ) ; <nl> + EXPECT_EQ ( 8 , offset ) ; <nl> + <nl> + offset = 0 ; <nl> + Status expect_status = ExpectText ( test_string , " Unexpected " , & offset ) ; <nl> + EXPECT_FALSE ( expect_status . ok ( ) ) ; <nl> + <nl> + offset = 0 ; <nl> + TF_EXPECT_OK ( ExpectText ( test_string , " Exp " , & offset ) ) ; <nl> + EXPECT_EQ ( 3 , offset ) ; <nl> + TF_EXPECT_OK ( ExpectText ( test_string , " ected " , & offset ) ) ; <nl> + EXPECT_EQ ( 8 , offset ) ; <nl> + expect_status = ExpectText ( test_string , " foo " , & offset ) ; <nl> + EXPECT_FALSE ( expect_status . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ReadString ) { <nl> + std : : vector < uint8 > test_data = { <nl> + ' E ' , ' x ' , ' p ' , ' e ' , ' c ' , ' t ' , ' e ' , ' d ' , <nl> + } ; <nl> + string test_string ( test_data . begin ( ) , test_data . end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + string read_value ; <nl> + TF_EXPECT_OK ( ReadString ( test_string , 2 , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( " Ex " , read_value ) ; <nl> + EXPECT_EQ ( 2 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadString ( test_string , 6 , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( " pected " , read_value ) ; <nl> + EXPECT_EQ ( 8 , offset ) ; <nl> + <nl> + Status read_status = ReadString ( test_string , 3 , & read_value , & offset ) ; <nl> + EXPECT_FALSE ( read_status . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ReadValueInt8 ) { <nl> + std : : vector < uint8 > test_data = { 0x00 , 0x05 , 0xff , 0x80 } ; <nl> + string test_string ( test_data . begin ( ) , test_data . end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + int8 read_value ; <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 0 , read_value ) ; <nl> + EXPECT_EQ ( 1 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 5 , read_value ) ; <nl> + EXPECT_EQ ( 2 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( - 1 , read_value ) ; <nl> + EXPECT_EQ ( 3 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( - 128 , read_value ) ; <nl> + EXPECT_EQ ( 4 , offset ) ; <nl> + <nl> + Status read_status = ReadValue ( test_string , & read_value , & offset ) ; <nl> + EXPECT_FALSE ( read_status . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ReadValueUInt8 ) { <nl> + std : : vector < uint8 > test_data = { 0x00 , 0x05 , 0xff , 0x80 } ; <nl> + string test_string ( test_data . begin ( ) , test_data . 
end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + uint8 read_value ; <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 0 , read_value ) ; <nl> + EXPECT_EQ ( 1 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 5 , read_value ) ; <nl> + EXPECT_EQ ( 2 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 255 , read_value ) ; <nl> + EXPECT_EQ ( 3 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 128 , read_value ) ; <nl> + EXPECT_EQ ( 4 , offset ) ; <nl> + <nl> + Status read_status = ReadValue ( test_string , & read_value , & offset ) ; <nl> + EXPECT_FALSE ( read_status . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ReadValueInt16 ) { <nl> + std : : vector < uint8 > test_data = { <nl> + 0x00 , 0x00 , / / 0 <nl> + 0xff , 0x00 , / / 255 <nl> + 0x00 , 0x01 , / / 256 <nl> + 0xff , 0xff , / / - 1 <nl> + 0x00 , 0x80 , / / - 32768 <nl> + } ; <nl> + string test_string ( test_data . begin ( ) , test_data . end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + int16 read_value ; <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 0 , read_value ) ; <nl> + EXPECT_EQ ( 2 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 255 , read_value ) ; <nl> + EXPECT_EQ ( 4 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 256 , read_value ) ; <nl> + EXPECT_EQ ( 6 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( - 1 , read_value ) ; <nl> + EXPECT_EQ ( 8 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( - 32768 , read_value ) ; <nl> + EXPECT_EQ ( 10 , offset ) ; <nl> + <nl> + Status read_status = ReadValue ( test_string , & read_value , & offset ) ; <nl> + EXPECT_FALSE ( read_status . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ReadValueUInt16 ) { <nl> + std : : vector < uint8 > test_data = { <nl> + 0x00 , 0x00 , / / 0 <nl> + 0xff , 0x00 , / / 255 <nl> + 0x00 , 0x01 , / / 256 <nl> + 0xff , 0xff , / / 65535 <nl> + 0x00 , 0x80 , / / 32768 <nl> + } ; <nl> + string test_string ( test_data . begin ( ) , test_data . end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + uint16 read_value ; <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 0 , read_value ) ; <nl> + EXPECT_EQ ( 2 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 255 , read_value ) ; <nl> + EXPECT_EQ ( 4 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 256 , read_value ) ; <nl> + EXPECT_EQ ( 6 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 65535 , read_value ) ; <nl> + EXPECT_EQ ( 8 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 32768 , read_value ) ; <nl> + EXPECT_EQ ( 10 , offset ) ; <nl> + <nl> + Status read_status = ReadValue ( test_string , & read_value , & offset ) ; <nl> + EXPECT_FALSE ( read_status . 
ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ReadValueInt32 ) { <nl> + std : : vector < uint8 > test_data = { <nl> + 0x00 , 0x00 , 0x00 , 0x00 , / / 0 <nl> + 0xff , 0x00 , 0x00 , 0x00 , / / 255 <nl> + 0x00 , 0xff , 0x00 , 0x00 , / / 65280 <nl> + 0x00 , 0x00 , 0xff , 0x00 , / / 16 , 711 , 680 <nl> + 0xff , 0xff , 0xff , 0xff , / / - 1 <nl> + } ; <nl> + string test_string ( test_data . begin ( ) , test_data . end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + int32 read_value ; <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 0 , read_value ) ; <nl> + EXPECT_EQ ( 4 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 255 , read_value ) ; <nl> + EXPECT_EQ ( 8 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 65280 , read_value ) ; <nl> + EXPECT_EQ ( 12 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 16711680 , read_value ) ; <nl> + EXPECT_EQ ( 16 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( - 1 , read_value ) ; <nl> + EXPECT_EQ ( 20 , offset ) ; <nl> + <nl> + Status read_status = ReadValue ( test_string , & read_value , & offset ) ; <nl> + EXPECT_FALSE ( read_status . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( WavIO , ReadValueUInt32 ) { <nl> + std : : vector < uint8 > test_data = { <nl> + 0x00 , 0x00 , 0x00 , 0x00 , / / 0 <nl> + 0xff , 0x00 , 0x00 , 0x00 , / / 255 <nl> + 0x00 , 0xff , 0x00 , 0x00 , / / 65280 <nl> + 0x00 , 0x00 , 0xff , 0x00 , / / 16 , 711 , 680 <nl> + 0xff , 0xff , 0xff , 0xff , / / 4 , 294 , 967 , 295 <nl> + } ; <nl> + string test_string ( test_data . begin ( ) , test_data . end ( ) ) ; <nl> + <nl> + int offset = 0 ; <nl> + uint32 read_value ; <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 0 , read_value ) ; <nl> + EXPECT_EQ ( 4 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 255 , read_value ) ; <nl> + EXPECT_EQ ( 8 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 65280 , read_value ) ; <nl> + EXPECT_EQ ( 12 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 16711680 , read_value ) ; <nl> + EXPECT_EQ ( 16 , offset ) ; <nl> + <nl> + TF_EXPECT_OK ( ReadValue ( test_string , & read_value , & offset ) ) ; <nl> + EXPECT_EQ ( 4294967295 , read_value ) ; <nl> + EXPECT_EQ ( 20 , offset ) ; <nl> + <nl> + Status read_status = ReadValue ( test_string , & read_value , & offset ) ; <nl> + EXPECT_FALSE ( read_status . ok ( ) ) ; <nl> + } <nl> + <nl> } / / namespace wav <nl> } / / namespace tensorflow <nl> | Check for very large chunk sizes in WAV decoding | tensorflow/tensorflow | f9dc34df6d56d2bcb67b563ade81a3f12bbcacd2 | 2018-03-15T21:55:27Z |
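The ReadValue tests above pin down two behaviors: multi-byte WAV fields are little-endian regardless of host byte order, and offsets must be range-checked so a hostile chunk size cannot run past the buffer or overflow an int. Below is a minimal sketch of a decoder with both properties; it illustrates the technique only and is not TensorFlow's actual ReadValue/IncrementOffset implementation.

#include <cstdint>
#include <iostream>
#include <string>
#include <type_traits>

// Little-endian field decoding with overflow-checked offsets. Returns false
// (instead of a Status) on any out-of-range access.
template <typename T>
bool ReadLE(const std::string& data, T* value, int* offset) {
  static_assert(std::is_integral<T>::value, "integral fields only");
  if (*offset < 0) return false;
  if (data.size() < sizeof(T) ||
      static_cast<size_t>(*offset) > data.size() - sizeof(T))
    return false;  // rejects reads past the end without integer overflow
  uint64_t v = 0;
  for (size_t i = 0; i < sizeof(T); ++i)  // assemble bytes explicitly so the
    v |= static_cast<uint64_t>(static_cast<uint8_t>(data[*offset + i])) << (8 * i);
  *value = static_cast<T>(static_cast<typename std::make_unsigned<T>::type>(v));
  *offset += static_cast<int>(sizeof(T));
  return true;
}

int main() {
  std::string bytes("\x00\x80", 2);  // the int16 sample -32768 from the tests
  int offset = 0;
  int16_t v = 0;
  if (ReadLE(bytes, &v, &offset))
    std::cout << v << " (next offset " << offset << ")\n";  // -32768 (next offset 2)
}

The same bounds check is what turns the crafted chunk size 0xff,0xff,0xff,0xf8 (4294967288 as a uint32) in ChunkSizeOverflow into a clean error instead of an infinite decode loop.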
mmm a / libraries / chainbase <nl> ppp b / libraries / chainbase <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 4724baf2095cdc1bb1722254874b51070adf0e74 <nl> + Subproject commit 8ca96ad6b18709d65a7d1f67f8893978f25babcf <nl> | Merge pull request from EOSIO / chainbase_unused_locking_vars | EOSIO/eos | ba6a5aa36db4df61e79c95d19b4ad65a54a2d189 | 2018-10-03T17:14:03Z |
mmm a / tensorflow / c / experimental / filesystem / plugins / gcs / BUILD <nl> ppp b / tensorflow / c / experimental / filesystem / plugins / gcs / BUILD <nl> cc_library ( <nl> " / / tensorflow / c : tf_status " , <nl> " / / tensorflow / c / experimental / filesystem : filesystem_interface " , <nl> " @ com_github_googlecloudplatform_google_cloud_cpp / / : storage_client " , <nl> - " @ com_google_absl / / absl / strings " , <nl> ] , <nl> ) <nl> <nl> tf_cc_test ( <nl> " gcs_filesystem . cc " , <nl> " gcs_filesystem_test . cc " , <nl> ] , <nl> - local_defines = [ " TF_GCS_FILESYSTEM_TEST " ] , <nl> tags = [ <nl> " manual " , <nl> " notap " , <nl> mmm a / tensorflow / c / experimental / filesystem / plugins / gcs / gcs_filesystem . cc <nl> ppp b / tensorflow / c / experimental / filesystem / plugins / gcs / gcs_filesystem . cc <nl> static inline void TF_SetStatusFromGCSStatus ( <nl> static void * plugin_memory_allocate ( size_t size ) { return calloc ( 1 , size ) ; } <nl> static void plugin_memory_free ( void * ptr ) { free ( ptr ) ; } <nl> <nl> - void ParseGCSPath ( absl : : string_view fname , bool object_empty_ok , char * * bucket , <nl> - char * * object , TF_Status * status ) { <nl> + void ParseGCSPath ( const std : : string & fname , bool object_empty_ok , <nl> + std : : string & bucket , std : : string & object , TF_Status * status ) { <nl> size_t scheme_end = fname . find ( " : / / " ) + 2 ; <nl> if ( fname . substr ( 0 , scheme_end + 1 ) ! = " gs : / / " ) { <nl> TF_SetStatus ( status , TF_INVALID_ARGUMENT , <nl> void ParseGCSPath ( absl : : string_view fname , bool object_empty_ok , char * * bucket , <nl> } <nl> <nl> size_t bucket_end = fname . find ( " / " , scheme_end + 1 ) ; <nl> - if ( bucket_end = = absl : : string_view : : npos ) { <nl> + if ( bucket_end = = std : : string : : npos ) { <nl> TF_SetStatus ( status , TF_INVALID_ARGUMENT , <nl> " GCS path doesn ' t contain a bucket name . " ) ; <nl> return ; <nl> } <nl> - absl : : string_view bucket_view = <nl> - fname . substr ( scheme_end + 1 , bucket_end - scheme_end - 1 ) ; <nl> - * bucket = <nl> - static_cast < char * > ( plugin_memory_allocate ( bucket_view . length ( ) + 1 ) ) ; <nl> - memcpy ( * bucket , bucket_view . data ( ) , bucket_view . length ( ) ) ; <nl> - ( * bucket ) [ bucket_view . length ( ) ] = ' \ 0 ' ; <nl> - <nl> - absl : : string_view object_view = fname . substr ( bucket_end + 1 ) ; <nl> - if ( object_view . empty ( ) ) { <nl> - if ( object_empty_ok ) { <nl> - * object = nullptr ; <nl> - return ; <nl> - } else { <nl> - TF_SetStatus ( status , TF_INVALID_ARGUMENT , <nl> - " GCS path doesn ' t contain an object name . " ) ; <nl> - return ; <nl> - } <nl> + bucket = std : : move ( fname . substr ( scheme_end + 1 , bucket_end - scheme_end - 1 ) ) ; <nl> + <nl> + object = std : : move ( fname . substr ( bucket_end + 1 ) ) ; <nl> + if ( object . empty ( ) & & ! object_empty_ok ) { <nl> + TF_SetStatus ( status , TF_INVALID_ARGUMENT , <nl> + " GCS path doesn ' t contain an object name . " ) ; <nl> } <nl> - * object = <nl> - static_cast < char * > ( plugin_memory_allocate ( object_view . length ( ) + 1 ) ) ; <nl> - / / object_view . data ( ) is a null - terminated string_view because fname is . <nl> - strcpy ( * object , object_view . data ( ) ) ; <nl> } <nl> <nl> / / SECTION 1 . 
Implementation for ` TF_RandomAccessFile ` <nl> namespace tf_random_access_file { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> namespace tf_writable_file { <nl> typedef struct GCSFile { <nl> - const char * bucket ; <nl> - const char * object ; <nl> + const std : : string bucket ; <nl> + const std : : string object ; <nl> gcs : : Client * gcs_client ; / / not owned <nl> TempFile outfile ; <nl> bool sync_need ; <nl> typedef struct GCSFile { <nl> <nl> static void Cleanup ( TF_WritableFile * file ) { <nl> auto gcs_file = static_cast < GCSFile * > ( file - > plugin_file ) ; <nl> - plugin_memory_free ( const_cast < char * > ( gcs_file - > bucket ) ) ; <nl> - plugin_memory_free ( const_cast < char * > ( gcs_file - > object ) ) ; <nl> delete gcs_file ; <nl> } <nl> <nl> void Cleanup ( TF_Filesystem * filesystem ) { <nl> <nl> void NewWritableFile ( const TF_Filesystem * filesystem , const char * path , <nl> TF_WritableFile * file , TF_Status * status ) { <nl> - char * bucket ; <nl> - char * object ; <nl> - ParseGCSPath ( path , false , & bucket , & object , status ) ; <nl> + std : : string bucket , object ; <nl> + ParseGCSPath ( path , false , bucket , object , status ) ; <nl> if ( TF_GetCode ( status ) ! = TF_OK ) return ; <nl> <nl> auto gcs_client = static_cast < gcs : : Client * > ( filesystem - > plugin_filesystem ) ; <nl> char * temp_file_name = TF_GetTempFileName ( " " ) ; <nl> file - > plugin_file = new tf_writable_file : : GCSFile ( <nl> - { bucket , object , gcs_client , <nl> + { std : : move ( bucket ) , std : : move ( object ) , gcs_client , <nl> TempFile ( temp_file_name , std : : ios : : binary | std : : ios : : out ) , true } ) ; <nl> / / We are responsible for freeing the pointer returned by TF_GetTempFileName <nl> free ( temp_file_name ) ; <nl> void NewWritableFile ( const TF_Filesystem * filesystem , const char * path , <nl> <nl> void NewAppendableFile ( const TF_Filesystem * filesystem , const char * path , <nl> TF_WritableFile * file , TF_Status * status ) { <nl> - char * bucket ; <nl> - char * object ; <nl> - ParseGCSPath ( path , false , & bucket , & object , status ) ; <nl> + std : : string bucket , object ; <nl> + ParseGCSPath ( path , false , bucket , object , status ) ; <nl> if ( TF_GetCode ( status ) ! = TF_OK ) return ; <nl> <nl> auto gcs_client = static_cast < gcs : : Client * > ( filesystem - > plugin_filesystem ) ; <nl> void NewAppendableFile ( const TF_Filesystem * filesystem , const char * path , <nl> / / If this file does not exist on server , we will need to sync it . <nl> bool sync_need = ( status_code = = TF_NOT_FOUND ) ; <nl> file - > plugin_file = new tf_writable_file : : GCSFile ( <nl> - { bucket , object , gcs_client , <nl> + { std : : move ( bucket ) , std : : move ( object ) , gcs_client , <nl> TempFile ( temp_file_name , std : : ios : : binary | std : : ios : : app ) , sync_need } ) ; <nl> free ( temp_file_name ) ; <nl> TF_SetStatus ( status , TF_OK , " " ) ; <nl> mmm a / tensorflow / c / experimental / filesystem / plugins / gcs / gcs_filesystem . h <nl> ppp b / tensorflow / c / experimental / filesystem / plugins / gcs / gcs_filesystem . h <nl> <nl> # ifndef TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_GCS_FILESYSTEM_H_ <nl> # define TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_GCS_FILESYSTEM_H_ <nl> <nl> - # include " absl / strings / string_view . h " <nl> # include " google / cloud / storage / client . h " <nl> # include " tensorflow / c / experimental / filesystem / filesystem_interface . 
h " <nl> # include " tensorflow / c / tf_status . h " <nl> <nl> - void ParseGCSPath ( absl : : string_view fname , bool object_empty_ok , char * * bucket , <nl> - char * * object , TF_Status * status ) ; <nl> + void ParseGCSPath ( const std : : string & fname , bool object_empty_ok , <nl> + std : : string & bucket , std : : string & object , TF_Status * status ) ; <nl> <nl> namespace tf_gcs_filesystem { <nl> void Init ( TF_Filesystem * filesystem , TF_Status * status ) ; <nl> mmm a / tensorflow / c / experimental / filesystem / plugins / gcs / gcs_filesystem_test . cc <nl> ppp b / tensorflow / c / experimental / filesystem / plugins / gcs / gcs_filesystem_test . cc <nl> class GCSFilesystemTest : public : : testing : : Test { <nl> TF_Status * status_ ; <nl> } ; <nl> <nl> - / / We have to add this test here because there must be at least one test . <nl> - / / This test will be removed in the future . <nl> - TEST_F ( GCSFilesystemTest , TestInit ) { ASSERT_TF_OK ( status_ ) ; } <nl> + TEST_F ( GCSFilesystemTest , ParseGCSPath ) { <nl> + std : : string bucket , object ; <nl> + ParseGCSPath ( " gs : / / bucket / path / to / object " , false , bucket , object , status_ ) ; <nl> + ASSERT_TF_OK ( status_ ) ; <nl> + ASSERT_EQ ( bucket , " bucket " ) ; <nl> + ASSERT_EQ ( object , " path / to / object " ) ; <nl> + <nl> + ParseGCSPath ( " gs : / / bucket / " , true , bucket , object , status_ ) ; <nl> + ASSERT_TF_OK ( status_ ) ; <nl> + ASSERT_EQ ( bucket , " bucket " ) ; <nl> + <nl> + ParseGCSPath ( " bucket / path / to / object " , false , bucket , object , status_ ) ; <nl> + ASSERT_EQ ( TF_GetCode ( status_ ) , TF_INVALID_ARGUMENT ) ; <nl> + <nl> + / / bucket name must end with " / " <nl> + ParseGCSPath ( " gs : / / bucket " , true , bucket , object , status_ ) ; <nl> + ASSERT_EQ ( TF_GetCode ( status_ ) , TF_INVALID_ARGUMENT ) ; <nl> + <nl> + ParseGCSPath ( " gs : / / bucket / " , false , bucket , object , status_ ) ; <nl> + ASSERT_EQ ( TF_GetCode ( status_ ) , TF_INVALID_ARGUMENT ) ; <nl> + } <nl> <nl> } / / namespace <nl> } / / namespace tensorflow <nl> | use string instead of const char * gcs | tensorflow/tensorflow | c7482df39b5c38538fe02d5371db308629eb827f | 2020-06-24T22:06:16Z |
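Two details of the GCS rewrite above are worth noting: the std::move wrapped around the substr results is redundant (substr already returns a temporary), and switching the bucket/object outputs to std::string references removes the manual plugin_memory_allocate bookkeeping along with the absl dependency. The following is a standalone model of the same parsing contract (bucket must be terminated by "/", object may be empty only when the caller allows it), returning bool instead of a TF_Status and with a simplified scheme check:

#include <iostream>
#include <string>

// Standalone sketch of the ParseGCSPath rules exercised by the new test.
bool ParseGCSPath(const std::string& fname, bool object_empty_ok,
                  std::string& bucket, std::string& object) {
  const std::string scheme = "gs://";
  if (fname.compare(0, scheme.size(), scheme) != 0) return false;
  const size_t bucket_end = fname.find('/', scheme.size());
  if (bucket_end == std::string::npos) return false;  // bucket must end with '/'
  bucket = fname.substr(scheme.size(), bucket_end - scheme.size());
  object = fname.substr(bucket_end + 1);
  return !object.empty() || object_empty_ok;
}

int main() {
  std::string b, o;
  std::cout << ParseGCSPath("gs://bucket/path/to/object", false, b, o)  // 1
            << ParseGCSPath("gs://bucket/", true, b, o)                 // 1
            << ParseGCSPath("gs://bucket", true, b, o)                  // 0
            << ParseGCSPath("bucket/path", false, b, o) << "\n";        // 0
}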
mmm a / modules / core / include / opencv2 / core / operations . hpp <nl> ppp b / modules / core / include / opencv2 / core / operations . hpp <nl> inline RotatedRect : : operator CvBox2D ( ) const <nl> CvBox2D box ; box . center = center ; box . size = size ; box . angle = angle ; <nl> return box ; <nl> } <nl> - inline void RotatedRect : : points ( Point2f pt [ ] ) const <nl> - { <nl> - double _angle = angle * CV_PI / 180 . ; <nl> - float a = ( float ) cos ( _angle ) * 0 . 5f ; <nl> - float b = ( float ) sin ( _angle ) * 0 . 5f ; <nl> - <nl> - pt [ 0 ] . x = center . x - a * size . height - b * size . width ; <nl> - pt [ 0 ] . y = center . y + b * size . height - a * size . width ; <nl> - pt [ 1 ] . x = center . x + a * size . height - b * size . width ; <nl> - pt [ 1 ] . y = center . y - b * size . height - a * size . width ; <nl> - pt [ 2 ] . x = 2 * center . x - pt [ 0 ] . x ; <nl> - pt [ 2 ] . y = 2 * center . y - pt [ 0 ] . y ; <nl> - pt [ 3 ] . x = 2 * center . x - pt [ 1 ] . x ; <nl> - pt [ 3 ] . y = 2 * center . y - pt [ 1 ] . y ; <nl> - } <nl> - <nl> - inline Rect RotatedRect : : boundingRect ( ) const <nl> - { <nl> - Point2f pt [ 4 ] ; <nl> - points ( pt ) ; <nl> - Rect r ( cvFloor ( min ( min ( min ( pt [ 0 ] . x , pt [ 1 ] . x ) , pt [ 2 ] . x ) , pt [ 3 ] . x ) ) , <nl> - cvFloor ( min ( min ( min ( pt [ 0 ] . y , pt [ 1 ] . y ) , pt [ 2 ] . y ) , pt [ 3 ] . y ) ) , <nl> - cvCeil ( max ( max ( max ( pt [ 0 ] . x , pt [ 1 ] . x ) , pt [ 2 ] . x ) , pt [ 3 ] . x ) ) , <nl> - cvCeil ( max ( max ( max ( pt [ 0 ] . y , pt [ 1 ] . y ) , pt [ 2 ] . y ) , pt [ 3 ] . y ) ) ) ; <nl> - r . width - = r . x - 1 ; <nl> - r . height - = r . y - 1 ; <nl> - return r ; <nl> - } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / Scalar_ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> mmm a / modules / core / src / drawing . cpp <nl> ppp b / modules / core / src / drawing . cpp <nl> void ellipse2Poly ( Point center , Size axes , int angle , <nl> y = size_b * SinTable [ angle ] ; <nl> Point pt ; <nl> pt . x = cvRound ( cx + x * alpha - y * beta ) ; <nl> - pt . y = cvRound ( cy - x * beta - y * alpha ) ; <nl> + pt . y = cvRound ( cy + x * beta + y * alpha ) ; <nl> if ( pt ! = prevPt ) <nl> pts . push_back ( pt ) ; <nl> } <nl> mmm a / modules / core / src / matrix . cpp <nl> ppp b / modules / core / src / matrix . cpp <nl> void normalize ( const SparseMat & src , SparseMat & dst , double a , int norm_type ) <nl> <nl> src . convertTo ( dst , - 1 , scale ) ; <nl> } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / RotatedRect / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + void RotatedRect : : points ( Point2f pt [ ] ) const <nl> + { <nl> + double _angle = angle * CV_PI / 180 . ; <nl> + float b = ( float ) cos ( _angle ) * 0 . 5f ; <nl> + float a = ( float ) sin ( _angle ) * 0 . 5f ; <nl> + <nl> + pt [ 0 ] . x = center . x - a * size . height - b * size . width ; <nl> + pt [ 0 ] . y = center . y + b * size . height - a * size . width ; <nl> + pt [ 1 ] . x = center . x + a * size . height - b * size . width ; <nl> + pt [ 1 ] . y = center . y - b * size . height - a * size . width ; <nl> + pt [ 2 ] . x = 2 * center . x - pt [ 0 ] . x ; <nl> + pt [ 2 ] . y = 2 * center . y - pt [ 0 ] . y ; <nl> + pt [ 3 ] . x = 2 * center . x - pt [ 1 ] . x ; <nl> + pt [ 3 ] . y = 2 * center . y - pt [ 1 ] . 
y ; <nl> + } <nl> + <nl> + inline Rect RotatedRect : : boundingRect ( ) const <nl> + { <nl> + Point2f pt [ 4 ] ; <nl> + points ( pt ) ; <nl> + Rect r ( cvFloor ( min ( min ( min ( pt [ 0 ] . x , pt [ 1 ] . x ) , pt [ 2 ] . x ) , pt [ 3 ] . x ) ) , <nl> + cvFloor ( min ( min ( min ( pt [ 0 ] . y , pt [ 1 ] . y ) , pt [ 2 ] . y ) , pt [ 3 ] . y ) ) , <nl> + cvCeil ( max ( max ( max ( pt [ 0 ] . x , pt [ 1 ] . x ) , pt [ 2 ] . x ) , pt [ 3 ] . x ) ) , <nl> + cvCeil ( max ( max ( max ( pt [ 0 ] . y , pt [ 1 ] . y ) , pt [ 2 ] . y ) , pt [ 3 ] . y ) ) ) ; <nl> + r . width - = r . x - 1 ; <nl> + r . height - = r . y - 1 ; <nl> + return r ; <nl> + } <nl> <nl> } <nl> <nl> mmm a / modules / imgproc / src / rotcalipers . cpp <nl> ppp b / modules / imgproc / src / rotcalipers . cpp <nl> cvMinAreaRect2 ( const CvArr * array , CvMemStorage * storage ) <nl> icvRotatingCalipers ( points , n , CV_CALIPERS_MINAREARECT , ( float * ) out ) ; <nl> box . center . x = out [ 0 ] . x + ( out [ 1 ] . x + out [ 2 ] . x ) * 0 . 5f ; <nl> box . center . y = out [ 0 ] . y + ( out [ 1 ] . y + out [ 2 ] . y ) * 0 . 5f ; <nl> - box . size . height = ( float ) sqrt ( ( double ) out [ 1 ] . x * out [ 1 ] . x + ( double ) out [ 1 ] . y * out [ 1 ] . y ) ; <nl> - box . size . width = ( float ) sqrt ( ( double ) out [ 2 ] . x * out [ 2 ] . x + ( double ) out [ 2 ] . y * out [ 2 ] . y ) ; <nl> - box . angle = ( float ) atan2 ( - ( double ) out [ 1 ] . y , ( double ) out [ 1 ] . x ) ; <nl> + box . size . width = ( float ) sqrt ( ( double ) out [ 1 ] . x * out [ 1 ] . x + ( double ) out [ 1 ] . y * out [ 1 ] . y ) ; <nl> + box . size . height = ( float ) sqrt ( ( double ) out [ 2 ] . x * out [ 2 ] . x + ( double ) out [ 2 ] . y * out [ 2 ] . y ) ; <nl> + box . angle = ( float ) atan2 ( ( double ) out [ 1 ] . y , ( double ) out [ 1 ] . x ) ; <nl> } <nl> else if ( n = = 2 ) <nl> { <nl> cvMinAreaRect2 ( const CvArr * array , CvMemStorage * storage ) <nl> box . center . y = ( points [ 0 ] . y + points [ 1 ] . y ) * 0 . 5f ; <nl> double dx = points [ 1 ] . x - points [ 0 ] . x ; <nl> double dy = points [ 1 ] . y - points [ 0 ] . y ; <nl> - box . size . height = ( float ) sqrt ( dx * dx + dy * dy ) ; <nl> - box . size . width = 0 ; <nl> - box . angle = ( float ) atan2 ( - dy , dx ) ; <nl> + box . size . width = ( float ) sqrt ( dx * dx + dy * dy ) ; <nl> + box . size . height = 0 ; <nl> + box . angle = ( float ) atan2 ( dy , dx ) ; <nl> } <nl> else <nl> { <nl> mmm a / modules / video / src / camshift . cpp <nl> ppp b / modules / video / src / camshift . cpp <nl> cvCamShift ( const void * imgProb , CvRect windowIn , <nl> { <nl> box - > size . height = ( float ) length ; <nl> box - > size . width = ( float ) width ; <nl> - box - > angle = ( float ) ( theta * 180 . / CV_PI ) ; <nl> + box - > angle = ( float ) ( ( CV_PI * 0 . 5 + theta ) * 180 . / CV_PI ) ; <nl> + while ( box - > angle < 0 ) <nl> + box - > angle + = 360 ; <nl> + while ( box - > angle > = 360 ) <nl> + box - > angle - = 360 ; <nl> + if ( box - > angle > = 180 ) <nl> + box - > angle - = 180 ; <nl> box - > center = cvPoint2D32f ( comp . rect . x + comp . rect . width * 0 . 5f , <nl> comp . rect . y + comp . rect . height * 0 . 5f ) ; <nl> } <nl> mmm a / samples / cpp / camshiftdemo . cpp <nl> ppp b / samples / cpp / camshiftdemo . cpp <nl> int main ( int argc , char * * argv ) <nl> backproj & = mask ; <nl> RotatedRect trackBox = CamShift ( backproj , trackWindow , <nl> TermCriteria ( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER , 10 , 1 ) ) ; <nl> - trackBox . angle = 90 - trackBox . 
angle ; <nl> <nl> if ( backprojMode ) <nl> cvtColor ( backproj , image , CV_GRAY2BGR ) ; <nl> mmm a / samples / cpp / fitellipse . cpp <nl> ppp b / samples / cpp / fitellipse . cpp <nl> void processImage ( int h , void * ) <nl> Mat ( contours [ i ] ) . convertTo ( pointsf , CV_32F ) ; <nl> RotatedRect box = fitEllipse ( pointsf ) ; <nl> <nl> - box . angle = - box . angle ; <nl> if ( MAX ( box . size . width , box . size . height ) > MIN ( box . size . width , box . size . height ) * 30 ) <nl> continue ; <nl> drawContours ( cimage , contours , ( int ) i , Scalar : : all ( 255 ) , 1 , 8 ) ; <nl> <nl> ellipse ( cimage , box , Scalar ( 0 , 0 , 255 ) , 1 , CV_AA ) ; <nl> + ellipse ( cimage , box . center , box . size * 0 . 5f , box . angle , 0 , 360 , Scalar ( 0 , 255 , 255 ) , 1 , CV_AA ) ; <nl> + Point2f vtx [ 4 ] ; <nl> + box . points ( vtx ) ; <nl> + for ( int j = 0 ; j < 4 ; j + + ) <nl> + line ( cimage , vtx [ j ] , vtx [ ( j + 1 ) % 4 ] , Scalar ( 0 , 255 , 0 ) , 1 , CV_AA ) ; <nl> } <nl> <nl> imshow ( " result " , cimage ) ; <nl> mmm a / tests / cv / src / acamshift . cpp <nl> ppp b / tests / cv / src / acamshift . cpp <nl> void CV_TrackBaseTest : : generate_object ( ) <nl> double width = box0 . size . width * 0 . 5 ; <nl> double height = box0 . size . height * 0 . 5 ; <nl> double angle = box0 . angle * CV_PI / 180 . ; <nl> - double a = cos ( angle ) , b = sin ( angle ) ; <nl> + double a = sin ( angle ) , b = - cos ( angle ) ; <nl> double inv_ww = 1 . / ( width * width ) , inv_hh = 1 . / ( height * height ) ; <nl> <nl> img = cvCreateMat ( img_size . height , img_size . width , img_type ) ; <nl> | unified the coordinate interpretation in RotatedRect ( ticket ) | opencv/opencv | a937d9d43c7bc1345d6d43634a1731ec3d0330fa | 2010-11-29T18:14:08Z |
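The RotatedRect commit above standardizes the angle as clockwise-positive in image coordinates (y pointing down): points()/boundingRect() move out of line, the atan2 and ellipse2Poly negations disappear, and CamShift folds theta into [0, 180). A self-contained version of the corner computation as the new points() defines it:

#include <cmath>
#include <cstdio>

struct Pt { float x, y; };

// Corner computation mirroring the new RotatedRect::points() above:
// angle in degrees, measured in image coordinates (y axis down).
void rotatedRectPoints(Pt c, float width, float height, float angleDeg, Pt pt[4]) {
  const double rad = angleDeg * 3.14159265358979323846 / 180.0;
  const float b = static_cast<float>(std::cos(rad)) * 0.5f;
  const float a = static_cast<float>(std::sin(rad)) * 0.5f;
  pt[0] = {c.x - a * height - b * width, c.y + b * height - a * width};
  pt[1] = {c.x + a * height - b * width, c.y - b * height - a * width};
  pt[2] = {2 * c.x - pt[0].x, 2 * c.y - pt[0].y};  // remaining corners follow
  pt[3] = {2 * c.x - pt[1].x, 2 * c.y - pt[1].y};  // by point symmetry
}

int main() {
  Pt p[4];
  rotatedRectPoints({10, 10}, 4, 2, 90, p);  // 90 degrees: width axis points down
  for (const Pt& q : p) std::printf("(%.1f, %.1f) ", q.x, q.y);
  std::printf("\n");
}

The samples had been compensating for the old convention ("90 - trackBox.angle", "-box.angle"); with the convention fixed at the source, those call-site negations are removed, which accounts for most of the sample-code hunks in this diff.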
mmm a / stdlib / private / SwiftPrivateThreadExtras / CMakeLists . txt <nl> ppp b / stdlib / private / SwiftPrivateThreadExtras / CMakeLists . txt <nl> add_swift_target_library ( swiftSwiftPrivateThreadExtras $ { SWIFT_STDLIB_LIBRARY_BU <nl> SWIFT_MODULE_DEPENDS_HAIKU Glibc <nl> SWIFT_MODULE_DEPENDS_WINDOWS MSVCRT WinSDK <nl> SWIFT_COMPILE_FLAGS $ { SWIFT_STANDARD_LIBRARY_SWIFT_FLAGS } <nl> - TARGET_SDKS ALL_APPLE_PLATFORMS CYGWIN FREEBSD HAIKU LINUX WINDOWS <nl> + TARGET_SDKS ALL_APPLE_PLATFORMS CYGWIN FREEBSD HAIKU LINUX WINDOWS ANDROID <nl> INSTALL_IN_COMPONENT stdlib - experimental <nl> DARWIN_INSTALL_NAME_DIR " $ { SWIFT_DARWIN_STDLIB_PRIVATE_INSTALL_NAME_DIR } " ) <nl> <nl> | [ android ] Enable SwiftPrivateThreadExtras to build in Android . | apple/swift | 5e05ce5ff9ad6cb6be7cf73878db4a7b1f122a79 | 2020-02-13T22:15:49Z |
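Adding ANDROID to TARGET_SDKS above only takes effect because the library's sources already select a threading backend per platform. A hypothetical guard of the kind such ports rely on; the backend names here are invented for illustration and are not taken from the Swift runtime:

#include <cstdio>

// __ANDROID__ must be tested before __linux__, since Android defines both.
#if defined(_WIN32)
static const char* kThreadBackend = "WinSDK threads";
#elif defined(__ANDROID__)  // the target the commit enables
static const char* kThreadBackend = "Bionic pthreads";
#elif defined(__linux__) || defined(__FreeBSD__) || defined(__HAIKU__) || defined(__CYGWIN__)
static const char* kThreadBackend = "glibc/libc pthreads";
#else
static const char* kThreadBackend = "Darwin pthreads";
#endif

int main() { std::printf("thread backend: %s\n", kThreadBackend); }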
mmm a / ports / jxrlib / CMakeLists . txt <nl> ppp b / ports / jxrlib / CMakeLists . txt <nl> project ( jxrlib C ) <nl> # Need shared libs for ABI <nl> option ( BUILD_SHARED_LIBS " Build shared libraries " ON ) <nl> <nl> + # Add a debug postfix <nl> + set ( CMAKE_DEBUG_POSTFIX " d " ) <nl> + <nl> # helper macro to preserve original Makefile convention <nl> macro ( JXR_MAKE_OBJ SET_NAME ) <nl> foreach ( src $ { SRC_ $ { SET_NAME } } ) <nl> mmm a / ports / jxrlib / portfile . cmake <nl> ppp b / ports / jxrlib / portfile . cmake <nl> vcpkg_extract_source_archive ( $ { ARCHIVE } ) <nl> <nl> file ( COPY $ { CMAKE_CURRENT_LIST_DIR } / CMakeLists . txt DESTINATION $ { SOURCE_PATH } ) <nl> <nl> + # The file guiddef . h is part of the Windows SDK , <nl> + # we then remove the local copy shipped with jxrlib <nl> + file ( REMOVE $ { SOURCE_PATH } / common / include / guiddef . h ) <nl> + <nl> vcpkg_configure_cmake ( <nl> SOURCE_PATH $ { SOURCE_PATH } <nl> OPTIONS - DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS : BOOL = ON <nl> | Merge pull request from traversaro / fix - jxrlib | microsoft/vcpkg | 407fde814d7a956471e17e1a87a77d6ff3116755 | 2016-11-16T23:30:32Z |
mmm a / jstests / replsets / drop_collections_two_phase_rename_drop_target . js <nl> ppp b / jstests / replsets / drop_collections_two_phase_rename_drop_target . js <nl> try { <nl> <nl> / / Confirm in the logs that the renameCollection dropped the target collection on the <nl> / / secondary using two phase collection drop . <nl> - checkLog . contains ( secondary , ' dropCollection : ' + toColl . getFullName ( ) ) ; <nl> + checkLog . contains ( secondary , new RegExp ( ' dropCollection : . * ' + toColl . getFullName ( ) ) ) ; <nl> <nl> / / Rename target collection back to source collection . This helps to ensure the collection <nl> / / metadata is updated correctly on both primary and secondary . <nl> | SERVER - 46235 Fix drop_collections_two_phase_rename_drop_target . js with JSON logs | mongodb/mongo | 55ad6d9a40389f1b4989ff2320862ad4a77d685c | 2020-02-24T23:57:41Z |
mmm a / cmake / Dependencies . cmake <nl> ppp b / cmake / Dependencies . cmake <nl> if ( BUILD_PYTHON ) <nl> <nl> # These should fill in the rest of the variables , like versions , but resepct <nl> # the variables we set above <nl> - set ( Python_ADDITIONAL_VERSIONS 3 . 7 3 . 6 3 . 5 2 . 8 2 . 7 2 . 6 ) <nl> - find_package ( PythonInterp 2 . 7 ) <nl> - find_package ( PythonLibs 2 . 7 ) <nl> + set ( Python_ADDITIONAL_VERSIONS 3 . 7 3 . 6 3 . 5 ) <nl> + find_package ( PythonInterp 3 . 0 ) <nl> + find_package ( PythonLibs 3 . 0 ) <nl> + <nl> + if ( $ { PYTHONLIBS_VERSION_STRING } VERSION_LESS 3 ) <nl> + message ( FATAL_ERROR <nl> + " Found Python libraries version $ { PYTHONLIBS_VERSION_STRING } . Python 2 has reached end - of - life and is no longer supported by PyTorch . " ) <nl> + endif ( ) <nl> <nl> # When building pytorch , we pass this in directly from setup . py , and <nl> # don ' t want to overwrite it because we trust python more than cmake <nl> | Fail CMake setup if trying to build with Python 2 ( ) | pytorch/pytorch | 83de675ebff8f553a4251a04ea9cef98dd3dff4f | 2020-04-16T17:22:36Z |
mmm a / caffe2 / python / data_parallel_model . py <nl> ppp b / caffe2 / python / data_parallel_model . py <nl> def Parallelize_GPU ( <nl> model_helper_obj , <nl> input_builder_fun , <nl> forward_pass_builder_fun , <nl> - param_update_builder_fun , <nl> + param_update_builder_fun = None , <nl> + optimizer_builder_fun = None , <nl> devices = range ( 0 , workspace . NumCudaDevices ( ) ) , <nl> rendezvous = None , <nl> net_type = ' dag ' , <nl> def Parallelize_GPU ( <nl> param_update_builder_fun : <nl> Function that adds operators that are run after <nl> gradient update , such as updating the weights and <nl> - weight decaying . <nl> + weight decaying . This is called for each GPU separately . <nl> Signature : param_update_builder_fun ( model ) <nl> + optimizer_builder_fun : <nl> + Alternative to param_update_builder_fun , allows one <nl> + to add an optimizer for the whole model . Called only <nl> + once , without name or devicescope . <nl> + <nl> devices : List of GPU ids , such as [ 0 , 1 , 2 , 3 ] , <nl> rendezvous : used for rendezvous in distributed computation , if None <nl> then only one node is used . To create rendezvous , <nl> def Parallelize_GPU ( <nl> num_shards = 1 if rendezvous is None else rendezvous [ ' num_shards ' ] <nl> loss_scale = 1 . 0 / ( len ( devices ) * num_shards ) <nl> <nl> + has_parameter_updates = param_update_builder_fun is not None or \ <nl> + optimizer_builder_fun is not None <nl> + assert not ( <nl> + param_update_builder_fun is not None and <nl> + optimizer_builder_fun is not None <nl> + ) , ' Can only specify one of param_update_builder_fun , optimizer_builder_fun ' <nl> + <nl> for device in devices : <nl> device_opt = core . DeviceOption ( caffe2_pb2 . CUDA , device ) <nl> with core . DeviceScope ( device_opt ) : <nl> def Parallelize_GPU ( <nl> input_builder_fun ( model_helper_obj ) <nl> losses = forward_pass_builder_fun ( model_helper_obj , loss_scale ) <nl> # Losses are not needed for test net <nl> - if param_update_builder_fun is not None : <nl> + if has_parameter_updates : <nl> assert isinstance ( losses , list ) , \ <nl> ' Model builder function must return list of loss blobs ' <nl> for loss in losses : <nl> def Parallelize_GPU ( <nl> model_helper_obj . _device_grouped_blobs . keys ( ) <nl> model_helper_obj . _computed_param_names = computed_params_grouped . keys ( ) <nl> <nl> - if ( param_update_builder_fun is None ) : <nl> + if not has_parameter_updates : <nl> log . info ( " Parameter update function not defined - - > only forward " ) <nl> _InferBlobDevice ( model_helper_obj ) <nl> return <nl> def Parallelize_GPU ( <nl> if rendezvous is not None : <nl> assert num_shards > 1 , \ <nl> " Please use more than one shard for distributed training " <nl> - for device in devices : <nl> - device_opt = core . DeviceOption ( caffe2_pb2 . CUDA , device ) <nl> - with core . DeviceScope ( device_opt ) : <nl> - with core . NameScope ( " gpu_ { } " . format ( device ) ) : <nl> - param_update_builder_fun ( model_helper_obj ) <nl> + <nl> + if param_update_builder_fun is not None : <nl> + for device in devices : <nl> + device_opt = core . DeviceOption ( caffe2_pb2 . CUDA , device ) <nl> + with core . DeviceScope ( device_opt ) : <nl> + with core . NameScope ( " gpu_ { } " . format ( device ) ) : <nl> + param_update_builder_fun ( model_helper_obj ) <nl> + else : <nl> + log . 
info ( " Calling optimizer builder function " ) <nl> + optimizer_builder_fun ( model_helper_obj ) <nl> <nl> ( sync_blobs , sync_names ) = _ComputeBlobsToSync ( model_helper_obj ) <nl> sync_blobs_grouped = _GroupByDevice ( <nl> def GetCheckpointParams ( model ) : <nl> They are blobs for the first gpu and iteration blobs . <nl> ' ' ' <nl> ( all_blobs , _ ) = _ComputeBlobsToSync ( model ) <nl> - return { <nl> + first_gpu_blobs = { <nl> b for b in all_blobs <nl> if str ( b ) . startswith ( " gpu_ { } / " . format ( model . _devices [ 0 ] ) ) } <nl> <nl> + # Add iteration blobs that do not have namescope separately , since <nl> + # it is important to checkpoint iteration counter <nl> + iteration_blobs = set ( ) <nl> + for op in model . net . Proto ( ) . op : <nl> + if op . type = = ' Iter ' or op . type = = ' AtomicIter ' : <nl> + if not op . output [ 0 ] . startswith ( " gpu_ " ) : <nl> + iteration_blobs . add ( op . output [ 0 ] ) <nl> + <nl> + return first_gpu_blobs . union ( iteration_blobs ) <nl> + <nl> <nl> def FinalizeAfterCheckpoint ( model , blobs = None ) : <nl> ' ' ' <nl> def sumN ( * gpu_indices ) : <nl> sumN ( j * 2 , j * 2 + 1 ) <nl> for j in range ( 2 ) : <nl> sumN ( j * 4 , j * 4 + 2 ) <nl> - sum2 ( 0 , 4 ) <nl> + sumN ( 0 , 4 ) <nl> elif len ( devices ) = = 4 : <nl> sumN ( 0 , 1 ) <nl> sumN ( 2 , 3 ) <nl> mmm a / caffe2 / python / data_parallel_model_test . py <nl> ppp b / caffe2 / python / data_parallel_model_test . py <nl> <nl> import unittest <nl> from caffe2 . proto import caffe2_pb2 <nl> from caffe2 . python import core , workspace , data_parallel_model , cnn , rnn_cell <nl> + from caffe2 . python import optimizer <nl> from caffe2 . python . test_util import TestCase <nl> <nl> + <nl> @ unittest . skipIf ( not workspace . has_gpu_support , " No gpu support . " ) <nl> @ unittest . skipIf ( workspace . NumCudaDevices ( ) < 2 , " Need at least 2 GPUs . " ) <nl> class GPUDataParallelModelTest ( TestCase ) : <nl> def model_build_fun ( model , loss_scale ) : <nl> loss = model . Scale ( loss , scale = loss_scale ) <nl> return [ loss ] <nl> <nl> - def param_update_fun ( model ) : <nl> - ITER = model . Iter ( " ITER " ) <nl> - LR = model . net . LearningRate ( <nl> - [ ITER ] , <nl> - " LR " , <nl> - base_lr = ( - 0 . 1 ) , <nl> - policy = " fixed " , <nl> - ) <nl> - ONE = model . param_init_net . ConstantFill ( <nl> - [ ] , " ONE " , shape = [ 1 ] , value = 1 . 0 , <nl> - ) <nl> - for param in model . GetParams ( ) : <nl> - grad = model . param_to_grad [ param ] <nl> - model . WeightedSum ( [ param , ONE , grad , LR ] , param ) <nl> + def add_optimizer ( model ) : <nl> + optimizer . build_sgd ( model , 0 . 1 , policy = " fixed " ) <nl> <nl> workspace . ResetWorkspace ( ) <nl> model = cnn . CNNModelHelper ( <nl> def param_update_fun ( model ) : <nl> model , <nl> input_builder_fun = input_builder_fun , <nl> forward_pass_builder_fun = model_build_fun , <nl> - param_update_builder_fun = param_update_fun , <nl> + optimizer_builder_fun = add_optimizer , <nl> devices = gpu_devices , <nl> ) <nl> <nl> def add_model_ops ( model , loss_scale ) : <nl> ) <nl> return [ loss ] <nl> <nl> - def add_parameter_update_ops ( model ) : <nl> - model . Iter ( " ITER " ) <nl> - LR = model . param_init_net . ConstantFill ( <nl> - [ ] , ' LR ' , shape = [ 1 ] , value = 0 . 1 <nl> - ) <nl> - for param in model . GetParams ( ) : <nl> - param_grad = model . param_to_grad [ param ] <nl> - param_momentum = model . param_init_net . ConstantFill ( <nl> - [ param ] , param + ' _momentum ' , value = 0 . 
0 <nl> - ) <nl> - model . net . MomentumSGDUpdate ( <nl> - [ param_grad , param_momentum , LR , param ] , <nl> - [ param_grad , param_momentum , param ] , <nl> - ) <nl> + def add_optimizer ( model ) : <nl> + optimizer . build_sgd ( model , 0 . 1 , policy = " fixed " , momentum = 0 . 9 ) <nl> <nl> model = cnn . CNNModelHelper ( <nl> order = " NHWC " , <nl> def add_parameter_update_ops ( model ) : <nl> model , <nl> input_builder_fun = add_input_ops , <nl> forward_pass_builder_fun = add_model_ops , <nl> - param_update_builder_fun = add_parameter_update_ops , <nl> + optimizer_builder_fun = add_optimizer , <nl> devices = [ 1 , 2 , 3 ] , <nl> ) <nl> <nl> def add_parameter_update_ops ( model ) : <nl> self . assertTrue ( p + " _momentum " in checkpoint_params ) <nl> for p in model . GetParams ( " gpu_2 / " ) : <nl> self . assertFalse ( p in checkpoint_params ) <nl> + self . assertTrue ( <nl> + core . BlobReference ( " gpu_1 / fc_w_momentum " ) in checkpoint_params ) <nl> for c in model . GetComputedParams ( " gpu_1 / " ) : <nl> self . assertTrue ( c in checkpoint_params ) <nl> for c in model . GetComputedParams ( " gpu_2 / " ) : <nl> self . assertFalse ( c in checkpoint_params ) <nl> self . assertFalse ( core . BlobReference ( " gpu_1 / data " ) in checkpoint_params ) <nl> - self . assertTrue ( core . BlobReference ( " gpu_1 / ITER " ) in checkpoint_params ) <nl> - <nl> + self . assertTrue ( core . BlobReference ( " optimizer_iteration " ) in checkpoint_params ) <nl> <nl> <nl> @ unittest . skipIf ( not workspace . has_gpu_support , " No gpu support . " ) <nl> mmm a / caffe2 / python / examples / resnet50_trainer . py <nl> ppp b / caffe2 / python / examples / resnet50_trainer . py <nl> <nl> import time <nl> import os <nl> <nl> - from caffe2 . python import core , workspace , experiment_util , data_parallel_model , dyndep <nl> + from caffe2 . python import core , workspace , experiment_util , data_parallel_model <nl> + from caffe2 . python import dyndep , optimizer <nl> from caffe2 . python import timeout_guard , model_helper , brew <nl> <nl> import caffe2 . python . models . resnet as resnet <nl> def AddImageInput ( model , reader , batch_size , img_size ) : <nl> data = model . StopGradient ( data , data ) <nl> <nl> <nl> - def AddMomentumParameterUpdate ( train_model , LR ) : <nl> - ' ' ' <nl> - Add the momentum - SGD update . <nl> - ' ' ' <nl> - params = train_model . GetParams ( ) <nl> - assert ( len ( params ) > 0 ) <nl> - <nl> - for param in params : <nl> - param_grad = train_model . param_to_grad [ param ] <nl> - param_momentum = train_model . param_init_net . ConstantFill ( <nl> - [ param ] , param + ' _momentum ' , value = 0 . 0 <nl> - ) <nl> - <nl> - # Update param_grad and param_momentum in place <nl> - train_model . net . MomentumSGDUpdate ( <nl> - [ param_grad , param_momentum , LR , param ] , <nl> - [ param_grad , param_momentum , param ] , <nl> - momentum = 0 . 9 , <nl> - nesterov = 1 , <nl> - ) <nl> - <nl> - <nl> def SaveModel ( args , train_model , epoch ) : <nl> prefix = " gpu_ { } " . format ( train_model . _devices [ 0 ] ) <nl> predictor_export_meta = pred_exp . PredictorExportMeta ( <nl> def create_resnet50_model_ops ( model , loss_scale ) : <nl> brew . accuracy ( model , [ softmax , " label " ] , " accuracy " ) <nl> return [ loss ] <nl> <nl> - # SGD <nl> - def add_parameter_update_ops ( model ) : <nl> - brew . add_weight_decay ( model , args . weight_decay ) <nl> - ITER = brew . iter ( model , " ITER " ) <nl> + def add_optimizer ( model ) : <nl> stepsz = int ( 30 * args . 
epoch_size / total_batch_size / num_shards ) <nl> - LR = model . net . LearningRate ( <nl> - [ ITER ] , <nl> - " LR " , <nl> - base_lr = args . base_learning_rate , <nl> + optimizer . build_sgd ( <nl> + model , <nl> + args . base_learning_rate , <nl> + momentum = 0 . 9 , <nl> + nesterov = 1 , <nl> policy = " step " , <nl> stepsize = stepsz , <nl> - gamma = 0 . 1 , <nl> + gamma = 0 . 1 <nl> ) <nl> - AddMomentumParameterUpdate ( model , LR ) <nl> <nl> # Input . Note that the reader must be shared with all GPUS . <nl> reader = train_model . CreateDB ( <nl> def add_image_input ( model ) : <nl> train_model , <nl> input_builder_fun = add_image_input , <nl> forward_pass_builder_fun = create_resnet50_model_ops , <nl> - param_update_builder_fun = add_parameter_update_ops , <nl> + optimizer_builder_fun = add_optimizer , <nl> devices = gpus , <nl> rendezvous = rendezvous , <nl> optimize_gradient_memory = True , <nl> mmm a / caffe2 / python / optimizer . py <nl> ppp b / caffe2 / python / optimizer . py <nl> def scale_learning_rate ( self , * args , * * kwargs ) : <nl> <nl> class SgdOptimizer ( Optimizer ) : <nl> def __init__ ( self , base_learning_rate = 0 . 01 , policy = ' fixed ' , <nl> - momentum = 0 . 0 , * * kwargs ) : <nl> + momentum = 0 . 0 , nesterov = 1 , * * kwargs ) : <nl> super ( SgdOptimizer , self ) . __init__ ( ) <nl> self . base_learning_rate = base_learning_rate <nl> self . policy = policy <nl> self . momentum = momentum <nl> + self . nesterov = nesterov <nl> self . init_kwargs = kwargs <nl> <nl> def _run ( self , net , param_init_net , param_info ) : <nl> param = param_info . blob <nl> grad = param_info . grad <nl> - if self . base_learning_rate < = 0 : <nl> + if self . base_learning_rate = = 0 : <nl> return <nl> + assert self . base_learning_rate > 0 <nl> <nl> + # We need negative sign for LR when used directly with WeightedSum <nl> + # below . <nl> + lr_sign = - 1 if self . momentum else 1 <nl> lr , _ = self . build_lr ( <nl> net , param_init_net , <nl> - base_learning_rate = self . base_learning_rate , <nl> + base_learning_rate = self . base_learning_rate * lr_sign , <nl> learning_rate_blob = str ( param ) + " _lr " , <nl> policy = self . policy , <nl> * * ( self . init_kwargs ) <nl> def _run ( self , net , param_init_net , param_info ) : <nl> if dev is None : <nl> dev = core . DeviceOption ( caffe2_pb2 . CPU ) <nl> <nl> + # Each GPU / CPU must have its own ONE blob , thus modify the name <nl> + # to include device information . <nl> ONE = param_init_net . ConstantFill ( <nl> [ ] , <nl> " ONE_ { } _ { } " . format ( dev . device_type , dev . cuda_gpu_id ) , <nl> shape = [ 1 ] , <nl> value = 1 . 0 <nl> ) <nl> + <nl> self . _aux_params . shared . append ( ONE ) <nl> <nl> if self . momentum > 0 : <nl> def _run ( self , net , param_init_net , param_info ) : <nl> ) <nl> else : <nl> if self . momentum > 0 . : <nl> - net . MomentumSGD ( <nl> - [ grad , momentum_data , lr ] , [ grad , momentum_data ] , <nl> + net . MomentumSGDUpdate ( <nl> + [ grad , momentum_data , lr , param ] , <nl> + [ grad , momentum_data , param ] , <nl> momentum = self . momentum , <nl> - nesterov = 1 ) <nl> - coeff = ONE <nl> + nesterov = self . nesterov ) <nl> else : <nl> coeff = lr <nl> <nl> - net . WeightedSum ( <nl> - [ param , ONE , grad , coeff ] , <nl> - param <nl> - ) <nl> + net . WeightedSum ( <nl> + [ param , ONE , grad , coeff ] , <nl> + param <nl> + ) <nl> <nl> def scale_learning_rate ( self , scale ) : <nl> self . 
base_learning_rate * = scale <nl> | add optimizer support to data_parallel_model ; Use MomentumSGDUpdate | pytorch/pytorch | cdb50fbf2b7c32d17fb97457a00cf8593431edee | 2017-05-30T19:49:57Z |
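The caffe2 refactor above moves weight updates from a per-GPU param_update_builder_fun to a single optimizer_builder_fun (the two are mutually exclusive) and swaps the separate MomentumSGD + WeightedSum pair for the fused MomentumSGDUpdate, which rewrites gradient, momentum buffer and parameter in place. A scalar sketch of that fused update follows; it states the textbook momentum/Nesterov rule, offered as an assumption rather than a spec of the Caffe2 kernel:

#include <cstdio>
#include <vector>

void momentum_sgd_update(std::vector<float>& param, std::vector<float>& grad,
                         std::vector<float>& mom, float lr, float mu, bool nesterov) {
  for (size_t i = 0; i < param.size(); ++i) {
    const float m_new = mu * mom[i] + lr * grad[i];          // updated momentum buffer
    const float step = nesterov ? lr * grad[i] + mu * m_new  // look-ahead step
                                : m_new;
    mom[i] = m_new;
    param[i] -= step;
  }
}

int main() {
  std::vector<float> p{1.0f}, g{0.5f}, m{0.0f};
  momentum_sgd_update(p, g, m, /*lr=*/0.1f, /*mu=*/0.9f, /*nesterov=*/true);
  std::printf("param=%.4f momentum=%.4f\n", p[0], m[0]);  // param=0.9050 momentum=0.0500
}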
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> else ( ) <nl> # changes intact , so we ' ll just clobber everything and say sorry . <nl> message ( STATUS " Cache compiler flags ignored , please edit CMakeLists . txt to change the flags . " ) <nl> # / MP - Multi - threaded compilation <nl> - # / MD - Multi - threaded runtime <nl> # / Ox - Full optimization <nl> # / Oy - - Don ' t omit frame pointer <nl> # / GR - - Disable RTTI <nl> # / GS - - No stack buffer overflow checks <nl> # / EHsc - C + + - only exception handling semantics <nl> - set ( optimization_flags " / MP / MD / Ox / Oy - / GR - / GS - / EHsc " ) <nl> + set ( optimization_flags " / MP / Ox / Oy - / GR - / GS - / EHsc " ) <nl> # / Zi - Output debugging information <nl> # / Zo - enahnced debug info for optimized builds <nl> - set ( CMAKE_C_FLAGS_RELEASE " $ { optimization_flags } / Zi " CACHE STRING " " FORCE ) <nl> - set ( CMAKE_CXX_FLAGS_RELEASE " $ { optimization_flags } / Zi " CACHE STRING " " FORCE ) <nl> - set ( CMAKE_C_FLAGS_RELWITHDEBINFO " $ { optimization_flags } / Zi / Zo " CACHE STRING " " FORCE ) <nl> - set ( CMAKE_CXX_FLAGS_RELWITHDEBINFO " $ { optimization_flags } / Zi / Zo " CACHE STRING " " FORCE ) <nl> + # / MDd - Multi - threaded Debug Runtime DLL <nl> + set ( CMAKE_C_FLAGS_DEBUG " $ { optimization_flags } / MDd / Zi / Zo " CACHE STRING " " FORCE ) <nl> + set ( CMAKE_CXX_FLAGS_DEBUG " $ { optimization_flags } / MDd / Zi / Zo " CACHE STRING " " FORCE ) <nl> + # / MD - Multi - threaded runtime DLL <nl> + set ( CMAKE_C_FLAGS_RELEASE " $ { optimization_flags } / MD / Zi " CACHE STRING " " FORCE ) <nl> + set ( CMAKE_CXX_FLAGS_RELEASE " $ { optimization_flags } / MD / Zi " CACHE STRING " " FORCE ) <nl> + set ( CMAKE_C_FLAGS_RELWITHDEBINFO " $ { optimization_flags } / MD / Zi / Zo " CACHE STRING " " FORCE ) <nl> + set ( CMAKE_CXX_FLAGS_RELWITHDEBINFO " $ { optimization_flags } / MD / Zi / Zo " CACHE STRING " " FORCE ) <nl> + <nl> + set ( CMAKE_EXE_LINKER_FLAGS_DEBUG " / DEBUG " CACHE STRING " " FORCE ) <nl> + set ( CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO " / DEBUG " CACHE STRING " " FORCE ) <nl> endif ( ) <nl> <nl> add_definitions ( - DSINGLETHREADED ) <nl> if ( ENABLE_GLFW ) <nl> set ( GLFW_PREFIX " $ { CMAKE_CURRENT_SOURCE_DIR } / externals / glfw - 3 . 0 . 4 . bin " ) <nl> set ( GLFW_INCLUDE_DIRS " $ { GLFW_PREFIX } / include " CACHE PATH " Path to GLFW3 headers " ) <nl> set ( GLFW_LIBRARY_DIRS " $ { GLFW_PREFIX } / lib - $ { TMP_TOOLSET } " CACHE PATH " Path to GLFW3 libraries " ) <nl> - <nl> + <nl> # Clean up after ourselves <nl> unset ( TMP_TOOLSET ) <nl> unset ( TMP_ARCH ) <nl> | Small changes to the CMake file to make windows build easier | yuzu-emu/yuzu | fa79b3f4f47a53ed758cb781940bf2a6ba0f1111 | 2015-03-26T10:04:22Z |
mmm a / src / compiler / instruction . h <nl> ppp b / src / compiler / instruction . h <nl> class InstructionOperand { <nl> ConvertTo ( kind , index ) ; <nl> } <nl> <nl> + static InstructionOperand * New ( Zone * zone , Kind kind , int index ) { <nl> + return New ( zone , InstructionOperand ( kind , index ) ) ; <nl> + } <nl> + <nl> Kind kind ( ) const { return KindField : : decode ( value_ ) ; } <nl> int index ( ) const { return static_cast < int > ( value_ ) > > KindField : : kSize ; } <nl> # define INSTRUCTION_OPERAND_PREDICATE ( name , type ) \ <nl> class InstructionOperand { <nl> if ( kind ! = UNALLOCATED ) virtual_register_ = kInvalidVirtualRegister ; <nl> } <nl> <nl> - / / TODO ( dcarney ) : get rid of these <nl> - void * operator new ( size_t , void * location ) { return location ; } <nl> - void * operator new ( size_t size , Zone * zone ) { <nl> - return zone - > New ( static_cast < int > ( size ) ) ; <nl> + protected : <nl> + template < typename SubKindOperand > <nl> + static SubKindOperand * New ( Zone * zone , const SubKindOperand & op ) { <nl> + void * buffer = zone - > New ( sizeof ( op ) ) ; <nl> + return new ( buffer ) SubKindOperand ( op ) ; <nl> } <nl> - void operator delete ( void * pointer , Zone * zone ) { UNREACHABLE ( ) ; } <nl> <nl> - protected : <nl> InstructionOperand ( Kind kind , int index , int virtual_register ) <nl> : virtual_register_ ( virtual_register ) { <nl> ConvertTo ( kind , index ) ; <nl> class UnallocatedOperand : public InstructionOperand { <nl> value_ | = LifetimeField : : encode ( lifetime ) ; <nl> } <nl> <nl> + UnallocatedOperand * Copy ( Zone * zone ) { return New ( zone , * this ) ; } <nl> + <nl> UnallocatedOperand * CopyUnconstrained ( Zone * zone ) { <nl> - return new ( zone ) UnallocatedOperand ( ANY , virtual_register ( ) ) ; <nl> + return New ( zone , UnallocatedOperand ( ANY , virtual_register ( ) ) ) ; <nl> } <nl> <nl> static const UnallocatedOperand * cast ( const InstructionOperand * op ) { <nl> std : : ostream & operator < < ( std : : ostream & os , const PrintableMoveOperands & mo ) ; <nl> : InstructionOperand ( kOperandKind , index ) { } \ <nl> \ <nl> static SubKind # # Operand * New ( int index , Zone * zone ) { \ <nl> - return new ( zone ) SubKind # # Operand ( index ) ; \ <nl> + return InstructionOperand : : New ( zone , SubKind # # Operand ( index ) ) ; \ <nl> } \ <nl> \ <nl> static SubKind # # Operand * cast ( InstructionOperand * op ) { \ <nl> class PointerMap FINAL : public ZoneObject { <nl> std : : ostream & operator < < ( std : : ostream & os , const PointerMap & pm ) ; <nl> <nl> / / TODO ( titzer ) : s / PointerMap / ReferenceMap / <nl> - class Instruction : public ZoneObject { <nl> + class Instruction { <nl> public : <nl> size_t OutputCount ( ) const { return OutputCountField : : decode ( bit_field_ ) ; } <nl> const InstructionOperand * OutputAt ( size_t i ) const { <nl> class Instruction : public ZoneObject { <nl> pointer_map_ = map ; <nl> } <nl> <nl> - / / Placement new operator so that we can smash instructions into <nl> - / / zone - allocated memory . 
<nl> - void * operator new ( size_t , void * location ) { return location ; } <nl> - <nl> - void operator delete ( void * pointer , void * location ) { UNREACHABLE ( ) ; } <nl> - <nl> void OverwriteWithNop ( ) { <nl> opcode_ = ArchOpcodeField : : encode ( kArchNop ) ; <nl> bit_field_ = 0 ; <nl> class Instruction : public ZoneObject { <nl> InstructionOperand * inputs , size_t temp_count , <nl> InstructionOperand * temps ) ; <nl> <nl> - protected : <nl> typedef BitField < size_t , 0 , 8 > OutputCountField ; <nl> typedef BitField < size_t , 8 , 16 > InputCountField ; <nl> typedef BitField < size_t , 24 , 6 > TempCountField ; <nl> class Instruction : public ZoneObject { <nl> uint32_t bit_field_ ; <nl> PointerMap * pointer_map_ ; <nl> InstructionOperand operands_ [ 1 ] ; <nl> + <nl> + private : <nl> + DISALLOW_COPY_AND_ASSIGN ( Instruction ) ; <nl> } ; <nl> <nl> <nl> mmm a / src / compiler / move - optimizer . cc <nl> ppp b / src / compiler / move - optimizer . cc <nl> MoveOperands * PrepareInsertAfter ( ParallelMove * left , MoveOperands * move , <nl> } <nl> DCHECK ( ! ( replacement = = to_eliminate & & replacement ! = nullptr ) ) ; <nl> if ( replacement ! = nullptr ) { <nl> - auto new_source = new ( zone ) InstructionOperand ( <nl> - replacement - > source ( ) - > kind ( ) , replacement - > source ( ) - > index ( ) ) ; <nl> + auto new_source = InstructionOperand : : New ( <nl> + zone , replacement - > source ( ) - > kind ( ) , replacement - > source ( ) - > index ( ) ) ; <nl> move - > set_source ( new_source ) ; <nl> } <nl> return to_eliminate ; <nl> void MoveOptimizer : : FinalizeMoves ( GapInstruction * gap ) { <nl> loads . push_back ( move ) ; <nl> / / Replace source with copy for later use . <nl> auto dest = move - > destination ( ) ; <nl> - move - > set_destination ( new ( code_zone ( ) ) <nl> - InstructionOperand ( dest - > kind ( ) , dest - > index ( ) ) ) ; <nl> + move - > set_destination ( <nl> + InstructionOperand : : New ( code_zone ( ) , dest - > kind ( ) , dest - > index ( ) ) ) ; <nl> continue ; <nl> } <nl> if ( ( found - > destination ( ) - > IsStackSlot ( ) | | <nl> void MoveOptimizer : : FinalizeMoves ( GapInstruction * gap ) { <nl> InstructionOperand : : Kind found_kind = found - > destination ( ) - > kind ( ) ; <nl> int found_index = found - > destination ( ) - > index ( ) ; <nl> auto next_dest = <nl> - new ( code_zone ( ) ) InstructionOperand ( found_kind , found_index ) ; <nl> + InstructionOperand : : New ( code_zone ( ) , found_kind , found_index ) ; <nl> auto dest = move - > destination ( ) ; <nl> found - > destination ( ) - > ConvertTo ( dest - > kind ( ) , dest - > index ( ) ) ; <nl> move - > set_destination ( next_dest ) ; <nl> mmm a / src / compiler / register - allocator . cc <nl> ppp b / src / compiler / register - allocator . cc <nl> void RegisterAllocator : : AssignSpillSlots ( ) { <nl> auto op_kind = kind = = DOUBLE_REGISTERS <nl> ? InstructionOperand : : DOUBLE_STACK_SLOT <nl> : InstructionOperand : : STACK_SLOT ; <nl> - auto op = new ( code_zone ( ) ) InstructionOperand ( op_kind , index ) ; <nl> + auto op = InstructionOperand : : New ( code_zone ( ) , op_kind , index ) ; <nl> range - > SetOperand ( op ) ; <nl> } <nl> } <nl> void RegisterAllocator : : MeetRegisterConstraintsForLastInstructionInBlock ( <nl> <nl> / / Create an unconstrained operand for the same virtual register <nl> / / and insert a gap move from the fixed output to the operand . 
<nl> - UnallocatedOperand * output_copy = new ( code_zone ( ) ) <nl> - UnallocatedOperand ( UnallocatedOperand : : ANY , output_vreg ) ; <nl> - <nl> + UnallocatedOperand * output_copy = <nl> + UnallocatedOperand ( UnallocatedOperand : : ANY , output_vreg ) <nl> + . Copy ( code_zone ( ) ) ; <nl> AddGapMove ( gap_index , GapInstruction : : START , output , output_copy ) ; <nl> } <nl> } <nl> mmm a / test / cctest / compiler / test - instruction . cc <nl> ppp b / test / cctest / compiler / test - instruction . cc <nl> class InstructionTester : public HandleAndZoneScope { <nl> } <nl> <nl> UnallocatedOperand * NewUnallocated ( int vreg ) { <nl> - return new ( zone ( ) ) UnallocatedOperand ( UnallocatedOperand : : ANY , vreg ) ; <nl> + return UnallocatedOperand ( UnallocatedOperand : : ANY , vreg ) . Copy ( zone ( ) ) ; <nl> } <nl> <nl> InstructionBlock * BlockAt ( BasicBlock * block ) { <nl> | [ turbofan ] make zone allocation of InstructionOperand explicit | v8/v8 | 6f97beb51e1adc31a893d392a4d0f80cc5f22f41 | 2015-02-09T14:20:19Z |
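The v8 change above drops class-level operator new overloads in favor of an explicit static New() that placement-constructs into Zone memory, making every zone allocation visible at the call site; the diff pairs this with DISALLOW_COPY_AND_ASSIGN on Instruction. A minimal arena reproducing the pattern, as a stand-in for v8's Zone (one heap block per object, no per-object destructor calls), which is enough to show the shape:

#include <cstddef>
#include <cstdio>
#include <memory>
#include <new>
#include <utility>
#include <vector>

class Arena {
 public:
  // Explicit allocation entry point, like InstructionOperand::New(zone, ...).
  template <typename T, typename... Args>
  T* New(Args&&... args) {
    void* buffer = Allocate(sizeof(T));
    return new (buffer) T(std::forward<Args>(args)...);  // placement new
  }

 private:
  void* Allocate(std::size_t n) {
    const std::size_t words =
        (n + sizeof(std::max_align_t) - 1) / sizeof(std::max_align_t);
    blocks_.push_back(std::make_unique<std::max_align_t[]>(words));
    return blocks_.back().get();
  }
  std::vector<std::unique_ptr<std::max_align_t[]>> blocks_;  // freed together
};

struct Operand {
  Operand(int kind, int index) : kind(kind), index(index) {}
  int kind, index;
};

int main() {
  Arena zone;
  Operand* op = zone.New<Operand>(/*kind=*/1, /*index=*/42);
  std::printf("kind=%d index=%d\n", op->kind, op->index);
}  // storage is released when the arena dies; trivially destructible payloads
   // never get individual destructor calls, matching zone semantics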
mmm a / Marlin / src / module / endstops . cpp <nl> ppp b / Marlin / src / module / endstops . cpp <nl> void Endstops : : not_homing ( ) { <nl> void Endstops : : resync ( ) { <nl> if ( ! abort_enabled ( ) ) return ; / / If endstops / probes are disabled the loop below can hang <nl> <nl> - # if ENABLED ( ENDSTOP_INTERRUPTS_FEATURE ) & & ! ENDSTOP_NOISE_THRESHOLD <nl> + # if ENABLED ( ENDSTOP_INTERRUPTS_FEATURE ) <nl> update ( ) ; <nl> # else <nl> - safe_delay ( 2 ) ; / / Wait for Temperature ISR ( runs at 1KHz ) <nl> + safe_delay ( 2 ) ; / / Wait for Temperature ISR to run at least once ( runs at 1KHz ) <nl> # endif <nl> # if ENDSTOP_NOISE_THRESHOLD <nl> while ( endstop_poll_count ) safe_delay ( 1 ) ; <nl> | Endstops fix followup ( ) | MarlinFirmware/Marlin | 98c2fc4e42eb3f0a1b2a40f2cab785f7a9f59517 | 2018-11-13T23:10:07Z |
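The Marlin guard above works because endstop state is refreshed one of two ways: immediately by update() when pin-change interrupts are enabled, or by the 1 kHz temperature ISR, in which case waiting 2 ms guarantees at least one full polling period has elapsed. A host-side model of that reasoning, with std::thread standing in for the ISR (illustration only, not Marlin code):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

std::atomic<bool> endstop_state{false};
std::atomic<bool> stop_poller{false};

void poller_1khz() {                  // stands in for the 1 kHz temperature ISR
  while (!stop_poller) {
    endstop_state = true;             // "sample the endstop pins"
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}

void resync(bool interrupt_driven) {
  if (interrupt_driven) return;       // pin-change interrupts keep state fresh
  // Worst case the poller has just run; 2 ms covers a full period plus jitter.
  std::this_thread::sleep_for(std::chrono::milliseconds(2));
}

int main() {
  std::thread t(poller_1khz);
  resync(/*interrupt_driven=*/false);
  std::printf("endstop state sampled: %d\n", endstop_state.load());
  stop_poller = true;
  t.join();
}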
mmm a / modules / opensimplex / noise_texture . cpp <nl> ppp b / modules / opensimplex / noise_texture . cpp <nl> NoiseTexture : : NoiseTexture ( ) { <nl> size = Vector2i ( 512 , 512 ) ; <nl> seamless = false ; <nl> as_normalmap = false ; <nl> + bump_strength = 1 . 0 ; / / 1 . 0 is a little low . Keep at 1 . 0 for compatibility for now . For 3 . 2 increase to 8 . 0 . <nl> flags = FLAGS_DEFAULT ; <nl> <nl> noise = Ref < OpenSimplexNoise > ( ) ; <nl> void NoiseTexture : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " set_as_normalmap " , " as_normalmap " ) , & NoiseTexture : : set_as_normalmap ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " is_normalmap " ) , & NoiseTexture : : is_normalmap ) ; <nl> <nl> + ClassDB : : bind_method ( D_METHOD ( " set_bump_strength " , " bump_strength " ) , & NoiseTexture : : set_bump_strength ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_bump_strength " ) , & NoiseTexture : : get_bump_strength ) ; <nl> + <nl> ClassDB : : bind_method ( D_METHOD ( " _update_texture " ) , & NoiseTexture : : _update_texture ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " _generate_texture " ) , & NoiseTexture : : _generate_texture ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " _thread_done " , " image " ) , & NoiseTexture : : _thread_done ) ; <nl> void NoiseTexture : : _bind_methods ( ) { <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : INT , " height " , PROPERTY_HINT_RANGE , " 1 , 2048 , 1 , or_greater " ) , " set_height " , " get_height " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " seamless " ) , " set_seamless " , " get_seamless " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " as_normalmap " ) , " set_as_normalmap " , " is_normalmap " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " bump_strength " , PROPERTY_HINT_RANGE , " 0 , 32 , 0 . 1 , or_greater " ) , " set_bump_strength " , " get_bump_strength " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " noise " , PROPERTY_HINT_RESOURCE_TYPE , " OpenSimplexNoise " ) , " set_noise " , " get_noise " ) ; <nl> } <nl> <nl> + void NoiseTexture : : _validate_property ( PropertyInfo & property ) const { <nl> + <nl> + if ( property . name = = " bump_strength " ) { <nl> + if ( ! as_normalmap ) { <nl> + property . usage = PROPERTY_USAGE_NOEDITOR | PROPERTY_USAGE_INTERNAL ; <nl> + } <nl> + } <nl> + } <nl> + <nl> void NoiseTexture : : _set_texture_data ( const Ref < Image > & p_image ) { <nl> data = p_image ; <nl> if ( data . is_valid ( ) ) { <nl> Ref < Image > NoiseTexture : : _generate_texture ( ) { <nl> } <nl> <nl> if ( as_normalmap ) { <nl> - image - > bumpmap_to_normalmap ( ) ; <nl> + image - > bumpmap_to_normalmap ( bump_strength ) ; <nl> } <nl> <nl> return image ; <nl> void NoiseTexture : : set_as_normalmap ( bool p_as_normalmap ) { <nl> if ( p_as_normalmap = = as_normalmap ) return ; <nl> as_normalmap = p_as_normalmap ; <nl> _queue_update ( ) ; <nl> + _change_notify ( ) ; <nl> } <nl> <nl> bool NoiseTexture : : is_normalmap ( ) { <nl> return as_normalmap ; <nl> } <nl> <nl> + void NoiseTexture : : set_bump_strength ( float p_bump_strength ) { <nl> + <nl> + if ( p_bump_strength = = bump_strength ) return ; <nl> + bump_strength = p_bump_strength ; <nl> + if ( as_normalmap ) <nl> + _queue_update ( ) ; <nl> + } <nl> + <nl> + float NoiseTexture : : get_bump_strength ( ) { <nl> + <nl> + return bump_strength ; <nl> + } <nl> + <nl> int NoiseTexture : : get_width ( ) const { <nl> <nl> return size . x ; <nl> mmm a / modules / opensimplex / noise_texture . 
h <nl> ppp b / modules / opensimplex / noise_texture . h <nl> class NoiseTexture : public Texture { <nl> Vector2i size ; <nl> bool seamless ; <nl> bool as_normalmap ; <nl> + float bump_strength ; <nl> <nl> void _thread_done ( const Ref < Image > & p_image ) ; <nl> static void _thread_function ( void * p_ud ) ; <nl> class NoiseTexture : public Texture { <nl> <nl> protected : <nl> static void _bind_methods ( ) ; <nl> + virtual void _validate_property ( PropertyInfo & property ) const ; <nl> <nl> public : <nl> void set_noise ( Ref < OpenSimplexNoise > p_noise ) ; <nl> class NoiseTexture : public Texture { <nl> void set_as_normalmap ( bool p_seamless ) ; <nl> bool is_normalmap ( ) ; <nl> <nl> + void set_bump_strength ( float p_bump_strength ) ; <nl> + float get_bump_strength ( ) ; <nl> + <nl> int get_width ( ) const ; <nl> int get_height ( ) const ; <nl> <nl> | added bump_strength to noisetexture | godotengine/godot | 44b71a22ff99cba159385be91fd3c3971024f7fa | 2019-03-03T20:33:44Z |
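The diff above is Godot 3.x's standard recipe for an inspector property that only appears while a related flag is on; condensed from the hunks above, using no API beyond what the commit itself uses:

void NoiseTexture::_validate_property(PropertyInfo &property) const {
    if (property.name == "bump_strength" && !as_normalmap)
        property.usage = PROPERTY_USAGE_NOEDITOR | PROPERTY_USAGE_INTERNAL;  // still saved, just hidden
}

void NoiseTexture::set_as_normalmap(bool p_as_normalmap) {
    if (p_as_normalmap == as_normalmap) return;
    as_normalmap = p_as_normalmap;
    _queue_update();   // the generated image changes
    _change_notify();  // makes the inspector re-run _validate_property()
}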
new file mode 100644 <nl> index 000000000000 . . 4fe7e36dad78 <nl> mmm / dev / null <nl> ppp b / db / btree . cpp <nl> <nl> + / / btree . cpp <nl> + <nl> + # include " stdafx . h " <nl> + # include " btree . h " <nl> + # include " pdfile . h " <nl> + <nl> + / * it is easy to do custom sizes for a namespace - all the same for now * / <nl> + const int BucketSize = 8192 ; <nl> + const int KeyMax = BucketSize / 8 ; <nl> + <nl> + inline KeyNode : : KeyNode ( BucketBasics & bb , _KeyNode & k ) : <nl> + prevChildBucket ( k . prevChildBucket ) , recordLoc ( k . recordLoc ) , key ( bb . data + k . keyDataOfs ) { } <nl> + <nl> + / * - BucketBasics mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm * / <nl> + <nl> + inline void BucketBasics : : setNotPacked ( ) { flags & = ~ Packed ; } <nl> + inline void BucketBasics : : setPacked ( ) { flags | = Packed ; } <nl> + <nl> + int BucketBasics : : totalDataSize ( ) const { <nl> + return Size - ( data - ( char * ) this ) ; <nl> + } <nl> + <nl> + void BucketBasics : : init ( ) { <nl> + parent . Null ( ) ; nextChild . Null ( ) ; <nl> + Size = BucketSize ; <nl> + flags = Packed ; <nl> + n = 0 ; <nl> + emptySize = totalDataSize ( ) ; <nl> + reserved = 0 ; <nl> + } <nl> + <nl> + / * we allocate space from the end of the buffer for data . <nl> + the keynodes grow from the front . <nl> + * / <nl> + inline int BucketBasics : : _alloc ( int bytes ) { <nl> + int ofs = emptySize - bytes ; <nl> + assert ( ofs > = 0 ) ; <nl> + emptySize - = bytes ; <nl> + return ofs ; <nl> + } <nl> + <nl> + void BucketBasics : : del ( int keypos ) { <nl> + assert ( keypos > = 0 & & keypos < = n ) ; <nl> + n - - ; <nl> + for ( int j = keypos ; j < n ; j + + ) <nl> + k ( j ) = k ( j + 1 ) ; <nl> + emptySize + = sizeof ( _KeyNode ) ; <nl> + setNotPacked ( ) ; <nl> + } <nl> + <nl> + / * add a key . must be < all existing * / <nl> + void BucketBasics : : pushFront ( const DiskLoc & recordLoc , JSObj & key , DiskLoc prevChild ) { <nl> + int bytesNeeded = key . objsize ( ) + sizeof ( _KeyNode ) ; <nl> + assert ( bytesNeeded < = emptySize ) ; <nl> + for ( int j = n ; j > 0 ; j - - ) / / make room <nl> + k ( j ) = k ( j - 1 ) ; <nl> + n + + ; <nl> + emptySize - = sizeof ( _KeyNode ) ; <nl> + _KeyNode & kn = k ( 0 ) ; <nl> + kn . prevChildBucket = prevChild ; <nl> + kn . recordLoc = recordLoc ; <nl> + kn . keyDataOfs = ( short ) _alloc ( key . objsize ( ) ) ; <nl> + char * p = dataAt ( kn . keyDataOfs ) ; <nl> + memcpy ( p , key . objdata ( ) , key . objsize ( ) ) ; <nl> + } <nl> + <nl> + bool BucketBasics : : basicInsert ( int keypos , const DiskLoc & recordLoc , JSObj & key ) { <nl> + assert ( keypos > = 0 & & keypos < = n ) ; <nl> + int bytesNeeded = key . objsize ( ) + sizeof ( _KeyNode ) ; <nl> + if ( bytesNeeded > emptySize ) { <nl> + pack ( ) ; <nl> + if ( bytesNeeded > emptySize ) <nl> + return false ; <nl> + } <nl> + for ( int j = n ; j > keypos ; j - - ) / / make room <nl> + k ( j ) = k ( j - 1 ) ; <nl> + n + + ; <nl> + emptySize - = sizeof ( _KeyNode ) ; <nl> + _KeyNode & kn = k ( keypos ) ; <nl> + kn . prevChildBucket . Null ( ) ; <nl> + kn . recordLoc = recordLoc ; <nl> + kn . keyDataOfs = ( short ) _alloc ( key . objsize ( ) ) ; <nl> + char * p = dataAt ( kn . keyDataOfs ) ; <nl> + memcpy ( p , key . objdata ( ) , key . objsize ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + / * when we delete things we just leave empty space until the node is <nl> + full and then we repack it . 
<nl> + * / <nl> + void BucketBasics : : pack ( ) { <nl> + if ( flags & Packed ) <nl> + return ; <nl> + <nl> + int keysz = n * sizeof ( _KeyNode ) ; <nl> + int left = totalDataSize ( ) - keysz ; <nl> + for ( int j = n - 1 ; j > = 0 ; j - - ) { <nl> + short ofsold = k ( j ) . keyDataOfs ; <nl> + int sz = keyNode ( j ) . key . objsize ( ) ; <nl> + short ofsnew = keysz + left - sz ; <nl> + if ( ofsold ! = ofsnew ) { <nl> + memmove ( dataAt ( ofsnew ) , dataAt ( ofsold ) , sz ) ; <nl> + k ( j ) . keyDataOfs = ofsnew ; <nl> + } <nl> + left - = sz ; <nl> + } <nl> + assert ( left > = 0 ) ; <nl> + emptySize = left ; <nl> + <nl> + setPacked ( ) ; <nl> + } <nl> + <nl> + inline void BucketBasics : : truncateTo ( int N ) { <nl> + n = N ; <nl> + int sz = 0 ; <nl> + for ( int i = 0 ; i < n ; i + + ) <nl> + sz + = sizeof ( _KeyNode ) + keyNode ( i ) . key . objsize ( ) ; <nl> + emptySize = totalDataSize ( ) - sz ; <nl> + assert ( emptySize > = 0 ) ; <nl> + } <nl> + <nl> + / * - BtreeBucket mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm * / <nl> + <nl> + / * pos : for existing keys k0 . . . kn - 1 . <nl> + returns # it goes BEFORE . so pos = 0 - > newkey < k0 . <nl> + returns n if it goes after the last existing key . <nl> + * / <nl> + bool BtreeBucket : : find ( JSObj & key , int & pos ) { <nl> + / * binary search for this key * / <nl> + int l = 0 ; int h = n - 1 ; <nl> + while ( l < = h ) { <nl> + int m = ( l + h ) / 2 ; <nl> + KeyNode M = keyNode ( m ) ; <nl> + int x = key . woCompare ( M . key ) ; <nl> + if ( x < 0 ) <nl> + h = m - 1 ; <nl> + else if ( x > 0 ) <nl> + l = m + 1 ; <nl> + else { <nl> + pos = m ; <nl> + return true ; <nl> + } <nl> + } <nl> + / / not found <nl> + pos = l ; <nl> + return false ; <nl> + } <nl> + <nl> + BtreeBucket * BtreeBucket : : allocTemp ( ) { <nl> + BtreeBucket * b = ( BtreeBucket * ) malloc ( BucketSize ) ; <nl> + b - > init ( ) ; <nl> + return b ; <nl> + } <nl> + <nl> + void BtreeBucket : : insertHere ( const DiskLoc & thisLoc , const char * ns , int keypos , <nl> + const DiskLoc & recordLoc , JSObj & key , <nl> + DiskLoc lchild , DiskLoc rchild ) { <nl> + if ( basicInsert ( keypos , recordLoc , key ) ) { <nl> + _KeyNode & kn = k ( keypos ) ; <nl> + if ( keypos + 1 = = n ) { / / last key <nl> + kn . prevChildBucket = nextChild ; <nl> + nextChild = rchild ; <nl> + assert ( kn . prevChildBucket = = lchild ) ; <nl> + } <nl> + else { <nl> + k ( keypos ) . prevChildBucket = lchild ; <nl> + assert ( k ( keypos + 1 ) . prevChildBucket = = lchild ) ; <nl> + k ( keypos + 1 ) . prevChildBucket = rchild ; <nl> + } <nl> + return ; <nl> + } <nl> + <nl> + / / split ! <nl> + BtreeBucket * r = allocTemp ( ) ; <nl> + DiskLoc rLoc ; <nl> + int mid = n / 2 ; <nl> + for ( int i = n - 1 ; i > mid ; i - - ) { <nl> + KeyNode kn = keyNode ( i ) ; <nl> + r - > pushFront ( kn . recordLoc , kn . key , kn . prevChildBucket ) ; <nl> + } <nl> + rLoc = theDataFileMgr . insert ( ns , r , r - > Size , true ) ; <nl> + free ( r ) ; r = 0 ; <nl> + KeyNode middle = keyNode ( mid ) ; <nl> + truncateTo ( mid ) ; / / mark on left that we no longer have anything from midpoint on . <nl> + nextChild = middle .
prevChildBucket ; <nl> + <nl> + / / add our new key , there is room now <nl> + { <nl> + if ( keypos < mid ) { <nl> + insertHere ( thisLoc , ns , keypos , recordLoc , key , lchild , rchild ) ; <nl> + } else { <nl> + int kp = keypos - mid - 1 ; assert ( kp > = 0 ) ; <nl> + insertHere ( rLoc , ns , kp , recordLoc , key , lchild , rchild ) ; <nl> + } <nl> + } <nl> + <nl> + / / promote middle to a parent node <nl> + { <nl> + if ( parent . isNull ( ) ) { <nl> + / / make a new parent if we were the root <nl> + BtreeBucket * p = allocTemp ( ) ; <nl> + p - > pushFront ( middle . recordLoc , middle . key , thisLoc ) ; <nl> + p - > nextChild = rLoc ; <nl> + theDataFileMgr . insert ( ns , p , p - > Size , true ) ; <nl> + free ( p ) ; <nl> + / / set location of new head ! xxx <nl> + } <nl> + else { <nl> + parent . btree ( ) - > _insert ( parent , ns , middle . recordLoc , middle . key , false , thisLoc , rLoc ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + DiskLoc BtreeBucket : : addHead ( const char * ns ) { <nl> + BtreeBucket * p = allocTemp ( ) ; <nl> + DiskLoc loc = theDataFileMgr . insert ( ns , p , p - > Size , true ) ; <nl> + return loc ; <nl> + } <nl> + <nl> + / * thisloc is the location of this bucket object . you must pass that in . * / <nl> + int BtreeBucket : : _insert ( const DiskLoc & thisLoc , const char * ns , const DiskLoc & recordLoc , <nl> + JSObj & key , bool dupsAllowed , <nl> + DiskLoc lChild , DiskLoc rChild ) { <nl> + if ( key . objsize ( ) > KeyMax ) { <nl> + cout < < " ERROR : key too large len : " < < key . objsize ( ) < < " max : " < < KeyMax < < endl ; <nl> + return 2 ; <nl> + } <nl> + <nl> + int pos ; <nl> + bool found = find ( key , pos ) ; <nl> + if ( found ) { <nl> + / / todo : support dup keys <nl> + cout < < " dup key failing " < < endl ; <nl> + return 1 ; <nl> + } <nl> + <nl> + DiskLoc & child = getChild ( pos ) ; <nl> + if ( child . isNull ( ) | | ! rChild . isNull ( ) ) { <nl> + insertHere ( thisLoc , ns , pos , recordLoc , key , lChild , rChild ) ; <nl> + return 0 ; <nl> + } <nl> + <nl> + return child . btree ( ) - > insert ( child , ns , recordLoc , key , dupsAllowed ) ; <nl> + } <nl> + <nl> + int BtreeBucket : : insert ( const DiskLoc & thisLoc , const char * ns , const DiskLoc & recordLoc , <nl> + JSObj & key , bool dupsAllowed ) <nl> + { <nl> + return _insert ( thisLoc , ns , recordLoc , key , dupsAllowed , DiskLoc ( ) , DiskLoc ( ) ) ; <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . d2e80dfdafe9 <nl> mmm / dev / null <nl> ppp b / db / btree . h <nl> <nl> + / / btree . h <nl> + <nl> + # pragma once <nl> + <nl> + # include " . . / stdafx . h " <nl> + # include " jsobj . h " <nl> + # include " storage . h " <nl> + <nl> + # pragma pack ( push ) <nl> + # pragma pack ( 1 ) <nl> + <nl> + struct _KeyNode { <nl> + DiskLoc prevChildBucket ; <nl> + DiskLoc recordLoc ; <nl> + short keyDataOfs ; <nl> + } ; <nl> + <nl> + # pragma pack ( pop ) <nl> + <nl> + class BucketBasics ; <nl> + <nl> + / * wrapper - this is our in memory representation of the key . _KeyNode is the disk representation . 
* / <nl> + class KeyNode { <nl> + public : <nl> + KeyNode ( BucketBasics & bb , _KeyNode & k ) ; <nl> + DiskLoc & prevChildBucket ; <nl> + DiskLoc & recordLoc ; <nl> + JSObj key ; <nl> + } ; <nl> + <nl> + # pragma pack ( push ) <nl> + # pragma pack ( 1 ) <nl> + <nl> + / * this class is all about the storage management * / <nl> + class BucketBasics { <nl> + friend class KeyNode ; <nl> + public : <nl> + <nl> + protected : <nl> + DiskLoc & getChild ( int pos ) { <nl> + assert ( pos > = 0 & & pos < = n ) ; <nl> + return pos = = n ? nextChild : k ( pos ) . prevChildBucket ; <nl> + } <nl> + KeyNode keyNode ( int i ) { <nl> + assert ( i < n ) ; <nl> + return KeyNode ( * this , k ( i ) ) ; <nl> + } <nl> + <nl> + char * dataAt ( short ofs ) { return data + ofs ; } <nl> + <nl> + void init ( ) ; / / initialize a new node <nl> + <nl> + / * returns false if node is full and must be split <nl> + keypos is where to insert - - inserted after that key # . so keypos = 0 is the leftmost one . <nl> + * / <nl> + bool basicInsert ( int keypos , const DiskLoc & recordLoc , JSObj & key ) ; <nl> + void pushFront ( const DiskLoc & recordLoc , JSObj & key , DiskLoc prevChild ) ; <nl> + void del ( int keypos ) ; <nl> + <nl> + enum Flags { Packed = 1 } ; <nl> + <nl> + int totalDataSize ( ) const ; <nl> + void pack ( ) ; void setNotPacked ( ) ; void setPacked ( ) ; <nl> + int _alloc ( int bytes ) ; <nl> + void truncateTo ( int N ) ; <nl> + <nl> + DiskLoc parent ; <nl> + DiskLoc nextChild ; / / the next bucket <nl> + int Size ; / / total size of this btree node in bytes . constant . <nl> + int flags ; <nl> + int emptySize ; <nl> + int n ; / / # of keys so far . <nl> + int reserved ; <nl> + _KeyNode & k ( int i ) { return ( ( _KeyNode * ) data ) [ i ] ; } <nl> + char data [ 4 ] ; <nl> + } ; <nl> + <nl> + class BtreeBucket : public BucketBasics { <nl> + public : <nl> + / * rc : 0 = ok * / <nl> + static DiskLoc addHead ( const char * ns ) ; / * start a new index off , empty * / <nl> + int insert ( const DiskLoc & thisLoc , const char * ns , const DiskLoc & recordLoc , <nl> + JSObj & key , bool dupsAllowed ) ; <nl> + void update ( const DiskLoc & recordLoc , JSObj & key ) ; <nl> + bool del ( JSObj & key ) ; <nl> + private : <nl> + static BtreeBucket * allocTemp ( ) ; / * caller must release with free ( ) * / <nl> + void insertHere ( const DiskLoc & thisLoc , const char * ns , int keypos , <nl> + const DiskLoc & recordLoc , JSObj & key , <nl> + DiskLoc lchild , DiskLoc rchild ) ; <nl> + int _insert ( const DiskLoc & thisLoc , const char * ns , const DiskLoc & recordLoc , <nl> + JSObj & key , bool dupsAllowed , <nl> + DiskLoc lChild , DiskLoc rChild ) ; <nl> + bool find ( JSObj & key , int & pos ) ; <nl> + } ; <nl> + <nl> + # pragma pack ( pop ) <nl> new file mode 100644 <nl> index 000000000000 . . afd3a5b479dc <nl> mmm / dev / null <nl> ppp b / db / objwrappers . h <nl> <nl> + / / objwrappers . h <nl> + <nl> + # pragma once <nl> + <nl> + # include " . . / stdafx . h " <nl> + # include " jsobj . h " <nl> + <nl> + <nl> + <nl> | new files | mongodb/mongo | 895aac4e1f34e108d4b8c8c73ebc0e05880f452f | 2007-11-09T02:43:31Z |
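A self-contained sketch (not from the commit) of the search contract BtreeBucket::find documents above: on a miss, pos is the slot the key belongs BEFORE, which is exactly what _insert needs to choose a child bucket to descend into:

#include <cassert>
#include <vector>

bool find_pos(const std::vector<int> &keys, int key, int &pos) {
    int l = 0, h = (int)keys.size() - 1;
    while (l <= h) {                       // same binary search as BtreeBucket::find
        int m = (l + h) / 2;
        if (key < keys[m])      h = m - 1;
        else if (key > keys[m]) l = m + 1;
        else { pos = m; return true; }     // exact match
    }
    pos = l;  // not found: key goes before keys[pos]; pos == n means after the last key
    return false;
}

int main() {
    std::vector<int> keys = {10, 20, 30};
    int pos;
    assert(find_pos(keys, 20, pos) && pos == 1);
    assert(!find_pos(keys, 25, pos) && pos == 2);  // would be inserted before 30
    assert(!find_pos(keys, 40, pos) && pos == 3);  // past the last key
}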
mmm a / libraries / chain / chain_controller . cpp <nl> ppp b / libraries / chain / chain_controller . cpp <nl> static void record_locks_for_data_access ( transaction_trace & trace , flat_set < shar <nl> } <nl> <nl> / / remove read locks for write locks taken by other actions <nl> - trace . read_locks . erase ( trace . write_locks . begin ( ) , trace . write_locks . end ( ) ) ; <nl> - read_locks . erase ( trace . write_locks . begin ( ) , trace . write_locks . end ( ) ) ; <nl> + std : : for_each ( trace . write_locks . begin ( ) , trace . write_locks . end ( ) , [ & ] ( const shard_lock & l ) { <nl> + trace . read_locks . erase ( l ) ; read_locks . erase ( l ) ; <nl> + } ) ; <nl> <nl> read_locks . insert ( trace . read_locks . begin ( ) , trace . read_locks . end ( ) ) ; <nl> write_locks . insert ( trace . write_locks . begin ( ) , trace . write_locks . end ( ) ) ; <nl> void chain_controller : : update_resource_usage ( transaction_trace & trace , const tr <nl> / / enforce that the system controlled per tx limits are not violated <nl> EOS_ASSERT ( trace . cpu_usage < = chain_configuration . max_transaction_cpu_usage , <nl> tx_resource_exhausted , " Transaction exceeds the maximum cpu usage [ used : $ { used } , max : $ { max } ] " , <nl> - ( " usage " , trace . cpu_usage ) ( " max " , chain_configuration . max_transaction_cpu_usage ) ) ; <nl> + ( " used " , trace . cpu_usage ) ( " max " , chain_configuration . max_transaction_cpu_usage ) ) ; <nl> <nl> EOS_ASSERT ( trace . net_usage < = chain_configuration . max_transaction_net_usage , <nl> tx_resource_exhausted , " Transaction exceeds the maximum net usage [ used : $ { used } , max : $ { max } ] " , <nl> - ( " usage " , trace . net_usage ) ( " max " , chain_configuration . max_transaction_net_usage ) ) ; <nl> + ( " used " , trace . net_usage ) ( " max " , chain_configuration . max_transaction_net_usage ) ) ; <nl> <nl> / / determine the accounts to bill <nl> set < std : : pair < account_name , permission_name > > authorizations ; <nl> mmm a / libraries / chain / include / eosio / chain / config . hpp <nl> ppp b / libraries / chain / include / eosio / chain / config . hpp <nl> static const uint32_t block_size_average_window_ms = 60 * 1000l ; <nl> const static uint32_t default_max_block_size = 1024 * 1024 ; / / / at 500ms blocks and 200byte trx , this enables ~ 10 , 000 TPS burst <nl> const static uint32_t default_target_block_size = default_max_block_size / 10 ; / / / we target 1000 TPS <nl> <nl> - const static uint32_t default_max_block_cpu_usage = 10 * 1024 * 1024 ; / / / at 500ms blocks and 2000instr trx , this enables ~ 10 , 000 TPS burst <nl> + const static uint32_t default_max_block_cpu_usage = 100 * 1024 * 1024 ; / / / at 500ms blocks and 20000instr trx , this enables ~ 10 , 000 TPS burst <nl> const static uint32_t default_target_block_cpu_usage = default_max_block_cpu_usage / 10 ; / / / target 1000 TPS <nl> <nl> const static uint64_t default_max_storage_size = 10 * 1024 ; <nl> | - set more reasonable defaults considering the max trx cpu usage was less than the setcode penalty . . . the smartest contract is the one that does not have to exist | EOSIO/eos | 09d645b3c17ff6fa0272b9548b4364d97389c8da | 2018-03-30T14:06:24Z |
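The first hunk above fixes an iterator-validity bug: erase(first, last) requires iterators into the container being erased from, and trace.write_locks' iterators do not point into read_locks. A minimal sketch of the bug and the fix, with std::set standing in for the flat_set the commit uses:

#include <cassert>
#include <set>

int main() {
    std::set<int> read_locks  = {1, 2, 3, 4};
    std::set<int> write_locks = {2, 4};

    // WRONG: read_locks.erase(write_locks.begin(), write_locks.end());
    // those iterators belong to write_locks, so the call is undefined behavior.

    for (int l : write_locks)   // the fix: erase element by element, by value
        read_locks.erase(l);

    assert(read_locks == std::set<int>({1, 3}));
}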
mmm a / admin / static / coffee / namespaces / index . coffee <nl> ppp b / admin / static / coffee / namespaces / index . coffee <nl> module ' NamespaceView ' , - > <nl> need_to_increase = true <nl> switch formdata . protocol <nl> when " Memcached " <nl> - formdata . port = 11211 <nl> + formdata . port = 11213 <nl> when " Redis " <nl> formdata . port = 6379 <nl> when " Riak " <nl> formdata . port = 8098 <nl> when " MongoDB " <nl> formdata . port = 27017 <nl> - if need_to_increase is true <nl> - used_ports = { } <nl> - for namespace in namespaces . models <nl> - used_ports [ namespace . get ( ' port ' ) ] = true <nl> - while formdata . port of used_ports <nl> - formdata . port + + <nl> + if need_to_increase is true <nl> + used_ports = { } <nl> + for namespace in namespaces . models <nl> + used_ports [ namespace . get ( ' port ' ) ] = true <nl> + while formdata . port of used_ports <nl> + formdata . port + = 123 <nl> else if @ isnt_integer ( formdata . port ) <nl> input_error = true <nl> template_error . port_isnt_integer = true <nl> | Fix indentation bug | rethinkdb/rethinkdb | 6426eb94166d468fcaae0c790a504fbeb9c8807e | 2012-06-07T16:20:41Z |
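A short sketch (in C++ rather than the CoffeeScript above) of the auto-port assignment the hunk implements: start from the protocol default and step past every port already taken by an existing namespace:

#include <cassert>
#include <set>

int next_free_port(int port, const std::set<int> &used) {
    while (used.count(port))
        port += 123;  // the stride the commit settles on between namespaces
    return port;
}

int main() {
    std::set<int> used = {11213, 11336};           // ports of existing namespaces
    assert(next_free_port(11213, used) == 11459);  // two occupied steps past the default
}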
mmm a / tools / export / blender25 / godot_export_manager . py <nl> ppp b / tools / export / blender25 / godot_export_manager . py <nl> <nl> from bpy . app . handlers import persistent <nl> from mathutils import Vector , Matrix <nl> <nl> + <nl> class godot_export_manager ( bpy . types . Panel ) : <nl> bl_label = " Godot Export Manager " <nl> bl_space_type = ' PROPERTIES ' <nl> def draw ( self , context ) : <nl> <nl> op = col . operator ( " scene . godot_delete_objects_from_group " , text = " Delete selected objects from Group " , icon = " PASTEDOWN " ) <nl> <nl> - <nl> - <nl> row = layout . row ( ) <nl> col = row . column ( ) <nl> col . label ( text = " Export Groups : " ) <nl> <nl> - <nl> row = layout . row ( ) <nl> col = row . column ( ) <nl> <nl> def draw ( self , context ) : <nl> col . prop ( group , " anim_optimize_precision " ) <nl> col . prop ( group , " use_metadata " ) <nl> <nl> + <nl> # # # Custom template_list look <nl> class UI_List_Godot ( bpy . types . UIList ) : <nl> def draw_item ( self , context , layout , data , item , icon , active_data , active_propname , index ) : <nl> def execute ( self , context ) : <nl> else : <nl> objects_str + = " , " + object . name <nl> <nl> - <nl> self . report ( { ' INFO ' } , objects_str + " added to group . " ) <nl> if self . undo : <nl> bpy . ops . ed . undo_push ( message = " Objects added to group " ) <nl> def execute ( self , context ) : <nl> if node . name in selected_objects : <nl> scene . godot_export_groups [ scene . godot_export_groups_index ] . nodes . remove ( i ) <nl> <nl> - <nl> if j = = 0 : <nl> objects_str + = object . name <nl> else : <nl> objects_str + = " , " + object . name <nl> j + = 1 <nl> <nl> - <nl> self . report ( { ' INFO ' } , objects_str + " deleted from group . " ) <nl> bpy . ops . ed . undo_push ( message = " Objects deleted from group " ) <nl> else : <nl> def execute ( self , context ) : <nl> context . scene . objects . active = bpy . data . objects [ node . name ] <nl> return { ' FINISHED ' } <nl> <nl> + <nl> class export_groups_autosave ( bpy . types . Operator ) : <nl> bl_idname = " scene . godot_export_groups_autosave " <nl> bl_label = " Export All Groups " <nl> def execute ( self , context ) : <nl> bpy . ops . ed . undo_push ( message = " Export all Groups " ) <nl> return { ' FINISHED ' } <nl> <nl> + <nl> class export_all_groups ( bpy . types . Operator ) : <nl> bl_idname = " scene . godot_export_all_groups " <nl> bl_label = " Export All Groups " <nl> class export_group ( bpy . types . Operator ) : <nl> idx = IntProperty ( default = 0 ) <nl> export_all = BoolProperty ( default = False ) <nl> <nl> - <nl> def copy_object_recursive ( self , ob , parent , single_user = True ) : <nl> new_ob = bpy . data . objects [ ob . name ] . copy ( ) <nl> if single_user or ob . type = = " ARMATURE " : <nl> def execute ( self , context ) : <nl> <nl> context . scene . layers = [ True , True , True , True , True , True , True , True , True , True , True , True , True , True , True , True , True , True , True , True ] <nl> <nl> - <nl> if group [ self . idx ] . export_name . endswith ( " . dae " ) : <nl> path = os . path . join ( path , group [ self . idx ] . export_name ) <nl> else : <nl> def execute ( self , context ) : <nl> self . report ( { ' INFO ' } , " Define Export Name and Export Path . " ) <nl> return { ' FINISHED ' } <nl> <nl> + <nl> class add_export_group ( bpy . types . Operator ) : <nl> bl_idname = " scene . 
godot_add_export_group " <nl> bl_label = " Adds a new export Group " <nl> def execute ( self , context ) : <nl> bpy . ops . ed . undo_push ( message = " Create New Export Group " ) <nl> return { ' FINISHED ' } <nl> <nl> + <nl> class del_export_group ( bpy . types . Operator ) : <nl> bl_idname = " scene . godot_delete_export_group " <nl> bl_label = " Delets the selected export Group " <nl> def execute ( self , context ) : <nl> bpy . ops . ed . undo_push ( message = " Delete Export Group " ) <nl> return { ' FINISHED ' } <nl> <nl> + <nl> class godot_node_list ( bpy . types . PropertyGroup ) : <nl> name = StringProperty ( ) <nl> <nl> + <nl> class godot_export_groups ( bpy . types . PropertyGroup ) : <nl> name = StringProperty ( name = " Group Name " ) <nl> export_name = StringProperty ( name = " scene_name " ) <nl> class godot_export_groups ( bpy . types . PropertyGroup ) : <nl> use_metadata = BoolProperty ( name = " Use Metadata " , default = True , options = { ' HIDDEN ' } ) <nl> use_include_particle_duplicates = BoolProperty ( name = " Include Particle Duplicates " , default = True ) <nl> <nl> + <nl> def register ( ) : <nl> bpy . utils . register_class ( godot_export_manager ) <nl> bpy . utils . register_class ( godot_node_list ) <nl> def register ( ) : <nl> bpy . types . Scene . godot_export_groups = CollectionProperty ( type = godot_export_groups ) <nl> bpy . types . Scene . godot_export_groups_index = IntProperty ( default = 0 , min = 0 ) <nl> <nl> + <nl> def unregister ( ) : <nl> bpy . utils . unregister_class ( godot_export_manager ) <nl> bpy . utils . unregister_class ( godot_node_list ) <nl> def unregister ( ) : <nl> bpy . utils . unregister_class ( select_group_objects ) <nl> bpy . utils . unregister_class ( UI_List_Godot ) <nl> <nl> + <nl> @ persistent <nl> def auto_export ( dummy ) : <nl> bpy . ops . scene . godot_export_groups_autosave ( ) <nl> mmm a / tools / export / blender25 / io_scene_dae / __init__ . py <nl> ppp b / tools / export / blender25 / io_scene_dae / __init__ . py <nl> class ExportDAE ( bpy . types . Operator , ExportHelper ) : <nl> # List of operator properties , the attributes will be assigned <nl> # to the class instance from the operator settings before calling . <nl> <nl> - <nl> object_types = EnumProperty ( <nl> - name = " Object Types " , <nl> - options = { ' ENUM_FLAG ' } , <nl> - items = ( ( ' EMPTY ' , " Empty " , " " ) , <nl> - ( ' CAMERA ' , " Camera " , " " ) , <nl> - ( ' LAMP ' , " Lamp " , " " ) , <nl> - ( ' ARMATURE ' , " Armature " , " " ) , <nl> - ( ' MESH ' , " Mesh " , " " ) , <nl> - ( ' CURVE ' , " Curve " , " " ) , <nl> - ) , <nl> - default = { ' EMPTY ' , ' CAMERA ' , ' LAMP ' , ' ARMATURE ' , ' MESH ' , ' CURVE ' } , <nl> - ) <nl> + name = " Object Types " , <nl> + options = { ' ENUM_FLAG ' } , <nl> + items = ( ( ' EMPTY ' , " Empty " , " " ) , <nl> + ( ' CAMERA ' , " Camera " , " " ) , <nl> + ( ' LAMP ' , " Lamp " , " " ) , <nl> + ( ' ARMATURE ' , " Armature " , " " ) , <nl> + ( ' MESH ' , " Mesh " , " " ) , <nl> + ( ' CURVE ' , " Curve " , " " ) , <nl> + ) , <nl> + default = { ' EMPTY ' , ' CAMERA ' , ' LAMP ' , ' ARMATURE ' , ' MESH ' , ' CURVE ' } , <nl> + ) <nl> <nl> use_export_selected = BoolProperty ( <nl> - name = " Selected Objects " , <nl> - description = " Export only selected objects ( and visible in active layers if that applies ) . " , <nl> - default = False , <nl> - ) <nl> + name = " Selected Objects " , <nl> + description = " Export only selected objects ( and visible in active layers if that applies ) . 
" , <nl> + default = False , <nl> + ) <nl> use_mesh_modifiers = BoolProperty ( <nl> - name = " Apply Modifiers " , <nl> - description = " Apply modifiers to mesh objects ( on a copy ! ) . " , <nl> - default = False , <nl> - ) <nl> + name = " Apply Modifiers " , <nl> + description = " Apply modifiers to mesh objects ( on a copy ! ) . " , <nl> + default = False , <nl> + ) <nl> use_tangent_arrays = BoolProperty ( <nl> - name = " Tangent Arrays " , <nl> - description = " Export Tangent and Binormal arrays ( for normalmapping ) . " , <nl> - default = False , <nl> - ) <nl> + name = " Tangent Arrays " , <nl> + description = " Export Tangent and Binormal arrays ( for normalmapping ) . " , <nl> + default = False , <nl> + ) <nl> use_triangles = BoolProperty ( <nl> - name = " Triangulate " , <nl> - description = " Export Triangles instead of Polygons . " , <nl> - default = False , <nl> - ) <nl> - <nl> + name = " Triangulate " , <nl> + description = " Export Triangles instead of Polygons . " , <nl> + default = False , <nl> + ) <nl> use_copy_images = BoolProperty ( <nl> - name = " Copy Images " , <nl> - description = " Copy Images ( create images / subfolder ) " , <nl> - default = False , <nl> - ) <nl> + name = " Copy Images " , <nl> + description = " Copy Images ( create images / subfolder ) " , <nl> + default = False , <nl> + ) <nl> use_active_layers = BoolProperty ( <nl> - name = " Active Layers " , <nl> - description = " Export only objects on the active layers . " , <nl> - default = True , <nl> - ) <nl> + name = " Active Layers " , <nl> + description = " Export only objects on the active layers . " , <nl> + default = True , <nl> + ) <nl> use_anim = BoolProperty ( <nl> - name = " Export Animation " , <nl> - description = " Export keyframe animation " , <nl> - default = False , <nl> - ) <nl> + name = " Export Animation " , <nl> + description = " Export keyframe animation " , <nl> + default = False , <nl> + ) <nl> use_anim_action_all = BoolProperty ( <nl> - name = " All Actions " , <nl> - description = ( " Export all actions for the first armature found in separate DAE files " ) , <nl> - default = False , <nl> - ) <nl> + name = " All Actions " , <nl> + description = ( " Export all actions for the first armature found in separate DAE files " ) , <nl> + default = False , <nl> + ) <nl> use_anim_skip_noexp = BoolProperty ( <nl> - name = " Skip ( - noexp ) Actions " , <nl> - description = " Skip exporting of actions whose name end in ( - noexp ) . Useful to skip control animations . " , <nl> - default = True , <nl> - ) <nl> + name = " Skip ( - noexp ) Actions " , <nl> + description = " Skip exporting of actions whose name end in ( - noexp ) . Useful to skip control animations . " , <nl> + default = True , <nl> + ) <nl> use_anim_optimize = BoolProperty ( <nl> - name = " Optimize Keyframes " , <nl> - description = " Remove double keyframes " , <nl> - default = True , <nl> - ) <nl> + name = " Optimize Keyframes " , <nl> + description = " Remove double keyframes " , <nl> + default = True , <nl> + ) <nl> <nl> anim_optimize_precision = FloatProperty ( <nl> - name = " Precision " , <nl> - description = ( " Tolerence for comparing double keyframes " <nl> - " ( higher for greater accuracy ) " ) , <nl> - min = 1 , max = 16 , <nl> - soft_min = 1 , soft_max = 16 , <nl> - default = 6 . 
0 , <nl> - ) <nl> + name = " Precision " , <nl> + description = ( " Tolerence for comparing double keyframes " <nl> + " ( higher for greater accuracy ) " ) , <nl> + min = 1 , max = 16 , <nl> + soft_min = 1 , soft_max = 16 , <nl> + default = 6 . 0 , <nl> + ) <nl> <nl> use_metadata = BoolProperty ( <nl> - name = " Use Metadata " , <nl> - default = True , <nl> - options = { ' HIDDEN ' } , <nl> - ) <nl> + name = " Use Metadata " , <nl> + default = True , <nl> + options = { ' HIDDEN ' } , <nl> + ) <nl> <nl> @ property <nl> def check_extension ( self ) : <nl> - return True # return self . batch_mode = = ' OFF ' <nl> + return True # return self . batch_mode = = ' OFF ' <nl> <nl> def check ( self , context ) : <nl> return True <nl> mmm a / tools / export / blender25 / io_scene_dae / export_dae . py <nl> ppp b / tools / export / blender25 / io_scene_dae / export_dae . py <nl> <nl> import bmesh <nl> from mathutils import Vector , Matrix <nl> <nl> - # according to collada spec , order matters <nl> + # according to collada spec , order matters <nl> S_ASSET = 0 <nl> S_IMGS = 1 <nl> S_FX = 2 <nl> <nl> CMP_EPSILON = 0 . 0001 <nl> <nl> def snap_tup ( tup ) : <nl> - ret = ( ) <nl> - for x in tup : <nl> - ret + = ( x - math . fmod ( x , 0 . 0001 ) , ) <nl> + ret = ( ) <nl> + for x in tup : <nl> + ret + = ( x - math . fmod ( x , 0 . 0001 ) , ) <nl> <nl> - return tup <nl> + return tup <nl> <nl> <nl> def strmtx ( mtx ) : <nl> - s = " " <nl> - for x in range ( 4 ) : <nl> - for y in range ( 4 ) : <nl> - s + = str ( mtx [ x ] [ y ] ) <nl> - s + = " " <nl> - s + = " " <nl> - return s <nl> + s = " " <nl> + for x in range ( 4 ) : <nl> + for y in range ( 4 ) : <nl> + s + = str ( mtx [ x ] [ y ] ) <nl> + s + = " " <nl> + s + = " " <nl> + return s <nl> <nl> def numarr ( a , mult = 1 . 0 ) : <nl> - s = " " <nl> - for x in a : <nl> - s + = " " + str ( x * mult ) <nl> - s + = " " <nl> - return s <nl> - <nl> - def numarr_alpha ( a , mult = 1 . 0 ) : <nl> - s = " " <nl> - for x in a : <nl> - s + = " " + str ( x * mult ) <nl> - if len ( a ) = = 3 : <nl> - s + = " 1 . 0 " <nl> - s + = " " <nl> - return s <nl> - <nl> - def strarr ( arr ) : <nl> - s = " " <nl> - for x in arr : <nl> - s + = " " + str ( x ) <nl> - s + = " " <nl> - return s <nl> - <nl> - class DaeExporter : <nl> - <nl> - def validate_id ( self , d ) : <nl> - if ( d . find ( " id - " ) = = 0 ) : <nl> - return " z " + d <nl> - return d <nl> - <nl> - <nl> - def new_id ( self , t ) : <nl> - self . last_id + = 1 <nl> - return " id - " + t + " - " + str ( self . last_id ) <nl> - <nl> - class Vertex : <nl> - <nl> - def close_to ( v ) : <nl> - if ( ( self . vertex - v . vertex ) . length ( ) > CMP_EPSILON ) : <nl> - return False <nl> - if ( ( self . normal - v . normal ) . length ( ) > CMP_EPSILON ) : <nl> - return False <nl> - if ( ( self . uv - v . uv ) . length ( ) > CMP_EPSILON ) : <nl> - return False <nl> - if ( ( self . uv2 - v . uv2 ) . length ( ) > CMP_EPSILON ) : <nl> - return False <nl> - <nl> - return True <nl> - <nl> - def get_tup ( self ) : <nl> - tup = ( self . vertex . x , self . vertex . y , self . vertex . z , self . normal . x , self . normal . y , self . normal . z ) <nl> - for t in self . uv : <nl> - tup = tup + ( t . x , t . y ) <nl> - if ( self . color ! = None ) : <nl> - tup = tup + ( self . color . x , self . color . y , self . color . z ) <nl> - if ( self . tangent ! = None ) : <nl> - tup = tup + ( self . tangent . x , self . tangent . y , self . tangent . z ) <nl> - if ( self . bitangent ! = None ) : <nl> - tup = tup + ( self . bitangent . 
x , self . bitangent . y , self . bitangent . z ) <nl> - for t in self . bones : <nl> - tup = tup + ( float ( t ) , ) <nl> - for t in self . weights : <nl> - tup = tup + ( float ( t ) , ) <nl> - <nl> - return tup <nl> - <nl> - def __init__ ( self ) : <nl> - self . vertex = Vector ( ( 0 . 0 , 0 . 0 , 0 . 0 ) ) <nl> - self . normal = Vector ( ( 0 . 0 , 0 . 0 , 0 . 0 ) ) <nl> - self . tangent = None <nl> - self . bitangent = None <nl> - self . color = None <nl> - self . uv = [ ] <nl> - self . uv2 = Vector ( ( 0 . 0 , 0 . 0 ) ) <nl> - self . bones = [ ] <nl> - self . weights = [ ] <nl> - <nl> - <nl> - def writel ( self , section , indent , text ) : <nl> - if ( not ( section in self . sections ) ) : <nl> - self . sections [ section ] = [ ] <nl> - line = " " <nl> - for x in range ( indent ) : <nl> - line + = " \ t " <nl> - line + = text <nl> - self . sections [ section ] . append ( line ) <nl> - <nl> - <nl> - def export_image ( self , image ) : <nl> - if ( image in self . image_cache ) : <nl> - return self . image_cache [ image ] <nl> - <nl> - imgpath = image . filepath <nl> - if ( imgpath . find ( " / / " ) = = 0 or imgpath . find ( " \ \ \ \ " ) = = 0 ) : <nl> - # if relative , convert to absolute <nl> - imgpath = bpy . path . abspath ( imgpath ) <nl> - <nl> - # path is absolute , now do something ! <nl> - <nl> - if ( self . config [ " use_copy_images " ] ) : <nl> - # copy image <nl> - basedir = os . path . dirname ( self . path ) + " / images " <nl> - if ( not os . path . isdir ( basedir ) ) : <nl> - os . makedirs ( basedir ) <nl> - <nl> - if os . path . isfile ( imgpath ) : <nl> - dstfile = basedir + " / " + os . path . basename ( imgpath ) <nl> - <nl> - if ( not os . path . isfile ( dstfile ) ) : <nl> - shutil . copy ( imgpath , dstfile ) <nl> - imgpath = " images / " + os . path . basename ( imgpath ) <nl> - else : <nl> - # # # if file is not found save it as png file in the destination folder <nl> - img_tmp_path = image . filepath <nl> - if img_tmp_path . endswith ( ( " . bmp " , " . rgb " , " . png " , " . jpeg " , " . jpg " , " . jp2 " , " . tga " , " . cin " , " . dpx " , " . exr " , " . hdr " , " . tif " ) ) : <nl> - image . filepath = basedir + " / " + os . path . basename ( img_tmp_path ) <nl> - else : <nl> - image . filepath = basedir + " / " + image . name + " . png " <nl> - <nl> - dstfile = basedir + " / " + os . path . basename ( image . filepath ) <nl> - <nl> - if ( not os . path . isfile ( dstfile ) ) : <nl> - <nl> - image . save ( ) <nl> - imgpath = " images / " + os . path . basename ( image . filepath ) <nl> - image . filepath = img_tmp_path <nl> - <nl> - else : <nl> - # export relative , always , no one wants absolute paths . <nl> - try : <nl> - imgpath = os . path . relpath ( imgpath , os . path . dirname ( self . path ) ) . replace ( " \ \ " , " / " ) # export unix compatible always <nl> - <nl> - except : <nl> - pass # fails sometimes , not sure why <nl> - <nl> - <nl> - imgid = self . new_id ( " image " ) <nl> - <nl> - print ( " FOR : " + imgpath ) <nl> - <nl> - # if ( not os . path . isfile ( imgpath ) ) : <nl> - # print ( " NOT FILE ? " ) <nl> - # if imgpath . endswith ( ( " . bmp " , " . rgb " , " . png " , " . jpeg " , " . jpg " , " . jp2 " , " . tga " , " . cin " , " . dpx " , " . exr " , " . hdr " , " . tif " ) ) : <nl> - # imgpath = " images / " + os . path . basename ( imgpath ) <nl> - # else : <nl> - # imgpath = " images / " + image . name + " . png " <nl> - <nl> - self . writel ( S_IMGS , 1 , ' < image id = " ' + imgid + ' " name = " ' + image . 
name + ' " > ' ) <nl> - self . writel ( S_IMGS , 2 , ' < init_from > ' + imgpath + ' < / init_from > ' ) <nl> - self . writel ( S_IMGS , 1 , ' < / image > ' ) <nl> - self . image_cache [ image ] = imgid <nl> - return imgid <nl> - <nl> - def export_material ( self , material , double_sided_hint = True ) : <nl> - <nl> - if ( material in self . material_cache ) : <nl> - return self . material_cache [ material ] <nl> - <nl> - fxid = self . new_id ( " fx " ) <nl> - self . writel ( S_FX , 1 , ' < effect id = " ' + fxid + ' " name = " ' + material . name + ' - fx " > ' ) <nl> - self . writel ( S_FX , 2 , ' < profile_COMMON > ' ) <nl> - <nl> - # Find and fetch the textures and create sources <nl> - sampler_table = { } <nl> - diffuse_tex = None <nl> - specular_tex = None <nl> - emission_tex = None <nl> - normal_tex = None <nl> - for i in range ( len ( material . texture_slots ) ) : <nl> - ts = material . texture_slots [ i ] <nl> - if ( not ts ) : <nl> - continue <nl> - if ( not ts . use ) : <nl> - continue <nl> - if ( not ts . texture ) : <nl> - continue <nl> - if ( ts . texture . type ! = " IMAGE " ) : <nl> - continue <nl> - <nl> - if ( ts . texture . image = = None ) : <nl> - continue <nl> - <nl> - # image <nl> - imgid = self . export_image ( ts . texture . image ) <nl> - <nl> - # surface <nl> - surface_sid = self . new_id ( " fx_surf " ) <nl> - self . writel ( S_FX , 3 , ' < newparam sid = " ' + surface_sid + ' " > ' ) <nl> - self . writel ( S_FX , 4 , ' < surface type = " 2D " > ' ) <nl> - self . writel ( S_FX , 5 , ' < init_from > ' + imgid + ' < / init_from > ' ) # this is sooo weird <nl> - self . writel ( S_FX , 5 , ' < format > A8R8G8B8 < / format > ' ) <nl> - self . writel ( S_FX , 4 , ' < / surface > ' ) <nl> - self . writel ( S_FX , 3 , ' < / newparam > ' ) <nl> - # sampler , collada sure likes it difficult <nl> - sampler_sid = self . new_id ( " fx_sampler " ) <nl> - self . writel ( S_FX , 3 , ' < newparam sid = " ' + sampler_sid + ' " > ' ) <nl> - self . writel ( S_FX , 4 , ' < sampler2D > ' ) <nl> - self . writel ( S_FX , 5 , ' < source > ' + surface_sid + ' < / source > ' ) <nl> - self . writel ( S_FX , 4 , ' < / sampler2D > ' ) <nl> - self . writel ( S_FX , 3 , ' < / newparam > ' ) <nl> - sampler_table [ i ] = sampler_sid <nl> - <nl> - if ( ts . use_map_color_diffuse and diffuse_tex = = None ) : <nl> - diffuse_tex = sampler_sid <nl> - if ( ts . use_map_color_spec and specular_tex = = None ) : <nl> - specular_tex = sampler_sid <nl> - if ( ts . use_map_emit and emission_tex = = None ) : <nl> - emission_tex = sampler_sid <nl> - if ( ts . use_map_normal and normal_tex = = None ) : <nl> - normal_tex = sampler_sid <nl> - <nl> - self . writel ( S_FX , 3 , ' < technique sid = " common " > ' ) <nl> - shtype = " blinn " <nl> - self . writel ( S_FX , 4 , ' < ' + shtype + ' > ' ) <nl> - # ambient ? from where ? <nl> - <nl> - self . writel ( S_FX , 5 , ' < emission > ' ) <nl> - if ( emission_tex ! = None ) : <nl> - self . writel ( S_FX , 6 , ' < texture texture = " ' + emission_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> - else : <nl> - self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . diffuse_color , material . emit ) + ' < / color > ' ) # not totally right but good enough <nl> - self . writel ( S_FX , 5 , ' < / emission > ' ) <nl> - <nl> - self . writel ( S_FX , 5 , ' < ambient > ' ) <nl> - self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( self . scene . world . ambient_color , material . ambient ) + ' < / color > ' ) <nl> - self . 
writel ( S_FX , 5 , ' < / ambient > ' ) <nl> - <nl> - self . writel ( S_FX , 5 , ' < diffuse > ' ) <nl> - if ( diffuse_tex ! = None ) : <nl> - self . writel ( S_FX , 6 , ' < texture texture = " ' + diffuse_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> - else : <nl> - self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . diffuse_color , material . diffuse_intensity ) + ' < / color > ' ) <nl> - self . writel ( S_FX , 5 , ' < / diffuse > ' ) <nl> - <nl> - self . writel ( S_FX , 5 , ' < specular > ' ) <nl> - if ( specular_tex ! = None ) : <nl> - self . writel ( S_FX , 6 , ' < texture texture = " ' + specular_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> - else : <nl> - self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . specular_color , material . specular_intensity ) + ' < / color > ' ) <nl> - self . writel ( S_FX , 5 , ' < / specular > ' ) <nl> - <nl> - self . writel ( S_FX , 5 , ' < shininess > ' ) <nl> - self . writel ( S_FX , 6 , ' < float > ' + str ( material . specular_hardness ) + ' < / float > ' ) <nl> - self . writel ( S_FX , 5 , ' < / shininess > ' ) <nl> - <nl> - self . writel ( S_FX , 5 , ' < reflective > ' ) <nl> - self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . mirror_color ) + ' < / color > ' ) <nl> - self . writel ( S_FX , 5 , ' < / reflective > ' ) <nl> - <nl> - if ( material . use_transparency ) : <nl> - self . writel ( S_FX , 5 , ' < transparency > ' ) <nl> - self . writel ( S_FX , 6 , ' < float > ' + str ( material . alpha ) + ' < / float > ' ) <nl> - self . writel ( S_FX , 5 , ' < / transparency > ' ) <nl> - <nl> - self . writel ( S_FX , 5 , ' < index_of_refraction > ' ) <nl> - self . writel ( S_FX , 6 , ' < float > ' + str ( material . specular_ior ) + ' < / float > ' ) <nl> - self . writel ( S_FX , 5 , ' < / index_of_refraction > ' ) <nl> - <nl> - self . writel ( S_FX , 4 , ' < / ' + shtype + ' > ' ) <nl> - <nl> - self . writel ( S_FX , 4 , ' < extra > ' ) <nl> - self . writel ( S_FX , 5 , ' < technique profile = " FCOLLADA " > ' ) <nl> - if ( normal_tex ) : <nl> - self . writel ( S_FX , 6 , ' < bump bumptype = " NORMALMAP " > ' ) <nl> - self . writel ( S_FX , 7 , ' < texture texture = " ' + normal_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> - self . writel ( S_FX , 6 , ' < / bump > ' ) <nl> - <nl> - self . writel ( S_FX , 5 , ' < / technique > ' ) <nl> - self . writel ( S_FX , 5 , ' < technique profile = " GOOGLEEARTH " > ' ) <nl> - self . writel ( S_FX , 6 , ' < double_sided > ' + [ " 0 " , " 1 " ] [ double_sided_hint ] + " < / double_sided > " ) <nl> - self . writel ( S_FX , 5 , ' < / technique > ' ) <nl> - <nl> - if ( material . use_shadeless ) : <nl> - self . writel ( S_FX , 5 , ' < technique profile = " GODOT " > ' ) <nl> - self . writel ( S_FX , 6 , ' < unshaded > 1 < / unshaded > ' ) <nl> - self . writel ( S_FX , 5 , ' < / technique > ' ) <nl> - <nl> - self . writel ( S_FX , 4 , ' < / extra > ' ) <nl> - <nl> - self . writel ( S_FX , 3 , ' < / technique > ' ) <nl> - self . writel ( S_FX , 2 , ' < / profile_COMMON > ' ) <nl> - self . writel ( S_FX , 1 , ' < / effect > ' ) <nl> - <nl> - # Also export blender material in all it ' s glory ( if set as active ) <nl> - <nl> - <nl> - # Material <nl> - matid = self . new_id ( " material " ) <nl> - self . writel ( S_MATS , 1 , ' < material id = " ' + matid + ' " name = " ' + material . name + ' " > ' ) <nl> - self . writel ( S_MATS , 2 , ' < instance_effect url = " # ' + fxid + ' " / > ' ) <nl> - self . writel ( S_MATS , 1 , ' < / material > ' ) <nl> - <nl> - self . 
material_cache [ material ] = matid <nl> - return matid <nl> - <nl> - <nl> - def export_mesh ( self , node , armature = None , skeyindex = - 1 , skel_source = None , custom_name = None ) : <nl> - <nl> - mesh = node . data <nl> - <nl> - <nl> - if ( node . data in self . mesh_cache ) : <nl> - return self . mesh_cache [ mesh ] <nl> - <nl> - if ( skeyindex = = - 1 and mesh . shape_keys ! = None and len ( mesh . shape_keys . key_blocks ) ) : <nl> - values = [ ] <nl> - morph_targets = [ ] <nl> - md = None <nl> - for k in range ( 0 , len ( mesh . shape_keys . key_blocks ) ) : <nl> - shape = node . data . shape_keys . key_blocks [ k ] <nl> - values + = [ shape . value ] # save value <nl> - shape . value = 0 <nl> - <nl> - mid = self . new_id ( " morph " ) <nl> - <nl> - for k in range ( 0 , len ( mesh . shape_keys . key_blocks ) ) : <nl> - <nl> - shape = node . data . shape_keys . key_blocks [ k ] <nl> - node . show_only_shape_key = True <nl> - node . active_shape_key_index = k <nl> - shape . value = 1 . 0 <nl> - mesh . update ( ) <nl> - " " " <nl> - oldval = shape . value <nl> - shape . value = 1 . 0 <nl> - <nl> - " " " <nl> - p = node . data <nl> - v = node . to_mesh ( bpy . context . scene , True , " RENDER " ) <nl> - node . data = v <nl> - # self . export_node ( node , il , shape . name ) <nl> - node . data . update ( ) <nl> - if ( armature and k = = 0 ) : <nl> - md = self . export_mesh ( node , armature , k , mid , shape . name ) <nl> - else : <nl> - md = self . export_mesh ( node , None , k , None , shape . name ) <nl> - <nl> - node . data = p <nl> - node . data . update ( ) <nl> - shape . value = 0 . 0 <nl> - morph_targets . append ( md ) <nl> - <nl> - " " " <nl> - shape . value = oldval <nl> - " " " <nl> - node . show_only_shape_key = False <nl> - node . active_shape_key_index = 0 <nl> - <nl> - <nl> - self . writel ( S_MORPH , 1 , ' < controller id = " ' + mid + ' " name = " " > ' ) <nl> - # if ( " skin_id " in morph_targets [ 0 ] ) : <nl> - # self . writel ( S_MORPH , 2 , ' < morph source = " # ' + morph_targets [ 0 ] [ " skin_id " ] + ' " method = " NORMALIZED " > ' ) <nl> - # else : <nl> - self . writel ( S_MORPH , 2 , ' < morph source = " # ' + morph_targets [ 0 ] [ " id " ] + ' " method = " NORMALIZED " > ' ) <nl> - <nl> - self . writel ( S_MORPH , 3 , ' < source id = " ' + mid + ' - morph - targets " > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < IDREF_array id = " ' + mid + ' - morph - targets - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " > ' ) <nl> - marr = " " <nl> - warr = " " <nl> - for i in range ( len ( morph_targets ) ) : <nl> - if ( i = = 0 ) : <nl> - continue <nl> - elif ( i > 1 ) : <nl> - marr + = " " <nl> - <nl> - if ( " skin_id " in morph_targets [ i ] ) : <nl> - marr + = morph_targets [ i ] [ " skin_id " ] <nl> - else : <nl> - marr + = morph_targets [ i ] [ " id " ] <nl> - <nl> - warr + = " 0 " <nl> - <nl> - self . writel ( S_MORPH , 5 , marr ) <nl> - self . writel ( S_MORPH , 4 , ' < / IDREF_array > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_MORPH , 5 , ' < accessor source = " # ' + mid + ' - morph - targets - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_MORPH , 6 , ' < param name = " MORPH_TARGET " type = " IDREF " / > ' ) <nl> - self . writel ( S_MORPH , 5 , ' < / accessor > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_MORPH , 3 , ' < / source > ' ) <nl> - <nl> - self . 
writel ( S_MORPH , 3 , ' < source id = " ' + mid + ' - morph - weights " > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < float_array id = " ' + mid + ' - morph - weights - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " > ' ) <nl> - self . writel ( S_MORPH , 5 , warr ) <nl> - self . writel ( S_MORPH , 4 , ' < / float_array > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_MORPH , 5 , ' < accessor source = " # ' + mid + ' - morph - weights - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_MORPH , 6 , ' < param name = " MORPH_WEIGHT " type = " float " / > ' ) <nl> - self . writel ( S_MORPH , 5 , ' < / accessor > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_MORPH , 3 , ' < / source > ' ) <nl> - <nl> - self . writel ( S_MORPH , 3 , ' < targets > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < input semantic = " MORPH_TARGET " source = " # ' + mid + ' - morph - targets " / > ' ) <nl> - self . writel ( S_MORPH , 4 , ' < input semantic = " MORPH_WEIGHT " source = " # ' + mid + ' - morph - weights " / > ' ) <nl> - self . writel ( S_MORPH , 3 , ' < / targets > ' ) <nl> - self . writel ( S_MORPH , 2 , ' < / morph > ' ) <nl> - self . writel ( S_MORPH , 1 , ' < / controller > ' ) <nl> - if ( armature ! = None ) : <nl> - <nl> - self . armature_for_morph [ node ] = armature <nl> - <nl> - meshdata = { } <nl> - if ( armature ) : <nl> - meshdata = morph_targets [ 0 ] <nl> - meshdata [ " morph_id " ] = mid <nl> - else : <nl> - meshdata [ " id " ] = morph_targets [ 0 ] [ " id " ] <nl> - meshdata [ " morph_id " ] = mid <nl> - meshdata [ " material_assign " ] = morph_targets [ 0 ] [ " material_assign " ] <nl> - <nl> - <nl> - <nl> - self . mesh_cache [ node . data ] = meshdata <nl> - return meshdata <nl> - <nl> - apply_modifiers = len ( node . modifiers ) and self . config [ " use_mesh_modifiers " ] <nl> - <nl> - name_to_use = mesh . name <nl> - # print ( " name to use : " + mesh . name ) <nl> - if ( custom_name ! = None and custom_name ! = " " ) : <nl> - name_to_use = custom_name <nl> - <nl> - mesh = node . to_mesh ( self . scene , apply_modifiers , " RENDER " ) # is this allright ? <nl> - <nl> - triangulate = self . config [ " use_triangles " ] <nl> - if ( triangulate ) : <nl> - bm = bmesh . new ( ) <nl> - bm . from_mesh ( mesh ) <nl> - bmesh . ops . triangulate ( bm , faces = bm . faces ) <nl> - bm . to_mesh ( mesh ) <nl> - bm . free ( ) <nl> - <nl> - <nl> - mesh . update ( calc_tessface = True ) <nl> - vertices = [ ] <nl> - vertex_map = { } <nl> - surface_indices = { } <nl> - materials = { } <nl> - <nl> - materials = { } <nl> - <nl> - si = None <nl> - if ( armature ! = None ) : <nl> - si = self . skeleton_info [ armature ] <nl> - <nl> - has_uv = False <nl> - has_uv2 = False <nl> - has_weights = armature ! = None <nl> - has_tangents = self . config [ " use_tangent_arrays " ] # could detect . . <nl> - has_colors = len ( mesh . vertex_colors ) <nl> - mat_assign = [ ] <nl> - <nl> - uv_layer_count = len ( mesh . uv_textures ) <nl> - if ( has_tangents and len ( mesh . uv_textures ) ) : <nl> - try : <nl> - mesh . calc_tangents ( ) <nl> - except : <nl> - self . operator . report ( { ' WARNING ' } , ' CalcTangets failed for mesh " ' + mesh . name + ' " , no tangets will be exported . ' ) <nl> - # uv_layer_count = 0 <nl> - mesh . calc_normals_split ( ) <nl> - has_tangents = False <nl> - <nl> - else : <nl> - mesh . 
calc_normals_split ( ) <nl> - has_tangents = False <nl> - <nl> - <nl> - for fi in range ( len ( mesh . polygons ) ) : <nl> - f = mesh . polygons [ fi ] <nl> - <nl> - if ( not ( f . material_index in surface_indices ) ) : <nl> - surface_indices [ f . material_index ] = [ ] <nl> - # print ( " Type : " + str ( type ( f . material_index ) ) ) <nl> - # print ( " IDX : " + str ( f . material_index ) + " / " + str ( len ( mesh . materials ) ) ) <nl> - <nl> - try : <nl> - # Bizarre blender behavior i don ' t understand , so catching exception <nl> - mat = mesh . materials [ f . material_index ] <nl> - except : <nl> - mat = None <nl> - <nl> - if ( mat ! = None ) : <nl> - materials [ f . material_index ] = self . export_material ( mat , mesh . show_double_sided ) <nl> - else : <nl> - materials [ f . material_index ] = None # weird , has no material ? <nl> - <nl> - indices = surface_indices [ f . material_index ] <nl> - vi = [ ] <nl> - # vertices always 3 <nl> - " " " <nl> - if ( len ( f . vertices ) = = 3 ) : <nl> - vi . append ( 0 ) <nl> - vi . append ( 1 ) <nl> - vi . append ( 2 ) <nl> - elif ( len ( f . vertices ) = = 4 ) : <nl> - # todo , should use shortest path <nl> - vi . append ( 0 ) <nl> - vi . append ( 1 ) <nl> - vi . append ( 2 ) <nl> - vi . append ( 0 ) <nl> - vi . append ( 2 ) <nl> - vi . append ( 3 ) <nl> - " " " <nl> - <nl> - for lt in range ( f . loop_total ) : <nl> - loop_index = f . loop_start + lt <nl> - ml = mesh . loops [ loop_index ] <nl> - mv = mesh . vertices [ ml . vertex_index ] <nl> - <nl> - v = self . Vertex ( ) <nl> - v . vertex = Vector ( mv . co ) <nl> - <nl> - for xt in mesh . uv_layers : <nl> - v . uv . append ( Vector ( xt . data [ loop_index ] . uv ) ) <nl> - <nl> - if ( has_colors ) : <nl> - v . color = Vector ( mesh . vertex_colors [ 0 ] . data [ loop_index ] . color ) <nl> - <nl> - v . normal = Vector ( ml . normal ) <nl> - <nl> - if ( has_tangents ) : <nl> - v . tangent = Vector ( ml . tangent ) <nl> - v . bitangent = Vector ( ml . bitangent ) <nl> - <nl> - <nl> - # if ( armature ) : <nl> - # v . vertex = node . matrix_world * v . vertex <nl> - <nl> - # v . color = Vertex ( mv . ? ? ? <nl> - <nl> - if ( armature ! = None ) : <nl> - wsum = 0 . 0 <nl> - zero_bones = [ ] <nl> - <nl> - for vg in mv . groups : <nl> - if vg . group > = len ( node . vertex_groups ) : <nl> - continue ; <nl> - name = node . vertex_groups [ vg . group ] . name <nl> - <nl> - if ( name in si [ " bone_index " ] ) : <nl> - # could still put the weight as 0 . 0001 maybe <nl> - if ( vg . weight > 0 . 001 ) : # blender has a lot of zero weight stuff <nl> - v . bones . append ( si [ " bone_index " ] [ name ] ) <nl> - v . weights . append ( vg . weight ) <nl> - wsum + = vg . weight <nl> - if ( wsum = = 0 . 0 ) : <nl> - if not self . wrongvtx_report : <nl> - self . operator . report ( { ' WARNING ' } , ' Mesh for object " ' + node . name + ' " has unassigned weights . This may look wrong in exported model . ' ) <nl> - self . wrongvtx_report = True <nl> - <nl> - # blender can have bones assigned that weight zero so they remain local <nl> - # this is the best it can be done ? <nl> - v . bones . append ( 0 ) <nl> - v . weights . append ( 1 ) <nl> - <nl> - <nl> - <nl> - <nl> - tup = v . get_tup ( ) <nl> - idx = 0 <nl> - if ( skeyindex = = - 1 and tup in vertex_map ) : # do not optmize if using shapekeys <nl> - idx = vertex_map [ tup ] <nl> - else : <nl> - idx = len ( vertices ) <nl> - vertices . append ( v ) <nl> - vertex_map [ tup ] = idx <nl> - <nl> - vi . 
append ( idx ) <nl> - <nl> - if ( len ( vi ) > 2 ) : <nl> - # only triangles and above <nl> - indices . append ( vi ) <nl> - <nl> - <nl> - meshid = self . new_id ( " mesh " ) <nl> - self . writel ( S_GEOM , 1 , ' < geometry id = " ' + meshid + ' " name = " ' + name_to_use + ' " > ' ) <nl> - <nl> - self . writel ( S_GEOM , 2 , ' < mesh > ' ) <nl> - <nl> - <nl> - # Vertex Array <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - positions " > ' ) <nl> - float_values = " " <nl> - for v in vertices : <nl> - float_values + = " " + str ( v . vertex . x ) + " " + str ( v . vertex . y ) + " " + str ( v . vertex . z ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - positions - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - positions - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - # Normal Array <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - normals " > ' ) <nl> - float_values = " " <nl> - for v in vertices : <nl> - float_values + = " " + str ( v . normal . x ) + " " + str ( v . normal . y ) + " " + str ( v . normal . z ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - normals - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - normals - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - if ( has_tangents ) : <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - tangents " > ' ) <nl> - float_values = " " <nl> - for v in vertices : <nl> - float_values + = " " + str ( v . tangent . x ) + " " + str ( v . tangent . y ) + " " + str ( v . tangent . z ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - tangents - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - tangents - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . 
writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - bitangents " > ' ) <nl> - float_values = " " <nl> - for v in vertices : <nl> - float_values + = " " + str ( v . bitangent . x ) + " " + str ( v . bitangent . y ) + " " + str ( v . bitangent . z ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - bitangents - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - bitangents - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - <nl> - <nl> - # UV Arrays <nl> - <nl> - for uvi in range ( uv_layer_count ) : <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - texcoord - ' + str ( uvi ) + ' " > ' ) <nl> - float_values = " " <nl> - for v in vertices : <nl> - try : <nl> - float_values + = " " + str ( v . uv [ uvi ] . x ) + " " + str ( v . uv [ uvi ] . y ) <nl> - except : <nl> - # I don ' t understand this weird multi - uv - layer API , but with this it seems to works <nl> - float_values + = " 0 0 " <nl> - <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - texcoord - ' + str ( uvi ) + ' - array " count = " ' + str ( len ( vertices ) * 2 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - texcoord - ' + str ( uvi ) + ' - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 2 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " S " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " T " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - # Color Arrays <nl> - <nl> - if ( has_colors ) : <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - colors " > ' ) <nl> - float_values = " " <nl> - for v in vertices : <nl> - float_values + = " " + str ( v . color . x ) + " " + str ( v . color . y ) + " " + str ( v . color . z ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - colors - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - colors - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . 
writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - # Triangle Lists <nl> - self . writel ( S_GEOM , 3 , ' < vertices id = " ' + meshid + ' - vertices " > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " POSITION " source = " # ' + meshid + ' - positions " / > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / vertices > ' ) <nl> - <nl> - prim_type = " " <nl> - if ( triangulate ) : <nl> - prim_type = " triangles " <nl> - else : <nl> - prim_type = " polygons " <nl> - <nl> - <nl> - for m in surface_indices : <nl> - indices = surface_indices [ m ] <nl> - mat = materials [ m ] <nl> - <nl> - if ( mat ! = None ) : <nl> - matref = self . new_id ( " trimat " ) <nl> - self . writel ( S_GEOM , 3 , ' < ' + prim_type + ' count = " ' + str ( int ( len ( indices ) ) ) + ' " material = " ' + matref + ' " > ' ) # todo material <nl> - mat_assign . append ( ( mat , matref ) ) <nl> - else : <nl> - self . writel ( S_GEOM , 3 , ' < ' + prim_type + ' count = " ' + str ( int ( len ( indices ) ) ) + ' " > ' ) # todo material <nl> - <nl> - <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " VERTEX " source = " # ' + meshid + ' - vertices " offset = " 0 " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " NORMAL " source = " # ' + meshid + ' - normals " offset = " 0 " / > ' ) <nl> - <nl> - for uvi in range ( uv_layer_count ) : <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " TEXCOORD " source = " # ' + meshid + ' - texcoord - ' + str ( uvi ) + ' " offset = " 0 " set = " ' + str ( uvi ) + ' " / > ' ) <nl> - <nl> - if ( has_colors ) : <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " COLOR " source = " # ' + meshid + ' - colors " offset = " 0 " / > ' ) <nl> - if ( has_tangents ) : <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " TEXTANGENT " source = " # ' + meshid + ' - tangents " offset = " 0 " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " TEXBINORMAL " source = " # ' + meshid + ' - bitangents " offset = " 0 " / > ' ) <nl> - <nl> - if ( triangulate ) : <nl> - int_values = " < p > " <nl> - for p in indices : <nl> - for i in p : <nl> - int_values + = " " + str ( i ) <nl> - int_values + = " < / p > " <nl> - self . writel ( S_GEOM , 4 , int_values ) <nl> - else : <nl> - for p in indices : <nl> - int_values = " < p > " <nl> - for i in p : <nl> - int_values + = " " + str ( i ) <nl> - int_values + = " < / p > " <nl> - self . writel ( S_GEOM , 4 , int_values ) <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < / ' + prim_type + ' > ' ) <nl> - <nl> - <nl> - self . writel ( S_GEOM , 2 , ' < / mesh > ' ) <nl> - self . writel ( S_GEOM , 1 , ' < / geometry > ' ) <nl> - <nl> - <nl> - meshdata = { } <nl> - meshdata [ " id " ] = meshid <nl> - meshdata [ " material_assign " ] = mat_assign <nl> - if ( skeyindex = = - 1 ) : <nl> - self . mesh_cache [ node . data ] = meshdata <nl> - <nl> - <nl> - # Export armature data ( if armature exists ) <nl> - <nl> - if ( armature ! = None and ( skel_source ! = None or skeyindex = = - 1 ) ) : <nl> - <nl> - contid = self . new_id ( " controller " ) <nl> - <nl> - self . writel ( S_SKIN , 1 , ' < controller id = " ' + contid + ' " > ' ) <nl> - if ( skel_source ! = None ) : <nl> - self . 
writel ( S_SKIN , 2 , ' < skin source = " # ' + skel_source + ' " > ' ) <nl> - else : <nl> - self . writel ( S_SKIN , 2 , ' < skin source = " # ' + meshid + ' " > ' ) <nl> - <nl> - self . writel ( S_SKIN , 3 , ' < bind_shape_matrix > ' + strmtx ( node . matrix_world ) + ' < / bind_shape_matrix > ' ) <nl> - # Joint Names <nl> - self . writel ( S_SKIN , 3 , ' < source id = " ' + contid + ' - joints " > ' ) <nl> - name_values = " " <nl> - for v in si [ " bone_names " ] : <nl> - name_values + = " " + v <nl> - <nl> - self . writel ( S_SKIN , 4 , ' < Name_array id = " ' + contid + ' - joints - array " count = " ' + str ( len ( si [ " bone_names " ] ) ) + ' " > ' + name_values + ' < / Name_array > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < accessor source = " # ' + contid + ' - joints - array " count = " ' + str ( len ( si [ " bone_names " ] ) ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_SKIN , 5 , ' < param name = " JOINT " type = " Name " / > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_SKIN , 3 , ' < / source > ' ) <nl> - # Pose Matrices ! <nl> - self . writel ( S_SKIN , 3 , ' < source id = " ' + contid + ' - bind_poses " > ' ) <nl> - pose_values = " " <nl> - for v in si [ " bone_bind_poses " ] : <nl> - pose_values + = " " + strmtx ( v ) <nl> - <nl> - self . writel ( S_SKIN , 4 , ' < float_array id = " ' + contid + ' - bind_poses - array " count = " ' + str ( len ( si [ " bone_bind_poses " ] ) * 16 ) + ' " > ' + pose_values + ' < / float_array > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < accessor source = " # ' + contid + ' - bind_poses - array " count = " ' + str ( len ( si [ " bone_bind_poses " ] ) ) + ' " stride = " 16 " > ' ) <nl> - self . writel ( S_SKIN , 5 , ' < param name = " TRANSFORM " type = " float4x4 " / > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_SKIN , 3 , ' < / source > ' ) <nl> - # Skin Weights ! <nl> - self . writel ( S_SKIN , 3 , ' < source id = " ' + contid + ' - skin_weights " > ' ) <nl> - skin_weights = " " <nl> - skin_weights_total = 0 <nl> - for v in vertices : <nl> - skin_weights_total + = len ( v . weights ) <nl> - for w in v . weights : <nl> - skin_weights + = " " + str ( w ) <nl> - <nl> - self . writel ( S_SKIN , 4 , ' < float_array id = " ' + contid + ' - skin_weights - array " count = " ' + str ( skin_weights_total ) + ' " > ' + skin_weights + ' < / float_array > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < accessor source = " # ' + contid + ' - skin_weights - array " count = " ' + str ( skin_weights_total ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_SKIN , 5 , ' < param name = " WEIGHT " type = " float " / > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < / technique_common > ' ) <nl> - self . writel ( S_SKIN , 3 , ' < / source > ' ) <nl> - <nl> - <nl> - self . writel ( S_SKIN , 3 , ' < joints > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < input semantic = " JOINT " source = " # ' + contid + ' - joints " / > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < input semantic = " INV_BIND_MATRIX " source = " # ' + contid + ' - bind_poses " / > ' ) <nl> - self . 
writel ( S_SKIN , 3 , ' < / joints > ' ) <nl> - self . writel ( S_SKIN , 3 , ' < vertex_weights count = " ' + str ( len ( vertices ) ) + ' " > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < input semantic = " JOINT " source = " # ' + contid + ' - joints " offset = " 0 " / > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < input semantic = " WEIGHT " source = " # ' + contid + ' - skin_weights " offset = " 1 " / > ' ) <nl> - vcounts = " " <nl> - vs = " " <nl> - vcount = 0 <nl> - for v in vertices : <nl> - vcounts + = " " + str ( len ( v . weights ) ) <nl> - for b in v . bones : <nl> - vs + = " " + str ( b ) <nl> - vs + = " " + str ( vcount ) <nl> - vcount + = 1 <nl> - self . writel ( S_SKIN , 4 , ' < vcount > ' + vcounts + ' < / vcount > ' ) <nl> - self . writel ( S_SKIN , 4 , ' < v > ' + vs + ' < / v > ' ) <nl> - self . writel ( S_SKIN , 3 , ' < / vertex_weights > ' ) <nl> - <nl> - <nl> - self . writel ( S_SKIN , 2 , ' < / skin > ' ) <nl> - self . writel ( S_SKIN , 1 , ' < / controller > ' ) <nl> - meshdata [ " skin_id " ] = contid <nl> - <nl> - <nl> - return meshdata <nl> - <nl> - <nl> - def export_mesh_node ( self , node , il ) : <nl> - <nl> - if ( node . data = = None ) : <nl> - return <nl> - armature = None <nl> - armcount = 0 <nl> - for n in node . modifiers : <nl> - if ( n . type = = " ARMATURE " ) : <nl> - armcount + = 1 <nl> - <nl> - if ( node . parent ! = None ) : <nl> - if ( node . parent . type = = " ARMATURE " ) : <nl> - armature = node . parent <nl> - if ( armcount > 1 ) : <nl> - self . operator . report ( { ' WARNING ' } , ' Object " ' + node . name + ' " refers to more than one armature ! This is unsupported . ' ) <nl> - if ( armcount = = 0 ) : <nl> - self . operator . report ( { ' WARNING ' } , ' Object " ' + node . name + ' " is child of an armature , but has no armature modifier . ' ) <nl> - <nl> - <nl> - if ( armcount > 0 and not armature ) : <nl> - self . operator . report ( { ' WARNING ' } , ' Object " ' + node . name + ' " has armature modifier , but is not a child of an armature . This is unsupported . ' ) <nl> - <nl> - <nl> - if ( node . data . shape_keys ! = None ) : <nl> - sk = node . data . shape_keys <nl> - if ( sk . animation_data ) : <nl> - # print ( " HAS ANIM " ) <nl> - # print ( " DRIVERS : " + str ( len ( sk . animation_data . drivers ) ) ) <nl> - for d in sk . animation_data . drivers : <nl> - if ( d . driver ) : <nl> - for v in d . driver . variables : <nl> - for t in v . targets : <nl> - if ( t . id ! = None and t . id . name in self . scene . objects ) : <nl> - # print ( " LINKING " + str ( node ) + " WITH " + str ( t . id . name ) ) <nl> - self . armature_for_morph [ node ] = self . scene . objects [ t . id . name ] <nl> - <nl> - <nl> - meshdata = self . export_mesh ( node , armature ) <nl> - close_controller = False <nl> - <nl> - if ( " skin_id " in meshdata ) : <nl> - close_controller = True <nl> - self . writel ( S_NODES , il , ' < instance_controller url = " # ' + meshdata [ " skin_id " ] + ' " > ' ) <nl> - for sn in self . skeleton_info [ armature ] [ " skeleton_nodes " ] : <nl> - self . writel ( S_NODES , il + 1 , ' < skeleton > # ' + sn + ' < / skeleton > ' ) <nl> - elif ( " morph_id " in meshdata ) : <nl> - self . writel ( S_NODES , il , ' < instance_controller url = " # ' + meshdata [ " morph_id " ] + ' " > ' ) <nl> - close_controller = True <nl> - elif ( armature = = None ) : <nl> - self . 
writel ( S_NODES , il , ' < instance_geometry url = " # ' + meshdata [ " id " ] + ' " > ' ) <nl> - <nl> - <nl> - if ( len ( meshdata [ " material_assign " ] ) > 0 ) : <nl> - <nl> - self . writel ( S_NODES , il + 1 , ' < bind_material > ' ) <nl> - self . writel ( S_NODES , il + 2 , ' < technique_common > ' ) <nl> - for m in meshdata [ " material_assign " ] : <nl> - self . writel ( S_NODES , il + 3 , ' < instance_material symbol = " ' + m [ 1 ] + ' " target = " # ' + m [ 0 ] + ' " / > ' ) <nl> - <nl> - self . writel ( S_NODES , il + 2 , ' < / technique_common > ' ) <nl> - self . writel ( S_NODES , il + 1 , ' < / bind_material > ' ) <nl> - <nl> - if ( close_controller ) : <nl> - self . writel ( S_NODES , il , ' < / instance_controller > ' ) <nl> - else : <nl> - self . writel ( S_NODES , il , ' < / instance_geometry > ' ) <nl> - <nl> - <nl> - def export_armature_bone ( self , bone , il , si ) : <nl> - boneid = self . new_id ( " bone " ) <nl> - boneidx = si [ " bone_count " ] <nl> - si [ " bone_count " ] + = 1 <nl> - bonesid = si [ " id " ] + " - " + str ( boneidx ) <nl> - if ( bone . name in self . used_bones ) : <nl> - if ( self . config [ " use_anim_action_all " ] ) : <nl> - self . operator . report ( { ' WARNING ' } , ' Bone name " ' + bone . name + ' " used in more than one skeleton . Actions might export wrong . ' ) <nl> - else : <nl> - self . used_bones . append ( bone . name ) <nl> - <nl> - si [ " bone_index " ] [ bone . name ] = boneidx <nl> - si [ " bone_ids " ] [ bone ] = boneid <nl> - si [ " bone_names " ] . append ( bonesid ) <nl> - self . writel ( S_NODES , il , ' < node id = " ' + boneid + ' " sid = " ' + bonesid + ' " name = " ' + bone . name + ' " type = " JOINT " > ' ) <nl> - il + = 1 <nl> - xform = bone . matrix_local <nl> - si [ " bone_bind_poses " ] . append ( ( si [ " armature_xform " ] * xform ) . inverted ( ) ) <nl> - <nl> - if ( bone . parent ! = None ) : <nl> - xform = bone . parent . matrix_local . inverted ( ) * xform <nl> - else : <nl> - si [ " skeleton_nodes " ] . append ( boneid ) <nl> - <nl> - self . writel ( S_NODES , il , ' < matrix sid = " transform " > ' + strmtx ( xform ) + ' < / matrix > ' ) <nl> - for c in bone . children : <nl> - self . export_armature_bone ( c , il , si ) <nl> - il - = 1 <nl> - self . writel ( S_NODES , il , ' < / node > ' ) <nl> - <nl> - <nl> - def export_armature_node ( self , node , il ) : <nl> - <nl> - if ( node . data = = None ) : <nl> - return <nl> - <nl> - self . skeletons . append ( node ) <nl> - <nl> - armature = node . data <nl> - self . skeleton_info [ node ] = { " bone_count " : 0 , " id " : self . new_id ( " skelbones " ) , " name " : node . name , " bone_index " : { } , " bone_ids " : { } , " bone_names " : [ ] , " bone_bind_poses " : [ ] , " skeleton_nodes " : [ ] , " armature_xform " : node . matrix_world } <nl> - <nl> - <nl> - <nl> - for b in armature . bones : <nl> - if ( b . parent ! = None ) : <nl> - continue <nl> - self . export_armature_bone ( b , il , self . skeleton_info [ node ] ) <nl> - <nl> - if ( node . pose ) : <nl> - for b in node . pose . bones : <nl> - for x in b . constraints : <nl> - if ( x . type = = ' ACTION ' ) : <nl> - self . action_constraints . append ( x . action ) <nl> - <nl> - <nl> - def export_camera_node ( self , node , il ) : <nl> - <nl> - if ( node . data = = None ) : <nl> - return <nl> - <nl> - camera = node . data <nl> - camid = self . new_id ( " camera " ) <nl> - self . writel ( S_CAMS , 1 , ' < camera id = " ' + camid + ' " name = " ' + camera . name + ' " > ' ) <nl> - self . 
writel ( S_CAMS , 2 , ' < optics > ' ) <nl> - self . writel ( S_CAMS , 3 , ' < technique_common > ' ) <nl> - if ( camera . type = = " PERSP " ) : <nl> - self . writel ( S_CAMS , 4 , ' < perspective > ' ) <nl> - self . writel ( S_CAMS , 5 , ' < yfov > ' + str ( math . degrees ( camera . angle ) ) + ' < / yfov > ' ) # I think ? <nl> - self . writel ( S_CAMS , 5 , ' < aspect_ratio > ' + str ( self . scene . render . resolution_x / self . scene . render . resolution_y ) + ' < / aspect_ratio > ' ) <nl> - self . writel ( S_CAMS , 5 , ' < znear > ' + str ( camera . clip_start ) + ' < / znear > ' ) <nl> - self . writel ( S_CAMS , 5 , ' < zfar > ' + str ( camera . clip_end ) + ' < / zfar > ' ) <nl> - self . writel ( S_CAMS , 4 , ' < / perspective > ' ) <nl> - else : <nl> - self . writel ( S_CAMS , 4 , ' < orthographic > ' ) <nl> - self . writel ( S_CAMS , 5 , ' < xmag > ' + str ( camera . ortho_scale * 0 . 5 ) + ' < / xmag > ' ) # I think ? <nl> - self . writel ( S_CAMS , 5 , ' < aspect_ratio > ' + str ( self . scene . render . resolution_x / self . scene . render . resolution_y ) + ' < / aspect_ratio > ' ) <nl> - self . writel ( S_CAMS , 5 , ' < znear > ' + str ( camera . clip_start ) + ' < / znear > ' ) <nl> - self . writel ( S_CAMS , 5 , ' < zfar > ' + str ( camera . clip_end ) + ' < / zfar > ' ) <nl> - self . writel ( S_CAMS , 4 , ' < / orthographic > ' ) <nl> - <nl> - self . writel ( S_CAMS , 3 , ' < / technique_common > ' ) <nl> - self . writel ( S_CAMS , 2 , ' < / optics > ' ) <nl> - self . writel ( S_CAMS , 1 , ' < / camera > ' ) <nl> - <nl> - <nl> - self . writel ( S_NODES , il , ' < instance_camera url = " # ' + camid + ' " / > ' ) <nl> - <nl> - def export_lamp_node ( self , node , il ) : <nl> - <nl> - if ( node . data = = None ) : <nl> - return <nl> - <nl> - light = node . data <nl> - lightid = self . new_id ( " light " ) <nl> - self . writel ( S_LAMPS , 1 , ' < light id = " ' + lightid + ' " name = " ' + light . name + ' " > ' ) <nl> - # self . writel ( S_LAMPS , 2 , ' < optics > ' ) <nl> - self . writel ( S_LAMPS , 3 , ' < technique_common > ' ) <nl> - <nl> - if ( light . type = = " POINT " ) : <nl> - self . writel ( S_LAMPS , 4 , ' < point > ' ) <nl> - self . writel ( S_LAMPS , 5 , ' < color > ' + strarr ( light . color ) + ' < / color > ' ) <nl> - att_by_distance = 2 . 0 / light . distance # convert to linear attenuation <nl> - self . writel ( S_LAMPS , 5 , ' < linear_attenuation > ' + str ( att_by_distance ) + ' < / linear_attenuation > ' ) <nl> - if ( light . use_sphere ) : <nl> - self . writel ( S_LAMPS , 5 , ' < zfar > ' + str ( light . distance ) + ' < / zfar > ' ) <nl> - <nl> - self . writel ( S_LAMPS , 4 , ' < / point > ' ) <nl> - elif ( light . type = = " SPOT " ) : <nl> - self . writel ( S_LAMPS , 4 , ' < spot > ' ) <nl> - self . writel ( S_LAMPS , 5 , ' < color > ' + strarr ( light . color ) + ' < / color > ' ) <nl> - att_by_distance = 2 . 0 / light . distance # convert to linear attenuation <nl> - self . writel ( S_LAMPS , 5 , ' < linear_attenuation > ' + str ( att_by_distance ) + ' < / linear_attenuation > ' ) <nl> - self . writel ( S_LAMPS , 5 , ' < falloff_angle > ' + str ( math . degrees ( light . spot_size / 2 ) ) + ' < / falloff_angle > ' ) <nl> - self . writel ( S_LAMPS , 4 , ' < / spot > ' ) <nl> - <nl> - <nl> - else : # write a sun lamp for everything else ( not supported ) <nl> - self . writel ( S_LAMPS , 4 , ' < directional > ' ) <nl> - self . writel ( S_LAMPS , 5 , ' < color > ' + strarr ( light . color ) + ' < / color > ' ) <nl> - self . 
writel ( S_LAMPS , 4 , ' < / directional > ' ) <nl> - <nl> - <nl> - self . writel ( S_LAMPS , 3 , ' < / technique_common > ' ) <nl> - # self . writel ( S_LAMPS , 2 , ' < / optics > ' ) <nl> - self . writel ( S_LAMPS , 1 , ' < / light > ' ) <nl> - <nl> - <nl> - self . writel ( S_NODES , il , ' < instance_light url = " # ' + lightid + ' " / > ' ) <nl> - <nl> - def export_empty_node ( self , node , il ) : <nl> - <nl> - self . writel ( S_NODES , 4 , ' < extra > ' ) <nl> - self . writel ( S_NODES , 5 , ' < technique profile = " GODOT " > ' ) <nl> - self . writel ( S_NODES , 6 , ' < empty_draw_type > ' + node . empty_draw_type + ' < / empty_draw_type > ' ) <nl> - self . writel ( S_NODES , 5 , ' < / technique > ' ) <nl> - self . writel ( S_NODES , 4 , ' < / extra > ' ) <nl> - <nl> - <nl> - def export_curve ( self , curve ) : <nl> - <nl> - splineid = self . new_id ( " spline " ) <nl> - <nl> - self . writel ( S_GEOM , 1 , ' < geometry id = " ' + splineid + ' " name = " ' + curve . name + ' " > ' ) <nl> - self . writel ( S_GEOM , 2 , ' < spline closed = " 0 " > ' ) <nl> - <nl> - points = [ ] <nl> - interps = [ ] <nl> - handles_in = [ ] <nl> - handles_out = [ ] <nl> - tilts = [ ] <nl> - <nl> - for cs in curve . splines : <nl> - <nl> - if ( cs . type = = " BEZIER " ) : <nl> - for s in cs . bezier_points : <nl> - points . append ( s . co [ 0 ] ) <nl> - points . append ( s . co [ 1 ] ) <nl> - points . append ( s . co [ 2 ] ) <nl> - <nl> - <nl> - handles_in . append ( s . handle_left [ 0 ] ) <nl> - handles_in . append ( s . handle_left [ 1 ] ) <nl> - handles_in . append ( s . handle_left [ 2 ] ) <nl> - <nl> - handles_out . append ( s . handle_right [ 0 ] ) <nl> - handles_out . append ( s . handle_right [ 1 ] ) <nl> - handles_out . append ( s . handle_right [ 2 ] ) <nl> - <nl> - <nl> - tilts . append ( s . tilt ) <nl> - interps . append ( " BEZIER " ) <nl> - else : <nl> - <nl> - for s in cs . points : <nl> - points . append ( s . co [ 0 ] ) <nl> - points . append ( s . co [ 1 ] ) <nl> - points . append ( s . co [ 2 ] ) <nl> - handles_in . append ( s . co [ 0 ] ) <nl> - handles_in . append ( s . co [ 1 ] ) <nl> - handles_in . append ( s . co [ 2 ] ) <nl> - handles_out . append ( s . co [ 0 ] ) <nl> - handles_out . append ( s . co [ 1 ] ) <nl> - handles_out . append ( s . co [ 2 ] ) <nl> - tilts . append ( s . tilt ) <nl> - interps . append ( " LINEAR " ) <nl> - <nl> - <nl> - <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - positions " > ' ) <nl> - position_values = " " <nl> - for x in points : <nl> - position_values + = " " + str ( x ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - positions - array " count = " ' + str ( len ( points ) ) + ' " > ' + position_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - positions - array " count = " ' + str ( len ( points ) / 3 ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - self . 
writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - intangents " > ' ) <nl> - intangent_values = " " <nl> - for x in handles_in : <nl> - intangent_values + = " " + str ( x ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - intangents - array " count = " ' + str ( len ( points ) ) + ' " > ' + intangent_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - intangents - array " count = " ' + str ( len ( points ) / 3 ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - outtangents " > ' ) <nl> - outtangent_values = " " <nl> - for x in handles_out : <nl> - outtangent_values + = " " + str ( x ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - outtangents - array " count = " ' + str ( len ( points ) ) + ' " > ' + outtangent_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - outtangents - array " count = " ' + str ( len ( points ) / 3 ) + ' " stride = " 3 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - interpolations " > ' ) <nl> - interpolation_values = " " <nl> - for x in interps : <nl> - interpolation_values + = " " + x <nl> - self . writel ( S_GEOM , 4 , ' < Name_array id = " ' + splineid + ' - interpolations - array " count = " ' + str ( len ( interps ) ) + ' " > ' + interpolation_values + ' < / Name_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - interpolations - array " count = " ' + str ( len ( interps ) ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " INTERPOLATION " type = " name " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - tilts " > ' ) <nl> - tilt_values = " " <nl> - for x in tilts : <nl> - tilt_values + = " " + str ( x ) <nl> - self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - tilts - array " count = " ' + str ( len ( tilts ) ) + ' " > ' + tilt_values + ' < / float_array > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - tilts - array " count = " ' + str ( len ( tilts ) ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_GEOM , 5 , ' < param name = " TILT " type = " float " / > ' ) <nl> - self . 
writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> - <nl> - self . writel ( S_GEOM , 3 , ' < control_vertices > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " POSITION " source = " # ' + splineid + ' - positions " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " IN_TANGENT " source = " # ' + splineid + ' - intangents " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " OUT_TANGENT " source = " # ' + splineid + ' - outtangents " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " INTERPOLATION " source = " # ' + splineid + ' - interpolations " / > ' ) <nl> - self . writel ( S_GEOM , 4 , ' < input semantic = " TILT " source = " # ' + splineid + ' - tilts " / > ' ) <nl> - self . writel ( S_GEOM , 3 , ' < / control_vertices > ' ) <nl> - <nl> - <nl> - self . writel ( S_GEOM , 2 , ' < / spline > ' ) <nl> - self . writel ( S_GEOM , 1 , ' < / geometry > ' ) <nl> - <nl> - return splineid <nl> - <nl> - def export_curve_node ( self , node , il ) : <nl> - <nl> - if ( node . data = = None ) : <nl> - return <nl> - curveid = self . export_curve ( node . data ) <nl> - <nl> - self . writel ( S_NODES , il , ' < instance_geometry url = " # ' + curveid + ' " > ' ) <nl> - self . writel ( S_NODES , il , ' < / instance_geometry > ' ) <nl> - <nl> - <nl> - <nl> - def export_node ( self , node , il ) : <nl> - if ( not node in self . valid_nodes ) : <nl> - return <nl> - prev_node = bpy . context . scene . objects . active <nl> - bpy . context . scene . objects . active = node <nl> - <nl> - self . writel ( S_NODES , il , ' < node id = " ' + self . validate_id ( node . name ) + ' " name = " ' + node . name + ' " type = " NODE " > ' ) <nl> - il + = 1 <nl> - <nl> - self . writel ( S_NODES , il , ' < matrix sid = " transform " > ' + strmtx ( node . matrix_local ) + ' < / matrix > ' ) <nl> - # print ( " NODE TYPE : " + node . type + " NAME : " + node . name ) <nl> - if ( node . type = = " MESH " ) : <nl> - self . export_mesh_node ( node , il ) <nl> - elif ( node . type = = " CURVE " ) : <nl> - self . export_curve_node ( node , il ) <nl> - elif ( node . type = = " ARMATURE " ) : <nl> - self . export_armature_node ( node , il ) <nl> - elif ( node . type = = " CAMERA " ) : <nl> - self . export_camera_node ( node , il ) <nl> - elif ( node . type = = " LAMP " ) : <nl> - self . export_lamp_node ( node , il ) <nl> - elif ( node . type = = " EMPTY " ) : <nl> - self . export_empty_node ( node , il ) <nl> - <nl> - for x in node . children : <nl> - self . export_node ( x , il ) <nl> - <nl> - il - = 1 <nl> - self . writel ( S_NODES , il , ' < / node > ' ) <nl> - bpy . context . scene . objects . active = prev_node # make previous node active again <nl> - <nl> - def is_node_valid ( self , node ) : <nl> - if ( not node . type in self . config [ " object_types " ] ) : <nl> - return False <nl> - if ( self . config [ " use_active_layers " ] ) : <nl> - valid = False <nl> - # print ( " NAME : " + node . name ) <nl> - for i in range ( 20 ) : <nl> - if ( node . layers [ i ] and self . scene . layers [ i ] ) : <nl> - valid = True <nl> - break <nl> - if ( not valid ) : <nl> - return False <nl> - <nl> - if ( self . config [ " use_export_selected " ] and not node . select ) : <nl> - return False <nl> - <nl> - return True <nl> - <nl> - <nl> - def export_scene ( self ) : <nl> - <nl> - <nl> - self . writel ( S_NODES , 0 , ' < library_visual_scenes > ' ) <nl> - self . writel ( S_NODES , 1 , ' < visual_scene id = " ' + self . 
scene_name + ' " name = " scene " > ' ) <nl> - <nl> - # validate nodes <nl> - for obj in self . scene . objects : <nl> - if ( obj in self . valid_nodes ) : <nl> - continue <nl> - if ( self . is_node_valid ( obj ) ) : <nl> - n = obj <nl> - while ( n ! = None ) : <nl> - if ( not n in self . valid_nodes ) : <nl> - self . valid_nodes . append ( n ) <nl> - n = n . parent <nl> - <nl> - <nl> - <nl> - for obj in self . scene . objects : <nl> - if ( obj in self . valid_nodes and obj . parent = = None ) : <nl> - self . export_node ( obj , 2 ) <nl> - <nl> - self . writel ( S_NODES , 1 , ' < / visual_scene > ' ) <nl> - self . writel ( S_NODES , 0 , ' < / library_visual_scenes > ' ) <nl> - <nl> - def export_asset ( self ) : <nl> - <nl> - <nl> - self . writel ( S_ASSET , 0 , ' < asset > ' ) <nl> - # Why is this time stuff mandatory ? , no one could care less . . . <nl> - self . writel ( S_ASSET , 1 , ' < contributor > ' ) <nl> - self . writel ( S_ASSET , 2 , ' < author > Anonymous < / author > ' ) # Who made Collada , the FBI ? <nl> - self . writel ( S_ASSET , 2 , ' < authoring_tool > Collada Exporter for Blender 2 . 6 + , by Juan Linietsky ( juan @ codenix . com ) < / authoring_tool > ' ) # Who made Collada , the FBI ? <nl> - self . writel ( S_ASSET , 1 , ' < / contributor > ' ) <nl> - self . writel ( S_ASSET , 1 , ' < created > ' + time . strftime ( " % Y - % m - % dT % H : % M : % SZ " ) + ' < / created > ' ) <nl> - self . writel ( S_ASSET , 1 , ' < modified > ' + time . strftime ( " % Y - % m - % dT % H : % M : % SZ " ) + ' < / modified > ' ) <nl> - self . writel ( S_ASSET , 1 , ' < unit meter = " 1 . 0 " name = " meter " / > ' ) <nl> - self . writel ( S_ASSET , 1 , ' < up_axis > Z_UP < / up_axis > ' ) <nl> - self . writel ( S_ASSET , 0 , ' < / asset > ' ) <nl> - <nl> - <nl> - def export_animation_transform_channel ( self , target , keys , matrices = True ) : <nl> - <nl> - frame_total = len ( keys ) <nl> - anim_id = self . new_id ( " anim " ) <nl> - self . writel ( S_ANIM , 1 , ' < animation id = " ' + anim_id + ' " > ' ) <nl> - source_frames = " " <nl> - source_transforms = " " <nl> - source_interps = " " <nl> - <nl> - for k in keys : <nl> - source_frames + = " " + str ( k [ 0 ] ) <nl> - if ( matrices ) : <nl> - source_transforms + = " " + strmtx ( k [ 1 ] ) <nl> - else : <nl> - source_transforms + = " " + str ( k [ 1 ] ) <nl> - <nl> - source_interps + = " LINEAR " <nl> - <nl> - <nl> - # Time Source <nl> - self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - input " > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < float_array id = " ' + anim_id + ' - input - array " count = " ' + str ( frame_total ) + ' " > ' + source_frames + ' < / float_array > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - input - array " count = " ' + str ( frame_total ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_ANIM , 5 , ' < param name = " TIME " type = " float " / > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> - self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> - <nl> - if ( matrices ) : <nl> - # Transform Source <nl> - self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - transform - output " > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < float_array id = " ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total * 16 ) + ' " > ' + source_transforms + ' < / float_array > ' ) <nl> - self . 
writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total ) + ' " stride = " 16 " > ' ) <nl> - self . writel ( S_ANIM , 5 , ' < param name = " TRANSFORM " type = " float4x4 " / > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> - self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> - else : <nl> - # Value Source <nl> - self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - transform - output " > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < float_array id = " ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total ) + ' " > ' + source_transforms + ' < / float_array > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_ANIM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> - self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> - <nl> - # Interpolation Source <nl> - self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - interpolation - output " > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < Name_array id = " ' + anim_id + ' - interpolation - output - array " count = " ' + str ( frame_total ) + ' " > ' + source_interps + ' < / Name_array > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - interpolation - output - array " count = " ' + str ( frame_total ) + ' " stride = " 1 " > ' ) <nl> - self . writel ( S_ANIM , 5 , ' < param name = " INTERPOLATION " type = " Name " / > ' ) <nl> - self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> - self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> - <nl> - self . writel ( S_ANIM , 2 , ' < sampler id = " ' + anim_id + ' - sampler " > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < input semantic = " INPUT " source = " # ' + anim_id + ' - input " / > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < input semantic = " OUTPUT " source = " # ' + anim_id + ' - transform - output " / > ' ) <nl> - self . writel ( S_ANIM , 3 , ' < input semantic = " INTERPOLATION " source = " # ' + anim_id + ' - interpolation - output " / > ' ) <nl> - self . writel ( S_ANIM , 2 , ' < / sampler > ' ) <nl> - if ( matrices ) : <nl> - self . writel ( S_ANIM , 2 , ' < channel source = " # ' + anim_id + ' - sampler " target = " ' + target + ' / transform " / > ' ) <nl> - else : <nl> - self . writel ( S_ANIM , 2 , ' < channel source = " # ' + anim_id + ' - sampler " target = " ' + target + ' " / > ' ) <nl> - self . writel ( S_ANIM , 1 , ' < / animation > ' ) <nl> - <nl> - return [ anim_id ] <nl> - <nl> - <nl> - def export_animation ( self , start , end , allowed = None ) : <nl> - <nl> - # Blender - > Collada frame conversion needs a little work <nl> - # Collada starts from 0 , blender usually from 1 <nl> - # The last frame must be included also <nl> - <nl> - frame_orig = self . scene . frame_current <nl> - <nl> - frame_len = 1 . 0 / self . scene . render . 
fps <nl> - frame_total = end - start + 1 <nl> - frame_sub = 0 <nl> - if ( start > 0 ) : <nl> - frame_sub = start * frame_len <nl> - <nl> - tcn = [ ] <nl> - xform_cache = { } <nl> - blend_cache = { } <nl> - # Change frames first , export objects last <nl> - # This improves performance enormously <nl> - <nl> - # print ( " anim from : " + str ( start ) + " to " + str ( end ) + " allowed : " + str ( allowed ) ) <nl> - for t in range ( start , end + 1 ) : <nl> - self . scene . frame_set ( t ) <nl> - key = t * frame_len - frame_sub <nl> - # print ( " Export Anim Frame " + str ( t ) + " / " + str ( self . scene . frame_end + 1 ) ) <nl> - <nl> - for node in self . scene . objects : <nl> - <nl> - if ( not node in self . valid_nodes ) : <nl> - continue <nl> - if ( allowed ! = None and not ( node in allowed ) ) : <nl> - if ( node . type = = " MESH " and node . data ! = None and ( node in self . armature_for_morph ) and ( self . armature_for_morph [ node ] in allowed ) ) : <nl> - pass # all good you pass with flying colors for morphs inside of action <nl> - else : <nl> - # print ( " fail " + str ( ( node in self . armature_for_morph ) ) ) <nl> - continue <nl> - if ( node . type = = " MESH " and node . data ! = None and node . data . shape_keys ! = None and ( node . data in self . mesh_cache ) and len ( node . data . shape_keys . key_blocks ) ) : <nl> - target = self . mesh_cache [ node . data ] [ " morph_id " ] <nl> - for i in range ( len ( node . data . shape_keys . key_blocks ) ) : <nl> - <nl> - if ( i = = 0 ) : <nl> - continue <nl> - <nl> - name = target + " - morph - weights ( " + str ( i - 1 ) + " ) " <nl> - if ( not ( name in blend_cache ) ) : <nl> - blend_cache [ name ] = [ ] <nl> - <nl> - blend_cache [ name ] . append ( ( key , node . data . shape_keys . key_blocks [ i ] . value ) ) <nl> - <nl> - <nl> - if ( node . type = = " MESH " and node . parent and node . parent . type = = " ARMATURE " ) : <nl> - <nl> - continue # In Collada , nodes that have skin modifier must not export animation , animate the skin instead . <nl> - <nl> - if ( len ( node . constraints ) > 0 or node . animation_data ! = None ) : <nl> - # If the node has constraints , or animation data , then export a sampled animation track <nl> - name = self . validate_id ( node . name ) <nl> - if ( not ( name in xform_cache ) ) : <nl> - xform_cache [ name ] = [ ] <nl> - <nl> - mtx = node . matrix_world . copy ( ) <nl> - if ( node . parent ) : <nl> - mtx = node . parent . matrix_world . inverted ( ) * mtx <nl> - <nl> - xform_cache [ name ] . append ( ( key , mtx ) ) <nl> - <nl> - if ( node . type = = " ARMATURE " ) : <nl> - # All bones exported for now <nl> - <nl> - for bone in node . data . bones : <nl> - <nl> - bone_name = self . skeleton_info [ node ] [ " bone_ids " ] [ bone ] <nl> - <nl> - if ( not ( bone_name in xform_cache ) ) : <nl> - # print ( " has bone : " + bone_name ) <nl> - xform_cache [ bone_name ] = [ ] <nl> - <nl> - posebone = node . pose . bones [ bone . name ] <nl> - parent_posebone = None <nl> - <nl> - mtx = posebone . matrix . copy ( ) <nl> - if ( bone . parent ) : <nl> - parent_posebone = node . pose . bones [ bone . parent . name ] <nl> - parent_invisible = False <nl> - <nl> - for i in range ( 3 ) : <nl> - if ( parent_posebone . scale [ i ] = = 0 . 0 ) : <nl> - parent_invisible = True <nl> - <nl> - if ( not parent_invisible ) : <nl> - mtx = parent_posebone . matrix . inverted ( ) * mtx <nl> - <nl> - <nl> - xform_cache [ bone_name ] . append ( ( key , mtx ) ) <nl> - <nl> - self . scene . 
frame_set ( frame_orig ) <nl> - <nl> - # export animation xml <nl> - for nid in xform_cache : <nl> - tcn + = self . export_animation_transform_channel ( nid , xform_cache [ nid ] , True ) <nl> - for nid in blend_cache : <nl> - tcn + = self . export_animation_transform_channel ( nid , blend_cache [ nid ] , False ) <nl> - <nl> - return tcn <nl> - <nl> - def export_animations ( self ) : <nl> - tmp_mat = [ ] <nl> - for s in self . skeletons : <nl> - tmp_bone_mat = [ ] <nl> - for bone in s . pose . bones : <nl> - tmp_bone_mat . append ( Matrix ( bone . matrix_basis ) ) <nl> - bone . matrix_basis = Matrix ( ) <nl> - tmp_mat . append ( [ Matrix ( s . matrix_local ) , tmp_bone_mat ] ) <nl> - <nl> - self . writel ( S_ANIM , 0 , ' < library_animations > ' ) <nl> + s = " " <nl> + for x in a : <nl> + s + = " " + str ( x * mult ) <nl> + s + = " " <nl> + return s <nl> <nl> <nl> - if ( self . config [ " use_anim_action_all " ] and len ( self . skeletons ) ) : <nl> - <nl> - cached_actions = { } <nl> - <nl> - for s in self . skeletons : <nl> - if s . animation_data and s . animation_data . action : <nl> - cached_actions [ s ] = s . animation_data . action . name <nl> - <nl> - <nl> - self . writel ( S_ANIM_CLIPS , 0 , ' < library_animation_clips > ' ) <nl> - <nl> - for x in bpy . data . actions [ : ] : <nl> - if x . users = = 0 or x in self . action_constraints : <nl> - continue <nl> - if ( self . config [ " use_anim_skip_noexp " ] and x . name . endswith ( " - noexp " ) ) : <nl> - continue <nl> - <nl> - bones = [ ] <nl> - # find bones used <nl> - for p in x . fcurves : <nl> - dp = str ( p . data_path ) <nl> - base = " pose . bones [ \ " " <nl> - if ( dp . find ( base ) = = 0 ) : <nl> - dp = dp [ len ( base ) : ] <nl> - if ( dp . find ( ' " ' ) ! = - 1 ) : <nl> - dp = dp [ : dp . find ( ' " ' ) ] <nl> - if ( not dp in bones ) : <nl> - bones . append ( dp ) <nl> - <nl> - allowed_skeletons = [ ] <nl> - for i , y in enumerate ( self . skeletons ) : <nl> - if ( y . animation_data ) : <nl> - for z in y . pose . bones : <nl> - if ( z . bone . name in bones ) : <nl> - if ( not y in allowed_skeletons ) : <nl> - allowed_skeletons . append ( y ) <nl> - y . animation_data . action = x ; <nl> - <nl> - y . matrix_local = tmp_mat [ i ] [ 0 ] <nl> - for j , bone in enumerate ( s . pose . bones ) : <nl> - bone . matrix_basis = Matrix ( ) <nl> - <nl> - <nl> - # print ( " allowed skeletons " + str ( allowed_skeletons ) ) <nl> - <nl> - # print ( str ( x ) ) <nl> - <nl> - tcn = self . export_animation ( int ( x . frame_range [ 0 ] ) , int ( x . frame_range [ 1 ] + 0 . 5 ) , allowed_skeletons ) <nl> - framelen = ( 1 . 0 / self . scene . render . fps ) <nl> - start = x . frame_range [ 0 ] * framelen <nl> - end = x . frame_range [ 1 ] * framelen <nl> - # print ( " Export anim : " + x . name ) <nl> - self . writel ( S_ANIM_CLIPS , 1 , ' < animation_clip name = " ' + x . name + ' " start = " ' + str ( start ) + ' " end = " ' + str ( end ) + ' " > ' ) <nl> - for z in tcn : <nl> - self . writel ( S_ANIM_CLIPS , 2 , ' < instance_animation url = " # ' + z + ' " / > ' ) <nl> - self . writel ( S_ANIM_CLIPS , 1 , ' < / animation_clip > ' ) <nl> - if ( len ( tcn ) = = 0 ) : <nl> - self . operator . report ( { ' WARNING ' } , ' Animation clip " ' + x . name + ' " contains no tracks . ' ) <nl> - <nl> - <nl> - <nl> - self . writel ( S_ANIM_CLIPS , 0 , ' < / library_animation_clips > ' ) <nl> - <nl> - <nl> - for i , s in enumerate ( self . skeletons ) : <nl> - if ( s . 
animation_data = = None ) : <nl> - continue <nl> - if s in cached_actions : <nl> - s . animation_data . action = bpy . data . actions [ cached_actions [ s ] ] <nl> - else : <nl> - s . animation_data . action = None <nl> - for j , bone in enumerate ( s . pose . bones ) : <nl> - bone . matrix_basis = tmp_mat [ i ] [ 1 ] [ j ] <nl> - <nl> - else : <nl> - self . export_animation ( self . scene . frame_start , self . scene . frame_end ) <nl> - <nl> - <nl> - <nl> - self . writel ( S_ANIM , 0 , ' < / library_animations > ' ) <nl> - <nl> - def export ( self ) : <nl> - <nl> - self . writel ( S_GEOM , 0 , ' < library_geometries > ' ) <nl> - self . writel ( S_CONT , 0 , ' < library_controllers > ' ) <nl> - self . writel ( S_CAMS , 0 , ' < library_cameras > ' ) <nl> - self . writel ( S_LAMPS , 0 , ' < library_lights > ' ) <nl> - self . writel ( S_IMGS , 0 , ' < library_images > ' ) <nl> - self . writel ( S_MATS , 0 , ' < library_materials > ' ) <nl> - self . writel ( S_FX , 0 , ' < library_effects > ' ) <nl> - <nl> - <nl> - self . skeletons = [ ] <nl> - self . action_constraints = [ ] <nl> - self . export_asset ( ) <nl> - self . export_scene ( ) <nl> - <nl> - self . writel ( S_GEOM , 0 , ' < / library_geometries > ' ) <nl> - <nl> - # morphs always go before skin controllers <nl> - if S_MORPH in self . sections : <nl> - for l in self . sections [ S_MORPH ] : <nl> - self . writel ( S_CONT , 0 , l ) <nl> - del self . sections [ S_MORPH ] <nl> - <nl> - # morphs always go before skin controllers <nl> - if S_SKIN in self . sections : <nl> - for l in self . sections [ S_SKIN ] : <nl> - self . writel ( S_CONT , 0 , l ) <nl> - del self . sections [ S_SKIN ] <nl> - <nl> - self . writel ( S_CONT , 0 , ' < / library_controllers > ' ) <nl> - self . writel ( S_CAMS , 0 , ' < / library_cameras > ' ) <nl> - self . writel ( S_LAMPS , 0 , ' < / library_lights > ' ) <nl> - self . writel ( S_IMGS , 0 , ' < / library_images > ' ) <nl> - self . writel ( S_MATS , 0 , ' < / library_materials > ' ) <nl> - self . writel ( S_FX , 0 , ' < / library_effects > ' ) <nl> - <nl> - if ( self . config [ " use_anim " ] ) : <nl> - self . export_animations ( ) <nl> - <nl> - try : <nl> - f = open ( self . path , " wb " ) <nl> - except : <nl> - return False <nl> - <nl> - f . write ( bytes ( ' < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > \ n ' , " UTF - 8 " ) ) <nl> - f . write ( bytes ( ' < COLLADA xmlns = " http : / / www . collada . org / 2005 / 11 / COLLADASchema " version = " 1 . 4 . 1 " > \ n ' , " UTF - 8 " ) ) <nl> - <nl> - <nl> - s = [ ] <nl> - for x in self . sections . keys ( ) : <nl> - s . append ( x ) <nl> - s . sort ( ) <nl> - for x in s : <nl> - for l in self . sections [ x ] : <nl> - f . write ( bytes ( l + " \ n " , " UTF - 8 " ) ) <nl> - <nl> - f . write ( bytes ( ' < scene > \ n ' , " UTF - 8 " ) ) <nl> - f . write ( bytes ( ' \ t < instance_visual_scene url = " # ' + self . scene_name + ' " / > \ n ' , " UTF - 8 " ) ) <nl> - f . write ( bytes ( ' < / scene > \ n ' , " UTF - 8 " ) ) <nl> - f . write ( bytes ( ' < / COLLADA > \ n ' , " UTF - 8 " ) ) <nl> - return True <nl> - <nl> - def __init__ ( self , path , kwargs , operator ) : <nl> - self . operator = operator <nl> - self . scene = bpy . context . scene <nl> - self . last_id = 0 <nl> - self . scene_name = self . new_id ( " scene " ) <nl> - self . sections = { } <nl> - self . path = path <nl> - self . mesh_cache = { } <nl> - self . curve_cache = { } <nl> - self . material_cache = { } <nl> - self . image_cache = { } <nl> - self . 
skeleton_info = { } <nl> - self . config = kwargs <nl> - self . valid_nodes = [ ] <nl> - self . armature_for_morph = { } <nl> - self . used_bones = [ ] <nl> - self . wrongvtx_report = False <nl> + def numarr_alpha ( a , mult = 1 . 0 ) : <nl> + s = " " <nl> + for x in a : <nl> + s + = " " + str ( x * mult ) <nl> + if len ( a ) = = 3 : <nl> + s + = " 1 . 0 " <nl> + s + = " " <nl> + return s <nl> <nl> <nl> + def strarr ( arr ) : <nl> + s = " " <nl> + for x in arr : <nl> + s + = " " + str ( x ) <nl> + s + = " " <nl> + return s <nl> <nl> <nl> + class DaeExporter : <nl> <nl> + def validate_id ( self , d ) : <nl> + if ( d . find ( " id - " ) = = 0 ) : <nl> + return " z " + d <nl> + return d <nl> + <nl> + def new_id ( self , t ) : <nl> + self . last_id + = 1 <nl> + return " id - " + t + " - " + str ( self . last_id ) <nl> + <nl> + class Vertex : <nl> + <nl> + def close_to ( self , v ) : <nl> + if ( ( self . vertex - v . vertex ) . length > CMP_EPSILON ) : <nl> + return False <nl> + if ( ( self . normal - v . normal ) . length > CMP_EPSILON ) : <nl> + return False <nl> + if ( ( self . uv - v . uv ) . length > CMP_EPSILON ) : <nl> + return False <nl> + if ( ( self . uv2 - v . uv2 ) . length > CMP_EPSILON ) : <nl> + return False <nl> + <nl> + return True <nl> + <nl> + def get_tup ( self ) : <nl> + tup = ( self . vertex . x , self . vertex . y , self . vertex . z , self . normal . x , self . normal . y , self . normal . z ) <nl> + for t in self . uv : <nl> + tup = tup + ( t . x , t . y ) <nl> + if ( self . color ! = None ) : <nl> + tup = tup + ( self . color . x , self . color . y , self . color . z ) <nl> + if ( self . tangent ! = None ) : <nl> + tup = tup + ( self . tangent . x , self . tangent . y , self . tangent . z ) <nl> + if ( self . bitangent ! = None ) : <nl> + tup = tup + ( self . bitangent . x , self . bitangent . y , self . bitangent . z ) <nl> + for t in self . bones : <nl> + tup = tup + ( float ( t ) , ) <nl> + for t in self . weights : <nl> + tup = tup + ( float ( t ) , ) <nl> + <nl> + return tup <nl> + <nl> + def __init__ ( self ) : <nl> + self . vertex = Vector ( ( 0 . 0 , 0 . 0 , 0 . 0 ) ) <nl> + self . normal = Vector ( ( 0 . 0 , 0 . 0 , 0 . 0 ) ) <nl> + self . tangent = None <nl> + self . bitangent = None <nl> + self . color = None <nl> + self . uv = [ ] <nl> + self . uv2 = Vector ( ( 0 . 0 , 0 . 0 ) ) <nl> + self . bones = [ ] <nl> + self . weights = [ ] <nl> + <nl> + def writel ( self , section , indent , text ) : <nl> + if ( not ( section in self . sections ) ) : <nl> + self . sections [ section ] = [ ] <nl> + line = " " <nl> + for x in range ( indent ) : <nl> + line + = " \ t " <nl> + line + = text <nl> + self . sections [ section ] . append ( line ) <nl> + <nl> + def export_image ( self , image ) : <nl> + if ( image in self . image_cache ) : <nl> + return self . image_cache [ image ] <nl> + <nl> + imgpath = image . filepath <nl> + if ( imgpath . find ( " / / " ) = = 0 or imgpath . find ( " \ \ \ \ " ) = = 0 ) : <nl> + # if relative , convert to absolute <nl> + imgpath = bpy . path . abspath ( imgpath ) <nl> + <nl> + # path is absolute , now do something ! <nl> + if ( self . config [ " use_copy_images " ] ) : <nl> + # copy image <nl> + basedir = os . path . dirname ( self . path ) + " / images " <nl> + if ( not os . path . isdir ( basedir ) ) : <nl> + os . makedirs ( basedir ) <nl> + <nl> + if os . path . isfile ( imgpath ) : <nl> + dstfile = basedir + " / " + os . path . basename ( imgpath ) <nl> + <nl> + if ( not os . path . isfile ( dstfile ) ) : <nl>
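+ # skip the copy if the destination file already exists <nl> + shutil . 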
copy ( imgpath , dstfile ) <nl> + imgpath = " images / " + os . path . basename ( imgpath ) <nl> + else : <nl> + # # # if file is not found save it as png file in the destination folder <nl> + img_tmp_path = image . filepath <nl> + if img_tmp_path . endswith ( ( " . bmp " , " . rgb " , " . png " , " . jpeg " , " . jpg " , " . jp2 " , " . tga " , " . cin " , " . dpx " , " . exr " , " . hdr " , " . tif " ) ) : <nl> + image . filepath = basedir + " / " + os . path . basename ( img_tmp_path ) <nl> + else : <nl> + image . filepath = basedir + " / " + image . name + " . png " <nl> + <nl> + dstfile = basedir + " / " + os . path . basename ( image . filepath ) <nl> + <nl> + if ( not os . path . isfile ( dstfile ) ) : <nl> + <nl> + image . save ( ) <nl> + imgpath = " images / " + os . path . basename ( image . filepath ) <nl> + image . filepath = img_tmp_path <nl> + <nl> + else : <nl> + # export relative , always , no one wants absolute paths . <nl> + try : <nl> + imgpath = os . path . relpath ( imgpath , os . path . dirname ( self . path ) ) . replace ( " \ \ " , " / " ) # export unix compatible always <nl> + <nl> + except : <nl> + pass # fails sometimes , not sure why <nl> + <nl> + imgid = self . new_id ( " image " ) <nl> + <nl> + print ( " FOR : " + imgpath ) <nl> + <nl> + # if ( not os . path . isfile ( imgpath ) ) : <nl> + # print ( " NOT FILE ? " ) <nl> + # if imgpath . endswith ( ( " . bmp " , " . rgb " , " . png " , " . jpeg " , " . jpg " , " . jp2 " , " . tga " , " . cin " , " . dpx " , " . exr " , " . hdr " , " . tif " ) ) : <nl> + # imgpath = " images / " + os . path . basename ( imgpath ) <nl> + # else : <nl> + # imgpath = " images / " + image . name + " . png " <nl> + <nl> + self . writel ( S_IMGS , 1 , ' < image id = " ' + imgid + ' " name = " ' + image . name + ' " > ' ) <nl> + self . writel ( S_IMGS , 2 , ' < init_from > ' + imgpath + ' < / init_from > ' ) <nl> + self . writel ( S_IMGS , 1 , ' < / image > ' ) <nl> + self . image_cache [ image ] = imgid <nl> + return imgid <nl> + <nl> + def export_material ( self , material , double_sided_hint = True ) : <nl> + if ( material in self . material_cache ) : <nl> + return self . material_cache [ material ] <nl> + <nl> + fxid = self . new_id ( " fx " ) <nl> + self . writel ( S_FX , 1 , ' < effect id = " ' + fxid + ' " name = " ' + material . name + ' - fx " > ' ) <nl> + self . writel ( S_FX , 2 , ' < profile_COMMON > ' ) <nl> + <nl> + # Find and fetch the textures and create sources <nl> + sampler_table = { } <nl> + diffuse_tex = None <nl> + specular_tex = None <nl> + emission_tex = None <nl> + normal_tex = None <nl> + for i in range ( len ( material . texture_slots ) ) : <nl> + ts = material . texture_slots [ i ] <nl> + if ( not ts ) : <nl> + continue <nl> + if ( not ts . use ) : <nl> + continue <nl> + if ( not ts . texture ) : <nl> + continue <nl> + if ( ts . texture . type ! = " IMAGE " ) : <nl> + continue <nl> + <nl> + if ( ts . texture . image = = None ) : <nl> + continue <nl> + <nl> + # image <nl> + imgid = self . export_image ( ts . texture . image ) <nl> + <nl> + # surface <nl> + surface_sid = self . new_id ( " fx_surf " ) <nl> + self . writel ( S_FX , 3 , ' < newparam sid = " ' + surface_sid + ' " > ' ) <nl> + self . writel ( S_FX , 4 , ' < surface type = " 2D " > ' ) <nl> + self . writel ( S_FX , 5 , ' < init_from > ' + imgid + ' < / init_from > ' ) # this is sooo weird <nl> + self . writel ( S_FX , 5 , ' < format > A8R8G8B8 < / format > ' ) <nl> + self . writel ( S_FX , 4 , ' < / surface > ' ) <nl> + self . 
writel ( S_FX , 3 , ' < / newparam > ' ) <nl> + # sampler , collada sure likes it difficult <nl> + sampler_sid = self . new_id ( " fx_sampler " ) <nl> + self . writel ( S_FX , 3 , ' < newparam sid = " ' + sampler_sid + ' " > ' ) <nl> + self . writel ( S_FX , 4 , ' < sampler2D > ' ) <nl> + self . writel ( S_FX , 5 , ' < source > ' + surface_sid + ' < / source > ' ) <nl> + self . writel ( S_FX , 4 , ' < / sampler2D > ' ) <nl> + self . writel ( S_FX , 3 , ' < / newparam > ' ) <nl> + sampler_table [ i ] = sampler_sid <nl> + <nl> + if ( ts . use_map_color_diffuse and diffuse_tex = = None ) : <nl> + diffuse_tex = sampler_sid <nl> + if ( ts . use_map_color_spec and specular_tex = = None ) : <nl> + specular_tex = sampler_sid <nl> + if ( ts . use_map_emit and emission_tex = = None ) : <nl> + emission_tex = sampler_sid <nl> + if ( ts . use_map_normal and normal_tex = = None ) : <nl> + normal_tex = sampler_sid <nl> + <nl> + self . writel ( S_FX , 3 , ' < technique sid = " common " > ' ) <nl> + shtype = " blinn " <nl> + self . writel ( S_FX , 4 , ' < ' + shtype + ' > ' ) <nl> + # ambient ? from where ? <nl> + <nl> + self . writel ( S_FX , 5 , ' < emission > ' ) <nl> + if ( emission_tex ! = None ) : <nl> + self . writel ( S_FX , 6 , ' < texture texture = " ' + emission_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> + else : <nl> + self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . diffuse_color , material . emit ) + ' < / color > ' ) # not totally right but good enough <nl> + self . writel ( S_FX , 5 , ' < / emission > ' ) <nl> + <nl> + self . writel ( S_FX , 5 , ' < ambient > ' ) <nl> + self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( self . scene . world . ambient_color , material . ambient ) + ' < / color > ' ) <nl> + self . writel ( S_FX , 5 , ' < / ambient > ' ) <nl> + <nl> + self . writel ( S_FX , 5 , ' < diffuse > ' ) <nl> + if ( diffuse_tex ! = None ) : <nl> + self . writel ( S_FX , 6 , ' < texture texture = " ' + diffuse_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> + else : <nl> + self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . diffuse_color , material . diffuse_intensity ) + ' < / color > ' ) <nl> + self . writel ( S_FX , 5 , ' < / diffuse > ' ) <nl> + <nl> + self . writel ( S_FX , 5 , ' < specular > ' ) <nl> + if ( specular_tex ! = None ) : <nl> + self . writel ( S_FX , 6 , ' < texture texture = " ' + specular_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> + else : <nl> + self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . specular_color , material . specular_intensity ) + ' < / color > ' ) <nl> + self . writel ( S_FX , 5 , ' < / specular > ' ) <nl> + <nl> + self . writel ( S_FX , 5 , ' < shininess > ' ) <nl> + self . writel ( S_FX , 6 , ' < float > ' + str ( material . specular_hardness ) + ' < / float > ' ) <nl> + self . writel ( S_FX , 5 , ' < / shininess > ' ) <nl> + <nl> + self . writel ( S_FX , 5 , ' < reflective > ' ) <nl> + self . writel ( S_FX , 6 , ' < color > ' + numarr_alpha ( material . mirror_color ) + ' < / color > ' ) <nl> + self . writel ( S_FX , 5 , ' < / reflective > ' ) <nl> + <nl> + if ( material . use_transparency ) : <nl> + self . writel ( S_FX , 5 , ' < transparency > ' ) <nl> + self . writel ( S_FX , 6 , ' < float > ' + str ( material . alpha ) + ' < / float > ' ) <nl> + self . writel ( S_FX , 5 , ' < / transparency > ' ) <nl> + <nl> + self . writel ( S_FX , 5 , ' < index_of_refraction > ' ) <nl> + self . writel ( S_FX , 6 , ' < float > ' + str ( material . 
specular_ior ) + ' < / float > ' ) <nl> + self . writel ( S_FX , 5 , ' < / index_of_refraction > ' ) <nl> + <nl> + self . writel ( S_FX , 4 , ' < / ' + shtype + ' > ' ) <nl> + <nl> + self . writel ( S_FX , 4 , ' < extra > ' ) <nl> + self . writel ( S_FX , 5 , ' < technique profile = " FCOLLADA " > ' ) <nl> + if ( normal_tex ) : <nl> + self . writel ( S_FX , 6 , ' < bump bumptype = " NORMALMAP " > ' ) <nl> + self . writel ( S_FX , 7 , ' < texture texture = " ' + normal_tex + ' " texcoord = " CHANNEL1 " / > ' ) <nl> + self . writel ( S_FX , 6 , ' < / bump > ' ) <nl> + <nl> + self . writel ( S_FX , 5 , ' < / technique > ' ) <nl> + self . writel ( S_FX , 5 , ' < technique profile = " GOOGLEEARTH " > ' ) <nl> + self . writel ( S_FX , 6 , ' < double_sided > ' + [ " 0 " , " 1 " ] [ double_sided_hint ] + " < / double_sided > " ) <nl> + self . writel ( S_FX , 5 , ' < / technique > ' ) <nl> + <nl> + if ( material . use_shadeless ) : <nl> + self . writel ( S_FX , 5 , ' < technique profile = " GODOT " > ' ) <nl> + self . writel ( S_FX , 6 , ' < unshaded > 1 < / unshaded > ' ) <nl> + self . writel ( S_FX , 5 , ' < / technique > ' ) <nl> + <nl> + self . writel ( S_FX , 4 , ' < / extra > ' ) <nl> + <nl> + self . writel ( S_FX , 3 , ' < / technique > ' ) <nl> + self . writel ( S_FX , 2 , ' < / profile_COMMON > ' ) <nl> + self . writel ( S_FX , 1 , ' < / effect > ' ) <nl> + <nl> + # Also export blender material in all its glory ( if set as active ) <nl> + <nl> + # Material <nl> + matid = self . new_id ( " material " ) <nl> + self . writel ( S_MATS , 1 , ' < material id = " ' + matid + ' " name = " ' + material . name + ' " > ' ) <nl> + self . writel ( S_MATS , 2 , ' < instance_effect url = " # ' + fxid + ' " / > ' ) <nl> + self . writel ( S_MATS , 1 , ' < / material > ' ) <nl> + <nl> + self . material_cache [ material ] = matid <nl> + return matid <nl> + <nl> + def export_mesh ( self , node , armature = None , skeyindex = - 1 , skel_source = None , custom_name = None ) : <nl> + mesh = node . data <nl> + <nl> + if ( node . data in self . mesh_cache ) : <nl> + return self . mesh_cache [ mesh ] <nl> + <nl> + if ( skeyindex = = - 1 and mesh . shape_keys ! = None and len ( mesh . shape_keys . key_blocks ) ) : <nl> + values = [ ] <nl> + morph_targets = [ ] <nl> + md = None <nl> + for k in range ( 0 , len ( mesh . shape_keys . key_blocks ) ) : <nl> + shape = node . data . shape_keys . key_blocks [ k ] <nl> + values + = [ shape . value ] # save value <nl> + shape . value = 0 <nl> + <nl> + mid = self . new_id ( " morph " ) <nl> + <nl> + for k in range ( 0 , len ( mesh . shape_keys . key_blocks ) ) : <nl> + <nl> + shape = node . data . shape_keys . key_blocks [ k ] <nl> + node . show_only_shape_key = True <nl> + node . active_shape_key_index = k <nl> + shape . value = 1 . 0 <nl> + mesh . update ( ) <nl> + " " " <nl> + oldval = shape . value <nl> + shape . value = 1 . 0 <nl> + <nl> + " " " <nl> + p = node . data <nl> + v = node . to_mesh ( bpy . context . scene , True , " RENDER " ) <nl> + node . data = v <nl> + # self . export_node ( node , il , shape . name ) <nl> + node . data . update ( ) <nl> + if ( armature and k = = 0 ) : <nl> + md = self . export_mesh ( node , armature , k , mid , shape . name ) <nl> + else : <nl> + md = self . export_mesh ( node , None , k , None , shape . name ) <nl> + <nl> + node . data = p <nl> + node . data . update ( ) <nl> + shape . value = 0 . 0 <nl> + morph_targets . append ( md ) <nl> + <nl> + " " " <nl> + shape . value = oldval <nl> + " " " <nl>
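+ # baking done , restore the shape key state changed above <nl> + node . 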
show_only_shape_key = False <nl> + node . active_shape_key_index = 0 <nl> + <nl> + self . writel ( S_MORPH , 1 , ' < controller id = " ' + mid + ' " name = " " > ' ) <nl> + # if ( " skin_id " in morph_targets [ 0 ] ) : <nl> + # self . writel ( S_MORPH , 2 , ' < morph source = " # ' + morph_targets [ 0 ] [ " skin_id " ] + ' " method = " NORMALIZED " > ' ) <nl> + # else : <nl> + self . writel ( S_MORPH , 2 , ' < morph source = " # ' + morph_targets [ 0 ] [ " id " ] + ' " method = " NORMALIZED " > ' ) <nl> + <nl> + self . writel ( S_MORPH , 3 , ' < source id = " ' + mid + ' - morph - targets " > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < IDREF_array id = " ' + mid + ' - morph - targets - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " > ' ) <nl> + marr = " " <nl> + warr = " " <nl> + for i in range ( len ( morph_targets ) ) : <nl> + if ( i = = 0 ) : <nl> + continue <nl> + elif ( i > 1 ) : <nl> + marr + = " " <nl> + <nl> + if ( " skin_id " in morph_targets [ i ] ) : <nl> + marr + = morph_targets [ i ] [ " skin_id " ] <nl> + else : <nl> + marr + = morph_targets [ i ] [ " id " ] <nl> + <nl> + warr + = " 0 " <nl> + <nl> + self . writel ( S_MORPH , 5 , marr ) <nl> + self . writel ( S_MORPH , 4 , ' < / IDREF_array > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_MORPH , 5 , ' < accessor source = " # ' + mid + ' - morph - targets - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_MORPH , 6 , ' < param name = " MORPH_TARGET " type = " IDREF " / > ' ) <nl> + self . writel ( S_MORPH , 5 , ' < / accessor > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_MORPH , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_MORPH , 3 , ' < source id = " ' + mid + ' - morph - weights " > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < float_array id = " ' + mid + ' - morph - weights - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " > ' ) <nl> + self . writel ( S_MORPH , 5 , warr ) <nl> + self . writel ( S_MORPH , 4 , ' < / float_array > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_MORPH , 5 , ' < accessor source = " # ' + mid + ' - morph - weights - array " count = " ' + str ( len ( morph_targets ) - 1 ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_MORPH , 6 , ' < param name = " MORPH_WEIGHT " type = " float " / > ' ) <nl> + self . writel ( S_MORPH , 5 , ' < / accessor > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_MORPH , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_MORPH , 3 , ' < targets > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < input semantic = " MORPH_TARGET " source = " # ' + mid + ' - morph - targets " / > ' ) <nl> + self . writel ( S_MORPH , 4 , ' < input semantic = " MORPH_WEIGHT " source = " # ' + mid + ' - morph - weights " / > ' ) <nl> + self . writel ( S_MORPH , 3 , ' < / targets > ' ) <nl> + self . writel ( S_MORPH , 2 , ' < / morph > ' ) <nl> + self . writel ( S_MORPH , 1 , ' < / controller > ' ) <nl> + if ( armature ! = None ) : <nl> + <nl> + self . 
armature_for_morph [ node ] = armature <nl> + <nl> + meshdata = { } <nl> + if ( armature ) : <nl> + meshdata = morph_targets [ 0 ] <nl> + meshdata [ " morph_id " ] = mid <nl> + else : <nl> + meshdata [ " id " ] = morph_targets [ 0 ] [ " id " ] <nl> + meshdata [ " morph_id " ] = mid <nl> + meshdata [ " material_assign " ] = morph_targets [ 0 ] [ " material_assign " ] <nl> + <nl> + self . mesh_cache [ node . data ] = meshdata <nl> + return meshdata <nl> + <nl> + apply_modifiers = len ( node . modifiers ) and self . config [ " use_mesh_modifiers " ] <nl> + <nl> + name_to_use = mesh . name <nl> + # print ( " name to use : " + mesh . name ) <nl> + if ( custom_name ! = None and custom_name ! = " " ) : <nl> + name_to_use = custom_name <nl> + <nl> + mesh = node . to_mesh ( self . scene , apply_modifiers , " RENDER " ) # is this allright ? <nl> + <nl> + triangulate = self . config [ " use_triangles " ] <nl> + if ( triangulate ) : <nl> + bm = bmesh . new ( ) <nl> + bm . from_mesh ( mesh ) <nl> + bmesh . ops . triangulate ( bm , faces = bm . faces ) <nl> + bm . to_mesh ( mesh ) <nl> + bm . free ( ) <nl> + <nl> + mesh . update ( calc_tessface = True ) <nl> + vertices = [ ] <nl> + vertex_map = { } <nl> + surface_indices = { } <nl> + materials = { } <nl> + <nl> + materials = { } <nl> + <nl> + si = None <nl> + if ( armature ! = None ) : <nl> + si = self . skeleton_info [ armature ] <nl> + <nl> + has_uv = False <nl> + has_uv2 = False <nl> + has_weights = armature ! = None <nl> + has_tangents = self . config [ " use_tangent_arrays " ] # could detect . . <nl> + has_colors = len ( mesh . vertex_colors ) <nl> + mat_assign = [ ] <nl> + <nl> + uv_layer_count = len ( mesh . uv_textures ) <nl> + if ( has_tangents and len ( mesh . uv_textures ) ) : <nl> + try : <nl> + mesh . calc_tangents ( ) <nl> + except : <nl> + self . operator . report ( { ' WARNING ' } , ' CalcTangets failed for mesh " ' + mesh . name + ' " , no tangets will be exported . ' ) <nl> + # uv_layer_count = 0 <nl> + mesh . calc_normals_split ( ) <nl> + has_tangents = False <nl> + <nl> + else : <nl> + mesh . calc_normals_split ( ) <nl> + has_tangents = False <nl> + <nl> + for fi in range ( len ( mesh . polygons ) ) : <nl> + f = mesh . polygons [ fi ] <nl> + <nl> + if ( not ( f . material_index in surface_indices ) ) : <nl> + surface_indices [ f . material_index ] = [ ] <nl> + # print ( " Type : " + str ( type ( f . material_index ) ) ) <nl> + # print ( " IDX : " + str ( f . material_index ) + " / " + str ( len ( mesh . materials ) ) ) <nl> + <nl> + try : <nl> + # Bizarre blender behavior i don ' t understand , so catching exception <nl> + mat = mesh . materials [ f . material_index ] <nl> + except : <nl> + mat = None <nl> + <nl> + if ( mat ! = None ) : <nl> + materials [ f . material_index ] = self . export_material ( mat , mesh . show_double_sided ) <nl> + else : <nl> + materials [ f . material_index ] = None # weird , has no material ? <nl> + <nl> + indices = surface_indices [ f . material_index ] <nl> + vi = [ ] <nl> + # vertices always 3 <nl> + " " " <nl> + if ( len ( f . vertices ) = = 3 ) : <nl> + vi . append ( 0 ) <nl> + vi . append ( 1 ) <nl> + vi . append ( 2 ) <nl> + elif ( len ( f . vertices ) = = 4 ) : <nl> + # todo , should use shortest path <nl> + vi . append ( 0 ) <nl> + vi . append ( 1 ) <nl> + vi . append ( 2 ) <nl> + vi . append ( 0 ) <nl> + vi . append ( 2 ) <nl> + vi . append ( 3 ) <nl> + " " " <nl> + <nl> + for lt in range ( f . loop_total ) : <nl> + loop_index = f . loop_start + lt <nl> + ml = mesh . 
loops [ loop_index ] <nl> + mv = mesh . vertices [ ml . vertex_index ] <nl> + <nl> + v = self . Vertex ( ) <nl> + v . vertex = Vector ( mv . co ) <nl> + <nl> + for xt in mesh . uv_layers : <nl> + v . uv . append ( Vector ( xt . data [ loop_index ] . uv ) ) <nl> + <nl> + if ( has_colors ) : <nl> + v . color = Vector ( mesh . vertex_colors [ 0 ] . data [ loop_index ] . color ) <nl> + <nl> + v . normal = Vector ( ml . normal ) <nl> + <nl> + if ( has_tangents ) : <nl> + v . tangent = Vector ( ml . tangent ) <nl> + v . bitangent = Vector ( ml . bitangent ) <nl> + <nl> + # if ( armature ) : <nl> + # v . vertex = node . matrix_world * v . vertex <nl> + <nl> + # v . color = Vertex ( mv . ? ? ? <nl> + <nl> + if ( armature ! = None ) : <nl> + wsum = 0 . 0 <nl> + zero_bones = [ ] <nl> + <nl> + for vg in mv . groups : <nl> + if vg . group > = len ( node . vertex_groups ) : <nl> + continue ; <nl> + name = node . vertex_groups [ vg . group ] . name <nl> + <nl> + if ( name in si [ " bone_index " ] ) : <nl> + # could still put the weight as 0 . 0001 maybe <nl> + if ( vg . weight > 0 . 001 ) : # blender has a lot of zero weight stuff <nl> + v . bones . append ( si [ " bone_index " ] [ name ] ) <nl> + v . weights . append ( vg . weight ) <nl> + wsum + = vg . weight <nl> + if ( wsum = = 0 . 0 ) : <nl> + if not self . wrongvtx_report : <nl> + self . operator . report ( { ' WARNING ' } , ' Mesh for object " ' + node . name + ' " has unassigned weights . This may look wrong in exported model . ' ) <nl> + self . wrongvtx_report = True <nl> + <nl> + # blender can have bones assigned that weight zero so they remain local <nl> + # this is the best it can be done ? <nl> + v . bones . append ( 0 ) <nl> + v . weights . append ( 1 ) <nl> + <nl> + tup = v . get_tup ( ) <nl> + idx = 0 <nl> + if ( skeyindex = = - 1 and tup in vertex_map ) : # do not optmize if using shapekeys <nl> + idx = vertex_map [ tup ] <nl> + else : <nl> + idx = len ( vertices ) <nl> + vertices . append ( v ) <nl> + vertex_map [ tup ] = idx <nl> + <nl> + vi . append ( idx ) <nl> + <nl> + if ( len ( vi ) > 2 ) : <nl> + # only triangles and above <nl> + indices . append ( vi ) <nl> + <nl> + meshid = self . new_id ( " mesh " ) <nl> + self . writel ( S_GEOM , 1 , ' < geometry id = " ' + meshid + ' " name = " ' + name_to_use + ' " > ' ) <nl> + <nl> + self . writel ( S_GEOM , 2 , ' < mesh > ' ) <nl> + <nl> + # Vertex Array <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - positions " > ' ) <nl> + float_values = " " <nl> + for v in vertices : <nl> + float_values + = " " + str ( v . vertex . x ) + " " + str ( v . vertex . y ) + " " + str ( v . vertex . z ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - positions - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - positions - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + # Normal Array <nl> + <nl> + self . 
writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - normals " > ' ) <nl> + float_values = " " <nl> + for v in vertices : <nl> + float_values + = " " + str ( v . normal . x ) + " " + str ( v . normal . y ) + " " + str ( v . normal . z ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - normals - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - normals - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + if ( has_tangents ) : <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - tangents " > ' ) <nl> + float_values = " " <nl> + for v in vertices : <nl> + float_values + = " " + str ( v . tangent . x ) + " " + str ( v . tangent . y ) + " " + str ( v . tangent . z ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - tangents - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - tangents - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - bitangents " > ' ) <nl> + float_values = " " <nl> + for v in vertices : <nl> + float_values + = " " + str ( v . bitangent . x ) + " " + str ( v . bitangent . y ) + " " + str ( v . bitangent . z ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - bitangents - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - bitangents - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + # UV Arrays <nl> + for uvi in range ( uv_layer_count ) : <nl> + <nl> + self . 
writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - texcoord - ' + str ( uvi ) + ' " > ' ) <nl> + float_values = " " <nl> + for v in vertices : <nl> + try : <nl> + float_values + = " " + str ( v . uv [ uvi ] . x ) + " " + str ( v . uv [ uvi ] . y ) <nl> + except : <nl> + # I don ' t understand this weird multi - uv - layer API , but with this it seems to works <nl> + float_values + = " 0 0 " <nl> + <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - texcoord - ' + str ( uvi ) + ' - array " count = " ' + str ( len ( vertices ) * 2 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - texcoord - ' + str ( uvi ) + ' - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 2 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " S " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " T " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + # Color Arrays <nl> + <nl> + if ( has_colors ) : <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + meshid + ' - colors " > ' ) <nl> + float_values = " " <nl> + for v in vertices : <nl> + float_values + = " " + str ( v . color . x ) + " " + str ( v . color . y ) + " " + str ( v . color . z ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + meshid + ' - colors - array " count = " ' + str ( len ( vertices ) * 3 ) + ' " > ' + float_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + meshid + ' - colors - array " count = " ' + str ( len ( vertices ) ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + # Triangle Lists <nl> + self . writel ( S_GEOM , 3 , ' < vertices id = " ' + meshid + ' - vertices " > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " POSITION " source = " # ' + meshid + ' - positions " / > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / vertices > ' ) <nl> + <nl> + prim_type = " " <nl> + if ( triangulate ) : <nl> + prim_type = " triangles " <nl> + else : <nl> + prim_type = " polygons " <nl> + <nl> + for m in surface_indices : <nl> + indices = surface_indices [ m ] <nl> + mat = materials [ m ] <nl> + <nl> + if ( mat ! = None ) : <nl> + matref = self . new_id ( " trimat " ) <nl> + self . writel ( S_GEOM , 3 , ' < ' + prim_type + ' count = " ' + str ( int ( len ( indices ) ) ) + ' " material = " ' + matref + ' " > ' ) # todo material <nl> + mat_assign . append ( ( mat , matref ) ) <nl> + else : <nl> + self . writel ( S_GEOM , 3 , ' < ' + prim_type + ' count = " ' + str ( int ( len ( indices ) ) ) + ' " > ' ) # todo material <nl> + <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " VERTEX " source = " # ' + meshid + ' - vertices " offset = " 0 " / > ' ) <nl> + self . 
writel ( S_GEOM , 4 , ' < input semantic = " NORMAL " source = " # ' + meshid + ' - normals " offset = " 0 " / > ' ) <nl> + <nl> + for uvi in range ( uv_layer_count ) : <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " TEXCOORD " source = " # ' + meshid + ' - texcoord - ' + str ( uvi ) + ' " offset = " 0 " set = " ' + str ( uvi ) + ' " / > ' ) <nl> + <nl> + if ( has_colors ) : <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " COLOR " source = " # ' + meshid + ' - colors " offset = " 0 " / > ' ) <nl> + if ( has_tangents ) : <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " TEXTANGENT " source = " # ' + meshid + ' - tangents " offset = " 0 " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " TEXBINORMAL " source = " # ' + meshid + ' - bitangents " offset = " 0 " / > ' ) <nl> + <nl> + if ( triangulate ) : <nl> + int_values = " < p > " <nl> + for p in indices : <nl> + for i in p : <nl> + int_values + = " " + str ( i ) <nl> + int_values + = " < / p > " <nl> + self . writel ( S_GEOM , 4 , int_values ) <nl> + else : <nl> + for p in indices : <nl> + int_values = " < p > " <nl> + for i in p : <nl> + int_values + = " " + str ( i ) <nl> + int_values + = " < / p > " <nl> + self . writel ( S_GEOM , 4 , int_values ) <nl> + <nl> + self . writel ( S_GEOM , 3 , ' < / ' + prim_type + ' > ' ) <nl> + <nl> + self . writel ( S_GEOM , 2 , ' < / mesh > ' ) <nl> + self . writel ( S_GEOM , 1 , ' < / geometry > ' ) <nl> + <nl> + meshdata = { } <nl> + meshdata [ " id " ] = meshid <nl> + meshdata [ " material_assign " ] = mat_assign <nl> + if ( skeyindex = = - 1 ) : <nl> + self . mesh_cache [ node . data ] = meshdata <nl> + <nl> + # Export armature data ( if armature exists ) <nl> + if ( armature ! = None and ( skel_source ! = None or skeyindex = = - 1 ) ) : <nl> + <nl> + contid = self . new_id ( " controller " ) <nl> + <nl> + self . writel ( S_SKIN , 1 , ' < controller id = " ' + contid + ' " > ' ) <nl> + if ( skel_source ! = None ) : <nl> + self . writel ( S_SKIN , 2 , ' < skin source = " # ' + skel_source + ' " > ' ) <nl> + else : <nl> + self . writel ( S_SKIN , 2 , ' < skin source = " # ' + meshid + ' " > ' ) <nl> + <nl> + self . writel ( S_SKIN , 3 , ' < bind_shape_matrix > ' + strmtx ( node . matrix_world ) + ' < / bind_shape_matrix > ' ) <nl> + # Joint Names <nl> + self . writel ( S_SKIN , 3 , ' < source id = " ' + contid + ' - joints " > ' ) <nl> + name_values = " " <nl> + for v in si [ " bone_names " ] : <nl> + name_values + = " " + v <nl> + <nl> + self . writel ( S_SKIN , 4 , ' < Name_array id = " ' + contid + ' - joints - array " count = " ' + str ( len ( si [ " bone_names " ] ) ) + ' " > ' + name_values + ' < / Name_array > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < accessor source = " # ' + contid + ' - joints - array " count = " ' + str ( len ( si [ " bone_names " ] ) ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_SKIN , 5 , ' < param name = " JOINT " type = " Name " / > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_SKIN , 3 , ' < / source > ' ) <nl> + # Pose Matrices ! <nl> + self . writel ( S_SKIN , 3 , ' < source id = " ' + contid + ' - bind_poses " > ' ) <nl> + pose_values = " " <nl> + for v in si [ " bone_bind_poses " ] : <nl> + pose_values + = " " + strmtx ( v ) <nl> + <nl> + self . 
writel ( S_SKIN , 4 , ' < float_array id = " ' + contid + ' - bind_poses - array " count = " ' + str ( len ( si [ " bone_bind_poses " ] ) * 16 ) + ' " > ' + pose_values + ' < / float_array > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < accessor source = " # ' + contid + ' - bind_poses - array " count = " ' + str ( len ( si [ " bone_bind_poses " ] ) ) + ' " stride = " 16 " > ' ) <nl> + self . writel ( S_SKIN , 5 , ' < param name = " TRANSFORM " type = " float4x4 " / > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_SKIN , 3 , ' < / source > ' ) <nl> + # Skin Weights ! <nl> + self . writel ( S_SKIN , 3 , ' < source id = " ' + contid + ' - skin_weights " > ' ) <nl> + skin_weights = " " <nl> + skin_weights_total = 0 <nl> + for v in vertices : <nl> + skin_weights_total + = len ( v . weights ) <nl> + for w in v . weights : <nl> + skin_weights + = " " + str ( w ) <nl> + <nl> + self . writel ( S_SKIN , 4 , ' < float_array id = " ' + contid + ' - skin_weights - array " count = " ' + str ( skin_weights_total ) + ' " > ' + skin_weights + ' < / float_array > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < accessor source = " # ' + contid + ' - skin_weights - array " count = " ' + str ( skin_weights_total ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_SKIN , 5 , ' < param name = " WEIGHT " type = " float " / > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < / technique_common > ' ) <nl> + self . writel ( S_SKIN , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_SKIN , 3 , ' < joints > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < input semantic = " JOINT " source = " # ' + contid + ' - joints " / > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < input semantic = " INV_BIND_MATRIX " source = " # ' + contid + ' - bind_poses " / > ' ) <nl> + self . writel ( S_SKIN , 3 , ' < / joints > ' ) <nl> + self . writel ( S_SKIN , 3 , ' < vertex_weights count = " ' + str ( len ( vertices ) ) + ' " > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < input semantic = " JOINT " source = " # ' + contid + ' - joints " offset = " 0 " / > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < input semantic = " WEIGHT " source = " # ' + contid + ' - skin_weights " offset = " 1 " / > ' ) <nl> + vcounts = " " <nl> + vs = " " <nl> + vcount = 0 <nl> + for v in vertices : <nl> + vcounts + = " " + str ( len ( v . weights ) ) <nl> + for b in v . bones : <nl> + vs + = " " + str ( b ) <nl> + vs + = " " + str ( vcount ) <nl> + vcount + = 1 <nl> + self . writel ( S_SKIN , 4 , ' < vcount > ' + vcounts + ' < / vcount > ' ) <nl> + self . writel ( S_SKIN , 4 , ' < v > ' + vs + ' < / v > ' ) <nl> + self . writel ( S_SKIN , 3 , ' < / vertex_weights > ' ) <nl> + <nl> + self . writel ( S_SKIN , 2 , ' < / skin > ' ) <nl> + self . writel ( S_SKIN , 1 , ' < / controller > ' ) <nl> + meshdata [ " skin_id " ] = contid <nl> + <nl> + return meshdata <nl> + <nl> + def export_mesh_node ( self , node , il ) : <nl> + if ( node . data = = None ) : <nl> + return <nl> + <nl> + armature = None <nl> + armcount = 0 <nl> + for n in node . modifiers : <nl> + if ( n . type = = " ARMATURE " ) : <nl> + armcount + = 1 <nl> + <nl> + if ( node . parent ! = None ) : <nl> + if ( node . parent . type = = " ARMATURE " ) : <nl> + armature = node . parent <nl> + if ( armcount > 1 ) : <nl> + self . operator . 
report ( { ' WARNING ' } , ' Object " ' + node . name + ' " refers to more than one armature ! This is unsupported . ' ) <nl> + if ( armcount = = 0 ) : <nl> + self . operator . report ( { ' WARNING ' } , ' Object " ' + node . name + ' " is child of an armature , but has no armature modifier . ' ) <nl> + <nl> + if ( armcount > 0 and not armature ) : <nl> + self . operator . report ( { ' WARNING ' } , ' Object " ' + node . name + ' " has armature modifier , but is not a child of an armature . This is unsupported . ' ) <nl> + <nl> + if ( node . data . shape_keys ! = None ) : <nl> + sk = node . data . shape_keys <nl> + if ( sk . animation_data ) : <nl> + # print ( " HAS ANIM " ) <nl> + # print ( " DRIVERS : " + str ( len ( sk . animation_data . drivers ) ) ) <nl> + for d in sk . animation_data . drivers : <nl> + if ( d . driver ) : <nl> + for v in d . driver . variables : <nl> + for t in v . targets : <nl> + if ( t . id ! = None and t . id . name in self . scene . objects ) : <nl> + # print ( " LINKING " + str ( node ) + " WITH " + str ( t . id . name ) ) <nl> + self . armature_for_morph [ node ] = self . scene . objects [ t . id . name ] <nl> + <nl> + meshdata = self . export_mesh ( node , armature ) <nl> + close_controller = False <nl> + <nl> + if ( " skin_id " in meshdata ) : <nl> + close_controller = True <nl> + self . writel ( S_NODES , il , ' < instance_controller url = " # ' + meshdata [ " skin_id " ] + ' " > ' ) <nl> + for sn in self . skeleton_info [ armature ] [ " skeleton_nodes " ] : <nl> + self . writel ( S_NODES , il + 1 , ' < skeleton > # ' + sn + ' < / skeleton > ' ) <nl> + elif ( " morph_id " in meshdata ) : <nl> + self . writel ( S_NODES , il , ' < instance_controller url = " # ' + meshdata [ " morph_id " ] + ' " > ' ) <nl> + close_controller = True <nl> + elif ( armature = = None ) : <nl> + self . writel ( S_NODES , il , ' < instance_geometry url = " # ' + meshdata [ " id " ] + ' " > ' ) <nl> + <nl> + if ( len ( meshdata [ " material_assign " ] ) > 0 ) : <nl> + <nl> + self . writel ( S_NODES , il + 1 , ' < bind_material > ' ) <nl> + self . writel ( S_NODES , il + 2 , ' < technique_common > ' ) <nl> + for m in meshdata [ " material_assign " ] : <nl> + self . writel ( S_NODES , il + 3 , ' < instance_material symbol = " ' + m [ 1 ] + ' " target = " # ' + m [ 0 ] + ' " / > ' ) <nl> + <nl> + self . writel ( S_NODES , il + 2 , ' < / technique_common > ' ) <nl> + self . writel ( S_NODES , il + 1 , ' < / bind_material > ' ) <nl> + <nl> + if ( close_controller ) : <nl> + self . writel ( S_NODES , il , ' < / instance_controller > ' ) <nl> + else : <nl> + self . writel ( S_NODES , il , ' < / instance_geometry > ' ) <nl> + <nl> + def export_armature_bone ( self , bone , il , si ) : <nl> + boneid = self . new_id ( " bone " ) <nl> + boneidx = si [ " bone_count " ] <nl> + si [ " bone_count " ] + = 1 <nl> + bonesid = si [ " id " ] + " - " + str ( boneidx ) <nl> + if ( bone . name in self . used_bones ) : <nl> + if ( self . config [ " use_anim_action_all " ] ) : <nl> + self . operator . report ( { ' WARNING ' } , ' Bone name " ' + bone . name + ' " used in more than one skeleton . Actions might export wrong . ' ) <nl> + else : <nl> + self . used_bones . append ( bone . name ) <nl> + <nl> + si [ " bone_index " ] [ bone . name ] = boneidx <nl> + si [ " bone_ids " ] [ bone ] = boneid <nl> + si [ " bone_names " ] . append ( bonesid ) <nl> + self . writel ( S_NODES , il , ' < node id = " ' + boneid + ' " sid = " ' + bonesid + ' " name = " ' + bone . 
name + ' " type = " JOINT " > ' ) <nl> + il + = 1 <nl> + xform = bone . matrix_local <nl> + si [ " bone_bind_poses " ] . append ( ( si [ " armature_xform " ] * xform ) . inverted ( ) ) <nl> + <nl> + if ( bone . parent ! = None ) : <nl> + xform = bone . parent . matrix_local . inverted ( ) * xform <nl> + else : <nl> + si [ " skeleton_nodes " ] . append ( boneid ) <nl> + <nl> + self . writel ( S_NODES , il , ' < matrix sid = " transform " > ' + strmtx ( xform ) + ' < / matrix > ' ) <nl> + for c in bone . children : <nl> + self . export_armature_bone ( c , il , si ) <nl> + il - = 1 <nl> + self . writel ( S_NODES , il , ' < / node > ' ) <nl> + <nl> + def export_armature_node ( self , node , il ) : <nl> + if ( node . data = = None ) : <nl> + return <nl> + <nl> + self . skeletons . append ( node ) <nl> + <nl> + armature = node . data <nl> + self . skeleton_info [ node ] = { " bone_count " : 0 , " id " : self . new_id ( " skelbones " ) , " name " : node . name , " bone_index " : { } , " bone_ids " : { } , " bone_names " : [ ] , " bone_bind_poses " : [ ] , " skeleton_nodes " : [ ] , " armature_xform " : node . matrix_world } <nl> + <nl> + for b in armature . bones : <nl> + if ( b . parent ! = None ) : <nl> + continue <nl> + self . export_armature_bone ( b , il , self . skeleton_info [ node ] ) <nl> + <nl> + if ( node . pose ) : <nl> + for b in node . pose . bones : <nl> + for x in b . constraints : <nl> + if ( x . type = = ' ACTION ' ) : <nl> + self . action_constraints . append ( x . action ) <nl> + <nl> + def export_camera_node ( self , node , il ) : <nl> + if ( node . data = = None ) : <nl> + return <nl> + <nl> + camera = node . data <nl> + camid = self . new_id ( " camera " ) <nl> + self . writel ( S_CAMS , 1 , ' < camera id = " ' + camid + ' " name = " ' + camera . name + ' " > ' ) <nl> + self . writel ( S_CAMS , 2 , ' < optics > ' ) <nl> + self . writel ( S_CAMS , 3 , ' < technique_common > ' ) <nl> + if ( camera . type = = " PERSP " ) : <nl> + self . writel ( S_CAMS , 4 , ' < perspective > ' ) <nl> + self . writel ( S_CAMS , 5 , ' < yfov > ' + str ( math . degrees ( camera . angle ) ) + ' < / yfov > ' ) # I think ? <nl> + self . writel ( S_CAMS , 5 , ' < aspect_ratio > ' + str ( self . scene . render . resolution_x / self . scene . render . resolution_y ) + ' < / aspect_ratio > ' ) <nl> + self . writel ( S_CAMS , 5 , ' < znear > ' + str ( camera . clip_start ) + ' < / znear > ' ) <nl> + self . writel ( S_CAMS , 5 , ' < zfar > ' + str ( camera . clip_end ) + ' < / zfar > ' ) <nl> + self . writel ( S_CAMS , 4 , ' < / perspective > ' ) <nl> + else : <nl> + self . writel ( S_CAMS , 4 , ' < orthographic > ' ) <nl> + self . writel ( S_CAMS , 5 , ' < xmag > ' + str ( camera . ortho_scale * 0 . 5 ) + ' < / xmag > ' ) # I think ? <nl> + self . writel ( S_CAMS , 5 , ' < aspect_ratio > ' + str ( self . scene . render . resolution_x / self . scene . render . resolution_y ) + ' < / aspect_ratio > ' ) <nl> + self . writel ( S_CAMS , 5 , ' < znear > ' + str ( camera . clip_start ) + ' < / znear > ' ) <nl> + self . writel ( S_CAMS , 5 , ' < zfar > ' + str ( camera . clip_end ) + ' < / zfar > ' ) <nl> + self . writel ( S_CAMS , 4 , ' < / orthographic > ' ) <nl> + <nl> + self . writel ( S_CAMS , 3 , ' < / technique_common > ' ) <nl> + self . writel ( S_CAMS , 2 , ' < / optics > ' ) <nl> + self . writel ( S_CAMS , 1 , ' < / camera > ' ) <nl> + <nl> + self . writel ( S_NODES , il , ' < instance_camera url = " # ' + camid + ' " / > ' ) <nl> + <nl> + def export_lamp_node ( self , node , il ) : <nl> + if ( node . 
data = = None ) : <nl> + return <nl> + <nl> + light = node . data <nl> + lightid = self . new_id ( " light " ) <nl> + self . writel ( S_LAMPS , 1 , ' < light id = " ' + lightid + ' " name = " ' + light . name + ' " > ' ) <nl> + # self . writel ( S_LAMPS , 2 , ' < optics > ' ) <nl> + self . writel ( S_LAMPS , 3 , ' < technique_common > ' ) <nl> + <nl> + if ( light . type = = " POINT " ) : <nl> + self . writel ( S_LAMPS , 4 , ' < point > ' ) <nl> + self . writel ( S_LAMPS , 5 , ' < color > ' + strarr ( light . color ) + ' < / color > ' ) <nl> + att_by_distance = 2 . 0 / light . distance # convert to linear attenuation <nl> + self . writel ( S_LAMPS , 5 , ' < linear_attenuation > ' + str ( att_by_distance ) + ' < / linear_attenuation > ' ) <nl> + if ( light . use_sphere ) : <nl> + self . writel ( S_LAMPS , 5 , ' < zfar > ' + str ( light . distance ) + ' < / zfar > ' ) <nl> + <nl> + self . writel ( S_LAMPS , 4 , ' < / point > ' ) <nl> + elif ( light . type = = " SPOT " ) : <nl> + self . writel ( S_LAMPS , 4 , ' < spot > ' ) <nl> + self . writel ( S_LAMPS , 5 , ' < color > ' + strarr ( light . color ) + ' < / color > ' ) <nl> + att_by_distance = 2 . 0 / light . distance # convert to linear attenuation <nl> + self . writel ( S_LAMPS , 5 , ' < linear_attenuation > ' + str ( att_by_distance ) + ' < / linear_attenuation > ' ) <nl> + self . writel ( S_LAMPS , 5 , ' < falloff_angle > ' + str ( math . degrees ( light . spot_size / 2 ) ) + ' < / falloff_angle > ' ) <nl> + self . writel ( S_LAMPS , 4 , ' < / spot > ' ) <nl> + <nl> + else : # write a sun lamp for everything else ( not supported ) <nl> + self . writel ( S_LAMPS , 4 , ' < directional > ' ) <nl> + self . writel ( S_LAMPS , 5 , ' < color > ' + strarr ( light . color ) + ' < / color > ' ) <nl> + self . writel ( S_LAMPS , 4 , ' < / directional > ' ) <nl> + <nl> + self . writel ( S_LAMPS , 3 , ' < / technique_common > ' ) <nl> + # self . writel ( S_LAMPS , 2 , ' < / optics > ' ) <nl> + self . writel ( S_LAMPS , 1 , ' < / light > ' ) <nl> + <nl> + self . writel ( S_NODES , il , ' < instance_light url = " # ' + lightid + ' " / > ' ) <nl> + <nl> + def export_empty_node ( self , node , il ) : <nl> + self . writel ( S_NODES , 4 , ' < extra > ' ) <nl> + self . writel ( S_NODES , 5 , ' < technique profile = " GODOT " > ' ) <nl> + self . writel ( S_NODES , 6 , ' < empty_draw_type > ' + node . empty_draw_type + ' < / empty_draw_type > ' ) <nl> + self . writel ( S_NODES , 5 , ' < / technique > ' ) <nl> + self . writel ( S_NODES , 4 , ' < / extra > ' ) <nl> + <nl> + def export_curve ( self , curve ) : <nl> + splineid = self . new_id ( " spline " ) <nl> + <nl> + self . writel ( S_GEOM , 1 , ' < geometry id = " ' + splineid + ' " name = " ' + curve . name + ' " > ' ) <nl> + self . writel ( S_GEOM , 2 , ' < spline closed = " 0 " > ' ) <nl> + <nl> + points = [ ] <nl> + interps = [ ] <nl> + handles_in = [ ] <nl> + handles_out = [ ] <nl> + tilts = [ ] <nl> + <nl> + for cs in curve . splines : <nl> + <nl> + if ( cs . type = = " BEZIER " ) : <nl> + for s in cs . bezier_points : <nl> + points . append ( s . co [ 0 ] ) <nl> + points . append ( s . co [ 1 ] ) <nl> + points . append ( s . co [ 2 ] ) <nl> + <nl> + handles_in . append ( s . handle_left [ 0 ] ) <nl> + handles_in . append ( s . handle_left [ 1 ] ) <nl> + handles_in . append ( s . handle_left [ 2 ] ) <nl> + <nl> + handles_out . append ( s . handle_right [ 0 ] ) <nl> + handles_out . append ( s . handle_right [ 1 ] ) <nl> + handles_out . append ( s . handle_right [ 2 ] ) <nl> + <nl> + tilts . append ( s . 
tilt ) <nl> + interps . append ( " BEZIER " ) <nl> + else : <nl> + <nl> + for s in cs . points : <nl> + points . append ( s . co [ 0 ] ) <nl> + points . append ( s . co [ 1 ] ) <nl> + points . append ( s . co [ 2 ] ) <nl> + handles_in . append ( s . co [ 0 ] ) <nl> + handles_in . append ( s . co [ 1 ] ) <nl> + handles_in . append ( s . co [ 2 ] ) <nl> + handles_out . append ( s . co [ 0 ] ) <nl> + handles_out . append ( s . co [ 1 ] ) <nl> + handles_out . append ( s . co [ 2 ] ) <nl> + tilts . append ( s . tilt ) <nl> + interps . append ( " LINEAR " ) <nl> + <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - positions " > ' ) <nl> + position_values = " " <nl> + for x in points : <nl> + position_values + = " " + str ( x ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - positions - array " count = " ' + str ( len ( points ) ) + ' " > ' + position_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - positions - array " count = " ' + str ( len ( points ) / 3 ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - intangents " > ' ) <nl> + intangent_values = " " <nl> + for x in handles_in : <nl> + intangent_values + = " " + str ( x ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - intangents - array " count = " ' + str ( len ( points ) ) + ' " > ' + intangent_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - intangents - array " count = " ' + str ( len ( points ) / 3 ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - outtangents " > ' ) <nl> + outtangent_values = " " <nl> + for x in handles_out : <nl> + outtangent_values + = " " + str ( x ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - outtangents - array " count = " ' + str ( len ( points ) ) + ' " > ' + outtangent_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - outtangents - array " count = " ' + str ( len ( points ) / 3 ) + ' " stride = " 3 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Y " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " Z " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + self . 
writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - interpolations " > ' ) <nl> + interpolation_values = " " <nl> + for x in interps : <nl> + interpolation_values + = " " + x <nl> + self . writel ( S_GEOM , 4 , ' < Name_array id = " ' + splineid + ' - interpolations - array " count = " ' + str ( len ( interps ) ) + ' " > ' + interpolation_values + ' < / Name_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - interpolations - array " count = " ' + str ( len ( interps ) ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " INTERPOLATION " type = " name " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_GEOM , 3 , ' < source id = " ' + splineid + ' - tilts " > ' ) <nl> + tilt_values = " " <nl> + for x in tilts : <nl> + tilt_values + = " " + str ( x ) <nl> + self . writel ( S_GEOM , 4 , ' < float_array id = " ' + splineid + ' - tilts - array " count = " ' + str ( len ( tilts ) ) + ' " > ' + tilt_values + ' < / float_array > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < technique_common > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < accessor source = " # ' + splineid + ' - tilts - array " count = " ' + str ( len ( tilts ) ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_GEOM , 5 , ' < param name = " TILT " type = " float " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_GEOM , 3 , ' < control_vertices > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " POSITION " source = " # ' + splineid + ' - positions " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " IN_TANGENT " source = " # ' + splineid + ' - intangents " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " OUT_TANGENT " source = " # ' + splineid + ' - outtangents " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " INTERPOLATION " source = " # ' + splineid + ' - interpolations " / > ' ) <nl> + self . writel ( S_GEOM , 4 , ' < input semantic = " TILT " source = " # ' + splineid + ' - tilts " / > ' ) <nl> + self . writel ( S_GEOM , 3 , ' < / control_vertices > ' ) <nl> + <nl> + self . writel ( S_GEOM , 2 , ' < / spline > ' ) <nl> + self . writel ( S_GEOM , 1 , ' < / geometry > ' ) <nl> + <nl> + return splineid <nl> + <nl> + def export_curve_node ( self , node , il ) : <nl> + if ( node . data = = None ) : <nl> + return <nl> + <nl> + curveid = self . export_curve ( node . data ) <nl> + <nl> + self . writel ( S_NODES , il , ' < instance_geometry url = " # ' + curveid + ' " > ' ) <nl> + self . writel ( S_NODES , il , ' < / instance_geometry > ' ) <nl> + <nl> + def export_node ( self , node , il ) : <nl> + if ( not node in self . valid_nodes ) : <nl> + return <nl> + <nl> + prev_node = bpy . context . scene . objects . active <nl> + bpy . context . scene . objects . active = node <nl> + <nl> + self . writel ( S_NODES , il , ' < node id = " ' + self . validate_id ( node . name ) + ' " name = " ' + node . name + ' " type = " NODE " > ' ) <nl> + il + = 1 <nl> + <nl> + self . writel ( S_NODES , il , ' < matrix sid = " transform " > ' + strmtx ( node . matrix_local ) + ' < / matrix > ' ) <nl> + # print ( " NODE TYPE : " + node . type + " NAME : " + node . name ) <nl> + if ( node . type = = " MESH " ) : <nl> + self . 
export_mesh_node ( node , il ) <nl> + elif ( node . type = = " CURVE " ) : <nl> + self . export_curve_node ( node , il ) <nl> + elif ( node . type = = " ARMATURE " ) : <nl> + self . export_armature_node ( node , il ) <nl> + elif ( node . type = = " CAMERA " ) : <nl> + self . export_camera_node ( node , il ) <nl> + elif ( node . type = = " LAMP " ) : <nl> + self . export_lamp_node ( node , il ) <nl> + elif ( node . type = = " EMPTY " ) : <nl> + self . export_empty_node ( node , il ) <nl> + <nl> + for x in node . children : <nl> + self . export_node ( x , il ) <nl> + <nl> + il - = 1 <nl> + self . writel ( S_NODES , il , ' < / node > ' ) <nl> + bpy . context . scene . objects . active = prev_node # make previous node active again <nl> + <nl> + def is_node_valid ( self , node ) : <nl> + if ( not node . type in self . config [ " object_types " ] ) : <nl> + return False <nl> + <nl> + if ( self . config [ " use_active_layers " ] ) : <nl> + valid = False <nl> + # print ( " NAME : " + node . name ) <nl> + for i in range ( 20 ) : <nl> + if ( node . layers [ i ] and self . scene . layers [ i ] ) : <nl> + valid = True <nl> + break <nl> + if ( not valid ) : <nl> + return False <nl> + <nl> + if ( self . config [ " use_export_selected " ] and not node . select ) : <nl> + return False <nl> + <nl> + return True <nl> + <nl> + def export_scene ( self ) : <nl> + self . writel ( S_NODES , 0 , ' < library_visual_scenes > ' ) <nl> + self . writel ( S_NODES , 1 , ' < visual_scene id = " ' + self . scene_name + ' " name = " scene " > ' ) <nl> + <nl> + # validate nodes <nl> + for obj in self . scene . objects : <nl> + if ( obj in self . valid_nodes ) : <nl> + continue <nl> + if ( self . is_node_valid ( obj ) ) : <nl> + n = obj <nl> + while ( n ! = None ) : <nl> + if ( not n in self . valid_nodes ) : <nl> + self . valid_nodes . append ( n ) <nl> + n = n . parent <nl> + <nl> + for obj in self . scene . objects : <nl> + if ( obj in self . valid_nodes and obj . parent = = None ) : <nl> + self . export_node ( obj , 2 ) <nl> + <nl> + self . writel ( S_NODES , 1 , ' < / visual_scene > ' ) <nl> + self . writel ( S_NODES , 0 , ' < / library_visual_scenes > ' ) <nl> + <nl> + def export_asset ( self ) : <nl> + self . writel ( S_ASSET , 0 , ' < asset > ' ) <nl> + # Why is this time stuff mandatory ? , no one could care less . . . <nl> + self . writel ( S_ASSET , 1 , ' < contributor > ' ) <nl> + self . writel ( S_ASSET , 2 , ' < author > Anonymous < / author > ' ) # Who made Collada , the FBI ? <nl> + self . writel ( S_ASSET , 2 , ' < authoring_tool > Collada Exporter for Blender 2 . 6 + , by Juan Linietsky ( juan @ codenix . com ) < / authoring_tool > ' ) # Who made Collada , the FBI ? <nl> + self . writel ( S_ASSET , 1 , ' < / contributor > ' ) <nl> + self . writel ( S_ASSET , 1 , ' < created > ' + time . strftime ( " % Y - % m - % dT % H : % M : % SZ " ) + ' < / created > ' ) <nl> + self . writel ( S_ASSET , 1 , ' < modified > ' + time . strftime ( " % Y - % m - % dT % H : % M : % SZ " ) + ' < / modified > ' ) <nl> + self . writel ( S_ASSET , 1 , ' < unit meter = " 1 . 0 " name = " meter " / > ' ) <nl> + self . writel ( S_ASSET , 1 , ' < up_axis > Z_UP < / up_axis > ' ) <nl> + self . writel ( S_ASSET , 0 , ' < / asset > ' ) <nl> + <nl> + def export_animation_transform_channel ( self , target , keys , matrices = True ) : <nl> + frame_total = len ( keys ) <nl> + anim_id = self . new_id ( " anim " ) <nl> + self . 
writel ( S_ANIM , 1 , ' < animation id = " ' + anim_id + ' " > ' ) <nl> + source_frames = " " <nl> + source_transforms = " " <nl> + source_interps = " " <nl> + <nl> + for k in keys : <nl> + source_frames + = " " + str ( k [ 0 ] ) <nl> + if ( matrices ) : <nl> + source_transforms + = " " + strmtx ( k [ 1 ] ) <nl> + else : <nl> + source_transforms + = " " + str ( k [ 1 ] ) <nl> + <nl> + source_interps + = " LINEAR " <nl> + <nl> + # Time Source <nl> + self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - input " > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < float_array id = " ' + anim_id + ' - input - array " count = " ' + str ( frame_total ) + ' " > ' + source_frames + ' < / float_array > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - input - array " count = " ' + str ( frame_total ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_ANIM , 5 , ' < param name = " TIME " type = " float " / > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> + self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> + <nl> + if ( matrices ) : <nl> + # Transform Source <nl> + self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - transform - output " > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < float_array id = " ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total * 16 ) + ' " > ' + source_transforms + ' < / float_array > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total ) + ' " stride = " 16 " > ' ) <nl> + self . writel ( S_ANIM , 5 , ' < param name = " TRANSFORM " type = " float4x4 " / > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> + self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> + else : <nl> + # Value Source <nl> + self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - transform - output " > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < float_array id = " ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total ) + ' " > ' + source_transforms + ' < / float_array > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - transform - output - array " count = " ' + str ( frame_total ) + ' " stride = " 1 " > ' ) <nl> + self . writel ( S_ANIM , 5 , ' < param name = " X " type = " float " / > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> + self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> + <nl> + # Interpolation Source <nl> + self . writel ( S_ANIM , 2 , ' < source id = " ' + anim_id + ' - interpolation - output " > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < Name_array id = " ' + anim_id + ' - interpolation - output - array " count = " ' + str ( frame_total ) + ' " > ' + source_interps + ' < / Name_array > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < technique_common > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < accessor source = " # ' + anim_id + ' - interpolation - output - array " count = " ' + str ( frame_total ) + ' " stride = " 1 " > ' ) <nl> + self . 
writel ( S_ANIM , 5 , ' < param name = " INTERPOLATION " type = " Name " / > ' ) <nl> + self . writel ( S_ANIM , 4 , ' < / accessor > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < / technique_common > ' ) <nl> + self . writel ( S_ANIM , 2 , ' < / source > ' ) <nl> + <nl> + self . writel ( S_ANIM , 2 , ' < sampler id = " ' + anim_id + ' - sampler " > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < input semantic = " INPUT " source = " # ' + anim_id + ' - input " / > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < input semantic = " OUTPUT " source = " # ' + anim_id + ' - transform - output " / > ' ) <nl> + self . writel ( S_ANIM , 3 , ' < input semantic = " INTERPOLATION " source = " # ' + anim_id + ' - interpolation - output " / > ' ) <nl> + self . writel ( S_ANIM , 2 , ' < / sampler > ' ) <nl> + if ( matrices ) : <nl> + self . writel ( S_ANIM , 2 , ' < channel source = " # ' + anim_id + ' - sampler " target = " ' + target + ' / transform " / > ' ) <nl> + else : <nl> + self . writel ( S_ANIM , 2 , ' < channel source = " # ' + anim_id + ' - sampler " target = " ' + target + ' " / > ' ) <nl> + self . writel ( S_ANIM , 1 , ' < / animation > ' ) <nl> + <nl> + return [ anim_id ] <nl> + <nl> + def export_animation ( self , start , end , allowed = None ) : <nl> + # Blender - > Collada frames needs a little work <nl> + # Collada starts from 0 , blender usually from 1 <nl> + # The last frame must be included also <nl> + <nl> + frame_orig = self . scene . frame_current <nl> + <nl> + frame_len = 1 . 0 / self . scene . render . fps <nl> + frame_total = end - start + 1 <nl> + frame_sub = 0 <nl> + if ( start > 0 ) : <nl> + frame_sub = start * frame_len <nl> + <nl> + tcn = [ ] <nl> + xform_cache = { } <nl> + blend_cache = { } <nl> + # Change frames first , export objects last <nl> + # This improves performance enormously <nl> + <nl> + # print ( " anim from : " + str ( start ) + " to " + str ( end ) + " allowed : " + str ( allowed ) ) <nl> + for t in range ( start , end + 1 ) : <nl> + self . scene . frame_set ( t ) <nl> + key = t * frame_len - frame_sub <nl> + # print ( " Export Anim Frame " + str ( t ) + " / " + str ( self . scene . frame_end + 1 ) ) <nl> + <nl> + for node in self . scene . objects : <nl> + <nl> + if ( not node in self . valid_nodes ) : <nl> + continue <nl> + if ( allowed ! = None and not ( node in allowed ) ) : <nl> + if ( node . type = = " MESH " and node . data ! = None and ( node in self . armature_for_morph ) and ( self . armature_for_morph [ node ] in allowed ) ) : <nl> + pass # all good you pass with flying colors for morphs inside of action <nl> + else : <nl> + # print ( " fail " + str ( ( node in self . armature_for_morph ) ) ) <nl> + continue <nl> + if ( node . type = = " MESH " and node . data ! = None and node . data . shape_keys ! = None and ( node . data in self . mesh_cache ) and len ( node . data . shape_keys . key_blocks ) ) : <nl> + target = self . mesh_cache [ node . data ] [ " morph_id " ] <nl> + for i in range ( len ( node . data . shape_keys . key_blocks ) ) : <nl> + <nl> + if ( i = = 0 ) : <nl> + continue <nl> + <nl> + name = target + " - morph - weights ( " + str ( i - 1 ) + " ) " <nl> + if ( not ( name in blend_cache ) ) : <nl> + blend_cache [ name ] = [ ] <nl> + <nl> + blend_cache [ name ] . append ( ( key , node . data . shape_keys . key_blocks [ i ] . value ) ) <nl> + <nl> + if ( node . type = = " MESH " and node . parent and node . parent . 
type = = " ARMATURE " ) : <nl> + <nl> + continue # In Collada , nodes that have skin modifier must not export animation , animate the skin instead . <nl> + <nl> + if ( len ( node . constraints ) > 0 or node . animation_data ! = None ) : <nl> + # If the node has constraints , or animation data , then export a sampled animation track <nl> + name = self . validate_id ( node . name ) <nl> + if ( not ( name in xform_cache ) ) : <nl> + xform_cache [ name ] = [ ] <nl> + <nl> + mtx = node . matrix_world . copy ( ) <nl> + if ( node . parent ) : <nl> + mtx = node . parent . matrix_world . inverted ( ) * mtx <nl> + <nl> + xform_cache [ name ] . append ( ( key , mtx ) ) <nl> + <nl> + if ( node . type = = " ARMATURE " ) : <nl> + # All bones exported for now <nl> + <nl> + for bone in node . data . bones : <nl> + <nl> + bone_name = self . skeleton_info [ node ] [ " bone_ids " ] [ bone ] <nl> + <nl> + if ( not ( bone_name in xform_cache ) ) : <nl> + # print ( " has bone : " + bone_name ) <nl> + xform_cache [ bone_name ] = [ ] <nl> + <nl> + posebone = node . pose . bones [ bone . name ] <nl> + parent_posebone = None <nl> + <nl> + mtx = posebone . matrix . copy ( ) <nl> + if ( bone . parent ) : <nl> + parent_posebone = node . pose . bones [ bone . parent . name ] <nl> + parent_invisible = False <nl> + <nl> + for i in range ( 3 ) : <nl> + if ( parent_posebone . scale [ i ] = = 0 . 0 ) : <nl> + parent_invisible = True <nl> + <nl> + if ( not parent_invisible ) : <nl> + mtx = parent_posebone . matrix . inverted ( ) * mtx <nl> + <nl> + xform_cache [ bone_name ] . append ( ( key , mtx ) ) <nl> + <nl> + self . scene . frame_set ( frame_orig ) <nl> + <nl> + # export animation xml <nl> + for nid in xform_cache : <nl> + tcn + = self . export_animation_transform_channel ( nid , xform_cache [ nid ] , True ) <nl> + for nid in blend_cache : <nl> + tcn + = self . export_animation_transform_channel ( nid , blend_cache [ nid ] , False ) <nl> + <nl> + return tcn <nl> + <nl> + def export_animations ( self ) : <nl> + tmp_mat = [ ] <nl> + for s in self . skeletons : <nl> + tmp_bone_mat = [ ] <nl> + for bone in s . pose . bones : <nl> + tmp_bone_mat . append ( Matrix ( bone . matrix_basis ) ) <nl> + bone . matrix_basis = Matrix ( ) <nl> + tmp_mat . append ( [ Matrix ( s . matrix_local ) , tmp_bone_mat ] ) <nl> + <nl> + self . writel ( S_ANIM , 0 , ' < library_animations > ' ) <nl> + <nl> + if ( self . config [ " use_anim_action_all " ] and len ( self . skeletons ) ) : <nl> + cached_actions = { } <nl> + <nl> + for s in self . skeletons : <nl> + if s . animation_data and s . animation_data . action : <nl> + cached_actions [ s ] = s . animation_data . action . name <nl> + <nl> + self . writel ( S_ANIM_CLIPS , 0 , ' < library_animation_clips > ' ) <nl> + <nl> + for x in bpy . data . actions [ : ] : <nl> + if x . users = = 0 or x in self . action_constraints : <nl> + continue <nl> + if ( self . config [ " use_anim_skip_noexp " ] and x . name . endswith ( " - noexp " ) ) : <nl> + continue <nl> + <nl> + bones = [ ] <nl> + # find bones used <nl> + for p in x . fcurves : <nl> + dp = str ( p . data_path ) <nl> + base = " pose . bones [ \ " " <nl> + if ( dp . find ( base ) = = 0 ) : <nl> + dp = dp [ len ( base ) : ] <nl> + if ( dp . find ( ' " ' ) ! = - 1 ) : <nl> + dp = dp [ : dp . find ( ' " ' ) ] <nl> + if ( not dp in bones ) : <nl> + bones . append ( dp ) <nl> + <nl> + allowed_skeletons = [ ] <nl> + for i , y in enumerate ( self . skeletons ) : <nl> + if ( y . animation_data ) : <nl> + for z in y . pose . bones : <nl> + if ( z . bone . 
name in bones ) : <nl> + if ( not y in allowed_skeletons ) : <nl> + allowed_skeletons . append ( y ) <nl> + y . animation_data . action = x ; <nl> + <nl> + y . matrix_local = tmp_mat [ i ] [ 0 ] <nl> + for j , bone in enumerate ( s . pose . bones ) : <nl> + bone . matrix_basis = Matrix ( ) <nl> + <nl> + # print ( " allowed skeletons " + str ( allowed_skeletons ) ) <nl> + <nl> + # print ( str ( x ) ) <nl> + <nl> + tcn = self . export_animation ( int ( x . frame_range [ 0 ] ) , int ( x . frame_range [ 1 ] + 0 . 5 ) , allowed_skeletons ) <nl> + framelen = ( 1 . 0 / self . scene . render . fps ) <nl> + start = x . frame_range [ 0 ] * framelen <nl> + end = x . frame_range [ 1 ] * framelen <nl> + # print ( " Export anim : " + x . name ) <nl> + self . writel ( S_ANIM_CLIPS , 1 , ' < animation_clip name = " ' + x . name + ' " start = " ' + str ( start ) + ' " end = " ' + str ( end ) + ' " > ' ) <nl> + for z in tcn : <nl> + self . writel ( S_ANIM_CLIPS , 2 , ' < instance_animation url = " # ' + z + ' " / > ' ) <nl> + self . writel ( S_ANIM_CLIPS , 1 , ' < / animation_clip > ' ) <nl> + if ( len ( tcn ) = = 0 ) : <nl> + self . operator . report ( { ' WARNING ' } , ' Animation clip " ' + x . name + ' " contains no tracks . ' ) <nl> + <nl> + self . writel ( S_ANIM_CLIPS , 0 , ' < / library_animation_clips > ' ) <nl> + <nl> + for i , s in enumerate ( self . skeletons ) : <nl> + if ( s . animation_data = = None ) : <nl> + continue <nl> + if s in cached_actions : <nl> + s . animation_data . action = bpy . data . actions [ cached_actions [ s ] ] <nl> + else : <nl> + s . animation_data . action = None <nl> + for j , bone in enumerate ( s . pose . bones ) : <nl> + bone . matrix_basis = tmp_mat [ i ] [ 1 ] [ j ] <nl> + <nl> + else : <nl> + self . export_animation ( self . scene . frame_start , self . scene . frame_end ) <nl> + <nl> + self . writel ( S_ANIM , 0 , ' < / library_animations > ' ) <nl> + <nl> + def export ( self ) : <nl> + self . writel ( S_GEOM , 0 , ' < library_geometries > ' ) <nl> + self . writel ( S_CONT , 0 , ' < library_controllers > ' ) <nl> + self . writel ( S_CAMS , 0 , ' < library_cameras > ' ) <nl> + self . writel ( S_LAMPS , 0 , ' < library_lights > ' ) <nl> + self . writel ( S_IMGS , 0 , ' < library_images > ' ) <nl> + self . writel ( S_MATS , 0 , ' < library_materials > ' ) <nl> + self . writel ( S_FX , 0 , ' < library_effects > ' ) <nl> + <nl> + self . skeletons = [ ] <nl> + self . action_constraints = [ ] <nl> + self . export_asset ( ) <nl> + self . export_scene ( ) <nl> + <nl> + self . writel ( S_GEOM , 0 , ' < / library_geometries > ' ) <nl> + <nl> + # morphs always go before skin controllers <nl> + if S_MORPH in self . sections : <nl> + for l in self . sections [ S_MORPH ] : <nl> + self . writel ( S_CONT , 0 , l ) <nl> + del self . sections [ S_MORPH ] <nl> + <nl> + # morphs always go before skin controllers <nl> + if S_SKIN in self . sections : <nl> + for l in self . sections [ S_SKIN ] : <nl> + self . writel ( S_CONT , 0 , l ) <nl> + del self . sections [ S_SKIN ] <nl> + <nl> + self . writel ( S_CONT , 0 , ' < / library_controllers > ' ) <nl> + self . writel ( S_CAMS , 0 , ' < / library_cameras > ' ) <nl> + self . writel ( S_LAMPS , 0 , ' < / library_lights > ' ) <nl> + self . writel ( S_IMGS , 0 , ' < / library_images > ' ) <nl> + self . writel ( S_MATS , 0 , ' < / library_materials > ' ) <nl> + self . writel ( S_FX , 0 , ' < / library_effects > ' ) <nl> + <nl> + if ( self . config [ " use_anim " ] ) : <nl> + self . 
export_animations ( ) <nl> + <nl> + try : <nl> + f = open ( self . path , " wb " ) <nl> + except : <nl> + return False <nl> + <nl> + f . write ( bytes ( ' < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > \ n ' , " UTF - 8 " ) ) <nl> + f . write ( bytes ( ' < COLLADA xmlns = " http : / / www . collada . org / 2005 / 11 / COLLADASchema " version = " 1 . 4 . 1 " > \ n ' , " UTF - 8 " ) ) <nl> + <nl> + s = [ ] <nl> + for x in self . sections . keys ( ) : <nl> + s . append ( x ) <nl> + s . sort ( ) <nl> + for x in s : <nl> + for l in self . sections [ x ] : <nl> + f . write ( bytes ( l + " \ n " , " UTF - 8 " ) ) <nl> + <nl> + f . write ( bytes ( ' < scene > \ n ' , " UTF - 8 " ) ) <nl> + f . write ( bytes ( ' \ t < instance_visual_scene url = " # ' + self . scene_name + ' " / > \ n ' , " UTF - 8 " ) ) <nl> + f . write ( bytes ( ' < / scene > \ n ' , " UTF - 8 " ) ) <nl> + f . write ( bytes ( ' < / COLLADA > \ n ' , " UTF - 8 " ) ) <nl> + return True <nl> + <nl> + def __init__ ( self , path , kwargs , operator ) : <nl> + self . operator = operator <nl> + self . scene = bpy . context . scene <nl> + self . last_id = 0 <nl> + self . scene_name = self . new_id ( " scene " ) <nl> + self . sections = { } <nl> + self . path = path <nl> + self . mesh_cache = { } <nl> + self . curve_cache = { } <nl> + self . material_cache = { } <nl> + self . image_cache = { } <nl> + self . skeleton_info = { } <nl> + self . config = kwargs <nl> + self . valid_nodes = [ ] <nl> + self . armature_for_morph = { } <nl> + self . used_bones = [ ] <nl> + self . wrongvtx_report = False <nl> <nl> <nl> def save ( operator , context , <nl> - filepath = " " , <nl> - use_selection = False , <nl> - * * kwargs <nl> - ) : <nl> - <nl> - exp = DaeExporter ( filepath , kwargs , operator ) <nl> - exp . export ( ) <nl> - <nl> - <nl> - <nl> - return { ' FINISHED ' } # so the script wont run after we have batch exported . <nl> + filepath = " " , <nl> + use_selection = False , <nl> + * * kwargs <nl> + ) : <nl> <nl> + exp = DaeExporter ( filepath , kwargs , operator ) <nl> + exp . export ( ) <nl> <nl> + return { ' FINISHED ' } # so the script wont run after we have batch exported . <nl> | Whitespace fix | godotengine/godot | 7de55b86c16b2fbef45d9114031fb3b88044fc4f | 2016-07-09T10:00:32Z |
mmm a / utils / build - presets . ini <nl> ppp b / utils / build - presets . ini <nl> swift - install - components = compiler ; clang - builtin - headers ; stdlib ; sdk - overlay ; parse <nl> <nl> [ preset : mixin_buildbot_install_components_with_clang ] <nl> swift - install - components = compiler ; clang - resource - dir - symlink ; stdlib ; sdk - overlay ; parser - lib ; toolchain - tools ; license ; sourcekit - xpc - service ; swift - remote - mirror ; swift - remote - mirror - headers <nl> - llvm - install - components = llvm - cov ; llvm - profdata ; IndexStore ; clang ; clang - headers ; compiler - rt ; clangd <nl> + llvm - install - components = llvm - cov ; llvm - profdata ; IndexStore ; clang ; clang - resource - headers ; compiler - rt ; clangd <nl> <nl> [ preset : mixin_buildbot_trunk_base ] <nl> # Build standard library and SDK overlay for iOS device and simulator . <nl> | Merge pull request from brentdax / your - headers - are - in - another - castle | apple/swift | 138eeb74c1c5d4c685d724b383667fc265bb4cb8 | 2019-09-13T18:28:44Z |
mmm a / tensorflow / compiler / xla / python / BUILD <nl> ppp b / tensorflow / compiler / xla / python / BUILD <nl> licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> package ( default_visibility = [ " / / tensorflow : internal " ] ) <nl> <nl> load ( " / / tensorflow : tensorflow . bzl " , " tf_py_wrap_cc " ) <nl> + load ( " @ local_config_cuda / / cuda : build_defs . bzl " , " if_cuda_is_configured " ) <nl> <nl> py_library ( <nl> name = " xla_client " , <nl> cc_library ( <nl> " / / tensorflow / compiler / xla / client : xla_builder " , <nl> " / / tensorflow / compiler / xla / client : xla_computation " , <nl> " / / tensorflow / compiler / xla / client / lib : math " , <nl> + " / / tensorflow / compiler / xla / service : platform_util " , <nl> " / / tensorflow / compiler / xla / service : shaped_buffer " , <nl> " / / tensorflow / compiler / xrt : xrt_proto " , <nl> " / / tensorflow / compiler / xrt / cc : xrt_ops " , <nl> tf_py_wrap_cc ( <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> " / / tensorflow / compiler / xla : xla_data_proto " , <nl> " / / tensorflow / compiler / xla / service : cpu_plugin " , <nl> - ] , <nl> + ] + if_cuda_is_configured ( [ <nl> + " / / tensorflow / compiler / xla / service : gpu_plugin " , <nl> + ] ) , <nl> ) <nl> mmm a / tensorflow / compiler / xla / python / local_computation_builder . cc <nl> ppp b / tensorflow / compiler / xla / python / local_computation_builder . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / executable_run_options . h " <nl> # include " tensorflow / compiler / xla / literal . h " <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> + # include " tensorflow / compiler / xla / service / platform_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / util . h " <nl> # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> tensorflow : : mutex g_local_client_mutex ( tensorflow : : LINKER_INITIALIZED ) ; <nl> int g_replica_count GUARDED_BY ( g_local_client_mutex ) = 1 ; <nl> LocalClient * g_local_client GUARDED_BY ( g_local_client_mutex ) = nullptr ; <nl> <nl> + string * GetPlatformNameString ( ) { <nl> + static string * platform_name_string PT_GUARDED_BY ( g_local_client_mutex ) = <nl> + new string ( " Host " ) ; <nl> + return platform_name_string ; <nl> + } <nl> + <nl> Status InitializeReplicaCount ( int replica_count ) { <nl> if ( replica_count < 1 ) { <nl> return InvalidArgument ( " Replica count must be > = 1 ; got % d . " , <nl> Status InitializeReplicaCount ( int replica_count ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status InitializePlatformName ( const string & platform_name ) { <nl> + string * g_platform_name = GetPlatformNameString ( ) ; <nl> + tensorflow : : mutex_lock lock ( g_local_client_mutex ) ; <nl> + if ( g_local_client ! = nullptr ) { <nl> + return FailedPrecondition ( <nl> + " Attempted to set the platform name to % s , but a local XLA service was " <nl> + " previously created with a platform name of % s . " , <nl> + platform_name , * g_platform_name ) ; <nl> + } <nl> + TF_RETURN_IF_ERROR ( PlatformUtil : : GetPlatform ( platform_name ) . 
status ( ) ) ; <nl> + * g_platform_name = platform_name ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> int GetReplicaCount ( ) { <nl> tensorflow : : mutex_lock lock ( g_local_client_mutex ) ; <nl> return g_replica_count ; <nl> } <nl> <nl> LocalClient * GetOrCreateLocalClient ( ) { <nl> + string * platform_name = GetPlatformNameString ( ) ; <nl> tensorflow : : mutex_lock lock ( g_local_client_mutex ) ; <nl> if ( g_local_client ! = nullptr ) { <nl> return g_local_client ; <nl> } <nl> LocalClientOptions options ; <nl> + options . set_platform ( PlatformUtil : : GetPlatform ( * platform_name ) . ValueOrDie ( ) ) ; <nl> options . set_number_of_replicas ( g_replica_count ) ; <nl> g_local_client = ClientLibrary : : GetOrCreateLocalClient ( options ) . ValueOrDie ( ) ; <nl> CHECK ( g_local_client ! = nullptr ) ; <nl> mmm a / tensorflow / compiler / xla / python / local_computation_builder . h <nl> ppp b / tensorflow / compiler / xla / python / local_computation_builder . h <nl> namespace swig { <nl> / / returned . <nl> Status InitializeReplicaCount ( int replica_count ) ; <nl> <nl> + / / Initializes the platform name that XLA will be initialized with ( when <nl> + / / first obtaining a handle to the local XLA service ) . If this is called after <nl> + / / the handle to the local XLA service has been established , then an error is <nl> + / / returned . <nl> + Status InitializePlatformName ( const string & platform_name ) ; <nl> + <nl> / / Returns the replica count that is currently set , regardless of whether the <nl> / / local XLA service has been instantiated yet or not . <nl> int GetReplicaCount ( ) ; <nl> mmm a / tensorflow / compiler / xla / python / local_computation_builder . i <nl> ppp b / tensorflow / compiler / xla / python / local_computation_builder . i <nl> tensorflow : : ImportNumpy ( ) ; <nl> % unignore xla ; <nl> % unignore xla : : swig ; <nl> % unignore xla : : swig : : InitializeReplicaCount ; <nl> + % unignore xla : : swig : : InitializePlatformName ; <nl> % unignore xla : : swig : : GetReplicaCount ; <nl> % unignore xla : : swig : : TransferToInfeedLocal ; <nl> % unignore xla : : swig : : TransferToInfeedLocalReplica ; <nl> mmm a / tensorflow / compiler / xla / python / xla_client . py <nl> ppp b / tensorflow / compiler / xla / python / xla_client . py <nl> def initialize_replica_count ( replica_count ) : <nl> c_api . InitializeReplicaCount ( replica_count ) <nl> <nl> <nl> + def initialize_platform_name ( platform_name ) : <nl> + " " " Initializes the desired platform name to use on XLA service init . <nl> + <nl> + Args : <nl> + platform_name : string name of platform . <nl> + <nl> + Raises : <nl> + A runtime exception if the XLA service has already been initialized . <nl> + " " " <nl> + c_api . InitializePlatformName ( platform_name ) <nl> + <nl> + <nl> def get_replica_count ( ) : <nl> " " " Returns the current replica count used for the XLA service . <nl> <nl> | [ XLA ] plumb through platform option in local Python client | tensorflow/tensorflow | cca5a0bfe62e0281ef11dfe358536ffab58c3978 | 2018-11-13T04:36:08Z |
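The tensorflow commit above threads the platform choice through a lazily created singleton: InitializePlatformName succeeds only while no LocalClient exists, and GetOrCreateLocalClient freezes the option on first use. A minimal standalone sketch of that guard, assuming nothing from XLA itself; Client, SetPlatformName and GetOrCreateClient are illustrative names:

#include <iostream>
#include <mutex>
#include <stdexcept>
#include <string>

struct Client { std::string platform; };

static std::mutex g_mu;
static Client* g_client = nullptr;            // created on first use, never freed (process singleton)
static std::string g_platform_name = "Host";  // default, mirroring the "Host" default above

// Rejects late configuration, like the FailedPrecondition in the commit.
void SetPlatformName(const std::string& name) {
  std::lock_guard<std::mutex> lock(g_mu);
  if (g_client != nullptr)
    throw std::runtime_error("client already created with platform " +
                             g_client->platform);
  g_platform_name = name;
}

Client* GetOrCreateClient() {
  std::lock_guard<std::mutex> lock(g_mu);
  if (g_client == nullptr)
    g_client = new Client{g_platform_name};   // the option is frozen from here on
  return g_client;
}

int main() {
  SetPlatformName("CUDA");                             // fine: no client yet
  std::cout << GetOrCreateClient()->platform << "\n";  // prints CUDA
  // SetPlatformName("Host");                          // would now throw
}

Validating the name eagerly (the PlatformUtil::GetPlatform(...).status() call in the diff) keeps a typo from surfacing only at first client creation.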
mmm a / test / test_nn . py <nl> ppp b / test / test_nn . py <nl> def test_prune ( self ) : <nl> pruned_tensor = p . prune ( t , default_mask ) <nl> self . assertEqual ( t * expected_mask , pruned_tensor ) <nl> <nl> + def test_rnn_pruning ( self ) : <nl> + l = torch . nn . LSTM ( 32 , 32 ) <nl> + # This Module has 4 parameters called : <nl> + # ' weight_ih_l0 ' , ' weight_hh_l0 ' , ' bias_ih_l0 ' , ' bias_hh_l0 ' <nl> + <nl> + # Pruning one of them causes one of the weights to become a tensor <nl> + prune . l1_unstructured ( l , ' weight_ih_l0 ' , 0 . 5 ) <nl> + assert ( <nl> + sum ( [ isinstance ( p , torch . nn . Parameter ) for p in l . _flat_weights ] ) <nl> + = = 3 <nl> + ) <nl> + <nl> + # Removing the pruning reparametrization restores the Parameter <nl> + prune . remove ( l , ' weight_ih_l0 ' ) <nl> + assert ( <nl> + sum ( [ isinstance ( p , torch . nn . Parameter ) for p in l . _flat_weights ] ) <nl> + = = 4 <nl> + ) <nl> + <nl> + # Make sure that , upon removal of the reparametrization , the <nl> + # ` . _parameters ` and ` . named_parameters ` contain the right params . <nl> + # Specifically , the original weight ( ' weight_ih_l0 ' ) should be placed <nl> + # back in the parameters , while the reparametrization component <nl> + # ( ' weight_ih_l0_orig ' ) should be removed . <nl> + assert ' weight_ih_l0 ' in l . _parameters <nl> + assert l . _parameters [ ' weight_ih_l0 ' ] is not None <nl> + assert ' weight_ih_l0_orig ' not in l . _parameters <nl> + assert ' weight_ih_l0 ' in dict ( l . named_parameters ( ) ) <nl> + assert dict ( l . named_parameters ( ) ) [ ' weight_ih_l0 ' ] is not None <nl> + assert ' weight_ih_l0_orig ' not in dict ( l . named_parameters ( ) ) <nl> + <nl> + <nl> + def test_rnn_weight_norm ( self ) : <nl> + l = torch . nn . LSTM ( 32 , 32 ) <nl> + # This Module has 4 parameters called : <nl> + # ' weight_ih_l0 ' , ' weight_hh_l0 ' , ' bias_ih_l0 ' , ' bias_hh_l0 ' <nl> + <nl> + # Applying weight norm on one of them causes it to become a tensor <nl> + l = torch . nn . utils . weight_norm ( l , name = ' weight_ih_l0 ' ) <nl> + assert ( <nl> + sum ( [ isinstance ( p , torch . nn . Parameter ) for p in l . _flat_weights ] ) <nl> + = = 3 <nl> + ) <nl> + <nl> + # Removing the weight norm reparametrization restores the Parameter <nl> + l = torch . nn . utils . remove_weight_norm ( l , name = ' weight_ih_l0 ' ) <nl> + assert ( <nl> + sum ( [ isinstance ( p , torch . nn . Parameter ) for p in l . _flat_weights ] ) <nl> + = = 4 <nl> + ) <nl> + <nl> + # Make sure that , upon removal of the reparametrization , the <nl> + # ` . _parameters ` and ` . named_parameters ` contain the right params . <nl> + # Specifically , the original weight ( ' weight_ih_l0 ' ) should be placed <nl> + # back in the parameters , while the reparametrization components <nl> + # ( ' weight_ih_l0_v ' and ' weight_ih_l0_g ' ) should be removed . <nl> + assert ' weight_ih_l0 ' in l . _parameters <nl> + assert l . _parameters [ ' weight_ih_l0 ' ] is not None <nl> + assert ' weight_ih_l0_v ' not in l . _parameters <nl> + assert ' weight_ih_l0_g ' not in l . _parameters <nl> + assert ' weight_ih_l0 ' in dict ( l . named_parameters ( ) ) <nl> + assert dict ( l . named_parameters ( ) ) [ ' weight_ih_l0 ' ] is not None <nl> + assert ' weight_ih_l0_v ' not in dict ( l . named_parameters ( ) ) <nl> + assert ' weight_ih_l0_g ' not in dict ( l . named_parameters ( ) ) <nl> <nl> def test_weight_norm ( self ) : <nl> input = torch . randn ( 3 , 5 ) <nl> mmm a / torch / nn / utils / prune . 
py <nl> ppp b / torch / nn / utils / prune . py <nl> def remove ( self , module ) : <nl> orig . data = weight . data <nl> del module . _parameters [ self . _tensor_name + " _orig " ] <nl> del module . _buffers [ self . _tensor_name + " _mask " ] <nl> - module . register_parameter ( self . _tensor_name , orig ) <nl> + setattr ( module , self . _tensor_name , orig ) <nl> <nl> <nl> class PruningContainer ( BasePruningMethod ) : <nl> mmm a / torch / nn / utils / weight_norm . py <nl> ppp b / torch / nn / utils / weight_norm . py <nl> def remove ( self , module ) : <nl> delattr ( module , self . name ) <nl> del module . _parameters [ self . name + ' _g ' ] <nl> del module . _parameters [ self . name + ' _v ' ] <nl> - module . register_parameter ( self . name , Parameter ( weight . data ) ) <nl> + setattr ( module , self . name , Parameter ( weight . data ) ) <nl> <nl> def __call__ ( self , module , inputs ) : <nl> setattr ( module , self . name , self . compute_weight ( module ) ) <nl> | Explicit attribute setting for pruning and weight_norm upon reparam removal ( ) | pytorch/pytorch | d37a4861b8a5eed3d9a1340484d1efb0f48aa59e | 2020-04-29T16:01:59Z |
mmm a / 3rdParty / curl / curl - 7 . 57 . 0 / lib / http . h <nl> ppp b / 3rdParty / curl / curl - 7 . 57 . 0 / lib / http . h <nl> CURLcode Curl_http_perhapsrewind ( struct connectdata * conn ) ; <nl> * <nl> * / <nl> # ifndef EXPECT_100_THRESHOLD <nl> - # define EXPECT_100_THRESHOLD 1024 <nl> + # define EXPECT_100_THRESHOLD 1024 * 1024 * 512 <nl> # endif <nl> <nl> # endif / * CURL_DISABLE_HTTP * / <nl> mmm a / lib / SimpleHttpClient / Communicator . cpp <nl> ppp b / lib / SimpleHttpClient / Communicator . cpp <nl> static std : : string buildPrefix ( Ticket ticketId ) { <nl> <nl> static std : : atomic_uint_fast64_t NEXT_TICKET_ID ( static_cast < uint64_t > ( 1 ) ) ; <nl> static std : : vector < char > urlDotSeparators { ' / ' , ' # ' , ' ? ' } ; <nl> - } <nl> + } / / namespace <nl> <nl> Communicator : : Communicator ( ) : _curl ( nullptr ) , _mc ( CURLM_OK ) , _enabled ( true ) { <nl> curl_global_init ( CURL_GLOBAL_ALL ) ; <nl> Communicator : : Communicator ( ) : _curl ( nullptr ) , _mc ( CURLM_OK ) , _enabled ( true ) { <nl> THROW_ARANGO_EXCEPTION_MESSAGE ( TRI_ERROR_OUT_OF_MEMORY , " unable to initialize curl " ) ; <nl> } <nl> <nl> + curl_multi_setopt ( _curl , CURLMOPT_MAXCONNECTS , 20 ) ; <nl> + <nl> # ifdef _WIN32 <nl> int err = dumb_socketpair ( _socks , 0 ) ; <nl> if ( err ! = 0 ) { <nl> void Communicator : : createRequestInProgress ( NewRequest & & newRequest ) { <nl> curl_easy_setopt ( handle , CURLOPT_HEADERDATA , handleInProgress - > _rip . get ( ) ) ; <nl> curl_easy_setopt ( handle , CURLOPT_ERRORBUFFER , <nl> handleInProgress - > _rip . get ( ) - > _errorBuffer ) ; <nl> - <nl> + <nl> / / mop : XXX : S CURLE 51 and 60 . . . <nl> curl_easy_setopt ( handle , CURLOPT_SSL_VERIFYPEER , 0L ) ; <nl> curl_easy_setopt ( handle , CURLOPT_SSL_VERIFYHOST , 0L ) ; <nl> void Communicator : : handleResult ( CURL * handle , CURLcode rc ) { <nl> } <nl> <nl> LOG_TOPIC ( TRACE , Logger : : COMMUNICATION ) <nl> - < < buildPrefix ( rip - > _ticketId ) < < " curl rc is : " < < rc < < " after " <nl> + < < : : buildPrefix ( rip - > _ticketId ) < < " curl rc is : " < < rc < < " after " <nl> < < Logger : : FIXED ( TRI_microtime ( ) - rip - > _startTime ) < < " s " ; <nl> <nl> if ( CURLE_OPERATION_TIMEDOUT = = rc ) { <nl> curl_easy_getinfo ( handle , CURLINFO_CONNECT_TIME , & connectTime ) ; <nl> LOG_TOPIC ( TRACE , Logger : : COMMUNICATION ) <nl> - < < buildPrefix ( rip - > _ticketId ) < < " CURLINFO_CONNECT_TIME is " < < connectTime ; <nl> + < < : : buildPrefix ( rip - > _ticketId ) < < " CURLINFO_CONNECT_TIME is " < < connectTime ; <nl> } / / if <nl> <nl> if ( strlen ( rip - > _errorBuffer ) ! = 0 ) { <nl> LOG_TOPIC ( TRACE , Logger : : COMMUNICATION ) <nl> - < < buildPrefix ( rip - > _ticketId ) < < " curl error details : " < < rip - > _errorBuffer ; <nl> + < < : : buildPrefix ( rip - > _ticketId ) < < " curl error details : " < < rip - > _errorBuffer ; <nl> } <nl> <nl> MUTEX_LOCKER ( guard , _handlesLock ) ; <nl> int Communicator : : curlDebug ( CURL * handle , curl_infotype type , char * data , <nl> TRI_ASSERT ( data ! 
= nullptr ) ; <nl> <nl> std : : string dataStr ( data , size ) ; <nl> - std : : string prefix ( buildPrefix ( request - > _ticketId ) ) ; <nl> + std : : string prefix ( : : buildPrefix ( request - > _ticketId ) ) ; <nl> <nl> switch ( type ) { <nl> case CURLINFO_TEXT : <nl> void Communicator : : abortRequestInternal ( Ticket ticketId ) { <nl> } <nl> <nl> LOG_TOPIC ( WARN , Logger : : REQUESTS ) <nl> - < < buildPrefix ( handle - > second - > _rip - > _ticketId ) <nl> + < < : : buildPrefix ( handle - > second - > _rip - > _ticketId ) <nl> < < " aborting request to " < < handle - > second - > _rip - > _destination . url ( ) ; <nl> handle - > second - > _rip - > _aborted = true ; <nl> } <nl> void Communicator : : callErrorFn ( Ticket const & ticketId , Destination const & destin <nl> <nl> if ( total > CALLBACK_WARN_TIME ) { <nl> LOG_TOPIC ( WARN , Logger : : COMMUNICATION ) <nl> - < < buildPrefix ( ticketId ) < < " error callback for request to " < < destination . url ( ) < < " took " < < total < < " s " ; <nl> + < < : : buildPrefix ( ticketId ) < < " error callback for request to " < < destination . url ( ) < < " took " < < total < < " s " ; <nl> } <nl> } <nl> <nl> void Communicator : : callSuccessFn ( Ticket const & ticketId , Destination const & dest <nl> <nl> if ( total > CALLBACK_WARN_TIME ) { <nl> LOG_TOPIC ( WARN , Logger : : COMMUNICATION ) <nl> - < < buildPrefix ( ticketId ) < < " success callback for request to " < < destination . url ( ) < < " took " < < ( total ) < < " s " ; <nl> + < < : : buildPrefix ( ticketId ) < < " success callback for request to " < < destination . url ( ) < < " took " < < ( total ) < < " s " ; <nl> } <nl> } <nl> | make curl not send " Expect : 100 - continue " headers ( ) | arangodb/arangodb | 0b7c74ca9673cfe209c9cabadd523368287a7806 | 2018-07-10T07:17:01Z |
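The fix above edits the bundled curl so EXPECT_100_THRESHOLD becomes 512 MiB, which in practice stops libcurl from ever sending "Expect: 100-continue". With an unpatched libcurl the usual per-handle equivalent is an empty Expect header; a small sketch using only stock libcurl calls, with a placeholder URL and body:

#include <curl/curl.h>

int main(void) {
  curl_global_init(CURL_GLOBAL_ALL);
  CURL *h = curl_easy_init();
  struct curl_slist *hdrs = NULL;

  /* An empty value tells libcurl to drop the header entirely, so large
   * POSTs skip the 100-continue handshake and its extra round trip. */
  hdrs = curl_slist_append(hdrs, "Expect:");
  curl_easy_setopt(h, CURLOPT_URL, "http://example.com/upload");
  curl_easy_setopt(h, CURLOPT_POSTFIELDS, "large request body ...");
  curl_easy_setopt(h, CURLOPT_HTTPHEADER, hdrs);

  CURLcode res = curl_easy_perform(h);

  curl_slist_free_all(hdrs);
  curl_easy_cleanup(h);
  curl_global_cleanup();
  return res == CURLE_OK ? 0 : 1;
}

Patching the threshold rather than each handle makes sense here because the Communicator builds many easy handles and wants the behavior globally; the trade-off is that a rejected request may now upload its whole body for nothing.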
mmm a / include / swift / SIL / SILBuilder . h <nl> ppp b / include / swift / SIL / SILBuilder . h <nl> class SILBuilder { <nl> BeginAccessInst * createBeginAccess ( SILLocation loc , SILValue address , <nl> SILAccessKind accessKind , <nl> SILAccessEnforcement enforcement , <nl> - bool noNestedConflict ) { <nl> + bool noNestedConflict , <nl> + bool fromBuiltin ) { <nl> return insert ( new ( getModule ( ) ) BeginAccessInst ( <nl> getSILDebugLocation ( loc ) , address , accessKind , enforcement , <nl> - noNestedConflict ) ) ; <nl> + noNestedConflict , fromBuiltin ) ) ; <nl> } <nl> <nl> EndAccessInst * createEndAccess ( SILLocation loc , SILValue address , <nl> class SILBuilder { <nl> createBeginUnpairedAccess ( SILLocation loc , SILValue address , SILValue buffer , <nl> SILAccessKind accessKind , <nl> SILAccessEnforcement enforcement , <nl> - bool noNestedConflict ) { <nl> + bool noNestedConflict , <nl> + bool fromBuiltin ) { <nl> return insert ( new ( getModule ( ) ) BeginUnpairedAccessInst ( <nl> getSILDebugLocation ( loc ) , address , buffer , accessKind , enforcement , <nl> - noNestedConflict ) ) ; <nl> + noNestedConflict , fromBuiltin ) ) ; <nl> } <nl> <nl> - EndUnpairedAccessInst * createEndUnpairedAccess ( SILLocation loc , <nl> - SILValue buffer , <nl> - SILAccessEnforcement enforcement , <nl> - bool aborted ) { <nl> + EndUnpairedAccessInst * <nl> + createEndUnpairedAccess ( SILLocation loc , SILValue buffer , <nl> + SILAccessEnforcement enforcement , bool aborted , <nl> + bool fromBuiltin ) { <nl> return insert ( new ( getModule ( ) ) EndUnpairedAccessInst ( <nl> - getSILDebugLocation ( loc ) , buffer , enforcement , aborted ) ) ; <nl> + getSILDebugLocation ( loc ) , buffer , enforcement , aborted , fromBuiltin ) ) ; <nl> } <nl> <nl> AssignInst * createAssign ( SILLocation Loc , SILValue Src , SILValue DestAddr ) { <nl> mmm a / include / swift / SIL / SILCloner . h <nl> ppp b / include / swift / SIL / SILCloner . h <nl> void SILCloner < ImplClass > : : visitBeginAccessInst ( BeginAccessInst * Inst ) { <nl> getOpValue ( Inst - > getOperand ( ) ) , <nl> Inst - > getAccessKind ( ) , <nl> Inst - > getEnforcement ( ) , <nl> - Inst - > hasNoNestedConflict ( ) ) ) ; <nl> + Inst - > hasNoNestedConflict ( ) , <nl> + Inst - > isFromBuiltin ( ) ) ) ; <nl> } <nl> <nl> template < typename ImplClass > <nl> void SILCloner < ImplClass > : : visitBeginUnpairedAccessInst ( <nl> getOpValue ( Inst - > getBuffer ( ) ) , <nl> Inst - > getAccessKind ( ) , <nl> Inst - > getEnforcement ( ) , <nl> - Inst - > hasNoNestedConflict ( ) ) ) ; <nl> + Inst - > hasNoNestedConflict ( ) , <nl> + Inst - > isFromBuiltin ( ) ) ) ; <nl> } <nl> <nl> template < typename ImplClass > <nl> void SILCloner < ImplClass > : : visitEndUnpairedAccessInst ( <nl> EndUnpairedAccessInst * Inst ) { <nl> getBuilder ( ) . setCurrentDebugScope ( getOpScope ( Inst - > getDebugScope ( ) ) ) ; <nl> - doPostProcess ( <nl> - Inst , getBuilder ( ) . createEndUnpairedAccess ( getOpLocation ( Inst - > getLoc ( ) ) , <nl> - getOpValue ( Inst - > getOperand ( ) ) , <nl> - Inst - > getEnforcement ( ) , <nl> - Inst - > isAborting ( ) ) ) ; <nl> + doPostProcess ( Inst , <nl> + getBuilder ( ) . createEndUnpairedAccess ( <nl> + getOpLocation ( Inst - > getLoc ( ) ) , <nl> + getOpValue ( Inst - > getOperand ( ) ) , Inst - > getEnforcement ( ) , <nl> + Inst - > isAborting ( ) , Inst - > isFromBuiltin ( ) ) ) ; <nl> } <nl> <nl> template < typename ImplClass > <nl> mmm a / include / swift / SIL / SILInstruction . 
h <nl> ppp b / include / swift / SIL / SILInstruction . h <nl> class BeginAccessInst <nl> <nl> BeginAccessInst ( SILDebugLocation loc , SILValue lvalue , <nl> SILAccessKind accessKind , SILAccessEnforcement enforcement , <nl> - bool noNestedConflict ) <nl> + bool noNestedConflict , bool fromBuiltin ) <nl> : UnaryInstructionBase ( loc , lvalue , lvalue - > getType ( ) ) { <nl> SILInstruction : : Bits . BeginAccessInst . AccessKind = unsigned ( accessKind ) ; <nl> SILInstruction : : Bits . BeginAccessInst . Enforcement = unsigned ( enforcement ) ; <nl> SILInstruction : : Bits . BeginAccessInst . NoNestedConflict = <nl> unsigned ( noNestedConflict ) ; <nl> + SILInstruction : : Bits . BeginAccessInst . FromBuiltin = <nl> + unsigned ( fromBuiltin ) ; <nl> <nl> static_assert ( unsigned ( SILAccessKind : : Last ) < ( 1 < < 2 ) , <nl> " reserve sufficient bits for serialized SIL " ) ; <nl> class BeginAccessInst <nl> SILInstruction : : Bits . BeginAccessInst . NoNestedConflict = noNestedConflict ; <nl> } <nl> <nl> + / / / Return true if this access marker was emitted for a user - controlled <nl> + / / / Builtin . Return false if this access marker was auto - generated by the <nl> + / / / compiler to enforce formal access that derives from the language . <nl> + bool isFromBuiltin ( ) const { <nl> + return SILInstruction : : Bits . BeginAccessInst . FromBuiltin ; <nl> + } <nl> + <nl> SILValue getSource ( ) const { <nl> return getOperand ( ) ; <nl> } <nl> class BeginUnpairedAccessInst <nl> BeginUnpairedAccessInst ( SILDebugLocation loc , SILValue addr , SILValue buffer , <nl> SILAccessKind accessKind , <nl> SILAccessEnforcement enforcement , <nl> - bool noNestedConflict ) <nl> + bool noNestedConflict , <nl> + bool fromBuiltin ) <nl> : InstructionBase ( loc ) , Operands ( this , addr , buffer ) { <nl> SILInstruction : : Bits . BeginUnpairedAccessInst . AccessKind = <nl> unsigned ( accessKind ) ; <nl> class BeginUnpairedAccessInst <nl> unsigned ( enforcement ) ; <nl> SILInstruction : : Bits . BeginUnpairedAccessInst . NoNestedConflict = <nl> unsigned ( noNestedConflict ) ; <nl> + SILInstruction : : Bits . BeginUnpairedAccessInst . FromBuiltin = <nl> + unsigned ( fromBuiltin ) ; <nl> } <nl> <nl> public : <nl> class BeginUnpairedAccessInst <nl> noNestedConflict ; <nl> } <nl> <nl> + / / / Return true if this access marker was emitted for a user - controlled <nl> + / / / Builtin . Return false if this access marker was auto - generated by the <nl> + / / / compiler to enforce formal access that derives from the language . <nl> + bool isFromBuiltin ( ) const { <nl> + return SILInstruction : : Bits . BeginUnpairedAccessInst . FromBuiltin ; <nl> + } <nl> + <nl> SILValue getSource ( ) const { <nl> return Operands [ 0 ] . get ( ) ; <nl> } <nl> class EndUnpairedAccessInst <nl> <nl> private : <nl> EndUnpairedAccessInst ( SILDebugLocation loc , SILValue buffer , <nl> - SILAccessEnforcement enforcement , bool aborting = false ) <nl> + SILAccessEnforcement enforcement , bool aborting , <nl> + bool fromBuiltin ) <nl> : UnaryInstructionBase ( loc , buffer ) { <nl> SILInstruction : : Bits . EndUnpairedAccessInst . Enforcement <nl> = unsigned ( enforcement ) ; <nl> SILInstruction : : Bits . EndUnpairedAccessInst . Aborting = aborting ; <nl> + SILInstruction : : Bits . EndUnpairedAccessInst . 
FromBuiltin = fromBuiltin ; <nl> } <nl> <nl> public : <nl> class EndUnpairedAccessInst <nl> unsigned ( enforcement ) ; <nl> } <nl> <nl> + / / / Return true if this access marker was emitted for a user - controlled <nl> + / / / Builtin . Return false if this access marker was auto - generated by the <nl> + / / / compiler to enforce formal access that derives from the language . <nl> + bool isFromBuiltin ( ) const { <nl> + return SILInstruction : : Bits . EndUnpairedAccessInst . FromBuiltin ; <nl> + } <nl> + <nl> SILValue getBuffer ( ) const { <nl> return getOperand ( ) ; <nl> } <nl> mmm a / include / swift / SIL / SILNode . h <nl> ppp b / include / swift / SIL / SILNode . h <nl> class alignas ( 8 ) SILNode { <nl> <nl> SWIFT_INLINE_BITFIELD ( BeginAccessInst , SingleValueInstruction , <nl> NumSILAccessKindBits + NumSILAccessEnforcementBits <nl> - + 1 , <nl> + + 1 + 1 , <nl> AccessKind : NumSILAccessKindBits , <nl> Enforcement : NumSILAccessEnforcementBits , <nl> - NoNestedConflict : 1 <nl> + NoNestedConflict : 1 , <nl> + FromBuiltin : 1 <nl> ) ; <nl> SWIFT_INLINE_BITFIELD ( BeginUnpairedAccessInst , NonValueInstruction , <nl> - NumSILAccessKindBits + NumSILAccessEnforcementBits + 1 , <nl> + NumSILAccessKindBits + NumSILAccessEnforcementBits <nl> + + 1 + 1 , <nl> AccessKind : NumSILAccessKindBits , <nl> Enforcement : NumSILAccessEnforcementBits , <nl> - NoNestedConflict : 1 ) ; <nl> + NoNestedConflict : 1 , <nl> + FromBuiltin : 1 ) ; <nl> <nl> SWIFT_INLINE_BITFIELD ( EndAccessInst , NonValueInstruction , 1 , <nl> Aborting : 1 <nl> ) ; <nl> SWIFT_INLINE_BITFIELD ( EndUnpairedAccessInst , NonValueInstruction , <nl> - NumSILAccessEnforcementBits + 1 , <nl> + NumSILAccessEnforcementBits + 1 + 1 , <nl> Enforcement : NumSILAccessEnforcementBits , <nl> - Aborting : 1 ) ; <nl> + Aborting : 1 , <nl> + FromBuiltin : 1 ) ; <nl> <nl> SWIFT_INLINE_BITFIELD ( StoreInst , NonValueInstruction , <nl> NumStoreOwnershipQualifierBits , <nl> mmm a / include / swift / Serialization / ModuleFormat . h <nl> ppp b / include / swift / Serialization / ModuleFormat . h <nl> const uint16_t VERSION_MAJOR = 0 ; <nl> / / / describe what change you made . The content of this comment isn ' t important ; <nl> / / / it just ensures a conflict if two people change the module format . <nl> / / / Don ' t worry about adhering to the 80 - column limit for this line . <nl> - const uint16_t VERSION_MINOR = 417 ; / / Last change : revert @ usableFromInline imports <nl> + const uint16_t VERSION_MINOR = 418 ; / / Last change : add begin_access [ builtin ] . <nl> <nl> using DeclIDField = BCFixed < 31 > ; <nl> <nl> mmm a / lib / IRGen / LoadableByAddress . cpp <nl> ppp b / lib / IRGen / LoadableByAddress . cpp <nl> static void rewriteFunction ( StructLoweringState & pass , <nl> auto * convInstr = cast < BeginAccessInst > ( instr ) ; <nl> newInstr = resultTyBuilder . createBeginAccess ( <nl> Loc , convInstr - > getOperand ( ) , convInstr - > getAccessKind ( ) , <nl> - convInstr - > getEnforcement ( ) , convInstr - > hasNoNestedConflict ( ) ) ; <nl> + convInstr - > getEnforcement ( ) , convInstr - > hasNoNestedConflict ( ) , <nl> + convInstr - > isFromBuiltin ( ) ) ; <nl> break ; <nl> } <nl> case SILInstructionKind : : EnumInst : { <nl> mmm a / lib / ParseSIL / ParseSIL . cpp <nl> ppp b / lib / ParseSIL / ParseSIL . 
cpp <nl> bool SILParser : : parseSILInstruction ( SILBuilder & B ) { <nl> ParsedEnum < SILAccessEnforcement > enforcement ; <nl> ParsedEnum < bool > aborting ; <nl> ParsedEnum < bool > noNestedConflict ; <nl> + ParsedEnum < bool > fromBuiltin ; <nl> <nl> bool isBeginAccess = ( Opcode = = SILInstructionKind : : BeginAccessInst | | <nl> Opcode = = SILInstructionKind : : BeginUnpairedAccessInst ) ; <nl> bool SILParser : : parseSILInstruction ( SILBuilder & B ) { <nl> auto setNoNestedConflict = [ & ] ( bool value ) { <nl> maybeSetEnum ( isBeginAccess , noNestedConflict , value , attr , identLoc ) ; <nl> } ; <nl> + auto setFromBuiltin = [ & ] ( bool value ) { <nl> + maybeSetEnum ( Opcode ! = SILInstructionKind : : EndAccessInst , fromBuiltin , <nl> + value , attr , identLoc ) ; <nl> + } ; <nl> <nl> if ( attr = = " unknown " ) { <nl> setEnforcement ( SILAccessEnforcement : : Unknown ) ; <nl> bool SILParser : : parseSILInstruction ( SILBuilder & B ) { <nl> setAborting ( true ) ; <nl> } else if ( attr = = " no_nested_conflict " ) { <nl> setNoNestedConflict ( true ) ; <nl> + } else if ( attr = = " builtin " ) { <nl> + setFromBuiltin ( true ) ; <nl> } else { <nl> P . diagnose ( identLoc , diag : : unknown_attribute , attr ) ; <nl> } <nl> bool SILParser : : parseSILInstruction ( SILBuilder & B ) { <nl> if ( isBeginAccess & & ! noNestedConflict . isSet ( ) ) <nl> noNestedConflict . Value = false ; <nl> <nl> + if ( ! fromBuiltin . isSet ( ) ) <nl> + fromBuiltin . Value = false ; <nl> + <nl> SILValue addrVal ; <nl> SourceLoc addrLoc ; <nl> if ( parseTypedValueRef ( addrVal , addrLoc , B ) ) <nl> bool SILParser : : parseSILInstruction ( SILBuilder & B ) { <nl> if ( Opcode = = SILInstructionKind : : BeginAccessInst ) { <nl> ResultVal = <nl> B . createBeginAccess ( InstLoc , addrVal , * kind , * enforcement , <nl> - * noNestedConflict ) ; <nl> + * noNestedConflict , * fromBuiltin ) ; <nl> } else if ( Opcode = = SILInstructionKind : : EndAccessInst ) { <nl> ResultVal = B . createEndAccess ( InstLoc , addrVal , * aborting ) ; <nl> } else if ( Opcode = = SILInstructionKind : : BeginUnpairedAccessInst ) { <nl> ResultVal = B . createBeginUnpairedAccess ( InstLoc , addrVal , bufferVal , <nl> * kind , * enforcement , <nl> - * noNestedConflict ) ; <nl> + * noNestedConflict , * fromBuiltin ) ; <nl> } else { <nl> - ResultVal = B . createEndUnpairedAccess ( InstLoc , addrVal , <nl> - * enforcement , * aborting ) ; <nl> + ResultVal = B . createEndUnpairedAccess ( InstLoc , addrVal , * enforcement , <nl> + * aborting , * fromBuiltin ) ; <nl> } <nl> break ; <nl> } <nl> mmm a / lib / SIL / SILInstruction . cpp <nl> ppp b / lib / SIL / SILInstruction . 
cpp <nl> namespace { <nl> auto left = cast < BeginAccessInst > ( LHS ) ; <nl> return left - > getAccessKind ( ) = = right - > getAccessKind ( ) <nl> & & left - > getEnforcement ( ) = = right - > getEnforcement ( ) <nl> - & & left - > hasNoNestedConflict ( ) = = right - > hasNoNestedConflict ( ) ; <nl> + & & left - > hasNoNestedConflict ( ) = = right - > hasNoNestedConflict ( ) <nl> + & & left - > isFromBuiltin ( ) = = right - > isFromBuiltin ( ) ; <nl> } <nl> <nl> bool visitEndAccessInst ( const EndAccessInst * right ) { <nl> namespace { <nl> auto left = cast < BeginUnpairedAccessInst > ( LHS ) ; <nl> return left - > getAccessKind ( ) = = right - > getAccessKind ( ) <nl> & & left - > getEnforcement ( ) = = right - > getEnforcement ( ) <nl> - & & left - > hasNoNestedConflict ( ) = = right - > hasNoNestedConflict ( ) ; <nl> + & & left - > hasNoNestedConflict ( ) = = right - > hasNoNestedConflict ( ) <nl> + & & left - > isFromBuiltin ( ) = = right - > isFromBuiltin ( ) ; <nl> } <nl> <nl> bool visitEndUnpairedAccessInst ( const EndUnpairedAccessInst * right ) { <nl> auto left = cast < EndUnpairedAccessInst > ( LHS ) ; <nl> return left - > getEnforcement ( ) = = right - > getEnforcement ( ) <nl> - & & left - > isAborting ( ) = = right - > isAborting ( ) ; <nl> + & & left - > isAborting ( ) = = right - > isAborting ( ) <nl> + & & left - > isFromBuiltin ( ) = = right - > isFromBuiltin ( ) ; <nl> } <nl> <nl> bool visitStrongReleaseInst ( const StrongReleaseInst * RHS ) { <nl> mmm a / lib / SIL / SILPrinter . cpp <nl> ppp b / lib / SIL / SILPrinter . cpp <nl> class SILPrinter : public SILInstructionVisitor < SILPrinter > { <nl> * this < < ' [ ' < < getSILAccessKindName ( BAI - > getAccessKind ( ) ) < < " ] [ " <nl> < < getSILAccessEnforcementName ( BAI - > getEnforcement ( ) ) < < " ] " <nl> < < ( BAI - > hasNoNestedConflict ( ) ? " [ no_nested_conflict ] " : " " ) <nl> + < < ( BAI - > isFromBuiltin ( ) ? " [ builtin ] " : " " ) <nl> < < getIDAndType ( BAI - > getOperand ( ) ) ; <nl> } <nl> void visitEndAccessInst ( EndAccessInst * EAI ) { <nl> class SILPrinter : public SILInstructionVisitor < SILPrinter > { <nl> * this < < ' [ ' < < getSILAccessKindName ( BAI - > getAccessKind ( ) ) < < " ] [ " <nl> < < getSILAccessEnforcementName ( BAI - > getEnforcement ( ) ) < < " ] " <nl> < < ( BAI - > hasNoNestedConflict ( ) ? " [ no_nested_conflict ] " : " " ) <nl> + < < ( BAI - > isFromBuiltin ( ) ? " [ builtin ] " : " " ) <nl> < < getIDAndType ( BAI - > getSource ( ) ) < < " , " <nl> < < getIDAndType ( BAI - > getBuffer ( ) ) ; <nl> } <nl> void visitEndUnpairedAccessInst ( EndUnpairedAccessInst * EAI ) { <nl> - * this < < ( EAI - > isAborting ( ) ? " [ abort ] " : " " ) <nl> - < < ' [ ' < < getSILAccessEnforcementName ( EAI - > getEnforcement ( ) ) < < " ] " <nl> + * this < < ( EAI - > isAborting ( ) ? " [ abort ] " : " " ) < < ' [ ' <nl> + < < getSILAccessEnforcementName ( EAI - > getEnforcement ( ) ) < < " ] " <nl> + < < ( EAI - > isFromBuiltin ( ) ? " [ builtin ] " : " " ) <nl> < < getIDAndType ( EAI - > getOperand ( ) ) ; <nl> } <nl> <nl> mmm a / lib / SILGen / SILGenBuiltin . cpp <nl> ppp b / lib / SILGen / SILGenBuiltin . cpp <nl> static ManagedValue emitBuiltinBeginUnpairedModifyAccess ( SILGenFunction & SGF , <nl> / * invariant * / false ) ; <nl> SGF . B . 
createBeginUnpairedAccess ( loc , addr , buffer , SILAccessKind : : Modify , <nl> SILAccessEnforcement : : Dynamic , <nl> - / * noNestedConflict * / false ) ; <nl> + / * noNestedConflict * / false , <nl> + / * fromBuiltin * / true ) ; <nl> <nl> return ManagedValue : : forUnmanaged ( SGF . emitEmptyTuple ( loc ) ) ; <nl> } <nl> static ManagedValue emitBuiltinPerformInstantaneousReadAccess ( <nl> / / use will be trivially optimized away . <nl> SGF . B . createBeginUnpairedAccess ( loc , addr , unusedBuffer , SILAccessKind : : Read , <nl> SILAccessEnforcement : : Dynamic , <nl> - / * noNestedConflict * / true ) ; <nl> + / * noNestedConflict * / true , <nl> + / * fromBuiltin * / true ) ; <nl> <nl> return ManagedValue : : forUnmanaged ( SGF . emitEmptyTuple ( loc ) ) ; <nl> } <nl> static ManagedValue emitBuiltinEndUnpairedAccess ( SILGenFunction & SGF , <nl> / * strict * / true , <nl> / * invariant * / false ) ; <nl> SGF . B . createEndUnpairedAccess ( loc , buffer , SILAccessEnforcement : : Dynamic , <nl> - / * aborted * / false ) ; <nl> + / * aborted * / false , <nl> + / * fromBuiltin * / true ) ; <nl> <nl> return ManagedValue : : forUnmanaged ( SGF . emitEmptyTuple ( loc ) ) ; <nl> } <nl> mmm a / lib / SILGen / SILGenLValue . cpp <nl> ppp b / lib / SILGen / SILGenLValue . cpp <nl> static SILValue enterAccessScope ( SILGenFunction & SGF , SILLocation loc , <nl> if ( enforcement = = SILAccessEnforcement : : Dynamic ) { <nl> SGF . B . createBeginUnpairedAccess ( loc , addr , unpairedAccesses - > Buffer , <nl> silAccessKind , enforcement , <nl> - / * hasNoNestedConflict = * / false ) ; <nl> + / * hasNoNestedConflict = * / false , <nl> + / * fromBuiltin = * / false ) ; <nl> unpairedAccesses - > NumAccesses + + ; <nl> } <nl> return addr ; <nl> static SILValue enterAccessScope ( SILGenFunction & SGF , SILLocation loc , <nl> <nl> / / Enter the access . <nl> addr = SGF . B . createBeginAccess ( loc , addr , silAccessKind , enforcement , <nl> - / * hasNoNestedConflict = * / false ) ; <nl> + / * hasNoNestedConflict = * / false , <nl> + / * fromBuiltin = * / false ) ; <nl> <nl> / / Push a writeback to end it . <nl> auto accessedMV = ManagedValue : : forLValue ( addr ) ; <nl> SILValue UnenforcedAccess : : beginAccess ( SILGenFunction & SGF , SILLocation loc , <nl> <nl> auto BAI = <nl> SGF . B . createBeginAccess ( loc , address , kind , SILAccessEnforcement : : Unsafe , <nl> - / * hasNoNestedConflict = * / false ) ; <nl> + / * hasNoNestedConflict = * / false , <nl> + / * fromBuiltin = * / false ) ; <nl> beginAccessPtr = BeginAccessPtr ( BAI , DeleterCheck ( ) ) ; <nl> <nl> return BAI ; <nl> mmm a / lib / SILGen / SILGenMaterializeForSet . cpp <nl> ppp b / lib / SILGen / SILGenMaterializeForSet . cpp <nl> MaterializeForSetEmitter : : createEndUnpairedAccessesCallback ( SILFunction & F , <nl> " multiple unpaired accesses not supported " ) ; <nl> SGF . B . createEndUnpairedAccess ( loc , callbackStorage , <nl> SILAccessEnforcement : : Dynamic , <nl> - / * aborting * / false ) ; <nl> + / * aborting * / false , <nl> + / * fromBuiltin * / false ) ; <nl> } ) ; <nl> } <nl> <nl> mmm a / lib / Serialization / DeserializeSIL . cpp <nl> ppp b / lib / Serialization / DeserializeSIL . 
cpp <nl> bool SILDeserializer : : readSILInstruction ( SILFunction * Fn , SILBasicBlock * BB , <nl> ValID , getSILType ( MF - > getType ( TyID ) , ( SILValueCategory ) TyCategory ) ) ; <nl> auto accessKind = SILAccessKind ( Attr & 0x3 ) ; <nl> auto enforcement = SILAccessEnforcement ( ( Attr > > 2 ) & 0x3 ) ; <nl> - bool noNestedConflict = Attr > > 4 ; <nl> + bool noNestedConflict = ( Attr > > 4 ) & 0x01 ; <nl> + bool fromBuiltin = ( Attr > > 5 ) & 0x01 ; <nl> ResultVal = <nl> Builder . createBeginAccess ( Loc , op , accessKind , enforcement , <nl> - noNestedConflict ) ; <nl> + noNestedConflict , fromBuiltin ) ; <nl> break ; <nl> } <nl> case SILInstructionKind : : EndAccessInst : { <nl> bool SILDeserializer : : readSILInstruction ( SILFunction * Fn , SILBasicBlock * BB , <nl> ValID2 , getSILType ( MF - > getType ( TyID2 ) , ( SILValueCategory ) TyCategory2 ) ) ; <nl> auto accessKind = SILAccessKind ( Attr & 0x3 ) ; <nl> auto enforcement = SILAccessEnforcement ( ( Attr > > 2 ) & 0x03 ) ; <nl> - bool noNestedConflict = Attr > > 4 ; <nl> + bool noNestedConflict = ( Attr > > 4 ) & 0x01 ; <nl> + bool fromBuiltin = ( Attr > > 5 ) & 0x01 ; <nl> ResultVal = Builder . createBeginUnpairedAccess ( <nl> - Loc , source , buffer , accessKind , enforcement , noNestedConflict ) ; <nl> + Loc , source , buffer , accessKind , enforcement , noNestedConflict , <nl> + fromBuiltin ) ; <nl> break ; <nl> } <nl> case SILInstructionKind : : EndUnpairedAccessInst : { <nl> SILValue op = getLocalValue ( <nl> ValID , getSILType ( MF - > getType ( TyID ) , ( SILValueCategory ) TyCategory ) ) ; <nl> bool aborted = Attr & 0x1 ; <nl> - auto enforcement = SILAccessEnforcement ( Attr > > 1 ) ; <nl> - ResultVal = Builder . createEndUnpairedAccess ( Loc , op , enforcement , aborted ) ; <nl> + auto enforcement = SILAccessEnforcement ( ( Attr > > 1 ) & 0x03 ) ; <nl> + bool fromBuiltin = ( Attr > > 3 ) & 0x01 ; <nl> + ResultVal = Builder . createEndUnpairedAccess ( Loc , op , enforcement , aborted , <nl> + fromBuiltin ) ; <nl> break ; <nl> } <nl> case SILInstructionKind : : StoreUnownedInst : { <nl> mmm a / lib / Serialization / SILFormat . h <nl> ppp b / lib / Serialization / SILFormat . h <nl> namespace sil_block { <nl> using SILOneOperandExtraAttributeLayout = BCRecordLayout < <nl> SIL_ONE_OPERAND_EXTRA_ATTR , <nl> SILInstOpCodeField , <nl> - BCFixed < 5 > , / / Optional attributes <nl> + BCFixed < 6 > , / / Optional attributes <nl> TypeIDField , SILTypeCategoryField , ValueIDField <nl> > ; <nl> <nl> namespace sil_block { <nl> using SILTwoOperandsExtraAttributeLayout = BCRecordLayout < <nl> SIL_TWO_OPERANDS_EXTRA_ATTR , <nl> SILInstOpCodeField , <nl> - BCFixed < 5 > , / / Optional attributes <nl> + BCFixed < 6 > , / / Optional attributes <nl> TypeIDField , <nl> SILTypeCategoryField , <nl> ValueIDField , <nl> mmm a / lib / Serialization / SerializeSIL . cpp <nl> ppp b / lib / Serialization / SerializeSIL . 
cpp <nl> void SILSerializer : : writeSILInstruction ( const SILInstruction & SI ) { <nl> auto * BAI = cast < BeginAccessInst > ( & SI ) ; <nl> unsigned attr = unsigned ( BAI - > getAccessKind ( ) ) <nl> + ( unsigned ( BAI - > getEnforcement ( ) ) < < 2 ) <nl> - + ( BAI - > hasNoNestedConflict ( ) < < 4 ) ; <nl> + + ( BAI - > hasNoNestedConflict ( ) < < 4 ) <nl> + + ( BAI - > isFromBuiltin ( ) < < 5 ) ; <nl> SILValue operand = BAI - > getOperand ( ) ; <nl> <nl> SILOneOperandExtraAttributeLayout : : emitRecord ( <nl> void SILSerializer : : writeSILInstruction ( const SILInstruction & SI ) { <nl> auto * BAI = cast < BeginUnpairedAccessInst > ( & SI ) ; <nl> unsigned attr = unsigned ( BAI - > getAccessKind ( ) ) <nl> + ( unsigned ( BAI - > getEnforcement ( ) ) < < 2 ) <nl> - + ( unsigned ( BAI - > hasNoNestedConflict ( ) ) < < 4 ) ; <nl> + + ( unsigned ( BAI - > hasNoNestedConflict ( ) ) < < 4 ) <nl> + + ( unsigned ( BAI - > isFromBuiltin ( ) ) < < 5 ) ; <nl> SILValue source = BAI - > getSource ( ) ; <nl> SILValue buffer = BAI - > getBuffer ( ) ; <nl> <nl> void SILSerializer : : writeSILInstruction ( const SILInstruction & SI ) { <nl> unsigned abbrCode = SILAbbrCodes [ SILOneOperandExtraAttributeLayout : : Code ] ; <nl> auto * EAI = cast < EndUnpairedAccessInst > ( & SI ) ; <nl> unsigned attr = unsigned ( EAI - > isAborting ( ) ) <nl> - + ( unsigned ( EAI - > getEnforcement ( ) ) < < 1 ) ; <nl> + + ( unsigned ( EAI - > getEnforcement ( ) ) < < 1 ) <nl> + + ( unsigned ( EAI - > isFromBuiltin ( ) ) < < 3 ) ; <nl> SILValue operand = EAI - > getOperand ( ) ; <nl> <nl> SILOneOperandExtraAttributeLayout : : emitRecord ( <nl> mmm a / test / SIL / Parser / basic . sil <nl> ppp b / test / SIL / Parser / basic . sil <nl> bb0 ( % 0 : $ A ) : <nl> return % 20 : $ ( ) <nl> } <nl> <nl> + / / CHECK - LABEL : sil @ test_builtin_access : $ @ convention ( thin ) ( @ guaranteed A ) - > ( ) { <nl> + / / CHECK : begin_access [ read ] [ dynamic ] [ builtin ] <nl> + / / CHECK : begin_unpaired_access [ read ] [ dynamic ] [ builtin ] <nl> + / / CHECK : end_unpaired_access [ dynamic ] [ builtin ] <nl> + / / CHECK - LABEL : } / / end sil function ' test_builtin_access ' <nl> + sil @ test_builtin_access : $ ( @ guaranteed A ) - > ( ) { <nl> + bb0 ( % 0 : $ A ) : <nl> + % 1 = alloc_stack $ Any <nl> + % 2 = ref_element_addr % 0 : $ A , # A . property <nl> + % 6 = begin_access [ read ] [ dynamic ] [ builtin ] % 2 : $ * Any <nl> + copy_addr % 6 to % 1 : $ * Any <nl> + end_access % 6 : $ * Any <nl> + % 9 = alloc_stack $ Builtin . UnsafeValueBuffer <nl> + begin_unpaired_access [ read ] [ dynamic ] [ builtin ] % 2 : $ * Any , % 9 : $ * Builtin . UnsafeValueBuffer <nl> + copy_addr % 2 to % 1 : $ * Any <nl> + end_unpaired_access [ dynamic ] [ builtin ] % 9 : $ * Builtin . UnsafeValueBuffer <nl> + destroy_addr % 1 : $ * Any <nl> + dealloc_stack % 9 : $ * Builtin . UnsafeValueBuffer <nl> + dealloc_stack % 1 : $ * Any <nl> + % 20 = tuple ( ) <nl> + return % 20 : $ ( ) <nl> + } <nl> + <nl> struct EmptyStruct { } <nl> <nl> sil @ test_empty_destructure : $ @ convention ( thin ) ( ) - > ( ) { <nl> mmm a / test / SILGen / builtins . swift <nl> ppp b / test / SILGen / builtins . swift <nl> func getTailAddr < T1 , T2 > ( start : Builtin . RawPointer , i : Builtin . Word , ty1 : T1 . Typ <nl> func beginUnpairedModifyAccess < T1 > ( address : Builtin . RawPointer , scratch : Builtin . RawPointer , ty1 : T1 . Type ) { <nl> / / CHECK : [ [ P2A_ADDR : % . * ] ] = pointer_to_address % 0 <nl> / / CHECK : [ [ P2A_SCRATCH : % . 
* ] ] = pointer_to_address % 1 <nl> - / / CHECK : begin_unpaired_access [ modify ] [ dynamic ] [ [ P2A_ADDR ] ] : $ * T1 , [ [ P2A_SCRATCH ] ] : $ * Builtin . UnsafeValueBuffer <nl> + / / CHECK : begin_unpaired_access [ modify ] [ dynamic ] [ builtin ] [ [ P2A_ADDR ] ] : $ * T1 , [ [ P2A_SCRATCH ] ] : $ * Builtin . UnsafeValueBuffer <nl> / / CHECK : [ [ RESULT : % . * ] ] = tuple ( ) <nl> / / CHECK : [ [ RETURN : % . * ] ] = tuple ( ) <nl> / / CHECK : return [ [ RETURN ] ] : $ ( ) <nl> func beginUnpairedModifyAccess < T1 > ( address : Builtin . RawPointer , scratch : Builtin <nl> func performInstantaneousReadAccess < T1 > ( address : Builtin . RawPointer , scratch : Builtin . RawPointer , ty1 : T1 . Type ) { <nl> / / CHECK : [ [ P2A_ADDR : % . * ] ] = pointer_to_address % 0 <nl> / / CHECK : [ [ SCRATCH : % . * ] ] = alloc_stack $ Builtin . UnsafeValueBuffer <nl> - / / CHECK : begin_unpaired_access [ read ] [ dynamic ] [ no_nested_conflict ] [ [ P2A_ADDR ] ] : $ * T1 , [ [ SCRATCH ] ] : $ * Builtin . UnsafeValueBuffer <nl> + / / CHECK : begin_unpaired_access [ read ] [ dynamic ] [ no_nested_conflict ] [ builtin ] [ [ P2A_ADDR ] ] : $ * T1 , [ [ SCRATCH ] ] : $ * Builtin . UnsafeValueBuffer <nl> / / CHECK - NOT : end_ { { . * } } access <nl> / / CHECK : [ [ RESULT : % . * ] ] = tuple ( ) <nl> / / CHECK : [ [ RETURN : % . * ] ] = tuple ( ) <nl> mmm a / test / Serialization / Inputs / def_basic . sil <nl> ppp b / test / Serialization / Inputs / def_basic . sil <nl> bb0 ( % 0 : $ A ) : <nl> return % 20 : $ ( ) <nl> } <nl> <nl> + / / CHECK - LABEL : sil public_external [ transparent ] [ serialized ] @ test_builtin_access : $ @ convention ( thin ) ( @ guaranteed A ) - > ( ) { <nl> + / / CHECK : begin_access [ read ] [ dynamic ] [ builtin ] <nl> + / / CHECK : end_access <nl> + / / CHECK : begin_unpaired_access [ read ] [ dynamic ] [ builtin ] <nl> + / / CHECK : end_unpaired_access [ dynamic ] [ builtin ] <nl> + / / CHECK - LABEL : } / / end sil function ' test_builtin_access ' <nl> + sil [ transparent ] [ serialized ] @ test_builtin_access : $ @ convention ( thin ) ( @ guaranteed A ) - > ( ) { <nl> + bb0 ( % 0 : $ A ) : <nl> + % 1 = alloc_stack $ Any <nl> + % 2 = ref_element_addr % 0 : $ A , # A . property <nl> + % 6 = begin_access [ dynamic ] [ read ] [ builtin ] % 2 : $ * Any <nl> + copy_addr % 6 to % 1 : $ * Any <nl> + end_access % 6 : $ * Any <nl> + destroy_addr % 1 : $ * Any <nl> + % 9 = alloc_stack $ Builtin . UnsafeValueBuffer <nl> + begin_unpaired_access [ read ] [ dynamic ] [ builtin ] % 2 : $ * Any , % 9 : $ * Builtin . UnsafeValueBuffer <nl> + copy_addr % 2 to % 1 : $ * Any <nl> + end_unpaired_access [ dynamic ] [ builtin ] % 9 : $ * Builtin . UnsafeValueBuffer <nl> + destroy_addr % 1 : $ * Any <nl> + dealloc_stack % 9 : $ * Builtin . UnsafeValueBuffer <nl> + dealloc_stack % 1 : $ * Any <nl> + % 20 = tuple ( ) <nl> + return % 20 : $ ( ) <nl> + } <nl> + <nl> public class Foo { <nl> subscript ( x : Int , y : Int ) - > Int32 { get set } <nl> var x : Int <nl> bb0 : <nl> % 152 = function_ref @ a_regular_thunk : $ @ convention ( thin ) ( ) - > ( ) <nl> % 153 = function_ref @ weak_unowned : $ @ convention ( thin ) ( @ owned WeakUnownedTest , @ owned AnyObject ) - > ( ) <nl> % 154 = function_ref @ test_access : $ @ convention ( thin ) ( @ guaranteed A ) - > ( ) <nl> + % 155 = function_ref @ test_builtin_access : $ @ convention ( thin ) ( @ guaranteed A ) - > ( ) <nl> <nl> % r = tuple ( ) <nl> return % r : $ ( ) <nl> | [ exclusivity ] Add a [ builtin ] flag to begin_ [ unpaired_ ] access . 
| apple/swift | fe326266cc83edf6ee7fd93c54b43a1792399b9f | 2018-05-10T04:42:37Z |
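The serialization side of the swift change above packs the begin_access attributes into one small integer: access kind in bits 0-1, enforcement in bits 2-3, noNestedConflict in bit 4, and the new fromBuiltin flag in bit 5 (end_unpaired_access uses its own narrower layout, and the record width grows from BCFixed<5> to BCFixed<6> to make room). A round-trip sketch of that layout; the struct and function names are made up:

#include <cassert>
#include <cstdint>

struct AccessAttrs {
  unsigned kind;          // 2 bits
  unsigned enforcement;   // 2 bits
  bool noNestedConflict;  // 1 bit
  bool fromBuiltin;       // 1 bit, the new field
};

uint32_t encode(const AccessAttrs &a) {
  return a.kind | (a.enforcement << 2) |
         (unsigned(a.noNestedConflict) << 4) |
         (unsigned(a.fromBuiltin) << 5);
}

AccessAttrs decode(uint32_t w) {
  // Masking is the point of the deserializer fix: the old `Attr >> 4`
  // would also pick up the new bit 5 and misread it as noNestedConflict.
  return {unsigned(w & 0x3), unsigned((w >> 2) & 0x3),
          bool((w >> 4) & 0x1), bool((w >> 5) & 0x1)};
}

int main() {
  AccessAttrs a{/*kind=*/1, /*enforcement=*/2, /*noNestedConflict=*/true,
                /*fromBuiltin=*/true};
  AccessAttrs b = decode(encode(a));
  assert(b.kind == a.kind && b.enforcement == a.enforcement &&
         b.noNestedConflict == a.noNestedConflict &&
         b.fromBuiltin == a.fromBuiltin);
  return 0;
}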
mmm a / src / http / json / cJSON . cc <nl> ppp b / src / http / json / cJSON . cc <nl> void cJSON_Delete ( cJSON * c ) <nl> } <nl> <nl> / * Parse the input text to generate a number , and populate the result into item . * / <nl> - static const char * parse_number ( cJSON * item , const char * num ) <nl> - { <nl> - double n = 0 , sign = 1 , scale = 0 ; int subscale = 0 , signsubscale = 1 ; <nl> - <nl> - / * Could use sscanf for this ? * / <nl> - if ( * num = = ' - ' ) sign = - 1 , num + + ; / * Has sign ? * / <nl> - if ( * num = = ' 0 ' ) num + + ; / * is zero * / <nl> - if ( * num > = ' 1 ' & & * num < = ' 9 ' ) do n = ( n * 10 . 0 ) + ( * num + + - ' 0 ' ) ; while ( * num > = ' 0 ' & & * num < = ' 9 ' ) ; / * Number ? * / <nl> - if ( * num = = ' . ' & & num [ 1 ] > = ' 0 ' & & num [ 1 ] < = ' 9 ' ) { num + + ; do n = ( n * 10 . 0 ) + ( * num + + - ' 0 ' ) , scale - - ; while ( * num > = ' 0 ' & & * num < = ' 9 ' ) ; } / * Fractional part ? * / <nl> - if ( * num = = ' e ' | | * num = = ' E ' ) / * Exponent ? * / <nl> - { num + + ; if ( * num = = ' + ' ) num + + ; else if ( * num = = ' - ' ) signsubscale = - 1 , num + + ; / * With sign ? * / <nl> - while ( * num > = ' 0 ' & & * num < = ' 9 ' ) subscale = ( subscale * 10 ) + ( * num + + - ' 0 ' ) ; / * Number ? * / <nl> - } <nl> - <nl> - n = sign * n * pow ( 10 . 0 , ( scale + subscale * signsubscale ) ) ; / * number = + / - number . fraction * 10 ^ + / - exponent * / <nl> - <nl> - item - > valuedouble = n ; <nl> - item - > valueint = ( int ) n ; <nl> - item - > type = cJSON_Number ; <nl> - return num ; <nl> + static const char * parse_number ( cJSON * item , const char * num ) { <nl> + double n ; <nl> + int offset ; <nl> + / / ` % lg ` differs from the JSON spec in two ways : it accepts numbers prefixed <nl> + / / with ` + ` , and it accepts hexadecimal floats ( sometimes ) . We don ' t need <nl> + / / to check for numbers prefixed with ` + ` because ` parse_value ` only calls <nl> + / / us if ` item [ 0 ] ` is ` - ` or ` [ 0 - 9 ] ` . We check for hexadecimal floats below <nl> + / / ( cJSON ' s convention is to just ignore trailing garbage ) . <nl> + if ( num [ 0 ] = = ' 0 ' & & ( num [ 1 ] = = ' x ' | | num [ 1 ] = = ' X ' ) ) { <nl> + n = 0 ; <nl> + offset = 1 ; <nl> + } else { <nl> + sscanf ( num , " % lg % n " , & n , & offset ) ; <nl> + } <nl> + item - > valuedouble = n ; <nl> + item - > valueint = ( int ) n ; <nl> + item - > type = cJSON_Number ; <nl> + return num + offset ; <nl> } <nl> <nl> / * Render the number nicely from the given item into a string . * / <nl> - static char * print_number ( cJSON * item ) <nl> - { <nl> - char * str ; <nl> - double d = item - > valuedouble ; <nl> - guarantee ( isfinite ( d ) ) ; <nl> - if ( fabs ( ( ( double ) item - > valueint ) - d ) < = DBL_EPSILON & & d < = INT_MAX & & d > = INT_MIN ) <nl> - { <nl> - str = ( char * ) cJSON_malloc ( 21 ) ; / * 2 ^ 64 + 1 can be represented in 21 chars . * / <nl> - if ( str ) sprintf ( str , " % d " , item - > valueint ) ; / / NOLINT ( runtime / printf ) <nl> - } <nl> - else <nl> - { <nl> - str = ( char * ) cJSON_malloc ( 512 ) ; / * This is a correct tradeoff . * / <nl> - if ( str ) sprintf ( str , " % . 32g " , d ) ; <nl> - } <nl> - return str ; <nl> + static char * print_number ( cJSON * item ) { <nl> + char * str ; <nl> + double d = item - > valuedouble ; <nl> + guarantee ( isfinite ( d ) ) ; <nl> + asprintf ( & str , " % . 20g " , d ) ; <nl> + return str ; <nl> } <nl> <nl> / * Parse the input text into an unescaped cstring , and populate item . 
* / <nl> | fixed cJSON parse_number and print_number | rethinkdb/rethinkdb | 996dfcfa0198cb9bb23c5ead284e4e4d3b980e1a | 2013-08-02T13:27:15Z |
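The rewrite above leans on two standard C idioms: sscanf's %n conversion reports how many characters were consumed (which becomes the parser's return pointer), and %.20g prints a double with enough digits to survive a parse/print round trip. A self-contained demo of both; the sample inputs are made up:

#include <assert.h>
#include <stdio.h>

int main(void) {
  double n = 0.0;
  int consumed = 0;

  /* %lg parses sign, integer part, fraction and exponent in one call;
   * %n records the offset of the first unparsed character. */
  int rc = sscanf("-12.5e2,rest", "%lg%n", &n, &consumed);
  assert(rc == 1 && n == -1250.0 && consumed == 7);  /* stops at the comma */

  /* The caveat from the commit's comment: %lg can accept hex floats,
   * which JSON forbids, so parse_number screens for a 0x/0X prefix. */
  const char *num = "0x1p3";
  if (num[0] == '0' && (num[1] == 'x' || num[1] == 'X'))
    puts("rejected as a JSON number");

  printf("%.20g\n", 0.1);  /* prints something like 0.10000000000000000555 */
  return 0;
}

The '+'-prefix leniency of %lg never matters in context, because parse_value only dispatches to parse_number on '-' or a digit, exactly as the new comment says.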
mmm a / src / runtime / vm / translator / hopt / codegen . cpp <nl> ppp b / src / runtime / vm / translator / hopt / codegen . cpp <nl> ArgDesc : : ArgDesc ( SSATmp * tmp , bool val ) : m_imm ( - 1 ) { <nl> m_kind = Imm ; <nl> } <nl> <nl> - Address ArgDesc : : genCode ( CodeGenerator : : Asm & a ) const { <nl> - Address start = a . code . frontier ; <nl> - switch ( m_kind ) { <nl> - case TypeReg : <nl> - case Reg : <nl> - a . movq ( m_srcReg , m_dstReg ) ; <nl> - TRACE ( 3 , " [ counter ] 1 reg move in ArgDesc : : genCode \ n " ) ; <nl> - if ( m_kind = = TypeReg ) { <nl> - a . shlq ( kTypeShiftBits , m_dstReg ) ; <nl> - } <nl> - break ; <nl> - case Imm : <nl> - emitImmReg ( a , m_imm , m_dstReg ) ; <nl> - break ; <nl> - case Addr : <nl> - a . lea ( m_srcReg [ m_imm . l ( ) ] , m_dstReg ) ; <nl> - break ; <nl> - } <nl> - return start ; <nl> - } <nl> - <nl> const Func * CodeGenerator : : getCurFunc ( ) { <nl> if ( m_lastMarker ) { <nl> return m_lastMarker - > getFunc ( ) ; <nl> mmm a / src / runtime / vm / translator / hopt / codegen . h <nl> ppp b / src / runtime / vm / translator / hopt / codegen . h <nl> class ArgDesc { <nl> PhysReg getSrcReg ( ) const { return m_srcReg ; } <nl> Kind getKind ( ) const { return m_kind ; } <nl> void setDstReg ( PhysReg reg ) { m_dstReg = reg ; } <nl> - Address genCode ( CodeGenerator : : Asm & as ) const ; <nl> Immed getImm ( ) const { return m_imm ; } <nl> <nl> private : / / These should be created using ArgGroup . <nl> | Remove unused ArgDesc : : genCode | facebook/hhvm | 89dc02e61851522e67d8b131791c977eb0052f3c | 2013-01-30T19:40:32Z |
mmm a / . clang - tidy <nl> ppp b / . clang - tidy <nl> <nl> mmm <nl> - Checks : ' modernize - use - nullptr , google - build - namespaces , google - build - explicit - make - pair , readability - function - size , performance - * ' <nl> - WarningsAsErrors : ' modernize - use - nullptr , google - build - namespaces , google - build - explicit - make - pair , readability - function - size , performance - * ' <nl> + Checks : ' modernize - use - nullptr , google - build - namespaces , google - build - explicit - make - pair , readability - function - size , performance - * , bugprone - * ' <nl> + WarningsAsErrors : ' modernize - use - nullptr , google - build - namespaces , google - build - explicit - make - pair , readability - function - size , performance - * , bugprone - * ' <nl> CheckOptions : <nl> - key : readability - function - size . StatementThreshold <nl> value : ' 450 ' <nl> mmm a / src / core / ext / filters / client_channel / client_channel . cc <nl> ppp b / src / core / ext / filters / client_channel / client_channel . cc <nl> get_service_config_from_resolver_result_locked ( channel_data * chand ) { <nl> grpc_uri * uri = grpc_uri_parse ( server_uri , true ) ; <nl> GPR_ASSERT ( uri - > path [ 0 ] ! = ' \ 0 ' ) ; <nl> service_config_parsing_state parsing_state ; <nl> - memset ( & parsing_state , 0 , sizeof ( parsing_state ) ) ; <nl> parsing_state . server_name = <nl> uri - > path [ 0 ] = = ' / ' ? uri - > path + 1 : uri - > path ; <nl> service_config - > ParseGlobalParams ( parse_retry_throttle_params , <nl> mmm a / src / core / lib / surface / channel . cc <nl> ppp b / src / core / lib / surface / channel . cc <nl> grpc_channel * grpc_channel_create_with_builder ( <nl> return channel ; <nl> } <nl> <nl> - memset ( channel , 0 , sizeof ( * channel ) ) ; <nl> channel - > target = target ; <nl> channel - > is_client = grpc_channel_stack_type_is_client ( channel_stack_type ) ; <nl> size_t channel_tracer_max_nodes = 0 ; / / default to off <nl> mmm a / src / core / tsi / alts_transport_security . cc <nl> ppp b / src / core / tsi / alts_transport_security . cc <nl> void grpc_tsi_alts_signal_for_cq_destroy ( ) { <nl> } <nl> <nl> void grpc_tsi_alts_init ( ) { <nl> - memset ( & g_alts_resource , 0 , sizeof ( alts_shared_resource ) ) ; <nl> + g_alts_resource . channel = nullptr ; <nl> + g_alts_resource . cq = nullptr ; <nl> + g_alts_resource . is_cq_drained = false ; <nl> gpr_mu_init ( & g_alts_resource . mu ) ; <nl> gpr_cv_init ( & g_alts_resource . cv ) ; <nl> } <nl> similarity index 100 % <nl> rename from test / cpp / util / . clang - tidy <nl> rename to test / . clang - tidy <nl> mmm a / test / core / end2end / fixtures / http_proxy_fixture . cc <nl> ppp b / test / core / end2end / fixtures / http_proxy_fixture . cc <nl> <nl> # include " test / core / util / port . 
h " <nl> <nl> struct grpc_end2end_http_proxy { <nl> + grpc_end2end_http_proxy ( ) <nl> + : proxy_name ( nullptr ) , <nl> + server ( nullptr ) , <nl> + channel_args ( nullptr ) , <nl> + mu ( nullptr ) , <nl> + pollset ( nullptr ) , <nl> + combiner ( nullptr ) { <nl> + gpr_ref_init ( & users , 1 ) ; <nl> + combiner = grpc_combiner_create ( ) ; <nl> + } <nl> char * proxy_name ; <nl> grpc_core : : Thread thd ; <nl> grpc_tcp_server * server ; <nl> static void thread_main ( void * arg ) { <nl> grpc_end2end_http_proxy * grpc_end2end_http_proxy_create ( <nl> grpc_channel_args * args ) { <nl> grpc_core : : ExecCtx exec_ctx ; <nl> - grpc_end2end_http_proxy * proxy = <nl> - static_cast < grpc_end2end_http_proxy * > ( gpr_malloc ( sizeof ( * proxy ) ) ) ; <nl> - memset ( proxy , 0 , sizeof ( * proxy ) ) ; <nl> - proxy - > combiner = grpc_combiner_create ( ) ; <nl> - gpr_ref_init ( & proxy - > users , 1 ) ; <nl> + grpc_end2end_http_proxy * proxy = grpc_core : : New < grpc_end2end_http_proxy > ( ) ; <nl> / / Construct proxy address . <nl> const int proxy_port = grpc_pick_unused_port_or_die ( ) ; <nl> gpr_join_host_port ( & proxy - > proxy_name , " localhost " , proxy_port ) ; <nl> void grpc_end2end_http_proxy_destroy ( grpc_end2end_http_proxy * proxy ) { <nl> GRPC_CLOSURE_CREATE ( destroy_pollset , proxy - > pollset , <nl> grpc_schedule_on_exec_ctx ) ) ; <nl> GRPC_COMBINER_UNREF ( proxy - > combiner , " test " ) ; <nl> - gpr_free ( proxy ) ; <nl> + grpc_core : : Delete ( proxy ) ; <nl> } <nl> <nl> const char * grpc_end2end_http_proxy_get_proxy_name ( <nl> mmm a / test / core / end2end / fixtures / proxy . cc <nl> ppp b / test / core / end2end / fixtures / proxy . cc <nl> <nl> # include " test / core / util / port . h " <nl> <nl> struct grpc_end2end_proxy { <nl> + grpc_end2end_proxy ( ) <nl> + : proxy_port ( nullptr ) , <nl> + server_port ( nullptr ) , <nl> + cq ( nullptr ) , <nl> + server ( nullptr ) , <nl> + client ( nullptr ) , <nl> + shutdown ( false ) , <nl> + new_call ( nullptr ) { <nl> + memset ( & new_call_details , 0 , sizeof ( new_call_details ) ) ; <nl> + memset ( & new_call_metadata , 0 , sizeof ( new_call_metadata ) ) ; <nl> + } <nl> grpc_core : : Thread thd ; <nl> char * proxy_port ; <nl> char * server_port ; <nl> grpc_end2end_proxy * grpc_end2end_proxy_create ( const grpc_end2end_proxy_def * def , <nl> int proxy_port = grpc_pick_unused_port_or_die ( ) ; <nl> int server_port = grpc_pick_unused_port_or_die ( ) ; <nl> <nl> - grpc_end2end_proxy * proxy = <nl> - static_cast < grpc_end2end_proxy * > ( gpr_malloc ( sizeof ( * proxy ) ) ) ; <nl> - memset ( proxy , 0 , sizeof ( * proxy ) ) ; <nl> + grpc_end2end_proxy * proxy = grpc_core : : New < grpc_end2end_proxy > ( ) ; <nl> <nl> gpr_join_host_port ( & proxy - > proxy_port , " localhost " , proxy_port ) ; <nl> gpr_join_host_port ( & proxy - > server_port , " localhost " , server_port ) ; <nl> void grpc_end2end_proxy_destroy ( grpc_end2end_proxy * proxy ) { <nl> grpc_channel_destroy ( proxy - > client ) ; <nl> grpc_completion_queue_destroy ( proxy - > cq ) ; <nl> grpc_call_details_destroy ( & proxy - > new_call_details ) ; <nl> - gpr_free ( proxy ) ; <nl> + grpc_core : : Delete ( proxy ) ; <nl> } <nl> <nl> static void unrefpc ( proxy_call * pc , const char * reason ) { <nl> | Fix all instances of bugprone - undefined - memory - manipulation | grpc/grpc | f8a4aae1197e586d5c34f1acff450eac4d764c4b | 2018-08-29T21:24:02Z |
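Most of the grpc change above is one pattern repeated: clang-tidy's bugprone-undefined-memory-manipulation flags memset over objects with non-trivial members (grpc_core::Thread, the call-details structs, and so on), because that is undefined behavior, and the cure is a constructor that establishes the zero state. A reduced illustration with invented types:

#include <string>

struct BadProxy {
  std::string name;  // non-trivial member: memset would wreck its internals
  void* server;
};

struct GoodProxy {
  GoodProxy() : name(), server(nullptr) {}  // well-defined "zeroed" state
  std::string name;
  void* server;
};

int main() {
  // BadProxy bp;
  // memset(&bp, 0, sizeof(bp));  // what the tidy check rejects: UB here
  GoodProxy gp;                   // the constructor does the zeroing instead
  return gp.server == nullptr ? 0 : 1;
}

Allocation changes in step: gpr_malloc plus memset becomes grpc_core::New<T>() and grpc_core::Delete, so the constructor and destructor actually run.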
mmm a / lib / SIL / SILPrinter . cpp <nl> ppp b / lib / SIL / SILPrinter . cpp <nl> void SILModule : : print ( SILPrintContext & PrintCtx , ModuleDecl * M , <nl> ! D - > isImplicit ( ) ) { <nl> if ( isa < AccessorDecl > ( D ) ) <nl> continue ; <nl> + <nl> + / / skip to visit ASTPrinter to avoid sil - opt prints duplicated import declarations <nl> + if ( auto importDecl = dyn_cast < ImportDecl > ( D ) ) { <nl> + StringRef importName = importDecl - > getModule ( ) - > getName ( ) . str ( ) ; <nl> + if ( importName = = BUILTIN_NAME | | <nl> + importName = = STDLIB_NAME | | <nl> + importName = = SWIFT_SHIMS_NAME ) <nl> + continue ; <nl> + } <nl> D - > print ( OS , Options ) ; <nl> OS < < " \ n \ n " ; <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . 61cce34ee9d4 <nl> mmm / dev / null <nl> ppp b / test / sil - opt / import - decls . silgen <nl> <nl> + / / RUN : % target - sil - opt % s | % FileCheck % s <nl> + <nl> + import Builtin <nl> + import Swift <nl> + <nl> + func foo ( ) { } <nl> + <nl> + / / CHECK : import Builtin <nl> + / / CHECK : import Swift <nl> + / / CHECK - NOT : import Builtin <nl> + / / CHECK - NOT : import Swift { { $ } } <nl> | Don ' t print duplicated import decls by sil - opt if input file is silgen | apple/swift | 802774c3270f07269930022b58e657a867496f27 | 2018-06-09T19:36:20Z |
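The sil-opt fix above filters ImportDecls for the Builtin, Swift, and SwiftShims modules before handing declarations to the AST printer, since SILPrinter already emits those imports itself and would otherwise print them twice (the new test checks exactly that). A simplified stand-in for the filtering logic, using an invented Decl record instead of the real swift::Decl machinery:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Reduced model: when dumping a module, skip import declarations whose
// module is already printed elsewhere (Builtin / Swift / SwiftShims in
// the actual patch). Types here are illustrative only.
struct Decl {
  bool is_import;
  std::string module_name;  // meaningful only when is_import
  std::string text;
};

void printModule(const std::vector<Decl>& decls) {
  static const char* kAlreadyPrinted[] = {"Builtin", "Swift", "SwiftShims"};
  for (const Decl& d : decls) {
    if (d.is_import) {
      bool skip = false;
      for (const char* name : kAlreadyPrinted)
        if (d.module_name == name) skip = true;
      if (skip) continue;  // avoid emitting the import a second time
    }
    std::cout << d.text << "\n\n";
  }
}

int main() {
  printModule({{true, "Swift", "import Swift"},        // skipped
               {true, "Foundation", "import Foundation"},
               {false, "", "func foo() {}"}});
}
```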
mmm a / util / aligned_buffer . h <nl> ppp b / util / aligned_buffer . h <nl> class AlignedBuffer { <nl> ~ static_cast < uintptr_t > ( alignment_ - 1 ) ) ; <nl> <nl> if ( copy_data ) { <nl> + assert ( bufstart_ + copy_offset + copy_len < = bufstart_ + cursize_ ) ; <nl> memcpy ( new_bufstart , bufstart_ + copy_offset , copy_len ) ; <nl> cursize_ = copy_len ; <nl> } else { <nl> mmm a / util / file_reader_writer . cc <nl> ppp b / util / file_reader_writer . cc <nl> Status FilePrefetchBuffer : : Prefetch ( RandomAccessFileReader * reader , <nl> chunk_len = buffer_len_ - chunk_offset_in_buffer ; <nl> assert ( chunk_offset_in_buffer % alignment = = 0 ) ; <nl> assert ( chunk_len % alignment = = 0 ) ; <nl> - copy_data_to_new_buffer = true ; <nl> + assert ( chunk_offset_in_buffer + chunk_len < = <nl> + buffer_offset_ + buffer_len_ ) ; <nl> + if ( chunk_len > 0 ) { <nl> + copy_data_to_new_buffer = true ; <nl> + } else { <nl> + / / this reset is not necessary , but just to be safe . <nl> + chunk_offset_in_buffer = 0 ; <nl> + } <nl> } <nl> } <nl> <nl> | Fix Copying of data between buffers in FilePrefetchBuffer ( ) | facebook/rocksdb | 440621aab8e2669d5f74985780ed1c2bec2dfbf3 | 2018-07-11T19:28:13Z |
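The FilePrefetchBuffer fix above tightens the copy of retained data into the freshly allocated buffer: it asserts that the chunk lies inside the valid region and skips the memcpy entirely when the chunk is empty (also resetting the offset, just to be safe). A reduced sketch of that defensive pattern, with an invented CopyChunk helper standing in for the real AlignedBuffer plumbing:

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>

// Shape of the fix: validate bounds before copying a retained chunk from
// the old prefetch buffer, and treat a zero-length chunk as a no-op.
void CopyChunk(char* dst, const char* src_base, size_t src_valid_len,
               size_t chunk_offset, size_t chunk_len) {
  assert(chunk_offset + chunk_len <= src_valid_len);  // stay in bounds
  if (chunk_len > 0) {                                // nothing retained: skip
    std::memcpy(dst, src_base + chunk_offset, chunk_len);
  }
}

int main() {
  char src[16] = "prefetched data";
  char dst[16] = {0};
  CopyChunk(dst, src, sizeof(src), 4, 8);  // copies "etched d"
  CopyChunk(dst, src, sizeof(src), 0, 0);  // degenerate chunk: no-op
  return dst[0] == 'e' ? 0 : 1;
}
```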
mmm a / tensorflow / python / keras / applications / mobilenet_v3 . py <nl> ppp b / tensorflow / python / keras / applications / mobilenet_v3 . py <nl> <nl> The following table describes the performance of MobileNets : <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> MACs stands for Multiply Adds <nl> - <nl> + <nl> | Classification Checkpoint | MACs ( M ) | Parameters ( M ) | Top1 Accuracy | Pixel1 CPU ( ms ) | <nl> | mmm | mmm | mmm | mmm | mmm | <nl> | mobilenet_v3_large_1 . 0_224 | 217 | 5 . 4 | 75 . 6 | 51 . 2 | <nl> <nl> <nl> Optionally loads weights pre - trained on ImageNet . <nl> <nl> - Note : each Keras Application expects a specific kind of input preprocessing . <nl> - For MobileNetV3 , call <nl> - ` tf . keras . applications . mobilenet_v3 . preprocess_input ` on your <nl> - inputs before passing them to the model . <nl> - <nl> Arguments : <nl> input_shape : Optional shape tuple , to be specified if you would <nl> like to use a model with an input image resolution that is not <nl> <nl> on the " top " layer . Ignored unless ` include_top = True ` . Set <nl> ` classifier_activation = None ` to return the logits of the " top " layer . <nl> <nl> + Call arguments : <nl> + inputs : A floating point ` numpy . array ` or a ` tf . Tensor ` , 4D with 3 color <nl> + channels , with values in the range [ 0 , 255 ] . <nl> + <nl> Returns : <nl> A ` keras . Model ` instance . <nl> <nl> def _inverted_res_block ( x , expansion , filters , kernel_size , stride , se_ratio , <nl> <nl> @ keras_export ( ' keras . applications . mobilenet_v3 . preprocess_input ' ) <nl> def preprocess_input ( x , data_format = None ) : # pylint : disable = unused - argument <nl> + " " " A placeholder method for backward compatibility . <nl> + <nl> + The preprocessing logic has been included in the mobilenet_v3 model <nl> + implementation . Users are no longer required to call this method to normalize <nl> + the input data . This method does nothing and only kept as a placeholder to <nl> + align the API surface between old and new version of model . <nl> + <nl> + Args : <nl> + x : A floating point ` numpy . array ` or a ` tf . Tensor ` . <nl> + data_format : Optional data format of the image tensor / array . Defaults to <nl> + None , in which case the global setting <nl> + ` tf . keras . backend . image_data_format ( ) ` is used ( unless you changed it , <nl> + it defaults to " channels_last " ) . { mode } <nl> + <nl> + Returns : <nl> + Unchanged ` numpy . array ` or ` tf . Tensor ` . <nl> + " " " <nl> + <nl> return x <nl> <nl> <nl> def decode_predictions ( preds , top = 5 ) : <nl> return imagenet_utils . decode_predictions ( preds , top = top ) <nl> <nl> <nl> - preprocess_input . __doc__ = imagenet_utils . PREPROCESS_INPUT_DOC . format ( <nl> - mode = ' ' , <nl> - ret = imagenet_utils . PREPROCESS_INPUT_RET_DOC_TF , <nl> - error = imagenet_utils . PREPROCESS_INPUT_ERROR_DOC ) <nl> decode_predictions . __doc__ = imagenet_utils . decode_predictions . __doc__ <nl> | Update mobilenet_v3 . preprocess_input docstring to reflect its real behavior . | tensorflow/tensorflow | 52cdef300393aa1e2a4220030f77839edbae8bd3 | 2020-12-08T20:12:21Z |
mmm a / src / mongo / db / auth / role_name . cpp <nl> ppp b / src / mongo / db / auth / role_name . cpp <nl> namespace mongo { <nl> _splitPoint = role . size ( ) ; <nl> } <nl> <nl> - RoleNameVectorIterator : : RoleNameVectorIterator ( <nl> - const std : : vector < RoleName > : : const_iterator & begin , <nl> - const std : : vector < RoleName > : : const_iterator & end ) : _begin ( begin ) , _end ( end ) { } <nl> - <nl> - RoleNameVectorIterator : : ~ RoleNameVectorIterator ( ) { } ; <nl> - <nl> - bool RoleNameVectorIterator : : more ( ) const { <nl> - return _begin ! = _end ; <nl> - } <nl> - <nl> - const RoleName & RoleNameVectorIterator : : next ( ) { <nl> - const RoleName & toReturn = get ( ) ; <nl> - + + _begin ; <nl> - return toReturn ; <nl> - } <nl> - <nl> - const RoleName & RoleNameVectorIterator : : get ( ) const { <nl> - return * _begin ; <nl> - } <nl> - <nl> - RoleNameIterator : : Impl * RoleNameVectorIterator : : doClone ( ) const { <nl> - return new RoleNameVectorIterator ( _begin , _end ) ; <nl> - } <nl> - <nl> std : : ostream & operator < < ( std : : ostream & os , const RoleName & name ) { <nl> return os < < name . getFullName ( ) ; <nl> } <nl> mmm a / src / mongo / db / auth / role_name . h <nl> ppp b / src / mongo / db / auth / role_name . h <nl> MONGO_HASH_NAMESPACE_END <nl> <nl> namespace mongo { <nl> <nl> - / / RoleNameIterator for iterating over a vector of RoleNames . <nl> - class RoleNameVectorIterator : public RoleNameIterator : : Impl { <nl> - MONGO_DISALLOW_COPYING ( RoleNameVectorIterator ) ; <nl> - <nl> + template < typename ContainerIterator > <nl> + class RoleNameContainerIteratorImpl : public RoleNameIterator : : Impl { <nl> + MONGO_DISALLOW_COPYING ( RoleNameContainerIteratorImpl ) ; <nl> public : <nl> - RoleNameVectorIterator ( const std : : vector < RoleName > : : const_iterator & begin , <nl> - const std : : vector < RoleName > : : const_iterator & end ) ; <nl> - <nl> - virtual ~ RoleNameVectorIterator ( ) ; <nl> - <nl> - virtual bool more ( ) const ; <nl> - <nl> - virtual const RoleName & next ( ) ; <nl> - <nl> - virtual const RoleName & get ( ) const ; <nl> + RoleNameContainerIteratorImpl ( const ContainerIterator & begin , <nl> + const ContainerIterator & end ) : <nl> + _curr ( begin ) , _end ( end ) { } <nl> + virtual ~ RoleNameContainerIteratorImpl ( ) { } <nl> + virtual bool more ( ) const { return _curr ! = _end ; } <nl> + virtual const RoleName & next ( ) { return * ( _curr + + ) ; } <nl> + virtual const RoleName & get ( ) const { return * _curr ; } <nl> + virtual RoleNameIterator : : Impl * doClone ( ) const { <nl> + return new RoleNameContainerIteratorImpl ( _curr , _end ) ; <nl> + } <nl> <nl> private : <nl> - virtual Impl * doClone ( ) const ; <nl> - <nl> - std : : vector < RoleName > : : const_iterator _begin ; <nl> - std : : vector < RoleName > : : const_iterator _end ; <nl> + ContainerIterator _curr ; <nl> + ContainerIterator _end ; <nl> } ; <nl> <nl> + template < typename ContainerIterator > <nl> + RoleNameIterator makeRoleNameIterator ( const ContainerIterator & begin , <nl> + const ContainerIterator & end ) { <nl> + return RoleNameIterator ( new RoleNameContainerIteratorImpl < ContainerIterator > ( begin , end ) ) ; <nl> + } <nl> + <nl> + template < typename Container > <nl> + RoleNameIterator makeRoleNameIteratorForContainer ( const Container & container ) { <nl> + return makeRoleNameIterator ( container . begin ( ) , container . 
end ( ) ) ; <nl> + } <nl> + <nl> } / / namespace mongo <nl> | SERVER - 10670 Templatize the RoleNameIteratorImpl for std containers . | mongodb/mongo | 07c18d3c1147bd11ea50902542432a178f4f685f | 2013-10-05T21:39:16Z |
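The mongo commit above replaces the vector-only RoleNameVectorIterator with a RoleNameContainerIteratorImpl template parameterised on the container's iterator type, plus makeRoleNameIterator/makeRoleNameIteratorForContainer helpers so callers never spell the template arguments. A reduced sketch of the same pattern over std::string values instead of RoleName:

```cpp
#include <iostream>
#include <list>
#include <string>
#include <vector>

// Abstract iteration interface, as in RoleNameIterator::Impl.
struct NameIterator {
  virtual ~NameIterator() {}
  virtual bool more() const = 0;
  virtual const std::string& next() = 0;
};

// One template implementation serves every standard container.
template <typename It>
struct ContainerNameIterator : NameIterator {
  ContainerNameIterator(It begin, It end) : curr_(begin), end_(end) {}
  bool more() const override { return curr_ != end_; }
  const std::string& next() override { return *(curr_++); }
  It curr_, end_;
};

// Factory helper deduces the iterator type, mirroring the patch.
template <typename Container>
ContainerNameIterator<typename Container::const_iterator>
makeNameIteratorForContainer(const Container& c) {
  return {c.begin(), c.end()};
}

int main() {
  std::vector<std::string> v{"read", "write"};
  std::list<std::string> l{"admin"};
  auto it1 = makeNameIteratorForContainer(v);  // same code path for
  auto it2 = makeNameIteratorForContainer(l);  // any container type
  while (it1.more()) std::cout << it1.next() << "\n";
  while (it2.more()) std::cout << it2.next() << "\n";
}
```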
mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> tf_http_archive ( <nl> name = " llvm " , <nl> build_file = clean_dep ( " / / third_party / llvm : llvm . autogenerated . BUILD " ) , <nl> - sha256 = " 65b48c80eba736ab834a9790b78a72cd0e3919b6dace44a96259d3e6936624ec " , <nl> - strip_prefix = " llvm - cfa2cf74cd9ba0e759974ce11bfd7b9e051dd8ff " , <nl> + sha256 = " 65a1aeb29e5940f9f480a41e904659d944e738458afd139caa7bde14bd6aab8a " , <nl> + strip_prefix = " llvm - 331ffd31b3dd49b3f02a27556938b836b679f564 " , <nl> urls = [ <nl> - " https : / / mirror . bazel . build / github . com / llvm - mirror / llvm / archive / cfa2cf74cd9ba0e759974ce11bfd7b9e051dd8ff . tar . gz " , <nl> - " https : / / github . com / llvm - mirror / llvm / archive / cfa2cf74cd9ba0e759974ce11bfd7b9e051dd8ff . tar . gz " , <nl> + " https : / / mirror . bazel . build / github . com / llvm - mirror / llvm / archive / 331ffd31b3dd49b3f02a27556938b836b679f564 . tar . gz " , <nl> + " https : / / github . com / llvm - mirror / llvm / archive / 331ffd31b3dd49b3f02a27556938b836b679f564 . tar . gz " , <nl> ] , <nl> ) <nl> <nl> | [ TF : XLA ] Bump open source llvm revision to r349610 | tensorflow/tensorflow | b185ef78847a5ba823348c55f0e5481e904dee27 | 2018-12-20T03:11:50Z |
mmm a / client / connpool . h <nl> ppp b / client / connpool . h <nl> namespace mongo { <nl> public : <nl> AScopedConnection ( ) { _numConnections + + ; } <nl> virtual ~ AScopedConnection ( ) { _numConnections - - ; } <nl> + <nl> virtual DBClientBase * get ( ) = 0 ; <nl> virtual void done ( ) = 0 ; <nl> virtual string getHost ( ) const = 0 ; <nl> + <nl> + / * * <nl> + * @ return true iff this has a connection to the db <nl> + * / <nl> + virtual bool ok ( ) const = 0 ; <nl> <nl> / * * <nl> * @ return total number of current instances of AScopedConnection <nl> namespace mongo { <nl> return _conn ; <nl> } <nl> <nl> + bool ok ( ) const { return _conn > 0 ; } <nl> + <nl> string getHost ( ) const { return _host ; } <nl> <nl> / * * Force closure of the connection . You should call this if you leave it in <nl> mmm a / s / shard . h <nl> ppp b / s / shard . h <nl> namespace mongo { <nl> _setVersion = false ; <nl> _finishedInit = true ; <nl> } <nl> + <nl> + bool ok ( ) const { return _conn > 0 ; } <nl> <nl> / * * <nl> this just passes through excpet it checks for stale configs <nl> | ok ( ) method on ScopedConnection to know if you actually have one or if done has been called | mongodb/mongo | 516600ebbd7831b5056a989cab5c59e919b42304 | 2011-04-21T18:57:44Z |
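The connpool change above adds an ok() query to the abstract AScopedConnection interface so callers can check whether the wrapper still holds a live connection, with each implementation testing its _conn member. A simplified sketch of that interface shape; the Connection type and the new/delete bodies here are placeholders, not the mongo implementation:

```cpp
#include <iostream>

struct Connection { /* stand-in for DBClientBase */ };

// Abstract scoped-connection interface gaining the ok() accessor.
struct AScopedConnection {
  virtual ~AScopedConnection() {}
  virtual Connection* get() = 0;
  virtual void done() = 0;
  virtual bool ok() const = 0;  // true iff this has a connection
};

struct ScopedDbConnection : AScopedConnection {
  Connection* conn_ = new Connection();
  Connection* get() override { return conn_; }
  void done() override { delete conn_; conn_ = nullptr; }
  bool ok() const override { return conn_ != nullptr; }
};

int main() {
  ScopedDbConnection c;
  std::cout << std::boolalpha << c.ok() << "\n";  // true: connection held
  c.done();
  std::cout << c.ok() << "\n";                    // false: released
}
```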
mmm a / src / caffe / layers / cudnn_conv_layer . cpp <nl> ppp b / src / caffe / layers / cudnn_conv_layer . cpp <nl> void CuDNNConvolutionLayer < Dtype > : : Reshape ( <nl> <nl> / / this is the total amount of storage needed over all groups + streams <nl> if ( total_max_workspace > workspaceSizeInBytes ) { <nl> - LOG ( INFO ) < < " Reallocating workspace storage : " < < total_max_workspace ; <nl> + DLOG ( INFO ) < < " Reallocating workspace storage : " < < total_max_workspace ; <nl> workspaceSizeInBytes = total_max_workspace ; <nl> <nl> / / free the existing workspace and allocate a new ( larger ) one <nl> | cuDNN : only log conv workspace in debug mode | BVLC/caffe | 9898794172b7def7a91d925d97e11dd0878ddb61 | 2015-10-23T02:12:48Z |
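The caffe change above demotes a per-Reshape message from LOG(INFO) to glog's DLOG(INFO), so workspace reallocations are reported only in debug builds rather than on every call in release. A stripped-down sketch of that compile-time logging switch — glog's real DLOG macro is more elaborate than this invented DLOG_INFO stand-in:

```cpp
#include <iostream>

#ifndef NDEBUG
#define DLOG_INFO(msg) (std::cerr << "[DEBUG] " << msg << "\n")
#else
#define DLOG_INFO(msg) ((void)0)  // compiled out entirely in release builds
#endif

int main() {
  int workspace_bytes = 1 << 20;
  // Visible only when built without -DNDEBUG:
  DLOG_INFO("Reallocating workspace storage: " << workspace_bytes);
}
```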
mmm a / src / builtins / builtins - collections - gen . cc <nl> ppp b / src / builtins / builtins - collections - gen . cc <nl> class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { <nl> / / of OrderedHashTable , it should be OrderedHashMap or OrderedHashSet . <nl> template < typename CollectionType > <nl> void FindOrderedHashTableEntry ( <nl> - Node * table , Node * hash , <nl> + const TNode < CollectionType > table , const TNode < IntPtrT > hash , <nl> const std : : function < void ( TNode < Object > , Label * , Label * ) > & key_compare , <nl> - Variable * entry_start_position , Label * entry_found , Label * not_found ) ; <nl> + TVariable < IntPtrT > * entry_start_position , Label * entry_found , <nl> + Label * not_found ) ; <nl> } ; <nl> <nl> template < typename CollectionType > <nl> void CollectionsBuiltinsAssembler : : FindOrderedHashTableEntry ( <nl> - Node * table , Node * hash , <nl> + const TNode < CollectionType > table , const TNode < IntPtrT > hash , <nl> const std : : function < void ( TNode < Object > , Label * , Label * ) > & key_compare , <nl> - Variable * entry_start_position , Label * entry_found , Label * not_found ) { <nl> + TVariable < IntPtrT > * entry_start_position , Label * entry_found , <nl> + Label * not_found ) { <nl> / / Get the index of the bucket . <nl> TNode < IntPtrT > const number_of_buckets = <nl> SmiUntag ( CAST ( UnsafeLoadFixedArrayElement ( <nl> - CAST ( table ) , CollectionType : : NumberOfBucketsIndex ( ) ) ) ) ; <nl> + table , CollectionType : : NumberOfBucketsIndex ( ) ) ) ) ; <nl> TNode < WordT > const bucket = <nl> WordAnd ( hash , IntPtrSub ( number_of_buckets , IntPtrConstant ( 1 ) ) ) ; <nl> TNode < IntPtrT > const first_entry = SmiUntag ( CAST ( UnsafeLoadFixedArrayElement ( <nl> - CAST ( table ) , bucket , <nl> - CollectionType : : HashTableStartIndex ( ) * kTaggedSize ) ) ) ; <nl> + table , bucket , CollectionType : : HashTableStartIndex ( ) * kTaggedSize ) ) ) ; <nl> <nl> / / Walk the bucket chain . <nl> TNode < IntPtrT > entry_start ; <nl> void CollectionsBuiltinsAssembler : : FindOrderedHashTableEntry ( <nl> var_entry . value ( ) , <nl> SmiUntag ( SmiAdd ( <nl> CAST ( UnsafeLoadFixedArrayElement ( <nl> - CAST ( table ) , CollectionType : : NumberOfElementsIndex ( ) ) ) , <nl> + table , CollectionType : : NumberOfElementsIndex ( ) ) ) , <nl> CAST ( UnsafeLoadFixedArrayElement ( <nl> - CAST ( table ) , <nl> - CollectionType : : NumberOfDeletedElementsIndex ( ) ) ) ) ) ) ) ; <nl> + table , CollectionType : : NumberOfDeletedElementsIndex ( ) ) ) ) ) ) ) ; <nl> <nl> / / Compute the index of the entry relative to kHashTableStartIndex . <nl> entry_start = <nl> void CollectionsBuiltinsAssembler : : FindOrderedHashTableEntry ( <nl> <nl> / / Load the key from the entry . <nl> TNode < Object > const candidate_key = UnsafeLoadFixedArrayElement ( <nl> - CAST ( table ) , entry_start , <nl> + table , entry_start , <nl> CollectionType : : HashTableStartIndex ( ) * kTaggedSize ) ; <nl> <nl> key_compare ( candidate_key , & if_key_found , & continue_next_entry ) ; <nl> void CollectionsBuiltinsAssembler : : FindOrderedHashTableEntry ( <nl> BIND ( & continue_next_entry ) ; <nl> / / Load the index of the next entry in the bucket chain . 
<nl> var_entry = SmiUntag ( CAST ( UnsafeLoadFixedArrayElement ( <nl> - CAST ( table ) , entry_start , <nl> + table , entry_start , <nl> ( CollectionType : : HashTableStartIndex ( ) + CollectionType : : kChainOffset ) * <nl> kTaggedSize ) ) ) ; <nl> <nl> void CollectionsBuiltinsAssembler : : FindOrderedHashTableEntry ( <nl> } <nl> <nl> BIND ( & if_key_found ) ; <nl> - entry_start_position - > Bind ( entry_start ) ; <nl> + * entry_start_position = entry_start ; <nl> Goto ( entry_found ) ; <nl> } <nl> <nl> | [ cleanup ] TNodify builtins - collections - gen . cc | v8/v8 | fcdae18e88c715f51de5473d72f967df9476fc8a | 2019-10-25T17:21:10Z |
mmm a / src / hydrogen - check - elimination . cc <nl> ppp b / src / hydrogen - check - elimination . cc <nl> class HCheckTable : public ZoneObject { <nl> } <nl> <nl> / / Global analysis : Copy state to successor block . <nl> - HCheckTable * Copy ( HBasicBlock * succ , Zone * zone ) { <nl> + HCheckTable * Copy ( HBasicBlock * succ , HBasicBlock * from_block , Zone * zone ) { <nl> HCheckTable * copy = new ( phase_ - > zone ( ) ) HCheckTable ( phase_ ) ; <nl> for ( int i = 0 ; i < size_ ; i + + ) { <nl> HCheckTableEntry * old_entry = & entries_ [ i ] ; <nl> class HCheckTable : public ZoneObject { <nl> } <nl> <nl> / / Global analysis : Merge this state with the other incoming state . <nl> - HCheckTable * Merge ( HBasicBlock * succ , HCheckTable * that , Zone * zone ) { <nl> + HCheckTable * Merge ( HBasicBlock * succ , HCheckTable * that , <nl> + HBasicBlock * that_block , Zone * zone ) { <nl> if ( that - > size_ = = 0 ) { <nl> / / If the other state is empty , simply reset . <nl> size_ = 0 ; <nl> mmm a / src / hydrogen - flow - engine . h <nl> ppp b / src / hydrogen - flow - engine . h <nl> class HFlowEngine { <nl> if ( SkipNonDominatedBlock ( root , block ) ) continue ; <nl> State * state = StateAt ( block ) ; <nl> <nl> - if ( block - > IsLoopHeader ( ) ) { <nl> - / / Apply loop effects before analyzing loop body . <nl> - ComputeLoopEffects ( block ) - > Apply ( state ) ; <nl> - } else { <nl> - / / Must have visited all predecessors before this block . <nl> - CheckPredecessorCount ( block ) ; <nl> - } <nl> + if ( block - > IsReachable ( ) ) { <nl> + if ( block - > IsLoopHeader ( ) ) { <nl> + / / Apply loop effects before analyzing loop body . <nl> + ComputeLoopEffects ( block ) - > Apply ( state ) ; <nl> + } else { <nl> + / / Must have visited all predecessors before this block . <nl> + CheckPredecessorCount ( block ) ; <nl> + } <nl> <nl> - / / Go through all instructions of the current block , updating the state . <nl> - for ( HInstructionIterator it ( block ) ; ! it . Done ( ) ; it . Advance ( ) ) { <nl> - state = state - > Process ( it . Current ( ) , zone_ ) ; <nl> + / / Go through all instructions of the current block , updating the state . <nl> + for ( HInstructionIterator it ( block ) ; ! it . Done ( ) ; it . Advance ( ) ) { <nl> + state = state - > Process ( it . Current ( ) , zone_ ) ; <nl> + } <nl> } <nl> <nl> / / Propagate the block state forward to all successor blocks . <nl> class HFlowEngine { <nl> SetStateAt ( succ , state ) ; <nl> } else { <nl> / / Successor needs a copy of the state . <nl> - SetStateAt ( succ , state - > Copy ( succ , zone_ ) ) ; <nl> + SetStateAt ( succ , state - > Copy ( succ , block , zone_ ) ) ; <nl> } <nl> } else { <nl> / / Merge the current state with the state already at the successor . <nl> - SetStateAt ( succ , state - > Merge ( succ , StateAt ( succ ) , zone_ ) ) ; <nl> + SetStateAt ( succ , StateAt ( succ ) - > Merge ( succ , state , block , zone_ ) ) ; <nl> } <nl> } <nl> } <nl> class HFlowEngine { <nl> i = member - > loop_information ( ) - > GetLastBackEdge ( ) - > block_id ( ) ; <nl> } else { <nl> / / Process all the effects of the block . <nl> + if ( member - > IsUnreachable ( ) ) continue ; <nl> ASSERT ( member - > current_loop ( ) = = loop ) ; <nl> for ( HInstructionIterator it ( member ) ; ! it . Done ( ) ; it . Advance ( ) ) { <nl> effects - > Process ( it . Current ( ) , zone_ ) ; <nl> mmm a / src / hydrogen - load - elimination . cc <nl> ppp b / src / hydrogen - load - elimination . 
cc <nl> class HLoadEliminationTable : public ZoneObject { <nl> return this ; <nl> } <nl> <nl> - / / Support for global analysis with HFlowEngine : Copy state to sucessor block . <nl> - HLoadEliminationTable * Copy ( HBasicBlock * succ , Zone * zone ) { <nl> + / / Support for global analysis with HFlowEngine : Copy state to successor <nl> + / / block . <nl> + HLoadEliminationTable * Copy ( HBasicBlock * succ , HBasicBlock * from_block , <nl> + Zone * zone ) { <nl> HLoadEliminationTable * copy = <nl> new ( zone ) HLoadEliminationTable ( zone , aliasing_ ) ; <nl> copy - > EnsureFields ( fields_ . length ( ) ) ; <nl> class HLoadEliminationTable : public ZoneObject { <nl> <nl> / / Support for global analysis with HFlowEngine : Merge this state with <nl> / / the other incoming state . <nl> - HLoadEliminationTable * Merge ( HBasicBlock * succ , <nl> - HLoadEliminationTable * that , Zone * zone ) { <nl> + HLoadEliminationTable * Merge ( HBasicBlock * succ , HLoadEliminationTable * that , <nl> + HBasicBlock * that_block , Zone * zone ) { <nl> if ( that - > fields_ . length ( ) < fields_ . length ( ) ) { <nl> / / Drop fields not in the other table . <nl> fields_ . Rewind ( that - > fields_ . length ( ) ) ; <nl> | Flow engine fixes : unreachable block processing , state merging . | v8/v8 | 5dafb426b3f118d5d524312e7357f3b26f489e52 | 2014-01-28T16:57:39Z |
mmm a / src / Common / tests / average . cpp <nl> ppp b / src / Common / tests / average . cpp <nl> Float NO_INLINE really_unrolled ( const PODArray < UInt8 > & keys , const PODArray < Flo <nl> <nl> struct State4 <nl> { <nl> - Float sum [ 4 ] = { 0 , 0 , 0 , 0 } ; <nl> - size_t count [ 4 ] = { 0 , 0 , 0 , 0 } ; <nl> + Float sum [ 4 ] { } ; <nl> + size_t count [ 4 ] { } ; <nl> <nl> template < UInt32 idx > <nl> void add ( Float value ) <nl> Float NO_INLINE another_unrolled_x4 ( const PODArray < UInt8 > & keys , const PODArray <nl> { <nl> State4 map [ 256 ] { } ; <nl> <nl> - size_t size = keys . size ( ) & ~ size_t ( 3 ) ; <nl> - for ( size_t i = 0 ; i < size ; i + = 4 ) <nl> + size_t size = keys . size ( ) / 4 * 4 ; <nl> + for ( size_t i = 0 ; i < size ; i + = 4 ) <nl> { <nl> map [ keys [ i ] ] . add < 0 > ( values [ i ] ) ; <nl> - map [ keys [ i + 1 ] ] . add < 1 > ( values [ i ] ) ; <nl> - map [ keys [ i + 2 ] ] . add < 2 > ( values [ i ] ) ; <nl> - map [ keys [ i + 3 ] ] . add < 3 > ( values [ i ] ) ; <nl> + map [ keys [ i + 1 ] ] . add < 1 > ( values [ i ] ) ; <nl> + map [ keys [ i + 2 ] ] . add < 2 > ( values [ i ] ) ; <nl> + map [ keys [ i + 3 ] ] . add < 3 > ( values [ i ] ) ; <nl> } <nl> <nl> / / / tail <nl> | Style | ClickHouse/ClickHouse | 448c87363c1e466c90a71b907397813dd3cc565f | 2020-10-20T01:29:34Z |
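The ClickHouse style pass above leans on two small idioms: value-initialization (Float sum[4]{}) zeroes an array without spelling {0, 0, 0, 0}, and size / 4 * 4 rounds the element count down to a multiple of four for the hand-unrolled loop, which reads more plainly than the equivalent bit trick size &amp; ~size_t(3). Both in one self-contained sketch:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // Idiom 1: value-initialization zeroes every element.
  double sum[4]{};
  size_t count[4]{};

  // Idiom 2: round down to a multiple of 4 for the unrolled body,
  // then handle the remaining 0..3 elements in a scalar tail.
  size_t n = 10;
  size_t unrolled = n / 4 * 4;  // 8
  size_t i = 0;
  for (; i < unrolled; i += 4) { /* process 4 elements per iteration */ }
  for (; i < n; ++i) { /* scalar tail */ }

  std::printf("%zu %f %zu\n", unrolled, sum[0], count[0]);
}
```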
mmm a / examples / addressbook . proto <nl> ppp b / examples / addressbook . proto <nl> <nl> / / See README . txt for information and build instructions . <nl> - <nl> + / / <nl> + / / Note : START and END tags are used in comments to define sections used in <nl> + / / tutorials . They are not part of the syntax for Protocol Buffers . <nl> + / / <nl> + / / To get an in - depth walkthrough of this file and the related examples , see : <nl> + / / https : / / developers . google . com / protocol - buffers / docs / tutorials <nl> + <nl> + / / [ START declaration ] <nl> syntax = " proto3 " ; <nl> - <nl> package tutorial ; <nl> + / / [ END declaration ] <nl> <nl> + / / [ START java_declaration ] <nl> option java_package = " com . example . tutorial " ; <nl> option java_outer_classname = " AddressBookProtos " ; <nl> + / / [ END java_declaration ] <nl> + <nl> + / / [ START csharp_declaration ] <nl> option csharp_namespace = " Google . Protobuf . Examples . AddressBook " ; <nl> + / / [ END csharp_declaration ] <nl> <nl> + / / [ START messages ] <nl> message Person { <nl> string name = 1 ; <nl> - int32 id = 2 ; / / Unique ID number for this person . <nl> + int32 id = 2 ; / / Unique ID number for this person . <nl> string email = 3 ; <nl> <nl> enum PhoneType { <nl> message Person { <nl> message AddressBook { <nl> repeated Person people = 1 ; <nl> } <nl> + / / [ END messages ] <nl> | Add region tags for protocol buffers tutorials . | protocolbuffers/protobuf | bc4723481a8ee30b918c9938e25b7e4ba8282893 | 2015-12-02T01:07:18Z |
deleted file mode 100644 <nl> index 165ce5695d . . 0000000000 <nl> Binary files a / test / screenshot / border - android . png and / dev / null differ <nl> | * [ android ] update screenshot | apache/incubator-weex | 2e4d79915802fd11fed6cdecb70ff9347101753b | 2017-09-11T03:56:44Z |
mmm a / js / server / tests / shell - foxx . js <nl> ppp b / js / server / tests / shell - foxx . js <nl> function DocumentationAndConstraintsSpec ( ) { <nl> <nl> assertEqual ( routes . length , 1 ) ; <nl> <nl> - assertEqual ( routes [ 0 ] . url . constraint . foxx , " / . + / " ) ; <nl> + assertEqual ( routes [ 0 ] . url . constraint . foxx , " / [ ^ / ] + / " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 0 ] . paramType , " path " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 0 ] . name , " foxx " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 0 ] . description , " Kind of Foxx " ) ; <nl> function DocumentationAndConstraintsSpec ( ) { <nl> <nl> assertEqual ( routes . length , 1 ) ; <nl> <nl> - assertEqual ( routes [ 0 ] . url . constraint . foxxParam , " / . + / " ) ; <nl> + assertEqual ( routes [ 0 ] . url . constraint . foxxParam , " / [ ^ / ] + / " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 0 ] . paramType , " path " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 0 ] . name , " foxxParam " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 0 ] . description , " Kind of Foxx " ) ; <nl> | Adjust tests to @ fceller ' s bug fix | arangodb/arangodb | cec32b981a596f42ed8aa2ed3145f94828d29708 | 2014-06-12T12:24:50Z |
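The Foxx test adjustment above tracks a constraint change for path parameters: the pattern moves from /.+/ to /[^/]+/ so a parameter matches exactly one URL segment instead of greedily swallowing slashes. The difference demonstrated with std::regex — the real constraint lives in the JavaScript router, so this is a language-neutral illustration:

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
  std::regex greedy(".+");      // old: also matches "foxx/extra/parts"
  std::regex segment("[^/]+");  // new: a single path segment only

  std::string one = "foxx", many = "foxx/extra/parts";
  std::cout << std::boolalpha
            << std::regex_match(one, greedy) << " "     // true
            << std::regex_match(many, greedy) << "\n"   // true (too permissive)
            << std::regex_match(one, segment) << " "    // true
            << std::regex_match(many, segment) << "\n"; // false (the fix)
}
```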
mmm a / tools / jenkins - scripts / cocos - console - test . py <nl> ppp b / tools / jenkins - scripts / cocos - console - test . py <nl> <nl> import socket <nl> import smtplib <nl> from email . mime . text import MIMEText <nl> + from os . path import join , getsize <nl> <nl> # default console_param . <nl> console_param = ' [ console run ] ' <nl> class ENUM_PARAM : <nl> <nl> # now cocos2d - console suport different run on Platforms , e . g : only run android on win <nl> runSupport = { <nl> - ' darwin ' : { ' mac ' : 1 , ' ios ' : 1 , ' android ' : 0 } , <nl> + ' darwin ' : { ' mac ' : 1 , ' ios ' : 1 , ' android ' : 1 } , <nl> ' win ' : { ' mac ' : 0 , ' ios ' : 0 , ' android ' : 1 } , <nl> ' linux ' : { ' mac ' : 0 , ' ios ' : 0 , ' android ' : 1 } <nl> } <nl> def appendToResult ( content ) : <nl> global console_result <nl> console_result = console_result + content <nl> <nl> + # if any error <nl> + ANY_ERROR_IN_RUN = 0 <nl> # excute cocos command <nl> def cocos_project ( level ) : <nl> + global ANY_ERROR_IN_RUN <nl> print ' will excute cocos_command : ' , COCOS_CMD [ level ] , level <nl> appendToResult ( ' will excute ' + COCOS_CMD [ level ] + ' command : ' + " \ n \ r \ t " ) <nl> for proj in project_types : <nl> def cocos_project ( level ) : <nl> time . sleep ( 12 ) <nl> addConsoleListenOnTCP ( proj ) <nl> print ' create project ' , proj , ' is : ' , not info_create <nl> + ANY_ERROR_IN_RUN = ANY_ERROR_IN_RUN + info_create <nl> appendToResult ( ' ' + cmd + ' : ' + str ( not info_create ) + " . \ n \ r \ t " ) <nl> else : <nl> for phone in phonePlats : <nl> def cocos_project ( level ) : <nl> if runSupport [ curPlat ] [ phone ] : <nl> info_cmd = os . system ( cmd ) <nl> print ' info ' + COCOS_CMD [ level ] + ' : ' , not info_cmd <nl> - else : <nl> + appendToResult ( ' ' + cmd + ' : ' + str ( not info_cmd ) + " . \ n \ r \ t " ) <nl> + else : <nl> if runSupport [ curPlat ] [ phone ] : <nl> if phone = = ' android ' and getAndroidDevices ( ) = = 0 : <nl> - print ' no android device , please checkout the device is running ok . ' <nl> - continue <nl> - info_cmd = os . system ( cmd ) <nl> - print ' info ' + COCOS_CMD [ level ] + ' : ' , not info_cmd <nl> - if level = = ENUM_PARAM . run : <nl> - time . sleep ( 20 ) <nl> - strClose = close_proj ( proj , phone ) <nl> - appendToResult ( ' ' + strClose + " \ n \ r \ t " ) <nl> - appendToResult ( ' ' + cmd + ' : ' + str ( not info_cmd ) + " . \ n \ r \ t " ) <nl> + strInfo = ' no android device , please checkout the device is running ok . ' <nl> + print strInfo <nl> + # appendToResult ( ' ' + strInfo + " \ n \ r \ t " ) <nl> + else : <nl> + info_cmd = os . system ( cmd ) <nl> + print ' info ' + COCOS_CMD [ level ] + ' : ' , not info_cmd <nl> + if level = = ENUM_PARAM . run : <nl> + time . sleep ( 20 ) <nl> + strClose = close_proj ( proj , phone ) <nl> + appendToResult ( ' ' + strClose + " \ n \ r \ t " ) <nl> + appendToResult ( ' ' + cmd + ' : ' + str ( not info_cmd ) + " . \ n \ r \ t " ) <nl> <nl> # build and run according to params of provided . ( lv_ignore : e . g : ignore new ) <nl> def build_run ( lv_ignore ) : <nl> def start_android_simulator ( ) : <nl> return <nl> if cocos_param > = LEVEL_COCOS [ ENUM_PARAM . deploy ] : <nl> cmd_start = [ ' emulator - avd ' + ANDROID_SIMULATOR_NAME ] <nl> - print ' cmd_start : ' , cmd_start <nl> - info_start = subprocess . Popen ( cmd_start , stdin = subprocess . PIPE , shell = True , stdout = subprocess . PIPE , stderr = subprocess . 
PIPE ) <nl> - print ' start an android simulator : ' , not info_start <nl> + # print ' cmd_start : ' , cmd_start <nl> + # info_start = subprocess . Popen ( cmd_start , stdin = subprocess . PIPE , shell = True , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) <nl> + # print ' start an android simulator : ' , not info_start <nl> <nl> # send email <nl> EMAIL_KEYS = { <nl> def send_mail ( to_list , sub , title , content ) : <nl> def sendEmail ( msg ) : <nl> send_mail ( OBJ_EMAIL_INFO [ EMAIL_KEYS [ 4 ] ] , " cocos - console - test result " , ' for error . ' , msg ) <nl> <nl> + # get package size <nl> + def getdirsize ( dir ) : <nl> + size = 0L <nl> + for root , dirs , files in os . walk ( dir ) : <nl> + size + = sum ( [ getsize ( join ( root , name ) ) for name in files ] ) <nl> + return size <nl> + APP_FILE_DIR = { <nl> + ' cpp ' : ' bin / debug / ' , <nl> + ' lua ' : ' runtime / ' <nl> + } <nl> + APP_FILE_SUFFIX = { <nl> + ' mac ' : ' . app ' , <nl> + ' ios ' : ' . app ' , <nl> + ' android ' : ' - debug - unaligned . apk ' <nl> + } <nl> + def getPackageSize ( ) : <nl> + for proj in project_types : <nl> + for phone in phonePlats : <nl> + # if runSupport [ curPlat ] [ phone ] : <nl> + package_path = ' . / ' + proj + PROJ_SUFFIX + ' / ' + APP_FILE_DIR [ proj ] + phone + ' / ' + proj + PROJ_SUFFIX + APP_FILE_SUFFIX [ phone ] <nl> + print ' package_path ' , package_path <nl> + package_size = 0 <nl> + if os . path . isfile ( package_path ) : <nl> + package_size = os . path . getsize ( package_path ) ; <nl> + else : <nl> + package_size = getdirsize ( package_path ) ; <nl> + strSize = ' size of ' + proj + PROJ_SUFFIX + ' ' + phone + ' is : ' + str ( package_size / ( 1024 ) ) + ' KB ' + ' \ n \ t ' <nl> + print ' strSize : ' , strSize <nl> + appendToResult ( strSize ) <nl> + <nl> def main ( ) : <nl> print ' in main : ' <nl> # start_android_simulator ( ) <nl> print ' will build_run : ' <nl> build_run ( - 1 ) <nl> - print ' end build run . ' <nl> + print ' ANY_ERROR_IN_RUN : ' , ANY_ERROR_IN_RUN <nl> + print ' end build run . and get package size . ' <nl> + getPackageSize ( ) <nl> print ' will send email : ' <nl> - if OBJ_EMAIL_INFO [ EMAIL_KEYS [ 5 ] ] : <nl> - sendEmail ( console_result ) <nl> print ' console_result : ' , console_result <nl> + if OBJ_EMAIL_INFO [ EMAIL_KEYS [ 5 ] ] or ANY_ERROR_IN_RUN : <nl> + sendEmail ( console_result ) <nl> <nl> # mmmmmmmmmmmm - - main mmmmmmmmmmmm - - <nl> if __name__ = = ' __main__ ' : <nl> | [ ci skip ] , add get package size in cocos - console - test . py . | cocos2d/cocos2d-x | 11af83dbf6d1a673148f37225a290db33c53cadc | 2014-04-08T10:02:12Z |
mmm a / include / swift / Basic / OwnedString . h <nl> ppp b / include / swift / Basic / OwnedString . h <nl> class OwnedString { <nl> void initialize ( const char * Data , size_t Length , StringOwnership Ownership ) { <nl> this - > Length = Length ; <nl> this - > Ownership = Ownership ; <nl> - assert ( Length > = 0 & & " expected length to be non - negative " ) ; <nl> if ( Ownership = = StringOwnership : : Copied & & Data ) { <nl> char * substring = static_cast < char * > ( malloc ( Length + 1 ) ) ; <nl> assert ( substring & & " expected successful malloc of copy " ) ; <nl> mmm a / lib / FrontendTool / FrontendTool . cpp <nl> ppp b / lib / FrontendTool / FrontendTool . cpp <nl> class JSONFixitWriter <nl> / / This is a separate function so that it shows up in stack traces . <nl> LLVM_ATTRIBUTE_NOINLINE <nl> static void debugFailWithAssertion ( ) { <nl> - / / This assertion should always fail , per the user ' s request , and should <nl> - / / not be converted to llvm_unreachable . <nl> - assert ( 0 & & " This is an assertion ! " ) ; <nl> + / / Per the user ' s request , this assertion should always fail in <nl> + / / builds with assertions enabled . <nl> + <nl> + / / This should not be converted to llvm_unreachable , as those are <nl> + / / treated as optimization hints in builds where they turn into <nl> + / / __builtin_unreachable ( ) . <nl> + assert ( ( 0 ) & & " This is an assertion ! " ) ; <nl> } <nl> <nl> / / This is a separate function so that it shows up in stack traces . <nl> mmm a / lib / Sema / CSDiag . cpp <nl> ppp b / lib / Sema / CSDiag . cpp <nl> bool FailureDiagnosis : : visitUnresolvedMemberExpr ( UnresolvedMemberExpr * E ) { <nl> case CC_SelfMismatch : / / Self argument mismatches . <nl> case CC_ArgumentNearMismatch : / / Argument list mismatch . <nl> case CC_ArgumentMismatch : / / Argument list mismatch . <nl> - assert ( 0 & & " These aren ' t produced by filterContextualMemberList " ) ; <nl> + llvm_unreachable ( " These aren ' t produced by filterContextualMemberList " ) ; <nl> return false ; <nl> <nl> case CC_ExactMatch : { / / This is a perfect match for the arguments . <nl> mmm a / lib / Sema / TypeCheckSwitchStmt . cpp <nl> ppp b / lib / Sema / TypeCheckSwitchStmt . cpp <nl> namespace { <nl> break ; <nl> case SpaceKind : : Disjunct : { <nl> if ( forDisplay ) { <nl> - assert ( false & & " Attempted to display disjunct to user ! " ) ; <nl> + llvm_unreachable ( " Attempted to display disjunct to user ! " ) ; <nl> } else { <nl> buffer < < " DISJOIN ( " ; <nl> for ( auto & sp : Spaces ) { <nl> | Squelch some warnings . | apple/swift | d8f492e5d78881f68f44ee5b57fff4546b6f41b2 | 2017-12-10T23:21:19Z |
deleted file mode 100644 <nl> index c54e65b80bc . . 00000000000 <nl> mmm a / modules / core / doc / command_line_parser . rst <nl> ppp / dev / null <nl> <nl> - Command Line Parser <nl> - = = = = = = = = = = = = = = = = = = = <nl> - <nl> - . . highlight : : cpp <nl> - <nl> - CommandLineParser <nl> mmmmmmmmm <nl> - . . ocv : class : : CommandLineParser <nl> - <nl> - The CommandLineParser class is designed for command line arguments parsing <nl> - <nl> - <nl> - . . ocv : function : : CommandLineParser : : CommandLineParser ( int argc , const char * const argv [ ] , const std : : string keys ) <nl> - <nl> - : param argc : <nl> - : param argv : <nl> - : param keys : <nl> - <nl> - . . ocv : function : : T CommandLineParser : : get < T > ( const std : : string & name , bool space_delete = true ) <nl> - <nl> - : param name : <nl> - : param space_delete : <nl> - <nl> - . . ocv : function : : T CommandLineParser : : get < T > ( int index , bool space_delete = true ) <nl> - <nl> - : param index : <nl> - : param space_delete : <nl> - <nl> - . . ocv : function : : bool CommandLineParser : : has ( const std : : string & name ) <nl> - <nl> - : param name : <nl> - <nl> - . . ocv : function : : bool CommandLineParser : : check ( ) <nl> - <nl> - <nl> - . . ocv : function : : void CommandLineParser : : about ( std : : string message ) <nl> - <nl> - : param message : <nl> - <nl> - . . ocv : function : : void CommandLineParser : : printMessage ( ) <nl> - <nl> - . . ocv : function : : void CommandLineParser : : printErrors ( ) <nl> - <nl> - . . ocv : function : : std : : string CommandLineParser : : getPathToApplication ( ) <nl> - <nl> - <nl> - The sample below demonstrates how to use CommandLineParser : <nl> - <nl> - : : <nl> - <nl> - CommandLineParser parser ( argc , argv , keys ) ; <nl> - parser . about ( " Application name v1 . 0 . 0 " ) ; <nl> - <nl> - if ( parser . has ( " help " ) ) <nl> - { <nl> - parser . printMessage ( ) ; <nl> - return 0 ; <nl> - } <nl> - <nl> - int N = parser . get < int > ( " N " ) ; <nl> - double fps = parser . get < double > ( " fps " ) ; <nl> - std : : string path = parser . get < std : : string > ( " path " ) ; <nl> - <nl> - use_time_stamp = parser . has ( " timestamp " ) ; <nl> - <nl> - std : : string img1 = parser . get < string > ( 0 ) ; <nl> - std : : string img2 = parser . get < string > ( 1 ) ; <nl> - <nl> - int repeat = parser . get < int > ( 2 ) ; <nl> - <nl> - if ( ! parser . check ( ) ) <nl> - { <nl> - parser . printErrors ( ) ; <nl> - return 0 ; <nl> - } <nl> - <nl> - Syntax : <nl> - <nl> - : : <nl> - <nl> - const std : : string keys = <nl> - " { help h usage ? | | print this message } " <nl> - " { @ image1 | | image1 for compare } " <nl> - " { @ image2 | | image2 for compare } " <nl> - " { @ repeat | 1 | number } " <nl> - " { path | . | path to file } " <nl> - " { fps | - 1 . 0 | fps for output video } " <nl> - " { N count | 100 | count of objects } " <nl> - " { ts timestamp | | use time stamp } " <nl> - ; <nl> - <nl> - Use : <nl> - <nl> - : : <nl> - <nl> - # . / app - N = 200 1 . png 2 . jpg 19 - ts <nl> - <nl> - # . / app - fps = aaa <nl> - ERRORS : <nl> - Exception : can not convert : [ aaa ] to [ double ] <nl> - <nl> mmm a / modules / core / doc / core . rst <nl> ppp b / modules / core / doc / core . rst <nl> core . The Core Functionality <nl> : maxdepth : 2 <nl> <nl> basic_structures <nl> - command_line_parser <nl> old_basic_structures <nl> dynamic_structures <nl> operations_on_arrays <nl> mmm a / modules / ts / src / ts_perf . 
cpp <nl> ppp b / modules / ts / src / ts_perf . cpp <nl> unsigned int TestBase : : iterationsLimitDefault = ( unsigned int ) ( - 1 ) ; <nl> int64 TestBase : : _timeadjustment = 0 ; <nl> <nl> const std : : string command_line_keys = <nl> - " { perf_max_outliers | 8 | percent of allowed outliers } " <nl> - " { perf_min_samples | 10 | minimal required numer of samples } " <nl> - " { perf_force_samples | 100 | force set maximum number of samples for all tests } " <nl> - " { perf_seed | 809564 | seed for random numbers generator } " <nl> - " { perf_threads | - 1 | the number of worker threads , if parallel execution is enabled } " <nl> - " { perf_write_sanity | | allow to create new records for sanity checks } " <nl> + " { | perf_max_outliers | 8 | percent of allowed outliers } " <nl> + " { | perf_min_samples | 10 | minimal required numer of samples } " <nl> + " { | perf_force_samples | 100 | force set maximum number of samples for all tests } " <nl> + " { | perf_seed | 809564 | seed for random numbers generator } " <nl> + " { | perf_threads | - 1 | the number of worker threads , if parallel execution is enabled } " <nl> + " { | perf_write_sanity | | allow to create new records for sanity checks } " <nl> # ifdef ANDROID <nl> - " { perf_time_limit | 6 . 0 | default time limit for a single test ( in seconds ) } " <nl> - " { perf_affinity_mask | 0 | set affinity mask for the main thread } " <nl> - " { perf_log_power_checkpoints | | additional xml logging for power measurement } " <nl> + " { | perf_time_limit | 6 . 0 | default time limit for a single test ( in seconds ) } " <nl> + " { | perf_affinity_mask | 0 | set affinity mask for the main thread } " <nl> + " { | perf_log_power_checkpoints | | additional xml logging for power measurement } " <nl> # else <nl> - " { perf_time_limit | 3 . 0 | default time limit for a single test ( in seconds ) } " <nl> + " { | perf_time_limit | 3 . 0 | default time limit for a single test ( in seconds ) } " <nl> # endif <nl> - " { perf_max_deviation | 1 . 0 | } " <nl> - " { help h | | print help info } " <nl> + " { | perf_max_deviation | 1 . 0 | } " <nl> + " { h | help | | print help info } " <nl> # ifdef HAVE_CUDA <nl> - " { perf_run_cpu | false | run GPU performance tests for analogical CPU functions } " <nl> - " { perf_cuda_device | 0 | run GPU test suite onto specific CUDA capable device } " <nl> - " { perf_cuda_info_only | false | print an information about system and an available CUDA devices and then exit . } " <nl> + " { | perf_run_cpu | false | run GPU performance tests for analogical CPU functions } " <nl> + " { | perf_cuda_device | 0 | run GPU test suite onto specific CUDA capable device } " <nl> + " { | perf_cuda_info_only | false | print an information about system and an available CUDA devices and then exit . } " <nl> # endif <nl> ; <nl> <nl> performance_metrics : : performance_metrics ( ) <nl> <nl> void TestBase : : Init ( int argc , const char * const argv [ ] ) <nl> { <nl> - cv : : CommandLineParser args ( argc , argv , command_line_keys ) ; <nl> - if ( args . has ( " help " ) ) <nl> + cv : : CommandLineParser args ( argc , argv , command_line_keys . c_str ( ) ) ; <nl> + if ( args . get < bool > ( " help " ) ) <nl> { <nl> - args . printMessage ( ) ; <nl> + args . printParams ( ) ; <nl> + printf ( " \ n \ n " ) ; <nl> return ; <nl> } <nl> <nl> void TestBase : : Init ( int argc , const char * const argv [ ] ) <nl> param_max_outliers = std : : min ( 100 . , std : : max ( 0 . , args . 
get < double > ( " perf_max_outliers " ) ) ) ; <nl> param_min_samples = std : : max ( 1u , args . get < unsigned int > ( " perf_min_samples " ) ) ; <nl> param_max_deviation = std : : max ( 0 . , args . get < double > ( " perf_max_deviation " ) ) ; <nl> - param_seed = args . get < unsigned long long > ( " perf_seed " ) ; <nl> + param_seed = args . get < uint64 > ( " perf_seed " ) ; <nl> param_time_limit = std : : max ( 0 . , args . get < double > ( " perf_time_limit " ) ) ; <nl> param_force_samples = args . get < unsigned int > ( " perf_force_samples " ) ; <nl> - param_write_sanity = args . has ( " perf_write_sanity " ) ; <nl> + param_write_sanity = args . get < bool > ( " perf_write_sanity " ) ; <nl> param_threads = args . get < int > ( " perf_threads " ) ; <nl> # ifdef ANDROID <nl> param_affinity_mask = args . get < int > ( " perf_affinity_mask " ) ; <nl> - log_power_checkpoints = args . has ( " perf_log_power_checkpoints " ) ; <nl> + log_power_checkpoints = args . get < bool > ( " perf_log_power_checkpoints " ) ; <nl> # endif <nl> <nl> # ifdef HAVE_CUDA <nl> <nl> - bool printOnly = args . has ( " perf_cuda_info_only " ) ; <nl> + bool printOnly = args . get < bool > ( " perf_cuda_info_only " ) ; <nl> <nl> if ( printOnly ) <nl> exit ( 0 ) ; <nl> <nl> - param_run_cpu = args . has ( " perf_run_cpu " ) ; <nl> + param_run_cpu = args . get < bool > ( " perf_run_cpu " ) ; <nl> param_cuda_device = std : : max ( 0 , std : : min ( cv : : gpu : : getCudaEnabledDeviceCount ( ) , args . get < int > ( " perf_cuda_device " ) ) ) ; <nl> <nl> if ( param_run_cpu ) <nl> void TestBase : : Init ( int argc , const char * const argv [ ] ) <nl> } <nl> # endif <nl> <nl> - if ( ! args . check ( ) ) <nl> - { <nl> - args . printErrors ( ) ; <nl> - return ; <nl> - } <nl> + / / if ( ! args . check ( ) ) <nl> + / / { <nl> + / / args . printErrors ( ) ; <nl> + / / return ; <nl> + / / } <nl> <nl> timeLimitDefault = param_time_limit = = 0 . 0 ? 1 : ( int64 ) ( param_time_limit * cv : : getTickFrequency ( ) ) ; <nl> iterationsLimitDefault = param_force_samples = = 0 ? ( unsigned ) ( - 1 ) : param_force_samples ; <nl> mmm a / samples / c / bgfg_codebook . cpp <nl> ppp b / samples / c / bgfg_codebook . cpp <nl> static void help ( ) <nl> / / <nl> const char * keys = <nl> { <nl> - " { nf nframes | 300 | frames number } " <nl> - " { c camera | false | use the camera or not } " <nl> - " { mf movie_file | tree . avi | used movie video file } " <nl> + " { nf | nframes | 300 | frames number } " <nl> + " { c | camera | false | use the camera or not } " <nl> + " { mf | movie_file | tree . avi | used movie video file } " <nl> } ; <nl> int main ( int argc , const char * * argv ) <nl> { <nl> int main ( int argc , const char * * argv ) <nl> <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> int nframesToLearnBG = parser . get < int > ( " nf " ) ; <nl> - bool useCamera = parser . has ( " c " ) ; <nl> + bool useCamera = parser . get < bool > ( " c " ) ; <nl> string filename = parser . get < string > ( " mf " ) ; <nl> IplImage * rawImage = 0 , * yuvImage = 0 ; / / yuvImage is for codebook method <nl> IplImage * ImaskCodeBook = 0 , * ImaskCodeBookCC = 0 ; <nl> mmm a / samples / cpp / bgfg_segm . cpp <nl> ppp b / samples / cpp / bgfg_segm . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { c camera | | use camera or not } " <nl> - " { fn file_name | tree . avi | movie file } " <nl> + " { c | camera | true | use camera or not } " <nl> + " { fn | file_name | tree . 
avi | movie file } " <nl> } ; <nl> <nl> / / this is a sample for foreground detection functions <nl> int main ( int argc , const char * * argv ) <nl> help ( ) ; <nl> <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - bool useCamera = parser . has ( " camera " ) ; <nl> + bool useCamera = parser . get < bool > ( " camera " ) ; <nl> string file = parser . get < string > ( " file_name " ) ; <nl> VideoCapture cap ; <nl> bool update_bg_model = true ; <nl> int main ( int argc , const char * * argv ) <nl> cap . open ( 0 ) ; <nl> else <nl> cap . open ( file . c_str ( ) ) ; <nl> - <nl> - parser . printMessage ( ) ; <nl> + parser . printParams ( ) ; <nl> <nl> if ( ! cap . isOpened ( ) ) <nl> { <nl> mmm a / samples / cpp / brief_match_test . cpp <nl> ppp b / samples / cpp / brief_match_test . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ first_image | box . png | the first image } " <nl> - " { @ second_image | box_in_scene . png | the second image } " <nl> + " { 1 | | box . png | the first image } " <nl> + " { 2 | | box_in_scene . png | the second image } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> int main ( int argc , const char * * argv ) <nl> <nl> help ( ) ; <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - string im1_name = parser . get < string > ( 1 ) ; <nl> - string im2_name = parser . get < string > ( 2 ) ; <nl> + string im1_name = parser . get < string > ( " 1 " ) ; <nl> + string im2_name = parser . get < string > ( " 2 " ) ; <nl> <nl> Mat im1 = imread ( im1_name , CV_LOAD_IMAGE_GRAYSCALE ) ; <nl> Mat im2 = imread ( im2_name , CV_LOAD_IMAGE_GRAYSCALE ) ; <nl> int main ( int argc , const char * * argv ) <nl> { <nl> cout < < " could not open one of the images . . . " < < endl ; <nl> cout < < " the cmd parameters have next current value : " < < endl ; <nl> - parser . printMessage ( ) ; <nl> + parser . printParams ( ) ; <nl> return 1 ; <nl> } <nl> <nl> mmm a / samples / cpp / camshiftdemo . cpp <nl> ppp b / samples / cpp / camshiftdemo . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ camera_number | 0 | camera number } " <nl> + " { 1 | | 0 | camera number } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> int main ( int argc , const char * * argv ) <nl> float hranges [ ] = { 0 , 180 } ; <nl> const float * phranges = hranges ; <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - int camNum = parser . get < int > ( 1 ) ; <nl> + int camNum = parser . get < int > ( " 1 " ) ; <nl> <nl> cap . open ( camNum ) ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> help ( ) ; <nl> cout < < " * * * Could not initialize capturing . . . * * * \ n " ; <nl> cout < < " Current parameter ' s value : \ n " ; <nl> - parser . printMessage ( ) ; <nl> + parser . printParams ( ) ; <nl> return - 1 ; <nl> } <nl> <nl> mmm a / samples / cpp / chamfer . cpp <nl> ppp b / samples / cpp / chamfer . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ logo1 | logo_in_clutter . png | image edge map } " <nl> - " { @ logo2 | logo . png | template edge map } " <nl> + " { 1 | | logo_in_clutter . png | image edge map } " <nl> + " { 2 | | logo . png | template edge map } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> int main ( int argc , const char * * argv ) <nl> help ( ) ; <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> <nl> - string image = parser . get < string > ( 1 ) ; <nl> - string templ = parser . 
get < string > ( 2 ) ; <nl> + string image = parser . get < string > ( " 1 " ) ; <nl> + string templ = parser . get < string > ( " 2 " ) ; <nl> Mat img = imread ( image . c_str ( ) , 0 ) ; <nl> Mat tpl = imread ( templ . c_str ( ) , 0 ) ; <nl> <nl> mmm a / samples / cpp / connected_components . cpp <nl> ppp b / samples / cpp / connected_components . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ image | stuff . jpg | image for converting to a grayscale } " <nl> + " { 1 | | stuff . jpg | image for converting to a grayscale } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> { <nl> help ( ) ; <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - string inputImage = parser . get < string > ( 1 ) ; <nl> + string inputImage = parser . get < string > ( " 1 " ) ; <nl> img = imread ( inputImage . c_str ( ) , 0 ) ; <nl> <nl> if ( img . empty ( ) ) <nl> mmm a / samples / cpp / demhist . cpp <nl> ppp b / samples / cpp / demhist . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ image | baboon . jpg | input image file } " <nl> + " { 1 | | baboon . jpg | input image file } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> int main ( int argc , const char * * argv ) <nl> help ( ) ; <nl> <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - string inputImage = parser . get < string > ( 1 ) ; <nl> + string inputImage = parser . get < string > ( " 1 " ) ; <nl> <nl> / / Load the source image . HighGUI use . <nl> image = imread ( inputImage , 0 ) ; <nl> mmm a / samples / cpp / dft . cpp <nl> ppp b / samples / cpp / dft . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ image | lena . jpg | input image file } " <nl> + " { 1 | | lena . jpg | input image file } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> { <nl> help ( ) ; <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - string filename = parser . get < string > ( 1 ) ; <nl> + string filename = parser . get < string > ( " 1 " ) ; <nl> <nl> Mat img = imread ( filename . c_str ( ) , CV_LOAD_IMAGE_GRAYSCALE ) ; <nl> if ( img . empty ( ) ) <nl> mmm a / samples / cpp / distrans . cpp <nl> ppp b / samples / cpp / distrans . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ image | stuff . jpg | input image file } " <nl> + " { 1 | | stuff . jpg | input image file } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> { <nl> help ( ) ; <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - string filename = parser . get < string > ( 1 ) ; <nl> + string filename = parser . get < string > ( " 1 " ) ; <nl> gray = imread ( filename . c_str ( ) , 0 ) ; <nl> if ( gray . empty ( ) ) <nl> { <nl> mmm a / samples / cpp / edge . cpp <nl> ppp b / samples / cpp / edge . cpp <nl> static void help ( ) <nl> <nl> const char * keys = <nl> { <nl> - " { @ image | fruits . jpg | input image name } " <nl> + " { 1 | | fruits . jpg | input image name } " <nl> } ; <nl> <nl> int main ( int argc , const char * * argv ) <nl> int main ( int argc , const char * * argv ) <nl> help ( ) ; <nl> <nl> CommandLineParser parser ( argc , argv , keys ) ; <nl> - string filename = parser . get < string > ( 1 ) ; <nl> + string filename = parser . get < string > ( " 1 " ) ; <nl> <nl> image = imread ( filename , 1 ) ; <nl> if ( image . empty ( ) ) <nl> mmm a / samples / cpp / opencv_version . cpp <nl> ppp b / samples / cpp / opencv_version . 
cpp <nl> <nl> <nl> const char * keys = <nl> { <nl> - " { b build | | print complete build info } " <nl> - " { h help | | print this help } " <nl> + " { b | build | false | print complete build info } " <nl> + " { h | help | false | print this help } " <nl> } ; <nl> <nl> int main ( int argc , const char * argv [ ] ) <nl> { <nl> cv : : CommandLineParser parser ( argc , argv , keys ) ; <nl> <nl> - if ( parser . has ( " help " ) ) <nl> + if ( parser . get < bool > ( " help " ) ) <nl> { <nl> - parser . printMessage ( ) ; <nl> + parser . printParams ( ) ; <nl> } <nl> - else if ( ! parser . check ( ) ) <nl> - { <nl> - parser . printErrors ( ) ; <nl> - } <nl> - else if ( parser . has ( " build " ) ) <nl> + else if ( parser . get < bool > ( " build " ) ) <nl> { <nl> std : : cout < < cv : : getBuildInformation ( ) < < std : : endl ; <nl> } <nl> int main ( int argc , const char * argv [ ] ) <nl> } <nl> <nl> return 0 ; <nl> - } <nl> + } <nl> \ No newline at end of file <nl> mmm a / samples / cpp / point_cloud . cpp <nl> ppp b / samples / cpp / point_cloud . cpp <nl> static void openGlDrawCallback ( void * userdata ) <nl> int main ( int argc , const char * argv [ ] ) <nl> { <nl> const char * keys = <nl> - " { l left | | left image file name } " <nl> - " { r right | | right image file name } " <nl> - " { i intrinsic | | intrinsic camera parameters file name } " <nl> - " { e extrinsic | | extrinsic camera parameters file name } " <nl> - " { d ndisp | 256 | number of disparities } " <nl> - " { s scale | 1 . 0 | scale factor for point cloud } " <nl> - " { h help | | print help message } " ; <nl> + " { l | left | | left image file name } " <nl> + " { r | right | | right image file name } " <nl> + " { i | intrinsic | | intrinsic camera parameters file name } " <nl> + " { e | extrinsic | | extrinsic camera parameters file name } " <nl> + " { d | ndisp | 256 | number of disparities } " <nl> + " { s | scale | 1 . 0 | scale factor for point cloud } " <nl> + " { h | help | false | print help message } " ; <nl> <nl> CommandLineParser cmd ( argc , argv , keys ) ; <nl> <nl> - if ( cmd . has ( " help " ) ) <nl> + if ( cmd . get < bool > ( " help " ) ) <nl> { <nl> - cmd . printMessage ( ) ; <nl> + cout < < " Avaible options : " < < endl ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> int main ( int argc , const char * argv [ ] ) <nl> int ndisp = cmd . get < int > ( " ndisp " ) ; <nl> double scale = cmd . get < double > ( " scale " ) ; <nl> <nl> - if ( ! cmd . check ( ) ) <nl> - { <nl> - cmd . printErrors ( ) ; <nl> - return 0 ; <nl> - } <nl> - <nl> - <nl> if ( left . empty ( ) | | right . empty ( ) ) <nl> { <nl> cout < < " Missed input images " < < endl ; <nl> cout < < " Avaible options : " < < endl ; <nl> - cmd . printMessage ( ) ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> int main ( int argc , const char * argv [ ] ) <nl> { <nl> cout < < " Boss camera parameters must be specified " < < endl ; <nl> cout < < " Avaible options : " < < endl ; <nl> - cmd . printMessage ( ) ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> mmm a / samples / cpp / videostab . cpp <nl> ppp b / samples / cpp / videostab . cpp <nl> int main ( int argc , const char * * argv ) <nl> try <nl> { <nl> const char * keys = <nl> - " { @ 1 | | } " <nl> - " { m model | affine | } " <nl> - " { lp lin - prog - motion - est | no | } " <nl> - " { subset | auto | } " <nl> - " { thresh | auto | } " <nl> - " { outlier - ratio | 0 . 5 | } " <nl> - " { min - inlier - ratio | 0 . 
1 | } " <nl> - " { nkps | 1000 | } " <nl> - " { extra - kps | 0 | } " <nl> - " { local - outlier - rejection | no | } " <nl> - " { sm save - motions | no | } " <nl> - " { lm load - motions | no | } " <nl> - " { r radius | 15 | } " <nl> - " { stdev | auto | } " <nl> - " { lps lin - prog - stab | no | } " <nl> - " { lps - trim - ratio | auto | } " <nl> - " { lps - w1 | 1 | } " <nl> - " { lps - w2 | 10 | } " <nl> - " { lps - w3 | 100 | } " <nl> - " { lps - w4 | 100 | } " <nl> - " { deblur | no | } " <nl> - " { deblur - sens | 0 . 1 | } " <nl> - " { et est - trim | yes | } " <nl> - " { t trim - ratio | 0 . 1 | } " <nl> - " { ic incl - constr | no | } " <nl> - " { bm border - mode | replicate | } " <nl> - " { mosaic | no | } " <nl> - " { ms mosaic - stdev | 10 . 0 | } " <nl> - " { mi motion - inpaint | no | } " <nl> - " { mi - dist - thresh | 5 . 0 | } " <nl> - " { ci color - inpaint | no | } " <nl> - " { ci - radius | 2 | } " <nl> - " { ws wobble - suppress | no | } " <nl> - " { ws - period | 30 | } " <nl> - " { ws - model | homography | } " <nl> - " { ws - subset | auto | } " <nl> - " { ws - thresh | auto | } " <nl> - " { ws - outlier - ratio | 0 . 5 | } " <nl> - " { ws - min - inlier - ratio | 0 . 1 | } " <nl> - " { ws - nkps | 1000 | } " <nl> - " { ws - extra - kps | 0 | } " <nl> - " { ws - local - outlier - rejection | no | } " <nl> - " { ws - lp | no | } " <nl> - " { sm2 save - motions2 | no | } " <nl> - " { lm2 load - motions2 | no | } " <nl> - " { gpu | no | } " <nl> - " { o output | stabilized . avi | } " <nl> - " { fps | auto | } " <nl> - " { q quiet | | } " <nl> - " { h help | | } " ; <nl> + " { 1 | | | | } " <nl> + " { m | model | affine | } " <nl> + " { lp | lin - prog - motion - est | no | } " <nl> + " { | subset | auto | } " <nl> + " { | thresh | auto | } " <nl> + " { | outlier - ratio | 0 . 5 | } " <nl> + " { | min - inlier - ratio | 0 . 1 | } " <nl> + " { | nkps | 1000 | } " <nl> + " { | extra - kps | 0 | } " <nl> + " { | local - outlier - rejection | no | } " <nl> + " { sm | save - motions | no | } " <nl> + " { lm | load - motions | no | } " <nl> + " { r | radius | 15 | } " <nl> + " { | stdev | auto | } " <nl> + " { lps | lin - prog - stab | no | } " <nl> + " { | lps - trim - ratio | auto | } " <nl> + " { | lps - w1 | 1 | } " <nl> + " { | lps - w2 | 10 | } " <nl> + " { | lps - w3 | 100 | } " <nl> + " { | lps - w4 | 100 | } " <nl> + " { | deblur | no | } " <nl> + " { | deblur - sens | 0 . 1 | } " <nl> + " { et | est - trim | yes | } " <nl> + " { t | trim - ratio | 0 . 1 | } " <nl> + " { ic | incl - constr | no | } " <nl> + " { bm | border - mode | replicate | } " <nl> + " { | mosaic | no | } " <nl> + " { ms | mosaic - stdev | 10 . 0 | } " <nl> + " { mi | motion - inpaint | no | } " <nl> + " { | mi - dist - thresh | 5 . 0 | } " <nl> + " { ci | color - inpaint | no | } " <nl> + " { | ci - radius | 2 | } " <nl> + " { ws | wobble - suppress | no | } " <nl> + " { | ws - period | 30 | } " <nl> + " { | ws - model | homography | } " <nl> + " { | ws - subset | auto | } " <nl> + " { | ws - thresh | auto | } " <nl> + " { | ws - outlier - ratio | 0 . 5 | } " <nl> + " { | ws - min - inlier - ratio | 0 . 1 | } " <nl> + " { | ws - nkps | 1000 | } " <nl> + " { | ws - extra - kps | 0 | } " <nl> + " { | ws - local - outlier - rejection | no | } " <nl> + " { | ws - lp | no | } " <nl> + " { sm2 | save - motions2 | no | } " <nl> + " { lm2 | load - motions2 | no | } " <nl> + " { gpu | | no } " <nl> + " { o | output | stabilized . 
avi | } " <nl> + " { | fps | auto | } " <nl> + " { q | quiet | false | } " <nl> + " { h | help | false | } " ; <nl> CommandLineParser cmd ( argc , argv , keys ) ; <nl> <nl> / / parse command arguments <nl> mmm a / samples / gpu / bgfg_segm . cpp <nl> ppp b / samples / gpu / bgfg_segm . cpp <nl> enum Method <nl> int main ( int argc , const char * * argv ) <nl> { <nl> cv : : CommandLineParser cmd ( argc , argv , <nl> - " { c camera | | use camera } " <nl> - " { f file | 768x576 . avi | input video file } " <nl> - " { m method | mog | method ( fgd , mog , mog2 , vibe , gmg ) } " <nl> - " { h help | | print help message } " ) ; <nl> + " { c | camera | false | use camera } " <nl> + " { f | file | 768x576 . avi | input video file } " <nl> + " { m | method | mog | method ( fgd , mog , mog2 , vibe , gmg ) } " <nl> + " { h | help | false | print help message } " ) ; <nl> <nl> - if ( cmd . has ( " help " ) | | ! cmd . check ( ) ) <nl> + if ( cmd . get < bool > ( " help " ) ) <nl> { <nl> - cmd . printMessage ( ) ; <nl> - cmd . printErrors ( ) ; <nl> + cout < < " Usage : bgfg_segm [ options ] " < < endl ; <nl> + cout < < " Available options : " < < endl ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> - bool useCamera = cmd . has ( " camera " ) ; <nl> + bool useCamera = cmd . get < bool > ( " camera " ) ; <nl> string file = cmd . get < string > ( " file " ) ; <nl> string method = cmd . get < string > ( " method " ) ; <nl> <nl> mmm a / samples / gpu / brox_optical_flow . cpp <nl> ppp b / samples / gpu / brox_optical_flow . cpp <nl> int main ( int argc , const char * argv [ ] ) <nl> try <nl> { <nl> const char * keys = <nl> - " { h help | | print help message } " <nl> - " { l left | | specify left image } " <nl> - " { r right | | specify right image } " <nl> - " { s scale | 0 . 8 | set pyramid scale factor } " <nl> - " { a alpha | 0 . 197 | set alpha } " <nl> - " { g gamma | 50 . 0 | set gamma } " <nl> - " { i inner | 10 | set number of inner iterations } " <nl> - " { o outer | 77 | set number of outer iterations } " <nl> - " { si solver | 10 | set number of basic solver iterations } " <nl> - " { t time_step | 0 . 1 | set frame interpolation time step } " ; <nl> + " { h | help | false | print help message } " <nl> + " { l | left | | specify left image } " <nl> + " { r | right | | specify right image } " <nl> + " { s | scale | 0 . 8 | set pyramid scale factor } " <nl> + " { a | alpha | 0 . 197 | set alpha } " <nl> + " { g | gamma | 50 . 0 | set gamma } " <nl> + " { i | inner | 10 | set number of inner iterations } " <nl> + " { o | outer | 77 | set number of outer iterations } " <nl> + " { si | solver | 10 | set number of basic solver iterations } " <nl> + " { t | time_step | 0 . 1 | set frame interpolation time step } " ; <nl> <nl> CommandLineParser cmd ( argc , argv , keys ) ; <nl> <nl> - if ( cmd . has ( " help " ) | | ! cmd . check ( ) ) <nl> + if ( cmd . get < bool > ( " help " ) ) <nl> { <nl> - cmd . printMessage ( ) ; <nl> - cmd . printErrors ( ) ; <nl> + cout < < " Usage : brox_optical_flow [ options ] " < < endl ; <nl> + cout < < " Available options : " < < endl ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> mmm a / samples / gpu / farneback_optical_flow . cpp <nl> ppp b / samples / gpu / farneback_optical_flow .
cpp <nl> static void colorizeFlow ( const Mat & u , const Mat & v , Mat & dst ) <nl> int main ( int argc , char * * argv ) <nl> { <nl> CommandLineParser cmd ( argc , argv , <nl> - " { l left | | specify left image } " <nl> - " { r right | | specify right image } " <nl> - " { h help | | print help message } " ) ; <nl> + " { l | left | | specify left image } " <nl> + " { r | right | | specify right image } " <nl> + " { h | help | false | print help message } " ) ; <nl> <nl> - cmd . about ( " Farneback ' s optical flow sample . " ) ; <nl> - if ( cmd . has ( " help " ) | | ! cmd . check ( ) ) <nl> + if ( cmd . get < bool > ( " help " ) ) <nl> { <nl> - cmd . printMessage ( ) ; <nl> - cmd . printErrors ( ) ; <nl> + cout < < " Farneback ' s optical flow sample . \ n \ n " <nl> + < < " Usage : farneback_optical_flow_gpu [ arguments ] \ n \ n " <nl> + < < " Arguments : \ n " ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> - <nl> string pathL = cmd . get < string > ( " left " ) ; <nl> string pathR = cmd . get < string > ( " right " ) ; <nl> if ( pathL . empty ( ) ) cout < < " Specify left image path \ n " ; <nl> mmm a / samples / gpu / performance / performance . cpp <nl> ppp b / samples / gpu / performance / performance . cpp <nl> int main ( int argc , const char * argv [ ] ) <nl> redirectError ( cvErrorCallback ) ; <nl> <nl> const char * keys = <nl> - " { h help | | print help message } " <nl> - " { f filter | | filter for test } " <nl> - " { w workdir | | set working directory } " <nl> - " { l list | | show all tests } " <nl> - " { d device | 0 | device id } " <nl> - " { i iters | 10 | iteration count } " ; <nl> + " { h | help | false | print help message } " <nl> + " { f | filter | | filter for test } " <nl> + " { w | workdir | | set working directory } " <nl> + " { l | list | false | show all tests } " <nl> + " { d | device | 0 | device id } " <nl> + " { i | iters | 10 | iteration count } " ; <nl> <nl> CommandLineParser cmd ( argc , argv , keys ) ; <nl> <nl> - if ( cmd . has ( " help " ) | | ! cmd . check ( ) ) <nl> + if ( cmd . get < bool > ( " help " ) ) <nl> { <nl> - cmd . printMessage ( ) ; <nl> - cmd . printErrors ( ) ; <nl> + cout < < " Available options : " < < endl ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> - <nl> int device = cmd . get < int > ( " device " ) ; <nl> if ( device < 0 | | device > = num_devices ) <nl> { <nl> int main ( int argc , const char * argv [ ] ) <nl> <nl> string filter = cmd . get < string > ( " filter " ) ; <nl> string workdir = cmd . get < string > ( " workdir " ) ; <nl> - bool list = cmd . has ( " list " ) ; <nl> + bool list = cmd . get < bool > ( " list " ) ; <nl> int iters = cmd . get < int > ( " iters " ) ; <nl> <nl> if ( ! filter . empty ( ) ) <nl> mmm a / samples / gpu / pyrlk_optical_flow . cpp <nl> ppp b / samples / gpu / pyrlk_optical_flow .
cpp <nl> static void getFlowField ( const Mat & u , const Mat & v , Mat & flowField ) <nl> int main ( int argc , const char * argv [ ] ) <nl> { <nl> const char * keys = <nl> - " { h help | | print help message } " <nl> - " { l left | | specify left image } " <nl> - " { r right | | specify right image } " <nl> - " { gray | | use grayscale sources [ PyrLK Sparse ] } " <nl> - " { win_size | 21 | specify windows size [ PyrLK ] } " <nl> - " { max_level | 3 | specify max level [ PyrLK ] } " <nl> - " { iters | 30 | specify iterations count [ PyrLK ] } " <nl> - " { points | 4000 | specify points count [ GoodFeatureToTrack ] } " <nl> - " { min_dist | 0 | specify minimal distance between points [ GoodFeatureToTrack ] } " ; <nl> + " { h | help | false | print help message } " <nl> + " { l | left | | specify left image } " <nl> + " { r | right | | specify right image } " <nl> + " { gray | gray | false | use grayscale sources [ PyrLK Sparse ] } " <nl> + " { win_size | win_size | 21 | specify windows size [ PyrLK ] } " <nl> + " { max_level | max_level | 3 | specify max level [ PyrLK ] } " <nl> + " { iters | iters | 30 | specify iterations count [ PyrLK ] } " <nl> + " { points | points | 4000 | specify points count [ GoodFeatureToTrack ] } " <nl> + " { min_dist | min_dist | 0 | specify minimal distance between points [ GoodFeatureToTrack ] } " ; <nl> <nl> CommandLineParser cmd ( argc , argv , keys ) ; <nl> <nl> - if ( cmd . has ( " help " ) | | ! cmd . check ( ) ) <nl> + if ( cmd . get < bool > ( " help " ) ) <nl> { <nl> - cmd . printMessage ( ) ; <nl> - cmd . printErrors ( ) ; <nl> + cout < < " Usage : pyrlk_optical_flow [ options ] " < < endl ; <nl> + cout < < " Available options : " < < endl ; <nl> + cmd . printParams ( ) ; <nl> return 0 ; <nl> } <nl> <nl> int main ( int argc , const char * argv [ ] ) <nl> return - 1 ; <nl> } <nl> <nl> - bool useGray = cmd . has ( " gray " ) ; <nl> + bool useGray = cmd . get < bool > ( " gray " ) ; <nl> int winSize = cmd . get < int > ( " win_size " ) ; <nl> int maxLevel = cmd . get < int > ( " max_level " ) ; <nl> int iters = cmd . get < int > ( " iters " ) ; <nl> | Revert " add new version of CommandLineParser . add empty docs " | opencv/opencv | d566c6bc865408b31b61a56c71c865474cff9ee4 | 2012-10-15T16:01:44Z |
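The revert above swaps OpenCV's CommandLineParser back to the older four-field key grammar ("{ short | long | default | help }", where boolean flags carry an explicit "false" default, are read with get<bool>(), and printParams() dumps the table), dropping the newer space-separated grammar and its has()/check()/printMessage()/printErrors() calls. For reference, here is a minimal sketch of the newer grammar that the revert removes, assuming a modern (3.x/4.x) OpenCV build where that API is the supported one; the key set is illustrative, not taken from the samples above:

    #include <opencv2/core/utility.hpp>
    #include <iostream>

    int main(int argc, char** argv)
    {
        // First field: space-separated name and aliases; then "| default | help }".
        const char* keys =
            "{ h help  |     | print this help       }"
            "{ d ndisp | 256 | number of disparities }";

        cv::CommandLineParser parser(argc, argv, keys);
        if (parser.has("help")) { parser.printMessage(); return 0; }

        int ndisp = parser.get<int>("ndisp"); // typed access, default applied
        if (!parser.check()) { parser.printErrors(); return 1; } // bad values reported here

        std::cout << "ndisp = " << ndisp << std::endl;
        return 0;
    }

Arguments follow the usual -key=value convention (e.g. ./app -d=128), and a bare -h or --help takes the help branch.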
mmm a / test / Misc / stats_dir_failure_count . swift <nl> ppp b / test / Misc / stats_dir_failure_count . swift <nl> <nl> - / / REQUIRES : rdar35537905 <nl> / / Check that a failed process - tree emits nonzero failure counters <nl> / / RUN : rm - rf % t & & mkdir - p % t <nl> / / RUN : echo zzz > % t / other . swift <nl> <nl> / / FAILURE : { { " Driver . NumProcessFailures " 1 $ } } <nl> / / FAILURE : { { " Frontend . NumProcessFailures " 2 $ } } <nl> <nl> - / / Check that a successful process - tree emits no failure counters <nl> + / / Check that a successful process - tree emits no nonzero failure counters <nl> / / RUN : rm - rf % t & & mkdir - p % t <nl> / / RUN : echo ' let x : Int = 1 ' > % t / other . swift <nl> / / RUN : % target - swiftc_driver - j 2 - typecheck - stats - output - dir % t % s % t / other . swift <nl> / / RUN : % utils / process - stats - dir . py - - set - csv - baseline % t / stats . csv % t <nl> / / RUN : % FileCheck - input - file % t / stats . csv - check - prefix = SUCCESS % s <nl> - / / SUCCESS - NOT : { { " Driver . NumProcessFailures " } } <nl> - / / SUCCESS - NOT : { { " Frontend . NumProcessFailures " } } <nl> + / / SUCCESS - NOT : { { " Driver . NumProcessFailures " [ 1 - 9 ] + } } <nl> + / / SUCCESS - NOT : { { " Frontend . NumProcessFailures " [ 1 - 9 ] + } } <nl> <nl> func foo ( ) { <nl> # if BROKEN <nl> mmm a / test / NameBinding / named_lazy_member_loading_swift_struct . swift <nl> ppp b / test / NameBinding / named_lazy_member_loading_swift_struct . swift <nl> <nl> / / Check that named - lazy - member - loading reduces the number of Decls deserialized <nl> / / RUN : % target - swift - frontend - typecheck - I % t - disable - named - lazy - member - loading - typecheck - stats - output - dir % t / stats - pre % s <nl> / / RUN : % target - swift - frontend - typecheck - I % t - stats - output - dir % t / stats - post % s <nl> - / / RUN : % utils / process - stats - dir . py - - evaluate - delta ' NumDeclsDeserialized < - 5 ' % t / stats - pre % t / stats - post <nl> - <nl> - / / REQUIRES : rdar_35639403 <nl> + / / RUN : % utils / process - stats - dir . py - - evaluate - delta ' NumDeclsDeserialized < - 4 ' % t / stats - pre % t / stats - post <nl> <nl> import NamedLazyMembers <nl> <nl> | Merge remote - tracking branch ' origin / master ' into master - next | apple/swift | 009da9aa1f409f6c174969bb6305e7fd8bde2c02 | 2017-12-13T02:29:09Z |
mmm a / dbms / CMakeLists . txt <nl> ppp b / dbms / CMakeLists . txt <nl> add_library ( dbms <nl> include / DB / DataStreams / verbosePrintString . h <nl> include / DB / DataStreams / SquashingTransform . h <nl> include / DB / DataStreams / SquashingBlockInputStream . h <nl> + include / DB / DataStreams / SquashingBlockOutputStream . h <nl> include / DB / DataTypes / IDataType . h <nl> include / DB / DataTypes / IDataTypeDummy . h <nl> include / DB / DataTypes / DataTypeSet . h <nl> add_library ( dbms <nl> src / DataStreams / verbosePrintString . cpp <nl> src / DataStreams / SquashingTransform . cpp <nl> src / DataStreams / SquashingBlockInputStream . cpp <nl> + src / DataStreams / SquashingBlockOutputStream . cpp <nl> <nl> src / DataTypes / DataTypeString . cpp <nl> src / DataTypes / DataTypeFixedString . cpp <nl> mmm a / dbms / include / DB / DataStreams / SquashingBlockInputStream . h <nl> ppp b / dbms / include / DB / DataStreams / SquashingBlockInputStream . h <nl> <nl> namespace DB <nl> { <nl> <nl> - / * * Merging consequtive blocks of stream to specified minimum size . <nl> - * <nl> - * ( But if one of input blocks has already at least specified size , <nl> - * then don ' t merge it with neighbours , even if neighbours are small . ) <nl> - * <nl> - * Used to prepare blocks to adequate size for INSERT queries , <nl> - * because such storages as Memory , StripeLog , Log , TinyLog . . . <nl> - * store or compress data in blocks exactly as passed to it , <nl> - * and blocks of small size are not efficient . <nl> - * <nl> - * Order of data is kept . <nl> + / * * Merging consecutive blocks of stream to specified minimum size . <nl> * / <nl> class SquashingBlockInputStream : public IProfilingBlockInputStream <nl> { <nl> public : <nl> - / / / Conditions on rows and bytes are OR - ed . If one of them is zero , then corresponding condition is ignored . <nl> SquashingBlockInputStream ( BlockInputStreamPtr & src , size_t min_block_size_rows , size_t min_block_size_bytes ) ; <nl> <nl> String getName ( ) const override { return " Squashing " ; } <nl> class SquashingBlockInputStream : public IProfilingBlockInputStream <nl> <nl> private : <nl> SquashingTransform transform ; <nl> + bool all_read = false ; <nl> } ; <nl> <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 87b4799d0f4 <nl> mmm / dev / null <nl> ppp b / dbms / include / DB / DataStreams / SquashingBlockOutputStream . h <nl> <nl> + # pragma once <nl> + <nl> + # include < DB / DataStreams / IBlockOutputStream . h > <nl> + # include < DB / DataStreams / SquashingTransform . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / * * Merging consecutive blocks of stream to specified minimum size . <nl> + * / <nl> + class SquashingBlockOutputStream : public IBlockOutputStream <nl> + { <nl> + public : <nl> + SquashingBlockOutputStream ( BlockOutputStreamPtr & dst , size_t min_block_size_rows , size_t min_block_size_bytes ) ; <nl> + <nl> + void write ( const Block & block ) override ; <nl> + <nl> + void flush ( ) override ; <nl> + void writePrefix ( ) override ; <nl> + void writeSuffix ( ) override ; <nl> + <nl> + private : <nl> + BlockOutputStreamPtr output ; <nl> + <nl> + SquashingTransform transform ; <nl> + bool all_written = false ; <nl> + <nl> + void finalize ( ) ; <nl> + } ; <nl> + <nl> + } <nl> mmm a / dbms / include / DB / DataStreams / SquashingTransform . h <nl> ppp b / dbms / include / DB / DataStreams / SquashingTransform . 
h <nl> <nl> namespace DB <nl> { <nl> <nl> + <nl> + / * * Merging consecutive passed blocks to specified minimum size . <nl> + * <nl> + * ( But if one of input blocks has already at least specified size , <nl> + * then don ' t merge it with neighbours , even if neighbours are small . ) <nl> + * <nl> + * Used to prepare blocks to adequate size for INSERT queries , <nl> + * because such storages as Memory , StripeLog , Log , TinyLog . . . <nl> + * store or compress data in blocks exactly as passed to it , <nl> + * and blocks of small size are not efficient . <nl> + * <nl> + * Order of data is kept . <nl> + * / <nl> class SquashingTransform <nl> { <nl> public : <nl> + / / / Conditions on rows and bytes are OR - ed . If one of them is zero , then corresponding condition is ignored . <nl> SquashingTransform ( size_t min_block_size_rows , size_t min_block_size_bytes ) ; <nl> <nl> + / / / When not ready , you need to pass more blocks to add function . <nl> struct Result <nl> { <nl> bool ready = false ; <nl> class SquashingTransform <nl> Result ( Block & & block_ ) : ready ( true ) , block ( std : : move ( block_ ) ) { } <nl> } ; <nl> <nl> + / * * Add next block and possibly returns squashed block . <nl> + * At end , you need to pass empty block . As the result for last ( empty ) block , you will get last Result with ready = true . <nl> + * / <nl> Result add ( Block & & block ) ; <nl> <nl> private : <nl> class SquashingTransform <nl> size_t min_block_size_bytes ; <nl> <nl> Block accumulated_block ; <nl> - bool all_read = false ; <nl> <nl> void append ( Block & & block ) ; <nl> <nl> mmm a / dbms / src / DataStreams / SquashingBlockInputStream . cpp <nl> ppp b / dbms / src / DataStreams / SquashingBlockInputStream . cpp <nl> SquashingBlockInputStream : : SquashingBlockInputStream ( BlockInputStreamPtr & src , <nl> <nl> Block SquashingBlockInputStream : : readImpl ( ) <nl> { <nl> + if ( all_read ) <nl> + return { } ; <nl> + <nl> while ( true ) <nl> { <nl> - SquashingTransform : : Result result = transform . add ( children [ 0 ] - > read ( ) ) ; <nl> + Block block = children [ 0 ] - > read ( ) ; <nl> + if ( ! block ) <nl> + all_read = true ; <nl> + <nl> + SquashingTransform : : Result result = transform . add ( std : : move ( block ) ) ; <nl> if ( result . ready ) <nl> return result . block ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . bdeceade518 <nl> mmm / dev / null <nl> ppp b / dbms / src / DataStreams / SquashingBlockOutputStream . cpp <nl> <nl> + # include < DB / DataStreams / SquashingBlockOutputStream . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + SquashingBlockOutputStream : : SquashingBlockOutputStream ( BlockOutputStreamPtr & dst , size_t min_block_size_rows , size_t min_block_size_bytes ) <nl> + : output ( dst ) , transform ( min_block_size_rows , min_block_size_bytes ) <nl> + { <nl> + } <nl> + <nl> + <nl> + void SquashingBlockOutputStream : : write ( const Block & block ) <nl> + { <nl> + SquashingTransform : : Result result = transform . add ( Block ( block ) ) ; <nl> + if ( result . ready ) <nl> + output - > write ( result . block ) ; <nl> + } <nl> + <nl> + <nl> + void SquashingBlockOutputStream : : finalize ( ) <nl> + { <nl> + if ( all_written ) <nl> + return ; <nl> + <nl> + all_written = true ; <nl> + <nl> + SquashingTransform : : Result result = transform . add ( { } ) ; <nl> + if ( result . ready & & result . block ) <nl> + output - > write ( result . 
block ) ; <nl> + } <nl> + <nl> + <nl> + void SquashingBlockOutputStream : : flush ( ) <nl> + { <nl> + finalize ( ) ; <nl> + output - > flush ( ) ; <nl> + } <nl> + <nl> + <nl> + void SquashingBlockOutputStream : : writePrefix ( ) <nl> + { <nl> + output - > writePrefix ( ) ; <nl> + } <nl> + <nl> + <nl> + void SquashingBlockOutputStream : : writeSuffix ( ) <nl> + { <nl> + finalize ( ) ; <nl> + output - > writeSuffix ( ) ; <nl> + } <nl> + <nl> + } <nl> mmm a / dbms / src / DataStreams / SquashingTransform . cpp <nl> ppp b / dbms / src / DataStreams / SquashingTransform . cpp <nl> SquashingTransform : : SquashingTransform ( size_t min_block_size_rows , size_t min_bl <nl> <nl> SquashingTransform : : Result SquashingTransform : : add ( Block & & block ) <nl> { <nl> - if ( all_read ) <nl> - return true ; <nl> - <nl> if ( ! block ) <nl> - { <nl> - all_read = true ; <nl> return Result ( std : : move ( accumulated_block ) ) ; <nl> - } <nl> <nl> / / / Just read block is already enough . <nl> if ( isEnoughSize ( block . rowsInFirstColumn ( ) , block . bytes ( ) ) ) <nl> mmm a / dbms / src / Interpreters / InterpreterInsertQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterInsertQuery . cpp <nl> <nl> # include < DB / DataStreams / AddingDefaultBlockOutputStream . h > <nl> # include < DB / DataStreams / PushingToViewsBlockOutputStream . h > <nl> # include < DB / DataStreams / NullAndDoCopyBlockInputStream . h > <nl> - # include < DB / DataStreams / SquashingBlockInputStream . h > <nl> + # include < DB / DataStreams / SquashingBlockOutputStream . h > <nl> # include < DB / DataStreams / copyData . h > <nl> <nl> # include < DB / Parsers / ASTInsertQuery . h > <nl> BlockIO InterpreterInsertQuery : : execute ( ) <nl> <nl> NamesAndTypesListPtr required_columns = std : : make_shared < NamesAndTypesList > ( table - > getColumnsList ( ) ) ; <nl> <nl> - / / / Create a tuple of several streams into which the data will be written . <nl> - BlockOutputStreamPtr out = <nl> - std : : make_shared < ProhibitColumnsBlockOutputStream > ( <nl> - std : : make_shared < AddingDefaultBlockOutputStream > ( <nl> - std : : make_shared < MaterializingBlockOutputStream > ( <nl> - std : : make_shared < PushingToViewsBlockOutputStream > ( query . database , query . table , context , query_ptr ) ) , <nl> - required_columns , table - > column_defaults , context , static_cast < bool > ( context . getSettingsRef ( ) . strict_insert_defaults ) ) , <nl> - table - > materialized_columns ) ; <nl> + / / / Create a pipeline of several streams into which the data will be written . <nl> + BlockOutputStreamPtr out ; <nl> + <nl> + out = std : : make_shared < PushingToViewsBlockOutputStream > ( query . database , query . table , context , query_ptr ) ; <nl> + <nl> + out = std : : make_shared < MaterializingBlockOutputStream > ( out ) ; <nl> + <nl> + out = std : : make_shared < AddingDefaultBlockOutputStream > ( out , <nl> + required_columns , table - > column_defaults , context , static_cast < bool > ( context . getSettingsRef ( ) . strict_insert_defaults ) ) ; <nl> + <nl> + out = std : : make_shared < ProhibitColumnsBlockOutputStream > ( out , table - > materialized_columns ) ; <nl> + <nl> + out = std : : make_shared < SquashingBlockOutputStream > ( out , <nl> + context . getSettingsRef ( ) . min_insert_block_size_rows , <nl> + context . getSettingsRef ( ) . min_insert_block_size_bytes ) ; <nl> <nl> BlockIO res ; <nl> res .
out_sample = getSampleBlock ( ) ; <nl> BlockIO InterpreterInsertQuery : : execute ( ) <nl> InterpreterSelectQuery interpreter_select { query . select , context } ; <nl> BlockInputStreamPtr in = interpreter_select . execute ( ) . in ; <nl> <nl> - in = std : : make_shared < SquashingBlockInputStream > ( in , <nl> - context . getSettingsRef ( ) . min_insert_block_size_rows , <nl> - context . getSettingsRef ( ) . min_insert_block_size_bytes ) ; <nl> - <nl> res . in = std : : make_shared < NullAndDoCopyBlockInputStream > ( in , out ) ; <nl> res . in_sample = interpreter_select . getSampleBlock ( ) ; <nl> } <nl> mmm a / dbms / src / Server / TCPHandler . cpp <nl> ppp b / dbms / src / Server / TCPHandler . cpp <nl> <nl> # include < DB / DataStreams / AsynchronousBlockInputStream . h > <nl> # include < DB / DataStreams / NativeBlockInputStream . h > <nl> # include < DB / DataStreams / NativeBlockOutputStream . h > <nl> - # include < DB / DataStreams / SquashingBlockInputStream . h > <nl> # include < DB / Interpreters / executeQuery . h > <nl> # include < DB / Interpreters / Quota . h > <nl> <nl> mmm a / dbms / src / Server / TCPHandler . h <nl> ppp b / dbms / src / Server / TCPHandler . h <nl> namespace DB <nl> { <nl> <nl> <nl> - / / / Состояние обработки запроса . <nl> + / / / State of query processing . <nl> struct QueryState <nl> { <nl> - / / / Идентификатор запроса . <nl> + / / / Identifier of the query . <nl> String query_id ; <nl> <nl> QueryProcessingStage : : Enum stage = QueryProcessingStage : : Complete ; <nl> Protocol : : Compression : : Enum compression = Protocol : : Compression : : Disable ; <nl> <nl> - / / / Откуда читать данные для INSERT - а . <nl> + / / / From where to read data for INSERT . <nl> std : : shared_ptr < ReadBuffer > maybe_compressed_in ; <nl> BlockInputStreamPtr block_in ; <nl> <nl> - / / / Куда писать возвращаемые данные . <nl> + / / / Where to write result data . <nl> std : : shared_ptr < WriteBuffer > maybe_compressed_out ; <nl> BlockOutputStreamPtr block_out ; <nl> <nl> - / / / Текст запроса . <nl> + / / / Query text . <nl> String query ; <nl> - / / / Потоки блоков , с помощью которых выполнять запрос . <nl> + / / / Streams of blocks that are processing the query . <nl> BlockIO io ; <nl> <nl> / / / Whether the query was cancelled <nl> | Fixed error [ # METR - 21877 ] . | ClickHouse/ClickHouse | c2929666fdf02c803f797a3ca532835b3bad23a7 | 2016-07-07T01:57:48Z |
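For context on the contract this commit builds on: SquashingTransform buffers incoming blocks until the configured row/byte thresholds are reached, and each wrapper must feed it exactly one empty block at the end as a flush sentinel, which is what readImpl's all_read flag and finalize's all_written flag guard against doing twice. The sketch below is a simplified, self-contained analogue of that add()/Result protocol, not ClickHouse's real types: Block is stood in for by std::vector<int>, and only the row threshold is modelled.

    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    using Block = std::vector<int>;

    struct Result
    {
        bool ready = false;
        Block block;
    };

    class Squasher
    {
    public:
        explicit Squasher(std::size_t min_rows_) : min_rows(min_rows_) {}

        Result add(Block && block)
        {
            if (block.empty())  /// Sentinel: flush whatever was accumulated.
                return {true, std::exchange(accumulated, {})};

            if (accumulated.empty() && block.size() >= min_rows)
                return {true, std::move(block)};  /// Already big enough: pass through unmerged.

            accumulated.insert(accumulated.end(), block.begin(), block.end());
            if (accumulated.size() >= min_rows)
                return {true, std::exchange(accumulated, {})};

            return {};  /// Not ready yet: caller must pass more blocks.
        }

    private:
        std::size_t min_rows;
        Block accumulated;
    };

    int main()
    {
        Squasher squasher(5);
        for (Block b : {Block{1, 2}, Block{3, 4}, Block{5, 6}, Block{}})
        {
            Result res = squasher.add(std::move(b));
            if (res.ready && !res.block.empty())
                std::cout << "emitted block of " << res.block.size() << " rows\n";
        }
    }

Because the transform knows nothing about its caller, the same logic can sit behind a pull-style input stream or a push-style output stream, which is what lets InterpreterInsertQuery move the squashing from the SELECT side (SquashingBlockInputStream on the input) to the INSERT side (SquashingBlockOutputStream on out).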
mmm a / Code / CryEngine / CryAction / ICryMannequin . h <nl> ppp b / Code / CryEngine / CryAction / ICryMannequin . h <nl> typedef void ( * MannAssetCallback ) ( const SAnimAssetReport & assetReport , void * _c <nl> <nl> class IAnimationDatabase <nl> { <nl> - friend class SFragmentDataRAII ; <nl> - <nl> public : <nl> virtual ~ IAnimationDatabase ( ) { } <nl> <nl> class IAnimationDatabase <nl> virtual bool ClearSubADBFilter ( const string & sADBFileName ) = 0 ; <nl> <nl> virtual void QueryUsedTags ( const FragmentID fragmentID , const SFragTagState & filter , SFragTagState & usedTags ) const = 0 ; <nl> - <nl> - protected : <nl> - <nl> - / / ! Factory method for SFragmentData , to handle dll - scope <nl> - virtual SFragmentData * CreateSFragmentDataRaw ( ) const = 0 ; <nl> - virtual void DestroySFragmentDataRaw ( SFragmentData * ptr ) const = 0 ; <nl> - } ; <nl> - <nl> - class SFragmentDataRAII <nl> - { <nl> - public : <nl> - <nl> - SFragmentDataRAII ( const IAnimationDatabase * adb ) : m_adb ( adb ) , m_sfd ( nullptr ) <nl> - { <nl> - if ( m_adb ) <nl> - { <nl> - m_sfd = m_adb - > CreateSFragmentDataRaw ( ) ; <nl> - } <nl> - } <nl> - <nl> - ~ SFragmentDataRAII ( ) <nl> - { <nl> - Clear ( ) ; <nl> - } <nl> - <nl> - SFragmentData * Get ( ) { return m_sfd ; } <nl> - <nl> - void Clear ( ) <nl> - { <nl> - if ( m_adb & & m_sfd ) <nl> - { <nl> - m_adb - > DestroySFragmentDataRaw ( m_sfd ) ; <nl> - } <nl> - } <nl> - <nl> - SFragmentDataRAII & operator = ( SFragmentDataRAII & & other ) <nl> - { <nl> - if ( ! other . m_adb ) return * this ; <nl> - if ( this ! = & other ) <nl> - { <nl> - Clear ( ) ; <nl> - m_adb = other . m_adb ; <nl> - m_sfd = other . m_sfd ; <nl> - other . m_adb = nullptr ; <nl> - other . m_sfd = nullptr ; <nl> - } <nl> - return * this ; <nl> - } <nl> - <nl> - private : <nl> - SFragmentDataRAII ( const SFragmentDataRAII & other ) ; <nl> - SFragmentDataRAII & operator = ( const SFragmentDataRAII & ) ; <nl> - <nl> - const IAnimationDatabase * m_adb ; <nl> - SFragmentData * m_sfd ; <nl> } ; <nl> <nl> class IAnimationDatabaseManager <nl> mmm a / Code / CryEngine / CryAction / ICryMannequinDefs . h <nl> ppp b / Code / CryEngine / CryAction / ICryMannequinDefs . h <nl> struct SCRCRef < 1 , THash > <nl> <nl> SCRCRef ( ) <nl> : crc ( INVALID ) <nl> + , stringValue ( ) <nl> { <nl> } <nl> <nl> explicit SCRCRef ( const char * const nameString ) <nl> : crc ( INVALID ) <nl> + , stringValue ( ) <nl> { <nl> SetByString ( nameString ) ; <nl> } <nl> <nl> SCRCRef ( const SCRCRef < 1 > & other ) <nl> : crc ( INVALID ) <nl> + , stringValue ( ) <nl> { <nl> - SetByString ( other . stringValue ) ; <nl> - } <nl> - <nl> - ~ SCRCRef ( ) <nl> - { <nl> - CleanUp ( ) ; <nl> + SetByString ( other . c_str ( ) ) ; <nl> } <nl> <nl> SCRCRef < 1 > & operator = ( const SCRCRef < 1 > & other ) <nl> { <nl> if ( & other ! = this ) <nl> { <nl> - SetByString ( other . stringValue ) ; <nl> + SetByString ( other . c_str ( ) ) ; <nl> } <nl> return * this ; <nl> } <nl> <nl> - void CleanUp ( ) <nl> - { <nl> - crc = INVALID ; <nl> - } <nl> - <nl> void SetByString ( const char * const nameString ) <nl> { <nl> - CleanUp ( ) ; <nl> if ( nameString & & ( nameString [ 0 ] ! = ' \ 0 ' ) ) <nl> { <nl> - stringValue = string ( nameString ) ; <nl> + const size_t lengthPlusOne = strlen ( nameString ) + 1 ; <nl> + stringValue . assign ( nameString , nameString + lengthPlusOne ) ; <nl> + <nl> crc = THash : : CalculateHash ( nameString ) ; <nl> } <nl> + else <nl> + { <nl> + stringValue . 
clear ( ) ; <nl> + crc = INVALID ; <nl> + } <nl> } <nl> <nl> ILINE bool IsEmpty ( ) const <nl> struct SCRCRef < 1 , THash > <nl> <nl> ILINE const char * c_str ( ) const <nl> { <nl> - return stringValue . c_str ( ) ; <nl> + return stringValue . empty ( ) ? " " : stringValue . data ( ) ; <nl> } <nl> <nl> ILINE SCRCRef < 1 > & operator = ( const char * const s ) <nl> struct SCRCRef < 1 , THash > <nl> TInt crc ; <nl> <nl> private : <nl> - string stringValue ; <nl> + DynArray < char > stringValue ; <nl> } ; <nl> <nl> typedef SCRCRef < STORE_TAG_STRINGS > STagRef ; <nl> mmm a / Code / CryEngine / CryAction / Mannequin / AnimationDatabase . cpp <nl> ppp b / Code / CryEngine / CryAction / Mannequin / AnimationDatabase . cpp <nl> float AppendBlend ( SFragmentData & outFragmentData , const SBlendQueryResult & blend <nl> return fragmentTime ; <nl> } <nl> <nl> - SFragmentData * CAnimationDatabase : : CreateSFragmentDataRaw ( ) const <nl> - { <nl> - return new SFragmentData ; <nl> - } <nl> - <nl> - void CAnimationDatabase : : DestroySFragmentDataRaw ( SFragmentData * ptr ) const <nl> - { <nl> - delete ptr ; <nl> - } <nl> - <nl> uint32 CAnimationDatabase : : Query ( SFragmentData & outFragmentData , const SBlendQuery & inBlendQuery , uint32 inOptionIdx , const IAnimationSet * inAnimSet , SFragmentSelection * outFragSelection ) const <nl> { <nl> uint32 retFlags = 0 ; <nl> mmm a / Code / CryEngine / CryAction / Mannequin / AnimationDatabase . h <nl> ppp b / Code / CryEngine / CryAction / Mannequin / AnimationDatabase . h <nl> class CAnimationDatabase : public IAnimationDatabase <nl> <nl> static void RegisterCVars ( ) ; <nl> <nl> - protected : <nl> - <nl> - virtual SFragmentData * CreateSFragmentDataRaw ( ) const ; <nl> - virtual void DestroySFragmentDataRaw ( SFragmentData * ptr ) const ; <nl> - <nl> private : <nl> <nl> void EnumerateFragmentAnimAssets ( const CFragment * pFragment , const IAnimationSet * animSet , SAnimAssetReport & assetReport , MannAssetCallback assetCallback , void * callbackContext ) const ; <nl> | ! B ( Action ) ( CE - 8897 ) ( CE - 8896 ) Fixed a memory management issue with a debug string instance being passed across a dll boundary . Removed the previous ad - hoc workaround for this issue to reduce complexity on the CryAction ' s client side . ( Approved by timur ) | CRYTEK/CRYENGINE | 67a158fb276929922b730aaece0add9ab3bf9a94 | 2016-04-19T15:17:22Z |
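The SCRCRef change above is the fix named in the commit message: a string member constructed in one module and destroyed in another can mix CRT heaps, or share a reference-counted buffer, across the dll boundary. Storing the characters as an explicit NUL-terminated deep copy in a DynArray<char>, and handing out only const char*, avoids shared ownership entirely. The fragment below restates that pattern in isolation; the class name is hypothetical and std::vector<char> merely stands in for CryEngine's DynArray, so read it as the idea rather than engine code.

    #include <cstring>
    #include <vector>

    class NameRef  // hypothetical stand-in for SCRCRef's string storage
    {
    public:
        void SetByString(const char* nameString)
        {
            if (nameString && nameString[0] != '\0')
            {
                // Deep-copy the bytes, including the terminating NUL, instead of
                // sharing a (possibly reference-counted) string object across modules.
                const std::size_t lengthPlusOne = std::strlen(nameString) + 1;
                stringValue.assign(nameString, nameString + lengthPlusOne);
            }
            else
            {
                stringValue.clear();
            }
        }

        // Always hand out a valid C string; an empty buffer maps to "".
        const char* c_str() const { return stringValue.empty() ? "" : stringValue.data(); }

    private:
        std::vector<char> stringValue;
    };

Note how the copy constructor and operator= in the diff rebuild the buffer via SetByString(other.c_str()), so each module only ever frees memory that it allocated itself.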
mmm a / jstests / aggregation / bugs / skip_limit_overflow . js <nl> ppp b / jstests / aggregation / bugs / skip_limit_overflow . js <nl> function testPipeline ( pipeline , expectedResult , optimizedAwayStages ) { <nl> assert . eq ( coll . aggregate ( pipeline ) . toArray ( ) , [ ] ) ; <nl> } <nl> <nl> - / / Case where overflow of limit + skip prevents limit stage from being absorbed . Values <nl> - / / are specified as integrals > MAX_LONG . Note that we cannot specify this huge value as <nl> - / / a NumberLong , as we get a number conversion error ( even if it ' s passed as a string ) . <nl> - testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 18446744073709552000 } , { $ limit : 6 } ] , { <nl> - $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( 6 ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> - } ) ; <nl> - testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 6 } , { $ limit : 18446744073709552000 } ] , { <nl> - $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( 6 ) ] } <nl> - } ) ; <nl> + / / Case where overflow of limit + skip prevents limit stage from being absorbed . Values are <nl> + / / specified as integrals > MAX_LONG . Note that we cannot specify this huge value as a NumberLong , <nl> + / / as we get a number conversion error ( even if it ' s passed as a string ) . <nl> + testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 18446744073709552000 } , { $ limit : 6 } ] , <nl> + { <nl> + $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( 6 ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> + } , <nl> + [ " $ skip " ] ) ; <nl> + testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 6 } , { $ limit : 18446744073709552000 } ] , <nl> + { <nl> + $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ 6 ] } <nl> + } , <nl> + [ " $ skip " ] ) ; <nl> <nl> / / Case where overflow of limit + skip prevents limit stage from being absorbed . One of the <nl> / / values = = MAX_LONG , another one is 1 . 
<nl> - testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : NumberLong ( " 9223372036854775807 " ) } , { $ limit : 1 } ] , { <nl> - $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( 1 ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> - } ) ; <nl> - testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 1 } , { $ limit : NumberLong ( " 9223372036854775807 " ) } ] , { <nl> - $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( 1 ) ] } <nl> - } ) ; <nl> + testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : NumberLong ( " 9223372036854775807 " ) } , { $ limit : 1 } ] , <nl> + { <nl> + $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( 1 ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> + } , <nl> + [ " $ skip " ] ) ; <nl> + testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 1 } , { $ limit : NumberLong ( " 9223372036854775807 " ) } ] , <nl> + { <nl> + $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ 1 ] } <nl> + } , <nl> + [ " $ skip " ] ) ; <nl> <nl> / / Case where limit + skip do not overflow . Limit = = MAX_LONG and skip is 0 . Should be able to <nl> / / absorb the limit and skip stages . <nl> testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 0 } , { $ limit : NumberLong ( " 922337203685477 <nl> testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : NumberLong ( " 9223372036854775806 " ) } , { $ limit : 1 } ] , <nl> { <nl> SORT : { path : " limitAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( " 9223372036854775806 " ) ] } <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775806 " ) ] } <nl> } , <nl> - [ " $ limit " ] ) ; <nl> + [ " $ skip " , " $ limit " ] ) ; <nl> testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 1 } , { $ limit : NumberLong ( " 9223372036854775806 " ) } ] , <nl> { <nl> SORT : { path : " limitAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( 1 ) ] } <nl> + SKIP : { path : " skipAmount " , expectedValue : [ 1 ] } <nl> } , <nl> - [ " $ limit " ] ) ; <nl> + [ " $ skip " , " $ limit " ] ) ; <nl> <nl> / / Case where the first $ limit can be pushed down , but the second overflows and thus remains in <nl> / / place . <nl> testPipeline ( <nl> ] , <nl> { <nl> SORT : { path : " limitAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775800 " ) ] } , <nl> + $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( 10 ) ] } , <nl> $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( 1 ) ] } <nl> } ) ; <nl> <nl> testPipeline ( <nl> ] , <nl> { <nl> SORT : { path : " limitAmount " , expectedValue : [ NumberLong ( " 9223372036854775804 " ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( " 9223372036854775803 " ) ] } <nl> - } ) ; <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775803 " ) ] } <nl> + } , <nl> + [ " $ skip " , " $ limit " ] ) ; <nl> <nl> / / Case where limit + skip do not overflow . 
Both values are < MAX_LONG . <nl> testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 674761616283 } , { $ limit : 35361718 } ] , <nl> { <nl> SORT : { path : " limitAmount " , expectedValue : [ NumberLong ( 674796978001 ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( 674761616283 ) ] } <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( 674761616283 ) ] } <nl> } , <nl> - [ " $ limit " ] ) ; <nl> + [ " $ skip " , " $ limit " ] ) ; <nl> testPipeline ( [ { $ sort : { x : - 1 } } , { $ skip : 35361718 } , { $ limit : 674761616283 } ] , <nl> { <nl> SORT : { path : " limitAmount " , expectedValue : [ NumberLong ( 674796978001 ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( 35361718 ) ] } <nl> + SKIP : { path : " skipAmount " , expectedValue : [ 35361718 ] } <nl> } , <nl> - [ " $ limit " ] ) ; <nl> + [ " $ skip " , " $ limit " ] ) ; <nl> <nl> / / Case where overflow of limit + skip + skip prevents limit stage from being absorbed . <nl> - / / One skip = = MAX_LONG - 1 , another one is 1 . Should merge two skip stages into one . <nl> + / / One skip = = MAX_LONG - 1 , another one is 1 . Should merge two skip stages into one and push down . <nl> testPipeline ( <nl> [ { $ sort : { x : - 1 } } , { $ skip : 1 } , { $ skip : NumberLong ( " 9223372036854775806 " ) } , { $ limit : 1 } ] , <nl> { <nl> $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( 1 ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> } , <nl> - [ " $ sort " ] ) ; <nl> + [ " $ skip " , " $ sort " ] ) ; <nl> <nl> - / / Case where where overflow of limit + skip + skip prevents limit stage from being absorbed . <nl> - / / One skip = = MAX_LONG , another one is 1 . Should not absorb or merge any stages . <nl> + / / Case where overflow of limit + skip + skip prevents limit stage and one of the skip stages <nl> + / / from being absorbed . One skip = = MAX_LONG , another one is 1 . Should absorb the first skip . <nl> testPipeline ( <nl> [ { $ sort : { x : - 1 } } , { $ skip : 1 } , { $ skip : NumberLong ( " 9223372036854775807 " ) } , { $ limit : 1 } ] , <nl> { <nl> $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( 1 ) ] } , <nl> - $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( 1 ) , NumberLong ( " 9223372036854775807 " ) ] } <nl> + SKIP : { path : " skipAmount " , expectedValue : [ 1 ] } , <nl> + $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> } , <nl> [ " $ sort " ] ) ; <nl> <nl> + / / Cases where both limit and skip = = MAX_LONG .
<nl> + testPipeline ( <nl> + [ <nl> + { $ sort : { x : - 1 } } , <nl> + { $ limit : NumberLong ( " 9223372036854775807 " ) } , <nl> + { $ skip : NumberLong ( " 9223372036854775807 " ) } <nl> + ] , <nl> + { <nl> + SORT : { path : " limitAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> + } , <nl> + [ " $ skip " , " $ limit " , " $ sort " ] ) ; <nl> + <nl> + testPipeline ( <nl> + [ <nl> + { $ sort : { x : - 1 } } , <nl> + { $ skip : NumberLong ( " 9223372036854775807 " ) } , <nl> + { $ limit : NumberLong ( " 9223372036854775807 " ) } <nl> + ] , <nl> + { <nl> + $ limit : { path : " $ limit " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } <nl> + } , <nl> + [ " $ skip " , " $ sort " ] ) ; <nl> + <nl> / / Case where sample size is > MAX_LONG . <nl> testPipeline ( [ { $ sample : { size : 18446744073709552000 } } ] , <nl> { $ sample : { path : " $ sample . size " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } } ) ; <nl> testPipeline ( [ { $ sample : { size : NumberLong ( " 9223372036854775807 " ) } } ] , <nl> / / Case where sample size is = = MAX_LONG - 1 . <nl> testPipeline ( [ { $ sample : { size : NumberLong ( " 9223372036854775806 " ) } } ] , <nl> { $ sample : { path : " $ sample . size " , expectedValue : [ NumberLong ( " 9223372036854775806 " ) ] } } ) ; <nl> + <nl> + / / Case where we omit $ skip stage causing overflow from the pushdown and continue to sum $ skip <nl> + / / stages after it . <nl> + testPipeline ( <nl> + [ <nl> + { $ sort : { x : - 1 } } , <nl> + { $ skip : NumberLong ( " 1 " ) } , <nl> + { $ skip : NumberLong ( " 2 " ) } , <nl> + { $ skip : NumberLong ( " 9223372036854775807 " ) } , <nl> + { $ skip : NumberLong ( " 3 " ) } , <nl> + { $ skip : NumberLong ( " 4 " ) } , <nl> + ] , <nl> + { <nl> + $ skip : { path : " $ skip " , expectedValue : [ NumberLong ( " 9223372036854775807 " ) ] } , <nl> + SKIP : { path : " skipAmount " , expectedValue : [ 10 ] } <nl> + } , <nl> + [ " $ sort " ] ) ; <nl> } ) ( ) ; <nl> deleted file mode 100644 <nl> index 3644282b3c8b . . 000000000000 <nl> mmm a / jstests / aggregation / extras / limitskip . js <nl> ppp / dev / null <nl> <nl> - <nl> - var coll = " numbers " ; <nl> - <nl> - db [ coll ] . drop ( ) ; <nl> - for ( i = 0 ; i < 100 ; i + + ) { <nl> - db [ coll ] . save ( { _id : i , mod : [ i % 2 , i % 3 , i % 5 ] } ) ; <nl> - } <nl> - <nl> - print ( " mmm - - LIMITmmm - - " ) ; <nl> - <nl> - print ( " normal limit " ) ; <nl> - var doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ limit : 2 } ] } ) ; <nl> - assert . eq ( doc . result . length , 2 , tojson ( doc ) ) ; <nl> - <nl> - print ( " limit larger than result size " ) ; <nl> - doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ limit : 200 } ] } ) ; <nl> - assert . eq ( doc . result . length , 100 , tojson ( doc ) ) ; <nl> - <nl> - print ( " limit on sort " ) ; <nl> - doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ sort : { _id : - 1 } } , { $ limit : 3 } ] } ) ; <nl> - r = doc . result ; <nl> - assert . eq ( doc . result . length , 3 ) ; <nl> - for ( var i = 0 ; i < r ; i + + ) { <nl> - assert . eq ( 100 - r [ i ] . 
_id , i , tojson ( doc ) ) ; <nl> - } <nl> - <nl> - print ( " TODO : invalid limit " ) ; / / once assert has been replaced with uassert <nl> - <nl> - print ( " mmm - - SKIPmmmmmm " ) ; <nl> - <nl> - print ( " normal skip " ) ; <nl> - doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ skip : 95 } ] } ) ; <nl> - assert . eq ( doc . result . length , 5 , tojson ( doc ) ) ; <nl> - <nl> - print ( " skip larger than result size " ) ; <nl> - doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ skip : 102 } ] } ) ; <nl> - assert . eq ( doc . result . length , 0 , tojson ( doc ) ) ; <nl> - <nl> - print ( " check skip results " ) ; <nl> - doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ sort : { _id : 1 } } , { $ skip : 6 } , { $ limit : 3 } ] } ) ; <nl> - assert . eq ( doc . result . length , 3 , tojson ( doc ) ) ; <nl> - for ( var i = 0 ; i < 3 ; i + + ) { <nl> - assert . eq ( i + 6 , doc . result [ i ] . _id , tojson ( doc ) ) ; <nl> - } <nl> - <nl> - print ( " TODO : invalid skip " ) ; / / once assert has been replaced with uassert <nl> - <nl> - print ( " on virtual collection " ) ; <nl> - doc = db . runCommand ( { <nl> - aggregate : coll , <nl> - pipeline : [ <nl> - { $ unwind : " $ mod " } , <nl> - { $ project : { m : " $ mod " } } , <nl> - { $ sort : { m : 1 , _id : - 1 } } , <nl> - { $ skip : 150 } , <nl> - { $ limit : 5 } <nl> - ] <nl> - } ) ; <nl> - <nl> - assert . eq ( doc . result . length , 5 ) ; <nl> - for ( var i = 0 ; i < 5 ; i + + ) { <nl> - assert . eq ( 1 , doc . result [ i ] . m , tojson ( doc ) ) ; <nl> - } <nl> - assert . eq ( doc . result [ 0 ] . _id , 55 , tojson ( doc ) ) ; <nl> - assert . eq ( doc . result [ 1 ] . _id , 53 , tojson ( doc ) ) ; <nl> - assert . eq ( doc . result [ 2 ] . _id , 52 , tojson ( doc ) ) ; <nl> - assert . eq ( doc . result [ 3 ] . _id , 51 , tojson ( doc ) ) ; <nl> - assert . eq ( doc . result [ 4 ] . _id , 51 , tojson ( doc ) ) ; <nl> - <nl> - print ( " size 0 collection " ) ; <nl> - db [ coll ] . drop ( ) ; <nl> - <nl> - doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ skip : 6 } ] } ) ; <nl> - assert . eq ( doc . ok , 1 ) ; <nl> - assert . eq ( doc . result . length , 0 ) ; <nl> - <nl> - doc = db . runCommand ( { aggregate : coll , pipeline : [ { $ limit : 3 } ] } ) ; <nl> - assert . eq ( doc . ok , 1 ) ; <nl> - assert . eq ( doc . result . length , 0 ) ; <nl> mmm a / jstests / aggregation / optimize_away_pipeline . js <nl> ppp b / jstests / aggregation / optimize_away_pipeline . js <nl> assertPipelineDoesNotUseAggregation ( { <nl> } ) ; <nl> assert . commandWorked ( coll . deleteOne ( { _id : 4 } ) ) ; <nl> <nl> - / / Pipelines which cannot be optimized away . <nl> - <nl> - / / TODO SERVER - 40909 : $ skip stage is not supported yet . <nl> - assertPipelineUsesAggregation ( { <nl> + assertPipelineDoesNotUseAggregation ( { <nl> pipeline : [ { $ match : { x : { $ gte : 20 } } } , { $ skip : 1 } ] , <nl> - expectedStages : [ " COLLSCAN " ] , <nl> + expectedStages : [ " COLLSCAN " , " SKIP " ] , <nl> expectedResult : [ { _id : 3 , x : 30 } ] <nl> } ) ; <nl> + <nl> + / / Pipelines which cannot be optimized away . <nl> + <nl> / / We cannot optimize away a pipeline if there are stages which have no equivalent in the <nl> / / find command . <nl> assertPipelineUsesAggregation ( { <nl> let limitStage = getAggPlanStage ( explain , " LIMIT " ) ; <nl> assert . neq ( null , limitStage , explain ) ; <nl> assert . eq ( 1 , limitStage . 
limitAmount , explain ) ; <nl> <nl> - / / We can optimize away interleaved $ limit and $ skip after a project . The $ limits can be collapsed <nl> - / / into a single $ limit : 35 prior to the $ skip stages . We currently do not push down $ skip into the <nl> - / / PlanStage layer ( see SERVER - 40909 ) , which prevents this pipeline from being entirely optimized <nl> - / / away . <nl> + / / We can optimize away interleaved $ limit and $ skip after a project . <nl> pipeline = [ <nl> { $ match : { x : { $ gte : 0 } } } , <nl> { $ project : { _id : 0 , x : 1 } } , <nl> pipeline = [ <nl> { $ skip : 10 } , <nl> { $ limit : 7 } <nl> ] ; <nl> - assertPipelineUsesAggregation ( { <nl> + assertPipelineDoesNotUseAggregation ( { <nl> pipeline : pipeline , <nl> - expectedStages : [ " IXSCAN " , " PROJECTION_COVERED " , " LIMIT " ] , <nl> - optimizedAwayStages : [ " $ match " , " $ limit " ] , <nl> + expectedStages : [ " IXSCAN " , " PROJECTION_COVERED " , " LIMIT " , " SKIP " ] , <nl> + optimizedAwayStages : [ " $ match " , " $ limit " , " $ skip " ] , <nl> } ) ; <nl> explain = coll . explain ( ) . aggregate ( pipeline ) ; <nl> + <nl> + let skipStage = getAggPlanStage ( explain , " SKIP " ) ; <nl> + assert . neq ( null , skipStage , explain ) ; <nl> + assert . eq ( 30 , skipStage . skipAmount , explain ) ; <nl> + <nl> limitStage = getAggPlanStage ( explain , " LIMIT " ) ; <nl> assert . neq ( null , limitStage , explain ) ; <nl> - assert . eq ( 35 , limitStage . limitAmount , explain ) ; <nl> - let skipStage = getAggPlanStage ( explain , " $ skip " ) ; <nl> - assert . neq ( null , skipStage , explain ) ; <nl> - assert . eq ( 30 , skipStage . $ skip , explain ) ; <nl> + assert . eq ( 5 , limitStage . limitAmount , explain ) ; <nl> <nl> assert . commandWorked ( coll . dropIndexes ( ) ) ; <nl> <nl> mmm a / jstests / aggregation / sources / match / skip_with_limit . js <nl> ppp b / jstests / aggregation / sources / match / skip_with_limit . js <nl> const bulk = coll . initializeOrderedBulkOp ( ) ; <nl> Array . from ( { length : 20 } , ( _ , i ) = > ( { x : 4 , y : i } ) ) . forEach ( doc = > bulk . insert ( doc ) ) ; <nl> assert . commandWorked ( bulk . execute ( ) ) ; <nl> <nl> + / / Test pipelines with $ skip before $ limit . <nl> var count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 10 } , { $ limit : 5 } ] ) . itcount ( ) ; <nl> assert . eq ( count , 5 ) ; <nl> <nl> count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 7 } , { $ skip : 3 } , { $ limit : 5 } ] ) . itcount ( ) ; <nl> assert . eq ( count , 5 ) ; <nl> <nl> - count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ limit : 10 } , { $ skip : 5 } ] ) . itcount ( ) ; <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 10 } , { $ limit : 5 } ] ) . itcount ( ) ; <nl> assert . eq ( count , 5 ) ; <nl> <nl> count = <nl> count = <nl> coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 10 } , { $ group : { _id : ' $ y ' } } , { $ limit : 5 } ] ) . itcount ( ) ; <nl> assert . eq ( count , 5 ) ; <nl> <nl> + / / Test pipelines with $ limit before $ skip . <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ limit : 10 } , { $ skip : 5 } ] ) . itcount ( ) ; <nl> + assert . eq ( count , 5 ) ; <nl> + <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ limit : 7 } , { $ limit : 3 } , { $ skip : 1 } ] ) . itcount ( ) ; <nl> + assert . eq ( count , 2 ) ; <nl> + <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ limit : 10 } , { $ skip : 5 } ] ) . 
itcount ( ) ; <nl> + assert . eq ( count , 5 ) ; <nl> + <nl> + count = <nl> + coll . aggregate ( [ { $ match : { x : 4 } } , { $ limit : 10 } , { $ addFields : { y : 1 } } , { $ skip : 5 } ] ) . itcount ( ) ; <nl> + assert . eq ( count , 5 ) ; <nl> + <nl> + count = <nl> + coll . aggregate ( [ { $ match : { x : 4 } } , { $ limit : 10 } , { $ group : { _id : ' $ y ' } } , { $ skip : 5 } ] ) . itcount ( ) ; <nl> + assert . eq ( count , 5 ) ; <nl> + <nl> / / For the pipelines with a $ skip before the $ limit , repeat the tests with larger skip values to <nl> / / ensure that the skip is actually working . The large skips exhaust our 20 documents , so we get <nl> / / fewer results . <nl> assert . eq ( count , 2 ) ; <nl> count = <nl> coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 18 } , { $ group : { _id : ' $ y ' } } , { $ limit : 5 } ] ) . itcount ( ) ; <nl> assert . eq ( count , 2 ) ; <nl> + <nl> + / / Now add some pipelines that have multiple consecutive skips to test that our logic to swap a <nl> + / / limit in front of a skip adds the correct total to the limit . For example , in the first test the <nl> + / / limit should end up being 23 . Here we also throw in some tests with $ sort stages , because $ sort <nl> + / / stages will try to pull limits forward . <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ sort : { x : 1 } } , { $ skip : 10 } , { $ skip : 8 } , { $ limit : 5 } ] ) <nl> + . itcount ( ) ; <nl> + assert . eq ( count , 2 ) ; <nl> + <nl> + count = <nl> + coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 5 } , { $ limit : 10 } , { $ skip : 5 } , { $ limit : 4 } ] ) . itcount ( ) ; <nl> + assert . eq ( count , 4 ) ; <nl> + <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 7 } , { $ skip : 4 } , { $ limit : 4 } ] ) . itcount ( ) ; <nl> + assert . eq ( count , 4 ) ; <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ sort : { y : - 1 } } , { $ skip : 7 } , { $ skip : 4 } , { $ limit : 4 } ] ) <nl> + . itcount ( ) ; <nl> + assert . eq ( count , 4 ) ; <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ skip : 7 } , { $ skip : 10 } , { $ limit : 4 } ] ) . itcount ( ) ; <nl> + assert . eq ( count , 3 ) ; <nl> + count = coll . aggregate ( [ { $ match : { x : 4 } } , { $ sort : { y : - 1 } } , { $ skip : 7 } , { $ skip : 10 } , { $ limit : 4 } ] ) <nl> + . itcount ( ) ; <nl> + assert . eq ( count , 3 ) ; <nl> + <nl> + / / Prevent the $ sort stage from being pushed down to the find layer and check that the code that <nl> + / / folds $ limit stages in this case respects the values from $ skip stages . <nl> + count = coll . aggregate ( [ <nl> + { $ match : { x : 4 } } , <nl> + { $ _internalInhibitOptimization : { } } , <nl> + { $ sort : { x : 1 } } , <nl> + { $ skip : 10 } , <nl> + { $ skip : 8 } , <nl> + { $ limit : 5 } <nl> + ] ) <nl> + . itcount ( ) ; <nl> + assert . eq ( count , 2 ) ; <nl> + <nl> + count = coll . aggregate ( [ <nl> + { $ match : { x : 4 } } , <nl> + { $ _internalInhibitOptimization : { } } , <nl> + { $ sort : { y : - 1 } } , <nl> + { $ skip : 7 } , <nl> + { $ skip : 4 } , <nl> + { $ limit : 4 } <nl> + ] ) <nl> + . itcount ( ) ; <nl> + assert . eq ( count , 4 ) ; <nl> + <nl> + count = coll . aggregate ( [ <nl> + { $ match : { x : 4 } } , <nl> + { $ _internalInhibitOptimization : { } } , <nl> + { $ sort : { y : - 1 } } , <nl> + { $ skip : 7 } , <nl> + { $ skip : 10 } , <nl> + { $ limit : 4 } <nl> + ] ) <nl> + . itcount ( ) ; <nl> + assert .
eq ( count , 3 ) ; <nl> } ( ) ) ; <nl> mmm a / src / mongo / db / pipeline / SConscript <nl> ppp b / src / mongo / db / pipeline / SConscript <nl> pipelineEnv . Library ( <nl> ' pipeline . cpp ' , <nl> ' semantic_analysis . cpp ' , <nl> ' sequential_document_cache . cpp ' , <nl> + ' skip_and_limit . cpp ' , <nl> ' tee_buffer . cpp ' , <nl> ] , <nl> LIBDEPS = [ <nl> env . CppUnitTest ( <nl> ' semantic_analysis_test . cpp ' , <nl> ' sequential_document_cache_test . cpp ' , <nl> ' sharded_union_test . cpp ' , <nl> + ' skip_and_limit_test . cpp ' , <nl> ' tee_buffer_test . cpp ' , <nl> ] , <nl> LIBDEPS = [ <nl> mmm a / src / mongo / db / pipeline / document_source . cpp <nl> ppp b / src / mongo / db / pipeline / document_source . cpp <nl> bool DocumentSource : : pushMatchBefore ( Pipeline : : SourceContainer : : iterator itr , <nl> bool DocumentSource : : pushSampleBefore ( Pipeline : : SourceContainer : : iterator itr , <nl> Pipeline : : SourceContainer * container ) { <nl> auto nextSample = dynamic_cast < DocumentSourceSample * > ( ( * std : : next ( itr ) ) . get ( ) ) ; <nl> - if ( constraints ( ) . canSwapWithLimitAndSample & & nextSample ) { <nl> + if ( constraints ( ) . canSwapWithSkippingOrLimitingStage & & nextSample ) { <nl> <nl> container - > insert ( itr , std : : move ( nextSample ) ) ; <nl> container - > erase ( std : : next ( itr ) ) ; <nl> mmm a / src / mongo / db / pipeline / document_source_lookup . cpp <nl> ppp b / src / mongo / db / pipeline / document_source_lookup . cpp <nl> StageConstraints DocumentSourceLookUp : : constraints ( Pipeline : : SplitState ) const { <nl> } <nl> <nl> constraints . canSwapWithMatch = true ; <nl> - constraints . canSwapWithLimitAndSample = ! _unwindSrc ; <nl> + constraints . canSwapWithSkippingOrLimitingStage = ! _unwindSrc ; <nl> <nl> return constraints ; <nl> } <nl> mmm a / src / mongo / db / pipeline / document_source_single_document_transformation . h <nl> ppp b / src / mongo / db / pipeline / document_source_single_document_transformation . h <nl> class DocumentSourceSingleDocumentTransformation final : public DocumentSource { <nl> UnionRequirement : : kAllowed , <nl> ChangeStreamRequirement : : kWhitelist ) ; <nl> constraints . canSwapWithMatch = true ; <nl> - constraints . canSwapWithLimitAndSample = true ; <nl> + constraints . canSwapWithSkippingOrLimitingStage = true ; <nl> constraints . isAllowedWithinUpdatePipeline = true ; <nl> / / This transformation could be part of a ' collectionless ' change stream on an entire <nl> / / database or cluster , mark as independent of any collection if so . <nl> mmm a / src / mongo / db / pipeline / document_source_sort . cpp <nl> ppp b / src / mongo / db / pipeline / document_source_sort . cpp <nl> <nl> <nl> # include < algorithm > <nl> <nl> - # include " mongo / base / exact_cast . h " <nl> # include " mongo / db / exec / document_value / document . h " <nl> # include " mongo / db / exec / document_value / document_comparator . h " <nl> # include " mongo / db / exec / document_value / value . h " <nl> # include " mongo / db / jsobj . h " <nl> - # include " mongo / db / pipeline / document_source_skip . h " <nl> # include " mongo / db / pipeline / expression . h " <nl> # include " mongo / db / pipeline / expression_context . h " <nl> # include " mongo / db / pipeline / lite_parsed_document_source . h " <nl> + # include " mongo / db / pipeline / skip_and_limit . h " <nl> # include " mongo / db / query / collation / collation_index_key . h " <nl> # include " mongo / platform / overflow_arithmetic . 
h " <nl> # include " mongo / s / query / document_source_merge_cursors . h " <nl> boost : : optional < long long > DocumentSourceSort : : getLimit ( ) const { <nl> : boost : : none ; <nl> } <nl> <nl> - boost : : optional < long long > DocumentSourceSort : : extractLimitForPushdown ( <nl> - Pipeline : : SourceContainer : : iterator itr , Pipeline : : SourceContainer * container ) { <nl> - int64_t skipSum = 0 ; <nl> - boost : : optional < long long > minLimit ; <nl> - while ( itr ! = container - > end ( ) ) { <nl> - auto nextStage = ( * itr ) . get ( ) ; <nl> - auto nextSkip = exact_pointer_cast < DocumentSourceSkip * > ( nextStage ) ; <nl> - auto nextLimit = exact_pointer_cast < DocumentSourceLimit * > ( nextStage ) ; <nl> - int64_t safeSum = 0 ; <nl> - <nl> - / / The skip and limit values can be very large , so we need to make sure the sum doesn ' t <nl> - / / overflow before applying an optimization to swap the $ limit with the $ skip . <nl> - if ( nextSkip & & ! overflow : : add ( skipSum , nextSkip - > getSkip ( ) , & safeSum ) ) { <nl> - skipSum = safeSum ; <nl> - + + itr ; <nl> - } else if ( nextLimit & & ! overflow : : add ( nextLimit - > getLimit ( ) , skipSum , & safeSum ) ) { <nl> - if ( ! minLimit ) { <nl> - minLimit = safeSum ; <nl> - } else { <nl> - minLimit = std : : min ( static_cast < long long > ( safeSum ) , * minLimit ) ; <nl> - } <nl> - <nl> - itr = container - > erase ( itr ) ; <nl> - / / If the removed stage wasn ' t the last in the pipeline , make sure that the stage <nl> - / / followed the erased stage has a valid pointer to the previous document source . <nl> - if ( itr ! = container - > end ( ) ) { <nl> - ( * itr ) - > setSource ( itr ! = container - > begin ( ) ? std : : prev ( itr ) - > get ( ) : nullptr ) ; <nl> - } <nl> - } else if ( ! nextStage - > constraints ( ) . canSwapWithLimitAndSample ) { <nl> - break ; <nl> - } else { <nl> - + + itr ; <nl> - } <nl> - } <nl> - <nl> - return minLimit ; <nl> - } <nl> - <nl> Pipeline : : SourceContainer : : iterator DocumentSourceSort : : doOptimizeAt ( <nl> Pipeline : : SourceContainer : : iterator itr , Pipeline : : SourceContainer * container ) { <nl> invariant ( * itr = = this ) ; <nl> <nl> auto stageItr = std : : next ( itr ) ; <nl> auto limit = extractLimitForPushdown ( stageItr , container ) ; <nl> - if ( limit ) { <nl> + if ( limit ) <nl> _sortExecutor - > setLimit ( * limit ) ; <nl> - } <nl> <nl> auto nextStage = std : : next ( itr ) ; <nl> if ( nextStage = = container - > end ( ) ) { <nl> mmm a / src / mongo / db / pipeline / document_source_sort . h <nl> ppp b / src / mongo / db / pipeline / document_source_sort . h <nl> class DocumentSourceSort final : public DocumentSource { <nl> public : <nl> static constexpr StringData kStageName = " $ sort " _sd ; <nl> <nl> - / * * <nl> - * If there are any $ limit stages that could be logically swapped forward to the position of the <nl> - * pipeline pointed to by ' itr ' without changing the meaning of the query , removes these $ limit <nl> - * stages from the Pipeline and returns the resulting limit . A single limit value is computed by <nl> - * taking the minimum after swapping each individual $ limit stage forward . <nl> - * <nl> - * This method also implements the ability to swap a $ limit before a $ skip , by adding the value <nl> - * of the $ skip to the value of the $ limit . 
<nl> - * / <nl> - static boost : : optional < long long > extractLimitForPushdown ( <nl> - Pipeline : : SourceContainer : : iterator itr , Pipeline : : SourceContainer * container ) ; <nl> - <nl> const char * getSourceName ( ) const final { <nl> return kStageName . rawData ( ) ; <nl> } <nl> mmm a / src / mongo / db / pipeline / pipeline_d . cpp <nl> ppp b / src / mongo / db / pipeline / pipeline_d . cpp <nl> <nl> # include " mongo / db / pipeline / document_source_single_document_transformation . h " <nl> # include " mongo / db / pipeline / document_source_sort . h " <nl> # include " mongo / db / pipeline / pipeline . h " <nl> + # include " mongo / db / pipeline / skip_and_limit . h " <nl> # include " mongo / db / query / collation / collator_interface . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / plan_executor_factory . h " <nl> StatusWith < std : : unique_ptr < PlanExecutor , PlanExecutor : : Deleter > > attemptToGetExe <nl> BSONObj projectionObj , <nl> const QueryMetadataBitSet & metadataRequested , <nl> BSONObj sortObj , <nl> - boost : : optional < long long > limit , <nl> + SkipThenLimit skipThenLimit , <nl> boost : : optional < std : : string > groupIdForDistinctScan , <nl> const AggregationRequest * aggRequest , <nl> const size_t plannerOpts , <nl> StatusWith < std : : unique_ptr < PlanExecutor , PlanExecutor : : Deleter > > attemptToGetExe <nl> qr - > setFilter ( queryObj ) ; <nl> qr - > setProj ( projectionObj ) ; <nl> qr - > setSort ( sortObj ) ; <nl> - qr - > setLimit ( limit ) ; <nl> + qr - > setSkip ( skipThenLimit . getSkip ( ) ) ; <nl> + qr - > setLimit ( skipThenLimit . getLimit ( ) ) ; <nl> if ( aggRequest ) { <nl> qr - > setExplain ( static_cast < bool > ( aggRequest - > getExplain ( ) ) ) ; <nl> qr - > setHint ( aggRequest - > getHint ( ) ) ; <nl> getSortAndGroupStagesFromPipeline ( const Pipeline : : SourceContainer & sources ) { <nl> return std : : make_pair ( sortStage , groupStage ) ; <nl> } <nl> <nl> - boost : : optional < long long > extractLimitForPushdown ( Pipeline * pipeline ) { <nl> - / / If the disablePipelineOptimization failpoint is enabled , then do not attempt the limit <nl> + boost : : optional < long long > extractSkipForPushdown ( Pipeline * pipeline ) { <nl> + / / If the disablePipelineOptimization failpoint is enabled , then do not attempt the skip <nl> / / pushdown optimization . <nl> if ( MONGO_unlikely ( disablePipelineOptimization . shouldFail ( ) ) ) { <nl> return boost : : none ; <nl> } <nl> auto & & sources = pipeline - > getSources ( ) ; <nl> - auto limit = DocumentSourceSort : : extractLimitForPushdown ( sources . begin ( ) , & sources ) ; <nl> - if ( limit ) { <nl> - / / Removing $ limit stages may have produced the opportunity for additional optimizations . <nl> + <nl> + auto skip = extractSkipForPushdown ( sources . begin ( ) , & sources ) ; <nl> + if ( skip ) { <nl> + / / Removing stages may have produced the opportunity for additional optimizations . <nl> pipeline - > optimizePipeline ( ) ; <nl> } <nl> - return limit ; <nl> + return skip ; <nl> + } <nl> + <nl> + SkipThenLimit extractSkipAndLimitForPushdown ( Pipeline * pipeline ) { <nl> + / / If the disablePipelineOptimization failpoint is enabled , then do not attempt the limit and <nl> + / / skip pushdown optimization . <nl> + if ( MONGO_unlikely ( disablePipelineOptimization . 
shouldFail ( ) ) ) { <nl> + return { boost : : none , boost : : none } ; <nl> + } <nl> + auto & & sources = pipeline - > getSources ( ) ; <nl> + <nl> + / / It is important to call ' extractLimitForPushdown ' before ' extractSkipForPushdown ' . Otherwise <nl> + / / there could be a situation where $ limit stages in the pipeline would prevent <nl> + / / ' extractSkipForPushdown ' from extracting all $ skip stages . <nl> + auto limit = extractLimitForPushdown ( sources . begin ( ) , & sources ) ; <nl> + auto skip = extractSkipForPushdown ( sources . begin ( ) , & sources ) ; <nl> + auto skipThenLimit = LimitThenSkip ( limit , skip ) . flip ( ) ; <nl> + if ( skipThenLimit . getSkip ( ) | | skipThenLimit . getLimit ( ) ) { <nl> + / / Removing stages may have produced the opportunity for additional optimizations . <nl> + pipeline - > optimizePipeline ( ) ; <nl> + } <nl> + return skipThenLimit ; <nl> } <nl> <nl> / * * <nl> PipelineD : : buildInnerQueryExecutorGeneric ( const CollectionPtr & collection , <nl> rewrittenGroupStage = groupStage - > rewriteGroupAsTransformOnFirstDocument ( ) ; <nl> } <nl> <nl> - / / If there is a $ limit stage ( or multiple $ limit stages ) that could be pushed down into the <nl> - / / PlanStage layer , obtain the value of the limit and remove the $ limit stages from the <nl> - / / pipeline . <nl> + / / If there is a $ limit or $ skip stage ( or multiple of them ) that could be pushed down into the <nl> + / / PlanStage layer , obtain the value of the limit and skip and remove the $ limit and $ skip <nl> + / / stages from the pipeline . <nl> / / <nl> / / This analysis is done here rather than in ' optimizePipeline ( ) ' because swapping $ limit before <nl> / / stages such as $ project is not always useful , and can sometimes defeat other optimizations . <nl> PipelineD : : buildInnerQueryExecutorGeneric ( const CollectionPtr & collection , <nl> / / merging shard first , and then apply the projection serially . See SERVER - 24981 for a more <nl> / / detailed discussion . <nl> / / <nl> - / / This only handles the case in which the the $ limit can logically be swapped to the front of <nl> - / / the pipeline . We can also push down a $ limit which comes after a $ sort into the PlanStage <nl> - / / layer , but that is handled elsewhere . <nl> - const auto limit = extractLimitForPushdown ( pipeline ) ; <nl> + / / This only handles the case in which the $ limit or $ skip can logically be swapped to the <nl> + / / front of the pipeline . We can also push down a $ limit which comes after a $ sort into the <nl> + / / PlanStage layer , but that is handled elsewhere . <nl> + const auto skipThenLimit = extractSkipAndLimitForPushdown ( pipeline ) ; <nl> <nl> auto unavailableMetadata = DocumentSourceMatch : : isTextQuery ( queryObj ) <nl> ? 
DepsTracker : : kDefaultUnavailableMetadata & ~ DepsTracker : : kOnlyTextScore <nl> PipelineD : : buildInnerQueryExecutorGeneric ( const CollectionPtr & collection , <nl> std : : move ( rewrittenGroupStage ) , <nl> unavailableMetadata , <nl> queryObj , <nl> - limit , <nl> + skipThenLimit , <nl> aggRequest , <nl> Pipeline : : kAllowedMatcherFeatures , <nl> & shouldProduceEmptyDocs ) ) ; <nl> PipelineD : : buildInnerQueryExecutorGeoNear ( const CollectionPtr & collection , <nl> nullptr , / * rewrittenGroupStage * / <nl> DepsTracker : : kDefaultUnavailableMetadata & ~ DepsTracker : : kAllGeoNearData , <nl> std : : move ( fullQuery ) , <nl> - boost : : none , / * limit * / <nl> + SkipThenLimit { boost : : none , boost : : none } , <nl> aggRequest , <nl> Pipeline : : kGeoNearMatcherFeatures , <nl> & shouldProduceEmptyDocs ) ) ; <nl> StatusWith < std : : unique_ptr < PlanExecutor , PlanExecutor : : Deleter > > PipelineD : : prep <nl> std : : unique_ptr < GroupFromFirstDocumentTransformation > rewrittenGroupStage , <nl> QueryMetadataBitSet unavailableMetadata , <nl> const BSONObj & queryObj , <nl> - boost : : optional < long long > limit , <nl> + SkipThenLimit skipThenLimit , <nl> const AggregationRequest * aggRequest , <nl> const MatchExpressionParser : : AllowedFeatureSet & matcherFeatures , <nl> bool * hasNoRequirements ) { <nl> StatusWith < std : : unique_ptr < PlanExecutor , PlanExecutor : : Deleter > > PipelineD : : prep <nl> . serialize ( SortPattern : : SortKeySerialization : : kForPipelineSerialization ) <nl> . toBson ( ) ; <nl> <nl> - / / If the $ sort has a coalesced $ limit , then we push it down as well . Since the $ limit was <nl> - / / after a $ sort in the pipeline , it should not have been provided by the caller . <nl> - invariant ( ! limit ) ; <nl> - limit = sortStage - > getLimit ( ) ; <nl> - <nl> pipeline - > popFrontWithName ( DocumentSourceSort : : kStageName ) ; <nl> + <nl> + / / Now that we ' ve pushed down the sort , see if there is a $ limit and $ skip to push down <nl> + / / also . We should not already have a limit or skip here , otherwise it would be incorrect <nl> + / / for the caller to pass us a sort stage to push down , since the order matters . <nl> + invariant ( ! skipThenLimit . getLimit ( ) ) ; <nl> + invariant ( ! skipThenLimit . getSkip ( ) ) ; <nl> + <nl> + / / Since all $ limit stages were already pushed down to the sort stage , we are only looking <nl> + / / for $ skip stages . <nl> + auto skip = extractSkipForPushdown ( pipeline ) ; <nl> + <nl> + / / Since the limit from $ sort is going before the extracted $ skip stages , we construct a <nl> + / / ' LimitThenSkip ' object and then convert it to ' SkipThenLimit ' . <nl> + skipThenLimit = LimitThenSkip ( sortStage - > getLimit ( ) , skip ) . flip ( ) ; <nl> } <nl> <nl> / / Perform dependency analysis . In order to minimize the dependency set , we only analyze the <nl> StatusWith < std : : unique_ptr < PlanExecutor , PlanExecutor : : Deleter > > PipelineD : : prep <nl> projObj , <nl> deps . metadataDeps ( ) , <nl> sortObj , <nl> - boost : : none , / * limit * / <nl> + SkipThenLimit { boost : : none , boost : : none } , <nl> rewrittenGroupStage - > groupId ( ) , <nl> aggRequest , <nl> plannerOpts , <nl> StatusWith < std : : unique_ptr < PlanExecutor , PlanExecutor : : Deleter > > PipelineD : : prep <nl> projObj , <nl> deps . 
metadataDeps ( ) , <nl> sortObj , <nl> - limit , <nl> + skipThenLimit , <nl> boost : : none , / * groupIdForDistinctScan * / <nl> aggRequest , <nl> plannerOpts , <nl> mmm a / src / mongo / db / pipeline / pipeline_d . h <nl> ppp b / src / mongo / db / pipeline / pipeline_d . h <nl> class DocumentSourceCursor ; <nl> class DocumentSourceMatch ; <nl> class DocumentSourceSort ; <nl> class ExpressionContext ; <nl> + class SkipThenLimit ; <nl> class OperationContext ; <nl> class Pipeline ; <nl> struct PlanSummaryStats ; <nl> class PipelineD { <nl> std : : unique_ptr < GroupFromFirstDocumentTransformation > rewrittenGroupStage , <nl> QueryMetadataBitSet metadataAvailable , <nl> const BSONObj & queryObj , <nl> - boost : : optional < long long > limit , <nl> + SkipThenLimit skipThenLimit , <nl> const AggregationRequest * aggRequest , <nl> const MatchExpressionParser : : AllowedFeatureSet & matcherFeatures , <nl> bool * hasNoRequirements ) ; <nl> mmm a / src / mongo / db / pipeline / sharded_agg_helpers . cpp <nl> ppp b / src / mongo / db / pipeline / sharded_agg_helpers . cpp <nl> boost : : optional < long long > getPipelineLimit ( Pipeline * pipeline ) { <nl> / / If this stage is one that can swap with a $ limit stage , then we can look at the previous <nl> / / stage to see if it includes a limit . Otherwise , we give up trying to find a limit on this <nl> / / stage ' s output . <nl> - if ( ! source - > constraints ( ) . canSwapWithLimitAndSample ) { <nl> + if ( ! source - > constraints ( ) . canSwapWithSkippingOrLimitingStage ) { <nl> break ; <nl> } <nl> } <nl> void propagateDocLimitToShards ( Pipeline * shardPipe , Pipeline * mergePipe ) { <nl> / / If there are any stages in the merge pipeline before the $ skip and $ limit stages , then we <nl> / / cannot use the $ limit to determine an upper bound , unless those stages could be swapped <nl> / / with the $ limit . <nl> - if ( ! source - > constraints ( ) . canSwapWithLimitAndSample ) { <nl> + if ( ! source - > constraints ( ) . canSwapWithSkippingOrLimitingStage ) { <nl> return ; <nl> } <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . 6923699937d2 <nl> mmm / dev / null <nl> ppp b / src / mongo / db / pipeline / skip_and_limit . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2018 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . 
If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / platform / basic . h " <nl> + <nl> + # include " mongo / base / exact_cast . h " <nl> + # include " mongo / db / pipeline / document_source_limit . h " <nl> + # include " mongo / db / pipeline / document_source_skip . h " <nl> + # include " mongo / db / pipeline / skip_and_limit . h " <nl> + # include " mongo / platform / overflow_arithmetic . h " <nl> + <nl> + namespace mongo { <nl> + <nl> + boost : : optional < long long > SkipAndLimit : : getLimit ( ) const { <nl> + return _limit ; <nl> + } <nl> + <nl> + boost : : optional < long long > SkipAndLimit : : getSkip ( ) const { <nl> + return _skip ; <nl> + } <nl> + <nl> + SkipThenLimit : : SkipThenLimit ( boost : : optional < long long > skip , boost : : optional < long long > limit ) { <nl> + _skip = skip ; <nl> + _limit = limit ; <nl> + } <nl> + <nl> + LimitThenSkip : : LimitThenSkip ( boost : : optional < long long > limit , boost : : optional < long long > skip ) { <nl> + _limit = limit ; <nl> + / / We cannot skip more documents than we receive after applying the limit . So if both limit and <nl> + / / skip are defined , the skip size must not be greater than the limit size . <nl> + if ( skip ) { <nl> + _skip = std : : min ( * skip , limit . get_value_or ( std : : numeric_limits < long long > : : max ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + SkipThenLimit LimitThenSkip : : flip ( ) const { <nl> + if ( _limit ) { <nl> + return { _skip , * _limit - _skip . get_value_or ( 0 ) } ; <nl> + } <nl> + <nl> + return { _skip , boost : : none } ; <nl> + } <nl> + <nl> + namespace { <nl> + <nl> + Pipeline : : SourceContainer : : iterator eraseAndStich ( Pipeline : : SourceContainer : : iterator itr , <nl> + Pipeline : : SourceContainer * container ) { <nl> + itr = container - > erase ( itr ) ; <nl> + / / If the removed stage wasn ' t the last in the pipeline , make sure that the stage following the <nl> + / / erased stage has a valid pointer to the previous document source . <nl> + if ( itr ! = container - > end ( ) ) { <nl> + ( * itr ) - > setSource ( itr ! = container - > begin ( ) ? std : : prev ( itr ) - > get ( ) : nullptr ) ; <nl> + } <nl> + return itr ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + boost : : optional < long long > extractLimitForPushdown ( Pipeline : : SourceContainer : : iterator itr , <nl> + Pipeline : : SourceContainer * container ) { <nl> + int64_t skipSum = 0 ; <nl> + boost : : optional < long long > minLimit ; <nl> + while ( itr ! = container - > end ( ) ) { <nl> + auto nextStage = itr - > get ( ) ; <nl> + auto nextSkip = exact_pointer_cast < DocumentSourceSkip * > ( nextStage ) ; <nl> + auto nextLimit = exact_pointer_cast < DocumentSourceLimit * > ( nextStage ) ; <nl> + int64_t safeSum = 0 ; <nl> + <nl> + / / The skip and limit values can be very large , so we need to make sure the sum doesn ' t <nl> + / / overflow before applying an optimization to swap the $ limit with the $ skip . <nl> + if ( nextSkip & & ! overflow : : add ( skipSum , nextSkip - > getSkip ( ) , & safeSum ) ) { <nl> + skipSum = safeSum ; <nl> + + + itr ; <nl> + } else if ( nextLimit & & ! 
overflow : : add ( nextLimit - > getLimit ( ) , skipSum , & safeSum ) ) { <nl> + if ( ! minLimit ) { <nl> + minLimit = safeSum ; <nl> + } else { <nl> + minLimit = std : : min ( static_cast < long long > ( safeSum ) , * minLimit ) ; <nl> + } <nl> + <nl> + itr = eraseAndStich ( itr , container ) ; <nl> + } else if ( ! nextStage - > constraints ( ) . canSwapWithSkippingOrLimitingStage ) { <nl> + break ; <nl> + } else { <nl> + + + itr ; <nl> + } <nl> + } <nl> + <nl> + return minLimit ; <nl> + } <nl> + <nl> + boost : : optional < long long > extractSkipForPushdown ( Pipeline : : SourceContainer : : iterator itr , <nl> + Pipeline : : SourceContainer * container ) { <nl> + boost : : optional < long long > skipSum ; <nl> + while ( itr ! = container - > end ( ) ) { <nl> + auto nextStage = itr - > get ( ) ; <nl> + auto nextSkip = exact_pointer_cast < DocumentSourceSkip * > ( nextStage ) ; <nl> + int64_t safeSum = 0 ; <nl> + <nl> + / / The skip values can be very large , so we need to make sure the sum doesn ' t overflow <nl> + / / before extracting a skip stage for pushdown . Even if we failed to extract a $ skip stage <nl> + / / due to overflow , we still want to continue our analysis after it . If there are multiple <nl> + / / $ skip stages one after another , only the total sum of skipped documents matters . <nl> + if ( nextSkip & & ! overflow : : add ( skipSum . get_value_or ( 0 ) , nextSkip - > getSkip ( ) , & safeSum ) ) { <nl> + skipSum = safeSum ; <nl> + itr = eraseAndStich ( itr , container ) ; <nl> + } else if ( ! nextSkip & & ! nextStage - > constraints ( ) . canSwapWithSkippingOrLimitingStage ) { <nl> + break ; <nl> + } else { <nl> + + + itr ; <nl> + } <nl> + } <nl> + <nl> + return skipSum ; <nl> + } <nl> + <nl> + } / / namespace mongo <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . c00c5ad110c9 <nl> mmm / dev / null <nl> ppp b / src / mongo / db / pipeline / skip_and_limit . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2018 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . 
If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < boost / optional . hpp > <nl> + <nl> + # include " mongo / db / pipeline / pipeline . h " <nl> + <nl> + namespace mongo { <nl> + <nl> + class SkipAndLimit { <nl> + public : <nl> + boost : : optional < long long > getLimit ( ) const ; <nl> + <nl> + boost : : optional < long long > getSkip ( ) const ; <nl> + <nl> + protected : <nl> + boost : : optional < long long > _skip ; <nl> + boost : : optional < long long > _limit ; <nl> + } ; <nl> + <nl> + class SkipThenLimit ; <nl> + class LimitThenSkip ; <nl> + <nl> + / * * <nl> + * A struct representing a skip and a limit , with the skip to be applied before the limit . <nl> + * / <nl> + class SkipThenLimit final : public SkipAndLimit { <nl> + public : <nl> + SkipThenLimit ( boost : : optional < long long > skip , boost : : optional < long long > limit ) ; <nl> + } ; <nl> + <nl> + / * * <nl> + * A struct representing a limit and a skip , with the limit to be applied before the skip . <nl> + * / <nl> + class LimitThenSkip final : public SkipAndLimit { <nl> + public : <nl> + / * * <nl> + * Initializes the struct with the given limit and skip sizes . If the skip size is greater <nl> + * than the limit size , the minimum of the two values is taken for the skip size . This is done <nl> + * because we cannot skip more documents than the limit returns . <nl> + * / <nl> + LimitThenSkip ( boost : : optional < long long > limit , boost : : optional < long long > skip ) ; <nl> + <nl> + / * * <nl> + * Returns a SkipThenLimit structure representing logically the same operation , but performing <nl> + * the skip before the limit . <nl> + * / <nl> + SkipThenLimit flip ( ) const ; <nl> + } ; <nl> + <nl> + / * * <nl> + * If there are any $ limit stages that could be logically swapped forward to the position of the <nl> + * pipeline pointed to by ' itr ' without changing the meaning of the query , removes these $ limit <nl> + * stages from the Pipeline and returns the resulting limit . A single limit value is computed by <nl> + * taking the minimum after swapping each individual $ limit stage forward . <nl> + * <nl> + * This method also implements the ability to swap a $ limit before a $ skip , by adding the value of <nl> + * the $ skip to the value of the $ limit . <nl> + * / <nl> + boost : : optional < long long > extractLimitForPushdown ( Pipeline : : SourceContainer : : iterator itr , <nl> + Pipeline : : SourceContainer * container ) ; <nl> + <nl> + / * * <nl> + * If there are any $ skip stages that could be logically swapped forward to the position of the <nl> + * pipeline pointed to by ' itr ' without changing the meaning of the query , removes these $ skip <nl> + * stages from the Pipeline and returns the resulting skip . A single skip value is computed by <nl> + * taking the sum of all $ skip stages that participate in the swap . <nl> + * <nl> + * This method does NOT swap $ skip before $ limit . One can use the ' extractLimitForPushdown ' method <nl> + * to extract all $ limit stages and then call this method if it is applicable . <nl> + * / <nl> + boost : : optional < long long > extractSkipForPushdown ( Pipeline : : SourceContainer : : iterator itr , <nl> + Pipeline : : SourceContainer * container ) ; <nl> + <nl> + } / / namespace mongo <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . 
1d4798f3e2dd <nl> mmm / dev / null <nl> ppp b / src / mongo / db / pipeline / skip_and_limit_test . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2018 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / platform / basic . h " <nl> + <nl> + # include < climits > <nl> + <nl> + # include " mongo / db / pipeline / skip_and_limit . h " <nl> + # include " mongo / unittest / unittest . h " <nl> + <nl> + namespace mongo { <nl> + <nl> + namespace { <nl> + <nl> + TEST ( LimitThenSkip , SkipIsCappedWhenLargerThanLimit ) { <nl> + LimitThenSkip source ( 5 , 10 ) ; <nl> + ASSERT_EQ ( * source . getLimit ( ) , 5 ) ; <nl> + ASSERT_EQ ( * source . getSkip ( ) , 5 ) ; <nl> + } <nl> + <nl> + TEST ( LimitThenSkip , FlipToSkipThenLimit ) { <nl> + LimitThenSkip source ( 15 , 5 ) ; <nl> + SkipThenLimit converted = source . flip ( ) ; <nl> + ASSERT_EQ ( * converted . getSkip ( ) , 5 ) ; <nl> + ASSERT_EQ ( * converted . getLimit ( ) , 10 ) ; <nl> + } <nl> + <nl> + TEST ( LimitThenSkip , FlipOnlyLimit ) { <nl> + LimitThenSkip source ( 15 , boost : : none ) ; <nl> + SkipThenLimit converted = source . flip ( ) ; <nl> + ASSERT ( ! converted . getSkip ( ) ) ; <nl> + ASSERT_EQ ( * converted . getLimit ( ) , 15 ) ; <nl> + } <nl> + <nl> + TEST ( LimitThenSkip , FlipOnlySkip ) { <nl> + LimitThenSkip source ( boost : : none , 5 ) ; <nl> + SkipThenLimit converted = source . flip ( ) ; <nl> + ASSERT_EQ ( * converted . getSkip ( ) , 5 ) ; <nl> + ASSERT ( ! converted . getLimit ( ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace mongo <nl> mmm a / src / mongo / db / pipeline / stage_constraints . h <nl> ppp b / src / mongo / db / pipeline / stage_constraints . h <nl> struct StageConstraints { <nl> / / $ match predicates be swapped before itself . 
<nl> bool canSwapWithMatch = false ; <nl> <nl> - / / Neither a $ sample nor a $ limit can be moved before any stage which will possibly change the <nl> - / / number of documents in the stream . Further , no stage which will change the order of documents <nl> - / / can be swapped with a $ limit or $ sample , and no stage which will change behavior based on the <nl> - / / order of documents can be swapped with a $ sample because our implementation of sample will do <nl> - / / a random sort which shuffles the order . <nl> - bool canSwapWithLimitAndSample = false ; <nl> + / / True if this stage can be safely swapped with a stage which alters the number of documents in <nl> + / / the stream . <nl> + / / <nl> + / / For example , a $ project can be safely swapped with a $ skip , $ limit , or $ sample . But there are <nl> + / / some cases when we cannot perform such a swap : <nl> + / / - $ skip , $ limit and $ sample stages cannot be moved before any stage which will change the <nl> + / / number of documents <nl> + / / - $ skip , $ limit and $ sample stages cannot be swapped with any stage which will change the <nl> + / / order of documents <nl> + / / - $ sample cannot be swapped with stages which will change behavior based on the order of <nl> + / / documents because our implementation of $ sample shuffles the order <nl> + bool canSwapWithSkippingOrLimitingStage = false ; <nl> <nl> / / Indicates that a stage is allowed within a pipeline - style update . <nl> bool isAllowedWithinUpdatePipeline = false ; <nl> struct StageConstraints { <nl> requiresInputDocSource = = other . requiresInputDocSource & & <nl> isIndependentOfAnyCollection = = other . isIndependentOfAnyCollection & & <nl> canSwapWithMatch = = other . canSwapWithMatch & & <nl> - canSwapWithLimitAndSample = = other . canSwapWithLimitAndSample & & <nl> + canSwapWithSkippingOrLimitingStage = = other . canSwapWithSkippingOrLimitingStage & & <nl> isAllowedWithinUpdatePipeline = = other . isAllowedWithinUpdatePipeline & & <nl> unionRequirement = = other . unionRequirement ; <nl> } <nl> mmm a / src / mongo / db / query / planner_analysis . cpp <nl> ppp b / src / mongo / db / query / planner_analysis . cpp <nl> std : : unique_ptr < QuerySolutionNode > tryPushdownProjectBeneathSort ( <nl> return root ; <nl> } <nl> <nl> - if ( ! isSortStageType ( projectNode - > children [ 0 ] - > getType ( ) ) ) { <nl> + / / There could be a situation where there is a SKIP stage between PROJECT and SORT : <nl> + / / PROJECT = > SKIP = > SORT <nl> + / / In this case we still want to push PROJECT beneath SORT . <nl> + bool hasSkipBetween = false ; <nl> + auto sortNodeCandidate = projectNode - > children [ 0 ] ; <nl> + if ( sortNodeCandidate - > getType ( ) = = STAGE_SKIP ) { <nl> + hasSkipBetween = true ; <nl> + sortNodeCandidate = sortNodeCandidate - > children [ 0 ] ; <nl> + } <nl> + <nl> + if ( ! isSortStageType ( sortNodeCandidate - > getType ( ) ) ) { <nl> return root ; <nl> } <nl> <nl> - auto sortNode = static_cast < SortNode * > ( root - > children [ 0 ] ) ; <nl> + auto sortNode = static_cast < SortNode * > ( sortNodeCandidate ) ; <nl> <nl> / / Don ' t perform this optimization if the sort is a top - k sort . We would be wasting work <nl> / / computing projections for documents that are discarded since they are not in the top - k set . <nl> std : : unique_ptr < QuerySolutionNode > tryPushdownProjectBeneathSort ( <nl> <nl> / / Perform the swap . 
We are starting with the following structure : <nl> / / PROJECT = > SORT = > CHILD <nl> + / / Or if there is a SKIP stage between PROJECT and SORT : <nl> + / / PROJECT = > SKIP = > SORT = > CHILD <nl> / / <nl> / / This needs to be transformed to the following : <nl> / / SORT = > PROJECT = > CHILD <nl> + / / Or to the following in case of SKIP : <nl> + / / SKIP = > SORT = > PROJECT = > CHILD <nl> / / <nl> - / / First , detach the bottom of the tree . <nl> + / / First , detach the bottom of the tree . This part is CHILD in the comment above . <nl> std : : unique_ptr < QuerySolutionNode > restOfTree { sortNode - > children [ 0 ] } ; <nl> invariant ( sortNode - > children . size ( ) = = 1u ) ; <nl> sortNode - > children . clear ( ) ; <nl> <nl> - / / Next , detach the sort from the projection and assume ownership of it . <nl> - std : : unique_ptr < QuerySolutionNode > ownedSortNode { sortNode } ; <nl> + / / Next , detach the input from the projection and assume ownership of it . <nl> + / / The projection input is either this structure : <nl> + / / SORT <nl> + / / Or this if we have SKIP : <nl> + / / SKIP = > SORT <nl> + std : : unique_ptr < QuerySolutionNode > ownedProjectionInput { projectNode - > children [ 0 ] } ; <nl> sortNode = nullptr ; <nl> invariant ( projectNode - > children . size ( ) = = 1u ) ; <nl> projectNode - > children . clear ( ) ; <nl> <nl> / / Attach the lower part of the tree as the child of the projection . <nl> + / / We want to get the following structure : <nl> + / / PROJECT = > CHILD <nl> std : : unique_ptr < QuerySolutionNode > ownedProjectionNode = std : : move ( root ) ; <nl> ownedProjectionNode - > children . push_back ( restOfTree . release ( ) ) ; <nl> <nl> / / Attach the projection as the child of the sort stage . <nl> - ownedSortNode - > children . push_back ( ownedProjectionNode . release ( ) ) ; <nl> + if ( hasSkipBetween ) { <nl> + / / In this case ' ownedProjectionInput ' points to the structure : <nl> + / / SKIP = > SORT <nl> + / / And to attach PROJECT = > CHILD to it , we need to access children of SORT stage . <nl> + ownedProjectionInput - > children [ 0 ] - > children . push_back ( ownedProjectionNode . release ( ) ) ; <nl> + } else { <nl> + / / In this case ' ownedProjectionInput ' points to the structure : <nl> + / / SORT <nl> + / / And we can just add PROJECT = > CHILD to its children . <nl> + ownedProjectionInput - > children . push_back ( ownedProjectionNode . release ( ) ) ; <nl> + } <nl> <nl> / / Re - compute properties so that they reflect the new structure of the tree . <nl> - ownedSortNode - > computeProperties ( ) ; <nl> + ownedProjectionInput - > computeProperties ( ) ; <nl> <nl> - return ownedSortNode ; <nl> + return ownedProjectionInput ; <nl> } <nl> <nl> bool canUseSimpleSort ( const QuerySolutionNode & solnRoot , <nl> mmm a / src / mongo / db / query / query_planner_operator_test . cpp <nl> ppp b / src / mongo / db / query / query_planner_operator_test . cpp <nl> TEST_F ( QueryPlannerTest , SortSkipSoftLimit ) { <nl> " { cscan : { dir : 1 } } } } } } } } " ) ; <nl> } <nl> <nl> + / / Push project behind sort even when there is a skip between them . 
<nl> + TEST_F ( QueryPlannerTest , PushProjectBehindSortWithSkipBetween ) { <nl> + runQueryAsCommand ( fromjson ( R " ( { <nl> + find : ' testns ' , <nl> + filter : { } , <nl> + sort : { a : 1 } , <nl> + projection : { _id : 0 , a : 1 } , <nl> + skip : 2 <nl> + } ) " ) ) ; <nl> + assertNumSolutions ( 1U ) ; <nl> + assertSolutionExists ( <nl> + " { skip : { n : 2 , node : " <nl> + " { sort : { pattern : { a : 1 } , limit : 0 , type : ' simple ' , node : " <nl> + " { proj : { spec : { _id : 0 , a : 1 } , node : " <nl> + " { cscan : { dir : 1 } } } } } } } } } " ) ; <nl> + } <nl> + <nl> / / <nl> / / Sort elimination <nl> / / <nl> | SERVER - 40909 push down $ skip stage to query when possible | mongodb/mongo | 32cea84dcc86009cc09d8e30cd7534fac6fb2242 | 2020-10-05T15:53:51Z |
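The crux of the SERVER-40909 change above is the skip/limit arithmetic: a limit-then-skip pair is rewritten into an equivalent skip-then-limit pair before being handed to the PlanStage layer. Below is a minimal standalone sketch of that conversion, using std::optional in place of boost::optional and a free function instead of the SkipAndLimit class hierarchy; the names here are illustrative and not part of the MongoDB codebase.

#include <algorithm>
#include <cassert>
#include <limits>
#include <optional>

struct SkipThenLimitValues {
    std::optional<long long> skip;   // documents to drop first
    std::optional<long long> limit;  // documents to return afterwards
};

// Converts "limit L, then skip S" into the equivalent "skip, then limit".
// Skipping more documents than the limit returns is pointless, so the
// effective skip is first capped at the limit, exactly as the LimitThenSkip
// constructor in the diff does; then [limit L, skip S] == [skip S, limit L - S].
SkipThenLimitValues flipLimitThenSkip(std::optional<long long> limit,
                                      std::optional<long long> skip) {
    if (skip) {
        *skip = std::min(*skip,
                         limit.value_or(std::numeric_limits<long long>::max()));
    }
    if (limit) {
        return {skip, *limit - skip.value_or(0)};
    }
    return {skip, std::nullopt};
}

int main() {
    // Mirrors the unit tests in skip_and_limit_test.cpp: limit 15 then skip 5
    // flips to skip 5 then limit 10 ...
    auto a = flipLimitThenSkip(15, 5);
    assert(*a.skip == 5 && *a.limit == 10);
    // ... and a skip larger than the limit is capped at the limit, leaving
    // nothing to return (limit 0 after the flip).
    auto b = flipLimitThenSkip(5, 10);
    assert(*b.skip == 5 && *b.limit == 0);
    return 0;
}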
mmm a / Marlin / temperature . cpp <nl> ppp b / Marlin / temperature . cpp <nl> static volatile bool temp_meas_ready = false ; <nl> unsigned long watchmillis = 0 ; <nl> # endif / / WATCHPERIOD <nl> <nl> - / / Init min and max temp with extreme values to prevent false errors during startup <nl> - static int minttemp [ EXTRUDERS ] = { 0 } ; <nl> - static int maxttemp [ EXTRUDERS ] = { 16383 } ; / / the first value used for all <nl> - static int bed_minttemp = 0 ; <nl> - static int bed_maxttemp = 16383 ; <nl> - static void * heater_ttbl_map [ EXTRUDERS ] = { ( void * ) heater_0_temptable <nl> - # if EXTRUDERS > 1 <nl> - , ( void * ) heater_1_temptable <nl> - # endif <nl> - # if EXTRUDERS > 2 <nl> - , ( void * ) heater_2_temptable <nl> - # endif <nl> - # if EXTRUDERS > 3 <nl> - # error Unsupported number of extruders <nl> - # endif <nl> - } ; <nl> - static int heater_ttbllen_map [ EXTRUDERS ] = { heater_0_temptable_len <nl> - # if EXTRUDERS > 1 <nl> - , heater_1_temptable_len <nl> - # endif <nl> - # if EXTRUDERS > 2 <nl> - , heater_2_temptable_len <nl> - # endif <nl> # if EXTRUDERS > 3 <nl> - # error Unsupported number of extruders <nl> + # error Unsupported number of extruders <nl> + # elif EXTRUDERS > 2 <nl> + # define ARRAY_BY_EXTRUDERS ( v1 , v2 , v3 ) { v1 , v2 , v3 } <nl> + # elif EXTRUDERS > 1 <nl> + # define ARRAY_BY_EXTRUDERS ( v1 , v2 , v3 ) { v1 , v2 } <nl> + # else <nl> + # define ARRAY_BY_EXTRUDERS ( v1 , v2 , v3 ) { v1 } <nl> # endif <nl> - } ; <nl> + <nl> + / / Init min and max temp with extreme values to prevent false errors during startup <nl> + static int minttemp [ EXTRUDERS ] = ARRAY_BY_EXTRUDERS ( 0 , 0 , 0 ) ; <nl> + static int maxttemp [ EXTRUDERS ] = ARRAY_BY_EXTRUDERS ( 16383 , 16383 , 16383 ) ; / / the first value used for all <nl> + static int bed_minttemp = 0 ; <nl> + static int bed_maxttemp = 16383 ; <nl> + static void * heater_ttbl_map [ EXTRUDERS ] = ARRAY_BY_EXTRUDERS ( ( void * ) heater_0_temptable , ( void * ) heater_1_temptable , ( void * ) heater_2_temptable ) ; <nl> + static int heater_ttbllen_map [ EXTRUDERS ] = ARRAY_BY_EXTRUDERS ( heater_0_temptable_len , heater_1_temptable_len , heater_2_temptable_len ) ; <nl> + <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = functions = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> void max_temp_error ( uint8_t e ) { <nl> SERIAL_ERRORLN ( ( int ) e ) ; <nl> SERIAL_ERRORLNPGM ( " : Extruder switched off . MAXTEMP triggered ! " ) ; <nl> } <nl> + # ifndef BOGUS_TEMPERATURE_FAILSAFE_OVERRIDE <nl> + Stop ( ) ; <nl> + # endif <nl> } <nl> <nl> void min_temp_error ( uint8_t e ) { <nl> void min_temp_error ( uint8_t e ) { <nl> SERIAL_ERRORLN ( ( int ) e ) ; <nl> SERIAL_ERRORLNPGM ( " : Extruder switched off . MINTEMP triggered ! " ) ; <nl> } <nl> + # ifndef BOGUS_TEMPERATURE_FAILSAFE_OVERRIDE <nl> + Stop ( ) ; <nl> + # endif <nl> } <nl> <nl> void bed_max_temp_error ( void ) { <nl> void bed_max_temp_error ( void ) { <nl> SERIAL_ERROR_START ; <nl> SERIAL_ERRORLNPGM ( " Temperature heated bed switched off . MAXTEMP triggered ! ! 
" ) ; <nl> } <nl> + # ifndef BOGUS_TEMPERATURE_FAILSAFE_OVERRIDE <nl> + Stop ( ) ; <nl> + # endif <nl> } <nl> <nl> - # define HEAT_INTERVAL 250 <nl> # ifdef HEATER_0_USES_MAX6675 <nl> + # define MAX6675_HEAT_INTERVAL 250 <nl> - long max6675_previous_millis = - HEAT_INTERVAL ; <nl> + long max6675_previous_millis = - MAX6675_HEAT_INTERVAL ; <nl> int max6675_temp = 2000 ; <nl> <nl> int read_max6675 ( ) <nl> { <nl> - if ( millis ( ) - max6675_previous_millis < HEAT_INTERVAL ) <nl> + if ( millis ( ) - max6675_previous_millis < MAX6675_HEAT_INTERVAL ) <nl> return max6675_temp ; <nl> <nl> max6675_previous_millis = millis ( ) ; <nl> ISR ( TIMER0_COMPB_vect ) <nl> if ( current_raw [ e ] > = maxttemp [ e ] ) { <nl> target_raw [ e ] = 0 ; <nl> max_temp_error ( e ) ; <nl> - # ifndef BOGUS_TEMPERATURE_FAILSAFE_OVERRIDE <nl> - { <nl> - Stop ( ) ; ; <nl> - } <nl> - # endif <nl> } <nl> if ( current_raw [ e ] < = minttemp [ e ] ) { <nl> target_raw [ e ] = 0 ; <nl> min_temp_error ( e ) ; <nl> - # ifndef BOGUS_TEMPERATURE_FAILSAFE_OVERRIDE <nl> - { <nl> - Stop ( ) ; <nl> - } <nl> - # endif <nl> } <nl> <nl> ISR ( TIMER0_COMPB_vect ) <nl> if ( current_raw_bed > = bed_maxttemp ) { <nl> target_raw_bed = 0 ; <nl> bed_max_temp_error ( ) ; <nl> - Stop ( ) ; <nl> # endif <nl> } <nl> | Minor changes in the temperature code for some cleanup . | MarlinFirmware/Marlin | 529748894caaf47982411c0f600a3896cd344b81 | 2012-12-05T18:54:01Z |
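The Marlin change above replaces repeated `#if EXTRUDERS > N` initializer blocks with the ARRAY_BY_EXTRUDERS macro, so each per-extruder table is declared once and the preprocessor trims the initializer list to the configured extruder count. A minimal sketch of the same pattern, assuming EXTRUDERS is 2 and reusing the raw temperature bounds from the diff as sample values:

#include <cstdio>

#define EXTRUDERS 2

// Same selection logic as the diff: keep only as many initializers as there
// are extruders, and reject unsupported counts at compile time.
#if EXTRUDERS > 3
  #error Unsupported number of extruders
#elif EXTRUDERS > 2
  #define ARRAY_BY_EXTRUDERS(v1, v2, v3) { v1, v2, v3 }
#elif EXTRUDERS > 1
  #define ARRAY_BY_EXTRUDERS(v1, v2, v3) { v1, v2 }
#else
  #define ARRAY_BY_EXTRUDERS(v1, v2, v3) { v1 }
#endif

// With EXTRUDERS == 2 these expand to { 0, 0 } and { 16383, 16383 }.
static int minttemp[EXTRUDERS] = ARRAY_BY_EXTRUDERS(0, 0, 0);
static int maxttemp[EXTRUDERS] = ARRAY_BY_EXTRUDERS(16383, 16383, 16383);

int main() {
    for (int e = 0; e < EXTRUDERS; ++e) {
        std::printf("extruder %d: min raw %d, max raw %d\n",
                    e, minttemp[e], maxttemp[e]);
    }
    return 0;
}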
mmm a / MRuby / mr - actions . c <nl> ppp b / MRuby / mr - actions . c <nl> void TRI_InitMRActions ( MR_state_t * mrs ) { <nl> / / ArangoResponse <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> - rcl = mrs - > _arangoResponse = mrb_define_class ( & mrs - > _mrb , " ArangoResponse " , mrs - > _mrb . hash_class ) ; <nl> + rcl = mrs - > _arangoResponse = mrb_define_class ( & mrs - > _mrb , " ArangoResponse " , mrs - > _mrb . object_class ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / Makefile . files <nl> ppp b / Makefile . files <nl> BUILT_SOURCES + = $ ( JAVASCRIPT_BROWSER ) <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> MRUBY_HEADER = \ <nl> - mr / common / bootstrap / mr - error . h <nl> + mr / common / bootstrap / mr - error . h \ <nl> + mr / server / mr - server . h <nl> <nl> BUILT_SOURCES + = $ ( MRUBY_HEADER ) <nl> <nl> mmm a / Makefile . in <nl> ppp b / Makefile . in <nl> JAVASCRIPT_BROWSER = \ <nl> # # # @ brief MRuby source code as header <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> MRUBY_HEADER = \ <nl> - mr / common / bootstrap / mr - error . h <nl> + mr / common / bootstrap / mr - error . h \ <nl> + mr / server / mr - server . h <nl> <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> js / server / js - % . h : @ srcdir @ / js / server / % . js . setup - js - directories <nl> mr / common / bootstrap / mr - % . h : @ srcdir @ / mr / common / bootstrap / % . rb . setup - mr - directories <nl> @ top_srcdir @ / config / mr2c . sh $ < > $ @ <nl> <nl> + mr / server / mr - % . h : @ srcdir @ / mr / server / % . rb . setup - mr - directories <nl> + @ top_srcdir @ / config / mr2c . sh $ < > $ @ <nl> + <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> mmm a / Makefile . mruby <nl> ppp b / Makefile . mruby <nl> BUILT_SOURCES + = . setup - mr - directories <nl> mr / common / bootstrap / mr - % . h : @ srcdir @ / mr / common / bootstrap / % . rb . setup - mr - directories <nl> @ top_srcdir @ / config / mr2c . sh $ < > $ @ <nl> <nl> + mr / server / mr - % . h : @ srcdir @ / mr / server / % . rb . setup - mr - directories <nl> + @ top_srcdir @ / config / mr2c . sh $ < > $ @ <nl> + <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # # # @ brief cleanup <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> mmm a / RestServer / ArangoServer . cpp <nl> ppp b / RestServer / ArangoServer . 
cpp <nl> <nl> <nl> # ifdef TRI_ENABLE_MRUBY <nl> # include " MRuby / MRLineEditor . h " <nl> + # include " MRuby / MRLoader . h " <nl> # include " MRuby / mr - actions . h " <nl> # endif <nl> <nl> using namespace triagens : : arango ; <nl> # include " mruby / data . h " <nl> # include " mruby / proc . h " <nl> # include " mruby / variable . h " <nl> + <nl> + # include " mr / common / bootstrap / mr - error . h " <nl> + # include " mr / server / mr - server . h " <nl> # endif <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> static uint64_t GcIntervalJS ; <nl> <nl> static string StartupModulesJS ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief V8 startup loader <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static JSLoader StartupLoaderJS ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief V8 action loader <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static JSLoader ActionLoaderJS ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief allowed client actions <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static set < string > AllowedAdminActions ; <nl> / / / @ brief startup loader <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - static JSLoader StartupLoaderJS ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief action loader <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static JSLoader ActionLoaderJS ; <nl> + static MRLoader StartupLoaderMR ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> static void DefineAdminHandlers ( HttpHandlerFactory * factory , <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief UnviversalVoc constructor <nl> + / / / @ brief constructor <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> ArangoServer : : ArangoServer ( int argc , char * * argv ) <nl> 
ArangoServer : : ArangoServer ( int argc , char * * argv ) <nl> _actionPathJS ( ) , <nl> _actionThreadsJS ( 8 ) , <nl> _gcIntervalJS ( 1000 ) , <nl> + _startupPathMR ( ) , <nl> _databasePath ( " / var / lib / arango " ) , <nl> _removeOnDrop ( true ) , <nl> _removeOnCompacted ( true ) , <nl> void ArangoServer : : buildApplicationServer ( ) { <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> additional [ " JAVASCRIPT Options : help - admin " ] <nl> - ( " startup . directory " , & _startupPathJS , " path to the directory containing alternate startup scripts " ) <nl> - ( " startup . modules - path " , & _startupModulesJS , " one or more directories separated by cola " ) <nl> + ( " action . system - directory " , & _actionPathJS , " path to the system action directory " ) <nl> + ( " action . threads " , & _actionThreadsJS , " threads for actions " ) <nl> ( " gc . interval " , & _gcIntervalJS , " garbage collection interval ( each x requests ) " ) <nl> + ( " startup . directory " , & _startupPathJS , " path to the directory containing alternate JavaScript startup scripts " ) <nl> + ( " startup . modules - path " , & _startupModulesJS , " one or more directories separated by colons " ) <nl> ; <nl> <nl> - additional [ " JAVASCRIPT Options : help - admin " ] <nl> - ( " action . system - directory " , & _actionPathJS , " path to the system action directory " ) <nl> - ( " action . threads " , & _actionThreadsJS , " threads for actions " ) <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / MRuby options <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + additional [ " MRUBY Options : help - admin " ] <nl> + ( " startup . ruby - directory " , & _startupPathMR , " path to the directory containing alternate MRuby startup scripts " ) <nl> ; <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> void ArangoServer : : buildApplicationServer ( ) { <nl> LOGGER_INFO < < " actions are disabled , empty system action path " ; <nl> } <nl> <nl> + # ifdef TRI_ENABLE_MRUBY <nl> + <nl> + if ( _startupPathMR . empty ( ) ) { <nl> + LOGGER_INFO < < " using built - in MRuby startup files " ; <nl> + StartupLoaderMR . defineScript ( " common / bootstrap / error . rb " , MR_common_bootstrap_error ) ; <nl> + StartupLoaderMR . defineScript ( " server / server . rb " , MR_server_server ) ; <nl> + } <nl> + else { <nl> + LOGGER_INFO < < " using MRuby startup files at ' " < < _startupPathMR < < " ' " ; <nl> + StartupLoaderMR . setDirectory ( _startupPathMR ) ; <nl> + } <nl> + <nl> + # endif <nl> + <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / in shell mode ignore the rest <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> int ArangoServer : : executeShell ( bool tests ) { <nl> ok = StartupLoaderJS . 
loadScript ( context , files [ i ] ) ; <nl> <nl> if ( ok ) { <nl> - LOGGER_TRACE < < " loaded json file ' " < < files [ i ] < < " ' " ; <nl> + LOGGER_TRACE < < " loaded JavaScript file ' " < < files [ i ] < < " ' " ; <nl> } <nl> else { <nl> - LOGGER_FATAL < < " cannot load json file ' " < < files [ i ] < < " ' " ; <nl> + LOGGER_FATAL < < " cannot load JavaScript file ' " < < files [ i ] < < " ' " ; <nl> TRI_FlushLogging ( ) ; <nl> return EXIT_FAILURE ; <nl> } <nl> } <nl> <nl> / / run the shell <nl> - printf ( " ArangoDB shell [ V8 version % s , DB version % s ] \ n " , v8 : : V8 : : GetVersion ( ) , TRIAGENS_VERSION ) ; <nl> + printf ( " ArangoDB JavaScript shell [ V8 version % s , DB version % s ] \ n " , v8 : : V8 : : GetVersion ( ) , TRIAGENS_VERSION ) ; <nl> <nl> v8 : : Local < v8 : : String > name ( v8 : : String : : New ( " ( arango ) " ) ) ; <nl> v8 : : Context : : Scope contextScope ( context ) ; <nl> <nl> ok = true ; <nl> <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / run all unit tests <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> if ( tests ) { <nl> v8 : : HandleScope scope ; <nl> v8 : : TryCatch tryCatch ; <nl> int ArangoServer : : executeShell ( bool tests ) { <nl> } <nl> } <nl> <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / run a shell <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> else { <nl> V8LineEditor * console = new V8LineEditor ( context , " . arango " ) ; <nl> <nl> mrb_value MR_ArangoDatabase_Collection ( mrb_state * mrb , mrb_value self ) { <nl> return mrb_obj_value ( Data_Wrap_Struct ( mrb , ArangoCollectionClass , & MR_ArangoCollection_Type , ( void * ) collection ) ) ; <nl> } <nl> <nl> - <nl> - int ArangoServer : : executeRubyShell ( ) { <nl> - struct mrb_parser_state * p ; <nl> - <nl> - / / only simple logging <nl> - TRI_ShutdownLogging ( ) ; <nl> - TRI_InitialiseLogging ( false ) ; <nl> - TRI_CreateLogAppenderFile ( " + " ) ; <nl> - <nl> - / / open the database <nl> - openDatabase ( ) ; <nl> - <nl> - / / create a new ruby shell <nl> - MR_state_t * mrs = MR_OpenShell ( ) ; <nl> - <nl> - TRI_InitMRUtils ( mrs ) ; <nl> - TRI_InitMRActions ( mrs ) ; <nl> - <nl> - / / create a line editor <nl> - MRLineEditor * console = new MRLineEditor ( mrs , " . arango - mrb " ) ; <nl> - <nl> / / setup the classes <nl> # if 0 <nl> struct RClass * ArangoDatabaseClass = mrb_define_class ( mrb , " ArangoDatabase " , mrb - > object_class ) ; <nl> int ArangoServer : : executeRubyShell ( ) { <nl> mrb_define_const ( mrb , " $ db " , db ) ; <nl> # endif <nl> <nl> + <nl> + int ArangoServer : : executeRubyShell ( ) { <nl> + struct mrb_parser_state * p ; <nl> + size_t i ; <nl> + char const * files [ ] = { " common / bootstrap / error . rb " , <nl> + " server / server . 
rb " <nl> + } ; <nl> + <nl> + / / only simple logging <nl> + TRI_ShutdownLogging ( ) ; <nl> + TRI_InitialiseLogging ( false ) ; <nl> + TRI_CreateLogAppenderFile ( " + " ) ; <nl> + <nl> + / / open the database <nl> + openDatabase ( ) ; <nl> + <nl> + / / create a new ruby shell <nl> + MR_state_t * mrs = MR_OpenShell ( ) ; <nl> + <nl> + TRI_InitMRUtils ( mrs ) ; <nl> + TRI_InitMRActions ( mrs ) ; <nl> + <nl> + / / load all init files <nl> + for ( i = 0 ; i < sizeof ( files ) / sizeof ( files [ 0 ] ) ; + + i ) { <nl> + bool ok = StartupLoaderMR . loadScript ( & mrs - > _mrb , files [ i ] ) ; <nl> + <nl> + if ( ok ) { <nl> + LOGGER_TRACE < < " loaded ruby file ' " < < files [ i ] < < " ' " ; <nl> + } <nl> + else { <nl> + LOGGER_FATAL < < " cannot load ruby file ' " < < files [ i ] < < " ' " ; <nl> + TRI_FlushLogging ( ) ; <nl> + return EXIT_FAILURE ; <nl> + } <nl> + } <nl> + <nl> + / / create a line editor <nl> + printf ( " ArangoDB MRuby shell [ DB version % s ] \ n " , TRIAGENS_VERSION ) ; <nl> + <nl> + MRLineEditor * console = new MRLineEditor ( mrs , " . arango - mrb " ) ; <nl> + <nl> console - > open ( false ) ; <nl> <nl> while ( true ) { <nl> char * input = console - > prompt ( " arangod > " ) ; <nl> <nl> if ( input = = 0 ) { <nl> - printf ( " \ nBye Bye ! Auf Wiedersehen ! さようなら \ n " ) ; <nl> + printf ( " < ctrl - D > \ nBye Bye ! Auf Wiedersehen ! До свидания ! さようなら \ n " ) ; <nl> break ; <nl> } <nl> <nl> int ArangoServer : : executeRubyShell ( ) { <nl> TRI_FreeString ( TRI_UNKNOWN_MEM_ZONE , input ) ; <nl> <nl> if ( p = = 0 | | p - > tree = = 0 | | 0 < p - > nerr ) { <nl> - cout < < " UPPS ! \ n " ; <nl> + LOGGER_ERROR < < " failed to compile input " ; <nl> continue ; <nl> } <nl> <nl> int n = mrb_generate_code ( & mrs - > _mrb , p - > tree ) ; <nl> <nl> if ( n < 0 ) { <nl> - cout < < " UPPS 2 : " < < n < < " \ n " ; <nl> + LOGGER_ERROR < < " failed to execute Ruby bytecode " ; <nl> continue ; <nl> } <nl> <nl> int ArangoServer : : executeRubyShell ( ) { <nl> mrb_top_self ( & mrs - > _mrb ) ) ; <nl> <nl> if ( mrs - > _mrb . exc ) { <nl> - cout < < " Caught exception : \ n " ; <nl> + LOGGER_ERROR < < " caught Ruby exception " ; <nl> mrb_p ( & mrs - > _mrb , mrb_obj_value ( mrs - > _mrb . exc ) ) ; <nl> mrs - > _mrb . exc = 0 ; <nl> } <nl> mmm a / RestServer / ArangoServer . h <nl> ppp b / RestServer / ArangoServer . h <nl> namespace triagens { <nl> <nl> uint64_t _gcIntervalJS ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief path to the directory containing alternate startup scripts <nl> + / / / <nl> + / / / @ CMDOPT { - - startup . ruby - directory @ CA { directory } } <nl> + / / / <nl> + / / / Specifies the @ CA { directory } path to alternate startup MRuby files . <nl> + / / / Normally , the server will start using built - in MRuby core functionality . To <nl> + / / / override the core functionality with a different implementation , this option <nl> + / / / can be used . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + string _startupPathMR ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief path to the database <nl> / / / <nl> new file mode 100644 <nl> index 00000000000 . 
. 927cf1da7d1 <nl> mmm / dev / null <nl> ppp b / mr / server / mr - server . h <nl> <nl> + static string MR_server_server = <nl> + " # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \ n " <nl> + " # # # @ brief error handling \ n " <nl> + " # # # \ n " <nl> + " # # # @ file \ n " <nl> + " # # # \ n " <nl> + " # # # DISCLAIMER \ n " <nl> + " # # # \ n " <nl> + " # # # Copyright 2012 triagens GmbH , Cologne , Germany \ n " <nl> + " # # # \ n " <nl> + " # # # Licensed under the Apache License , Version 2 . 0 ( the \ " License \ " ) ; \ n " <nl> + " # # # you may not use this file except in compliance with the License . \ n " <nl> + " # # # You may obtain a copy of the License at \ n " <nl> + " # # # \ n " <nl> + " # # # http : / / www . apache . org / licenses / LICENSE - 2 . 0 \ n " <nl> + " # # # \ n " <nl> + " # # # Unless required by applicable law or agreed to in writing , software \ n " <nl> + " # # # distributed under the License is distributed on an \ " AS IS \ " BASIS , \ n " <nl> + " # # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . \ n " <nl> + " # # # See the License for the specific language governing permissions and \ n " <nl> + " # # # limitations under the License . \ n " <nl> + " # # # \ n " <nl> + " # # # Copyright holder is triAGENS GmbH , Cologne , Germany \ n " <nl> + " # # # \ n " <nl> + " # # # @ author Dr . Frank Celler \ n " <nl> + " # # # @ author Copyright 2012 , triAGENS GmbH , Cologne , Germany \ n " <nl> + " # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \ n " <nl> + " \ n " <nl> + " # # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - \ n " <nl> + " # # - - SECTION - - ArangoResponse \ n " <nl> + " # # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - \ n " <nl> + " \ n " <nl> + " # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \ n " <nl> + " # # - - SECTION - - END - OF - FILE \ n " <nl> + " # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \ n " <nl> + " \ n " <nl> + " # # Local Variables : \ n " <nl> + " # # mode : outline - minor \ n " <nl> + " # # outline - regexp : \ " ^ \ \ \ \ ( # # # @ brief \ \ \ \ | # # - - SECTION - - \ \ \ \ | # - \ \ \ \ * - \ \ \ \ ) \ " \ n " <nl> + " # # End : \ n " <nl> + ; <nl> new file mode 100644 <nl> index 00000000000 . . a11f216b9f9 <nl> mmm / dev / null <nl> ppp b / mr / server / server . rb <nl> <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # # # @ brief error handling <nl> + # # # <nl> + # # # @ file <nl> + # # # <nl> + # # # DISCLAIMER <nl> + # # # <nl> + # # # Copyright 2012 triagens GmbH , Cologne , Germany <nl> + # # # <nl> + # # # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # # # you may not use this file except in compliance with the License . <nl> + # # # You may obtain a copy of the License at <nl> + # # # <nl> + # # # http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + # # # <nl> + # # # Unless required by applicable law or agreed to in writing , software <nl> + # # # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # # # See the License for the specific language governing permissions and <nl> + # # # limitations under the License . <nl> + # # # <nl> + # # # Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + # # # <nl> + # # # @ author Dr . Frank Celler <nl> + # # # @ author Copyright 2012 , triAGENS GmbH , Cologne , Germany <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + # # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + # # - - SECTION - - ArangoResponse <nl> + # # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # # - - SECTION - - END - OF - FILE <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + # # Local Variables : <nl> + # # mode : outline - minor <nl> + # # outline - regexp : " ^ \ \ ( # # # @ brief \ \ | # # - - SECTION - - \ \ | # - \ \ * - \ \ ) " <nl> + # # End : <nl> | towards mruby console | arangodb/arangodb | 61aac47e35b048a50b8de85777f7ef6fb1bb9e67 | 2012-06-02T12:29:57Z |
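The REPL body in the arangod record above parses each input line, compiles it with mrb_generate_code, runs it with mrb_run, and reports a pending exception through mrs->_mrb.exc before continuing. A minimal sketch of that same evaluate-and-report cycle against plain mruby (no ArangoDB types), using the convenience entry point mrb_load_string, which bundles the parse/generate/run pipeline the commit drives by hand:

```cpp
// Hedged sketch of the shell's eval cycle, assuming the stock mruby
// embedding API (mrb_open / mrb_load_string / mrb_p / mrb_close).
#include <mruby.h>
#include <mruby/compile.h>

int main() {
  mrb_state* mrb = mrb_open();
  if (mrb == nullptr) return 1;

  // Evaluate one "line" of Ruby, as the prompt loop would hand it over.
  mrb_value result = mrb_load_string(mrb, "[1, 2, 3].map { |x| x * 2 }");

  if (mrb->exc) {                          // same check as in the diff
    mrb_p(mrb, mrb_obj_value(mrb->exc));   // print the pending Ruby exception
    mrb->exc = nullptr;                    // reset so the shell can continue
  } else {
    mrb_p(mrb, result);                    // echo the result, REPL-style
  }

  mrb_close(mrb);
  return 0;
}
```

Clearing mrb->exc after printing is what lets the loop keep accepting input after an error, mirroring the continue branches in the diff.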
mmm a / addons / skin . pm3 - hd / 720p / custom_SkinSetting_1111 . xml <nl> ppp b / addons / skin . pm3 - hd / 720p / custom_SkinSetting_1111 . xml <nl> <nl> < texturefocus border = " 7 " > list - focus . png < / texturefocus > <nl> < texturenofocus > - < / texturenofocus > <nl> < textureradioon > radiobutton - focus - 2 . png < / textureradioon > <nl> - < onclick > Skin . SetFile ( LyricScript_Path , . py ) < / onclick > <nl> + < onclick > Skin . SetFile ( LyricScript_Path , Script ) < / onclick > <nl> < enable > Skin . HasSetting ( LyricScript_Enable ) < / enable > <nl> < / control > <nl> < control type = " image " id = " 403 " > <nl> <nl> < texturefocus border = " 7 " > list - focus . png < / texturefocus > <nl> < texturenofocus > - < / texturenofocus > <nl> < textureradioon > radiobutton - focus - 2 . png < / textureradioon > <nl> - < onclick > Skin . SetFile ( WeatherScript_Path , . py ) < / onclick > <nl> + < onclick > Skin . SetFile ( WeatherScript_Path , Script ) < / onclick > <nl> < enable > Skin . HasSetting ( WeatherScript_Enable ) < / enable > <nl> < / control > <nl> < control type = " image " id = " 407 " > <nl> <nl> < texturefocus border = " 7 " > list - focus . png < / texturefocus > <nl> < texturenofocus > - < / texturenofocus > <nl> < textureradioon > radiobutton - focus - 2 . png < / textureradioon > <nl> - < onclick > Skin . SetFile ( SubtitleScript_Path , . py ) < / onclick > <nl> + < onclick > Skin . SetFile ( SubtitleScript_Path , Script ) < / onclick > <nl> < enable > Skin . HasSetting ( SubtitleScript_Enable ) < / enable > <nl> < / control > <nl> < control type = " image " id = " 410 " > <nl> | Fixed : Use the new Addon Browser way to find scripts in skin settings | xbmc/xbmc | 6b9aaa228493c3b468a758b26f873237b4153eb6 | 2010-05-04T12:44:34Z |
mmm a / LICENSES - OTHER - COMPONENTS . md <nl> ppp b / LICENSES - OTHER - COMPONENTS . md <nl> <nl> * Project Home : https : / / code . google . com / p / v8 / <nl> * GITHUB : https : / / github . com / v8 / v8 <nl> * License Overview : [ https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE ) <nl> - * License : V8 [ free as - is license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE . v8 ) <nl> - * License : strongtalk [ free as - is license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE . strongtalk ) <nl> - * License : valgrind [ BSD - style license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE . valgrind ) <nl> - * License : vtune , Dual - License [ BSD license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / src / third_party / vtune / v8 - vtune . h ) <nl> - * License : gmock [ free as - is license ] ( https : / / github . com / arangodb / arangodb / blob / devel / 3rdParty / V8 - 4 . 3 . 61 / testing / gmock / LICENSE ) <nl> - * License : gtest [ free as - is license ] ( https : / / github . com / arangodb / arangodb / blob / devel / 3rdParty / V8 - 4 . 3 . 61 / testing / gtest / LICENSE ) <nl> + * License : V8 [ BSD 3 - Clause License ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE . v8 ) <nl> + * License : strongtalk [ BSD 3 - Clause License ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE . strongtalk ) <nl> + * License : valgrind [ BSD 4 - Clause license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / LICENSE . valgrind ) <nl> + * License : vtune , Dual - License [ BSD 3 - Clause License ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / src / third_party / vtune / v8 - vtune . h ) <nl> + * License : gmock [ BSD 3 - Clause License ] ( https : / / github . com / arangodb / arangodb / blob / devel / 3rdParty / V8 - 4 . 3 . 61 / testing / gmock / LICENSE ) <nl> + * License : gtest [ BSD 3 - Clause License ] ( https : / / github . com / arangodb / arangodb / blob / devel / 3rdParty / V8 - 4 . 3 . 61 / testing / gtest / LICENSE ) <nl> * License : fdlibm [ free as - is license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / src / third_party / fdlibm / LICENSE ) <nl> - * License : PCRE [ BSD license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / test / mjsunit / third_party / regexp - pcre . js ) <nl> - * License : object - keys [ free as - is license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / test / mjsunit / third_party / object - keys . js ) <nl> + * License : PCRE [ BSD 3 - Clause license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / test / mjsunit / third_party / regexp - pcre . js ) <nl> + * License : object - keys [ BSD 3 - Clause license ] ( https : / / github . com / v8 / v8 / blob / 4 . 3 . 61 / test / mjsunit / third_party / object - keys . js ) <nl> <nl> # # # ICU 54 . 1 <nl> <nl> <nl> # # # libev 4 . 11 <nl> <nl> * Project Home : http : / / software . schmorp . de / pkg / libev . html <nl> - * License : Dual - License [ free as - is license ] ( http : / / cvs . schmorp . de / libev / LICENSE ? revision = 1 . 11 & view = markup ) <nl> + * License : Dual - License [ BSD 2 - Clause License ] ( http : / / cvs . schmorp . de / libev / LICENSE ? revision = 1 . 11 & view = markup ) <nl> <nl> # # # linenoise <nl> <nl> * GITHUB : https : / / github . 
com / antirez / linenoise <nl> - * License : [ free as - is license ] ( https : / / github . com / antirez / linenoise / blob / master / LICENSE ) <nl> + * License : [ BSD 2 - Clause License ] ( https : / / github . com / antirez / linenoise / blob / master / LICENSE ) <nl> <nl> # # # Valgrind <nl> <nl> * Project Home : http : / / valgrind . org / <nl> * uses valgrind . h header file only , none of the other files <nl> - * License : [ BSD - style license ] ( https : / / raw . githubusercontent . com / arangodb / arangodb / devel / 3rdParty / valgrind / valgrind . h ) <nl> + * License : [ BSD 4 - Clause license ] ( https : / / raw . githubusercontent . com / arangodb / arangodb / devel / 3rdParty / valgrind / valgrind . h ) <nl> <nl> # # # zlib 1 . 2 . 7 <nl> <nl> <nl> <nl> * Project Home : http : / / coffeescript . org <nl> * GITHUB : https : / / github . com / jashkenas / coffeescript <nl> - * License : [ free as - is license ] ( https : / / github . com / jashkenas / coffeescript / blob / master / LICENSE ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / jashkenas / coffeescript / blob / master / LICENSE ) <nl> <nl> # # # # expect . js <nl> <nl> <nl> <nl> * Project Home : https : / / highlightjs . org <nl> * GITHUB : https : / / github . com / isagalaev / highlight . js <nl> - * License : [ free as - is license ] ( https : / / github . com / isagalaev / highlight . js / blob / master / LICENSE ) <nl> + * License : [ BSD 3 - Clause License ] ( https : / / github . com / isagalaev / highlight . js / blob / master / LICENSE ) <nl> <nl> # # # # http - errors <nl> <nl> <nl> # # # # inflect <nl> <nl> * GITHUB : https : / / github . com / pksunkara / inflect <nl> - * License : [ free as - is license ] ( https : / / github . com / pksunkara / inflect / blob / master / LICENSE ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / pksunkara / inflect / blob / master / LICENSE ) <nl> <nl> # # # # Jasmine <nl> <nl> <nl> # # # # Joi <nl> <nl> * GITHUB : https : / / github . com / hapijs / joi <nl> - * License : [ free as - is license ] ( https : / / github . com / hapijs / joi / blob / master / LICENSE ) <nl> + * License : [ BSD - style 3 - Clause License ] ( https : / / github . com / hapijs / joi / blob / master / LICENSE ) <nl> <nl> # # # # JSHint <nl> <nl> * Project Home : http : / / jshint . com <nl> * GITHUB : https : / / github . com / jshint / jshint <nl> - * License : [ free as - is license ] ( https : / / github . com / jshint / jshint / blob / master / LICENSE ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / jshint / jshint / blob / master / LICENSE ) <nl> <nl> # # # # JSUnity <nl> <nl> <nl> # # # # qs <nl> <nl> * GITHUB : https : / / github . com / hapijs / qs <nl> - * License : [ free as - is license ] ( https : / / github . com / hapijs / qs / blob / master / LICENSE ) <nl> + * License : [ BSD - style 3 - Clause License ] ( https : / / github . com / hapijs / qs / blob / master / LICENSE ) <nl> <nl> # # # # Ramda <nl> <nl> <nl> <nl> * Project Home : http : / / underscorejs . org <nl> * GITHUB : https : / / github . com / jashkenas / underscore <nl> - * License : [ free as - is license ] ( https : / / github . com / jashkenas / underscore / blob / master / LICENSE ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / jashkenas / underscore / blob / master / LICENSE ) <nl> <nl> # # # # YAML <nl> <nl> <nl> <nl> * Project Home : http : / / backbonejs . org <nl> * GITHUB : https : / / github . 
com / jashkenas / backbone <nl> - * License : [ free as - is license ] ( https : / / github . com / jashkenas / backbone / blob / master / LICENSE ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / jashkenas / backbone / blob / master / LICENSE ) <nl> <nl> # # # # Bootstrap <nl> <nl> <nl> <nl> * Project Home : http : / / d3js . org <nl> * GITHUB : https : / / github . com / mbostock / d3 <nl> - * License : [ free as - is license ] ( https : / / github . com / mbostock / d3 / blob / master / LICENSE ) <nl> + * License : [ BSD - style 3 - Clause License ] ( https : / / github . com / mbostock / d3 / blob / master / LICENSE ) <nl> <nl> # # # # dygraph <nl> <nl> * Project Home : http : / / dygraphs . com <nl> * GITHUB : https : / / github . com / danvk / dygraphs <nl> - * License : [ free as - is license ] ( https : / / github . com / danvk / dygraphs / blob / master / LICENSE . txt ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / danvk / dygraphs / blob / master / LICENSE . txt ) <nl> <nl> # # # # Embedded JS <nl> <nl> <nl> <nl> * Project Home : http : / / jquery . com <nl> * GITHUB : https : / / github . com / jquery / jquery <nl> - * License : [ free as - is license ] ( https : / / github . com / jquery / jquery / blob / master / LICENSE . txt ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / jquery / jquery / blob / master / LICENSE . txt ) <nl> <nl> # # # # jQuery Contextmenu <nl> <nl> <nl> <nl> * Project Home : http : / / jqueryui . com <nl> * GITHUB : https : / / github . com / jquery / jquery - ui <nl> - * License : [ free as - is license ] ( https : / / github . com / jquery / jquery - ui / blob / master / LICENSE . txt ) <nl> + * License : [ MIT - style License ] ( https : / / github . com / jquery / jquery - ui / blob / master / LICENSE . txt ) <nl> <nl> # # # # jQuery UploadFile <nl> <nl> | Update LICENSES - OTHER - COMPONENTS . md | arangodb/arangodb | 3d8205859afaa83e1631b6e033cc7c736ed86094 | 2015-10-29T17:14:52Z |
mmm a / db / compaction . cc <nl> ppp b / db / compaction . cc <nl> void Compaction : : Summary ( char * output , int len ) { <nl> uint64_t Compaction : : OutputFilePreallocationSize ( ) const { <nl> uint64_t preallocation_size = 0 ; <nl> <nl> + for ( const auto & level_files : inputs_ ) { <nl> + for ( const auto & file : level_files . files ) { <nl> + preallocation_size + = file - > fd . GetFileSize ( ) ; <nl> + } <nl> + } <nl> + <nl> if ( max_output_file_size_ ! = port : : kMaxUint64 & & <nl> - ( cfd_ - > ioptions ( ) - > compaction_style = = kCompactionStyleLevel | | <nl> + ( immutable_cf_options_ . compaction_style = = kCompactionStyleLevel | | <nl> output_level ( ) > 0 ) ) { <nl> - preallocation_size = max_output_file_size_ ; <nl> - } else { <nl> - for ( const auto & level_files : inputs_ ) { <nl> - for ( const auto & file : level_files . files ) { <nl> - preallocation_size + = file - > fd . GetFileSize ( ) ; <nl> - } <nl> - } <nl> + preallocation_size = std : : min ( max_output_file_size_ , preallocation_size ) ; <nl> } <nl> + <nl> / / Over - estimate slightly so we don ' t end up just barely crossing <nl> / / the threshold <nl> - return preallocation_size + ( preallocation_size / 10 ) ; <nl> + / / No point to preallocate more than 1GB . <nl> + return std : : min ( uint64_t { 1073741824 } , <nl> + preallocation_size + ( preallocation_size / 10 ) ) ; <nl> } <nl> <nl> std : : unique_ptr < CompactionFilter > Compaction : : CreateCompactionFilter ( ) const { <nl> mmm a / db / compaction_picker_test . cc <nl> ppp b / db / compaction_picker_test . cc <nl> TEST_F ( CompactionPickerTest , Level1Trigger ) { <nl> } <nl> <nl> TEST_F ( CompactionPickerTest , Level1Trigger2 ) { <nl> + mutable_cf_options_ . target_file_size_base = 10000000000 ; <nl> + mutable_cf_options_ . RefreshDerivedOptions ( ioptions_ ) ; <nl> NewVersionStorage ( 6 , kCompactionStyleLevel ) ; <nl> Add ( 1 , 66U , " 150 " , " 200 " , 1000000001U ) ; <nl> Add ( 1 , 88U , " 201 " , " 300 " , 1000000000U ) ; <nl> TEST_F ( CompactionPickerTest , Level1Trigger2 ) { <nl> ASSERT_EQ ( 66U , compaction - > input ( 0 , 0 ) - > fd . GetNumber ( ) ) ; <nl> ASSERT_EQ ( 6U , compaction - > input ( 1 , 0 ) - > fd . GetNumber ( ) ) ; <nl> ASSERT_EQ ( 7U , compaction - > input ( 1 , 1 ) - > fd . GetNumber ( ) ) ; <nl> + ASSERT_EQ ( uint64_t { 1073741824 } , compaction - > OutputFilePreallocationSize ( ) ) ; <nl> } <nl> <nl> TEST_F ( CompactionPickerTest , LevelMaxScore ) { <nl> NewVersionStorage ( 6 , kCompactionStyleLevel ) ; <nl> mutable_cf_options_ . target_file_size_base = 10000000 ; <nl> - mutable_cf_options_ . target_file_size_multiplier = 10 ; <nl> mutable_cf_options_ . max_bytes_for_level_base = 10 * 1024 * 1024 ; <nl> + mutable_cf_options_ . RefreshDerivedOptions ( ioptions_ ) ; <nl> Add ( 0 , 1U , " 150 " , " 200 " , 1000000U ) ; <nl> / / Level 1 score 1 . 2 <nl> Add ( 1 , 66U , " 150 " , " 200 " , 6000000U ) ; <nl> TEST_F ( CompactionPickerTest , LevelMaxScore ) { <nl> ASSERT_TRUE ( compaction . get ( ) ! = nullptr ) ; <nl> ASSERT_EQ ( 1U , compaction - > num_input_files ( 0 ) ) ; <nl> ASSERT_EQ ( 7U , compaction - > input ( 0 , 0 ) - > fd . GetNumber ( ) ) ; <nl> + ASSERT_EQ ( mutable_cf_options_ . target_file_size_base + <nl> + mutable_cf_options_ . 
target_file_size_base / 10 , <nl> + compaction - > OutputFilePreallocationSize ( ) ) ; <nl> } <nl> <nl> TEST_F ( CompactionPickerTest , NeedsCompactionLevel ) { <nl> TEST_F ( CompactionPickerTest , NeedsCompactionFIFO ) { <nl> TEST_F ( CompactionPickerTest , CompactionPriMinOverlapping1 ) { <nl> NewVersionStorage ( 6 , kCompactionStyleLevel ) ; <nl> ioptions_ . compaction_pri = kMinOverlappingRatio ; <nl> - mutable_cf_options_ . target_file_size_base = 10000000 ; <nl> + mutable_cf_options_ . target_file_size_base = 100000000000 ; <nl> mutable_cf_options_ . target_file_size_multiplier = 10 ; <nl> mutable_cf_options_ . max_bytes_for_level_base = 10 * 1024 * 1024 ; <nl> + mutable_cf_options_ . RefreshDerivedOptions ( ioptions_ ) ; <nl> <nl> Add ( 2 , 6U , " 150 " , " 179 " , 50000000U ) ; <nl> Add ( 2 , 7U , " 180 " , " 220 " , 50000000U ) ; <nl> TEST_F ( CompactionPickerTest , CompactionPriMinOverlapping1 ) { <nl> ASSERT_EQ ( 1U , compaction - > num_input_files ( 0 ) ) ; <nl> / / Pick file 8 because it overlaps with 0 files on level 3 . <nl> ASSERT_EQ ( 8U , compaction - > input ( 0 , 0 ) - > fd . GetNumber ( ) ) ; <nl> + / / Compaction input size * 1 . 1 <nl> + ASSERT_GE ( uint64_t { 55000000 } , compaction - > OutputFilePreallocationSize ( ) ) ; <nl> } <nl> <nl> TEST_F ( CompactionPickerTest , CompactionPriMinOverlapping2 ) { <nl> | Improve fallocate size in compaction output | facebook/rocksdb | 7291a3f813e563efbd6870465b1063a115480373 | 2018-01-23T00:43:46Z |
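The compaction.cc hunk flips the preallocation logic: sum the input file sizes first, optionally cap that sum by max_output_file_size_, then always cap the 10%-padded result at 1GB. A self-contained restatement of that sizing rule with made-up numbers; the free function and its parameters below are illustrative, not RocksDB API:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Mirrors the arithmetic of the patched OutputFilePreallocationSize().
uint64_t OutputFilePreallocationSize(uint64_t total_input_size,
                                     uint64_t max_output_file_size,
                                     bool capped_by_target_size) {
  uint64_t preallocation_size = total_input_size;
  if (capped_by_target_size) {
    // Never reserve more than the compaction can actually produce.
    preallocation_size = std::min(max_output_file_size, preallocation_size);
  }
  // Over-estimate by 10%, but cap at 1GB: larger fallocates buy nothing.
  return std::min(uint64_t{1073741824},
                  preallocation_size + preallocation_size / 10);
}

int main() {
  // 40 MiB of input with a 64 MiB target file size: the reservation now
  // tracks the inputs (44 MiB), not the full target size as before the fix.
  std::cout << OutputFilePreallocationSize(40u << 20, 64u << 20, true) << "\n";
  // 5 GiB of input with a 10 GiB target size: the 1GB ceiling kicks in,
  // matching the ASSERT_EQ(uint64_t{1073741824}, ...) in the test above.
  std::cout << OutputFilePreallocationSize(5ull << 30, 10ull << 30, true) << "\n";
  return 0;
}
```

Before this change, a compaction with tiny inputs but a huge target_file_size_base would fallocate the full target size; the std::min makes the reservation follow the data that can actually be written.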
mmm a / tensorflow / python / training / moving_averages . py <nl> ppp b / tensorflow / python / training / moving_averages . py <nl> class ExponentialMovingAverage ( object ) : <nl> @ @ variables_to_restore <nl> " " " <nl> <nl> - def __init__ ( self , decay , num_updates = None , name = " ExponentialMovingAverage " ) : <nl> + def __init__ ( self , decay , num_updates = None , zero_debias = False , <nl> + name = " ExponentialMovingAverage " ) : <nl> " " " Creates a new ExponentialMovingAverage object . <nl> <nl> The ` apply ( ) ` method has to be called to create shadow variables and add <nl> def __init__ ( self , decay , num_updates = None , name = " ExponentialMovingAverage " ) : <nl> Args : <nl> decay : Float . The decay to use . <nl> num_updates : Optional count of number of updates applied to variables . <nl> + zero_debias : If ` True ` , zero debias moving - averages that are initialized <nl> + with tensors . <nl> name : String . Optional prefix name to use for the name of ops added in <nl> ` apply ( ) ` . <nl> " " " <nl> self . _decay = decay <nl> self . _num_updates = num_updates <nl> + self . _zero_debias = zero_debias <nl> self . _name = name <nl> self . _averages = { } <nl> <nl> def apply ( self , var_list = None ) : <nl> var , <nl> self . _name , <nl> colocate_with_primary = ( var . op . type = = " Variable " ) ) <nl> - zero_debias_true . add ( avg ) <nl> + if self . _zero_debias : <nl> + zero_debias_true . add ( avg ) <nl> self . _averages [ var ] = avg <nl> <nl> with ops . name_scope ( self . _name ) as scope : <nl> mmm a / tensorflow / python / training / moving_averages_test . py <nl> ppp b / tensorflow / python / training / moving_averages_test . py <nl> def _Repeat ( value , dim ) : <nl> class ExponentialMovingAverageTest ( tf . test . TestCase ) : <nl> <nl> def _CheckDecay ( self , ema , actual_decay , dim ) : <nl> + def _Scale ( dk , steps ) : <nl> + if ema . _zero_debias : <nl> + return 1 - dk * * ( steps + 1 ) <nl> + else : <nl> + return 1 <nl> tens = _Repeat ( 10 . 0 , dim ) <nl> thirties = _Repeat ( 30 . 0 , dim ) <nl> var0 = tf . Variable ( tens , name = " v0 " ) <nl> def _CheckDecay ( self , ema , actual_decay , dim ) : <nl> self . assertAllClose ( expected , avg0 . eval ( ) ) <nl> expected = _Repeat ( 30 . 0 * dk + 30 . 0 * ( 1 - dk ) , dim ) <nl> self . assertAllClose ( expected , avg1 . eval ( ) ) <nl> - expected = _Repeat ( 0 . 0 * dk + ( 10 . 0 + 30 . 0 ) * ( 1 - dk ) / ( 1 - dk * * 2 ) , dim ) <nl> + expected = _Repeat ( 0 . 0 * dk + ( 10 . 0 + 30 . 0 ) * ( 1 - dk ) / _Scale ( dk , 1 ) , dim ) <nl> self . assertAllClose ( expected , avg2 . eval ( ) ) <nl> <nl> # Again , update the averages and check . <nl> def _CheckDecay ( self , ema , actual_decay , dim ) : <nl> dim ) <nl> self . assertAllClose ( expected , avg1 . eval ( ) ) <nl> expected = _Repeat ( ( ( 0 . 0 * dk + ( 10 . 0 + 30 . 0 ) * ( 1 - dk ) ) * dk + <nl> - ( 10 . 0 + 30 . 0 ) * ( 1 - dk ) ) / ( 1 - dk * * 3 ) , <nl> + ( 10 . 0 + 30 . 0 ) * ( 1 - dk ) ) / _Scale ( dk , 2 ) , <nl> dim ) <nl> self . assertAllClose ( expected , avg2 . eval ( ) ) <nl> <nl> def testAverageVariablesNoNumUpdates_Scalar ( self ) : <nl> ema = tf . train . ExponentialMovingAverage ( 0 . 25 ) <nl> self . _CheckDecay ( ema , actual_decay = 0 . 25 , dim = 1 ) <nl> <nl> + def testAverageVariablesNoNumUpdates_Scalar_Debias ( self ) : <nl> + with self . test_session ( ) : <nl> + ema = tf . train . ExponentialMovingAverage ( 0 . 25 , zero_debias = True ) <nl> + self . _CheckDecay ( ema , actual_decay = 0 . 
25 , dim = 1 ) <nl> + <nl> def testAverageVariablesNoNumUpdates_Vector ( self ) : <nl> with self . test_session ( ) : <nl> ema = tf . train . ExponentialMovingAverage ( 0 . 25 ) <nl> self . _CheckDecay ( ema , actual_decay = 0 . 25 , dim = 5 ) <nl> <nl> + def testAverageVariablesNoNumUpdates_Vector_Debias ( self ) : <nl> + with self . test_session ( ) : <nl> + ema = tf . train . ExponentialMovingAverage ( 0 . 25 , zero_debias = True ) <nl> + self . _CheckDecay ( ema , actual_decay = 0 . 25 , dim = 5 ) <nl> + <nl> def testAverageVariablesNumUpdates_Scalar ( self ) : <nl> with self . test_session ( ) : <nl> # With num_updates 1 , the decay applied is 0 . 1818 <nl> ema = tf . train . ExponentialMovingAverage ( 0 . 25 , num_updates = 1 ) <nl> self . _CheckDecay ( ema , actual_decay = 0 . 181818 , dim = 1 ) <nl> <nl> + def testAverageVariablesNumUpdates_Scalar_Debias ( self ) : <nl> + with self . test_session ( ) : <nl> + # With num_updates 1 , the decay applied is 0 . 1818 <nl> + ema = tf . train . ExponentialMovingAverage ( <nl> + 0 . 25 , num_updates = 1 , zero_debias = True ) <nl> + self . _CheckDecay ( ema , actual_decay = 0 . 181818 , dim = 1 ) <nl> + <nl> def testAverageVariablesNumUpdates_Vector ( self ) : <nl> with self . test_session ( ) : <nl> # With num_updates 1 , the decay applied is 0 . 1818 <nl> ema = tf . train . ExponentialMovingAverage ( 0 . 25 , num_updates = 1 ) <nl> self . _CheckDecay ( ema , actual_decay = 0 . 181818 , dim = 5 ) <nl> <nl> + def testAverageVariablesNumUpdates_Vector_Debias ( self ) : <nl> + with self . test_session ( ) : <nl> + # With num_updates 1 , the decay applied is 0 . 1818 <nl> + ema = tf . train . ExponentialMovingAverage ( <nl> + 0 . 25 , num_updates = 1 , zero_debias = True ) <nl> + self . _CheckDecay ( ema , actual_decay = 0 . 181818 , dim = 5 ) <nl> + <nl> def testAverageVariablesWithControlDeps ( self ) : <nl> with self . test_session ( ) as sess : <nl> v0 = tf . Variable ( 0 , name = " v0 " ) <nl> def testAverageVariablesWithControlDeps ( self ) : <nl> self . assertEqual ( 1 , sess . run ( v0 ) ) <nl> self . assertEqual ( [ 17 . 5 ] , sess . run ( v1_avg ) ) <nl> <nl> - def testAverageVariablesNames ( self ) : <nl> + def averageVariablesNamesHelper ( self , zero_debias ) : <nl> with self . test_session ( ) : <nl> v0 = tf . Variable ( 10 . 0 , name = " v0 " ) <nl> v1 = tf . Variable ( 30 . 0 , name = " v1 " ) <nl> # Add a non - trainable variable . <nl> v2 = tf . Variable ( 20 . 0 , name = " v2 " , trainable = False ) <nl> tensor2 = v0 + v1 <nl> - ema = tf . train . ExponentialMovingAverage ( 0 . 25 , name = " foo " ) <nl> + ema = tf . train . ExponentialMovingAverage ( <nl> + 0 . 25 , zero_debias = zero_debias , name = " foo " ) <nl> self . assertEqual ( " v0 / foo " , ema . average_name ( v0 ) ) <nl> self . assertEqual ( " v1 / foo " , ema . average_name ( v1 ) ) <nl> self . assertEqual ( " add / foo " , ema . average_name ( tensor2 ) ) <nl> def testAverageVariablesNames ( self ) : <nl> # { v0 / foo : v0 , <nl> # v1 / foo : v1 , <nl> # add / foo : add / foo , <nl> - # add / foo / biased : add / foo / biased , <nl> - # add / foo / local_step : add / foo / local_step , <nl> # v2 : v2 } <nl> + expected_names = [ ema . average_name ( v0 ) , <nl> + ema . average_name ( v1 ) , <nl> + ema . average_name ( tensor2 ) , <nl> + v2 . op . 
name ] <nl> + if zero_debias : <nl> + # vars_to_restore should also contain the following : <nl> + # { add / foo / biased : add / foo / biased , <nl> + # add / foo / local_step : add / foo / local_step } <nl> + expected_names + = [ ema . average_name ( tensor2 ) + " / biased " , <nl> + ema . average_name ( tensor2 ) + " / local_step " ] <nl> self . assertEqual ( sorted ( vars_to_restore . keys ( ) ) , <nl> - sorted ( [ ema . average_name ( v0 ) , <nl> - ema . average_name ( v1 ) , <nl> - ema . average_name ( tensor2 ) , <nl> - ema . average_name ( tensor2 ) + " / biased " , <nl> - ema . average_name ( tensor2 ) + " / local_step " , <nl> - v2 . op . name ] ) ) <nl> + sorted ( expected_names ) ) <nl> self . assertEqual ( ema . average_name ( v0 ) , ema . average ( v0 ) . op . name ) <nl> self . assertEqual ( ema . average_name ( v1 ) , ema . average ( v1 ) . op . name ) <nl> self . assertEqual ( ema . average_name ( tensor2 ) , ema . average ( tensor2 ) . op . name ) <nl> <nl> - def testAverageVariablesNamesRespectScope ( self ) : <nl> + def testAverageVariablesNames ( self ) : <nl> + self . averageVariablesNamesHelper ( zero_debias = True ) <nl> + <nl> + def testAverageVariablesNamesNoDebias ( self ) : <nl> + self . averageVariablesNamesHelper ( zero_debias = False ) <nl> + <nl> + def averageVariablesNamesRespectScopeHelper ( self , zero_debias ) : <nl> # See discussion on # 2740 . <nl> with self . test_session ( ) : <nl> with tf . variable_scope ( " scope1 " ) : <nl> def testAverageVariablesNamesRespectScope ( self ) : <nl> v2 = tf . Variable ( 20 . 0 , name = " v2 " , trainable = False ) <nl> tensor2 = v0 + v1 <nl> with tf . variable_scope ( " scope2 " ) : <nl> - ema = tf . train . ExponentialMovingAverage ( 0 . 25 , name = " foo " ) <nl> + ema = tf . train . ExponentialMovingAverage ( <nl> + 0 . 25 , zero_debias = zero_debias , name = " foo " ) <nl> self . assertEqual ( " scope2 / scope1 / v0 / foo " , ema . average_name ( v0 ) ) <nl> self . assertEqual ( " scope2 / scope1 / v1 / foo " , ema . average_name ( v1 ) ) <nl> self . assertEqual ( " scope2 / scope1 / add / foo " , ema . average_name ( tensor2 ) ) <nl> def testAverageVariablesNamesRespectScope ( self ) : <nl> # { scope2 / scope1 / v0 / foo : v0 , <nl> # scope2 / scope1 / v1 / foo : v1 , <nl> # scope2 / scope1 / add / foo : add / foo , <nl> - # scope2 / scope2 / scope1 / add / foo / biased : add / foo / biased , <nl> - # scope2 / scope2 / scope1 / add / foo / local_step : add / foo / local_step , <nl> # scope1 / v2 : v2 } <nl> - sc = " scope2 / " <nl> + expected_names = [ ema . average_name ( v0 ) , <nl> + ema . average_name ( v1 ) , <nl> + ema . average_name ( tensor2 ) , <nl> + v2 . op . name ] <nl> + if zero_debias : <nl> + # vars_to_restore should also contain the following : <nl> + # { scope2 / scope2 / scope1 / add / foo / biased : add / foo / biased , <nl> + # scope2 / scope2 / scope1 / add / foo / local_step : add / foo / local_step } <nl> + sc = " scope2 / " <nl> + expected_names + = [ sc + ema . average_name ( tensor2 ) + " / biased " , <nl> + sc + ema . average_name ( tensor2 ) + " / local_step " ] <nl> + <nl> self . assertEqual ( sorted ( vars_to_restore . keys ( ) ) , <nl> - sorted ( [ ema . average_name ( v0 ) , <nl> - ema . average_name ( v1 ) , <nl> - ema . average_name ( tensor2 ) , <nl> - sc + ema . average_name ( tensor2 ) + " / biased " , <nl> - sc + ema . average_name ( tensor2 ) + " / local_step " , <nl> - v2 . op . name ] ) ) <nl> + sorted ( expected_names ) ) <nl> self . assertEqual ( ema . 
average_name ( v0 ) , ema . average ( v0 ) . op . name ) <nl> self . assertEqual ( ema . average_name ( v1 ) , ema . average ( v1 ) . op . name ) <nl> self . assertEqual ( ema . average_name ( tensor2 ) , <nl> ema . average ( tensor2 ) . op . name ) <nl> <nl> + def testAverageVariablesNamesRespectScope ( self ) : <nl> + self . averageVariablesNamesRespectScopeHelper ( zero_debias = True ) <nl> + <nl> + def testAverageVariablesNamesRespectScopeNoDebias ( self ) : <nl> + self . averageVariablesNamesRespectScopeHelper ( zero_debias = False ) <nl> + <nl> def testSubsetAverageVariablesNames ( self ) : <nl> with self . test_session ( ) : <nl> v0 = tf . Variable ( 10 . 0 , name = " v0 " ) <nl> | Add ability to disable zero - debiasing in ExponentialMovingAverage , for the purpose of backwards compatibility to support old checkpoints . For now , set this default value to avoid debiasing . | tensorflow/tensorflow | 3b5ada30c14d35d6fbf0aeaaee898c5ff65b008c | 2016-11-30T17:46:08Z |
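The _Scale helper makes the tests' expectations explicit: with zero_debias=True, a zero-initialized moving average is divided by 1 - dk**(steps + 1) after each update; with zero_debias=False the raw value is reported. A quick numeric check of the expected values in _CheckDecay, done in plain C++ rather than TensorFlow (value 40.0 stands in for tensor2 = v0 + v1 = 10.0 + 30.0):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const double dk = 0.25;     // actual_decay used by the no-num_updates tests
  const double value = 40.0;  // tensor2 = v0 + v1 = 10.0 + 30.0
  double biased = 0.0;        // a tensor's shadow average starts at zero

  for (int update = 1; update <= 2; ++update) {
    biased = dk * biased + (1.0 - dk) * value;  // standard EMA update
    // zero_debias=True: divide by _Scale(dk, update) = 1 - dk**(update + 1)
    double debiased = biased / (1.0 - std::pow(dk, update + 1));
    std::printf("update %d: biased=%g debiased=%g\n", update, biased, debiased);
  }
  // update 1: biased=30,   debiased=32      (the _Scale(dk, 1) expectation)
  // update 2: biased=37.5, debiased=38.0952 (the _Scale(dk, 2) expectation)
  return 0;
}
```

With zero_debias=False the biased values 30 and 37.5 are what the average reports, i.e. it stays pulled toward the zero initializer; keeping that as the default is exactly the backwards compatibility with old checkpoints that this commit preserves.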
mmm a / modules / core / include / opencv2 / core / core_c . h <nl> ppp b / modules / core / include / opencv2 / core / core_c . h <nl> CVAPI ( void ) cvSetIPLAllocators ( Cv_iplCreateImageHeader create_header , <nl> <nl> The function opens file storage for reading or writing data . In the latter case , a new file is <nl> created or an existing file is rewritten . The type of the read or written file is determined by the <nl> - filename extension : . xml for XML and . yml or . yaml for YAML . <nl> + filename extension : . xml for XML , . yml or . yaml for YAML and . json for JSON . <nl> <nl> At the same time , it also supports adding parameters like " example . xml ? base64 " . The three ways <nl> are the same : <nl> One and only one of the two above flags must be specified <nl> @ param type_name Optional parameter - the object type name . In <nl> case of XML it is written as a type_id attribute of the structure opening tag . In the case of <nl> YAML it is written after a colon following the structure name ( see the example in <nl> - CvFileStorage description ) . Mainly it is used with user objects . When the storage is read , the <nl> + CvFileStorage description ) . In case of JSON it is written as a name / value pair . <nl> + Mainly it is used with user objects . When the storage is read , the <nl> encoded type name is used to determine the object type ( see CvTypeInfo and cvFindType ) . <nl> @ param attributes This parameter is not used in the current implementation <nl> * / <nl> CVAPI ( void ) cvReadRawData ( const CvFileStorage * fs , const CvFileNode * src , <nl> / * * @ brief Writes a file node to another file storage . <nl> <nl> The function writes a copy of a file node to file storage . Possible applications of the function are <nl> - merging several file storages into one and conversion between XML and YAML formats . <nl> + merging several file storages into one and conversion between XML , YAML and JSON formats . <nl> @ param fs Destination file storage <nl> @ param new_node_name New name of the file node in the destination file storage . To keep the <nl> existing name , use cvcvGetFileNodeName <nl> mmm a / modules / core / include / opencv2 / core / persistence . hpp <nl> ppp b / modules / core / include / opencv2 / core / persistence . hpp <nl> Several functions that are described below take CvFileStorage \ * as inputs and al <nl> save or to load hierarchical collections that consist of scalar values , standard CXCore objects <nl> ( such as matrices , sequences , graphs ) , and user - defined objects . <nl> <nl> - OpenCV can read and write data in XML ( < http : / / www . w3c . org / XML > ) or YAML ( < http : / / www . yaml . org > ) <nl> - formats . Below is an example of 3x3 floating - point identity matrix A , stored in XML and YAML files <nl> + OpenCV can read and write data in XML ( < http : / / www . w3c . org / XML > ) , YAML ( < http : / / www . yaml . org > ) or <nl> + JSON ( < http : / / www . json . org / > ) formats . Below is an example of 3x3 floating - point identity matrix A , <nl> + stored in XML and YAML files <nl> using CXCore functions : <nl> XML : <nl> @ code { . xml } <nl> As it can be seen from the examples , XML uses nested tags to represent hierarchy <nl> indentation for that purpose ( similar to the Python programming language ) . <nl> <nl> The same functions can read and write data in both formats ; the particular format is determined by <nl> - the extension of the opened file , " . xml " for XML files and " . yml " or " . yaml " for YAML . 
<nl> + the extension of the opened file , " . xml " for XML files , " . yml " or " . yaml " for YAML and " . json " for <nl> + JSON . <nl> * / <nl> typedef struct CvFileStorage CvFileStorage ; <nl> typedef struct CvFileNode CvFileNode ; <nl> namespace cv { <nl> <nl> / * * @ addtogroup core_xml <nl> <nl> - XML / YAML file storages . { # xml_storage } <nl> + XML / YAML / JSON file storages . { # xml_storage } <nl> = = = = = = = = = = = = = = = = = = = = = = = <nl> Writing to a file storage . <nl> mmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - You can store and then restore various OpenCV data structures to / from XML ( < http : / / www . w3c . org / XML > ) <nl> - or YAML ( < http : / / www . yaml . org > ) formats . Also , it is possible store and load arbitrarily complex <nl> - data structures , which include OpenCV data structures , as well as primitive data types ( integer and <nl> - floating - point numbers and text strings ) as their elements . <nl> + You can store and then restore various OpenCV data structures to / from XML ( < http : / / www . w3c . org / XML > ) , <nl> + YAML ( < http : / / www . yaml . org > ) or JSON ( < http : / / www . json . org / > ) formats . Also , it is possible store <nl> + and load arbitrarily complex data structures , which include OpenCV data structures , as well as <nl> + primitive data types ( integer and floating - point numbers and text strings ) as their elements . <nl> <nl> - Use the following procedure to write something to XML or YAML : <nl> + Use the following procedure to write something to XML , YAML or JSON : <nl> - # Create new FileStorage and open it for writing . It can be done with a single call to <nl> FileStorage : : FileStorage constructor that takes a filename , or you can use the default constructor <nl> - and then call FileStorage : : open . Format of the file ( XML or YAML ) is determined from the filename <nl> - extension ( " . xml " and " . yml " / " . yaml " , respectively ) <nl> + and then call FileStorage : : open . Format of the file ( XML , YAML or JSON ) is determined from the filename <nl> + extension ( " . xml " , " . yml " / " . yaml " and " . json " , respectively ) <nl> - # Write all the data you want using the streaming operator ` < < ` , just like in the case of STL <nl> streams . <nl> - # Close the file using FileStorage : : release . FileStorage destructor also closes the file . <nl> distCoeffs : ! ! opencv - matrix <nl> - { x : 344 , y : 158 , lbp : [ 1 , 1 , 0 , 0 , 0 , 0 , 1 , 0 ] } <nl> @ endcode <nl> <nl> - As an exercise , you can replace " . yml " with " . xml " in the sample above and see , how the <nl> + As an exercise , you can replace " . yml " with " . xml " or " . json " in the sample above and see , how the <nl> corresponding XML file will look like . <nl> <nl> Several things can be noted by looking at the sample code and the output : <nl> <nl> - - The produced YAML ( and XML ) consists of heterogeneous collections that can be nested . There are 2 <nl> - types of collections : named collections ( mappings ) and unnamed collections ( sequences ) . In mappings <nl> + - The produced YAML ( and XML / JSON ) consists of heterogeneous collections that can be nested . There are <nl> + 2 types of collections : named collections ( mappings ) and unnamed collections ( sequences ) . In mappings <nl> each element has a name and is accessed by name . This is similar to structures and std : : map in <nl> C / C + + and dictionaries in Python . In sequences elements do not have names , they are accessed by <nl> indices . 
This is similar to arrays and std : : vector in C / C + + and lists , tuples in Python . <nl> " Heterogeneous " means that elements of each single collection can have different types . <nl> <nl> - Top - level collection in YAML / XML is a mapping . Each matrix is stored as a mapping , and the matrix <nl> + Top - level collection in YAML / XML / JSON is a mapping . Each matrix is stored as a mapping , and the matrix <nl> elements are stored as a sequence . Then , there is a sequence of features , where each feature is <nl> represented a mapping , and lbp value in a nested sequence . <nl> <nl> Several things can be noted by looking at the sample code and the output : <nl> - To write a sequence , you first write the special string ` [ ` , then write the elements , then <nl> write the closing ` ] ` . <nl> <nl> - - In YAML ( but not XML ) , mappings and sequences can be written in a compact Python - like inline <nl> + - In YAML / JSON ( but not XML ) , mappings and sequences can be written in a compact Python - like inline <nl> form . In the sample above matrix elements , as well as each feature , including its lbp value , is <nl> stored in such inline form . To store a mapping / sequence in a compact form , put ` : ` after the <nl> opening character , e . g . use ` { : ` instead of ` { ` and ` [ : ` instead of ` [ ` . When the <nl> Several things can be noted by looking at the sample code and the output : <nl> <nl> Reading data from a file storage . <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - To read the previously written XML or YAML file , do the following : <nl> + To read the previously written XML , YAML or JSON file , do the following : <nl> - # Open the file storage using FileStorage : : FileStorage constructor or FileStorage : : open method . <nl> In the current implementation the whole file is parsed and the whole representation of file <nl> storage is built in memory as a hierarchy of file nodes ( see FileNode ) <nl> A complete example using the FileStorage interface <nl> class CV_EXPORTS FileNode ; <nl> class CV_EXPORTS FileNodeIterator ; <nl> <nl> - / * * @ brief XML / YAML file storage class that encapsulates all the information necessary for writing or reading <nl> - data to / from a file . <nl> + / * * @ brief XML / YAML / JSON file storage class that encapsulates all the information necessary for writing or <nl> + reading data to / from a file . <nl> * / <nl> class CV_EXPORTS_W FileStorage <nl> { <nl> class CV_EXPORTS_W FileStorage <nl> FORMAT_AUTO = 0 , / / ! < flag , auto format <nl> FORMAT_XML = ( 1 < < 3 ) , / / ! < flag , XML format <nl> FORMAT_YAML = ( 2 < < 3 ) , / / ! < flag , YAML format <nl> + FORMAT_JSON = ( 3 < < 3 ) , / / ! < flag , JSON format <nl> <nl> BASE64 = 64 , / / ! < flag , write rawdata in Base64 by default . ( consider using WRITE_BASE64 ) <nl> WRITE_BASE64 = BASE64 | WRITE , / / ! < flag , enable both WRITE and BASE64 <nl> class CV_EXPORTS_W FileStorage <nl> <nl> / * * @ overload <nl> @ param source Name of the file to open or the text string to read the data from . Extension of the <nl> - file ( . xml or . yml / . yaml ) determines its format ( XML or YAML respectively ) . Also you can append . gz <nl> - to work with compressed files , for example myHugeMatrix . xml . gz . If both FileStorage : : WRITE and <nl> - FileStorage : : MEMORY flags are specified , source is used just to specify the output file format ( e . g . <nl> + file ( . xml , . yml / . yaml , or . json ) determines its format ( XML , YAML or JSON respectively ) . 
Also you can <nl> + append . gz to work with compressed files , for example myHugeMatrix . xml . gz . If both FileStorage : : WRITE <nl> + and FileStorage : : MEMORY flags are specified , source is used just to specify the output file format ( e . g . <nl> mydata . xml , . yml etc . ) . <nl> @ param flags Mode of operation . See FileStorage : : Mode <nl> @ param encoding Encoding of the file . Note that UTF - 16 XML encoding is not supported currently and <nl> class CV_EXPORTS_W FileStorage <nl> See description of parameters in FileStorage : : FileStorage . The method calls FileStorage : : release <nl> before opening the file . <nl> @ param filename Name of the file to open or the text string to read the data from . <nl> - Extension of the file ( . xml or . yml / . yaml ) determines its format ( XML or YAML respectively ) . <nl> - Also you can append . gz to work with compressed files , for example myHugeMatrix . xml . gz . If both <nl> + Extension of the file ( . xml , . yml / . yaml or . json ) determines its format ( XML , YAML or JSON <nl> + respectively ) . Also you can append . gz to work with compressed files , for example myHugeMatrix . xml . gz . If both <nl> FileStorage : : WRITE and FileStorage : : MEMORY flags are specified , source is used just to specify <nl> the output file format ( e . g . mydata . xml , . yml etc . ) . A file name can also contain parameters . <nl> - You can use this format , " * ? base64 " ( e . g . " file . xml ? base64 " ) , as an alternative to <nl> - FileStorage : : BASE64 flag . Note : it is case sensitive . <nl> + You can use this format , " * ? base64 " ( e . g . " file . json ? base64 " ( case sensitive ) ) , as an alternative to <nl> + FileStorage : : BASE64 flag . <nl> @ param flags Mode of operation . One of FileStorage : : Mode <nl> @ param encoding Encoding of the file . Note that UTF - 16 XML encoding is not supported currently and <nl> you should use 8 - bit encoding instead of it . <nl> mmm a / modules / core / include / opencv2 / core / types_c . h <nl> ppp b / modules / core / include / opencv2 / core / types_c . h <nl> typedef struct CvFileStorage CvFileStorage ; <nl> # define CV_STORAGE_FORMAT_AUTO 0 <nl> # define CV_STORAGE_FORMAT_XML 8 <nl> # define CV_STORAGE_FORMAT_YAML 16 <nl> + # define CV_STORAGE_FORMAT_JSON 24 <nl> # define CV_STORAGE_BASE64 64 <nl> # define CV_STORAGE_WRITE_BASE64 ( CV_STORAGE_BASE64 | CV_STORAGE_WRITE ) <nl> <nl> mmm a / modules / core / perf / perf_io_base64 . cpp <nl> ppp b / modules / core / perf / perf_io_base64 . cpp <nl> typedef TestBaseWithParam < Size_MatType_Str_t > Size_Mat_StrType ; <nl> <nl> # define MAT_SIZES : : perf : : sz1080p / * , : : perf : : sz4320p * / <nl> # define MAT_TYPES CV_8UC1 , CV_32FC1 <nl> - # define FILE_EXTENSION String ( " . xml " ) , String ( " . yml " ) <nl> + # define FILE_EXTENSION String ( " . xml " ) , String ( " . yml " ) , String ( " . json " ) <nl> <nl> <nl> PERF_TEST_P ( Size_Mat_StrType , fs_text , <nl> mmm a / modules / core / src / persistence . cpp <nl> ppp b / modules / core / src / persistence . cpp <nl> static char * icv_itoa ( int _val , char * buffer , int / * radix * / ) <nl> return ptr ; <nl> } <nl> <nl> + static inline bool cv_strcasecmp ( const char * s1 , const char * s2 ) <nl> + { <nl> + if ( s1 = = 0 & & s2 = = 0 ) <nl> + return true ; <nl> + else if ( s1 = = 0 | | s2 = = 0 ) <nl> + return false ; <nl> + <nl> + size_t len1 = strlen ( s1 ) ; <nl> + size_t len2 = strlen ( s2 ) ; <nl> + if ( len1 ! 
= len2 ) <nl> + return false ; <nl> + <nl> + for ( size_t i = 0U ; i < len1 ; i + + ) <nl> + if ( tolower ( static_cast < int > ( s1 [ i ] ) ) ! = tolower ( static_cast < int > ( s2 [ i ] ) ) ) <nl> + return false ; <nl> + <nl> + return true ; <nl> + } <nl> + <nl> cv : : String cv : : FileStorage : : getDefaultObjectName ( const cv : : String & _filename ) <nl> { <nl> static const char * stubname = " unnamed " ; <nl> icvFSFlush ( CvFileStorage * fs ) <nl> <nl> if ( fs - > space ! = indent ) <nl> { <nl> - if ( fs - > space < indent ) <nl> - memset ( fs - > buffer_start + fs - > space , ' ' , indent - fs - > space ) ; <nl> + memset ( fs - > buffer_start , ' ' , indent ) ; <nl> fs - > space = indent ; <nl> } <nl> <nl> icvClose ( CvFileStorage * fs , cv : : String * out ) <nl> icvFSFlush ( fs ) ; <nl> if ( fs - > fmt = = CV_STORAGE_FORMAT_XML ) <nl> icvPuts ( fs , " < / opencv_storage > \ n " ) ; <nl> + else if ( fs - > fmt = = CV_STORAGE_FORMAT_JSON ) <nl> + icvPuts ( fs , " } \ n " ) ; <nl> } <nl> <nl> icvCloseFile ( fs ) ; <nl> cvReleaseFileStorage ( CvFileStorage * * p_fs ) <nl> cvFree ( & fs - > buffer_start ) ; <nl> cvReleaseMemStorage ( & fs - > memstorage ) ; <nl> <nl> - if ( fs - > outbuf ) <nl> - delete fs - > outbuf ; <nl> + delete fs - > outbuf ; <nl> + delete fs - > base64_writer ; <nl> + delete fs - > delayed_struct_key ; <nl> + delete fs - > delayed_type_name ; <nl> <nl> memset ( fs , 0 , sizeof ( * fs ) ) ; <nl> cvFree ( & fs ) ; <nl> static double icv_strtod ( CvFileStorage * fs , char * ptr , char * * endptr ) <nl> return fval ; <nl> } <nl> <nl> + / / this function will convert " aa ? bb & cc & dd " to { " aa " , " bb " , " cc " , " dd " } <nl> static std : : vector < std : : string > analyze_file_name ( std : : string const & file_name ) <nl> { <nl> static const char not_file_name = ' \ n ' ; <nl> static char * icvYMLParseBase64 ( CvFileStorage * fs , char * ptr , int indent , CvFileN <nl> std : : vector < char > header ( base64 : : HEADER_SIZE + 1 , ' ' ) ; <nl> base64 : : base64_decode ( beg , header . data ( ) , 0U , base64 : : ENCODED_HEADER_SIZE ) ; <nl> if ( ! base64 : : read_base64_header ( header , dt ) | | dt . empty ( ) ) <nl> - CV_PARSE_ERROR ( " Cannot parse dt in Base64 header " ) ; <nl> + CV_PARSE_ERROR ( " Invalid ` dt ` in Base64 header " ) ; <nl> <nl> beg + = base64 : : ENCODED_HEADER_SIZE ; <nl> } <nl> static char * icvXMLParseBase64 ( CvFileStorage * fs , char * ptr , CvFileNode * node ) <nl> std : : vector < char > header ( base64 : : HEADER_SIZE + 1 , ' ' ) ; <nl> base64 : : base64_decode ( beg , header . data ( ) , 0U , base64 : : ENCODED_HEADER_SIZE ) ; <nl> if ( ! base64 : : read_base64_header ( header , dt ) | | dt . 
empty ( ) ) <nl> - CV_PARSE_ERROR ( " Cannot parse dt in Base64 header " ) ; <nl> + CV_PARSE_ERROR ( " Invalid ` dt ` in Base64 header " ) ; <nl> <nl> beg + = base64 : : ENCODED_HEADER_SIZE ; <nl> } <nl> icvXMLStartNextStream ( CvFileStorage * fs ) <nl> } <nl> <nl> <nl> - static void <nl> - icvXMLWriteScalar ( CvFileStorage * fs , const char * key , const char * data , int len ) <nl> - { <nl> - check_if_write_struct_is_delayed ( fs ) ; <nl> - if ( fs - > state_of_writing_base64 = = base64 : : fs : : Uncertain ) <nl> + static void <nl> + icvXMLWriteScalar ( CvFileStorage * fs , const char * key , const char * data , int len ) <nl> + { <nl> + check_if_write_struct_is_delayed ( fs ) ; <nl> + if ( fs - > state_of_writing_base64 = = base64 : : fs : : Uncertain ) <nl> + { <nl> + switch_to_Base64_state ( fs , base64 : : fs : : NotUse ) ; <nl> + } <nl> + else if ( fs - > state_of_writing_base64 = = base64 : : fs : : InUse ) <nl> + { <nl> + CV_Error ( CV_StsError , " Currently only Base64 data is allowed . " ) ; <nl> + } <nl> + <nl> + if ( CV_NODE_IS_MAP ( fs - > struct_flags ) | | <nl> + ( ! CV_NODE_IS_COLLECTION ( fs - > struct_flags ) & & key ) ) <nl> + { <nl> + icvXMLWriteTag ( fs , key , CV_XML_OPENING_TAG , cvAttrList ( 0 , 0 ) ) ; <nl> + char * ptr = icvFSResizeWriteBuffer ( fs , fs - > buffer , len ) ; <nl> + memcpy ( ptr , data , len ) ; <nl> + fs - > buffer = ptr + len ; <nl> + icvXMLWriteTag ( fs , key , CV_XML_CLOSING_TAG , cvAttrList ( 0 , 0 ) ) ; <nl> + } <nl> + else <nl> + { <nl> + char * ptr = fs - > buffer ; <nl> + int new_offset = ( int ) ( ptr - fs - > buffer_start ) + len ; <nl> + <nl> + if ( key ) <nl> + CV_Error ( CV_StsBadArg , " elements with keys can not be written to sequence " ) ; <nl> + <nl> + fs - > struct_flags = CV_NODE_SEQ ; <nl> + <nl> + if ( ( new_offset > fs - > wrap_margin & & new_offset - fs - > struct_indent > 10 ) | | <nl> + ( ptr > fs - > buffer_start & & ptr [ - 1 ] = = ' > ' & & ! CV_NODE_IS_EMPTY ( fs - > struct_flags ) ) ) <nl> + { <nl> + ptr = icvXMLFlush ( fs ) ; <nl> + } <nl> + else if ( ptr > fs - > buffer_start + fs - > struct_indent & & ptr [ - 1 ] ! = ' > ' ) <nl> + * ptr + + = ' ' ; <nl> + <nl> + memcpy ( ptr , data , len ) ; <nl> + fs - > buffer = ptr + len ; <nl> + } <nl> + } <nl> + <nl> + <nl> + static void <nl> + icvXMLWriteInt ( CvFileStorage * fs , const char * key , int value ) <nl> + { <nl> + char buf [ 128 ] , * ptr = icv_itoa ( value , buf , 10 ) ; <nl> + int len = ( int ) strlen ( ptr ) ; <nl> + icvXMLWriteScalar ( fs , key , ptr , len ) ; <nl> + } <nl> + <nl> + <nl> + static void <nl> + icvXMLWriteReal ( CvFileStorage * fs , const char * key , double value ) <nl> + { <nl> + char buf [ 128 ] ; <nl> + int len = ( int ) strlen ( icvDoubleToString ( buf , value ) ) ; <nl> + icvXMLWriteScalar ( fs , key , buf , len ) ; <nl> + } <nl> + <nl> + <nl> + static void <nl> + icvXMLWriteString ( CvFileStorage * fs , const char * key , const char * str , int quote ) <nl> + { <nl> + char buf [ CV_FS_MAX_LEN * 6 + 16 ] ; <nl> + char * data = ( char * ) str ; <nl> + int i , len ; <nl> + <nl> + if ( ! str ) <nl> + CV_Error ( CV_StsNullPtr , " Null string pointer " ) ; <nl> + <nl> + len = ( int ) strlen ( str ) ; <nl> + if ( len > CV_FS_MAX_LEN ) <nl> + CV_Error ( CV_StsBadArg , " The written string is too long " ) ; <nl> + <nl> + if ( quote | | len = = 0 | | str [ 0 ] ! = ' \ " ' | | str [ 0 ] ! 
= str [ len - 1 ] ) <nl> + { <nl> + int need_quote = quote | | len = = 0 ; <nl> + data = buf ; <nl> + * data + + = ' \ " ' ; <nl> + for ( i = 0 ; i < len ; i + + ) <nl> + { <nl> + char c = str [ i ] ; <nl> + <nl> + if ( ( uchar ) c > = 128 | | c = = ' ' ) <nl> + { <nl> + * data + + = c ; <nl> + need_quote = 1 ; <nl> + } <nl> + else if ( ! cv_isprint ( c ) | | c = = ' < ' | | c = = ' > ' | | c = = ' & ' | | c = = ' \ ' ' | | c = = ' \ " ' ) <nl> + { <nl> + * data + + = ' & ' ; <nl> + if ( c = = ' < ' ) <nl> + { <nl> + memcpy ( data , " lt " , 2 ) ; <nl> + data + = 2 ; <nl> + } <nl> + else if ( c = = ' > ' ) <nl> + { <nl> + memcpy ( data , " gt " , 2 ) ; <nl> + data + = 2 ; <nl> + } <nl> + else if ( c = = ' & ' ) <nl> + { <nl> + memcpy ( data , " amp " , 3 ) ; <nl> + data + = 3 ; <nl> + } <nl> + else if ( c = = ' \ ' ' ) <nl> + { <nl> + memcpy ( data , " apos " , 4 ) ; <nl> + data + = 4 ; <nl> + } <nl> + else if ( c = = ' \ " ' ) <nl> + { <nl> + memcpy ( data , " quot " , 4 ) ; <nl> + data + = 4 ; <nl> + } <nl> + else <nl> + { <nl> + sprintf ( data , " # x % 02x " , ( uchar ) c ) ; <nl> + data + = 4 ; <nl> + } <nl> + * data + + = ' ; ' ; <nl> + need_quote = 1 ; <nl> + } <nl> + else <nl> + * data + + = c ; <nl> + } <nl> + if ( ! need_quote & & ( cv_isdigit ( str [ 0 ] ) | | <nl> + str [ 0 ] = = ' + ' | | str [ 0 ] = = ' - ' | | str [ 0 ] = = ' . ' ) ) <nl> + need_quote = 1 ; <nl> + <nl> + if ( need_quote ) <nl> + * data + + = ' \ " ' ; <nl> + len = ( int ) ( data - buf ) - ! need_quote ; <nl> + * data + + = ' \ 0 ' ; <nl> + data = buf + ! need_quote ; <nl> + } <nl> + <nl> + icvXMLWriteScalar ( fs , key , data , len ) ; <nl> + } <nl> + <nl> + <nl> + static void <nl> + icvXMLWriteComment ( CvFileStorage * fs , const char * comment , int eol_comment ) <nl> + { <nl> + int len ; <nl> + int multiline ; <nl> + const char * eol ; <nl> + char * ptr ; <nl> + <nl> + if ( ! comment ) <nl> + CV_Error ( CV_StsNullPtr , " Null comment " ) ; <nl> + <nl> + if ( strstr ( comment , " - - " ) ! = 0 ) <nl> + CV_Error ( CV_StsBadArg , " Double hyphen \ ' - - \ ' is not allowed in the comments " ) ; <nl> + <nl> + len = ( int ) strlen ( comment ) ; <nl> + eol = strchr ( comment , ' \ n ' ) ; <nl> + multiline = eol ! = 0 ; <nl> + ptr = fs - > buffer ; <nl> + <nl> + if ( multiline | | ! eol_comment | | fs - > buffer_end - ptr < len + 5 ) <nl> + ptr = icvXMLFlush ( fs ) ; <nl> + else if ( ptr > fs - > buffer_start + fs - > struct_indent ) <nl> + * ptr + + = ' ' ; <nl> + <nl> + if ( ! multiline ) <nl> + { <nl> + ptr = icvFSResizeWriteBuffer ( fs , ptr , len + 9 ) ; <nl> + sprintf ( ptr , " < ! - - % s - - > " , comment ) ; <nl> + len = ( int ) strlen ( ptr ) ; <nl> + } <nl> + else <nl> + { <nl> + strcpy ( ptr , " < ! 
- - " ) ; <nl> + len = 4 ; <nl> + } <nl> + <nl> + fs - > buffer = ptr + len ; <nl> + ptr = icvXMLFlush ( fs ) ; <nl> + <nl> + if ( multiline ) <nl> + { <nl> + while ( comment ) <nl> + { <nl> + if ( eol ) <nl> + { <nl> + ptr = icvFSResizeWriteBuffer ( fs , ptr , ( int ) ( eol - comment ) + 1 ) ; <nl> + memcpy ( ptr , comment , eol - comment + 1 ) ; <nl> + ptr + = eol - comment ; <nl> + comment = eol + 1 ; <nl> + eol = strchr ( comment , ' \ n ' ) ; <nl> + } <nl> + else <nl> + { <nl> + len = ( int ) strlen ( comment ) ; <nl> + ptr = icvFSResizeWriteBuffer ( fs , ptr , len ) ; <nl> + memcpy ( ptr , comment , len ) ; <nl> + ptr + = len ; <nl> + comment = 0 ; <nl> + } <nl> + fs - > buffer = ptr ; <nl> + ptr = icvXMLFlush ( fs ) ; <nl> + } <nl> + sprintf ( ptr , " - - > " ) ; <nl> + fs - > buffer = ptr + 3 ; <nl> + icvXMLFlush ( fs ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ <nl> + * JSON Parser * <nl> + \ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + static char * <nl> + icvJSONSkipSpaces ( CvFileStorage * fs , char * ptr ) <nl> + { <nl> + bool is_eof = false ; <nl> + bool is_completed = false ; <nl> + <nl> + while ( is_eof = = false & & is_completed = = false ) <nl> + { <nl> + switch ( * ptr ) <nl> + { <nl> + / * comment * / <nl> + case ' / ' : { <nl> + ptr + + ; <nl> + if ( * ptr = = ' \ 0 ' ) <nl> + { <nl> + ptr = icvGets ( fs , fs - > buffer_start , static_cast < int > ( fs - > buffer_end - fs - > buffer_start ) ) ; <nl> + if ( ! ptr ) { is_eof = true ; break ; } <nl> + } <nl> + <nl> + if ( * ptr = = ' / ' ) <nl> + { <nl> + while ( * ptr ! = ' \ n ' & & * ptr ! = ' \ r ' ) <nl> + { <nl> + if ( * ptr = = ' \ 0 ' ) <nl> + { <nl> + ptr = icvGets ( fs , fs - > buffer_start , static_cast < int > ( fs - > buffer_end - fs - > buffer_start ) ) ; <nl> + if ( ! ptr ) { is_eof = true ; break ; } <nl> + } <nl> + else <nl> + { <nl> + ptr + + ; <nl> + } <nl> + } <nl> + } <nl> + else if ( * ptr = = ' * ' ) <nl> + { <nl> + ptr + + ; <nl> + for ( ; ; ) <nl> + { <nl> + if ( * ptr = = ' \ 0 ' ) <nl> + { <nl> + ptr = icvGets ( fs , fs - > buffer_start , static_cast < int > ( fs - > buffer_end - fs - > buffer_start ) ) ; <nl> + if ( ! ptr ) { is_eof = true ; break ; } <nl> + } <nl> + else if ( * ptr = = ' * ' ) <nl> + { <nl> + ptr + + ; <nl> + if ( * ptr = = ' \ 0 ' ) <nl> + { <nl> + ptr = icvGets ( fs , fs - > buffer_start , static_cast < int > ( fs - > buffer_end - fs - > buffer_start ) ) ; <nl> + if ( ! ptr ) { is_eof = true ; break ; } <nl> + } <nl> + if ( * ptr = = ' / ' ) <nl> + { <nl> + ptr + + ; <nl> + break ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + ptr + + ; <nl> + } <nl> + } <nl> + } <nl> + else <nl> + { <nl> + CV_PARSE_ERROR ( " Not supported escape character " ) ; <nl> + } <nl> + } break ; <nl> + / * whitespace * / <nl> + case ' \ t ' : <nl> + case ' ' : { <nl> + ptr + + ; <nl> + } break ; <nl> + / * newline | | end mark * / <nl> + case ' \ 0 ' : <nl> + case ' \ n ' : <nl> + case ' \ r ' : { <nl> + ptr = icvGets ( fs , fs - > buffer_start , static_cast < int > ( fs - > buffer_end - fs - > buffer_start ) ) ; <nl> + if ( ! ptr ) { is_eof = true ; break ; } <nl> + } break ; <nl> + / * other character * / <nl> + default : { <nl> + if ( ! 
cv_isprint ( * ptr ) ) <nl> + CV_PARSE_ERROR ( " Invalid character in the stream " ) ; <nl> + is_completed = true ; <nl> + } break ; <nl> + } <nl> + } <nl> + <nl> + if ( is_eof ) <nl> + { <nl> + ptr = fs - > buffer_start ; <nl> + * ptr = ' \ 0 ' ; <nl> + fs - > dummy_eof = 1 ; <nl> + } <nl> + else if ( ! is_completed ) <nl> + { <nl> + / * should not be executed * / <nl> + ptr = 0 ; <nl> + fs - > dummy_eof = 1 ; <nl> + CV_PARSE_ERROR ( " Abort at parse time " ) ; <nl> + } <nl> + return ptr ; <nl> + } <nl> + <nl> + <nl> + static char * icvJSONParseKey ( CvFileStorage * fs , char * ptr , CvFileNode * map , CvFileNode * * value_placeholder ) <nl> + { <nl> + if ( * ptr ! = ' " ' ) <nl> + CV_PARSE_ERROR ( " Key must start with \ ' \ " \ ' " ) ; <nl> + <nl> + char * beg = ptr + 1 ; <nl> + char * end = beg ; <nl> + <nl> + do + + ptr ; <nl> + while ( cv_isprint ( * ptr ) & & * ptr ! = ' " ' ) ; <nl> + <nl> + if ( * ptr ! = ' " ' ) <nl> + CV_PARSE_ERROR ( " Key must end with \ ' \ " \ ' " ) ; <nl> + <nl> + end = ptr ; <nl> + ptr + + ; <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + return 0 ; <nl> + <nl> + if ( * ptr ! = ' : ' ) <nl> + CV_PARSE_ERROR ( " Missing \ ' : \ ' between key and value " ) ; <nl> + <nl> + / * [ beg , end ) * / <nl> + if ( end < = beg ) <nl> + CV_PARSE_ERROR ( " Key is empty " ) ; <nl> + <nl> + if ( end - beg = = 7u & & memcmp ( beg , " type_id " , 7u ) = = 0 ) <nl> + { <nl> + * value_placeholder = 0 ; <nl> + } <nl> + else <nl> + { <nl> + CvStringHashNode * str_hash_node = cvGetHashedKey ( fs , beg , static_cast < int > ( end - beg ) , 1 ) ; <nl> + * value_placeholder = cvGetFileNode ( fs , map , str_hash_node , 1 ) ; <nl> + } <nl> + <nl> + ptr + + ; <nl> + return ptr ; <nl> + } <nl> + <nl> + static char * icvJSONParseValue ( CvFileStorage * fs , char * ptr , CvFileNode * node ) <nl> + { <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + CV_PARSE_ERROR ( " Unexpected End - Of - File " ) ; <nl> + <nl> + memset ( node , 0 , sizeof ( * node ) ) ; <nl> + <nl> + if ( * ptr = = ' " ' ) <nl> + { / * must be string or Base64 string * / <nl> + ptr + + ; <nl> + char * beg = ptr ; <nl> + size_t len = 0u ; <nl> + for ( ; ( cv_isalnum ( * ptr ) | | * ptr = = ' $ ' ) & & len < = 9u ; ptr + + ) <nl> + len + + ; <nl> + <nl> + if ( len > = 8u & & memcmp ( beg , " $ base64 $ " , 8u ) = = 0 ) <nl> + { / * * * * * * * * * * * * * * * * Base64 string * * * * * * * * * * * * * * * * / <nl> + ptr = beg + = 8 ; <nl> + <nl> + std : : string base64_buffer ; <nl> + base64_buffer . reserve ( PARSER_BASE64_BUFFER_SIZE ) ; <nl> + <nl> + bool is_matching = false ; <nl> + while ( ! is_matching ) <nl> + { <nl> + switch ( * ptr ) <nl> + { <nl> + case ' \ 0 ' : <nl> + { <nl> + base64_buffer . append ( beg , ptr ) ; <nl> + <nl> + ptr = icvGets ( fs , fs - > buffer_start , static_cast < int > ( fs - > buffer_end - fs - > buffer_start ) ) ; <nl> + if ( ! ptr ) <nl> + CV_PARSE_ERROR ( " ' \ " ' - right - quote of string is missing " ) ; <nl> + <nl> + beg = ptr ; <nl> + break ; <nl> + } <nl> + case ' \ " ' : <nl> + { <nl> + base64_buffer . append ( beg , ptr ) ; <nl> + beg = ptr ; <nl> + is_matching = true ; <nl> + break ; <nl> + } <nl> + case ' \ n ' : <nl> + case ' \ r ' : <nl> + { <nl> + CV_PARSE_ERROR ( " ' \ " ' - right - quote of string is missing " ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + { <nl> + ptr + + ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + if ( * ptr ! 
= ' \ " ' ) <nl> + CV_PARSE_ERROR ( " ' \ " ' - right - quote of string is missing " ) ; <nl> + else <nl> + ptr + + ; <nl> + <nl> + if ( base64_buffer . size ( ) > = base64 : : ENCODED_HEADER_SIZE ) <nl> + { <nl> + const char * base64_beg = base64_buffer . data ( ) ; <nl> + const char * base64_end = base64_beg + base64_buffer . size ( ) ; <nl> + <nl> + / * get dt from header * / <nl> + std : : string dt ; <nl> + { <nl> + std : : vector < char > header ( base64 : : HEADER_SIZE + 1 , ' ' ) ; <nl> + base64 : : base64_decode ( base64_beg , header . data ( ) , 0U , base64 : : ENCODED_HEADER_SIZE ) ; <nl> + if ( ! base64 : : read_base64_header ( header , dt ) | | dt . empty ( ) ) <nl> + CV_PARSE_ERROR ( " Invalid ` dt ` in Base64 header " ) ; <nl> + } <nl> + <nl> + / * set base64_beg to beginning of base64 data * / <nl> + base64_beg = & base64_buffer . at ( base64 : : ENCODED_HEADER_SIZE ) ; <nl> + <nl> + if ( base64_buffer . size ( ) > base64 : : ENCODED_HEADER_SIZE ) <nl> + { <nl> + if ( ! base64 : : base64_valid ( base64_beg , 0U , base64_end - base64_beg ) ) <nl> + CV_PARSE_ERROR ( " Invalid Base64 data . " ) ; <nl> + <nl> + / * buffer for decoded data ( exclude header ) * / <nl> + std : : vector < uchar > binary_buffer ( base64 : : base64_decode_buffer_size ( base64_end - base64_beg ) ) ; <nl> + int total_byte_size = static_cast < int > ( <nl> + base64 : : base64_decode_buffer_size ( base64_end - base64_beg , base64_beg , false ) <nl> + ) ; <nl> + { <nl> + base64 : : Base64ContextParser parser ( binary_buffer . data ( ) , binary_buffer . size ( ) ) ; <nl> + const uchar * binary_beg = reinterpret_cast < const uchar * > ( base64_beg ) ; <nl> + const uchar * binary_end = binary_beg + ( base64_end - base64_beg ) ; <nl> + parser . read ( binary_beg , binary_end ) ; <nl> + parser . flush ( ) ; <nl> + } <nl> + <nl> + / * save as CvSeq * / <nl> + int elem_size = : : icvCalcStructSize ( dt . c_str ( ) , 0 ) ; <nl> + if ( total_byte_size % elem_size ! = 0 ) <nl> + CV_PARSE_ERROR ( " Byte size not match elememt size " ) ; <nl> + int elem_cnt = total_byte_size / elem_size ; <nl> + <nl> + / * after icvFSCreateCollection , node - > tag = = struct_flags * / <nl> + icvFSCreateCollection ( fs , CV_NODE_FLOW | CV_NODE_SEQ , node ) ; <nl> + base64 : : make_seq ( binary_buffer . data ( ) , elem_cnt , dt . c_str ( ) , * node - > data . seq ) ; <nl> + } <nl> + else <nl> + { <nl> + / * empty * / <nl> + icvFSCreateCollection ( fs , CV_NODE_FLOW | CV_NODE_SEQ , node ) ; <nl> + } <nl> + } <nl> + else if ( base64_buffer . empty ( ) ) <nl> + { <nl> + / * empty * / <nl> + icvFSCreateCollection ( fs , CV_NODE_FLOW | CV_NODE_SEQ , node ) ; <nl> + } <nl> + else <nl> + { <nl> + CV_PARSE_ERROR ( " Unrecognized Base64 header " ) ; <nl> + } <nl> + } <nl> + else <nl> + { / * * * * * * * * * * * * * * * * normal string * * * * * * * * * * * * * * * * / <nl> + std : : string string_buffer ; <nl> + string_buffer . reserve ( PARSER_BASE64_BUFFER_SIZE ) ; <nl> + <nl> + ptr = beg ; <nl> + bool is_matching = false ; <nl> + while ( ! is_matching ) <nl> + { <nl> + switch ( * ptr ) <nl> + { <nl> + case ' \ \ ' : <nl> + { <nl> + string_buffer . append ( beg , ptr ) ; <nl> + ptr + + ; <nl> + switch ( * ptr ) <nl> + { <nl> + case ' \ \ ' : <nl> + case ' \ " ' : <nl> + case ' \ ' ' : { string_buffer . append ( 1u , * ptr ) ; break ; } <nl> + case ' n ' : { string_buffer . append ( 1u , ' \ n ' ) ; break ; } <nl> + case ' r ' : { string_buffer . append ( 1u , ' \ r ' ) ; break ; } <nl> + case ' t ' : { string_buffer . 
append ( 1u , ' \ t ' ) ; break ; } <nl> + case ' b ' : { string_buffer . append ( 1u , ' \ b ' ) ; break ; } <nl> + case ' f ' : { string_buffer . append ( 1u , ' \ f ' ) ; break ; } <nl> + case ' u ' : { CV_PARSE_ERROR ( " ' \ \ uXXXX ' currently not supported " ) ; } <nl> + default : { CV_PARSE_ERROR ( " Invalid escape character " ) ; } <nl> + break ; <nl> + } <nl> + ptr + + ; <nl> + beg = ptr ; <nl> + break ; <nl> + } <nl> + case ' \ 0 ' : <nl> + { <nl> + string_buffer . append ( beg , ptr ) ; <nl> + <nl> + ptr = icvGets ( fs , fs - > buffer_start , static_cast < int > ( fs - > buffer_end - fs - > buffer_start ) ) ; <nl> + if ( ! ptr ) <nl> + CV_PARSE_ERROR ( " ' \ " ' - right - quote of string is missing " ) ; <nl> + <nl> + beg = ptr ; <nl> + break ; <nl> + } <nl> + case ' \ " ' : <nl> + { <nl> + string_buffer . append ( beg , ptr ) ; <nl> + beg = ptr ; <nl> + is_matching = true ; <nl> + break ; <nl> + } <nl> + case ' \ n ' : <nl> + case ' \ r ' : <nl> + { <nl> + CV_PARSE_ERROR ( " ' \ " ' - right - quote of string is missing " ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + { <nl> + ptr + + ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + if ( * ptr ! = ' \ " ' ) <nl> + CV_PARSE_ERROR ( " ' \ " ' - right - quote of string is missing " ) ; <nl> + else <nl> + ptr + + ; <nl> + <nl> + node - > data . str = cvMemStorageAllocString <nl> + ( <nl> + fs - > memstorage , <nl> + string_buffer . c_str ( ) , <nl> + static_cast < int > ( string_buffer . size ( ) ) <nl> + ) ; <nl> + node - > tag = CV_NODE_STRING ; <nl> + } <nl> + } <nl> + else if ( cv_isdigit ( * ptr ) | | * ptr = = ' - ' | | * ptr = = ' + ' | | * ptr = = ' . ' ) <nl> + { / * * * * * * * * * * * * * * * * number * * * * * * * * * * * * * * * * / <nl> + char * beg = ptr ; <nl> + if ( * ptr = = ' + ' | | * ptr = = ' - ' ) <nl> + ptr + + ; <nl> + while ( cv_isdigit ( * ptr ) ) <nl> + ptr + + ; <nl> + if ( * ptr = = ' . ' | | * ptr = = ' e ' ) <nl> + { <nl> + node - > data . f = icv_strtod ( fs , beg , & ptr ) ; <nl> + node - > tag = CV_NODE_REAL ; <nl> + } <nl> + else <nl> + { <nl> + node - > data . i = static_cast < int > ( strtol ( beg , & ptr , 0 ) ) ; <nl> + node - > tag = CV_NODE_INT ; <nl> + } <nl> + <nl> + if ( beg > = ptr ) <nl> + CV_PARSE_ERROR ( " Invalid numeric value ( inconsistent explicit type specification ? ) " ) ; <nl> + } <nl> + else <nl> + { / * * * * * * * * * * * * * * * * other data * * * * * * * * * * * * * * * * / <nl> + const char * beg = ptr ; <nl> + size_t len = 0u ; <nl> + for ( ; cv_isalpha ( * ptr ) & & len < = 6u ; ptr + + ) <nl> + len + + ; <nl> + <nl> + if ( len > = 4u & & memcmp ( beg , " null " , 4u ) = = 0 ) <nl> + { <nl> + CV_PARSE_ERROR ( " Value ' null ' is not supported by this parser " ) ; <nl> + } <nl> + else if ( len > = 4u & & memcmp ( beg , " true " , 4u ) = = 0 ) <nl> + { <nl> + node - > data . i = 1 ; <nl> + node - > tag = CV_NODE_INT ; <nl> + } <nl> + else if ( len > = 5u & & memcmp ( beg , " false " , 5u ) = = 0 ) <nl> + { <nl> + node - > data . i = 0 ; <nl> + node - > tag = CV_NODE_INT ; <nl> + } <nl> + else <nl> + { <nl> + CV_PARSE_ERROR ( " Unrecognized value " ) ; <nl> + } <nl> + ptr + + ; <nl> + } <nl> + <nl> + return ptr ; <nl> + } <nl> + <nl> + static char * icvJSONParseSeq ( CvFileStorage * fs , char * ptr , CvFileNode * node ) ; <nl> + static char * icvJSONParseMap ( CvFileStorage * fs , char * ptr , CvFileNode * node ) ; <nl> + <nl> + static char * icvJSONParseSeq ( CvFileStorage * fs , char * ptr , CvFileNode * node ) <nl> + { <nl> + if ( * ptr ! 
= ' [ ' ) <nl> + CV_PARSE_ERROR ( " ' [ ' - left - brace of seq is missing " ) ; <nl> + else <nl> + ptr + + ; <nl> + <nl> + memset ( node , 0 , sizeof ( * node ) ) ; <nl> + icvFSCreateCollection ( fs , CV_NODE_SEQ , node ) ; <nl> + <nl> + for ( ; ; ) <nl> + { <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + break ; <nl> + <nl> + if ( * ptr ! = ' ] ' ) <nl> + { <nl> + CvFileNode * child = ( CvFileNode * ) cvSeqPush ( node - > data . seq , 0 ) ; <nl> + <nl> + if ( * ptr = = ' [ ' ) <nl> + ptr = icvJSONParseSeq ( fs , ptr , child ) ; <nl> + else if ( * ptr = = ' { ' ) <nl> + ptr = icvJSONParseMap ( fs , ptr , child ) ; <nl> + else <nl> + ptr = icvJSONParseValue ( fs , ptr , child ) ; <nl> + } <nl> + <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + break ; <nl> + <nl> + if ( * ptr = = ' , ' ) <nl> + ptr + + ; <nl> + else if ( * ptr = = ' ] ' ) <nl> + break ; <nl> + else <nl> + CV_PARSE_ERROR ( " Unexpected character " ) ; <nl> + } <nl> + <nl> + if ( * ptr ! = ' ] ' ) <nl> + CV_PARSE_ERROR ( " ' ] ' - right - brace of seq is missing " ) ; <nl> + else <nl> + ptr + + ; <nl> + <nl> + return ptr ; <nl> + } <nl> + <nl> + static char * icvJSONParseMap ( CvFileStorage * fs , char * ptr , CvFileNode * node ) <nl> + { <nl> + if ( * ptr ! = ' { ' ) <nl> + CV_PARSE_ERROR ( " ' { ' - left - brace of map is missing " ) ; <nl> + else <nl> + ptr + + ; <nl> + <nl> + memset ( node , 0 , sizeof ( * node ) ) ; <nl> + icvFSCreateCollection ( fs , CV_NODE_MAP , node ) ; <nl> + <nl> + for ( ; ; ) <nl> + { <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + break ; <nl> + <nl> + if ( * ptr = = ' " ' ) <nl> + { <nl> + CvFileNode * child = 0 ; <nl> + ptr = icvJSONParseKey ( fs , ptr , node , & child ) ; <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + break ; <nl> + <nl> + if ( child = = 0 ) <nl> + { / * type_id * / <nl> + CvFileNode tmp ; <nl> + ptr = icvJSONParseValue ( fs , ptr , & tmp ) ; <nl> + if ( CV_NODE_IS_STRING ( tmp . tag ) ) <nl> + { <nl> + node - > info = cvFindType ( tmp . data . str . ptr ) ; <nl> + if ( node - > info ) <nl> + node - > tag | = CV_NODE_USER ; <nl> + / / delete tmp . data . str <nl> + } <nl> + else <nl> + { <nl> + CV_PARSE_ERROR ( " \ " type_id \ " should be of type string " ) ; <nl> + } <nl> + } <nl> + else <nl> + { / * normal * / <nl> + if ( * ptr = = ' [ ' ) <nl> + ptr = icvJSONParseSeq ( fs , ptr , child ) ; <nl> + else if ( * ptr = = ' { ' ) <nl> + ptr = icvJSONParseMap ( fs , ptr , child ) ; <nl> + else <nl> + ptr = icvJSONParseValue ( fs , ptr , child ) ; <nl> + } <nl> + } <nl> + <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + break ; <nl> + <nl> + if ( * ptr = = ' , ' ) <nl> + ptr + + ; <nl> + else if ( * ptr = = ' } ' ) <nl> + break ; <nl> + else <nl> + CV_PARSE_ERROR ( " Unexpected character " ) ; <nl> + } <nl> + <nl> + if ( * ptr ! 
= ' } ' ) <nl> + CV_PARSE_ERROR ( " ' } ' - right - brace of map is missing " ) ; <nl> + else <nl> + ptr + + ; <nl> + <nl> + return ptr ; <nl> + } <nl> + <nl> + <nl> + static void <nl> + icvJSONParse ( CvFileStorage * fs ) <nl> + { <nl> + char * ptr = fs - > buffer_start ; <nl> + ptr = icvJSONSkipSpaces ( fs , ptr ) ; <nl> + if ( ptr = = 0 | | fs - > dummy_eof ) <nl> + return ; <nl> + <nl> + if ( * ptr = = ' { ' ) <nl> + { <nl> + CvFileNode * root_node = ( CvFileNode * ) cvSeqPush ( fs - > roots , 0 ) ; <nl> + ptr = icvJSONParseMap ( fs , ptr , root_node ) ; <nl> + } <nl> + else if ( * ptr = = ' [ ' ) <nl> + { <nl> + CvFileNode * root_node = ( CvFileNode * ) cvSeqPush ( fs - > roots , 0 ) ; <nl> + ptr = icvJSONParseSeq ( fs , ptr , root_node ) ; <nl> + } <nl> + else <nl> + { <nl> + CV_PARSE_ERROR ( " left - brace of top level is missing " ) ; <nl> + } <nl> + <nl> + if ( fs - > dummy_eof ! = 0 ) <nl> + CV_PARSE_ERROR ( " Unexpected End - Of - File " ) ; <nl> + } <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ <nl> + * JSON Emitter * <nl> + \ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + static void <nl> + icvJSONWrite ( CvFileStorage * fs , const char * key , const char * data ) <nl> + { <nl> + / * check write_struct * / <nl> + <nl> + check_if_write_struct_is_delayed ( fs ) ; <nl> + if ( fs - > state_of_writing_base64 = = base64 : : fs : : Uncertain ) <nl> + { <nl> + switch_to_Base64_state ( fs , base64 : : fs : : NotUse ) ; <nl> + } <nl> + else if ( fs - > state_of_writing_base64 = = base64 : : fs : : InUse ) <nl> + { <nl> + CV_Error ( CV_StsError , " At present , output Base64 data only . " ) ; <nl> + } <nl> + <nl> + / * check parameters * / <nl> + <nl> + size_t key_len = 0u ; <nl> + if ( key & & * key = = ' \ 0 ' ) <nl> + key = 0 ; <nl> + if ( key ) <nl> + { <nl> + key_len = strlen ( key ) ; <nl> + if ( key_len = = 0u ) <nl> + CV_Error ( CV_StsBadArg , " The key is an empty " ) ; <nl> + else if ( static_cast < int > ( key_len ) > CV_FS_MAX_LEN ) <nl> + CV_Error ( CV_StsBadArg , " The key is too long " ) ; <nl> + } <nl> + <nl> + size_t data_len = 0u ; <nl> + if ( data ) <nl> + data_len = strlen ( data ) ; <nl> + <nl> + int struct_flags = fs - > struct_flags ; <nl> + if ( CV_NODE_IS_COLLECTION ( struct_flags ) ) <nl> + { <nl> + if ( ( CV_NODE_IS_MAP ( struct_flags ) ^ ( key ! = 0 ) ) ) <nl> + CV_Error ( CV_StsBadArg , " An attempt to add element without a key to a map , " <nl> + " or add element with key to sequence " ) ; <nl> + } else { <nl> + fs - > is_first = 0 ; <nl> + struct_flags = CV_NODE_EMPTY | ( key ? CV_NODE_MAP : CV_NODE_SEQ ) ; <nl> + } <nl> + <nl> + / * start to write * / <nl> + <nl> + char * ptr = 0 ; <nl> + <nl> + if ( CV_NODE_IS_FLOW ( struct_flags ) ) <nl> + { <nl> + int new_offset ; <nl> + ptr = fs - > buffer ; <nl> + if ( ! CV_NODE_IS_EMPTY ( struct_flags ) ) <nl> + * ptr + + = ' , ' ; <nl> + new_offset = static_cast < int > ( ptr - fs - > buffer_start + key_len + data_len ) ; <nl> + if ( new_offset > fs - > wrap_margin & & new_offset - fs - > struct_indent > 10 ) <nl> + { <nl> + fs - > buffer = ptr ; <nl> + ptr = icvFSFlush ( fs ) ; <nl> + } <nl> + else <nl> + * ptr + + = ' ' ; <nl> + } <nl> + else <nl> + { <nl> + if ( ! 
CV_NODE_IS_EMPTY ( struct_flags ) ) <nl> + { <nl> + ptr = fs - > buffer ; <nl> + * ptr + + = ' , ' ; <nl> + * ptr + + = ' \ n ' ; <nl> + * ptr + + = ' \ 0 ' ; <nl> + : : icvPuts ( fs , fs - > buffer_start ) ; <nl> + ptr = fs - > buffer = fs - > buffer_start ; <nl> + } <nl> + ptr = icvFSFlush ( fs ) ; <nl> + } <nl> + <nl> + if ( key ) <nl> + { <nl> + if ( ! cv_isalpha ( key [ 0 ] ) & & key [ 0 ] ! = ' _ ' ) <nl> + CV_Error ( CV_StsBadArg , " Key must start with a letter or _ " ) ; <nl> + <nl> + ptr = icvFSResizeWriteBuffer ( fs , ptr , static_cast < int > ( key_len ) ) ; <nl> + * ptr + + = ' \ " ' ; <nl> + <nl> + for ( size_t i = 0u ; i < key_len ; i + + ) <nl> + { <nl> + char c = key [ i ] ; <nl> + <nl> + ptr [ i ] = c ; <nl> + if ( ! cv_isalnum ( c ) & & c ! = ' - ' & & c ! = ' _ ' & & c ! = ' ' ) <nl> + CV_Error ( CV_StsBadArg , " Key names may only contain alphanumeric characters [ a - zA - Z0 - 9 ] , ' - ' , ' _ ' and ' ' " ) ; <nl> + } <nl> + <nl> + ptr + = key_len ; <nl> + * ptr + + = ' \ " ' ; <nl> + * ptr + + = ' : ' ; <nl> + * ptr + + = ' ' ; <nl> + } <nl> + <nl> + if ( data ) <nl> + { <nl> + ptr = icvFSResizeWriteBuffer ( fs , ptr , static_cast < int > ( data_len ) ) ; <nl> + memcpy ( ptr , data , data_len ) ; <nl> + ptr + = data_len ; <nl> + } <nl> + <nl> + fs - > buffer = ptr ; <nl> + fs - > struct_flags = struct_flags & ~ CV_NODE_EMPTY ; <nl> + } <nl> + <nl> + <nl> + static void <nl> + icvJSONStartWriteStruct ( CvFileStorage * fs , const char * key , int struct_flags , <nl> + const char * type_name CV_DEFAULT ( 0 ) ) <nl> + { <nl> + int parent_flags ; <nl> + char data [ CV_FS_MAX_LEN + 1024 ] ; <nl> + <nl> + struct_flags = ( struct_flags & ( CV_NODE_TYPE_MASK | CV_NODE_FLOW ) ) | CV_NODE_EMPTY ; <nl> + if ( ! CV_NODE_IS_COLLECTION ( struct_flags ) ) <nl> + CV_Error ( CV_StsBadArg , <nl> + " Some collection type - CV_NODE_SEQ or CV_NODE_MAP , must be specified " ) ; <nl> + <nl> + if ( type_name & & * type_name = = ' \ 0 ' ) <nl> + type_name = 0 ; <nl> + <nl> + bool has_type_id = false ; <nl> + bool is_real_collection = true ; <nl> + if ( type_name & & memcmp ( type_name , " binary " , 6 ) = = 0 ) <nl> { <nl> - switch_to_Base64_state ( fs , base64 : : fs : : NotUse ) ; <nl> + struct_flags = CV_NODE_STR ; <nl> + data [ 0 ] = ' \ 0 ' ; <nl> + is_real_collection = false ; <nl> } <nl> - else if ( fs - > state_of_writing_base64 = = base64 : : fs : : InUse ) <nl> + else if ( type_name ) <nl> { <nl> - CV_Error ( CV_StsError , " Currently only Base64 data is allowed . " ) ; <nl> + has_type_id = true ; <nl> } <nl> <nl> - if ( CV_NODE_IS_MAP ( fs - > struct_flags ) | | <nl> - ( ! CV_NODE_IS_COLLECTION ( fs - > struct_flags ) & & key ) ) <nl> + if ( is_real_collection ) <nl> { <nl> - icvXMLWriteTag ( fs , key , CV_XML_OPENING_TAG , cvAttrList ( 0 , 0 ) ) ; <nl> - char * ptr = icvFSResizeWriteBuffer ( fs , fs - > buffer , len ) ; <nl> - memcpy ( ptr , data , len ) ; <nl> - fs - > buffer = ptr + len ; <nl> - icvXMLWriteTag ( fs , key , CV_XML_CLOSING_TAG , cvAttrList ( 0 , 0 ) ) ; <nl> + char c = CV_NODE_IS_MAP ( struct_flags ) ? 
' { ' : ' [ ' ; <nl> + data [ 0 ] = c ; <nl> + data [ 1 ] = ' \ 0 ' ; <nl> } <nl> - else <nl> - { <nl> - char * ptr = fs - > buffer ; <nl> - int new_offset = ( int ) ( ptr - fs - > buffer_start ) + len ; <nl> <nl> - if ( key ) <nl> - CV_Error ( CV_StsBadArg , " elements with keys can not be written to sequence " ) ; <nl> + icvJSONWrite ( fs , key , data ) ; <nl> <nl> - fs - > struct_flags = CV_NODE_SEQ ; <nl> + parent_flags = fs - > struct_flags ; <nl> + cvSeqPush ( fs - > write_stack , & parent_flags ) ; <nl> + fs - > struct_flags = struct_flags ; <nl> + fs - > struct_indent + = 4 ; <nl> <nl> - if ( ( new_offset > fs - > wrap_margin & & new_offset - fs - > struct_indent > 10 ) | | <nl> - ( ptr > fs - > buffer_start & & ptr [ - 1 ] = = ' > ' & & ! CV_NODE_IS_EMPTY ( fs - > struct_flags ) ) ) <nl> + if ( has_type_id ) <nl> + fs - > write_string ( fs , " type_id " , type_name , 1 ) ; <nl> + } <nl> + <nl> + <nl> + static void <nl> + icvJSONEndWriteStruct ( CvFileStorage * fs ) <nl> + { <nl> + if ( fs - > write_stack - > total = = 0 ) <nl> + CV_Error ( CV_StsError , " EndWriteStruct w / o matching StartWriteStruct " ) ; <nl> + <nl> + int parent_flags = 0 ; <nl> + int struct_flags = fs - > struct_flags ; <nl> + cvSeqPop ( fs - > write_stack , & parent_flags ) ; <nl> + fs - > struct_indent - = 4 ; <nl> + fs - > struct_flags = parent_flags & ~ CV_NODE_EMPTY ; <nl> + assert ( fs - > struct_indent > = 0 ) ; <nl> + <nl> + if ( CV_NODE_IS_COLLECTION ( struct_flags ) ) <nl> + { <nl> + if ( ! CV_NODE_IS_FLOW ( struct_flags ) ) <nl> { <nl> - ptr = icvXMLFlush ( fs ) ; <nl> + if ( fs - > buffer < = fs - > buffer_start + fs - > space ) <nl> + { <nl> + / * some bad code for base64_writer . . . * / <nl> + * fs - > buffer + + = ' \ n ' ; <nl> + * fs - > buffer + + = ' \ 0 ' ; <nl> + icvPuts ( fs , fs - > buffer_start ) ; <nl> + fs - > buffer = fs - > buffer_start ; <nl> + } <nl> + icvFSFlush ( fs ) ; <nl> } <nl> - else if ( ptr > fs - > buffer_start + fs - > struct_indent & & ptr [ - 1 ] ! = ' > ' ) <nl> + <nl> + char * ptr = fs - > buffer ; <nl> + if ( ptr > fs - > buffer_start + fs - > struct_indent & & ! CV_NODE_IS_EMPTY ( struct_flags ) ) <nl> * ptr + + = ' ' ; <nl> + * ptr + + = CV_NODE_IS_MAP ( struct_flags ) ? ' } ' : ' ] ' ; <nl> + fs - > buffer = ptr ; <nl> + } <nl> + } <nl> <nl> - memcpy ( ptr , data , len ) ; <nl> - fs - > buffer = ptr + len ; <nl> + <nl> + static void <nl> + icvJSONStartNextStream ( CvFileStorage * fs ) <nl> + { <nl> + if ( ! 
fs - > is_first ) <nl> + { <nl> + while ( fs - > write_stack - > total > 0 ) <nl> + icvJSONEndWriteStruct ( fs ) ; <nl> + <nl> + fs - > struct_indent = 4 ; <nl> + icvFSFlush ( fs ) ; <nl> + fs - > buffer = fs - > buffer_start ; <nl> } <nl> } <nl> <nl> <nl> static void <nl> - icvXMLWriteInt ( CvFileStorage * fs , const char * key , int value ) <nl> + icvJSONWriteInt ( CvFileStorage * fs , const char * key , int value ) <nl> { <nl> - char buf [ 128 ] , * ptr = icv_itoa ( value , buf , 10 ) ; <nl> - int len = ( int ) strlen ( ptr ) ; <nl> - icvXMLWriteScalar ( fs , key , ptr , len ) ; <nl> + char buf [ 128 ] ; <nl> + icvJSONWrite ( fs , key , icv_itoa ( value , buf , 10 ) ) ; <nl> } <nl> <nl> <nl> static void <nl> - icvXMLWriteReal ( CvFileStorage * fs , const char * key , double value ) <nl> + icvJSONWriteReal ( CvFileStorage * fs , const char * key , double value ) <nl> { <nl> char buf [ 128 ] ; <nl> - int len = ( int ) strlen ( icvDoubleToString ( buf , value ) ) ; <nl> - icvXMLWriteScalar ( fs , key , buf , len ) ; <nl> + icvJSONWrite ( fs , key , icvDoubleToString ( buf , value ) ) ; <nl> } <nl> <nl> <nl> static void <nl> - icvXMLWriteString ( CvFileStorage * fs , const char * key , const char * str , int quote ) <nl> + icvJSONWriteString ( CvFileStorage * fs , const char * key , <nl> + const char * str , int quote CV_DEFAULT ( 0 ) ) <nl> { <nl> - char buf [ CV_FS_MAX_LEN * 6 + 16 ] ; <nl> + char buf [ CV_FS_MAX_LEN * 4 + 16 ] ; <nl> char * data = ( char * ) str ; <nl> int i , len ; <nl> <nl> icvXMLWriteString ( CvFileStorage * fs , const char * key , const char * str , int quot <nl> if ( len > CV_FS_MAX_LEN ) <nl> CV_Error ( CV_StsBadArg , " The written string is too long " ) ; <nl> <nl> - if ( quote | | len = = 0 | | str [ 0 ] ! = ' \ " ' | | str [ 0 ] ! = str [ len - 1 ] ) <nl> + if ( quote | | len = = 0 | | str [ 0 ] ! = str [ len - 1 ] | | ( str [ 0 ] ! = ' \ " ' & & str [ 0 ] ! = ' \ ' ' ) ) <nl> { <nl> - int need_quote = quote | | len = = 0 ; <nl> + int need_quote = 1 ; <nl> data = buf ; <nl> * data + + = ' \ " ' ; <nl> for ( i = 0 ; i < len ; i + + ) <nl> { <nl> char c = str [ i ] ; <nl> <nl> - if ( ( uchar ) c > = 128 | | c = = ' ' ) <nl> - { <nl> - * data + + = c ; <nl> - need_quote = 1 ; <nl> - } <nl> - else if ( ! 
cv_isprint ( c ) | | c = = ' < ' | | c = = ' > ' | | c = = ' & ' | | c = = ' \ ' ' | | c = = ' \ " ' ) <nl> + switch ( c ) <nl> { <nl> - * data + + = ' & ' ; <nl> - if ( c = = ' < ' ) <nl> - { <nl> - memcpy ( data , " lt " , 2 ) ; <nl> - data + = 2 ; <nl> - } <nl> - else if ( c = = ' > ' ) <nl> - { <nl> - memcpy ( data , " gt " , 2 ) ; <nl> - data + = 2 ; <nl> - } <nl> - else if ( c = = ' & ' ) <nl> - { <nl> - memcpy ( data , " amp " , 3 ) ; <nl> - data + = 3 ; <nl> - } <nl> - else if ( c = = ' \ ' ' ) <nl> - { <nl> - memcpy ( data , " apos " , 4 ) ; <nl> - data + = 4 ; <nl> - } <nl> - else if ( c = = ' \ " ' ) <nl> - { <nl> - memcpy ( data , " quot " , 4 ) ; <nl> - data + = 4 ; <nl> - } <nl> - else <nl> - { <nl> - sprintf ( data , " # x % 02x " , ( uchar ) c ) ; <nl> - data + = 4 ; <nl> - } <nl> - * data + + = ' ; ' ; <nl> - need_quote = 1 ; <nl> + case ' \ \ ' : <nl> + case ' \ " ' : <nl> + case ' \ ' ' : { * data + + = ' \ \ ' ; * data + + = c ; break ; } <nl> + case ' \ n ' : { * data + + = ' \ \ ' ; * data + + = ' n ' ; break ; } <nl> + case ' \ r ' : { * data + + = ' \ \ ' ; * data + + = ' r ' ; break ; } <nl> + case ' \ t ' : { * data + + = ' \ \ ' ; * data + + = ' t ' ; break ; } <nl> + case ' \ b ' : { * data + + = ' \ \ ' ; * data + + = ' b ' ; break ; } <nl> + case ' \ f ' : { * data + + = ' \ \ ' ; * data + + = ' f ' ; break ; } <nl> + default : { * data + + = c ; } <nl> + break ; <nl> } <nl> - else <nl> - * data + + = c ; <nl> } <nl> - if ( ! need_quote & & ( cv_isdigit ( str [ 0 ] ) | | <nl> - str [ 0 ] = = ' + ' | | str [ 0 ] = = ' - ' | | str [ 0 ] = = ' . ' ) ) <nl> - need_quote = 1 ; <nl> <nl> - if ( need_quote ) <nl> - * data + + = ' \ " ' ; <nl> - len = ( int ) ( data - buf ) - ! need_quote ; <nl> + * data + + = ' \ " ' ; <nl> * data + + = ' \ 0 ' ; <nl> data = buf + ! need_quote ; <nl> } <nl> <nl> - icvXMLWriteScalar ( fs , key , data , len ) ; <nl> + icvJSONWrite ( fs , key , data ) ; <nl> } <nl> <nl> <nl> static void <nl> - icvXMLWriteComment ( CvFileStorage * fs , const char * comment , int eol_comment ) <nl> + icvJSONWriteComment ( CvFileStorage * fs , const char * comment , int eol_comment ) <nl> { <nl> - int len ; <nl> - int multiline ; <nl> - const char * eol ; <nl> - char * ptr ; <nl> - <nl> if ( ! comment ) <nl> CV_Error ( CV_StsNullPtr , " Null comment " ) ; <nl> <nl> - if ( strstr ( comment , " - - " ) ! = 0 ) <nl> - CV_Error ( CV_StsBadArg , " Double hyphen \ ' - - \ ' is not allowed in the comments " ) ; <nl> - <nl> - len = ( int ) strlen ( comment ) ; <nl> - eol = strchr ( comment , ' \ n ' ) ; <nl> - multiline = eol ! = 0 ; <nl> - ptr = fs - > buffer ; <nl> - <nl> - if ( multiline | | ! eol_comment | | fs - > buffer_end - ptr < len + 5 ) <nl> - ptr = icvXMLFlush ( fs ) ; <nl> - else if ( ptr > fs - > buffer_start + fs - > struct_indent ) <nl> - * ptr + + = ' ' ; <nl> + int len = static_cast < int > ( strlen ( comment ) ) ; <nl> + char * ptr = fs - > buffer ; <nl> + const char * eol = strchr ( comment , ' \ n ' ) ; <nl> + bool multiline = eol ! = 0 ; <nl> <nl> - if ( ! multiline ) <nl> - { <nl> - ptr = icvFSResizeWriteBuffer ( fs , ptr , len + 9 ) ; <nl> - sprintf ( ptr , " < ! - - % s - - > " , comment ) ; <nl> - len = ( int ) strlen ( ptr ) ; <nl> - } <nl> + if ( ! eol_comment | | multiline | | fs - > buffer_end - ptr < len | | ptr = = fs - > buffer_start ) <nl> + ptr = icvFSFlush ( fs ) ; <nl> else <nl> - { <nl> - strcpy ( ptr , " < ! 
- - " ) ; <nl> - len = 4 ; <nl> - } <nl> - <nl> - fs - > buffer = ptr + len ; <nl> - ptr = icvXMLFlush ( fs ) ; <nl> + * ptr + + = ' ' ; <nl> <nl> - if ( multiline ) <nl> + while ( comment ) <nl> { <nl> - while ( comment ) <nl> + * ptr + + = ' / ' ; <nl> + * ptr + + = ' / ' ; <nl> + * ptr + + = ' ' ; <nl> + if ( eol ) <nl> { <nl> - if ( eol ) <nl> - { <nl> - ptr = icvFSResizeWriteBuffer ( fs , ptr , ( int ) ( eol - comment ) + 1 ) ; <nl> - memcpy ( ptr , comment , eol - comment + 1 ) ; <nl> - ptr + = eol - comment ; <nl> - comment = eol + 1 ; <nl> - eol = strchr ( comment , ' \ n ' ) ; <nl> - } <nl> - else <nl> - { <nl> - len = ( int ) strlen ( comment ) ; <nl> - ptr = icvFSResizeWriteBuffer ( fs , ptr , len ) ; <nl> - memcpy ( ptr , comment , len ) ; <nl> - ptr + = len ; <nl> - comment = 0 ; <nl> - } <nl> - fs - > buffer = ptr ; <nl> - ptr = icvXMLFlush ( fs ) ; <nl> + ptr = icvFSResizeWriteBuffer ( fs , ptr , ( int ) ( eol - comment ) + 1 ) ; <nl> + memcpy ( ptr , comment , eol - comment + 1 ) ; <nl> + fs - > buffer = ptr + ( eol - comment ) ; <nl> + comment = eol + 1 ; <nl> + eol = strchr ( comment , ' \ n ' ) ; <nl> } <nl> - sprintf ( ptr , " - - > " ) ; <nl> - fs - > buffer = ptr + 3 ; <nl> - icvXMLFlush ( fs ) ; <nl> + else <nl> + { <nl> + len = ( int ) strlen ( comment ) ; <nl> + ptr = icvFSResizeWriteBuffer ( fs , ptr , len ) ; <nl> + memcpy ( ptr , comment , len ) ; <nl> + fs - > buffer = ptr + len ; <nl> + comment = 0 ; <nl> + } <nl> + ptr = icvFSFlush ( fs ) ; <nl> } <nl> } <nl> <nl> cvOpenFileStorage ( const char * query , CvMemStorage * dststorage , int flags , const <nl> bool append = ( flags & 3 ) = = CV_STORAGE_APPEND ; <nl> bool mem = ( flags & CV_STORAGE_MEMORY ) ! = 0 ; <nl> bool write_mode = ( flags & 3 ) ! = 0 ; <nl> - bool write_base64 = write_mode & & ( flags & CV_STORAGE_BASE64 ) ! = 0 ; <nl> + bool write_base64 = ( write_mode | | append ) & & ( flags & CV_STORAGE_BASE64 ) ! = 0 ; <nl> bool isGZ = false ; <nl> size_t fnamelen = 0 ; <nl> const char * filename = query ; <nl> cvOpenFileStorage ( const char * query , CvMemStorage * dststorage , int flags , const <nl> filename = params . begin ( ) - > c_str ( ) ; <nl> <nl> if ( write_base64 = = false & & is_param_exist ( params , " base64 " ) ) <nl> - write_base64 = true ; <nl> + write_base64 = ( write_mode | | append ) ; <nl> } <nl> <nl> if ( ! filename | | filename [ 0 ] = = ' \ 0 ' ) <nl> cvOpenFileStorage ( const char * query , CvMemStorage * dststorage , int flags , const <nl> <nl> if ( fmt = = CV_STORAGE_FORMAT_AUTO & & filename ) <nl> { <nl> - const char * dot_pos = filename + fnamelen - ( isGZ ? 7 : 4 ) ; <nl> - fs - > fmt = ( dot_pos > = filename & & ( memcmp ( dot_pos , " . xml " , 4 ) = = 0 | | <nl> - memcmp ( dot_pos , " . XML " , 4 ) = = 0 | | memcmp ( dot_pos , " . Xml " , 4 ) = = 0 ) ) ? <nl> - CV_STORAGE_FORMAT_XML : CV_STORAGE_FORMAT_YAML ; <nl> + const char * dot_pos = strrchr ( filename , ' . ' ) ; <nl> + fs - > fmt <nl> + = cv_strcasecmp ( dot_pos , " . xml " ) <nl> + ? CV_STORAGE_FORMAT_XML <nl> + : cv_strcasecmp ( dot_pos , " . json " ) <nl> + ? CV_STORAGE_FORMAT_JSON <nl> + : CV_STORAGE_FORMAT_YAML <nl> + ; <nl> + } <nl> + else if ( fmt ! = CV_STORAGE_FORMAT_AUTO ) <nl> + { <nl> + fs - > fmt = fmt ; <nl> } <nl> else <nl> - fs - > fmt = fmt ! = CV_STORAGE_FORMAT_AUTO ? 
fmt : CV_STORAGE_FORMAT_XML ; <nl> + { <nl> + fs - > fmt = CV_STORAGE_FORMAT_XML ; <nl> + } <nl> <nl> / / we use factor = 6 for XML ( the longest characters ( ' and " ) are encoded with 6 bytes ( & apos ; and & quot ; ) <nl> / / and factor = 4 for YAML ( as we use 4 bytes for non ASCII characters ( e . g . \ xAB ) ) <nl> cvOpenFileStorage ( const char * query , CvMemStorage * dststorage , int flags , const <nl> fs - > write_comment = icvXMLWriteComment ; <nl> fs - > start_next_stream = icvXMLStartNextStream ; <nl> } <nl> - else <nl> + else if ( fs - > fmt = = CV_STORAGE_FORMAT_YAML ) <nl> { <nl> if ( ! append ) <nl> icvPuts ( fs , " % YAML 1 . 0 \ nmmm \ n " ) ; <nl> cvOpenFileStorage ( const char * query , CvMemStorage * dststorage , int flags , const <nl> fs - > write_comment = icvYMLWriteComment ; <nl> fs - > start_next_stream = icvYMLStartNextStream ; <nl> } <nl> + else <nl> + { <nl> + if ( ! append ) <nl> + icvPuts ( fs , " { \ n " ) ; <nl> + else <nl> + { <nl> + bool valid = false ; <nl> + long roffset = 0 ; <nl> + for ( ; <nl> + fseek ( fs - > file , roffset , SEEK_END ) = = 0 ; <nl> + roffset - = 1 ) <nl> + { <nl> + const char end_mark = ' } ' ; <nl> + if ( fgetc ( fs - > file ) = = end_mark ) <nl> + { <nl> + fseek ( fs - > file , roffset , SEEK_END ) ; <nl> + valid = true ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( valid ) <nl> + { <nl> + icvCloseFile ( fs ) ; <nl> + fs - > file = fopen ( fs - > filename , " r + t " ) ; <nl> + fseek ( fs - > file , roffset , SEEK_END ) ; <nl> + fputs ( " , " , fs - > file ) ; <nl> + } <nl> + else <nl> + { <nl> + CV_Error ( CV_StsError , " Could not find ' } ' in the end of file . \ n " ) ; <nl> + } <nl> + } <nl> + fs - > struct_indent = 4 ; <nl> + fs - > start_write_struct = icvJSONStartWriteStruct ; <nl> + fs - > end_write_struct = icvJSONEndWriteStruct ; <nl> + fs - > write_int = icvJSONWriteInt ; <nl> + fs - > write_real = icvJSONWriteReal ; <nl> + fs - > write_string = icvJSONWriteString ; <nl> + fs - > write_comment = icvJSONWriteComment ; <nl> + fs - > start_next_stream = icvJSONStartNextStream ; <nl> + } <nl> } <nl> else <nl> { <nl> cvOpenFileStorage ( const char * query , CvMemStorage * dststorage , int flags , const <nl> <nl> size_t buf_size = 1 < < 20 ; <nl> const char * yaml_signature = " % YAML " ; <nl> + const char * json_signature = " { " ; <nl> char buf [ 16 ] ; <nl> icvGets ( fs , buf , sizeof ( buf ) - 2 ) ; <nl> - fs - > fmt = strncmp ( buf , yaml_signature , strlen ( yaml_signature ) ) = = 0 ? <nl> - CV_STORAGE_FORMAT_YAML : CV_STORAGE_FORMAT_XML ; <nl> + fs - > fmt <nl> + = strncmp ( buf , yaml_signature , strlen ( yaml_signature ) ) = = 0 <nl> + ? CV_STORAGE_FORMAT_YAML <nl> + : strncmp ( buf , json_signature , strlen ( json_signature ) ) = = 0 <nl> + ? CV_STORAGE_FORMAT_JSON <nl> + : CV_STORAGE_FORMAT_XML <nl> + ; <nl> <nl> if ( ! isGZ ) <nl> { <nl> cvOpenFileStorage ( const char * query , CvMemStorage * dststorage , int flags , const <nl> / / cvSetErrMode ( CV_ErrModeSilent ) ; <nl> try <nl> { <nl> - if ( fs - > fmt = = CV_STORAGE_FORMAT_XML ) <nl> - icvXMLParse ( fs ) ; <nl> - else <nl> - icvYMLParse ( fs ) ; <nl> + switch ( fs - > fmt ) <nl> + { <nl> + case CV_STORAGE_FORMAT_XML : { icvXMLParse ( fs ) ; break ; } <nl> + case CV_STORAGE_FORMAT_YAML : { icvYMLParse ( fs ) ; break ; } <nl> + case CV_STORAGE_FORMAT_JSON : { icvJSONParse ( fs ) ; break ; } <nl> + default : break ; <nl> + } <nl> } <nl> catch ( . . . 
) <nl> { <nl> cvStartWriteStruct ( CvFileStorage * fs , const char * key , int struct_flags , <nl> type_name = = 0 <nl> ) <nl> { <nl> - / * Uncertain if output Base64 data * / <nl> + / * Uncertain whether output Base64 data * / <nl> make_write_struct_delayed ( fs , key , struct_flags , type_name ) ; <nl> } <nl> else if ( type_name & & memcmp ( type_name , " binary " , 6 ) = = 0 ) <nl> cvWriteRawData ( CvFileStorage * fs , const void * _data , int len , const char * dt ) <nl> int buf_len = ( int ) strlen ( ptr ) ; <nl> icvXMLWriteScalar ( fs , 0 , ptr , buf_len ) ; <nl> } <nl> - else <nl> + else if ( fs - > fmt = = CV_STORAGE_FORMAT_YAML ) <nl> + { <nl> icvYMLWrite ( fs , 0 , ptr ) ; <nl> + } <nl> + else <nl> + { <nl> + icvJSONWrite ( fs , 0 , ptr ) ; <nl> + } <nl> } <nl> <nl> offset = ( int ) ( data - data0 ) ; <nl> class base64 : : Base64ContextEmitter <nl> <nl> CV_CHECK_OUTPUT_FILE_STORAGE ( fs ) ; <nl> <nl> - : : icvFSFlush ( file_storage ) ; <nl> + if ( fs - > fmt = = CV_STORAGE_FORMAT_JSON ) <nl> + { <nl> + / * clean and break buffer * / <nl> + * fs - > buffer + + = ' \ 0 ' ; <nl> + : : icvPuts ( fs , fs - > buffer_start ) ; <nl> + fs - > buffer = fs - > buffer_start ; <nl> + memset ( file_storage - > buffer_start , 0 , static_cast < int > ( file_storage - > space ) ) ; <nl> + : : icvPuts ( fs , " \ " $ base64 $ " ) ; <nl> + } <nl> + else <nl> + { <nl> + : : icvFSFlush ( file_storage ) ; <nl> + } <nl> } <nl> <nl> ~ Base64ContextEmitter ( ) <nl> class base64 : : Base64ContextEmitter <nl> / * cleaning * / <nl> if ( src_cur ! = src_beg ) <nl> flush ( ) ; / * encode the rest binary data to base64 buffer * / <nl> + <nl> + if ( file_storage - > fmt = = CV_STORAGE_FORMAT_JSON ) <nl> + { <nl> + / * clean and break buffer * / <nl> + : : icvPuts ( file_storage , " \ " " ) ; <nl> + file_storage - > buffer = file_storage - > buffer_start ; <nl> + : : icvFSFlush ( file_storage ) ; <nl> + memset ( file_storage - > buffer_start , 0 , static_cast < int > ( file_storage - > space ) ) ; <nl> + file_storage - > buffer = file_storage - > buffer_start ; <nl> + } <nl> } <nl> <nl> Base64ContextEmitter & write ( const uchar * beg , const uchar * end ) <nl> class base64 : : Base64ContextEmitter <nl> <nl> src_cur = src_beg ; <nl> { <nl> - / / TODO : better solutions . <nl> - const char newline [ ] = " \ n " ; <nl> - char space [ 80 ] ; <nl> - <nl> - int ident = file_storage - > struct_indent ; <nl> - memset ( space , ' ' , ident ) ; <nl> - space [ ident ] = ' \ 0 ' ; <nl> + if ( file_storage - > fmt = = CV_STORAGE_FORMAT_JSON ) <nl> + { <nl> + : : icvPuts ( file_storage , ( const char * ) base64_buffer . data ( ) ) ; <nl> + } <nl> + else <nl> + { <nl> + const char newline [ ] = " \ n " ; <nl> + char space [ 80 ] ; <nl> + int ident = file_storage - > struct_indent ; <nl> + memset ( space , ' ' , static_cast < int > ( ident ) ) ; <nl> + space [ ident ] = ' \ 0 ' ; <nl> + <nl> + : : icvPuts ( file_storage , space ) ; <nl> + : : icvPuts ( file_storage , ( const char * ) base64_buffer . data ( ) ) ; <nl> + : : icvPuts ( file_storage , newline ) ; <nl> + : : icvFSFlush ( file_storage ) ; <nl> + } <nl> <nl> - : : icvPuts ( file_storage , space ) ; <nl> - : : icvPuts ( file_storage , ( const char * ) base64_buffer . 
data ( ) ) ; <nl> - : : icvPuts ( file_storage , newline ) ; <nl> - : : icvFSFlush ( file_storage ) ; <nl> } <nl> <nl> return true ; <nl> base64 : : Base64Writer : : Base64Writer ( : : CvFileStorage * fs ) <nl> , data_type_string ( ) <nl> { <nl> CV_CHECK_OUTPUT_FILE_STORAGE ( fs ) ; <nl> - icvFSFlush ( fs ) ; <nl> } <nl> <nl> void base64 : : Base64Writer : : write ( const void * _data , size_t len , const char * dt ) <nl> mmm a / modules / core / test / test_io . cpp <nl> ppp b / modules / core / test / test_io . cpp <nl> class Core_IOTest : public cvtest : : BaseTest <nl> { - 1000000 , 1000000 } , { - 10 , 10 } , { - 10 , 10 } } ; <nl> RNG & rng = ts - > get_rng ( ) ; <nl> RNG rng0 ; <nl> - test_case_count = 4 ; <nl> int progress = 0 ; <nl> MemStorage storage ( cvCreateMemStorage ( 0 ) ) ; <nl> + const char * suffixs [ 3 ] = { " . yml " , " . xml " , " . json " } ; <nl> + test_case_count = 6 ; <nl> <nl> for ( int idx = 0 ; idx < test_case_count ; idx + + ) <nl> { <nl> class Core_IOTest : public cvtest : : BaseTest <nl> <nl> cvClearMemStorage ( storage ) ; <nl> <nl> - bool mem = ( idx % 4 ) > = 2 ; <nl> - string filename = tempfile ( idx % 2 ? " . yml " : " . xml " ) ; <nl> + bool mem = ( idx % test_case_count ) > = ( test_case_count > > 1 ) ; <nl> + string filename = tempfile ( suffixs [ idx % ( test_case_count > > 1 ) ] ) ; <nl> <nl> FileStorage fs ( filename , FileStorage : : WRITE + ( mem ? FileStorage : : MEMORY : 0 ) ) ; <nl> <nl> class CV_MiscIOTest : public cvtest : : BaseTest <nl> protected : <nl> void run ( int ) <nl> { <nl> - try <nl> - { <nl> - string fname = cv : : tempfile ( " . xml " ) ; <nl> - vector < int > mi , mi2 , mi3 , mi4 ; <nl> - vector < Mat > mv , mv2 , mv3 , mv4 ; <nl> - vector < UserDefinedType > vudt , vudt2 , vudt3 , vudt4 ; <nl> - Mat m ( 10 , 9 , CV_32F ) ; <nl> - Mat empty ; <nl> - UserDefinedType udt = { 8 , 3 . 3f } ; <nl> - randu ( m , 0 , 1 ) ; <nl> - mi3 . push_back ( 5 ) ; <nl> - mv3 . push_back ( m ) ; <nl> - vudt3 . push_back ( udt ) ; <nl> - Point_ < float > p1 ( 1 . 1f , 2 . 2f ) , op1 ; <nl> - Point3i p2 ( 3 , 4 , 5 ) , op2 ; <nl> - Size s1 ( 6 , 7 ) , os1 ; <nl> - Complex < int > c1 ( 9 , 10 ) , oc1 ; <nl> - Rect r1 ( 11 , 12 , 13 , 14 ) , or1 ; <nl> - Vec < int , 5 > v1 ( 15 , 16 , 17 , 18 , 19 ) , ov1 ; <nl> - Scalar sc1 ( 20 . 0 , 21 . 1 , 22 . 2 , 23 . 3 ) , osc1 ; <nl> - Range g1 ( 7 , 8 ) , og1 ; <nl> - <nl> - FileStorage fs ( fname , FileStorage : : WRITE ) ; <nl> - fs < < " mi " < < mi ; <nl> - fs < < " mv " < < mv ; <nl> - fs < < " mi3 " < < mi3 ; <nl> - fs < < " mv3 " < < mv3 ; <nl> - fs < < " vudt " < < vudt ; <nl> - fs < < " vudt3 " < < vudt3 ; <nl> - fs < < " empty " < < empty ; <nl> - fs < < " p1 " < < p1 ; <nl> - fs < < " p2 " < < p2 ; <nl> - fs < < " s1 " < < s1 ; <nl> - fs < < " c1 " < < c1 ; <nl> - fs < < " r1 " < < r1 ; <nl> - fs < < " v1 " < < v1 ; <nl> - fs < < " sc1 " < < sc1 ; <nl> - fs < < " g1 " < < g1 ; <nl> - fs . release ( ) ; <nl> + const char * suffix [ 3 ] = { <nl> + " . yml " , <nl> + " . xml " , <nl> + " . json " <nl> + } ; <nl> <nl> - fs . 
open ( fname , FileStorage : : READ ) ; <nl> - fs [ " mi " ] > > mi2 ; <nl> - fs [ " mv " ] > > mv2 ; <nl> - fs [ " mi3 " ] > > mi4 ; <nl> - fs [ " mv3 " ] > > mv4 ; <nl> - fs [ " vudt " ] > > vudt2 ; <nl> - fs [ " vudt3 " ] > > vudt4 ; <nl> - fs [ " empty " ] > > empty ; <nl> - fs [ " p1 " ] > > op1 ; <nl> - fs [ " p2 " ] > > op2 ; <nl> - fs [ " s1 " ] > > os1 ; <nl> - fs [ " c1 " ] > > oc1 ; <nl> - fs [ " r1 " ] > > or1 ; <nl> - fs [ " v1 " ] > > ov1 ; <nl> - fs [ " sc1 " ] > > osc1 ; <nl> - fs [ " g1 " ] > > og1 ; <nl> - CV_Assert ( mi2 . empty ( ) ) ; <nl> - CV_Assert ( mv2 . empty ( ) ) ; <nl> - CV_Assert ( cvtest : : norm ( Mat ( mi3 ) , Mat ( mi4 ) , CV_C ) = = 0 ) ; <nl> - CV_Assert ( mv4 . size ( ) = = 1 ) ; <nl> - double n = cvtest : : norm ( mv3 [ 0 ] , mv4 [ 0 ] , CV_C ) ; <nl> - CV_Assert ( vudt2 . empty ( ) ) ; <nl> - CV_Assert ( vudt3 = = vudt4 ) ; <nl> - CV_Assert ( n = = 0 ) ; <nl> - CV_Assert ( op1 = = p1 ) ; <nl> - CV_Assert ( op2 = = p2 ) ; <nl> - CV_Assert ( os1 = = s1 ) ; <nl> - CV_Assert ( oc1 = = c1 ) ; <nl> - CV_Assert ( or1 = = r1 ) ; <nl> - CV_Assert ( ov1 = = v1 ) ; <nl> - CV_Assert ( osc1 = = sc1 ) ; <nl> - CV_Assert ( og1 = = g1 ) ; <nl> - } <nl> - catch ( . . . ) <nl> + for ( size_t i = 0u ; i < 3u ; i + + ) <nl> { <nl> - ts - > set_failed_test_info ( cvtest : : TS : : FAIL_MISMATCH ) ; <nl> + try <nl> + { <nl> + string fname = cv : : tempfile ( suffix [ i ] ) ; <nl> + vector < int > mi , mi2 , mi3 , mi4 ; <nl> + vector < Mat > mv , mv2 , mv3 , mv4 ; <nl> + vector < UserDefinedType > vudt , vudt2 , vudt3 , vudt4 ; <nl> + Mat m ( 10 , 9 , CV_32F ) ; <nl> + Mat empty ; <nl> + UserDefinedType udt = { 8 , 3 . 3f } ; <nl> + randu ( m , 0 , 1 ) ; <nl> + mi3 . push_back ( 5 ) ; <nl> + mv3 . push_back ( m ) ; <nl> + vudt3 . push_back ( udt ) ; <nl> + Point_ < float > p1 ( 1 . 1f , 2 . 2f ) , op1 ; <nl> + Point3i p2 ( 3 , 4 , 5 ) , op2 ; <nl> + Size s1 ( 6 , 7 ) , os1 ; <nl> + Complex < int > c1 ( 9 , 10 ) , oc1 ; <nl> + Rect r1 ( 11 , 12 , 13 , 14 ) , or1 ; <nl> + Vec < int , 5 > v1 ( 15 , 16 , 17 , 18 , 19 ) , ov1 ; <nl> + Scalar sc1 ( 20 . 0 , 21 . 1 , 22 . 2 , 23 . 3 ) , osc1 ; <nl> + Range g1 ( 7 , 8 ) , og1 ; <nl> + <nl> + FileStorage fs ( fname , FileStorage : : WRITE ) ; <nl> + fs < < " mi " < < mi ; <nl> + fs < < " mv " < < mv ; <nl> + fs < < " mi3 " < < mi3 ; <nl> + fs < < " mv3 " < < mv3 ; <nl> + fs < < " vudt " < < vudt ; <nl> + fs < < " vudt3 " < < vudt3 ; <nl> + fs < < " empty " < < empty ; <nl> + fs < < " p1 " < < p1 ; <nl> + fs < < " p2 " < < p2 ; <nl> + fs < < " s1 " < < s1 ; <nl> + fs < < " c1 " < < c1 ; <nl> + fs < < " r1 " < < r1 ; <nl> + fs < < " v1 " < < v1 ; <nl> + fs < < " sc1 " < < sc1 ; <nl> + fs < < " g1 " < < g1 ; <nl> + fs . release ( ) ; <nl> + <nl> + fs . open ( fname , FileStorage : : READ ) ; <nl> + fs [ " mi " ] > > mi2 ; <nl> + fs [ " mv " ] > > mv2 ; <nl> + fs [ " mi3 " ] > > mi4 ; <nl> + fs [ " mv3 " ] > > mv4 ; <nl> + fs [ " vudt " ] > > vudt2 ; <nl> + fs [ " vudt3 " ] > > vudt4 ; <nl> + fs [ " empty " ] > > empty ; <nl> + fs [ " p1 " ] > > op1 ; <nl> + fs [ " p2 " ] > > op2 ; <nl> + fs [ " s1 " ] > > os1 ; <nl> + fs [ " c1 " ] > > oc1 ; <nl> + fs [ " r1 " ] > > or1 ; <nl> + fs [ " v1 " ] > > ov1 ; <nl> + fs [ " sc1 " ] > > osc1 ; <nl> + fs [ " g1 " ] > > og1 ; <nl> + CV_Assert ( mi2 . empty ( ) ) ; <nl> + CV_Assert ( mv2 . empty ( ) ) ; <nl> + CV_Assert ( cvtest : : norm ( Mat ( mi3 ) , Mat ( mi4 ) , CV_C ) = = 0 ) ; <nl> + CV_Assert ( mv4 . 
size ( ) = = 1 ) ; <nl> + double n = cvtest : : norm ( mv3 [ 0 ] , mv4 [ 0 ] , CV_C ) ; <nl> + CV_Assert ( vudt2 . empty ( ) ) ; <nl> + CV_Assert ( vudt3 = = vudt4 ) ; <nl> + CV_Assert ( n = = 0 ) ; <nl> + CV_Assert ( op1 = = p1 ) ; <nl> + CV_Assert ( op2 = = p2 ) ; <nl> + CV_Assert ( os1 = = s1 ) ; <nl> + CV_Assert ( oc1 = = c1 ) ; <nl> + CV_Assert ( or1 = = r1 ) ; <nl> + CV_Assert ( ov1 = = v1 ) ; <nl> + CV_Assert ( osc1 = = sc1 ) ; <nl> + CV_Assert ( og1 = = g1 ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + ts - > set_failed_test_info ( cvtest : : TS : : FAIL_MISMATCH ) ; <nl> + } <nl> } <nl> } <nl> } ; <nl> TEST ( Core_InputOutput , filestorage_base64_basic ) <nl> char const * filenames [ ] = { <nl> " core_io_base64_basic_test . yml " , <nl> " core_io_base64_basic_test . xml " , <nl> + " core_io_base64_basic_test . json " , <nl> 0 <nl> } ; <nl> <nl> TEST ( Core_InputOutput , filestorage_base64_basic ) <nl> cv : : Mat _nd_out , _nd_in ; <nl> cv : : Mat _rd_out ( 64 , 64 , CV_64FC1 ) , _rd_in ; <nl> <nl> + bool no_type_id = true ; <nl> + <nl> { / * init * / <nl> <nl> / * a normal mat * / <nl> TEST ( Core_InputOutput , filestorage_base64_basic ) <nl> fs [ " normal_nd_mat " ] > > _nd_in ; <nl> fs [ " random_mat " ] > > _rd_in ; <nl> <nl> + if ( ! fs [ " empty_2d_mat " ] [ " type_id " ] . empty ( ) | | <nl> + ! fs [ " normal_2d_mat " ] [ " type_id " ] . empty ( ) | | <nl> + ! fs [ " normal_nd_mat " ] [ " type_id " ] . empty ( ) | | <nl> + ! fs [ " random_mat " ] [ " type_id " ] . empty ( ) ) <nl> + no_type_id = false ; <nl> + <nl> / * raw data * / <nl> std : : vector < data_t > ( 1000 ) . swap ( rawdata ) ; <nl> cvReadRawData ( * fs , fs [ " rawdata " ] . node , rawdata . data ( ) , data_t : : signature ( ) ) ; <nl> TEST ( Core_InputOutput , filestorage_base64_basic ) <nl> / / EXPECT_EQ ( rawdata [ i ] . i4 , i ) ; <nl> } <nl> <nl> + EXPECT_TRUE ( no_type_id ) ; <nl> + <nl> EXPECT_EQ ( _em_in . rows , _em_out . rows ) ; <nl> EXPECT_EQ ( _em_in . cols , _em_out . cols ) ; <nl> EXPECT_EQ ( _em_in . depth ( ) , _em_out . depth ( ) ) ; <nl> TEST ( Core_InputOutput , filestorage_base64_valid_call ) <nl> char const * filenames [ ] = { <nl> " core_io_base64_other_test . yml " , <nl> " core_io_base64_other_test . xml " , <nl> + " core_io_base64_other_test . json " , <nl> " core_io_base64_other_test . yml ? base64 " , <nl> " core_io_base64_other_test . xml ? base64 " , <nl> + " core_io_base64_other_test . json ? base64 " , <nl> 0 <nl> } ; <nl> char const * real_name [ ] = { <nl> " core_io_base64_other_test . yml " , <nl> " core_io_base64_other_test . xml " , <nl> + " core_io_base64_other_test . json " , <nl> " core_io_base64_other_test . yml " , <nl> " core_io_base64_other_test . xml " , <nl> + " core_io_base64_other_test . json " , <nl> 0 <nl> } ; <nl> <nl> TEST ( Core_InputOutput , filestorage_base64_invalid_call ) <nl> char const * filenames [ ] = { <nl> " core_io_base64_other_test . yml " , <nl> " core_io_base64_other_test . xml " , <nl> + " core_io_base64_other_test . json " , <nl> 0 <nl> } ; <nl> <nl> TEST ( Core_InputOutput , filestorage_yml_vec2i ) <nl> <nl> remove ( file_name . 
c_str ( ) ) ; <nl> } <nl> + <nl> + TEST ( Core_InputOutput , filestorage_json_comment ) <nl> + { <nl> + String mem_str = <nl> + " { / * comment * / \ n " <nl> + " \ " key \ " : \ " value \ " \ n " <nl> + " / * * * * * * * * * * * * \ n " <nl> + " * multiline comment \ n " <nl> + " * * * * * * * * * * * * / \ n " <nl> + " / / 233 \ n " <nl> + " / / \ n " <nl> + " } \ n " <nl> + ; <nl> + <nl> + String str ; <nl> + <nl> + EXPECT_NO_THROW ( <nl> + { <nl> + cv : : FileStorage fs ( mem_str , cv : : FileStorage : : READ | cv : : FileStorage : : MEMORY ) ; <nl> + fs [ " key " ] > > str ; <nl> + fs . release ( ) ; <nl> + } ) ; <nl> + <nl> + EXPECT_EQ ( str , String ( " value " ) ) ; <nl> + } <nl> mmm a / modules / ml / test / test_save_load . cpp <nl> ppp b / modules / ml / test / test_save_load . cpp <nl> int CV_SLMLTest : : run_test_case ( int testCaseIdx ) <nl> if ( code = = cvtest : : TS : : OK ) <nl> { <nl> get_test_error ( testCaseIdx , & test_resps1 ) ; <nl> - fname1 = tempfile ( " . yml . gz " ) ; <nl> + fname1 = tempfile ( " . json . gz " ) ; <nl> save ( ( fname1 + " ? base64 " ) . c_str ( ) ) ; <nl> load ( fname1 . c_str ( ) ) ; <nl> get_test_error ( testCaseIdx , & test_resps2 ) ; <nl> - fname2 = tempfile ( " . yml . gz " ) ; <nl> + fname2 = tempfile ( " . json . gz " ) ; <nl> save ( ( fname2 + " ? base64 " ) . c_str ( ) ) ; <nl> } <nl> else <nl> TEST ( DISABLED_ML_SVM , linear_save_load ) <nl> <nl> svm1 = Algorithm : : load < SVM > ( " SVM45_X_38 - 1 . xml " ) ; <nl> svm2 = Algorithm : : load < SVM > ( " SVM45_X_38 - 2 . xml " ) ; <nl> - string tname = tempfile ( " a . xml " ) ; <nl> + string tname = tempfile ( " a . json " ) ; <nl> svm2 - > save ( tname + " ? base64 " ) ; <nl> svm3 = Algorithm : : load < SVM > ( tname ) ; <nl> <nl> | Merge pull request from wiryls : FileStorageJSON | opencv/opencv | b03e3abd62568653777bd47c3471b985a5121491 | 2016-08-24T16:49:06Z |
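The OpenCV row above adds a third on-disk format, JSON, to cv::FileStorage alongside XML and YAML: an icvJSON* parser and emitter, extension-based format selection, and a "$base64$"-prefixed string encoding for bulk data. A minimal round-trip sketch of that public API, assuming an OpenCV build that includes this commit; the file name and values are illustrative:

#include <opencv2/core.hpp>
#include <iostream>

int main() {
    cv::Mat m = (cv::Mat_<float>(2, 2) << 1.f, 2.f, 3.f, 4.f);

    {   // The ".json" extension routes through the new icvJSON* emitter.
        cv::FileStorage fs("sample.json", cv::FileStorage::WRITE);
        fs << "answer" << 42;
        fs << "mat" << m;   // written as a map holding rows/cols/dt/data
    }                       // destructor releases and flushes the file

    {   // Reading re-detects the format from the leading '{' signature.
        cv::FileStorage fs("sample.json", cv::FileStorage::READ);
        int answer = 0;
        cv::Mat m2;
        fs["answer"] >> answer;
        fs["mat"] >> m2;
        std::cout << answer << "\n" << m2 << std::endl;
    }
    return 0;
}

Appending "?base64" to the file name, as the new tests do, switches bulk data to the Base64 encoding that icvJSONParseValue recognizes by its "$base64$" prefix.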
mmm a / tests / runner . py <nl> ppp b / tests / runner . py <nl> def get_bullet_library ( runner_core , use_cmake ) : <nl> <nl> To run a specific set of tests , you can do things like <nl> <nl> - python tests / runner . py o1 <nl> + python tests / runner . py asm2 <nl> <nl> - ( that runs the o1 ( - O1 ) tests ) . You can run individual tests with <nl> + ( that runs the asm2 ( asm . js , - O2 ) tests ) . You can run individual tests with <nl> <nl> python tests / runner . py test_hello_world <nl> <nl> | Update test runner help text . | emscripten-core/emscripten | ca5203f0321d0f8591c4eac73c553c95a2a5a7d1 | 2014-03-18T09:22:43Z |
mmm a / src / json . hpp <nl> ppp b / src / json . hpp <nl> class basic_json <nl> { <nl> if ( c > = 0x00 and c < = 0x1f ) <nl> { <nl> + / / convert a number 0 . . 15 to its hex representation ( 0 . . f ) <nl> + auto hexify = [ ] ( const char v ) - > char <nl> + { <nl> + return ( v < 10 ) ? ( ' 0 ' + v ) : ( ' a ' + v - 10 ) ; <nl> + } ; <nl> + <nl> / / print character c as \ uxxxx <nl> - sprintf ( & result [ pos + 1 ] , " u % 04x " , int ( c ) ) ; <nl> - pos + = 6 ; <nl> - / / overwrite trailing null character <nl> - result [ pos ] = ' \ \ ' ; <nl> + for ( const char m : { ' u ' , ' 0 ' , ' 0 ' , hexify ( c > > 4 ) , hexify ( c & 0x0f ) } ) <nl> + { <nl> + result [ + + pos ] = m ; <nl> + } <nl> + <nl> + + + pos ; <nl> } <nl> else <nl> { <nl> mmm a / src / json . hpp . re2c <nl> ppp b / src / json . hpp . re2c <nl> class basic_json <nl> { <nl> if ( c > = 0x00 and c < = 0x1f ) <nl> { <nl> + / / convert a number 0 . . 15 to its hex representation ( 0 . . f ) <nl> + auto hexify = [ ] ( const char v ) - > char <nl> + { <nl> + return ( v < 10 ) ? ( ' 0 ' + v ) : ( ' a ' + v - 10 ) ; <nl> + } ; <nl> + <nl> / / print character c as \ uxxxx <nl> - sprintf ( & result [ pos + 1 ] , " u % 04x " , int ( c ) ) ; <nl> - pos + = 6 ; <nl> - / / overwrite trailing null character <nl> - result [ pos ] = ' \ \ ' ; <nl> + for ( const char m : { ' u ' , ' 0 ' , ' 0 ' , hexify ( c > > 4 ) , hexify ( c & 0x0f ) } ) <nl> + { <nl> + result [ + + pos ] = m ; <nl> + } <nl> + <nl> + + + pos ; <nl> } <nl> else <nl> { <nl> | Replace sprintf with hex function , this fixes | nlohmann/json | 14d8a91f7349baefa83cabc96113a12b9d9848a9 | 2015-11-19T06:30:00Z |
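The nlohmann/json row above swaps a sprintf("u%04x", ...) call for a small nibble-to-hex lambda when emitting \u00xx escapes for control characters, writing the five escape characters directly instead of going through a formatted print and overwriting its trailing NUL. A standalone sketch of the same mapping, illustrative rather than the library's actual serializer:

#include <iostream>
#include <string>

// Standalone version of the patch's escaping rule: ASCII control
// characters (0x00..0x1f) become \u00xx, everything else passes through.
std::string escape_control(const std::string& s) {
    // convert a number 0..15 to its hex representation (0..f)
    auto hexify = [](char v) -> char {
        return (v < 10) ? ('0' + v) : ('a' + v - 10);
    };
    std::string out;
    for (unsigned char c : s) {
        if (c <= 0x1f)
            out += {'\\', 'u', '0', '0',
                    hexify(static_cast<char>(c >> 4)),
                    hexify(static_cast<char>(c & 0x0f))};
        else
            out += static_cast<char>(c);
    }
    return out;
}

int main() {
    std::cout << escape_control("a\tb\nc") << "\n";  // prints a\u0009b\u000ac
    return 0;
}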
mmm a / src / ui / int_entry . cpp <nl> ppp b / src / ui / int_entry . cpp <nl> void IntEntry : : openPopup ( ) <nl> m_popupWindow - > setAutoRemap ( false ) ; <nl> m_popupWindow - > setBounds ( rc ) ; <nl> <nl> - Region rgn ( rc ) ; <nl> + Region rgn ( rc . createUnion ( getBounds ( ) ) ) ; <nl> rgn . createUnion ( rgn , Region ( getBounds ( ) ) ) ; <nl> m_popupWindow - > setHotRegion ( rgn ) ; <nl> <nl> | Bigger hot region for IntEntry ' s popup window | aseprite/aseprite | cbb5809c106d16201243127aa2b97b13f905456e | 2013-03-30T23:50:24Z |
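The aseprite row above widens an IntEntry popup's hot region from the popup rectangle alone to the union of that rectangle and the entry widget's own bounds, so moving the pointer back over the widget that opened the popup no longer counts as leaving it. A self-contained sketch of the idea with a stand-in Rect type; names are illustrative, the real code uses the library's Region/Rect classes:

#include <iostream>

// Stand-in for the library's rectangle type (illustrative only).
struct Rect {
    int x, y, w, h;
    bool contains(int px, int py) const {
        return px >= x && py >= y && px < x + w && py < y + h;
    }
};

// A popup guarded by a hot region stays open while the pointer is inside
// any rectangle of the region; including the spawning widget's bounds
// means hovering back over the entry keeps the popup alive.
bool insideHotRegion(const Rect& popup, const Rect& entry, int mx, int my) {
    return popup.contains(mx, my) || entry.contains(mx, my);
}

int main() {
    Rect entry{10, 10, 100, 20};  // the IntEntry widget
    Rect popup{10, 30, 100, 20};  // the slider popup below it
    std::cout << insideHotRegion(popup, entry, 50, 15) << "\n";   // 1: over entry
    std::cout << insideHotRegion(popup, entry, 50, 40) << "\n";   // 1: over popup
    std::cout << insideHotRegion(popup, entry, 300, 300) << "\n"; // 0: outside
    return 0;
}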
mmm a / docs - translations / ko - KR / development / build - instructions - linux . md <nl> ppp b / docs - translations / ko - KR / development / build - instructions - linux . md <nl> $ sudo apt - get install libc6 - dev - armhf - cross linux - libc - dev - armhf - cross \ <nl> $ . / script / bootstrap . py - v - - target_arch = arm <nl> ` ` ` <nl> <nl> - # # 빌드 하기 <nl> + # # 빌드하기 <nl> <nl> ` Release ` 와 ` Debug ` 두 타겟 모두 빌드 합니다 : <nl> <nl> $ . / script / build . py - c D <nl> <nl> 빌드가 모두 끝나면 ` out / D ` 디렉터리에서 ` electron ` 디버그 바이너리를 찾을 수 있습니다 . <nl> <nl> - # # 정리 하기 <nl> + # # 정리하기 <nl> <nl> 빌드 파일들을 정리합니다 : <nl> <nl> $ npm run lint <nl> ` ` ` bash <nl> $ . / script / test . py <nl> ` ` ` <nl> + <nl> + # # 고급 주제 <nl> + <nl> + 기본적인 빌드 구성은 가장 주력인 Linux 배포판에 초점이 맞춰져있으며 , 특정 배포판이나 <nl> + 기기에 빌드할 계획이라면 다음 정보들이 도움이 될 것입니다 . <nl> + <nl> + # # # 로컬에서 ` libchromiumcontent ` 빌드하기 <nl> + <nl> + 미리 빌드된 ` libchromiumcontent ` 를 사용하는 것을 피하기 위해 , ` bootstrap . py ` <nl> + 스크립트에 ` - - build_libchromiumcontent ` 스위치를 추가할 수 있습니다 : <nl> + <nl> + ` ` ` bash <nl> + $ . / script / bootstrap . py - v - - build_libchromiumcontent <nl> + ` ` ` <nl> + <nl> + 참고로 ` shared_library ` 구성은 기본적으로 빌드되어있지 않으며 , 다음 모드를 사용하면 <nl> + ` Release ` 버전의 Electron만 빌드할 수 있습니다 : <nl> + <nl> + ` ` ` bash <nl> + $ . / script / build . py - c R <nl> + ` ` ` <nl> + <nl> + # # # 다운로드된 ` clang ` 바이너리 대신 시스템의 ` clang ` 사용하기 <nl> + <nl> + 기본적으로 Electron은 Chromium 프로젝트에서 제공하는 미리 빌드된 ` clang ` 바이너리를 <nl> + 통해 빌드됩니다 . 만약 어떤 이유로 시스템에 설치된 ` clang ` 을 사용하여 빌드하고 싶다면 , <nl> + ` bootstrap . py ` 를 ` - - clang_dir = < path > ` 스위치와 함께 실행함으로써 해결할 수 있습니다 . <nl> + 빌드 스크립트를 이 스위치와 함께 실행할 때 스크립트는 ` < path > / bin / ` 와 같은 경로로 <nl> + ` clang ` 바이너리를 찾습니다 . <nl> + <nl> + 예를 들어 ` clang ` 을 ` / user / local / bin / clang ` 에 설치했다면 다음과 같습니다 : <nl> + <nl> + ` ` ` bash <nl> + $ . / script / bootstrap . py - v - - build_libchromiumcontent - - clang_dir / usr / local <nl> + $ . / script / build . py - c R <nl> + ` ` ` <nl> + <nl> + # # # ` clang ` 대신 다른 컴파일러 사용하기 <nl> + <nl> + Electron을 ` g + + ` 과 같은 다른 컴파일러로 빌드하려면 , 먼저 ` - - disable_clang ` 스위치를 <nl> + 통해 ` clang ` 을 비활성화 시켜야 하고 , 필요하다면 ` CC ` 와 ` CXX ` 환경 변수도 설정합니다 . <nl> + <nl> + 예를 들어 GCC 툴체인을 사용하여 빌드한다면 다음과 같습니다 : <nl> + <nl> + ` ` ` bash <nl> + $ env CC = gcc CXX = g + + . / script / bootstrap . py - v - - build_libchromiumcontent - - disable_clang <nl> + $ . / script / build . py - c R <nl> + ` ` ` <nl> + <nl> + # # # 환경 변수 <nl> + <nl> + 또한 ` CC ` 와 ` CXX ` 와는 별개로 , 빌드 구성을 변경하기 위해 다음 환경 변수들을 사용할 수 <nl> + 있습니다 : <nl> + <nl> + * ` CPPFLAGS ` <nl> + * ` CPPFLAGS_host ` <nl> + * ` CFLAGS ` <nl> + * ` CFLAGS_host ` <nl> + * ` CXXFLAGS ` <nl> + * ` CXXFLAGS_host ` <nl> + * ` AR ` <nl> + * ` AR_host ` <nl> + * ` CC ` <nl> + * ` CC_host ` <nl> + * ` CXX ` <nl> + * ` CXX_host ` <nl> + * ` LDFLAGS ` <nl> + <nl> + 이 환경 변수는 ` bootstrap . py ` 스크립트를 실행할 때 설정되어야 하며 , ` build . py ` <nl> + 스크립트에선 작동하지 않습니다 . <nl> mmm a / docs - translations / ko - KR / development / build - instructions - osx . md <nl> ppp b / docs - translations / ko - KR / development / build - instructions - osx . md <nl> $ cd electron <nl> $ . / script / bootstrap . py - v <nl> ` ` ` <nl> <nl> - # # 빌드 하기 <nl> + # # 빌드하기 <nl> <nl> ` Release ` 와 ` Debug ` 두 타겟 모두 빌드 합니다 : <nl> <nl> mmm a / docs - translations / ko - KR / development / build - instructions - windows . md <nl> ppp b / docs - translations / ko - KR / development / build - instructions - windows . md <nl> $ cd electron <nl> $ python script \ bootstrap . 
py - v <nl> ` ` ` <nl> <nl> - # # 빌드 하기 <nl> + # # 빌드하기 <nl> <nl> ` Release ` 와 ` Debug ` 두 타겟 모두 빌드 합니다 : <nl> <nl> | : memo : Update Korean docs as upstream | electron/electron | 9fcafc6f9ee446b96e66cad4faa6beb35e421fec | 2016-05-04T06:00:59Z |
mmm a / examples / sdl_opengl2_example / imgui_impl_sdl . cpp <nl> ppp b / examples / sdl_opengl2_example / imgui_impl_sdl . cpp <nl> bool ImGui_ImplSdlGL2_Init ( SDL_Window * window ) <nl> io . KeyMap [ ImGuiKey_PageDown ] = SDL_SCANCODE_PAGEDOWN ; <nl> io . KeyMap [ ImGuiKey_Home ] = SDL_SCANCODE_HOME ; <nl> io . KeyMap [ ImGuiKey_End ] = SDL_SCANCODE_END ; <nl> - io . KeyMap [ ImGuiKey_Insert ] = SDLK_INSERT ; <nl> + io . KeyMap [ ImGuiKey_Insert ] = SDL_SCANCODE_INSERT ; <nl> io . KeyMap [ ImGuiKey_Delete ] = SDLK_DELETE ; <nl> io . KeyMap [ ImGuiKey_Backspace ] = SDLK_BACKSPACE ; <nl> io . KeyMap [ ImGuiKey_Enter ] = SDLK_RETURN ; <nl> mmm a / examples / sdl_opengl3_example / imgui_impl_sdl_gl3 . cpp <nl> ppp b / examples / sdl_opengl3_example / imgui_impl_sdl_gl3 . cpp <nl> bool ImGui_ImplSdlGL3_Init ( SDL_Window * window ) <nl> io . KeyMap [ ImGuiKey_PageDown ] = SDL_SCANCODE_PAGEDOWN ; <nl> io . KeyMap [ ImGuiKey_Home ] = SDL_SCANCODE_HOME ; <nl> io . KeyMap [ ImGuiKey_End ] = SDL_SCANCODE_END ; <nl> - io . KeyMap [ ImGuiKey_Insert ] = SDLK_INSERT ; <nl> + io . KeyMap [ ImGuiKey_Insert ] = SDL_SCANCODE_INSERT ; <nl> io . KeyMap [ ImGuiKey_Delete ] = SDLK_DELETE ; <nl> io . KeyMap [ ImGuiKey_Backspace ] = SDLK_BACKSPACE ; <nl> io . KeyMap [ ImGuiKey_Enter ] = SDLK_RETURN ; <nl> | Examples : SDL : Fixed mapping of Insert key ( , fix bug introduced in ) | ocornut/imgui | 60d5dc79029e4d9c778c9635987c1f11260ce87e | 2018-01-18T09:01:36Z |
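The imgui row above is a one-enum fix: ImGuiIO::KeysDown is indexed by SDL scancodes, but Insert had been mapped with SDLK_INSERT, an SDL keycode. For non-printable keys SDL derives the keycode by setting bit 30 (SDLK_SCANCODE_MASK) on the scancode, so the value is far too large to index a 512-entry array. A small sketch printing both values, assuming SDL2 headers are available:

#include <SDL.h>
#include <cstdio>

int main(int, char**) {
    // Scancodes are small, array-friendly indices...
    std::printf("SDL_SCANCODE_INSERT = %d\n", (int)SDL_SCANCODE_INSERT);
    // ...keycodes for non-printable keys carry SDLK_SCANCODE_MASK (1 << 30),
    // so SDLK_INSERT is 0x40000049 and would index far out of bounds.
    std::printf("SDLK_INSERT         = 0x%X\n", (unsigned)SDLK_INSERT);
    return 0;
}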
mmm a / drivers / gles2 / rasterizer_scene_gles2 . cpp <nl> ppp b / drivers / gles2 / rasterizer_scene_gles2 . cpp <nl> static const GLenum gl_primitive [ ] = { <nl> GL_TRIANGLE_FAN <nl> } ; <nl> <nl> - void RasterizerSceneGLES2 : : _setup_material ( RasterizerStorageGLES2 : : Material * p_material , bool p_reverse_cull , Size2i p_skeleton_tex_size ) { <nl> + void RasterizerSceneGLES2 : : _setup_material ( RasterizerStorageGLES2 : : Material * p_material , bool p_reverse_cull , bool p_alpha_pass , Size2i p_skeleton_tex_size ) { <nl> <nl> / / material parameters <nl> <nl> void RasterizerSceneGLES2 : : _setup_material ( RasterizerStorageGLES2 : : Material * p_m <nl> glEnable ( GL_DEPTH_TEST ) ; <nl> } <nl> <nl> + switch ( p_material - > shader - > spatial . depth_draw_mode ) { <nl> + case RasterizerStorageGLES2 : : Shader : : Spatial : : DEPTH_DRAW_ALPHA_PREPASS : <nl> + case RasterizerStorageGLES2 : : Shader : : Spatial : : DEPTH_DRAW_OPAQUE : { <nl> + <nl> + glDepthMask ( ! p_alpha_pass ) ; <nl> + } break ; <nl> + case RasterizerStorageGLES2 : : Shader : : Spatial : : DEPTH_DRAW_ALWAYS : { <nl> + glDepthMask ( GL_TRUE ) ; <nl> + } break ; <nl> + case RasterizerStorageGLES2 : : Shader : : Spatial : : DEPTH_DRAW_NEVER : { <nl> + glDepthMask ( GL_FALSE ) ; <nl> + } break ; <nl> + } <nl> + <nl> / / TODO whyyyyy ? ? ? ? <nl> p_reverse_cull = true ; <nl> <nl> void RasterizerSceneGLES2 : : _render_render_list ( RenderList : : Element * * p_elements , <nl> <nl> _setup_geometry ( e , skeleton ) ; <nl> <nl> - _setup_material ( material , p_reverse_cull , Size2i ( skeleton ? skeleton - > size * 3 : 0 , 0 ) ) ; <nl> + _setup_material ( material , p_reverse_cull , p_alpha_pass , Size2i ( skeleton ? skeleton - > size * 3 : 0 , 0 ) ) ; <nl> <nl> if ( use_radiance_map ) { <nl> state . scene_shader . set_uniform ( SceneShaderGLES2 : : RADIANCE_INVERSE_XFORM , p_view_transform ) ; <nl> void RasterizerSceneGLES2 : : _render_render_list ( RenderList : : Element * * p_elements , <nl> { <nl> _setup_geometry ( e , skeleton ) ; <nl> <nl> - _setup_material ( material , p_reverse_cull , Size2i ( skeleton ? skeleton - > size * 3 : 0 , 0 ) ) ; <nl> + _setup_material ( material , p_reverse_cull , p_alpha_pass , Size2i ( skeleton ? skeleton - > size * 3 : 0 , 0 ) ) ; <nl> if ( shadow_atlas ! = NULL ) { <nl> glActiveTexture ( GL_TEXTURE0 + storage - > config . max_texture_image_units - 4 ) ; <nl> glBindTexture ( GL_TEXTURE_2D , shadow_atlas - > depth ) ; <nl> void RasterizerSceneGLES2 : : _render_render_list ( RenderList : : Element * * p_elements , <nl> RasterizerStorageGLES2 : : Skeleton * skeleton = storage - > skeleton_owner . getornull ( e - > instance - > skeleton ) ; <nl> <nl> { <nl> - _setup_material ( material , p_reverse_cull , Size2i ( skeleton ? skeleton - > size * 3 : 0 , 0 ) ) ; <nl> + _setup_material ( material , p_reverse_cull , false , Size2i ( skeleton ? skeleton - > size * 3 : 0 , 0 ) ) ; <nl> <nl> if ( directional_shadow . depth ) { <nl> glActiveTexture ( GL_TEXTURE0 + storage - > config . max_texture_image_units - 4 ) ; / / TODO move into base pass <nl> mmm a / drivers / gles2 / rasterizer_scene_gles2 . h <nl> ppp b / drivers / gles2 / rasterizer_scene_gles2 . 
h <nl> class RasterizerSceneGLES2 : public RasterizerScene { <nl> <nl> void _draw_sky ( RasterizerStorageGLES2 : : Sky * p_sky , const CameraMatrix & p_projection , const Transform & p_transform , bool p_vflip , float p_custom_fov , float p_energy ) ; <nl> <nl> - void _setup_material ( RasterizerStorageGLES2 : : Material * p_material , bool p_reverse_cull , Size2i p_skeleton_tex_size = Size2i ( 0 , 0 ) ) ; <nl> + void _setup_material ( RasterizerStorageGLES2 : : Material * p_material , bool p_reverse_cull , bool p_alpha_pass , Size2i p_skeleton_tex_size = Size2i ( 0 , 0 ) ) ; <nl> void _setup_geometry ( RenderList : : Element * p_element , RasterizerStorageGLES2 : : Skeleton * p_skeleton ) ; <nl> void _render_geometry ( RenderList : : Element * p_element ) ; <nl> <nl> | [ GLES2 ] fix depth for alpha pass | godotengine/godot | 5ebc70f288dcb68f67ee5b202bbd4bab3f6420e3 | 2018-08-23T15:25:00Z |
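The depth-mask logic this row adds is self-contained enough to restate: opaque and alpha-prepass materials write depth only outside the transparent pass, while DEPTH_DRAW_ALWAYS and DEPTH_DRAW_NEVER force it on or off. A standalone sketch of that policy (the enum echoes Godot's depth draw modes, but this is not Godot source):

```cpp
#include <cassert>

enum class DepthDrawMode { Opaque, AlphaPrepass, Always, Never };

// What the patch effectively feeds to glDepthMask() for a material
// rendered in a given pass.
bool depth_write_enabled(DepthDrawMode mode, bool alpha_pass) {
    switch (mode) {
        case DepthDrawMode::Opaque:
        case DepthDrawMode::AlphaPrepass:
            // Opaque geometry writes depth; blended geometry must not, or
            // it would occlude objects drawn behind it later in the pass.
            return !alpha_pass;
        case DepthDrawMode::Always: return true;
        case DepthDrawMode::Never:  return false;
    }
    return true; // unreachable, keeps compilers quiet
}

int main() {
    assert( depth_write_enabled(DepthDrawMode::Opaque, /*alpha_pass=*/false));
    assert(!depth_write_enabled(DepthDrawMode::Opaque, /*alpha_pass=*/true));
    assert( depth_write_enabled(DepthDrawMode::Always, /*alpha_pass=*/true));
    assert(!depth_write_enabled(DepthDrawMode::Never,  /*alpha_pass=*/false));
    return 0;
}
```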
mmm a / src / validation . cpp <nl> ppp b / src / validation . cpp <nl> bool ProcessNewBlock ( const CChainParams & chainparams , const std : : shared_ptr < cons <nl> CBlockIndex * pindex = nullptr ; <nl> if ( fNewBlock ) * fNewBlock = false ; <nl> CValidationState state ; <nl> - / / Ensure that CheckBlock ( ) passes before calling AcceptBlock , as <nl> - / / belt - and - suspenders . <nl> - bool ret = CheckBlock ( * pblock , state , chainparams . GetConsensus ( ) ) ; <nl> <nl> + / / CheckBlock ( ) does not support multi - threaded block validation because CBlock : : fChecked can cause data race . <nl> + / / Therefore , the following critical section must include the CheckBlock ( ) call as well . <nl> LOCK ( cs_main ) ; <nl> <nl> + / / Ensure that CheckBlock ( ) passes before calling AcceptBlock , as <nl> + / / belt - and - suspenders . <nl> + bool ret = CheckBlock ( * pblock , state , chainparams . GetConsensus ( ) ) ; <nl> if ( ret ) { <nl> / / Store to disk <nl> ret = g_chainstate . AcceptBlock ( pblock , state , chainparams , & pindex , fForceProcessing , nullptr , fNewBlock ) ; <nl> mmm a / test / sanitizer_suppressions / tsan <nl> ppp b / test / sanitizer_suppressions / tsan <nl> <nl> # ThreadSanitizer suppressions <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> - # fChecked is theoretically racy , practically only in unit tests <nl> - race : CheckBlock <nl> - <nl> # WalletBatch ( unidentified deadlock ) <nl> deadlock : WalletBatch <nl> <nl> | Merge : consensus : Move CheckBlock ( ) call to critical section | bitcoin/bitcoin | 5ab5341d13569736acc262ee750010d5c59edca5 | 2018-12-01T09:27:49Z |
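The fix in this row is the standard cure for a racy memoization flag: CheckBlock() caches its verdict in CBlock::fChecked, so calling it before acquiring cs_main lets two threads read and write the flag concurrently, which is what the removed TSan suppression had been hiding. A hedged sketch of the pattern with stand-in types (not Bitcoin Core's actual definitions):

```cpp
#include <mutex>

struct Block {
    bool checked = false; // unsynchronized cache, like CBlock::fChecked
};

std::mutex cs_main; // stand-in for Bitcoin Core's global lock

bool CheckBlockCached(Block& block) {
    if (block.checked) return true; // read of the cached flag
    // ... expensive stateless validation would run here ...
    block.checked = true;           // write of the cached flag
    return true;
}

bool ProcessNewBlock(Block& block) {
    // Take the lock *before* the check. Calling CheckBlockCached() ahead
    // of this point, as the removed code did, lets two threads race on
    // block.checked.
    std::lock_guard<std::mutex> lock(cs_main);
    if (!CheckBlockCached(block)) return false;
    // ... AcceptBlock() and the rest of the pipeline stay under the lock ...
    return true;
}
```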
mmm a / lib / Basics / Traverser . cpp <nl> ppp b / lib / Basics / Traverser . cpp <nl> <nl> <nl> # include " Traverser . h " <nl> <nl> + # include " Basics / Thread . h " <nl> + <nl> using namespace std ; <nl> using namespace triagens : : basics ; <nl> <nl> - void Traverser : : insertNeighbor ( ThreadInfo & info , <nl> - VertexId neighbor , <nl> - VertexId predecessor , <nl> - EdgeId edge , <nl> - EdgeWeight weight <nl> - ) { <nl> - std : : lock_guard < std : : mutex > guard ( info . mutex ) ; <nl> - auto it = info . lookup . find ( neighbor ) ; <nl> - <nl> - / / Not found insert it <nl> - if ( it = = info . lookup . end ( ) ) { <nl> - info . lookup . emplace ( <nl> - neighbor , <nl> - LookupInfo ( weight , edge , predecessor ) <nl> - ) ; <nl> - info . queue . insert ( <nl> - QueueInfo ( neighbor , weight ) <nl> - ) ; <nl> - return ; <nl> - } <nl> - if ( it - > second . done ) { <nl> - return ; <nl> - } <nl> - if ( it - > second . weight > weight ) { <nl> - QueueInfo q ( neighbor , it - > second . weight ) ; <nl> - info . queue . erase ( q ) ; <nl> - q . weight = weight ; <nl> - info . queue . insert ( q ) ; <nl> - it - > second . weight = weight ; <nl> - } <nl> - } ; <nl> + class Searcher : public Thread { <nl> <nl> - void Traverser : : lookupPeer ( ThreadInfo & info , <nl> - VertexId & neighbor , <nl> - EdgeWeight & weight ) { <nl> - std : : lock_guard < std : : mutex > guard ( info . mutex ) ; <nl> - auto it = info . lookup . find ( neighbor ) ; <nl> - if ( it = = info . lookup . end ( ) ) { <nl> - return ; <nl> - } <nl> - EdgeWeight total = it - > second . weight + weight ; <nl> - if ( total < highscore ) { <nl> - highscore = total ; <nl> - } <nl> - if ( it - > second . done & & total < = highscore ) { <nl> - std : : lock_guard < std : : mutex > guard ( resultMutex ) ; <nl> - intermediate = neighbor ; <nl> - bingo = true ; <nl> - } <nl> - } ; <nl> + Traverser * _traverser ; <nl> + Traverser : : ThreadInfo & _myInfo ; <nl> + Traverser : : ThreadInfo & _peerInfo ; <nl> + Traverser : : VertexId _start ; <nl> + Traverser : : ExpanderFunction _expander ; <nl> + string _id ; <nl> + <nl> + public : <nl> + <nl> + Searcher ( Traverser * traverser , Traverser : : ThreadInfo & myInfo , <nl> + Traverser : : ThreadInfo & peerInfo , Traverser : : VertexId start , <nl> + Traverser : : ExpanderFunction expander , string id ) <nl> + : Thread ( id ) , _traverser ( traverser ) , _myInfo ( myInfo ) , _peerInfo ( peerInfo ) , <nl> + _start ( start ) , _expander ( expander ) , _id ( id ) { <nl> + } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Search graph starting at Start following edges of the given <nl> - / / / direction only <nl> + / / / @ brief Insert a neighbor to the todo list . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - void Traverser : : searchFromVertex ( <nl> - ThreadInfo * myInfo_p , <nl> - ThreadInfo * peerInfo_p , <nl> - VertexId start , <nl> - ExpanderFunction expander , <nl> - string id <nl> - ) { <nl> - ThreadInfo & myInfo ( * myInfo_p ) ; <nl> - ThreadInfo & peerInfo ( * peerInfo_p ) ; <nl> - <nl> - cout < < id < < " : inserting " < < start < < endl ; <nl> - insertNeighbor ( myInfo , start , " " , " " , 0 ) ; <nl> - auto nextVertexIt = myInfo . queue . 
begin ( ) ; <nl> - std : : vector < Neighbor > neighbors ; <nl> - <nl> - / / Iterate while no bingo found and <nl> - / / there still is a vertex on the stack . <nl> - while ( ! bingo & & nextVertexIt ! = myInfo . queue . end ( ) ) { <nl> - auto nextVertex = * nextVertexIt ; <nl> - cout < < id < < " : next " < < nextVertex . vertex < < endl ; <nl> - myInfo . queue . erase ( nextVertexIt ) ; <nl> - neighbors . clear ( ) ; <nl> - expander ( nextVertex . vertex , neighbors ) ; <nl> - for ( auto & neighbor : neighbors ) { <nl> - cout < < id < < " : neighbor " < < neighbor . neighbor < < endl ; <nl> - insertNeighbor ( myInfo , neighbor . neighbor , nextVertex . vertex , <nl> - neighbor . edge , nextVertex . weight + neighbor . weight ) ; <nl> + private : <nl> + <nl> + void insertNeighbor ( Traverser : : ThreadInfo & info , <nl> + Traverser : : VertexId & neighbor , <nl> + Traverser : : VertexId & predecessor , <nl> + Traverser : : EdgeId & edge , <nl> + Traverser : : EdgeWeight weight ) { <nl> + <nl> + std : : lock_guard < std : : mutex > guard ( info . mutex ) ; <nl> + auto it = info . lookup . find ( neighbor ) ; <nl> + <nl> + / / Not found , so insert it : <nl> + if ( it = = info . lookup . end ( ) ) { <nl> + info . lookup . emplace ( <nl> + neighbor , <nl> + Traverser : : LookupInfo ( weight , edge , predecessor ) <nl> + ) ; <nl> + info . queue . insert ( <nl> + Traverser : : QueueInfo ( neighbor , weight ) <nl> + ) ; <nl> + return ; <nl> + } <nl> + if ( it - > second . done ) { <nl> + return ; <nl> + } <nl> + if ( it - > second . weight > weight ) { <nl> + Traverser : : QueueInfo q ( neighbor , it - > second . weight ) ; <nl> + info . queue . erase ( q ) ; <nl> + q . weight = weight ; <nl> + info . queue . insert ( q ) ; <nl> + it - > second . weight = weight ; <nl> + } <nl> } <nl> - lookupPeer ( peerInfo , nextVertex . vertex , nextVertex . weight ) ; <nl> - myInfo . mutex . lock ( ) ; <nl> - / / Can move nextVertexLookup up ? <nl> - auto nextVertexLookup = myInfo . lookup . find ( nextVertex . vertex ) ; <nl> <nl> - TRI_ASSERT ( nextVertexLookup ! = myInfo . lookup . end ( ) ) ; <nl> - cout < < id < < " : done " < < nextVertexLookup - > first < < endl ; <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Lookup a neighbor in the list of our peer . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + void lookupPeer ( Traverser : : ThreadInfo & info , <nl> + Traverser : : VertexId & neighbor , <nl> + Traverser : : EdgeWeight weight ) { <nl> + <nl> + std : : lock_guard < std : : mutex > guard ( info . mutex ) ; <nl> + auto it = info . lookup . find ( neighbor ) ; <nl> + if ( it = = info . lookup . end ( ) ) { <nl> + return ; <nl> + } <nl> + Traverser : : EdgeWeight total = it - > second . weight + weight ; <nl> + if ( total < _traverser - > highscore ) { <nl> + _traverser - > highscore = total ; <nl> + } <nl> + if ( it - > second . done & & total < = _traverser - > highscore ) { <nl> + std : : lock_guard < std : : mutex > guard ( _traverser - > resultMutex ) ; <nl> + _traverser - > intermediate = neighbor ; <nl> + _traverser - > bingo = true ; <nl> + } <nl> + } <nl> <nl> - nextVertexLookup - > second . done = true ; <nl> - myInfo . mutex . unlock ( ) ; <nl> - nextVertexIt = myInfo . queue . 
begin ( ) ; <nl> - } <nl> - bingo = true ; <nl> - / / No possible path , can possibly terminate other thread <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Search graph starting at Start following edges of the given <nl> + / / / direction only <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + public : <nl> + <nl> + virtual void run ( ) { <nl> + <nl> + cout < < _id < < " : inserting " < < _start < < endl ; <nl> + string empty ; <nl> + insertNeighbor ( _myInfo , _start , empty , empty , 0 ) ; <nl> + auto nextVertexIt = _myInfo . queue . begin ( ) ; <nl> + std : : vector < Traverser : : Neighbor > neighbors ; <nl> + <nl> + / / Iterate while no bingo found and <nl> + / / there still is a vertex on the stack . <nl> + while ( ! _traverser - > bingo & & nextVertexIt ! = _myInfo . queue . end ( ) ) { <nl> + auto nextVertex = * nextVertexIt ; <nl> + cout < < _id < < " : next " < < nextVertex . vertex < < endl ; <nl> + _myInfo . queue . erase ( nextVertexIt ) ; <nl> + neighbors . clear ( ) ; <nl> + _expander ( nextVertex . vertex , neighbors ) ; <nl> + for ( auto & neighbor : neighbors ) { <nl> + cout < < _id < < " : neighbor " < < neighbor . neighbor < < endl ; <nl> + insertNeighbor ( _myInfo , neighbor . neighbor , nextVertex . vertex , <nl> + neighbor . edge , nextVertex . weight + neighbor . weight ) ; <nl> + } <nl> + lookupPeer ( _peerInfo , nextVertex . vertex , nextVertex . weight ) ; <nl> + _myInfo . mutex . lock ( ) ; <nl> + / / Can move nextVertexLookup up ? <nl> + auto nextVertexLookup = _myInfo . lookup . find ( nextVertex . vertex ) ; <nl> + <nl> + TRI_ASSERT ( nextVertexLookup ! = _myInfo . lookup . end ( ) ) ; <nl> + cout < < _id < < " : done " < < nextVertexLookup - > first < < endl ; <nl> + <nl> + nextVertexLookup - > second . done = true ; <nl> + _myInfo . mutex . unlock ( ) ; <nl> + nextVertexIt = _myInfo . queue . begin ( ) ; <nl> + } <nl> + _traverser - > bingo = true ; <nl> + / / No possible path , can possibly terminate other thread <nl> + } <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void Traverser : : searchFromVertex ( <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> Traverser : : Path * Traverser : : ShortestPath ( VertexId const & start , <nl> - VertexId const & target ) { <nl> + VertexId const & target ) { <nl> <nl> std : : vector < VertexId > r_vertices ; <nl> std : : vector < VertexId > r_edges ; <nl> Traverser : : Path * Traverser : : ShortestPath ( VertexId const & start , <nl> _backwardQueue . 
clear ( ) ; <nl> ThreadInfo backwardInfo ( _backwardLookup , _backwardQueue , _backwardMutex ) ; <nl> <nl> - std : : thread forwardSearcher ( & Traverser : : searchFromVertex , <nl> - this , & forwardInfo , & backwardInfo , start , forwardExpander , string ( " X " ) ) ; <nl> - std : : thread backwardSearcher ( & Traverser : : searchFromVertex , <nl> - this , & backwardInfo , & forwardInfo , target , backwardExpander , string ( " Y " ) ) ; <nl> + Searcher forwardSearcher ( this , forwardInfo , backwardInfo , start , <nl> + forwardExpander , " X " ) ; <nl> + Searcher backwardSearcher ( this , backwardInfo , forwardInfo , target , <nl> + backwardExpander , " Y " ) ; <nl> + forwardSearcher . start ( ) ; <nl> + backwardSearcher . start ( ) ; <nl> forwardSearcher . join ( ) ; <nl> backwardSearcher . join ( ) ; <nl> <nl> mmm a / lib / Basics / Traverser . h <nl> ppp b / lib / Basics / Traverser . h <nl> <nl> <nl> # include " Basics / Common . h " <nl> <nl> - # include < thread > <nl> # include < mutex > <nl> <nl> + class Searcher ; <nl> + <nl> namespace triagens { <nl> namespace basics { <nl> <nl> namespace triagens { <nl> <nl> class Traverser { <nl> <nl> + friend class : : Searcher ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - data structures <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> namespace triagens { <nl> } ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief edge direction <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief edge direction <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> typedef enum { FORWARD , BACKWARD } Direction ; <nl> <nl> typedef std : : function < void ( VertexId source , std : : vector < Neighbor > & result ) > <nl> namespace triagens { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Function to compute all neighbors of a given vertex <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> private : <nl> <nl> std : : atomic < EdgeWeight > highscore ; <nl> namespace triagens { <nl> std : : unordered_map < VertexId , LookupInfo > _backwardLookup ; <nl> std : : set < QueueInfo , std : : less < QueueInfo > > _backwardQueue ; <nl> std : : mutex _backwardMutex ; <nl> - <nl> - <nl> - void insertNeighbor ( ThreadInfo & info , <nl> - VertexId neighbor , <nl> - VertexId predecessor , <nl> - EdgeId edge , <nl> - EdgeWeight weight <nl> - ) ; <nl> - <nl> - void lookupPeer ( ThreadInfo & info , <nl> - VertexId & neighbor , <nl> - EdgeWeight & weight <nl> - ) ; <nl> - void searchFromVertex ( ThreadInfo * myInfo , <nl> - ThreadInfo * peerInfo , <nl> - VertexId start , <nl> - ExpanderFunction expander , <nl> - std : : string id <nl> 
- ) ; <nl> } ; <nl> } <nl> } <nl> | Use our threads . | arangodb/arangodb | e7e690913afe562d86a8ccb6fa99aca468c767cd | 2015-04-26T04:38:07Z |
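This row's refactor moves a free-standing thread body, previously launched via std::thread on a member function, into a Searcher class deriving from the project's Thread base with a virtual run(). A minimal sketch of that shape, with a stand-in Thread wrapper over std::thread (ArangoDB's real base class differs):

```cpp
#include <thread>

class Thread {
public:
    virtual ~Thread() = default;
    void start() { _impl = std::thread([this] { run(); }); }
    void join()  { if (_impl.joinable()) _impl.join(); }
protected:
    virtual void run() = 0; // subclasses put the former thread body here
private:
    std::thread _impl;
};

class Searcher : public Thread {
public:
    explicit Searcher(int start) : _start(start) {}
protected:
    void run() override {
        // ... the bidirectional search loop that used to live in
        // Traverser::searchFromVertex() goes here ...
        (void)_start;
    }
private:
    int _start;
};

int main() {
    Searcher forward(0), backward(1);
    forward.start();
    backward.start();
    forward.join();
    backward.join();
    return 0;
}
```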
mmm a / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> ppp b / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> the dimension is padded with zeros . <nl> TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr < 0 > ; <nl> } <nl> <nl> + def TF_RandomShuffleOp : TF_Op < " RandomShuffle " , [ SameOperandsAndResultType ] > { <nl> + let summary = " Randomly shuffles a tensor along its first dimension . " ; <nl> + <nl> + let description = [ { <nl> + The tensor is shuffled along dimension 0 , such that each ` value [ j ] ` is mapped <nl> + to one and only one ` output [ i ] ` . For example , a mapping that might occur for a <nl> + 3x2 tensor is : <nl> + <nl> + ` ` ` <nl> + [ [ 1 , 2 ] , [ [ 5 , 6 ] , <nl> + [ 3 , 4 ] , = = > [ 1 , 2 ] , <nl> + [ 5 , 6 ] ] [ 3 , 4 ] ] <nl> + ` ` ` <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TF_Tensor : $ value , <nl> + <nl> + DefaultValuedAttr < I64Attr , " 0 " > : $ seed , <nl> + DefaultValuedAttr < I64Attr , " 0 " > : $ seed2 <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TF_Tensor : $ output <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + } <nl> + <nl> def TF_RandomUniformOp : TF_Op < " RandomUniform " , [ ] > { <nl> let summary = " Outputs random values from a uniform distribution . " ; <nl> <nl> mmm a / tensorflow / compiler / mlir / xla / ir / hlo_ops . td <nl> ppp b / tensorflow / compiler / mlir / xla / ir / hlo_ops . td <nl> def HLO_SortOp : HLO_Op < " sort " , [ NoSideEffect ] > , BASE_HLO_SortOp { <nl> <nl> let builders = [ OpBuilder < <nl> " Builder * builder , OperationState & state , ValueRange operands , " <nl> - " int64_t dimension , bool is_stable " <nl> + " int64_t dimension = - 1 , bool is_stable = false " <nl> > ] ; <nl> <nl> / / TODO ( b / 129422361 ) : SortOp has special conversion logic to HLO . <nl> mmm a / tensorflow / compiler / mlir / xla / tests / legalize - tf . mlir <nl> ppp b / tensorflow / compiler / mlir / xla / tests / legalize - tf . mlir <nl> func @ tensor_scatter_update ( % tensor : tensor < ? x ? x ? xf32 > , % indices : tensor < ? x2xi32 <nl> % 0 = " tf . TensorScatterUpdate " ( % tensor , % indices , % updates ) : ( tensor < ? x ? x ? xf32 > , tensor < ? x2xi32 > , tensor < ? x ? xf32 > ) - > tensor < ? x ? x ? xf32 > <nl> return % 0 : tensor < ? x ? x ? xf32 > <nl> } <nl> + <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / tf . RandomShuffle legalization <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + / / CHECK - LABEL : @ random_shuffle_first_dim_1 <nl> + / / CHECK - SAME : [ [ INPUT : % . * ] ] : tensor < 1x ? xf32 > <nl> + func @ random_shuffle_first_dim_1 ( % input : tensor < 1x ? xf32 > ) - > tensor < 1x ? xf32 > { <nl> + % 0 = " tf . RandomShuffle " ( % input ) : ( tensor < 1x ? xf32 > ) - > ( tensor < 1x ? xf32 > ) <nl> + / / CHECK - NEXT : return [ [ INPUT ] ] <nl> + return % 0 : tensor < 1x ? xf32 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ random_shuffle_1D_16 <nl> + / / CHECK - SAME : [ [ INPUT : % . * ] ] : tensor < 16xf32 > <nl> + func @ random_shuffle_1D_16 ( % input : tensor < 16xf32 > ) - > tensor < 16xf32 > { <nl> + / / CHECK : [ [ SHAPE : % . * ] ] = xla_hlo . constant dense < 16 > : tensor < 1xi64 > <nl> + / / CHECK : [ [ LOWER : % . * ] ] = xla_hlo . constant dense < 0 > : tensor < i32 > <nl> + / / CHECK : [ [ UPPER : % . * ] ] = xla_hlo . 
constant dense < - 1 > : tensor < i32 > <nl> + / / CHECK : [ [ RNG : % . * ] ] = " xla_hlo . rng_uniform " ( [ [ LOWER ] ] , [ [ UPPER ] ] , [ [ SHAPE ] ] ) <nl> + / / CHECK : [ [ SORT : % . * ] ] = " xla_hlo . sort " ( [ [ RNG ] ] , [ [ INPUT ] ] ) ( { <nl> + / / CHECK : ^ { { . * } } ( [ [ ARG1 : % . * ] ] : tensor < i32 > , [ [ ARG2 : % . * ] ] : tensor < i32 > , { { . * } } : tensor < f32 > , { { . * } } : tensor < f32 > ) : <nl> + / / CHECK : " xla_hlo . compare " ( [ [ ARG1 ] ] , [ [ ARG2 ] ] ) { comparison_direction = " LT " } <nl> + / / CHECK : } ) { dimension = - 1 : i64 , is_stable = true } : ( tensor < 16xi32 > , tensor < 16xf32 > ) - > tuple < tensor < 16xi32 > , tensor < 16xf32 > > <nl> + / / CHECK : [ [ RES : % . * ] ] = " xla_hlo . get_tuple_element " ( [ [ SORT ] ] ) { index = 1 : i32 } <nl> + / / CHECK : return [ [ RES ] ] <nl> + % 0 = " tf . RandomShuffle " ( % input ) : ( tensor < 16xf32 > ) - > ( tensor < 16xf32 > ) <nl> + return % 0 : tensor < 16xf32 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ random_shuffle_1D_10240 <nl> + func @ random_shuffle_1D_10240 ( % input : tensor < 10240xf32 > ) - > tensor < 10240xf32 > { <nl> + / / CHECK : xla_hlo . rng_uniform <nl> + / / CHECK : xla_hlo . sort <nl> + / / CHECK : xla_hlo . get_tuple_element <nl> + / / CHECK : xla_hlo . rng_uniform <nl> + / / CHECK : xla_hlo . sort <nl> + / / CHECK : xla_hlo . get_tuple_element <nl> + % 0 = " tf . RandomShuffle " ( % input ) : ( tensor < 10240xf32 > ) - > ( tensor < 10240xf32 > ) <nl> + return % 0 : tensor < 10240xf32 > <nl> + } <nl> mmm a / tensorflow / compiler / mlir / xla / transforms / legalize_tf . cc <nl> ppp b / tensorflow / compiler / mlir / xla / transforms / legalize_tf . cc <nl> class ConvertUnsortedSegmentSumOp <nl> } <nl> } ; <nl> <nl> + class ConvertRandomShuffleOp : public OpRewritePattern < TF : : RandomShuffleOp > { <nl> + public : <nl> + using OpRewritePattern : : OpRewritePattern ; <nl> + <nl> + PatternMatchResult matchAndRewrite ( TF : : RandomShuffleOp op , <nl> + PatternRewriter & rewriter ) const override { <nl> + auto input_type = op . value ( ) . getType ( ) . dyn_cast < RankedTensorType > ( ) ; <nl> + if ( ! input_type ) return matchFailure ( ) ; <nl> + <nl> + int64_t input_rank = input_type . getRank ( ) ; <nl> + int64_t first_dim_size = input_type . getDimSize ( 0 ) ; <nl> + if ( ShapedType : : isDynamic ( first_dim_size ) ) return matchFailure ( ) ; <nl> + <nl> + / / We are shuffling along the first dimension . If its size is < = 1 , then <nl> + / / shuffling is a no - op . <nl> + if ( first_dim_size < = 1 ) { <nl> + rewriter . replaceOp ( op , op . value ( ) ) ; <nl> + return matchSuccess ( ) ; <nl> + } <nl> + <nl> + / / For vectors , shuffle values by sorting instead of the obvious <nl> + / / Fisher - Yates algorithm . Fisher - Yates is simple to implement and correct , <nl> + / / but not easily parallelizable . For a sufficiently parallel architecture , <nl> + / / it is faster to sort many times , than Fisher - Yates shuffle once . <nl> + if ( input_rank = = 1 ) { <nl> + / / Shuffle values by assigning each value a random key and sorting the <nl> + / / keys . Keys can collide causing detectable patterns in the shuffled <nl> + / / output . Collisions translates into more ascending sub - sequences in the <nl> + / / shuffled output than would be expected by chance . To avoid collisions , <nl> + / / the number of possible key values must be sufficiently large . <nl> + <nl> + / / How are more than 2 ^ 32 keys created ? 
In each loop iteration , the <nl> + / / algorithm sorts by random keys . Conceptually , the earlier iterations <nl> + / / are sorting on the lower - order bits of larger keys that are never <nl> + / / actually assembled . <nl> + <nl> + / / The expected number of collisions is n - d + d ( 1 - 1 / d ) ^ n , where d is <nl> + / / the number of possible keys and n is the number of values . If d = n ^ 2 , <nl> + / / then the limit as n goes to infinity is 1 / 2 . If d = n ^ 3 , then the limit <nl> + / / as n goes to infinity is zero . <nl> + <nl> + / / This implementation ensures that the key - space is greater than or equal <nl> + / / to the cube of the number of values . The risk of collisions can be <nl> + / / further reduced by increasing Exponent at the expense of <nl> + / / performance . <nl> + <nl> + / / For Exponent = 2 , the expected number of collisions per shuffle is <nl> + / / maximized at n = floor ( ( 2 ^ 32 - 1 ) ^ ( 1 / 2 ) ) = 65535 where the expectation is <nl> + / / about 1 / 2 . <nl> + <nl> + / / For Exponent = 3 , the expected number of collisions per shuffle is <nl> + / / maximized at n = floor ( ( 2 ^ 32 - 1 ) ^ ( 1 / 3 ) ) = 1625 where the expectation is <nl> + / / about 1 / 3255 . <nl> + <nl> + / / For Exponent = 4 , the expected number of collisions per shuffle is <nl> + / / maximized at n = floor ( ( 2 ^ 32 - 1 ) ^ ( 1 / 4 ) ) = 255 where the expectation is <nl> + / / about 1 / 132622 . <nl> + constexpr int exponent = 3 ; <nl> + int64_t num_elements = input_type . getNumElements ( ) ; <nl> + uint32_t u32_max = std : : numeric_limits < uint32_t > : : max ( ) ; <nl> + int rounds = <nl> + std : : ceil ( exponent * std : : log ( num_elements ) / std : : log ( u32_max ) ) ; <nl> + <nl> + auto i32_type = rewriter . getIntegerType ( 32 ) ; <nl> + auto key_type = RankedTensorType : : get ( { num_elements } , i32_type ) ; <nl> + auto shape_tensor = rewriter . create < xla_hlo : : ConstOp > ( <nl> + op . getLoc ( ) , GetI64ElementsAttr ( { num_elements } , & rewriter ) ) ; <nl> + <nl> + auto lower_limit = rewriter . create < xla_hlo : : ConstOp > ( <nl> + op . getLoc ( ) , rewriter . getI32IntegerAttr ( 0 ) ) ; <nl> + / / Unfortunately , xla : : RngUniform gives values in the half open interval <nl> + / / rather than the closed interval , so instead of 2 ^ 32 possible keys there <nl> + / / are only 2 ^ 32 - 1 ( kuint32max ) . <nl> + auto upper_limit = rewriter . create < xla_hlo : : ConstOp > ( <nl> + op . getLoc ( ) , rewriter . getI32IntegerAttr ( u32_max ) ) ; <nl> + <nl> + Value current = op . value ( ) ; <nl> + for ( int i = 0 ; i < rounds ; + + i ) { <nl> + auto keys = rewriter . create < xla_hlo : : RngUniformOp > ( <nl> + op . getLoc ( ) , key_type , lower_limit , upper_limit , shape_tensor ) ; <nl> + auto sorted = rewriter . create < xla_hlo : : SortOp > ( <nl> + op . getLoc ( ) , llvm : : ArrayRef < Value > { keys , current } ) ; <nl> + BuildSortComparisonBody ( { i32_type , input_type . getElementType ( ) } , <nl> + / * direction = * / " LT " , & sorted . comparator ( ) , <nl> + & rewriter ) ; <nl> + current = rewriter . create < GetTupleElementOp > ( op . getLoc ( ) , <nl> + sorted . getResult ( ) , 1 ) ; <nl> + } <nl> + rewriter . replaceOp ( op , current ) ; <nl> + return matchSuccess ( ) ; <nl> + } <nl> + <nl> + / / The Fisher - Yates algorithm . <nl> + <nl> + / / TODO ( b / 147215441 ) : implement this . <nl> + <nl> + return matchFailure ( ) ; <nl> + } <nl> + } ; <nl> + <nl> # include " tensorflow / compiler / mlir / xla / transforms / generated_legalize_tf . 
inc " <nl> <nl> LogicalResult legalizeTF ( Operation * op , bool allow_partial_conversion ) { <nl> LogicalResult legalizeTF ( Operation * op , bool allow_partial_conversion ) { <nl> ConvertStridedSliceOp , ConvertStridedSliceGradOp , ConvertSumOp , <nl> ConvertTensorScatterUpdateOp , ConvertTileOp , ConvertTopKV2Op , <nl> ConvertUnpackOp , ConvertUnsortedSegmentMaxOp , ConvertUnsortedSegmentMinOp , <nl> - ConvertUnsortedSegmentProdOp , ConvertUnsortedSegmentSumOp > ( <nl> - op - > getContext ( ) ) ; <nl> + ConvertUnsortedSegmentProdOp , ConvertUnsortedSegmentSumOp , <nl> + ConvertRandomShuffleOp > ( op - > getContext ( ) ) ; <nl> <nl> ConversionTarget target ( * context ) ; <nl> target . addLegalDialect < XlaHloDialect > ( ) ; <nl> | Add lowering from tf . RandomShuffle to HLO ops for degenerated cases | tensorflow/tensorflow | 0bc5e0beabefb97842cc71487f6adb0a76859b0b | 2020-01-09T22:09:53Z |
mmm a / Documentation / InstallationManual / Installing . md <nl> ppp b / Documentation / InstallationManual / Installing . md <nl> Using a Package Manager to install ArangoDB { # InstallingLinuxPackageManager } <nl> Follow the instructions on the <nl> @ EXTREF_S { http : / / www . arangodb . org / download , Downloads } <nl> page to use your favorite package manager for the major distributions . After setting <nl> - up the ArangoDB repository you can then easily install ArangoDB using yum , aptitude , <nl> + up the ArangoDB repository you can easily install ArangoDB using yum , aptitude , <nl> urpmi , or zypper . <nl> <nl> # # # Gentoo <nl> provided by @ @ mgiken . <nl> <nl> # # # Linux - Mint { # InstallingDebian } <nl> <nl> - Download and import GPG - PublicKey <nl> + Download and import GPG - PublicKey : <nl> <nl> wget - O RPM - GPG - KEY - www . arangodb . org http : / / www . arangodb . org / repositories / PublicKey <nl> apt - key add RPM - GPG - KEY - www . arangodb . org <nl> graphical user interface to start and stop the server . <nl> Homebrew { # InstallingMacOSXHomebrew } <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> - If you are using @ EXTREF_S { http : / / brew . sh / , homebrew } , <nl> + If you are using @ EXTREF_S { http : / / brew . sh / , homebrew } , <nl> then you can install the ArangoDB using ` brew ` as follows : <nl> <nl> brew install arangodb <nl> <nl> This will install the current stable version of ArangoDB and all <nl> dependencies within your Homebrew tree . Note that the server will be <nl> - installed as <nl> + installed as : <nl> <nl> / usr / local / sbin / arangod <nl> <nl> - The ArangoDB shell will be install as <nl> + The ArangoDB shell will be installed as : <nl> <nl> / usr / local / bin / arangosh <nl> <nl> If you want to install the latest ( unstable ) version use : <nl> <nl> brew install - - HEAD arangodb <nl> <nl> - You can unstall ArangoDB using <nl> + You can uninstall ArangoDB using : <nl> <nl> brew uninstall arangodb <nl> <nl> However , in case you started ArangoDB using the launchctl , then you <nl> - need to unload it before uninstalling the server . <nl> + need to unload it before uninstalling the server : <nl> <nl> launchctl unload ~ / Library / LaunchAgents / homebrew . mxcl . arangodb . plist <nl> <nl> - Then remove the LaunchAgent <nl> + Then remove the LaunchAgent : <nl> <nl> rm ~ / Library / LaunchAgents / homebrew . mxcl . arangodb . plist <nl> <nl> Command - Line App { # InstallingMacOSXBundle } <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> In case you are not using homebrew , we also provide a command - line app . You can <nl> - download it from <nl> + download it from : <nl> <nl> http : / / www . arangodb . org / download <nl> <nl> installation process you may change this . In the following description we will a <nl> that ArangoDB has been installed in the location ` < ROOTDIR > ` . <nl> <nl> You have to be careful when choosing an installation directory . You need either <nl> - write permission to this directoy or you need to modify the config file for the <nl> + write permission to this directory or you need to modify the config file for the <nl> server process . In the latter case the database directory and the Foxx directory <nl> - should must be writable by the user . <nl> + should be writable by the user . <nl> <nl> - Installating for a single user : Select a different directory during <nl> + Installing for a single user : Select a different directory during <nl> installation . 
For example ` C : / Users / < username > / arangodb ` or ` C : / ArangoDB ` . <nl> <nl> - Installating for multiple users : Keep the default directory . After the <nl> + Installing for multiple users : Keep the default directory . After the <nl> installation edit the file ` < ROOTDIR > / etc / arangodb / arangod . conf ` . Adjust the <nl> ` directory ` and ` app - path ` so that these paths point into your home directory . <nl> <nl> installation edit the file ` < ROOTDIR > / etc / arangodb / arangod . conf ` . Adjust the <nl> <nl> Create the directories for each user that wants to use ArangoDB . <nl> <nl> - Installating as Service : Keep the default directory . After the installation open <nl> + Installing as Service : Keep the default directory . After the installation open <nl> a command line as administrator ( search for ` cmd ` and right click ` run as <nl> administrator ` ) . <nl> <nl> Starting { # InstallingWindowsStarting } <nl> To start an ArangoDB server instance with networking enabled , use the executable <nl> ` arangod . exe ` located in ` < ROOTDIR > / bin ` . This will use the configuration <nl> file ` arangod . conf ` located in ` < ROOTDIR > / etc / arangodb ` , which you can adjust <nl> - to your needs and use the data directory " < ROOTDIR > / var / lib / arangodb " . This <nl> + to your needs and use the data directory ` < ROOTDIR > / var / lib / arangodb ` . This <nl> is the place where all your data ( databases and collections ) will be stored <nl> by default . <nl> <nl> Please check the output of the ` arangod . exe ` executable before going on . If the <nl> - server started successully , you should see a line ` ArangoDB is ready for <nl> + server started successfully , you should see a line ` ArangoDB is ready for <nl> business . Have fun ! ` at the end of its output . <nl> <nl> We now wish to check that the installation is working correctly and to do this <nl> Using the Client { # InstallingWindowsClient } <nl> <nl> To connect to an already running ArangoDB server instance , there is a shell <nl> ` arangosh . exe ` located in ` < ROOTDIR > / bin ` . This starts a shell which can be <nl> - used ( amongst other things ) to administer and query a local or remote <nl> + used – amongst other things – to administer and query a local or remote <nl> ArangoDB server . <nl> <nl> Note that ` arangosh . exe ` does NOT start a separate server , it only starts the <nl> - shell . To use it , you must have a server running somewhere , e . g . by using <nl> + shell . To use it you must have a server running somewhere , e . g . by using <nl> the ` arangod . exe ` executable . <nl> <nl> ` arangosh . exe ` uses configuration from the file ` arangosh . conf ` located in <nl> Limitations for Cygwin { # InstallingWindowsCygwin } <nl> <nl> Please note some important limitations when running ArangoDB under Cygwin : <nl> Starting ArangoDB can be started from out of a Cygwin terminal , but pressing <nl> - CTRL - C will forcefully kill the server process , without giving it a chance to <nl> + ` CTRL - C ` will forcefully kill the server process without giving it a chance to <nl> handle the kill signal . In this case , a regular server shutdown is not possible , <nl> which may leave a file ` LOCK ` around in the server ' s data directory . This file <nl> needs to be removed manually to make ArangoDB start again . Additionally , as <nl> | Documentation fixes : Reviewed Installing Manual | arangodb/arangodb | 258d4ab7f5dc232992589a80a219ac001bb69994 | 2014-04-08T11:17:42Z |
mmm a / lib / IRGen / GenEnum . cpp <nl> ppp b / lib / IRGen / GenEnum . cpp <nl> namespace { <nl> llvm : : MapVector < CanType , llvm : : Value * > & typeToMetadataVec , <nl> SILType T ) const override { <nl> auto canType = T . getSwiftRValueType ( ) ; <nl> - assert ( ! canType - > hasArchetype ( ) & & <nl> + assert ( ! canType - > is < ArchetypeType > ( ) & & <nl> " collectArchetypeMetadata : no archetype expected here " ) ; <nl> } <nl> <nl> mmm a / lib / IRGen / GenValueWitness . cpp <nl> ppp b / lib / IRGen / GenValueWitness . cpp <nl> void TypeInfo : : collectArchetypeMetadata ( <nl> llvm : : MapVector < CanType , llvm : : Value * > & typeToMetadataVec , <nl> SILType T ) const { <nl> auto canType = T . getSwiftRValueType ( ) ; <nl> - assert ( ! canType - > getWithoutSpecifierType ( ) - > is < ArchetypeType > ( ) & & <nl> - " Did not expect an ArchetypeType " ) ; <nl> + assert ( ! canType - > is < ArchetypeType > ( ) & & " Did not expect an ArchetypeType " ) ; <nl> } <nl> | Merge pull request from shajrawi / fix_outline_assert | apple/swift | 0a17342740089ea0b342d19a30ba18f1f12bb228 | 2017-12-05T01:47:12Z |
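The one-line assert change in this row turns on the difference between hasArchetype(), which asks whether any component of a type mentions an archetype, and is<ArchetypeType>(), which asks whether the outermost node itself is one; the relaxed form lets types that merely contain archetypes through. A toy C++ analogy of the two predicates (not Swift compiler code; Swift's real TypeBase API is far richer):

```cpp
#include <cassert>
#include <memory>
#include <vector>

struct Type {
    bool is_archetype = false;
    std::vector<std::shared_ptr<Type>> children;

    // Analogue of is<ArchetypeType>(): only looks at the outermost node.
    bool isArchetype() const { return is_archetype; }

    // Analogue of hasArchetype(): recurses into component types.
    bool hasArchetype() const {
        if (is_archetype) return true;
        for (const auto& c : children)
            if (c->hasArchetype()) return true;
        return false;
    }
};

int main() {
    auto t = std::make_shared<Type>();
    t->is_archetype = true;

    Type wrapper; // e.g. a generic container instantiated over T
    wrapper.children.push_back(t);

    // The relaxed assert (!is<ArchetypeType>()) passes here, while the
    // stricter one (!hasArchetype()) would have fired.
    assert(!wrapper.isArchetype() && wrapper.hasArchetype());
    return 0;
}
```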
mmm a / cmake / scripts / android / Install . cmake <nl> ppp b / cmake / scripts / android / Install . cmake <nl> foreach ( target apk obb apk - unsigned apk - obb apk - obb - unsigned apk - noobb apk - clean <nl> CC = $ { CMAKE_C_COMPILER } <nl> CPU = $ { CPU } <nl> ARCH = $ { ARCH } <nl> + HOST = $ { HOST } <nl> + TOOLCHAIN = $ { TOOLCHAIN } <nl> PREFIX = $ { prefix } <nl> DEPENDS_PATH = $ { DEPENDS_PATH } <nl> NDKROOT = $ { NDKROOT } <nl> mmm a / docs / README . android <nl> ppp b / docs / README . android <nl> Building for arm architecture : <nl> $ cd build / tools <nl> $ . / make - standalone - toolchain . sh \ <nl> - - install - dir = < android - toolchain - arm > / android - 21 - - platform = android - 21 \ <nl> - - - toolchain = arm - linux - androideabi - 4 . 9 <nl> + - - toolchain = arm - linux - androideabi - 4 . 9 - - stl = libc + + <nl> <nl> Building for aarch64 architecture : <nl> <nl> Building for aarch64 architecture : <nl> $ cd build / tools <nl> $ . / make - standalone - toolchain . sh \ <nl> - - install - dir = < android - toolchain - aarch64 > / android - 21 - - platform = android - 21 \ <nl> - - - toolchain = aarch64 - linux - android - 4 . 9 <nl> + - - toolchain = aarch64 - linux - android - 4 . 9 - - stl = libc + + <nl> <nl> Building for x86 architecture : <nl> <nl> Building for x86 architecture : <nl> $ cd build / tools <nl> $ . / make - standalone - toolchain . sh \ <nl> - - install - dir = < android - toolchain - x86 > / android - 21 - - platform = android - 21 \ <nl> - - - toolchain = x86 - 4 . 9 - - arch = x86 <nl> + - - toolchain = x86 - 4 . 9 - - arch = x86 - - stl = libc + + <nl> <nl> Make sure to pick a toolchain for your desired architecture . If an error about <nl> the used system is shown - please add proper - - system parameter as mentioned <nl> mmm a / tools / android / packaging / Makefile . in <nl> ppp b / tools / android / packaging / Makefile . in <nl> else ifeq ( $ ( findstring arm , $ ( CPU ) ) , arm ) <nl> CPU = armeabi - v7a <nl> endif <nl> <nl> + # libc + + <nl> + STLLIB = $ ( TOOLCHAIN ) / $ ( HOST ) / lib / libc + + _shared . so <nl> + <nl> # older ndk x86 <nl> GDBPATH = $ ( NDKROOT ) / toolchains / $ ( ARCH ) - $ ( GCC_VERSION ) / prebuilt / gdbserver <nl> <nl> libs : $ ( PREFIX ) / lib / @ APP_NAME_LC @ / lib @ APP_NAME_LC @ . so <nl> cd xbmc / obj / local / $ ( CPU ) / ; find . - name " * . so " - not - name " lib * . so " | sed " s / \ . \ / / / " | xargs - I @ mv @ lib @ <nl> cp - fp xbmc / obj / local / $ ( CPU ) / * . so xbmc / lib / $ ( CPU ) / <nl> $ ( STRIP ) - - strip - unneeded xbmc / lib / $ ( CPU ) / * . so <nl> + install - p $ ( STLLIB ) . / xbmc / lib / $ ( CPU ) / <nl> install - p $ ( GDBPATH ) . / xbmc / lib / $ ( CPU ) / gdbserver <nl> echo " set solib - search - path . / obj / local / $ ( CPU ) " > . / xbmc / lib / $ ( CPU ) / gdb . setup <nl> echo " directory $ ( TOOLCHAIN ) / sysroot / usr / include $ ( NDKROOT ) / sources / android / native_app_glue " \ <nl> mmm a / tools / depends / configure . ac <nl> ppp b / tools / depends / configure . 
ac <nl> case $ host in <nl> if test " x $ use_cpu " = " xauto " ; then <nl> use_cpu = " armeabi - v7a " <nl> fi <nl> + platform_cc = clang <nl> + platform_cxx = clang + + <nl> use_sdk = " $ { use_sdk : - android - 24 } " <nl> use_ndk_api = " $ { use_ndk_api : - 21 } " <nl> deps_dir = " $ use_host - $ use_ndk_api - $ build_type " <nl> case $ host in <nl> if test " x $ use_cpu " = " xauto " ; then <nl> use_cpu = " arm64 - v8a " <nl> fi <nl> + platform_cc = clang <nl> + platform_cxx = clang + + <nl> use_sdk = " $ { use_sdk : - android - 24 } " <nl> use_ndk_api = " $ { use_ndk_api : - 21 } " <nl> deps_dir = " $ use_host - $ use_ndk_api - $ build_type " <nl> case $ host in <nl> if test " x $ use_cpu " = " xauto " ; then <nl> use_cpu = $ host_cpu <nl> fi <nl> + platform_cc = clang <nl> + platform_cxx = clang + + <nl> use_sdk = " $ { use_sdk : - android - 24 } " <nl> use_ndk_api = " $ { use_ndk_api : - 21 } " <nl> deps_dir = " $ use_host - $ use_ndk_api - $ build_type " <nl> mmm a / tools / depends / target / Toolchain . cmake . in <nl> ppp b / tools / depends / target / Toolchain . cmake . in <nl> endif ( ) <nl> if ( CORE_SYSTEM_NAME STREQUAL android ) <nl> set ( NDKROOT @ use_ndk_path @ ) <nl> set ( SDKROOT @ use_sdk_path @ ) <nl> + set ( TOOLCHAIN @ use_toolchain @ ) <nl> + set ( HOST @ use_host @ ) <nl> set ( SDK_PLATFORM @ use_sdk @ ) <nl> string ( REPLACE " : " " ; " SDK_BUILDTOOLS_PATH " @ build_tools_path @ " ) <nl> endif ( ) <nl> | CHG : [ droid ] switch to clang & libc + + | xbmc/xbmc | d32bd6656f0d7b2b77304d60202142ea278dfd3e | 2017-12-22T10:00:00Z |
new file mode 100644 <nl> index 000000000000 . . 5563cc64aa1a <nl> mmm / dev / null <nl> ppp b / jstests / noPassthrough / index_failover_key_errors . js <nl> <nl> + / * * <nl> + * Confirms that an index build is aborted after step - up by a new primary when there are key <nl> + * generation errors . This test orchestrates a scenario such that a secondary detects ( and <nl> + * ignores ) an indexing error . After step - up , the node retries indexing the skipped record before <nl> + * completing . The expected result is that the node , now primary , aborts the index build for the <nl> + * entire replica set . <nl> + * <nl> + * @ tags : [ <nl> + * requires_replication , <nl> + * ] <nl> + * / <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + load ( ' jstests / noPassthrough / libs / index_build . js ' ) ; <nl> + <nl> + const rst = new ReplSetTest ( { <nl> + nodes : [ <nl> + { } , <nl> + { } , <nl> + ] , <nl> + } ) ; <nl> + const nodes = rst . startSet ( ) ; <nl> + rst . initiate ( ) ; <nl> + <nl> + const primary = rst . getPrimary ( ) ; <nl> + const testDB = primary . getDB ( ' test ' ) ; <nl> + const coll = testDB . getCollection ( ' test ' ) ; <nl> + <nl> + if ( ! IndexBuildTest . supportsTwoPhaseIndexBuild ( primary ) ) { <nl> + jsTestLog ( ' Two phase index builds not enabled , skipping test . ' ) ; <nl> + rst . stopSet ( ) ; <nl> + return ; <nl> + } <nl> + <nl> + / / Insert a document that cannot be indexed because it causes a CannotIndexParallelArrays error <nl> + / / code . <nl> + const badDoc = { <nl> + _id : 0 , <nl> + a : [ 0 , 1 ] , <nl> + b : [ 2 , 3 ] <nl> + } ; <nl> + assert . commandWorked ( coll . insert ( badDoc ) ) ; <nl> + <nl> + / / Start an index build on primary and secondary , but prevent the primary from scanning the <nl> + / / collection . Do not stop the secondary ; intentionally let it scan the invalid document , which we <nl> + / / will resolve later . <nl> + <nl> + / / We are using this fail point to pause the index build before it starts the collection scan . <nl> + / / This is important for this test because we are mutating the collection state before the index <nl> + / / builder is able to observe the invalid document . <nl> + / / By comparison , IndexBuildTest . pauseIndexBuilds ( ) stalls the index build in the middle of the <nl> + / / collection scan . <nl> + assert . commandWorked ( <nl> + testDB . adminCommand ( { configureFailPoint : ' hangAfterInitializingIndexBuild ' , mode : ' alwaysOn ' } ) ) ; <nl> + const createIdx = IndexBuildTest . startIndexBuild ( primary , coll . getFullName ( ) , { a : 1 , b : 1 } ) ; <nl> + <nl> + / / Wait for the index build to start on the secondary . <nl> + const secondary = rst . getSecondary ( ) ; <nl> + const secondaryDB = secondary . getDB ( testDB . getName ( ) ) ; <nl> + const secondaryColl = secondaryDB . getCollection ( coll . getName ( ) ) ; <nl> + IndexBuildTest . waitForIndexBuildToStart ( secondaryDB ) ; <nl> + IndexBuildTest . assertIndexes ( secondaryColl , 2 , [ " _id_ " ] , [ " a_1_b_1 " ] , { includeBuildUUIDs : true } ) ; <nl> + <nl> + / / Step down the primary . <nl> + const stepDown = startParallelShell ( ( ) = > { <nl> + assert . commandWorked ( db . adminCommand ( { " replSetStepDown " : 60 , " force " : true } ) ) ; <nl> + } , primary . port ) ; <nl> + <nl> + / / Expect a failed createIndex command invocation in the parallel shell due to stepdown even though <nl> + / / the index build will continue in the background . 
<nl> + const exitCode = createIdx ( { checkExitSuccess : false } ) ; <nl> + assert . neq ( 0 , exitCode , ' expected shell to exit abnormally due to index build being terminated ' ) ; <nl> + checkLog . contains ( primary , ' Index build interrupted : ' ) ; <nl> + <nl> + / / Wait for stepdown to complete . <nl> + stepDown ( ) ; <nl> + <nl> + / / Unblock the index build on the old primary . <nl> + assert . commandWorked ( <nl> + testDB . adminCommand ( { configureFailPoint : ' hangAfterInitializingIndexBuild ' , mode : ' off ' } ) ) ; <nl> + <nl> + const newPrimary = rst . getPrimary ( ) ; <nl> + const newPrimaryDB = newPrimary . getDB ( ' test ' ) ; <nl> + const newPrimaryColl = newPrimaryDB . getCollection ( ' test ' ) ; <nl> + <nl> + / / Ensure the old primary doesn ' t take over again . <nl> + assert . neq ( primary . port , newPrimary . port ) ; <nl> + <nl> + / / The index should not be present on the old primary after processing the abortIndexBuild oplog <nl> + / / entry from the new primary . <nl> + jsTestLog ( " waiting for index build to stop on old primary " ) ; <nl> + IndexBuildTest . waitForIndexBuildToStop ( testDB ) ; <nl> + IndexBuildTest . assertIndexes ( coll , 1 , [ ' _id_ ' ] ) ; <nl> + <nl> + / / Check that index was not built on the new primary . <nl> + jsTestLog ( " waiting for index build to stop on new primary " ) ; <nl> + IndexBuildTest . waitForIndexBuildToStop ( newPrimaryDB ) ; <nl> + IndexBuildTest . assertIndexes ( newPrimaryColl , 1 , [ ' _id_ ' ] ) ; <nl> + <nl> + rst . stopSet ( ) ; <nl> + } ) ( ) ; <nl> new file mode 100644 <nl> index 000000000000 . . 7113c9411eb8 <nl> mmm / dev / null <nl> ppp b / jstests / noPassthrough / index_failover_resolved_key_errors . js <nl> <nl> + / * * <nl> + * Confirms that an index build is committed after step - up by a new primary when there are key <nl> + * generation errors that are eventually resolved . This test orchestrates a scenario such that a <nl> + * secondary detects ( and ignores ) an indexing error while performing a collection scan , but before <nl> + * receiving an update that resolves the error . After step - up , the node retries indexing the skipped <nl> + * record before completing . The expected result is that the node , now primary , commits the index <nl> + * build for the entire replica set . <nl> + * <nl> + * @ tags : [ <nl> + * requires_replication , <nl> + * ] <nl> + * / <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + load ( ' jstests / noPassthrough / libs / index_build . js ' ) ; <nl> + <nl> + const rst = new ReplSetTest ( { <nl> + / / We want at least two electable nodes . <nl> + nodes : [ <nl> + { } , <nl> + { } , <nl> + ] , <nl> + } ) ; <nl> + const nodes = rst . startSet ( ) ; <nl> + rst . initiate ( ) ; <nl> + <nl> + const primary = rst . getPrimary ( ) ; <nl> + const testDB = primary . getDB ( ' test ' ) ; <nl> + const coll = testDB . getCollection ( ' test ' ) ; <nl> + <nl> + if ( ! IndexBuildTest . supportsTwoPhaseIndexBuild ( primary ) ) { <nl> + jsTestLog ( ' Two phase index builds not enabled , skipping test . ' ) ; <nl> + rst . stopSet ( ) ; <nl> + return ; <nl> + } <nl> + <nl> + / / Insert a document that cannot be indexed because it causes a CannotIndexParallelArrays error <nl> + / / code . <nl> + const badDoc = { <nl> + _id : 0 , <nl> + a : [ 0 , 1 ] , <nl> + b : [ 2 , 3 ] <nl> + } ; <nl> + assert . commandWorked ( coll . 
insert ( badDoc ) ) ; <nl> + <nl> + / / Start an index build on primary and secondary , but prevent the primary from scanning the <nl> + / / collection . Do not stop the secondary ; intentionally let it scan the invalid document , which we <nl> + / / will resolve later . <nl> + <nl> + / / We are using this fail point to pause the index build before it starts the <nl> + / / collection scan . This is important for this test because we are mutating the collection state <nl> + / / before the index builder is able to observe the invalid geo document . By comparison , <nl> + / / IndexBuildTest . pauseIndexBuilds ( ) stalls the index build in the middle of the collection scan . <nl> + assert . commandWorked ( <nl> + testDB . adminCommand ( { configureFailPoint : ' hangAfterSettingUpIndexBuild ' , mode : ' alwaysOn ' } ) ) ; <nl> + const createIdx = IndexBuildTest . startIndexBuild ( primary , coll . getFullName ( ) , { a : 1 , b : 1 } ) ; <nl> + <nl> + / / Wait for the index build to start on the secondary . <nl> + const secondary = rst . getSecondary ( ) ; <nl> + const secondaryDB = secondary . getDB ( testDB . getName ( ) ) ; <nl> + const secondaryColl = secondaryDB . getCollection ( coll . getName ( ) ) ; <nl> + IndexBuildTest . waitForIndexBuildToStart ( secondaryDB ) ; <nl> + IndexBuildTest . assertIndexes ( secondaryColl , 2 , [ " _id_ " ] , [ " a_1_b_1 " ] , { includeBuildUUIDs : true } ) ; <nl> + <nl> + / / Resolve the key generation error so that the index build succeeds on the primary before it scans <nl> + / / the invalid document . <nl> + assert . commandWorked ( coll . update ( { _id : 0 } , { a : 1 , b : 1 } ) ) ; <nl> + <nl> + / / Unblock the index build on the old primary during the collection scanning phase , and block after <nl> + / / the collection scan phase . <nl> + assert . commandWorked ( testDB . adminCommand ( <nl> + { configureFailPoint : ' hangAfterIndexBuildDumpsInsertsFromBulk ' , mode : ' alwaysOn ' } ) ) ; <nl> + assert . commandWorked ( <nl> + testDB . adminCommand ( { configureFailPoint : ' hangAfterSettingUpIndexBuild ' , mode : ' off ' } ) ) ; <nl> + <nl> + / / Step down the primary . <nl> + const stepDown = startParallelShell ( ( ) = > { <nl> + assert . commandWorked ( db . adminCommand ( { " replSetStepDown " : 60 , " force " : true } ) ) ; <nl> + } , primary . port ) ; <nl> + <nl> + / / Expect a failed createIndex command invocation in the parallel shell due to stepdown even though <nl> + / / the index build will continue in the background . <nl> + const exitCode = createIdx ( { checkExitSuccess : false } ) ; <nl> + assert . neq ( 0 , exitCode , ' expected shell to exit abnormally due to index build being terminated ' ) ; <nl> + checkLog . contains ( primary , ' Index build interrupted : ' ) ; <nl> + <nl> + / / Unblock the index build on the old primary during the collection scanning phase , this lets <nl> + / / stepdown complete . <nl> + assert . commandWorked ( testDB . adminCommand ( <nl> + { configureFailPoint : ' hangAfterIndexBuildDumpsInsertsFromBulk ' , mode : ' off ' } ) ) ; <nl> + <nl> + / / Wait for stepdown to complete . <nl> + stepDown ( ) ; <nl> + <nl> + const newPrimary = rst . getPrimary ( ) ; <nl> + const newPrimaryDB = newPrimary . getDB ( ' test ' ) ; <nl> + const newPrimaryColl = newPrimaryDB . getCollection ( ' test ' ) ; <nl> + <nl> + / / Ensure the old primary doesn ' t take over again . <nl> + assert . neq ( primary . port , newPrimary . 
port ) ; <nl> + <nl> + / / A new index should be present on the old primary after processing the commitIndexBuild oplog <nl> + / / entry from the new primary . <nl> + IndexBuildTest . waitForIndexBuildToStop ( testDB ) ; <nl> + IndexBuildTest . assertIndexes ( coll , 2 , [ ' _id_ ' , ' a_1_b_1 ' ] ) ; <nl> + <nl> + / / Check that index was created on the new primary . <nl> + IndexBuildTest . waitForIndexBuildToStop ( newPrimaryDB ) ; <nl> + IndexBuildTest . assertIndexes ( newPrimaryColl , 2 , [ ' _id_ ' , ' a_1_b_1 ' ] ) ; <nl> + <nl> + rst . stopSet ( ) ; <nl> + } ) ( ) ; <nl> mmm a / src / mongo / db / catalog / index_build_block . cpp <nl> ppp b / src / mongo / db / catalog / index_build_block . cpp <nl> Status IndexBuildBlock : : init ( OperationContext * opCtx , Collection * collection ) { <nl> _indexCatalogEntry = <nl> _indexCatalog - > createIndexEntry ( opCtx , std : : move ( descriptor ) , initFromDisk , isReadyIndex ) ; <nl> <nl> + / / Only track skipped records with two - phase index builds , which is indicated by a present build <nl> + / / UUID . <nl> + const auto trackSkipped = ( _buildUUID ) ? IndexBuildInterceptor : : TrackSkippedRecords : : kTrack <nl> + : IndexBuildInterceptor : : TrackSkippedRecords : : kNoTrack ; <nl> if ( _method = = IndexBuildMethod : : kHybrid ) { <nl> - _indexBuildInterceptor = std : : make_unique < IndexBuildInterceptor > ( opCtx , _indexCatalogEntry ) ; <nl> + _indexBuildInterceptor = <nl> + std : : make_unique < IndexBuildInterceptor > ( opCtx , _indexCatalogEntry , trackSkipped ) ; <nl> _indexCatalogEntry - > setIndexBuildInterceptor ( _indexBuildInterceptor . get ( ) ) ; <nl> } <nl> <nl> void IndexBuildBlock : : success ( OperationContext * opCtx , Collection * collection ) { <nl> UncommittedCollections : : get ( opCtx ) . hasExclusiveAccessToCollection ( opCtx , collection - > ns ( ) ) ) ; <nl> <nl> if ( _indexBuildInterceptor ) { <nl> + / / Skipped records are only checked when we complete an index build as primary . <nl> + const auto replCoord = repl : : ReplicationCoordinator : : get ( opCtx ) ; <nl> + const auto skippedRecordsTracker = _indexBuildInterceptor - > getSkippedRecordTracker ( ) ; <nl> + if ( skippedRecordsTracker & & replCoord - > canAcceptWritesFor ( opCtx , collection - > ns ( ) ) ) { <nl> + invariant ( skippedRecordsTracker - > areAllRecordsApplied ( opCtx ) ) ; <nl> + } <nl> + <nl> / / An index build should never be completed with writes remaining in the interceptor . <nl> invariant ( _indexBuildInterceptor - > areAllWritesApplied ( opCtx ) ) ; <nl> <nl> mmm a / src / mongo / db / catalog / index_builds_manager . cpp <nl> ppp b / src / mongo / db / catalog / index_builds_manager . cpp <nl> Status IndexBuildsManager : : setUpIndexBuild ( OperationContext * opCtx , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - StatusWith < IndexBuildRecoveryState > IndexBuildsManager : : recoverIndexBuild ( <nl> - const NamespaceString & nss , const UUID & buildUUID , std : : vector < std : : string > indexNames ) { <nl> - <nl> - / / TODO : Not yet implemented . 
<nl> - <nl> - return IndexBuildRecoveryState : : Building ; <nl> - } <nl> - <nl> Status IndexBuildsManager : : startBuildingIndex ( OperationContext * opCtx , <nl> Collection * collection , <nl> const UUID & buildUUID ) { <nl> Status IndexBuildsManager : : drainBackgroundWrites ( <nl> return builder - > drainBackgroundWrites ( opCtx , readSource , drainYieldPolicy ) ; <nl> } <nl> <nl> - Status IndexBuildsManager : : finishBuildingPhase ( const UUID & buildUUID ) { <nl> - auto multiIndexBlockPtr = _getBuilder ( buildUUID ) ; <nl> - / / TODO : verify that the index builder is in the expected state . <nl> - <nl> - / / TODO : Not yet implemented . <nl> - <nl> - return Status : : OK ( ) ; <nl> + Status IndexBuildsManager : : retrySkippedRecords ( OperationContext * opCtx , <nl> + const UUID & buildUUID , <nl> + Collection * collection ) { <nl> + auto builder = _getBuilder ( buildUUID ) ; <nl> + return builder - > retrySkippedRecords ( opCtx , collection ) ; <nl> } <nl> <nl> Status IndexBuildsManager : : checkIndexConstraintViolations ( OperationContext * opCtx , <nl> mmm a / src / mongo / db / catalog / index_builds_manager . h <nl> ppp b / src / mongo / db / catalog / index_builds_manager . h <nl> class IndexBuildsManager { <nl> OnInitFn onInit , <nl> SetupOptions options = { } ) ; <nl> <nl> - / * * <nl> - * Recovers the index build from its persisted state and sets it up to run again . <nl> - * <nl> - * Returns an enum reflecting the point up to which the build was recovered , so the caller knows <nl> - * where to recommence . <nl> - * <nl> - * TODO : Not yet implemented . <nl> - * / <nl> - StatusWith < IndexBuildRecoveryState > recoverIndexBuild ( const NamespaceString & nss , <nl> - const UUID & buildUUID , <nl> - std : : vector < std : : string > indexNames ) ; <nl> - <nl> / * * <nl> * Runs the scanning / insertion phase of the index build . . <nl> - * <nl> - * TODO : Not yet implemented . <nl> * / <nl> Status startBuildingIndex ( OperationContext * opCtx , <nl> Collection * collection , <nl> class IndexBuildsManager { <nl> IndexBuildInterceptor : : DrainYieldPolicy drainYieldPolicy ) ; <nl> <nl> / * * <nl> - * Persists information in the index catalog entry to reflect the successful completion of the <nl> - * scanning / insertion phase . <nl> - * <nl> - * TODO : Not yet implemented . <nl> + * Retries the key generation and insertion of records that were skipped during the scanning <nl> + * phase due to error suppression . <nl> * / <nl> - Status finishBuildingPhase ( const UUID & buildUUID ) ; <nl> + Status retrySkippedRecords ( OperationContext * opCtx , <nl> + const UUID & buildUUID , <nl> + Collection * collection ) ; <nl> <nl> / * * <nl> * Runs the index constraint violation checking phase of the index build . . <nl> - * <nl> - * TODO : Not yet implemented . <nl> * / <nl> Status checkIndexConstraintViolations ( OperationContext * opCtx , const UUID & buildUUID ) ; <nl> <nl> class IndexBuildsManager { <nl> * <nl> * Returns true if a build existed to be signaled , as opposed to having already finished and <nl> * been cleared away , or not having yet started . . <nl> - * <nl> - * TODO : Not yet fully implemented . The MultiIndexBlock : : abort function that is called is <nl> - * not yet implemented . <nl> * / <nl> bool abortIndexBuild ( const UUID & buildUUID , const std : : string & reason ) ; <nl> <nl> mmm a / src / mongo / db / catalog / index_catalog_impl . cpp <nl> ppp b / src / mongo / db / catalog / index_catalog_impl . 
cpp <nl> Status IndexCatalogImpl : : _indexFilteredRecords ( OperationContext * opCtx , <nl> & keys , <nl> & multikeyMetadataKeys , <nl> & multikeyPaths , <nl> - bsonRecord . id ) ; <nl> + bsonRecord . id , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> <nl> Status status = _indexKeys ( opCtx , <nl> index , <nl> void IndexCatalogImpl : : _unindexRecord ( OperationContext * opCtx , <nl> & keys , <nl> nullptr , <nl> nullptr , <nl> - loc ) ; <nl> + loc , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> <nl> / / Tests can enable this failpoint to produce index corruption scenarios where an index has <nl> / / extra keys . <nl> mmm a / src / mongo / db / catalog / multi_index_block . cpp <nl> ppp b / src / mongo / db / catalog / multi_index_block . cpp <nl> Status MultiIndexBlock : : drainBackgroundWrites ( <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status MultiIndexBlock : : retrySkippedRecords ( OperationContext * opCtx , Collection * collection ) { <nl> + for ( auto & & index : _indexes ) { <nl> + auto interceptor = index . block - > getEntry ( ) - > indexBuildInterceptor ( ) ; <nl> + if ( ! interceptor ) <nl> + continue ; <nl> + <nl> + auto status = interceptor - > retrySkippedRecords ( opCtx , collection ) ; <nl> + if ( ! status . isOK ( ) ) { <nl> + return status ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status MultiIndexBlock : : checkConstraints ( OperationContext * opCtx ) { <nl> _constraintsChecked = true ; <nl> <nl> mmm a / src / mongo / db / catalog / multi_index_block . h <nl> ppp b / src / mongo / db / catalog / multi_index_block . h <nl> class MultiIndexBlock { <nl> RecoveryUnit : : ReadSource readSource , <nl> IndexBuildInterceptor : : DrainYieldPolicy drainYieldPolicy ) ; <nl> <nl> + <nl> + / * * <nl> + * Retries key generation and insertion for all records skipped during the collection scanning <nl> + * phase . <nl> + * <nl> + * Index builds ignore key generation errors on secondaries . In steady - state replication , all <nl> + * writes from the primary are eventually applied , so an index build should always succeed when <nl> + * the primary commits . In two - phase index builds , a secondary may become primary in the middle <nl> + * of an index build , so it must ensure that before it finishes , it has indexed all documents in <nl> + * a collection , requiring a call to this function upon completion . <nl> + * / <nl> + Status retrySkippedRecords ( OperationContext * opCtx , Collection * collection ) ; <nl> + <nl> / * * <nl> * Check any constraints that may have been temporarily violated during the index build for <nl> * background indexes using an IndexBuildInterceptor to capture writes . The caller is <nl> class MultiIndexBlock { <nl> / / Duplicate key constraints should be checked at least once in the MultiIndexBlock . <nl> bool _constraintsChecked = false ; <nl> <nl> + / / A unique identifier associating this index build with a two - phase index build within a <nl> + / / replica set . <nl> boost : : optional < UUID > _buildUUID ; <nl> <nl> / / Protects member variables of this class declared below . <nl> mmm a / src / mongo / db / catalog / validate_adaptor . cpp <nl> ppp b / src / mongo / db / catalog / validate_adaptor . cpp <nl> Status ValidateAdaptor : : validateRecord ( OperationContext * opCtx , <nl> & documentKeySet , <nl> & multikeyMetadataKeys , <nl> & multikeyPaths , <nl> - recordId ) ; <nl> + recordId , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> <nl> if ( !
descriptor - > isMultikey ( ) & & <nl> iam - > shouldMarkIndexAsMultikey ( <nl> mmm a / src / mongo / db / exec / working_set_common . cpp <nl> ppp b / src / mongo / db / exec / working_set_common . cpp <nl> bool WorkingSetCommon : : fetch ( OperationContext * opCtx , <nl> & keys , <nl> multikeyMetadataKeys , <nl> multikeyPaths , <nl> - member - > recordId ) ; <nl> + member - > recordId , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> KeyString : : HeapBuilder keyString ( iam - > getSortedDataInterface ( ) - > getKeyStringVersion ( ) , <nl> memberKey . keyData , <nl> iam - > getSortedDataInterface ( ) - > getOrdering ( ) , <nl> mmm a / src / mongo / db / index / SConscript <nl> ppp b / src / mongo / db / index / SConscript <nl> env . Library ( <nl> ] , <nl> ) <nl> <nl> + env . Library ( <nl> + target = ' skipped_record_tracker ' , <nl> + source = [ <nl> + ' skipped_record_tracker . cpp ' , <nl> + ] , <nl> + LIBDEPS = [ <nl> + ' $ BUILD_DIR / mongo / db / service_context ' , <nl> + ] , <nl> + LIBDEPS_PRIVATE = [ <nl> + ' $ BUILD_DIR / mongo / base ' , <nl> + ' $ BUILD_DIR / mongo / db / curop ' , <nl> + ] , <nl> + ) <nl> + <nl> env . Library ( <nl> target = ' key_generator ' , <nl> source = [ <nl> serveronlyEnv . Library ( <nl> ' index_descriptor ' , <nl> ] , <nl> LIBDEPS_PRIVATE = [ <nl> + ' skipped_record_tracker ' , <nl> ' $ BUILD_DIR / mongo / db / logical_clock ' , <nl> ' $ BUILD_DIR / mongo / idl / server_parameter ' , <nl> ] , <nl> env . Library ( <nl> LIBDEPS = [ <nl> ' $ BUILD_DIR / mongo / base ' , <nl> ' duplicate_key_tracker ' , <nl> + ' skipped_record_tracker ' , <nl> ] , <nl> LIBDEPS_PRIVATE = [ <nl> ' $ BUILD_DIR / mongo / db / catalog / index_timestamp_helper ' , <nl> mmm a / src / mongo / db / index / index_access_method . cpp <nl> ppp b / src / mongo / db / index / index_access_method . cpp <nl> <nl> # include " mongo / db / client . h " <nl> # include " mongo / db / concurrency / write_conflict_exception . h " <nl> # include " mongo / db / curop . h " <nl> + # include " mongo / db / index / index_build_interceptor . h " <nl> # include " mongo / db / index / index_descriptor . h " <nl> # include " mongo / db / jsobj . h " <nl> # include " mongo / db / keypattern . h " <nl> struct BtreeExternalSortComparison { <nl> <nl> AbstractIndexAccessMethod : : AbstractIndexAccessMethod ( IndexCatalogEntry * btreeState , <nl> std : : unique_ptr < SortedDataInterface > btree ) <nl> - : _btreeState ( btreeState ) , <nl> + : _indexCatalogEntry ( btreeState ) , <nl> _descriptor ( btreeState - > descriptor ( ) ) , <nl> _newInterface ( std : : move ( btree ) ) { <nl> verify ( IndexDescriptor : : isIndexVersionSupported ( _descriptor - > version ( ) ) ) ; <nl> bool AbstractIndexAccessMethod : : isFatalError ( OperationContext * opCtx , <nl> <nl> / / A document might be indexed multiple times during a background index build if it moves ahead <nl> / / of the cursor ( e . g . via an update ) . We test this scenario and swallow the error accordingly . <nl> - if ( status = = ErrorCodes : : DuplicateKeyValue & & ! _btreeState - > isReady ( opCtx ) ) { <nl> + if ( status = = ErrorCodes : : DuplicateKeyValue & & ! 
_indexCatalogEntry - > isReady ( opCtx ) ) { <nl> LOG ( 3 ) < < " KeyString " < < key < < " already in index during background indexing ( ok ) " ; <nl> return false ; <nl> } <nl> Status AbstractIndexAccessMethod : : insert ( OperationContext * opCtx , <nl> const RecordId & loc , <nl> const InsertDeleteOptions & options , <nl> InsertResult * result ) { <nl> - invariant ( options . fromIndexBuilder | | ! _btreeState - > isHybridBuilding ( ) ) ; <nl> + invariant ( options . fromIndexBuilder | | ! _indexCatalogEntry - > isHybridBuilding ( ) ) ; <nl> <nl> KeyStringSet multikeyMetadataKeys ; <nl> KeyStringSet keys ; <nl> MultikeyPaths multikeyPaths ; <nl> <nl> - / / Delegate to the subclass . <nl> getKeys ( obj , <nl> options . getKeysMode , <nl> GetKeysContext : : kReadOrAddKeys , <nl> & keys , <nl> & multikeyMetadataKeys , <nl> & multikeyPaths , <nl> - loc ) ; <nl> + loc , <nl> + kNoopOnSuppressedErrorFn ) ; <nl> <nl> return insertKeys ( opCtx , <nl> { keys . begin ( ) , keys . end ( ) } , <nl> Status AbstractIndexAccessMethod : : insertKeys ( OperationContext * opCtx , <nl> } <nl> <nl> if ( shouldMarkIndexAsMultikey ( keys . size ( ) , multikeyMetadataKeys , multikeyPaths ) ) { <nl> - _btreeState - > setMultikey ( opCtx , multikeyPaths ) ; <nl> + _indexCatalogEntry - > setMultikey ( opCtx , multikeyPaths ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> RecordId AbstractIndexAccessMethod : : findSingle ( OperationContext * opCtx , <nl> const BSONObj & requestedKey ) const { <nl> / / Generate the key for this index . <nl> KeyString : : Value actualKey = [ & ] ( ) { <nl> - if ( _btreeState - > getCollator ( ) ) { <nl> + if ( _indexCatalogEntry - > getCollator ( ) ) { <nl> / / For performance , call get keys only if there is a non - simple collation . <nl> KeyStringSet keys ; <nl> KeyStringSet * multikeyMetadataKeys = nullptr ; <nl> RecordId AbstractIndexAccessMethod : : findSingle ( OperationContext * opCtx , <nl> GetKeysContext : : kReadOrAddKeys , <nl> & keys , <nl> multikeyMetadataKeys , <nl> - multikeyPaths ) ; <nl> + multikeyPaths , <nl> + boost : : none , / / loc <nl> + kNoopOnSuppressedErrorFn ) ; <nl> invariant ( keys . size ( ) = = 1 ) ; <nl> return * keys . begin ( ) ; <nl> } else { <nl> void AbstractIndexAccessMethod : : prepareUpdate ( OperationContext * opCtx , <nl> & ticket - > oldKeys , <nl> nullptr , <nl> nullptr , <nl> - record ) ; <nl> + record , <nl> + kNoopOnSuppressedErrorFn ) ; <nl> } <nl> <nl> if ( ! indexFilter | | indexFilter - > matchesBSON ( to ) ) { <nl> void AbstractIndexAccessMethod : : prepareUpdate ( OperationContext * opCtx , <nl> & ticket - > newKeys , <nl> & ticket - > newMultikeyMetadataKeys , <nl> & ticket - > newMultikeyPaths , <nl> - record ) ; <nl> + record , <nl> + kNoopOnSuppressedErrorFn ) ; <nl> } <nl> <nl> ticket - > loc = record ; <nl> Status AbstractIndexAccessMethod : : update ( OperationContext * opCtx , <nl> const UpdateTicket & ticket , <nl> int64_t * numInserted , <nl> int64_t * numDeleted ) { <nl> - invariant ( ! _btreeState - > isHybridBuilding ( ) ) ; <nl> + invariant ( ! _indexCatalogEntry - > isHybridBuilding ( ) ) ; <nl> invariant ( ticket . newKeys . size ( ) = = <nl> ticket . oldKeys . size ( ) + ticket . added . size ( ) - ticket . removed . size ( ) ) ; <nl> invariant ( numInserted ) ; <nl> Status AbstractIndexAccessMethod : : update ( OperationContext * opCtx , <nl> ticket . newKeys . size ( ) , <nl> { ticket . newMultikeyMetadataKeys . begin ( ) , ticket . newMultikeyMetadataKeys . end ( ) } , <nl> ticket . 
newMultikeyPaths ) ) { <nl> - _btreeState - > setMultikey ( opCtx , ticket . newMultikeyPaths ) ; <nl> + _indexCatalogEntry - > setMultikey ( opCtx , ticket . newMultikeyPaths ) ; <nl> } <nl> <nl> * numDeleted = ticket . removed . size ( ) ; <nl> Status AbstractIndexAccessMethod : : compact ( OperationContext * opCtx ) { <nl> <nl> class AbstractIndexAccessMethod : : BulkBuilderImpl : public IndexAccessMethod : : BulkBuilder { <nl> public : <nl> - BulkBuilderImpl ( const IndexAccessMethod * index , <nl> + BulkBuilderImpl ( IndexCatalogEntry * indexCatalogEntry , <nl> const IndexDescriptor * descriptor , <nl> size_t maxMemoryUsageBytes ) ; <nl> <nl> class AbstractIndexAccessMethod : : BulkBuilderImpl : public IndexAccessMethod : : Bul <nl> <nl> private : <nl> std : : unique_ptr < Sorter > _sorter ; <nl> - const IndexAccessMethod * _real ; <nl> + IndexCatalogEntry * _indexCatalogEntry ; <nl> int64_t _keysInserted = 0 ; <nl> <nl> / / Set to true if any document added to the BulkBuilder causes the index to become multikey . <nl> class AbstractIndexAccessMethod : : BulkBuilderImpl : public IndexAccessMethod : : Bul <nl> <nl> std : : unique_ptr < IndexAccessMethod : : BulkBuilder > AbstractIndexAccessMethod : : initiateBulk ( <nl> size_t maxMemoryUsageBytes ) { <nl> - return std : : make_unique < BulkBuilderImpl > ( this , _descriptor , maxMemoryUsageBytes ) ; <nl> + return std : : make_unique < BulkBuilderImpl > ( _indexCatalogEntry , _descriptor , maxMemoryUsageBytes ) ; <nl> } <nl> <nl> - AbstractIndexAccessMethod : : BulkBuilderImpl : : BulkBuilderImpl ( const IndexAccessMethod * index , <nl> + AbstractIndexAccessMethod : : BulkBuilderImpl : : BulkBuilderImpl ( IndexCatalogEntry * index , <nl> const IndexDescriptor * descriptor , <nl> size_t maxMemoryUsageBytes ) <nl> - : _sorter ( Sorter : : make ( SortOptions ( ) <nl> - . TempDir ( storageGlobalParams . dbpath + " / _tmp " ) <nl> - . ExtSortAllowed ( ) <nl> - . MaxMemoryUsageBytes ( maxMemoryUsageBytes ) , <nl> - BtreeExternalSortComparison ( ) , <nl> - std : : pair < KeyString : : Value : : SorterDeserializeSettings , <nl> - mongo : : NullValue : : SorterDeserializeSettings > ( <nl> - { index - > getSortedDataInterface ( ) - > getKeyStringVersion ( ) } , { } ) ) ) , <nl> - _real ( index ) { } <nl> + : _sorter ( Sorter : : make ( <nl> + SortOptions ( ) <nl> + . TempDir ( storageGlobalParams . dbpath + " / _tmp " ) <nl> + . ExtSortAllowed ( ) <nl> + . MaxMemoryUsageBytes ( maxMemoryUsageBytes ) , <nl> + BtreeExternalSortComparison ( ) , <nl> + std : : pair < KeyString : : Value : : SorterDeserializeSettings , <nl> + mongo : : NullValue : : SorterDeserializeSettings > ( <nl> + { index - > accessMethod ( ) - > getSortedDataInterface ( ) - > getKeyStringVersion ( ) } , { } ) ) ) , <nl> + _indexCatalogEntry ( index ) { } <nl> <nl> Status AbstractIndexAccessMethod : : BulkBuilderImpl : : insert ( OperationContext * opCtx , <nl> const BSONObj & obj , <nl> Status AbstractIndexAccessMethod : : BulkBuilderImpl : : insert ( OperationContext * opCt <nl> MultikeyPaths multikeyPaths ; <nl> <nl> try { <nl> - _real - > getKeys ( obj , <nl> - options . getKeysMode , <nl> - GetKeysContext : : kReadOrAddKeys , <nl> - & keys , <nl> - & _multikeyMetadataKeys , <nl> - & multikeyPaths , <nl> - loc ) ; <nl> + _indexCatalogEntry - > accessMethod ( ) - > getKeys ( <nl> + obj , <nl> + options . 
getKeysMode , <nl> + GetKeysContext : : kReadOrAddKeys , <nl> + & keys , <nl> + & _multikeyMetadataKeys , <nl> + & multikeyPaths , <nl> + loc , <nl> + [ & ] ( Status status , const BSONObj & , boost : : optional < RecordId > ) { <nl> + / / If a key generation error was suppressed , record the document as " skipped " so the <nl> + / / index builder can retry at a point when data is consistent . <nl> + auto interceptor = _indexCatalogEntry - > indexBuildInterceptor ( ) ; <nl> + if ( interceptor & & interceptor - > getSkippedRecordTracker ( ) ) { <nl> + LOG ( 1 ) < < " Recording suppressed key generation error to retry later : " < < status <nl> + < < " on " < < loc < < " : " < < redact ( obj ) ; <nl> + interceptor - > getSkippedRecordTracker ( ) - > record ( opCtx , loc ) ; <nl> + } <nl> + } ) ; <nl> } catch ( . . . ) { <nl> return exceptionToStatus ( ) ; <nl> } <nl> Status AbstractIndexAccessMethod : : BulkBuilderImpl : : insert ( OperationContext * opCt <nl> } <nl> <nl> _isMultiKey = _isMultiKey | | <nl> - _real - > shouldMarkIndexAsMultikey ( <nl> + _indexCatalogEntry - > accessMethod ( ) - > shouldMarkIndexAsMultikey ( <nl> keys . size ( ) , <nl> { _multikeyMetadataKeys . begin ( ) , _multikeyMetadataKeys . end ( ) } , <nl> multikeyPaths ) ; <nl> Status AbstractIndexAccessMethod : : commitBulk ( OperationContext * opCtx , <nl> } <nl> <nl> void AbstractIndexAccessMethod : : setIndexIsMultikey ( OperationContext * opCtx , MultikeyPaths paths ) { <nl> - _btreeState - > setMultikey ( opCtx , paths ) ; <nl> + _indexCatalogEntry - > setMultikey ( opCtx , paths ) ; <nl> } <nl> <nl> + IndexAccessMethod : : OnSuppressedErrorFn IndexAccessMethod : : kNoopOnSuppressedErrorFn = <nl> + [ ] ( Status status , const BSONObj & obj , boost : : optional < RecordId > loc ) { <nl> + LOG ( 1 ) < < " Suppressed key generation error : " < < redact ( status ) <nl> + < < " when getting index keys for " < < loc < < " : " < < redact ( obj ) ; <nl> + } ; <nl> + <nl> void AbstractIndexAccessMethod : : getKeys ( const BSONObj & obj , <nl> GetKeysMode mode , <nl> GetKeysContext context , <nl> KeyStringSet * keys , <nl> KeyStringSet * multikeyMetadataKeys , <nl> MultikeyPaths * multikeyPaths , <nl> - boost : : optional < RecordId > id ) const { <nl> + boost : : optional < RecordId > id , <nl> + OnSuppressedErrorFn onSuppressedError ) const { <nl> static stdx : : unordered_set < int > whiteList { ErrorCodes : : CannotBuildIndexKeys , <nl> / / Btree <nl> ErrorCodes : : CannotIndexParallelArrays , <nl> void AbstractIndexAccessMethod : : getKeys ( const BSONObj & obj , <nl> <nl> / / If the document applies to the filter ( which means that it should have never been <nl> / / indexed ) , do not suppress the error . <nl> - const MatchExpression * filter = _btreeState - > getFilterExpression ( ) ; <nl> + const MatchExpression * filter = _indexCatalogEntry - > getFilterExpression ( ) ; <nl> if ( mode = = GetKeysMode : : kRelaxConstraintsUnfiltered & & filter & & <nl> filter - > matchesBSON ( obj ) ) { <nl> throw ; <nl> } <nl> <nl> - LOG ( 1 ) < < " Ignoring indexing error for idempotency reasons : " < < redact ( ex ) <nl> - < < " when getting index keys of " < < redact ( obj ) ; <nl> + onSuppressedError ( ex . toStatus ( ) , obj , id ) ; <nl> } <nl> } <nl> <nl> mmm a / src / mongo / db / index / index_access_method . h <nl> ppp b / src / mongo / db / index / index_access_method . h <nl> class IndexAccessMethod { <nl> * BSONObjSet with any multikey metadata keys generated while processing the document .
These <nl> * keys are not associated with the document itself , but instead represent multi - key path <nl> * information that must be stored in a reserved keyspace within the index . <nl> + * <nl> + * If any key generation errors are encountered and suppressed due to the provided GetKeysMode , <nl> + * ' onSuppressedErrorFn ' is called . <nl> * / <nl> + using OnSuppressedErrorFn = <nl> + std : : function < void ( Status status , const BSONObj & obj , boost : : optional < RecordId > loc ) > ; <nl> virtual void getKeys ( const BSONObj & obj , <nl> GetKeysMode mode , <nl> GetKeysContext context , <nl> KeyStringSet * keys , <nl> KeyStringSet * multikeyMetadataKeys , <nl> MultikeyPaths * multikeyPaths , <nl> - boost : : optional < RecordId > id ) const = 0 ; <nl> + boost : : optional < RecordId > id , <nl> + OnSuppressedErrorFn onSuppressedError ) const = 0 ; <nl> + <nl> + static OnSuppressedErrorFn kNoopOnSuppressedErrorFn ; <nl> <nl> / * * <nl> * Given the set of keys , multikeyMetadataKeys and multikeyPaths generated by a particular <nl> class AbstractIndexAccessMethod : public IndexAccessMethod { <nl> KeyStringSet * keys , <nl> KeyStringSet * multikeyMetadataKeys , <nl> MultikeyPaths * multikeyPaths , <nl> - boost : : optional < RecordId > id = boost : : none ) const final ; <nl> + boost : : optional < RecordId > id , <nl> + OnSuppressedErrorFn onSuppressedError ) const final ; <nl> <nl> bool shouldMarkIndexAsMultikey ( size_t numberOfKeys , <nl> const std : : vector < KeyString : : Value > & multikeyMetadataKeys , <nl> class AbstractIndexAccessMethod : public IndexAccessMethod { <nl> MultikeyPaths * multikeyPaths , <nl> boost : : optional < RecordId > id ) const = 0 ; <nl> <nl> - IndexCatalogEntry * const _btreeState ; / / owned by IndexCatalogEntry <nl> + IndexCatalogEntry * const _indexCatalogEntry ; / / owned by IndexCatalog <nl> const IndexDescriptor * const _descriptor ; <nl> <nl> private : <nl> mmm a / src / mongo / db / index / index_build_interceptor . cpp <nl> ppp b / src / mongo / db / index / index_build_interceptor . 
cpp <nl> bool IndexBuildInterceptor : : typeCanFastpathMultikeyUpdates ( IndexType indexType ) <nl> return ( indexType = = INDEX_BTREE ) ; <nl> } <nl> <nl> - IndexBuildInterceptor : : IndexBuildInterceptor ( OperationContext * opCtx , IndexCatalogEntry * entry ) <nl> + IndexBuildInterceptor : : IndexBuildInterceptor ( OperationContext * opCtx , <nl> + IndexCatalogEntry * entry , <nl> + TrackSkippedRecords trackSkippedRecords ) <nl> : _indexCatalogEntry ( entry ) , <nl> _sideWritesTable ( <nl> opCtx - > getServiceContext ( ) - > getStorageEngine ( ) - > makeTemporaryRecordStore ( opCtx ) ) , <nl> _sideWritesCounter ( std : : make_shared < AtomicWord < long long > > ( ) ) { <nl> <nl> + if ( TrackSkippedRecords : : kTrack = = trackSkippedRecords ) { <nl> + _skippedRecordTracker = std : : make_unique < SkippedRecordTracker > ( _indexCatalogEntry ) ; <nl> + } <nl> + <nl> if ( entry - > descriptor ( ) - > unique ( ) ) { <nl> _duplicateKeyTracker = std : : make_unique < DuplicateKeyTracker > ( opCtx , entry ) ; <nl> } <nl> void IndexBuildInterceptor : : deleteTemporaryTables ( OperationContext * opCtx ) { <nl> if ( _duplicateKeyTracker ) { <nl> _duplicateKeyTracker - > deleteTemporaryTable ( opCtx ) ; <nl> } <nl> + if ( _skippedRecordTracker ) { <nl> + _skippedRecordTracker - > deleteTemporaryTable ( opCtx ) ; <nl> + } <nl> } <nl> <nl> Status IndexBuildInterceptor : : recordDuplicateKeys ( OperationContext * opCtx , <nl> Status IndexBuildInterceptor : : sideWrite ( OperationContext * opCtx , <nl> Op op , <nl> int64_t * const numKeysOut ) { <nl> invariant ( opCtx - > lockState ( ) - > inAWriteUnitOfWork ( ) ) ; <nl> + <nl> / / Maintain parity with IndexAccessMethods handling of key counting . Only include <nl> / / ` multikeyMetadataKeys ` when inserting . <nl> * numKeysOut = keys . size ( ) + ( op = = Op : : kInsert ? multikeyMetadataKeys . size ( ) : 0 ) ; <nl> Status IndexBuildInterceptor : : sideWrite ( OperationContext * opCtx , <nl> return _sideWritesTable - > rs ( ) - > insertRecords ( opCtx , & records , timestamps ) ; <nl> } <nl> <nl> + Status IndexBuildInterceptor : : retrySkippedRecords ( OperationContext * opCtx , <nl> + const Collection * collection ) { <nl> + if ( ! _skippedRecordTracker ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> + return _skippedRecordTracker - > retrySkippedRecords ( opCtx , collection ) ; <nl> + } <nl> + <nl> + <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / index / index_build_interceptor . h <nl> ppp b / src / mongo / db / index / index_build_interceptor . h <nl> <nl> # include " mongo / db / index / duplicate_key_tracker . h " <nl> # include " mongo / db / index / index_access_method . h " <nl> # include " mongo / db / index / multikey_paths . h " <nl> + # include " mongo / db / index / skipped_record_tracker . h " <nl> # include " mongo / db / namespace_string . h " <nl> # include " mongo / db / storage / temporary_record_store . h " <nl> # include " mongo / platform / atomic_word . h " <nl> class IndexBuildInterceptor { <nl> <nl> enum class Op { kInsert , kDelete } ; <nl> <nl> + / * * <nl> + * Indicates whether this interceptor will allow tracking of documents skipped due to key <nl> + * generation errors . When ' kTrack ' , a SkippedRecordTracker is created . 
<nl> + * / <nl> + enum class TrackSkippedRecords { kNoTrack , kTrack } ; <nl> + <nl> static bool typeCanFastpathMultikeyUpdates ( IndexType type ) ; <nl> <nl> / * * <nl> class IndexBuildInterceptor { <nl> * <nl> * deleteTemporaryTable ( ) must be called before destruction to delete the temporary tables . <nl> * / <nl> - IndexBuildInterceptor ( OperationContext * opCtx , IndexCatalogEntry * entry ) ; <nl> + IndexBuildInterceptor ( OperationContext * opCtx , <nl> + IndexCatalogEntry * entry , <nl> + TrackSkippedRecords trackSkippedRecords ) ; <nl> <nl> / * * <nl> * Deletes the temporary side writes and duplicate key constraint violations tables . Must be <nl> class IndexBuildInterceptor { <nl> RecoveryUnit : : ReadSource readSource , <nl> DrainYieldPolicy drainYieldPolicy ) ; <nl> <nl> + SkippedRecordTracker * getSkippedRecordTracker ( ) { <nl> + return _skippedRecordTracker . get ( ) ; <nl> + } <nl> + <nl> + const SkippedRecordTracker * getSkippedRecordTracker ( ) const { <nl> + return _skippedRecordTracker . get ( ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Tries to index previously skipped records . For each record , if the new indexing attempt is <nl> + * successful , keys are written directly to the index . Unsuccessful key generation or writes <nl> + * will return errors . <nl> + * / <nl> + Status retrySkippedRecords ( OperationContext * opCtx , const Collection * collection ) ; <nl> + <nl> / * * <nl> * Returns ' true ' if there are no visible records remaining to be applied from the side writes <nl> * table . Ensure that this returns ' true ' when an index build is completed . <nl> class IndexBuildInterceptor { <nl> * / <nl> bool areAllConstraintsChecked ( OperationContext * opCtx ) const ; <nl> <nl> + <nl> / * * <nl> * When an index builder wants to commit , use this to retrieve any recorded multikey paths <nl> * that were tracked during the build . <nl> class IndexBuildInterceptor { <nl> / / The entry for the index that is being built . <nl> IndexCatalogEntry * _indexCatalogEntry ; <nl> <nl> - / / This temporary record store is owned by the interceptor and dropped along with it . <nl> + / / This temporary record store records intercepted keys that will be written into the index by <nl> + / / calling drainWritesIntoIndex ( ) . It is owned by the interceptor and dropped along with it . <nl> std : : unique_ptr < TemporaryRecordStore > _sideWritesTable ; <nl> <nl> + / / Records RecordIds that have been skipped due to indexing errors . <nl> + std : : unique_ptr < SkippedRecordTracker > _skippedRecordTracker ; <nl> + <nl> std : : unique_ptr < DuplicateKeyTracker > _duplicateKeyTracker ; <nl> <nl> int64_t _numApplied { 0 } ; <nl> new file mode 100644 <nl> index 000000000000 . . c3ef41e8e327 <nl> mmm / dev / null <nl> ppp b / src / mongo / db / index / skipped_record_tracker . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2020 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . 
mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # define MONGO_LOG_DEFAULT_COMPONENT : : mongo : : logger : : LogComponent : : kIndex <nl> + <nl> + # include " mongo / db / index / skipped_record_tracker . h " <nl> + <nl> + # include " mongo / db / catalog / collection . h " <nl> + # include " mongo / db / curop . h " <nl> + # include " mongo / db / index / index_access_method . h " <nl> + # include " mongo / util / log . h " <nl> + <nl> + namespace mongo { <nl> + namespace { <nl> + static constexpr StringData kRecordIdField = " recordId " _sd ; <nl> + } <nl> + <nl> + void SkippedRecordTracker : : deleteTemporaryTable ( OperationContext * opCtx ) { <nl> + if ( _skippedRecordsTable ) { <nl> + _skippedRecordsTable - > deleteTemporaryTable ( opCtx ) ; <nl> + } <nl> + } <nl> + <nl> + void SkippedRecordTracker : : record ( OperationContext * opCtx , const RecordId & recordId ) { <nl> + auto toInsert = BSON ( kRecordIdField < < recordId . repr ( ) ) ; <nl> + <nl> + / / Lazily initialize table when we record the first document . <nl> + if ( ! _skippedRecordsTable ) { <nl> + _skippedRecordsTable = <nl> + opCtx - > getServiceContext ( ) - > getStorageEngine ( ) - > makeTemporaryRecordStore ( opCtx ) ; <nl> + } <nl> + uassertStatusOK ( <nl> + _skippedRecordsTable - > rs ( ) <nl> + - > insertRecord ( opCtx , toInsert . objdata ( ) , toInsert . objsize ( ) , Timestamp : : min ( ) ) <nl> + . getStatus ( ) ) ; <nl> + } <nl> + <nl> + bool SkippedRecordTracker : : areAllRecordsApplied ( OperationContext * opCtx ) const { <nl> + if ( ! _skippedRecordsTable ) { <nl> + return true ; <nl> + } <nl> + auto cursor = _skippedRecordsTable - > rs ( ) - > getCursor ( opCtx ) ; <nl> + auto record = cursor - > next ( ) ; <nl> + <nl> + / / The table is empty only when all writes are applied . <nl> + if ( ! record ) <nl> + return true ; <nl> + <nl> + return false ; <nl> + } <nl> + <nl> + Status SkippedRecordTracker : : retrySkippedRecords ( OperationContext * opCtx , <nl> + const Collection * collection ) { <nl> + dassert ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( collection - > ns ( ) , MODE_X ) ) ; <nl> + if ( ! _skippedRecordsTable ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + InsertDeleteOptions options ; <nl> + collection - > getIndexCatalog ( ) - > prepareInsertDeleteOptions ( <nl> + opCtx , _indexCatalogEntry - > descriptor ( ) , & options ) ; <nl> + options . fromIndexBuilder = true ; <nl> + <nl> + / / This should only be called when constraints are being enforced , on a primary . It does not <nl> + / / make sense , nor is it necessary for this to be called on a secondary . 
<nl> + invariant ( options . getKeysMode = = IndexAccessMethod : : GetKeysMode : : kEnforceConstraints ) ; <nl> + <nl> + static const char * curopMessage = " Index Build : retrying skipped records " ; <nl> + ProgressMeterHolder progress ; <nl> + { <nl> + stdx : : unique_lock < Client > lk ( * opCtx - > getClient ( ) ) ; <nl> + progress . set ( <nl> + CurOp : : get ( opCtx ) - > setProgress_inlock ( curopMessage , _skippedRecordCounter . load ( ) , 1 ) ) ; <nl> + } <nl> + <nl> + auto recordStore = _skippedRecordsTable - > rs ( ) ; <nl> + auto cursor = recordStore - > getCursor ( opCtx ) ; <nl> + int resolved = 0 ; <nl> + while ( auto record = cursor - > next ( ) ) { <nl> + const BSONObj doc = record - > data . toBson ( ) ; <nl> + <nl> + / / This is the RecordId of the skipped record from the collection . <nl> + const RecordId skippedRecordId ( doc [ kRecordIdField ] . Long ( ) ) ; <nl> + <nl> + WriteUnitOfWork wuow ( opCtx ) ; <nl> + <nl> + / / If the record still exists , get a potentially new version of the document to index . <nl> + auto collCursor = collection - > getCursor ( opCtx ) ; <nl> + auto skippedRecord = collCursor - > seekExact ( skippedRecordId ) ; <nl> + if ( skippedRecord ) { <nl> + const auto skippedDoc = skippedRecord - > data . toBson ( ) ; <nl> + LOG ( 2 ) < < " reapplying skipped RecordID " < < skippedRecordId < < " : " < < skippedDoc ; <nl> + <nl> + try { <nl> + / / Because constraint enforcement is set , this will throw if there are any indexing <nl> + / / errors , instead of writing back to the skipped records table , which would <nl> + / / normally happen if constraints were relaxed . <nl> + InsertResult result ; <nl> + auto status = _indexCatalogEntry - > accessMethod ( ) - > insert ( <nl> + opCtx , skippedDoc , skippedRecordId , options , & result ) ; <nl> + if ( ! status . isOK ( ) ) { <nl> + return status ; <nl> + } <nl> + } catch ( const DBException & ex ) { <nl> + return ex . toStatus ( ) ; <nl> + } <nl> + } <nl> + <nl> + / / Delete the record so that it is not applied more than once . <nl> + recordStore - > deleteRecord ( opCtx , record - > id ) ; <nl> + <nl> + cursor - > save ( ) ; <nl> + wuow . commit ( ) ; <nl> + cursor - > restore ( ) ; <nl> + <nl> + progress - > hit ( ) ; <nl> + resolved + + ; <nl> + } <nl> + progress - > finished ( ) ; <nl> + <nl> + int logLevel = ( resolved > 0 ) ? 0 : 1 ; <nl> + LOG ( logLevel ) < < " index build : reapplied " < < resolved < < " skipped records for index : " <nl> + < < _indexCatalogEntry - > descriptor ( ) - > indexName ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + } / / namespace mongo <nl> new file mode 100644 <nl> index 000000000000 . . 2858e3305e51 <nl> mmm / dev / null <nl> ppp b / src / mongo / db / index / skipped_record_tracker . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2020 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . 
com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include " mongo / db / catalog / index_catalog_entry . h " <nl> + # include " mongo / db / operation_context . h " <nl> + # include " mongo / db / storage / temporary_record_store . h " <nl> + # include " mongo / platform / atomic_word . h " <nl> + <nl> + namespace mongo { <nl> + <nl> + class IndexCatalogEntry ; <nl> + <nl> + / * * <nl> + * Records the RecordIds of documents that could not be indexed because key generation failed <nl> + * with a suppressed error . The RecordIds are backed by a temporary table that is created and <nl> + * destroyed by this tracker . <nl> + * / <nl> + class SkippedRecordTracker { <nl> + SkippedRecordTracker ( const SkippedRecordTracker & ) = delete ; <nl> + <nl> + public : <nl> + SkippedRecordTracker ( IndexCatalogEntry * indexCatalogEntry ) <nl> + : _indexCatalogEntry ( indexCatalogEntry ) { } <nl> + <nl> + / * * <nl> + * Records a RecordId that was unable to be indexed due to a key generation error . At the <nl> + * conclusion of the build , the key generation and insertion into the index should be attempted <nl> + * again by calling ' retrySkippedRecords ' . <nl> + * / <nl> + void record ( OperationContext * opCtx , const RecordId & recordId ) ; <nl> + <nl> + / * * <nl> + * Deletes the temporary table managed by this tracker . This call is required , and is a no - op <nl> + * when the table is empty or has not yet been initialized . <nl> + * / <nl> + void deleteTemporaryTable ( OperationContext * opCtx ) ; <nl> + <nl> + / * * <nl> + * Returns true if the temporary table is empty . <nl> + * / <nl> + bool areAllRecordsApplied ( OperationContext * opCtx ) const ; <nl> + <nl> + / * * <nl> + * Attempts to generate keys for each skipped record and insert them into the index . Returns <nl> + * OK if all records were either indexed or no longer exist . <nl> + * / <nl> + Status retrySkippedRecords ( OperationContext * opCtx , const Collection * collection ) ; <nl> + <nl> + private : <nl> + IndexCatalogEntry * _indexCatalogEntry ; <nl> + <nl> + / / This temporary record store is owned by this tracker and should be dropped along with it <nl> + / / via a call to deleteTemporaryTable ( ) . <nl> + std : : unique_ptr < TemporaryRecordStore > _skippedRecordsTable ; <nl> + <nl> + AtomicWord < std : : uint32_t > _skippedRecordCounter { 0 } ; <nl> + } ; <nl> + <nl> + } / / namespace mongo <nl> mmm a / src / mongo / db / index / wildcard_access_method . cpp <nl> ppp b / src / mongo / db / index / wildcard_access_method .
cpp <nl> WildcardAccessMethod : : WildcardAccessMethod ( IndexCatalogEntry * wildcardState , <nl> : AbstractIndexAccessMethod ( wildcardState , std : : move ( btree ) ) , <nl> _keyGen ( _descriptor - > keyPattern ( ) , <nl> _descriptor - > pathProjection ( ) , <nl> - _btreeState - > getCollator ( ) , <nl> + _indexCatalogEntry - > getCollator ( ) , <nl> getSortedDataInterface ( ) - > getKeyStringVersion ( ) , <nl> getSortedDataInterface ( ) - > getOrdering ( ) ) { } <nl> <nl> mmm a / src / mongo / db / index_builds_coordinator . cpp <nl> ppp b / src / mongo / db / index_builds_coordinator . cpp <nl> void IndexBuildsCoordinator : : _insertKeysFromSideTablesAndCommit ( <nl> RecoveryUnit : : ReadSource : : kUnset , <nl> IndexBuildInterceptor : : DrainYieldPolicy : : kNoYield ) ) ; <nl> <nl> + / / Retry indexing records that may have been skipped while relaxing constraints ( i . e . as <nl> + / / secondary ) , but only if we are primary and committing the index build and during two - phase <nl> + / / builds . Single - phase index builds are not resilient to state transitions . <nl> + auto replCoord = repl : : ReplicationCoordinator : : get ( opCtx ) ; <nl> + if ( IndexBuildProtocol : : kTwoPhase = = replState - > protocol & & <nl> + replCoord - > canAcceptWritesFor ( opCtx , collection - > ns ( ) ) ) { <nl> + uassertStatusOK ( <nl> + _indexBuildsManager . retrySkippedRecords ( opCtx , replState - > buildUUID , collection ) ) ; <nl> + } <nl> + <nl> / / Index constraint checking phase . <nl> uassertStatusOK ( <nl> _indexBuildsManager . checkIndexConstraintViolations ( opCtx , replState - > buildUUID ) ) ; <nl> mmm a / src / mongo / dbtests / storage_timestamp_tests . cpp <nl> ppp b / src / mongo / dbtests / storage_timestamp_tests . cpp <nl> Status SecondaryReadsDuringBatchApplicationAreAllowedApplier : : applyOplogBatchPer <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + class IndexBuildsResolveErrorsDuringStateChangeToPrimary : public StorageTimestampTest { <nl> + public : <nl> + void run ( ) { <nl> + <nl> + NamespaceString nss ( " unittests . timestampIndexBuilds " ) ; <nl> + reset ( nss ) ; <nl> + <nl> + AutoGetCollection autoColl ( _opCtx , nss , LockMode : : MODE_X ) ; <nl> + auto collection = autoColl . getCollection ( ) ; <nl> + <nl> + / / Indexing of parallel arrays is not allowed , so these are deemed " bad " . <nl> + const auto badDoc1 = <nl> + BSON ( " _id " < < 0 < < " a " < < BSON_ARRAY ( 0 < < 1 ) < < " b " < < BSON_ARRAY ( 0 < < 1 ) ) ; <nl> + const auto badDoc2 = <nl> + BSON ( " _id " < < 1 < < " a " < < BSON_ARRAY ( 2 < < 3 ) < < " b " < < BSON_ARRAY ( 2 < < 3 ) ) ; <nl> + const auto badDoc3 = <nl> + BSON ( " _id " < < 2 < < " a " < < BSON_ARRAY ( 4 < < 5 ) < < " b " < < BSON_ARRAY ( 4 < < 5 ) ) ; <nl> + <nl> + / / NOTE : This test does not test any timestamp reads . <nl> + const LogicalTime insert1 = _clock - > reserveTicks ( 1 ) ; <nl> + { <nl> + log ( ) < < " inserting " < < badDoc1 ; <nl> + WriteUnitOfWork wuow ( _opCtx ) ; <nl> + insertDocument ( autoColl . getCollection ( ) , <nl> + InsertStatement ( badDoc1 , insert1 . asTimestamp ( ) , presentTerm ) ) ; <nl> + wuow . commit ( ) ; <nl> + } <nl> + <nl> + const LogicalTime insert2 = _clock - > reserveTicks ( 1 ) ; <nl> + { <nl> + log ( ) < < " inserting " < < badDoc2 ; <nl> + WriteUnitOfWork wuow ( _opCtx ) ; <nl> + insertDocument ( autoColl . getCollection ( ) , <nl> + InsertStatement ( badDoc2 , insert2 . asTimestamp ( ) , presentTerm ) ) ; <nl> + wuow . 
commit ( ) ; <nl> + } <nl> + <nl> + const IndexCatalogEntry * buildingIndex = nullptr ; <nl> + MultiIndexBlock indexer ; <nl> + ON_BLOCK_EXIT ( [ & ] { <nl> + indexer . cleanUpAfterBuild ( _opCtx , collection , MultiIndexBlock : : kNoopOnCleanUpFn ) ; <nl> + } ) ; <nl> + <nl> + / / Provide a build UUID , indicating that this is a two - phase index build . <nl> + const auto buildUUID = UUID : : gen ( ) ; <nl> + indexer . setTwoPhaseBuildUUID ( buildUUID ) ; <nl> + <nl> + const LogicalTime indexInit = _clock - > reserveTicks ( 3 ) ; <nl> + <nl> + / / First , simulate being a secondary . Indexing errors are ignored . <nl> + { <nl> + ASSERT_OK ( _coordinatorMock - > setFollowerMode ( { repl : : MemberState : : MS : : RS_SECONDARY } ) ) ; <nl> + _coordinatorMock - > alwaysAllowWrites ( false ) ; <nl> + repl : : UnreplicatedWritesBlock unreplicatedWrites ( _opCtx ) ; <nl> + <nl> + { <nl> + TimestampBlock tsBlock ( _opCtx , indexInit . asTimestamp ( ) ) ; <nl> + <nl> + auto swSpecs = <nl> + indexer . init ( _opCtx , <nl> + collection , <nl> + { BSON ( " v " < < 2 < < " name " <nl> + < < " a_1_b_1 " <nl> + < < " ns " < < collection - > ns ( ) . ns ( ) < < " key " <nl> + < < BSON ( " a " < < 1 < < " b " < < 1 ) ) } , <nl> + MultiIndexBlock : : makeTimestampedIndexOnInitFn ( _opCtx , collection ) ) ; <nl> + ASSERT_OK ( swSpecs . getStatus ( ) ) ; <nl> + } <nl> + <nl> + auto indexCatalog = collection - > getIndexCatalog ( ) ; <nl> + buildingIndex = indexCatalog - > getEntry ( <nl> + indexCatalog - > findIndexByName ( _opCtx , " a_1_b_1 " , / * includeUnfinished * / true ) ) ; <nl> + ASSERT ( buildingIndex ) ; <nl> + <nl> + ASSERT_OK ( indexer . insertAllDocumentsInCollection ( _opCtx , collection ) ) ; <nl> + <nl> + ASSERT_TRUE ( buildingIndex - > indexBuildInterceptor ( ) - > areAllWritesApplied ( _opCtx ) ) ; <nl> + <nl> + / / There should be one skipped record from the collection scan . <nl> + ASSERT_FALSE ( buildingIndex - > indexBuildInterceptor ( ) <nl> + - > getSkippedRecordTracker ( ) <nl> + - > areAllRecordsApplied ( _opCtx ) ) ; <nl> + } <nl> + <nl> + / / As a primary , stop ignoring indexing errors . <nl> + ASSERT_OK ( _coordinatorMock - > setFollowerMode ( { repl : : MemberState : : MS : : RS_PRIMARY } ) ) ; <nl> + <nl> + { <nl> + / / This write will not succeed because the node is a primary and the document is not <nl> + / / indexable . <nl> + log ( ) < < " attempting to insert " < < badDoc3 ; <nl> + WriteUnitOfWork wuow ( _opCtx ) ; <nl> + ASSERT_THROWS_CODE ( <nl> + collection - > insertDocument ( <nl> + _opCtx , <nl> + InsertStatement ( badDoc3 , indexInit . addTicks ( 1 ) . asTimestamp ( ) , presentTerm ) , <nl> + / * opDebug * / nullptr , <nl> + / * noWarn * / false ) , <nl> + DBException , <nl> + ErrorCodes : : CannotIndexParallelArrays ) ; <nl> + wuow . commit ( ) ; <nl> + } <nl> + <nl> + / / There should be skipped records from failed collection scans and writes . <nl> + ASSERT_FALSE ( <nl> + buildingIndex - > indexBuildInterceptor ( ) - > getSkippedRecordTracker ( ) - > areAllRecordsApplied ( <nl> + _opCtx ) ) ; <nl> + / / This fails because the bad record is still invalid . <nl> + auto status = indexer . retrySkippedRecords ( _opCtx , collection ) ; <nl> + ASSERT_EQ ( status .
code ( ) , ErrorCodes : : CannotIndexParallelArrays ) ; <nl> + <nl> + ASSERT_FALSE ( <nl> + buildingIndex - > indexBuildInterceptor ( ) - > getSkippedRecordTracker ( ) - > areAllRecordsApplied ( <nl> + _opCtx ) ) ; <nl> + ASSERT_TRUE ( buildingIndex - > indexBuildInterceptor ( ) - > areAllWritesApplied ( _opCtx ) ) ; <nl> + <nl> + / / Update one document to be valid , and delete the other . These modifications are written <nl> + / / to the side writes table and must be drained . <nl> + Helpers : : upsert ( _opCtx , collection - > ns ( ) . ns ( ) , BSON ( " _id " < < 0 < < " a " < < 1 < < " b " < < 1 ) ) ; <nl> + { <nl> + RecordId badRecord = <nl> + Helpers : : findOne ( _opCtx , collection , BSON ( " _id " < < 1 ) , false / * requireIndex * / ) ; <nl> + WriteUnitOfWork wuow ( _opCtx ) ; <nl> + collection - > deleteDocument ( _opCtx , kUninitializedStmtId , badRecord , nullptr ) ; <nl> + wuow . commit ( ) ; <nl> + } <nl> + <nl> + ASSERT_FALSE ( buildingIndex - > indexBuildInterceptor ( ) - > areAllWritesApplied ( _opCtx ) ) ; <nl> + ASSERT_OK ( indexer . drainBackgroundWrites ( _opCtx , <nl> + RecoveryUnit : : ReadSource : : kUnset , <nl> + IndexBuildInterceptor : : DrainYieldPolicy : : kNoYield ) ) ; <nl> + <nl> + <nl> + / / This succeeds because the bad documents are now either valid or removed . <nl> + ASSERT_OK ( indexer . retrySkippedRecords ( _opCtx , collection ) ) ; <nl> + ASSERT_TRUE ( <nl> + buildingIndex - > indexBuildInterceptor ( ) - > getSkippedRecordTracker ( ) - > areAllRecordsApplied ( <nl> + _opCtx ) ) ; <nl> + ASSERT_TRUE ( buildingIndex - > indexBuildInterceptor ( ) - > areAllWritesApplied ( _opCtx ) ) ; <nl> + ASSERT_OK ( indexer . checkConstraints ( _opCtx ) ) ; <nl> + <nl> + { <nl> + WriteUnitOfWork wuow ( _opCtx ) ; <nl> + ASSERT_OK ( indexer . commit ( <nl> + _opCtx , <nl> + collection , <nl> + [ & ] ( const BSONObj & indexSpec ) { <nl> + _opCtx - > getServiceContext ( ) - > getOpObserver ( ) - > onCreateIndex ( <nl> + _opCtx , collection - > ns ( ) , collection - > uuid ( ) , indexSpec , false ) ; <nl> + } , <nl> + MultiIndexBlock : : kNoopOnCommitFn ) ) ; <nl> + wuow . commit ( ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> class SecondaryReadsDuringBatchApplicationAreAllowed : public StorageTimestampTest { <nl> public : <nl> void run ( ) { <nl> class AllStorageTimestampTests : public unittest : : OldStyleSuiteSpecification { <nl> addIf < AbortPreparedMultiOplogEntryTransaction > ( ) ; <nl> addIf < PreparedMultiDocumentTransaction > ( ) ; <nl> addIf < AbortedPreparedMultiDocumentTransaction > ( ) ; <nl> + addIf < IndexBuildsResolveErrorsDuringStateChangeToPrimary > ( ) ; <nl> } <nl> } ; <nl> <nl> mmm a / src / mongo / dbtests / validate_tests . cpp <nl> ppp b / src / mongo / dbtests / validate_tests . cpp <nl> class ValidateIndexEntry : public ValidateBase { <nl> & keys , <nl> nullptr , <nl> nullptr , <nl> - id1 ) ; <nl> + id1 , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> auto removeStatus = <nl> iam - > removeKeys ( & _opCtx , { keys . begin ( ) , keys . end ( ) } , id1 , options , & numDeleted ) ; <nl> auto insertStatus = iam - > insert ( & _opCtx , badKey , id1 , options , & insertResult ) ; <nl> class ValidateMissingIndexEntryResults : public ValidateBase { <nl> & keys , <nl> nullptr , <nl> nullptr , <nl> - rid ) ; <nl> + rid , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> auto removeStatus = <nl> iam - > removeKeys ( & _opCtx , { keys . begin ( ) , keys .
end ( ) } , rid , options , & numDeleted ) ; <nl> <nl> class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { <nl> & keys , <nl> nullptr , <nl> nullptr , <nl> - rid ) ; <nl> + rid , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> auto removeStatus = <nl> iam - > removeKeys ( & _opCtx , { keys . begin ( ) , keys . end ( ) } , rid , options , & numDeleted ) ; <nl> <nl> class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { <nl> & keys , <nl> nullptr , <nl> nullptr , <nl> - rid ) ; <nl> + rid , <nl> + IndexAccessMethod : : kNoopOnSuppressedErrorFn ) ; <nl> auto removeStatus = <nl> iam - > removeKeys ( & _opCtx , { keys . begin ( ) , keys . end ( ) } , rid , options , & numDeleted ) ; <nl> <nl> | SERVER - 45351 Record all indexing errors during simultaneous index builds for later constraint checking | mongodb/mongo | 3740db9c9166fed48b92ea5cf59c9ae061a94cf1 | 2020-01-30T14:52:48Z |
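The core mechanism in the MongoDB entry above is a side table of skipped RecordIds: key generation errors are suppressed while the node is a secondary, the offending RecordIds are parked in a temporary table, and the build may only commit as primary once every parked record has been re-indexed under full constraint enforcement. A minimal, self-contained sketch of that pattern follows; `SkippedRecordTable`, `indexOne`, and the `Status` struct are invented stand-ins for illustration, not MongoDB's actual types.

```cpp
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

// Invented stand-ins; the real storage-layer types are far richer.
using RecordId = std::int64_t;
using Document = std::string;
struct Status { bool ok; std::string reason; };

// Side table of RecordIds whose key generation failed while constraints
// were relaxed (i.e. while the node was a secondary).
class SkippedRecordTable {
public:
    void record(RecordId id) { skipped_.push_back(id); }
    bool allApplied() const { return skipped_.empty(); }

    // Re-attempt indexing each skipped record with constraints enforced.
    // Records deleted since the scan are simply dropped from the table;
    // a record that still fails keeps its table entry and aborts the retry.
    Status retryAll(const std::unordered_map<RecordId, Document>& collection,
                    const std::function<Status(RecordId, const Document&)>& indexOne) {
        while (!skipped_.empty()) {
            RecordId id = skipped_.back();
            auto it = collection.find(id);
            if (it != collection.end()) {
                Status s = indexOne(id, it->second);
                if (!s.ok)
                    return s;  // entry stays behind for a later retry
            }
            skipped_.pop_back();  // indexed or deleted: remove the entry
        }
        return {true, ""};
    }

private:
    std::vector<RecordId> skipped_;
};
```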
mmm a / src / builtins / ppc / builtins - ppc . cc <nl> ppp b / src / builtins / ppc / builtins - ppc . cc <nl> void Builtins : : Generate_WasmCompileLazy ( MacroAssembler * masm ) { <nl> __ LoadSmiLiteral ( cp , Smi : : kZero ) ; <nl> __ CallRuntime ( Runtime : : kWasmCompileLazy ) ; <nl> / / The entrypoint address is the first return value . <nl> - __ mov ( r11 , kReturnRegister0 ) ; <nl> + __ mr ( r11 , kReturnRegister0 ) ; <nl> / / The WASM instance is the second return value . <nl> - __ mov ( wasm_instance_reg , kReturnRegister1 ) ; <nl> + __ mr ( wasm_instance_reg , kReturnRegister1 ) ; <nl> <nl> / / Restore registers . <nl> __ MultiPopDoubles ( fp_regs ) ; <nl> mmm a / src / builtins / s390 / builtins - s390 . cc <nl> ppp b / src / builtins / s390 / builtins - s390 . cc <nl> void Builtins : : Generate_WasmCompileLazy ( MacroAssembler * masm ) { <nl> __ LoadSmiLiteral ( cp , Smi : : kZero ) ; <nl> __ CallRuntime ( Runtime : : kWasmCompileLazy ) ; <nl> / / The entrypoint address is the first return value . <nl> - __ mov ( ip , r2 ) ; <nl> + __ LoadRR ( ip , r2 ) ; <nl> / / The WASM instance is the second return value . <nl> - __ movq ( wasm_instance_reg , kReturnRegister1 ) ; <nl> + __ LoadRR ( wasm_instance_reg , kReturnRegister1 ) ; <nl> <nl> / / Restore registers . <nl> __ MultiPopDoubles ( fp_regs ) ; <nl> | PPC / s390 : [ wasm ] Merge the WasmContext into WasmInstanceObject | v8/v8 | 66e03c883225a8dd0942b51265bdad5fc1261419 | 2018-04-09T19:32:17Z |
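The V8 entry above swaps `mov` for `mr` (PPC) and `LoadRR` (s390) because on those architectures a register-to-register copy is a different instruction from the `mov`-style operand load, and `movq` is an x64 mnemonic that does not exist on s390. A toy sketch of why the two must stay distinct on PPC; `MiniAssemblerPPC` and `emit` are invented names, not V8's real Assembler interface:

```cpp
#include <cstdint>

// Invented miniature of a PPC assembler; V8's real interface differs.
struct Register { unsigned code; };

class MiniAssemblerPPC {
public:
    // Register-to-register copy: mr rd, rs, the extended mnemonic for
    // "or rd, rs, rs" (primary opcode 31, extended opcode 444).
    void mr(Register rd, Register rs) {
        emit(31u << 26 | rs.code << 21 | rd.code << 16 | rs.code << 11 | 444u << 1);
    }

    // Immediate load: li rd, imm, i.e. "addi rd, r0, imm" (opcode 14).
    // The 16-bit immediate occupies the low half of the instruction word;
    // a register operand simply cannot be encoded in this slot, which is
    // why a register-to-register "mov" must use mr instead.
    void li(Register rd, std::int16_t imm) {
        emit(14u << 26 | rd.code << 21 | 0u << 16 | static_cast<std::uint16_t>(imm));
    }

private:
    // No bounds checking in this sketch.
    void emit(std::uint32_t instr) { buffer_[size_++] = instr; }
    std::uint32_t buffer_[1024];
    unsigned size_ = 0;
};
```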
mmm a / lib / LLVMPasses / LLVMMergeFunctions . cpp <nl> ppp b / lib / LLVMPasses / LLVMMergeFunctions . cpp <nl> class SwiftMergeFunctions : public ModulePass { <nl> bool runOnModule ( Module & M ) override ; <nl> <nl> private : <nl> - enum { <nl> - / / / The maximum number of parameters added to a merged functions . This <nl> - / / / roughly corresponds to the number of differing constants . <nl> - maxAddedParams = 4 <nl> - } ; <nl> - <nl> struct FunctionEntry ; <nl> <nl> / / / Describes the set of functions which are considered as " equivalent " ( i . e . <nl> class SwiftMergeFunctions : public ModulePass { <nl> } <nl> } ; <nl> <nl> - using ParamInfos = SmallVector < ParamInfo , maxAddedParams > ; <nl> + using ParamInfos = SmallVector < ParamInfo , 16 > ; <nl> <nl> GlobalNumberState GlobalNumbers ; <nl> <nl> class SwiftMergeFunctions : public ModulePass { <nl> <nl> FunctionInfo removeFuncWithMostParams ( FunctionInfos & FInfos ) ; <nl> <nl> - bool deriveParams ( ParamInfos & Params , FunctionInfos & FInfos ) ; <nl> + bool deriveParams ( ParamInfos & Params , FunctionInfos & FInfos , <nl> + unsigned maxParams ) ; <nl> <nl> bool numOperandsDiffer ( FunctionInfos & FInfos ) ; <nl> <nl> bool constsDiffer ( const FunctionInfos & FInfos , unsigned OpIdx ) ; <nl> <nl> bool tryMapToParameter ( FunctionInfos & FInfos , unsigned OpIdx , <nl> - ParamInfos & Params ) ; <nl> + ParamInfos & Params , unsigned maxParams ) ; <nl> <nl> void mergeWithParams ( const FunctionInfos & FInfos , ParamInfos & Params ) ; <nl> <nl> static bool mayMergeCallsToFunction ( Function & F ) { <nl> return true ; <nl> } <nl> <nl> - / / / Returns true if function \ p F is eligible for merging . <nl> - static bool isEligibleFunction ( Function * F ) { <nl> - if ( F - > isDeclaration ( ) ) <nl> - return false ; <nl> - <nl> - if ( F - > hasAvailableExternallyLinkage ( ) ) <nl> - return false ; <nl> - <nl> - if ( F - > getFunctionType ( ) - > isVarArg ( ) ) <nl> - return false ; <nl> - <nl> + / / / Returns the benefit , which is approximately the size of the function . <nl> + / / / Return 0 , if the function should not be merged . <nl> + static unsigned getBenefit ( Function * F ) { <nl> unsigned Benefit = 0 ; <nl> <nl> / / We don ' t want to merge very small functions , because the overhead of <nl> static bool isEligibleFunction ( Function * F ) { <nl> if ( CallBase * CB = dyn_cast < CallBase > ( & I ) ) { <nl> Function * Callee = CB - > getCalledFunction ( ) ; <nl> if ( Callee & & ! mayMergeCallsToFunction ( * Callee ) ) <nl> - return false ; <nl> + return 0 ; <nl> if ( ! Callee | | ! Callee - > isIntrinsic ( ) ) { <nl> Benefit + = 5 ; <nl> continue ; <nl> static bool isEligibleFunction ( Function * F ) { <nl> Benefit + = 1 ; <nl> } <nl> } <nl> + return Benefit ; <nl> + } <nl> + <nl> + / / / Returns true if function \ p F is eligible for merging . <nl> + static bool isEligibleFunction ( Function * F ) { <nl> + if ( F - > isDeclaration ( ) ) <nl> + return false ; <nl> + <nl> + if ( F - > hasAvailableExternallyLinkage ( ) ) <nl> + return false ; <nl> + <nl> + if ( F - > getFunctionType ( ) - > isVarArg ( ) ) <nl> + return false ; <nl> + <nl> + unsigned Benefit = getBenefit ( F ) ; <nl> if ( Benefit < FunctionMergeThreshold ) <nl> return false ; <nl> <nl> bool SwiftMergeFunctions : : tryMergeEquivalenceClass ( FunctionEntry * FirstInClass ) <nl> bool Changed = false ; <nl> int Try = 0 ; <nl> <nl> + unsigned Benefit = getBenefit ( FirstInClass - > F ) ; <nl> + <nl> + / / The bigger the function , the more parameters are allowed . 
<nl> + unsigned maxParams = std : : max ( 4u , Benefit / 100 ) ; <nl> + <nl> / / We need multiple tries if there are some functions in FInfos which differ <nl> / / too much from the first function in FInfos . But we limit the number of <nl> / / tries to a small number , because this is quadratic . <nl> while ( FInfos . size ( ) > = 2 & & Try + + < 4 ) { <nl> ParamInfos Params ; <nl> - bool Merged = deriveParams ( Params , FInfos ) ; <nl> + bool Merged = deriveParams ( Params , FInfos , maxParams ) ; <nl> if ( Merged ) { <nl> mergeWithParams ( FInfos , Params ) ; <nl> Changed = true ; <nl> removeFuncWithMostParams ( FunctionInfos & FInfos ) { <nl> / / / Returns true on success , i . e . the functions in \ p FInfos can be merged with <nl> / / / the parameters returned in \ p Params . <nl> bool SwiftMergeFunctions : : deriveParams ( ParamInfos & Params , <nl> - FunctionInfos & FInfos ) { <nl> + FunctionInfos & FInfos , <nl> + unsigned maxParams ) { <nl> for ( FunctionInfo & FI : FInfos ) <nl> FI . init ( ) ; <nl> <nl> bool SwiftMergeFunctions : : deriveParams ( ParamInfos & Params , <nl> if ( constsDiffer ( FInfos , OpIdx ) ) { <nl> / / This instruction has operands which differ in at least some <nl> / / functions . So we need to parameterize it . <nl> - if ( ! tryMapToParameter ( FInfos , OpIdx , Params ) ) { <nl> + if ( ! tryMapToParameter ( FInfos , OpIdx , Params , maxParams ) ) { <nl> / / We ran out of parameters . <nl> return false ; <nl> } <nl> bool SwiftMergeFunctions : : constsDiffer ( const FunctionInfos & FInfos , <nl> / / / Returns true if a parameter could be created or found without exceeding the <nl> / / / maximum number of parameters . <nl> bool SwiftMergeFunctions : : tryMapToParameter ( FunctionInfos & FInfos , <nl> - unsigned OpIdx , ParamInfos & Params ) { <nl> + unsigned OpIdx , ParamInfos & Params , <nl> + unsigned maxParams ) { <nl> ParamInfo * Matching = nullptr ; <nl> / / Try to find an existing parameter which exactly matches the differing <nl> / / operands of the current instruction . <nl> bool SwiftMergeFunctions : : tryMapToParameter ( FunctionInfos & FInfos , <nl> if ( ! Matching ) { <nl> / / We need a new parameter . <nl> / / Check if we are within the limit . <nl> - if ( Params . size ( ) > = maxAddedParams ) <nl> + if ( Params . size ( ) > = maxParams ) <nl> return false ; <nl> <nl> Params . resize ( Params . size ( ) + 1 ) ; <nl> | LLVMMergeFunctions : allow more parameters if the function is bigger | apple/swift | 145b8ae35ddbf52b7b8f144daab84f12b3c7d3ba | 2020-08-06T17:02:11Z |
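The Swift entry above replaces the fixed cap of four added parameters with a cap that scales with the function's estimated size (`std::max(4u, Benefit / 100)`), so large functions can still be merged when many constants differ. A small standalone sketch of that gating decision; `Candidate` and `canMergeWithParams` are placeholder names, not the pass's real types:

```cpp
#include <algorithm>
#include <vector>

// Placeholder for one function in an equivalence class: its estimated
// size ("benefit") and the operand positions whose constants differ.
struct Candidate {
    unsigned benefit;
    std::vector<unsigned> differingOperands;
};

// Each differing constant costs one extra parameter on the merged
// function. For a ~100-benefit function only 4 extra parameters pay
// off; a 1000-benefit function can absorb 10, and so on.
inline bool canMergeWithParams(const Candidate& c) {
    unsigned maxParams = std::max(4u, c.benefit / 100);
    return c.differingOperands.size() <= maxParams;
}
```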
mmm a / atom / browser / ui / message_box_views . cc <nl> ppp b / atom / browser / ui / message_box_views . cc <nl> <nl> <nl> # include " atom / browser / ui / message_box . h " <nl> <nl> + # if defined ( USE_X11 ) <nl> + # include < gtk / gtk . h > <nl> + # endif <nl> + <nl> # include " atom / browser / native_window . h " <nl> # include " base / callback . h " <nl> # include " base / message_loop / message_loop . h " <nl> <nl> # include " ui / wm / core / shadow_types . h " <nl> <nl> # if defined ( USE_X11 ) <nl> + # include " atom / browser / browser . h " <nl> # include " ui / views / window / native_frame_view . h " <nl> # endif <nl> <nl> <nl> # include " ui / base / win / message_box_win . h " <nl> # endif <nl> <nl> + # define ANSI_FOREGROUND_RED " \ x1b [ 31m " <nl> + # define ANSI_FOREGROUND_BLACK " \ x1b [ 30m " <nl> + # define ANSI_TEXT_BOLD " \ x1b [ 1m " <nl> + # define ANSI_BACKGROUND_GRAY " \ x1b [ 47m " <nl> + # define ANSI_RESET " \ x1b [ 0m " <nl> + <nl> namespace atom { <nl> <nl> namespace { <nl> void ShowMessageBox ( NativeWindow * parent_window , <nl> void ShowErrorBox ( const base : : string16 & title , const base : : string16 & content ) { <nl> # if defined ( OS_WIN ) <nl> ui : : MessageBox ( NULL , content , title , MB_OK | MB_ICONERROR | MB_TASKMODAL ) ; <nl> + # elif defined ( USE_X11 ) <nl> + if ( Browser : : Get ( ) - > is_ready ( ) ) { <nl> + } else { <nl> + fprintf ( stderr , <nl> + ANSI_TEXT_BOLD ANSI_BACKGROUND_GRAY <nl> + ANSI_FOREGROUND_RED " % s \ n " <nl> + ANSI_FOREGROUND_BLACK " % s " <nl> + ANSI_RESET " \ n " , <nl> + base : : UTF16ToUTF8 ( title ) . c_str ( ) , <nl> + base : : UTF16ToUTF8 ( content ) . c_str ( ) ) ; <nl> + } <nl> # endif <nl> } <nl> <nl> | linux : Print error to console when GUI is not ready | electron/electron | b54caccb227b721350ef438c468f3a7859e50dae | 2014-11-05T11:08:00Z |
mmm a / tensorflow / tools / docker / Dockerfile <nl> ppp b / tensorflow / tools / docker / Dockerfile <nl> <nl> - FROM ubuntu : 16 . 04 <nl> + FROM ubuntu : 18 . 04 <nl> <nl> LABEL maintainer = " Craig Citro < craigcitro @ google . com > " <nl> <nl> RUN apt - get update & & apt - get install - y - - no - install - recommends \ <nl> curl \ <nl> libfreetype6 - dev \ <nl> libhdf5 - serial - dev \ <nl> - libpng12 - dev \ <nl> + libpng - dev \ <nl> libzmq3 - dev \ <nl> pkg - config \ <nl> python \ <nl> mmm a / tensorflow / tools / docker / Dockerfile . devel <nl> ppp b / tensorflow / tools / docker / Dockerfile . devel <nl> <nl> - FROM ubuntu : 16 . 04 <nl> + FROM ubuntu : 18 . 04 <nl> <nl> LABEL maintainer = " Craig Citro < craigcitro @ google . com > " <nl> <nl> RUN apt - get update & & apt - get install - y - - no - install - recommends \ <nl> libcurl3 - dev \ <nl> libfreetype6 - dev \ <nl> libhdf5 - serial - dev \ <nl> - libpng12 - dev \ <nl> + libpng - dev \ <nl> libzmq3 - dev \ <nl> pkg - config \ <nl> python - dev \ <nl> mmm a / tensorflow / tools / docker / Dockerfile . devel - mkl <nl> ppp b / tensorflow / tools / docker / Dockerfile . devel - mkl <nl> <nl> - FROM ubuntu : 16 . 04 <nl> + FROM ubuntu : 18 . 04 <nl> <nl> LABEL maintainer = " Clayne Robison < clayne . b . robison @ intel . com > " <nl> <nl> RUN apt - get update & & apt - get install - y - - no - install - recommends \ <nl> libcurl3 - dev \ <nl> libfreetype6 - dev \ <nl> libhdf5 - serial - dev \ <nl> - libpng12 - dev \ <nl> + libpng - dev \ <nl> libzmq3 - dev \ <nl> libssl - dev \ <nl> pkg - config \ <nl> mmm a / tensorflow / tools / docker / Dockerfile . devel - mkl - horovod <nl> ppp b / tensorflow / tools / docker / Dockerfile . devel - mkl - horovod <nl> <nl> - FROM ubuntu : 16 . 04 <nl> + FROM ubuntu : 18 . 04 <nl> <nl> LABEL maintainer = " Cong Xu < cong . xu @ intel . com > " <nl> <nl> RUN apt - get update & & apt - get install - y - - no - install - recommends \ <nl> libcurl3 - dev \ <nl> libfreetype6 - dev \ <nl> libhdf5 - serial - dev \ <nl> - libpng12 - dev \ <nl> + libpng - dev \ <nl> libzmq3 - dev \ <nl> pkg - config \ <nl> python - dev \ <nl> mmm a / tensorflow / tools / docker / Dockerfile . mkl <nl> ppp b / tensorflow / tools / docker / Dockerfile . mkl <nl> <nl> - FROM ubuntu : 16 . 04 <nl> + FROM ubuntu : 18 . 04 <nl> <nl> LABEL maintainer = " Clayne Robison < clayne . b . robison @ intel . com > " <nl> <nl> RUN apt - get update & & apt - get install - y - - no - install - recommends \ <nl> curl \ <nl> libfreetype6 - dev \ <nl> libhdf5 - serial - dev \ <nl> - libpng12 - dev \ <nl> + libpng - dev \ <nl> libzmq3 - dev \ <nl> pkg - config \ <nl> $ { PYTHON } \ <nl> mmm a / tensorflow / tools / docker / Dockerfile . mkl - horovod <nl> ppp b / tensorflow / tools / docker / Dockerfile . mkl - horovod <nl> <nl> - FROM ubuntu : 16 . 04 <nl> + FROM ubuntu : 18 . 04 <nl> <nl> LABEL maintainer = " Cong Xu < cong . xu @ intel . com > " <nl> <nl> RUN apt - get update & & apt - get install - y - - no - install - recommends \ <nl> curl \ <nl> libfreetype6 - dev \ <nl> libhdf5 - serial - dev \ <nl> - libpng12 - dev \ <nl> + libpng - dev \ <nl> libzmq3 - dev \ <nl> pkg - config \ <nl> python \ <nl> | Merge pull request from jpds : 18 . 04 - docker | tensorflow/tensorflow | 48244d031429640df446751e39ba3e6cae76d9c7 | 2018-11-26T21:53:47Z |
mmm a / include / swift / Demangling / TypeDecoder . h <nl> ppp b / include / swift / Demangling / TypeDecoder . h <nl> class TypeDecoder { <nl> case NodeKind : : BoundGenericClass : <nl> { <nl> # if SWIFT_OBJC_INTEROP <nl> - if ( Node - > getNumChildren ( ) = = 2 ) { <nl> + if ( Node - > getNumChildren ( ) > = 2 ) { <nl> auto ChildNode = Node - > getChild ( 0 ) ; <nl> if ( ChildNode - > getKind ( ) = = NodeKind : : Type & & <nl> ChildNode - > getNumChildren ( ) > 0 ) <nl> | [ Runtime ] Minor future - proofing for demangling bound generic ObjC classes . | apple/swift | aed8b9f96a17cca79b8f2d611f1c1e8ba2c6c2c8 | 2019-01-03T19:19:36Z |
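Annotation: the one-character change above (== 2 to >= 2) lets the demangler accept BoundGenericClass nodes that later manglings extend with extra children, instead of silently skipping them. A toy C++ sketch of why the exact-count check is fragile; the Node type here is an illustrative stand-in, not the real swift::Demangle tree API:

#include <cstdio>
#include <vector>

// Toy stand-in for a demangle-tree node; the real API has far more structure.
struct Node {
  std::vector<const Node*> children;
  size_t getNumChildren() const { return children.size(); }
  const Node* getChild(size_t i) const { return children[i]; }
};

static bool handleBoundGenericClass(const Node& node) {
  // Old check: node.getNumChildren() == 2 — rejects any node that a newer
  // mangling appends extra children to. The patched check only requires
  // the two children the code actually reads.
  if (node.getNumChildren() >= 2) {
    const Node* type = node.getChild(0);
    const Node* args = node.getChild(1);
    return type != nullptr && args != nullptr;
  }
  return false;
}

int main() {
  Node type, args, extra;
  Node twoKids{{&type, &args}};
  Node threeKids{{&type, &args, &extra}};  // the old '== 2' check skips this
  std::printf("two children: %d, three children: %d\n",
              handleBoundGenericClass(twoKids),
              handleBoundGenericClass(threeKids));
  return 0;
}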
mmm a / src / messages . js <nl> ppp b / src / messages . js <nl> function FormatMessage ( message ) { <nl> " proxy_non_object_prop_names " , [ " Trap ' " , " % 1 " , " ' returned non - object " , " % 0 " ] , <nl> " proxy_repeated_prop_name " , [ " Trap ' " , " % 1 " , " ' returned repeated property name ' " , " % 2 " , " ' " ] , <nl> " invalid_weakmap_key " , [ " Invalid value used as weak map key " ] , <nl> - " not_date_object " , [ " Receiver is not a Date object . " ] , <nl> + " not_date_object " , [ " this is not a Date object . " ] , <nl> / / RangeError <nl> " invalid_array_length " , [ " Invalid array length " ] , <nl> " stack_overflow " , [ " Maximum call stack size exceeded " ] , <nl> | Fix TypeError message for Date builtins . | v8/v8 | 474e34e3c52feb4ce8d453fb845ca30eac50819d | 2012-09-11T12:43:17Z |
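Annotation: the message being edited above lives in a table where each entry is a list of literal fragments and "%N" placeholders (see "proxy_repeated_prop_name" in the same hunk). A hedged C++ re-creation of that substitution shape — V8's actual FormatMessage logic is not shown in the diff, so this only imitates the visible template format:

#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

// Assumption: a two-character "%N" part is replaced by the N-th argument and
// every other part is copied through, matching the messages.js table layout.
static std::string formatMessage(const std::vector<std::string>& parts,
                                 const std::vector<std::string>& args) {
  std::string out;
  for (const std::string& part : parts) {
    if (part.size() == 2 && part[0] == '%' &&
        std::isdigit(static_cast<unsigned char>(part[1])))
      out += args[part[1] - '0'];
    else
      out += part;
  }
  return out;
}

int main() {
  // "proxy_repeated_prop_name" from the table above:
  std::string msg = formatMessage(
      {"Trap '", "%1", "' returned repeated property name '", "%2", "'"},
      {"<proxy>", "ownKeys", "foo"});
  // Prints: Trap 'ownKeys' returned repeated property name 'foo'
  std::printf("%s\n", msg.c_str());
  return 0;
}

The fixed "not_date_object" entry itself has no placeholders; the commit only changes its literal text from "Receiver is not a Date object." to "this is not a Date object."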
similarity index 100 % <nl> rename from src / python / src / . gitignore <nl> rename to src / python / grpcio / . gitignore <nl> similarity index 100 % <nl> rename from src / python / src / MANIFEST . in <nl> rename to src / python / grpcio / MANIFEST . in <nl> similarity index 100 % <nl> rename from src / python / src / README . rst <nl> rename to src / python / grpcio / README . rst <nl> similarity index 99 % <nl> rename from src / python / src / commands . py <nl> rename to src / python / grpcio / commands . py <nl> mmm a / src / python / src / commands . py <nl> ppp b / src / python / grpcio / commands . py <nl> <nl> html_theme = ' sphinx_rtd_theme ' <nl> " " " <nl> <nl> + <nl> class SphinxDocumentation ( setuptools . Command ) : <nl> " " " Command to generate documentation via sphinx . " " " <nl> <nl> similarity index 100 % <nl> rename from src / python / interop / interop / __init__ . py <nl> rename to src / python / grpcio / grpc / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / . gitignore <nl> rename to src / python / grpcio / grpc / _adapter / . gitignore <nl> similarity index 100 % <nl> rename from src / python / src / grpc / __init__ . py <nl> rename to src / python / grpcio / grpc / _adapter / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / module . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / module . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / types . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types . h <nl> rename to src / python / grpcio / grpc / _adapter / _c / types . h <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types / call . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / types / call . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types / channel . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / types / channel . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types / client_credentials . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / types / client_credentials . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types / completion_queue . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / types / completion_queue . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types / server . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / types / server . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / types / server_credentials . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / types / server_credentials . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c / utility . c <nl> rename to src / python / grpcio / grpc / _adapter / _c / utility . c <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _common . py <nl> rename to src / python / grpcio / grpc / _adapter / _common . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _intermediary_low . py <nl> rename to src / python / grpcio / grpc / _adapter / _intermediary_low . 
py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _low . py <nl> rename to src / python / grpcio / grpc / _adapter / _low . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _types . py <nl> rename to src / python / grpcio / grpc / _adapter / _types . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / fore . py <nl> rename to src / python / grpcio / grpc / _adapter / fore . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / rear . py <nl> rename to src / python / grpcio / grpc / _adapter / rear . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / . gitignore <nl> rename to src / python / grpcio / grpc / _cython / . gitignore <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / README . rst <nl> rename to src / python / grpcio / grpc / _cython / README . rst <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / __init__ . py <nl> rename to src / python / grpcio / grpc / _cython / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / __init__ . py <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / call . pxd <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / call . pxd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / call . pyx <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / call . pyx <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / channel . pxd <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / channel . pxd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / channel . pyx <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / channel . pyx <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / completion_queue . pxd <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / completion_queue . pxd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / completion_queue . pyx <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / completion_queue . pyx <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / credentials . pxd <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / credentials . pxd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / credentials . pyx <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / credentials . pyx <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / grpc . pxd <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / grpc . pxd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / records . pxd <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / records . pxd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / records . pyx <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / records . pyx <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / server . 
pxd <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / server . pxd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / _cygrpc / server . pyx <nl> rename to src / python / grpcio / grpc / _cython / _cygrpc / server . pyx <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / adapter_low . py <nl> rename to src / python / grpcio / grpc / _cython / adapter_low . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / cygrpc . pyx <nl> rename to src / python / grpcio / grpc / _cython / cygrpc . pyx <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / __init__ . py <nl> rename to src / python / grpcio / grpc / _links / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _links / invocation . py <nl> rename to src / python / grpcio / grpc / _links / invocation . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _links / service . py <nl> rename to src / python / grpcio / grpc / _links / service . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _junkdrawer / __init__ . py <nl> rename to src / python / grpcio / grpc / early_adopter / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / early_adopter / implementations . py <nl> rename to src / python / grpcio / grpc / early_adopter / implementations . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _links / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / alpha / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / alpha / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / alpha / _face_utilities . py <nl> rename to src / python / grpcio / grpc / framework / alpha / _face_utilities . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / alpha / _reexport . py <nl> rename to src / python / grpcio / grpc / framework / alpha / _reexport . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / alpha / exceptions . py <nl> rename to src / python / grpcio / grpc / framework / alpha / exceptions . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / alpha / interfaces . py <nl> rename to src / python / grpcio / grpc / framework / alpha / interfaces . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / alpha / utilities . py <nl> rename to src / python / grpcio / grpc / framework / alpha / utilities . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / early_adopter / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / base / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _cancellation . py <nl> rename to src / python / grpcio / grpc / framework / base / _cancellation . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _constants . py <nl> rename to src / python / grpcio / grpc / framework / base / _constants . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _context . py <nl> rename to src / python / grpcio / grpc / framework / base / _context . 
py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _emission . py <nl> rename to src / python / grpcio / grpc / framework / base / _emission . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _ends . py <nl> rename to src / python / grpcio / grpc / framework / base / _ends . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _expiration . py <nl> rename to src / python / grpcio / grpc / framework / base / _expiration . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _ingestion . py <nl> rename to src / python / grpcio / grpc / framework / base / _ingestion . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _interfaces . py <nl> rename to src / python / grpcio / grpc / framework / base / _interfaces . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _reception . py <nl> rename to src / python / grpcio / grpc / framework / base / _reception . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _termination . py <nl> rename to src / python / grpcio / grpc / framework / base / _termination . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / _transmission . py <nl> rename to src / python / grpcio / grpc / framework / base / _transmission . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / exceptions . py <nl> rename to src / python / grpcio / grpc / framework / base / exceptions . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / implementations . py <nl> rename to src / python / grpcio / grpc / framework / base / implementations . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / in_memory . py <nl> rename to src / python / grpcio / grpc / framework / base / in_memory . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / interfaces . py <nl> rename to src / python / grpcio / grpc / framework / base / interfaces . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / null . py <nl> rename to src / python / grpcio / grpc / framework / base / null . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / util . py <nl> rename to src / python / grpcio / grpc / framework / base / util . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / common / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / common / cardinality . py <nl> rename to src / python / grpcio / grpc / framework / common / cardinality . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / common / style . py <nl> rename to src / python / grpcio / grpc / framework / common / style . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / base / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / face / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / _calls . 
py <nl> rename to src / python / grpcio / grpc / framework / face / _calls . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / _control . py <nl> rename to src / python / grpcio / grpc / framework / face / _control . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / _service . py <nl> rename to src / python / grpcio / grpc / framework / face / _service . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / demonstration . py <nl> rename to src / python / grpcio / grpc / framework / face / demonstration . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / exceptions . py <nl> rename to src / python / grpcio / grpc / framework / face / exceptions . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / implementations . py <nl> rename to src / python / grpcio / grpc / framework / face / implementations . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / interfaces . py <nl> rename to src / python / grpcio / grpc / framework / face / interfaces . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / utilities . py <nl> rename to src / python / grpcio / grpc / framework / face / utilities . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / common / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / foundation / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / _timer_future . py <nl> rename to src / python / grpcio / grpc / framework / foundation / _timer_future . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / abandonment . py <nl> rename to src / python / grpcio / grpc / framework / foundation / abandonment . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / activated . py <nl> rename to src / python / grpcio / grpc / framework / foundation / activated . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / callable_util . py <nl> rename to src / python / grpcio / grpc / framework / foundation / callable_util . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / future . py <nl> rename to src / python / grpcio / grpc / framework / foundation / future . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / later . py <nl> rename to src / python / grpcio / grpc / framework / foundation / later . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / logging_pool . py <nl> rename to src / python / grpcio / grpc / framework / foundation / logging_pool . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / relay . py <nl> rename to src / python / grpcio / grpc / framework / foundation / relay . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / stream . py <nl> rename to src / python / grpcio / grpc / framework / foundation / stream . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / stream_util . 
py <nl> rename to src / python / grpcio / grpc / framework / foundation / stream_util . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / interfaces / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / testing / __init__ . py <nl> rename to src / python / grpcio / grpc / framework / interfaces / links / __init__ . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / interfaces / links / links . py <nl> rename to src / python / grpcio / grpc / framework / interfaces / links / links . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / interfaces / links / utilities . py <nl> rename to src / python / grpcio / grpc / framework / interfaces / links / utilities . py <nl> similarity index 100 % <nl> rename from src / python / src / setup . cfg <nl> rename to src / python / grpcio / setup . cfg <nl> similarity index 85 % <nl> rename from src / python / src / setup . py <nl> rename to src / python / grpcio / setup . py <nl> mmm a / src / python / src / setup . py <nl> ppp b / src / python / grpcio / setup . py <nl> <nl> _EXTENSION_MODULES = [ _C_EXTENSION_MODULE ] <nl> <nl> _PACKAGES = ( <nl> - ' grpc ' , <nl> - ' grpc . _adapter ' , <nl> - ' grpc . _junkdrawer ' , <nl> - ' grpc . _links ' , <nl> - ' grpc . early_adopter ' , <nl> - ' grpc . framework ' , <nl> - ' grpc . framework . alpha ' , <nl> - ' grpc . framework . base ' , <nl> - ' grpc . framework . common ' , <nl> - ' grpc . framework . face ' , <nl> - ' grpc . framework . face . testing ' , <nl> - ' grpc . framework . foundation ' , <nl> - ' grpc . framework . interfaces ' , <nl> - ' grpc . framework . interfaces . links ' , <nl> + setuptools . find_packages ( ' . ' , exclude = [ ' * . _cython ' , ' * . _cython . * ' ] ) <nl> ) <nl> <nl> _PACKAGE_DIRECTORIES = { <nl> - ' grpc ' : ' grpc ' , <nl> - ' grpc . _adapter ' : ' grpc / _adapter ' , <nl> - ' grpc . _junkdrawer ' : ' grpc / _junkdrawer ' , <nl> - ' grpc . _links ' : ' grpc / _links ' , <nl> - ' grpc . early_adopter ' : ' grpc / early_adopter ' , <nl> - ' grpc . framework ' : ' grpc / framework ' , <nl> + ' ' : ' . ' , <nl> } <nl> <nl> _INSTALL_REQUIRES = ( <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / __init__ . py <nl> rename to src / python / grpcio_test / grpc_interop / __init__ . py <nl> similarity index 96 % <nl> rename from src / python / interop / interop / _insecure_interop_test . py <nl> rename to src / python / grpcio_test / grpc_interop / _insecure_interop_test . py <nl> mmm a / src / python / interop / interop / _insecure_interop_test . py <nl> ppp b / src / python / grpcio_test / grpc_interop / _insecure_interop_test . py <nl> <nl> <nl> from grpc . early_adopter import implementations <nl> <nl> - from interop import _interop_test_case <nl> - from interop import methods <nl> + from grpc_interop import _interop_test_case <nl> + from grpc_interop import methods <nl> <nl> <nl> class InsecureInteropTest ( <nl> similarity index 98 % <nl> rename from src / python / interop / interop / _interop_test_case . py <nl> rename to src / python / grpcio_test / grpc_interop / _interop_test_case . py <nl> mmm a / src / python / interop / interop / _interop_test_case . py <nl> ppp b / src / python / grpcio_test / grpc_interop / _interop_test_case . 
py <nl> <nl> <nl> " " " Common code for unit tests of the interoperability test code . " " " <nl> <nl> - from interop import methods <nl> + from grpc_interop import methods <nl> <nl> <nl> class InteropTestCase ( object ) : <nl> similarity index 95 % <nl> rename from src / python / interop / interop / _secure_interop_test . py <nl> rename to src / python / grpcio_test / grpc_interop / _secure_interop_test . py <nl> mmm a / src / python / interop / interop / _secure_interop_test . py <nl> ppp b / src / python / grpcio_test / grpc_interop / _secure_interop_test . py <nl> <nl> <nl> from grpc . early_adopter import implementations <nl> <nl> - from interop import _interop_test_case <nl> - from interop import methods <nl> - from interop import resources <nl> + from grpc_interop import _interop_test_case <nl> + from grpc_interop import methods <nl> + from grpc_interop import resources <nl> <nl> _SERVER_HOST_OVERRIDE = ' foo . test . google . fr ' <nl> <nl> similarity index 98 % <nl> rename from src / python / interop / interop / client . py <nl> rename to src / python / grpcio_test / grpc_interop / client . py <nl> mmm a / src / python / interop / interop / client . py <nl> ppp b / src / python / grpcio_test / grpc_interop / client . py <nl> <nl> <nl> from grpc . early_adopter import implementations <nl> <nl> - from interop import methods <nl> - from interop import resources <nl> + from grpc_interop import methods <nl> + from grpc_interop import resources <nl> <nl> _ONE_DAY_IN_SECONDS = 60 * 60 * 24 <nl> <nl> similarity index 100 % <nl> rename from src / python / interop / interop / credentials / README <nl> rename to src / python / grpcio_test / grpc_interop / credentials / README <nl> similarity index 100 % <nl> rename from src / python / interop / interop / credentials / ca . pem <nl> rename to src / python / grpcio_test / grpc_interop / credentials / ca . pem <nl> similarity index 100 % <nl> rename from src / python / interop / interop / credentials / server1 . key <nl> rename to src / python / grpcio_test / grpc_interop / credentials / server1 . key <nl> similarity index 100 % <nl> rename from src / python / interop / interop / credentials / server1 . pem <nl> rename to src / python / grpcio_test / grpc_interop / credentials / server1 . pem <nl> similarity index 100 % <nl> rename from src / python / interop / interop / empty_pb2 . py <nl> rename to src / python / grpcio_test / grpc_interop / empty_pb2 . py <nl> similarity index 100 % <nl> rename from src / python / interop / interop / messages_pb2 . py <nl> rename to src / python / grpcio_test / grpc_interop / messages_pb2 . py <nl> similarity index 99 % <nl> rename from src / python / interop / interop / methods . py <nl> rename to src / python / grpcio_test / grpc_interop / methods . py <nl> mmm a / src / python / interop / interop / methods . py <nl> ppp b / src / python / grpcio_test / grpc_interop / methods . py <nl> <nl> <nl> from grpc . framework . alpha import utilities <nl> <nl> - from interop import empty_pb2 <nl> - from interop import messages_pb2 <nl> + from grpc_interop import empty_pb2 <nl> + from grpc_interop import messages_pb2 <nl> <nl> _TIMEOUT = 7 <nl> <nl> similarity index 100 % <nl> rename from src / python / interop / interop / resources . py <nl> rename to src / python / grpcio_test / grpc_interop / resources . py <nl> similarity index 97 % <nl> rename from src / python / interop / interop / server . py <nl> rename to src / python / grpcio_test / grpc_interop / server . 
py <nl> mmm a / src / python / interop / interop / server . py <nl> ppp b / src / python / grpcio_test / grpc_interop / server . py <nl> <nl> <nl> from grpc . early_adopter import implementations <nl> <nl> - from interop import methods <nl> - from interop import resources <nl> + from grpc_interop import methods <nl> + from grpc_interop import resources <nl> <nl> _ONE_DAY_IN_SECONDS = 60 * 60 * 24 <nl> <nl> similarity index 100 % <nl> rename from src / python / interop / interop / test_pb2 . py <nl> rename to src / python / grpcio_test / grpc_interop / test_pb2 . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / interfaces / __init__ . py <nl> rename to src / python / grpcio_test / grpc_test / __init__ . py <nl> new file mode 100644 <nl> index 00000000000 . . a6f96cd6dbb <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / . gitignore <nl> <nl> + * . a <nl> + * . so <nl> + * . dll <nl> + * . pyc <nl> + * . pyd <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / interfaces / links / __init__ . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / __init__ . py <nl> similarity index 92 % <nl> rename from src / python / src / grpc / _adapter / _blocking_invocation_inline_service_test . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _blocking_invocation_inline_service_test . py <nl> mmm a / src / python / src / grpc / _adapter / _blocking_invocation_inline_service_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / _blocking_invocation_inline_service_test . py <nl> <nl> <nl> import unittest <nl> <nl> - from grpc . _adapter import _face_test_case <nl> - from grpc . framework . face . testing import blocking_invocation_inline_service_test_case as test_case <nl> + from grpc_test . _adapter import _face_test_case <nl> + from grpc_test . framework . face . testing import blocking_invocation_inline_service_test_case as test_case <nl> <nl> <nl> class BlockingInvocationInlineServiceTest ( <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _c_test . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _c_test . py <nl> similarity index 92 % <nl> rename from src / python / src / grpc / _adapter / _event_invocation_synchronous_event_service_test . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _event_invocation_synchronous_event_service_test . py <nl> mmm a / src / python / src / grpc / _adapter / _event_invocation_synchronous_event_service_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / _event_invocation_synchronous_event_service_test . py <nl> <nl> <nl> import unittest <nl> <nl> - from grpc . _adapter import _face_test_case <nl> - from grpc . framework . face . testing import event_invocation_synchronous_event_service_test_case as test_case <nl> + from grpc_test . _adapter import _face_test_case <nl> + from grpc_test . framework . face . testing import event_invocation_synchronous_event_service_test_case as test_case <nl> <nl> <nl> class EventInvocationSynchronousEventServiceTest ( <nl> similarity index 96 % <nl> rename from src / python / src / grpc / _adapter / _face_test_case . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _face_test_case . py <nl> mmm a / src / python / src / grpc / _adapter / _face_test_case . py <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / _face_test_case . 
py <nl> <nl> from grpc . framework . base import util <nl> from grpc . framework . base import implementations as base_implementations <nl> from grpc . framework . face import implementations as face_implementations <nl> - from grpc . framework . face . testing import coverage <nl> - from grpc . framework . face . testing import serial <nl> - from grpc . framework . face . testing import test_case <nl> from grpc . framework . foundation import logging_pool <nl> + from grpc_test . framework . face . testing import coverage <nl> + from grpc_test . framework . face . testing import serial <nl> + from grpc_test . framework . face . testing import test_case <nl> <nl> _TIMEOUT = 3 <nl> _MAXIMUM_TIMEOUT = 90 <nl> similarity index 92 % <nl> rename from src / python / src / grpc / _adapter / _future_invocation_asynchronous_event_service_test . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _future_invocation_asynchronous_event_service_test . py <nl> mmm a / src / python / src / grpc / _adapter / _future_invocation_asynchronous_event_service_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / _future_invocation_asynchronous_event_service_test . py <nl> <nl> <nl> import unittest <nl> <nl> - from grpc . _adapter import _face_test_case <nl> - from grpc . framework . face . testing import future_invocation_asynchronous_event_service_test_case as test_case <nl> + from grpc_test . _adapter import _face_test_case <nl> + from grpc_test . framework . face . testing import future_invocation_asynchronous_event_service_test_case as test_case <nl> <nl> <nl> class FutureInvocationAsynchronousEventServiceTest ( <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _intermediary_low_test . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _intermediary_low_test . py <nl> similarity index 99 % <nl> rename from src / python / src / grpc / _adapter / _links_test . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _links_test . py <nl> mmm a / src / python / src / grpc / _adapter / _links_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / _links_test . py <nl> <nl> import threading <nl> import unittest <nl> <nl> - from grpc . _adapter import _proto_scenarios <nl> - from grpc . _adapter import _test_links <nl> from grpc . _adapter import fore <nl> from grpc . _adapter import rear <nl> from grpc . framework . base import interfaces <nl> from grpc . framework . foundation import logging_pool <nl> + from grpc_test . _adapter import _proto_scenarios <nl> + from grpc_test . _adapter import _test_links <nl> <nl> _IDENTITY = lambda x : x <nl> _TIMEOUT = 32 <nl> similarity index 98 % <nl> rename from src / python / src / grpc / _adapter / _lonely_rear_link_test . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _lonely_rear_link_test . py <nl> mmm a / src / python / src / grpc / _adapter / _lonely_rear_link_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / _lonely_rear_link_test . py <nl> <nl> <nl> import unittest <nl> <nl> - from grpc . _adapter import _test_links <nl> from grpc . _adapter import rear <nl> from grpc . framework . base import interfaces <nl> from grpc . framework . foundation import logging_pool <nl> + from grpc_test . _adapter import _test_links <nl> <nl> _IDENTITY = lambda x : x <nl> _TIMEOUT = 2 <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _low_test . 
py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _low_test . py <nl> similarity index 99 % <nl> rename from src / python / src / grpc / _adapter / _proto_scenarios . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _proto_scenarios . py <nl> mmm a / src / python / src / grpc / _adapter / _proto_scenarios . py <nl> ppp b / src / python / grpcio_test / grpc_test / _adapter / _proto_scenarios . py <nl> <nl> import abc <nl> import threading <nl> <nl> - from grpc . _junkdrawer import math_pb2 <nl> + from grpc_test . _junkdrawer import math_pb2 <nl> <nl> <nl> class ProtoScenario ( object ) : <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _adapter / _test_links . py <nl> rename to src / python / grpcio_test / grpc_test / _adapter / _test_links . py <nl> new file mode 100644 <nl> index 00000000000 . . c3150292885 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / _cython / . gitignore <nl> <nl> + * . h <nl> + * . c <nl> + * . a <nl> + * . so <nl> + * . dll <nl> + * . pyc <nl> + * . pyd <nl> new file mode 100644 <nl> index 00000000000 . . b89398809fa <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / _cython / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / adapter_low_test . py <nl> rename to src / python / grpcio_test / grpc_test / _cython / adapter_low_test . py <nl> similarity index 99 % <nl> rename from src / python / src / grpc / _cython / cygrpc_test . py <nl> rename to src / python / grpcio_test / grpc_test / _cython / cygrpc_test . py <nl> mmm a / src / python / src / grpc / _cython / cygrpc_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _cython / cygrpc_test . 
py <nl> <nl> import unittest <nl> <nl> from grpc . _cython import cygrpc <nl> - from grpc . _cython import test_utilities <nl> + from grpc_test . _cython import test_utilities <nl> <nl> <nl> class TypeSmokeTest ( unittest . TestCase ) : <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _cython / test_utilities . py <nl> rename to src / python / grpcio_test / grpc_test / _cython / test_utilities . py <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / _junkdrawer / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _junkdrawer / math_pb2 . py <nl> rename to src / python / grpcio_test / grpc_test / _junkdrawer / math_pb2 . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / _junkdrawer / stock_pb2 . py <nl> rename to src / python / grpcio_test / grpc_test / _junkdrawer / stock_pb2 . py <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / _links / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . 
<nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 95 % <nl> rename from src / python / src / grpc / _links / _lonely_invocation_link_test . py <nl> rename to src / python / grpcio_test / grpc_test / _links / _lonely_invocation_link_test . py <nl> mmm a / src / python / src / grpc / _links / _lonely_invocation_link_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _links / _lonely_invocation_link_test . py <nl> <nl> <nl> from grpc . _adapter import _intermediary_low <nl> from grpc . _links import invocation <nl> - from grpc . framework . common import test_constants <nl> from grpc . framework . interfaces . links import links <nl> - from grpc . framework . interfaces . links import test_cases <nl> - from grpc . framework . interfaces . links import test_utilities <nl> + from grpc_test . framework . common import test_constants <nl> + from grpc_test . framework . interfaces . links import test_cases <nl> + from grpc_test . framework . interfaces . links import test_utilities <nl> <nl> _NULL_BEHAVIOR = lambda unused_argument : None <nl> <nl> similarity index 98 % <nl> rename from src / python / src / grpc / _links / _proto_scenarios . py <nl> rename to src / python / grpcio_test / grpc_test / _links / _proto_scenarios . py <nl> mmm a / src / python / src / grpc / _links / _proto_scenarios . py <nl> ppp b / src / python / grpcio_test / grpc_test / _links / _proto_scenarios . py <nl> <nl> import abc <nl> import threading <nl> <nl> - from grpc . _junkdrawer import math_pb2 <nl> - from grpc . framework . common import test_constants <nl> + from grpc_test . _junkdrawer import math_pb2 <nl> + from grpc_test . framework . common import test_constants <nl> <nl> <nl> class ProtoScenario ( object ) : <nl> similarity index 97 % <nl> rename from src / python / src / grpc / _links / _transmission_test . py <nl> rename to src / python / grpcio_test / grpc_test / _links / _transmission_test . py <nl> mmm a / src / python / src / grpc / _links / _transmission_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / _links / _transmission_test . py <nl> <nl> import unittest <nl> <nl> from grpc . _adapter import _intermediary_low <nl> - from grpc . 
_links import _proto_scenarios <nl> from grpc . _links import invocation <nl> from grpc . _links import service <nl> - from grpc . framework . common import test_constants <nl> from grpc . framework . interfaces . links import links <nl> - from grpc . framework . interfaces . links import test_cases <nl> - from grpc . framework . interfaces . links import test_utilities <nl> + from grpc_test . _links import _proto_scenarios <nl> + from grpc_test . framework . common import test_constants <nl> + from grpc_test . framework . interfaces . links import test_cases <nl> + from grpc_test . framework . interfaces . links import test_utilities <nl> <nl> _IDENTITY = lambda x : x <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / early_adopter / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 99 % <nl> rename from src / python / src / grpc / early_adopter / implementations_test . py <nl> rename to src / python / grpcio_test / grpc_test / early_adopter / implementations_test . py <nl> mmm a / src / python / src / grpc / early_adopter / implementations_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / early_adopter / implementations_test . py <nl> <nl> <nl> from grpc . early_adopter import implementations <nl> from grpc . framework . alpha import utilities <nl> - from grpc . _junkdrawer import math_pb2 <nl> + from grpc_test . _junkdrawer import math_pb2 <nl> <nl> SERVICE_NAME = ' math . Math ' <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . 
<nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / base / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . 
IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 98 % <nl> rename from src / python / src / grpc / framework / base / implementations_test . py <nl> rename to src / python / grpcio_test / grpc_test / framework / base / implementations_test . py <nl> mmm a / src / python / src / grpc / framework / base / implementations_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / base / implementations_test . py <nl> <nl> import unittest <nl> <nl> from grpc . framework . base import implementations <nl> - from grpc . framework . base import interfaces_test_case <nl> from grpc . framework . base import util <nl> from grpc . framework . foundation import logging_pool <nl> + from grpc_test . framework . base import interfaces_test_case <nl> <nl> POOL_MAX_WORKERS = 10 <nl> DEFAULT_TIMEOUT = 30 <nl> similarity index 99 % <nl> rename from src / python / src / grpc / framework / base / interfaces_test_case . py <nl> rename to src / python / grpcio_test / grpc_test / framework / base / interfaces_test_case . py <nl> mmm a / src / python / src / grpc / framework / base / interfaces_test_case . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / base / interfaces_test_case . py <nl> <nl> from grpc . framework . base import interfaces <nl> from grpc . framework . base import util <nl> from grpc . framework . foundation import stream <nl> - from grpc . framework . foundation import stream_testing <nl> from grpc . framework . foundation import stream_util <nl> + from grpc_test . framework . foundation import stream_testing <nl> <nl> TICK = 0 . 1 <nl> SMALL_TIMEOUT = TICK * 50 <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / common / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . 
IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / common / test_constants . py <nl> rename to src / python / grpcio_test / grpc_test / framework / common / test_constants . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / common / test_control . py <nl> rename to src / python / grpcio_test / grpc_test / framework / common / test_control . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / common / test_coverage . py <nl> rename to src / python / grpcio_test / grpc_test / framework / common / test_coverage . py <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 95 % <nl> rename from src / python / src / grpc / framework / face / _test_case . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / _test_case . py <nl> mmm a / src / python / src / grpc / framework / face / _test_case . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / _test_case . 
py <nl> <nl> " " " Common lifecycle code for in - memory - ticket - exchange Face - layer tests . " " " <nl> <nl> from grpc . framework . face import implementations <nl> - from grpc . framework . face . testing import base_util <nl> - from grpc . framework . face . testing import test_case <nl> from grpc . framework . foundation import logging_pool <nl> + from grpc_test . framework . face . testing import base_util <nl> + from grpc_test . framework . face . testing import test_case <nl> <nl> _TIMEOUT = 3 <nl> _MAXIMUM_POOL_SIZE = 10 <nl> similarity index 92 % <nl> rename from src / python / src / grpc / framework / face / blocking_invocation_inline_service_test . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / blocking_invocation_inline_service_test . py <nl> mmm a / src / python / src / grpc / framework / face / blocking_invocation_inline_service_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / blocking_invocation_inline_service_test . py <nl> <nl> <nl> import unittest <nl> <nl> - from grpc . framework . face import _test_case <nl> - from grpc . framework . face . testing import blocking_invocation_inline_service_test_case as test_case <nl> + from grpc_test . framework . face import _test_case <nl> + from grpc_test . framework . face . testing import blocking_invocation_inline_service_test_case as test_case <nl> <nl> <nl> class BlockingInvocationInlineServiceTest ( <nl> similarity index 92 % <nl> rename from src / python / src / grpc / framework / face / event_invocation_synchronous_event_service_test . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / event_invocation_synchronous_event_service_test . py <nl> mmm a / src / python / src / grpc / framework / face / event_invocation_synchronous_event_service_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / event_invocation_synchronous_event_service_test . py <nl> <nl> <nl> import unittest <nl> <nl> - from grpc . framework . face import _test_case <nl> - from grpc . framework . face . testing import event_invocation_synchronous_event_service_test_case as test_case <nl> + from grpc_test . framework . face import _test_case <nl> + from grpc_test . framework . face . testing import event_invocation_synchronous_event_service_test_case as test_case <nl> <nl> <nl> class EventInvocationSynchronousEventServiceTest ( <nl> similarity index 91 % <nl> rename from src / python / src / grpc / framework / face / future_invocation_asynchronous_event_service_test . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / future_invocation_asynchronous_event_service_test . py <nl> mmm a / src / python / src / grpc / framework / face / future_invocation_asynchronous_event_service_test . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / future_invocation_asynchronous_event_service_test . py <nl> <nl> <nl> import unittest <nl> <nl> - from grpc . framework . face import _test_case <nl> - from grpc . framework . face . testing import future_invocation_asynchronous_event_service_test_case as test_case <nl> + from grpc_test . framework . face import _test_case <nl> + from grpc_test . framework . face . testing import future_invocation_asynchronous_event_service_test_case as test_case <nl> <nl> <nl> class FutureInvocationAsynchronousEventServiceTest ( <nl> new file mode 100644 <nl> index 00000000000 . . 
70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / testing / base_util . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / base_util . py <nl> similarity index 97 % <nl> rename from src / python / src / grpc / framework / face / testing / blocking_invocation_inline_service_test_case . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / blocking_invocation_inline_service_test_case . py <nl> mmm a / src / python / src / grpc / framework / face / testing / blocking_invocation_inline_service_test_case . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / blocking_invocation_inline_service_test_case . py <nl> <nl> import unittest # pylint : disable = unused - import <nl> <nl> from grpc . framework . face import exceptions <nl> - from grpc . framework . face . testing import control <nl> - from grpc . framework . face . testing import coverage <nl> - from grpc . framework . face . testing import digest <nl> - from grpc . framework . face . testing import stock_service <nl> - from grpc . framework . face . testing import test_case <nl> + from grpc_test . framework . face . testing import control <nl> + from grpc_test . framework . face . testing import coverage <nl> + from grpc_test . framework . face . testing import digest <nl> + from grpc_test . framework . face . testing import stock_service <nl> + from grpc_test . framework . face . 
testing import test_case <nl> <nl> _TIMEOUT = 3 <nl> _LONG_TIMEOUT = 45 <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / testing / callback . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / callback . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / testing / control . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / control . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / testing / coverage . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / coverage . py <nl> similarity index 98 % <nl> rename from src / python / src / grpc / framework / face / testing / digest . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / digest . py <nl> mmm a / src / python / src / grpc / framework / face / testing / digest . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / digest . py <nl> <nl> from grpc . framework . common import style <nl> from grpc . framework . face import exceptions <nl> from grpc . framework . face import interfaces as face_interfaces <nl> - from grpc . framework . face . testing import control as testing_control # pylint : disable = unused - import <nl> - from grpc . framework . face . testing import interfaces # pylint : disable = unused - import <nl> - from grpc . framework . face . testing import service as testing_service # pylint : disable = unused - import <nl> from grpc . framework . foundation import stream <nl> from grpc . framework . foundation import stream_util <nl> + from grpc_test . framework . face . testing import control as testing_control # pylint : disable = unused - import <nl> + from grpc_test . framework . face . testing import interfaces # pylint : disable = unused - import <nl> + from grpc_test . framework . face . testing import service as testing_service # pylint : disable = unused - import <nl> <nl> _IDENTITY = lambda x : x <nl> <nl> similarity index 97 % <nl> rename from src / python / src / grpc / framework / face / testing / event_invocation_synchronous_event_service_test_case . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / event_invocation_synchronous_event_service_test_case . py <nl> mmm a / src / python / src / grpc / framework / face / testing / event_invocation_synchronous_event_service_test_case . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / event_invocation_synchronous_event_service_test_case . py <nl> <nl> import unittest <nl> <nl> from grpc . framework . face import interfaces <nl> - from grpc . framework . face . testing import callback as testing_callback <nl> - from grpc . framework . face . testing import control <nl> - from grpc . framework . face . testing import coverage <nl> - from grpc . framework . face . testing import digest <nl> - from grpc . framework . face . testing import stock_service <nl> - from grpc . framework . face . testing import test_case <nl> + from grpc_test . framework . face . testing import callback as testing_callback <nl> + from grpc_test . framework . face . testing import control <nl> + from grpc_test . framework . face . testing import coverage <nl> + from grpc_test . framework . face . testing import digest <nl> + from grpc_test . framework . face . testing import stock_service <nl> + from grpc_test . 
framework . face . testing import test_case <nl> <nl> _TIMEOUT = 3 <nl> <nl> similarity index 98 % <nl> rename from src / python / src / grpc / framework / face / testing / future_invocation_asynchronous_event_service_test_case . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / future_invocation_asynchronous_event_service_test_case . py <nl> mmm a / src / python / src / grpc / framework / face / testing / future_invocation_asynchronous_event_service_test_case . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / future_invocation_asynchronous_event_service_test_case . py <nl> <nl> import unittest <nl> <nl> from grpc . framework . face import exceptions <nl> - from grpc . framework . face . testing import control <nl> - from grpc . framework . face . testing import coverage <nl> - from grpc . framework . face . testing import digest <nl> - from grpc . framework . face . testing import stock_service <nl> - from grpc . framework . face . testing import test_case <nl> from grpc . framework . foundation import future <nl> from grpc . framework . foundation import logging_pool <nl> + from grpc_test . framework . face . testing import control <nl> + from grpc_test . framework . face . testing import coverage <nl> + from grpc_test . framework . face . testing import digest <nl> + from grpc_test . framework . face . testing import stock_service <nl> + from grpc_test . framework . face . testing import test_case <nl> <nl> _TIMEOUT = 3 <nl> _MAXIMUM_POOL_SIZE = 10 <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / testing / interfaces . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / interfaces . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / face / testing / serial . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / serial . py <nl> similarity index 99 % <nl> rename from src / python / src / grpc / framework / face / testing / service . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / service . py <nl> mmm a / src / python / src / grpc / framework / face / testing / service . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / service . py <nl> <nl> <nl> # interfaces is referenced from specification in this module . <nl> from grpc . framework . face import interfaces as face_interfaces # pylint : disable = unused - import <nl> - from grpc . framework . face . testing import interfaces <nl> + from grpc_test . framework . face . testing import interfaces <nl> <nl> <nl> class UnaryUnaryTestMethodImplementation ( interfaces . Method ) : <nl> similarity index 99 % <nl> rename from src / python / src / grpc / framework / face / testing / stock_service . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / stock_service . py <nl> mmm a / src / python / src / grpc / framework / face / testing / stock_service . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / stock_service . py <nl> <nl> " " " Examples of Python implementations of the stock . proto Stock service . " " " <nl> <nl> from grpc . framework . common import cardinality <nl> - from grpc . framework . face . testing import service <nl> from grpc . framework . foundation import abandonment <nl> from grpc . framework . foundation import stream <nl> from grpc . framework . 
foundation import stream_util <nl> - from grpc . _junkdrawer import stock_pb2 <nl> + from grpc_test . framework . face . testing import service <nl> + from grpc_test . _junkdrawer import stock_pb2 <nl> <nl> SYMBOL_FORMAT = ' test symbol : % 03d ' <nl> STREAM_LENGTH = 400 <nl> similarity index 97 % <nl> rename from src / python / src / grpc / framework / face / testing / test_case . py <nl> rename to src / python / grpcio_test / grpc_test / framework / face / testing / test_case . py <nl> mmm a / src / python / src / grpc / framework / face / testing / test_case . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / face / testing / test_case . py <nl> <nl> <nl> # face_interfaces and interfaces are referenced in specification in this module . <nl> from grpc . framework . face import interfaces as face_interfaces # pylint : disable = unused - import <nl> - from grpc . framework . face . testing import interfaces # pylint : disable = unused - import <nl> + from grpc_test . framework . face . testing import interfaces # pylint : disable = unused - import <nl> <nl> <nl> class FaceTestCase ( object ) : <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / foundation / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / _later_test . py <nl> rename to src / python / grpcio_test / grpc_test / framework / foundation / _later_test . py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / _logging_pool_test . py <nl> rename to src / python / grpcio_test / grpc_test / framework / foundation / _logging_pool_test . 
py <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / foundation / stream_testing . py <nl> rename to src / python / grpcio_test / grpc_test / framework / foundation / stream_testing . py <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / interfaces / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 70865191060 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_test / grpc_test / framework / interfaces / links / __init__ . py <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . 
IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> similarity index 99 % <nl> rename from src / python / src / grpc / framework / interfaces / links / test_cases . py <nl> rename to src / python / grpcio_test / grpc_test / framework / interfaces / links / test_cases . py <nl> mmm a / src / python / src / grpc / framework / interfaces / links / test_cases . py <nl> ppp b / src / python / grpcio_test / grpc_test / framework / interfaces / links / test_cases . py <nl> <nl> import abc <nl> import unittest # pylint : disable = unused - import <nl> <nl> - from grpc . framework . common import test_constants <nl> from grpc . framework . interfaces . links import links <nl> - from grpc . framework . interfaces . links import test_utilities <nl> + from grpc_test . framework . common import test_constants <nl> + from grpc_test . framework . interfaces . links import test_utilities <nl> <nl> <nl> def at_least_n_payloads_received_predicate ( n ) : <nl> similarity index 100 % <nl> rename from src / python / src / grpc / framework / interfaces / links / test_utilities . py <nl> rename to src / python / grpcio_test / grpc_test / framework / interfaces / links / test_utilities . py <nl> similarity index 93 % <nl> rename from src / python / interop / setup . py <nl> rename to src / python / grpcio_test / setup . py <nl> mmm a / src / python / interop / setup . py <nl> ppp b / src / python / grpcio_test / setup . py <nl> <nl> <nl> import setuptools <nl> <nl> - _PACKAGES = ( <nl> - ' interop ' , <nl> - ) <nl> + _PACKAGES = setuptools . find_packages ( ' . ' , exclude = [ ' * . _cython ' , ' * . _cython . * ' ] ) <nl> <nl> _PACKAGE_DIRECTORIES = { <nl> - ' interop ' : ' interop ' , <nl> + ' ' : ' . ' , <nl> } <nl> <nl> _PACKAGE_DATA = { <nl> - ' interop ' : [ <nl> + ' grpc_interop ' : [ <nl> ' credentials / ca . pem ' , ' credentials / server1 . key ' , <nl> ' credentials / server1 . pem ' , ] <nl> } <nl> <nl> _INSTALL_REQUIRES = [ ' oauth2client > = 1 . 4 . 7 ' , ' grpcio > = 0 . 10 . 0a0 ' ] <nl> <nl> setuptools . setup ( <nl> - name = ' interop ' , <nl> + name = ' grpcio_test ' , <nl> version = ' 0 . 0 . 1 ' , <nl> packages = _PACKAGES , <nl> package_dir = _PACKAGE_DIRECTORIES , <nl> mmm a / tools / distrib / python / docgen . py <nl> ppp b / tools / distrib / python / docgen . py <nl> <nl> PROJECT_ROOT = os . path . abspath ( os . path . join ( SCRIPT_DIR , ' . . ' , ' . . ' , ' . . ' ) ) <nl> <nl> CONFIG = args . config <nl> - SETUP_PATH = os . path . join ( PROJECT_ROOT , ' src / python / src / setup . py ' ) <nl> - DOC_PATH = os . path . join ( PROJECT_ROOT , ' src / python / src / doc / build ' ) <nl> + SETUP_PATH = os . path . join ( PROJECT_ROOT , ' src / python / grpcio / setup . py ' ) <nl> + DOC_PATH = os . path . join ( PROJECT_ROOT , ' src / python / grpcio / doc / build ' ) <nl> INCLUDE_PATH = os . path . join ( PROJECT_ROOT , ' include ' ) <nl> LIBRARY_PATH = os . path . join ( PROJECT_ROOT , ' libs / { } ' . 
format ( CONFIG ) ) <nl> VIRTUALENV_DIR = os . path . join ( SCRIPT_DIR , ' distrib_virtualenv ' ) <nl> mmm a / tools / distrib / python / submit . py <nl> ppp b / tools / distrib / python / submit . py <nl> <nl> <nl> # Move to the root directory of Python GRPC . <nl> pkgdir = os . path . join ( os . path . dirname ( os . path . abspath ( __file__ ) ) , <nl> - ' . . / . . / . . / src / python / src ' ) <nl> + ' . . / . . / . . / src / python / grpcio ' ) <nl> # Remove previous distributions ; they somehow confuse twine . <nl> try : <nl> shutil . rmtree ( os . path . join ( pkgdir , ' dist / ' ) ) <nl> mmm a / tools / run_tests / build_python . sh <nl> ppp b / tools / run_tests / build_python . sh <nl> make_virtualenv ( ) { <nl> virtualenv - p ` which " python " $ 1 ` $ virtualenv_name <nl> source $ virtualenv_name / bin / activate <nl> pip install - r src / python / requirements . txt <nl> - CFLAGS = " - I $ root / include - std = c89 " LDFLAGS = - L $ root / libs / $ CONFIG GRPC_PYTHON_BUILD_WITH_CYTHON = 1 pip install src / python / src <nl> - pip install src / python / interop <nl> + CFLAGS = " - I $ root / include - std = c89 " LDFLAGS = - L $ root / libs / $ CONFIG GRPC_PYTHON_BUILD_WITH_CYTHON = 1 pip install src / python / grpcio <nl> + pip install src / python / grpcio_test <nl> else <nl> source $ virtualenv_name / bin / activate <nl> # Uninstall and re - install the packages we care about . Don ' t use <nl> make_virtualenv ( ) { <nl> # unnecessarily to dependencies . Don ' t use - - no - deps to avoid missing <nl> # dependency upgrades . <nl> ( yes | pip uninstall grpcio ) | | true <nl> - ( yes | pip uninstall interop ) | | true <nl> - ( CFLAGS = " - I $ root / include - std = c89 " LDFLAGS = - L $ root / libs / $ CONFIG GRPC_PYTHON_BUILD_WITH_CYTHON = 1 pip install src / python / src ) | | ( <nl> + ( yes | pip uninstall grpcio_test ) | | true <nl> + ( CFLAGS = " - I $ root / include - std = c89 " LDFLAGS = - L $ root / libs / $ CONFIG GRPC_PYTHON_BUILD_WITH_CYTHON = 1 pip install src / python / grpcio ) | | ( <nl> # Fall back to rebuilding the entire environment <nl> rm - rf $ virtualenv_name <nl> make_virtualenv $ 1 <nl> ) <nl> - pip install src / python / interop <nl> + pip install src / python / grpcio_test <nl> fi <nl> } <nl> <nl> mmm a / tools / run_tests / python_tests . json <nl> ppp b / tools / run_tests / python_tests . json <nl> <nl> [ <nl> { <nl> - " module " : " grpc . _adapter . _c_test " , <nl> + " module " : " grpc_test . _adapter . _c_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _adapter . _low_test " , <nl> + " module " : " grpc_test . _adapter . _low_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _adapter . _intermediary_low_test " , <nl> + " module " : " grpc_test . _adapter . _intermediary_low_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _adapter . _links_test " , <nl> + " module " : " grpc_test . _adapter . _links_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _adapter . _lonely_rear_link_test " , <nl> + " module " : " grpc_test . _adapter . _lonely_rear_link_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _adapter . _blocking_invocation_inline_service_test " , <nl> + " module " : " grpc_test . _adapter . 
_blocking_invocation_inline_service_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _adapter . _event_invocation_synchronous_event_service_test " , <nl> + " module " : " grpc_test . _adapter . _event_invocation_synchronous_event_service_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _adapter . _future_invocation_asynchronous_event_service_test " , <nl> + " module " : " grpc_test . _adapter . _future_invocation_asynchronous_event_service_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _links . _lonely_invocation_link_test " , <nl> + " module " : " grpc_test . _links . _lonely_invocation_link_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . _links . _transmission_test " , <nl> + " module " : " grpc_test . _links . _transmission_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . early_adopter . implementations_test " , <nl> + " module " : " grpc_test . early_adopter . implementations_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . framework . base . implementations_test " , <nl> + " module " : " grpc_test . framework . base . implementations_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . framework . face . blocking_invocation_inline_service_test " , <nl> + " module " : " grpc_test . framework . face . blocking_invocation_inline_service_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . framework . face . event_invocation_synchronous_event_service_test " , <nl> + " module " : " grpc_test . framework . face . event_invocation_synchronous_event_service_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . framework . face . future_invocation_asynchronous_event_service_test " , <nl> + " module " : " grpc_test . framework . face . future_invocation_asynchronous_event_service_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . framework . foundation . _later_test " , <nl> + " module " : " grpc_test . framework . foundation . _later_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " grpc . framework . foundation . _logging_pool_test " , <nl> + " module " : " grpc_test . framework . foundation . _logging_pool_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " interop . _insecure_interop_test " , <nl> + " module " : " grpc_interop . _insecure_interop_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> } , <nl> { <nl> - " module " : " interop . _secure_interop_test " , <nl> + " module " : " grpc_interop . _secure_interop_test " , <nl> " pythonVersions " : [ <nl> " 2 . 7 " <nl> ] <nl> | Merge pull request from soltanmm / reorganize - python | grpc/grpc | 5f8d05bb9c72d950ca35e27a8da8a303765c85cb | 2015-07-31T16:43:33Z |
mmm a / src / bootstrapper . cc <nl> ppp b / src / bootstrapper . cc <nl> <nl> # include " platform . h " <nl> # include " snapshot . h " <nl> # include " trig - table . h " <nl> + # include " extensions / free - buffer - extension . h " <nl> # include " extensions / externalize - string - extension . h " <nl> # include " extensions / gc - extension . h " <nl> # include " extensions / statistics - extension . h " <nl> void Bootstrapper : : Initialize ( bool create_heap_objects ) { <nl> <nl> <nl> void Bootstrapper : : InitializeOncePerProcess ( ) { <nl> + # ifdef ADDRESS_SANITIZER <nl> + FreeBufferExtension : : Register ( ) ; <nl> + # endif <nl> GCExtension : : Register ( ) ; <nl> ExternalizeStringExtension : : Register ( ) ; <nl> StatisticsExtension : : Register ( ) ; <nl> bool Genesis : : InstallExtensions ( Handle < Context > native_context , <nl> current = current - > next ( ) ; <nl> } <nl> <nl> + # ifdef ADDRESS_SANITIZER <nl> + if ( FLAG_expose_free_buffer ) { <nl> + InstallExtension ( isolate , " v8 / free - buffer " , & extension_states ) ; <nl> + } <nl> + # endif <nl> if ( FLAG_expose_gc ) InstallExtension ( isolate , " v8 / gc " , & extension_states ) ; <nl> if ( FLAG_expose_externalize_string ) { <nl> InstallExtension ( isolate , " v8 / externalize " , & extension_states ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 4040c90bffe <nl> mmm / dev / null <nl> ppp b / src / extensions / free - buffer - extension . cc <nl> <nl> + / / Copyright 2013 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + # include " free - buffer - extension . h " <nl> + # include " platform . h " <nl> + # include " v8 . 
h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + <nl> + <nl> + v8 : : Handle < v8 : : FunctionTemplate > FreeBufferExtension : : GetNativeFunction ( <nl> + v8 : : Handle < v8 : : String > str ) { <nl> + return v8 : : FunctionTemplate : : New ( FreeBufferExtension : : FreeBuffer ) ; <nl> + } <nl> + <nl> + <nl> + void FreeBufferExtension : : FreeBuffer ( <nl> + const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> + v8 : : Handle < v8 : : ArrayBuffer > arrayBuffer = args [ 0 ] . As < v8 : : ArrayBuffer > ( ) ; <nl> + v8 : : ArrayBuffer : : Contents contents = arrayBuffer - > Externalize ( ) ; <nl> + V8 : : ArrayBufferAllocator ( ) - > Free ( contents . Data ( ) , contents . ByteLength ( ) ) ; <nl> + } <nl> + <nl> + <nl> + void FreeBufferExtension : : Register ( ) { <nl> + static char buffer [ 100 ] ; <nl> + Vector < char > temp_vector ( buffer , sizeof ( buffer ) ) ; <nl> + OS : : SNPrintF ( temp_vector , " native function freeBuffer ( ) ; " ) ; <nl> + <nl> + static FreeBufferExtension buffer_free_extension ( buffer ) ; <nl> + static v8 : : DeclareExtension declaration ( & buffer_free_extension ) ; <nl> + } <nl> + <nl> + } } / / namespace v8 : : internal <nl> new file mode 100644 <nl> index 00000000000 . . 29ffbc014ec <nl> mmm / dev / null <nl> ppp b / src / extensions / free - buffer - extension . h <nl> <nl> + / / Copyright 2013 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + # ifndef V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_ <nl> + # define V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_ <nl> + <nl> + # include " v8 . 
h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + <nl> + class FreeBufferExtension : public v8 : : Extension { <nl> + public : <nl> + explicit FreeBufferExtension ( const char * source ) <nl> + : v8 : : Extension ( " v8 / free - buffer " , source ) { } <nl> + virtual v8 : : Handle < v8 : : FunctionTemplate > GetNativeFunction ( <nl> + v8 : : Handle < v8 : : String > name ) ; <nl> + static void FreeBuffer ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) ; <nl> + static void Register ( ) ; <nl> + } ; <nl> + <nl> + } } / / namespace v8 : : internal <nl> + <nl> + # endif / / V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_ <nl> mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_bool ( enable_vldr_imm , false , <nl> / / bootstrapper . cc <nl> DEFINE_string ( expose_natives_as , NULL , " expose natives in global object " ) <nl> DEFINE_string ( expose_debug_as , NULL , " expose debug in global object " ) <nl> + # ifdef ADDRESS_SANITIZER <nl> + DEFINE_bool ( expose_free_buffer , false , " expose freeBuffer extension " ) <nl> + # endif <nl> DEFINE_bool ( expose_gc , false , " expose gc extension " ) <nl> DEFINE_string ( expose_gc_as , NULL , <nl> " expose gc extension under the specified name " ) <nl> mmm a / tools / gyp / v8 . gyp <nl> ppp b / tools / gyp / v8 . gyp <nl> <nl> ' . . / . . / src / execution . h ' , <nl> ' . . / . . / src / extensions / externalize - string - extension . cc ' , <nl> ' . . / . . / src / extensions / externalize - string - extension . h ' , <nl> + ' . . / . . / src / extensions / free - buffer - extension . cc ' , <nl> + ' . . / . . / src / extensions / free - buffer - extension . h ' , <nl> ' . . / . . / src / extensions / gc - extension . cc ' , <nl> ' . . / . . / src / extensions / gc - extension . h ' , <nl> ' . . / . . / src / extensions / statistics - extension . cc ' , <nl> | Provide " freeBuffer ( ) " primitive for testing under ASan . | v8/v8 | f7927265f25785fefb6fe9579de36f99680ab363 | 2013-11-27T09:22:04Z |
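A minimal sketch of the defect class this freeBuffer ( ) primitive is meant to provoke (illustrative only, not V8 code): per the diff, freeBuffer ( ) externalizes an ArrayBuffer and frees its backing store, so any later access through a live view is a heap-use-after-free that AddressSanitizer can report. The file name and demo below are hypothetical; build with clang++ -fsanitize=address to see the report at the marked read.

// use_after_free.cc -- hypothetical standalone demo, not part of V8.
#include <cstdio>
#include <cstdlib>

int main() {
  // Stands in for an ArrayBuffer's externalized backing store.
  char* backing_store = static_cast<char*>(std::malloc(100));
  backing_store[0] = 42;
  // freeBuffer(ab) does the moral equivalent of this for the JS object:
  std::free(backing_store);
  // Any later read through a live view is a use-after-free; under ASan
  // this line aborts with a heap-use-after-free report.
  std::printf("%d\n", backing_store[0]);
  return 0;
}

Gating the primitive behind --expose-free-buffer, and only in ADDRESS_SANITIZER builds, keeps the footgun out of normal configurations while letting regression tests reach it from script.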
mmm a / tensorflow / core / profiler / convert / BUILD <nl> ppp b / tensorflow / core / profiler / convert / BUILD <nl> tf_cc_test ( <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> " / / tensorflow / core / profiler / protobuf : op_metrics_proto_cc " , <nl> + " / / tensorflow / core / profiler / utils : op_metrics_db_utils " , <nl> " / / tensorflow / core / profiler / utils : time_utils " , <nl> " / / tensorflow / core / profiler / utils : xplane_builder " , <nl> " / / tensorflow / core / profiler / utils : xplane_schema " , <nl> mmm a / tensorflow / core / profiler / convert / op_stats_to_tf_stats . cc <nl> ppp b / tensorflow / core / profiler / convert / op_stats_to_tf_stats . cc <nl> TfStatsTable GenerateTfStatsTable ( const OpMetricsDb & host_tf_metrics_db , <nl> } <nl> double total_device_time_us = PicosToMicros ( total_device_time_ps ) ; <nl> for ( const OpMetrics * metrics : SortedOpMetricsDb ( device_tf_metrics_db ) ) { <nl> - if ( exclude_idle & & metrics - > category ( ) = = " IDLE " ) continue ; <nl> + if ( exclude_idle & & IsIdleOp ( * metrics ) ) continue ; <nl> TfStatsRecord * record = tf_stats_table . add_tf_stats_record ( ) ; <nl> * record = ConvertOpMetricsToTfStatsRecord ( <nl> / * on_device = * / true , * metrics , ridge_point ) ; <nl> TfStatsTable GenerateTfStatsTable ( const OpMetricsDb & host_tf_metrics_db , <nl> double total_host_time_us = PicosToMicros ( total_host_time_ps ) ; <nl> for ( const OpMetrics * metrics : <nl> tensorflow : : profiler : : SortedOpMetricsDb ( host_tf_metrics_db ) ) { <nl> - if ( exclude_idle & & metrics - > category ( ) = = " IDLE " ) continue ; <nl> + if ( exclude_idle & & IsIdleOp ( * metrics ) ) continue ; <nl> TfStatsRecord * record = tf_stats_table . add_tf_stats_record ( ) ; <nl> * record = ConvertOpMetricsToTfStatsRecord ( <nl> / * on_device = * / false , * metrics , ridge_point ) ; <nl> mmm a / tensorflow / core / profiler / convert / xplane_to_op_metrics_db_test . cc <nl> ppp b / tensorflow / core / profiler / convert / xplane_to_op_metrics_db_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / profiler / protobuf / op_metrics . pb . h " <nl> + # include " tensorflow / core / profiler / utils / op_metrics_db_utils . h " <nl> # include " tensorflow / core / profiler / utils / time_utils . h " <nl> # include " tensorflow / core / profiler / utils / xplane_builder . h " <nl> # include " tensorflow / core / profiler / utils / xplane_schema . h " <nl> TEST ( ConvertXPlaneToOpMetricsDb , HostOpMetricsDb ) { <nl> EXPECT_EQ ( NanosToPicos ( kTfOp1DurationNs ) * 2 , op_1 . time_ps ( ) ) ; <nl> <nl> const OpMetrics & idle = op_metrics . metrics_db ( ) . at ( 1 ) ; <nl> - EXPECT_EQ ( " IDLE " , idle . name ( ) ) ; <nl> + EXPECT_EQ ( kIdle , idle . name ( ) ) ; <nl> / / Idle time is the gap between Op2 start and the end of Op1 , which is 2000ns . <nl> EXPECT_EQ ( NanosToPicos ( 2000 ) , idle . time_ps ( ) ) ; <nl> <nl> TEST ( ConvertXPlaneToOpMetricsDb , DeviceOpMetricsDb ) { <nl> EXPECT_EQ ( NanosToPicos ( kTfOp2DurationNs ) , op_2 . time_ps ( ) ) ; <nl> <nl> const OpMetrics & idle = op_metrics . metrics_db ( ) . at ( 2 ) ; <nl> - EXPECT_EQ ( " IDLE " , idle . name ( ) ) ; <nl> + EXPECT_EQ ( kIdle , idle . name ( ) ) ; <nl> / / GPU is always busy in this example . <nl> EXPECT_EQ ( NanosToPicos ( 0 ) , idle . time_ps ( ) ) ; <nl> } <nl> mmm a / tensorflow / core / profiler / utils / op_metrics_db_utils . 
cc <nl> ppp b / tensorflow / core / profiler / utils / op_metrics_db_utils . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> namespace profiler { <nl> + <nl> + const absl : : string_view kIdle = " IDLE " ; <nl> + <nl> namespace { <nl> <nl> class DeviceTfOpMetricsDbBuilder : public OpMetricsDbBuilder { <nl> uint64 IdleTimePs ( const OpMetricsDb & metrics_db ) { <nl> void AddIdleOp ( OpMetricsDb * db ) { <nl> uint64 idle_time_ps = IdleTimePs ( * db ) ; <nl> OpMetrics * metrics = db - > add_metrics_db ( ) ; <nl> - metrics - > set_name ( " IDLE " ) ; <nl> - metrics - > set_category ( " IDLE " ) ; <nl> - metrics - > set_occurrences ( 1 ) ; <nl> + metrics - > set_name ( string ( kIdle ) ) ; <nl> + metrics - > set_category ( string ( kIdle ) ) ; <nl> + metrics - > set_occurrences ( 0 ) ; <nl> metrics - > set_time_ps ( idle_time_ps ) ; <nl> metrics - > set_self_time_ps ( idle_time_ps ) ; <nl> } <nl> OpMetricsDb CreateTfMetricsDbFromDeviceOpMetricsDb ( <nl> builder . UpdateTfOpMetricsWithDeviceOpMetrics ( tf_op . name , tf_op . type , <nl> device_op_metrics ) ; <nl> } else { <nl> - DCHECK_EQ ( device_op_metrics . name ( ) , " IDLE " ) ; <nl> + DCHECK ( IsIdleOp ( device_op_metrics ) ) ; <nl> if ( with_idle ) { <nl> - builder . UpdateTfOpMetricsWithDeviceOpMetrics ( " IDLE " , " IDLE " , <nl> + builder . UpdateTfOpMetricsWithDeviceOpMetrics ( kIdle , kIdle , <nl> device_op_metrics ) ; <nl> } <nl> } <nl> mmm a / tensorflow / core / profiler / utils / op_metrics_db_utils . h <nl> ppp b / tensorflow / core / profiler / utils / op_metrics_db_utils . h <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> namespace profiler { <nl> + <nl> + / / The name of OpMetrics to represent the idle time . <nl> + ABSL_CONST_INIT extern const absl : : string_view kIdle ; <nl> + <nl> / / Helps build an op metrics database ( borrowed ) . <nl> / / Enables fast lookup of existing ops and prevents the creation of duplicate <nl> / / ops . It is the user ' s responsibility to ensure an op metrics database <nl> uint64 IdleTimePs ( const OpMetricsDb & metrics_db ) ; <nl> / / must have been set . <nl> void AddIdleOp ( OpMetricsDb * db ) ; <nl> <nl> + / / Returns true if the given metrics represents idle time . <nl> + inline bool IsIdleOp ( const OpMetrics & metrics ) { <nl> + return metrics . name ( ) = = kIdle ; <nl> + } <nl> + <nl> / / Converts from the device op metrics to Tf - op metrics . <nl> OpMetricsDb CreateTfMetricsDbFromDeviceOpMetricsDb ( <nl> const OpMetricsDb & device_op_metrics_db , bool with_idle = true ) ; <nl> | Add a const string for the idle op ' s name and set the idle op ' s occurrences to 0 . | tensorflow/tensorflow | 2faa541d4275670a88d4a04f30d0ac4b6f16a26a | 2020-03-25T16:19:39Z |
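A minimal sketch (illustrative, not the TensorFlow implementation) of the pattern this commit settles on: one shared kIdle constant plus an IsIdleOp() predicate instead of scattered "IDLE" string literals, with the idle op carrying zero occurrences because it is a synthetic gap rather than an executed op. The kIdle, IsIdleOp, and OpMetrics names mirror the diff; everything else (MakeIdleOp, the simplified struct) is a stand-in.

// idle_op_sketch.cc -- simplified stand-in types, not TF code.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

constexpr const char* kIdle = "IDLE";

struct OpMetrics {
  std::string name;
  uint64_t self_time_ps = 0;
  uint64_t occurrences = 0;
};

bool IsIdleOp(const OpMetrics& m) { return m.name == kIdle; }

// Idle time is whatever part of the profiling window is not attributed
// to real ops; occurrences stays 0 since nothing actually ran.
OpMetrics MakeIdleOp(uint64_t total_time_ps,
                     const std::vector<OpMetrics>& db) {
  uint64_t busy_ps = 0;
  for (const auto& m : db) busy_ps += m.self_time_ps;
  uint64_t idle_ps = total_time_ps > busy_ps ? total_time_ps - busy_ps : 0;
  return {std::string(kIdle), idle_ps, /*occurrences=*/0};
}

int main() {
  std::vector<OpMetrics> db = {{"MatMul", 600, 3}, {"Relu", 150, 3}};
  db.push_back(MakeIdleOp(/*total_time_ps=*/1000, db));
  for (const auto& m : db)
    if (!IsIdleOp(m))  // the exclude_idle filter from the diff
      std::cout << m.name << ": " << m.self_time_ps << " ps\n";
}

Centralizing the literal also lets the exclude_idle checks in op_stats_to_tf_stats.cc and the test expectations above read off the same constant.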
mmm a / cyber / python / cyber_py3 / BUILD <nl> ppp b / cyber / python / cyber_py3 / BUILD <nl> py_library ( <nl> name = " cyber_time " , <nl> srcs = [ " cyber_time . py " ] , <nl> data = [ <nl> - " / / cyber / py_wrapper : _cyber_py3 . so " , <nl> - " / / cyber / py_wrapper : _cyber_time_py3 . so " , <nl> + " / / cyber / python / internal : _cyber_py3 . so " , <nl> + " / / cyber / python / internal : _cyber_time_py3 . so " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> name = " cyber_timer " , <nl> srcs = [ " cyber_timer . py " ] , <nl> data = [ <nl> - " / / cyber / py_wrapper : _cyber_timer_py3 . so " , <nl> + " / / cyber / python / internal : _cyber_timer_py3 . so " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> name = " cyber " , <nl> srcs = [ " cyber . py " ] , <nl> data = [ <nl> - " / / cyber / py_wrapper : _cyber_py3 . so " , <nl> + " / / cyber / python / internal : _cyber_py3 . so " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> name = " parameter " , <nl> srcs = [ " parameter . py " ] , <nl> data = [ <nl> - " / / cyber / py_wrapper : _cyber_parameter_py3 . so " , <nl> + " / / cyber / python / internal : _cyber_parameter_py3 . so " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> name = " record " , <nl> srcs = [ " record . py " ] , <nl> data = [ <nl> - " / / cyber / py_wrapper : _cyber_record_py3 . so " , <nl> + " / / cyber / python / internal : _cyber_record_py3 . so " , <nl> ] , <nl> ) <nl> mmm a / cyber / python / cyber_py3 / record . py <nl> ppp b / cyber / python / cyber_py3 / record . py <nl> <nl> sys . path . append ( CYBER_DIR + " / cyber / " ) <nl> <nl> wrapper_lib_path = os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , <nl> - ' . . / . . / py_wrapper ' ) ) <nl> + ' . . / internal ' ) ) <nl> sys . path . insert ( 0 , wrapper_lib_path ) <nl> _CYBER_RECORD = importlib . import_module ( ' _cyber_record_py3 ' ) <nl> PyBagMessage = collections . namedtuple ( ' PyBagMessage ' , <nl> | Python3 : update cyber_py3 deps on / / cyber / python / internal : xxx | ApolloAuto/apollo | ef7128818071911bda0cbd37bea550b58900b175 | 2020-06-11T21:45:36Z |
mmm a / arangod / Aql / ExecutionBlock . cpp <nl> ppp b / arangod / Aql / ExecutionBlock . cpp <nl> void AggregatorGroup : : addValues ( AqlItemBlock const * src , <nl> / / / @ brief batch size value <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - size_t const ExecutionBlock : : DefaultBatchSize = 1000 ; <nl> + size_t const ExecutionBlock : : DefaultBatchSize = 11 ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - constructors / destructors <nl> int SingletonBlock : : shutdown ( int errorCode ) { <nl> } <nl> <nl> int SingletonBlock : : getOrSkipSome ( size_t , / / atLeast , <nl> - size_t , / / atMost , <nl> + size_t atMost , / / atMost , <nl> bool skipping , <nl> AqlItemBlock * & result , <nl> size_t & skipped ) { <nl> <nl> + std : : cout < < " SingletonBlock : : getOrSkipSome atMost = " < < atMost < < " \ n " ; <nl> TRI_ASSERT ( result = = nullptr & & skipped = = 0 ) ; <nl> <nl> if ( _done ) { <nl> IndexRangeBlock : : IndexRangeBlock ( ExecutionEngine * engine , <nl> : ExecutionBlock ( engine , en ) , <nl> _collection ( en - > collection ( ) ) , <nl> _posInDocs ( 0 ) , <nl> - _allBoundsConstant ( true ) { <nl> + _allBoundsConstant ( true ) , <nl> + _skiplistIterator ( nullptr ) { <nl> <nl> std : : vector < std : : vector < RangeInfo > > const & orRanges = en - > _ranges ; <nl> TRI_ASSERT ( en - > _index ! = nullptr ) ; <nl> int IndexRangeBlock : : initialize ( ) { <nl> return res ; <nl> } <nl> <nl> - bool IndexRangeBlock : : readIndex ( size_t atMost ) { <nl> - / / This is either called from initialize if all bounds are constant , <nl> - / / in this case it is never called again . If there is at least one <nl> - / / variable bound , then readIndex is called once for every item coming <nl> - / / in from our dependency . In that case , it is guaranteed that <nl> - / / _buffer is not empty , in particular _buffer . front ( ) is defined <nl> - / / _pos points to a position in _buffer . front ( ) <nl> - / / Therefore , we can use the register values in _buffer . front ( ) in row <nl> - / / _pos to evaluate the variable bounds . <nl> - <nl> - if ( _documents . empty ( ) ) { <nl> - _documents . reserve ( atMost ) ; <nl> - } <nl> - else { / / FIXME does this trash some stuff unnecessarily ? <nl> - _documents . clear ( ) ; <nl> - } <nl> + / / init the index for reading , this should be called once per new incoming <nl> + / / block ! <nl> <nl> + bool IndexRangeBlock : : initIndex ( ) { <nl> + <nl> auto en = static_cast < IndexRangeNode const * > ( getPlanNode ( ) ) ; <nl> IndexOrCondition const * condition = & en - > _ranges ; <nl> <nl> bool IndexRangeBlock : : readIndex ( size_t atMost ) { <nl> <nl> condition = newCondition . get ( ) ; <nl> } <nl> + <nl> + if ( en - > _index - > type = = TRI_IDX_TYPE_SKIPLIST_INDEX ) { <nl> + initSkiplistIndex ( * condition ) ; <nl> + return ( _skiplistIterator ! = nullptr ) ; <nl> + } / / TODO the other cases ! ! <nl> + else { <nl> + TRI_ASSERT ( false ) ; <nl> + } <nl> + return false ; / / FIXME remove this <nl> + } <nl> + <nl> + / / this is called every time everything in _documents has been passed on <nl> + <nl> + bool IndexRangeBlock : : readIndex ( size_t atMost ) { <nl> + / / TODO : update this comment , which is now out of date ! ! <nl> + / / This is either called from initialize if all bounds are constant , <nl> + / / in this case it is never called again . 
If there is at least one <nl> + / / variable bound , then readIndex is called once for every item coming <nl> + / / in from our dependency . In that case , it is guaranteed that <nl> + / / _buffer is not empty , in particular _buffer . front ( ) is defined <nl> + / / _pos points to a position in _buffer . front ( ) <nl> + / / Therefore , we can use the register values in _buffer . front ( ) in row <nl> + / / _pos to evaluate the variable bounds . <nl> + <nl> + if ( _documents . empty ( ) ) { <nl> + _documents . reserve ( atMost ) ; <nl> + } <nl> + else { <nl> + _documents . clear ( ) ; <nl> + } <nl> + <nl> + auto en = static_cast < IndexRangeNode const * > ( getPlanNode ( ) ) ; <nl> + IndexOrCondition const * condition = & en - > _ranges ; / / TODO remove this line <nl> <nl> if ( en - > _index - > type = = TRI_IDX_TYPE_PRIMARY_INDEX ) { <nl> / / atMost not passed since only equality is supported <nl> - readPrimaryIndex ( * condition ) ; <nl> + readPrimaryIndex ( * condition ) ; / / TODO correct <nl> } <nl> else if ( en - > _index - > type = = TRI_IDX_TYPE_HASH_INDEX ) { <nl> - readHashIndex ( * condition , atMost ) ; <nl> + readHashIndex ( * condition , atMost ) ; / / TODO correct <nl> } <nl> else if ( en - > _index - > type = = TRI_IDX_TYPE_SKIPLIST_INDEX ) { <nl> - readSkiplistIndex ( * condition , atMost ) ; <nl> + readSkiplistIndex ( atMost ) ; <nl> } <nl> else if ( en - > _index - > type = = TRI_IDX_TYPE_EDGE_INDEX ) { <nl> / / atMost not passed since only equality is supported <nl> - readEdgeIndex ( * condition ) ; <nl> + readEdgeIndex ( * condition ) ; / / TODO correct <nl> } <nl> else { <nl> TRI_ASSERT ( false ) ; <nl> int IndexRangeBlock : : initializeCursor ( AqlItemBlock * items , size_t pos ) { <nl> <nl> AqlItemBlock * IndexRangeBlock : : getSome ( size_t atLeast , <nl> size_t atMost ) { <nl> + std : : cout < < " IndexRangeBlock : : getSome atMost = " < < atMost < < " \ n " ; <nl> if ( _done ) { <nl> return nullptr ; <nl> } <nl> AqlItemBlock * IndexRangeBlock : : getSome ( size_t atLeast , <nl> } <nl> _pos = 0 ; / / this is in the first block <nl> <nl> - / / This is a new item , so let ' s read the index if bounds are variable : <nl> - / / if ( ! _allBoundsConstant ) { <nl> + / / This is a new item , so let ' s init and read the index <nl> + if ( initIndex ( ) ) { / / successfully initted the index <nl> readIndex ( atMost ) ; <nl> - / / } <nl> - <nl> + } <nl> + else { / / failed to init the index , nothing to pass on <nl> + _done = true ; <nl> + return nullptr ; <nl> + } <nl> _posInDocs = 0 ; / / position in _documents . . . <nl> } <nl> <nl> AqlItemBlock * IndexRangeBlock : : getSome ( size_t atLeast , <nl> <nl> _posInDocs = 0 ; <nl> <nl> - if ( + + _pos > = cur - > size ( ) ) { <nl> - _buffer . pop_front ( ) ; / / does not throw <nl> - delete cur ; <nl> - _pos = 0 ; <nl> - } <nl> - <nl> - / / let ' s read the index if bounds are variable : <nl> - if ( ! _buffer . empty ( ) ) { <nl> - readIndex ( atMost ) ; <nl> + if ( ! readIndex ( atMost ) ) { / / no more output from this version of the index <nl> + if ( + + _pos > = cur - > size ( ) ) { <nl> + _buffer . pop_front ( ) ; / / does not throw <nl> + delete cur ; <nl> + _pos = 0 ; <nl> + } <nl> + if ( ! _buffer . empty ( ) ) { <nl> + initIndex ( ) ; / / TODO if this returns false , what to do ? <nl> + readIndex ( atMost ) ; <nl> + } <nl> + / / If _buffer is empty , then we will fetch a new block in the next call <nl> + / / and then init / read the index . 
<nl> } <nl> - / / If _buffer is empty , then we will fetch a new block in the next call <nl> - / / and then read the index . <nl> - <nl> } <nl> } <nl> while ( res . get ( ) = = nullptr ) ; <nl> void IndexRangeBlock : : readHashIndex ( IndexOrCondition const & ranges , size_t atMo <nl> } ; <nl> <nl> auto setupSearchValue = [ & ] ( ) { <nl> - size_t const n = hashIndex - > _paths . _length ; <nl> + size_t const n = ( std : : min ) ( hashIndex - > _paths . _length , atMost ) ; <nl> searchValue . _length = 0 ; <nl> searchValue . _values = static_cast < TRI_shaped_json_t * > ( TRI_Allocate ( TRI_CORE_MEM_ZONE , <nl> n * sizeof ( TRI_shaped_json_t ) , true ) ) ; <nl> void IndexRangeBlock : : readEdgeIndex ( IndexOrCondition const & ranges ) { <nl> / / ( i . e . the 1 in x . c > = 1 ) cannot be lists or arrays . <nl> / / <nl> <nl> - void IndexRangeBlock : : readSkiplistIndex ( IndexOrCondition const & ranges , size_t atMost ) { <nl> + void IndexRangeBlock : : initSkiplistIndex ( IndexOrCondition const & ranges ) { <nl> + TRI_ASSERT ( _skiplistIterator = = nullptr ) <nl> + <nl> auto en = static_cast < IndexRangeNode const * > ( getPlanNode ( ) ) ; <nl> TRI_index_t * idx = en - > _index - > data ; <nl> TRI_ASSERT ( idx ! = nullptr ) ; <nl> - <nl> + <nl> TRI_shaper_t * shaper = _collection - > documentCollection ( ) - > getShaper ( ) ; <nl> TRI_ASSERT ( shaper ! = nullptr ) ; <nl> - <nl> + <nl> TRI_index_operator_t * skiplistOperator = nullptr ; <nl> <nl> Json parameters ( Json : : List ) ; <nl> void IndexRangeBlock : : readSkiplistIndex ( IndexOrCondition const & ranges , size_t <nl> } <nl> } <nl> <nl> - TRI_skiplist_iterator_t * skiplistIterator = TRI_LookupSkiplistIndex ( idx , skiplistOperator , en - > _reverse ) ; <nl> + _skiplistIterator = TRI_LookupSkiplistIndex ( idx , skiplistOperator , en - > _reverse ) ; <nl> if ( skiplistOperator ! = nullptr ) { <nl> TRI_FreeIndexOperator ( skiplistOperator ) ; <nl> } <nl> - <nl> - if ( skiplistIterator = = nullptr ) { <nl> + <nl> + if ( _skiplistIterator = = nullptr ) { <nl> int res = TRI_errno ( ) ; <nl> if ( res = = TRI_RESULT_ELEMENT_NOT_FOUND ) { <nl> return ; <nl> void IndexRangeBlock : : readSkiplistIndex ( IndexOrCondition const & ranges , size_t <nl> <nl> THROW_ARANGO_EXCEPTION ( TRI_ERROR_ARANGO_NO_INDEX ) ; <nl> } <nl> + } <nl> <nl> + void IndexRangeBlock : : readSkiplistIndex ( size_t atMost ) { <nl> + <nl> + if ( _skiplistIterator = = nullptr ) { <nl> + return ; <nl> + } <nl> + <nl> try { <nl> size_t nrSent = 0 ; <nl> - while ( true ) { <nl> - TRI_skiplist_index_element_t * indexElement = skiplistIterator - > next ( skiplistIterator ) ; <nl> + TRI_skiplist_index_element_t * indexElement ; <nl> + while ( nrSent < atMost ) { <nl> + indexElement = _skiplistIterator - > next ( _skiplistIterator ) ; <nl> <nl> - if ( indexElement = = nullptr | | nrSent = = atMost ) { <nl> + if ( indexElement = = nullptr ) { <nl> break ; <nl> } <nl> _documents . emplace_back ( * ( indexElement - > _document ) ) ; <nl> + + nrSent ; <nl> + + _engine - > _stats . scannedIndex ; <nl> } <nl> - TRI_FreeSkiplistIterator ( skiplistIterator ) ; <nl> + if ( indexElement = = nullptr ) { <nl> + TRI_FreeSkiplistIterator ( _skiplistIterator ) ; <nl> + _skiplistIterator = nullptr ; <nl> + } <nl> } <nl> catch ( . . . 
) { <nl> - TRI_FreeSkiplistIterator ( skiplistIterator ) ; <nl> + TRI_FreeSkiplistIterator ( _skiplistIterator ) ; <nl> throw ; <nl> } <nl> } <nl> int LimitBlock : : getOrSkipSome ( size_t atLeast , <nl> AqlItemBlock * ReturnBlock : : getSome ( size_t atLeast , <nl> size_t atMost ) { <nl> <nl> + std : : cout < < " ReturnBlock : : getSome atMost = " < < atMost < < " \ n " ; <nl> auto res = ExecutionBlock : : getSomeWithoutRegisterClearout ( atLeast , atMost ) ; <nl> <nl> if ( res = = nullptr ) { <nl> mmm a / arangod / Aql / ExecutionBlock . h <nl> ppp b / arangod / Aql / ExecutionBlock . h <nl> namespace triagens { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> bool readIndex ( size_t atMost ) ; <nl> + bool initIndex ( ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief read using the primary index <nl> namespace triagens { <nl> / / / @ brief read using a skiplist index <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - void readSkiplistIndex ( IndexOrCondition const & , size_t atMost ) ; <nl> + void readSkiplistIndex ( size_t atMost ) ; <nl> + void initSkiplistIndex ( IndexOrCondition const & ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief read using a hash index <nl> namespace triagens { <nl> <nl> std : : vector < std : : vector < RegisterId > > _inRegs ; <nl> <nl> + TRI_skiplist_iterator_t * _skiplistIterator ; <nl> + <nl> } ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> | lazy index working for skiplists | arangodb/arangodb | d6e1971fd3070066dbabbe776fc7a0e3112fd381 | 2014-11-11T11:29:46Z |