diff
stringlengths
41
2.03M
msg
stringlengths
1
1.5k
repo
stringlengths
5
40
sha
stringlengths
40
40
time
stringlengths
20
20
mmm a / src / core / lib / security / transport / security_connector . c <nl> ppp b / src / core / lib / security / transport / security_connector . c <nl> static void fake_channel_add_handshakers ( <nl> & sc - > base ) ) ; <nl> } <nl> <nl> - static void fake_server_add_handshakers ( <nl> - grpc_exec_ctx * exec_ctx , grpc_server_security_connector * sc , <nl> - grpc_handshake_manager * handshake_mgr ) { <nl> + static void fake_server_add_handshakers ( grpc_exec_ctx * exec_ctx , <nl> + grpc_server_security_connector * sc , <nl> + grpc_handshake_manager * handshake_mgr ) { <nl> grpc_handshake_manager_add ( <nl> handshake_mgr , <nl> grpc_security_handshaker_create ( <nl>
clang - format
grpc/grpc
5b850b2194d668292959131635bda17c4f497a06
2016-12-08T17:40:58Z
mmm a / test / test_jit . py <nl> ppp b / test / test_jit . py <nl> def forward ( self , x , y ) : <nl> ' test_nn_interpolate ' , <nl> ' test_nn_fold ' , <nl> ' test_nn_max_unpool1d ' , <nl> - ' test_nn_poisson_nll_loss ' , <nl> - ' test_nn_poisson_nll_loss_full ' , <nl> } <nl> <nl> DISABLE_AUTODIFF_SUBGRAPH_INLINING = { <nl> def func ( x ) : <nl> ( ' group_norm ' , ( S , S , S ) , ( 1 , torch . Tensor ( 5 ) , None ) , ) , <nl> ( ' local_response_norm ' , ( S , S , S ) , ( 2 , ) , ) , <nl> ( ' nll_loss ' , F . log_softmax ( torch . randn ( 3 , 5 ) , dim = 0 ) , ( torch . tensor ( [ 1 , 0 , 4 ] ) , None , None ) , ) , <nl> - ( ' poisson_nll_loss ' , ( S , 2 ) , ( ( S , 2 ) , ) , ) , <nl> - ( ' poisson_nll_loss ' , ( S , 2 ) , ( ( S , 2 ) , True , True ) , ' full ' ) , <nl> + ( ' poisson_nll_loss ' , torch . rand ( S , 2 ) , ( torch . rand ( S , 2 ) , ) , ) , <nl> + ( ' poisson_nll_loss ' , torch . rand ( S , 2 ) , ( torch . rand ( S , 2 ) , True , True ) , ' full ' ) , <nl> ( ' kl_div ' , F . log_softmax ( torch . randn ( S , 10 ) , 1 ) , ( F . softmax ( torch . randn ( S , 10 ) , 1 ) , ) , ) , <nl> ( ' cross_entropy ' , ( 3 , S ) , ( torch . randint ( S , ( 3 , ) , dtype = torch . int64 ) , ) , ) , <nl> ( ' binary_cross_entropy_with_logits ' , ( 3 , ) , ( torch . empty ( 3 ) . random_ ( 2 ) , ) , ) , <nl> def func ( x ) : <nl> ( ' binary_cross_entropy ' , torch . randn ( 3 , 2 ) . sigmoid ( ) , <nl> ( non_differentiable ( torch . rand ( 3 , 2 ) ) , <nl> non_differentiable ( torch . randn ( 3 , 2 ) ) , True ) , ' size_average ' ) , <nl> - ( ' ctc_loss ' , torch . randn ( S , S , S ) . log_softmax ( 2 ) . detach ( ) . requires_grad_ ( ) , \ <nl> + ( ' ctc_loss ' , torch . rand ( S , S , S ) . log_softmax ( 2 ) . detach ( ) . requires_grad_ ( ) , \ <nl> ( torch . randint ( 1 , S , ( S , S ) , dtype = torch . long ) , torch . full ( ( S , ) , S , dtype = torch . long ) , \ <nl> torch . randint ( 1 , S , ( S , ) , dtype = torch . 
long ) ) ) , <nl> ] <nl> mmm a / torch / nn / functional . py <nl> ppp b / torch / nn / functional . py <nl> def nll_loss ( input , target , weight = None , size_average = None , ignore_index = - 100 , <nl> return ret <nl> <nl> <nl> + @ torch . _jit_internal . weak_script <nl> def poisson_nll_loss ( input , target , log_input = True , full = False , size_average = None , eps = 1e - 8 , <nl> reduce = None , reduction = ' mean ' ) : <nl> # type : ( Tensor , Tensor , bool , bool , Optional [ bool ] , float , Optional [ bool ] , str ) - > Tensor <nl>
Add poisson_nll_loss to script
pytorch/pytorch
662f66ebb9a904d003e9728a552d0bc9be2bbe37
2018-11-28T03:39:16Z
mmm a / tests / test_mat_pixel_resize . cpp <nl> ppp b / tests / test_mat_pixel_resize . cpp <nl> static int test_mat_pixel_0 ( ) <nl> for ( int c = 1 ; c < = 4 ; c + + ) <nl> { <nl> int ret = 0 <nl> - | | test_mat_pixel_resize ( 24 , 48 , c , 24 , 48 ) <nl> - | | test_mat_pixel_resize ( 13 , 17 , c , 11 , 14 ) <nl> - | | test_mat_pixel_resize ( 33 , 23 , c , 5 , 6 ) <nl> - | | test_mat_pixel_resize ( 5 , 4 , c , 11 , 16 ) <nl> - | | test_mat_pixel_resize ( 23 , 11 , c , 15 , 21 ) ; <nl> + | | test_mat_pixel_resize ( 24 , 48 , c , 24 , 48 ) <nl> + | | test_mat_pixel_resize ( 13 , 17 , c , 11 , 14 ) <nl> + | | test_mat_pixel_resize ( 33 , 23 , c , 5 , 6 ) <nl> + | | test_mat_pixel_resize ( 5 , 4 , c , 11 , 16 ) <nl> + | | test_mat_pixel_resize ( 23 , 11 , c , 15 , 21 ) ; <nl> <nl> if ( ret ! = 0 ) <nl> return ret ; <nl> mmm a / tests / testutil . h <nl> ppp b / tests / testutil . h <nl> static struct prng_rand_t g_prng_rand_state ; <nl> # define SRAND ( seed ) prng_srand ( seed , & g_prng_rand_state ) <nl> # define RAND ( ) prng_rand ( & g_prng_rand_state ) <nl> <nl> - static float RandomFloat ( float a = - 1 . 5f , float b = 1 . 5f ) <nl> + static float RandomFloat ( float a = - 1 . 2f , float b = 1 . 2f ) <nl> { <nl> float random = ( ( float ) RAND ( ) ) / ( float ) uint64_t ( - 1 ) ; / / RAND_MAX ; <nl> float diff = b - a ; <nl> static float RandomFloat ( float a = - 1 . 5f , float b = 1 . 5f ) <nl> return a + r ; <nl> } <nl> <nl> - static void Randomize ( ncnn : : Mat & m , float a = - 1 . 5f , float b = 1 . 5f ) <nl> + static void Randomize ( ncnn : : Mat & m , float a = - 1 . 2f , float b = 1 . 2f ) <nl> { <nl> for ( size_t i = 0 ; i < m . total ( ) ; i + + ) <nl> { <nl>
try smaller random values
Tencent/ncnn
d3f0b9f9930d6883e4eb0524b61e82f9f5a8b6d5
2020-08-17T07:24:21Z
mmm a / src / mongo / db / s / collection_sharding_runtime . cpp <nl> ppp b / src / mongo / db / s / collection_sharding_runtime . cpp <nl> boost : : optional < ChunkVersion > getOperationReceivedVersion ( OperationContext * opCt <nl> } / / namespace <nl> <nl> CollectionShardingRuntime : : CollectionShardingRuntime ( <nl> - ServiceContext * sc , <nl> + ServiceContext * service , <nl> NamespaceString nss , <nl> std : : shared_ptr < executor : : TaskExecutor > rangeDeleterExecutor ) <nl> - : _nss ( std : : move ( nss ) ) , <nl> - _rangeDeleterExecutor ( rangeDeleterExecutor ) , <nl> - _stateChangeMutex ( nss . toString ( ) ) , <nl> - _serviceContext ( sc ) { <nl> - if ( isNamespaceAlwaysUnsharded ( _nss ) ) { <nl> - _metadataType = MetadataType : : kUnsharded ; <nl> - } <nl> - } <nl> + : _serviceContext ( service ) , <nl> + _nss ( std : : move ( nss ) ) , <nl> + _rangeDeleterExecutor ( std : : move ( rangeDeleterExecutor ) ) , <nl> + _stateChangeMutex ( _nss . toString ( ) ) , <nl> + _metadataType ( isNamespaceAlwaysUnsharded ( _nss ) ? MetadataType : : kUnsharded <nl> + : MetadataType : : kUnknown ) { } <nl> <nl> CollectionShardingRuntime * CollectionShardingRuntime : : get ( OperationContext * opCtx , <nl> const NamespaceString & nss ) { <nl> size_t CollectionShardingRuntime : : numberOfRangesScheduledForDeletion ( ) const { <nl> return 0 ; <nl> } <nl> <nl> - CollectionCriticalSection : : CollectionCriticalSection ( OperationContext * opCtx , NamespaceString ns ) <nl> - : _nss ( std : : move ( ns ) ) , _opCtx ( opCtx ) { <nl> + CollectionCriticalSection : : CollectionCriticalSection ( OperationContext * opCtx , NamespaceString nss ) <nl> + : _opCtx ( opCtx ) , _nss ( std : : move ( nss ) ) { <nl> AutoGetCollection autoColl ( _opCtx , <nl> _nss , <nl> MODE_X , <nl> mmm a / src / mongo / db / s / collection_sharding_runtime . h <nl> ppp b / src / mongo / db / s / collection_sharding_runtime . 
h <nl> extern AtomicWord < int > migrationLockAcquisitionMaxWaitMS ; <nl> class CollectionShardingRuntime final : public CollectionShardingState , <nl> public Decorable < CollectionShardingRuntime > { <nl> public : <nl> - CollectionShardingRuntime ( ServiceContext * sc , <nl> + CollectionShardingRuntime ( ServiceContext * service , <nl> NamespaceString nss , <nl> std : : shared_ptr < executor : : TaskExecutor > rangeDeleterExecutor ) ; <nl> <nl> class CollectionShardingRuntime final : public CollectionShardingState , <nl> std : : shared_ptr < ScopedCollectionDescription : : Impl > _getMetadataWithVersionCheckAt ( <nl> OperationContext * opCtx , const boost : : optional < mongo : : LogicalTime > & atClusterTime ) ; <nl> <nl> + / / The service context under which this instance runs <nl> + ServiceContext * const _serviceContext ; <nl> + <nl> / / Namespace this state belongs to . <nl> const NamespaceString _nss ; <nl> <nl> class CollectionShardingRuntime final : public CollectionShardingState , <nl> / / Must hold CSRLock while accessing . <nl> ShardingMigrationCriticalSection _critSec ; <nl> <nl> + / / Protects state around the metadata manager below <nl> mutable Mutex _metadataManagerLock = <nl> MONGO_MAKE_LATCH ( " CollectionShardingRuntime : : _metadataManagerLock " ) ; <nl> <nl> / / Tracks whether the filtering metadata is unknown , unsharded , or sharded <nl> - enum class MetadataType { <nl> - kUnknown , <nl> - kUnsharded , <nl> - kSharded <nl> - } _metadataType { MetadataType : : kUnknown } ; <nl> + enum class MetadataType { kUnknown , kUnsharded , kSharded } _metadataType ; <nl> <nl> / / If the collection is sharded , contains all the metadata associated with this collection . <nl> / / <nl> class CollectionShardingRuntime final : public CollectionShardingState , <nl> <nl> / / Used for testing to check the number of times a new MetadataManager has been installed . 
<nl> std : : uint64_t _numMetadataManagerChanges { 0 } ; <nl> - <nl> - / / Used to get the shardId if no metadata is known when calling getCollectionDescription <nl> - ServiceContext * _serviceContext ; <nl> } ; <nl> <nl> / * * <nl> class CollectionCriticalSection { <nl> CollectionCriticalSection & operator = ( const CollectionCriticalSection & ) = delete ; <nl> <nl> public : <nl> - CollectionCriticalSection ( OperationContext * opCtx , NamespaceString ns ) ; <nl> + CollectionCriticalSection ( OperationContext * opCtx , NamespaceString nss ) ; <nl> ~ CollectionCriticalSection ( ) ; <nl> <nl> / * * <nl> class CollectionCriticalSection { <nl> void enterCommitPhase ( ) ; <nl> <nl> private : <nl> - NamespaceString _nss ; <nl> + OperationContext * const _opCtx ; <nl> <nl> - OperationContext * _opCtx ; <nl> + NamespaceString _nss ; <nl> } ; <nl> <nl> } / / namespace mongo <nl>
SERVER - 48401 Ensure the CSR lock has the correct namespace
mongodb/mongo
764b77bfe24784e740527b167c0a21d22842af30
2020-05-26T10:05:06Z
mmm a / src / Functions / FunctionsComparison . h <nl> ppp b / src / Functions / FunctionsComparison . h <nl> <nl> <nl> # include < DataTypes / DataTypesNumber . h > <nl> # include < DataTypes / DataTypeNullable . h > <nl> + # include < DataTypes / DataTypeNothing . h > <nl> # include < DataTypes / DataTypeDateTime . h > <nl> # include < DataTypes / DataTypeDateTime64 . h > <nl> # include < DataTypes / DataTypeDate . h > <nl> class FunctionComparison : public IFunction <nl> FunctionComparison < Op , Name > : : create ( context ) ) ) ; <nl> <nl> bool has_nullable = false ; <nl> + bool has_null = false ; <nl> <nl> size_t size = left_tuple - > getElements ( ) . size ( ) ; <nl> for ( size_t i = 0 ; i < size ; + + i ) <nl> { <nl> ColumnsWithTypeAndName args = { { nullptr , left_tuple - > getElements ( ) [ i ] , " " } , <nl> { nullptr , right_tuple - > getElements ( ) [ i ] , " " } } ; <nl> - has_nullable = has_nullable | | adaptor . build ( args ) - > getReturnType ( ) - > isNullable ( ) ; <nl> + auto element_type = adaptor . build ( args ) - > getReturnType ( ) ; <nl> + has_nullable = has_nullable | | element_type - > isNullable ( ) ; <nl> + has_null = has_null | | element_type - > onlyNull ( ) ; <nl> } <nl> <nl> / / / If any element comparison is nullable , return type will also be nullable . <nl> / / / We useDefaultImplementationForNulls , but it doesn ' t work for tuples . <nl> + if ( has_null ) <nl> + return std : : make_shared < DataTypeNullable > ( std : : make_shared < DataTypeNothing > ( ) ) ; <nl> if ( has_nullable ) <nl> return std : : make_shared < DataTypeNullable > ( std : : make_shared < DataTypeUInt8 > ( ) ) ; <nl> } <nl>
Fix tuple with NULL exement comparison .
ClickHouse/ClickHouse
b28e121967de400b77ee0958678d32222ce3d900
2020-08-06T12:54:14Z
mmm a / stdlib / public / core / KeyPath . swift <nl> ppp b / stdlib / public / core / KeyPath . swift <nl> internal func _abstract ( <nl> <nl> / / MARK : Type - erased abstract base classes <nl> <nl> - / / A type - erased key path , from any root type to any resulting value <nl> - / / type . NOTE : older runtimes had Swift . AnyKeyPath as the ObjC name . <nl> + / / NOTE : older runtimes had Swift . AnyKeyPath as the ObjC name . <nl> / / The two must coexist , so it was renamed . The old name must not be <nl> / / used in the new runtime . _TtCs11_AnyKeyPath is the mangled name for <nl> / / Swift . _AnyKeyPath . <nl> @ _objcRuntimeName ( _TtCs11_AnyKeyPath ) <nl> + <nl> + / / / A type - erased key path , from any root type to any resulting value <nl> + / / / type . <nl> public class AnyKeyPath : Hashable , _AppendKeyPath { <nl> / / / The root type for this key path . <nl> @ inlinable <nl>
Merge remote - tracking branch ' origin / master ' into master - next
apple/swift
932b50e45f144f3ff135b0f12a30e5bf6351b188
2019-03-13T22:49:47Z
mmm a / lang / slp_vectorize . cpp <nl> ppp b / lang / slp_vectorize . cpp <nl> class BasicBlockSLP : public IRVisitor { <nl> inside = std : : set < Stmt * > ( input_statements . begin ( ) , input_statements . end ( ) ) ; <nl> visited . clear ( ) ; <nl> auto & stmts = input_statements ; <nl> - int counter = 0 ; <nl> while ( 1 ) { <nl> TC_INFO ( " Seeding . . . " ) ; <nl> / / Find the last statement <nl> class BasicBlockSLP : public IRVisitor { <nl> std : : reverse ( seed_statements . begin ( ) , seed_statements . end ( ) ) ; <nl> TC_P ( last_stmt - > id ) ; <nl> build ( seed_statements ) ; <nl> - counter + + ; <nl> - if ( counter > 5 ) break ; <nl> } <nl> sort ( new_stmts ) ; <nl> fix_alloca_ref ( new_stmts . stmts ) ; <nl>
remove the counter that limits SLP passes
taichi-dev/taichi
6ca93ac14beeefdab2b0e7bc07893b337d21f14c
2019-03-05T19:37:07Z
mmm a / include / swift / SIL / OwnershipUtils . h <nl> ppp b / include / swift / SIL / OwnershipUtils . h <nl> bool isOwnedForwardingInstruction ( SILInstruction * inst ) ; <nl> / / / previous terminator . <nl> bool isOwnedForwardingValue ( SILValue value ) ; <nl> <nl> + class ForwardingOperand { <nl> + Operand * use ; <nl> + <nl> + ForwardingOperand ( Operand * use ) : use ( use ) { } <nl> + <nl> + public : <nl> + static Optional < ForwardingOperand > get ( Operand * use ) ; <nl> + <nl> + ValueOwnershipKind getOwnershipKind ( ) const ; <nl> + void setOwnershipKind ( ValueOwnershipKind newKind ) const ; <nl> + void replaceOwnershipKind ( ValueOwnershipKind oldKind , <nl> + ValueOwnershipKind newKind ) const ; <nl> + } ; <nl> + <nl> / / / Returns true if the instruction is a ' reborrow ' . <nl> bool isReborrowInstruction ( const SILInstruction * inst ) ; <nl> <nl> mmm a / include / swift / SIL / SILInstruction . h <nl> ppp b / include / swift / SIL / SILInstruction . h <nl> class MultipleValueInstructionTrailingObjects < Derived , DerivedResult , <nl> return { ptr , NumResults } ; <nl> } <nl> <nl> + MutableArrayRef < DerivedResult > getAllResultsBuffer ( ) { <nl> + auto * ptr = this - > TrailingObjects : : template <nl> + getTrailingObjects < DerivedResult > ( ) ; <nl> + return { ptr , NumResults } ; <nl> + } <nl> + <nl> SILInstructionResultArray getAllResults ( ) const { <nl> / / Our results start at element 1 since we stash the pointer to our parent <nl> / / MultipleValueInstruction in the 0 elt slot . This allows all <nl> class DynamicMethodBranchInst <nl> } ; <nl> <nl> / / / The base class for cast instructions which are terminators . <nl> - class CastBranchInstBase : public TermInst { <nl> + template < typename BaseTy > class CastBranchInstBase : public BaseTy { <nl> std : : array < SILSuccessor , 2 > DestBBs ; <nl> <nl> public : <nl> - <nl> + template < typename . . . 
ArgTys > <nl> CastBranchInstBase ( SILInstructionKind K , SILDebugLocation DebugLoc , <nl> SILBasicBlock * SuccessBB , SILBasicBlock * FailureBB , <nl> - ProfileCounter Target1Count = ProfileCounter ( ) , <nl> - ProfileCounter Target2Count = ProfileCounter ( ) ) : <nl> - TermInst ( K , DebugLoc ) , <nl> - DestBBs { { { this , SuccessBB , Target1Count } , <nl> - { this , FailureBB , Target2Count } } } <nl> - { } <nl> + ProfileCounter Target1Count , ProfileCounter Target2Count , <nl> + ArgTys & & . . . args ) <nl> + : BaseTy ( K , DebugLoc , std : : forward < ArgTys > ( args ) . . . ) , <nl> + DestBBs { { { this , SuccessBB , Target1Count } , <nl> + { this , FailureBB , Target2Count } } } { } <nl> <nl> - SuccessorListTy getSuccessors ( ) { <nl> - return DestBBs ; <nl> - } <nl> + TermInst : : SuccessorListTy getSuccessors ( ) { return DestBBs ; } <nl> <nl> SILBasicBlock * getSuccessBB ( ) { return DestBBs [ 0 ] ; } <nl> const SILBasicBlock * getSuccessBB ( ) const { return DestBBs [ 0 ] ; } <nl> class CastBranchInstBase : public TermInst { <nl> <nl> / / / The base class for cast instructions which are terminators and have a <nl> / / / CastConsumptionKind . <nl> - class CastBranchWithConsumptionKindBase : public CastBranchInstBase { <nl> + class CastBranchWithConsumptionKindBase : public CastBranchInstBase < TermInst > { <nl> CastConsumptionKind ConsumptionKind ; <nl> <nl> public : <nl> class AddrCastInstBase <nl> / / / Perform a checked cast operation and branch on whether the cast succeeds . <nl> / / / The success branch destination block receives the cast result as a BB <nl> / / / argument . 
<nl> - class CheckedCastBranchInst final : <nl> - public UnaryInstructionWithTypeDependentOperandsBase < <nl> - SILInstructionKind : : CheckedCastBranchInst , <nl> - CheckedCastBranchInst , <nl> - CastBranchInstBase > { <nl> + class CheckedCastBranchInst final <nl> + : public UnaryInstructionWithTypeDependentOperandsBase < <nl> + SILInstructionKind : : CheckedCastBranchInst , CheckedCastBranchInst , <nl> + CastBranchInstBase < OwnershipForwardingTermInst > > { <nl> friend SILBuilder ; <nl> <nl> SILType DestLoweredTy ; <nl> class CheckedCastBranchInst final : <nl> ArrayRef < SILValue > TypeDependentOperands , <nl> SILType DestLoweredTy , CanType DestFormalTy , <nl> SILBasicBlock * SuccessBB , SILBasicBlock * FailureBB , <nl> - ProfileCounter Target1Count , ProfileCounter Target2Count ) <nl> - : UnaryInstructionWithTypeDependentOperandsBase ( DebugLoc , Operand , <nl> - TypeDependentOperands , <nl> - SuccessBB , FailureBB , Target1Count , Target2Count ) , <nl> - DestLoweredTy ( DestLoweredTy ) , <nl> - DestFormalTy ( DestFormalTy ) , <nl> + ProfileCounter Target1Count , <nl> + ProfileCounter Target2Count ) <nl> + : UnaryInstructionWithTypeDependentOperandsBase ( <nl> + DebugLoc , Operand , TypeDependentOperands , SuccessBB , FailureBB , <nl> + Target1Count , Target2Count , Operand . 
getOwnershipKind ( ) ) , <nl> + DestLoweredTy ( DestLoweredTy ) , DestFormalTy ( DestFormalTy ) , <nl> IsExact ( IsExact ) { } <nl> <nl> static CheckedCastBranchInst * <nl> class CheckedCastBranchInst final : <nl> class CheckedCastValueBranchInst final <nl> : public UnaryInstructionWithTypeDependentOperandsBase < <nl> SILInstructionKind : : CheckedCastValueBranchInst , <nl> - CheckedCastValueBranchInst , <nl> - CastBranchInstBase > { <nl> + CheckedCastValueBranchInst , CastBranchInstBase < TermInst > > { <nl> friend SILBuilder ; <nl> <nl> CanType SourceFormalTy ; <nl> SILType DestLoweredTy ; <nl> CanType DestFormalTy ; <nl> <nl> - CheckedCastValueBranchInst ( SILDebugLocation DebugLoc , <nl> - SILValue Operand , CanType SourceFormalTy , <nl> + CheckedCastValueBranchInst ( SILDebugLocation DebugLoc , SILValue Operand , <nl> + CanType SourceFormalTy , <nl> ArrayRef < SILValue > TypeDependentOperands , <nl> SILType DestLoweredTy , CanType DestFormalTy , <nl> SILBasicBlock * SuccessBB , SILBasicBlock * FailureBB ) <nl> - : UnaryInstructionWithTypeDependentOperandsBase ( DebugLoc , Operand , <nl> - TypeDependentOperands , SuccessBB , FailureBB ) , <nl> - SourceFormalTy ( SourceFormalTy ) , <nl> - DestLoweredTy ( DestLoweredTy ) , DestFormalTy ( DestFormalTy ) { } <nl> + : UnaryInstructionWithTypeDependentOperandsBase ( <nl> + DebugLoc , Operand , TypeDependentOperands , SuccessBB , FailureBB , <nl> + ProfileCounter ( ) , ProfileCounter ( ) ) , <nl> + SourceFormalTy ( SourceFormalTy ) , DestLoweredTy ( DestLoweredTy ) , <nl> + DestFormalTy ( DestFormalTy ) { } <nl> <nl> static CheckedCastValueBranchInst * <nl> create ( SILDebugLocation DebugLoc , <nl> SILFunction * ApplyInstBase < Impl , Base , false > : : getCalleeFunction ( ) const { <nl> } <nl> } <nl> <nl> + class OwnershipForwardingMultipleValueInstruction <nl> + : public MultipleValueInstruction { <nl> + ValueOwnershipKind ownershipKind ; <nl> + <nl> + public : <nl> + OwnershipForwardingMultipleValueInstruction ( 
SILInstructionKind kind , <nl> + SILDebugLocation loc , <nl> + ValueOwnershipKind ownershipKind ) <nl> + : MultipleValueInstruction ( kind , loc ) , ownershipKind ( ownershipKind ) { } <nl> + <nl> + / / / Returns the preferred ownership kind of this multiple value instruction . <nl> + ValueOwnershipKind getOwnershipKind ( ) const { return ownershipKind ; } <nl> + void setOwnershipKind ( ValueOwnershipKind newOwnershipKind ) { <nl> + ownershipKind = newOwnershipKind ; <nl> + } <nl> + } ; <nl> + <nl> / / / A result for the destructure_struct instruction . See documentation for <nl> / / / destructure_struct for more information . <nl> class DestructureStructResult final : public MultipleValueInstructionResult { <nl> class DestructureStructResult final : public MultipleValueInstructionResult { <nl> / / / struct ' s fields . <nl> class DestructureStructInst final <nl> : public UnaryInstructionBase < SILInstructionKind : : DestructureStructInst , <nl> - MultipleValueInstruction > , <nl> - public MultipleValueInstructionTrailingObjects < <nl> - DestructureStructInst , DestructureStructResult > { <nl> + OwnershipForwardingMultipleValueInstruction > , <nl> + public MultipleValueInstructionTrailingObjects < DestructureStructInst , <nl> + DestructureStructResult > { <nl> friend TrailingObjects ; <nl> <nl> DestructureStructInst ( SILModule & M , SILDebugLocation Loc , SILValue Operand , <nl> ArrayRef < SILType > Types , <nl> ArrayRef < ValueOwnershipKind > OwnershipKinds ) <nl> - : UnaryInstructionBase ( Loc , Operand ) , <nl> + : UnaryInstructionBase ( Loc , Operand , Operand . getOwnershipKind ( ) ) , <nl> MultipleValueInstructionTrailingObjects ( this , Types , OwnershipKinds ) { } <nl> <nl> public : <nl> class DestructureTupleResult final : public MultipleValueInstructionResult { <nl> / / / tuples ' s elements . 
<nl> class DestructureTupleInst final <nl> : public UnaryInstructionBase < SILInstructionKind : : DestructureTupleInst , <nl> - MultipleValueInstruction > , <nl> - public MultipleValueInstructionTrailingObjects < <nl> - DestructureTupleInst , DestructureTupleResult > { <nl> + OwnershipForwardingMultipleValueInstruction > , <nl> + public MultipleValueInstructionTrailingObjects < DestructureTupleInst , <nl> + DestructureTupleResult > { <nl> friend TrailingObjects ; <nl> <nl> DestructureTupleInst ( SILModule & M , SILDebugLocation Loc , SILValue Operand , <nl> ArrayRef < SILType > Types , <nl> ArrayRef < ValueOwnershipKind > OwnershipKinds ) <nl> - : UnaryInstructionBase ( Loc , Operand ) , <nl> + : UnaryInstructionBase ( Loc , Operand , Operand . getOwnershipKind ( ) ) , <nl> MultipleValueInstructionTrailingObjects ( this , Types , OwnershipKinds ) { } <nl> <nl> public : <nl> mmm a / lib / SIL / IR / OperandOwnership . cpp <nl> ppp b / lib / SIL / IR / OperandOwnership . cpp <nl> class OperandOwnershipKindClassifier <nl> return getOwnershipKind ( ) = = ValueOwnershipKind : : None ; <nl> } <nl> <nl> - OperandOwnershipKindMap visitForwardingInst ( SILInstruction * i , <nl> - ArrayRef < Operand > ops ) ; <nl> - OperandOwnershipKindMap visitForwardingInst ( SILInstruction * i ) { <nl> - return visitForwardingInst ( i , i - > getAllOperands ( ) ) ; <nl> - } <nl> - <nl> OperandOwnershipKindMap <nl> visitApplyParameter ( ValueOwnershipKind requiredConvention , <nl> UseLifetimeConstraint requirement ) ; <nl> ACCEPTS_ANY_OWNERSHIP_INST ( ConvertEscapeToNoEscape ) <nl> # include " swift / AST / ReferenceStorage . 
def " <nl> # undef ACCEPTS_ANY_OWNERSHIP_INST <nl> <nl> - OperandOwnershipKindMap <nl> - OperandOwnershipKindClassifier : : visitForwardingInst ( SILInstruction * i , <nl> - ArrayRef < Operand > ops ) { <nl> - assert ( i - > getNumOperands ( ) & & " Expected to have non - zero operands " ) ; <nl> - assert ( isOwnershipForwardingInst ( i ) & & <nl> - " Expected to have an ownership forwarding inst " ) ; <nl> - <nl> - / / Merge all of the ownership of our operands . If we get back a . none from the <nl> - / / merge , then we return an empty compatibility map . This ensures that we will <nl> - / / not be compatible with / any / input triggering a special error in the <nl> - / / ownership verifier . <nl> - Optional < ValueOwnershipKind > optionalKind = <nl> - ValueOwnershipKind : : merge ( makeOptionalTransformRange ( <nl> - ops , [ & i ] ( const Operand & op ) - > Optional < ValueOwnershipKind > { <nl> - if ( i - > isTypeDependentOperand ( op ) ) <nl> - return None ; <nl> - return op . get ( ) . getOwnershipKind ( ) ; <nl> - } ) ) ; <nl> - if ( ! optionalKind ) <nl> - return Map ( ) ; <nl> - <nl> - auto kind = optionalKind . getValue ( ) ; <nl> - if ( kind = = ValueOwnershipKind : : None ) <nl> - return Map : : allLive ( ) ; <nl> - auto lifetimeConstraint = kind . getForwardingLifetimeConstraint ( ) ; <nl> - return Map : : compatibilityMap ( kind , lifetimeConstraint ) ; <nl> - } <nl> - <nl> # define FORWARD_ANY_OWNERSHIP_INST ( INST ) \ <nl> OperandOwnershipKindMap OperandOwnershipKindClassifier : : visit # # INST # # Inst ( \ <nl> INST # # Inst * i ) { \ <nl> - return visitForwardingInst ( i ) ; \ <nl> + auto kind = i - > getOwnershipKind ( ) ; \ <nl> + auto lifetimeConstraint = kind . 
getForwardingLifetimeConstraint ( ) ; \ <nl> + return Map : : compatibilityMap ( kind , lifetimeConstraint ) ; \ <nl> } <nl> FORWARD_ANY_OWNERSHIP_INST ( Tuple ) <nl> FORWARD_ANY_OWNERSHIP_INST ( Struct ) <nl> FORWARD_ANY_OWNERSHIP_INST ( RefToBridgeObject ) <nl> FORWARD_ANY_OWNERSHIP_INST ( BridgeObjectToRef ) <nl> FORWARD_ANY_OWNERSHIP_INST ( UnconditionalCheckedCast ) <nl> FORWARD_ANY_OWNERSHIP_INST ( UncheckedEnumData ) <nl> - FORWARD_ANY_OWNERSHIP_INST ( DestructureStruct ) <nl> - FORWARD_ANY_OWNERSHIP_INST ( DestructureTuple ) <nl> FORWARD_ANY_OWNERSHIP_INST ( InitExistentialRef ) <nl> FORWARD_ANY_OWNERSHIP_INST ( DifferentiableFunction ) <nl> FORWARD_ANY_OWNERSHIP_INST ( LinearFunction ) <nl> FORWARD_ANY_OWNERSHIP_INST ( UncheckedValueCast ) <nl> + FORWARD_ANY_OWNERSHIP_INST ( DestructureStruct ) <nl> + FORWARD_ANY_OWNERSHIP_INST ( DestructureTuple ) <nl> # undef FORWARD_ANY_OWNERSHIP_INST <nl> <nl> / / An instruction that forwards a constant ownership or trivial ownership . <nl> OperandOwnershipKindClassifier : : visitSelectEnumInst ( SelectEnumInst * i ) { <nl> return Map : : allLive ( ) ; <nl> } <nl> <nl> - return visitForwardingInst ( i , i - > getAllOperands ( ) . drop_front ( ) ) ; <nl> + auto kind = i - > getOwnershipKind ( ) ; <nl> + auto lifetimeConstraint = kind . getForwardingLifetimeConstraint ( ) ; <nl> + return Map : : compatibilityMap ( kind , lifetimeConstraint ) ; <nl> } <nl> <nl> OperandOwnershipKindMap <nl> OperandOwnershipKindClassifier : : visitSwitchEnumInst ( SwitchEnumInst * sei ) { <nl> OperandOwnershipKindMap <nl> OperandOwnershipKindClassifier : : visitCheckedCastBranchInst ( <nl> CheckedCastBranchInst * ccbi ) { <nl> - / / TODO : Simplify this using ValueOwnershipKind : : merge . <nl> - Optional < OperandOwnershipKindMap > map ; <nl> - for ( auto argArray : ccbi - > getSuccessorBlockArgumentLists ( ) ) { <nl> - assert ( ! argArray . 
empty ( ) ) ; <nl> - <nl> - auto argOwnershipKind = argArray [ getOperandIndex ( ) ] - > getOwnershipKind ( ) ; <nl> - / / If we do not have a map yet , initialize it and continue . <nl> - if ( ! map ) { <nl> - auto lifetimeConstraint = <nl> - argOwnershipKind . getForwardingLifetimeConstraint ( ) ; <nl> - map = Map : : compatibilityMap ( argOwnershipKind , lifetimeConstraint ) ; <nl> - continue ; <nl> - } <nl> - <nl> - / / Otherwise , make sure that we can accept the rest of our <nl> - / / arguments . If not , we return an empty ownership kind to make <nl> - / / sure that we flag everything as an error . <nl> - if ( map - > canAcceptKind ( argOwnershipKind ) ) { <nl> - continue ; <nl> - } <nl> - <nl> - return OperandOwnershipKindMap ( ) ; <nl> - } <nl> - <nl> - return map . getValue ( ) ; <nl> + auto kind = getOwnershipKind ( ) ; <nl> + auto lifetimeConstraint = kind . getForwardingLifetimeConstraint ( ) ; <nl> + return Map : : compatibilityMap ( kind , lifetimeConstraint ) ; <nl> } <nl> <nl> / / / / FIX THIS HERE <nl> mmm a / lib / SIL / Utils / OwnershipUtils . cpp <nl> ppp b / lib / SIL / Utils / OwnershipUtils . 
cpp <nl> swift : : getSingleOwnedValueIntroducer ( SILValue inputValue ) { <nl> <nl> llvm_unreachable ( " Should never hit this " ) ; <nl> } <nl> + <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / Forwarding Operand <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + Optional < ForwardingOperand > ForwardingOperand : : get ( Operand * use ) { <nl> + auto * user = use - > getUser ( ) ; <nl> + if ( isa < OwnershipForwardingTermInst > ( user ) ) <nl> + return ForwardingOperand ( use ) ; <nl> + if ( isa < OwnershipForwardingSingleValueInst > ( user ) ) <nl> + return ForwardingOperand ( use ) ; <nl> + if ( isa < OwnershipForwardingConversionInst > ( user ) ) <nl> + return ForwardingOperand ( use ) ; <nl> + if ( isa < OwnershipForwardingSelectEnumInstBase > ( user ) ) <nl> + return ForwardingOperand ( use ) ; <nl> + if ( isa < OwnershipForwardingMultipleValueInstruction > ( user ) ) <nl> + return ForwardingOperand ( use ) ; <nl> + return None ; <nl> + } <nl> + <nl> + ValueOwnershipKind ForwardingOperand : : getOwnershipKind ( ) const { <nl> + auto * user = use - > getUser ( ) ; <nl> + if ( auto * ofti = dyn_cast < OwnershipForwardingTermInst > ( user ) ) <nl> + return ofti - > getOwnershipKind ( ) ; <nl> + if ( auto * ofsvi = dyn_cast < OwnershipForwardingSingleValueInst > ( user ) ) <nl> + return ofsvi - > getOwnershipKind ( ) ; <nl> + if ( auto * ofci = dyn_cast < OwnershipForwardingConversionInst > ( user ) ) <nl> + return ofci - > getOwnershipKind ( ) ; <nl> + if ( auto * ofseib = dyn_cast < OwnershipForwardingSelectEnumInstBase > ( user ) ) <nl> + return ofseib - > getOwnershipKind ( ) ; <nl> + if ( auto * ofmvi = dyn_cast < OwnershipForwardingMultipleValueInstruction > ( user ) ) <nl> + return ofmvi - > getOwnershipKind ( ) ; <nl> + llvm_unreachable ( " Out of sync with ForwardingOperand : : get ? ! 
" ) ; <nl> + } <nl> + <nl> + void ForwardingOperand : : setOwnershipKind ( ValueOwnershipKind newKind ) const { <nl> + auto * user = use - > getUser ( ) ; <nl> + if ( auto * ofsvi = dyn_cast < OwnershipForwardingSingleValueInst > ( user ) ) <nl> + if ( ! ofsvi - > getType ( ) . isTrivial ( * ofsvi - > getFunction ( ) ) ) <nl> + return ofsvi - > setOwnershipKind ( newKind ) ; <nl> + if ( auto * ofci = dyn_cast < OwnershipForwardingConversionInst > ( user ) ) <nl> + if ( ! ofci - > getType ( ) . isTrivial ( * ofci - > getFunction ( ) ) ) <nl> + return ofci - > setOwnershipKind ( newKind ) ; <nl> + if ( auto * ofseib = dyn_cast < OwnershipForwardingSelectEnumInstBase > ( user ) ) <nl> + if ( ! ofseib - > getType ( ) . isTrivial ( * ofseib - > getFunction ( ) ) ) <nl> + return ofseib - > setOwnershipKind ( newKind ) ; <nl> + <nl> + if ( auto * ofmvi = dyn_cast < OwnershipForwardingMultipleValueInstruction > ( user ) ) { <nl> + assert ( ofmvi - > getNumOperands ( ) = = 1 ) ; <nl> + if ( ! ofmvi - > getOperand ( 0 ) - > getType ( ) . isTrivial ( * ofmvi - > getFunction ( ) ) ) { <nl> + ofmvi - > setOwnershipKind ( newKind ) ; <nl> + / / TODO : Refactor this better . <nl> + if ( auto * dsi = dyn_cast < DestructureStructInst > ( ofmvi ) ) { <nl> + for ( auto & result : dsi - > getAllResultsBuffer ( ) ) { <nl> + if ( result . getType ( ) . isTrivial ( * dsi - > getFunction ( ) ) ) <nl> + continue ; <nl> + result . setOwnershipKind ( newKind ) ; <nl> + } <nl> + } else { <nl> + auto * dti = cast < DestructureTupleInst > ( ofmvi ) ; <nl> + for ( auto & result : dti - > getAllResultsBuffer ( ) ) { <nl> + if ( result . getType ( ) . isTrivial ( * dti - > getFunction ( ) ) ) <nl> + continue ; <nl> + result . setOwnershipKind ( newKind ) ; <nl> + } <nl> + } <nl> + } <nl> + return ; <nl> + } <nl> + <nl> + if ( auto * ofti = dyn_cast < OwnershipForwardingTermInst > ( user ) ) { <nl> + assert ( ofti - > getNumOperands ( ) = = 1 ) ; <nl> + if ( ! 
ofti - > getOperand ( 0 ) - > getType ( ) . isTrivial ( * ofti - > getFunction ( ) ) ) { <nl> + ofti - > setOwnershipKind ( newKind ) ; <nl> + <nl> + / / Then convert all of its incoming values that are owned to be guaranteed . <nl> + for ( auto & succ : ofti - > getSuccessors ( ) ) { <nl> + auto * succBlock = succ . getBB ( ) ; <nl> + <nl> + / / If we do not have any arguments , then continue . <nl> + if ( succBlock - > args_empty ( ) ) <nl> + continue ; <nl> + <nl> + for ( auto * succArg : succBlock - > getSILPhiArguments ( ) ) { <nl> + / / If we have an any value , just continue . <nl> + if ( ! succArg - > getType ( ) . isTrivial ( * ofti - > getFunction ( ) ) ) <nl> + continue ; <nl> + succArg - > setOwnershipKind ( newKind ) ; <nl> + } <nl> + } <nl> + } <nl> + return ; <nl> + } <nl> + <nl> + llvm_unreachable ( " Out of sync with ForwardingOperand : : get ? ! " ) ; <nl> + } <nl> + <nl> + void ForwardingOperand : : replaceOwnershipKind ( ValueOwnershipKind oldKind , <nl> + ValueOwnershipKind newKind ) const { <nl> + auto * user = use - > getUser ( ) ; <nl> + <nl> + if ( auto * ofsvi = dyn_cast < OwnershipForwardingSingleValueInst > ( user ) ) <nl> + if ( ofsvi - > getOwnershipKind ( ) = = oldKind ) <nl> + return ofsvi - > setOwnershipKind ( newKind ) ; <nl> + <nl> + if ( auto * ofci = dyn_cast < OwnershipForwardingConversionInst > ( user ) ) <nl> + if ( ofci - > getOwnershipKind ( ) = = oldKind ) <nl> + return ofci - > setOwnershipKind ( newKind ) ; <nl> + <nl> + if ( auto * ofseib = dyn_cast < OwnershipForwardingSelectEnumInstBase > ( user ) ) <nl> + if ( ofseib - > getOwnershipKind ( ) = = oldKind ) <nl> + return ofseib - > setOwnershipKind ( newKind ) ; <nl> + <nl> + if ( auto * ofmvi = dyn_cast < OwnershipForwardingMultipleValueInstruction > ( user ) ) { <nl> + if ( ofmvi - > getOwnershipKind ( ) = = oldKind ) { <nl> + ofmvi - > setOwnershipKind ( newKind ) ; <nl> + } <nl> + / / TODO : Refactor this better . 
<nl> + if ( auto * dsi = dyn_cast < DestructureStructInst > ( ofmvi ) ) { <nl> + for ( auto & result : dsi - > getAllResultsBuffer ( ) ) { <nl> + if ( result . getOwnershipKind ( ) ! = oldKind ) <nl> + continue ; <nl> + result . setOwnershipKind ( newKind ) ; <nl> + } <nl> + } else { <nl> + auto * dti = cast < DestructureTupleInst > ( ofmvi ) ; <nl> + for ( auto & result : dti - > getAllResultsBuffer ( ) ) { <nl> + if ( result . getOwnershipKind ( ) ! = oldKind ) <nl> + continue ; <nl> + result . setOwnershipKind ( newKind ) ; <nl> + } <nl> + } <nl> + return ; <nl> + } <nl> + <nl> + if ( auto * ofti = dyn_cast < OwnershipForwardingTermInst > ( user ) ) { <nl> + if ( ofti - > getOwnershipKind ( ) = = oldKind ) { <nl> + ofti - > setOwnershipKind ( newKind ) ; <nl> + / / Then convert all of its incoming values that are owned to be guaranteed . <nl> + for ( auto & succ : ofti - > getSuccessors ( ) ) { <nl> + auto * succBlock = succ . getBB ( ) ; <nl> + <nl> + / / If we do not have any arguments , then continue . <nl> + if ( succBlock - > args_empty ( ) ) <nl> + continue ; <nl> + <nl> + for ( auto * succArg : succBlock - > getSILPhiArguments ( ) ) { <nl> + / / If we have an any value , just continue . <nl> + if ( succArg - > getOwnershipKind ( ) = = oldKind ) { <nl> + succArg - > setOwnershipKind ( newKind ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + return ; <nl> + } <nl> + llvm_unreachable ( " Out of sync with ForwardingOperand : : get ? ! " ) ; <nl> + } <nl> mmm a / lib / SIL / Verifier / SILVerifier . cpp <nl> ppp b / lib / SIL / Verifier / SILVerifier . cpp <nl> class SILVerifier : public SILVerifierBase < SILVerifier > { <nl> require ( ! sd - > isResilient ( F . getModule ( ) . getSwiftModule ( ) , <nl> F . getResilienceExpansion ( ) ) , <nl> " cannot access storage of resilient struct " ) ; <nl> + if ( F . 
hasOwnership ( ) ) { <nl> + / / Make sure that all of our destructure results ownership kinds are <nl> + / / compatible with our destructure_struct ' s ownership kind / and / that if <nl> + / / our destructure ownership kind is non - trivial then all non - trivial <nl> + / / results must have the same ownership kind as our operand . <nl> + auto parentKind = DSI - > getOwnershipKind ( ) ; <nl> + for ( const DestructureStructResult & result : DSI - > getAllResultsBuffer ( ) ) { <nl> + require ( parentKind . isCompatibleWith ( result . getOwnershipKind ( ) ) , <nl> + " destructure result with ownership that is incompatible with " <nl> + " parent forwarding ownership kind " ) ; <nl> + require ( parentKind ! = ValueOwnershipKind : : None | | <nl> + result . getOwnershipKind ( ) = = ValueOwnershipKind : : None , <nl> + " destructure with none ownership kind operand and non - none " <nl> + " ownership kind result ? ! " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void checkDestructureTupleInst ( DestructureTupleInst * dti ) { <nl> + if ( F . hasOwnership ( ) ) { <nl> + / / Make sure that all of our destructure results ownership kinds are <nl> + / / compatible with our destructure_struct ' s ownership kind / and / that if <nl> + / / our destructure ownership kind is non - trivial then all non - trivial <nl> + / / results must have the same ownership kind as our operand . <nl> + auto parentKind = dti - > getOwnershipKind ( ) ; <nl> + for ( const auto & result : dti - > getAllResultsBuffer ( ) ) { <nl> + require ( parentKind . isCompatibleWith ( result . getOwnershipKind ( ) ) , <nl> + " destructure result with ownership that is incompatible with " <nl> + " parent forwarding ownership kind " ) ; <nl> + require ( parentKind ! = ValueOwnershipKind : : None | | <nl> + result . getOwnershipKind ( ) = = ValueOwnershipKind : : None , <nl> + " destructure with none ownership kind operand and non - none " <nl> + " ownership kind result ? ! 
" ) ; <nl> + } <nl> + } <nl> } <nl> <nl> SILType getMethodSelfType ( CanSILFunctionType ft ) { <nl> class SILVerifier : public SILVerifierBase < SILVerifier > { <nl> CBI - > getOperand ( ) - > getType ( ) , <nl> " failure dest block argument must match type of original type in " <nl> " ownership qualified sil " ) ; <nl> + auto succOwnershipKind = <nl> + CBI - > getSuccessBB ( ) - > args_begin ( ) [ 0 ] - > getOwnershipKind ( ) ; <nl> + require ( succOwnershipKind . isCompatibleWith ( <nl> + CBI - > getOperand ( ) . getOwnershipKind ( ) ) , <nl> + " succ dest block argument must have ownership compatible with " <nl> + " the checked_cast_br operand " ) ; <nl> + auto failOwnershipKind = <nl> + CBI - > getFailureBB ( ) - > args_begin ( ) [ 0 ] - > getOwnershipKind ( ) ; <nl> + require ( failOwnershipKind . isCompatibleWith ( <nl> + CBI - > getOperand ( ) . getOwnershipKind ( ) ) , <nl> + " failure dest block argument must have ownership compatible with " <nl> + " the checked_cast_br operand " ) ; <nl> } else { <nl> require ( CBI - > getFailureBB ( ) - > args_empty ( ) , <nl> " Failure dest of checked_cast_br must not take any argument in " <nl> mmm a / lib / SILOptimizer / SemanticARC / OwnershipLiveRange . cpp <nl> ppp b / lib / SILOptimizer / SemanticARC / OwnershipLiveRange . cpp <nl> void OwnershipLiveRange : : insertEndBorrowsAtDestroys ( <nl> } <nl> } <nl> <nl> - static void convertInstructionOwnership ( SILInstruction * i , <nl> - ValueOwnershipKind oldOwnership , <nl> - ValueOwnershipKind newOwnership ) { <nl> - / / If this is a term inst . . . <nl> - if ( auto * ti = dyn_cast < TermInst > ( i ) ) { <nl> - / / First see if it is an ownership forwarding term inst . In such a case , <nl> - / / change the ownership kind as appropriate . 
<nl> - if ( auto * ofti = dyn_cast < OwnershipForwardingTermInst > ( ti ) ) <nl> - if ( ofti - > getOwnershipKind ( ) = = oldOwnership ) <nl> - ofti - > setOwnershipKind ( newOwnership ) ; <nl> - <nl> - / / Then convert all of its incoming values that are owned to be guaranteed . <nl> - for ( auto & succ : ti - > getSuccessors ( ) ) { <nl> - auto * succBlock = succ . getBB ( ) ; <nl> - <nl> - / / If we do not have any arguments , then continue . <nl> - if ( succBlock - > args_empty ( ) ) <nl> - continue ; <nl> - <nl> - for ( auto * succArg : succBlock - > getSILPhiArguments ( ) ) { <nl> - / / If we have an any value , just continue . <nl> - if ( succArg - > getOwnershipKind ( ) = = oldOwnership ) { <nl> - succArg - > setOwnershipKind ( newOwnership ) ; <nl> - } <nl> - } <nl> - } <nl> - return ; <nl> - } <nl> - <nl> - assert ( i - > hasResults ( ) ) ; <nl> - for ( SILValue result : i - > getResults ( ) ) { <nl> - if ( auto * svi = dyn_cast < OwnershipForwardingSingleValueInst > ( result ) ) { <nl> - if ( svi - > getOwnershipKind ( ) = = oldOwnership ) { <nl> - svi - > setOwnershipKind ( newOwnership ) ; <nl> - } <nl> - continue ; <nl> - } <nl> - <nl> - if ( auto * ofci = dyn_cast < OwnershipForwardingConversionInst > ( result ) ) { <nl> - if ( ofci - > getOwnershipKind ( ) = = oldOwnership ) { <nl> - ofci - > setOwnershipKind ( newOwnership ) ; <nl> - } <nl> - continue ; <nl> - } <nl> - <nl> - if ( auto * sei = dyn_cast < OwnershipForwardingSelectEnumInstBase > ( result ) ) { <nl> - if ( sei - > getOwnershipKind ( ) = = oldOwnership ) { <nl> - sei - > setOwnershipKind ( newOwnership ) ; <nl> - } <nl> - continue ; <nl> - } <nl> - <nl> - if ( auto * mvir = dyn_cast < MultipleValueInstructionResult > ( result ) ) { <nl> - if ( mvir - > getOwnershipKind ( ) = = oldOwnership ) { <nl> - mvir - > setOwnershipKind ( newOwnership ) ; <nl> - } <nl> - continue ; <nl> - } <nl> - <nl> - llvm_unreachable ( " unhandled forwarding instruction ? ! 
" ) ; <nl> - } <nl> - } <nl> - <nl> void OwnershipLiveRange : : convertOwnedGeneralForwardingUsesToGuaranteed ( ) & & { <nl> while ( ! ownershipForwardingUses . empty ( ) ) { <nl> - auto * i = ownershipForwardingUses . back ( ) - > getUser ( ) ; <nl> + auto * use = ownershipForwardingUses . back ( ) ; <nl> ownershipForwardingUses = ownershipForwardingUses . drop_back ( ) ; <nl> - convertInstructionOwnership ( i , ValueOwnershipKind : : Owned , <nl> - ValueOwnershipKind : : Guaranteed ) ; <nl> + auto operand = * ForwardingOperand : : get ( use ) ; <nl> + operand . replaceOwnershipKind ( ValueOwnershipKind : : Owned , <nl> + ValueOwnershipKind : : Guaranteed ) ; <nl> } <nl> } <nl> <nl> mmm a / test / SIL / ownership - verifier / over_consume . sil <nl> ppp b / test / SIL / ownership - verifier / over_consume . sil <nl> bb3 : <nl> return % 9999 : $ ( ) <nl> } <nl> <nl> - / / CHECK - LABEL : Function : ' checked_cast_br_mismatching_argument_guaranteed_to_owned_1 ' <nl> - / / CHECK : Have operand with incompatible ownership ? ! <nl> - / / CHECK : Value : % 0 = argument of bb0 : $ Builtin . NativeObject <nl> - / / CHECK : User : checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - / / CHECK : Conv : guaranteed <nl> - / / CHECK : OwnershipMap : <nl> - / / CHECK : - - OperandOwnershipKindMap - - <nl> - / / CHECK : unowned : No . <nl> - / / CHECK : owned : Yes . Liveness : LifetimeEnding <nl> - / / CHECK : guaranteed : No . <nl> - / / CHECK : any : Yes . Liveness : NonLifetimeEnding <nl> - sil [ ossa ] @ checked_cast_br_mismatching_argument_guaranteed_to_owned_1 : $ @ convention ( thin ) ( @ guaranteed Builtin . NativeObject ) - > ( ) { <nl> - bb0 ( % 0 : @ guaranteed $ Builtin . NativeObject ) : <nl> - checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - <nl> - bb1 ( % 1 : @ owned $ SuperKlass ) : <nl> - destroy_value % 1 : $ SuperKlass <nl> - br bb3 <nl> - <nl> - bb2 ( % 2 : @ owned $ Builtin . 
NativeObject ) : <nl> - destroy_value % 2 : $ Builtin . NativeObject <nl> - br bb3 <nl> - <nl> - bb3 : <nl> - % 9999 = tuple ( ) <nl> - return % 9999 : $ ( ) <nl> - } <nl> - <nl> - / / CHECK - LABEL : Function : ' checked_cast_br_mismatching_argument_guaranteed_to_owned_2 ' <nl> - / / CHECK : Ill - formed SIL ! Unable to compute ownership kind map for user ? ! <nl> - / / CHECK : For terminator users , check that successors have compatible ownership kinds . <nl> - / / CHECK : Value : % 0 = argument of bb0 : $ Builtin . NativeObject <nl> - / / CHECK : User : checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - / / CHECK : Conv : guaranteed <nl> - sil [ ossa ] @ checked_cast_br_mismatching_argument_guaranteed_to_owned_2 : $ @ convention ( thin ) ( @ guaranteed Builtin . NativeObject ) - > ( ) { <nl> - bb0 ( % 0 : @ guaranteed $ Builtin . NativeObject ) : <nl> - checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - <nl> - bb1 ( % 1 : @ guaranteed $ SuperKlass ) : <nl> - br bb3 <nl> - <nl> - bb2 ( % 2 : @ owned $ Builtin . NativeObject ) : <nl> - destroy_value % 2 : $ Builtin . NativeObject <nl> - br bb3 <nl> - <nl> - bb3 : <nl> - % 9999 = tuple ( ) <nl> - return % 9999 : $ ( ) <nl> - } <nl> - <nl> - / / CHECK - LABEL : Function : ' checked_cast_br_mismatching_argument_guaranteed_to_owned_3 ' <nl> - / / CHECK : Ill - formed SIL ! Unable to compute ownership kind map for user ? ! <nl> - / / CHECK : For terminator users , check that successors have compatible ownership kinds . <nl> - / / CHECK : Value : % 0 = argument of bb0 : $ Builtin . NativeObject <nl> - / / CHECK : User : checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - / / CHECK : Conv : guaranteed <nl> - sil [ ossa ] @ checked_cast_br_mismatching_argument_guaranteed_to_owned_3 : $ @ convention ( thin ) ( @ guaranteed Builtin . NativeObject ) - > ( ) { <nl> - bb0 ( % 0 : @ guaranteed $ Builtin . 
NativeObject ) : <nl> - checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - <nl> - bb1 ( % 1 : @ owned $ SuperKlass ) : <nl> - destroy_value % 1 : $ SuperKlass <nl> - br bb3 <nl> - <nl> - bb2 ( % 2 : @ guaranteed $ Builtin . NativeObject ) : <nl> - br bb3 <nl> - <nl> - bb3 : <nl> - % 9999 = tuple ( ) <nl> - return % 9999 : $ ( ) <nl> - } <nl> - <nl> - / / CHECK - LABEL : Function : ' checked_cast_br_mismatching_argument_owned_to_guaranteed_1 ' <nl> - / / CHECK : Have operand with incompatible ownership ? ! <nl> - / / CHECK : Value : % 0 = argument of bb0 : $ Builtin . NativeObject <nl> - / / CHECK : User : checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - / / CHECK : Conv : owned <nl> - / / CHECK : OwnershipMap : <nl> - / / CHECK : - - OperandOwnershipKindMap - - <nl> - / / CHECK : unowned : No . <nl> - / / CHECK : owned : No . <nl> - / / CHECK : guaranteed : Yes . Liveness : NonLifetimeEnding <nl> - / / CHECK : any : Yes . Liveness : NonLifetimeEnding <nl> - sil [ ossa ] @ checked_cast_br_mismatching_argument_owned_to_guaranteed_1 : $ @ convention ( thin ) ( @ owned Builtin . NativeObject ) - > ( ) { <nl> - bb0 ( % 0 : @ owned $ Builtin . NativeObject ) : <nl> - checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - <nl> - bb1 ( % 1 : @ guaranteed $ SuperKlass ) : <nl> - br bb3 <nl> - <nl> - bb2 ( % 2 : @ guaranteed $ Builtin . NativeObject ) : <nl> - br bb3 <nl> - <nl> - bb3 : <nl> - % 9999 = tuple ( ) <nl> - return % 9999 : $ ( ) <nl> - } <nl> - <nl> - <nl> - / / CHECK - LABEL : Function : ' checked_cast_br_mismatching_argument_owned_to_guaranteed_2 ' <nl> - / / CHECK : Ill - formed SIL ! Unable to compute ownership kind map for user ? ! <nl> - / / CHECK : For terminator users , check that successors have compatible ownership kinds . <nl> - / / CHECK : Value : % 0 = argument of bb0 : $ Builtin . NativeObject <nl> - / / CHECK : User : checked_cast_br % 0 : $ Builtin . 
NativeObject to SuperKlass , bb1 , bb2 <nl> - / / CHECK : Conv : owned <nl> - sil [ ossa ] @ checked_cast_br_mismatching_argument_owned_to_guaranteed_2 : $ @ convention ( thin ) ( @ owned Builtin . NativeObject ) - > ( ) { <nl> - bb0 ( % 0 : @ owned $ Builtin . NativeObject ) : <nl> - checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - <nl> - bb1 ( % 1 : @ guaranteed $ SuperKlass ) : <nl> - br bb3 <nl> - <nl> - bb2 ( % 2 : @ owned $ Builtin . NativeObject ) : <nl> - destroy_value % 2 : $ Builtin . NativeObject <nl> - br bb3 <nl> - <nl> - bb3 : <nl> - % 9999 = tuple ( ) <nl> - return % 9999 : $ ( ) <nl> - } <nl> - <nl> - / / CHECK - LABEL : Function : ' checked_cast_br_mismatching_argument_owned_to_guaranteed_3 ' <nl> - / / CHECK : Ill - formed SIL ! Unable to compute ownership kind map for user ? ! <nl> - / / CHECK : For terminator users , check that successors have compatible ownership kinds . <nl> - / / CHECK : Value : % 0 = argument of bb0 : $ Builtin . NativeObject <nl> - / / CHECK : User : checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - / / CHECK : Conv : owned <nl> - sil [ ossa ] @ checked_cast_br_mismatching_argument_owned_to_guaranteed_3 : $ @ convention ( thin ) ( @ owned Builtin . NativeObject ) - > ( ) { <nl> - bb0 ( % 0 : @ owned $ Builtin . NativeObject ) : <nl> - checked_cast_br % 0 : $ Builtin . NativeObject to SuperKlass , bb1 , bb2 <nl> - <nl> - bb1 ( % 1 : @ owned $ SuperKlass ) : <nl> - destroy_value % 1 : $ SuperKlass <nl> - br bb3 <nl> - <nl> - bb2 ( % 2 : @ guaranteed $ Builtin . NativeObject ) : <nl> - br bb3 <nl> - <nl> - bb3 : <nl> - % 9999 = tuple ( ) <nl> - return % 9999 : $ ( ) <nl> - } <nl> - <nl> / / CHECK - LABEL : Function : ' checked_cast_br_guaranteed_arg_outlives_original_value ' <nl> / / CHECK : Found outside of lifetime use ? ! <nl> / / CHECK : Value : % 1 = begin_borrow % 0 : $ Builtin . NativeObject <nl> mmm a / test / SIL / ownership - verifier / undef . 
sil <nl> ppp b / test / SIL / ownership - verifier / undef . sil <nl> struct MyInt { <nl> / / CHECK - NEXT : Operand Ownership Map : <nl> / / CHECK - NEXT : Op # : 0 <nl> / / CHECK - NEXT : Ownership Map : - - OperandOwnershipKindMap - - <nl> - / / CHECK - NEXT : unowned : Yes . Liveness : NonLifetimeEnding <nl> - / / CHECK - NEXT : owned : Yes . Liveness : NonLifetimeEnding <nl> - / / CHECK - NEXT : guaranteed : Yes . Liveness : NonLifetimeEnding <nl> + / / CHECK - NEXT : unowned : No . <nl> + / / CHECK - NEXT : owned : No . <nl> + / / CHECK - NEXT : guaranteed : No . <nl> / / CHECK - NEXT : any : Yes . Liveness : NonLifetimeEnding <nl> sil [ ossa ] @ undef_addresses_have_any_ownership : $ @ convention ( thin ) ( ) - > ( ) { <nl> bb0 : <nl>
Merge pull request from gottesmm / pr - 88f0c52a3c1db5fb272598e5c5596ce3594d804c
apple/swift
93d1524cc07fa75f14429cfbfd7ab6cf2bf61b36
2020-11-09T23:28:45Z
mmm a / include / swift / IDE / ModuleInterfacePrinting . h <nl> ppp b / include / swift / IDE / ModuleInterfacePrinting . h <nl> <nl> # include " swift / Basic / LLVM . h " <nl> # include " swift / Basic / OptionSet . h " <nl> <nl> + # include < vector > <nl> + <nl> namespace swift { <nl> class ASTContext ; <nl> class ASTPrinter ; <nl> enum class ModuleTraversal : unsigned { <nl> / / / Options used to describe the traversal of a module for printing . <nl> typedef OptionSet < ModuleTraversal > ModuleTraversalOptions ; <nl> <nl> + ArrayRef < StringRef > collectModuleGroups ( ModuleDecl * M , <nl> + std : : vector < StringRef > & Scratch ) ; <nl> + <nl> void printModuleInterface ( ModuleDecl * M , <nl> ModuleTraversalOptions TraversalOptions , <nl> ASTPrinter & Printer , const PrintOptions & Options , <nl> mmm a / lib / IDE / ModuleInterfacePrinting . cpp <nl> ppp b / lib / IDE / ModuleInterfacePrinting . cpp <nl> void findExtensionsFromConformingProtocols ( Decl * D , <nl> } <nl> } <nl> <nl> + ArrayRef < StringRef > <nl> + swift : : ide : : collectModuleGroups ( Module * M , std : : vector < StringRef > & Scratch ) { <nl> + for ( auto File : M - > getFiles ( ) ) { <nl> + File - > collectAllGroups ( Scratch ) ; <nl> + } <nl> + std : : sort ( Scratch . begin ( ) , Scratch . end ( ) , [ ] ( StringRef L , StringRef R ) { <nl> + return L . compare_lower ( R ) < 0 ; <nl> + } ) ; <nl> + return llvm : : makeArrayRef ( Scratch ) ; <nl> + } <nl> + <nl> void swift : : ide : : printSubmoduleInterface ( <nl> Module * M , <nl> ArrayRef < StringRef > FullModuleName , <nl> mmm a / tools / SourceKit / lib / SwiftLang / SwiftDocSupport . cpp <nl> ppp b / tools / SourceKit / lib / SwiftLang / SwiftDocSupport . cpp <nl> void SwiftLangSupport : : findModuleGroups ( StringRef ModuleName , <nl> Receiver ( Groups , Error ) ; <nl> return ; <nl> } <nl> - for ( auto File : M - > getFiles ( ) ) { <nl> - File - > collectAllGroups ( Groups ) ; <nl> - } <nl> - std : : sort ( Groups . 
begin ( ) , Groups . end ( ) , [ ] ( StringRef L , StringRef R ) { <nl> - return L . compare_lower ( R ) < 0 ; <nl> - } ) ; <nl> - Receiver ( Groups , Error ) ; <nl> + std : : vector < StringRef > Scratch ; <nl> + Receiver ( collectModuleGroups ( M , Scratch ) , Error ) ; <nl> } <nl>
ASTPrinter : Move the logic for collecting all module groups from SourceKit to an IDE API , NFC .
apple/swift
bf1436a367d208d1621f802fcf28429c05dbcf83
2016-02-22T19:50:34Z
mmm a / utils / build - script - impl <nl> ppp b / utils / build - script - impl <nl> function set_build_options_for_host ( ) { <nl> # in the compiler checks CMake performs <nl> - DCMAKE_OSX_ARCHITECTURES = " $ { architecture } " <nl> ) <nl> + <nl> + lldb_cmake_options + = ( <nl> + - DCMAKE_OSX_SYSROOT : PATH = " $ { cmake_os_sysroot } " <nl> + ) <nl> ; ; <nl> esac <nl> <nl>
Merge pull request from vedantk / pass - sysroot
apple/swift
d1a54a530103111c020a718465fd0a30b1e6f6dd
2020-07-10T04:10:10Z
mmm a / torch / jit / __init__ . py <nl> ppp b / torch / jit / __init__ . py <nl> def make_module ( mod , _module_class , _compilation_unit ) : <nl> if isinstance ( mod , ScriptModule ) : <nl> return mod <nl> elif torch . _jit_internal . module_has_exports ( mod ) : <nl> - exported = [ ] <nl> - for name in dir ( mod ) : <nl> - item = getattr ( mod , name , None ) <nl> - if torch . _jit_internal . get_torchscript_modifier ( item ) is _jit_internal . FunctionModifiers . EXPORT : <nl> - exported . append ( name ) <nl> - stubs = [ ] <nl> - for method in exported : <nl> - stubs . append ( torch . jit . _recursive . make_stub_from_method ( mod , method ) ) <nl> - <nl> - return torch . jit . _recursive . create_script_module_for_tracing ( mod , stubs ) <nl> + def make_stubs_from_exported_methods ( mod ) : <nl> + exported = [ ] <nl> + for name in dir ( mod ) : <nl> + item = getattr ( mod , name , None ) <nl> + if torch . _jit_internal . get_torchscript_modifier ( item ) is _jit_internal . FunctionModifiers . EXPORT : <nl> + exported . append ( name ) <nl> + <nl> + stubs = [ ] <nl> + for method in exported : <nl> + stubs . append ( torch . jit . _recursive . make_stub_from_method ( mod , method ) ) <nl> + return stubs <nl> + <nl> + return torch . jit . _recursive . create_script_module ( mod , make_stubs_from_exported_methods , share_types = False ) <nl> else : <nl> if _module_class is None : <nl> _module_class = TopLevelTracedModule <nl> def forward ( self , input ) : <nl> <nl> if optimize is not None : <nl> warnings . warn ( " ` optimize ` is deprecated and has no effect . Use ` with torch . jit . optimized_execution ( ) instead " ) <nl> + if isinstance ( obj , ScriptModule ) : <nl> + return obj <nl> <nl> if isinstance ( obj , torch . nn . Module ) : <nl> - return torch . jit . _recursive . recursive_script ( obj ) <nl> + return torch . jit . _recursive . create_script_module ( obj , torch . jit . _recursive . 
infer_methods_to_compile ) <nl> <nl> qualified_name = _qualified_name ( obj ) <nl> if inspect . isclass ( obj ) : <nl> def __init__ ( cls , name , bases , attrs ) : <nl> def init_then_script ( self , * args , * * kwargs ) : <nl> original_init ( self , * args , * * kwargs ) <nl> if type ( self ) = = cls : <nl> - stubs = [ v for k , v in sorted ( cls . _methods . items ( ) ) ] <nl> - self . __dict__ [ " _actual_script_module " ] = torch . jit . _recursive . create_script_module ( self , stubs ) <nl> + def make_stubs ( module ) : <nl> + cls = type ( module ) <nl> + return [ v for k , v in sorted ( cls . _methods . items ( ) ) ] <nl> + <nl> + self . __dict__ [ " _actual_script_module " ] = torch . jit . _recursive . create_script_module ( self , make_stubs ) <nl> <nl> # Delete the Python attributes that now shadow the ScriptModule <nl> # ones , so that __getattr__ and __setattr__ will properly find <nl> def check_unique ( param ) : <nl> <nl> # TODO : this way of doing it means we lose name information on the class , <nl> # since the qualname is basically " nn . Module " <nl> - script_module = torch . jit . _recursive . create_script_module_for_tracing ( tmp_module , ( ) ) <nl> + script_module = torch . jit . _recursive . create_script_module ( tmp_module , lambda module : ( ) , share_types = False ) <nl> <nl> self . __dict__ [ ' _name ' ] = type ( orig ) . __name__ <nl> self . __dict__ [ ' _actual_script_module ' ] = script_module <nl> mmm a / torch / jit / _recursive . py <nl> ppp b / torch / jit / _recursive . py <nl> def create_methods_from_stubs ( concrete_type , stubs ) : <nl> defaults = [ get_default_args ( m . original_method ) for m in stubs ] <nl> concrete_type . _create_methods ( defs , rcbs , defaults ) <nl> <nl> - def create_script_module_for_tracing ( nn_module , stubs ) : <nl> + def create_script_module ( nn_module , stubs_fn , share_types = True ) : <nl> " " " <nl> - Creates a new ScriptModule from an nn . Module , but always uses a fresh type . 
<nl> - <nl> - NOTE : Only use this when we cannot guarantee type sharing will work <nl> - correctly . This only happens today for traced modules , where the same <nl> - module can produce different traced methods depending on the inputs . <nl> + Creates a new ScriptModule from an nn . Module <nl> <nl> Arguments : <nl> nn_module : The original Python nn . Module that we are creating a ScriptModule for . <nl> - stubs : ScriptMethodStubs to compile as part of the conversion process . <nl> + stubs_fn : Lambda that takes an nn . Module and generates a list of ScriptMethodStubs to compile . <nl> + share_types : Whether to share underlying JIT types between modules ( if possible ) . <nl> + NOTE : Only set to False this when we cannot guarantee type sharing will work <nl> + correctly . This only happens today for traced modules , where the same <nl> + module can produce different traced methods depending on the inputs . <nl> " " " <nl> + assert not isinstance ( nn_module , torch . jit . RecursiveScriptModule ) <nl> check_module_initialized ( nn_module ) <nl> - # Get a concrete type directly , without trying to re - use an existing JIT <nl> - # type from the type store . <nl> - concrete_type_builder = infer_concrete_type_builder ( nn_module ) <nl> - concrete_type_builder . set_poisoned ( ) <nl> - concrete_type = concrete_type_builder . build ( ) <nl> - <nl> - return create_script_module_impl ( nn_module , concrete_type , stubs ) <nl> - <nl> - <nl> - def create_script_module ( nn_module , stubs ) : <nl> - " " " <nl> - Creates a new ScriptModule from an nn . Module , sharing underlying JIT types if possible <nl> - <nl> - Arguments : <nl> - nn_module : The original Python nn . Module that we are creating a ScriptModule for . <nl> - stubs : ScriptMethodStubs to compile as part of the conversion process . <nl> - " " " <nl> - check_module_initialized ( nn_module ) <nl> - concrete_type = concrete_type_store . 
get_or_create_concrete_type ( nn_module ) <nl> + if share_types : <nl> + # Look into the store of cached JIT types <nl> + concrete_type = concrete_type_store . get_or_create_concrete_type ( nn_module ) <nl> + else : <nl> + # Get a concrete type directly , without trying to re - use an existing JIT <nl> + # type from the type store . <nl> + concrete_type_builder = infer_concrete_type_builder ( nn_module ) <nl> + concrete_type_builder . set_poisoned ( ) <nl> + concrete_type = concrete_type_builder . build ( ) <nl> <nl> - return create_script_module_impl ( nn_module , concrete_type , stubs ) <nl> + return create_script_module_impl ( nn_module , concrete_type , stubs_fn ) <nl> <nl> - def create_script_module_impl ( nn_module , concrete_type , stubs ) : <nl> + def create_script_module_impl ( nn_module , concrete_type , stubs_fn ) : <nl> " " " <nl> Convert an nn . Module to a RecursiveScriptModule . <nl> <nl> Arguments : <nl> nn_module : The original Python nn . Module that we are creating a ScriptModule for . <nl> concrete_type : The fully initialized ConcreteType of the module . <nl> - stubs : ScriptMethodStubs to compile as part of the conversion process . <nl> + stubs_fn : Lambda that takes an nn . Module and generates a list of ScriptMethodStubs to compile . <nl> " " " <nl> cpp_module = torch . _C . _create_module_with_type ( concrete_type . jit_type ) <nl> + stubs = stubs_fn ( nn_module ) <nl> <nl> def init_fn ( script_module ) : <nl> # Initialize the ScriptModule : <nl> def init_fn ( script_module ) : <nl> if isinstance ( module_type , torch . _C . InterfaceType ) : <nl> # use the interface inference rule to compile the module <nl> scripted = interface_script ( module_type , orig_value ) <nl> + elif isinstance ( orig_value , torch . jit . 
ScriptModule ) : <nl> + scripted = orig_value <nl> else : <nl> # use the default recursive rule to compile the module <nl> - scripted = recursive_script ( orig_value ) <nl> + scripted = create_script_module ( orig_value , infer_methods_to_compile ) <nl> cpp_module . setattr ( name , scripted ) <nl> script_module . _modules [ name ] = scripted <nl> <nl> def ignore_overloaded ( method_name ) : <nl> stubs . append ( make_stub_from_method ( nn_module , method ) ) <nl> return overload_stubs + stubs <nl> <nl> - def infer_interface_methods_to_compile ( mod_interface , nn_module ) : <nl> - " " " <nl> - Rule to infer the methods from the interface type to know which <nl> - methods need to act as starting points for compilation . <nl> - " " " <nl> - stubs = [ ] <nl> - for method in mod_interface . getMethodNames ( ) : <nl> - stubs . append ( make_stub_from_method ( nn_module , method ) ) <nl> - return stubs <nl> - <nl> def interface_script ( mod_interface , nn_module ) : <nl> " " " <nl> Makes a ScriptModule from an nn . Module , using the interface methods rule for <nl> def interface_script ( mod_interface , nn_module ) : <nl> return nn_module <nl> <nl> check_module_initialized ( nn_module ) <nl> - return create_script_module ( nn_module , infer_interface_methods_to_compile ( mod_interface , nn_module ) ) <nl> - <nl> - def recursive_script ( nn_module ) : <nl> - " " " <nl> - Makes a ScriptModule from an nn . Module , using the default rules for <nl> - determining which methods to compile . <nl> - <nl> - Arguments : <nl> - nn_module : The original Python nn . Module that we are creating a ScriptModule for . <nl> - " " " <nl> - if isinstance ( nn_module , torch . jit . ScriptModule ) : <nl> - return nn_module <nl> <nl> - check_module_initialized ( nn_module ) <nl> + def infer_interface_methods_to_compile ( nn_module ) : <nl> + " " " <nl> + Rule to infer the methods from the interface type to know which <nl> + methods need to act as starting points for compilation . 
<nl> + " " " <nl> + stubs = [ ] <nl> + for method in mod_interface . getMethodNames ( ) : <nl> + stubs . append ( make_stub_from_method ( nn_module , method ) ) <nl> + return stubs <nl> <nl> - return create_script_module ( nn_module , infer_methods_to_compile ( nn_module ) ) <nl> + return create_script_module ( nn_module , infer_interface_methods_to_compile ) <nl> <nl> def try_compile_fn ( fn , loc ) : <nl> if _jit_internal . is_ignored_fn ( fn ) : <nl>
Simplify recursive script compilation flow . ( )
pytorch/pytorch
878b0e35f7723a4638a9cd65f5b9601073e92958
2019-12-18T05:55:50Z
mmm a / regrank / xgboost_regrank_obj . h <nl> ppp b / regrank / xgboost_regrank_obj . h <nl> namespace xgboost { <nl> IObjFunction * CreateObjFunction ( const char * name ) { <nl> if ( ! strcmp ( " reg " , name ) ) return new RegressionObj ( ) ; <nl> if ( ! strcmp ( " rank " , name ) ) return new PairwiseRankObj ( ) ; <nl> + if ( ! strcmp ( " softmax " , name ) ) return new SoftmaxObj ( ) ; <nl> utils : : Error ( " unknown objective function type " ) ; <nl> return NULL ; <nl> } <nl> mmm a / regrank / xgboost_regrank_obj . hpp <nl> ppp b / regrank / xgboost_regrank_obj . hpp <nl> namespace xgboost { <nl> } ; <nl> } ; <nl> <nl> + namespace regrank { <nl> + / / simple softmax rak <nl> + class SoftmaxObj : public IObjFunction { <nl> + public : <nl> + SoftmaxObj ( void ) { <nl> + } <nl> + virtual ~ SoftmaxObj ( ) { } <nl> + virtual void SetParam ( const char * name , const char * val ) { <nl> + } <nl> + virtual void GetGradient ( const std : : vector < float > & preds , <nl> + const DMatrix : : Info & info , <nl> + int iter , <nl> + std : : vector < float > & grad , <nl> + std : : vector < float > & hess ) { <nl> + grad . resize ( preds . size ( ) ) ; hess . resize ( preds . size ( ) ) ; <nl> + const std : : vector < unsigned > & gptr = info . group_ptr ; <nl> + utils : : Assert ( gptr . size ( ) ! = 0 & & gptr . back ( ) = = preds . size ( ) , " rank loss must have group file " ) ; <nl> + const unsigned ngroup = static_cast < unsigned > ( gptr . size ( ) - 1 ) ; <nl> + <nl> + # pragma omp parallel <nl> + { <nl> + std : : vector < float > rec ; <nl> + # pragma for schedule ( static ) <nl> + for ( unsigned k = 0 ; k < ngroup ; + + k ) { <nl> + rec . clear ( ) ; <nl> + int nhit = 0 ; <nl> + for ( unsigned j = gptr [ k ] ; j < gptr [ k + 1 ] ; + + j ) { <nl> + rec . push_back ( preds [ j ] ) ; <nl> + grad [ j ] = hess [ j ] = 0 . 0f ; <nl> + nhit + = info . 
labels [ j ] ; <nl> + } <nl> + Softmax ( rec ) ; <nl> + if ( nhit = = 1 ) { <nl> + for ( unsigned j = gptr [ k ] ; j < gptr [ k + 1 ] ; + + j ) { <nl> + float p = rec [ j - gptr [ k ] ] ; <nl> + grad [ j ] = p - info . labels [ j ] ; <nl> + hess [ j ] = 2 . 0f * p * ( 1 . 0f - p ) ; <nl> + } <nl> + } else { <nl> + utils : : Assert ( nhit = = 0 , " softmax does not allow multiple labels " ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + virtual const char * DefaultEvalMetric ( void ) { <nl> + return " pre @ 1 " ; <nl> + } <nl> + private : <nl> + inline static void Softmax ( std : : vector < float > & rec ) { <nl> + float wmax = rec [ 0 ] ; <nl> + for ( size_t i = 1 ; i < rec . size ( ) ; + + i ) { <nl> + wmax = std : : max ( rec [ i ] , wmax ) ; <nl> + } <nl> + double wsum = 0 . 0f ; <nl> + for ( size_t i = 0 ; i < rec . size ( ) ; + + i ) { <nl> + rec [ i ] = expf ( rec [ i ] - wmax ) ; <nl> + wsum + = rec [ i ] ; <nl> + } <nl> + for ( size_t i = 0 ; i < rec . size ( ) ; + + i ) { <nl> + rec [ i ] / = wsum ; <nl> + } <nl> + } <nl> + } ; <nl> + } ; <nl> + <nl> namespace regrank { <nl> / / simple pairwise rank <nl> class PairwiseRankObj : public IObjFunction { <nl>
add softmax
dmlc/xgboost
bf64608cc941a5bc30478acd1516d46d80335612
2014-05-01T05:11:26Z
mmm a / tensorflow / lite / c / c_api_internal . c <nl> ppp b / tensorflow / lite / c / c_api_internal . c <nl> TfLiteFloatArray * TfLiteFloatArrayCreate ( int size ) { <nl> void TfLiteFloatArrayFree ( TfLiteFloatArray * a ) { free ( a ) ; } <nl> <nl> void TfLiteTensorDataFree ( TfLiteTensor * t ) { <nl> - if ( t - > allocation_type = = kTfLiteDynamic & & t - > data . raw ) { <nl> + if ( t - > allocation_type = = kTfLiteDynamic ) { <nl> free ( t - > data . raw ) ; <nl> } <nl> t - > data . raw = NULL ; <nl>
Nit : Checking non - null - ness before calling free is redundant .
tensorflow/tensorflow
54343f143351f932c59b455059eaea1763e461d2
2019-10-21T22:59:31Z
mmm a / drivers / alsa / audio_driver_alsa . cpp <nl> ppp b / drivers / alsa / audio_driver_alsa . cpp <nl> Error AudioDriverALSA : : init_device ( ) { <nl> samples_in . resize ( period_size * channels ) ; <nl> samples_out . resize ( period_size * channels ) ; <nl> <nl> - snd_pcm_nonblock ( pcm_handle , 0 ) ; <nl> - <nl> return OK ; <nl> } <nl> <nl> void AudioDriverALSA : : thread_func ( void * p_udata ) { <nl> AudioDriverALSA * ad = ( AudioDriverALSA * ) p_udata ; <nl> <nl> while ( ! ad - > exit_thread ) { <nl> + <nl> + ad - > lock ( ) ; <nl> + ad - > start_counting_ticks ( ) ; <nl> + <nl> if ( ! ad - > active ) { <nl> for ( unsigned int i = 0 ; i < ad - > period_size * ad - > channels ; i + + ) { <nl> ad - > samples_out [ i ] = 0 ; <nl> - } ; <nl> - } else { <nl> - ad - > lock ( ) ; <nl> + } <nl> <nl> + } else { <nl> ad - > audio_server_process ( ad - > period_size , ad - > samples_in . ptrw ( ) ) ; <nl> <nl> - ad - > unlock ( ) ; <nl> - <nl> for ( unsigned int i = 0 ; i < ad - > period_size * ad - > channels ; i + + ) { <nl> ad - > samples_out [ i ] = ad - > samples_in [ i ] > > 16 ; <nl> } <nl> - } ; <nl> + } <nl> <nl> int todo = ad - > period_size ; <nl> int total = 0 ; <nl> <nl> - while ( todo ) { <nl> - if ( ad - > exit_thread ) <nl> - break ; <nl> + while ( todo & & ! ad - > exit_thread ) { <nl> uint8_t * src = ( uint8_t * ) ad - > samples_out . ptr ( ) ; <nl> int wrote = snd_pcm_writei ( ad - > pcm_handle , ( void * ) ( src + ( total * ad - > channels ) ) , todo ) ; <nl> <nl> - if ( wrote < 0 ) { <nl> - if ( ad - > exit_thread ) <nl> - break ; <nl> + if ( wrote > 0 ) { <nl> + total + = wrote ; <nl> + todo - = wrote ; <nl> + } else if ( wrote = = - EAGAIN ) { <nl> + ad - > stop_counting_ticks ( ) ; <nl> + ad - > unlock ( ) ; <nl> <nl> - if ( wrote = = - EAGAIN ) { <nl> - / / can ' t write yet ( though this is blocking . . 
) <nl> - usleep ( 1000 ) ; <nl> - continue ; <nl> - } <nl> + OS : : get_singleton ( ) - > delay_usec ( 1000 ) ; <nl> + <nl> + ad - > lock ( ) ; <nl> + ad - > start_counting_ticks ( ) ; <nl> + } else { <nl> wrote = snd_pcm_recover ( ad - > pcm_handle , wrote , 0 ) ; <nl> if ( wrote < 0 ) { <nl> - / / absolute fail <nl> - fprintf ( stderr , " ALSA failed and can ' t recover : % s \ n " , snd_strerror ( wrote ) ) ; <nl> + ERR_PRINTS ( " ALSA : Failed and can ' t recover : " + String ( snd_strerror ( wrote ) ) ) ; <nl> ad - > active = false ; <nl> ad - > exit_thread = true ; <nl> - break ; <nl> } <nl> - continue ; <nl> - } ; <nl> - <nl> - total + = wrote ; <nl> - todo - = wrote ; <nl> - } ; <nl> + } <nl> + } <nl> <nl> / / User selected a new device , finish the current one so we ' ll init the new device <nl> if ( ad - > device_name ! = ad - > new_device ) { <nl> void AudioDriverALSA : : thread_func ( void * p_udata ) { <nl> if ( err ! = OK ) { <nl> ad - > active = false ; <nl> ad - > exit_thread = true ; <nl> - break ; <nl> } <nl> } <nl> } <nl> + <nl> + ad - > stop_counting_ticks ( ) ; <nl> + ad - > unlock ( ) ; <nl> } ; <nl> <nl> ad - > thread_exited = true ; <nl> String AudioDriverALSA : : get_device ( ) { <nl> <nl> void AudioDriverALSA : : set_device ( String device ) { <nl> <nl> + lock ( ) ; <nl> new_device = device ; <nl> + unlock ( ) ; <nl> } <nl> <nl> void AudioDriverALSA : : lock ( ) { <nl> void AudioDriverALSA : : finish_device ( ) { <nl> <nl> void AudioDriverALSA : : finish ( ) { <nl> <nl> - if ( ! 
thread ) <nl> - return ; <nl> - <nl> - exit_thread = true ; <nl> - Thread : : wait_to_finish ( thread ) ; <nl> + if ( thread ) { <nl> + exit_thread = true ; <nl> + Thread : : wait_to_finish ( thread ) ; <nl> <nl> - finish_device ( ) ; <nl> + memdelete ( thread ) ; <nl> + thread = NULL ; <nl> <nl> - memdelete ( thread ) ; <nl> - if ( mutex ) { <nl> - memdelete ( mutex ) ; <nl> - mutex = NULL ; <nl> + if ( mutex ) { <nl> + memdelete ( mutex ) ; <nl> + mutex = NULL ; <nl> + } <nl> } <nl> - thread = NULL ; <nl> - } ; <nl> + <nl> + finish_device ( ) ; <nl> + } <nl> <nl> AudioDriverALSA : : AudioDriverALSA ( ) { <nl> <nl> mmm a / drivers / coreaudio / audio_driver_coreaudio . cpp <nl> ppp b / drivers / coreaudio / audio_driver_coreaudio . cpp <nl> OSStatus AudioDriverCoreAudio : : output_callback ( void * inRefCon , <nl> return 0 ; <nl> } ; <nl> <nl> + ad - > start_counting_ticks ( ) ; <nl> + <nl> for ( unsigned int i = 0 ; i < ioData - > mNumberBuffers ; i + + ) { <nl> <nl> AudioBuffer * abuf = & ioData - > mBuffers [ i ] ; <nl> OSStatus AudioDriverCoreAudio : : output_callback ( void * inRefCon , <nl> } ; <nl> } ; <nl> <nl> + ad - > stop_counting_ticks ( ) ; <nl> ad - > unlock ( ) ; <nl> <nl> return 0 ; <nl> mmm a / drivers / pulseaudio / audio_driver_pulseaudio . cpp <nl> ppp b / drivers / pulseaudio / audio_driver_pulseaudio . cpp <nl> void AudioDriverPulseAudio : : thread_func ( void * p_udata ) { <nl> AudioDriverPulseAudio * ad = ( AudioDriverPulseAudio * ) p_udata ; <nl> <nl> while ( ! ad - > exit_thread ) { <nl> + <nl> + ad - > lock ( ) ; <nl> + ad - > start_counting_ticks ( ) ; <nl> + <nl> if ( ! ad - > active ) { <nl> for ( unsigned int i = 0 ; i < ad - > pa_buffer_size ; i + + ) { <nl> ad - > samples_out [ i ] = 0 ; <nl> } <nl> <nl> } else { <nl> - ad - > lock ( ) ; <nl> - <nl> ad - > audio_server_process ( ad - > buffer_frames , ad - > samples_in . ptrw ( ) ) ; <nl> <nl> - ad - > unlock ( ) ; <nl> - <nl> if ( ad - > channels = = ad - > pa_map . 
channels ) { <nl> for ( unsigned int i = 0 ; i < ad - > pa_buffer_size ; i + + ) { <nl> ad - > samples_out [ i ] = ad - > samples_in [ i ] > > 16 ; <nl> void AudioDriverPulseAudio : : thread_func ( void * p_udata ) { <nl> <nl> int error_code ; <nl> int byte_size = ad - > pa_buffer_size * sizeof ( int16_t ) ; <nl> - <nl> - ad - > lock ( ) ; <nl> - <nl> int ret ; <nl> do { <nl> ret = pa_mainloop_iterate ( ad - > pa_ml , 0 , NULL ) ; <nl> void AudioDriverPulseAudio : : thread_func ( void * p_udata ) { <nl> if ( ret = = 0 ) { <nl> / / If pa_mainloop_iterate returns 0 sleep for 1 msec to wait <nl> / / for the stream to be able to process more bytes <nl> + ad - > stop_counting_ticks ( ) ; <nl> ad - > unlock ( ) ; <nl> <nl> OS : : get_singleton ( ) - > delay_usec ( 1000 ) ; <nl> <nl> ad - > lock ( ) ; <nl> + ad - > start_counting_ticks ( ) ; <nl> } <nl> } <nl> } <nl> void AudioDriverPulseAudio : : thread_func ( void * p_udata ) { <nl> } <nl> } <nl> <nl> + ad - > stop_counting_ticks ( ) ; <nl> ad - > unlock ( ) ; <nl> } <nl> <nl> String AudioDriverPulseAudio : : get_device ( ) { <nl> <nl> void AudioDriverPulseAudio : : set_device ( String device ) { <nl> <nl> + lock ( ) ; <nl> new_device = device ; <nl> + unlock ( ) ; <nl> } <nl> <nl> void AudioDriverPulseAudio : : lock ( ) { <nl> mmm a / drivers / wasapi / audio_driver_wasapi . cpp <nl> ppp b / drivers / wasapi / audio_driver_wasapi . cpp <nl> Error AudioDriverWASAPI : : init ( ) { <nl> return OK ; <nl> } <nl> <nl> - Error AudioDriverWASAPI : : reopen ( ) { <nl> - Error err = finish_device ( ) ; <nl> - if ( err ! = OK ) { <nl> - ERR_PRINT ( " WASAPI : finish_device error " ) ; <nl> - } else { <nl> - err = init_device ( ) ; <nl> - if ( err ! 
= OK ) { <nl> - ERR_PRINT ( " WASAPI : init_device error " ) ; <nl> - } else { <nl> - start ( ) ; <nl> - } <nl> - } <nl> - <nl> - return err ; <nl> - } <nl> - <nl> int AudioDriverWASAPI : : get_mix_rate ( ) const { <nl> <nl> return mix_rate ; <nl> String AudioDriverWASAPI : : get_device ( ) { <nl> <nl> void AudioDriverWASAPI : : set_device ( String device ) { <nl> <nl> + lock ( ) ; <nl> new_device = device ; <nl> + unlock ( ) ; <nl> } <nl> <nl> void AudioDriverWASAPI : : write_sample ( AudioDriverWASAPI * ad , BYTE * buffer , int i , int32_t sample ) { <nl> void AudioDriverWASAPI : : thread_func ( void * p_udata ) { <nl> AudioDriverWASAPI * ad = ( AudioDriverWASAPI * ) p_udata ; <nl> <nl> while ( ! ad - > exit_thread ) { <nl> - if ( ad - > active ) { <nl> - ad - > lock ( ) ; <nl> <nl> - ad - > audio_server_process ( ad - > buffer_frames , ad - > samples_in . ptrw ( ) ) ; <nl> + ad - > lock ( ) ; <nl> + ad - > start_counting_ticks ( ) ; <nl> <nl> - ad - > unlock ( ) ; <nl> + if ( ad - > active ) { <nl> + ad - > audio_server_process ( ad - > buffer_frames , ad - > samples_in . 
ptrw ( ) ) ; <nl> } else { <nl> for ( unsigned int i = 0 ; i < ad - > buffer_size ; i + + ) { <nl> ad - > samples_in [ i ] = 0 ; <nl> } <nl> } <nl> <nl> + ad - > stop_counting_ticks ( ) ; <nl> + ad - > unlock ( ) ; <nl> + <nl> unsigned int left_frames = ad - > buffer_frames ; <nl> unsigned int buffer_idx = 0 ; <nl> while ( left_frames > 0 & & ad - > audio_client ) { <nl> WaitForSingleObject ( ad - > event , 1000 ) ; <nl> <nl> + ad - > lock ( ) ; <nl> + ad - > start_counting_ticks ( ) ; <nl> + <nl> UINT32 cur_frames ; <nl> + bool invalidated = false ; <nl> HRESULT hr = ad - > audio_client - > GetCurrentPadding ( & cur_frames ) ; <nl> if ( hr = = S_OK ) { <nl> / / Check how much frames are available on the WASAPI buffer <nl> void AudioDriverWASAPI : : thread_func ( void * p_udata ) { <nl> <nl> left_frames - = write_frames ; <nl> } else if ( hr = = AUDCLNT_E_DEVICE_INVALIDATED ) { <nl> - / / Device is not valid anymore , reopen it <nl> - <nl> - Error err = ad - > finish_device ( ) ; <nl> - if ( err ! = OK ) { <nl> - ERR_PRINT ( " WASAPI : finish_device error " ) ; <nl> - } else { <nl> - / / We reopened the device and samples_in may have resized , so invalidate the current left_frames <nl> - left_frames = 0 ; <nl> - } <nl> + invalidated = true ; <nl> } else { <nl> ERR_PRINT ( " WASAPI : Get buffer error " ) ; <nl> ad - > exit_thread = true ; <nl> } <nl> } else if ( hr = = AUDCLNT_E_DEVICE_INVALIDATED ) { <nl> - / / Device is not valid anymore , reopen it <nl> + invalidated = true ; <nl> + } else { <nl> + ERR_PRINT ( " WASAPI : GetCurrentPadding error " ) ; <nl> + } <nl> + <nl> + if ( invalidated ) { <nl> + / / Device is not valid anymore <nl> + WARN_PRINT ( " WASAPI : Current device invalidated , closing device " ) ; <nl> <nl> Error err = ad - > finish_device ( ) ; <nl> if ( err ! 
= OK ) { <nl> ERR_PRINT ( " WASAPI : finish_device error " ) ; <nl> - } else { <nl> - / / We reopened the device and samples_in may have resized , so invalidate the current left_frames <nl> - left_frames = 0 ; <nl> } <nl> - } else { <nl> - ERR_PRINT ( " WASAPI : GetCurrentPadding error " ) ; <nl> } <nl> + <nl> + ad - > stop_counting_ticks ( ) ; <nl> + ad - > unlock ( ) ; <nl> } <nl> <nl> + ad - > lock ( ) ; <nl> + ad - > start_counting_ticks ( ) ; <nl> + <nl> / / If we ' re using the Default device and it changed finish it so we ' ll re - init the device <nl> if ( ad - > device_name = = " Default " & & default_device_changed ) { <nl> Error err = ad - > finish_device ( ) ; <nl> void AudioDriverWASAPI : : thread_func ( void * p_udata ) { <nl> ad - > start ( ) ; <nl> } <nl> } <nl> + <nl> + ad - > stop_counting_ticks ( ) ; <nl> + ad - > unlock ( ) ; <nl> } <nl> <nl> ad - > thread_exited = true ; <nl> mmm a / drivers / wasapi / audio_driver_wasapi . h <nl> ppp b / drivers / wasapi / audio_driver_wasapi . h <nl> class AudioDriverWASAPI : public AudioDriver { <nl> <nl> Error init_device ( bool reinit = false ) ; <nl> Error finish_device ( ) ; <nl> - Error reopen ( ) ; <nl> <nl> public : <nl> virtual const char * get_name ( ) const { <nl> mmm a / main / main . cpp <nl> ppp b / main / main . cpp <nl> bool Main : : iteration ( ) { <nl> ScriptServer : : get_language ( i ) - > frame ( ) ; <nl> } <nl> <nl> + AudioServer : : get_singleton ( ) - > update ( ) ; <nl> + <nl> if ( script_debugger ) { <nl> if ( script_debugger - > is_profiling ( ) ) { <nl> script_debugger - > profiling_set_frame_times ( USEC_TO_SEC ( frame_time ) , USEC_TO_SEC ( idle_process_ticks ) , USEC_TO_SEC ( physics_process_ticks ) , frame_slice ) ; <nl> mmm a / servers / audio_server . cpp <nl> ppp b / servers / audio_server . 
cpp <nl> AudioDriver : : AudioDriver ( ) { <nl> <nl> _last_mix_time = 0 ; <nl> _mix_amount = 0 ; <nl> + <nl> + # ifdef DEBUG_ENABLED <nl> + prof_time = 0 ; <nl> + # endif <nl> } <nl> <nl> AudioDriver * AudioDriverManager : : drivers [ MAX_DRIVERS ] ; <nl> void AudioServer : : _driver_process ( int p_frames , int32_t * p_buffer ) { <nl> <nl> int todo = p_frames ; <nl> <nl> + # ifdef DEBUG_ENABLED <nl> + uint64_t prof_ticks = OS : : get_singleton ( ) - > get_ticks_usec ( ) ; <nl> + # endif <nl> + <nl> if ( channel_count ! = get_channel_count ( ) ) { <nl> / / Amount of channels changed due to a device change <nl> / / reinitialize the buses channels and buffers <nl> void AudioServer : : _driver_process ( int p_frames , int32_t * p_buffer ) { <nl> output_latency = ( ticks - output_latency_ticks ) / 1000000 . f ; <nl> output_latency_ticks = ticks ; <nl> } <nl> + <nl> + # ifdef DEBUG_ENABLED <nl> + prof_time + = OS : : get_singleton ( ) - > get_ticks_usec ( ) - prof_ticks ; <nl> + # endif <nl> } <nl> <nl> void AudioServer : : _mix_step ( ) { <nl> void AudioServer : : _mix_step ( ) { <nl> if ( ! bus - > effects [ j ] . enabled ) <nl> continue ; <nl> <nl> + # ifdef DEBUG_ENABLED <nl> + uint64_t ticks = OS : : get_singleton ( ) - > get_ticks_usec ( ) ; <nl> + # endif <nl> + <nl> for ( int k = 0 ; k < bus - > channels . size ( ) ; k + + ) { <nl> <nl> if ( ! bus - > channels [ k ] . active ) <nl> void AudioServer : : _mix_step ( ) { <nl> continue ; <nl> SWAP ( bus - > channels [ k ] . buffer , temp_buffer [ k ] ) ; <nl> } <nl> + <nl> + # ifdef DEBUG_ENABLED <nl> + bus - > effects [ j ] . prof_time + = OS : : get_singleton ( ) - > get_ticks_usec ( ) - ticks ; <nl> + # endif <nl> } <nl> } <nl> <nl> void AudioServer : : add_bus_effect ( int p_bus , const Ref < AudioEffect > & p_effect , in <nl> fx . effect = p_effect ; <nl> / / fx . instance = p_effect - > instance ( ) ; <nl> fx . enabled = true ; <nl> + # ifdef DEBUG_ENABLED <nl> + fx . 
prof_time = 0 ; <nl> + # endif <nl> <nl> if ( p_at_pos > = buses [ p_bus ] - > effects . size ( ) | | p_at_pos < 0 ) { <nl> buses [ p_bus ] - > effects . push_back ( fx ) ; <nl> void AudioServer : : init ( ) { <nl> GLOBAL_DEF ( " audio / video_delay_compensation_ms " , 0 ) ; <nl> } <nl> <nl> + void AudioServer : : update ( ) { <nl> + # ifdef DEBUG_ENABLED <nl> + if ( ScriptDebugger : : get_singleton ( ) & & ScriptDebugger : : get_singleton ( ) - > is_profiling ( ) ) { <nl> + <nl> + / / Driver time includes server time + effects times <nl> + / / Server time includes effects times <nl> + uint64_t driver_time = AudioDriver : : get_singleton ( ) - > get_profiling_time ( ) ; <nl> + uint64_t server_time = prof_time ; <nl> + <nl> + / / Substract the server time from the driver time <nl> + if ( driver_time > server_time ) <nl> + driver_time - = server_time ; <nl> + <nl> + Array values ; <nl> + <nl> + for ( int i = buses . size ( ) - 1 ; i > = 0 ; i - - ) { <nl> + Bus * bus = buses [ i ] ; <nl> + if ( bus - > bypass ) <nl> + continue ; <nl> + <nl> + for ( int j = 0 ; j < bus - > effects . size ( ) ; j + + ) { <nl> + if ( ! bus - > effects [ j ] . enabled ) <nl> + continue ; <nl> + <nl> + values . push_back ( String ( bus - > name ) + bus - > effects [ j ] . effect - > get_name ( ) ) ; <nl> + values . push_back ( USEC_TO_SEC ( bus - > effects [ j ] . prof_time ) ) ; <nl> + <nl> + / / Substract the effect time from the driver and server times <nl> + if ( driver_time > bus - > effects [ j ] . prof_time ) <nl> + driver_time - = bus - > effects [ j ] . prof_time ; <nl> + if ( server_time > bus - > effects [ j ] . prof_time ) <nl> + server_time - = bus - > effects [ j ] . prof_time ; <nl> + } <nl> + } <nl> + <nl> + values . push_back ( " audio_server " ) ; <nl> + values . push_back ( USEC_TO_SEC ( server_time ) ) ; <nl> + values . push_back ( " audio_driver " ) ; <nl> + values . 
push_back ( USEC_TO_SEC ( driver_time ) ) ; <nl> + <nl> + ScriptDebugger : : get_singleton ( ) - > add_profiling_frame_data ( " audio_thread " , values ) ; <nl> + } <nl> + <nl> + / / Reset profiling times <nl> + for ( int i = buses . size ( ) - 1 ; i > = 0 ; i - - ) { <nl> + Bus * bus = buses [ i ] ; <nl> + if ( bus - > bypass ) <nl> + continue ; <nl> + <nl> + for ( int j = 0 ; j < bus - > effects . size ( ) ; j + + ) { <nl> + if ( ! bus - > effects [ j ] . enabled ) <nl> + continue ; <nl> + <nl> + bus - > effects [ j ] . prof_time = 0 ; <nl> + } <nl> + } <nl> + <nl> + AudioDriver : : get_singleton ( ) - > reset_profiling_time ( ) ; <nl> + prof_time = 0 ; <nl> + # endif <nl> + } <nl> + <nl> void AudioServer : : load_default_bus_layout ( ) { <nl> <nl> if ( FileAccess : : exists ( " res : / / default_bus_layout . tres " ) ) { <nl> AudioServer : : AudioServer ( ) { <nl> to_mix = 0 ; <nl> output_latency = 0 ; <nl> output_latency_ticks = 0 ; <nl> + # ifdef DEBUG_ENABLED <nl> + prof_time = 0 ; <nl> + # endif <nl> } <nl> <nl> AudioServer : : ~ AudioServer ( ) { <nl> mmm a / servers / audio_server . h <nl> ppp b / servers / audio_server . h <nl> <nl> <nl> # include " audio_frame . h " <nl> # include " object . h " <nl> + # include " os / os . h " <nl> # include " servers / audio / audio_effect . h " <nl> # include " variant . 
h " <nl> <nl> class AudioDriver { <nl> uint64_t _last_mix_time ; <nl> uint64_t _mix_amount ; <nl> <nl> + # ifdef DEBUG_ENABLED <nl> + uint64_t prof_ticks ; <nl> + uint64_t prof_time ; <nl> + # endif <nl> + <nl> protected : <nl> void audio_server_process ( int p_frames , int32_t * p_buffer , bool p_update_mix_time = true ) ; <nl> void update_mix_time ( int p_frames ) ; <nl> <nl> + # ifdef DEBUG_ENABLED <nl> + _FORCE_INLINE_ void start_counting_ticks ( ) { prof_ticks = OS : : get_singleton ( ) - > get_ticks_usec ( ) ; } <nl> + _FORCE_INLINE_ void stop_counting_ticks ( ) { prof_time + = OS : : get_singleton ( ) - > get_ticks_usec ( ) - prof_ticks ; } <nl> + # else <nl> + _FORCE_INLINE_ void start_counting_ticks ( ) { } <nl> + _FORCE_INLINE_ void stop_counting_ticks ( ) { } <nl> + # endif <nl> + <nl> public : <nl> double get_mix_time ( ) const ; / / useful for video - > audio sync <nl> <nl> class AudioDriver { <nl> SpeakerMode get_speaker_mode_by_total_channels ( int p_channels ) const ; <nl> int get_total_channels_by_speaker_mode ( SpeakerMode ) const ; <nl> <nl> + # ifdef DEBUG_ENABLED <nl> + uint64_t get_profiling_time ( ) const { return prof_time ; } <nl> + void reset_profiling_time ( ) { prof_time = 0 ; } <nl> + # endif <nl> + <nl> AudioDriver ( ) ; <nl> virtual ~ AudioDriver ( ) { } <nl> } ; <nl> class AudioServer : public Object { <nl> uint32_t buffer_size ; <nl> uint64_t mix_count ; <nl> uint64_t mix_frames ; <nl> + # ifdef DEBUG_ENABLED <nl> + uint64_t prof_time ; <nl> + # endif <nl> <nl> float channel_disable_threshold_db ; <nl> uint32_t channel_disable_frames ; <nl> class AudioServer : public Object { <nl> struct Effect { <nl> Ref < AudioEffect > effect ; <nl> bool enabled ; <nl> + # ifdef DEBUG_ENABLED <nl> + uint64_t prof_time ; <nl> + # endif <nl> } ; <nl> <nl> Vector < Effect > effects ; <nl> class AudioServer : public Object { <nl> <nl> virtual void init ( ) ; <nl> virtual void finish ( ) ; <nl> + virtual void update ( ) ; <nl> virtual void 
load_default_bus_layout ( ) ; <nl> <nl> / * MISC config * / <nl>
Merge pull request from marcelofg55 / audio_profile
godotengine/godot
41a2dccd93d79c8c786ea1b4296cd58af8d87d7b
2018-07-17T14:56:27Z
mmm a / site / source / docs / getting_started / FAQ . rst <nl> ppp b / site / source / docs / getting_started / FAQ . rst <nl> In most cases you will be able to use your project ' s current build system with E <nl> <nl> <nl> <nl> - Why is code compilation so slow ? <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + Why is code compilation slow ? <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> Emscripten makes some trade - offs that make the generated code faster and smaller , at the cost of longer compilation times . For example , we build parts of the standard library along with your code , which enables some additional optimizations , but takes a little longer to compile . <nl> <nl> The main tips for improving build time are : <nl> - Emscripten can run some passes in parallel ( specifically , the JavaScript optimisations ) . Increasing the number of cores results in an almost linear improvement . <nl> - Emscripten will automatically use more cores if they are available . You can control how many cores are used with ` ` EMCC_CORES = N ` ` ( this is useful if you have many cores but relatively less memory ) . <nl> <nl> + - Make sure that the native optimizer is being used , which greatly speeds up optimized builds as of 1 . 28 . 2 . ` ` EMCC_DEBUG = 1 ` ` output should not report errors about the native optimizer failing to build or not being used because of a previous failed build ( if it previously failed , do ` ` emcc - - clear - cache ` ` then compile your file again , and the optimizer will be automatically rebuilt ) . <nl> <nl> <nl> Why does my code run slowly ? <nl> Make sure you optimize code by building with ` ` - O2 ` ` ( even more : ref : ` aggressive <nl> . . note : This is necessary both for each source file , and for the final stage of linking and compiling to JavaScript . For more information see : ref : ` Building - Projects ` and : ref : ` Optimizing - Code ` . 
<nl> <nl> <nl> - Why is my compiled code so big ? <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + Why is my compiled code big ? <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> Make sure you build with ` ` - O2 ` ` so code is optimized and minified . You should also set up gzip compression on your webserver , which all browsers now support . <nl> <nl> How do I link against system libraries like SDL , boost , etc . ? <nl> <nl> System libraries that are included with Emscripten are automatically linked when you compile ( just the necessary parts ) . This includes * libc * , * libc + + * ( C + + standard library ) and : term : ` SDL ` . <nl> <nl> - Libraries not included with Emscripten ( like Boost ) must be compiled and linked with the program just as if they were a module in the project . For example , see how ` BananaBread links in libz < https : / / github . com / kripken / BananaBread / blob / master / cube2 / src / web / Makefile > ` _ . <nl> + Libraries not included with Emscripten ( like Boost ) must be compiled and linked with the program just as if they were a module in the project . <nl> + <nl> + There is a set of libraries ported to Emscripten for convenient use , Emscripten Ports . See : ref : ` Building - Projects ` <nl> <nl> Another option is to implement needed C APIs as JavaScript librarys ( see ` ` - - js - library ` ` in : ref : ` emcc < emcc - js - library > ` and : ref : ` implement - c - in - javascript ` ) . Emscripten itself does this for * libc * ( not including * malloc * ) and : term : ` SDL ` ( but not * libc + + * or * malloc * ) . <nl> <nl> Another option is to implement needed C APIs as JavaScript librarys ( see ` ` - - js - <nl> - In the specific case of * Boost * , if you only need the boost headers then you don ' t need to compile anything . <nl> <nl> <nl> + What are my options for audio playback ? 
<nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + Emscripten has partial support for SDL ( 1 , not 2 ) audio , and OpenAL . <nl> + <nl> + To use SDL audio , include it as ` ` # include < SDL / SDL_mixer . h > ` ` . You can use it that way alongside SDL1 , SDL2 , or another library for platform integration . <nl> + <nl> + <nl> How can my compiled program access files ? <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl>
update faq
emscripten-core/emscripten
d8d338a6a4300960edcb351c16a50b0f7026d689
2015-01-08T22:15:50Z
mmm a / mars / zstd / lib / compress / fse_compress . c <nl> ppp b / mars / zstd / lib / compress / fse_compress . c <nl> size_t FSE_buildCTable_wksp ( FSE_CTable * ct , <nl> / * For explanations on how to distribute symbol values over the table : <nl> * http : / / fastcompression . blogspot . fr / 2014 / 02 / fse - distributing - symbol - values . html * / <nl> <nl> - / / # ifdef __clang_analyzer__ <nl> + # ifdef __clang_analyzer__ <nl> + / / add some compile error code , to make sure __clang_analyzer__ undefined <nl> + test compile <nl> / / memset ( tableSymbol , 0 , sizeof ( * tableSymbol ) * tableSize ) ; / * useless initialization , just to keep scan - build happy * / <nl> - / / # endif <nl> + # endif <nl> <nl> / * symbol start positions * / <nl> { U32 u ; <nl>
add compile error
Tencent/mars
7f291d5a68912621edfc7712f22d2fadc9983bb7
2020-10-21T03:36:14Z
mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> String : : ExternalStringResource * String : : GetExternalStringResource ( ) const { <nl> <nl> ExternalStringResource * result ; <nl> if ( I : : IsExternalTwoByteString ( I : : GetInstanceType ( obj ) ) ) { <nl> - void * value = I : : ReadRawField < void * > ( obj , I : : kStringResourceOffset ) ; <nl> + internal : : Isolate * isolate = <nl> + internal : : IsolateFromNeverReadOnlySpaceObject ( obj ) ; <nl> + A value = <nl> + I : : ReadExternalPointerField ( isolate , obj , I : : kStringResourceOffset ) ; <nl> result = reinterpret_cast < String : : ExternalStringResource * > ( value ) ; <nl> } else { <nl> result = GetExternalStringResourceSlow ( ) ; <nl> String : : ExternalStringResourceBase * String : : GetExternalStringResourceBase ( <nl> ExternalStringResourceBase * resource ; <nl> if ( type = = I : : kExternalOneByteRepresentationTag | | <nl> type = = I : : kExternalTwoByteRepresentationTag ) { <nl> - void * value = I : : ReadRawField < void * > ( obj , I : : kStringResourceOffset ) ; <nl> - resource = static_cast < ExternalStringResourceBase * > ( value ) ; <nl> + internal : : Isolate * isolate = <nl> + internal : : IsolateFromNeverReadOnlySpaceObject ( obj ) ; <nl> + A value = <nl> + I : : ReadExternalPointerField ( isolate , obj , I : : kStringResourceOffset ) ; <nl> + resource = reinterpret_cast < ExternalStringResourceBase * > ( value ) ; <nl> } else { <nl> resource = GetExternalStringResourceBaseSlow ( encoding_out ) ; <nl> } <nl> mmm a / src / api / api . cc <nl> ppp b / src / api / api . cc <nl> <nl> # include " src / codegen / compiler . h " <nl> # include " src / codegen / cpu - features . h " <nl> # include " src / common / assert - scope . h " <nl> + # include " src / common / external - pointer . h " <nl> # include " src / common / globals . h " <nl> # include " src / compiler - dispatcher / compiler - dispatcher . h " <nl> # include " src / date / date . 
h " <nl> String : : ExternalStringResource * String : : GetExternalStringResourceSlow ( ) const { <nl> } <nl> <nl> if ( i : : StringShape ( str ) . IsExternalTwoByte ( ) ) { <nl> - void * value = I : : ReadRawField < void * > ( str . ptr ( ) , I : : kStringResourceOffset ) ; <nl> + internal : : Isolate * isolate = <nl> + internal : : IsolateFromNeverReadOnlySpaceObject ( str . ptr ( ) ) ; <nl> + internal : : Address value = I : : ReadExternalPointerField ( <nl> + isolate , str . ptr ( ) , I : : kStringResourceOffset ) ; <nl> return reinterpret_cast < String : : ExternalStringResource * > ( value ) ; <nl> } <nl> return nullptr ; <nl> String : : ExternalStringResourceBase * String : : GetExternalStringResourceBaseSlow ( <nl> * encoding_out = static_cast < Encoding > ( type & I : : kStringEncodingMask ) ; <nl> if ( i : : StringShape ( str ) . IsExternalOneByte ( ) | | <nl> i : : StringShape ( str ) . IsExternalTwoByte ( ) ) { <nl> - void * value = I : : ReadRawField < void * > ( string , I : : kStringResourceOffset ) ; <nl> - resource = static_cast < ExternalStringResourceBase * > ( value ) ; <nl> + internal : : Isolate * isolate = <nl> + internal : : IsolateFromNeverReadOnlySpaceObject ( string ) ; <nl> + internal : : Address value = <nl> + I : : ReadExternalPointerField ( isolate , string , I : : kStringResourceOffset ) ; <nl> + resource = reinterpret_cast < ExternalStringResourceBase * > ( value ) ; <nl> } <nl> return resource ; <nl> } <nl> mmm a / src / heap / heap - inl . h <nl> ppp b / src / heap / heap - inl . h <nl> void Heap : : FinalizeExternalString ( String string ) { <nl> ExternalBackingStoreType : : kExternalString , <nl> ext_string . ExternalPayloadSize ( ) ) ; <nl> <nl> - ext_string . DisposeResource ( ) ; <nl> + ext_string . DisposeResource ( isolate ( ) ) ; <nl> } <nl> <nl> Address Heap : : NewSpaceTop ( ) { return new_space_ - > top ( ) ; } <nl> mmm a / src / objects / string - inl . h <nl> ppp b / src / objects / string - inl . 
h <nl> <nl> # ifndef V8_OBJECTS_STRING_INL_H_ <nl> # define V8_OBJECTS_STRING_INL_H_ <nl> <nl> + # include " src / common / external - pointer . h " <nl> # include " src / objects / string . h " <nl> <nl> # include " src / handles / handles - inl . h " <nl> bool ExternalString : : is_uncached ( ) const { <nl> return ( type & kUncachedExternalStringMask ) = = kUncachedExternalStringTag ; <nl> } <nl> <nl> - Address ExternalString : : resource_as_address ( ) { <nl> - return ReadField < Address > ( kResourceOffset ) ; <nl> + DEF_GETTER ( ExternalString , resource_as_address , Address ) { <nl> + ExternalPointer_t encoded_address = <nl> + ReadField < ExternalPointer_t > ( kResourceOffset ) ; <nl> + return DecodeExternalPointer ( isolate , encoded_address ) ; <nl> } <nl> <nl> - void ExternalString : : set_address_as_resource ( Address address ) { <nl> - WriteField < Address > ( kResourceOffset , address ) ; <nl> + void ExternalString : : set_address_as_resource ( Isolate * isolate , <nl> + Address address ) { <nl> + const ExternalPointer_t encoded_address = <nl> + EncodeExternalPointer ( isolate , address ) ; <nl> + WriteField < ExternalPointer_t > ( kResourceOffset , encoded_address ) ; <nl> if ( IsExternalOneByteString ( ) ) { <nl> ExternalOneByteString : : cast ( * this ) . 
update_data_cache ( ) ; <nl> } else { <nl> void ExternalString : : set_address_as_resource ( Address address ) { <nl> } <nl> <nl> uint32_t ExternalString : : resource_as_uint32 ( ) { <nl> - return static_cast < uint32_t > ( ReadField < Address > ( kResourceOffset ) ) ; <nl> + ExternalPointer_t encoded_address = <nl> + ReadField < ExternalPointer_t > ( kResourceOffset ) ; <nl> + return static_cast < uint32_t > ( encoded_address ) ; <nl> } <nl> <nl> void ExternalString : : set_uint32_as_resource ( uint32_t value ) { <nl> - WriteField < Address > ( kResourceOffset , value ) ; <nl> + WriteField < ExternalPointer_t > ( kResourceOffset , value ) ; <nl> if ( is_uncached ( ) ) return ; <nl> WriteField < Address > ( kResourceDataOffset , kNullAddress ) ; <nl> } <nl> <nl> - void ExternalString : : DisposeResource ( ) { <nl> + void ExternalString : : DisposeResource ( Isolate * isolate ) { <nl> + const ExternalPointer_t encoded_address = <nl> + ReadField < ExternalPointer_t > ( kResourceOffset ) ; <nl> v8 : : String : : ExternalStringResourceBase * resource = <nl> reinterpret_cast < v8 : : String : : ExternalStringResourceBase * > ( <nl> - ReadField < Address > ( ExternalString : : kResourceOffset ) ) ; <nl> + DecodeExternalPointer ( isolate , encoded_address ) ) ; <nl> <nl> / / Dispose of the C + + object if it has not already been disposed . <nl> if ( resource ! 
= nullptr ) { <nl> resource - > Dispose ( ) ; <nl> - WriteField < Address > ( ExternalString : : kResourceOffset , kNullAddress ) ; <nl> + const ExternalPointer_t encoded_address = <nl> + EncodeExternalPointer ( isolate , kNullAddress ) ; <nl> + WriteField < ExternalPointer_t > ( kResourceOffset , encoded_address ) ; <nl> } <nl> } <nl> <nl> - const ExternalOneByteString : : Resource * ExternalOneByteString : : resource ( ) { <nl> - return reinterpret_cast < Resource * > ( ReadField < Address > ( kResourceOffset ) ) ; <nl> + DEF_GETTER ( ExternalOneByteString , resource , <nl> + const ExternalOneByteString : : Resource * ) { <nl> + const ExternalPointer_t encoded_address = <nl> + ReadField < ExternalPointer_t > ( kResourceOffset ) ; <nl> + return reinterpret_cast < Resource * > ( <nl> + DecodeExternalPointer ( isolate , encoded_address ) ) ; <nl> } <nl> <nl> void ExternalOneByteString : : update_data_cache ( ) { <nl> void ExternalOneByteString : : update_data_cache ( ) { <nl> <nl> void ExternalOneByteString : : SetResource ( <nl> Isolate * isolate , const ExternalOneByteString : : Resource * resource ) { <nl> - set_resource ( resource ) ; <nl> + set_resource ( isolate , resource ) ; <nl> size_t new_payload = resource = = nullptr ? 0 : resource - > length ( ) ; <nl> if ( new_payload > 0 ) { <nl> isolate - > heap ( ) - > UpdateExternalString ( * this , 0 , new_payload ) ; <nl> void ExternalOneByteString : : SetResource ( <nl> } <nl> <nl> void ExternalOneByteString : : set_resource ( <nl> - const ExternalOneByteString : : Resource * resource ) { <nl> - WriteField < Address > ( kResourceOffset , reinterpret_cast < Address > ( resource ) ) ; <nl> + Isolate * isolate , const ExternalOneByteString : : Resource * resource ) { <nl> + const ExternalPointer_t encoded_address = <nl> + EncodeExternalPointer ( isolate , reinterpret_cast < Address > ( resource ) ) ; <nl> + WriteField < ExternalPointer_t > ( kResourceOffset , encoded_address ) ; <nl> if ( resource ! 
= nullptr ) update_data_cache ( ) ; <nl> } <nl> <nl> uint8_t ExternalOneByteString : : Get ( int index ) { <nl> return GetChars ( ) [ index ] ; <nl> } <nl> <nl> - const ExternalTwoByteString : : Resource * ExternalTwoByteString : : resource ( ) { <nl> - return reinterpret_cast < Resource * > ( ReadField < Address > ( kResourceOffset ) ) ; <nl> + DEF_GETTER ( ExternalTwoByteString , resource , <nl> + const ExternalTwoByteString : : Resource * ) { <nl> + const ExternalPointer_t encoded_address = <nl> + ReadField < ExternalPointer_t > ( kResourceOffset ) ; <nl> + return reinterpret_cast < Resource * > ( <nl> + DecodeExternalPointer ( isolate , encoded_address ) ) ; <nl> } <nl> <nl> void ExternalTwoByteString : : update_data_cache ( ) { <nl> void ExternalTwoByteString : : update_data_cache ( ) { <nl> <nl> void ExternalTwoByteString : : SetResource ( <nl> Isolate * isolate , const ExternalTwoByteString : : Resource * resource ) { <nl> - set_resource ( resource ) ; <nl> + set_resource ( isolate , resource ) ; <nl> size_t new_payload = resource = = nullptr ? 0 : resource - > length ( ) * 2 ; <nl> if ( new_payload > 0 ) { <nl> isolate - > heap ( ) - > UpdateExternalString ( * this , 0 , new_payload ) ; <nl> void ExternalTwoByteString : : SetResource ( <nl> } <nl> <nl> void ExternalTwoByteString : : set_resource ( <nl> - const ExternalTwoByteString : : Resource * resource ) { <nl> - WriteField < Address > ( kResourceOffset , reinterpret_cast < Address > ( resource ) ) ; <nl> + Isolate * isolate , const ExternalTwoByteString : : Resource * resource ) { <nl> + const ExternalPointer_t encoded_address = <nl> + EncodeExternalPointer ( isolate , reinterpret_cast < Address > ( resource ) ) ; <nl> + WriteField < ExternalPointer_t > ( kResourceOffset , encoded_address ) ; <nl> if ( resource ! = nullptr ) update_data_cache ( ) ; <nl> } <nl> <nl> mmm a / src / objects / string . h <nl> ppp b / src / objects / string . 
h <nl> class ExternalString : public String { <nl> int ExternalPayloadSize ( ) const ; <nl> <nl> / / Used in the serializer / deserializer . <nl> - inline Address resource_as_address ( ) ; <nl> - inline void set_address_as_resource ( Address address ) ; <nl> + DECL_GETTER ( resource_as_address , Address ) <nl> + inline void set_address_as_resource ( Isolate * isolate , Address address ) ; <nl> inline uint32_t resource_as_uint32 ( ) ; <nl> inline void set_uint32_as_resource ( uint32_t value ) ; <nl> <nl> / / Disposes string ' s resource object if it has not already been disposed . <nl> - inline void DisposeResource ( ) ; <nl> + inline void DisposeResource ( Isolate * isolate ) ; <nl> <nl> STATIC_ASSERT ( kResourceOffset = = Internals : : kStringResourceOffset ) ; <nl> static const int kSizeOfAllExternalStrings = kHeaderSize ; <nl> class ExternalOneByteString : public ExternalString { <nl> using Resource = v8 : : String : : ExternalOneByteStringResource ; <nl> <nl> / / The underlying resource . <nl> - inline const Resource * resource ( ) ; <nl> + DECL_GETTER ( resource , const Resource * ) <nl> <nl> / / It is assumed that the previous resource is null . If it is not null , then <nl> / / it is the responsability of the caller the handle the previous resource . <nl> inline void SetResource ( Isolate * isolate , const Resource * buffer ) ; <nl> / / Used only during serialization . <nl> - inline void set_resource ( const Resource * buffer ) ; <nl> + inline void set_resource ( Isolate * isolate , const Resource * buffer ) ; <nl> <nl> / / Update the pointer cache to the external character array . <nl> / / The cached pointer is always valid , as the external character array does = <nl> class ExternalTwoByteString : public ExternalString { <nl> using Resource = v8 : : String : : ExternalStringResource ; <nl> <nl> / / The underlying string resource . 
<nl> - inline const Resource * resource ( ) ; <nl> + DECL_GETTER ( resource , const Resource * ) <nl> <nl> / / It is assumed that the previous resource is null . If it is not null , then <nl> / / it is the responsability of the caller the handle the previous resource . <nl> inline void SetResource ( Isolate * isolate , const Resource * buffer ) ; <nl> / / Used only during serialization . <nl> - inline void set_resource ( const Resource * buffer ) ; <nl> + inline void set_resource ( Isolate * isolate , const Resource * buffer ) ; <nl> <nl> / / Update the pointer cache to the external character array . <nl> / / The cached pointer is always valid , as the external character array does = <nl> mmm a / src / objects / string . tq <nl> ppp b / src / objects / string . tq <nl> extern class ConsString extends String { <nl> @ abstract <nl> @ generateBodyDescriptor <nl> extern class ExternalString extends String { <nl> - resource : RawPtr ; <nl> + resource : ExternalPointer ; <nl> resource_data : RawPtr ; <nl> } <nl> <nl> mmm a / src / snapshot / deserializer . cc <nl> ppp b / src / snapshot / deserializer . cc <nl> HeapObject Deserializer : : PostProcessNewObject ( HeapObject obj , <nl> uint32_t index = string . resource_as_uint32 ( ) ; <nl> Address address = <nl> static_cast < Address > ( isolate_ - > api_external_references ( ) [ index ] ) ; <nl> - string . set_address_as_resource ( address ) ; <nl> + string . set_address_as_resource ( isolate_ , address ) ; <nl> isolate_ - > heap ( ) - > UpdateExternalString ( string , 0 , <nl> string . ExternalPayloadSize ( ) ) ; <nl> isolate_ - > heap ( ) - > RegisterExternalString ( String : : cast ( obj ) ) ; <nl> mmm a / src / snapshot / serializer . cc <nl> ppp b / src / snapshot / serializer . cc <nl> void Serializer : : ObjectSerializer : : SerializeExternalString ( ) { <nl> DCHECK ( reference . is_from_api ( ) ) ; <nl> string . set_uint32_as_resource ( reference . index ( ) ) ; <nl> SerializeObject ( ) ; <nl> - string . 
set_address_as_resource ( resource ) ; <nl> + string . set_address_as_resource ( serializer_ - > isolate ( ) , resource ) ; <nl> } else { <nl> SerializeExternalStringAsSequentialString ( ) ; <nl> } <nl>
[ sandbox ] Wire ExternalString resource through bottleneck
v8/v8
c64b52a8929108cbeb98369f54e6f8513a00fc68
2020-05-06T12:35:46Z
mmm a / jstests / noPassthrough / profile_operation_metrics . js <nl> ppp b / jstests / noPassthrough / profile_operation_metrics . js <nl> const operations = [ <nl> assert . gt ( profileDoc . cursorSeeks , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 2 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . 
docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> - name : ' findAndModify ' , <nl> + name : ' findAndModifyUpdate ' , <nl> command : ( db ) = > { <nl> assert ( db [ collName ] . findAndModify ( { query : { _id : 1 } , update : { $ set : { a : 1 } } } ) ) ; <nl> } , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 2 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 2 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . docBytesWritten , 0 ) ; <nl> assert . eq ( profileDoc . idxEntryBytesWritten , 0 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> / / Clear the profile collection so we can easily identify new operations with similar filters as <nl> const operations = [ <nl> assert . eq ( profileDoc . docBytesWritten , 0 ) ; <nl> assert . eq ( profileDoc . idxEntryBytesWritten , 0 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . 
docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> + } <nl> + } , <nl> + resetProfileColl , <nl> + { <nl> + name : ' findAndModifyRemove ' , <nl> + command : ( db ) = > { <nl> + assert . commandWorked ( db [ collName ] . insert ( { _id : 3 , a : 0 } ) ) ; <nl> + assert ( db [ collName ] . findAndModify ( { query : { _id : 3 } , remove : true } ) ) ; <nl> + } , <nl> + profileFilter : { op : ' command ' , ' command . findandmodify ' : collName } , <nl> + profileAssert : ( db , profileDoc ) = > { <nl> + / / Should read exactly as many bytes are in the document . Debug builds may perform extra <nl> + / / reads of the _mdb_catalog . <nl> + if ( ! isDebugBuild ( db ) ) { <nl> + assert . eq ( profileDoc . docBytesRead , 29 ) ; <nl> + assert . eq ( profileDoc . docUnitsRead , 1 ) ; <nl> + assert . eq ( profileDoc . cursorSeeks , 3 ) ; <nl> + } else { <nl> + assert . gte ( profileDoc . docBytesRead , 29 ) ; <nl> + assert . gte ( profileDoc . docUnitsRead , 1 ) ; <nl> + assert . gte ( profileDoc . cursorSeeks , 3 ) ; <nl> + } <nl> + assert . eq ( profileDoc . idxEntryBytesRead , 3 ) ; <nl> + assert . eq ( profileDoc . idxEntryUnitsRead , 1 ) ; <nl> + assert . eq ( profileDoc . docBytesWritten , 29 ) ; <nl> + assert . eq ( profileDoc . docUnitsWritten , 1 ) ; <nl> + assert . eq ( profileDoc . idxEntryBytesWritten , 3 ) ; <nl> + assert . eq ( profileDoc . idxEntryUnitsWritten , 1 ) ; <nl> + assert . eq ( profileDoc . 
keysSorted , 0 ) ; <nl> + assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 1 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . docUnitsWritten , 1 ) ; <nl> assert . eq ( profileDoc . idxEntryBytesWritten , 3 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 1 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 5 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 150 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 10 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 2 ) ; <nl> assert . eq ( profileDoc . 
keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 1 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 2 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 2 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 1 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . 
eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . docUnitsWritten , 1 ) ; <nl> assert . eq ( profileDoc . idxEntryBytesWritten , 0 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 1 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 100 ) ; <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 100 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 100 ) ; <nl> } , <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 100 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 100 ) ; <nl> } , <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 100 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 100 ) ; <nl> } , <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 100 ) ; <nl> assert . 
eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 1 ) ; <nl> } , <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 100 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 5 ) ; <nl> } , <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 100 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 101 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 100 ) ; <nl> } , <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . keysSorted , 0 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> } <nl> + assert . eq ( profileDoc . docUnitsReturned , 10 ) ; <nl> } , <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 0 ) ; <nl> assert . eq ( profileDoc . keysSorted , 100 ) ; <nl> assert . eq ( profileDoc . sorterSpills , 0 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 10 ) ; <nl> } , <nl> } , <nl> { <nl> const operations = [ <nl> assert . eq ( profileDoc . docUnitsWritten , 1 ) ; <nl> assert . eq ( profileDoc . idxEntryBytesWritten , 2 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 1 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . docUnitsWritten , 9 ) ; <nl> assert . eq ( profileDoc . idxEntryBytesWritten , 27 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 9 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . docUnitsWritten , 2 ) ; <nl> assert . eq ( profileDoc . 
idxEntryBytesWritten , 5 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 2 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } , <nl> resetProfileColl , <nl> const operations = [ <nl> assert . eq ( profileDoc . docUnitsWritten , 18 ) ; <nl> assert . eq ( profileDoc . idxEntryBytesWritten , 54 ) ; <nl> assert . eq ( profileDoc . idxEntryUnitsWritten , 18 ) ; <nl> + assert . eq ( profileDoc . docUnitsReturned , 0 ) ; <nl> } <nl> } <nl> ] ; <nl> mmm a / src / mongo / db / commands / find_and_modify . cpp <nl> ppp b / src / mongo / db / commands / find_and_modify . cpp <nl> <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> # include " mongo / db / retryable_writes_stats . h " <nl> # include " mongo / db / s / collection_sharding_state . h " <nl> + # include " mongo / db / stats / resource_consumption_metrics . h " <nl> # include " mongo / db / stats / top . h " <nl> # include " mongo / db / storage / duplicate_key_error_info . h " <nl> # include " mongo / db / transaction_participant . h " <nl> class CmdFindAndModify : public BasicCommand { <nl> } <nl> recordStatsForTopCommand ( opCtx ) ; <nl> <nl> + if ( docFound ) { <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> + docUnitsReturned . observeOne ( docFound - > objsize ( ) ) ; <nl> + <nl> + auto & metricsCollector = ResourceConsumption : : MetricsCollector : : get ( opCtx ) ; <nl> + metricsCollector . incrementDocUnitsReturned ( docUnitsReturned ) ; <nl> + } <nl> + <nl> appendCommandResponse ( exec . get ( ) , request . getRemove ( ) . value_or ( false ) , docFound , & result ) ; <nl> <nl> return true ; <nl> class CmdFindAndModify : public BasicCommand { <nl> } <nl> recordStatsForTopCommand ( opCtx ) ; <nl> <nl> + if ( docFound ) { <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> + docUnitsReturned . 
observeOne ( docFound - > objsize ( ) ) ; <nl> + <nl> + auto & metricsCollector = ResourceConsumption : : MetricsCollector : : get ( opCtx ) ; <nl> + metricsCollector . incrementDocUnitsReturned ( docUnitsReturned ) ; <nl> + } <nl> + <nl> appendCommandResponse ( exec . get ( ) , request . getRemove ( ) . value_or ( false ) , docFound , & result ) ; <nl> <nl> return true ; <nl> mmm a / src / mongo / db / commands / find_cmd . cpp <nl> ppp b / src / mongo / db / commands / find_cmd . cpp <nl> <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> # include " mongo / db / service_context . h " <nl> # include " mongo / db / stats / counters . h " <nl> + # include " mongo / db / stats / resource_consumption_metrics . h " <nl> # include " mongo / db / stats / server_read_concern_metrics . h " <nl> # include " mongo / db / storage / storage_engine . h " <nl> # include " mongo / db / transaction_participant . h " <nl> class FindCmd final : public Command { <nl> PlanExecutor : : ExecState state = PlanExecutor : : ADVANCED ; <nl> std : : uint64_t numResults = 0 ; <nl> bool stashedResult = false ; <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> <nl> try { <nl> while ( ! FindCommon : : enoughForFirstBatch ( originalQR , numResults ) & & <nl> class FindCmd final : public Command { <nl> / / Add result to output buffer . <nl> firstBatch . append ( obj ) ; <nl> numResults + + ; <nl> + docUnitsReturned . observeOne ( obj . objsize ( ) ) ; <nl> } <nl> } catch ( DBException & exception ) { <nl> firstBatch . abandon ( ) ; <nl> class FindCmd final : public Command { <nl> <nl> / / Generate the response object to send to the client . <nl> firstBatch . done ( cursorId , nss . ns ( ) ) ; <nl> + <nl> + / / Increment this metric once we have generated a response and we know it will return <nl> + / / documents . <nl> + auto & metricsCollector = ResourceConsumption : : MetricsCollector : : get ( opCtx ) ; <nl> + metricsCollector . 
incrementDocUnitsReturned ( docUnitsReturned ) ; <nl> } <nl> <nl> void appendMirrorableRequest ( BSONObjBuilder * bob ) const override { <nl> mmm a / src / mongo / db / commands / getmore_cmd . cpp <nl> ppp b / src / mongo / db / commands / getmore_cmd . cpp <nl> <nl> # include " mongo / db / repl / speculative_majority_read_info . h " <nl> # include " mongo / db / service_context . h " <nl> # include " mongo / db / stats / counters . h " <nl> + # include " mongo / db / stats / resource_consumption_metrics . h " <nl> # include " mongo / db / stats / top . h " <nl> # include " mongo / logv2 / log . h " <nl> # include " mongo / s / chunk_version . h " <nl> class GetMoreCmd final : public Command { <nl> const GetMoreRequest & request , <nl> const bool isTailable , <nl> CursorResponseBuilder * nextBatch , <nl> - std : : uint64_t * numResults ) { <nl> + std : : uint64_t * numResults , <nl> + ResourceConsumption : : DocumentUnitCounter * docUnitsReturned ) { <nl> PlanExecutor * exec = cursor - > getExecutor ( ) ; <nl> <nl> / / If an awaitData getMore is killed during this process due to our max time expiring at <nl> class GetMoreCmd final : public Command { <nl> nextBatch - > setPostBatchResumeToken ( exec - > getPostBatchResumeToken ( ) ) ; <nl> nextBatch - > append ( obj ) ; <nl> ( * numResults ) + + ; <nl> + docUnitsReturned - > observeOne ( obj . objsize ( ) ) ; <nl> } <nl> } catch ( const ExceptionFor < ErrorCodes : : CloseChangeStream > & ) { <nl> / / This exception indicates that we should close the cursor without reporting an <nl> class GetMoreCmd final : public Command { <nl> CursorResponseBuilder nextBatch ( reply , options ) ; <nl> BSONObj obj ; <nl> std : : uint64_t numResults = 0 ; <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> <nl> / / We report keysExamined and docsExamined to OpDebug for a given getMore operation . 
To <nl> / / obtain these values we need to take a diff of the pre - execution and post - execution <nl> class GetMoreCmd final : public Command { <nl> _request , <nl> cursorPin - > isTailable ( ) , <nl> & nextBatch , <nl> - & numResults ) ; <nl> + & numResults , <nl> + & docUnitsReturned ) ; <nl> <nl> PlanSummaryStats postExecutionStats ; <nl> exec - > getPlanExplainer ( ) . getSummaryStats ( & postExecutionStats ) ; <nl> class GetMoreCmd final : public Command { <nl> <nl> nextBatch . done ( respondWithId , _request . nss . ns ( ) ) ; <nl> <nl> + / / Increment this metric once we have generated a response and we know it will return <nl> + / / documents . <nl> + auto & metricsCollector = ResourceConsumption : : MetricsCollector : : get ( opCtx ) ; <nl> + metricsCollector . incrementDocUnitsReturned ( docUnitsReturned ) ; <nl> + <nl> / / Ensure log and profiler include the number of results returned in this getMore ' s <nl> / / response batch . <nl> curOp - > debug ( ) . nreturned = numResults ; <nl> mmm a / src / mongo / db / commands / run_aggregate . cpp <nl> ppp b / src / mongo / db / commands / run_aggregate . cpp <nl> <nl> # include " mongo / db / s / operation_sharding_state . h " <nl> # include " mongo / db / s / sharding_state . h " <nl> # include " mongo / db / service_context . h " <nl> + # include " mongo / db / stats / resource_consumption_metrics . h " <nl> # include " mongo / db / storage / storage_options . h " <nl> # include " mongo / db / views / view . h " <nl> # include " mongo / db / views / view_catalog . 
h " <nl> bool handleCursorCommand ( OperationContext * opCtx , <nl> invariant ( cursor ) ; <nl> auto exec = cursor - > getExecutor ( ) ; <nl> invariant ( exec ) ; <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> <nl> bool stashedResult = false ; <nl> / / We are careful to avoid ever calling ' getNext ( ) ' on the PlanExecutor when the batchSize is <nl> bool handleCursorCommand ( OperationContext * opCtx , <nl> / / If this executor produces a postBatchResumeToken , add it to the cursor response . <nl> responseBuilder . setPostBatchResumeToken ( exec - > getPostBatchResumeToken ( ) ) ; <nl> responseBuilder . append ( nextDoc ) ; <nl> + docUnitsReturned . observeOne ( nextDoc . objsize ( ) ) ; <nl> } <nl> <nl> if ( cursor ) { <nl> bool handleCursorCommand ( OperationContext * opCtx , <nl> const CursorId cursorId = cursor ? cursor - > cursorid ( ) : 0LL ; <nl> responseBuilder . done ( cursorId , nsForCursor . ns ( ) ) ; <nl> <nl> + auto & metricsCollector = ResourceConsumption : : MetricsCollector : : get ( opCtx ) ; <nl> + metricsCollector . incrementDocUnitsReturned ( docUnitsReturned ) ; <nl> + <nl> return static_cast < bool > ( cursor ) ; <nl> } <nl> <nl> mmm a / src / mongo / db / query / find . cpp <nl> ppp b / src / mongo / db / query / find . cpp <nl> void generateBatch ( int ntoreturn , <nl> ClientCursor * cursor , <nl> BufBuilder * bb , <nl> std : : uint64_t * numResults , <nl> + ResourceConsumption : : DocumentUnitCounter * docUnitsReturned , <nl> PlanExecutor : : ExecState * state ) { <nl> PlanExecutor * exec = cursor - > getExecutor ( ) ; <nl> <nl> void generateBatch ( int ntoreturn , <nl> <nl> / / Count the result . <nl> ( * numResults ) + + ; <nl> + <nl> + docUnitsReturned - > observeOne ( obj . 
objsize ( ) ) ; <nl> } <nl> } catch ( DBException & exception ) { <nl> auto & & explainer = exec - > getPlanExplainer ( ) ; <nl> Message getMore ( OperationContext * opCtx , <nl> <nl> std : : uint64_t numResults = 0 ; <nl> int startingResult = 0 ; <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> <nl> const int initialBufSize = <nl> 512 + sizeof ( QueryResult : : Value ) + FindCommon : : kMaxBytesToReturnToClientAtOnce ; <nl> Message getMore ( OperationContext * opCtx , <nl> nullptr ) ; <nl> } <nl> <nl> - generateBatch ( ntoreturn , cursorPin . getCursor ( ) , & bb , & numResults , & state ) ; <nl> + generateBatch ( ntoreturn , cursorPin . getCursor ( ) , & bb , & numResults , & docUnitsReturned , & state ) ; <nl> <nl> / / If this is an await data cursor , and we hit EOF without generating any results , then we block <nl> / / waiting for new data to arrive . <nl> Message getMore ( OperationContext * opCtx , <nl> <nl> / / We woke up because either the timed_wait expired , or there was more data . Either way , <nl> / / attempt to generate another batch of results . <nl> - generateBatch ( ntoreturn , cursorPin . getCursor ( ) , & bb , & numResults , & state ) ; <nl> + generateBatch ( <nl> + ntoreturn , cursorPin . getCursor ( ) , & bb , & numResults , & docUnitsReturned , & state ) ; <nl> } <nl> <nl> PlanSummaryStats postExecutionStats ; <nl> Message getMore ( OperationContext * opCtx , <nl> dropAndReaquireReadLock ) ; <nl> } <nl> <nl> + / / Increment this metric once the command succeeds and we know it will return documents . <nl> + auto & metricsCollector = ResourceConsumption : : MetricsCollector : : get ( opCtx ) ; <nl> + metricsCollector . incrementDocUnitsReturned ( docUnitsReturned ) ; <nl> + <nl> QueryResult : : View qr = bb . buf ( ) ; <nl> qr . msgdata ( ) . setLen ( bb . len ( ) ) ; <nl> qr . msgdata ( ) . 
setOperation ( opReply ) ; <nl> bool runQuery ( OperationContext * opCtx , <nl> <nl> / / How many results have we obtained from the executor ? <nl> int numResults = 0 ; <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> <nl> BSONObj obj ; <nl> PlanExecutor : : ExecState state ; <nl> bool runQuery ( OperationContext * opCtx , <nl> / / Count the result . <nl> + + numResults ; <nl> <nl> + docUnitsReturned . observeOne ( obj . objsize ( ) ) ; <nl> + <nl> if ( FindCommon : : enoughForFirstBatch ( qr , numResults ) ) { <nl> LOGV2_DEBUG ( 20915 , <nl> 5 , <nl> bool runQuery ( OperationContext * opCtx , <nl> endQueryOp ( opCtx , collection . getCollection ( ) , * exec , numResults , ccId ) ; <nl> } <nl> <nl> + / / Increment this metric once it has succeeded and we know it will return documents . <nl> + auto & metricsCollector = ResourceConsumption : : MetricsCollector : : get ( opCtx ) ; <nl> + metricsCollector . incrementDocUnitsReturned ( docUnitsReturned ) ; <nl> + <nl> / / Fill out the output buffer ' s header . <nl> QueryResult : : View queryResultView = bb . buf ( ) ; <nl> queryResultView . setCursorId ( ccId ) ; <nl> mmm a / src / mongo / db / stats / resource_consumption_metrics . cpp <nl> ppp b / src / mongo / db / stats / resource_consumption_metrics . 
cpp <nl> ResourceConsumption : : MetricsCollector & ResourceConsumption : : MetricsCollector : : ge <nl> return getMetricsCollector ( opCtx ) ; <nl> } <nl> <nl> + void ResourceConsumption : : UnitCounter : : observeOne ( size_t datumBytes ) { <nl> + _units + = std : : ceil ( datumBytes / static_cast < float > ( unitSize ( ) ) ) ; <nl> + _bytes + = datumBytes ; <nl> + } <nl> + <nl> + int ResourceConsumption : : DocumentUnitCounter : : unitSize ( ) const { <nl> + return gDocumentUnitSizeBytes ; <nl> + } <nl> + <nl> + int ResourceConsumption : : IdxEntryUnitCounter : : unitSize ( ) const { <nl> + return gIndexEntryUnitSizeBytes ; <nl> + } <nl> + <nl> void ResourceConsumption : : ReadMetrics : : toBson ( BSONObjBuilder * builder ) const { <nl> - builder - > appendNumber ( kDocBytesRead , docBytesRead ) ; <nl> - builder - > appendNumber ( kDocUnitsRead , docUnitsRead ) ; <nl> - builder - > appendNumber ( kIdxEntryBytesRead , idxEntryBytesRead ) ; <nl> - builder - > appendNumber ( kIdxEntryUnitsRead , idxEntryUnitsRead ) ; <nl> + builder - > appendNumber ( kDocBytesRead , docsRead . bytes ( ) ) ; <nl> + builder - > appendNumber ( kDocUnitsRead , docsRead . units ( ) ) ; <nl> + builder - > appendNumber ( kIdxEntryBytesRead , idxEntriesRead . bytes ( ) ) ; <nl> + builder - > appendNumber ( kIdxEntryUnitsRead , idxEntriesRead . units ( ) ) ; <nl> builder - > appendNumber ( kKeysSorted , keysSorted ) ; <nl> builder - > appendNumber ( kSorterSpills , sorterSpills ) ; <nl> - builder - > appendNumber ( kDocUnitsReturned , docUnitsReturned ) ; <nl> + builder - > appendNumber ( kDocUnitsReturned , docsReturned . 
units ( ) ) ; <nl> builder - > appendNumber ( kCursorSeeks , cursorSeeks ) ; <nl> } <nl> <nl> void ResourceConsumption : : WriteMetrics : : toBson ( BSONObjBuilder * builder ) const { <nl> - builder - > appendNumber ( kDocBytesWritten , docBytesWritten ) ; <nl> - builder - > appendNumber ( kDocUnitsWritten , docUnitsWritten ) ; <nl> - builder - > appendNumber ( kIdxEntryBytesWritten , idxEntryBytesWritten ) ; <nl> - builder - > appendNumber ( kIdxEntryUnitsWritten , idxEntryUnitsWritten ) ; <nl> + builder - > appendNumber ( kDocBytesWritten , docsWritten . bytes ( ) ) ; <nl> + builder - > appendNumber ( kDocUnitsWritten , docsWritten . units ( ) ) ; <nl> + builder - > appendNumber ( kIdxEntryBytesWritten , idxEntriesWritten . bytes ( ) ) ; <nl> + builder - > appendNumber ( kIdxEntryUnitsWritten , idxEntriesWritten . units ( ) ) ; <nl> } <nl> <nl> void ResourceConsumption : : AggregatedMetrics : : toBson ( BSONObjBuilder * builder ) const { <nl> void ResourceConsumption : : OperationMetrics : : toBson ( BSONObjBuilder * builder ) cons <nl> } <nl> <nl> void ResourceConsumption : : OperationMetrics : : toBsonNonZeroFields ( BSONObjBuilder * builder ) const { <nl> - appendNonZeroMetric ( builder , kDocBytesRead , readMetrics . docBytesRead ) ; <nl> - appendNonZeroMetric ( builder , kDocUnitsRead , readMetrics . docUnitsRead ) ; <nl> - appendNonZeroMetric ( builder , kIdxEntryBytesRead , readMetrics . idxEntryBytesRead ) ; <nl> - appendNonZeroMetric ( builder , kIdxEntryUnitsRead , readMetrics . idxEntryUnitsRead ) ; <nl> + appendNonZeroMetric ( builder , kDocBytesRead , readMetrics . docsRead . bytes ( ) ) ; <nl> + appendNonZeroMetric ( builder , kDocUnitsRead , readMetrics . docsRead . units ( ) ) ; <nl> + appendNonZeroMetric ( builder , kIdxEntryBytesRead , readMetrics . idxEntriesRead . bytes ( ) ) ; <nl> + appendNonZeroMetric ( builder , kIdxEntryUnitsRead , readMetrics . idxEntriesRead . 
units ( ) ) ; <nl> appendNonZeroMetric ( builder , kKeysSorted , readMetrics . keysSorted ) ; <nl> appendNonZeroMetric ( builder , kSorterSpills , readMetrics . sorterSpills ) ; <nl> - appendNonZeroMetric ( builder , kDocUnitsReturned , readMetrics . docUnitsReturned ) ; <nl> + appendNonZeroMetric ( builder , kDocUnitsReturned , readMetrics . docsReturned . units ( ) ) ; <nl> appendNonZeroMetric ( builder , kCursorSeeks , readMetrics . cursorSeeks ) ; <nl> <nl> if ( cpuTimer ) { <nl> appendNonZeroMetric ( builder , kCpuNanos , durationCount < Nanoseconds > ( cpuTimer - > getElapsed ( ) ) ) ; <nl> } <nl> - appendNonZeroMetric ( builder , kDocBytesWritten , writeMetrics . docBytesWritten ) ; <nl> - appendNonZeroMetric ( builder , kDocUnitsWritten , writeMetrics . docUnitsWritten ) ; <nl> - appendNonZeroMetric ( builder , kIdxEntryBytesWritten , writeMetrics . idxEntryBytesWritten ) ; <nl> - appendNonZeroMetric ( builder , kIdxEntryUnitsWritten , writeMetrics . idxEntryUnitsWritten ) ; <nl> + appendNonZeroMetric ( builder , kDocBytesWritten , writeMetrics . docsWritten . bytes ( ) ) ; <nl> + appendNonZeroMetric ( builder , kDocUnitsWritten , writeMetrics . docsWritten . units ( ) ) ; <nl> + appendNonZeroMetric ( builder , kIdxEntryBytesWritten , writeMetrics . idxEntriesWritten . bytes ( ) ) ; <nl> + appendNonZeroMetric ( builder , kIdxEntryUnitsWritten , writeMetrics . idxEntriesWritten . units ( ) ) ; <nl> } <nl> <nl> template < typename Func > <nl> inline void ResourceConsumption : : MetricsCollector : : _doIfCollecting ( Func & & func ) <nl> } <nl> <nl> void ResourceConsumption : : MetricsCollector : : incrementOneDocRead ( size_t docBytesRead ) { <nl> - _doIfCollecting ( [ & ] ( ) { <nl> - size_t docUnits = std : : ceil ( docBytesRead / static_cast < float > ( gDocumentUnitSizeBytes ) ) ; <nl> - _metrics . readMetrics . docBytesRead + = docBytesRead ; <nl> - _metrics . readMetrics . 
docUnitsRead + = docUnits ; <nl> - } ) ; <nl> + _doIfCollecting ( [ & ] ( ) { _metrics . readMetrics . docsRead . observeOne ( docBytesRead ) ; } ) ; <nl> } <nl> <nl> void ResourceConsumption : : MetricsCollector : : incrementOneIdxEntryRead ( size_t bytesRead ) { <nl> - _doIfCollecting ( [ & ] ( ) { <nl> - size_t units = std : : ceil ( bytesRead / static_cast < float > ( gIndexEntryUnitSizeBytes ) ) ; <nl> - _metrics . readMetrics . idxEntryBytesRead + = bytesRead ; <nl> - _metrics . readMetrics . idxEntryUnitsRead + = units ; <nl> - } ) ; <nl> + _doIfCollecting ( [ & ] ( ) { _metrics . readMetrics . idxEntriesRead . observeOne ( bytesRead ) ; } ) ; <nl> } <nl> <nl> void ResourceConsumption : : MetricsCollector : : incrementKeysSorted ( size_t keysSorted ) { <nl> void ResourceConsumption : : MetricsCollector : : incrementSorterSpills ( size_t spills ) <nl> _doIfCollecting ( [ & ] ( ) { _metrics . readMetrics . sorterSpills + = spills ; } ) ; <nl> } <nl> <nl> - void ResourceConsumption : : MetricsCollector : : incrementDocUnitsReturned ( size_t returned ) { <nl> - _doIfCollecting ( [ & ] ( ) { _metrics . readMetrics . docUnitsReturned + = returned ; } ) ; <nl> + void ResourceConsumption : : MetricsCollector : : incrementDocUnitsReturned ( <nl> + DocumentUnitCounter docUnits ) { <nl> + _doIfCollecting ( [ & ] ( ) { _metrics . readMetrics . docsReturned + = docUnits ; } ) ; <nl> } <nl> <nl> void ResourceConsumption : : MetricsCollector : : incrementOneDocWritten ( size_t bytesWritten ) { <nl> - _doIfCollecting ( [ & ] { <nl> - size_t docUnits = std : : ceil ( bytesWritten / static_cast < float > ( gDocumentUnitSizeBytes ) ) ; <nl> - _metrics . writeMetrics . docBytesWritten + = bytesWritten ; <nl> - _metrics . writeMetrics . docUnitsWritten + = docUnits ; <nl> - } ) ; <nl> + _doIfCollecting ( [ & ] { _metrics . writeMetrics . docsWritten . 
observeOne ( bytesWritten ) ; } ) ; <nl> } <nl> <nl> void ResourceConsumption : : MetricsCollector : : incrementOneIdxEntryWritten ( size_t bytesWritten ) { <nl> - _doIfCollecting ( [ & ] { <nl> - size_t idxUnits = std : : ceil ( bytesWritten / static_cast < float > ( gIndexEntryUnitSizeBytes ) ) ; <nl> - _metrics . writeMetrics . idxEntryBytesWritten + = bytesWritten ; <nl> - _metrics . writeMetrics . idxEntryUnitsWritten + = idxUnits ; <nl> - } ) ; <nl> + _doIfCollecting ( [ & ] { _metrics . writeMetrics . idxEntriesWritten . observeOne ( bytesWritten ) ; } ) ; <nl> } <nl> <nl> void ResourceConsumption : : MetricsCollector : : beginScopedCollecting ( OperationContext * opCtx , <nl> mmm a / src / mongo / db / stats / resource_consumption_metrics . h <nl> ppp b / src / mongo / db / stats / resource_consumption_metrics . h <nl> class ResourceConsumption { <nl> static ResourceConsumption & get ( OperationContext * opCtx ) ; <nl> static ResourceConsumption & get ( ServiceContext * svcCtx ) ; <nl> <nl> + / * * <nl> + * UnitCounter observes individual input datums and then calculates the total number of bytes <nl> + * and whole number units observed . <nl> + * / <nl> + class UnitCounter { <nl> + public : <nl> + UnitCounter ( ) = default ; <nl> + <nl> + void add ( const UnitCounter & other ) { <nl> + _bytes + = other . _bytes ; <nl> + _units + = other . _units ; <nl> + } <nl> + <nl> + UnitCounter & operator + = ( const UnitCounter & other ) { <nl> + add ( other ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + long long bytes ( ) const { <nl> + return _bytes ; <nl> + } <nl> + long long units ( ) const { <nl> + return _units ; <nl> + } <nl> + <nl> + / * * <nl> + * Call once per input datum with its size in bytes . <nl> + * <nl> + * This function calculates the number of units observed based on the implentation - specific <nl> + * unitSize ( ) . 
The function uses the following formula to calculate the number of units per <nl> + * datum : <nl> + * <nl> + * units = ceil ( datum bytes / unit size in bytes ) <nl> + * <nl> + * This achieves the goal of counting small datums as at least one unit while ensuring <nl> + * larger units are accounted proportionately . This can result in overstating smaller datums <nl> + * when the unit size is large . This is desired behavior , and the extent to which small <nl> + * datums are overstated is tunable by the unit size of the implementor . <nl> + * / <nl> + void observeOne ( size_t datumBytes ) ; <nl> + <nl> + protected : <nl> + / * * <nl> + * Returns the implementation - specific unit size . <nl> + * / <nl> + virtual int unitSize ( ) const = 0 ; <nl> + <nl> + long long _bytes = 0 ; <nl> + long long _units = 0 ; <nl> + } ; <nl> + <nl> + / * * DocumentUnitCounter records the number of document units observed . * / <nl> + class DocumentUnitCounter : public UnitCounter { <nl> + private : <nl> + int unitSize ( ) const final ; <nl> + } ; <nl> + <nl> + / * * IdxEntryUnitCounter records the number of index entry units observed . * / <nl> + class IdxEntryUnitCounter : public UnitCounter { <nl> + private : <nl> + int unitSize ( ) const final ; <nl> + } ; <nl> + <nl> / * * ReadMetrics maintains metrics for read operations . * / <nl> class ReadMetrics { <nl> public : <nl> + ReadMetrics ( ) = default ; <nl> + <nl> void add ( const ReadMetrics & other ) { <nl> - docBytesRead + = other . docBytesRead ; <nl> - docUnitsRead + = other . docUnitsRead ; <nl> - idxEntryBytesRead + = other . idxEntryBytesRead ; <nl> - idxEntryUnitsRead + = other . idxEntryUnitsRead ; <nl> + docsRead + = other . docsRead ; <nl> + idxEntriesRead + = other . idxEntriesRead ; <nl> + docsReturned + = other . docsReturned ; <nl> keysSorted + = other . keysSorted ; <nl> sorterSpills + = other . sorterSpills ; <nl> - docUnitsReturned + = other . docUnitsReturned ; <nl> cursorSeeks + = other . 
cursorSeeks ; <nl> } <nl> <nl> class ResourceConsumption { <nl> * / <nl> void toBson ( BSONObjBuilder * builder ) const ; <nl> <nl> - / / Number of document bytes read <nl> - long long docBytesRead = 0 ; <nl> / / Number of document units read <nl> - long long docUnitsRead = 0 ; <nl> - / / Number of index entry bytes read <nl> - long long idxEntryBytesRead = 0 ; <nl> - / / Number of index entries units read <nl> - long long idxEntryUnitsRead = 0 ; <nl> + DocumentUnitCounter docsRead ; <nl> + / / Number of index entry units read <nl> + IdxEntryUnitCounter idxEntriesRead ; <nl> + / / Number of document units returned by a query <nl> + DocumentUnitCounter docsReturned ; <nl> + <nl> / / Number of keys sorted for query operations <nl> long long keysSorted = 0 ; <nl> / / Number of individual spills of data to disk by the sorter <nl> long long sorterSpills = 0 ; <nl> - / / Number of document units returned by a query <nl> - long long docUnitsReturned = 0 ; <nl> / / Number of cursor seeks <nl> long long cursorSeeks = 0 ; <nl> } ; <nl> class ResourceConsumption { <nl> class WriteMetrics { <nl> public : <nl> void add ( const WriteMetrics & other ) { <nl> - docBytesWritten + = other . docBytesWritten ; <nl> - docUnitsWritten + = other . docUnitsWritten ; <nl> - idxEntryBytesWritten + = other . idxEntryBytesWritten ; <nl> - idxEntryUnitsWritten + = other . idxEntryUnitsWritten ; <nl> + docsWritten + = other . docsWritten ; <nl> + idxEntriesWritten + = other . 
idxEntriesWritten ; <nl> } <nl> <nl> WriteMetrics & operator + = ( const WriteMetrics & other ) { <nl> class ResourceConsumption { <nl> * / <nl> void toBson ( BSONObjBuilder * builder ) const ; <nl> <nl> - / / Number of document bytes written <nl> - long long docBytesWritten = 0 ; <nl> - / / Number of document units written <nl> - long long docUnitsWritten = 0 ; <nl> - / / Number of index entry bytes written <nl> - long long idxEntryBytesWritten = 0 ; <nl> - / / Number of index entry units written <nl> - long long idxEntryUnitsWritten = 0 ; <nl> + / / Number of documents written <nl> + DocumentUnitCounter docsWritten ; <nl> + / / Number of index entries written <nl> + IdxEntryUnitCounter idxEntriesWritten ; <nl> } ; <nl> <nl> / * * <nl> class ResourceConsumption { <nl> public : <nl> MetricsCollector ( ) = default ; <nl> <nl> - / / Delete copy constructors to prevent callers from accidentally copying when this is <nl> - / / decorated on the OperationContext by reference . <nl> - MetricsCollector ( const MetricsCollector & ) = delete ; <nl> - MetricsCollector operator = ( const MetricsCollector & ) = delete ; <nl> - <nl> static MetricsCollector & get ( OperationContext * opCtx ) ; <nl> <nl> / * * <nl> class ResourceConsumption { <nl> <nl> void reset ( ) { <nl> invariant ( ! isInScope ( ) ) ; <nl> - _metrics = { } ; <nl> - _dbName = { } ; <nl> - _hasCollectedMetrics = false ; <nl> + * this = { } ; <nl> } <nl> <nl> / * * <nl> class ResourceConsumption { <nl> * / <nl> void incrementSorterSpills ( size_t spills ) ; <nl> <nl> - void incrementDocUnitsReturned ( size_t docUnitsReturned ) ; <nl> + / * * <nl> + * Increments the number of document units returned in the command response . 
<nl> + * / <nl> + void incrementDocUnitsReturned ( DocumentUnitCounter docUnitsReturned ) ; <nl> <nl> / * * <nl> * This should be called once per document written with the number of bytes written for that <nl> class ResourceConsumption { <nl> void incrementOneCursorSeek ( ) ; <nl> <nl> private : <nl> + / / Privatize copy constructors to prevent callers from accidentally copying when this is <nl> + / / decorated on the OperationContext by reference . <nl> + MetricsCollector ( const MetricsCollector & ) = default ; <nl> + MetricsCollector & operator = ( const MetricsCollector & ) = default ; <nl> + <nl> / * * <nl> * Helper function that calls the Func when this collector is currently collecting metrics . <nl> * / <nl> mmm a / src / mongo / db / stats / resource_consumption_metrics_test . cpp <nl> ppp b / src / mongo / db / stats / resource_consumption_metrics_test . cpp <nl> TEST_F ( ResourceConsumptionMetricsTest , NestedScopedMetricsCollector ) { <nl> ASSERT_EQ ( metricsCopy . count ( " db2 " ) , 0 ) ; <nl> } <nl> <nl> + namespace { <nl> + ResourceConsumption : : DocumentUnitCounter makeDocUnits ( size_t bytes ) { <nl> + ResourceConsumption : : DocumentUnitCounter docUnitsReturned ; <nl> + docUnitsReturned . observeOne ( bytes ) ; <nl> + return docUnitsReturned ; <nl> + } <nl> + } / / namespace <nl> + <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetrics ) { <nl> auto & globalResourceConsumption = ResourceConsumption : : get ( getServiceContext ( ) ) ; <nl> auto & operationMetrics = ResourceConsumption : : MetricsCollector : : get ( _opCtx . get ( ) ) ; <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetrics ) { <nl> operationMetrics . incrementOneIdxEntryRead ( 8 ) ; <nl> operationMetrics . incrementKeysSorted ( 16 ) ; <nl> operationMetrics . incrementSorterSpills ( 32 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 64 ) ; <nl> + operationMetrics . incrementDocUnitsReturned ( makeDocUnits ( 64 ) ) ; <nl> operationMetrics . 
incrementOneCursorSeek ( ) ; <nl> } <nl> <nl> ASSERT ( operationMetrics . hasCollectedMetrics ( ) ) ; <nl> <nl> auto metricsCopy = globalResourceConsumption . getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docBytesRead , 2 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsRead , 1 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryBytesRead , 8 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryUnitsRead , 1 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . bytes ( ) , 2 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . units ( ) , 1 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . bytes ( ) , 8 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . units ( ) , 1 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . keysSorted , 16 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . sorterSpills , 32 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsReturned , 64 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . bytes ( ) , 64 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . units ( ) , 1 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . cursorSeeks , 1 ) ; <nl> <nl> / / Clear metrics so we do not double - count . <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetrics ) { <nl> operationMetrics . incrementOneIdxEntryRead ( 128 ) ; <nl> operationMetrics . incrementKeysSorted ( 256 ) ; <nl> operationMetrics . incrementSorterSpills ( 512 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 1024 ) ; <nl> + operationMetrics . incrementDocUnitsReturned ( makeDocUnits ( 1024 ) ) ; <nl> operationMetrics . 
incrementOneCursorSeek ( ) ; <nl> } <nl> <nl> metricsCopy = globalResourceConsumption . getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docBytesRead , 2 + 32 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsRead , 2 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryBytesRead , 8 + 128 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryUnitsRead , 1 + 8 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . bytes ( ) , 2 + 32 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . units ( ) , 2 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . bytes ( ) , 8 + 128 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . keysSorted , 16 + 256 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . sorterSpills , 32 + 512 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsReturned , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . bytes ( ) , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . cursorSeeks , 1 + 1 ) ; <nl> } <nl> <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetricsSecondary ) { <nl> operationMetrics . incrementOneIdxEntryRead ( 8 ) ; <nl> operationMetrics . incrementKeysSorted ( 16 ) ; <nl> operationMetrics . incrementSorterSpills ( 32 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 64 ) ; <nl> + operationMetrics . incrementDocUnitsReturned ( makeDocUnits ( 64 ) ) ; <nl> operationMetrics . incrementOneCursorSeek ( ) ; <nl> } <nl> <nl> auto metricsCopy = globalResourceConsumption . 
getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docBytesRead , 2 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsRead , 1 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntryBytesRead , 8 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntryUnitsRead , 1 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . bytes ( ) , 2 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . units ( ) , 1 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . bytes ( ) , 8 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . units ( ) , 1 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . keysSorted , 16 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . sorterSpills , 32 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsReturned , 64 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . bytes ( ) , 64 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . units ( ) , 1 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . cursorSeeks , 1 ) ; <nl> <nl> / / Clear metrics so we do not double - count . <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetricsSecondary ) { <nl> operationMetrics . incrementOneIdxEntryRead ( 128 ) ; <nl> operationMetrics . incrementKeysSorted ( 256 ) ; <nl> operationMetrics . incrementSorterSpills ( 512 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 1024 ) ; <nl> + operationMetrics . incrementDocUnitsReturned ( makeDocUnits ( 1024 ) ) ; <nl> operationMetrics . incrementOneCursorSeek ( ) ; <nl> } <nl> <nl> metricsCopy = globalResourceConsumption . getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . 
docBytesRead , 2 + 32 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsRead , 2 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntryBytesRead , 8 + 128 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntryUnitsRead , 1 + 8 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . bytes ( ) , 2 + 32 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . units ( ) , 2 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . bytes ( ) , 8 + 128 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . keysSorted , 16 + 256 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . sorterSpills , 32 + 512 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsReturned , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . bytes ( ) , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . cursorSeeks , 1 + 1 ) ; <nl> } <nl> <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetricsAcrossStates ) { <nl> operationMetrics . incrementOneIdxEntryRead ( 8 ) ; <nl> operationMetrics . incrementKeysSorted ( 16 ) ; <nl> operationMetrics . incrementSorterSpills ( 32 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 64 ) ; <nl> + operationMetrics . incrementDocUnitsReturned ( makeDocUnits ( 64 ) ) ; <nl> operationMetrics . incrementOneCursorSeek ( ) ; <nl> <nl> ASSERT_OK ( repl : : ReplicationCoordinator : : get ( _opCtx . get ( ) ) <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetricsAcrossStates ) { <nl> operationMetrics . 
incrementOneIdxEntryRead ( 128 ) ; <nl> operationMetrics . incrementKeysSorted ( 256 ) ; <nl> operationMetrics . incrementSorterSpills ( 512 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 1024 ) ; <nl> + operationMetrics . incrementDocUnitsReturned ( makeDocUnits ( 1024 ) ) ; <nl> operationMetrics . incrementOneCursorSeek ( ) ; <nl> } <nl> <nl> auto metricsCopy = globalResourceConsumption . getAndClearDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docBytesRead , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsRead , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryBytesRead , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryUnitsRead , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . bytes ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . units ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . bytes ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . units ( ) , 0 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . keysSorted , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsReturned , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . bytes ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . units ( ) , 0 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . cursorSeeks , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docBytesRead , 2 + 32 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsRead , 2 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntryBytesRead , 8 + 128 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . 
idxEntryUnitsRead , 1 + 8 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . bytes ( ) , 2 + 32 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . units ( ) , 2 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . bytes ( ) , 8 + 128 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . keysSorted , 16 + 256 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . sorterSpills , 32 + 512 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsReturned , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . bytes ( ) , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . cursorSeeks , 1 + 1 ) ; <nl> <nl> operationMetrics . reset ( ) ; <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetricsAcrossStates ) { <nl> operationMetrics . incrementOneIdxEntryRead ( 8 ) ; <nl> operationMetrics . incrementKeysSorted ( 16 ) ; <nl> operationMetrics . incrementSorterSpills ( 32 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 64 ) ; <nl> + operationMetrics . incrementDocUnitsReturned ( makeDocUnits ( 64 ) ) ; <nl> operationMetrics . incrementOneCursorSeek ( ) ; <nl> <nl> ASSERT_OK ( repl : : ReplicationCoordinator : : get ( _opCtx . get ( ) ) <nl> TEST_F ( ResourceConsumptionMetricsTest , IncrementReadMetricsAcrossStates ) { <nl> operationMetrics . incrementOneIdxEntryRead ( 128 ) ; <nl> operationMetrics . incrementKeysSorted ( 256 ) ; <nl> operationMetrics . incrementSorterSpills ( 512 ) ; <nl> - operationMetrics . incrementDocUnitsReturned ( 1024 ) ; <nl> + operationMetrics . 
incrementDocUnitsReturned ( makeDocUnits ( 1024 ) ) ; <nl> operationMetrics . incrementOneCursorSeek ( ) ; <nl> } <nl> <nl> metricsCopy = globalResourceConsumption . getAndClearDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docBytesRead , 2 + 32 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsRead , 2 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryBytesRead , 8 + 128 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryUnitsRead , 1 + 8 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . bytes ( ) , 2 + 32 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . units ( ) , 2 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . bytes ( ) , 8 + 128 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . keysSorted , 16 + 256 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . sorterSpills , 32 + 512 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsReturned , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . bytes ( ) , 64 + 1024 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsReturned . units ( ) , 1 + 8 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . cursorSeeks , 1 + 1 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docBytesRead , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsRead , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntryBytesRead , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntryUnitsRead , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . 
bytes ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsRead . units ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . bytes ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . idxEntriesRead . units ( ) , 0 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . keysSorted , 0 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . sorterSpills , 0 ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docUnitsReturned , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . bytes ( ) , 0 ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . docsReturned . units ( ) , 0 ) ; <nl> ASSERT_EQ ( metricsCopy [ " db1 " ] . secondaryReadMetrics . cursorSeeks , 0 ) ; <nl> } <nl> <nl> TEST_F ( ResourceConsumptionMetricsTest , DocumentUnitsRead ) { <nl> } <nl> <nl> auto metricsCopy = globalResourceConsumption . getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docBytesRead , expectedBytes ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docUnitsRead , expectedUnits ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . bytes ( ) , expectedBytes ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . docsRead . units ( ) , expectedUnits ) ; <nl> } <nl> <nl> TEST_F ( ResourceConsumptionMetricsTest , DocumentUnitsWritten ) { <nl> TEST_F ( ResourceConsumptionMetricsTest , DocumentUnitsWritten ) { <nl> } <nl> <nl> auto metricsCopy = globalResourceConsumption . getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . docBytesWritten , expectedBytes ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . docUnitsWritten , expectedUnits ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . docsWritten . 
bytes ( ) , expectedBytes ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . docsWritten . units ( ) , expectedUnits ) ; <nl> } <nl> <nl> TEST_F ( ResourceConsumptionMetricsTest , IdxEntryUnitsRead ) { <nl> TEST_F ( ResourceConsumptionMetricsTest , IdxEntryUnitsRead ) { <nl> } <nl> <nl> auto metricsCopy = globalResourceConsumption . getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryBytesRead , expectedBytes ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntryUnitsRead , expectedUnits ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . bytes ( ) , expectedBytes ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . primaryReadMetrics . idxEntriesRead . units ( ) , expectedUnits ) ; <nl> } <nl> <nl> TEST_F ( ResourceConsumptionMetricsTest , IdxEntryUnitsWritten ) { <nl> TEST_F ( ResourceConsumptionMetricsTest , IdxEntryUnitsWritten ) { <nl> } <nl> <nl> auto metricsCopy = globalResourceConsumption . getDbMetrics ( ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . idxEntryBytesWritten , expectedBytes ) ; <nl> - ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . idxEntryUnitsWritten , expectedUnits ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . idxEntriesWritten . bytes ( ) , expectedBytes ) ; <nl> + ASSERT_EQ ( metricsCopy [ " db1 " ] . writeMetrics . idxEntriesWritten . units ( ) , expectedUnits ) ; <nl> } <nl> <nl> TEST_F ( ResourceConsumptionMetricsTest , CpuNanos ) { <nl>
SERVER - 51030 Collect document units returned in command responses
mongodb/mongo
58febe4996944263d331c3f8deb8cefd10ace9a6
2020-12-08T01:43:05Z
mmm a / src / init . cpp <nl> ppp b / src / init . cpp <nl> bool AppInit2 ( ) <nl> / / Disable confusing " helpful " text message on abort , Ctrl - C <nl> _set_abort_behavior ( 0 , _WRITE_ABORT_MSG | _CALL_REPORTFAULT ) ; <nl> # endif <nl> - # ifndef WIN32 <nl> - umask ( 077 ) ; <nl> + # ifdef WIN32 <nl> + / / Enable Data Execution Prevention ( DEP ) <nl> + / / Minimum supported OS versions : WinXP SP3 , WinVista > = SP1 , Win Server 2008 <nl> + / / A failure is non - critical and needs no further attention ! <nl> + # ifndef PROCESS_DEP_ENABLE <nl> + / / We define this here , because GCCs winbase . h limits this to _WIN32_WINNT > = 0x0601 ( Windows 7 ) , <nl> + / / which is not correct . Can be removed , when GCCs winbase . h is fixed ! <nl> + # define PROCESS_DEP_ENABLE 0x00000001 <nl> + # endif <nl> + typedef BOOL ( WINAPI * PSETPROCDEPPOL ) ( DWORD ) ; <nl> + PSETPROCDEPPOL setProcDEPPol = ( PSETPROCDEPPOL ) GetProcAddress ( GetModuleHandleA ( " Kernel32 . dll " ) , " SetProcessDEPPolicy " ) ; <nl> + if ( setProcDEPPol ! = NULL ) setProcDEPPol ( PROCESS_DEP_ENABLE ) ; <nl> # endif <nl> # ifndef WIN32 <nl> + umask ( 077 ) ; <nl> + <nl> / / Clean shutdown on SIGTERM <nl> struct sigaction sa ; <nl> sa . sa_handler = HandleSIGTERM ; <nl>
Bitcoin - Qt ( Windows only ) : enable DEP for bitcoin - qt . exe
bitcoin/bitcoin
3d88c9b4d3714daddd77ab72d0e44b61c0b9800a
2012-08-03T12:51:51Z
mmm a / src / ast / scopes . cc <nl> ppp b / src / ast / scopes . cc <nl> Variable * Scope : : DeclareVariable ( <nl> / / with this new binding by doing the following : <nl> / / The proxy is bound to a lookup variable to force a dynamic declaration <nl> / / using the DeclareEvalVar or DeclareEvalFunction runtime functions . <nl> - VariableKind kind = NORMAL_VARIABLE ; <nl> - / / TODO ( sigurds ) figure out if kNotAssigned is OK here <nl> - var = new ( zone ( ) ) Variable ( this , name , mode , kind , init , kNotAssigned ) ; <nl> + var = new ( zone ( ) ) <nl> + Variable ( this , name , mode , NORMAL_VARIABLE , init , kMaybeAssigned ) ; <nl> var - > AllocateTo ( VariableLocation : : LOOKUP , - 1 ) ; <nl> } else { <nl> / / Declare the variable in the declaration scope . <nl> Variable * Scope : : DeclareVariableName ( const AstRawString * name , <nl> DCHECK_NE ( var , kDummyPreParserVariable ) ; <nl> if ( var = = nullptr ) { <nl> var = DeclareLocal ( name , mode ) ; <nl> - } else if ( ! IsLexicalVariableMode ( var - > mode ( ) ) & & <nl> - ! IsLexicalVariableMode ( mode ) ) { <nl> - DCHECK_EQ ( mode , VAR ) ; <nl> + } else if ( mode = = VAR ) { <nl> + DCHECK_EQ ( var - > mode ( ) , VAR ) ; <nl> var - > set_maybe_assigned ( ) ; <nl> } <nl> var - > set_is_used ( ) ; <nl>
[ ast ] Minor cleanup in scopes . cc .
v8/v8
fbd4cc9a369753c9470c895c879c9eaf32f4e815
2017-02-06T09:11:56Z
mmm a / build / pipelines / templates / build - console - steps . yml <nl> ppp b / build / pipelines / templates / build - console - steps . yml <nl> steps : <nl> configuration : ' $ ( BuildConfiguration ) ' <nl> msbuildArgs : " $ { { parameters . additionalBuildArguments } } " <nl> clean : true <nl> - maximumCpuCount : true <nl> + # The build agents cannot currently support parallel build due to the <nl> + # memory requirements of our PCH files . <nl> + maximumCpuCount : false <nl> <nl> - task : PowerShell @ 2 <nl> displayName : ' Check MSIX for common regressions ' <nl>
BUILD : Disable parallel build
microsoft/terminal
91ccbb79f0c1ee6165bd9ae2d265270954b4ec7c
2020-10-09T00:17:55Z
mmm a / src / rdb_protocol / query_language . cc <nl> ppp b / src / rdb_protocol / query_language . cc <nl> void check_write_query_type ( WriteQuery3 * w , type_checking_environment_t * env , bo <nl> <nl> bool deterministic = true ; <nl> switch ( w - > type ( ) ) { <nl> - case WriteQuery3 : : INSERT : { <nl> - check_protobuf ( w - > has_insert ( ) ) ; <nl> - check_table_ref ( w - > insert ( ) . table_ref ( ) , backtrace . with ( " table_ref " ) ) ; <nl> - if ( w - > insert ( ) . terms_size ( ) = = 1 ) { <nl> - / / We want to do the get to produce determinism information <nl> - get_term_type ( w - > mutable_insert ( ) - > mutable_terms ( 0 ) , env , backtrace ) ; <nl> - break ; / / Single - element insert polymorphic over streams and arrays <nl> - } <nl> - for ( int i = 0 ; i < w - > insert ( ) . terms_size ( ) ; + + i ) { <nl> - check_term_type ( w - > mutable_insert ( ) - > mutable_terms ( i ) , TERM_TYPE_JSON , env , is_det_out , backtrace . with ( strprintf ( " term : % d " , i ) ) ) ; <nl> - } <nl> - } break ; <nl> case WriteQuery3 : : FOREACH : { <nl> check_protobuf ( w - > has_for_each ( ) ) ; <nl> check_term_type ( w - > mutable_for_each ( ) - > mutable_stream ( ) , TERM_TYPE_ARBITRARY , env , is_det_out , backtrace . 
with ( " stream " ) ) ; <nl> int point_delete ( namespace_repo_t < rdb_protocol_t > : : access_t ns_access , boost : : sh <nl> void execute_write_query ( WriteQuery3 * w , runtime_environment_t * env , Response3 * res , const scopes_t & scopes , const backtrace_t & backtrace ) THROWS_ONLY ( interrupted_exc_t , runtime_exc_t , broken_client_exc_t ) { <nl> res - > set_status_code ( Response3 : : SUCCESS_JSON ) ; <nl> switch ( w - > type ( ) ) { <nl> - case WriteQuery3 : : INSERT : { <nl> - std : : string pk = get_primary_key ( w - > mutable_insert ( ) - > mutable_table_ref ( ) , env , backtrace ) ; <nl> - bool overwrite = w - > mutable_insert ( ) - > overwrite ( ) ; <nl> - namespace_repo_t < rdb_protocol_t > : : access_t ns_access = <nl> - eval_table_ref ( w - > mutable_insert ( ) - > mutable_table_ref ( ) , env , backtrace ) ; <nl> - <nl> - std : : string first_error ; <nl> - int errors = 0 ; <nl> - int inserted = 0 ; <nl> - std : : vector < std : : string > generated_keys ; <nl> - if ( w - > insert ( ) . terms_size ( ) = = 1 ) { <nl> - Term3 * t = w - > mutable_insert ( ) - > mutable_terms ( 0 ) ; <nl> - int32_t t_type = t - > GetExtension ( extension : : inferred_type ) ; <nl> - boost : : shared_ptr < json_stream_t > stream ; <nl> - if ( t_type = = TERM_TYPE_JSON ) { <nl> - boost : : shared_ptr < scoped_cJSON_t > data = eval_term_as_json ( t , env , scopes , backtrace . with ( " term : 0 " ) ) ; <nl> - if ( data - > type ( ) = = cJSON_Array ) { <nl> - stream . reset ( new in_memory_stream_t ( json_array_iterator_t ( data - > get ( ) ) ) ) ; <nl> - } else { <nl> - nonthrowing_insert ( ns_access , pk , data , env , backtrace . with ( " term : 0 " ) , overwrite , & generated_keys , <nl> - & inserted , & errors , & first_error ) ; <nl> - } <nl> - } else if ( t_type = = TERM_TYPE_STREAM | | t_type = = TERM_TYPE_VIEW ) { <nl> - stream = eval_term_as_stream ( w - > mutable_insert ( ) - > mutable_terms ( 0 ) , env , scopes , backtrace . 
with ( " term : 0 " ) ) ; <nl> - } else { unreachable ( " bad term type " ) ; } <nl> - if ( stream ) { <nl> - while ( boost : : shared_ptr < scoped_cJSON_t > data = stream - > next ( ) ) { <nl> - nonthrowing_insert ( ns_access , pk , data , env , backtrace . with ( " term : 0 " ) , overwrite , & generated_keys , <nl> - & inserted , & errors , & first_error ) ; <nl> - } <nl> - } <nl> - } else { <nl> - for ( int i = 0 ; i < w - > insert ( ) . terms_size ( ) ; + + i ) { <nl> - boost : : shared_ptr < scoped_cJSON_t > data = <nl> - eval_term_as_json ( w - > mutable_insert ( ) - > mutable_terms ( i ) , env , scopes , backtrace . with ( strprintf ( " term : % d " , i ) ) ) ; <nl> - nonthrowing_insert ( ns_access , pk , data , env , backtrace . with ( strprintf ( " term : % d " , i ) ) , overwrite , & generated_keys , <nl> - & inserted , & errors , & first_error ) ; <nl> - } <nl> - } <nl> - <nl> - / * Construct a response . * / <nl> - boost : : shared_ptr < scoped_cJSON_t > res_json ( new scoped_cJSON_t ( cJSON_CreateObject ( ) ) ) ; <nl> - res_json - > AddItemToObject ( " inserted " , safe_cJSON_CreateNumber ( inserted , backtrace ) ) ; <nl> - res_json - > AddItemToObject ( " errors " , safe_cJSON_CreateNumber ( errors , backtrace ) ) ; <nl> - <nl> - if ( first_error ! = " " ) res_json - > AddItemToObject ( " first_error " , cJSON_CreateString ( first_error . c_str ( ) ) ) ; <nl> - if ( ! generated_keys . empty ( ) ) { <nl> - res_json - > AddItemToObject ( " generated_keys " , cJSON_CreateArray ( ) ) ; <nl> - cJSON * array = res_json - > GetObjectItem ( " generated_keys " ) ; <nl> - guarantee ( array ) ; <nl> - for ( std : : vector < std : : string > : : iterator it = generated_keys . begin ( ) ; it ! = generated_keys . 
end ( ) ; + + it ) { <nl> - cJSON_AddItemToArray ( array , cJSON_CreateString ( it - > c_str ( ) ) ) ; <nl> - } <nl> - } <nl> - res - > add_response ( res_json - > Print ( ) ) ; <nl> - } break ; <nl> case WriteQuery3 : : FOREACH : { <nl> boost : : shared_ptr < json_stream_t > stream = <nl> eval_term_as_stream ( w - > mutable_for_each ( ) - > mutable_stream ( ) , env , scopes , backtrace . with ( " stream " ) ) ; <nl> mmm a / src / rdb_protocol / query_language . proto <nl> ppp b / src / rdb_protocol / query_language . proto <nl> message ReadQuery3 { <nl> <nl> message WriteQuery3 { <nl> enum WriteQueryType { <nl> - INSERT = 4 ; <nl> FOREACH = 6 ; <nl> POINTUPDATE = 7 ; <nl> POINTDELETE = 8 ; <nl> message WriteQuery3 { <nl> required WriteQueryType type = 1 ; <nl> optional bool atomic = 11 [ default = true ] ; <nl> <nl> - message Insert { <nl> - required TableRef table_ref = 1 ; <nl> - repeated Term3 terms = 2 ; <nl> - optional bool overwrite = 3 [ default = false ] ; <nl> - } ; <nl> - <nl> - optional Insert insert = 5 ; <nl> - <nl> message ForEach { <nl> required Term3 stream = 1 ; <nl> required string var = 2 ; <nl>
Removed WriteQuery3_Insert .
rethinkdb/rethinkdb
13fa18e6baa93a9a8dfeafb45a315b0f3db2bc61
2013-03-12T08:29:03Z
mmm a / tensorflow / tools / ci_build / windows / cpu / pip / build_tf_windows . sh <nl> ppp b / tensorflow / tools / ci_build / windows / cpu / pip / build_tf_windows . sh <nl> PY_TEST_DIR = " py_test_dir " <nl> SKIP_TEST = 0 <nl> RELEASE_BUILD = 0 <nl> TEST_TARGET = " / / $ { PY_TEST_DIR } / tensorflow / python / . . . " <nl> - EXTRA_BUILD_FLAGS = $ { EXTRA_BUILD_FLAGS : - } <nl> + PROJECT_NAME = " " <nl> + EXTRA_BUILD_FLAGS = " " <nl> <nl> # - - skip_test Skip running tests <nl> # - - enable_remote_cache Add options to enable remote cache for build and test <nl> for ARG in " $ @ " ; do <nl> - - release_build ) RELEASE_BUILD = 1 ; ; <nl> - - test_core_only ) TEST_TARGET = " / / $ { PY_TEST_DIR } / tensorflow / python / . . . " ; ; <nl> - - test_contrib_only ) TEST_TARGET = " / / $ { PY_TEST_DIR } / tensorflow / contrib / . . . " ; ; <nl> + - - extra_build_flags ) <nl> + shift <nl> + if [ [ - z " $ 1 " ] ] ; then <nl> + break <nl> + fi <nl> + EXTRA_BUILD_FLAGS = " $ 1 " <nl> + ; ; <nl> + - - project_name ) <nl> + shift <nl> + if [ [ - z " $ 1 " ] ] ; then <nl> + break <nl> + fi <nl> + PROJECT_NAME = " $ 1 " <nl> + ; ; <nl> * ) <nl> esac <nl> done <nl> fi <nl> <nl> if [ [ " $ TF_NIGHTLY " = = 1 ] ] ; then <nl> python tensorflow / tools / ci_build / update_version . py - - nightly <nl> - if [ - z $ { EXTRA_PIP_FLAGS } ] ; then <nl> + if [ - z $ { PROJECT_NAME } ] ; then <nl> EXTRA_PIP_FLAGS = " - - nightly_flag " <nl> else <nl> - EXTRA_PIP_FLAGS = " $ { EXTRA_PIP_FLAGS } - - nightly_flag " <nl> + EXTRA_PIP_FLAGS = " - - project_name = $ { PROJECT_NAME } - - nightly_flag " <nl> fi <nl> fi <nl> <nl> mmm a / tensorflow / tools / ci_build / windows / gpu / pip / build_tf_windows . sh <nl> ppp b / tensorflow / tools / ci_build / windows / gpu / pip / build_tf_windows . sh <nl> PY_TEST_DIR = " py_test_dir " <nl> SKIP_TEST = 0 <nl> RELEASE_BUILD = 0 <nl> TEST_TARGET = " / / $ { PY_TEST_DIR } / tensorflow / python / . . . 
" <nl> - EXTRA_BUILD_FLAGS = $ { EXTRA_BUILD_FLAGS : - } <nl> + PROJECT_NAME = " " <nl> + EXTRA_BUILD_FLAGS = " " <nl> <nl> # - - skip_test Skip running tests <nl> # - - enable_remote_cache Add options to enable remote cache for build and test <nl> for ARG in " $ @ " ; do <nl> - - release_build ) RELEASE_BUILD = 1 ; ; <nl> - - test_core_only ) TEST_TARGET = " / / $ { PY_TEST_DIR } / tensorflow / python / . . . " ; ; <nl> - - test_contrib_only ) TEST_TARGET = " / / $ { PY_TEST_DIR } / tensorflow / contrib / . . . " ; ; <nl> + - - extra_build_flags ) <nl> + shift <nl> + if [ [ - z " $ 1 " ] ] ; then <nl> + break <nl> + fi <nl> + EXTRA_BUILD_FLAGS = " $ 1 " <nl> + ; ; <nl> + - - project_name ) <nl> + shift <nl> + if [ [ - z " $ 1 " ] ] ; then <nl> + break <nl> + fi <nl> + PROJECT_NAME = " $ 1 " <nl> + ; ; <nl> * ) <nl> esac <nl> done <nl> fi <nl> <nl> if [ [ " $ TF_NIGHTLY " = = 1 ] ] ; then <nl> python tensorflow / tools / ci_build / update_version . py - - nightly <nl> - if [ - z $ { EXTRA_PIP_FLAGS } ] ; then <nl> + if [ - z $ { PROJECT_NAME } ] ; then <nl> EXTRA_PIP_FLAGS = " - - nightly_flag " <nl> else <nl> - EXTRA_PIP_FLAGS = " $ { EXTRA_PIP_FLAGS } - - nightly_flag " <nl> + EXTRA_PIP_FLAGS = " - - project_name = $ { PROJECT_NAME } - - nightly_flag " <nl> fi <nl> fi <nl> <nl>
Internal change .
tensorflow/tensorflow
594044db0505faa4ca41edb1d394d7c643c026ef
2018-12-01T00:59:47Z
mmm a / tools / tojs / cocos2dx . ini <nl> ppp b / tools / tojs / cocos2dx . ini <nl> skip = CCNode : : [ convertToWindowSpace ^ setPosition $ getGrid setGLServerState desc <nl> CC . * Delegate : : [ * ] , <nl> CCPoolManager : : [ * ] , <nl> CCTexture2D : : [ initWithPVRTCData addPVRTCImage releaseData setTexParameters initWithData keepData ] , <nl> - CCSet : : [ begin end ] , <nl> + CCSet : : [ begin end acceptVisitor ] , <nl> CCIMEDispatcher : : [ * ] , <nl> CCSAXParser : : [ * ] , <nl> CCThread : : [ * ] , <nl>
Update cocos2dx . ini , don ' t generate binding for CCSet : : acceptVisitor .
cocos2d/cocos2d-x
361004d4d30ad55a52c152568437461ff23ee676
2013-05-21T06:52:54Z
mmm a / tensorflow / python / framework / docs . py <nl> ppp b / tensorflow / python / framework / docs . py <nl> def collect_members ( module_to_name ) : <nl> if len ( fullname ) = = len ( other_fullname ) : <nl> raise RuntimeError ( " Can ' t decide whether to use % s or % s for % s : " <nl> " both full names have length % d " % <nl> - ( fullname , other_fullname , len ( fullname ) ) ) <nl> + ( fullname , other_fullname , name , len ( fullname ) ) ) <nl> if len ( fullname ) > len ( other_fullname ) : <nl> continue # Use the shorter full name <nl> members [ name ] = fullname , member <nl> mmm a / tensorflow / python / kernel_tests / rnn_test . py <nl> ppp b / tensorflow / python / kernel_tests / rnn_test . py <nl> def output_size ( self ) : <nl> def state_size ( self ) : <nl> return 5 <nl> <nl> - def __call__ ( self , input_ , state ) : <nl> + def __call__ ( self , input_ , state , scope = None ) : <nl> return ( input_ + 1 , state + 1 ) <nl> <nl> <nl> mmm a / tensorflow / python / ops / rnn_cell . py <nl> ppp b / tensorflow / python / ops / rnn_cell . py <nl> def output_size ( self ) : <nl> def state_size ( self ) : <nl> return self . _cell . state_size <nl> <nl> - def __call__ ( self , inputs , state ) : <nl> + def __call__ ( self , inputs , state , scope = None ) : <nl> " " " Run the cell with the declared dropouts . " " " <nl> if ( not isinstance ( self . _input_keep_prob , float ) or <nl> self . _input_keep_prob < 1 ) : <nl>
Add missing arguments .
tensorflow/tensorflow
7a73630e276e5bc36d89d75cfdd33d1d3a5f865b
2016-01-20T19:25:36Z
new file mode 100644 <nl> index 00000000000 . . 4e0c5f4dfa9 <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / AllReduceDistGradAggregator . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include " IDistGradAggregator . h " <nl> + # include " CUDAPageLockedMemAllocator . h " <nl> + # include " QuantizedMatrix . h " <nl> + # include " MatrixQuantizer . h " <nl> + # include " MatrixQuantizerGPU . h " <nl> + # include < future > <nl> + # include " TimerUtility . h " <nl> + <nl> + namespace Microsoft { namespace MSR { namespace CNTK { <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / AllReduceDistGradAggregator - - 1 - bit SGD . <nl> + / / This implements <nl> + / / Frank Seide , Hao Fu , Jasha Droppo , Gang Li , and Dong Yu : <nl> + / / " 1 - bit stochastic gradient descent and its application to data - parallel distributed training of speech DNNs " <nl> + / / In Proc . Interspeech 2014 . 
<nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + template < class ElemType > <nl> + class AllReduceDistGradAggregator : public IDistGradAggregator < ElemType > <nl> + { <nl> + struct Stripe <nl> + { <nl> + size_t m_startCol ; <nl> + size_t m_numCols ; <nl> + } ; <nl> + <nl> + UsingIDistGradAggregatorMembers ; <nl> + <nl> + static const int DEBUG_OUTPUT_TRACE_LEVEL = 3 ; <nl> + <nl> + public : <nl> + AllReduceDistGradAggregator ( const std : : shared_ptr < MPIWrapper > & mpi , int nBits , bool zeroThresholdFor1Bit , bool useQuantizationForSelfStripe , bool useAsyncAggregation , int traceLevel , int syncStatsTrace ) <nl> + : IDistGradAggregator < ElemType > ( mpi ) , m_numQuantizationBits ( nBits ) , m_zeroThresholdFor1Bit ( zeroThresholdFor1Bit ) , m_useQuantizationForSelfStripe ( useQuantizationForSelfStripe ) , <nl> + m_traceLevel ( traceLevel ) , m_initialized ( false ) , m_useAsyncAggregation ( useAsyncAggregation ) , m_bufferedGradHeader ( nullptr ) , m_syncStatsTrace ( syncStatsTrace ) , m_iterationCount ( 0 ) <nl> + { } <nl> + <nl> + ~ AllReduceDistGradAggregator ( ) <nl> + { <nl> + for ( size_t i = 0 ; i < m_recvHeaders . size ( ) ; + + i ) <nl> + DistGradHeader : : Destroy ( m_recvHeaders [ i ] ) ; <nl> + <nl> + if ( m_bufferedGradHeader ! 
= nullptr ) <nl> + DistGradHeader : : Destroy ( m_bufferedGradHeader ) ; <nl> + } <nl> + <nl> + / / Gets the range of columns to be processed by the node with the specified rank <nl> + / / when parallel processing using ' numNodes ' nodes <nl> + static Stripe GetStripeForNode ( size_t numCols , size_t nodeRank , size_t numNodes ) <nl> + { <nl> + / / Determine which stripe of the gradient is this node responsible for <nl> + size_t numColsPerNode = numCols / numNodes ; <nl> + size_t residue = numCols % numNodes ; <nl> + size_t startColNumofStripe = ( numColsPerNode * nodeRank ) + min ( residue , nodeRank ) ; <nl> + size_t numColsinStripe = numColsPerNode + ( ( nodeRank < residue ) ? 1 : 0 ) ; <nl> + <nl> + return Stripe ( { startColNumofStripe , numColsinStripe } ) ; <nl> + } <nl> + <nl> + void ResetState ( const std : : vector < Matrix < ElemType > * > & gradients , int numEvalNodes , bool resetState ) <nl> + { <nl> + / / When called the first time let ' s setup the quantizers and matrices for holding quantized values . <nl> + / / These can live for the lifetime of the aggregator since the gradient matrix dimensions for learnable parameters <nl> + / / do not change <nl> + if ( ! m_initialized ) <nl> + { <nl> + m_initialized = true ; <nl> + int deviceId = gradients [ 0 ] - > GetDeviceId ( ) ; <nl> + if ( deviceId ! = CPUDEVICE ) <nl> + m_allocator . reset ( new CUDAPageLockedMemAllocator ( deviceId ) ) ; <nl> + <nl> + for ( size_t i = 0 ; i < gradients . size ( ) ; i + + ) <nl> + { <nl> + / / Make sure none of the gradient matrices are sparse - we currently do not support aggregation of sparse gradient matrices <nl> + if ( gradients [ i ] - > GetMatrixType ( ) ! = DENSE ) <nl> + RuntimeError ( " Gradient aggregation for sparse gradient matrices is currently unsupported ! " ) ; <nl> + <nl> + size_t nRow = gradients [ i ] - > GetNumRows ( ) ; <nl> + size_t nCol = gradients [ i ] - > GetNumCols ( ) ; <nl> + m_preAggGradQuantizers . 
push_back ( std : : unique_ptr < MatrixQuantizer < ElemType > > ( new MatrixQuantizer < ElemType > ( nRow , nCol , deviceId , m_useAsyncAggregation ) ) ) ; <nl> + m_gradQuantized . push_back ( std : : unique_ptr < QuantizedMatrix < ElemType > > ( new QuantizedMatrix < ElemType > ( nRow , nCol , m_numQuantizationBits , CPUDEVICE , m_allocator . get ( ) ) ) ) ; <nl> + <nl> + / / Determine which stripe of the gradient is this node responsible for <nl> + Stripe stripe = GetStripeForNode ( nCol , MyRank ( ) , NumProc ( ) ) ; <nl> + <nl> + MatrixQuantizer < ElemType > * currAggGradQuantizer = nullptr ; <nl> + std : : vector < std : : unique_ptr < QuantizedMatrix < ElemType > > > currRecvGradStripesQuantized ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + currAggGradQuantizer = new MatrixQuantizer < ElemType > ( nRow , stripe . m_numCols , deviceId , m_useAsyncAggregation ) ; <nl> + for ( size_t j = 0 ; j < NumProc ( ) - 1 ; + + j ) <nl> + currRecvGradStripesQuantized . push_back ( std : : unique_ptr < QuantizedMatrix < ElemType > > ( new QuantizedMatrix < ElemType > ( nRow , stripe . m_numCols , m_numQuantizationBits , CPUDEVICE , m_allocator . get ( ) ) ) ) ; <nl> + } <nl> + <nl> + m_aggGradStripeQuantizers . push_back ( std : : unique_ptr < MatrixQuantizer < ElemType > > ( currAggGradQuantizer ) ) ; <nl> + m_recvGradStripesQuantized . push_back ( std : : move ( currRecvGradStripesQuantized ) ) ; <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + m_bufferedGradients [ gradients [ i ] ] . reset ( new Matrix < ElemType > ( gradients [ i ] - > GetNumRows ( ) , gradients [ i ] - > GetNumCols ( ) , deviceId ) ) ; <nl> + } <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + m_bufferedGradHeader = DistGradHeader : : Create ( numEvalNodes ) ; <nl> + m_bufferedGradHeader - > Clear ( ) ; <nl> + } <nl> + <nl> + if ( m_mpi - > IsMainNode ( ) ) <nl> + { <nl> + for ( size_t i = 0 ; i < NumProc ( ) - 1 ; + + i ) <nl> + m_recvHeaders . 
push_back ( DistGradHeader : : Create ( numEvalNodes ) ) ; <nl> + } <nl> + } <nl> + else if ( resetState ) <nl> + { <nl> + / / If we are resetting state , let ' s clear previous quantization residues <nl> + <nl> + / / Make sure there is no pending async aggregation <nl> + if ( m_useAsyncAggregation & & m_pendingAsyncAggregation . valid ( ) ) <nl> + LogicError ( " Unexpected pending async gradient aggregation found when resetting aggregator state ! " ) ; <nl> + <nl> + for ( size_t i = 0 ; i < m_preAggGradQuantizers . size ( ) ; + + i ) <nl> + m_preAggGradQuantizers [ i ] - > ResetResidue ( ) ; <nl> + <nl> + for ( size_t i = 0 ; i < m_aggGradStripeQuantizers . size ( ) ; + + i ) <nl> + { <nl> + if ( m_aggGradStripeQuantizers [ i ] ! = nullptr ) <nl> + m_aggGradStripeQuantizers [ i ] - > ResetResidue ( ) ; <nl> + } <nl> + <nl> + / / Zero out the buffered gradients if resetting state <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + for ( size_t i = 0 ; i < gradients . size ( ) ; i + + ) <nl> + m_bufferedGradients [ gradients [ i ] ] - > SetValue ( 0 ) ; <nl> + <nl> + m_bufferedGradHeader - > Clear ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Aggregate the gradient matrices across all nodes <nl> + bool AggregateGradients ( const std : : vector < Matrix < ElemType > * > & gradients , DistGradHeader * headerCPU , bool resetState ) override <nl> + { <nl> + ResetState ( gradients , headerCPU - > numEvalNode , resetState ) ; <nl> + bool showSyncPerfStats = ( m_syncStatsTrace > 0 ) & & ( ( m_iterationCount % m_syncStatsTrace ) = = 0 ) ; <nl> + m_iterationCount + + ; <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + / / If we are performing async gradient aggregation , let ' s wait for the pending gradient aggregation to finish <nl> + / / then swap the contents of the buffered gradients and the new gradient matrices and fire an async aggreagation <nl> + / / of the new gradient matrices <nl> + if ( m_pendingAsyncAggregation . 
valid ( ) ) <nl> + { <nl> + Timer aggregationTimer ; <nl> + if ( showSyncPerfStats ) <nl> + aggregationTimer . Start ( ) ; <nl> + <nl> + m_pendingAsyncAggregation . get ( ) ; <nl> + <nl> + if ( showSyncPerfStats ) <nl> + { <nl> + aggregationTimer . Stop ( ) ; <nl> + double gradientAggregationTime = aggregationTimer . ElapsedSeconds ( ) ; <nl> + fprintf ( stderr , " Async gradient aggregation wait time : % . 6g \ n " , gradientAggregationTime ) ; <nl> + } <nl> + } <nl> + <nl> + std : : vector < Matrix < ElemType > * > newGradients ; <nl> + size_t numGradMatrices = gradients . size ( ) ; <nl> + for ( size_t i = 0 ; i < numGradMatrices ; i + + ) <nl> + { <nl> + Matrix < ElemType > * bufferedGradientMatrix = m_bufferedGradients [ gradients [ i ] ] . get ( ) ; <nl> + if ( ( bufferedGradientMatrix = = nullptr ) | | <nl> + ( bufferedGradientMatrix - > GetNumCols ( ) ! = gradients [ i ] - > GetNumCols ( ) ) | | <nl> + ( bufferedGradientMatrix - > GetNumRows ( ) ! = gradients [ i ] - > GetNumRows ( ) ) | | <nl> + ( bufferedGradientMatrix - > GetDeviceId ( ) ! = gradients [ i ] - > GetDeviceId ( ) ) ) <nl> + { <nl> + LogicError ( " No buffered gradient matrix found corresponding to a gradient matrix to be aggregated ! " ) ; <nl> + } <nl> + <nl> + / / Swap the gradient matrix contents with the buffered matrices <nl> + std : : swap ( * ( gradients [ i ] ) , * bufferedGradientMatrix ) ; <nl> + <nl> + newGradients . push_back ( bufferedGradientMatrix ) ; <nl> + } <nl> + <nl> + / / Swap the grad header contents with the buffered grad header <nl> + swap ( * headerCPU , * m_bufferedGradHeader ) ; <nl> + <nl> + / / Initiate aggregation only if any samples were processed in previous iteration <nl> + if ( resetState | | ( headerCPU - > numSamples ! 
= 0 ) ) <nl> + { <nl> + int deviceId = gradients [ 0 ] - > GetDeviceId ( ) ; <nl> + DistGradHeader * newGradHeader = m_bufferedGradHeader ; <nl> + <nl> + / / Since we will be aggregating the gradients asynchronously , let us <nl> + / / ensure that the gradient matrices have been computed before starting to aggregate <nl> + / / them asynchronously on another thread . This essentially means that when we are using <nl> + / / a GPU device , we will synchronize on the main GPU compute stream before starting <nl> + / / the gradient aggregation asynchronously on a separate stream <nl> + MatrixComputeStreamEvent * mainStreamSyncEvent = MatrixComputeStreamEvent : : Create ( deviceId ) ; <nl> + <nl> + m_pendingAsyncAggregation = std : : async ( std : : launch : : async , [ = ] { <nl> + / / We are starting on a new thread . Make sure the new thread is <nl> + / / setup to use the right device <nl> + Matrix < ElemType > : : SetDevice ( deviceId ) ; <nl> + <nl> + / / Synchronize the Quantization compute stream with the completion of <nl> + / / compute of the gradient matrices on the main compute stream <nl> + mainStreamSyncEvent - > SynchronizeQuantizationComputeStreamWithEvent < ElemType > ( ) ; <nl> + delete mainStreamSyncEvent ; <nl> + <nl> + AggregateGradientsImpl ( newGradients , newGradHeader , showSyncPerfStats ) ; <nl> + } ) ; <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + else <nl> + { <nl> + AggregateGradientsImpl ( gradients , headerCPU , showSyncPerfStats ) ; <nl> + return ( headerCPU - > numSamples ! 
= 0 ) ; <nl> + } <nl> + } <nl> + <nl> + void AggregateGradientsImpl ( const std : : vector < Matrix < ElemType > * > & gradients , DistGradHeader * headerCPU , bool showSyncPerfStats ) <nl> + { <nl> + Timer aggregationTimer ; <nl> + int deviceId = gradients [ 0 ] - > GetDeviceId ( ) ; <nl> + if ( showSyncPerfStats ) <nl> + { <nl> + std : : unique_ptr < MatrixComputeStreamEvent > mainStreamSyncEvent ( MatrixComputeStreamEvent : : Create ( deviceId ) ) ; <nl> + mainStreamSyncEvent - > SynchronizeEvent ( ) ; <nl> + aggregationTimer . Start ( ) ; <nl> + } <nl> + <nl> + size_t numGradMatrices = gradients . size ( ) ; <nl> + <nl> + if ( headerCPU - > numSamples = = 0 ) <nl> + { <nl> + assert ( headerCPU - > criterion = = 0 . 0 ) ; <nl> + assert ( headerCPU - > numSamplesWithLabel = = 0 ) ; <nl> + for ( int i = 0 ; i < headerCPU - > numEvalNode ; + + i ) <nl> + assert ( headerCPU - > evalErrors [ i ] . first = = 0 & & headerCPU - > evalErrors [ i ] . second = = 0 ) ; <nl> + <nl> + / / If the current node did not process any samples , the gradients should be zero ' d <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + gradients [ i ] - > SetValue ( 0 ) ; <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + std : : unique_ptr < MatrixComputeStreamEvent > mainStreamSyncEvent ( MatrixComputeStreamEvent : : Create ( deviceId ) ) ; <nl> + mainStreamSyncEvent - > SynchronizeQuantizationComputeStreamWithEvent < ElemType > ( ) ; <nl> + } <nl> + } <nl> + <nl> + std : : vector < std : : unique_ptr < Matrix < ElemType > > > aggGradStripes ; <nl> + std : : vector < std : : unique_ptr < QuantizedMatrix < ElemType > > > aggGradStripesQuantized ; <nl> + for ( size_t i = 0 ; i < gradients . 
size ( ) ; i + + ) <nl> + { <nl> + size_t nCol = gradients [ i ] - > GetNumCols ( ) ; <nl> + <nl> + / / Determine which stripe of the gradient is this node responsible for <nl> + Stripe stripe = GetStripeForNode ( nCol , MyRank ( ) , NumProc ( ) ) ; <nl> + <nl> + Matrix < ElemType > * currAggGradStripe = nullptr ; <nl> + QuantizedMatrix < ElemType > * currAggGradStripeQuantized = nullptr ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + currAggGradStripe = new Matrix < ElemType > ( gradients [ i ] - > ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ) ; <nl> + currAggGradStripeQuantized = new QuantizedMatrix < ElemType > ( m_gradQuantized [ i ] - > ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ) ; <nl> + } <nl> + <nl> + aggGradStripes . push_back ( std : : unique_ptr < Matrix < ElemType > > ( currAggGradStripe ) ) ; <nl> + aggGradStripesQuantized . push_back ( std : : unique_ptr < QuantizedMatrix < ElemType > > ( currAggGradStripeQuantized ) ) ; <nl> + } <nl> + <nl> + / / Initiate quantization of the gradient matrices <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + { <nl> + if ( m_traceLevel > = DEBUG_OUTPUT_TRACE_LEVEL ) <nl> + { <nl> + char printHeaderBuf [ 1024 ] ; <nl> + sprintf ( printHeaderBuf , " MPI Rank : % d , Original Gradient Matrix No . 
% d " , ( int ) MyRank ( ) , ( int ) i ) ; <nl> + PrintMatrix ( printHeaderBuf , gradients [ i ] ) ; <nl> + } <nl> + <nl> + m_preAggGradQuantizers [ i ] - > QuantizeAsync ( * ( gradients [ i ] ) , * ( m_gradQuantized [ i ] ) , m_zeroThresholdFor1Bit ) ; <nl> + } <nl> + <nl> + / / Initiate receive of the stripe to be aggregated by the current node , from all other nodes <nl> + std : : vector < MPI_Request > recvGradStripesQuantizedRequests ; <nl> + std : : vector < int > recvRequestIdxToGradientMatrixIdxMap ; <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( gradients [ i ] - > GetNumCols ( ) , MyRank ( ) , NumProc ( ) ) ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + recvRequestIdxToGradientMatrixIdxMap . push_back ( i ) ; <nl> + for ( size_t j = 0 ; j < NumProc ( ) - 1 ; + + j ) <nl> + { <nl> + int source = ( j > = MyRank ( ) ) ? ( j + 1 ) : j ; <nl> + <nl> + recvGradStripesQuantizedRequests . push_back ( MPI_Request ( ) ) ; <nl> + int recvRequestIdx = recvGradStripesQuantizedRequests . size ( ) - 1 ; <nl> + <nl> + m_mpi - > Irecv ( m_recvGradStripesQuantized [ i ] [ j ] - > Buffer ( ) , m_recvGradStripesQuantized [ i ] [ j ] - > GetSize ( ) , MPI_CHAR , source , i , & ( recvGradStripesQuantizedRequests [ recvRequestIdx ] ) ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Initiate receive of the header on the main node <nl> + std : : vector < MPI_Request > recvHeaderRequests ( NumProc ( ) - 1 ) ; <nl> + if ( m_mpi - > IsMainNode ( ) ) <nl> + { <nl> + for ( size_t j = 0 ; j < NumProc ( ) - 1 ; + + j ) <nl> + { <nl> + int source = ( j > = MyRank ( ) ) ? 
( j + 1 ) : j ; <nl> + / / We use a tag of ' numGradMatrices ' for the pre - aggregation header <nl> + m_mpi - > Irecv ( m_recvHeaders [ j ] , m_recvHeaders [ j ] - > Size ( ) , MPI_CHAR , source , numGradMatrices , & ( recvHeaderRequests [ j ] ) ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + } <nl> + } <nl> + <nl> + / / Asynchronously send stripes of the quantized gradient matrices to the respective nodes that own aggregation of that stripe <nl> + std : : vector < std : : vector < MPI_Request > > sendGradStripesQuantizedRequests ( numGradMatrices ) ; <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + { <nl> + m_preAggGradQuantizers [ i ] - > WaitQuantizeAsyncDone ( ) ; <nl> + size_t sendRequestIdx = 0 ; <nl> + for ( size_t j = 0 ; j < NumProc ( ) ; + + j ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( gradients [ i ] - > GetNumCols ( ) , j , NumProc ( ) ) ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + / / Do not send stripe for self <nl> + if ( j ! = MyRank ( ) ) <nl> + { <nl> + sendGradStripesQuantizedRequests [ i ] . push_back ( MPI_Request ( ) ) ; <nl> + QuantizedMatrix < ElemType > quantizedStripe = m_gradQuantized [ i ] - > ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ; <nl> + if ( m_traceLevel > = DEBUG_OUTPUT_TRACE_LEVEL ) <nl> + { <nl> + char printHeaderBuf [ 1024 ] ; <nl> + sprintf ( printHeaderBuf , " MPI Rank : % d , Sending Gradient Matrix No . % d slice " , ( int ) MyRank ( ) , ( int ) i ) ; <nl> + const size_t numRowsToPeek = 3 ; <nl> + const size_t numColsToPeek = 3 ; <nl> + size_t numRowsToPrint = ( std : : min ) ( numRowsToPeek , quantizedStripe . GetNumRows ( ) ) ; <nl> + size_t numColsToPrint = ( std : : min ) ( numColsToPeek , quantizedStripe . GetNumCols ( ) ) ; <nl> + <nl> + quantizedStripe . Print ( printHeaderBuf , 0 , numRowsToPrint - 1 , 0 , numColsToPrint - 1 ) ; <nl> + } <nl> + <nl> + m_mpi - > Isend ( quantizedStripe . Buffer ( ) , quantizedStripe . 
GetSize ( ) , MPI_CHAR , j , i , & ( sendGradStripesQuantizedRequests [ i ] [ sendRequestIdx ] ) ) | | MpiFail ( " MPI_Isend " ) ; <nl> + sendRequestIdx + + ; <nl> + } <nl> + else <nl> + { <nl> + / / Initialize the aggregate for the stripe with the quantized gradients instead of the original <nl> + / / gradients themselves , if so desired <nl> + if ( m_useQuantizationForSelfStripe ) <nl> + { <nl> + QuantizedMatrix < ElemType > preAggGradSelfStripeQuantized = m_gradQuantized [ i ] - > ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ; <nl> + m_aggGradStripeQuantizers [ i ] - > UnquantizeAsync ( preAggGradSelfStripeQuantized , * ( aggGradStripes [ i ] ) , false ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Send the headers from all nodes but the main node <nl> + MPI_Request sendHeaderRequest ; <nl> + if ( ! m_mpi - > IsMainNode ( ) ) <nl> + m_mpi - > Isend ( headerCPU , headerCPU - > Size ( ) , MPI_CHAR , m_mpi - > MainNodeRank ( ) , numGradMatrices , & sendHeaderRequest ) | | MpiFail ( " MPI_Isend " ) ; <nl> + <nl> + / / Wait for the stripes to arrive from each node and unquantize and aggregate <nl> + size_t numReceivesExpected = recvGradStripesQuantizedRequests . size ( ) ; <nl> + size_t numActualReceives = 0 ; <nl> + std : : vector < int > perGradMatrixReceiveCount ( recvRequestIdxToGradientMatrixIdxMap . size ( ) , 0 ) ; <nl> + while ( numActualReceives < numReceivesExpected ) <nl> + { <nl> + int idx = MPI_UNDEFINED ; <nl> + m_mpi - > Waitany ( recvGradStripesQuantizedRequests . size ( ) , recvGradStripesQuantizedRequests . 
data ( ) , & idx , MPI_STATUS_IGNORE ) | | MpiFail ( " MPI_Waitany " ) ; <nl> + if ( idx = = MPI_UNDEFINED ) <nl> + { <nl> + break ; <nl> + } <nl> + <nl> + numActualReceives + + ; <nl> + <nl> + int gradMatrixIdxPosition = idx / ( NumProc ( ) - 1 ) ; <nl> + int recvBufferSubIndex = idx % ( NumProc ( ) - 1 ) ; <nl> + / / Map idx back to the actual gradient matrix index <nl> + int gradMatrixIdx = recvRequestIdxToGradientMatrixIdxMap [ gradMatrixIdxPosition ] ; <nl> + <nl> + / / Wait for the previous Unquantize to finish before issuing a new one <nl> + if ( m_useQuantizationForSelfStripe | | ( perGradMatrixReceiveCount [ gradMatrixIdxPosition ] > 0 ) ) <nl> + m_aggGradStripeQuantizers [ gradMatrixIdx ] - > WaitUnquantizeAsyncDone ( ) ; <nl> + <nl> + if ( m_traceLevel > = DEBUG_OUTPUT_TRACE_LEVEL ) <nl> + { <nl> + char printHeaderBuf [ 1024 ] ; <nl> + sprintf ( printHeaderBuf , " MPI Rank : % d , Received Gradient Matrix No . % d slice " , ( int ) MyRank ( ) , gradMatrixIdx ) ; <nl> + const size_t numRowsToPeek = 3 ; <nl> + const size_t numColsToPeek = 3 ; <nl> + size_t numRowsToPrint = ( std : : min ) ( numRowsToPeek , m_recvGradStripesQuantized [ gradMatrixIdx ] [ recvBufferSubIndex ] - > GetNumRows ( ) ) ; <nl> + size_t numColsToPrint = ( std : : min ) ( numColsToPeek , m_recvGradStripesQuantized [ gradMatrixIdx ] [ recvBufferSubIndex ] - > GetNumCols ( ) ) ; <nl> + <nl> + m_recvGradStripesQuantized [ gradMatrixIdx ] [ recvBufferSubIndex ] - > Print ( printHeaderBuf , 0 , numRowsToPrint - 1 , 0 , numColsToPrint - 1 ) ; <nl> + } <nl> + <nl> + m_aggGradStripeQuantizers [ gradMatrixIdx ] - > UnquantizeAsync ( * ( m_recvGradStripesQuantized [ gradMatrixIdx ] [ recvBufferSubIndex ] ) , * ( aggGradStripes [ gradMatrixIdx ] ) , true ) ; <nl> + <nl> + perGradMatrixReceiveCount [ gradMatrixIdxPosition ] + + ; <nl> + <nl> + / / Also issue the quantization if this stripe was the last one expected for this matrix <nl> + / / Note : We issue the quantization without waiting for 
the unquantization since the same stream <nl> + / / is used for both and they are implicitly sequenced <nl> + / / We reuse the buffer that we used for quantizing and sending out the pre - aggregation gradient <nl> + if ( perGradMatrixReceiveCount [ gradMatrixIdxPosition ] = = ( NumProc ( ) - 1 ) ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( gradients [ gradMatrixIdx ] - > GetNumCols ( ) , MyRank ( ) , NumProc ( ) ) ; <nl> + UNUSED ( stripe ) ; <nl> + assert ( stripe . m_numCols > 0 ) ; <nl> + m_aggGradStripeQuantizers [ gradMatrixIdx ] - > QuantizeAsync ( * ( aggGradStripes [ gradMatrixIdx ] ) , * ( aggGradStripesQuantized [ gradMatrixIdx ] ) , m_zeroThresholdFor1Bit ) ; <nl> + } <nl> + } <nl> + <nl> + assert ( numActualReceives = = numReceivesExpected ) ; <nl> + <nl> + / / On the main node wait for the headers to arrive and aggregate <nl> + if ( m_mpi - > IsMainNode ( ) ) <nl> + { <nl> + size_t numNodesHeadersReceivedFrom = 0 ; <nl> + while ( numNodesHeadersReceivedFrom < ( NumProc ( ) - 1 ) ) <nl> + { <nl> + int idx = MPI_UNDEFINED ; <nl> + m_mpi - > Waitany ( recvHeaderRequests . size ( ) , recvHeaderRequests . data ( ) , & idx , MPI_STATUS_IGNORE ) | | MpiFail ( " MPI_Waitany " ) ; <nl> + if ( idx = = MPI_UNDEFINED ) <nl> + break ; <nl> + <nl> + numNodesHeadersReceivedFrom + + ; <nl> + <nl> + headerCPU - > Aggregate ( m_recvHeaders [ idx ] , true ) ; <nl> + } <nl> + <nl> + assert ( numNodesHeadersReceivedFrom = = ( NumProc ( ) - 1 ) ) ; <nl> + } <nl> + <nl> + std : : vector < std : : vector < MPI_Request > > recvAggGradStripesQuantizedRequests ( numGradMatrices ) ; <nl> + / / Initiate receive of stripes of quantized aggregated gradients from different nodes <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + { <nl> + size_t recvRequestIdx = 0 ; <nl> + for ( size_t j = 0 ; j < NumProc ( ) ; + + j ) <nl> + { <nl> + / / Do not recv stripe for self <nl> + if ( j ! 
= MyRank ( ) ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( gradients [ i ] - > GetNumCols ( ) , j , NumProc ( ) ) ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + recvAggGradStripesQuantizedRequests [ i ] . push_back ( MPI_Request ( ) ) ; <nl> + QuantizedMatrix < ElemType > quantizedStripe = m_gradQuantized [ i ] - > ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ; <nl> + m_mpi - > Irecv ( quantizedStripe . Buffer ( ) , quantizedStripe . GetSize ( ) , MPI_CHAR , j , numGradMatrices + 1 + i , & ( recvAggGradStripesQuantizedRequests [ i ] [ recvRequestIdx ] ) ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + recvRequestIdx + + ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + MPI_Request recvAggHeaderRequest ; <nl> + / / Initiate receive of the aggregate header <nl> + if ( ! m_mpi - > IsMainNode ( ) ) <nl> + m_mpi - > Irecv ( headerCPU , headerCPU - > Size ( ) , MPI_CHAR , m_mpi - > MainNodeRank ( ) , numGradMatrices + 1 + numGradMatrices , & recvAggHeaderRequest ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + <nl> + / / Initiate broadcast of quantized aggregated gradient stripes to all other nodes <nl> + std : : vector < std : : vector < MPI_Request > > sendAggGradStripeQuantizedRequests ( numGradMatrices ) ; <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( gradients [ i ] - > GetNumCols ( ) , MyRank ( ) , NumProc ( ) ) ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + sendAggGradStripeQuantizedRequests [ i ] = std : : vector < MPI_Request > ( NumProc ( ) - 1 ) ; <nl> + m_aggGradStripeQuantizers [ i ] - > WaitQuantizeAsyncDone ( ) ; <nl> + for ( size_t j = 0 ; j < NumProc ( ) - 1 ; + + j ) <nl> + { <nl> + int dest = ( j > = MyRank ( ) ) ? 
( j + 1 ) : j ; <nl> + / / TODO : Should we use MPI_Bcast instead for better performance <nl> + m_mpi - > Isend ( aggGradStripesQuantized [ i ] - > Buffer ( ) , aggGradStripesQuantized [ i ] - > GetSize ( ) , MPI_CHAR , dest , numGradMatrices + 1 + i , & ( sendAggGradStripeQuantizedRequests [ i ] [ j ] ) ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Initiate send of the aggregate header from main node <nl> + std : : vector < MPI_Request > sendAggHeaderRequests ( NumProc ( ) - 1 ) ; <nl> + if ( m_mpi - > IsMainNode ( ) ) <nl> + { <nl> + for ( size_t j = 0 ; j < NumProc ( ) - 1 ; + + j ) <nl> + { <nl> + int dest = ( j > = MyRank ( ) ) ? ( j + 1 ) : j ; <nl> + / / TODO : Should we use MPI_Bcast instead for better performance <nl> + m_mpi - > Isend ( headerCPU , headerCPU - > Size ( ) , MPI_CHAR , dest , numGradMatrices + 1 + numGradMatrices , & ( sendAggHeaderRequests [ j ] ) ) | | MpiFail ( " MPI_Isend " ) ; <nl> + } <nl> + } <nl> + <nl> + / / Wait to receive all aggregated stripes and unquantize <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + { <nl> + m_mpi - > Waitall ( recvAggGradStripesQuantizedRequests [ i ] . size ( ) , recvAggGradStripesQuantizedRequests [ i ] . data ( ) , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Waitall " ) ; <nl> + <nl> + m_preAggGradQuantizers [ i ] - > UnquantizeAsync ( * ( m_gradQuantized [ i ] ) , * ( gradients [ i ] ) , false ) ; <nl> + } <nl> + <nl> + / / Wait to receive aggregate header <nl> + if ( ! 
m_mpi - > IsMainNode ( ) ) <nl> + m_mpi - > Wait ( & recvAggHeaderRequest , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Wait " ) ; <nl> + <nl> + / / Wait for all the unquantizations to finish <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + { <nl> + m_preAggGradQuantizers [ i ] - > WaitUnquantizeAsyncDone ( ) ; <nl> + <nl> + if ( m_traceLevel > = DEBUG_OUTPUT_TRACE_LEVEL ) <nl> + { <nl> + char printHeaderBuf [ 1024 ] ; <nl> + sprintf ( printHeaderBuf , " MPI Rank : % d , Aggregated Gradient Matrix No . % d " , ( int ) MyRank ( ) , ( int ) i ) ; <nl> + PrintMatrix ( printHeaderBuf , gradients [ i ] ) ; <nl> + } <nl> + } <nl> + <nl> + / / Wait for completion of the async send requests <nl> + for ( int i = 0 ; i < sendGradStripesQuantizedRequests . size ( ) ; + + i ) <nl> + { <nl> + if ( sendGradStripesQuantizedRequests [ i ] . size ( ) > 0 ) <nl> + m_mpi - > Waitall ( sendGradStripesQuantizedRequests [ i ] . size ( ) , sendGradStripesQuantizedRequests [ i ] . data ( ) , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Waitall " ) ; <nl> + } <nl> + <nl> + if ( ! m_mpi - > IsMainNode ( ) ) <nl> + m_mpi - > Wait ( & sendHeaderRequest , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Wait " ) ; <nl> + <nl> + for ( int i = 0 ; i < sendAggGradStripeQuantizedRequests . size ( ) ; + + i ) <nl> + { <nl> + if ( sendAggGradStripeQuantizedRequests [ i ] . size ( ) > 0 ) <nl> + m_mpi - > Waitall ( sendAggGradStripeQuantizedRequests [ i ] . size ( ) , sendAggGradStripeQuantizedRequests [ i ] . data ( ) , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Waitall " ) ; <nl> + } <nl> + <nl> + if ( m_mpi - > IsMainNode ( ) ) <nl> + m_mpi - > Waitall ( sendAggHeaderRequests . size ( ) , sendAggHeaderRequests . data ( ) , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Waitall " ) ; <nl> + <nl> + if ( showSyncPerfStats ) <nl> + { <nl> + aggregationTimer . Stop ( ) ; <nl> + double gradientAggregationTime = aggregationTimer . 
ElapsedSeconds ( ) ; <nl> + fprintf ( stderr , " Actual gradient aggregation time : % . 6g \ n " , gradientAggregationTime ) ; <nl> + } <nl> + } <nl> + <nl> + / / Debug helper to print matrix contents <nl> + static void PrintMatrix ( const char * printHeader , Matrix < ElemType > * matrixToPrint , bool peek = true ) <nl> + { <nl> + if ( peek ) <nl> + { <nl> + const size_t numRowsToPeek = 3 ; <nl> + const size_t numColsToPeek = 3 ; <nl> + <nl> + size_t numRowsToPrint = ( std : : min ) ( numRowsToPeek , matrixToPrint - > GetNumRows ( ) ) ; <nl> + size_t numColsToPrint = ( std : : min ) ( numColsToPeek , matrixToPrint - > GetNumCols ( ) ) ; <nl> + <nl> + matrixToPrint - > Print ( printHeader , 0 , numRowsToPrint - 1 , 0 , numColsToPrint - 1 ) ; <nl> + } <nl> + else <nl> + { <nl> + matrixToPrint - > Print ( printHeader ) ; <nl> + } <nl> + <nl> + fflush ( stderr ) ; <nl> + } <nl> + <nl> + private : <nl> + std : : unique_ptr < CUDAPageLockedMemAllocator > m_allocator ; <nl> + <nl> + std : : vector < std : : unique_ptr < MatrixQuantizer < ElemType > > > m_preAggGradQuantizers ; <nl> + std : : vector < std : : unique_ptr < QuantizedMatrix < ElemType > > > m_gradQuantized ; <nl> + <nl> + std : : vector < std : : unique_ptr < MatrixQuantizer < ElemType > > > m_aggGradStripeQuantizers ; <nl> + std : : vector < std : : vector < std : : unique_ptr < QuantizedMatrix < ElemType > > > > m_recvGradStripesQuantized ; <nl> + std : : vector < DistGradHeader * > m_recvHeaders ; <nl> + <nl> + / / Number of bits that each gradient value is quantized to before communication <nl> + / / with other nodes <nl> + int m_numQuantizationBits ; <nl> + <nl> + / / option for handling the mean for 1 - bit quantization <nl> + / / force 1 - bit quant to threshold against 0 rather than the midpoint between lower and upper <nl> + bool m_zeroThresholdFor1Bit ; <nl> + <nl> + / / Since the self - stripe in an all - reduce is not communicated , there is really no reason to <nl> + / / quantize it for reduced 
communication . However , we add this as an option for for consistency <nl> + / / across all stripes if desired <nl> + bool m_useQuantizationForSelfStripe ; <nl> + <nl> + / / Perform asynchronous gradient aggregation using double buffering of the gradient matrices <nl> + bool m_useAsyncAggregation ; <nl> + <nl> + / / Future corresponding to the current in - flight async gradient aggregation <nl> + std : : future < void > m_pendingAsyncAggregation ; <nl> + <nl> + / / Buffered gradients that we asynchronously aggregate <nl> + std : : unordered_map < Matrix < ElemType > * , std : : unique_ptr < Matrix < ElemType > > > m_bufferedGradients ; <nl> + DistGradHeader * m_bufferedGradHeader ; <nl> + <nl> + int m_traceLevel ; <nl> + int m_syncStatsTrace ; <nl> + <nl> + / / Only used for controlling frequency of measuring / showing gradient aggregation perf stats <nl> + size_t m_iterationCount ; <nl> + <nl> + bool m_initialized ; <nl> + } ; <nl> + <nl> + } } } <nl> new file mode 100644 <nl> index 00000000000 . . eda622dc37d <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / BlockMomentumDistributedLearner . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include < vector > <nl> + # include " CNTKLibrary . h " <nl> + # include " DistributedLearnerBase . h " <nl> + # include < numeric > <nl> + # include < iostream > <nl> + # include < sstream > <nl> + <nl> + namespace CNTK <nl> + { <nl> + / / / <nl> + / / / Block Momentum Trainer . <nl> + / / / <nl> + class BlockMomentumDistributedLearner : public DistributedLearnerBase <nl> + { <nl> + private : <nl> + enum class Action ; <nl> + friend std : : ostream & operator < < ( std : : ostream & out , const Action action ) <nl> + { <nl> + static std : : map < Action , std : : string > actionStr ; <nl> + if ( actionStr . 
size ( ) = = 0 ) <nl> + { <nl> + actionStr [ Action : : Aggregate ] = " Aggregate " ; <nl> + actionStr [ Action : : AggregateMetrics ] = " AggregateMetrics " ; <nl> + actionStr [ Action : : Checkpoint ] = " Checkpoint " ; <nl> + actionStr [ Action : : Shutdown ] = " Shutdown " ; <nl> + actionStr [ Action : : Wait ] = " Wait " ; <nl> + } <nl> + return out < < actionStr [ action ] ; <nl> + } <nl> + <nl> + / / Print debug info about synchronization action requested and granted <nl> + void DebugPrintSynchronizeInfo ( Action requestedAction , Action grantedAction ) <nl> + { <nl> + if ( GetTraceLevel ( ) > = TraceLevel : : Info ) <nl> + { <nl> + std : : ostringstream outString ; <nl> + outString < < " BMUF Rank " < < m_communicator - > CurrentWorker ( ) . m_globalRank < < " Action requested " < < requestedAction < < " Action returned " < < grantedAction < < std : : endl ; <nl> + std : : cerr < < outString . str ( ) ; / / stderr output <nl> + } <nl> + } <nl> + <nl> + template < class T > using Matrix = Microsoft : : MSR : : CNTK : : Matrix < T > ; <nl> + <nl> + public : <nl> + BlockMomentumDistributedLearner ( <nl> + DistributedCommunicatorPtr communicator , <nl> + LearnerPtr learner , <nl> + size_t distributedAfterSamples , <nl> + size_t globalModelAggregationBlockSize , <nl> + bool useNesterovMomentum , <nl> + bool resetSGDMomentumAfterAggregation , <nl> + double blockLearningRate ) <nl> + : BlockMomentumDistributedLearner ( <nl> + communicator , <nl> + learner , <nl> + distributedAfterSamples , <nl> + globalModelAggregationBlockSize , <nl> + useNesterovMomentum , <nl> + resetSGDMomentumAfterAggregation , <nl> + blockLearningRate , <nl> + Momentum2TimeConstant ( 1 . 0 - 1 . 0 / ( double ) communicator - > Workers ( ) . 
size ( ) , globalModelAggregationBlockSize ) ) <nl> + { } <nl> + <nl> + BlockMomentumDistributedLearner ( <nl> + DistributedCommunicatorPtr communicator , <nl> + LearnerPtr learner , <nl> + size_t distributedAfterSamples , <nl> + size_t globalModelAggregationBlockSize , <nl> + bool useNesterovMomentum , <nl> + bool resetSGDMomentumAfterAggregation , <nl> + double blockLearningRate , <nl> + double blockMomentumAsTimeConstant ) <nl> + : DistributedLearnerBase ( communicator , learner , distributedAfterSamples ) , <nl> + m_useNesterovMomentum ( useNesterovMomentum ) , <nl> + m_resetSGDMomentumAfterAggregation ( resetSGDMomentumAfterAggregation ) , <nl> + m_blockLearningRate ( blockLearningRate ) , <nl> + m_blockMomentumAsTimeConstantPerWorker ( blockMomentumAsTimeConstant / communicator - > Workers ( ) . size ( ) ) , <nl> + m_globalModelAggregationBlockSize ( globalModelAggregationBlockSize ) , <nl> + m_numSamplesSeenInCurrentBlock ( 0 ) , <nl> + m_endOfDataReached ( false ) , <nl> + m_localTotalNumSamplesSeen ( 0 ) , <nl> + m_syncPeriodPerWorker ( globalModelAggregationBlockSize / communicator - > Workers ( ) . size ( ) ) <nl> + { <nl> + if ( m_syncPeriodPerWorker = = 0 ) <nl> + InvalidArgument ( " Sync period is too small . " ) ; <nl> + <nl> + / / Need to allocate memory here to make sure not hitting OOM <nl> + std : : vector < NDArrayViewPtr > parameterValues ; <nl> + GetParameterValues ( learner - > Parameters ( ) , parameterValues ) ; <nl> + <nl> + m_blockLevelSmoothedGradient . resize ( parameterValues . size ( ) ) ; <nl> + m_prevParameters . resize ( parameterValues . size ( ) ) ; <nl> + m_tempBlockGradient . resize ( parameterValues . size ( ) ) ; <nl> + Reset ( parameterValues ) ; <nl> + } <nl> + <nl> + size_t MinibatchSizeScaleFactor ( ) override <nl> + { <nl> + return m_communicator - > Workers ( ) . 
size ( ) ; <nl> + } <nl> + <nl> + bool Update ( std : : unordered_map < Parameter , NDArrayViewPtr > & gradientValues , MinibatchInfo & info ) override <nl> + { <nl> + / / mark start of block before local update <nl> + std : : vector < NDArrayViewPtr > values ; <nl> + GetParameterValues ( m_learner - > Parameters ( ) , values ) ; <nl> + <nl> + / / note this is only for the first update , after that SyncBlock handles the bookkeeping <nl> + if ( ! m_prevParamInitialized ) <nl> + { <nl> + Reset ( values ) ; <nl> + m_prevParamInitialized = true ; <nl> + } <nl> + <nl> + / / do local update first , then block update . Local update would have different gradient for each worker , <nl> + / / and this order is to make sure all workers got the same model after block update <nl> + if ( ! info . IsEmpty ( ) ) <nl> + { <nl> + / / For block momentum the number of aggreagate / checkpoints should match , so for now we ignore the return value of local learners . <nl> + auto profWeights = Microsoft : : MSR : : CNTK : : ScopeProfile ( Microsoft : : MSR : : CNTK : : profilerEvtMainWeights ) ; <nl> + m_learner - > Update ( gradientValues , info . numberOfSamples , info . atEndOfSweep ) ; <nl> + <nl> + / / after local update , use the latest model for block update <nl> + values . 
clear ( ) ; <nl> + GetParameterValues ( m_learner - > Parameters ( ) , values ) ; <nl> + } <nl> + <nl> + auto profGradientAgg = Microsoft : : MSR : : CNTK : : ProfilerTimeBegin ( ) ; <nl> + bool updated = PerformDistributedUpdateIfNeeded ( values , info ) ; <nl> + Microsoft : : MSR : : CNTK : : ProfilerTimeEnd ( profGradientAgg , Microsoft : : MSR : : CNTK : : profilerEvtMainGradient ) ; <nl> + <nl> + return updated ; <nl> + } <nl> + <nl> + / / Optionally overridable method to get checkpoint state associated with this Distributed train method <nl> + Dictionary CreateCheckpoint ( ) override <nl> + { <nl> + std : : vector < NDArrayViewPtr > values ; <nl> + GetParameterValues ( m_learner - > Parameters ( ) , values ) ; <nl> + <nl> + / / During checkpoint , other workers could be in aggregation state . Let ' s allow them to finish aggregation . <nl> + Action action ; <nl> + while ( ( action = SynchronizeAction ( Action : : Checkpoint ) ) ! = Action : : Checkpoint ) <nl> + { <nl> + DebugPrintSynchronizeInfo ( Action : : Checkpoint , action ) ; <nl> + <nl> + if ( action = = Action : : Wait ) <nl> + continue ; <nl> + if ( action = = Action : : Aggregate ) <nl> + AggregateImpl ( values ) ; <nl> + else <nl> + RuntimeError ( " Unexpected action received . " ) ; <nl> + } <nl> + <nl> + DebugPrintSynchronizeInfo ( Action : : Checkpoint , action ) ; <nl> + <nl> + / / Always aggregate before the checkpoint , so prevParameter and m_numSamplesSeenInCurrentBlock don ' t need to be saved <nl> + SynchronizeAction ( Action : : Aggregate ) ; <nl> + AggregateImpl ( values ) ; <nl> + <nl> + std : : vector < DictionaryValue > serializedSmoothedGradients ; <nl> + for ( auto sg : m_blockLevelSmoothedGradient ) <nl> + { <nl> + serializedSmoothedGradients . 
push_back ( * sg ) ; <nl> + } <nl> + <nl> + Dictionary result ; <nl> + result [ L " base " ] = DistributedLearnerBase : : CreateCheckpoint ( ) ; <nl> + result [ L " localTotalNumSamplesSeen " ] = m_localTotalNumSamplesSeen ; <nl> + result [ L " blockLevelSmoothedGradient " ] = serializedSmoothedGradients ; <nl> + return result ; <nl> + } <nl> + <nl> + void RestoreFromCheckpoint ( const Dictionary & checkpoint ) override <nl> + { <nl> + DistributedLearnerBase : : RestoreFromCheckpoint ( checkpoint [ L " base " ] . Value < Dictionary > ( ) ) ; <nl> + m_localTotalNumSamplesSeen = checkpoint [ L " localTotalNumSamplesSeen " ] . Value < size_t > ( ) ; <nl> + const auto & smoothedGradients = checkpoint [ L " blockLevelSmoothedGradient " ] . Value < std : : vector < DictionaryValue > > ( ) ; <nl> + <nl> + if ( m_blockLevelSmoothedGradient . size ( ) ! = smoothedGradients . size ( ) ) <nl> + RuntimeError ( " Inconsistent parameter size between learner and checkpoint " ) ; <nl> + <nl> + for ( size_t i = 0 ; i < m_blockLevelSmoothedGradient . size ( ) ; i + + ) <nl> + { <nl> + m_blockLevelSmoothedGradient [ i ] - > CopyFrom ( smoothedGradients [ i ] . Value < NDArrayView > ( ) ) ; <nl> + } <nl> + <nl> + m_prevParamInitialized = false ; <nl> + } <nl> + <nl> + private : <nl> + / / Block momentum needs to do aggregation of loss and eval across workers . <nl> + virtual void DoAggregateMetricsIfNeeded ( NDArrayViewPtr & localTrainingLoss , NDArrayViewPtr & localEvalCriterion ) override <nl> + { <nl> + m_shutDownSeenBefore = false ; <nl> + / / If shutdown has been agreed upon before , then return from metrics aggregation . Other shutdown workers won ' t be able to sync now . <nl> + if ( m_communicator - > Workers ( ) . size ( ) = = 1 | | m_shutDownSeenBefore ) <nl> + { <nl> + return ; <nl> + } <nl> + <nl> + Action action ; <nl> + while ( ( action = SynchronizeAction ( Action : : AggregateMetrics ) ) ! 
= Action : : AggregateMetrics ) <nl> + { <nl> + DebugPrintSynchronizeInfo ( Action : : AggregateMetrics , action ) ; <nl> + <nl> + std : : vector < NDArrayViewPtr > paramValues ; <nl> + GetParameterValues ( m_learner - > Parameters ( ) , paramValues ) ; <nl> + <nl> + switch ( action ) <nl> + { <nl> + / / Aggregate params first and try for aggregate metrics again <nl> + case Action : : Aggregate : <nl> + AggregateImpl ( paramValues ) ; <nl> + break ; <nl> + / / Can ' t do checkpointing here since not called from checkpointing code , so return . Checkpointing will be called again eventually . <nl> + case Action : : Checkpoint : <nl> + return ; <nl> + / / Can ' t aggregate metrics since others are going in shutdown . <nl> + case Action : : Shutdown : <nl> + m_shutDownSeenBefore = true ; <nl> + return ; / / Can ' t aggregate if another worker is in shutdown mode <nl> + } <nl> + } <nl> + <nl> + DebugPrintSynchronizeInfo ( Action : : AggregateMetrics , action ) ; <nl> + <nl> + / / Synchronization complete - Start the loss and eval aggregation <nl> + float averageTrainingLoss = 0 ; <nl> + if ( localTrainingLoss ) <nl> + { <nl> + averageTrainingLoss = localTrainingLoss - > AsScalar < float > ( ) ; <nl> + } <nl> + <nl> + float averageEvalCriterion = 0 ; <nl> + if ( localEvalCriterion ) <nl> + { <nl> + averageEvalCriterion = localEvalCriterion - > AsScalar < float > ( ) ; <nl> + } <nl> + <nl> + NDArrayViewPtr inPlaceAggregateTrainingLoss = std : : make_shared < NDArrayView > ( averageTrainingLoss , NDShape { } , DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + NDArrayViewPtr inPlaceAggregateEvalCriterion = std : : make_shared < NDArrayView > ( averageEvalCriterion , NDShape { } , DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + vector < NDArrayViewPtr > inPlaceAggregateVector = { inPlaceAggregateTrainingLoss , inPlaceAggregateEvalCriterion } ; <nl> + <nl> + m_communicator - > AggregateInPlace ( inPlaceAggregateVector , m_communicator - > Workers ( ) ) ; <nl> + <nl> + if ( 
localTrainingLoss ) <nl> + { <nl> + inPlaceAggregateTrainingLoss - > SetValue ( inPlaceAggregateTrainingLoss - > AsScalar < float > ( ) / m_communicator - > Workers ( ) . size ( ) ) ; <nl> + localTrainingLoss - > CopyFrom ( * inPlaceAggregateTrainingLoss ) ; <nl> + } <nl> + <nl> + if ( localEvalCriterion ) <nl> + { <nl> + inPlaceAggregateEvalCriterion - > SetValue ( inPlaceAggregateEvalCriterion - > AsScalar < float > ( ) / m_communicator - > Workers ( ) . size ( ) ) ; <nl> + localEvalCriterion - > CopyFrom ( * inPlaceAggregateEvalCriterion ) ; <nl> + } <nl> + } <nl> + <nl> + / / Optional override that gets called per minibatch after finishing gradient computation but before updating model parameters <nl> + bool PerformDistributedUpdateIfNeeded ( std : : vector < NDArrayViewPtr > & parameterValues , MinibatchInfo & info ) <nl> + { <nl> + / / If the last minibatch , set the end of data state . <nl> + if ( info . atEndOfData ) <nl> + m_endOfDataReached = true ; <nl> + <nl> + m_localTotalNumSamplesSeen + = info . numberOfSamples ; <nl> + m_sampleCount + = info . numberOfSamples ; <nl> + <nl> + if ( m_distributeAfterSamples > m_sampleCount ) <nl> + { <nl> + if ( m_endOfDataReached ) <nl> + { <nl> + / / We have not even reached distributed state , <nl> + / / simply stop processing by returning false . <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + if ( ! m_endOfDataReached ) <nl> + { <nl> + m_numSamplesSeenInCurrentBlock + = info . numberOfSamples ; <nl> + if ( m_numSamplesSeenInCurrentBlock < m_syncPeriodPerWorker ) <nl> + return true ; <nl> + <nl> + Aggregate ( parameterValues ) ; <nl> + return true ; <nl> + } <nl> + <nl> + return Shutdown ( parameterValues ) ; <nl> + } <nl> + <nl> + / / Before doing any work , the distributed learner synchronizes with other learners to <nl> + / / decide what to do next . <nl> + / / The priority of actons are : <nl> + / / 1 ) If any worker wants to aggregate - aggregation is done . 
<nl> + / / 2 ) If any worker wants to checkpoint and nobody wants to aggregate - checkpointing is done . If anyone wants to aggregate metrics , wait to allow it to come in checkpoint state . <nl> + / / 3 ) If all want to shutdown - it means we reached the end of the data and shutdown can be done . If anyone wants to aggregate metrics , wait to allow it to come in shutdown state . <nl> + / / 4 ) If all worker wants to aggregate metrics - metrics aggregation is done . Otherwise return aggregate , checkpoint or shutdown if anyone else wants it <nl> + / / The priority above eliminate resolves situations when some of the workers run out of data <nl> + / / and other workers require checkpointing or aggregation . <nl> + enum class Action <nl> + { <nl> + Wait , / / Waits in the current state without doing anything . <nl> + Aggregate , <nl> + AggregateMetrics , / / Used to allow aggregation of loss and eval metrics . <nl> + Checkpoint , <nl> + Shutdown <nl> + } ; <nl> + <nl> + void GetParameterValues ( const std : : vector < Parameter > & parameters , std : : vector < NDArrayViewPtr > & result ) <nl> + { <nl> + for ( auto p : parameters ) <nl> + result . push_back ( p . Value ( ) ) ; <nl> + } <nl> + <nl> + void Aggregate ( std : : vector < NDArrayViewPtr > & parameters ) <nl> + { <nl> + / / Synchronization action . Aggregate has the highest priority , so the expected result is aggregate . <nl> + Action action = SynchronizeAction ( Action : : Aggregate ) ; <nl> + if ( action ! = Action : : Aggregate ) <nl> + LogicError ( " Unexpected action during aggregation . " ) ; <nl> + <nl> + AggregateImpl ( parameters ) ; <nl> + } <nl> + <nl> + bool Shutdown ( std : : vector < NDArrayViewPtr > & parameters ) <nl> + { <nl> + / / During shutdown , other workers could be in checkpointing or aggregation state . <nl> + / / Finished workers should properly behave in this case . <nl> + Action action ; <nl> + while ( ( action = SynchronizeAction ( Action : : Shutdown ) ) ! 
= Action : : Shutdown ) <nl> + { <nl> + DebugPrintSynchronizeInfo ( Action : : Shutdown , action ) ; <nl> + <nl> + switch ( action ) <nl> + { <nl> + case Action : : Aggregate : <nl> + AggregateImpl ( parameters ) ; <nl> + break ; <nl> + case Action : : Checkpoint : <nl> + / / Somebody still has to call the checkpoint from the outside . <nl> + return true ; <nl> + case Action : : Wait : <nl> + / / Someone is in aggregate metrics . Wait for it to come to shutdown . <nl> + continue ; <nl> + default : <nl> + RuntimeError ( " Unexpected action received . " ) ; <nl> + } <nl> + } <nl> + <nl> + DebugPrintSynchronizeInfo ( Action : : Shutdown , action ) ; <nl> + <nl> + / / Last synchronization <nl> + AggregateImpl ( parameters ) ; <nl> + return false ; / / Make compiler happy . <nl> + } <nl> + <nl> + / / Synchronize ( Agree ) on action before doing it . This is needed to prevent deadlock in MPI . <nl> + / / Aggregate is highest priority . So AggregateImpl can be called after calling SynchronizeAction ( Action : : Aggreagte ) . <nl> + / / Others need to ask for permission in a loop <nl> + Action SynchronizeAction ( Action self ) <nl> + { <nl> + assert ( self = = Action : : Checkpoint | | self = = Action : : Aggregate | | self = = Action : : Shutdown | | self = = Action : : AggregateMetrics ) ; <nl> + <nl> + double data [ 2 ] = { static_cast < double > ( self ) , static_cast < double > ( m_localTotalNumSamplesSeen ) } ; <nl> + auto a = std : : make_shared < NDArrayView > ( DataType : : Double , NDShape { 2 } , & data , sizeof ( double ) * 2 , DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + m_communicator - > Concatenate ( std : : vector < NDArrayViewPtr > { a } , m_actionBuffer , m_communicator - > Workers ( ) ) ; <nl> + assert ( m_actionBuffer . size ( ) = = 1 ) ; <nl> + <nl> + auto buffer = m_actionBuffer . front ( ) - > DataBuffer < double > ( ) ; <nl> + auto bufferSize = m_actionBuffer . front ( ) - > Shape ( ) . 
TotalSize ( ) ; <nl> + auto bufferEnd = buffer + bufferSize ; <nl> + <nl> + std : : vector < Action > actions ; <nl> + actions . reserve ( m_communicator - > Workers ( ) . size ( ) ) ; <nl> + <nl> + std : : vector < size_t > localNumberOfSamples ; <nl> + localNumberOfSamples . reserve ( m_communicator - > Workers ( ) . size ( ) ) ; <nl> + <nl> + for ( const double * start = buffer ; start ! = bufferEnd ; start + = 2 ) <nl> + { <nl> + actions . push_back ( static_cast < Action > ( ( int ) * start ) ) ; <nl> + localNumberOfSamples . push_back ( static_cast < size_t > ( * ( start + 1 ) ) ) ; <nl> + } <nl> + m_sampleCount = std : : accumulate ( localNumberOfSamples . begin ( ) , localNumberOfSamples . end ( ) , ( size_t ) 0 ) ; <nl> + <nl> + / / If all want to aggregate metrics , only then we aggregate metrics . <nl> + if ( std : : all_of ( actions . begin ( ) , actions . end ( ) , [ ] ( Action c ) { return c = = Action : : AggregateMetrics ; } ) ) <nl> + return Action : : AggregateMetrics ; <nl> + <nl> + / / If all want to shutdown - we shutdown . <nl> + if ( std : : all_of ( actions . begin ( ) , actions . end ( ) , [ ] ( Action c ) { return c = = Action : : Shutdown ; } ) ) <nl> + return Action : : Shutdown ; <nl> + <nl> + / / If all want to checkpoint - we checkpoint . <nl> + if ( std : : all_of ( actions . begin ( ) , actions . end ( ) , [ ] ( Action c ) { return c = = Action : : Checkpoint ; } ) ) <nl> + return Action : : Checkpoint ; <nl> + <nl> + / / If all are either in Checkpoint , Shutdown or AggregateMetrics , <nl> + / / Then AggregateMetrics state has lowest priority . Workers in it return without doing anything . Other workers wait for Aggregate Metrics to come in their state . <nl> + / / Between Checkpoint and Shutdown , Shutdown has lower priority . Shutdown worker will return and checkpoint worker will wait for others to come in checkpoint state . <nl> + if ( std : : all_of ( actions . begin ( ) , actions . 
end ( ) , [ ] ( Action c ) { return c = = Action : : Checkpoint | | c = = Action : : Shutdown | | c = = Action : : AggregateMetrics ; } ) ) <nl> + { <nl> + bool isAnyCheckpoint = std : : any_of ( actions . begin ( ) , actions . end ( ) , [ ] ( Action c ) { return c = = Action : : Checkpoint ; } ) ; <nl> + bool isAnyShutdown = std : : any_of ( actions . begin ( ) , actions . end ( ) , [ ] ( Action c ) { return c = = Action : : Shutdown ; } ) ; <nl> + bool isAnyAggregateMetrics = std : : any_of ( actions . begin ( ) , actions . end ( ) , [ ] ( Action c ) { return c = = Action : : AggregateMetrics ; } ) ; <nl> + if ( self = = Action : : Shutdown ) <nl> + { <nl> + / / Do checkpoint first if any other requests checkpoint . Then come back to shutdown . <nl> + if ( isAnyCheckpoint ) <nl> + { <nl> + return Action : : Checkpoint ; <nl> + } <nl> + <nl> + / / Allow the aggregate metrics to come in shutdown state and request again . <nl> + if ( isAnyAggregateMetrics ) <nl> + { <nl> + return Action : : Wait ; <nl> + } <nl> + <nl> + return Action : : Shutdown ; <nl> + } <nl> + else if ( self = = Action : : Checkpoint ) <nl> + { <nl> + / / Wait for other in shutdown or aggregate metrics state to come to checkpoint state <nl> + if ( isAnyShutdown | | isAnyAggregateMetrics ) <nl> + { <nl> + return Action : : Wait ; <nl> + } <nl> + <nl> + return Action : : Checkpoint ; <nl> + } <nl> + else if ( self = = Action : : AggregateMetrics ) <nl> + { <nl> + / / AggregateMetrics can ' t do aggregate metrics if anyone is in shutdown <nl> + if ( isAnyShutdown ) <nl> + { <nl> + return Action : : Shutdown ; <nl> + } <nl> + <nl> + / / If all others are either metrics aggregate or checkpoint then state returned is checkpoint and we don ' t do metrics aggregation <nl> + return Action : : Checkpoint ; <nl> + } <nl> + } <nl> + <nl> + / / Otherwise we aggregate . This is given priority by all other workers in checkpoint , shutdown or aggregate metrics states . 
<nl> + return Action : : Aggregate ; <nl> + } <nl> + <nl> + void AggregateImpl ( std : : vector < NDArrayViewPtr > & parameters ) <nl> + { <nl> + / / Let update the weights . <nl> + if ( parameters . front ( ) - > GetDataType ( ) = = DataType : : Double ) <nl> + SynchronizeModel < double > ( parameters ) ; <nl> + else if ( parameters . front ( ) - > GetDataType ( ) = = DataType : : Float ) <nl> + SynchronizeModel < float > ( parameters ) ; <nl> + else <nl> + RuntimeError ( " Unsupported type . " ) ; <nl> + <nl> + m_numSamplesSeenInCurrentBlock = 0 ; <nl> + <nl> + if ( m_resetSGDMomentumAfterAggregation ) <nl> + m_learner - > ResetSmoothedGradients ( ) ; <nl> + } <nl> + <nl> + Dictionary CreateCheckpointImpl ( std : : vector < NDArrayViewPtr > & parameters ) <nl> + { <nl> + / / During checkpoint , other workers could be in aggregation state . Let ' s allow them to finish aggregation . <nl> + Action action ; <nl> + while ( ( action = SynchronizeAction ( Action : : Checkpoint ) ) ! = Action : : Checkpoint ) <nl> + { <nl> + DebugPrintSynchronizeInfo ( Action : : Checkpoint , action ) ; <nl> + <nl> + if ( action = = Action : : Wait ) <nl> + continue ; <nl> + if ( action = = Action : : Aggregate ) <nl> + AggregateImpl ( parameters ) ; <nl> + else <nl> + RuntimeError ( " Unexpected action received . " ) ; <nl> + } <nl> + <nl> + DebugPrintSynchronizeInfo ( Action : : Checkpoint , action ) ; <nl> + <nl> + return DistributedLearnerBase : : CreateCheckpoint ( ) ; <nl> + } <nl> + <nl> + bool IsResetRequired ( std : : vector < NDArrayViewPtr > & parameters ) const <nl> + { <nl> + if ( m_prevParameters . size ( ) ! = parameters . size ( ) | | <nl> + m_blockLevelSmoothedGradient . size ( ) ! = parameters . size ( ) ) <nl> + return true ; <nl> + <nl> + for ( size_t i = 0 ; i < parameters . size ( ) ; + + i ) <nl> + { <nl> + if ( m_prevParameters [ i ] - > Shape ( ) ! = parameters [ i ] - > Shape ( ) | | <nl> + m_prevParameters [ i ] - > Device ( ) ! 
= parameters [ i ] - > Device ( ) | | <nl> + m_blockLevelSmoothedGradient [ i ] - > Shape ( ) ! = parameters [ i ] - > Shape ( ) | | <nl> + m_blockLevelSmoothedGradient [ i ] - > Device ( ) ! = parameters [ i ] - > Device ( ) ) <nl> + { <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void Reset ( const std : : vector < NDArrayViewPtr > & parameters ) <nl> + { <nl> + for ( size_t i = 0 ; i < parameters . size ( ) ; + + i ) <nl> + { <nl> + auto & p = parameters [ i ] ; <nl> + <nl> + if ( p - > GetDataType ( ) = = DataType : : Double ) <nl> + ResetBuffer < double > ( i , p ) ; <nl> + else if ( p - > GetDataType ( ) = = DataType : : Float ) <nl> + ResetBuffer < float > ( i , p ) ; <nl> + else <nl> + RuntimeError ( " Unsupported type . " ) ; <nl> + } <nl> + } <nl> + <nl> + template < class ElemType > <nl> + void ResetBuffer ( size_t index , const NDArrayViewPtr & p ) <nl> + { <nl> + auto data = p - > GetMatrix < ElemType > ( ) ; <nl> + if ( ! m_blockLevelSmoothedGradient [ index ] ) <nl> + { <nl> + / / has not been initialized yet <nl> + auto pSmoothedGrad = std : : make_shared < NDArrayView > ( AsDataType < ElemType > ( ) , p - > Shape ( ) , AsDeviceDescriptor ( data - > GetDeviceId ( ) ) ) ; <nl> + pSmoothedGrad - > SetValue ( static_cast < ElemType > ( 0 ) ) ; <nl> + m_blockLevelSmoothedGradient [ index ] = pSmoothedGrad ; <nl> + } <nl> + <nl> + if ( ! m_prevParameters [ index ] ) <nl> + { <nl> + NDArrayViewPtr newValue = std : : make_shared < NDArrayView > ( AsDataType < ElemType > ( ) , p - > Shape ( ) , AsDeviceDescriptor ( data - > GetDeviceId ( ) ) ) ; <nl> + std : : shared_ptr < Matrix < ElemType > > newData = newValue - > GetWritableMatrix < ElemType > ( ) ; <nl> + newData - > SetValue ( * data ) ; <nl> + m_prevParameters [ index ] = newValue ; <nl> + } <nl> + else <nl> + { <nl> + m_prevParameters [ index ] - > GetWritableMatrix < ElemType > ( ) - > SetValue ( * data ) ; <nl> + } <nl> + <nl> + if ( ! 
m_tempBlockGradient [ index ] ) <nl> + { <nl> + m_tempBlockGradient [ index ] = std : : make_shared < NDArrayView > ( AsDataType < ElemType > ( ) , p - > Shape ( ) , AsDeviceDescriptor ( data - > GetDeviceId ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + template < class ElemType > <nl> + void SynchronizeModel ( const std : : vector < NDArrayViewPtr > & parameterValues ) <nl> + { <nl> + ElemType blockMomentum = ( ElemType ) TimeConstant2Momentum ( m_blockMomentumAsTimeConstantPerWorker , m_numSamplesSeenInCurrentBlock ) ; <nl> + <nl> + / / 1 . Let ' s aggregate weights <nl> + for ( size_t i = 0 ; i < parameterValues . size ( ) ; + + i ) <nl> + { <nl> + / / Get current model <nl> + Matrix < ElemType > & previousWeight = * m_prevParameters [ i ] - > GetWritableMatrix < ElemType > ( ) ; / / prev model value <nl> + Matrix < ElemType > & currentWeight = * parameterValues [ i ] - > GetWritableMatrix < ElemType > ( ) ; <nl> + Matrix < ElemType > & blockGrad = * m_tempBlockGradient [ i ] - > GetWritableMatrix < ElemType > ( ) ; <nl> + <nl> + / / Subtract it from the previous model <nl> + blockGrad = previousWeight - currentWeight ; / / matW becomes local block gradient ( of one worker ) <nl> + } <nl> + <nl> + / / Send block gradient over MPI nodes . <nl> + m_communicator - > AggregateInPlace ( m_tempBlockGradient , m_communicator - > Workers ( ) ) ; <nl> + <nl> + / / 2 . Let ' s update the model <nl> + for ( size_t i = 0 ; i < parameterValues . size ( ) ; + + i ) <nl> + { <nl> + / / 2 block gradient aggregation <nl> + / / 2 . 1 . get current model <nl> + Matrix < ElemType > & previousWeight = * m_prevParameters [ i ] - > GetWritableMatrix < ElemType > ( ) ; / / prev model value <nl> + Matrix < ElemType > & currentWeight = * parameterValues [ i ] - > GetWritableMatrix < ElemType > ( ) ; <nl> + Matrix < ElemType > & blockGrad = * m_tempBlockGradient [ i ] - > GetWritableMatrix < ElemType > ( ) ; <nl> + / / 2 . 2 . 
model update <nl> + { <nl> + Matrix < ElemType > & sg = * m_blockLevelSmoothedGradient [ i ] - > GetWritableMatrix < ElemType > ( ) ; / / smoothed gradient <nl> + / / 2 . 2 . 1 update block level smoothed gradient ; <nl> + / / This is essentially a first - order infinite impulse response ( IIR ) filter with the gain ( 1 - blockMomentum ) * m_blockLearningRate : <nl> + / / smoothedGradient ( t ) = blockMomentum * smoothedGradients ( t - 1 ) + ( 1 - blockMomentum ) * m_blockLearningRate * blockGrad ( t ) <nl> + Matrix < ElemType > : : ScaleAndAdd ( ( ElemType ) ( ( 1 - blockMomentum ) * m_blockLearningRate ) , blockGrad , ( ElemType ) blockMomentum , sg ) ; <nl> + / / 2 . 2 . 2 update parameters ; <nl> + currentWeight . SetValue ( previousWeight ) ; <nl> + currentWeight - = sg ; <nl> + / / 2 . 2 . 3 Nesterov Momentum <nl> + / / A Nesterov momentum here is to do a partial weight update before calculating the gradient , i . e . , <nl> + / / ( step 1 ) w ( t ) < - - w ( t ) - \ eta * v ( t ) <nl> + / / ( step 2 ) g ( t + 1 ) < - - forwardbackward on minibatches with initial model as w ( t ) <nl> + / / ( step 3 ) v ( t + 1 ) < - - \ eta * v ( t ) + ( 1 - \ eta ) * learningRate * g ( t + 1 ) <nl> + / / ( step 4 ) w ( t + 1 ) < - - w ( t ) - v ( t ) <nl> + / / ( step 5 ) t < - - t + 1 <nl> + / / without step 1 , this becomes stanard momentum <nl> + if ( m_useNesterovMomentum ) <nl> + { <nl> + Matrix < ElemType > : : ScaleAndAdd ( ( ElemType ) - blockMomentum , sg , currentWeight ) ; <nl> + } <nl> + / / 2 . 2 . 4 update bookkeeping <nl> + previousWeight . SetValue ( currentWeight ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + static double TimeConstant2Momentum ( double timeConstant , size_t syncPeroid ) <nl> + { <nl> + if ( timeConstant = = 0 ) <nl> + return 0 ; <nl> + else <nl> + return exp ( - ( ( double ) syncPeroid ) / timeConstant ) ; <nl> + } <nl> + <nl> + static double Momentum2TimeConstant ( double bm , size_t syncPeroid ) <nl> + { <nl> + if ( bm > = 1 . 
0 | | bm < 0 . 0 ) <nl> + { <nl> + InvalidArgument ( " Unexpected block momentum ( % . 2f ) . Block momentum should be in the range of [ 0 , 1 ) \ n " , bm ) ; <nl> + } <nl> + return - ( double ) syncPeroid / log ( bm ) ; <nl> + } <nl> + <nl> + const bool m_resetSGDMomentumAfterAggregation ; <nl> + const bool m_useNesterovMomentum ; <nl> + const double m_blockLearningRate ; <nl> + const double m_blockMomentumAsTimeConstantPerWorker ; <nl> + <nl> + const size_t m_syncPeriodPerWorker ; <nl> + const size_t m_globalModelAggregationBlockSize ; <nl> + size_t m_numSamplesSeenInCurrentBlock ; <nl> + size_t m_localTotalNumSamplesSeen ; <nl> + <nl> + / / parameters at the last model aggregation point <nl> + std : : vector < NDArrayViewPtr > m_prevParameters ; <nl> + std : : vector < NDArrayViewPtr > m_blockLevelSmoothedGradient ; <nl> + std : : vector < NDArrayViewPtr > m_tempBlockGradient ; <nl> + <nl> + / / temp storage for MPI <nl> + std : : vector < NDArrayViewPtr > m_actionBuffer ; <nl> + <nl> + bool m_prevParamInitialized = false ; <nl> + <nl> + bool m_endOfDataReached ; <nl> + bool m_shutDownSeenBefore = false ; <nl> + <nl> + DISABLE_COPY_AND_MOVE ( BlockMomentumDistributedLearner ) ; <nl> + } ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . b96476973ef <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / BlockMomentumSGD . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include " . . / SGDLib / MASGD . h " <nl> + <nl> + <nl> + <nl> + namespace Microsoft { namespace MSR { namespace CNTK { <nl> + <nl> + / / Implementation of Blockwise Model Update and Filtering ( BMUF , a . k . a . 
block momentum ) <nl> + / / For detail , see the following paper <nl> + / / Kai Chen and Qiang Huo , " Scalable training of deep learning machines by incremental block training <nl> + / / with intra - block parallel optimization and blockwise model - update filtering " , <nl> + / / in International Conference on Acoustics , Speech and Signal Processing , March 2016 , Shanghai , China . <nl> + <nl> + template < typename ElemType > <nl> + class BlockMomentumSGD : public IMASGD < ElemType > <nl> + { <nl> + typedef IMASGD < ElemType > Base ; <nl> + using Base : : m_pMPI ; <nl> + using Base : : m_deviceId ; <nl> + using Base : : DownCast ; <nl> + <nl> + protected : <nl> + bool m_resetSGDMomentumAfterAggregation ; <nl> + bool m_useNesterovMomentum ; <nl> + double m_blockLearningRate ; <nl> + double m_blockMomentumAsTimeConstantPerWorker ; <nl> + size_t m_syncPeriodPerWorker ; <nl> + map < wstring , shared_ptr < Matrix < ElemType > > > m_prevParameters ; / / parameters at the last model aggregation point <nl> + map < wstring , shared_ptr < Matrix < ElemType > > > m_blockLevelSmoothedGradient ; <nl> + <nl> + public : <nl> + BlockMomentumSGD ( const MPIWrapperPtr & pMPI , size_t reportFreq , DEVICEID_TYPE devID , <nl> + bool useNestrovMomentum , bool resetSGDM , <nl> + double blockLearningRate , <nl> + double blockMomentumAsTimeConstant , size_t syncPeriod ) <nl> + : IMASGD < ElemType > ( pMPI , reportFreq , devID ) <nl> + { <nl> + m_syncPeriodPerWorker = syncPeriod / pMPI - > NumNodesInUse ( ) ; <nl> + m_blockMomentumAsTimeConstantPerWorker = blockMomentumAsTimeConstant / pMPI - > NumNodesInUse ( ) ; <nl> + m_useNesterovMomentum = useNestrovMomentum ; <nl> + m_resetSGDMomentumAfterAggregation = resetSGDM ; <nl> + m_blockLearningRate = blockLearningRate ; <nl> + } <nl> + <nl> + / * virtual * / void OnEpochStart ( const std : : list < ComputationNodeBasePtr > & LearnableNodes ) override <nl> + { <nl> + Base : : OnEpochStart ( LearnableNodes ) ; <nl> + for ( auto & pNode : 
LearnableNodes ) <nl> + { <nl> + auto pnode = DownCast ( pNode ) ; <nl> + wstring name = pNode - > NodeName ( ) ; <nl> + <nl> + Matrix < ElemType > & NodeValue = pnode - > Value ( ) ; <nl> + if ( m_blockLevelSmoothedGradient . find ( name ) = = m_blockLevelSmoothedGradient . end ( ) ) <nl> + { <nl> + / / has not been initialized yet <nl> + auto pSmoothedGrad = make_shared < Matrix < ElemType > > ( NodeValue . GetDeviceId ( ) ) ; <nl> + pSmoothedGrad - > Resize ( NodeValue . GetNumRows ( ) , NodeValue . GetNumCols ( ) ) ; <nl> + pSmoothedGrad - > SetValue ( ( ElemType ) 0 ) ; <nl> + m_blockLevelSmoothedGradient [ name ] = pSmoothedGrad ; <nl> + } <nl> + if ( m_prevParameters . find ( name ) = = m_prevParameters . end ( ) ) <nl> + { <nl> + auto pValue = make_shared < Matrix < ElemType > > ( NodeValue . GetDeviceId ( ) ) ; <nl> + pValue - > SetValue ( NodeValue ) ; <nl> + m_prevParameters [ name ] = pValue ; <nl> + } <nl> + else <nl> + { <nl> + m_prevParameters [ name ] - > SetValue ( NodeValue ) ; <nl> + } <nl> + } <nl> + fprintf ( stderr , " Parallel training ( % d workers ) using BlockMomentumSGD with " <nl> + " block momentum = % 6 . 4f , " <nl> + " block momentum time constant ( per worker ) = % 6 . 4f , " <nl> + " block learning rate = % 6 . 4f , " <nl> + " block size per worker = % d samples , " <nl> + " % s " <nl> + " % s " <nl> + " \ n " , <nl> + ( int ) m_pMPI - > NumNodesInUse ( ) , <nl> + BlockMomentumSGD < double > : : TimeConstant2Momentum ( m_blockMomentumAsTimeConstantPerWorker , m_syncPeriodPerWorker ) , <nl> + m_blockMomentumAsTimeConstantPerWorker , <nl> + m_blockLearningRate , <nl> + ( int ) m_syncPeriodPerWorker , <nl> + m_useNesterovMomentum ? " using Nesterov - style block momentum , " : " " , <nl> + m_resetSGDMomentumAfterAggregation ? " resetting SGD momentum after sync . " : " . 
" <nl> + ) ; <nl> + } <nl> + / * virtual * / void OnEpochEnd ( const std : : list < ComputationNodeBasePtr > & LearnableNodes , <nl> + std : : list < Matrix < ElemType > > & smoothedGradient , <nl> + size_t samplesSinceLastSync ) override <nl> + { <nl> + Base : : OnEpochEnd ( LearnableNodes , smoothedGradient , samplesSinceLastSync ) ; <nl> + } <nl> + / * virtual * / void ModelAggregationProcessing ( <nl> + size_t samplesSinceLastSync , <nl> + const std : : list < ComputationNodeBasePtr > & learnableNodes , <nl> + std : : list < Matrix < ElemType > > & smoothedGradient , <nl> + size_t & totalSamplesProcessed , <nl> + float & secondsOnCommunication <nl> + ) override <nl> + { <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + / / 1 . communicate with other nodes to negotiate contribution weights <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + int nTotalSamples = samplesSinceLastSync ; <nl> + ElemType blockMomentum = ( ElemType ) BlockMomentumSGD < double > : : TimeConstant2Momentum ( m_blockMomentumAsTimeConstantPerWorker , m_syncPeriodPerWorker ) ; <nl> + Timer commTimer ; <nl> + secondsOnCommunication = 0 . 0f ; <nl> + commTimer . Start ( ) ; <nl> + m_pMPI - > AllReduce ( & nTotalSamples , 1 ) ; <nl> + commTimer . Stop ( ) ; <nl> + secondsOnCommunication + = ( float ) commTimer . ElapsedSeconds ( ) ; <nl> + totalSamplesProcessed = nTotalSamples ; <nl> + <nl> + for ( auto & pBaseNode : learnableNodes ) <nl> + { <nl> + if ( ! pBaseNode - > IsParameterUpdateRequired ( ) ) <nl> + { <nl> + continue ; <nl> + } <nl> + wstring name = pBaseNode - > NodeName ( ) ; <nl> + / / 2 block gradient aggregation <nl> + auto pNode = DownCast ( pBaseNode ) ; <nl> + / / 2 . 1 . get current model <nl> + Matrix < ElemType > & prevWeight = * m_prevParameters [ name ] ; / / prev model value <nl> + Matrix < ElemType > & currentWeight = pNode - > Value ( ) ; / / current model <nl> + / / 2 . 1 . 2 . 
subtract it from the previous model <nl> + Matrix < ElemType > blockGrad ( prevWeight . DeepClone ( ) ) ; <nl> + blockGrad - = currentWeight ; / / matW becomes local block gradient ( of one worker ) <nl> + / / 2 . 1 . 3 . send block gradient over MPI nodes ; <nl> + unique_ptr < ElemType [ ] > px ( blockGrad . CopyToArray ( ) ) ; <nl> + size_t nx = blockGrad . GetNumElements ( ) ; <nl> + / / 2 . 1 . 4 . inplace sum <nl> + commTimer . Restart ( ) ; <nl> + m_pMPI - > AllReduce ( px . get ( ) , nx ) ; <nl> + commTimer . Stop ( ) ; <nl> + secondsOnCommunication + = ( float ) commTimer . ElapsedSeconds ( ) ; <nl> + / / 2 . 1 . 5 . global block gradient <nl> + blockGrad . SetValue ( blockGrad . GetNumRows ( ) , <nl> + blockGrad . GetNumCols ( ) , <nl> + blockGrad . GetDeviceId ( ) , <nl> + px . get ( ) <nl> + ) ; <nl> + / / 2 . 2 . model update <nl> + { <nl> + / / alias for better readability <nl> + Matrix < ElemType > & smoothedGradientUpdate = * m_blockLevelSmoothedGradient [ name ] ; / / smoothed gradient <nl> + / / 2 . 2 . 1 update block level smoothed gradient ; <nl> + / / This is essentially a first - order infinite impulse response ( IIR ) filter with the gain ( 1 - blockMomentum ) * m_blockLearningRate : <nl> + / / smoothedGradientUpdate ( t ) = blockMomentum * smoothedGradients ( t - 1 ) + ( 1 - blockMomentum ) * m_blockLearningRate * blockGrad ( t ) <nl> + Matrix < ElemType > : : ScaleAndAdd ( ( ElemType ) ( ( 1 - blockMomentum ) * m_blockLearningRate ) , blockGrad , ( ElemType ) blockMomentum , smoothedGradientUpdate ) ; <nl> + / / 2 . 2 . 2 update parameters ; <nl> + currentWeight . SetValue ( prevWeight ) ; <nl> + currentWeight - = smoothedGradientUpdate ; <nl> + / / 2 . 2 . 3 Nesterov Momentum <nl> + / / A Nesterov momentum here is to do a partial weight update before calculating the gradient , i . e . 
, <nl> + / / ( step 1 ) w ( t ) < - - w ( t ) - \ eta * v ( t ) <nl> + / / ( step 2 ) g ( t + 1 ) < - - forwardbackward on minibatches with initial model as w ( t ) <nl> + / / ( step 3 ) v ( t + 1 ) < - - \ eta * v ( t ) + ( 1 - \ eta ) * learningRate * g ( t + 1 ) <nl> + / / ( step 4 ) w ( t + 1 ) < - - w ( t ) - v ( t ) <nl> + / / ( step 5 ) t < - - t + 1 <nl> + / / without step 1 , this becomes stanard momentum <nl> + if ( m_useNesterovMomentum ) <nl> + { <nl> + Matrix < ElemType > : : ScaleAndAdd ( ( ElemType ) - blockMomentum , smoothedGradientUpdate , currentWeight ) ; <nl> + } <nl> + / / 2 . 2 . 4 update bookkeeping <nl> + prevWeight . SetValue ( currentWeight ) ; <nl> + } <nl> + } <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + / / 3 . reset SGD momentum if necessary <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + if ( m_resetSGDMomentumAfterAggregation ) <nl> + { <nl> + for ( Matrix < ElemType > & x : smoothedGradient ) <nl> + { <nl> + x . SetValue ( ( ElemType ) 0 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * virtual * / void SaveToCheckPoint ( File & fstream ) override <nl> + { <nl> + if ( m_pMPI - > IsMainNode ( ) ) <nl> + { <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BMACKP " ) ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BOptions " ) ; <nl> + fstream < < m_resetSGDMomentumAfterAggregation ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerEndSection , L " EOptions " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BMomentumAsTimeConstant " ) ; <nl> + fstream < < m_blockMomentumAsTimeConstantPerWorker ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerEndSection , L " EMomentumAsTimeConstant " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BSyncPeriodInSamples " ) ; <nl> + fstream < < m_syncPeriodPerWorker ; <nl> + fstream . 
PutMarker ( FileMarker : : fileMarkerEndSection , L " ESyncPeriodInSamples " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BParam " ) ; <nl> + SaveParameters ( fstream , m_prevParameters ) ; <nl> + SaveParameters ( fstream , m_blockLevelSmoothedGradient ) ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " EParam " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " EMACKP " ) ; <nl> + } <nl> + } <nl> + / * virtual * / void LoadFromCheckPoint ( File & fstream ) override <nl> + { <nl> + if ( fstream . TryGetMarker ( FileMarker : : fileMarkerBeginSection , L " BMACKP " ) ) <nl> + { <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " BOptions " ) ; <nl> + fstream > > m_resetSGDMomentumAfterAggregation ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerEndSection , L " EOptions " ) ; <nl> + <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " BMomentumAsTimeConstant " ) ; <nl> + fstream > > m_blockMomentumAsTimeConstantPerWorker ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerEndSection , L " EMomentumAsTimeConstant " ) ; <nl> + <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " BSyncPeriodInSamples " ) ; <nl> + fstream > > m_syncPeriodPerWorker ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerEndSection , L " ESyncPeriodInSamples " ) ; <nl> + <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " BParam " ) ; <nl> + LoadParameters ( fstream , m_prevParameters , m_deviceId ) ; <nl> + LoadParameters ( fstream , m_blockLevelSmoothedGradient , m_deviceId ) ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " EParam " ) ; <nl> + <nl> + fstream . 
GetMarker ( FileMarker : : fileMarkerEndSection , L " EMACKP " ) ; <nl> + } <nl> + } <nl> + private : <nl> + / / helper function to save / load map < wstring , shared_ptr < Matrix < ElemType > > structure <nl> + void SaveParameters ( File & f , const map < wstring , shared_ptr < Matrix < ElemType > > > & parameters ) const <nl> + { <nl> + / / save sizeof ( ElemType ) <nl> + unsigned int size = sizeof ( ElemType ) ; <nl> + f < < size ; <nl> + / / save number of pairs <nl> + unsigned int numPairs = parameters . size ( ) ; <nl> + f < < numPairs ; <nl> + for ( auto & x : parameters ) <nl> + { <nl> + f < < x . first ; <nl> + f < < * x . second ; <nl> + } <nl> + f . Flush ( ) ; <nl> + return ; <nl> + } <nl> + void LoadParameters ( File & f , map < wstring , shared_ptr < Matrix < ElemType > > > & parameters , DEVICEID_TYPE deviceID ) <nl> + { <nl> + unsigned int size = 0 ; <nl> + unsigned int pair = 0 ; <nl> + f > > size ; <nl> + f > > pair ; <nl> + if ( size ! = sizeof ( ElemType ) ) <nl> + { <nl> + LogicError ( " Mismatched ElemType in loading BlockMomentumSGD checkpoint . Expecting % s , while loading element size = % d \ n " , <nl> + sizeof ( ElemType ) = = 4 ? " float " : " double " , <nl> + size <nl> + ) ; <nl> + } <nl> + parameters . clear ( ) ; <nl> + for ( size_t i = 0 ; i < pair ; i + + ) <nl> + { <nl> + wstring name ; <nl> + f > > name ; <nl> + shared_ptr < Matrix < ElemType > > mat = make_shared < Matrix < ElemType > > ( deviceID ) ; <nl> + f > > * mat ; <nl> + parameters [ name ] = mat ; <nl> + } <nl> + } <nl> + <nl> + <nl> + public : <nl> + <nl> + static double TimeConstant2Momentum ( double timeConstant , size_t syncPeroid ) <nl> + { <nl> + return exp ( - ( ( double ) syncPeroid ) / timeConstant ) ; <nl> + } <nl> + static double Momentum2TimeConstant ( double bm , size_t syncPeroid ) <nl> + { <nl> + if ( bm > = 1 . 0 | | bm < 0 . 0 ) <nl> + { <nl> + InvalidArgument ( " Unexpected block momentum ( % . 2f ) . 
Block momentum should be in the range of [ 0 , 1 ) \ n " , bm ) ; <nl> + } <nl> + return - ( double ) syncPeroid / log ( bm ) ; <nl> + } <nl> + } ; <nl> + } } } <nl> new file mode 100644 <nl> index 00000000000 . . 3f87ba02f63 <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / MatrixQuantizer . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include " ColumnQuantizer . h " <nl> + # include " QuantizedMatrix . h " <nl> + # include " MatrixQuantizerImpl . h " <nl> + <nl> + namespace Microsoft { namespace MSR { namespace CNTK { <nl> + <nl> + / / This type does the quantization on a matrix <nl> + / / This is a technique to reduce the cost of communicating <nl> + / / the gradient matrices during aggregation across all nodes in <nl> + / / data - parallel SGD training , at the end of each minibatch . <nl> + / / Refer this paper http : / / research . microsoft . com / apps / pubs / ? id = 230137 <nl> + / / for details . <nl> + class MatrixQuantizerBase <nl> + { } ; <nl> + <nl> + template < class ElemType > <nl> + class MatrixQuantizer final : public MatrixQuantizerBase <nl> + { <nl> + public : <nl> + MatrixQuantizer ( size_t numRows , size_t numCols , int deviceId , bool useAsync ) : MatrixQuantizer ( deviceId , useAsync ) <nl> + { <nl> + m_residual = std : : make_shared < Matrix < ElemType > > ( numRows , numCols , deviceId , DENSE ) ; <nl> + } <nl> + <nl> + MatrixQuantizer ( int deviceId , bool useAsync ) : m_residual ( nullptr ) <nl> + { <nl> + m_quantizerImpl . 
reset ( MatrixQuantizerImpl < ElemType > : : Create ( deviceId , useAsync ) ) ; <nl> + } <nl> + <nl> + / / Disallow copy and move construction and assignment <nl> + DISABLE_COPY_AND_MOVE ( MatrixQuantizer ) ; <nl> + <nl> + void QuantizeAsync ( const Matrix < ElemType > & inMatrix , QuantizedMatrix < ElemType > & outQMatrix , bool zeroThresholdFor1Bit ) <nl> + { <nl> + m_quantizerImpl - > QuantizeAsync ( inMatrix , * m_residual , outQMatrix , * m_residual , zeroThresholdFor1Bit ) ; <nl> + } <nl> + <nl> + void QuantizeAsync ( const Matrix < ElemType > & inMatrix , const Matrix < ElemType > & inResidual , QuantizedMatrix < ElemType > & outQMatrix , Matrix < ElemType > & outResidual , bool zeroThresholdFor1Bit ) <nl> + { <nl> + m_quantizerImpl - > QuantizeAsync ( inMatrix , inResidual , outQMatrix , outResidual , zeroThresholdFor1Bit ) ; <nl> + } <nl> + <nl> + void WaitQuantizeAsyncDone ( ) <nl> + { <nl> + m_quantizerImpl - > WaitQuantizeAsyncDone ( ) ; <nl> + } <nl> + <nl> + void UnquantizeAsync ( QuantizedMatrix < ElemType > & inQMatrix , Matrix < ElemType > & outMatrix , bool add = false ) <nl> + { <nl> + m_quantizerImpl - > UnquantizeAsync ( inQMatrix , outMatrix , add ) ; <nl> + } <nl> + <nl> + void WaitUnquantizeAsyncDone ( ) <nl> + { <nl> + m_quantizerImpl - > WaitUnquantizeAsyncDone ( ) ; <nl> + } <nl> + <nl> + int GetDeviceId ( ) const <nl> + { <nl> + return m_quantizerImpl - > GetDeviceId ( ) ; <nl> + } <nl> + <nl> + void ResetResidue ( ) <nl> + { <nl> + m_residual - > SetValue ( 0 . 0 ) ; <nl> + } <nl> + <nl> + const Matrix < ElemType > & GetResidualMatrix ( ) const <nl> + { <nl> + return * m_residual ; <nl> + } <nl> + <nl> + private : <nl> + std : : unique_ptr < MatrixQuantizerImpl < ElemType > > m_quantizerImpl ; <nl> + <nl> + / / the residual matrix <nl> + std : : shared_ptr < Matrix < ElemType > > m_residual ; <nl> + } ; <nl> + <nl> + } } } <nl> new file mode 100644 <nl> index 00000000000 . . 
167cac2437e <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / QuantizedDataParallelDistributedLearner . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include < vector > <nl> + # include " CNTKLibrary . h " <nl> + # include " DistributedLearnerBase . h " <nl> + # include " PerformanceProfiler . h " <nl> + <nl> + namespace CNTK <nl> + { <nl> + / / / <nl> + / / / Quantized Distributed Trainer . <nl> + / / / <nl> + class QuantizedDataParallelDistributedLearner : public DistributedLearnerBase <nl> + { <nl> + public : <nl> + QuantizedDataParallelDistributedLearner ( QuantizedDistributedCommunicatorPtr communicator , LearnerPtr learner , size_t distributeAfterSamples , bool useAsyncBufferedParameterUpdate ) <nl> + : DistributedLearnerBase ( communicator , learner , distributeAfterSamples ) <nl> + { <nl> + if ( useAsyncBufferedParameterUpdate ) <nl> + LogicError ( " Asynchronous parameter update is not yet supported . " ) ; <nl> + } <nl> + <nl> + / / Optional override that gets called per minibatch after finishing gradient computation but before updating model parameters <nl> + bool Update ( std : : unordered_map < Parameter , NDArrayViewPtr > & gradientValues , MinibatchInfo & info ) override <nl> + { <nl> + if ( m_sampleCount > = m_distributeAfterSamples ) <nl> + { <nl> + auto profGradientAgg = Microsoft : : MSR : : CNTK : : ScopeProfile ( Microsoft : : MSR : : CNTK : : profilerEvtMainGradient ) ; <nl> + <nl> + if ( info . IsEmpty ( ) ) <nl> + PrepaireZeroGradients ( gradientValues ) ; <nl> + <nl> + ConvertToOrdered ( gradientValues , m_gradientBuffer ) ; <nl> + <nl> + std : : vector < NDArrayViewPtr > headerToAggregate ; <nl> + headerToAggregate . push_back ( info . evalCriterionValue ) ; <nl> + headerToAggregate . push_back ( info . 
trainingLossValue ) ; <nl> + <nl> + auto value = MakeSharedObject < NDArrayView > ( static_cast < double > ( info . numberOfSamples ) , NDShape { 1 } , DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + headerToAggregate . push_back ( value ) ; <nl> + <nl> + m_communicator - > AggregateInPlace ( headerToAggregate , m_communicator - > Workers ( ) ) ; <nl> + <nl> + info . numberOfSamples = static_cast < size_t > ( * headerToAggregate . back ( ) - > DataBuffer < double > ( ) ) ; <nl> + <nl> + std : : vector < NDArrayViewPtr > gradients ; <nl> + for ( const auto & i : m_gradientBuffer ) <nl> + gradients . push_back ( i . second ) ; <nl> + m_gradientBuffer . clear ( ) ; <nl> + <nl> + dynamic_cast < QuantizedDistributedCommunicator * > ( m_communicator . get ( ) ) - > QuantizedAggregateInPlace ( <nl> + gradients , <nl> + m_residuals , <nl> + m_stripeResiduals , <nl> + m_communicator - > Workers ( ) ) ; <nl> + } <nl> + <nl> + auto profWeights = Microsoft : : MSR : : CNTK : : ScopeProfile ( Microsoft : : MSR : : CNTK : : profilerEvtMainWeights ) ; <nl> + <nl> + m_sampleCount + = info . numberOfSamples ; <nl> + if ( info . IsEmpty ( ) ) <nl> + return false ; <nl> + <nl> + return m_learner - > Update ( gradientValues , info . numberOfSamples , info . atEndOfSweep ) ; <nl> + } <nl> + <nl> + / / Optionally overridable method to get checkpoint state associated with this Distributed train method <nl> + Dictionary CreateCheckpoint ( ) override <nl> + { <nl> + / / Resetting the residuals . <nl> + / / We do this to make sure that the returned checkpoint state is consistent with the in - memory state , since we do not checkpoint the residues . <nl> + for ( size_t i = 0 ; i < m_residuals . size ( ) ; + + i ) <nl> + if ( m_residuals [ i ] - > GetDataType ( ) = = DataType : : Double ) <nl> + m_residuals [ i ] - > SetValue ( 0 . 0 ) ; <nl> + else <nl> + m_residuals [ i ] - > SetValue ( 0 . 0f ) ; <nl> + <nl> + for ( size_t i = 0 ; i < m_stripeResiduals . 
size ( ) ; + + i ) <nl> + if ( m_stripeResiduals [ i ] ) <nl> + if ( m_stripeResiduals [ i ] - > GetDataType ( ) = = DataType : : Double ) <nl> + m_stripeResiduals [ i ] - > SetValue ( 0 . 0 ) ; <nl> + else <nl> + m_stripeResiduals [ i ] - > SetValue ( 0 . 0f ) ; <nl> + <nl> + return DistributedLearnerBase : : CreateCheckpoint ( ) ; <nl> + } <nl> + <nl> + private : <nl> + / / Residuals of quantized gradients . <nl> + std : : vector < NDArrayViewPtr > m_residuals ; <nl> + / / Residuals of quantized aggregated stripes this node is responsible for . <nl> + std : : vector < NDArrayViewPtr > m_stripeResiduals ; <nl> + } ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 1f5fbe1c3e0 <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / QuantizedDistributedCommunicator . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include " Basics . h " <nl> + # include " MPIWrapper . h " <nl> + # include " CNTKLibrary . h " <nl> + # include " MatrixQuantizerImpl . h " <nl> + # include " MatrixQuantizer . h " <nl> + # include " CUDAPageLockedMemAllocator . h " <nl> + # include " Utils . h " <nl> + # include " DistributedCommunicator . 
h " <nl> + <nl> + namespace Microsoft { namespace MSR { namespace CNTK { <nl> + class MatrixQuantizerBase ; <nl> + <nl> + class QuantizedMatrixBase ; <nl> + std : : shared_ptr < QuantizedMatrixBase > QuantizedMatrixBasePtr ; <nl> + <nl> + class CUDAPageLockedMemAllocator ; <nl> + } } } <nl> + <nl> + namespace CNTK <nl> + { <nl> + class QuantizedMPICommunicatorImpl final : public MPICommunicatorImpl , public QuantizedDistributedCommunicator <nl> + { <nl> + using Base = MPICommunicatorImpl ; <nl> + <nl> + template < class T > using vector = std : : vector < T > ; <nl> + template < class T > using shared_ptr = std : : shared_ptr < T > ; <nl> + template < class T > using unordered_set = std : : unordered_set < T > ; <nl> + <nl> + using MpiFail = Microsoft : : MSR : : CNTK : : MpiFail ; <nl> + using QuantizedMatrixBase = Microsoft : : MSR : : CNTK : : QuantizedMatrixBase ; <nl> + using QuantizedMatrixBasePtr = shared_ptr < QuantizedMatrixBase > ; <nl> + using MatrixQuantizerBase = Microsoft : : MSR : : CNTK : : MatrixQuantizerBase ; <nl> + using CUDAPageLockedMemAllocator = Microsoft : : MSR : : CNTK : : CUDAPageLockedMemAllocator ; <nl> + <nl> + template < class T > using MatrixQuantizer = Microsoft : : MSR : : CNTK : : MatrixQuantizer < T > ; <nl> + template < class T > using QuantizedMatrix = Microsoft : : MSR : : CNTK : : QuantizedMatrix < T > ; <nl> + template < class T > using Matrix = Microsoft : : MSR : : CNTK : : Matrix < T > ; <nl> + <nl> + public : <nl> + QuantizedMPICommunicatorImpl ( bool zeroThresholdFor1Bit , bool useQuantizationForSelfStripe , size_t numQuantizationBits ) <nl> + : m_zeroThresholdFor1Bit ( zeroThresholdFor1Bit ) , m_useQuantizationForSelfStripe ( useQuantizationForSelfStripe ) , m_numQuantizationBits ( numQuantizationBits ) <nl> + { } <nl> + <nl> + void QuantizedAggregateInPlace ( <nl> + std : : vector < NDArrayViewPtr > & inValues , <nl> + std : : vector < NDArrayViewPtr > & valueQuantizationResidues , <nl> + std : : vector < 
NDArrayViewPtr > & stripeQuantizationResidues , <nl> + const std : : unordered_set < DistributedWorkerDescriptor > & sendToWorkers ) override <nl> + { <nl> + QuantizedAggregate ( <nl> + inValues , valueQuantizationResidues , stripeQuantizationResidues , <nl> + inValues , valueQuantizationResidues , stripeQuantizationResidues , <nl> + sendToWorkers ) ; <nl> + } <nl> + <nl> + / / A collective communication API to perform quantized aggregation of values across all workers of this communicator <nl> + void QuantizedAggregate ( <nl> + const vector < NDArrayViewPtr > & inValues , <nl> + const vector < NDArrayViewPtr > & valueQuantizationResidues , <nl> + const vector < NDArrayViewPtr > & stripeQuantizationResidues , <nl> + vector < NDArrayViewPtr > & aggregatedOutputs , <nl> + vector < NDArrayViewPtr > & newQuantizationResidues , <nl> + vector < NDArrayViewPtr > & newStripeQuantizationResidues , <nl> + const unordered_set < DistributedWorkerDescriptor > & sendToWorkers ) override <nl> + { <nl> + CheckWorkers ( sendToWorkers ) ; <nl> + <nl> + if ( Workers ( ) . size ( ) = = 1 ) / / No need to aggregate anything . <nl> + { <nl> + aggregatedOutputs = inValues ; <nl> + newQuantizationResidues = valueQuantizationResidues ; <nl> + newStripeQuantizationResidues = stripeQuantizationResidues ; <nl> + return ; <nl> + } <nl> + <nl> + if ( inValues . empty ( ) ) <nl> + return ; <nl> + <nl> + DataType dataType = inValues . front ( ) - > GetDataType ( ) ; <nl> + for ( const auto & v : inValues ) <nl> + { <nl> + if ( v - > GetDataType ( ) ! = dataType ) <nl> + RuntimeError ( " Currently values of different types are not supported for quantize . 
" ) ; <nl> + } <nl> + <nl> + if ( dataType = = DataType : : Float ) <nl> + QuantizedAggregate < float > ( inValues , valueQuantizationResidues , stripeQuantizationResidues , aggregatedOutputs , newQuantizationResidues , newStripeQuantizationResidues , sendToWorkers ) ; <nl> + else if ( dataType = = DataType : : Double ) <nl> + QuantizedAggregate < double > ( inValues , valueQuantizationResidues , stripeQuantizationResidues , aggregatedOutputs , newQuantizationResidues , newStripeQuantizationResidues , sendToWorkers ) ; <nl> + else <nl> + LogicError ( " Unexpected type value . " ) ; <nl> + } <nl> + <nl> + / / Redefining inherited members . <nl> + / / TODO : Use using and virtual inheritance after switching to VS2015 . <nl> + const std : : unordered_set < DistributedWorkerDescriptor > & Workers ( ) const override { return Base : : Workers ( ) ; } <nl> + const DistributedWorkerDescriptor & CurrentWorker ( ) const override { return Base : : CurrentWorker ( ) ; } <nl> + DistributedCommunicatorPtr SubGroup ( const std : : unordered_set < DistributedWorkerDescriptor > & g ) const override { return Base : : SubGroup ( g ) ; } <nl> + void Concatenate ( <nl> + const std : : vector < ValuePtr > & in , <nl> + std : : vector < ValuePtr > & out , <nl> + const std : : unordered_set < DistributedWorkerDescriptor > & w ) override <nl> + { <nl> + Base : : Concatenate ( in , out , w ) ; <nl> + } <nl> + <nl> + void AggregateInPlace ( <nl> + const std : : vector < NDArrayViewPtr > & values , <nl> + const std : : unordered_set < DistributedWorkerDescriptor > & sendToWorkers ) override <nl> + { <nl> + Base : : AggregateInPlace ( values , sendToWorkers ) ; <nl> + } <nl> + <nl> + void Aggregate ( <nl> + const std : : vector < NDArrayViewPtr > & values , <nl> + std : : vector < NDArrayViewPtr > & outputValues , <nl> + const std : : unordered_set < DistributedWorkerDescriptor > & sendToWorkers ) override <nl> + { <nl> + Base : : Aggregate ( values , outputValues , sendToWorkers ) ; <nl> + } 
<nl> + <nl> + void Barrier ( ) override <nl> + { <nl> + Base : : Barrier ( ) ; <nl> + } <nl> + <nl> + virtual void Concatenate ( <nl> + const std : : vector < NDArrayViewPtr > & input , <nl> + std : : vector < NDArrayViewPtr > & output , <nl> + const std : : unordered_set < DistributedWorkerDescriptor > & sendToWorkers ) override <nl> + { <nl> + Base : : Concatenate ( input , output , sendToWorkers ) ; <nl> + } <nl> + <nl> + virtual void Gather ( <nl> + const Dictionary & input , <nl> + std : : vector < DictionaryPtr > & output , <nl> + const std : : unordered_set < DistributedWorkerDescriptor > & sendToWorkers ) override <nl> + { <nl> + Base : : Gather ( input , output , sendToWorkers ) ; <nl> + } <nl> + <nl> + private : <nl> + struct Stripe <nl> + { <nl> + size_t m_startCol ; <nl> + size_t m_numCols ; <nl> + } ; <nl> + <nl> + / / Determine which stripe of the gradient is this node responsible for <nl> + Stripe GetStripeForNode ( size_t numCols , size_t nodeRank , size_t numNodes ) <nl> + { <nl> + size_t numColsPerNode = numCols / numNodes ; <nl> + size_t residue = numCols % numNodes ; <nl> + size_t startColNumofStripe = ( numColsPerNode * nodeRank ) + min ( residue , nodeRank ) ; <nl> + size_t numColsinStripe = numColsPerNode + ( ( nodeRank < residue ) ? 
1 : 0 ) ; <nl> + return Stripe { startColNumofStripe , numColsinStripe } ; <nl> + } <nl> + <nl> + template < typename ElementType > <nl> + MatrixQuantizer < ElementType > & GetQuantizer ( const shared_ptr < MatrixQuantizerBase > & quantizer ) <nl> + { <nl> + return static_cast < MatrixQuantizer < ElementType > & > ( * quantizer ) ; <nl> + } <nl> + <nl> + template < typename ElementType > <nl> + QuantizedMatrix < ElementType > & GetQuantizedMatrix ( QuantizedMatrixBase & matrix ) <nl> + { <nl> + return static_cast < QuantizedMatrix < ElementType > & > ( matrix ) ; <nl> + } <nl> + <nl> + void InitializeBuffers ( <nl> + const vector < NDArrayViewPtr > & inValues , <nl> + vector < NDArrayViewPtr > & valueQuantizationResidues , <nl> + vector < NDArrayViewPtr > & stripeQuantizationResidues , <nl> + vector < NDArrayViewPtr > & aggregatedOutputs , <nl> + vector < NDArrayViewPtr > & newQuantizationResidues , <nl> + vector < NDArrayViewPtr > & newStripeQuantizationResidues ) <nl> + { <nl> + m_preAggregatedGradientQuantizers . resize ( std : : max ( inValues . size ( ) , valueQuantizationResidues . size ( ) ) ) ; <nl> + if ( inValues . size ( ) ! = m_preAggregatedGradientQuantizers . size ( ) ) <nl> + LogicError ( " Number of aggregated values should be equal number of quantized residuals . " ) ; <nl> + <nl> + m_quantizedGradients . resize ( inValues . size ( ) ) ; <nl> + m_aggregatedGradientStripeQuantizers . resize ( std : : max ( inValues . size ( ) , stripeQuantizationResidues . size ( ) ) ) ; <nl> + if ( inValues . size ( ) ! = m_aggregatedGradientStripeQuantizers . size ( ) ) <nl> + LogicError ( " Number of aggregated values should be equal number of striped quantized residuals . " ) ; <nl> + <nl> + m_recvGradientStripesQuantized . resize ( inValues . size ( ) ) ; <nl> + <nl> + if ( valueQuantizationResidues . empty ( ) ) <nl> + valueQuantizationResidues . resize ( inValues . size ( ) ) ; <nl> + <nl> + if ( stripeQuantizationResidues . 
empty ( ) ) <nl> + stripeQuantizationResidues . resize ( inValues . size ( ) ) ; <nl> + <nl> + if ( newQuantizationResidues . empty ( ) ) <nl> + newQuantizationResidues . resize ( inValues . size ( ) ) ; <nl> + <nl> + if ( newStripeQuantizationResidues . empty ( ) ) <nl> + newStripeQuantizationResidues . resize ( inValues . size ( ) ) ; <nl> + <nl> + for ( auto i = 0 ; i < inValues . size ( ) ; + + i ) <nl> + { <nl> + auto view = inValues [ i ] ; <nl> + <nl> + / / Make sure none of the values are sparse - we currently do not support aggregation of sparse matrices <nl> + if ( view - > GetStorageFormat ( ) ! = StorageFormat : : Dense ) <nl> + RuntimeError ( " Aggregation for sparse matrices is currently not supported ! " ) ; <nl> + <nl> + / / Currently we always use async aggregation . Is this correct ? <nl> + if ( view - > GetDataType ( ) = = DataType : : Float ) <nl> + InitializeBuffer < float > ( inValues , valueQuantizationResidues , stripeQuantizationResidues , aggregatedOutputs , newQuantizationResidues , newStripeQuantizationResidues , i ) ; <nl> + else if ( view - > GetDataType ( ) = = DataType : : Double ) <nl> + InitializeBuffer < double > ( inValues , valueQuantizationResidues , stripeQuantizationResidues , aggregatedOutputs , newQuantizationResidues , newStripeQuantizationResidues , i ) ; <nl> + else <nl> + LogicError ( " Unsupported type " ) ; <nl> + } <nl> + } <nl> + <nl> + template < class ElemType > <nl> + void InitializeBuffer ( <nl> + const vector < NDArrayViewPtr > & inValues , <nl> + vector < NDArrayViewPtr > & valueQuantizationResidues , <nl> + vector < NDArrayViewPtr > & stripeQuantizationResidues , <nl> + vector < NDArrayViewPtr > & / * aggregatedOutputs * / , <nl> + vector < NDArrayViewPtr > & newQuantizationResidues , <nl> + vector < NDArrayViewPtr > & newStripeQuantizationResidues , <nl> + size_t index ) <nl> + { <nl> + int rank = static_cast < int > ( CurrentWorker ( ) . 
m_globalRank ) ; <nl> + int numWorkers = static_cast < int > ( Workers ( ) . size ( ) ) ; <nl> + <nl> + auto value = inValues [ index ] ; <nl> + auto v = GetMatrix < ElemType > ( value ) ; <nl> + size_t nRow = v - > GetNumRows ( ) ; <nl> + size_t nCol = v - > GetNumCols ( ) ; <nl> + <nl> + if ( ! valueQuantizationResidues [ index ] ) <nl> + { <nl> + auto residual = MakeSharedObject < NDArrayView > ( AsDataType < ElemType > ( ) , NDShape { nRow , nCol } , AsDeviceDescriptor ( v - > GetDeviceId ( ) ) ) ; <nl> + auto outputResidual = MakeSharedObject < NDArrayView > ( AsDataType < ElemType > ( ) , NDShape { nRow , nCol } , AsDeviceDescriptor ( v - > GetDeviceId ( ) ) ) ; <nl> + valueQuantizationResidues [ index ] = residual ; <nl> + newQuantizationResidues [ index ] = outputResidual ; <nl> + } <nl> + <nl> + Stripe stripe = GetStripeForNode ( v - > GetNumCols ( ) , rank , numWorkers ) ; <nl> + if ( ! stripeQuantizationResidues [ index ] & & stripe . m_numCols > 0 ) <nl> + { <nl> + auto residual = MakeSharedObject < NDArrayView > ( : : CNTK : : AsDataType < ElemType > ( ) , NDShape { nRow , stripe . m_numCols } , AsDeviceDescriptor ( v - > GetDeviceId ( ) ) ) ; <nl> + auto outputResidual = MakeSharedObject < NDArrayView > ( : : CNTK : : AsDataType < ElemType > ( ) , NDShape { nRow , stripe . m_numCols } , AsDeviceDescriptor ( v - > GetDeviceId ( ) ) ) ; <nl> + stripeQuantizationResidues [ index ] = residual ; <nl> + newStripeQuantizationResidues [ index ] = outputResidual ; <nl> + } <nl> + <nl> + auto inResidual = valueQuantizationResidues [ index ] ; <nl> + <nl> + / / Initialize buffer . <nl> + m_quantizedGradients [ index ] = std : : make_shared < QuantizedMatrix < ElemType > > ( v - > GetNumRows ( ) , v - > GetNumCols ( ) , m_numQuantizationBits , CPUDEVICE , m_allocator . get ( ) ) ; <nl> + <nl> + / / Initialize gradient quantizer . 
<nl> + m_preAggregatedGradientQuantizers [ index ] = std : : make_shared < MatrixQuantizer < ElemType > > ( GetMatrix < ElemType > ( inResidual ) - > GetDeviceId ( ) , true ) ; <nl> + <nl> + / / Determine which stripe of the gradient is this node responsible for <nl> + MatrixQuantizer < ElemType > * aggregatedGradientStripeQuantizers = nullptr ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + / / Initialize quantizer <nl> + aggregatedGradientStripeQuantizers = new MatrixQuantizer < ElemType > ( GetMatrix < ElemType > ( inResidual ) - > GetDeviceId ( ) , true ) ; <nl> + m_recvGradientStripesQuantized [ index ] . resize ( numWorkers - 1 ) ; <nl> + for ( size_t j = 0 ; j < numWorkers - 1 ; + + j ) <nl> + m_recvGradientStripesQuantized [ index ] [ j ] = std : : unique_ptr < QuantizedMatrix < ElemType > > ( new QuantizedMatrix < ElemType > ( v - > GetNumRows ( ) , stripe . m_numCols , m_numQuantizationBits , CPUDEVICE , m_allocator . get ( ) ) ) ; <nl> + } <nl> + <nl> + m_aggregatedGradientStripeQuantizers [ index ] = std : : unique_ptr < MatrixQuantizer < ElemType > > ( aggregatedGradientStripeQuantizers ) ; <nl> + } <nl> + <nl> + template < class ElemType > <nl> + void QuantizedAggregate ( <nl> + const vector < NDArrayViewPtr > & inValues , <nl> + const vector < NDArrayViewPtr > & formalValueQuantizationResidues , <nl> + const vector < NDArrayViewPtr > & formalStripeQuantizationResidues , <nl> + vector < NDArrayViewPtr > & aggregatedOutputs , <nl> + vector < NDArrayViewPtr > & newQuantizationResidues , <nl> + vector < NDArrayViewPtr > & newStripeQuantizationResidues , <nl> + const unordered_set < DistributedWorkerDescriptor > & sendToWorkers ) <nl> + { <nl> + CheckWorkers ( sendToWorkers ) ; <nl> + <nl> + const int numWorkers = static_cast < int > ( Workers ( ) . size ( ) ) ; <nl> + const int rank = static_cast < int > ( CurrentWorker ( ) . 
m_globalRank ) ; <nl> + <nl> + auto valueQuantizationResidues = formalValueQuantizationResidues ; <nl> + auto stripeQuantizationResidues = formalStripeQuantizationResidues ; <nl> + <nl> + InitializeBuffers ( <nl> + inValues , <nl> + valueQuantizationResidues , <nl> + stripeQuantizationResidues , <nl> + aggregatedOutputs , <nl> + newQuantizationResidues , <nl> + newStripeQuantizationResidues ) ; <nl> + <nl> + vector < shared_ptr < Matrix < ElemType > > > inputValues ; <nl> + vector < shared_ptr < Matrix < ElemType > > > outputValues ; <nl> + vector < shared_ptr < Matrix < ElemType > > > inputResiduals ; <nl> + vector < shared_ptr < Matrix < ElemType > > > outputResiduals ; <nl> + vector < shared_ptr < Matrix < ElemType > > > inputStripeResiduals ; <nl> + vector < shared_ptr < Matrix < ElemType > > > outputStripeResiduals ; <nl> + <nl> + / / Check that input corresponds to output and covert NDArrayViews to the corresponding matrices . <nl> + for ( size_t i = 0 ; i < inValues . size ( ) ; i + + ) <nl> + { <nl> + assert ( inValues [ i ] - > Shape ( ) . TotalSize ( ) = = aggregatedOutputs [ i ] - > Shape ( ) . TotalSize ( ) ) ; <nl> + assert ( inValues [ i ] - > GetDataType ( ) = = aggregatedOutputs [ i ] - > GetDataType ( ) ) ; <nl> + assert ( inValues [ i ] - > Device ( ) = = aggregatedOutputs [ i ] - > Device ( ) ) ; <nl> + <nl> + assert ( inValues [ i ] ! = nullptr ) ; <nl> + inputValues . push_back ( GetWritableMatrix < ElemType > ( inValues [ i ] ) ) ; <nl> + <nl> + assert ( aggregatedOutputs [ i ] ! = nullptr ) ; <nl> + outputValues . push_back ( GetWritableMatrix < ElemType > ( aggregatedOutputs [ i ] ) ) ; <nl> + <nl> + assert ( valueQuantizationResidues [ i ] ! = nullptr ) ; <nl> + inputResiduals . push_back ( GetWritableMatrix < ElemType > ( valueQuantizationResidues [ i ] ) ) ; <nl> + <nl> + assert ( newQuantizationResidues [ i ] ! = nullptr ) ; <nl> + outputResiduals . 
push_back ( GetWritableMatrix < ElemType > ( newQuantizationResidues [ i ] ) ) ; ; <nl> + <nl> + / / Stripe residuals can be null in case when the stripe does not belong to this node . <nl> + inputStripeResiduals . push_back ( stripeQuantizationResidues [ i ] ? GetWritableMatrix < ElemType > ( stripeQuantizationResidues [ i ] ) : nullptr ) ; ; <nl> + outputStripeResiduals . push_back ( newStripeQuantizationResidues [ i ] ? GetWritableMatrix < ElemType > ( newStripeQuantizationResidues [ i ] ) : nullptr ) ; <nl> + } <nl> + <nl> + / / Prepare receiving buffers . <nl> + vector < std : : unique_ptr < Matrix < ElemType > > > aggGradStripes ; <nl> + vector < std : : unique_ptr < QuantizedMatrix < ElemType > > > aggGradStripesQuantized ; <nl> + for ( size_t i = 0 ; i < inputValues . size ( ) ; i + + ) <nl> + { <nl> + size_t nCol = inputValues [ i ] - > GetNumCols ( ) ; <nl> + <nl> + / / Determine which stripe of the gradient is this node responsible for <nl> + Stripe stripe = GetStripeForNode ( nCol , rank , numWorkers ) ; <nl> + Matrix < ElemType > * currAggGradStripe = nullptr ; <nl> + QuantizedMatrix < ElemType > * currAggGradStripeQuantized = nullptr ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + currAggGradStripe = new Matrix < ElemType > ( inputValues [ i ] - > ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ) ; <nl> + currAggGradStripeQuantized = new QuantizedMatrix < ElemType > ( GetQuantizedMatrix < ElemType > ( * m_quantizedGradients [ i ] ) . ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ) ; <nl> + } <nl> + <nl> + aggGradStripes . push_back ( std : : unique_ptr < Matrix < ElemType > > ( currAggGradStripe ) ) ; <nl> + aggGradStripesQuantized . push_back ( std : : unique_ptr < QuantizedMatrix < ElemType > > ( currAggGradStripeQuantized ) ) ; <nl> + } <nl> + <nl> + / / Initiate quantization of the gradient matrices <nl> + for ( size_t i = 0 ; i < inValues . 
size ( ) ; + + i ) <nl> + GetQuantizer < ElemType > ( m_preAggregatedGradientQuantizers [ i ] ) . QuantizeAsync ( * ( inputValues [ i ] ) , * ( inputResiduals [ i ] ) , GetQuantizedMatrix < ElemType > ( * ( m_quantizedGradients [ i ] ) ) , * ( outputResiduals [ i ] ) , m_zeroThresholdFor1Bit ) ; <nl> + <nl> + / / Initiate receive of the stripe to be aggregated by the current node , from all other nodes <nl> + vector < MPI_Request > recvGradStripesQuantizedRequests ; <nl> + vector < int > recvRequestIdxToGradientMatrixIdxMap ; <nl> + for ( int i = 0 ; i < inputValues . size ( ) ; + + i ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( inputValues [ i ] - > GetNumCols ( ) , rank , numWorkers ) ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + recvRequestIdxToGradientMatrixIdxMap . push_back ( i ) ; <nl> + for ( int j = 0 ; j < numWorkers - 1 ; + + j ) <nl> + { <nl> + int source = ( j > = rank ) ? ( j + 1 ) : j ; <nl> + <nl> + recvGradStripesQuantizedRequests . push_back ( MPI_Request ( ) ) ; <nl> + int recvRequestIdx = ( int ) recvGradStripesQuantizedRequests . size ( ) - 1 ; <nl> + <nl> + m_mpi - > Irecv ( GetQuantizedMatrix < ElemType > ( * m_recvGradientStripesQuantized [ i ] [ j ] ) . Buffer ( ) , ( int ) GetQuantizedMatrix < ElemType > ( * m_recvGradientStripesQuantized [ i ] [ j ] ) . GetSize ( ) , MPI_CHAR , source , i , & ( recvGradStripesQuantizedRequests [ recvRequestIdx ] ) ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Asynchronously send stripes of the quantized gradient matrices to the respective nodes that own aggregation of that stripe <nl> + std : : vector < std : : vector < MPI_Request > > sendGradStripesQuantizedRequests ( inValues . size ( ) ) ; <nl> + for ( int i = 0 ; i < inValues . size ( ) ; + + i ) <nl> + { <nl> + GetQuantizer < ElemType > ( m_preAggregatedGradientQuantizers [ i ] ) . 
WaitQuantizeAsyncDone ( ) ; <nl> + <nl> + size_t sendRequestIdx = 0 ; <nl> + for ( int j = 0 ; j < numWorkers ; + + j ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( inputValues [ i ] - > GetNumCols ( ) , j , numWorkers ) ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + / / Do not send stripe for self <nl> + if ( j ! = rank ) <nl> + { <nl> + sendGradStripesQuantizedRequests [ i ] . push_back ( MPI_Request ( ) ) ; <nl> + QuantizedMatrix < ElemType > quantizedStripe = GetQuantizedMatrix < ElemType > ( * m_quantizedGradients [ i ] ) . ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ; <nl> + <nl> + m_mpi - > Isend ( quantizedStripe . Buffer ( ) , ( int ) quantizedStripe . GetSize ( ) , MPI_CHAR , j , i , & ( sendGradStripesQuantizedRequests [ i ] [ sendRequestIdx ] ) ) | | MpiFail ( " MPI_Isend " ) ; <nl> + sendRequestIdx + + ; <nl> + } <nl> + else <nl> + { <nl> + / / Initialize the aggregate for the stripe with the quantized gradients instead of the original <nl> + / / gradients themselves , if so desired <nl> + if ( m_useQuantizationForSelfStripe ) <nl> + { <nl> + QuantizedMatrix < ElemType > preAggGradSelfStripeQuantized = GetQuantizedMatrix < ElemType > ( * m_quantizedGradients [ i ] ) . ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ; <nl> + GetQuantizer < ElemType > ( m_aggregatedGradientStripeQuantizers [ i ] ) . UnquantizeAsync ( preAggGradSelfStripeQuantized , * ( aggGradStripes [ i ] ) , false ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Wait for the stripes to arrive from each node and unquantize and aggregate <nl> + size_t numReceivesExpected = recvGradStripesQuantizedRequests . size ( ) ; <nl> + size_t numActualReceives = 0 ; <nl> + std : : vector < int > perGradMatrixReceiveCount ( recvRequestIdxToGradientMatrixIdxMap . size ( ) , 0 ) ; <nl> + while ( numActualReceives < numReceivesExpected ) <nl> + { <nl> + int idx = MPI_UNDEFINED ; <nl> + m_mpi - > Waitany ( ( int ) recvGradStripesQuantizedRequests . 
size ( ) , recvGradStripesQuantizedRequests . data ( ) , & idx , MPI_STATUS_IGNORE ) | | MpiFail ( " MPI_Waitany " ) ; <nl> + if ( idx = = MPI_UNDEFINED ) <nl> + { <nl> + break ; <nl> + } <nl> + <nl> + numActualReceives + + ; <nl> + <nl> + int gradMatrixIdxPosition = idx / ( numWorkers - 1 ) ; <nl> + int recvBufferSubIndex = idx % ( numWorkers - 1 ) ; <nl> + <nl> + / / Map idx back to the actual gradient matrix index <nl> + int gradMatrixIdx = recvRequestIdxToGradientMatrixIdxMap [ gradMatrixIdxPosition ] ; <nl> + <nl> + / / Wait for the previous Unquantize to finish before issuing a new one <nl> + if ( m_useQuantizationForSelfStripe | | ( perGradMatrixReceiveCount [ gradMatrixIdxPosition ] > 0 ) ) <nl> + GetQuantizer < ElemType > ( m_aggregatedGradientStripeQuantizers [ gradMatrixIdx ] ) . WaitUnquantizeAsyncDone ( ) ; <nl> + <nl> + GetQuantizer < ElemType > ( m_aggregatedGradientStripeQuantizers [ gradMatrixIdx ] ) . UnquantizeAsync ( <nl> + GetQuantizedMatrix < ElemType > ( * m_recvGradientStripesQuantized [ gradMatrixIdx ] [ recvBufferSubIndex ] ) , <nl> + * ( aggGradStripes [ gradMatrixIdx ] ) , <nl> + true ) ; <nl> + <nl> + perGradMatrixReceiveCount [ gradMatrixIdxPosition ] + + ; <nl> + <nl> + / / Also issue the quantization if this stripe was the last one expected for this matrix <nl> + / / Note : We issue the quantization without waiting for the unquantization since the same stream <nl> + / / is used for both and they are implicitly sequenced <nl> + / / We reuse the buffer that we used for quantizing and sending out the pre - aggregation gradient <nl> + if ( perGradMatrixReceiveCount [ gradMatrixIdxPosition ] = = ( numWorkers - 1 ) ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( inputValues [ gradMatrixIdx ] - > GetNumCols ( ) , rank , numWorkers ) ; <nl> + UNUSED ( stripe ) ; <nl> + assert ( stripe . m_numCols > 0 ) ; <nl> + GetQuantizer < ElemType > ( m_aggregatedGradientStripeQuantizers [ gradMatrixIdx ] ) . 
QuantizeAsync ( <nl> + * ( aggGradStripes [ gradMatrixIdx ] ) , <nl> + * ( inputStripeResiduals [ gradMatrixIdx ] ) , <nl> + * ( aggGradStripesQuantized [ gradMatrixIdx ] ) , <nl> + * ( outputStripeResiduals [ gradMatrixIdx ] ) , <nl> + m_zeroThresholdFor1Bit ) ; <nl> + } <nl> + } <nl> + <nl> + assert ( numActualReceives = = numReceivesExpected ) ; <nl> + <nl> + vector < vector < MPI_Request > > recvAggGradStripesQuantizedRequests ( inValues . size ( ) ) ; <nl> + / / Initiate receive of stripes of quantized aggregated gradients from different nodes <nl> + for ( int i = 0 ; i < inValues . size ( ) ; + + i ) <nl> + { <nl> + int recvRequestIdx = 0 ; <nl> + for ( int j = 0 ; j < numWorkers ; + + j ) <nl> + { <nl> + / / Do not recv stripe for self <nl> + if ( j ! = rank ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( inputValues [ i ] - > GetNumCols ( ) , j , numWorkers ) ; <nl> + if ( stripe . m_numCols > 0 ) <nl> + { <nl> + recvAggGradStripesQuantizedRequests [ i ] . push_back ( MPI_Request ( ) ) ; <nl> + QuantizedMatrix < ElemType > quantizedStripe = GetQuantizedMatrix < ElemType > ( * m_quantizedGradients [ i ] ) . ColumnSlice ( stripe . m_startCol , stripe . m_numCols ) ; <nl> + m_mpi - > Irecv ( quantizedStripe . Buffer ( ) , ( int ) quantizedStripe . GetSize ( ) , MPI_CHAR , j , ( int ) inValues . size ( ) + 1 + i , & ( recvAggGradStripesQuantizedRequests [ i ] [ recvRequestIdx ] ) ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + recvRequestIdx + + ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Initiate broadcast of quantized aggregated gradient stripes to all other nodes <nl> + vector < vector < MPI_Request > > sendAggGradStripeQuantizedRequests ( inValues . size ( ) ) ; <nl> + for ( int i = 0 ; i < inValues . size ( ) ; + + i ) <nl> + { <nl> + Stripe stripe = GetStripeForNode ( inputValues [ i ] - > GetNumCols ( ) , rank , numWorkers ) ; <nl> + if ( stripe . 
m_numCols > 0 ) <nl> + { <nl> + sendAggGradStripeQuantizedRequests [ i ] = std : : vector < MPI_Request > ( numWorkers - 1 ) ; <nl> + GetQuantizer < ElemType > ( m_aggregatedGradientStripeQuantizers [ i ] ) . WaitQuantizeAsyncDone ( ) ; <nl> + for ( int j = 0 ; j < numWorkers - 1 ; + + j ) <nl> + { <nl> + int dest = ( j > = rank ) ? ( j + 1 ) : j ; <nl> + <nl> + / / TODO : Should we use MPI_Bcast instead for better performance <nl> + m_mpi - > Isend ( aggGradStripesQuantized [ i ] - > Buffer ( ) , ( int ) aggGradStripesQuantized [ i ] - > GetSize ( ) , MPI_CHAR , dest , ( int ) inValues . size ( ) + 1 + i , & ( sendAggGradStripeQuantizedRequests [ i ] [ j ] ) ) | | MpiFail ( " MPI_Irecv " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Wait to receive all aggregated stripes and unquantize <nl> + for ( size_t i = 0 ; i < inValues . size ( ) ; + + i ) <nl> + { <nl> + m_mpi - > Waitall ( ( int ) recvAggGradStripesQuantizedRequests [ i ] . size ( ) , recvAggGradStripesQuantizedRequests [ i ] . data ( ) , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Waitall " ) ; <nl> + GetQuantizer < ElemType > ( m_preAggregatedGradientQuantizers [ i ] ) . UnquantizeAsync ( GetQuantizedMatrix < ElemType > ( * m_quantizedGradients [ i ] ) , * ( outputValues [ i ] ) , false ) ; <nl> + } <nl> + <nl> + / / Wait for all the unquantizations to finish <nl> + for ( size_t i = 0 ; i < inValues . size ( ) ; + + i ) <nl> + GetQuantizer < ElemType > ( m_preAggregatedGradientQuantizers [ i ] ) . WaitUnquantizeAsyncDone ( ) ; <nl> + <nl> + / / Wait for completion of the async send requests <nl> + for ( int i = 0 ; i < sendGradStripesQuantizedRequests . size ( ) ; + + i ) <nl> + { <nl> + if ( sendGradStripesQuantizedRequests [ i ] . size ( ) > 0 ) <nl> + m_mpi - > Waitall ( ( int ) sendGradStripesQuantizedRequests [ i ] . size ( ) , sendGradStripesQuantizedRequests [ i ] . 
data ( ) , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Waitall " ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < sendAggGradStripeQuantizedRequests . size ( ) ; + + i ) <nl> + { <nl> + if ( sendAggGradStripeQuantizedRequests [ i ] . size ( ) > 0 ) <nl> + m_mpi - > Waitall ( ( int ) sendAggGradStripeQuantizedRequests [ i ] . size ( ) , sendAggGradStripeQuantizedRequests [ i ] . data ( ) , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Waitall " ) ; <nl> + } <nl> + } <nl> + <nl> + / / option for handling the mean for 1 - bit quantization <nl> + / / force 1 - bit quant to threshold against 0 rather than the midpoint between lower and upper <nl> + const bool m_zeroThresholdFor1Bit ; <nl> + <nl> + / / Number of bits that each gradient value is quantized to before communication with other nodes . <nl> + const size_t m_numQuantizationBits ; <nl> + <nl> + / / Since the self - stripe in an all - reduce is not communicated , there is really no reason to <nl> + / / quantize it for reduced communication . However , we add this as an option for for consistency <nl> + / / across all stripes if desired <nl> + const bool m_useQuantizationForSelfStripe ; <nl> + <nl> + const std : : unique_ptr < CUDAPageLockedMemAllocator > m_allocator ; <nl> + <nl> + / / Buffer for quantized gradients . <nl> + vector < QuantizedMatrixBasePtr > m_quantizedGradients ; <nl> + <nl> + / / Buffer for quantized stripes . <nl> + vector < vector < QuantizedMatrixBasePtr > > m_recvGradientStripesQuantized ; <nl> + <nl> + / / Quantizers to quantize initial gradients . <nl> + vector < shared_ptr < MatrixQuantizerBase > > m_preAggregatedGradientQuantizers ; <nl> + <nl> + / / Quantizers to quantize aggregated stripes . <nl> + vector < shared_ptr < MatrixQuantizerBase > > m_aggregatedGradientStripeQuantizers ; <nl> + } ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 39c05dd0bc2 <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / V2AllReduceDistGradAggregator . 
h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # undef _SCL_SECURE_NO_WARNINGS <nl> + # include " CNTKLibrary . h " <nl> + # include " Utils . h " <nl> + <nl> + # include " IDistGradAggregator . h " <nl> + # include " CUDAPageLockedMemAllocator . h " <nl> + # include " QuantizedMatrix . h " <nl> + # include " MatrixQuantizer . h " <nl> + # include " MatrixQuantizerGPU . h " <nl> + # include < future > <nl> + # include " TimerUtility . h " <nl> + <nl> + namespace Microsoft { namespace MSR { namespace CNTK { <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / AllReduceDistGradAggregator - - 1 - bit SGD . <nl> + / / This implements <nl> + / / Frank Seide , Hao Fu , Jasha Droppo , Gang Li , and Dong Yu : <nl> + / / " 1 - bit stochastic gradient descent and its application to data - parallel distributed training of speech DNNs " <nl> + / / In Proc . Interspeech 2014 . 
<nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + template < class ElemType > <nl> + class V2AllReduceDistGradAggregator : public IDistGradAggregator < ElemType > <nl> + { <nl> + UsingIDistGradAggregatorMembers ; <nl> + <nl> + static const int DEBUG_OUTPUT_TRACE_LEVEL = 3 ; <nl> + : : CNTK : : QuantizedDistributedCommunicatorPtr m_communicator ; <nl> + <nl> + public : <nl> + V2AllReduceDistGradAggregator ( : : CNTK : : QuantizedDistributedCommunicatorPtr communicator , bool useAsyncAggregation , int traceLevel , int syncStatsTrace ) <nl> + : IDistGradAggregator < ElemType > ( nullptr ) , m_traceLevel ( traceLevel ) , m_initialized ( false ) , m_useAsyncAggregation ( useAsyncAggregation ) , m_bufferedGradHeader ( nullptr ) , m_syncStatsTrace ( syncStatsTrace ) , m_iterationCount ( 0 ) , <nl> + m_communicator ( communicator ) <nl> + { } <nl> + <nl> + ~ V2AllReduceDistGradAggregator ( ) <nl> + { <nl> + if ( m_bufferedGradHeader ! = nullptr ) <nl> + DistGradHeader : : Destroy ( m_bufferedGradHeader ) ; <nl> + } <nl> + <nl> + void Initialize ( const std : : vector < Matrix < ElemType > * > & gradients , int numEvalNodes ) <nl> + { <nl> + / / When called the first time let ' s setup the quantizers and matrices for holding quantized values . <nl> + / / These can live for the lifetime of the aggregator since the gradient matrix dimensions for learnable parameters <nl> + / / do not change <nl> + m_initialized = true ; <nl> + int deviceId = gradients [ 0 ] - > GetDeviceId ( ) ; <nl> + <nl> + for ( size_t i = 0 ; i < gradients . size ( ) ; i + + ) <nl> + { <nl> + / / Make sure none of the gradient matrices are sparse - we currently do not support aggregation of sparse gradient matrices <nl> + if ( gradients [ i ] - > GetMatrixType ( ) ! = DENSE ) <nl> + RuntimeError ( " Gradient aggregation for sparse gradient matrices is currently unsupported ! 
" ) ; <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + m_bufferedGradients [ gradients [ i ] ] . reset ( new Matrix < ElemType > ( gradients [ i ] - > GetNumRows ( ) , gradients [ i ] - > GetNumCols ( ) , deviceId ) ) ; <nl> + } <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + m_bufferedGradHeader = DistGradHeader : : Create ( numEvalNodes ) ; <nl> + m_bufferedGradHeader - > Clear ( ) ; <nl> + } <nl> + } <nl> + <nl> + void ResetState ( const std : : vector < Matrix < ElemType > * > & gradients ) <nl> + { <nl> + / / If we are resetting state , let ' s clear previous quantization residues <nl> + / / Make sure there is no pending async aggregation <nl> + if ( m_useAsyncAggregation & & m_pendingAsyncAggregation . valid ( ) ) <nl> + LogicError ( " Unexpected pending async gradient aggregation found when resetting aggregator state ! " ) ; <nl> + <nl> + for ( size_t i = 0 ; i < m_residuals . size ( ) ; + + i ) <nl> + m_residuals [ i ] - > SetValue ( static_cast < ElemType > ( 0 . 0 ) ) ; <nl> + <nl> + for ( size_t i = 0 ; i < m_stripeResiduals . size ( ) ; + + i ) <nl> + if ( m_stripeResiduals [ i ] ) <nl> + m_stripeResiduals [ i ] - > SetValue ( static_cast < ElemType > ( 0 . 0 ) ) ; <nl> + <nl> + / / Zero out the buffered gradients if resetting state <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + for ( size_t i = 0 ; i < gradients . size ( ) ; i + + ) <nl> + m_bufferedGradients [ gradients [ i ] ] - > SetValue ( static_cast < ElemType > ( 0 ) ) ; <nl> + <nl> + m_bufferedGradHeader - > Clear ( ) ; <nl> + } <nl> + } <nl> + <nl> + / / Aggregate the gradient matrices across all nodes <nl> + bool AggregateGradients ( const std : : vector < Matrix < ElemType > * > & gradients , DistGradHeader * headerCPU , bool resetState ) override <nl> + { <nl> + if ( ! 
m_initialized ) <nl> + Initialize ( gradients , headerCPU - > numEvalNode ) ; <nl> + else if ( resetState ) <nl> + ResetState ( gradients ) ; <nl> + <nl> + bool showSyncPerfStats = ( m_syncStatsTrace > 0 ) & & ( ( m_iterationCount % m_syncStatsTrace ) = = 0 ) ; <nl> + m_iterationCount + + ; <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + / / If we are performing async gradient aggregation , let ' s wait for the pending gradient aggregation to finish <nl> + / / then swap the contents of the buffered gradients and the new gradient matrices and fire an async aggreagation <nl> + / / of the new gradient matrices <nl> + if ( m_pendingAsyncAggregation . valid ( ) ) <nl> + { <nl> + Timer aggregationTimer ; <nl> + if ( showSyncPerfStats ) <nl> + aggregationTimer . Start ( ) ; <nl> + <nl> + m_pendingAsyncAggregation . get ( ) ; <nl> + <nl> + if ( showSyncPerfStats ) <nl> + { <nl> + aggregationTimer . Stop ( ) ; <nl> + double gradientAggregationTime = aggregationTimer . ElapsedSeconds ( ) ; <nl> + fprintf ( stderr , " Async gradient aggregation wait time : % . 6g \ n " , gradientAggregationTime ) ; <nl> + } <nl> + } <nl> + <nl> + std : : vector < Matrix < ElemType > * > newGradients ; <nl> + size_t numGradMatrices = gradients . size ( ) ; <nl> + for ( size_t i = 0 ; i < numGradMatrices ; i + + ) <nl> + { <nl> + Matrix < ElemType > * bufferedGradientMatrix = m_bufferedGradients [ gradients [ i ] ] . get ( ) ; <nl> + if ( ( bufferedGradientMatrix = = nullptr ) | | <nl> + ( bufferedGradientMatrix - > GetNumCols ( ) ! = gradients [ i ] - > GetNumCols ( ) ) | | <nl> + ( bufferedGradientMatrix - > GetNumRows ( ) ! = gradients [ i ] - > GetNumRows ( ) ) | | <nl> + ( bufferedGradientMatrix - > GetDeviceId ( ) ! = gradients [ i ] - > GetDeviceId ( ) ) ) <nl> + { <nl> + LogicError ( " No buffered gradient matrix found corresponding to a gradient matrix to be aggregated ! 
" ) ; <nl> + } <nl> + <nl> + / / Swap the gradient matrix contents with the buffered matrices <nl> + std : : swap ( * ( gradients [ i ] ) , * bufferedGradientMatrix ) ; <nl> + <nl> + newGradients . push_back ( bufferedGradientMatrix ) ; <nl> + } <nl> + <nl> + / / Swap the grad header contents with the buffered grad header <nl> + swap ( * headerCPU , * m_bufferedGradHeader ) ; <nl> + <nl> + / / Initiate aggregation only if any samples were processed in previous iteration <nl> + if ( resetState | | ( headerCPU - > numSamples ! = 0 ) ) <nl> + { <nl> + int deviceId = gradients [ 0 ] - > GetDeviceId ( ) ; <nl> + DistGradHeader * newGradHeader = m_bufferedGradHeader ; <nl> + <nl> + / / Since we will be aggregating the gradients asynchronously , let us <nl> + / / ensure that the gradient matrices have been computed before starting to aggregate <nl> + / / them asynchronously on another thread . This essentially means that when we are using <nl> + / / a GPU device , we will synchronize on the main GPU compute stream before starting <nl> + / / the gradient aggregation asynchronously on a separate stream <nl> + MatrixComputeStreamEvent * mainStreamSyncEvent = MatrixComputeStreamEvent : : Create ( deviceId ) ; <nl> + <nl> + m_pendingAsyncAggregation = std : : async ( std : : launch : : async , [ = ] { <nl> + / / We are starting on a new thread . 
Make sure the new thread is <nl> + / / setup to use the right device <nl> + Matrix < ElemType > : : SetDevice ( deviceId ) ; <nl> + <nl> + / / Synchronize the Quantization compute stream with the completion of <nl> + / / compute of the gradient matrices on the main compute stream <nl> + mainStreamSyncEvent - > SynchronizeQuantizationComputeStreamWithEvent < ElemType > ( ) ; <nl> + delete mainStreamSyncEvent ; <nl> + <nl> + AggregateGradientsImpl ( newGradients , newGradHeader , showSyncPerfStats ) ; <nl> + } ) ; <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + else <nl> + { <nl> + AggregateGradientsImpl ( gradients , headerCPU , showSyncPerfStats ) ; <nl> + return ( headerCPU - > numSamples ! = 0 ) ; <nl> + } <nl> + } <nl> + <nl> + void AggregateGradientsImpl ( const std : : vector < Matrix < ElemType > * > & gradients , DistGradHeader * headerCPU , bool showSyncPerfStats ) <nl> + { <nl> + Timer aggregationTimer ; <nl> + int deviceId = gradients [ 0 ] - > GetDeviceId ( ) ; <nl> + if ( showSyncPerfStats ) <nl> + { <nl> + std : : unique_ptr < MatrixComputeStreamEvent > mainStreamSyncEvent ( MatrixComputeStreamEvent : : Create ( deviceId ) ) ; <nl> + mainStreamSyncEvent - > SynchronizeEvent ( ) ; <nl> + aggregationTimer . Start ( ) ; <nl> + } <nl> + <nl> + size_t numGradMatrices = gradients . size ( ) ; <nl> + <nl> + if ( headerCPU - > numSamples = = 0 ) <nl> + { <nl> + assert ( headerCPU - > criterion = = 0 . 0 ) ; <nl> + assert ( headerCPU - > numSamplesWithLabel = = 0 ) ; <nl> + for ( int i = 0 ; i < headerCPU - > numEvalNode ; + + i ) <nl> + assert ( headerCPU - > evalErrors [ i ] . first = = 0 & & headerCPU - > evalErrors [ i ] . 
second = = 0 ) ; <nl> + <nl> + / / If the current node did not process any samples , the gradients should be zero ' d <nl> + for ( size_t i = 0 ; i < numGradMatrices ; + + i ) <nl> + gradients [ i ] - > SetValue ( static_cast < ElemType > ( 0 ) ) ; <nl> + <nl> + if ( m_useAsyncAggregation ) <nl> + { <nl> + std : : unique_ptr < MatrixComputeStreamEvent > mainStreamSyncEvent ( MatrixComputeStreamEvent : : Create ( deviceId ) ) ; <nl> + mainStreamSyncEvent - > SynchronizeQuantizationComputeStreamWithEvent < ElemType > ( ) ; <nl> + } <nl> + } <nl> + <nl> + / / Aggregate header . <nl> + size_t numberOfElements = 1 + 1 + 1 + headerCPU - > numEvalNode * 2 ; <nl> + std : : unique_ptr < double [ ] > headerBuffer ( new double [ numberOfElements ] ) ; <nl> + headerBuffer [ 0 ] = headerCPU - > criterion ; <nl> + headerBuffer [ 1 ] = static_cast < double > ( headerCPU - > numSamples ) ; <nl> + headerBuffer [ 2 ] = static_cast < double > ( headerCPU - > numSamplesWithLabel ) ; <nl> + for ( size_t i = 0 ; i < headerCPU - > numEvalNode ; + + i ) <nl> + { <nl> + headerBuffer [ 3 + 2 * i ] = headerCPU - > evalErrors [ i ] . first ; <nl> + headerBuffer [ 3 + 2 * i + 1 ] = static_cast < double > ( headerCPU - > evalErrors [ i ] . second ) ; <nl> + } <nl> + <nl> + auto headerData = : : CNTK : : MakeSharedObject < : : CNTK : : NDArrayView > ( : : CNTK : : DataType : : Double , : : CNTK : : NDShape { numberOfElements } , headerBuffer . 
get ( ) , numberOfElements * sizeof ( double ) , : : CNTK : : DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + std : : vector < : : CNTK : : NDArrayViewPtr > valuesToAggregate { headerData } ; <nl> + <nl> + / / TODO : Should be async <nl> + m_communicator - > AggregateInPlace ( valuesToAggregate , m_communicator - > Workers ( ) ) ; <nl> + <nl> + / / Copy data back to the header <nl> + headerCPU - > criterion = headerBuffer [ 0 ] ; <nl> + headerCPU - > numSamples = static_cast < size_t > ( headerBuffer [ 1 ] ) ; <nl> + headerCPU - > numSamplesWithLabel = static_cast < size_t > ( headerBuffer [ 2 ] ) ; <nl> + for ( size_t i = 0 ; i < headerCPU - > numEvalNode ; + + i ) <nl> + { <nl> + headerCPU - > evalErrors [ i ] . first = headerBuffer [ 3 + 2 * i ] ; <nl> + headerCPU - > evalErrors [ i ] . second = static_cast < size_t > ( headerBuffer [ 3 + 2 * i + 1 ] ) ; <nl> + } <nl> + <nl> + / / Aggregate gradients . <nl> + std : : vector < : : CNTK : : NDArrayViewPtr > gradientValues ; <nl> + for ( size_t i = 0 ; i < gradients . size ( ) ; + + i ) <nl> + { <nl> + assert ( gradients [ i ] - > Data ( ) ! = nullptr ) ; <nl> + : : CNTK : : NDShape shape { gradients [ i ] - > GetNumRows ( ) , gradients [ i ] - > GetNumCols ( ) } ; <nl> + auto data = : : CNTK : : MakeSharedObject < : : CNTK : : NDArrayView > ( : : CNTK : : AsDataType < ElemType > ( ) , shape , gradients [ i ] - > Data ( ) , gradients [ i ] - > GetNumElements ( ) * sizeof ( ElemType ) , : : CNTK : : AsDeviceDescriptor ( gradients [ i ] - > GetDeviceId ( ) ) ) ; <nl> + gradientValues . push_back ( data ) ; <nl> + } <nl> + <nl> + m_communicator - > QuantizedAggregateInPlace ( <nl> + gradientValues , <nl> + m_residuals , <nl> + m_stripeResiduals , <nl> + m_communicator - > Workers ( ) ) ; <nl> + <nl> + if ( showSyncPerfStats ) <nl> + { <nl> + aggregationTimer . Stop ( ) ; <nl> + double gradientAggregationTime = aggregationTimer . ElapsedSeconds ( ) ; <nl> + fprintf ( stderr , " Actual gradient aggregation time : % . 
6g \ n " , gradientAggregationTime ) ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + / / Perform asynchronous gradient aggregation using double buffering of the gradient matrices <nl> + bool m_useAsyncAggregation ; <nl> + <nl> + / / Future corresponding to the current in - flight async gradient aggregation <nl> + std : : future < void > m_pendingAsyncAggregation ; <nl> + <nl> + / / Buffered gradients that we asynchronously aggregate <nl> + std : : unordered_map < Matrix < ElemType > * , std : : unique_ptr < Matrix < ElemType > > > m_bufferedGradients ; <nl> + DistGradHeader * m_bufferedGradHeader ; <nl> + <nl> + int m_traceLevel ; <nl> + int m_syncStatsTrace ; <nl> + <nl> + / / Only used for controlling frequency of measuring / showing gradient aggregation perf stats <nl> + size_t m_iterationCount ; <nl> + <nl> + bool m_initialized ; <nl> + <nl> + / / Residuals of quantized gradients . <nl> + std : : vector < : : CNTK : : NDArrayViewPtr > m_residuals ; <nl> + / / Residuals of quantized aggregated stripes this node is responsible for . <nl> + std : : vector < : : CNTK : : NDArrayViewPtr > m_stripeResiduals ; <nl> + } ; <nl> + <nl> + } } } <nl> new file mode 100644 <nl> index 00000000000 . . e9ededc92a5 <nl> mmm / dev / null <nl> ppp b / Source / 1BitSGD / V2BlockMomentumSGD . h <nl> <nl> + / / <nl> + / / Copyright ( c ) Microsoft . All rights reserved . <nl> + / / Licensed under the MIT license . See LICENSE . md file in the project root for full license information . <nl> + / / <nl> + <nl> + # pragma once <nl> + <nl> + # include " . . / SGDLib / MASGD . h " <nl> + # include < map > <nl> + # include < string > <nl> + # include < memory > <nl> + <nl> + namespace Microsoft { namespace MSR { namespace CNTK { <nl> + <nl> + / / Implementation of Blockwise Model Update and Filtering ( BMUF , a . k . a . 
block momentum ) <nl> + / / For detail , see the following paper <nl> + / / Kai Chen and Qiang Huo , " Scalable training of deep learning machines by incremental block training <nl> + / / with intra - block parallel optimization and blockwise model - update filtering " , <nl> + / / in International Conference on Acoustics , Speech and Signal Processing , March 2016 , Shanghai , China . <nl> + template < typename ElemType > <nl> + class V2BlockMomentumSGD : public IMASGD < ElemType > <nl> + { <nl> + typedef IMASGD < ElemType > Base ; <nl> + using Base : : m_deviceId ; <nl> + using Base : : DownCast ; <nl> + <nl> + bool m_resetSGDMomentumAfterAggregation ; <nl> + bool m_useNesterovMomentum ; <nl> + double m_blockLearningRate ; <nl> + double m_blockMomentumAsTimeConstantPerWorker ; <nl> + size_t m_syncPeriodPerWorker ; <nl> + : : CNTK : : DistributedCommunicatorPtr m_communicator ; <nl> + bool m_someWorkerHasFinished ; <nl> + <nl> + / / parameters at the last model aggregation point <nl> + std : : map < std : : wstring , std : : shared_ptr < Matrix < ElemType > > > m_prevParameters ; <nl> + std : : map < std : : wstring , std : : shared_ptr < Matrix < ElemType > > > m_blockLevelSmoothedGradient ; <nl> + <nl> + public : <nl> + V2BlockMomentumSGD ( const MPIWrapperPtr & pMPI , <nl> + : : CNTK : : DistributedCommunicatorPtr communicator , <nl> + size_t reportFrequency , <nl> + DEVICEID_TYPE deviceId , <nl> + bool useNestrovMomentum , <nl> + bool resetSGDM , <nl> + double blockLearningRate , <nl> + double blockMomentumAsTimeConstant , <nl> + size_t syncPeriod ) <nl> + : IMASGD < ElemType > ( pMPI , reportFrequency , deviceId ) , <nl> + m_communicator ( communicator ) , <nl> + m_useNesterovMomentum ( useNestrovMomentum ) , <nl> + m_resetSGDMomentumAfterAggregation ( resetSGDM ) , <nl> + m_blockLearningRate ( blockLearningRate ) , <nl> + m_blockMomentumAsTimeConstantPerWorker ( blockMomentumAsTimeConstant / communicator - > Workers ( ) . 
size ( ) ) <nl> + { <nl> + m_syncPeriodPerWorker = syncPeriod / communicator - > Workers ( ) . size ( ) ; <nl> + if ( m_syncPeriodPerWorker = = 0 ) <nl> + InvalidArgument ( " Sync period is too small . " ) ; <nl> + } <nl> + <nl> + void OnEpochStart ( const std : : list < ComputationNodeBasePtr > & learnableNodes ) override <nl> + { <nl> + m_someWorkerHasFinished = false ; <nl> + <nl> + for ( auto & n : learnableNodes ) <nl> + { <nl> + auto node = DownCast ( n ) ; <nl> + std : : wstring name = node - > NodeName ( ) ; <nl> + <nl> + Matrix < ElemType > & value = node - > Value ( ) ; <nl> + if ( m_blockLevelSmoothedGradient . find ( name ) = = m_blockLevelSmoothedGradient . end ( ) ) <nl> + { <nl> + / / has not been initialized yet <nl> + auto pSmoothedGrad = make_shared < Matrix < ElemType > > ( value . GetDeviceId ( ) ) ; <nl> + pSmoothedGrad - > Resize ( value . GetNumRows ( ) , value . GetNumCols ( ) ) ; <nl> + pSmoothedGrad - > SetValue ( ( ElemType ) 0 ) ; <nl> + m_blockLevelSmoothedGradient [ name ] = pSmoothedGrad ; <nl> + } <nl> + <nl> + if ( m_prevParameters . find ( name ) = = m_prevParameters . end ( ) ) <nl> + { <nl> + auto newValue = make_shared < Matrix < ElemType > > ( value . GetDeviceId ( ) ) ; <nl> + newValue - > SetValue ( value ) ; <nl> + m_prevParameters [ name ] = newValue ; <nl> + } <nl> + else <nl> + { <nl> + m_prevParameters [ name ] - > SetValue ( value ) ; <nl> + } <nl> + } <nl> + <nl> + fprintf ( stderr , " Parallel training ( % d workers ) using BlockMomentumSGD with " <nl> + " block momentum = % 6 . 4f , " <nl> + " block momentum time constant ( per worker ) = % 6 . 4f , " <nl> + " block learning rate = % 6 . 4f , " <nl> + " block size per worker = % d samples , " <nl> + " % s " <nl> + " % s " <nl> + " \ n " , <nl> + ( int ) m_communicator - > Workers ( ) . 
size ( ) , <nl> + BlockMomentumSGD < double > : : TimeConstant2Momentum ( m_blockMomentumAsTimeConstantPerWorker , m_syncPeriodPerWorker ) , <nl> + m_blockMomentumAsTimeConstantPerWorker , <nl> + m_blockLearningRate , <nl> + ( int ) m_syncPeriodPerWorker , <nl> + m_useNesterovMomentum ? " using Nesterov - style block momentum , " : " " , <nl> + m_resetSGDMomentumAfterAggregation ? " resetting SGD momentum after sync . " : " . " ) ; <nl> + } <nl> + <nl> + bool OnArrivingAtSyncPoint ( <nl> + const std : : list < ComputationNodeBasePtr > & learnableNodes , / * input / output : * / <nl> + std : : list < Matrix < ElemType > > & smoothedGradient , / * input / output : under some setup , it will reset to zero * / <nl> + size_t samplesSinceLastSync / * input : samples processed since last sync on this worker only * / <nl> + ) override <nl> + { <nl> + if ( m_someWorkerHasFinished ) <nl> + return false ; <nl> + <nl> + / / Let ' s check the status . <nl> + double statusValue = 0 ; <nl> + auto status = : : CNTK : : MakeSharedObject < : : CNTK : : NDArrayView > ( : : CNTK : : DataType : : Double , : : CNTK : : NDShape { 1 } , & statusValue , sizeof ( double ) , : : CNTK : : DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + std : : vector < : : CNTK : : NDArrayViewPtr > aggregatedStatus { status } ; <nl> + m_communicator - > AggregateInPlace ( aggregatedStatus , m_communicator - > Workers ( ) ) ; <nl> + <nl> + if ( statusValue > 0 ) <nl> + { <nl> + m_someWorkerHasFinished = true ; <nl> + return false ; <nl> + } <nl> + <nl> + / / Otherwise let update the weights . <nl> + float secondsOnCommunication = 0 . 
0f ; <nl> + size_t totalSamples = 0 ; <nl> + ModelAggregationProcessing ( samplesSinceLastSync , learnableNodes , smoothedGradient , totalSamples , secondsOnCommunication ) ; <nl> + return true ; <nl> + } <nl> + <nl> + / * virtual * / void OnEpochEnd ( const std : : list < ComputationNodeBasePtr > & learnableNodes , <nl> + std : : list < Matrix < ElemType > > & smoothedGradient , <nl> + size_t samplesSinceLastSync ) override <nl> + { <nl> + if ( ! m_someWorkerHasFinished ) <nl> + { <nl> + / / Let ' s update the other guys that we have finished . <nl> + m_someWorkerHasFinished = true ; <nl> + <nl> + double statusValue = 1 ; <nl> + auto status = : : CNTK : : MakeSharedObject < : : CNTK : : NDArrayView > ( : : CNTK : : DataType : : Double , : : CNTK : : NDShape { 1 } , & statusValue , sizeof ( double ) , : : CNTK : : DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + std : : vector < : : CNTK : : NDArrayViewPtr > aggregatedStatus { status } ; <nl> + m_communicator - > AggregateInPlace ( aggregatedStatus , m_communicator - > Workers ( ) ) ; <nl> + } <nl> + <nl> + / / Let ' s update our weights no matter what . <nl> + float secondsOnCommunication = 0 . 0f ; <nl> + size_t totalSamples = 0 ; <nl> + ModelAggregationProcessing ( samplesSinceLastSync , learnableNodes , smoothedGradient , totalSamples , secondsOnCommunication ) ; <nl> + } <nl> + <nl> + / * virtual * / void ModelAggregationProcessing ( <nl> + size_t / * samplesSinceLastSync * / , <nl> + const std : : list < ComputationNodeBasePtr > & learnableNodes , <nl> + std : : list < Matrix < ElemType > > & smoothedGradient , <nl> + size_t & / * totalSamplesProcessed * / , / * out * / <nl> + float & secondsOnCommunication / * out * / <nl> + ) override <nl> + { <nl> + ElemType blockMomentum = ( ElemType ) BlockMomentumSGD < double > : : TimeConstant2Momentum ( m_blockMomentumAsTimeConstantPerWorker , m_syncPeriodPerWorker ) ; <nl> + Timer commTimer ; <nl> + secondsOnCommunication = 0 . 0f ; <nl> + <nl> + / / 1 . 
Let ' s aggregate weights <nl> + std : : map < std : : wstring , std : : shared_ptr < Matrix < ElemType > > > aggregatedWeights ; <nl> + std : : vector < : : CNTK : : NDArrayViewPtr > aggregatedWeightsPrepared ; <nl> + for ( auto & pBaseNode : learnableNodes ) <nl> + { <nl> + if ( ! pBaseNode - > IsParameterUpdateRequired ( ) ) <nl> + continue ; <nl> + <nl> + wstring name = pBaseNode - > NodeName ( ) ; <nl> + auto pNode = DownCast ( pBaseNode ) ; <nl> + <nl> + / / Get current model <nl> + Matrix < ElemType > & prevWeight = * m_prevParameters [ name ] ; / / prev model value <nl> + Matrix < ElemType > & currentWeight = pNode - > Value ( ) ; / / current model <nl> + <nl> + / / Subtract it from the previous model <nl> + auto blockGrad = std : : make_shared < Matrix < ElemType > > ( prevWeight , CPUDEVICE ) ; <nl> + * blockGrad - = currentWeight ; / / matW becomes local block gradient ( of one worker ) <nl> + <nl> + aggregatedWeights [ name ] = blockGrad ; <nl> + : : CNTK : : NDShape shape { blockGrad - > GetNumElements ( ) } ; <nl> + auto data = : : CNTK : : MakeSharedObject < : : CNTK : : NDArrayView > ( : : CNTK : : AsDataType < ElemType > ( ) , shape , blockGrad - > Data ( ) , blockGrad - > GetNumElements ( ) * sizeof ( ElemType ) , : : CNTK : : AsDeviceDescriptor ( blockGrad - > GetDeviceId ( ) ) ) ; <nl> + aggregatedWeightsPrepared . push_back ( data ) ; <nl> + } <nl> + <nl> + / / Send block gradient over MPI nodes . <nl> + m_communicator - > AggregateInPlace ( aggregatedWeightsPrepared , m_communicator - > Workers ( ) ) ; <nl> + <nl> + / / 2 . Let ' s update the model <nl> + for ( auto & pBaseNode : learnableNodes ) <nl> + { <nl> + if ( ! pBaseNode - > IsParameterUpdateRequired ( ) ) <nl> + continue ; <nl> + <nl> + wstring name = pBaseNode - > NodeName ( ) ; <nl> + auto pNode = DownCast ( pBaseNode ) ; <nl> + <nl> + / / 2 block gradient aggregation <nl> + / / 2 . 1 . 
get current model <nl> + Matrix < ElemType > & prevWeight = * m_prevParameters [ name ] ; / / prev model value <nl> + Matrix < ElemType > & currentWeight = pNode - > Value ( ) ; / / current model <nl> + auto blockGrad = aggregatedWeights [ name ] ; <nl> + / / 2 . 2 . model update <nl> + { <nl> + Matrix < ElemType > & sg = * m_blockLevelSmoothedGradient [ name ] ; / / smoothed gradient <nl> + blockGrad - > TransferToDeviceIfNotThere ( sg . GetDeviceId ( ) ) ; <nl> + / / 2 . 2 . 1 update block level smoothed gradient ; <nl> + / / This is essentially a first - order infinite impulse response ( IIR ) filter with the gain ( 1 - blockMomentum ) * m_blockLearningRate : <nl> + / / smoothedGradient ( t ) = blockMomentum * smoothedGradients ( t - 1 ) + ( 1 - blockMomentum ) * m_blockLearningRate * blockGrad ( t ) <nl> + Matrix < ElemType > : : ScaleAndAdd ( ( ElemType ) ( ( 1 - blockMomentum ) * m_blockLearningRate ) , * blockGrad , ( ElemType ) blockMomentum , sg ) ; <nl> + / / 2 . 2 . 2 update parameters ; <nl> + currentWeight . SetValue ( prevWeight ) ; <nl> + currentWeight - = sg ; <nl> + / / 2 . 2 . 3 Nesterov Momentum <nl> + / / A Nesterov momentum here is to do a partial weight update before calculating the gradient , i . e . , <nl> + / / ( step 1 ) w ( t ) < - - w ( t ) - \ eta * v ( t ) <nl> + / / ( step 2 ) g ( t + 1 ) < - - forwardbackward on minibatches with initial model as w ( t ) <nl> + / / ( step 3 ) v ( t + 1 ) < - - \ eta * v ( t ) + ( 1 - \ eta ) * learningRate * g ( t + 1 ) <nl> + / / ( step 4 ) w ( t + 1 ) < - - w ( t ) - v ( t ) <nl> + / / ( step 5 ) t < - - t + 1 <nl> + / / without step 1 , this becomes stanard momentum <nl> + if ( m_useNesterovMomentum ) <nl> + { <nl> + Matrix < ElemType > : : ScaleAndAdd ( ( ElemType ) - blockMomentum , sg , currentWeight ) ; <nl> + } <nl> + / / 2 . 2 . 4 update bookkeeping <nl> + prevWeight . SetValue ( currentWeight ) ; <nl> + } <nl> + } <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + / / 3 . 
reset SGD momentum if necessary <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + if ( m_resetSGDMomentumAfterAggregation ) <nl> + { <nl> + for ( Matrix < ElemType > & x : smoothedGradient ) <nl> + { <nl> + x . SetValue ( ( ElemType ) 0 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void SaveToCheckPoint ( File & fstream ) override <nl> + { <nl> + if ( ! m_communicator - > CurrentWorker ( ) . IsMain ( ) ) <nl> + return ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BMACKP " ) ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BOptions " ) ; <nl> + fstream < < m_resetSGDMomentumAfterAggregation ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerEndSection , L " EOptions " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BMomentumAsTimeConstant " ) ; <nl> + fstream < < m_blockMomentumAsTimeConstantPerWorker ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerEndSection , L " EMomentumAsTimeConstant " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BSyncPeriodInSamples " ) ; <nl> + fstream < < m_syncPeriodPerWorker ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerEndSection , L " ESyncPeriodInSamples " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " BParam " ) ; <nl> + SaveParameters ( fstream , m_prevParameters ) ; <nl> + SaveParameters ( fstream , m_blockLevelSmoothedGradient ) ; <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " EParam " ) ; <nl> + <nl> + fstream . PutMarker ( FileMarker : : fileMarkerBeginSection , L " EMACKP " ) ; <nl> + } <nl> + <nl> + void LoadFromCheckPoint ( File & fstream ) override <nl> + { <nl> + if ( ! fstream . TryGetMarker ( FileMarker : : fileMarkerBeginSection , L " BMACKP " ) ) <nl> + return ; <nl> + <nl> + fstream . 
GetMarker ( FileMarker : : fileMarkerBeginSection , L " BOptions " ) ; <nl> + fstream > > m_resetSGDMomentumAfterAggregation ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerEndSection , L " EOptions " ) ; <nl> + <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " BMomentumAsTimeConstant " ) ; <nl> + fstream > > m_blockMomentumAsTimeConstantPerWorker ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerEndSection , L " EMomentumAsTimeConstant " ) ; <nl> + <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " BSyncPeriodInSamples " ) ; <nl> + fstream > > m_syncPeriodPerWorker ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerEndSection , L " ESyncPeriodInSamples " ) ; <nl> + <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " BParam " ) ; <nl> + LoadParameters ( fstream , m_prevParameters , m_deviceId ) ; <nl> + LoadParameters ( fstream , m_blockLevelSmoothedGradient , m_deviceId ) ; <nl> + fstream . GetMarker ( FileMarker : : fileMarkerBeginSection , L " EParam " ) ; <nl> + <nl> + fstream . GetMarker ( FileMarker : : fileMarkerEndSection , L " EMACKP " ) ; <nl> + } <nl> + <nl> + private : <nl> + <nl> + / / helper function to save / load map < wstring , shared_ptr < Matrix < ElemType > > structure <nl> + void SaveParameters ( File & f , const map < wstring , shared_ptr < Matrix < ElemType > > > & parameters ) const <nl> + { <nl> + / / save sizeof ( ElemType ) <nl> + unsigned int size = sizeof ( ElemType ) ; <nl> + f < < size ; <nl> + / / save number of pairs <nl> + unsigned int numPairs = parameters . size ( ) ; <nl> + f < < numPairs ; <nl> + for ( auto & x : parameters ) <nl> + { <nl> + f < < x . first ; <nl> + f < < * x . second ; <nl> + } <nl> + f . 
Flush ( ) ; <nl> + return ; <nl> + } <nl> + <nl> + void LoadParameters ( File & f , map < wstring , shared_ptr < Matrix < ElemType > > > & parameters , DEVICEID_TYPE deviceID ) <nl> + { <nl> + unsigned int size = 0 ; <nl> + unsigned int pair = 0 ; <nl> + f > > size ; <nl> + f > > pair ; <nl> + if ( size ! = sizeof ( ElemType ) ) <nl> + { <nl> + LogicError ( " Mismatched ElemType in loading BlockMomentumSGD checkpoint . Expecting % s , while loading element size = % d \ n " , <nl> + sizeof ( ElemType ) = = 4 ? " float " : " double " , <nl> + size <nl> + ) ; <nl> + } <nl> + parameters . clear ( ) ; <nl> + for ( size_t i = 0 ; i < pair ; i + + ) <nl> + { <nl> + wstring name ; <nl> + f > > name ; <nl> + shared_ptr < Matrix < ElemType > > mat = make_shared < Matrix < ElemType > > ( deviceID ) ; <nl> + f > > * mat ; <nl> + parameters [ name ] = mat ; <nl> + } <nl> + } <nl> + <nl> + public : <nl> + static double TimeConstant2Momentum ( double timeConstant , size_t syncPeroid ) <nl> + { <nl> + return exp ( - ( ( double ) syncPeroid ) / timeConstant ) ; <nl> + } <nl> + <nl> + static double Momentum2TimeConstant ( double bm , size_t syncPeroid ) <nl> + { <nl> + if ( bm > = 1 . 0 | | bm < 0 . 0 ) <nl> + { <nl> + InvalidArgument ( " Unexpected block momentum ( % . 2f ) . Block momentum should be in the range of [ 0 , 1 ) \ n " , bm ) ; <nl> + } <nl> + return - ( double ) syncPeroid / log ( bm ) ; <nl> + } <nl> + } ; <nl> + } } } <nl>
Merge remote - tracking branch ' 1BitSGD / master ' into thiagofc / master
microsoft/CNTK
20056a545762e6eb0eba544ef198fa94f4d884bb
2018-03-06T18:59:34Z
mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> tf_gen_op_libs ( <nl> op_lib_names = [ <nl> " string_ops " , <nl> ] , <nl> - deps = [ " @ com_google_absl / / absl / strings " ] , <nl> + deps = [ <nl> + " : lib_internal " , <nl> + " : lib_proto_parsing " , <nl> + " @ com_google_absl / / absl / strings " , <nl> + ] , <nl> ) <nl> <nl> tf_gen_op_libs ( <nl> new file mode 100644 <nl> index 0000000000000 . . 26f786586073f <nl> mmm / dev / null <nl> ppp b / tensorflow / core / api_def / base_api / api_def_UnicodeEncode . pbtxt <nl> <nl> + op { <nl> + graph_op_name : " UnicodeEncode " <nl> + visibility : HIDDEN <nl> + endpoint { <nl> + name : " UnicodeEncode " <nl> + } <nl> + in_arg { <nl> + name : " input_values " <nl> + description : < < END <nl> + A 1D tensor containing the unicode codepoints that should be encoded . <nl> + END <nl> + } <nl> + in_arg { <nl> + name : " input_splits " <nl> + description : < < END <nl> + A 1D tensor specifying how the unicode codepoints should be split into strings . <nl> + In particular , ` output [ i ] ` is constructed by encoding the codepoints in the <nl> + slice ` input_values [ input_splits [ i ] : input_splits [ i + 1 ] ] ` . <nl> + END <nl> + } <nl> + attr { <nl> + name : " output_encoding " <nl> + description : < < END <nl> + Unicode encoding of the output strings . Valid encodings are : ` " UTF - 8 " , <nl> + " UTF - 16 - BE " , and " UTF - 32 - BE " ` . <nl> + END <nl> + } <nl> + attr { <nl> + name : " errors " <nl> + description : < < END <nl> + Error handling policy when there is invalid formatting found in the input . <nl> + The value of ' strict ' will cause the operation to produce a InvalidArgument <nl> + error on any invalid input formatting . A value of ' replace ' ( the default ) will <nl> + cause the operation to replace any invalid formatting in the input with the <nl> + ` replacement_char ` codepoint . 
A value of ' ignore ' will cause the operation to <nl> + skip any invalid formatting in the input and produce no corresponding output <nl> + character . <nl> + END <nl> + } <nl> + attr { <nl> + name : " replacement_char " <nl> + description : < < END <nl> + The replacement character codepoint to be used in place of any invalid <nl> + formatting in the input when ` errors = ' replace ' ` . Any valid unicode codepoint may <nl> + be used . The default value is the default unicode replacement character is <nl> + 0xFFFD ( U + 65533 ) . <nl> + END <nl> + } <nl> + out_arg { <nl> + name : " output " <nl> + description : < < END <nl> + The 1 - D Tensor of strings encoded from the provided unicode codepoints . <nl> + END <nl> + } <nl> + summary : " Encode a tensor of ints into unicode strings . " <nl> + description : < < END <nl> + Returns a vector of strings , where ` output [ i ] ` is constructed by encoding the <nl> + Unicode codepoints in ` input_values [ input_splits [ i ] : input_splits [ i + 1 ] ] ` <nl> + using ` output_encoding ` . <nl> + <nl> + mmm <nl> + <nl> + Example : <nl> + <nl> + ` ` ` <nl> + input_values = [ 72 , 101 , 108 , 108 , 111 , 87 , 111 , 114 , 108 , 100 ] <nl> + input_splits = [ 0 , 5 , 10 ] <nl> + output_encoding = ' UTF - 8 ' <nl> + <nl> + output = [ ' Hello ' , ' World ' ] <nl> + ` ` ` <nl> + END <nl> + } <nl> mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> tf_kernel_library ( <nl> name = " unicode_ops " , <nl> prefix = " unicode_ops " , <nl> deps = [ <nl> + " : bounds_check " , <nl> " : string_util " , <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : lib " , <nl> " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core : string_ops_op_lib " , <nl> + " / / third_party / eigen3 " , <nl> " / / third_party / icu / data : conversion_data " , <nl> " @ icu / / : common " , <nl> ] , <nl> filegroup ( <nl> " batch_kernels . * " , <nl> " regex_full_match_op . 
cc " , <nl> " regex_replace_op . cc " , <nl> - " unicode_script_op . cc " , <nl> " unicode_ops . cc " , <nl> + " unicode_script_op . cc " , <nl> # Ops that are inherently incompatible with Android ( e . g . tied to x86 platform ) . <nl> " mkl_ * " , <nl> " xsmm_ * " , <nl> mmm a / tensorflow / core / kernels / unicode_ops . cc <nl> ppp b / tensorflow / core / kernels / unicode_ops . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # include < stdint . h > <nl> + # include < cstddef > <nl> + # include < functional > <nl> # include < memory > <nl> # include < string > <nl> + # include < vector > <nl> <nl> + # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> + # include " unicode / appendable . h " / / TF : icu <nl> + # include " unicode / schriter . h " / / TF : icu <nl> + # include " unicode / uchar . h " / / TF : icu <nl> # include " unicode / ucnv . h " / / TF : icu <nl> # include " unicode / ucnv_err . h " / / TF : icu <nl> # include " unicode / umachine . h " / / TF : icu <nl> limitations under the License . <nl> # include " unicode / unistr . h " / / TF : icu <nl> # include " unicode / uset . h " / / TF : icu <nl> # include " unicode / utypes . h " / / TF : icu <nl> + # include " tensorflow / core / framework / kernel_def_builder . h " <nl> + # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> + # include " tensorflow / core / framework / register_types . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> + # include " tensorflow / core / framework / tensor_shape . h " <nl> + # include " tensorflow / core / framework / tensor_types . h " <nl> + # include " tensorflow / core / framework / types . 
h " <nl> + # include " tensorflow / core / kernels / bounds_check . h " <nl> # include " tensorflow / core / kernels / string_util . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / lib / core / stringpiece . h " <nl> + # include " tensorflow / core / platform / types . h " <nl> # include " tensorflow / core / util / bcast . h " <nl> # include " tensorflow / core / util / ptr_util . h " <nl> <nl> namespace tensorflow { <nl> - <nl> namespace { <nl> <nl> + void Encode ( const UnicodeEncoding encoding , const icu : : UnicodeString & in , <nl> + string * out ) { <nl> + if ( encoding = = UnicodeEncoding : : UTF8 ) { <nl> + out - > clear ( ) ; <nl> + in . toUTF8String ( * out ) ; <nl> + } else if ( encoding = = UnicodeEncoding : : UTF16BE ) { <nl> + / / TODO ( gbillock ) : consider using the <nl> + / / extract ( char * dest , int32_t destCapacity , UConverter * cnv ) <nl> + / / for UTF16 / 32 <nl> + out - > clear ( ) ; / / subtle : must come before reserve ( ) <nl> + out - > reserve ( 2 * in . length ( ) + 1 ) ; <nl> + const char16_t * buf = in . getBuffer ( ) ; <nl> + for ( int i = 0 ; i < in . length ( ) ; + + i ) { <nl> + / / Emit big - endian encoding for UTF - 16 always . <nl> + out - > push_back ( ( buf [ i ] & 0xFF00 ) > > 8 ) ; <nl> + out - > push_back ( buf [ i ] & 0x00FF ) ; <nl> + } <nl> + } else if ( encoding = = UnicodeEncoding : : UTF32BE ) { <nl> + out - > clear ( ) ; / / subtle : must come before reserve ( ) <nl> + out - > reserve ( 4 * in . countChar32 ( ) + 1 ) ; <nl> + icu : : StringCharacterIterator it ( in ) ; <nl> + UChar32 ch ; <nl> + while ( it . hasNext ( ) ) { <nl> + ch = it . 
next32PostInc ( ) ; <nl> + out - > push_back ( ( ch & 0xFF000000 ) > > 24 ) ; <nl> + out - > push_back ( ( ch & 0x00FF0000 ) > > 16 ) ; <nl> + out - > push_back ( ( ch & 0x0000FF00 ) > > 8 ) ; <nl> + out - > push_back ( ( ch & 0x000000FF ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> / / This error callback is only useful for finding illegal encoding errors when <nl> / / we want to be strict - - otherwise illegal encodings are replaced on read <nl> / / with 0xFFFD and signaled to the callback . <nl> Status GetErrorOptions ( OpKernelConstruction * ctx , ErrorOptions * out ) { <nl> " replacement_char out of unicode codepoint range " ) ; <nl> } <nl> <nl> - TF_RETURN_IF_ERROR ( ctx - > GetAttr ( " replace_control_characters " , <nl> - & ( out - > replace_control_chars ) ) ) ; <nl> + if ( ctx - > HasAttr ( " replace_control_characters " ) ) { <nl> + TF_RETURN_IF_ERROR ( ctx - > GetAttr ( " replace_control_characters " , <nl> + & ( out - > replace_control_chars ) ) ) ; <nl> + } <nl> <nl> return Status : : OK ( ) ; <nl> } <nl> class UnicodeTranscodeOp : public OpKernel { <nl> found_any_format_error , std : : placeholders : : _1 , <nl> std : : placeholders : : _2 , std : : placeholders : : _3 ) ) ; <nl> <nl> - if ( output_encoding_ = = UnicodeEncoding : : UTF8 ) { <nl> - s - > clear ( ) ; <nl> - source . toUTF8String ( * s ) ; <nl> - } else if ( output_encoding_ = = UnicodeEncoding : : UTF16BE ) { <nl> - / / TODO ( gbillock ) : consider using the <nl> - / / extract ( char * dest , int32_t destCapacity , UConverter * cnv ) <nl> - / / for UTF16 / 32 <nl> - s - > clear ( ) ; / / subtle : must come before reserve ( ) <nl> - s - > reserve ( 2 * source . length ( ) + 1 ) ; <nl> - const char16_t * buf = source . getBuffer ( ) ; <nl> - for ( int i = 0 ; i < source . length ( ) ; + + i ) { <nl> - / / Emit big - endian encoding for UTF - 16 always . 
<nl> - s - > push_back ( ( buf [ i ] & 0xFF00 ) > > 8 ) ; <nl> - s - > push_back ( buf [ i ] & 0x00FF ) ; <nl> - } <nl> - } else if ( output_encoding_ = = UnicodeEncoding : : UTF32BE ) { <nl> - s - > clear ( ) ; / / subtle : must come before reserve ( ) <nl> - s - > reserve ( 4 * source . countChar32 ( ) + 1 ) ; <nl> - for ( int i = 0 ; i < source . countChar32 ( ) ; + + i ) { <nl> - / / Emit big - endian encoding for UTF - 32 always . <nl> - UChar32 ch = source . char32At ( i ) ; <nl> - s - > push_back ( ( ch & 0xFF000000 ) > > 24 ) ; <nl> - s - > push_back ( ( ch & 0x00FF0000 ) > > 16 ) ; <nl> - s - > push_back ( ( ch & 0x0000FF00 ) > > 8 ) ; <nl> - s - > push_back ( ( ch & 0x000000FF ) ) ; <nl> - } <nl> - } <nl> + Encode ( output_encoding_ , source , s ) ; <nl> } <nl> <nl> string input_encoding_ ; <nl> class UnicodeDecodeWithOffsetsOp : public OpKernel { <nl> REGISTER_KERNEL_BUILDER ( Name ( " UnicodeDecodeWithOffsets " ) . Device ( DEVICE_CPU ) , <nl> UnicodeDecodeWithOffsetsOp ) ; <nl> <nl> + class UnicodeEncodeOp : public OpKernel { <nl> + public : <nl> + explicit UnicodeEncodeOp ( OpKernelConstruction * ctx ) : OpKernel ( ctx ) { <nl> + string encoding_tmp ; <nl> + OP_REQUIRES_OK ( ctx , ctx - > GetAttr ( " output_encoding " , & encoding_tmp ) ) ; <nl> + OP_REQUIRES_OK ( ctx , ParseUnicodeEncoding ( encoding_tmp , & encoding_ ) ) ; <nl> + OP_REQUIRES_OK ( ctx , GetErrorOptions ( ctx , & error_options_ ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Encodes Unicode codepoints into the desired string representation . <nl> + * <nl> + * We lose a dimension while encoding , since a series of integer codepoints is <nl> + * encoded into a single string . <nl> + * <nl> + * This accepts two input tensors : a rank 1 tensor of code point values and <nl> + * a single rank 1 tensor of splits which determine where each string begins <nl> + * and ends from the provided code points . 
<nl> + * / <nl> + void Compute ( OpKernelContext * context ) override { <nl> + / / Get inputs <nl> + const Tensor & input_tensor = context - > input ( 0 ) ; <nl> + const auto input_tensor_flat = input_tensor . flat < int32 > ( ) ; <nl> + const Tensor & input_splits = context - > input ( 1 ) ; <nl> + const auto input_splits_flat = input_splits . flat < int64 > ( ) ; <nl> + <nl> + / / Since we limit to a 2 - D input ( inner_values of rank 1 and a single splits <nl> + / / tensor ) , our output dimension will be 1 with it ' s size equal to the <nl> + / / number of splits ( outer dimension or ragged tensor ) . <nl> + TensorShape output_shape ( { input_splits . dim_size ( 0 ) - 1 } ) ; <nl> + Tensor * output_tensor ; <nl> + OP_REQUIRES_OK ( context , context - > allocate_output ( " output " , output_shape , <nl> + & output_tensor ) ) ; <nl> + auto output_tensor_flat = output_tensor - > flat < string > ( ) ; <nl> + <nl> + / / Use a single index over the flattened input values tensor . <nl> + int idx = 0 ; <nl> + / / Loop through our split dimension to create a new string at each split . <nl> + for ( int i = 1 ; i < input_splits_flat . size ( ) ; + + i ) { <nl> + icu : : UnicodeString unicode_string ; <nl> + icu : : UnicodeStringAppendable appendable_unicode_string ( unicode_string ) ; <nl> + for ( ; idx < input_splits_flat ( i ) ; + + idx ) { <nl> + int32 code_point = input_tensor_flat ( idx ) ; <nl> + / / Check for invalid code point <nl> + if ( code_point > UCHAR_MAX_VALUE | | code_point < UCHAR_MIN_VALUE ) { <nl> + if ( error_options_ . error_on_malformatting ) { <nl> + context - > CtxFailure ( errors : : InvalidArgument ( <nl> + " Code point value out of valid Unicode range . " ) ) ; <nl> + return ; <nl> + } else if ( ! error_options_ . elide_replacement ) { <nl> + code_point = error_options_ . subst ; <nl> + } <nl> + } <nl> + appendable_unicode_string . appendCodePoint ( code_point ) ; <nl> + } <nl> + / / Encode our string and save in the output . 
<nl> + string result ; <nl> + Encode ( encoding_ , unicode_string , & result ) ; <nl> + output_tensor_flat ( i - 1 ) = result ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + UnicodeEncoding encoding_ ; <nl> + ErrorOptions error_options_ ; <nl> + } ; <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " UnicodeEncode " ) . Device ( DEVICE_CPU ) , <nl> + UnicodeEncodeOp ) ; <nl> + <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / ops / string_ops . cc <nl> ppp b / tensorflow / core / ops / string_ops . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # include < string > <nl> + # include < vector > <nl> + <nl> # include " absl / strings / str_split . h " <nl> # include " tensorflow / core / framework / common_shape_fns . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / lib / strings / strcat . h " <nl> + # include " tensorflow / core / platform / types . h " <nl> <nl> namespace tensorflow { <nl> <nl> + namespace shape_inference { <nl> + class InferenceContext ; <nl> + } / / namespace shape_inference <nl> + <nl> using shape_inference : : DimensionHandle ; <nl> using shape_inference : : InferenceContext ; <nl> using shape_inference : : ShapeHandle ; <nl> REGISTER_OP ( " UnicodeScript " ) <nl> . Output ( " output : int32 " ) <nl> . SetShapeFn ( shape_inference : : UnchangedShape ) ; <nl> <nl> + REGISTER_OP ( " UnicodeEncode " ) <nl> + . Input ( " input_values : int32 " ) <nl> + . Input ( " input_splits : int64 " ) <nl> + . 
Attr ( " errors : { ' ignore ' , ' replace ' , ' strict ' } = ' replace ' " ) <nl> + . Attr ( " output_encoding : { ' UTF - 8 ' , ' UTF - 16 - BE ' , ' UTF - 32 - BE ' } " ) <nl> + . Attr ( " replacement_char : int = 65533 " ) / / 0xFFFD unicode replacement char <nl> + . Output ( " output : string " ) <nl> + . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> + / / Check rank of inner values <nl> + ShapeHandle input_inner_values_shape = c - > input ( 0 ) ; <nl> + ShapeHandle unused ; <nl> + TF_RETURN_IF_ERROR ( c - > WithRank ( input_inner_values_shape , 1 , & unused ) ) ; <nl> + <nl> + / / Check rank of input_splits <nl> + ShapeHandle splits_shape = c - > input ( 1 ) ; <nl> + TF_RETURN_IF_ERROR ( c - > WithRank ( splits_shape , 1 , & unused ) ) ; <nl> + <nl> + / / Output shape is a 1 - D tensor with size equal to number of splits . <nl> + std : : vector < DimensionHandle > dims ( 1 ) ; <nl> + TF_RETURN_IF_ERROR ( c - > Subtract ( c - > Dim ( splits_shape , 0 ) , 1 , & dims [ 0 ] ) ) ; <nl> + c - > set_output ( 0 , c - > MakeShape ( dims ) ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + <nl> REGISTER_OP ( " UnicodeTranscode " ) <nl> . Input ( " input : string " ) <nl> . Output ( " output : string " ) <nl> mmm a / tensorflow / python / __init__ . py <nl> ppp b / tensorflow / python / __init__ . py <nl> <nl> from tensorflow . python . ops import manip_ops as manip <nl> from tensorflow . python . ops import metrics <nl> from tensorflow . python . ops import nn <nl> + from tensorflow . python . ops import ragged <nl> from tensorflow . python . ops import sets <nl> from tensorflow . python . ops . distributions import distributions <nl> from tensorflow . python . ops . 
linalg import linalg <nl> mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> cuda_py_test ( <nl> ] , <nl> ) <nl> <nl> + tf_py_test ( <nl> + name = " unicode_encode_op_test " , <nl> + size = " small " , <nl> + srcs = [ " unicode_encode_op_test . py " ] , <nl> + additional_deps = [ <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + " / / third_party / py / numpy " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python / ops / ragged : ragged_factory_ops " , <nl> + " / / tensorflow / python / ops / ragged : ragged_string_ops " , <nl> + ] , <nl> + ) <nl> + <nl> tf_py_test ( <nl> name = " unicode_transcode_op_test " , <nl> size = " small " , <nl> new file mode 100644 <nl> index 0000000000000 . . a5a5c2017c6fd <nl> mmm / dev / null <nl> ppp b / tensorflow / python / kernel_tests / unicode_encode_op_test . py <nl> <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for UnicodeEncode op from ragged_string_ops . 
" " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from absl . testing import parameterized <nl> + import numpy as np <nl> + <nl> + from tensorflow . python . framework import constant_op <nl> + from tensorflow . python . framework import errors_impl as errors <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops . ragged import ragged_factory_ops <nl> + from tensorflow . python . ops . ragged import ragged_string_ops <nl> + from tensorflow . python . platform import test <nl> + <nl> + <nl> + class UnicodeEncodeOpTest ( test . TestCase , parameterized . TestCase ) : <nl> + <nl> + def testScalar ( self ) : <nl> + with self . cached_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + ragged_string_ops . unicode_encode ( 72 , " UTF - 8 " ) <nl> + with self . cached_session ( ) : <nl> + with self . assertRaises ( ValueError ) : <nl> + ragged_string_ops . unicode_encode ( constant_op . constant ( 72 ) , " UTF - 8 " ) <nl> + <nl> + def testRequireParams ( self ) : <nl> + with self . cached_session ( ) : <nl> + with self . assertRaises ( TypeError ) : <nl> + ragged_string_ops . unicode_encode ( ) <nl> + with self . cached_session ( ) : <nl> + with self . assertRaises ( TypeError ) : <nl> + ragged_string_ops . unicode_encode ( 72 ) <nl> + with self . cached_session ( ) : <nl> + with self . assertRaises ( TypeError ) : <nl> + ragged_string_ops . unicode_encode ( encoding = " UTF - 8 " ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def testStrictErrors ( self , encoding ) : <nl> + test_value = np . array ( [ 72 , 101 , 2147483647 , - 1 , 111 ] , np . int32 ) <nl> + with self . cached_session ( ) : <nl> + with self . assertRaises ( errors . InvalidArgumentError ) : <nl> + ragged_string_ops . unicode_encode ( test_value , encoding , " strict " ) . 
eval ( ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def testIgnoreErrors ( self , encoding ) : <nl> + test_value = np . array ( [ 72 , 101 , 2147483647 , - 1 , 111 ] , np . int32 ) <nl> + expected_value = u " Heo " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding , <nl> + " ignore " ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def testReplaceErrors ( self , encoding ) : <nl> + test_value = np . array ( [ 72 , 101 , 2147483647 , - 1 , 111 ] , np . int32 ) <nl> + expected_value = u " He \ U0000fffd \ U0000fffdo " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding , <nl> + " replace " ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + # Test custom replacement character <nl> + test_value = np . array ( [ 72 , 101 , 2147483647 , - 1 , 111 ] , np . int32 ) <nl> + expected_value = u " Heooo " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding , <nl> + " replace " , 111 ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + # Verify " replace " is default <nl> + test_value = np . array ( [ 72 , 101 , 2147483647 , - 1 , 111 ] , np . int32 ) <nl> + expected_value = u " He \ U0000fffd \ U0000fffdo " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . 
unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + # Replacement_char must be within range <nl> + test_value = np . array ( [ 72 , 101 , 2147483647 , - 1 , 111 ] , np . int32 ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding , <nl> + " replace " , 1114112 ) <nl> + with self . cached_session ( ) : <nl> + with self . assertRaises ( errors . InvalidArgumentError ) : <nl> + unicode_encode_op . eval ( ) <nl> + <nl> + # - - regular Tensor tests - - # <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def testVector ( self , encoding ) : <nl> + test_value = np . array ( [ 72 , 101 , 108 , 108 , 111 ] , np . int32 ) <nl> + expected_value = u " Hello " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + test_value = np . array ( [ 72 , 101 , 195 , 195 , 128516 ] , np . int32 ) <nl> + expected_value = u " He \ xc3 \ xc3 \ U0001f604 " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + # Single character string <nl> + test_value = np . array ( [ 72 ] , np . int32 ) <nl> + expected_value = u " H " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . 
cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + test_value = np . array ( [ 128516 ] , np . int32 ) <nl> + expected_value = u " \ U0001f604 " . encode ( encoding ) <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( result , bytes ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def testMatrix ( self , encoding ) : <nl> + test_value = np . array ( <nl> + [ [ 72 , 128516 , 108 , 108 , 111 ] , [ 87 , 128516 , 114 , 108 , 100 ] ] , np . int32 ) <nl> + expected_value = [ <nl> + u " H \ U0001f604llo " . encode ( encoding ) , u " W \ U0001f604rld " . encode ( encoding ) <nl> + ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( unicode_encode_op , ops . Tensor ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def test3DimMatrix ( self , encoding ) : <nl> + test_value = constant_op . constant ( <nl> + [ [ [ 72 , 101 , 108 , 108 , 111 ] , [ 87 , 111 , 114 , 108 , 100 ] ] , <nl> + [ [ 102 , 105 , 120 , 101 , 100 ] , [ 119 , 111 , 114 , 100 , 115 ] ] , <nl> + [ [ 72 , 121 , 112 , 101 , 114 ] , [ 99 , 117 , 98 , 101 , 46 ] ] ] , np . int32 ) <nl> + expected_value = [ [ u " Hello " . encode ( encoding ) , u " World " . encode ( encoding ) ] , <nl> + [ u " fixed " . encode ( encoding ) , u " words " . encode ( encoding ) ] , <nl> + [ u " Hyper " . encode ( encoding ) , u " cube . " . 
encode ( encoding ) ] ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( unicode_encode_op , ops . Tensor ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def test4DimMatrix ( self , encoding ) : <nl> + test_value = constant_op . constant ( <nl> + [ [ [ [ 72 , 101 , 108 , 108 , 111 ] ] , [ [ 87 , 111 , 114 , 108 , 100 ] ] ] , <nl> + [ [ [ 102 , 105 , 120 , 101 , 100 ] ] , [ [ 119 , 111 , 114 , 100 , 115 ] ] ] , <nl> + [ [ [ 72 , 121 , 112 , 101 , 114 ] ] , [ [ 99 , 117 , 98 , 101 , 46 ] ] ] ] , np . int32 ) <nl> + expected_value = [ [ [ u " Hello " . encode ( encoding ) ] , <nl> + [ u " World " . encode ( encoding ) ] ] , <nl> + [ [ u " fixed " . encode ( encoding ) ] , <nl> + [ u " words " . encode ( encoding ) ] ] , <nl> + [ [ u " Hyper " . encode ( encoding ) ] , <nl> + [ u " cube . " . encode ( encoding ) ] ] ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( unicode_encode_op , ops . Tensor ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + # - - Ragged Tensor tests - - # <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def testRaggedMatrix ( self , encoding ) : <nl> + test_value = ragged_factory_ops . constant ( <nl> + [ [ 72 , 195 , 108 , 108 , 111 ] , [ 87 , 128516 , 114 , 108 , 100 , 46 ] ] , np . int32 ) <nl> + expected_value = [ <nl> + u " H \ xc3llo " . encode ( encoding ) , u " W \ U0001f604rld . " . encode ( encoding ) <nl> + ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . 
cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertIsInstance ( unicode_encode_op , ops . Tensor ) <nl> + self . assertAllEqual ( result , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def test3DimMatrixWithRagged2ndDim ( self , encoding ) : <nl> + test_value = ragged_factory_ops . constant ( <nl> + [ [ [ 72 , 101 , 108 , 108 , 111 ] , [ 87 , 111 , 114 , 108 , 100 ] ] , <nl> + [ [ 102 , 105 , 120 , 101 , 100 ] ] , <nl> + [ [ 72 , 121 , 112 , 101 , 114 ] , [ 119 , 111 , 114 , 100 , 115 ] , <nl> + [ 99 , 117 , 98 , 101 , 46 ] ] ] , np . int32 ) <nl> + expected_value = [ [ u " Hello " . encode ( encoding ) , u " World " . encode ( encoding ) ] , <nl> + [ u " fixed " . encode ( encoding ) ] , <nl> + [ <nl> + u " Hyper " . encode ( encoding ) , u " words " . encode ( encoding ) , <nl> + u " cube . " . encode ( encoding ) <nl> + ] ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertEqual ( unicode_encode_op . ragged_rank , 1 ) <nl> + self . assertAllEqual ( result . tolist ( ) , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def test3DimMatrixWithRagged3rdDim ( self , encoding ) : <nl> + test_value = ragged_factory_ops . constant ( <nl> + [ [ [ 72 , 101 , 108 , 108 , 111 ] , [ 87 , 111 , 114 , 108 , 100 , 46 ] ] , <nl> + [ [ 68 , 111 , 110 , 39 , 116 ] , [ 119 , 195 , 114 , 114 , 121 , 44 , 32 , 98 , 101 ] ] , <nl> + [ [ 128516 ] , [ ] ] ] , np . int32 ) <nl> + expected_value = [ [ u " Hello " . encode ( encoding ) , u " World . " . encode ( encoding ) ] , <nl> + [ <nl> + u " Don ' t " . encode ( encoding ) , <nl> + u " w \ xc3rry , be " . encode ( encoding ) <nl> + ] , [ u " \ U0001f604 " . encode ( encoding ) , u " " . 
encode ( encoding ) ] ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertEqual ( unicode_encode_op . ragged_rank , 1 ) <nl> + self . assertAllEqual ( result . tolist ( ) , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def test3DimMatrixWithRagged2ndAnd3rdDim ( self , encoding ) : <nl> + test_value = ragged_factory_ops . constant ( <nl> + [ [ [ 72 , 101 , 108 , 108 , 111 ] , [ 87 , 111 , 114 , 108 , 100 , 46 ] ] , [ ] , <nl> + [ [ 128516 ] ] ] , np . int32 ) <nl> + expected_value = [ [ u " Hello " . encode ( encoding ) , u " World . " . encode ( encoding ) ] , <nl> + [ ] , [ u " \ U0001f604 " . encode ( encoding ) ] ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertEqual ( unicode_encode_op . ragged_rank , 1 ) <nl> + self . assertAllEqual ( result . tolist ( ) , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def test4DimRaggedMatrix ( self , encoding ) : <nl> + test_value = ragged_factory_ops . constant ( <nl> + [ [ [ [ 72 , 101 , 108 , 108 , 111 ] , [ 87 , 111 , 114 , 108 , 100 ] ] ] , <nl> + [ [ [ ] ] , [ [ 72 , 121 , 112 , 101 ] ] ] ] , np . int32 ) <nl> + expected_value = [ [ [ u " Hello " . encode ( encoding ) , u " World " . encode ( encoding ) ] ] , <nl> + [ [ u " " . encode ( encoding ) ] , [ u " Hype " . encode ( encoding ) ] ] ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertEqual ( unicode_encode_op . ragged_rank , 2 ) <nl> + self . assertAllEqual ( result . 
tolist ( ) , expected_value ) <nl> + <nl> + @ parameterized . parameters ( " UTF - 8 " , " UTF - 16 - BE " , " UTF - 32 - BE " ) <nl> + def testRaggedMatrixWithMultiDimensionInnerValues ( self , encoding ) : <nl> + test_inner_values = constant_op . constant ( [ [ [ 72 , 101 , 108 , 108 , 111 ] , <nl> + [ 87 , 111 , 114 , 108 , 100 ] ] , <nl> + [ [ 102 , 105 , 120 , 101 , 100 ] , <nl> + [ 119 , 111 , 114 , 100 , 115 ] ] , <nl> + [ [ 72 , 121 , 112 , 101 , 114 ] , <nl> + [ 99 , 117 , 98 , 101 , 46 ] ] ] ) <nl> + test_row_splits = [ <nl> + constant_op . constant ( [ 0 , 2 , 3 ] , dtype = np . int64 ) , <nl> + constant_op . constant ( [ 0 , 1 , 1 , 3 ] , dtype = np . int64 ) <nl> + ] <nl> + test_value = ragged_factory_ops . from_nested_row_splits ( test_inner_values , <nl> + test_row_splits ) <nl> + expected_value = [ [ [ [ u " Hello " . encode ( encoding ) , u " World " . encode ( encoding ) ] ] , <nl> + [ ] ] , <nl> + [ [ [ u " fixed " . encode ( encoding ) , u " words " . encode ( encoding ) ] , <nl> + [ u " Hyper " . encode ( encoding ) , <nl> + u " cube . " . encode ( encoding ) ] ] ] ] <nl> + unicode_encode_op = ragged_string_ops . unicode_encode ( test_value , encoding ) <nl> + with self . cached_session ( ) : <nl> + result = unicode_encode_op . eval ( ) <nl> + self . assertEqual ( unicode_encode_op . ragged_rank , 2 ) <nl> + self . assertAllEqual ( result . tolist ( ) , expected_value ) <nl> + # These next two assertions don ' t necessarily need to be here as they test <nl> + # internal representations and we already verified the value is correct . <nl> + self . assertAllEqual ( len ( result . nested_row_splits ) , len ( test_row_splits ) ) <nl> + self . assertEqual ( unicode_encode_op . inner_values . shape . ndims , <nl> + test_inner_values . shape . ndims - 1 ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + test . 
main ( ) <nl> mmm a / tensorflow / python / ops / ragged / BUILD <nl> ppp b / tensorflow / python / ops / ragged / BUILD <nl> py_library ( <nl> " : ragged_map_ops " , <nl> " : ragged_math_ops " , <nl> " : ragged_operators " , <nl> + " : ragged_string_ops " , <nl> " : ragged_tensor " , <nl> " : ragged_tensor_shape " , <nl> " : ragged_tensor_value " , <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> + py_library ( <nl> + name = " ragged_string_ops " , <nl> + srcs = [ " ragged_string_ops . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : ragged_array_ops " , <nl> + " : ragged_conversion_ops " , <nl> + " : ragged_factory_ops " , <nl> + " : ragged_tensor " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> + ) <nl> + <nl> py_library ( <nl> name = " ragged_tensor " , <nl> srcs = [ " ragged_tensor . py " ] , <nl> mmm a / tensorflow / python / ops / ragged / __init__ . py <nl> ppp b / tensorflow / python / ops / ragged / __init__ . py <nl> class documentation . <nl> from __future__ import print_function <nl> <nl> from tensorflow . python . ops . ragged import ragged_operators <nl> + from tensorflow . python . ops . ragged import ragged_string_ops <nl> <nl> from tensorflow . python . ops . ragged . ragged_array_ops import batch_gather <nl> from tensorflow . python . ops . ragged . ragged_array_ops import boolean_mask <nl> new file mode 100644 <nl> index 0000000000000 . . cdcdbdff07b12 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / ops / ragged / ragged_string_ops . py <nl> <nl> + # Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Ragged operations for working with string Tensors . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import gen_string_ops <nl> + from tensorflow . python . ops . ragged import ragged_conversion_ops <nl> + from tensorflow . python . ops . ragged import ragged_factory_ops <nl> + from tensorflow . python . ops . ragged import ragged_tensor <nl> + from tensorflow . python . util . tf_export import tf_export <nl> + <nl> + <nl> + # pylint : disable = redefined - builtin <nl> + @ tf_export ( " strings . unicode_encode " ) <nl> + def unicode_encode ( input , output_encoding , errors = " replace " , <nl> + replacement_char = 65533 , name = None ) : <nl> + r " " " Encodes each sequence of Unicode code points in ` input ` into a string . <nl> + <nl> + ` result [ i1 . . . iN ] ` is the string formed by concatenating the Unicode <nl> + codepoints ` input [ 1 . . . iN , : ] ` , encoded using ` output_encoding ` . <nl> + <nl> + Args : <nl> + input : An ` N + 1 ` dimensional potentially ragged integer tensor with <nl> + shape ` [ D1 . . . DN , num_chars ] ` . 
<nl> + output_encoding : Unicode encoding that should be used to encode each <nl> + codepoint sequence . Can be ` " UTF - 8 " ` , ` " UTF - 16 - BE " ` , or ` " UTF - 32 - BE " ` . <nl> + errors : Specifies the response when an invalid codepoint is encountered <nl> + ( optional ) . One of : <nl> + * ` ' replace ' ` : Replace invalid codepoint with the <nl> + ` replacement_char ` . ( default ) <nl> + * ` ' ignore ' ` : Skip invalid codepoints . <nl> + * ` ' strict ' ` : Raise an exception for any invalid codepoint . <nl> + replacement_char : The replacement character codepoint to be used in place of <nl> + any invalid input when ` errors = ' replace ' ` . Any valid unicode codepoint may <nl> + be used . The default value is the default unicode replacement character <nl> + which is 0xFFFD ( U + 65533 ) . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + A ` N ` dimensional ` string ` tensor with shape ` [ D1 . . . DN ] ` . <nl> + <nl> + # # # # Example : <nl> + ` ` ` python <nl> + > > > input = [ [ 71 , 246 , 246 , 100 , 110 , 105 , 103 , 104 , 116 ] , [ 128522 ] ] <nl> + > > > unicode_encode ( input , ' UTF8 ' ) <nl> + [ ' G \ xc3 \ xb6 \ xc3 \ xb6dnight ' , ' \ xf0 \ x9f \ x98 \ x8a ' ] <nl> + ` ` ` <nl> + " " " <nl> + with ops . name_scope ( name , " UnicodeEncode " , [ input ] ) : <nl> + input_tensor = ragged_factory_ops . convert_to_tensor_or_ragged_tensor ( input ) <nl> + if input_tensor . shape . ndims is None : <nl> + raise ValueError ( " Rank of input_tensor must be statically known . " ) <nl> + if ragged_tensor . is_ragged ( input_tensor ) : <nl> + if input_tensor . inner_values . shape . ndims > 1 : <nl> + # If the inner_values of our ragged tensor is multi - dimensional , we can <nl> + # process it separately and our output will have the same nested splits <nl> + # as our input . <nl> + return input_tensor . with_inner_values ( <nl> + unicode_encode ( input_tensor . 
inner_values , output_encoding , errors , <nl> + replacement_char ) ) <nl> + elif input_tensor . ragged_rank > 1 : <nl> + # Recursively process the values of the ragged tensor . <nl> + return input_tensor . with_values ( <nl> + unicode_encode ( input_tensor . values , output_encoding , errors , <nl> + replacement_char ) ) <nl> + else : <nl> + # Our ragged tensor is of the correct shape ( rank 1 inner_values tensor <nl> + # with ragged_rank of 1 ) so we can process it as normal . <nl> + return gen_string_ops . unicode_encode ( <nl> + input_values = input_tensor . values , <nl> + input_splits = input_tensor . row_splits , <nl> + output_encoding = output_encoding , <nl> + errors = errors , <nl> + replacement_char = replacement_char ) <nl> + else : <nl> + if input_tensor . shape . ndims = = 2 : <nl> + # The input tensor is of the correct 2 - D shape , it ' s just not ragged . <nl> + return unicode_encode ( ragged_conversion_ops . from_tensor ( input_tensor ) , <nl> + output_encoding , errors , replacement_char ) <nl> + elif input_tensor . shape . ndims > 2 : <nl> + # We need to initially flatten the input tensor to 2 - D , and then can <nl> + # reshape the output of our processed flattened tensor . <nl> + flat_input_tensor = array_ops . reshape ( <nl> + input_tensor , <nl> + array_ops . stack ( [ - 1 , array_ops . shape ( input_tensor ) [ - 1 ] ] ) ) <nl> + flat_output_tensor = unicode_encode ( flat_input_tensor , output_encoding , <nl> + errors , replacement_char ) <nl> + return array_ops . reshape ( flat_output_tensor , input_tensor . shape [ : - 1 ] ) <nl> + elif input_tensor . shape . ndims = = 0 : <nl> + raise ValueError ( " input_tensor ' s rank must be at least 1 . " ) <nl> + else : <nl> + # Our input tensor is rank 1 , so we create a ragged tensor with an added <nl> + # dimension to create the correct input shape & type , and then remove <nl> + # the additional dimension from the output and return the string scalar . 
<nl> + ragged_input_tensor = ragged_factory_ops . from_row_splits ( <nl> + input_tensor , <nl> + array_ops . stack ( [ 0 , array_ops . shape ( input_tensor , <nl> + out_type = dtypes . int64 ) [ 0 ] ] ) ) <nl> + output_tensor = unicode_encode ( ragged_input_tensor , output_encoding , <nl> + errors , replacement_char ) <nl> + return array_ops . reshape ( output_tensor , [ ] ) <nl> mmm a / tensorflow / tools / api / golden / v1 / tensorflow . strings . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . strings . pbtxt <nl> tf_module { <nl> name : " to_number " <nl> argspec : " args = [ \ ' string_tensor \ ' , \ ' out_type \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' float32 \ ' > \ " , \ ' None \ ' ] , " <nl> } <nl> + member_method { <nl> + name : " unicode_encode " <nl> + argspec : " args = [ \ ' input \ ' , \ ' output_encoding \ ' , \ ' errors \ ' , \ ' replacement_char \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ ' replace \ ' , \ ' 65533 \ ' , \ ' None \ ' ] , " <nl> + } <nl> member_method { <nl> name : " unicode_script " <nl> argspec : " args = [ \ ' input \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . strings . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . strings . 
pbtxt <nl> tf_module { <nl> name : " to_number " <nl> argspec : " args = [ \ ' input \ ' , \ ' out_type \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' float32 \ ' > \ " , \ ' None \ ' ] , " <nl> } <nl> + member_method { <nl> + name : " unicode_encode " <nl> + argspec : " args = [ \ ' input \ ' , \ ' output_encoding \ ' , \ ' errors \ ' , \ ' replacement_char \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ ' replace \ ' , \ ' 65533 \ ' , \ ' None \ ' ] , " <nl> + } <nl> member_method { <nl> name : " unicode_script " <nl> argspec : " args = [ \ ' input \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl>
Add new unicode_encode op , which encodes integer codepoints into the desired unicode formatted string .
tensorflow/tensorflow
f1668a718e287b1178f3a3903602d52a2820607e
2018-11-29T19:59:25Z
mmm a / contrib / Python / doc / gettingstarted . rst <nl> ppp b / contrib / Python / doc / gettingstarted . rst <nl> to persist . <nl> <nl> <nl> <nl> - <nl> Operators <nl> mmmmmmmmm - <nl> <nl>
Restructuring
microsoft/CNTK
0bf56a41e79d51f9c7d281daa39dbcf68d6000f7
2016-04-29T10:44:52Z
mmm a / tensorflow / python / compat / compat . py <nl> ppp b / tensorflow / python / compat / compat . py <nl> <nl> # This value changes every day with an automatic CL . It can be modified in code <nl> # via ` forward_compatibility_horizon ( ) ` or with the environment variable <nl> # TF_FORWARD_COMPATIBILITY_DELTA_DAYS , which is added to the compatibility date . <nl> - _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 10 , 5 ) <nl> + _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 10 , 6 ) <nl> _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = " TF_FORWARD_COMPATIBILITY_DELTA_DAYS " <nl> _FORWARD_COMPATIBILITY_DATE_NUMBER = None <nl> <nl>
compat : Update forward compatibility horizon to 2020 - 10 - 06
tensorflow/tensorflow
6f518ec19da38ebab5e3c2b1df1cd8f353f00c71
2020-10-06T09:06:13Z
mmm a / lib / Sema / CSApply . cpp <nl> ppp b / lib / Sema / CSApply . cpp <nl> Solution : : convertBooleanTypeToBuiltinI1 ( Expr * expr , <nl> <nl> auto type = cs . getType ( expr ) ; <nl> <nl> + / / We allow UnresolvedType < c $ T for all $ T , so we might end up here <nl> + / / in diagnostics . Just bail out . <nl> + if ( type - > is < UnresolvedType > ( ) ) <nl> + return expr ; <nl> + <nl> / / Look for the builtin name . If we don ' t have it , we need to call the <nl> / / general name via the witness table . <nl> NameLookupOptions lookupOptions = defaultMemberLookupOptions ; <nl> mmm a / test / Constraints / if_expr . swift <nl> ppp b / test / Constraints / if_expr . swift <nl> let ib : Bool ! = false <nl> let eb : Bool ? = . some ( false ) <nl> let conditional = ib ? " Broken " : " Heart " / / should infer Bool ! <nl> let conditional = eb ? " Broken " : " Heart " / / expected - error { { value of optional type ' Bool ? ' not unwrapped ; did you mean to use ' ! ' or ' ? ' ? } } <nl> + <nl> + / / < rdar : / / problem / 39586166 > - crash when IfExpr has UnresolvedType in condition <nl> + struct Delegate { <nl> + var shellTasks : [ ShellTask ] <nl> + } <nl> + <nl> + extension Array { <nl> + subscript ( safe safe : Int ) - > Element ? { / / expected - note { { found this candidate } } <nl> + get { } <nl> + set { } <nl> + } <nl> + } <nl> + <nl> + struct ShellTask { <nl> + var commandLine : [ String ] <nl> + } <nl> + <nl> + let delegate = Delegate ( shellTasks : [ ] ) <nl> + _ = delegate . shellTasks [ safe : 0 ] ? . commandLine . compactMap ( { $ 0 . asString . hasPrefix ( " " ) ? $ 0 : nil } ) . count ? ? 0 <nl> + / / expected - error @ - 1 { { ambiguous reference to member ' subscript ' } } <nl> + <nl> + / / FIXME : Horrible diagnostic , but at least we no longer crash <nl>
Merge remote - tracking branch ' origin / master ' into master - next
apple/swift
e846620893d214b16ae1287e28b8b4077198f728
2018-04-26T06:29:12Z
mmm a / modules / imgproc / include / opencv2 / imgproc . hpp <nl> ppp b / modules / imgproc / include / opencv2 / imgproc . hpp <nl> computed by stereoRectify can be passed here . If the matrix is empty , the identi <nl> is assumed . In cvInitUndistortMap R assumed to be an identity matrix . <nl> @ param newCameraMatrix New camera matrix \ f $ A ' = \ vecthreethree { f_x ' } { 0 } { c_x ' } { 0 } { f_y ' } { c_y ' } { 0 } { 0 } { 1 } \ f $ . <nl> @ param size Undistorted image size . <nl> - @ param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 , see cv : : convertMaps <nl> + @ param m1type Type of the first output map that can be CV_32FC1 , CV_32FC2 or CV_16SC2 , see cv : : convertMaps <nl> @ param map1 The first output map . <nl> @ param map2 The second output map . <nl> * / <nl>
initUndistortRectifyMap : CV_32FC2 is also supported as m1type
opencv/opencv
40686b5e87a1a4d2b96c1a09d249cf58fc819d7d
2016-12-20T09:22:32Z
mmm a / BUILD <nl> ppp b / BUILD <nl> grpc_cc_library ( <nl> <nl> grpc_cc_library ( <nl> name = " grpc + + _test " , <nl> + srcs = [ <nl> + " src / cpp / client / channel_test_peer . cc " , <nl> + ] , <nl> public_hdrs = [ <nl> " include / grpc + + / test / mock_stream . h " , <nl> " include / grpc + + / test / server_context_test_spouse . h " , <nl> + " include / grpcpp / test / channel_test_peer . h " , <nl> + " include / grpcpp / test / default_reactor_test_peer . h " , <nl> " include / grpcpp / test / mock_stream . h " , <nl> " include / grpcpp / test / server_context_test_spouse . h " , <nl> - " include / grpcpp / test / default_reactor_test_peer . h " , <nl> ] , <nl> deps = [ <nl> " : grpc + + " , <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> add_executable ( end2end_test <nl> $ { _gRPC_PROTO_GENS_DIR } / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> $ { _gRPC_PROTO_GENS_DIR } / src / proto / grpc / testing / simple_messages . pb . h <nl> $ { _gRPC_PROTO_GENS_DIR } / src / proto / grpc / testing / simple_messages . grpc . pb . h <nl> + src / cpp / client / channel_test_peer . cc <nl> test / cpp / end2end / end2end_test . cc <nl> test / cpp / end2end / interceptors_util . cc <nl> test / cpp / end2end / test_service_impl . cc <nl> add_executable ( mock_test <nl> $ { _gRPC_PROTO_GENS_DIR } / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> $ { _gRPC_PROTO_GENS_DIR } / src / proto / grpc / testing / simple_messages . pb . h <nl> $ { _gRPC_PROTO_GENS_DIR } / src / proto / grpc / testing / simple_messages . grpc . pb . h <nl> + src / cpp / client / channel_test_peer . cc <nl> test / cpp / end2end / mock_test . cc <nl> third_party / googletest / googletest / src / gtest - all . cc <nl> third_party / googletest / googlemock / src / gmock - all . cc <nl> endif ( ) <nl> if ( gRPC_BUILD_TESTS ) <nl> <nl> add_executable ( server_context_test_spouse_test <nl> + src / cpp / client / channel_test_peer . 
cc <nl> test / cpp / test / server_context_test_spouse_test . cc <nl> third_party / googletest / googletest / src / gtest - all . cc <nl> third_party / googletest / googlemock / src / gmock - all . cc <nl> mmm a / Makefile <nl> ppp b / Makefile <nl> END2END_TEST_SRC = \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc \ <nl> + src / cpp / client / channel_test_peer . cc \ <nl> test / cpp / end2end / end2end_test . cc \ <nl> test / cpp / end2end / interceptors_util . cc \ <nl> test / cpp / end2end / test_service_impl . cc \ <nl> $ ( OBJDIR ) / $ ( CONFIG ) / src / proto / grpc / testing / echo_messages . o : $ ( LIBDIR ) / $ ( CONFIG ) <nl> <nl> $ ( OBJDIR ) / $ ( CONFIG ) / src / proto / grpc / testing / simple_messages . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> <nl> + $ ( OBJDIR ) / $ ( CONFIG ) / src / cpp / client / channel_test_peer . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> + <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / end2end / end2end_test . 
o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / end2end / interceptors_util . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> ifneq ( $ ( NO_DEPS ) , true ) <nl> - include $ ( END2END_TEST_OBJS : . o = . dep ) <nl> endif <nl> endif <nl> + $ ( OBJDIR ) / $ ( CONFIG ) / src / cpp / client / channel_test_peer . o : $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / end2end / end2end_test . o : $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . 
cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / end2end / interceptors_util . o : $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / end2end / test_service_impl . o : $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> MOCK_TEST_SRC = \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . 
cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc \ <nl> + src / cpp / client / channel_test_peer . cc \ <nl> test / cpp / end2end / mock_test . cc \ <nl> <nl> MOCK_TEST_OBJS = $ ( addprefix $ ( OBJDIR ) / $ ( CONFIG ) / , $ ( addsuffix . o , $ ( basename $ ( MOCK_TEST_SRC ) ) ) ) <nl> $ ( OBJDIR ) / $ ( CONFIG ) / src / proto / grpc / testing / echo_messages . o : $ ( LIBDIR ) / $ ( CONFIG ) <nl> <nl> $ ( OBJDIR ) / $ ( CONFIG ) / src / proto / grpc / testing / simple_messages . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> <nl> + $ ( OBJDIR ) / $ ( CONFIG ) / src / cpp / client / channel_test_peer . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> + <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / end2end / mock_test . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> <nl> deps_mock_test : $ ( MOCK_TEST_OBJS : . o = . dep ) <nl> ifneq ( $ ( NO_DEPS ) , true ) <nl> - include $ ( MOCK_TEST_OBJS : . o = . dep ) <nl> endif <nl> endif <nl> + $ ( OBJDIR ) / $ ( CONFIG ) / src / cpp / client / channel_test_peer . 
o : $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / end2end / mock_test . o : $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / echo_messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / simple_messages . grpc . pb . cc <nl> <nl> <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / server / server_builder_with_socket_mutator_test . o : $ <nl> <nl> <nl> SERVER_CONTEXT_TEST_SPOUSE_TEST_SRC = \ <nl> + src / cpp / client / channel_test_peer . cc \ <nl> test / cpp / test / server_context_test_spouse_test . cc \ <nl> <nl> SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS = $ ( addprefix $ ( OBJDIR ) / $ ( CONFIG ) / , $ ( addsuffix . o , $ ( basename $ ( SERVER_CONTEXT_TEST_SPOUSE_TEST_SRC ) ) ) ) <nl> endif <nl> <nl> endif <nl> <nl> + $ ( OBJDIR ) / $ ( CONFIG ) / src / cpp / client / channel_test_peer . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . 
a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> + <nl> $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / test / server_context_test_spouse_test . o : $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + _test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc_test_util . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc + + . a $ ( LIBDIR ) / $ ( CONFIG ) / libgrpc . a $ ( LIBDIR ) / $ ( CONFIG ) / libgpr . a $ ( LIBDIR ) / $ ( CONFIG ) / libaddress_sorting . a $ ( LIBDIR ) / $ ( CONFIG ) / libupb . a <nl> <nl> deps_server_context_test_spouse_test : $ ( SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS : . o = . dep ) <nl> mmm a / build_autogenerated . yaml <nl> ppp b / build_autogenerated . yaml <nl> targets : <nl> - src / proto / grpc / testing / echo . proto <nl> - src / proto / grpc / testing / echo_messages . proto <nl> - src / proto / grpc / testing / simple_messages . proto <nl> + - src / cpp / client / channel_test_peer . cc <nl> - test / cpp / end2end / end2end_test . cc <nl> - test / cpp / end2end / interceptors_util . cc <nl> - test / cpp / end2end / test_service_impl . cc <nl> targets : <nl> - src / proto / grpc / testing / echo . proto <nl> - src / proto / grpc / testing / echo_messages . proto <nl> - src / proto / grpc / testing / simple_messages . proto <nl> + - src / cpp / client / channel_test_peer . cc <nl> - test / cpp / end2end / mock_test . cc <nl> deps : <nl> - grpc + + _test_util <nl> targets : <nl> language : c + + <nl> headers : [ ] <nl> src : <nl> + - src / cpp / client / channel_test_peer . cc <nl> - test / cpp / test / server_context_test_spouse_test . cc <nl> deps : <nl> - grpc + + _test_util <nl> mmm a / include / grpc / grpc . h <nl> ppp b / include / grpc / grpc . 
h <nl> GRPCAPI void grpc_channel_ping ( grpc_channel * channel , grpc_completion_queue * cq , <nl> void * tag , void * reserved ) ; <nl> <nl> / * * Pre - register a method / host pair on a channel . <nl> - method and host are not owned and must remain alive while the server is <nl> - running . * / <nl> + method and host are not owned and must remain alive while the channel is <nl> + alive . * / <nl> GRPCAPI void * grpc_channel_register_call ( grpc_channel * channel , <nl> const char * method , const char * host , <nl> void * reserved ) ; <nl> mmm a / include / grpcpp / channel_impl . h <nl> ppp b / include / grpcpp / channel_impl . h <nl> <nl> struct grpc_channel ; <nl> <nl> namespace grpc { <nl> + namespace testing { <nl> + class ChannelTestPeer ; <nl> + } / / namespace testing <nl> <nl> std : : shared_ptr < : : grpc_impl : : Channel > CreateChannelInternal ( <nl> const grpc : : string & host , grpc_channel * c_channel , <nl> class Channel final : public : : grpc : : ChannelInterface , <nl> private : <nl> template < class InputMessage , class OutputMessage > <nl> friend class : : grpc : : internal : : BlockingUnaryCallImpl ; <nl> + friend class : : grpc : : testing : : ChannelTestPeer ; <nl> friend void experimental : : ChannelResetConnectionBackoff ( Channel * channel ) ; <nl> friend std : : shared_ptr < Channel > grpc : : CreateChannelInternal ( <nl> const grpc : : string & host , grpc_channel * c_channel , <nl> new file mode 100644 <nl> index 00000000000 . . e41bbfa4604 <nl> mmm / dev / null <nl> ppp b / include / grpcpp / test / channel_test_peer . h <nl> <nl> + / * <nl> + * <nl> + * Copyright 2020 gRPC authors . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * <nl> + * / <nl> + <nl> + # ifndef GRPCPP_TEST_CHANNEL_TEST_PEER_H <nl> + # define GRPCPP_TEST_CHANNEL_TEST_PEER_H <nl> + <nl> + # include < grpcpp / channel . h > <nl> + <nl> + namespace grpc { <nl> + namespace testing { <nl> + <nl> + / / / A test - only class to access private members of Channel . <nl> + class ChannelTestPeer { <nl> + public : <nl> + explicit ChannelTestPeer ( Channel * channel ) : channel_ ( channel ) { } <nl> + <nl> + / / / Provide the gRPC Core channel <nl> + grpc_channel * channel ( ) const { return channel_ - > c_channel_ ; } <nl> + int registered_calls ( ) const ; <nl> + int registration_attempts ( ) const ; <nl> + <nl> + private : <nl> + Channel * channel_ ; / / not owned <nl> + } ; <nl> + <nl> + } / / namespace testing <nl> + } / / namespace grpc <nl> + <nl> + # endif / / GRPCPP_TEST_CHANNEL_TEST_PEER_H <nl> mmm a / src / core / lib / surface / channel . cc <nl> ppp b / src / core / lib / surface / channel . cc <nl> <nl> * ( OK , Cancelled , Unknown ) . 
* / <nl> # define NUM_CACHED_STATUS_ELEMS 3 <nl> <nl> - typedef struct registered_call { <nl> - grpc_mdelem path ; <nl> - grpc_mdelem authority ; <nl> - struct registered_call * next ; <nl> - } registered_call ; <nl> - <nl> static void destroy_channel ( void * arg , grpc_error * error ) ; <nl> <nl> grpc_channel * grpc_channel_create_with_builder ( <nl> grpc_channel * grpc_channel_create_with_builder ( <nl> channel - > target = target ; <nl> channel - > resource_user = resource_user ; <nl> channel - > is_client = grpc_channel_stack_type_is_client ( channel_stack_type ) ; <nl> - gpr_mu_init ( & channel - > registered_call_mu ) ; <nl> - channel - > registered_calls = nullptr ; <nl> + channel - > registration_table . Init ( ) ; <nl> <nl> gpr_atm_no_barrier_store ( <nl> & channel - > call_size_estimate , <nl> grpc_call * grpc_channel_create_pollset_set_call ( <nl> deadline ) ; <nl> } <nl> <nl> + namespace grpc_core { <nl> + <nl> + RegisteredCall : : RegisteredCall ( const char * method , const char * host ) { <nl> + path = grpc_mdelem_from_slices ( GRPC_MDSTR_PATH , <nl> + grpc_core : : ExternallyManagedSlice ( method ) ) ; <nl> + authority = <nl> + host ? grpc_mdelem_from_slices ( GRPC_MDSTR_AUTHORITY , <nl> + grpc_core : : ExternallyManagedSlice ( host ) ) <nl> + : GRPC_MDNULL ; <nl> + } <nl> + <nl> + / / TODO ( vjpai ) : Delete copy - constructor when allowed by all supported compilers . <nl> + RegisteredCall : : RegisteredCall ( const RegisteredCall & other ) { <nl> + path = other . path ; <nl> + authority = other . authority ; <nl> + GRPC_MDELEM_REF ( path ) ; <nl> + GRPC_MDELEM_REF ( authority ) ; <nl> + } <nl> + <nl> + RegisteredCall : : RegisteredCall ( RegisteredCall & & other ) { <nl> + path = other . path ; <nl> + authority = other . authority ; <nl> + other . path = GRPC_MDNULL ; <nl> + other . 
authority = GRPC_MDNULL ; <nl> + } <nl> + <nl> + RegisteredCall : : ~ RegisteredCall ( ) { <nl> + GRPC_MDELEM_UNREF ( path ) ; <nl> + GRPC_MDELEM_UNREF ( authority ) ; <nl> + } <nl> + <nl> + } / / namespace grpc_core <nl> + <nl> void * grpc_channel_register_call ( grpc_channel * channel , const char * method , <nl> const char * host , void * reserved ) { <nl> - registered_call * rc = <nl> - static_cast < registered_call * > ( gpr_malloc ( sizeof ( registered_call ) ) ) ; <nl> GRPC_API_TRACE ( <nl> " grpc_channel_register_call ( channel = % p , method = % s , host = % s , reserved = % p ) " , <nl> 4 , ( channel , method , host , reserved ) ) ; <nl> GPR_ASSERT ( ! reserved ) ; <nl> grpc_core : : ExecCtx exec_ctx ; <nl> <nl> - rc - > path = grpc_mdelem_from_slices ( GRPC_MDSTR_PATH , <nl> - grpc_core : : ExternallyManagedSlice ( method ) ) ; <nl> - rc - > authority = <nl> - host ? grpc_mdelem_from_slices ( GRPC_MDSTR_AUTHORITY , <nl> - grpc_core : : ExternallyManagedSlice ( host ) ) <nl> - : GRPC_MDNULL ; <nl> - gpr_mu_lock ( & channel - > registered_call_mu ) ; <nl> - rc - > next = channel - > registered_calls ; <nl> - channel - > registered_calls = rc ; <nl> - gpr_mu_unlock ( & channel - > registered_call_mu ) ; <nl> - <nl> - return rc ; <nl> + grpc_core : : MutexLock lock ( & channel - > registration_table - > mu ) ; <nl> + channel - > registration_table - > method_registration_attempts + + ; <nl> + auto key = std : : make_pair ( host , method ) ; <nl> + auto rc_posn = channel - > registration_table - > map . find ( key ) ; <nl> + if ( rc_posn ! = channel - > registration_table - > map . end ( ) ) { <nl> + return & rc_posn - > second ; <nl> + } <nl> + auto insertion_result = channel - > registration_table - > map . insert ( <nl> + { key , grpc_core : : RegisteredCall ( method , host ) } ) ; <nl> + return & insertion_result . 
first - > second ; <nl> } <nl> <nl> grpc_call * grpc_channel_create_registered_call ( <nl> grpc_channel * channel , grpc_call * parent_call , uint32_t propagation_mask , <nl> grpc_completion_queue * completion_queue , void * registered_call_handle , <nl> gpr_timespec deadline , void * reserved ) { <nl> - registered_call * rc = static_cast < registered_call * > ( registered_call_handle ) ; <nl> + grpc_core : : RegisteredCall * rc = <nl> + static_cast < grpc_core : : RegisteredCall * > ( registered_call_handle ) ; <nl> GRPC_API_TRACE ( <nl> " grpc_channel_create_registered_call ( " <nl> " channel = % p , parent_call = % p , propagation_mask = % x , completion_queue = % p , " <nl> static void destroy_channel ( void * arg , grpc_error * / * error * / ) { <nl> channel - > channelz_node . reset ( ) ; <nl> } <nl> grpc_channel_stack_destroy ( CHANNEL_STACK_FROM_CHANNEL ( channel ) ) ; <nl> - while ( channel - > registered_calls ) { <nl> - registered_call * rc = channel - > registered_calls ; <nl> - channel - > registered_calls = rc - > next ; <nl> - GRPC_MDELEM_UNREF ( rc - > path ) ; <nl> - GRPC_MDELEM_UNREF ( rc - > authority ) ; <nl> - gpr_free ( rc ) ; <nl> - } <nl> + channel - > registration_table . Destroy ( ) ; <nl> if ( channel - > resource_user ! = nullptr ) { <nl> grpc_resource_user_free ( channel - > resource_user , <nl> GRPC_RESOURCE_QUOTA_CHANNEL_SIZE ) ; <nl> } <nl> - gpr_mu_destroy ( & channel - > registered_call_mu ) ; <nl> gpr_free ( channel - > target ) ; <nl> gpr_free ( channel ) ; <nl> / / See comment in grpc_channel_create ( ) for why we do this . <nl> mmm a / src / core / lib / surface / channel . h <nl> ppp b / src / core / lib / surface / channel . h <nl> <nl> <nl> # include < grpc / support / port_platform . h > <nl> <nl> + # include < map > <nl> + <nl> # include " src / core / lib / channel / channel_stack . h " <nl> # include " src / core / lib / channel / channel_stack_builder . h " <nl> # include " src / core / lib / channel / channelz . 
h " <nl> + # include " src / core / lib / gprpp / manual_constructor . h " <nl> # include " src / core / lib / surface / channel_stack_type . h " <nl> + # include " src / core / lib / transport / metadata . h " <nl> <nl> grpc_channel * grpc_channel_create ( const char * target , <nl> const grpc_channel_args * args , <nl> grpc_core : : channelz : : ChannelNode * grpc_channel_get_channelz_node ( <nl> size_t grpc_channel_get_call_size_estimate ( grpc_channel * channel ) ; <nl> void grpc_channel_update_call_size_estimate ( grpc_channel * channel , size_t size ) ; <nl> <nl> - struct registered_call ; <nl> + namespace grpc_core { <nl> + <nl> + struct RegisteredCall { <nl> + grpc_mdelem path ; <nl> + grpc_mdelem authority ; <nl> + <nl> + explicit RegisteredCall ( const char * method , const char * host ) ; <nl> + / / TODO ( vjpai ) : delete copy constructor once all supported compilers allow <nl> + / / std : : map value_type to be MoveConstructible . <nl> + RegisteredCall ( const RegisteredCall & other ) ; <nl> + RegisteredCall ( RegisteredCall & & other ) ; <nl> + <nl> + ~ RegisteredCall ( ) ; <nl> + } ; <nl> + <nl> + struct CallRegistrationTable { <nl> + grpc_core : : Mutex mu ; <nl> + std : : map < std : : pair < const char * , const char * > , RegisteredCall > <nl> + map / * GUARDED_BY ( mu ) * / ; <nl> + int method_registration_attempts / * GUARDED_BY ( mu ) * / = 0 ; <nl> + } ; <nl> + <nl> + } / / namespace grpc_core <nl> + <nl> struct grpc_channel { <nl> int is_client ; <nl> grpc_compression_options compression_options ; <nl> struct grpc_channel { <nl> gpr_atm call_size_estimate ; <nl> grpc_resource_user * resource_user ; <nl> <nl> - gpr_mu registered_call_mu ; <nl> - registered_call * registered_calls ; <nl> - <nl> + / / TODO ( vjpai ) : Once the grpc_channel is allocated via new rather than malloc , <nl> + / / expand the members of the CallRegistrationTable directly into <nl> + / / the grpc_channel . 
For now it is kept separate so that all the <nl> + / / manual constructing can be done with a single call rather than <nl> + / / a separate manual construction for each field . <nl> + grpc_core : : ManualConstructor < grpc_core : : CallRegistrationTable > <nl> + registration_table ; <nl> grpc_core : : RefCountedPtr < grpc_core : : channelz : : ChannelNode > channelz_node ; <nl> <nl> char * target ; <nl> new file mode 100644 <nl> index 00000000000 . . f921352a24a <nl> mmm / dev / null <nl> ppp b / src / cpp / client / channel_test_peer . cc <nl> <nl> + / * <nl> + * <nl> + * Copyright 2020 gRPC authors . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * <nl> + * / <nl> + <nl> + # include < grpcpp / test / channel_test_peer . h > <nl> + <nl> + # include < grpc / support / sync . h > <nl> + # include < grpcpp / channel . h > <nl> + <nl> + # include " src / core / lib / surface / channel . h " <nl> + <nl> + namespace grpc { <nl> + namespace testing { <nl> + <nl> + int ChannelTestPeer : : registered_calls ( ) const { <nl> + grpc_core : : MutexLock lock ( & channel_ - > c_channel_ - > registration_table - > mu ) ; <nl> + return static_cast < int > ( channel_ - > c_channel_ - > registration_table - > map . 
size ( ) ) ; <nl> + } <nl> + <nl> + int ChannelTestPeer : : registration_attempts ( ) const { <nl> + grpc_core : : MutexLock lock ( & channel_ - > c_channel_ - > registration_table - > mu ) ; <nl> + return channel_ - > c_channel_ - > registration_table - > method_registration_attempts ; <nl> + } <nl> + <nl> + } / / namespace testing <nl> + } / / namespace grpc <nl> mmm a / test / core / end2end / tests / disappearing_server . cc <nl> ppp b / test / core / end2end / tests / disappearing_server . cc <nl> static void do_request_and_shutdown_server ( grpc_end2end_test_config / * config * / , <nl> CQ_EXPECT_COMPLETION ( cqv , tag ( 1 ) , 1 ) ; <nl> CQ_EXPECT_COMPLETION ( cqv , tag ( 1000 ) , 1 ) ; <nl> cq_verify ( cqv ) ; <nl> + / * Please refer https : / / github . com / grpc / grpc / issues / 21221 for additional <nl> + * details . <nl> + * TODO ( yashykt @ ) - The following line should be removeable after C - Core <nl> + * correctly handles GOAWAY frames . Internal Reference b / 135458602 . If this <nl> + * test remains flaky even after this , an alternative fix would be to send a <nl> + * request when the server is in the shut down state . <nl> + * / <nl> + cq_verify_empty ( cqv ) ; <nl> <nl> GPR_ASSERT ( status = = GRPC_STATUS_UNIMPLEMENTED ) ; <nl> GPR_ASSERT ( 0 = = grpc_slice_str_cmp ( details , " xyz " ) ) ; <nl> mmm a / test / cpp / end2end / BUILD <nl> ppp b / test / cpp / end2end / BUILD <nl> grpc_cc_library ( <nl> " / / : gpr " , <nl> " / / : grpc " , <nl> " / / : grpc + + " , <nl> + " / / : grpc + + _test " , <nl> " / / src / proto / grpc / testing : echo_messages_proto " , <nl> " / / src / proto / grpc / testing : echo_proto " , <nl> " / / src / proto / grpc / testing / duplicate : echo_duplicate_proto " , <nl> mmm a / test / cpp / end2end / end2end_test . cc <nl> ppp b / test / cpp / end2end / end2end_test . cc <nl> <nl> # include < grpcpp / server_builder . h > <nl> # include < grpcpp / server_context . 
h > <nl> # include < grpcpp / support / string_ref . h > <nl> + # include < grpcpp / test / channel_test_peer . h > <nl> <nl> # include < mutex > <nl> # include < thread > <nl> TEST_P ( End2endTest , MultipleRpcs ) { <nl> } <nl> } <nl> <nl> + TEST_P ( End2endTest , ManyStubs ) { <nl> + MAYBE_SKIP_TEST ; <nl> + ResetStub ( ) ; <nl> + ChannelTestPeer peer ( channel_ . get ( ) ) ; <nl> + int registered_calls_pre = peer . registered_calls ( ) ; <nl> + int registration_attempts_pre = peer . registration_attempts ( ) ; <nl> + for ( int i = 0 ; i < 1000 ; + + i ) { <nl> + grpc : : testing : : EchoTestService : : NewStub ( channel_ ) ; <nl> + } <nl> + EXPECT_EQ ( peer . registered_calls ( ) , registered_calls_pre ) ; <nl> + EXPECT_GT ( peer . registration_attempts ( ) , registration_attempts_pre ) ; <nl> + } <nl> + <nl> TEST_P ( End2endTest , EmptyBinaryMetadata ) { <nl> MAYBE_SKIP_TEST ; <nl> ResetStub ( ) ; <nl> mmm a / tools / internal_ci / linux / grpc_xds . cfg <nl> ppp b / tools / internal_ci / linux / grpc_xds . cfg <nl> <nl> # Config file for the internal CI ( in protobuf text format ) <nl> <nl> # Location of the continuous shell script in repository . <nl> - build_file : " grpc / tools / internal_ci / linux / grpc_xds . sh " <nl> + build_file : " grpc / tools / internal_ci / linux / grpc_bazel . sh " <nl> timeout_mins : 60 <nl> + env_vars { <nl> + key : " BAZEL_SCRIPT " <nl> + value : " tools / internal_ci / linux / grpc_xds_bazel_test_in_docker . sh " <nl> + } <nl> deleted file mode 100755 <nl> index 2e64455c9b2 . . 00000000000 <nl> mmm a / tools / internal_ci / linux / grpc_xds . sh <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env bash <nl> - # Copyright 2020 gRPC authors . <nl> - # <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # <nl> - # http : / / www . apache . 
org / licenses / LICENSE - 2 . 0 <nl> - # <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - <nl> - set - ex <nl> - <nl> - # change to grpc repo root <nl> - cd $ ( dirname $ 0 ) / . . / . . / . . <nl> - <nl> - tools / run_tests / helper_scripts / prep_xds . sh <nl> - python3 tools / run_tests / run_xds_tests . py \ <nl> - - - test_case = all \ <nl> - - - project_id = grpc - testing \ <nl> - - - gcp_suffix = $ ( date ' + % s ' ) \ <nl> - - - verbose \ <nl> - - - client_cmd = ' bazel run test / cpp / interop : xds_interop_client - - - - server = xds - experimental : / / / { service_host } : { service_port } - - stats_port = { stats_port } - - qps = { qps } ' <nl> new file mode 100755 <nl> index 00000000000 . . abf2cb1462c <nl> mmm / dev / null <nl> ppp b / tools / internal_ci / linux / grpc_xds_bazel_test_in_docker . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + # Copyright 2020 gRPC authors . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . 
<nl> + <nl> + set - ex - o igncr | | set - ex <nl> + <nl> + mkdir - p / var / local / git <nl> + git clone / var / local / jenkins / grpc / var / local / git / grpc <nl> + ( cd / var / local / jenkins / grpc / & & git submodule foreach ' cd / var / local / git / grpc \ <nl> + & & git submodule update - - init - - reference / var / local / jenkins / grpc / $ { name } \ <nl> + $ { name } ' ) <nl> + cd / var / local / git / grpc <nl> + <nl> + VIRTUAL_ENV = $ ( mktemp - d ) <nl> + virtualenv " $ VIRTUAL_ENV " <nl> + PYTHON = " $ VIRTUAL_ENV " / bin / python <nl> + " $ PYTHON " - m pip install - - upgrade grpcio grpcio - tools google - api - python - client google - auth - httplib2 oauth2client <nl> + <nl> + # Prepare generated Python code . <nl> + TOOLS_DIR = tools / run_tests <nl> + PROTO_SOURCE_DIR = src / proto / grpc / testing <nl> + PROTO_DEST_DIR = " $ TOOLS_DIR " / " $ PROTO_SOURCE_DIR " <nl> + mkdir - p " $ PROTO_DEST_DIR " <nl> + touch " $ TOOLS_DIR " / src / __init__ . py <nl> + touch " $ TOOLS_DIR " / src / proto / __init__ . py <nl> + touch " $ TOOLS_DIR " / src / proto / grpc / __init__ . py <nl> + touch " $ TOOLS_DIR " / src / proto / grpc / testing / __init__ . py <nl> + <nl> + " $ PYTHON " - m grpc_tools . protoc \ <nl> + - - proto_path = . \ <nl> + - - python_out = " $ TOOLS_DIR " \ <nl> + - - grpc_python_out = " $ TOOLS_DIR " \ <nl> + " $ PROTO_SOURCE_DIR " / test . proto \ <nl> + " $ PROTO_SOURCE_DIR " / messages . proto \ <nl> + " $ PROTO_SOURCE_DIR " / empty . proto <nl> + <nl> + " $ PYTHON " tools / run_tests / run_xds_tests . py \ <nl> + - - test_case = all \ <nl> + - - project_id = grpc - testing \ <nl> + - - gcp_suffix = $ ( date ' + % s ' ) \ <nl> + - - verbose \ <nl> + - - client_cmd = ' bazel run test / cpp / interop : xds_interop_client - - - - server = xds - experimental : / / / { service_host } : { service_port } - - stats_port = { stats_port } - - qps = { qps } ' <nl>
Merge from HEAD
grpc/grpc
1d47046807e89f237e918173d43e72981d6daeae
2020-02-27T19:00:28Z
mmm a / lib / Sema / TypeCheckAttr . cpp <nl> ppp b / lib / Sema / TypeCheckAttr . cpp <nl> void AttributeEarlyChecker : : visitSILStoredAttr ( SILStoredAttr * attr ) { <nl> <nl> static Optional < Diag < bool , Type > > <nl> isAcceptableOutletType ( Type type , bool & isArray , TypeChecker & TC ) { <nl> - if ( type - > isObjCExistentialType ( ) ) <nl> + if ( type - > isObjCExistentialType ( ) | | type - > isAny ( ) ) <nl> return None ; / / @ objc existential types are okay <nl> <nl> auto nominal = type - > getAnyNominal ( ) ; <nl> static bool checkObjectOrOptionalObjectType ( TypeChecker & TC , Decl * D , <nl> . highlight ( param - > getSourceRange ( ) ) ; <nl> return true ; <nl> } <nl> - } else if ( ty - > isObjCExistentialType ( ) ) { <nl> - / / @ objc existential types are okay <nl> + } else if ( ty - > isObjCExistentialType ( ) | | ty - > isAny ( ) ) { <nl> + / / @ objc existential types are okay , as is Any . <nl> / / Nothing to do . <nl> } else { <nl> / / No other types are permitted . <nl> mmm a / test / attr / attr_ibaction . swift <nl> ppp b / test / attr / attr_ibaction . swift <nl> protocol CP2 : class { } <nl> @ IBAction func action5 ( _ : AnyObject ? ) { } <nl> @ IBAction func action6 ( _ : AnyObject ! ) { } <nl> <nl> + / / Any <nl> + @ IBAction func action4a ( _ : Any ) { } <nl> + @ IBAction func action5a ( _ : Any ? ) { } <nl> + @ IBAction func action6a ( _ : Any ! ) { } <nl> + <nl> / / Protocol types <nl> @ IBAction func action7 ( _ : P1 ) { } / / expected - error { { argument to @ IBAction method cannot have non - object type ' P1 ' } } <nl> / / expected - error @ - 1 { { method cannot be marked @ IBAction because the type of the parameter cannot be represented in Objective - C } } <nl> mmm a / test / attr / attr_iboutlet . swift <nl> ppp b / test / attr / attr_iboutlet . swift <nl> class NonObjC { } <nl> @ IBOutlet var outlet5 : AnyObject ? <nl> @ IBOutlet var outlet6 : AnyObject ! <nl> <nl> + / / Any <nl> + @ IBOutlet var outlet5a : Any ? 
<nl> + @ IBOutlet var outlet6a : Any ! <nl> + <nl> / / Protocol types <nl> @ IBOutlet var outlet7 : P1 / / expected - error { { @ IBOutlet property cannot have non - ' @ objc ' protocol type ' P1 ' } } { { 3 - 13 = } } <nl> @ IBOutlet var outlet8 : CP1 / / expected - error { { @ IBOutlet property cannot have non - ' @ objc ' protocol type ' CP1 ' } } { { 3 - 13 = } } <nl>
Allow ' Any ' as an IBAction ' s sender and an IBOutlet ' s type . ( )
apple/swift
ed01f89401674e7c738657880c399671637a2d40
2016-08-16T21:49:23Z
mmm a / src / span . h <nl> ppp b / src / span . h <nl> class Span <nl> C * m_data ; <nl> std : : size_t m_size ; <nl> <nl> + template < class T > <nl> + struct is_Span_int : public std : : false_type { } ; <nl> + template < class T > <nl> + struct is_Span_int < Span < T > > : public std : : true_type { } ; <nl> + template < class T > <nl> + struct is_Span : public is_Span_int < typename std : : remove_cv < T > : : type > { } ; <nl> + <nl> + <nl> public : <nl> constexpr Span ( ) noexcept : m_data ( nullptr ) , m_size ( 0 ) { } <nl> <nl> class Span <nl> * To prevent surprises , only Spans for constant value types are supported when passing in temporaries . <nl> * Note that this restriction does not exist when converting arrays or other Spans ( see above ) . <nl> * / <nl> - template < typename V , typename std : : enable_if < ( std : : is_const < C > : : value | | std : : is_lvalue_reference < V > : : value ) & & std : : is_convertible < typename std : : remove_pointer < decltype ( std : : declval < V & > ( ) . data ( ) ) > : : type ( * ) [ ] , C ( * ) [ ] > : : value & & std : : is_convertible < decltype ( std : : declval < V & > ( ) . size ( ) ) , std : : size_t > : : value , int > : : type = 0 > <nl> - constexpr Span ( V & & v ) noexcept : m_data ( v . data ( ) ) , m_size ( v . size ( ) ) { } <nl> + template < typename V > <nl> + constexpr Span ( V & other , <nl> + typename std : : enable_if < ! is_Span < V > : : value & & <nl> + std : : is_convertible < typename std : : remove_pointer < decltype ( std : : declval < V & > ( ) . data ( ) ) > : : type ( * ) [ ] , C ( * ) [ ] > : : value & & <nl> + std : : is_convertible < decltype ( std : : declval < V & > ( ) . size ( ) ) , std : : size_t > : : value , std : : nullptr_t > : : type = nullptr ) <nl> + : m_data ( other . data ( ) ) , m_size ( other . size ( ) ) { } <nl> + <nl> + template < typename V > <nl> + constexpr Span ( const V & other , <nl> + typename std : : enable_if < ! 
is_Span < V > : : value & & <nl> + std : : is_convertible < typename std : : remove_pointer < decltype ( std : : declval < const V & > ( ) . data ( ) ) > : : type ( * ) [ ] , C ( * ) [ ] > : : value & & <nl> + std : : is_convertible < decltype ( std : : declval < const V & > ( ) . size ( ) ) , std : : size_t > : : value , std : : nullptr_t > : : type = nullptr ) <nl> + : m_data ( other . data ( ) ) , m_size ( other . size ( ) ) { } <nl> <nl> constexpr C * data ( ) const noexcept { return m_data ; } <nl> constexpr C * begin ( ) const noexcept { return m_data ; } <nl>
span : ( almost ) match std : : span ' s constructor behavior
bitcoin/bitcoin
62733fee874bfe7e833e71380eb8efd6a3126fbd
2020-06-29T17:51:24Z
new file mode 100644 <nl> index 000000000000 . . efd6a820b5ce <nl> mmm / dev / null <nl> ppp b / src / cuckoocache . h <nl> <nl> + / / Copyright ( c ) 2016 Jeremy Rubin <nl> + / / Distributed under the MIT software license , see the accompanying <nl> + / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> + <nl> + # ifndef _BITCOIN_CUCKOOCACHE_H_ <nl> + # define _BITCOIN_CUCKOOCACHE_H_ <nl> + <nl> + # include < array > <nl> + # include < algorithm > <nl> + # include < atomic > <nl> + # include < cstring > <nl> + # include < cmath > <nl> + # include < memory > <nl> + # include < vector > <nl> + <nl> + <nl> + / * * namespace CuckooCache provides high performance cache primitives <nl> + * <nl> + * Summary : <nl> + * <nl> + * 1 ) bit_packed_atomic_flags is bit - packed atomic flags for garbage collection <nl> + * <nl> + * 2 ) cache is a cache which is performant in memory usage and lookup speed . It <nl> + * is lockfree for erase operations . Elements are lazily erased on the next <nl> + * insert . <nl> + * / <nl> + namespace CuckooCache <nl> + { <nl> + / * * bit_packed_atomic_flags implements a container for garbage collection flags <nl> + * that is only thread unsafe on calls to setup . This class bit - packs collection <nl> + * flags for memory efficiency . <nl> + * <nl> + * All operations are std : : memory_order_relaxed so external mechanisms must <nl> + * ensure that writes and reads are properly synchronized . <nl> + * <nl> + * On setup ( n ) , all bits up to n are marked as collected . <nl> + * <nl> + * Under the hood , because it is an 8 - bit type , it makes sense to use a multiple <nl> + * of 8 for setup , but it will be safe if that is not the case as well . 
<nl> + * <nl> + * / <nl> + class bit_packed_atomic_flags <nl> + { <nl> + std : : unique_ptr < std : : atomic < uint8_t > [ ] > mem ; <nl> + <nl> + public : <nl> + / * * No default constructor as there must be some size * / <nl> + bit_packed_atomic_flags ( ) = delete ; <nl> + <nl> + / * * <nl> + * bit_packed_atomic_flags constructor creates memory to sufficiently <nl> + * keep track of garbage collection information for size entries . <nl> + * <nl> + * @ param size the number of elements to allocate space for <nl> + * <nl> + * @ post bit_set , bit_unset , and bit_is_set function properly forall x . x < <nl> + * size <nl> + * @ post All calls to bit_is_set ( without subsequent bit_unset ) will return <nl> + * true . <nl> + * / <nl> + bit_packed_atomic_flags ( uint32_t size ) <nl> + { <nl> + / / pad out the size if needed <nl> + size = ( size + 7 ) / 8 ; <nl> + mem . reset ( new std : : atomic < uint8_t > [ size ] ) ; <nl> + for ( uint32_t i = 0 ; i < size ; + + i ) <nl> + mem [ i ] . store ( 0xFF ) ; <nl> + } ; <nl> + <nl> + / * * setup marks all entries and ensures that bit_packed_atomic_flags can store <nl> + * at least size entries <nl> + * <nl> + * @ param b the number of elements to allocate space for <nl> + * @ post bit_set , bit_unset , and bit_is_set function properly forall x . x < <nl> + * b <nl> + * @ post All calls to bit_is_set ( without subsequent bit_unset ) will return <nl> + * true . <nl> + * / <nl> + inline void setup ( uint32_t b ) <nl> + { <nl> + bit_packed_atomic_flags d ( b ) ; <nl> + std : : swap ( mem , d . mem ) ; <nl> + } <nl> + <nl> + / * * bit_set sets an entry as discardable . <nl> + * <nl> + * @ param s the index of the entry to bit_set . <nl> + * @ post immediately subsequent call ( assuming proper external memory <nl> + * ordering ) to bit_is_set ( s ) = = true . <nl> + * <nl> + * / <nl> + inline void bit_set ( uint32_t s ) <nl> + { <nl> + mem [ s > > 3 ] . 
fetch_or ( 1 < < ( s & 7 ) , std : : memory_order_relaxed ) ; <nl> + } <nl> + <nl> + / * * bit_unset marks an entry as something that should not be overwritten <nl> + * <nl> + * @ param s the index of the entry to bit_unset . <nl> + * @ post immediately subsequent call ( assuming proper external memory <nl> + * ordering ) to bit_is_set ( s ) = = false . <nl> + * / <nl> + inline void bit_unset ( uint32_t s ) <nl> + { <nl> + mem [ s > > 3 ] . fetch_and ( ~ ( 1 < < ( s & 7 ) ) , std : : memory_order_relaxed ) ; <nl> + } <nl> + <nl> + / * * bit_is_set queries the table for discardability at s <nl> + * <nl> + * @ param s the index of the entry to read . <nl> + * @ returns if the bit at index s was set . <nl> + * * / <nl> + inline bool bit_is_set ( uint32_t s ) const <nl> + { <nl> + return ( 1 < < ( s & 7 ) ) & mem [ s > > 3 ] . load ( std : : memory_order_relaxed ) ; <nl> + } <nl> + } ; <nl> + <nl> + / * * cache implements a cache with properties similar to a cuckoo - set <nl> + * <nl> + * The cache is able to hold up to ( ~ ( uint32_t ) 0 ) - 1 elements . <nl> + * <nl> + * Read Operations : <nl> + * - contains ( * , false ) <nl> + * <nl> + * Read + Erase Operations : <nl> + * - contains ( * , true ) <nl> + * <nl> + * Erase Operations : <nl> + * - allow_erase ( ) <nl> + * <nl> + * Write Operations : <nl> + * - setup ( ) <nl> + * - setup_bytes ( ) <nl> + * - insert ( ) <nl> + * - please_keep ( ) <nl> + * <nl> + * Synchronization Free Operations : <nl> + * - invalid ( ) <nl> + * - compute_hashes ( ) <nl> + * <nl> + * User Must Guarantee : <nl> + * <nl> + * 1 ) Write Requires synchronized access ( e . g . , a lock ) <nl> + * 2 ) Read Requires no concurrent Write , synchronized with the last insert . <nl> + * 3 ) Erase requires no concurrent Write , synchronized with last insert . <nl> + * 4 ) An Erase caller must release all memory before allowing a new Writer . 
<nl> + * <nl> + * <nl> + * Note on function names : <nl> + * - The name " allow_erase " is used because the real discard happens later . <nl> + * - The name " please_keep " is used because elements may be erased anyways on insert . <nl> + * <nl> + * @ tparam Element should be a movable and copyable type <nl> + * @ tparam Hash should be a function / callable which takes a template parameter <nl> + * hash_select and an Element and extracts a hash from it . Should return <nl> + * high - entropy hashes for ` Hash h ; h < 0 > ( e ) . . . h < 7 > ( e ) ` . <nl> + * / <nl> + template < typename Element , typename Hash > <nl> + class cache <nl> + { <nl> + private : <nl> + / * * table stores all the elements * / <nl> + std : : vector < Element > table ; <nl> + <nl> + / * * size stores the total available slots in the hash table * / <nl> + uint32_t size ; <nl> + <nl> + / * * The bit_packed_atomic_flags array is marked mutable because we want <nl> + * garbage collection to be allowed to occur from const methods * / <nl> + mutable bit_packed_atomic_flags collection_flags ; <nl> + <nl> + / * * epoch_flags tracks how recently an element was inserted into <nl> + * the cache . true denotes recent , false denotes not - recent . See insert ( ) <nl> + * method for full semantics . <nl> + * / <nl> + mutable std : : vector < bool > epoch_flags ; <nl> + <nl> + / * * epoch_heuristic_counter is used to determine when a epoch might be aged <nl> + * & an expensive scan should be done . epoch_heuristic_counter is <nl> + * decremented on insert and reset to the new number of inserts which would <nl> + * cause the epoch to reach epoch_size when it reaches zero . <nl> + * / <nl> + uint32_t epoch_heuristic_counter ; <nl> + <nl> + / * * epoch_size is set to be the number of elements supposed to be in a <nl> + * epoch . When the number of non - erased elements in a epoch <nl> + * exceeds epoch_size , a new epoch should be started and all <nl> + * current entries demoted . 
epoch_size is set to be 45 % of size because <nl> + * we want to keep load around 90 % , and we support 3 epochs at once - - <nl> + * one " dead " which has been erased , one " dying " which has been marked to be <nl> + * erased next , and one " living " which new inserts add to . <nl> + * / <nl> + uint32_t epoch_size ; <nl> + <nl> + / * * hash_mask should be set to appropriately mask out a hash such that every <nl> + * masked hash is [ 0 , size ) , eg , if floor ( log2 ( size ) ) = = 20 , then hash_mask <nl> + * should be ( 1 < < 20 ) - 1 <nl> + * / <nl> + uint32_t hash_mask ; <nl> + <nl> + / * * depth_limit determines how many elements insert should try to replace . <nl> + * Should be set to log2 ( n ) * / <nl> + uint8_t depth_limit ; <nl> + <nl> + / * * hash_function is a const instance of the hash function . It cannot be <nl> + * static or initialized at call time as it may have internal state ( such as <nl> + * a nonce ) . <nl> + * * / <nl> + const Hash hash_function ; <nl> + <nl> + / * * compute_hashes is convenience for not having to write out this <nl> + * expression everywhere we use the hash values of an Element . <nl> + * <nl> + * @ param e the element whose hashes will be returned <nl> + * @ returns std : : array < uint32_t , 8 > of deterministic hashes derived from e <nl> + * / <nl> + inline std : : array < uint32_t , 8 > compute_hashes ( const Element & e ) const <nl> + { <nl> + return { { hash_function . template operator ( ) < 0 > ( e ) & hash_mask , <nl> + hash_function . template operator ( ) < 1 > ( e ) & hash_mask , <nl> + hash_function . template operator ( ) < 2 > ( e ) & hash_mask , <nl> + hash_function . template operator ( ) < 3 > ( e ) & hash_mask , <nl> + hash_function . template operator ( ) < 4 > ( e ) & hash_mask , <nl> + hash_function . template operator ( ) < 5 > ( e ) & hash_mask , <nl> + hash_function . template operator ( ) < 6 > ( e ) & hash_mask , <nl> + hash_function . 
template operator ( ) < 7 > ( e ) & hash_mask } } ; <nl> + } <nl> + <nl> + / * end <nl> + * @ returns a constexpr index that can never be inserted to * / <nl> + constexpr uint32_t invalid ( ) const <nl> + { <nl> + return ~ ( uint32_t ) 0 ; <nl> + } <nl> + <nl> + / * * allow_erase marks the element at index n as discardable . Threadsafe <nl> + * without any concurrent insert . <nl> + * @ param n the index to allow erasure of <nl> + * / <nl> + inline void allow_erase ( uint32_t n ) const <nl> + { <nl> + collection_flags . bit_set ( n ) ; <nl> + } <nl> + <nl> + / * * please_keep marks the element at index n as an entry that should be kept . <nl> + * Threadsafe without any concurrent insert . <nl> + * @ param n the index to prioritize keeping <nl> + * / <nl> + inline void please_keep ( uint32_t n ) const <nl> + { <nl> + collection_flags . bit_unset ( n ) ; <nl> + } <nl> + <nl> + / * * epoch_check handles the changing of epochs for elements stored in the <nl> + * cache . epoch_check should be run before every insert . <nl> + * <nl> + * First , epoch_check decrements and checks the cheap heuristic , and then does <nl> + * a more expensive scan if the cheap heuristic runs out . If the expensive <nl> + * scan suceeds , the epochs are aged and old elements are allow_erased . The <nl> + * cheap heuristic is reset to retrigger after the worst case growth of the <nl> + * current epoch ' s elements would exceed the epoch_size . <nl> + * / <nl> + void epoch_check ( ) <nl> + { <nl> + if ( epoch_heuristic_counter ! = 0 ) { <nl> + - - epoch_heuristic_counter ; <nl> + return ; <nl> + } <nl> + / / count the number of elements from the latest epoch which <nl> + / / have not been erased . <nl> + uint32_t epoch_unused_count = 0 ; <nl> + for ( uint32_t i = 0 ; i < size ; + + i ) <nl> + epoch_unused_count + = epoch_flags [ i ] & & <nl> + ! collection_flags . 
bit_is_set ( i ) ; <nl> + / / If there are more non - deleted entries in the current epoch than the <nl> + / / epoch size , then allow_erase on all elements in the old epoch ( marked <nl> + / / false ) and move all elements in the current epoch to the old epoch <nl> + / / but do not call allow_erase on their indices . <nl> + if ( epoch_unused_count > = epoch_size ) { <nl> + for ( uint32_t i = 0 ; i < size ; + + i ) <nl> + if ( epoch_flags [ i ] ) <nl> + epoch_flags [ i ] = false ; <nl> + else <nl> + allow_erase ( i ) ; <nl> + epoch_heuristic_counter = epoch_size ; <nl> + } else <nl> + / / reset the epoch_heuristic_counter to next do a scan when worst <nl> + / / case behavior ( no intermittent erases ) would exceed epoch size , <nl> + / / with a reasonable minimum scan size . <nl> + / / Ordinarily , we would have to sanity check std : : min ( epoch_size , <nl> + / / epoch_unused_count ) , but we already know that ` epoch_unused_count <nl> + / / < epoch_size ` in this branch <nl> + epoch_heuristic_counter = std : : max ( 1u , std : : max ( epoch_size / 16 , <nl> + epoch_size - epoch_unused_count ) ) ; <nl> + } <nl> + <nl> + public : <nl> + / * * You must always construct a cache with some elements via a subsequent <nl> + * call to setup or setup_bytes , otherwise operations may segfault . <nl> + * / <nl> + cache ( ) : table ( ) , size ( ) , collection_flags ( 0 ) , epoch_flags ( ) , <nl> + epoch_heuristic_counter ( ) , epoch_size ( ) , depth_limit ( 0 ) , hash_function ( ) <nl> + { <nl> + } <nl> + <nl> + / * * setup initializes the container to store no more than new_size <nl> + * elements . setup rounds down to a power of two size . <nl> + * <nl> + * setup should only be called once . <nl> + * <nl> + * @ param new_size the desired number of elements to store <nl> + * @ returns the maximum number of elements storable <nl> + * * / <nl> + uint32_t setup ( uint32_t new_size ) <nl> + { <nl> + / / depth_limit must be at least one otherwise errors can occur . 
<nl> + depth_limit = static_cast < uint8_t > ( std : : log2 ( static_cast < float > ( std : : max ( ( uint32_t ) 2 , new_size ) ) ) ) ; <nl> + size = 1 < < depth_limit ; <nl> + hash_mask = size - 1 ; <nl> + table . resize ( size ) ; <nl> + collection_flags . setup ( size ) ; <nl> + epoch_flags . resize ( size ) ; <nl> + / / Set to 45 % as described above <nl> + epoch_size = std : : max ( ( uint32_t ) 1 , ( 45 * size ) / 100 ) ; <nl> + / / Initially set to wait for a whole epoch <nl> + epoch_heuristic_counter = epoch_size ; <nl> + return size ; <nl> + } <nl> + <nl> + / * * setup_bytes is a convenience function which accounts for internal memory <nl> + * usage when deciding how many elements to store . It isn ' t perfect because <nl> + * it doesn ' t account for any overhead ( struct size , MallocUsage , collection <nl> + * and epoch flags ) . This was done to simplify selecting a power of two <nl> + * size . In the expected use case , an extra two bits per entry should be <nl> + * negligible compared to the size of the elements . <nl> + * <nl> + * @ param bytes the approximate number of bytes to use for this data <nl> + * structure . <nl> + * @ returns the maximum number of elements storable ( see setup ( ) <nl> + * documentation for more detail ) <nl> + * / <nl> + uint32_t setup_bytes ( size_t bytes ) <nl> + { <nl> + return setup ( bytes / sizeof ( Element ) ) ; <nl> + } <nl> + <nl> + / * * insert loops at most depth_limit times trying to insert a hash <nl> + * at various locations in the table via a variant of the Cuckoo Algorithm <nl> + * with eight hash locations . <nl> + * <nl> + * It drops the last tried element if it runs out of depth before <nl> + * encountering an open slot . <nl> + * <nl> + * Thus <nl> + * <nl> + * insert ( x ) ; <nl> + * return contains ( x , false ) ; <nl> + * <nl> + * is not guaranteed to return true . 
<nl> + * <nl> + * @ param e the element to insert <nl> + * @ post one of the following : All previously inserted elements and e are <nl> + * now in the table , one previously inserted element is evicted from the <nl> + * table , the entry attempted to be inserted is evicted . <nl> + * <nl> + * / <nl> + inline void insert ( Element e ) <nl> + { <nl> + epoch_check ( ) ; <nl> + uint32_t last_loc = invalid ( ) ; <nl> + bool last_epoch = true ; <nl> + std : : array < uint32_t , 8 > locs = compute_hashes ( e ) ; <nl> + / / Make sure we have not already inserted this element <nl> + / / If we have , make sure that it does not get deleted <nl> + for ( uint32_t loc : locs ) <nl> + if ( table [ loc ] = = e ) { <nl> + please_keep ( loc ) ; <nl> + epoch_flags [ loc ] = last_epoch ; <nl> + return ; <nl> + } <nl> + for ( uint8_t depth = 0 ; depth < depth_limit ; + + depth ) { <nl> + / / First try to insert to an empty slot , if one exists <nl> + for ( uint32_t loc : locs ) { <nl> + if ( ! collection_flags . bit_is_set ( loc ) ) <nl> + continue ; <nl> + table [ loc ] = std : : move ( e ) ; <nl> + please_keep ( loc ) ; <nl> + epoch_flags [ loc ] = last_epoch ; <nl> + return ; <nl> + } <nl> + / * * Swap with the element at the location that was <nl> + * not the last one looked at . Example : <nl> + * <nl> + * 1 ) On first iteration , last_loc = = invalid ( ) , find returns last , so <nl> + * last_loc defaults to locs [ 0 ] . <nl> + * 2 ) On further iterations , where last_loc = = locs [ k ] , last_loc will <nl> + * go to locs [ k + 1 % 8 ] , i . e . , next of the 8 indicies wrapping around <nl> + * to 0 if needed . <nl> + * <nl> + * This prevents moving the element we just put in . <nl> + * <nl> + * The swap is not a move - - we must switch onto the evicted element <nl> + * for the next iteration . <nl> + * / <nl> + last_loc = locs [ ( 1 + ( std : : find ( locs . begin ( ) , locs . end ( ) , last_loc ) - locs . 
begin ( ) ) ) & 7 ] ; <nl> + std : : swap ( table [ last_loc ] , e ) ; <nl> + / / Can ' t std : : swap a std : : vector < bool > : : reference and a bool & . <nl> + bool epoch = last_epoch ; <nl> + last_epoch = epoch_flags [ last_loc ] ; <nl> + epoch_flags [ last_loc ] = epoch ; <nl> + <nl> + / / Recompute the locs - - unfortunately happens one too many times ! <nl> + locs = compute_hashes ( e ) ; <nl> + } <nl> + } <nl> + <nl> + / * contains iterates through the hash locations for a given element <nl> + * and checks to see if it is present . <nl> + * <nl> + * contains does not check garbage collected state ( in other words , <nl> + * garbage is only collected when the space is needed ) , so : <nl> + * <nl> + * insert ( x ) ; <nl> + * if ( contains ( x , true ) ) <nl> + * return contains ( x , false ) ; <nl> + * else <nl> + * return true ; <nl> + * <nl> + * executed on a single thread will always return true ! <nl> + * <nl> + * This is a great property for re - org performance for example . <nl> + * <nl> + * contains returns a bool set true if the element was found . <nl> + * <nl> + * @ param e the element to check <nl> + * @ param erase <nl> + * <nl> + * @ post if erase is true and the element is found , then the garbage collect <nl> + * flag is set <nl> + * @ returns true if the element is found , false otherwise <nl> + * / <nl> + inline bool contains ( const Element & e , const bool erase ) const <nl> + { <nl> + std : : array < uint32_t , 8 > locs = compute_hashes ( e ) ; <nl> + for ( uint32_t loc : locs ) <nl> + if ( table [ loc ] = = e ) { <nl> + if ( erase ) <nl> + allow_erase ( loc ) ; <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + } ; <nl> + } / / namespace CuckooCache <nl> + <nl> + # endif <nl> mmm a / src / init . cpp <nl> ppp b / src / init . 
cpp <nl> bool AppInit2 ( boost : : thread_group & threadGroup , CScheduler & scheduler ) <nl> LogPrintf ( " Using config file % s \ n " , GetConfigFile ( GetArg ( " - conf " , BITCOIN_CONF_FILENAME ) ) . string ( ) ) ; <nl> LogPrintf ( " Using at most % i connections ( % i file descriptors available ) \ n " , nMaxConnections , nFD ) ; <nl> <nl> + InitSignatureCache ( ) ; <nl> + <nl> LogPrintf ( " Using % u threads for script verification \ n " , nScriptCheckThreads ) ; <nl> if ( nScriptCheckThreads ) { <nl> for ( int i = 0 ; i < nScriptCheckThreads - 1 ; i + + ) <nl> mmm a / src / script / sigcache . cpp <nl> ppp b / src / script / sigcache . cpp <nl> <nl> # include " uint256 . h " <nl> # include " util . h " <nl> <nl> + # include " cuckoocache . h " <nl> # include < boost / thread . hpp > <nl> - # include < boost / unordered_set . hpp > <nl> <nl> namespace { <nl> <nl> / * * <nl> * We ' re hashing a nonce into the entries themselves , so we don ' t need extra <nl> * blinding in the set hash computation . <nl> + * <nl> + * This may exhibit platform endian dependent behavior but because these are <nl> + * nonced hashes ( random ) and this state is only ever used locally it is safe . <nl> + * All that matters is local consistency . <nl> * / <nl> - class CSignatureCacheHasher <nl> + class SignatureCacheHasher <nl> { <nl> public : <nl> - size_t operator ( ) ( const uint256 & key ) const { <nl> - return key . GetCheapHash ( ) ; <nl> + template < uint8_t hash_select > <nl> + uint32_t operator ( ) ( const uint256 & key ) const <nl> + { <nl> + static_assert ( hash_select < 8 , " SignatureCacheHasher only has 8 hashes available . " ) ; <nl> + uint32_t u ; <nl> + std : : memcpy ( & u , key . begin ( ) + 4 * hash_select , 4 ) ; <nl> + return u ; <nl> } <nl> } ; <nl> <nl> class CSignatureCache <nl> private : <nl> / / ! 
Entries are SHA256 ( nonce | | signature hash | | public key | | signature ) : <nl> uint256 nonce ; <nl> - typedef boost : : unordered_set < uint256 , CSignatureCacheHasher > map_type ; <nl> + typedef CuckooCache : : cache < uint256 , SignatureCacheHasher > map_type ; <nl> map_type setValid ; <nl> boost : : shared_mutex cs_sigcache ; <nl> <nl> - <nl> public : <nl> CSignatureCache ( ) <nl> { <nl> class CSignatureCache <nl> } <nl> <nl> bool <nl> - Get ( const uint256 & entry ) <nl> + Get ( const uint256 & entry , const bool erase ) <nl> { <nl> boost : : shared_lock < boost : : shared_mutex > lock ( cs_sigcache ) ; <nl> - return setValid . count ( entry ) ; <nl> + return setValid . contains ( entry , erase ) ; <nl> } <nl> <nl> - void Erase ( const uint256 & entry ) <nl> + void Set ( uint256 & entry ) <nl> { <nl> boost : : unique_lock < boost : : shared_mutex > lock ( cs_sigcache ) ; <nl> - setValid . erase ( entry ) ; <nl> + setValid . insert ( entry ) ; <nl> } <nl> - <nl> - void Set ( const uint256 & entry ) <nl> + uint32_t setup_bytes ( size_t n ) <nl> { <nl> - size_t nMaxCacheSize = GetArg ( " - maxsigcachesize " , DEFAULT_MAX_SIG_CACHE_SIZE ) * ( ( size_t ) 1 < < 20 ) ; <nl> - if ( nMaxCacheSize < = 0 ) return ; <nl> - <nl> - boost : : unique_lock < boost : : shared_mutex > lock ( cs_sigcache ) ; <nl> - while ( memusage : : DynamicUsage ( setValid ) > nMaxCacheSize ) <nl> - { <nl> - map_type : : size_type s = GetRand ( setValid . bucket_count ( ) ) ; <nl> - map_type : : local_iterator it = setValid . begin ( s ) ; <nl> - if ( it ! = setValid . end ( s ) ) { <nl> - setValid . erase ( * it ) ; <nl> - } <nl> - } <nl> - <nl> - setValid . insert ( entry ) ; <nl> + return setValid . setup_bytes ( n ) ; <nl> } <nl> } ; <nl> <nl> + / * In previous versions of this code , signatureCache was a local static variable <nl> + * in CachingTransactionSignatureChecker : : VerifySignature . 
We initialize <nl> + * signatureCache outside of VerifySignature to avoid the atomic operation per <nl> + * call overhead associated with local static variables even though <nl> + * signatureCache could be made local to VerifySignature . <nl> + * / <nl> + static CSignatureCache signatureCache ; <nl> } <nl> <nl> - bool CachingTransactionSignatureChecker : : VerifySignature ( const std : : vector < unsigned char > & vchSig , const CPubKey & pubkey , const uint256 & sighash ) const <nl> + / / To be called once in AppInit2 / TestingSetup to initialize the signatureCache <nl> + void InitSignatureCache ( ) <nl> { <nl> - static CSignatureCache signatureCache ; <nl> + size_t nMaxCacheSize = GetArg ( " - maxsigcachesize " , DEFAULT_MAX_SIG_CACHE_SIZE ) * ( ( size_t ) 1 < < 20 ) ; <nl> + if ( nMaxCacheSize < = 0 ) return ; <nl> + size_t nElems = signatureCache . setup_bytes ( nMaxCacheSize ) ; <nl> + LogPrintf ( " Using % zu MiB out of % zu requested for signature cache , able to store % zu elements \ n " , <nl> + ( nElems * sizeof ( uint256 ) ) > > 20 , nMaxCacheSize > > 20 , nElems ) ; <nl> + } <nl> <nl> + bool CachingTransactionSignatureChecker : : VerifySignature ( const std : : vector < unsigned char > & vchSig , const CPubKey & pubkey , const uint256 & sighash ) const <nl> + { <nl> uint256 entry ; <nl> signatureCache . ComputeEntry ( entry , sighash , vchSig , pubkey ) ; <nl> - <nl> - if ( signatureCache . Get ( entry ) ) { <nl> - if ( ! store ) { <nl> - signatureCache . Erase ( entry ) ; <nl> - } <nl> + if ( signatureCache . Get ( entry , ! store ) ) <nl> return true ; <nl> - } <nl> - <nl> if ( ! TransactionSignatureChecker : : VerifySignature ( vchSig , pubkey , sighash ) ) <nl> return false ; <nl> - <nl> - if ( store ) { <nl> + if ( store ) <nl> signatureCache . Set ( entry ) ; <nl> - } <nl> return true ; <nl> } <nl> mmm a / src / script / sigcache . h <nl> ppp b / src / script / sigcache . 
h <nl> <nl> <nl> # include < vector > <nl> <nl> - / / DoS prevention : limit cache size to less than 40MB ( over 500000 <nl> - / / entries on 64 - bit systems ) . <nl> - static const unsigned int DEFAULT_MAX_SIG_CACHE_SIZE = 40 ; <nl> + / / DoS prevention : limit cache size to 32MB ( over 1000000 entries on 64 - bit <nl> + / / systems ) . Due to how we count cache size , actual memory usage is slightly <nl> + / / more ( ~ 32 . 25 MB ) <nl> + static const unsigned int DEFAULT_MAX_SIG_CACHE_SIZE = 32 ; <nl> <nl> class CPubKey ; <nl> <nl> class CachingTransactionSignatureChecker : public TransactionSignatureChecker <nl> bool VerifySignature ( const std : : vector < unsigned char > & vchSig , const CPubKey & vchPubKey , const uint256 & sighash ) const ; <nl> } ; <nl> <nl> + void InitSignatureCache ( ) ; <nl> + <nl> # endif / / BITCOIN_SCRIPT_SIGCACHE_H <nl> mmm a / src / test / test_bitcoin . cpp <nl> ppp b / src / test / test_bitcoin . cpp <nl> <nl> # include " ui_interface . h " <nl> # include " rpc / server . h " <nl> # include " rpc / register . h " <nl> + # include " script / sigcache . h " <nl> <nl> # include " test / testutil . h " <nl> <nl> BasicTestingSetup : : BasicTestingSetup ( const std : : string & chainName ) <nl> ECC_Start ( ) ; <nl> SetupEnvironment ( ) ; <nl> SetupNetworking ( ) ; <nl> + InitSignatureCache ( ) ; <nl> fPrintToDebugLog = false ; / / don ' t want to write to debug . log file <nl> fCheckBlockIndex = true ; <nl> SelectParams ( chainName ) ; <nl>
Add CuckooCache implementation and replace the sigcache map_type with it
bitcoin/bitcoin
c9e69fbf3915fe1187b4c2e77be5ae6b16121194
2016-12-14T21:02:05Z
mmm a / build_msvc / README . md <nl> ppp b / build_msvc / README . md <nl> Quick Start <nl> The minimal steps required to build Bitcoin Core with the msbuild toolchain are below . More detailed instructions are contained in the following sections . <nl> <nl> ` ` ` <nl> - vcpkg install - - triplet x64 - windows - static boost - filesystem boost - signals2 boost - test libevent openssl zeromq berkeleydb rapidcheck double - conversion <nl> + vcpkg install - - triplet x64 - windows - static boost - filesystem boost - multi - index boost - signals2 boost - test boost - thread libevent openssl zeromq berkeleydb rapidcheck double - conversion <nl> py - 3 build_msvc \ msvc - autogen . py <nl> msbuild / m build_msvc \ bitcoin . sln / p : Platform = x64 / p : Configuration = Release / t : build <nl> ` ` ` <nl> The [ external dependencies ] ( https : / / github . com / bitcoin / bitcoin / blob / master / doc / d <nl> <nl> Qt <nl> mmmmmmmmmmmmmmmmmmmmm <nl> - All the Bitcoin Core applications are configured to build with static linking . In order to build the Bitcoin Core Qt applications a static build of Qt is required . <nl> + In order to build the Bitcoin Core a static build of Qt is required . The runtime library version ( e . g . v141 , v142 ) and platform type ( x86 or x64 ) must also match . <nl> <nl> - The runtime library version ( e . g . v141 , v142 ) and platform type ( x86 or x64 ) must also match . OpenSSL must also be linked into the Qt binaries in order to provide full functionality of the Bitcoin Core Qt programs . An example of the configure command to build Qtv5 . 9 . 7 locally to link with Bitcoin Core is shown below ( adjust paths accordingly ) , note it can be expected that the configure and subsequent build will fail numerous times until dependency issues are resolved . <nl> + A prebuilt version of Qt can be downloaded from [ here ] ( https : / / github . com / sipsorcery / qt_win_binary / releases ) . 
Please be aware this download is NOT an officially sanctioned Bitcoin Core distribution and is provided for developer convenience . It should NOT be used for builds that will be used in a production environment or with real funds . <nl> <nl> - ` ` ` ` <nl> - . . \ Qtv5 . 9 . 7_src \ configure - developer - build - confirm - license - debug - and - release - opensource - platform win32 - msvc - opengl desktop - no - shared - static - no - static - runtime - mp - qt - zlib - qt - pcre - qt - libpng - ltcg - make libs - make tools - no - libjpeg - nomake examples - no - compile - examples - no - dbus - no - libudev - no - qml - debug - no - icu - no - gtk - no - opengles3 - no - angle - no - sql - sqlite - no - sql - odbc - no - sqlite - no - libudev - skip qt3d - skip qtactiveqt - skip qtandroidextras - skip qtcanvas3d - skip qtcharts - skip qtconnectivity - skip qtdatavis3d - skip qtdeclarative - skip qtdoc - skip qtgamepad - skip qtgraphicaleffects - skip qtimageformats - skip qtlocation - skip qtmacextras - skip qtmultimedia - skip qtnetworkauth - skip qtpurchasing - skip qtquickcontrols - skip qtquickcontrols2 - skip qtscript - skip qtscxml - skip qtsensors - skip qtserialbus - skip qtserialport - skip qtspeech - skip qtvirtualkeyboard - skip qtwayland - skip qtwebchannel - skip qtwebengine - skip qtwebsockets - skip qtwebview - skip qtx11extras - skip qtxmlpatterns - nomake tests - openssl - linked - IC : \ Dev \ github \ vcpkg \ installed \ x64 - windows - static \ include - LC : \ Dev \ github \ vcpkg \ installed \ x64 - windows - static \ lib OPENSSL_LIBS = " - llibeay32 - lssleay32 - lgdi32 - luser32 - lwsock32 - ladvapi32 " - prefix C : \ Qt5 . 9 . 7_ssl_x64_static_vs2017 <nl> - ` ` ` ` <nl> - <nl> - A prebuilt version for x64 and Visual C + + runtime v141 ( Visual Studio 2017 ) can be downloaded from [ here ] ( https : / / github . com / sipsorcery / qt_win_binary / releases ) . 
Please be aware this download is NOT an officially sanctioned Bitcoin Core distribution and is provided for developer convenience . It should NOT be used for builds that will be used in a production environment or with real funds . <nl> - <nl> - To build Bitcoin Core without Qt unload or disable the bitcoin - qt , libbitcoin_qt and test_bitcoin - qt projects . <nl> + To build Bitcoin Core without Qt unload or disable the ` bitcoin - qt ` , ` libbitcoin_qt ` and ` test_bitcoin - qt ` projects . <nl> <nl> Building <nl> mmmmmmmmmmmmmmmmmmmmm <nl> The instructions below use ` vcpkg ` to install the dependencies . <nl> <nl> - - Clone ` vcpkg ` from the [ github repository ] ( https : / / github . com / Microsoft / vcpkg ) and install as per the instructions in the main README . md . <nl> + - Install [ ` vcpkg ` ] ( https : / / github . com / Microsoft / vcpkg ) . <nl> - Install the required packages ( replace x64 with x86 as required ) : <nl> <nl> ` ` ` <nl> - PS > . \ vcpkg install - - triplet x64 - windows - static boost - filesystem boost - signals2 boost - test libevent openssl zeromq berkeleydb rapidcheck double - conversion <nl> + PS > . \ vcpkg install - - triplet x64 - windows - static boost - filesystem boost - multi - index boost - signals2 boost - test boost - thread libevent openssl zeromq berkeleydb rapidcheck double - conversion <nl> ` ` ` <nl> <nl> - - Use Python to generate * . vcxproj from Makefile <nl> + - Use Python to generate ` * . vcxproj ` from Makefile <nl> <nl> ` ` ` <nl> - PS > py - 3 msvc - autogen . py <nl> + PS > py - 3 msvc - autogen . py <nl> ` ` ` <nl> <nl> - An optional step is to adjust the settings in the build_msvc directory and the common . init . vcxproj file . This project file contains settings that are common to all projects such as the runtime library version and target Windows SDK version . The Qt directories can also be set . <nl>
doc : update MSVC instructions to remove Qt configuration
bitcoin/bitcoin
b1f1fb5f1dbdeb329c8c764f92a891329dc10863
2019-11-01T19:25:52Z
mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> bool ImGui : : BeginChild ( ImGuiID id , const ImVec2 & size_arg , bool border , ImGuiWin <nl> <nl> void ImGui : : EndChild ( ) <nl> { <nl> - ImGuiWindow * window = GetCurrentWindow ( ) ; <nl> + ImGuiContext & g = * GImGui ; <nl> + ImGuiWindow * window = g . CurrentWindow ; <nl> <nl> IM_ASSERT ( window - > Flags & ImGuiWindowFlags_ChildWindow ) ; / / Mismatched BeginChild ( ) / EndChild ( ) callss <nl> if ( window - > BeginCount > 1 ) <nl> void ImGui : : EndChild ( ) <nl> sz . y = ImMax ( 4 . 0f , sz . y ) ; <nl> End ( ) ; <nl> <nl> - ImGuiWindow * parent_window = GetCurrentWindow ( ) ; <nl> + ImGuiWindow * parent_window = g . CurrentWindow ; <nl> ImRect bb ( parent_window - > DC . CursorPos , parent_window - > DC . CursorPos + sz ) ; <nl> + <nl> ItemSize ( sz ) ; <nl> - if ( ! ( window - > Flags & ImGuiWindowFlags_NavFlattened ) & & ( window - > DC . NavLayerActiveMask ! = 0 | | window - > DC . NavHasScroll ) ) <nl> + if ( ( window - > DC . NavLayerActiveMask ! = 0 | | window - > DC . NavHasScroll ) & & ! ( window - > Flags & ImGuiWindowFlags_NavFlattened ) ) <nl> { <nl> ItemAdd ( bb , window - > ChildId ) ; <nl> RenderNavHighlight ( bb , window - > ChildId ) ; <nl> + <nl> + / / When browsing a window that has no activable items ( scroll only ) we keep a highlight on the child <nl> + if ( window - > DC . NavLayerActiveMask = = 0 & & window = = g . NavWindow ) <nl> + RenderNavHighlight ( ImRect ( bb . Min - ImVec2 ( 2 , 2 ) , bb . Max + ImVec2 ( 2 , 2 ) ) , g . NavId , ImGuiNavHighlightFlags_TypeThin ) ; <nl> } <nl> else <nl> { <nl>
Nav : when browsing a window that has no activable items ( scroll only ) we keep a highlight on the child . ( )
ocornut/imgui
76d8af40369a8f32be1de52c5c1fc1cbf8591354
2018-01-31T20:25:52Z
mmm a / src / WebSocket . h <nl> ppp b / src / WebSocket . h <nl> struct WebSocket : AsyncSocket < SSL > { <nl> / * See AsyncSocket * / <nl> using Super : : getBufferedAmount ; <nl> <nl> + / * Simple , immediate close of the socket . Emits close event * / <nl> + using Super : : close ; <nl> + <nl> / * Send or buffer a WebSocket frame , compressed or not . Returns false on increased user space backpressure . * / <nl> bool send ( std : : string_view message , uWS : : OpCode opCode = uWS : : OpCode : : BINARY , bool compress = false ) { <nl> / * Transform the message to compressed domain if requested * / <nl> struct WebSocket : AsyncSocket < SSL > { <nl> } <nl> <nl> / * Send websocket close frame , emit close event , send FIN if successful * / <nl> - void close ( int code , std : : string_view message = { } ) { <nl> + void end ( int code , std : : string_view message = { } ) { <nl> / * Check if we already called this one * / <nl> WebSocketData * webSocketData = ( WebSocketData * ) us_new_socket_ext ( SSL , ( us_new_socket_t * ) this ) ; <nl> if ( webSocketData - > isShuttingDown ) { <nl> struct WebSocket : AsyncSocket < SSL > { <nl> webSocketContextData - > closeHandler ( this , code , message ) ; <nl> } <nl> } <nl> - <nl> - / * Simple , immediate close of the socket . Emits close event * / <nl> - void terminate ( ) { <nl> - / * This calls close event in context where it checks for isShuttingDown and either emits websocket close or not * / <nl> - Super : : close ( ) ; <nl> - } <nl> } ; <nl> <nl> } <nl> mmm a / src / WebSocketContext . h <nl> ppp b / src / WebSocketContext . h <nl> struct WebSocketContext { <nl> if ( ! remainingBytes & & fin & & ! webSocketData - > controlTipLength ) { <nl> if ( opCode = = CLOSE ) { <nl> auto closeFrame = protocol : : parseClosePayload ( data , length ) ; <nl> - webSocket - > close ( closeFrame . code , std : : string_view ( closeFrame . message , closeFrame . length ) ) ; <nl> + webSocket - > end ( closeFrame . 
code , std : : string_view ( closeFrame . message , closeFrame . length ) ) ; <nl> return true ; <nl> } else { <nl> if ( opCode = = PING ) { <nl> struct WebSocketContext { <nl> char * controlBuffer = ( char * ) webSocketData - > fragmentBuffer . data ( ) + webSocketData - > fragmentBuffer . length ( ) - webSocketData - > controlTipLength ; <nl> if ( opCode = = CLOSE ) { <nl> protocol : : CloseFrame closeFrame = protocol : : parseClosePayload ( controlBuffer , webSocketData - > controlTipLength ) ; <nl> - webSocket - > close ( closeFrame . code , std : : string_view ( closeFrame . message , closeFrame . length ) ) ; <nl> + webSocket - > end ( closeFrame . code , std : : string_view ( closeFrame . message , closeFrame . length ) ) ; <nl> return true ; <nl> } else { <nl> if ( opCode = = PING ) { <nl>
API CHANGE : ws . close - > ws . end , ws . terminate - > ws . close
uNetworking/uWebSockets
7d990a3bc177271eb64af2f58c30a5253507fa7f
2019-01-26T16:20:47Z
mmm a / tensorflow / core / kernels / maxpooling_op . cc <nl> ppp b / tensorflow / core / kernels / maxpooling_op . cc <nl> struct LaunchMaxPoolingGradWithArgmax < CPUDevice , T > { <nl> <nl> static void launch ( OpKernelContext * context , const PoolParameters & params , <nl> const Tensor & grad_in , const Tensor & argmax , <nl> - Tensor * grad_out ) { <nl> + Tensor * grad_out , const bool include_batch_in_index ) { <nl> const DeviceBase : : CpuWorkerThreads & worker_threads = <nl> * ( context - > device ( ) - > tensorflow_cpu_worker_threads ( ) ) ; <nl> <nl> - auto shard = [ & grad_in , & argmax , & grad_out ] ( int64 start , int64 limit ) { <nl> + auto shard = [ & grad_in , & argmax , & grad_out , include_batch_in_index ] ( <nl> + int64 start , int64 limit ) { <nl> const int64 batch_size = <nl> GetTensorDim ( grad_out - > shape ( ) , FORMAT_NHWC , ' N ' ) ; <nl> const int64 output_size_per_batch = grad_out - > NumElements ( ) / batch_size ; <nl> struct LaunchMaxPoolingGradWithArgmax < CPUDevice , T > { <nl> const int input_start = start * input_size_per_batch ; <nl> const int input_end = limit * input_size_per_batch ; <nl> for ( int64 index = input_start ; index < input_end ; index + + ) { <nl> - const int64 grad_out_index = argmax_flat ( index ) ; <nl> + const int64 grad_out_index = include_batch_in_index <nl> + ? argmax_flat ( index ) <nl> + : argmax_flat ( index ) + input_start ; <nl> CHECK ( grad_out_index > = output_start & & grad_out_index < output_end ) <nl> < < " Invalid output gradient index : " < < grad_out_index < < " , " <nl> < < output_start < < " , " < < output_end ; <nl> class MaxPoolingGradWithArgmaxOp : public OpKernel { <nl> OP_REQUIRES ( context , ksize_ [ 0 ] = = 1 & & stride_ [ 0 ] = = 1 , <nl> errors : : Unimplemented ( <nl> " Pooling is not yet supported on the batch dimension . 
" ) ) ; <nl> + OP_REQUIRES_OK ( context , context - > GetAttr ( " include_batch_in_index " , <nl> + & include_batch_in_index_ ) ) ; <nl> } <nl> <nl> void Compute ( OpKernelContext * context ) override { <nl> class MaxPoolingGradWithArgmaxOp : public OpKernel { <nl> OP_REQUIRES_OK ( context , context - > forward_input_or_allocate_output ( <nl> { 1 } , 0 , out_shape , & grad_out ) ) ; <nl> <nl> - LaunchMaxPoolingGradWithArgmax < Device , T > : : launch ( context , params , grad_in , <nl> - argmax , grad_out ) ; <nl> + LaunchMaxPoolingGradWithArgmax < Device , T > : : launch ( <nl> + context , params , grad_in , argmax , grad_out , include_batch_in_index_ ) ; <nl> } <nl> <nl> private : <nl> class MaxPoolingGradWithArgmaxOp : public OpKernel { <nl> std : : vector < int32 > stride_ ; <nl> Padding padding_ ; <nl> TensorFormat data_format_ ; <nl> + bool include_batch_in_index_ ; <nl> } ; <nl> <nl> template < typename Device , typename T > <nl> class MaxPoolingGradGradWithArgmaxOp : public OpKernel { <nl> OP_REQUIRES ( context , ksize_ [ 0 ] = = 1 & & stride_ [ 0 ] = = 1 , <nl> errors : : Unimplemented ( <nl> " Pooling is not yet supported on the batch dimension . 
" ) ) ; <nl> + OP_REQUIRES_OK ( context , context - > GetAttr ( " include_batch_in_index " , <nl> + & include_batch_in_index_ ) ) ; <nl> } <nl> <nl> void Compute ( OpKernelContext * context ) override { <nl> class MaxPoolingGradGradWithArgmaxOp : public OpKernel { <nl> { 1 } , 0 , out_shape , & grad_out ) ) ; <nl> <nl> LaunchMaxPoolingGradGradWithArgmax < Device , T > : : launch ( <nl> - context , params , grad_in , argmax , grad_out ) ; <nl> + context , params , grad_in , argmax , grad_out , include_batch_in_index_ ) ; <nl> } <nl> <nl> private : <nl> std : : vector < int32 > ksize_ ; <nl> std : : vector < int32 > stride_ ; <nl> Padding padding_ ; <nl> + bool include_batch_in_index_ ; <nl> } ; <nl> <nl> # if GOOGLE_CUDA <nl> template < typename T > <nl> struct LaunchMaxPoolingGradWithArgmax < Eigen : : GpuDevice , T > { <nl> static void launch ( OpKernelContext * context , const PoolParameters & params , <nl> const Tensor & grad_in , const Tensor & argmax , <nl> - Tensor * grad_out ) { <nl> + Tensor * grad_out , const bool include_batch_in_index ) { <nl> + / / TODO ( facaiy ) : support include_batch_in_index = true for gpu kernel . <nl> + if ( include_batch_in_index ) { <nl> + LOG ( WARNING ) < < " include_batch_in_index = true is not supported " <nl> + < < " on GPU kernel of MaxPoolGradWithArgmax . " <nl> + < < " Ignore it . " ; <nl> + } <nl> const int input_size = params . tensor_in_batch * params . tensor_in_rows * <nl> params . tensor_in_cols * params . depth ; <nl> const int output_size = params . tensor_in_batch * params . out_height * <nl> template < typename T > <nl> struct LaunchMaxPoolingGradGradWithArgmax < Eigen : : GpuDevice , T > { <nl> static void launch ( OpKernelContext * context , const PoolParameters & params , <nl> const Tensor & grad_in , const Tensor & argmax , <nl> - Tensor * grad_out ) { <nl> + Tensor * grad_out , const bool include_batch_in_index ) { <nl> + / / TODO ( facaiy ) : support include_batch_in_index = true for gpu kernel . 
<nl> + if ( include_batch_in_index ) { <nl> + LOG ( WARNING ) < < " include_batch_in_index = true is not supported " <nl> + < < " on GPU kernel of MaxPoolGradGradWithArgmax . " <nl> + < < " Ignore it . " ; <nl> + } <nl> const int input_size = params . tensor_in_batch * params . tensor_in_rows * <nl> params . tensor_in_cols * params . depth ; <nl> const int output_size = params . tensor_in_batch * params . out_height * <nl> mmm a / tensorflow / core / ops / nn_ops . cc <nl> ppp b / tensorflow / core / ops / nn_ops . cc <nl> REGISTER_OP ( " MaxPoolGradWithArgmax " ) <nl> . Attr ( " ksize : list ( int ) > = 4 " ) <nl> . Attr ( " strides : list ( int ) > = 4 " ) <nl> . Attr ( GetPaddingAttrString ( ) ) <nl> + . Attr ( " include_batch_in_index : bool = false " ) <nl> . Attr ( " Targmax : { int32 , int64 } " ) <nl> . Input ( " input : T " ) <nl> . Input ( " grad : T " ) <nl> REGISTER_OP ( " MaxPoolGradGradWithArgmax " ) <nl> . Attr ( " ksize : list ( int ) > = 4 " ) <nl> . Attr ( " strides : list ( int ) > = 4 " ) <nl> . Attr ( GetPaddingAttrString ( ) ) <nl> + . Attr ( " include_batch_in_index : bool = false " ) <nl> . Attr ( " Targmax : { int32 , int64 } " ) <nl> . Input ( " input : T " ) <nl> . Input ( " grad : T " ) <nl> mmm a / tensorflow / python / kernel_tests / pooling_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / pooling_ops_test . py <nl> def testMaxPoolingWithArgmax ( self ) : <nl> self . assertAllEqual ( argmax . ravel ( ) , argmax_exp ) <nl> <nl> def testMaxPoolingGradWithArgmax ( self ) : <nl> - orig_input = [ 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 0 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 ] <nl> - tensor_input = [ 11 . 0 , 12 . 0 , 13 . 0 , 14 . 0 ] <nl> - tensor_argmax = list ( np . array ( [ 0 , 1 , 3 , 5 ] , dtype = np . int64 ) ) <nl> - with self . session ( use_gpu = True ) : <nl> - orig_in = constant_op . constant ( orig_input , shape = [ 1 , 3 , 3 , 1 ] ) <nl> - t = constant_op . 
constant ( tensor_input , shape = [ 1 , 2 , 2 , 1 ] ) <nl> - argmax = constant_op . constant ( <nl> - tensor_argmax , shape = [ 1 , 2 , 2 , 1 ] , dtype = dtypes . int64 ) <nl> - out_op = gen_nn_ops . max_pool_grad_with_argmax ( <nl> - orig_in , <nl> - t , <nl> - argmax , <nl> - ksize = [ 1 , 2 , 2 , 1 ] , <nl> - strides = [ 1 , 1 , 1 , 1 ] , <nl> - padding = " VALID " ) <nl> - out = self . evaluate ( out_op ) . flatten ( ) <nl> - self . assertAllClose ( out , <nl> - [ 11 . 0 , 12 . 0 , 0 . 0 , 13 . 0 , 0 . 0 , 14 . 0 , 0 . 0 , 0 . 0 , 0 . 0 ] ) <nl> + orig_input = [ 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 0 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , <nl> + 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 0 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 ] <nl> + tensor_input = [ 11 . 0 , 12 . 0 , 13 . 0 , 14 . 0 , <nl> + 21 . 0 , 22 . 0 , 23 . 0 , 24 . 0 ] <nl> + <nl> + configs = [ <nl> + [ False , False , [ 0 , 1 , 3 , 5 , 0 , 1 , 3 , 5 ] ] , <nl> + [ False , True , [ 0 , 1 , 3 , 5 , 9 , 10 , 12 , 14 ] ] , <nl> + [ True , False , [ 0 , 1 , 3 , 5 , 0 , 1 , 3 , 5 ] ] ] <nl> + <nl> + for use_gpu , include_batch_in_index , argmax in configs : <nl> + with self . session ( use_gpu = use_gpu ) : <nl> + orig_in = constant_op . constant ( orig_input , shape = [ 2 , 3 , 3 , 1 ] ) <nl> + t = constant_op . constant ( tensor_input , shape = [ 2 , 2 , 2 , 1 ] ) <nl> + argmax_t = constant_op . constant ( <nl> + argmax , shape = [ 2 , 2 , 2 , 1 ] , dtype = dtypes . int64 ) <nl> + out_op = gen_nn_ops . max_pool_grad_with_argmax ( <nl> + orig_in , <nl> + t , <nl> + argmax_t , <nl> + ksize = [ 1 , 2 , 2 , 1 ] , <nl> + strides = [ 1 , 1 , 1 , 1 ] , <nl> + padding = " VALID " , <nl> + include_batch_in_index = include_batch_in_index ) <nl> + out = self . evaluate ( out_op ) . flatten ( ) <nl> + self . assertAllClose ( <nl> + out , <nl> + [ 11 . 0 , 12 . 0 , 0 . 0 , 13 . 0 , 0 . 0 , 14 . 0 , 0 . 0 , 0 . 0 , 0 . 0 , <nl> + 21 . 0 , 22 . 0 , 0 . 0 , 23 . 0 , 0 . 0 , 24 . 0 , 0 . 0 , 0 . 0 , 0 . 
0 ] ) <nl> <nl> def testMaxPoolingGradGradWithArgmax ( self ) : <nl> # MaxPoolWithArgMax is implemented only on CUDA . <nl> def testMaxPoolingGradGradWithArgmax ( self ) : <nl> argmax , <nl> ksize = [ 1 , 2 , 2 , 1 ] , <nl> strides = [ 1 , 1 , 1 , 1 ] , <nl> - padding = " VALID " ) <nl> + padding = " VALID " , <nl> + include_batch_in_index = False ) <nl> out = self . evaluate ( out_op ) . flatten ( ) <nl> self . assertAllClose ( out , [ 11 . 0 , 12 . 0 , 14 . 0 , 16 . 0 ] ) <nl> <nl> mmm a / tensorflow / python / ops / nn_grad . py <nl> ppp b / tensorflow / python / ops / nn_grad . py <nl> def _MaxPoolGradV2 ( op , grad ) : <nl> <nl> @ ops . RegisterGradient ( " MaxPoolWithArgmax " ) <nl> def _MaxPoolGradWithArgmax ( op , grad , unused_argmax_grad ) : <nl> + del unused_argmax_grad <nl> return gen_nn_ops . max_pool_grad_with_argmax ( <nl> op . inputs [ 0 ] , <nl> grad , <nl> op . outputs [ 1 ] , <nl> op . get_attr ( " ksize " ) , <nl> op . get_attr ( " strides " ) , <nl> - padding = op . get_attr ( " padding " ) ) <nl> + padding = op . get_attr ( " padding " ) , <nl> + include_batch_in_index = op . get_attr ( " include_batch_in_index " ) ) <nl> <nl> <nl> @ ops . RegisterGradient ( " MaxPoolGrad " ) <nl>
ENH : add include_batch_in_index for gradient of max_pool_with_argmax
tensorflow/tensorflow
69d18454383d49ecdc6ab995aa7ab56d20a109e2
2019-01-28T08:06:19Z
mmm a / folly / configure . ac <nl> ppp b / folly / configure . ac <nl> AC_CHECK_HEADER ( [ lz4 . h ] , AC_CHECK_LIB ( [ lz4 ] , [ LZ4_decompress_safe ] ) ) <nl> AC_CHECK_HEADER ( [ snappy . h ] , AC_CHECK_LIB ( [ snappy ] , [ main ] ) ) <nl> AC_CHECK_HEADER ( [ zlib . h ] , AC_CHECK_LIB ( [ z ] , [ main ] ) ) <nl> AC_CHECK_HEADER ( [ lzma . h ] , AC_CHECK_LIB ( [ lzma ] , [ main ] ) ) <nl> + AC_CHECK_HEADER ( [ zstd . h ] , AC_CHECK_LIB ( [ zstd ] , [ main ] ) ) <nl> <nl> # Include directory that contains " folly " so # include < folly / Foo . h > works <nl> AM_CPPFLAGS = ' - I $ ( top_srcdir ) / . . ' <nl> mmm a / folly / io / Compression . cpp <nl> ppp b / folly / io / Compression . cpp <nl> <nl> # include < lzma . h > <nl> # endif <nl> <nl> + # if FOLLY_HAVE_LIBZSTD <nl> + # include < zstd . h > <nl> + # endif <nl> + <nl> # include < folly / Conv . h > <nl> # include < folly / Memory . h > <nl> # include < folly / Portability . h > <nl> std : : unique_ptr < IOBuf > LZMA2Codec : : doUncompress ( const IOBuf * data , <nl> <nl> # endif / / FOLLY_HAVE_LIBLZMA <nl> <nl> + # ifdef FOLLY_HAVE_LIBZSTD <nl> + <nl> + / * * <nl> + * ZSTD_BETA compression <nl> + * / <nl> + class ZSTDCodec final : public Codec { <nl> + public : <nl> + static std : : unique_ptr < Codec > create ( int level , CodecType ) ; <nl> + explicit ZSTDCodec ( int level , CodecType type ) ; <nl> + <nl> + private : <nl> + bool doNeedsUncompressedLength ( ) const override ; <nl> + std : : unique_ptr < IOBuf > doCompress ( const IOBuf * data ) override ; <nl> + std : : unique_ptr < IOBuf > doUncompress ( <nl> + const IOBuf * data , <nl> + uint64_t uncompressedLength ) override ; <nl> + } ; <nl> + <nl> + std : : unique_ptr < Codec > ZSTDCodec : : create ( int level , CodecType type ) { <nl> + return make_unique < ZSTDCodec > ( level , type ) ; <nl> + } <nl> + <nl> + ZSTDCodec : : ZSTDCodec ( int level , CodecType type ) : Codec ( type ) { <nl> + DCHECK ( type = = CodecType : : ZSTD_BETA ) ; <nl> + } <nl> + 
<nl> + bool ZSTDCodec : : doNeedsUncompressedLength ( ) const { <nl> + return true ; <nl> + } <nl> + <nl> + std : : unique_ptr < IOBuf > ZSTDCodec : : doCompress ( const IOBuf * data ) { <nl> + size_t rc ; <nl> + size_t maxCompressedLength = ZSTD_compressBound ( data - > length ( ) ) ; <nl> + auto out = IOBuf : : createCombined ( maxCompressedLength ) ; <nl> + <nl> + CHECK_EQ ( out - > length ( ) , 0 ) ; <nl> + <nl> + rc = ZSTD_compress ( <nl> + out - > writableTail ( ) , out - > capacity ( ) , data - > data ( ) , data - > length ( ) ) ; <nl> + <nl> + if ( ZSTD_isError ( rc ) ) { <nl> + throw std : : runtime_error ( to < std : : string > ( <nl> + " ZSTD compression returned an error : " , <nl> + ZSTD_getErrorName ( rc ) ) ) ; <nl> + } <nl> + <nl> + out - > append ( rc ) ; <nl> + CHECK_EQ ( out - > length ( ) , rc ) ; <nl> + <nl> + return out ; <nl> + } <nl> + <nl> + std : : unique_ptr < IOBuf > ZSTDCodec : : doUncompress ( const IOBuf * data , <nl> + uint64_t uncompressedLength ) { <nl> + size_t rc ; <nl> + auto out = IOBuf : : createCombined ( uncompressedLength ) ; <nl> + <nl> + CHECK_GE ( out - > capacity ( ) , uncompressedLength ) ; <nl> + CHECK_EQ ( out - > length ( ) , 0 ) ; <nl> + <nl> + rc = ZSTD_decompress ( <nl> + out - > writableTail ( ) , out - > capacity ( ) , data - > data ( ) , data - > length ( ) ) ; <nl> + <nl> + if ( ZSTD_isError ( rc ) ) { <nl> + throw std : : runtime_error ( to < std : : string > ( <nl> + " ZSTD decompression returned an error : " , <nl> + ZSTD_getErrorName ( rc ) ) ) ; <nl> + } <nl> + <nl> + out - > append ( rc ) ; <nl> + CHECK_EQ ( out - > length ( ) , rc ) ; <nl> + <nl> + return out ; <nl> + } <nl> + <nl> + # endif / / FOLLY_HAVE_LIBZSTD <nl> + <nl> } / / namespace <nl> <nl> std : : unique_ptr < Codec > getCodec ( CodecType type , int level ) { <nl> std : : unique_ptr < Codec > getCodec ( CodecType type , int level ) { <nl> nullptr , <nl> nullptr , <nl> # endif <nl> + <nl> + # if FOLLY_HAVE_LIBZSTD <nl> + ZSTDCodec : : create 
, <nl> + # else <nl> + nullptr , <nl> + # endif <nl> } ; <nl> <nl> size_t idx = static_cast < size_t > ( type ) ; <nl> mmm a / folly / io / Compression . h <nl> ppp b / folly / io / Compression . h <nl> enum class CodecType { <nl> LZMA2 = 6 , <nl> LZMA2_VARINT_SIZE = 7 , <nl> <nl> - NUM_CODEC_TYPES = 8 , <nl> + / * * <nl> + * Use ZSTD_BETA compression . <nl> + * This format is not yet final ; please do not rely on it for anything other <nl> + * than testing purposes yet . <nl> + * / <nl> + ZSTD_BETA = 8 , <nl> + <nl> + NUM_CODEC_TYPES = 9 , <nl> } ; <nl> <nl> class Codec { <nl> mmm a / folly / io / test / CompressionTest . cpp <nl> ppp b / folly / io / test / CompressionTest . cpp <nl> TEST ( CompressionTestNeedsUncompressedLength , Simple ) { <nl> EXPECT_TRUE ( getCodec ( CodecType : : LZMA2 ) - > needsUncompressedLength ( ) ) ; <nl> EXPECT_FALSE ( getCodec ( CodecType : : LZMA2_VARINT_SIZE ) <nl> - > needsUncompressedLength ( ) ) ; <nl> + EXPECT_TRUE ( getCodec ( CodecType : : ZSTD_BETA ) - > needsUncompressedLength ( ) ) ; <nl> } <nl> <nl> class CompressionTest <nl> INSTANTIATE_TEST_CASE_P ( <nl> CodecType : : ZLIB , <nl> CodecType : : LZ4_VARINT_SIZE , <nl> CodecType : : LZMA2 , <nl> - CodecType : : LZMA2_VARINT_SIZE ) ) ) ; <nl> + CodecType : : LZMA2_VARINT_SIZE , <nl> + CodecType : : ZSTD_BETA ) ) ) ; <nl> <nl> class CompressionVarintTest <nl> : public testing : : TestWithParam < std : : tr1 : : tuple < int , CodecType > > { <nl>
Bring ZSTD support into folly / io / Compression . h
facebook/folly
f9d4e395976d3a6238a6fcd0d9e642f6f84cd1a4
2015-10-16T17:20:21Z
mmm a / xbmc / settings / AdvancedSettings . cpp <nl> ppp b / xbmc / settings / AdvancedSettings . cpp <nl> void CAdvancedSettings : : Initialize ( ) <nl> <nl> m_bVideoLibraryHideAllItems = false ; <nl> m_bVideoLibraryAllItemsOnBottom = false ; <nl> + m_iVideoLibraryRecentlyAddedItems = 25 ; <nl> m_bVideoLibraryHideEmptySeries = false ; <nl> m_bVideoLibraryCleanOnUpdate = false ; <nl> m_bVideoLibraryExportAutoThumbs = false ; <nl> void CAdvancedSettings : : ParseSettingsFile ( const CStdString & file ) <nl> { <nl> XMLUtils : : GetBoolean ( pElement , " hideallitems " , m_bVideoLibraryHideAllItems ) ; <nl> XMLUtils : : GetBoolean ( pElement , " allitemsonbottom " , m_bVideoLibraryAllItemsOnBottom ) ; <nl> + XMLUtils : : GetInt ( pElement , " recentlyaddeditems " , m_iVideoLibraryRecentlyAddedItems , 1 , INT_MAX ) ; <nl> XMLUtils : : GetBoolean ( pElement , " hideemptyseries " , m_bVideoLibraryHideEmptySeries ) ; <nl> XMLUtils : : GetBoolean ( pElement , " cleanonupdate " , m_bVideoLibraryCleanOnUpdate ) ; <nl> XMLUtils : : GetString ( pElement , " itemseparator " , m_videoItemSeparator ) ; <nl> mmm a / xbmc / settings / AdvancedSettings . h <nl> ppp b / xbmc / settings / AdvancedSettings . h <nl> class CAdvancedSettings <nl> <nl> bool m_bVideoLibraryHideAllItems ; <nl> bool m_bVideoLibraryAllItemsOnBottom ; <nl> + int m_iVideoLibraryRecentlyAddedItems ; <nl> bool m_bVideoLibraryHideEmptySeries ; <nl> bool m_bVideoLibraryCleanOnUpdate ; <nl> bool m_bVideoLibraryExportAutoThumbs ; <nl> mmm a / xbmc / video / VideoDatabase . cpp <nl> ppp b / xbmc / video / VideoDatabase . cpp <nl> bool CVideoDatabase : : GetRecentlyAddedMoviesNav ( const CStdString & strBaseDir , CFi <nl> { <nl> Filter filter ; <nl> filter . order = " dateAdded desc , idMovie desc " ; <nl> - filter . limit = PrepareSQL ( " % u " , limit ? limit : 25 ) ; <nl> + filter . limit = PrepareSQL ( " % u " , limit ? limit : g_advancedSettings . 
m_iVideoLibraryRecentlyAddedItems ) ; <nl> return GetMoviesByWhere ( strBaseDir , filter , items ) ; <nl> } <nl> <nl> bool CVideoDatabase : : GetRecentlyAddedEpisodesNav ( const CStdString & strBaseDir , C <nl> { <nl> Filter filter ; <nl> filter . order = " dateAdded desc , idEpisode desc " ; <nl> - filter . limit = PrepareSQL ( " % u " , limit ? limit : 25 ) ; <nl> + filter . limit = PrepareSQL ( " % u " , limit ? limit : g_advancedSettings . m_iVideoLibraryRecentlyAddedItems ) ; <nl> return GetEpisodesByWhere ( strBaseDir , filter , items , false ) ; <nl> } <nl> <nl> bool CVideoDatabase : : GetRecentlyAddedMusicVideosNav ( const CStdString & strBaseDir <nl> { <nl> Filter filter ; <nl> filter . order = " dateAdded desc , idMVideo desc " ; <nl> - filter . limit = PrepareSQL ( " % u " , limit ? limit : 25 ) ; <nl> + filter . limit = PrepareSQL ( " % u " , limit ? limit : g_advancedSettings . m_iVideoLibraryRecentlyAddedItems ) ; <nl> return GetMusicVideosByWhere ( strBaseDir , filter , items ) ; <nl> } <nl> <nl>
advancedsettings : revive " recentlyaddeditems " ( reverts part of 3fc04bc4a273b9acf05fa8e6f6e4fc001fd1f8c8 )
xbmc/xbmc
a6af5721275799417d70940312ab1d580c73201b
2012-11-08T07:45:53Z
mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_repositories ( path_prefix = " " , tf_repo_name = " " ) : <nl> ) <nl> <nl> # Check out LLVM and MLIR from llvm - project . <nl> - LLVM_COMMIT = " edba2864a7a86a97276c555d02276712e45d60fc " <nl> - LLVM_SHA256 = " 0ae8e67c0ce525f3ed920e5234d5ab5c55c043260e33e7fce78c91532576539a " <nl> + LLVM_COMMIT = " cd209f1a3790af774b75213d7914c844a6140b4b " <nl> + LLVM_SHA256 = " ff780a7e2a0b647bc2a0918653dd34a41bb1cd8653790dfc9c46540f08e4b562 " <nl> LLVM_URLS = [ <nl> " https : / / storage . googleapis . com / mirror . tensorflow . org / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl> " https : / / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl>
Integrate LLVM at https : / / github . com / llvm / llvm - project / commit / cd209f1a3790
tensorflow/tensorflow
d74b3a97e16bf058308f91367b74b76aa607a79e
2020-07-06T14:34:49Z
mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> static bool IsKeyPressedMap ( ImGuiKey key , bool repeat = true ) ; <nl> <nl> static void SetCurrentFont ( ImFont * font ) ; <nl> static void SetCurrentWindow ( ImGuiWindow * window ) ; <nl> + static void SetWindowScrollX ( ImGuiWindow * window , float new_scroll_x ) ; <nl> static void SetWindowScrollY ( ImGuiWindow * window , float new_scroll_y ) ; <nl> static void SetWindowPos ( ImGuiWindow * window , const ImVec2 & pos , ImGuiSetCond cond ) ; <nl> static void SetWindowSize ( ImGuiWindow * window , const ImVec2 & size , ImGuiSetCond cond ) ; <nl> static void NavUpdate ( ) <nl> IM_ASSERT ( g . NavWindow ) ; <nl> <nl> / / Scroll to keep newly navigated item fully into view <nl> - ImRect window_rect_rel ( g . NavWindow - > InnerRect . Min - g . NavWindow - > Pos , g . NavWindow - > InnerRect . Max - g . NavWindow - > Pos ) ; <nl> - window_rect_rel . Expand ( 1 . 0f ) ; <nl> + ImRect window_rect_rel ( g . NavWindow - > InnerRect . Min - g . NavWindow - > Pos - ImVec2 ( 1 , 1 ) , g . NavWindow - > InnerRect . Max - g . NavWindow - > Pos + ImVec2 ( 1 , 1 ) ) ; <nl> / / g . OverlayDrawList . AddRect ( g . NavWindow - > Pos + window_rect_rel . Min , g . NavWindow - > Pos + window_rect_rel . Max , IM_COL32_WHITE ) ; / / [ DEBUG ] <nl> if ( g . NavLayer = = 0 & & ! window_rect_rel . Contains ( g . NavMoveResultRectRel ) ) <nl> { <nl> static void NavUpdate ( ) <nl> / / Apply result from previous navigation directional move request <nl> ImGui : : SetActiveID ( 0 ) ; <nl> SetNavIdMoveMouse ( g . NavMoveResultId , g . NavMoveResultRectRel ) ; <nl> + g . NavMoveFromClampedRefRect = false ; <nl> } <nl> <nl> / / Navigation windowing mode ( change focus , move / resize window ) <nl> static void NavUpdate ( ) <nl> g . NavWindowingDisplayAlpha = 1 . 0f ; <nl> } <nl> <nl> + / / Move window <nl> + if ( g . NavWindowingTarget & & ! ( g . 
NavWindowingTarget - > Flags & ImGuiWindowFlags_NoMove ) ) <nl> + { <nl> + const ImVec2 move_delta = ImGui : : NavGetMovingDir ( 1 ) ; <nl> + if ( move_delta . x ! = 0 . 0f | | move_delta . y ! = 0 . 0f ) <nl> + { <nl> + const float move_speed = ImFloor ( 600 * g . IO . DeltaTime * ImMin ( g . IO . DisplayFramebufferScale . x , g . IO . DisplayFramebufferScale . y ) ) ; <nl> + g . NavWindowingTarget - > PosFloat + = move_delta * move_speed ; <nl> + if ( ! ( g . NavWindowingTarget - > Flags & ImGuiWindowFlags_NoSavedSettings ) ) <nl> + MarkSettingsDirty ( ) ; <nl> + } <nl> + } <nl> + <nl> if ( ! IsKeyDownMap ( ImGuiKey_NavMenu ) ) <nl> { <nl> / / Apply actual focus only when releasing the NavMenu button ( until then the window was merely rendered front - most ) <nl> static void NavUpdate ( ) <nl> g . NavWindow = g . FocusedWindow ; <nl> } <nl> <nl> - / / Fallback manual - scroll with NavUp / NavDown when window has no navigable item <nl> - if ( g . FocusedWindow & & ! g . FocusedWindow - > DC . NavLayerActiveFlags & & g . FocusedWindow - > DC . NavHasScroll & & ! ( g . FocusedWindow - > Flags & ImGuiWindowFlags_NoNav ) & & g . NavMoveRequest & & ( g . NavMoveDir = = ImGuiNavDir_Up | | g . NavMoveDir = = ImGuiNavDir_Down ) ) <nl> + / / Scrolling <nl> + if ( g . FocusedWindow & & ! ( g . FocusedWindow - > Flags & ImGuiWindowFlags_NoNav ) ) <nl> { <nl> - float scroll_speed = ImFloor ( g . FocusedWindow - > CalcFontSize ( ) * 100 * g . IO . DeltaTime + 0 . 5f ) ; / / We need round the scrolling speed because sub - pixel scroll isn ' t reliably supported . <nl> - SetWindowScrollY ( g . FocusedWindow , ImFloor ( g . FocusedWindow - > Scroll . y + ( ( g . NavMoveDir = = ImGuiNavDir_Up ) ? - 1 . 0f : + 1 . 0f ) * scroll_speed ) ) ; <nl> + / / Fallback manual - scroll with NavUp / NavDown when window has no navigable item <nl> + const float scroll_speed = ImFloor ( g . FocusedWindow - > CalcFontSize ( ) * 100 * g . IO . DeltaTime + 0 . 
5f ) ; / / We need round the scrolling speed because sub - pixel scroll isn ' t reliably supported . <nl> + if ( ! g . FocusedWindow - > DC . NavLayerActiveFlags & & g . FocusedWindow - > DC . NavHasScroll & & g . NavMoveRequest & & ( g . NavMoveDir = = ImGuiNavDir_Up | | g . NavMoveDir = = ImGuiNavDir_Down ) ) <nl> + SetWindowScrollY ( g . FocusedWindow , ImFloor ( g . FocusedWindow - > Scroll . y + ( ( g . NavMoveDir = = ImGuiNavDir_Up ) ? - 1 . 0f : + 1 . 0f ) * scroll_speed ) ) ; <nl> + <nl> + / / Manual scroll with NavScrollXXX keys <nl> + ImVec2 scroll_dir = ImGui : : NavGetMovingDir ( 1 , 1 . 0f / 10 . 0f , 10 . 0f ) ; <nl> + if ( scroll_dir . x ! = 0 . 0f & & g . NavWindow - > ScrollbarX ) <nl> + { <nl> + SetWindowScrollX ( g . FocusedWindow , ImFloor ( g . FocusedWindow - > Scroll . x + scroll_dir . x * scroll_speed ) ) ; <nl> + g . NavMoveFromClampedRefRect = true ; <nl> + } <nl> + if ( scroll_dir . y ! = 0 . 0f ) <nl> + { <nl> + SetWindowScrollY ( g . FocusedWindow , ImFloor ( g . FocusedWindow - > Scroll . y + scroll_dir . y * scroll_speed ) ) ; <nl> + g . NavMoveFromClampedRefRect = true ; <nl> + } <nl> } <nl> <nl> / / Reset search <nl> g . NavMoveResultId = 0 ; <nl> g . NavMoveResultDistAxial = g . NavMoveResultDistBox = g . NavMoveResultDistCenter = FLT_MAX ; <nl> + if ( g . NavMoveRequest & & g . NavMoveFromClampedRefRect & & g . NavLayer = = 0 ) <nl> + { <nl> + / / When we have manually scrolled and NavId is out of bounds , we clamp its bounding box ( used for search ) to the visible area to restart navigation within visible items <nl> + ImRect window_rect_rel ( g . NavWindow - > InnerRect . Min - g . NavWindow - > Pos - ImVec2 ( 1 , 1 ) , g . NavWindow - > InnerRect . Max - g . NavWindow - > Pos + ImVec2 ( 1 , 1 ) ) ; <nl> + if ( ! window_rect_rel . Contains ( g . NavRefRectRel ) ) <nl> + { <nl> + float pad = g . NavWindow - > CalcFontSize ( ) * 0 . 5f ; <nl> + window_rect_rel . Expand ( ImVec2 ( - ImMin ( window_rect_rel . 
GetWidth ( ) , pad ) , - ImMin ( window_rect_rel . GetHeight ( ) , pad ) ) ) ; / / Terrible approximation for the intend of starting navigation from first fully visible item <nl> + window_rect_rel . Clip ( g . NavRefRectRel ) ; <nl> + g . NavId = 0 ; <nl> + } <nl> + g . NavMoveFromClampedRefRect = false ; <nl> + } <nl> <nl> / / For scoring we use a single segment on the left side our current item bounding box ( not touching the edge to avoid box overlap with zero - spaced items ) <nl> g . NavScoringRectScreen = g . NavWindow ? ImRect ( g . NavWindow - > Pos + g . NavRefRectRel . Min , g . NavWindow - > Pos + g . NavRefRectRel . Max ) : ImRect ( ) ; <nl> bool ImGui : : Begin ( const char * name , bool * p_open , const ImVec2 & size_on_first_us <nl> if ( g . NavWindowingTarget = = window ) <nl> { <nl> const float resize_speed = ImFloor ( 600 * g . IO . DeltaTime * ImMin ( g . IO . DisplayFramebufferScale . x , g . IO . DisplayFramebufferScale . y ) ) ; <nl> - nav_resize_delta = NavGetMovingDir ( ) * resize_speed ; <nl> + nav_resize_delta = NavGetMovingDir ( 0 ) * resize_speed ; <nl> held | = ( nav_resize_delta . x ! = 0 . 0f | | nav_resize_delta . y ! = 0 . 0f ) ; / / For coloring <nl> } <nl> <nl> ImVec2 ImGui : : GetWindowPos ( ) <nl> return window - > Pos ; <nl> } <nl> <nl> + static void SetWindowScrollX ( ImGuiWindow * window , float new_scroll_x ) <nl> + { <nl> + window - > DC . CursorMaxPos . x + = window - > Scroll . x ; / / SizeContents is generally computed based on CursorMaxPos which is affected by scroll position , so we need to apply our change to it . <nl> + window - > Scroll . x = new_scroll_x ; <nl> + window - > DC . CursorMaxPos . x - = window - > Scroll . x ; <nl> + } <nl> + <nl> static void SetWindowScrollY ( ImGuiWindow * window , float new_scroll_y ) <nl> { <nl> window - > DC . CursorMaxPos . y + = window - > Scroll . 
y ; / / SizeContents is generally computed based on CursorMaxPos which is affected by scroll position , so we need to apply our change to it . <nl> int ImGui : : ParseFormatPrecision ( const char * fmt , int default_precision ) <nl> return precision ; <nl> } <nl> <nl> - ImVec2 ImGui : : NavGetMovingDir ( ) <nl> + ImVec2 ImGui : : NavGetMovingDir ( int stick_no , float slow_factor , float fast_factor ) <nl> { <nl> + IM_ASSERT ( stick_no > = 0 & & stick_no < 2 ) ; <nl> ImVec2 dir ( 0 . 0f , 0 . 0f ) ; <nl> - if ( IsKeyDownMap ( ImGuiKey_NavLeft ) ) dir . x - = 1 . 0f ; <nl> - if ( IsKeyDownMap ( ImGuiKey_NavRight ) ) dir . x + = 1 . 0f ; <nl> - if ( IsKeyDownMap ( ImGuiKey_NavUp ) ) dir . y - = 1 . 0f ; <nl> - if ( IsKeyDownMap ( ImGuiKey_NavDown ) ) dir . y + = 1 . 0f ; <nl> + if ( stick_no = = 0 ) <nl> + { <nl> + if ( IsKeyDownMap ( ImGuiKey_NavLeft ) ) dir . x - = 1 . 0f ; <nl> + if ( IsKeyDownMap ( ImGuiKey_NavRight ) ) dir . x + = 1 . 0f ; <nl> + if ( IsKeyDownMap ( ImGuiKey_NavUp ) ) dir . y - = 1 . 0f ; <nl> + if ( IsKeyDownMap ( ImGuiKey_NavDown ) ) dir . y + = 1 . 0f ; <nl> + } <nl> + if ( stick_no = = 1 ) <nl> + { <nl> + if ( IsKeyDownMap ( ImGuiKey_NavScrollLeft ) ) dir . x - = 1 . 0f ; <nl> + if ( IsKeyDownMap ( ImGuiKey_NavScrollRight ) ) dir . x + = 1 . 0f ; <nl> + if ( IsKeyDownMap ( ImGuiKey_NavScrollUp ) ) dir . y - = 1 . 0f ; <nl> + if ( IsKeyDownMap ( ImGuiKey_NavScrollDown ) ) dir . y + = 1 . 0f ; <nl> + } <nl> + if ( slow_factor ! = 0 . 0f & & IsKeyDownMap ( ImGuiKey_NavTweakSlower ) ) <nl> + dir * = slow_factor ; <nl> + if ( fast_factor ! = 0 . 0f & & IsKeyDownMap ( ImGuiKey_NavTweakFaster ) ) <nl> + dir * = fast_factor ; <nl> return dir ; <nl> } <nl> <nl> void ImGui : : ShowMetricsWindow ( bool * p_open ) <nl> } <nl> ImDrawIdx * idx_buffer = ( draw_list - > IdxBuffer . Size > 0 ) ? draw_list - > IdxBuffer . Data : NULL ; <nl> bool pcmd_node_open = ImGui : : TreeNode ( ( void * ) ( pcmd - draw_list - > CmdBuffer . 
begin ( ) ) , " Draw % - 4d % s vtx , tex = % p , clip_rect = ( % . 0f , % . 0f ) . . ( % . 0f , % . 0f ) " , pcmd - > ElemCount , draw_list - > IdxBuffer . Size > 0 ? " indexed " : " non - indexed " , pcmd - > TextureId , pcmd - > ClipRect . x , pcmd - > ClipRect . y , pcmd - > ClipRect . z , pcmd - > ClipRect . w ) ; <nl> - if ( show_clip_rects & & ImGui : : IsItemHovered ( ) ) <nl> + if ( show_clip_rects & & ( ImGui : : IsItemHovered ( ) | | ImGui : : IsItemFocused ( ) ) ) <nl> { <nl> ImRect clip_rect = pcmd - > ClipRect ; <nl> ImRect vtxs_rect ; <nl> void ImGui : : ShowMetricsWindow ( bool * p_open ) <nl> buf_p + = sprintf ( buf_p , " % s % 04d { pos = ( % 8 . 2f , % 8 . 2f ) , uv = ( % . 6f , % . 6f ) , col = % 08X } \ n " , ( n = = 0 ) ? " vtx " : " " , vtx_i , v . pos . x , v . pos . y , v . uv . x , v . uv . y , v . col ) ; <nl> } <nl> ImGui : : Selectable ( buf , false ) ; <nl> - if ( ImGui : : IsItemHovered ( ) ) <nl> + if ( ImGui : : IsItemHovered ( ) | | ImGui : : IsItemFocused ( ) ) <nl> overlay_draw_list - > AddPolyline ( triangles_pos , 3 , IM_COL32 ( 255 , 255 , 0 , 255 ) , true , 1 . 0f , false ) ; / / Add triangle without AA , more readable for large - thin triangle <nl> } <nl> ImGui : : TreePop ( ) ; <nl> mmm a / imgui . h <nl> ppp b / imgui . h <nl> enum ImGuiKey_ <nl> ImGuiKey_NavRight , / / e . g . Right arrow , D - Pad right <nl> ImGuiKey_NavUp , / / e . g . Up arrow , D - Pad up <nl> ImGuiKey_NavDown , / / e . g . Down arrow , D - Pad down <nl> + ImGuiKey_NavScrollLeft , / / e . g . Analog left <nl> + ImGuiKey_NavScrollRight , / / e . g . Analog right <nl> + ImGuiKey_NavScrollUp , / / e . g . Analog up <nl> + ImGuiKey_NavScrollDown , / / e . g . Analog down <nl> ImGuiKey_NavTweakFaster , / / e . g . Shift key , R - trigger <nl> ImGuiKey_NavTweakSlower , / / e . g . Alt key , L - trigger <nl> + ImGuiKey_NavLast_ , <nl> <nl> ImGuiKey_COUNT <nl> } ; <nl> mmm a / imgui_demo . cpp <nl> ppp b / imgui_demo . 
cpp <nl> static void ShowExampleAppConstrainedResize ( bool * p_open ) <nl> static void ShowExampleAppFixedOverlay ( bool * p_open ) <nl> { <nl> ImGui : : SetNextWindowPos ( ImVec2 ( 10 , 10 ) ) ; <nl> - if ( ! ImGui : : Begin ( " Example : Fixed Overlay " , p_open , ImVec2 ( 0 , 0 ) , 0 . 3f , ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoSavedSettings ) ) <nl> + if ( ! ImGui : : Begin ( " Example : Fixed Overlay " , p_open , ImVec2 ( 0 , 0 ) , 0 . 3f , ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoSavedSettings | ImGuiWindowFlags_NoNav ) ) <nl> { <nl> ImGui : : End ( ) ; <nl> return ; <nl> mmm a / imgui_internal . h <nl> ppp b / imgui_internal . h <nl> struct ImGuiContext <nl> ImRect NavInitDefaultResultRectRel ; <nl> bool NavInitDefaultResultExplicit ; / / Whether the result was explicitly requested with SetItemDefaultFocus ( ) <nl> bool NavMoveRequest ; / / Move request for this frame <nl> + bool NavMoveFromClampedRefRect ; / / Set by manual scrolling , if we scroll to a point where NavId isn ' t visible we reset navigation from visible items <nl> ImGuiNavDir NavMoveDir ; / / West / East / North / South <nl> ImGuiID NavMoveResultId ; / / Best move request candidate <nl> float NavMoveResultDistBox ; / / Best move request candidate box distance to current NavId <nl> namespace ImGui <nl> IMGUI_API void OpenPopupEx ( const char * str_id , bool reopen_existing ) ; <nl> <nl> IMGUI_API ImVec2 NavGetTweakDelta ( ) ; <nl> - IMGUI_API ImVec2 NavGetMovingDir ( ) ; <nl> + IMGUI_API ImVec2 NavGetMovingDir ( int stick_no , float slow_factor = 0 . 0f , float fast_factor = 0 . 0f ) ; <nl> <nl> inline IMGUI_API ImU32 GetColorU32 ( ImGuiCol idx , float alpha_mul ) { ImVec4 c = GImGui - > Style . Colors [ idx ] ; c . w * = GImGui - > Style . 
Alpha * alpha_mul ; return ImGui : : ColorConvertFloat4ToU32 ( c ) ; } <nl> inline IMGUI_API ImU32 GetColorU32 ( const ImVec4 & col ) { ImVec4 c = col ; c . w * = GImGui - > Style . Alpha ; return ImGui : : ColorConvertFloat4ToU32 ( c ) ; } <nl>
Nav : first committed pass for manual moving and manual scrolling ( after a bunch of attempts ) ( )
ocornut/imgui
04157da2919b657aa77a6b07863ae2007ceefe5f
2016-07-30T15:18:34Z
mmm a / folly / synchronization / DistributedMutex - inl . h <nl> ppp b / folly / synchronization / DistributedMutex - inl . h <nl> using CombineFunction = detail : : InlineFunctionRef < void ( ) , 48 > ; <nl> template < template < typename > class Atomic > <nl> class Waiter { <nl> public : <nl> - Waiter ( ) = default ; <nl> + Waiter ( ) { } <nl> Waiter ( Waiter & & ) = delete ; <nl> Waiter ( const Waiter & ) = delete ; <nl> Waiter & operator = ( Waiter & & ) = delete ; <nl>
Fix build of DistributedMutex
facebook/folly
9a38d12232daeab3db31c9f05dad3bfbf4f2f1b3
2019-04-09T05:43:10Z
mmm a / tensorflow / lite / delegates / flex / kernel_test . cc <nl> ppp b / tensorflow / lite / delegates / flex / kernel_test . cc <nl> TfLiteStatus GenericPrepare ( TfLiteContext * context , TfLiteDelegate * delegate , <nl> } <nl> <nl> / / There is no easy way to pass a parameter into the TfLiteDelegate ' s <nl> - / / ' prepare ' function , so we keep a global map for testing purpused . <nl> + / / ' prepare ' function , so we keep a global map for testing purposed . <nl> / / To avoid collisions use : GetPrepareFunction < __LINE__ > ( ) . <nl> std : : map < int , std : : vector < int > > * GetGlobalOpLists ( ) { <nl> static auto * op_list = new std : : map < int , std : : vector < int > > ; <nl> mmm a / tensorflow / lite / delegates / gpu / README . md <nl> ppp b / tensorflow / lite / delegates / gpu / README . md <nl> const TfLiteGpuDelegateOptionsV2 kDefaultOptions = <nl> TfLiteGpuDelegateOptionsV2Default ( ) ; <nl> ` ` ` <nl> <nl> - Similar for ` NewTfLiteMetalDelgate ( ) ` : <nl> + Similar for ` NewTfLiteMetalDelegate ( ) ` : <nl> <nl> ` ` ` c + + <nl> const TfLiteMetalDelegateOptions kDefaultOptions = { <nl> mmm a / tensorflow / lite / delegates / gpu / cl / cl_command_queue . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / cl_command_queue . h <nl> class ProfilingCommandQueue : public CLCommandQueue { <nl> double GetQueueExecutionTimeMs ( ) const ; <nl> <nl> / / Difference from GetQueueExecutionTimeMs is that this number doesn ' t include <nl> - / / time between kernels ( kernels launchs or preparing ) on GPU . Usually , this <nl> + / / time between kernels ( kernels launches or preparing ) on GPU . 
Usually , this <nl> / / time should be 5 - 10 % better than GetQueueExecutionTimeMs , because 5 - 10 % <nl> - / / spend on something else ( maybe kernels launchs or preparing ) <nl> + / / spend on something else ( maybe kernels launches or preparing ) <nl> double GetSumOfEventsTimeMs ( ) const ; <nl> <nl> / / This label will be used for all subsequent dispatches . <nl> mmm a / tensorflow / lite / delegates / gpu / cl / cl_program . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / cl_program . h <nl> class CLProgram { <nl> <nl> / / Return the cl_device_id associated with the program object . <nl> / / This can be the device associated with context on which the program object <nl> - / / has been created or can be device that was specified when a progam object <nl> + / / has been created or can be device that was specified when a program object <nl> / / was created using clCreateProgramWithBinary . <nl> cl_device_id GetDeviceId ( ) const { return device_id_ ; } <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / cl / gl_interop . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / gl_interop . h <nl> Status CreateEglSyncFromClEvent ( cl_event event , EGLDisplay display , <nl> bool IsEglSyncFromClEventSupported ( ) ; <nl> <nl> / / Creates CL event from EGL sync . <nl> - / / Created event could only be comsumed by AcquiredGlObject : : Acquire call as <nl> + / / Created event could only be consumed by AcquiredGlObject : : Acquire call as <nl> / / a ' wait_event ' . <nl> Status CreateClEventFromEglSync ( cl_context context , const EglSync & egl_sync , <nl> CLEvent * event ) ; <nl> mmm a / tensorflow / lite / delegates / gpu / cl / inference_context . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / inference_context . h <nl> struct CLNode { <nl> / / for every operation . <nl> std : : vector < int2 > ranges ; <nl> <nl> - / / Mostly for debug purposess . <nl> + / / Mostly for debug purposes . 
<nl> std : : string name ; <nl> <nl> CLNode ( ) = default ; <nl> class InferenceContext { <nl> CalculationsPrecision precision_ ; <nl> TensorStorageType storage_type_ ; <nl> <nl> - / / Directly mapped nodes from graph , but some of them " inactiv " due <nl> - / / to fusion ( inactiv = fused ) . <nl> + / / Directly mapped nodes from graph , but some of them " inactive " due <nl> + / / to fusion ( inactive = fused ) . <nl> / / Memory is allocated only once , in ConvertOperations , and is not modified <nl> / / anywhere . <nl> std : : vector < CLNode > nodes_ ; <nl> mmm a / tensorflow / lite / delegates / gpu / cl / kernels / fully_connected . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / kernels / fully_connected . cc <nl> namespace { <nl> / / vec mat mult ) on 4 parts to create more threads <nl> / / tid . y thread process every 4 - th element in vec vec dot <nl> / / Good results for ~ 1024 x 1024 sizes , for other can be written more <nl> - / / otimized shaders <nl> + / / optimized shaders <nl> <nl> std : : string GetFullyConnectedKernelCode ( <nl> const OperationDef & op_def , const LinearStorage & biases , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / kernels / max_unpooling . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / kernels / max_unpooling . 
cc <nl> namespace gpu { <nl> namespace cl { <nl> namespace { <nl> <nl> - std : : string GetMaxUnoolingKernelCode ( <nl> + std : : string GetMaxUnpoolingKernelCode ( <nl> const OperationDef & op_def , const CLDevice & device , <nl> const std : : vector < ElementwiseOperation * > & linked_operations ) { <nl> TensorCodeGenerator src ( " src_data " , <nl> std : : string GetMaxUnoolingKernelCode ( <nl> return c ; <nl> } <nl> <nl> - std : : string GetMaxUnooling3DKernelCode ( <nl> + std : : string GetMaxUnpooling3DKernelCode ( <nl> const OperationDef & op_def , const CLDevice & device , <nl> const std : : vector < ElementwiseOperation * > & linked_operations ) { <nl> TensorCodeGenerator src ( <nl> MaxUnpooling & MaxUnpooling : : operator = ( MaxUnpooling & & kernel ) { <nl> } <nl> <nl> Status MaxUnpooling : : Compile ( const CreationContext & creation_context ) { <nl> - const auto code = GetMaxUnoolingKernelCode ( <nl> + const auto code = GetMaxUnpoolingKernelCode ( <nl> definition_ , * creation_context . device , linked_operations_ ) ; <nl> return creation_context . cache - > GetOrCreateCLKernel ( <nl> code , " main_function " , * creation_context . context , <nl> MaxUnpooling3D & MaxUnpooling3D : : operator = ( MaxUnpooling3D & & kernel ) { <nl> } <nl> <nl> Status MaxUnpooling3D : : Compile ( const CreationContext & creation_context ) { <nl> - const auto code = GetMaxUnooling3DKernelCode ( <nl> + const auto code = GetMaxUnpooling3DKernelCode ( <nl> definition_ , * creation_context . device , linked_operations_ ) ; <nl> return creation_context . cache - > GetOrCreateCLKernel ( <nl> code , " main_function " , * creation_context . context , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / kernels / strided_slice . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / kernels / strided_slice . 
cc <nl> std : : string GetStridedSliceCode ( <nl> return c ; <nl> } <nl> <nl> - bool Is4Alighed ( const SliceAttributes & attr ) { <nl> + bool Is4Aligned ( const SliceAttributes & attr ) { <nl> return attr . strides . c = = 1 & & attr . starts . c % 4 = = 0 ; <nl> } <nl> <nl> int4 GetOffset ( const SliceAttributes & attr , int src_width , int src_height , <nl> offset . z = src_channels + attr . ends . c ; <nl> } <nl> } <nl> - if ( Is4Alighed ( attr ) ) { <nl> + if ( Is4Aligned ( attr ) ) { <nl> offset . z / = 4 ; <nl> } <nl> if ( attr . strides . b > 0 ) { <nl> StridedSlice & StridedSlice : : operator = ( StridedSlice & & operation ) { <nl> } <nl> <nl> Status StridedSlice : : Compile ( const CreationContext & creation_context ) { <nl> - const auto code = GetStridedSliceCode ( definition_ , Is4Alighed ( attributes_ ) , <nl> + const auto code = GetStridedSliceCode ( definition_ , Is4Aligned ( attributes_ ) , <nl> linked_operations_ ) ; <nl> return creation_context . cache - > GetOrCreateCLKernel ( <nl> code , " main_function " , * creation_context . context , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / opencl_wrapper . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / opencl_wrapper . h <nl> extern PFN_clCreateFromEGLImageKHR clCreateFromEGLImageKHR ; <nl> extern PFN_clEnqueueAcquireEGLObjectsKHR clEnqueueAcquireEGLObjectsKHR ; <nl> extern PFN_clEnqueueReleaseEGLObjectsKHR clEnqueueReleaseEGLObjectsKHR ; <nl> <nl> - / / For convinient image creation <nl> + / / For convenient image creation <nl> / / It uses clCreateImage if it available ( clCreateImage available since cl 1 . 2 ) <nl> / / otherwise it will use legacy clCreateImage2D <nl> cl_mem CreateImage2DLegacy ( cl_context context , cl_mem_flags flags , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / precision . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / precision . 
h <nl> enum class CalculationsPrecision { F32 , F32_F16 , F16 } ; <nl> / / F32_F16 - as F16 , but some operations ( Convolution , <nl> / / DepthWiseConvolution , FullyConnected , ConvolutionTransposed ) <nl> / / have accumulator in F32 and usually it calculates 4 mads in F16 , sum them , <nl> - / / than converts this partial sum to F32 and add to acumulator . <nl> + / / than converts this partial sum to F32 and add to accumulator . <nl> <nl> DataType DeduceDataTypeFromPrecision ( CalculationsPrecision precision ) ; <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / cl / tensor . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / tensor . cc <nl> Status AllocateTensorMemory ( const CLContext & context , const CLDevice & device , <nl> case TensorStorageType : : SINGLE_TEXTURE_2D : { <nl> if ( slices ! = 1 ) { <nl> return InvalidArgumentError ( absl : : StrCat ( <nl> - " SINGLE_TEXTURE_2D support only cnannels in range [ 1 - 4 ] , but " , <nl> + " SINGLE_TEXTURE_2D support only channels in range [ 1 - 4 ] , but " , <nl> shape . c , " was provided " ) ) ; <nl> } <nl> cl_image_desc desc ; <nl> mmm a / tensorflow / lite / delegates / gpu / common / memory_management . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / memory_management . h <nl> enum class MemoryStrategy { <nl> Status BestGreedy ( const std : : vector < TensorUsageRecord < size_t > > & usage_records , <nl> ObjectsAssignment < size_t > * assignment ) ; <nl> <nl> - / / Calculates the assignement of shared objects to given tensors , including <nl> + / / Calculates the assignment of shared objects to given tensors , including <nl> / / objects ' sizes . Below there are specializations for different types , that <nl> / / support more memory strategies . 
<nl> / / If reallocation_graph is provided , assignment of shared objects support <nl> Status AssignObjectsToTensors ( <nl> MemoryStrategy strategy , ObjectsAssignment < uint3 > * assignment , <nl> const UsageGraph * reallocation_graph ) ; <nl> <nl> - / / Calculates the assignement of tensors to offsets , considering those tensors <nl> + / / Calculates the assignment of tensors to offsets , considering those tensors <nl> / / are going to be allocated in one continuous memory block . <nl> Status AssignOffsetsToTensors ( <nl> const std : : vector < TensorUsageRecord < size_t > > & usage_records , <nl> mmm a / tensorflow / lite / delegates / gpu / common / memory_management / greedy_by_size_assignment . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / memory_management / greedy_by_size_assignment . cc <nl> Status GreedyBySizeAssignment ( <nl> assignment - > offsets . resize ( num_tensors ) ; <nl> assignment - > total_size = 0 ; <nl> <nl> - / / Ordered records are to be sorted by size of corrseponding tensor . <nl> + / / Ordered records are to be sorted by size of corresponding tensor . <nl> std : : vector < TensorUsageWithIndex < size_t > > ordered_records ; <nl> for ( size_t i = 0 ; i < num_tensors ; + + i ) { <nl> ordered_records . emplace_back ( & usage_records [ i ] , i ) ; <nl> Status GreedyBySizeAssignment ( <nl> / / - We have tensor usage records of all intermideate tensors as an input . Each <nl> / / record consists of tensor size , first and last tasks , that use it . Let ' s call <nl> / / [ first_task . . last_task ] a tensor usage interval ; <nl> - / / - Distance between two usage intervals is the absoulte difference between <nl> + / / - Distance between two usage intervals is the absolute difference between <nl> / / closest tasks in their intervals . If two usage intervals don ' t intersect , <nl> / / than the distance between them is positive ; <nl> / / - Calculate positional maximums vector , e . g . 
the vector of lower bounds on <nl> mmm a / tensorflow / lite / delegates / gpu / common / memory_management / greedy_by_size_assignment . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / memory_management / greedy_by_size_assignment . h <nl> namespace gpu { <nl> / / gap ; <nl> / / - If such a gap has been found , current tensor should be allocated into this <nl> / / gap . Otherwise we can allocate it after the rightmost tensor , which usage <nl> - / / interval intersects with usage inteval of current tensor . So we assign <nl> + / / interval intersects with usage interval of current tensor . So we assign <nl> / / corresponding offset to current tensor and the tensor becomes assigned . <nl> Status GreedyBySizeAssignment ( <nl> const std : : vector < TensorUsageRecord < size_t > > & usage_records , <nl> Status GreedyBySizeAssignment ( <nl> / / - We have tensor usage records of all intermideate tensors as an input . Each <nl> / / record consists of tensor size , first and last tasks , that use it . Let ' s call <nl> / / [ first_task . . last_task ] a tensor usage interval ; <nl> - / / - Distance between two usage intervals is the absoulte difference between <nl> + / / - Distance between two usage intervals is the absolute difference between <nl> / / closest tasks in their intervals . If two usage intervals don ' t intersect , <nl> / / than the distance between them is positive ; <nl> / / - Calculate positional maximums vector , e . g . the vector of lower bounds on <nl> mmm a / tensorflow / lite / delegates / gpu / common / memory_management / internal . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / memory_management / internal . h <nl> bool CompareBySize ( const TensorUsageWithIndex < size_t > & first , <nl> const TensorUsageWithIndex < size_t > & second ) ; <nl> <nl> / / TaskProfile is a vector with information about all intermediate tensors , that <nl> - / / should exist in memory during the executon of the task . 
Elements of the <nl> + / / should exist in memory during the execution of the task . Elements of the <nl> / / vector must be sorted in non - increasing order of corresponding tensors sizes . <nl> using TaskProfile = std : : vector < TensorUsageWithIndex < size_t > > ; <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / common / testing / interpreter_utils . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / testing / interpreter_utils . h <nl> namespace gpu { <nl> namespace testing { <nl> <nl> / / Runs Tensorflow Lite model using Tensorflow Lite with a delegate and <nl> - / / an appropriate operations resolver . If delegate is nullptr , infererence will <nl> + / / an appropriate operations resolver . If delegate is nullptr , inference will <nl> / / be done only on CPU . <nl> Status InterpreterInvokeWithOpResolver ( const : : tflite : : Model * model , <nl> TfLiteDelegate * delegate , <nl> Status InterpreterInvokeWithOpResolver ( const : : tflite : : Model * model , <nl> std : : vector < TensorFloat32 > * outputs ) ; <nl> <nl> / / Runs Tensorflow Lite model using Tensorflow Lite with a delegate and <nl> - / / builtin operations resolver . If delegate is nullptr , infererence will <nl> + / / builtin operations resolver . If delegate is nullptr , inference will <nl> / / be done only on CPU . <nl> Status InterpreterInvoke ( const : : tflite : : Model * model , TfLiteDelegate * delegate , <nl> const std : : vector < TensorFloat32 > & inputs , <nl> mmm a / tensorflow / lite / delegates / gpu / common / workgroup_selection . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / workgroup_selection . 
cc <nl> std : : vector < int > GetDivisorsForRange ( int number , int range ) { <nl> std : : vector < int > GetPossibleSizes ( int number , <nl> WorkGroupSizeAlignment z_alignment ) { <nl> if ( z_alignment = = WorkGroupSizeAlignment : : PRECISE ) { <nl> - / / we will use for potential sizes , sizes that cover grid preciselly <nl> + / / we will use for potential sizes , sizes that cover grid precisely <nl> / / work group size * k ( k is integer ) = = grid_size <nl> return GetDivisors ( number ) ; <nl> } else { <nl> mmm a / tensorflow / lite / delegates / gpu / delegate . h <nl> ppp b / tensorflow / lite / delegates / gpu / delegate . h <nl> typedef struct { <nl> / / each time inference engine needs to make a decision , it uses <nl> / / ordered priorities to do so . <nl> / / For example : <nl> - / / MAX_PRECISION at priority1 would not allow to decrease presision , <nl> + / / MAX_PRECISION at priority1 would not allow to decrease precision , <nl> / / but moving it to priority2 or priority3 would result in F16 calculation . <nl> / / <nl> / / Priority is defined in TfLiteGpuInferencePriority . <nl> mmm a / tensorflow / lite / delegates / gpu / gl / compiler / variable_accessor . h <nl> ppp b / tensorflow / lite / delegates / gpu / gl / compiler / variable_accessor . h <nl> class VariableAccessor : public InlineRewrite { <nl> / / Returns const variables that need to be inlined in the a shader ' s code . <nl> std : : string GetConstDeclarations ( ) const ; <nl> <nl> - / / Returns shared varaible declarations that need to be inlined . <nl> + / / Returns shared variable declarations that need to be inlined . <nl> std : : string GetSharedVariableDeclarations ( ) const ; <nl> <nl> / / Returns uniform parameter declarations that need to be inlined . <nl> mmm a / tensorflow / lite / delegates / gpu / gl / gl_errors . cc <nl> ppp b / tensorflow / lite / delegates / gpu / gl / gl_errors . 
cc <nl> Status GetEglError ( ) { <nl> case EGL_CONTEXT_LOST : <nl> return InternalError ( <nl> " A power management event has occurred . The application must destroy " <nl> - " all contexts and reinitialise OpenGL ES state and objects to " <nl> + " all contexts and reinitialize OpenGL ES state and objects to " <nl> " continue rendering . " ) ; <nl> } <nl> return UnknownError ( " EGL error : " + std : : to_string ( error ) ) ; <nl> mmm a / tensorflow / lite / delegates / gpu / gl / gl_sync . h <nl> ppp b / tensorflow / lite / delegates / gpu / gl / gl_sync . h <nl> class GlSync { <nl> / / Waits until GPU is done with processing . <nl> Status GlSyncWait ( ) ; <nl> <nl> - / / Waits until all comands are flushed and then performs active waiting by <nl> + / / Waits until all commands are flushed and then performs active waiting by <nl> / / spinning a thread and checking sync status . It leads to shorter wait time <nl> / / ( up to tens of ms ) but consumes more CPU . <nl> Status GlActiveSyncWait ( ) ; <nl> mmm a / tensorflow / lite / delegates / gpu / gl / kernels / add_test . cc <nl> ppp b / tensorflow / lite / delegates / gpu / gl / kernels / add_test . cc <nl> TEST ( AddTest , InputTensorAndScalar ) { <nl> Pointwise ( FloatNear ( 1e - 6 ) , { - 1 . 9 , 0 . 3 , 0 . 8 , 0 . 9 , 1 . 2 , 2 . 1 } ) ) ; <nl> } <nl> <nl> - TEST ( AddTest , InputTensorWithConstandBroadcast ) { <nl> + TEST ( AddTest , InputTensorWithConstantBroadcast ) { <nl> TensorRef < BHWC > input ; <nl> input . type = DataType : : FLOAT32 ; <nl> input . ref = 0 ; <nl> mmm a / tensorflow / lite / delegates / gpu / gl / workgroups / ideal_workgroup_picker . cc <nl> ppp b / tensorflow / lite / delegates / gpu / gl / workgroups / ideal_workgroup_picker . cc <nl> namespace { <nl> / / ( b / 117291356 ) . 
<nl> <nl> / / Describes the ideal convolution for the specific operation case <nl> - / / Case here means specific " kernel + strides " conbination for specific <nl> - / / operatoins type , not sizes of input and output tensors , they can be any . <nl> + / / Case here means specific " kernel + strides " combination for specific <nl> + / / operations type , not sizes of input and output tensors , they can be any . <nl> struct IdealByCase { <nl> bool ParamsAccepted ( OperationType in_op_type , HW in_kernel , <nl> HW in_strides ) const { <nl> mmm a / tensorflow / lite / delegates / gpu / metal / compiled_model . cc <nl> ppp b / tensorflow / lite / delegates / gpu / metal / compiled_model . cc <nl> uint32_t BufferUseCount ( ValueId id , <nl> } <nl> <nl> / / Examines if the second operation can be linked to the first one . Linking may <nl> - / / be skipped in the situation when conflic may happen : if first operation ' s <nl> + / / be skipped in the situation when conflict may happen : if first operation ' s <nl> / / output is used by more than 1 other operation . <nl> bool CanFuseOperations ( const ComputeTaskDescriptorPtr first , <nl> const ComputeTaskDescriptorPtr second , <nl> ComputeTaskDescriptorPtr NonLinkableStub ( int operation_id , ValueId input_id , <nl> } <nl> <nl> ComputeTaskDescriptorPtr FuseChain ( const FusionSequence & chain ) { <nl> - auto fused_desciptor = std : : make_shared < ComputeTaskDescriptor > ( ) ; <nl> + auto fused_descriptor = std : : make_shared < ComputeTaskDescriptor > ( ) ; <nl> / / The id of fused descriptor is the id of the first descriptor in the list . <nl> - fused_desciptor - > id = chain . front ( ) - > id ; <nl> + fused_descriptor - > id = chain . front ( ) - > id ; <nl> FusionSequence sequence ; <nl> if ( chain . front ( ) - > is_linkable ) { <nl> / / The first task is linkable so it contains only linkable code . Insert <nl> ComputeTaskDescriptorPtr FuseChain ( const FusionSequence & chain ) { <nl> buffer . 
declaration + name + " [ [ buffer ( " + index + " ) ] ] , \ n " ; <nl> call_arguments + = " , buffer " + index ; <nl> input_index + + ; <nl> - fused_desciptor - > input_buffers . push_back ( { buffer . id , " " } ) ; <nl> + fused_descriptor - > input_buffers . push_back ( { buffer . id , " " } ) ; <nl> } <nl> } <nl> / / We have an output id that is the input for the next task . <nl> ComputeTaskDescriptorPtr FuseChain ( const FusionSequence & chain ) { <nl> buffer . declaration + name + " [ [ buffer ( " + index + " ) ] ] , \ n " ; <nl> call_arguments + = " , buffer " + index ; <nl> immutable_index + + ; <nl> - fused_desciptor - > immutable_buffers . push_back ( buffer ) ; <nl> + fused_descriptor - > immutable_buffers . push_back ( buffer ) ; <nl> } <nl> <nl> for ( auto buffer : desc - > uniform_buffers ) { <nl> ComputeTaskDescriptorPtr FuseChain ( const FusionSequence & chain ) { <nl> buffer . declaration + name + " [ [ buffer ( " + index + " ) ] ] , \ n " ; <nl> call_arguments + = " , buffer " + index ; <nl> uniform_index + + ; <nl> - fused_desciptor - > uniform_buffers . push_back ( { " " , buffer . data_function } ) ; <nl> + fused_descriptor - > uniform_buffers . push_back ( { " " , buffer . data_function } ) ; <nl> } <nl> <nl> if ( desc - > is_linkable ) { <nl> ComputeTaskDescriptorPtr FuseChain ( const FusionSequence & chain ) { <nl> } <nl> <nl> ComputeTaskDescriptorPtr non_linkable = sequence . front ( ) ; <nl> - fused_desciptor - > shader_source = <nl> + fused_descriptor - > shader_source = <nl> absl : : Substitute ( non_linkable - > shader_source , function_code , <nl> buffer_declarations , call_code ) ; <nl> std : : vector < ValueId > alias ; <nl> ComputeTaskDescriptorPtr FuseChain ( const FusionSequence & chain ) { <nl> for ( int i = 0 ; i < chain . size ( ) - 1 ; i + + ) { <nl> alias . push_back ( chain [ i ] - > output_buffer . 
id ) ; <nl> } <nl> - fused_desciptor - > output_buffer = { <nl> + fused_descriptor - > output_buffer = { <nl> fused_id , " " , non_linkable - > output_buffer . dimensions_function , alias } ; <nl> - fused_desciptor - > resize_function = non_linkable - > resize_function ; <nl> + fused_descriptor - > resize_function = non_linkable - > resize_function ; <nl> for ( const auto & desc : sequence ) { <nl> - fused_desciptor - > description + = desc - > description + " _ " ; <nl> + fused_descriptor - > description + = desc - > description + " _ " ; <nl> } <nl> - return fused_desciptor ; <nl> + return fused_descriptor ; <nl> } <nl> <nl> } / / namespace <nl> mmm a / tensorflow / lite / delegates / gpu / metal / inference_context . h <nl> ppp b / tensorflow / lite / delegates / gpu / metal / inference_context . h <nl> limitations under the License . <nl> / / / 2 . Model compilation . Global list of ComputeTaskDescriptors is transformed <nl> / / / into the sorted list of sets of descriptors . A set can be transformed <nl> / / / later into a single GPU task . <nl> - / / / 3 . GPU compute tasks generation . Shader code generation happes here . <nl> + / / / 3 . GPU compute tasks generation . Shader code generation happens here . <nl> / / / 4 . Intermediate resource allocation . <nl> / / / Inference . <nl> @ interface TFLInferenceContext : NSObject <nl> limitations under the License . <nl> / / / Inserts all GPU compute tasks into the command encoder . <nl> / / / @ param inputOutputBuffers Must be created and passed into the method with pairs ID : buffer <nl> / / / @ param encoderBlock User - defined block to take control over command encoder . Can be nil . <nl> - / / / The block can be used , for example , for fine - graned benchmarking where end encoding <nl> + / / / The block can be used , for example , for fine - grained benchmarking where end encoding <nl> / / / is performed and command buffer is committed with completion block . 
A new command <nl> / / / buffer must be created and new command encoder must be returned by the block . <nl> / / / The block is called after every dispatch encoding . <nl> - / / / @ discussion No GPU sychronization functions are used inside . All GPU resources must be created <nl> + / / / @ discussion No GPU synchronization functions are used inside . All GPU resources must be created <nl> / / / with the same device which has been used in compileModelWithDevice ( ) method . <nl> - ( void ) encodeWithEncoder : ( id < MTLComputeCommandEncoder > ) commandEncoder <nl> inputOutputBuffers : ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / add_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / add_test . mm <nl> - ( void ) testInputTensorAndScalar { <nl> XCTAssertTrue ( status . ok ( ) , @ " % s " , status . error_message ( ) . c_str ( ) ) ; <nl> } <nl> <nl> - - ( void ) testInputTensorWithConstandBroadcast { <nl> + - ( void ) testInputTensorWithConstantBroadcast { <nl> TensorRef < BHWC > input ; <nl> input . type = DataType : : FLOAT32 ; <nl> input . ref = 0 ; <nl> mmm a / tensorflow / lite / delegates / nnapi / acceleration_test_list . cc <nl> ppp b / tensorflow / lite / delegates / nnapi / acceleration_test_list . cc <nl> const constexpr char * NnapiAccelerationTestParams : : kAccelerationTestConfig = <nl> # <nl> # The test_id is test_suite_name / test_name , this differs from the <nl> # name used by the build because of the / separator instead of . 
<nl> - # Parametrised tests names are composed by the base test name / test / ordinal <nl> + # Parameterized tests names are composed by the base test name / test / ordinal <nl> # the ordinal is the position in the list of parameters generated by the <nl> # cardinal product of all the different parameter sets <nl> <nl> const constexpr char * NnapiAccelerationTestParams : : kAccelerationTestConfig = <nl> <nl> # # Test Arguments <nl> # <nl> - # The test can be parametrised with the minimum Android SDK version <nl> + # The test can be parameterized with the minimum Android SDK version <nl> # to apply the acceleration validation for . <nl> # If omitted will use 27 <nl> <nl> mmm a / tensorflow / lite / delegates / nnapi / nnapi_delegate . cc <nl> ppp b / tensorflow / lite / delegates / nnapi / nnapi_delegate . cc <nl> bool IsScalarInputSupported ( int builtin_code ) { <nl> } <nl> } <nl> <nl> - / / Check if the operation requires explict conversion from int8 to uint8 values . <nl> + / / Check if the operation requires explicit conversion from int8 to uint8 <nl> + / / values . <nl> bool NeedInt8Conversion ( const TfLiteContext * context , int builtin_code , <nl> const TfLiteNode * node ) { <nl> const int input_id = node - > inputs - > data [ 0 ] ; <nl> mmm a / tensorflow / lite / delegates / nnapi / nnapi_delegate . h <nl> ppp b / tensorflow / lite / delegates / nnapi / nnapi_delegate . h <nl> class StatefulNnApiDelegate : public TfLiteDelegate { <nl> bool disallow_nnapi_cpu ; <nl> / / Tensor to ANeuralNetworksMemory mapping . <nl> std : : vector < MemoryRegistration > tensor_memory_map ; <nl> - / / Constains a non zero value if any NNAPI method call <nl> + / / Contains a non zero value if any NNAPI method call <nl> / / operation returned a non zero result code . <nl> int nnapi_errno ; <nl> / / Cache of kernels already built in StatefulNnApiDelegate : : DoPrepare <nl> mmm a / tensorflow / lite / delegates / nnapi / nnapi_delegate_test . 
cc <nl> ppp b / tensorflow / lite / delegates / nnapi / nnapi_delegate_test . cc <nl> class PadV2OpConstModel : public PadOpModel < T1 > { <nl> } ; <nl> <nl> / / Test case where paddings is a non - const tensor . <nl> - template < typename RegularInputOuput > <nl> - class PadV2OpDynamicModel : public PadOpModel < RegularInputOuput > { <nl> + template < typename RegularInputOutput > <nl> + class PadV2OpDynamicModel : public PadOpModel < RegularInputOutput > { <nl> public : <nl> PadV2OpDynamicModel ( const TensorData & input , <nl> std : : initializer_list < int > paddings_shape , <nl> - RegularInputOuput constant_values , <nl> + RegularInputOutput constant_values , <nl> const TensorData & output ) { <nl> this - > input_ = this - > AddInput ( input ) ; <nl> this - > paddings_ = this - > AddInput ( TensorType_INT32 ) ; <nl> this - > constant_values_ = this - > AddConstInput ( <nl> - GetTensorType < RegularInputOuput > ( ) , { constant_values } , { 1 } ) ; <nl> + GetTensorType < RegularInputOutput > ( ) , { constant_values } , { 1 } ) ; <nl> this - > output_ = this - > AddOutput ( output ) ; <nl> <nl> this - > SetBuiltinOp ( BuiltinOperator_PADV2 , BuiltinOptions_PadV2Options , <nl>
Merge pull request from kiszk : spelling_tweaks_lite_delegates
tensorflow/tensorflow
ee57aaeae9bfdbe201d234660bc3064a87c91053
2020-03-20T19:21:50Z
mmm a / src / core . h <nl> ppp b / src / core . h <nl> class CTxOut <nl> / / to spend something , then we consider it dust . <nl> / / A typical txout is 34 bytes big , and will <nl> / / need a CTxIn of at least 148 bytes to spend , <nl> - / / so dust is a txout less than 54 uBTC <nl> - / / ( 5460 satoshis ) with default nMinRelayTxFee <nl> + / / so dust is a txout less than 546 satoshis <nl> + / / with default nMinRelayTxFee . <nl> return ( ( nValue * 1000 ) / ( 3 * ( ( int ) GetSerializeSize ( SER_DISK , 0 ) + 148 ) ) < nMinRelayTxFee ) ; <nl> } <nl> <nl> mmm a / src / main . cpp <nl> ppp b / src / main . cpp <nl> unsigned int nCoinCacheSize = 5000 ; <nl> / * * Fees smaller than this ( in satoshi ) are considered zero fee ( for transaction creation ) * / <nl> int64_t CTransaction : : nMinTxFee = 10000 ; / / Override with - mintxfee <nl> / * * Fees smaller than this ( in satoshi ) are considered zero fee ( for relaying ) * / <nl> - int64_t CTransaction : : nMinRelayTxFee = 10000 ; <nl> + int64_t CTransaction : : nMinRelayTxFee = 1000 ; <nl> <nl> static CMedianFilter < int > cPeerBlockCounts ( 8 , 0 ) ; / / Amount of blocks that other nodes claim to have <nl> <nl> mmm a / src / test / transaction_tests . cpp <nl> ppp b / src / test / transaction_tests . cpp <nl> BOOST_AUTO_TEST_CASE ( test_IsStandard ) <nl> string reason ; <nl> BOOST_CHECK ( IsStandardTx ( t , reason ) ) ; <nl> <nl> - t . vout [ 0 ] . nValue = 5011 ; / / dust <nl> + t . vout [ 0 ] . nValue = 501 ; / / dust <nl> BOOST_CHECK ( ! IsStandardTx ( t , reason ) ) ; <nl> <nl> - t . vout [ 0 ] . nValue = 6011 ; / / not dust <nl> + t . vout [ 0 ] . nValue = 601 ; / / not dust <nl> BOOST_CHECK ( IsStandardTx ( t , reason ) ) ; <nl> <nl> t . vout [ 0 ] . scriptPubKey = CScript ( ) < < OP_1 ; <nl>
Merge pull request from mikehearn / fee_drop
bitcoin/bitcoin
beabca2be092d0e2a1de26989d4e63a12cce1284
2014-02-24T19:05:54Z
mmm a / BUCK <nl> ppp b / BUCK <nl> GMOCK_OVERRIDE_FLAGS = [ <nl> ] <nl> <nl> COMPILER_FLAGS = LIBRARY_COMPILER_FLAGS + [ <nl> - " - std = c11 " , <nl> + " - std = c + + 1y " , <nl> ] <nl> <nl> - TEST_COMPILER_FLAGS = BASE_COMPILER_FLAGS + GMOCK_OVERRIDE_FLAGS + [ " - std = c + + 11 " ] <nl> + TEST_COMPILER_FLAGS = BASE_COMPILER_FLAGS + GMOCK_OVERRIDE_FLAGS + [ " - std = c + + 1y " ] <nl> <nl> cxx_library ( <nl> name = " yoga " , <nl> - srcs = glob ( [ " yoga / * . c " ] ) , <nl> + srcs = glob ( [ " yoga / * . cpp " ] ) , <nl> header_namespace = " " , <nl> exported_headers = subdir_glob ( [ ( " " , " yoga / * . h " ) ] ) , <nl> compiler_flags = COMPILER_FLAGS , <nl> mmm a / Yoga . podspec <nl> ppp b / Yoga . podspec <nl> Pod : : Spec . new do | spec | <nl> ' - fexceptions ' , <nl> ' - Wall ' , <nl> ' - Werror ' , <nl> - ' - std = c11 ' , <nl> + ' - std = c + + 1y ' , <nl> ' - fPIC ' <nl> ] <nl> - spec . source_files = ' yoga / * * / * . { c , h } ' <nl> + spec . source_files = ' yoga / * * / * . { c , h , cpp } ' <nl> end <nl> mmm a / enums . py <nl> ppp b / enums . py <nl> def to_log_lower ( symbol ) : <nl> f . write ( ' YG_EXTERN_C_END \ n ' ) <nl> <nl> # write out C body for printing <nl> - with open ( root + ' / yoga / YGEnums . c ' , ' w ' ) as f : <nl> + with open ( root + ' / yoga / YGEnums . cpp ' , ' w ' ) as f : <nl> f . write ( LICENSE ) <nl> f . write ( ' # include " YGEnums . h " \ n \ n ' ) <nl> for name , values in sorted ( ENUMS . items ( ) ) : <nl> mmm a / javascript / binding . gyp <nl> ppp b / javascript / binding . gyp <nl> <nl> ] , <nl> <nl> " sources " : [ <nl> - " sources / yoga / YGNodeList . c " , <nl> - " sources / yoga / Yoga . c " , <nl> + " sources / yoga / YGNodeList . cpp " , <nl> + " sources / yoga / Yoga . cpp " , <nl> " sources / Config . cc " , <nl> " sources / Node . cc " , <nl> " sources / global . cc " , <nl> similarity index 99 % <nl> rename from yoga / YGEnums . c <nl> rename to yoga / YGEnums . 
cpp <nl> mmm a / yoga / YGEnums . c <nl> ppp b / yoga / YGEnums . cpp <nl> const char * YGWrapToString ( const YGWrap value ) { <nl> } <nl> return " unknown " ; <nl> } <nl> - <nl> similarity index 78 % <nl> rename from yoga / YGNodeList . c <nl> rename to yoga / YGNodeList . cpp <nl> mmm a / yoga / YGNodeList . c <nl> ppp b / yoga / YGNodeList . cpp <nl> <nl> <nl> # include " YGNodeList . h " <nl> <nl> - extern YGMalloc gYGMalloc ; <nl> - extern YGRealloc gYGRealloc ; <nl> - extern YGFree gYGFree ; <nl> - <nl> struct YGNodeList { <nl> uint32_t capacity ; <nl> uint32_t count ; <nl> struct YGNodeList { <nl> } ; <nl> <nl> YGNodeListRef YGNodeListNew ( const uint32_t initialCapacity ) { <nl> - const YGNodeListRef list = gYGMalloc ( sizeof ( struct YGNodeList ) ) ; <nl> - YGAssert ( list ! = NULL , " Could not allocate memory for list " ) ; <nl> + const YGNodeListRef list = <nl> + ( const YGNodeListRef ) malloc ( sizeof ( struct YGNodeList ) ) ; <nl> + YGAssert ( list ! = nullptr , " Could not allocate memory for list " ) ; <nl> <nl> list - > capacity = initialCapacity ; <nl> list - > count = 0 ; <nl> - list - > items = gYGMalloc ( sizeof ( YGNodeRef ) * list - > capacity ) ; <nl> - YGAssert ( list - > items ! = NULL , " Could not allocate memory for items " ) ; <nl> + list - > items = ( YGNodeRef * ) malloc ( sizeof ( YGNodeRef ) * list - > capacity ) ; <nl> + YGAssert ( list - > items ! 
= nullptr , " Could not allocate memory for items " ) ; <nl> <nl> return list ; <nl> } <nl> <nl> void YGNodeListFree ( const YGNodeListRef list ) { <nl> if ( list ) { <nl> - gYGFree ( list - > items ) ; <nl> - gYGFree ( list ) ; <nl> + free ( list - > items ) ; <nl> + free ( list ) ; <nl> } <nl> } <nl> <nl> void YGNodeListInsert ( YGNodeListRef * listp , const YGNodeRef node , const uint32_t <nl> <nl> if ( list - > count = = list - > capacity ) { <nl> list - > capacity * = 2 ; <nl> - list - > items = gYGRealloc ( list - > items , sizeof ( YGNodeRef ) * list - > capacity ) ; <nl> - YGAssert ( list - > items ! = NULL , " Could not extend allocation for items " ) ; <nl> + list - > items = <nl> + ( YGNodeRef * ) realloc ( list - > items , sizeof ( YGNodeRef ) * list - > capacity ) ; <nl> + YGAssert ( list - > items ! = nullptr , " Could not extend allocation for items " ) ; <nl> } <nl> <nl> for ( uint32_t i = list - > count ; i > index ; i - - ) { <nl> void YGNodeListReplace ( YGNodeListRef list , const uint32_t index , const YGNodeRef <nl> <nl> void YGNodeListRemoveAll ( const YGNodeListRef list ) { <nl> for ( uint32_t i = 0 ; i < list - > count ; i + + ) { <nl> - list - > items [ i ] = NULL ; <nl> + list - > items [ i ] = nullptr ; <nl> } <nl> list - > count = 0 ; <nl> } <nl> <nl> YGNodeRef YGNodeListRemove ( const YGNodeListRef list , const uint32_t index ) { <nl> const YGNodeRef removed = list - > items [ index ] ; <nl> - list - > items [ index ] = NULL ; <nl> + list - > items [ index ] = nullptr ; <nl> <nl> for ( uint32_t i = index ; i < list - > count - 1 ; i + + ) { <nl> list - > items [ i ] = list - > items [ i + 1 ] ; <nl> - list - > items [ i + 1 ] = NULL ; <nl> + list - > items [ i + 1 ] = nullptr ; <nl> } <nl> <nl> list - > count - - ; <nl> YGNodeRef YGNodeListDelete ( const YGNodeListRef list , const YGNodeRef node ) { <nl> } <nl> } <nl> <nl> - return NULL ; <nl> + return nullptr ; <nl> } <nl> <nl> YGNodeRef YGNodeListGet ( const YGNodeListRef list , const 
uint32_t index ) { <nl> YGNodeRef YGNodeListGet ( const YGNodeListRef list , const uint32_t index ) { <nl> return list - > items [ index ] ; <nl> } <nl> <nl> - return NULL ; <nl> + return nullptr ; <nl> } <nl> <nl> YGNodeListRef YGNodeListClone ( const YGNodeListRef oldList ) { <nl> if ( ! oldList ) { <nl> - return NULL ; <nl> + return nullptr ; <nl> } <nl> const uint32_t count = oldList - > count ; <nl> if ( count = = 0 ) { <nl> - return NULL ; <nl> + return nullptr ; <nl> } <nl> const YGNodeListRef newList = YGNodeListNew ( count ) ; <nl> memcpy ( newList - > items , oldList - > items , sizeof ( YGNodeRef ) * count ) ; <nl> similarity index 95 % <nl> rename from yoga / Yoga . c <nl> rename to yoga / Yoga . cpp <nl> mmm a / yoga / Yoga . c <nl> ppp b / yoga / Yoga . cpp <nl> <nl> # ifndef isnan <nl> # define isnan _isnan <nl> # endif <nl> - <nl> # ifndef __cplusplus <nl> # define inline __inline <nl> # endif <nl> static const float kDefaultFlexGrow = 0 . 0f ; <nl> static const float kDefaultFlexShrink = 0 . 0f ; <nl> static const float kWebDefaultFlexShrink = 1 . 0f ; <nl> <nl> + static const YGStyle gYGNodeStyleDefaults = { <nl> + . direction = YGDirectionInherit , <nl> + . flexDirection = YGFlexDirectionColumn , <nl> + . justifyContent = YGJustifyFlexStart , <nl> + . alignContent = YGAlignFlexStart , <nl> + . alignItems = YGAlignStretch , <nl> + . alignSelf = YGAlignAuto , <nl> + . positionType = YGPositionTypeRelative , <nl> + . flexWrap = YGWrapNoWrap , <nl> + . overflow = YGOverflowVisible , <nl> + . display = YGDisplayFlex , <nl> + . flex = YGUndefined , <nl> + . flexGrow = YGUndefined , <nl> + . flexShrink = YGUndefined , <nl> + . flexBasis = YG_AUTO_VALUES , <nl> + . margin = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> + . position = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> + . padding = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> + . border = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> + . dimensions = YG_DEFAULT_DIMENSION_VALUES_AUTO_UNIT , <nl> + . 
minDimensions = YG_DEFAULT_DIMENSION_VALUES_UNIT , <nl> + . maxDimensions = YG_DEFAULT_DIMENSION_VALUES_UNIT , <nl> + . aspectRatio = YGUndefined , <nl> + } ; <nl> + <nl> + static const YGLayout gYGNodeLayoutDefaults = { <nl> + . position = { } , <nl> + . dimensions = YG_DEFAULT_DIMENSION_VALUES , <nl> + . margin = { } , <nl> + . border = { } , <nl> + . padding = { } , <nl> + . direction = YGDirectionInherit , <nl> + . computedFlexBasisGeneration = 0 , <nl> + . computedFlexBasis = YGUndefined , <nl> + . hadOverflow = false , <nl> + . generationCount = 0 , <nl> + . lastParentDirection = ( YGDirection ) - 1 , <nl> + . nextCachedMeasurementsIndex = 0 , <nl> + . cachedMeasurements = { } , <nl> + . measuredDimensions = YG_DEFAULT_DIMENSION_VALUES , <nl> + . cachedLayout = <nl> + { <nl> + . availableWidth = 0 , <nl> + . availableHeight = 0 , <nl> + . widthMeasureMode = ( YGMeasureMode ) - 1 , <nl> + . heightMeasureMode = ( YGMeasureMode ) - 1 , <nl> + . computedWidth = - 1 , <nl> + . computedHeight = - 1 , <nl> + } , <nl> + } ; <nl> + <nl> static const YGNode gYGNodeDefaults = { <nl> - . parent = NULL , <nl> - . children = NULL , <nl> - . hasNewLayout = true , <nl> + . style = gYGNodeStyleDefaults , <nl> + . layout = gYGNodeLayoutDefaults , <nl> + . lineIndex = 0 , <nl> + . parent = nullptr , <nl> + . children = nullptr , <nl> + . nextChild = nullptr , <nl> + . measure = nullptr , <nl> + . baseline = nullptr , <nl> + . print = nullptr , <nl> + . config = nullptr , <nl> + . context = nullptr , <nl> . isDirty = false , <nl> + . hasNewLayout = true , <nl> . nodeType = YGNodeTypeDefault , <nl> . resolvedDimensions = { [ YGDimensionWidth ] = & YGValueUndefined , <nl> [ YGDimensionHeight ] = & YGValueUndefined } , <nl> - <nl> - . style = <nl> - { <nl> - . flex = YGUndefined , <nl> - . flexGrow = YGUndefined , <nl> - . flexShrink = YGUndefined , <nl> - . flexBasis = YG_AUTO_VALUES , <nl> - . justifyContent = YGJustifyFlexStart , <nl> - . alignItems = YGAlignStretch , <nl> - . 
alignContent = YGAlignFlexStart , <nl> - . direction = YGDirectionInherit , <nl> - . flexDirection = YGFlexDirectionColumn , <nl> - . overflow = YGOverflowVisible , <nl> - . display = YGDisplayFlex , <nl> - . dimensions = YG_DEFAULT_DIMENSION_VALUES_AUTO_UNIT , <nl> - . minDimensions = YG_DEFAULT_DIMENSION_VALUES_UNIT , <nl> - . maxDimensions = YG_DEFAULT_DIMENSION_VALUES_UNIT , <nl> - . position = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> - . margin = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> - . padding = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> - . border = YG_DEFAULT_EDGE_VALUES_UNIT , <nl> - . aspectRatio = YGUndefined , <nl> - } , <nl> - <nl> - . layout = <nl> - { <nl> - . dimensions = YG_DEFAULT_DIMENSION_VALUES , <nl> - . lastParentDirection = ( YGDirection ) - 1 , <nl> - . nextCachedMeasurementsIndex = 0 , <nl> - . computedFlexBasis = YGUndefined , <nl> - . hadOverflow = false , <nl> - . measuredDimensions = YG_DEFAULT_DIMENSION_VALUES , <nl> - <nl> - . cachedLayout = <nl> - { <nl> - . widthMeasureMode = ( YGMeasureMode ) - 1 , <nl> - . heightMeasureMode = ( YGMeasureMode ) - 1 , <nl> - . computedWidth = - 1 , <nl> - . computedHeight = - 1 , <nl> - } , <nl> - } , <nl> } ; <nl> <nl> # ifdef ANDROID <nl> static int YGDefaultLog ( const YGConfigRef config , <nl> static YGConfig gYGConfigDefaults = { <nl> . experimentalFeatures = <nl> { <nl> - [ YGExperimentalFeatureWebFlexBasis ] = false , <nl> + [ YGExperimentalFeatureWebFlexBasis ] = false , <nl> } , <nl> . useWebDefaults = false , <nl> + . useLegacyStretchBehaviour = false , <nl> . pointScaleFactor = 1 . 0f , <nl> # ifdef ANDROID <nl> . logger = & YGAndroidLog , <nl> # else <nl> . logger = & YGDefaultLog , <nl> # endif <nl> - . context = NULL , <nl> + . cloneNodeCallback = nullptr , <nl> + . 
context = nullptr , <nl> } ; <nl> <nl> static void YGNodeMarkDirtyInternal ( const YGNodeRef node ) ; <nl> <nl> - YGMalloc gYGMalloc = & malloc ; <nl> - YGCalloc gYGCalloc = & calloc ; <nl> - YGRealloc gYGRealloc = & realloc ; <nl> - YGFree gYGFree = & free ; <nl> - <nl> static YGValue YGValueZero = { . value = 0 , . unit = YGUnitPoint } ; <nl> <nl> # ifdef ANDROID <nl> static int YGDefaultLog ( const YGConfigRef config , <nl> } <nl> # endif <nl> <nl> + bool YGFloatIsUndefined ( const float value ) { <nl> + return isnan ( value ) ; <nl> + } <nl> + <nl> static inline const YGValue * YGComputedEdgeValue ( const YGValue edges [ YGEdgeCount ] , <nl> const YGEdge edge , <nl> const YGValue * const defaultValue ) { <nl> int32_t gNodeInstanceCount = 0 ; <nl> int32_t gConfigInstanceCount = 0 ; <nl> <nl> WIN_EXPORT YGNodeRef YGNodeNewWithConfig ( const YGConfigRef config ) { <nl> - const YGNodeRef node = gYGMalloc ( sizeof ( YGNode ) ) ; <nl> - YGAssertWithConfig ( config , node ! = NULL , " Could not allocate memory for node " ) ; <nl> + const YGNodeRef node = ( const YGNodeRef ) malloc ( sizeof ( YGNode ) ) ; <nl> + YGAssertWithConfig ( <nl> + config , node ! = nullptr , " Could not allocate memory for node " ) ; <nl> gNodeInstanceCount + + ; <nl> <nl> memcpy ( node , & gYGNodeDefaults , sizeof ( YGNode ) ) ; <nl> YGNodeRef YGNodeNew ( void ) { <nl> } <nl> <nl> YGNodeRef YGNodeClone ( const YGNodeRef oldNode ) { <nl> - const YGNodeRef node = gYGMalloc ( sizeof ( YGNode ) ) ; <nl> - YGAssertWithConfig ( oldNode - > config , node ! = NULL , " Could not allocate memory for node " ) ; <nl> + const YGNodeRef node = ( const YGNodeRef ) malloc ( sizeof ( YGNode ) ) ; <nl> + YGAssertWithConfig ( <nl> + oldNode - > config , node ! 
= nullptr , " Could not allocate memory for node " ) ; <nl> gNodeInstanceCount + + ; <nl> <nl> memcpy ( node , oldNode , sizeof ( YGNode ) ) ; <nl> node - > children = YGNodeListClone ( oldNode - > children ) ; <nl> - node - > parent = NULL ; <nl> + node - > parent = nullptr ; <nl> return node ; <nl> } <nl> <nl> void YGNodeFree ( const YGNodeRef node ) { <nl> if ( node - > parent ) { <nl> YGNodeListDelete ( node - > parent - > children , node ) ; <nl> - node - > parent = NULL ; <nl> + node - > parent = nullptr ; <nl> } <nl> <nl> const uint32_t childCount = YGNodeGetChildCount ( node ) ; <nl> for ( uint32_t i = 0 ; i < childCount ; i + + ) { <nl> const YGNodeRef child = YGNodeGetChild ( node , i ) ; <nl> - child - > parent = NULL ; <nl> + child - > parent = nullptr ; <nl> } <nl> <nl> YGNodeListFree ( node - > children ) ; <nl> - gYGFree ( node ) ; <nl> + free ( node ) ; <nl> gNodeInstanceCount - - ; <nl> } <nl> <nl> void YGNodeReset ( const YGNodeRef node ) { <nl> YGAssertWithNode ( node , <nl> YGNodeGetChildCount ( node ) = = 0 , <nl> " Cannot reset a node which still has children attached " ) ; <nl> - YGAssertWithNode ( node , node - > parent = = NULL , " Cannot reset a node still attached to a parent " ) ; <nl> + YGAssertWithNode ( <nl> + node , <nl> + node - > parent = = nullptr , <nl> + " Cannot reset a node still attached to a parent " ) ; <nl> <nl> YGNodeListFree ( node - > children ) ; <nl> <nl> YGConfigRef YGConfigGetDefault ( ) { <nl> } <nl> <nl> YGConfigRef YGConfigNew ( void ) { <nl> - const YGConfigRef config = gYGMalloc ( sizeof ( YGConfig ) ) ; <nl> - YGAssert ( config ! = NULL , " Could not allocate memory for config " ) ; <nl> + const YGConfigRef config = ( const YGConfigRef ) malloc ( sizeof ( YGConfig ) ) ; <nl> + YGAssert ( config ! 
= nullptr , " Could not allocate memory for config " ) ; <nl> <nl> gConfigInstanceCount + + ; <nl> memcpy ( config , & gYGConfigDefaults , sizeof ( YGConfig ) ) ; <nl> YGConfigRef YGConfigNew ( void ) { <nl> } <nl> <nl> void YGConfigFree ( const YGConfigRef config ) { <nl> - gYGFree ( config ) ; <nl> + free ( config ) ; <nl> gConfigInstanceCount - - ; <nl> } <nl> <nl> static void YGNodeMarkDirtyInternal ( const YGNodeRef node ) { <nl> } <nl> <nl> void YGNodeSetMeasureFunc ( const YGNodeRef node , YGMeasureFunc measureFunc ) { <nl> - if ( measureFunc = = NULL ) { <nl> - node - > measure = NULL ; <nl> + if ( measureFunc = = nullptr ) { <nl> + node - > measure = nullptr ; <nl> / / TODO : t18095186 Move nodeType to opt - in function and mark appropriate places in Litho <nl> node - > nodeType = YGNodeTypeDefault ; <nl> } else { <nl> static void YGCloneChildrenIfNeeded ( const YGNodeRef parent ) { <nl> } <nl> <nl> void YGNodeInsertChild ( const YGNodeRef node , const YGNodeRef child , const uint32_t index ) { <nl> - YGAssertWithNode ( node , <nl> - child - > parent = = NULL , <nl> - " Child already has a parent , it must be removed first . " ) ; <nl> - YGAssertWithNode ( node , <nl> - node - > measure = = NULL , <nl> - " Cannot add child : Nodes with measure functions cannot have children . " ) ; <nl> + YGAssertWithNode ( <nl> + node , <nl> + child - > parent = = nullptr , <nl> + " Child already has a parent , it must be removed first . " ) ; <nl> + YGAssertWithNode ( <nl> + node , <nl> + node - > measure = = nullptr , <nl> + " Cannot add child : Nodes with measure functions cannot have children . " ) ; <nl> <nl> YGCloneChildrenIfNeeded ( node ) ; <nl> <nl> void YGNodeRemoveChild ( const YGNodeRef parent , const YGNodeRef excludedChild ) { <nl> if ( firstChild - > parent = = parent ) { <nl> / / If the first child has this node as its parent , we assume that it is already unique . <nl> / / We can now try to delete a child in this list . 
<nl> - if ( YGNodeListDelete ( parent - > children , excludedChild ) ! = NULL ) { <nl> + if ( YGNodeListDelete ( parent - > children , excludedChild ) ! = nullptr ) { <nl> excludedChild - > layout = gYGNodeDefaults . layout ; / / layout is no longer valid <nl> - excludedChild - > parent = NULL ; <nl> + excludedChild - > parent = nullptr ; <nl> YGNodeMarkDirtyInternal ( parent ) ; <nl> } <nl> return ; <nl> void YGNodeRemoveAllChildren ( const YGNodeRef parent ) { <nl> for ( uint32_t i = 0 ; i < childCount ; i + + ) { <nl> const YGNodeRef oldChild = YGNodeGetChild ( parent , i ) ; <nl> oldChild - > layout = gYGNodeDefaults . layout ; / / layout is no longer valid <nl> - oldChild - > parent = NULL ; <nl> + oldChild - > parent = nullptr ; <nl> } <nl> YGNodeListRemoveAll ( parent - > children ) ; <nl> YGNodeMarkDirtyInternal ( parent ) ; <nl> return ; <nl> } <nl> / / Otherwise , we are not the owner of the child set . We don ' t have to do anything to clear it . <nl> - parent - > children = NULL ; <nl> + parent - > children = nullptr ; <nl> YGNodeMarkDirtyInternal ( parent ) ; <nl> } <nl> <nl> uint32_t YGNodeGetChildCount ( const YGNodeRef node ) { <nl> } <nl> <nl> void YGNodeMarkDirty ( const YGNodeRef node ) { <nl> - YGAssertWithNode ( node , <nl> - node - > measure ! = NULL , <nl> - " Only leaf nodes with custom measure functions " <nl> - " should manually mark themselves as dirty " ) ; <nl> + YGAssertWithNode ( <nl> + node , <nl> + node - > measure ! = nullptr , <nl> + " Only leaf nodes with custom measure functions " <nl> + " should manually mark themselves as dirty " ) ; <nl> <nl> YGNodeMarkDirtyInternal ( node ) ; <nl> } <nl> void YGNodeCopyStyle ( const YGNodeRef dstNode , const YGNodeRef srcNode ) { <nl> <nl> static inline float YGResolveFlexGrow ( const YGNodeRef node ) { <nl> / / Root nodes flexGrow should always be 0 <nl> - if ( node - > parent = = NULL ) { <nl> + if ( node - > parent = = nullptr ) { <nl> return 0 . 0 ; <nl> } <nl> if ( ! 
YGFloatIsUndefined ( node - > style . flexGrow ) ) { <nl> float YGNodeStyleGetFlexShrink ( const YGNodeRef node ) { <nl> <nl> static inline float YGNodeResolveFlexShrink ( const YGNodeRef node ) { <nl> / / Root nodes flexShrink should always be 0 <nl> - if ( node - > parent = = NULL ) { <nl> + if ( node - > parent = = nullptr ) { <nl> return 0 . 0 ; <nl> } <nl> if ( ! YGFloatIsUndefined ( node - > style . flexShrink ) ) { <nl> bool YGLayoutNodeInternal ( const YGNodeRef node , <nl> const char * reason , <nl> const YGConfigRef config ) ; <nl> <nl> - inline bool YGFloatIsUndefined ( const float value ) { <nl> - return isnan ( value ) ; <nl> - } <nl> - <nl> static inline bool YGValueEqual ( const YGValue a , const YGValue b ) { <nl> if ( a . unit ! = b . unit ) { <nl> return false ; <nl> static inline bool YGValueEqual ( const YGValue a , const YGValue b ) { <nl> } <nl> <nl> static inline void YGResolveDimensions ( YGNodeRef node ) { <nl> - for ( YGDimension dim = YGDimensionWidth ; dim < = YGDimensionHeight ; dim + + ) { <nl> + for ( uint32_t dim = YGDimensionWidth ; dim < YGDimensionCount ; dim + + ) { <nl> if ( node - > style . maxDimensions [ dim ] . unit ! = YGUnitUndefined & & <nl> YGValueEqual ( node - > style . maxDimensions [ dim ] , node - > style . minDimensions [ dim ] ) ) { <nl> node - > resolvedDimensions [ dim ] = & node - > style . maxDimensions [ dim ] ; <nl> static void YGWriteToStringStream ( YGStringStream * stream , const char * format , . . <nl> va_list argsCopy ; <nl> va_copy ( argsCopy , args ) ; <nl> int available = stream - > capacity - stream - > length ; <nl> - int required = vsnprintf ( NULL , 0 , format , args ) ; <nl> + int required = vsnprintf ( nullptr , 0 , format , args ) ; <nl> va_end ( args ) ; <nl> if ( required > = available ) { <nl> char * newStr = ( char * ) realloc ( stream - > str , sizeof ( char ) * ( stream - > capacity ) * 2 ) ; <nl> - if ( newStr ! = NULL ) { <nl> + if ( newStr ! 
= nullptr ) { <nl> stream - > str = newStr ; <nl> stream - > capacity * = 2 ; <nl> available = stream - > capacity - stream - > length ; <nl> static void YGPrintEdges ( YGStringStream * stream , const char * str , const YGValue <nl> if ( YGFourValuesEqual ( edges ) ) { <nl> YGPrintNumberIfNotZero ( stream , str , & edges [ YGEdgeLeft ] ) ; <nl> } else { <nl> - for ( YGEdge edge = YGEdgeLeft ; edge < YGEdgeCount ; edge + + ) { <nl> + for ( uint32_t edge = 0 ; edge < YGEdgeCount ; edge + + ) { <nl> char buf [ 30 ] ; <nl> - snprintf ( buf , sizeof ( buf ) , " % s - % s " , str , YGEdgeToString ( edge ) ) ; <nl> + snprintf ( buf , sizeof ( buf ) , " % s - % s " , str , YGEdgeToString ( ( YGEdge ) edge ) ) ; <nl> YGPrintNumberIfNotZero ( stream , buf , & edges [ edge ] ) ; <nl> } <nl> } <nl> static void YGNodeToString ( YGStringStream * stream , <nl> YGPrintEdgeIfNotUndefined ( stream , " bottom " , node - > style . position , YGEdgeBottom ) ; <nl> YGWriteToStringStream ( stream , " \ " " ) ; <nl> <nl> - if ( node - > measure ! = NULL ) { <nl> + if ( node - > measure ! = nullptr ) { <nl> YGWriteToStringStream ( stream , " has - custom - measure = \ " true \ " " ) ; <nl> } <nl> } <nl> static void YGNodePrintInternal ( const YGNodeRef node , <nl> stream . str = ( char * ) malloc ( sizeof ( char ) * 1024 ) ; <nl> stream . length = 0 ; <nl> stream . capacity = 1024 ; <nl> - if ( stream . str ! = NULL ) { <nl> + if ( stream . str ! = nullptr ) { <nl> YGNodeToString ( & stream , node , options , 0 ) ; <nl> YGLog ( node , YGLogLevelDebug , stream . str ) ; <nl> free ( stream . str ) ; <nl> static inline YGDirection YGNodeResolveDirection ( const YGNodeRef node , <nl> } <nl> <nl> static float YGBaseline ( const YGNodeRef node ) { <nl> - if ( node - > baseline ! = NULL ) { <nl> + if ( node - > baseline ! = nullptr ) { <nl> const float baseline = node - > baseline ( node , <nl> node - > layout . measuredDimensions [ YGDimensionWidth ] , <nl> node - > layout . 
measuredDimensions [ YGDimensionHeight ] ) ; <nl> static float YGBaseline ( const YGNodeRef node ) { <nl> return baseline ; <nl> } <nl> <nl> - YGNodeRef baselineChild = NULL ; <nl> + YGNodeRef baselineChild = nullptr ; <nl> const uint32_t childCount = YGNodeGetChildCount ( node ) ; <nl> for ( uint32_t i = 0 ; i < childCount ; i + + ) { <nl> const YGNodeRef child = YGNodeGetChild ( node , i ) ; <nl> static float YGBaseline ( const YGNodeRef node ) { <nl> break ; <nl> } <nl> <nl> - if ( baselineChild = = NULL ) { <nl> + if ( baselineChild = = nullptr ) { <nl> baselineChild = child ; <nl> } <nl> } <nl> <nl> - if ( baselineChild = = NULL ) { <nl> + if ( baselineChild = = nullptr ) { <nl> return node - > layout . measuredDimensions [ YGDimensionHeight ] ; <nl> } <nl> <nl> static void YGNodeSetPosition ( const YGNodeRef node , <nl> const float crossSize , <nl> const float parentWidth ) { <nl> / * Root nodes should be always layouted as LTR , so we don ' t return negative values . * / <nl> - const YGDirection directionRespectingRoot = node - > parent ! = NULL ? direction : YGDirectionLTR ; <nl> + const YGDirection directionRespectingRoot = <nl> + node - > parent ! = nullptr ? direction : YGDirectionLTR ; <nl> const YGFlexDirection mainAxis = <nl> YGResolveFlexDirection ( node - > style . flexDirection , directionRespectingRoot ) ; <nl> const YGFlexDirection crossAxis = YGFlexDirectionCross ( mainAxis , directionRespectingRoot ) ; <nl> static void YGNodeWithMeasureFuncSetMeasuredDimensions ( const YGNodeRef node , <nl> const YGMeasureMode heightMeasureMode , <nl> const float parentWidth , <nl> const float parentHeight ) { <nl> - YGAssertWithNode ( node , node - > measure ! = NULL , " Expected node to have custom measure function " ) ; <nl> + YGAssertWithNode ( <nl> + node , <nl> + node - > measure ! 
= nullptr , <nl> + " Expected node to have custom measure function " ) ; <nl> <nl> const float paddingAndBorderAxisRow = <nl> YGNodePaddingAndBorderForAxis ( node , YGFlexDirectionRow , availableWidth ) ; <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> const float mainAxisParentSize = isMainAxisRow ? parentWidth : parentHeight ; <nl> const float crossAxisParentSize = isMainAxisRow ? parentHeight : parentWidth ; <nl> <nl> - YGNodeRef firstAbsoluteChild = NULL ; <nl> - YGNodeRef currentAbsoluteChild = NULL ; <nl> + YGNodeRef firstAbsoluteChild = nullptr ; <nl> + YGNodeRef currentAbsoluteChild = nullptr ; <nl> <nl> const float leadingPaddingAndBorderMain = <nl> YGNodeLeadingPaddingAndBorder ( node , mainAxis , parentWidth ) ; <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> / / If there is only one child with flexGrow + flexShrink it means we can set the <nl> / / computedFlexBasis to 0 instead of measuring and shrinking / flexing the child to exactly <nl> / / match the remaining space <nl> - YGNodeRef singleFlexChild = NULL ; <nl> + YGNodeRef singleFlexChild = nullptr ; <nl> if ( measureModeMainDim = = YGMeasureModeExactly ) { <nl> for ( uint32_t i = 0 ; i < childCount ; i + + ) { <nl> const YGNodeRef child = YGNodeGetChild ( node , i ) ; <nl> if ( singleFlexChild ) { <nl> if ( YGNodeIsFlex ( child ) ) { <nl> / / There is already a flexible child , abort . <nl> - singleFlexChild = NULL ; <nl> + singleFlexChild = nullptr ; <nl> break ; <nl> } <nl> } else if ( YGResolveFlexGrow ( child ) > 0 . 0f & & YGNodeResolveFlexShrink ( child ) > 0 . 0f ) { <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> if ( child - > style . positionType = = YGPositionTypeAbsolute ) { <nl> / / Store a private linked list of absolutely positioned children <nl> / / so that we can efficiently traverse them later . 
<nl> - if ( firstAbsoluteChild = = NULL ) { <nl> + if ( firstAbsoluteChild = = nullptr ) { <nl> firstAbsoluteChild = child ; <nl> } <nl> - if ( currentAbsoluteChild ! = NULL ) { <nl> + if ( currentAbsoluteChild ! = nullptr ) { <nl> currentAbsoluteChild - > nextChild = child ; <nl> } <nl> currentAbsoluteChild = child ; <nl> - child - > nextChild = NULL ; <nl> + child - > nextChild = nullptr ; <nl> } else { <nl> if ( child = = singleFlexChild ) { <nl> child - > layout . computedFlexBasisGeneration = gCurrentGenerationCount ; <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> float totalFlexShrinkScaledFactors = 0 ; <nl> <nl> / / Maintain a linked list of the child nodes that can shrink and / or grow . <nl> - YGNodeRef firstRelativeChild = NULL ; <nl> - YGNodeRef currentRelativeChild = NULL ; <nl> + YGNodeRef firstRelativeChild = nullptr ; <nl> + YGNodeRef currentRelativeChild = nullptr ; <nl> <nl> / / Add items to the current line until it ' s full or we run out of items . <nl> for ( uint32_t i = startOfLineIndex ; i < childCount ; i + + , endOfLineIndex + + ) { <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> } <nl> <nl> / / Store a private linked list of children that need to be layed out . <nl> - if ( firstRelativeChild = = NULL ) { <nl> + if ( firstRelativeChild = = nullptr ) { <nl> firstRelativeChild = child ; <nl> } <nl> - if ( currentRelativeChild ! = NULL ) { <nl> + if ( currentRelativeChild ! = nullptr ) { <nl> currentRelativeChild - > nextChild = child ; <nl> } <nl> currentRelativeChild = child ; <nl> - child - > nextChild = NULL ; <nl> + child - > nextChild = nullptr ; <nl> } <nl> } <nl> <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> float deltaFlexShrinkScaledFactors = 0 ; <nl> float deltaFlexGrowFactors = 0 ; <nl> currentRelativeChild = firstRelativeChild ; <nl> - while ( currentRelativeChild ! = NULL ) { <nl> + while ( currentRelativeChild ! 
= nullptr ) { <nl> childFlexBasis = <nl> fminf ( YGResolveValue ( & currentRelativeChild - > style . maxDimensions [ dim [ mainAxis ] ] , <nl> mainAxisParentSize ) , <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> / / Second pass : resolve the sizes of the flexible items <nl> deltaFreeSpace = 0 ; <nl> currentRelativeChild = firstRelativeChild ; <nl> - while ( currentRelativeChild ! = NULL ) { <nl> + while ( currentRelativeChild ! = nullptr ) { <nl> childFlexBasis = <nl> fminf ( YGResolveValue ( & currentRelativeChild - > style . maxDimensions [ dim [ mainAxis ] ] , <nl> mainAxisParentSize ) , <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> <nl> if ( performLayout ) { <nl> / / STEP 10 : SIZING AND POSITIONING ABSOLUTE CHILDREN <nl> - for ( currentAbsoluteChild = firstAbsoluteChild ; currentAbsoluteChild ! = NULL ; <nl> + for ( currentAbsoluteChild = firstAbsoluteChild ; <nl> + currentAbsoluteChild ! = nullptr ; <nl> currentAbsoluteChild = currentAbsoluteChild - > nextChild ) { <nl> YGNodeAbsoluteLayoutChild ( node , <nl> currentAbsoluteChild , <nl> float YGRoundValueToPixelGrid ( const float value , <nl> scaledValue = scaledValue - fractial ; <nl> } else { <nl> / / Finally we just round the value <nl> - scaledValue = scaledValue - fractial + ( fractial > 0 . 5f | | YGFloatsEqual ( fractial , 0 . 5f ) ? 1 . 0f : 0 . 0f ) ; <nl> + scaledValue = scaledValue - fractial + <nl> + ( fractial > 0 . 5f | | YGFloatsEqual ( fractial , 0 . 5f ) ? 1 . 0f : 0 . 0f ) ; <nl> } <nl> return scaledValue / pointScaleFactor ; <nl> } <nl> bool YGNodeCanUseCachedMeasurement ( const YGMeasureMode widthMode , <nl> if ( lastComputedHeight < 0 | | lastComputedWidth < 0 ) { <nl> return false ; <nl> } <nl> - bool useRoundedComparison = config ! = NULL & & config - > pointScaleFactor ! = 0 ; <nl> + bool useRoundedComparison = <nl> + config ! = nullptr & & config - > pointScaleFactor ! = 0 ; <nl> const float effectiveWidth = <nl> useRoundedComparison ? 
YGRoundValueToPixelGrid ( width , config - > pointScaleFactor , false , false ) <nl> : width ; <nl> bool YGLayoutNodeInternal ( const YGNodeRef node , <nl> layout - > cachedLayout . computedHeight = - 1 ; <nl> } <nl> <nl> - YGCachedMeasurement * cachedResults = NULL ; <nl> + YGCachedMeasurement * cachedResults = nullptr ; <nl> <nl> / / Determine whether the results are already cached . We maintain a separate <nl> / / cache for layouts and measurements . A layout operation modifies the <nl> bool YGLayoutNodeInternal ( const YGNodeRef node , <nl> } <nl> } <nl> <nl> - if ( ! needToVisitNode & & cachedResults ! = NULL ) { <nl> + if ( ! needToVisitNode & & cachedResults ! = nullptr ) { <nl> layout - > measuredDimensions [ YGDimensionWidth ] = cachedResults - > computedWidth ; <nl> layout - > measuredDimensions [ YGDimensionHeight ] = cachedResults - > computedHeight ; <nl> <nl> bool YGLayoutNodeInternal ( const YGNodeRef node , <nl> if ( node - > print ) { <nl> node - > print ( node ) ; <nl> } <nl> - YGLog ( node , YGLogLevelVerbose , " wm : % s , hm : % s , aw : % f ah : % f = > d : ( % f , % f ) % s \ n " , <nl> - YGMeasureModeName ( widthMeasureMode , performLayout ) , <nl> - YGMeasureModeName ( heightMeasureMode , performLayout ) , <nl> - availableWidth , <nl> - availableHeight , <nl> - cachedResults - > computedWidth , <nl> - cachedResults - > computedHeight , <nl> - reason ) ; <nl> + YGLog ( <nl> + node , <nl> + YGLogLevelVerbose , <nl> + " wm : % s , hm : % s , aw : % f ah : % f = > d : ( % f , % f ) % s \ n " , <nl> + YGMeasureModeName ( widthMeasureMode , performLayout ) , <nl> + YGMeasureModeName ( heightMeasureMode , performLayout ) , <nl> + availableWidth , <nl> + availableHeight , <nl> + cachedResults - > computedWidth , <nl> + cachedResults - > computedHeight , <nl> + reason ) ; <nl> } <nl> } else { <nl> if ( gPrintChanges ) { <nl> - YGLog ( node , YGLogLevelVerbose , " % s % d . { % s " , YGSpacer ( gDepth ) , gDepth , needToVisitNode ? 
" * " : " " ) ; <nl> + YGLog ( <nl> + node , <nl> + YGLogLevelVerbose , <nl> + " % s % d . { % s " , <nl> + YGSpacer ( gDepth ) , <nl> + gDepth , <nl> + needToVisitNode ? " * " : " " ) ; <nl> if ( node - > print ) { <nl> node - > print ( node ) ; <nl> } <nl> - YGLog ( node , YGLogLevelVerbose , " wm : % s , hm : % s , aw : % f ah : % f % s \ n " , <nl> - YGMeasureModeName ( widthMeasureMode , performLayout ) , <nl> - YGMeasureModeName ( heightMeasureMode , performLayout ) , <nl> - availableWidth , <nl> - availableHeight , <nl> - reason ) ; <nl> + YGLog ( <nl> + node , <nl> + YGLogLevelVerbose , <nl> + " wm : % s , hm : % s , aw : % f ah : % f % s \ n " , <nl> + YGMeasureModeName ( widthMeasureMode , performLayout ) , <nl> + YGMeasureModeName ( heightMeasureMode , performLayout ) , <nl> + availableWidth , <nl> + availableHeight , <nl> + reason ) ; <nl> } <nl> <nl> YGNodelayoutImpl ( node , <nl> bool YGLayoutNodeInternal ( const YGNodeRef node , <nl> config ) ; <nl> <nl> if ( gPrintChanges ) { <nl> - YGLog ( node , YGLogLevelVerbose , " % s % d . } % s " , YGSpacer ( gDepth ) , gDepth , needToVisitNode ? " * " : " " ) ; <nl> + YGLog ( <nl> + node , <nl> + YGLogLevelVerbose , <nl> + " % s % d . } % s " , <nl> + YGSpacer ( gDepth ) , <nl> + gDepth , <nl> + needToVisitNode ? 
" * " : " " ) ; <nl> if ( node - > print ) { <nl> node - > print ( node ) ; <nl> } <nl> - YGLog ( node , YGLogLevelVerbose , " wm : % s , hm : % s , d : ( % f , % f ) % s \ n " , <nl> - YGMeasureModeName ( widthMeasureMode , performLayout ) , <nl> - YGMeasureModeName ( heightMeasureMode , performLayout ) , <nl> - layout - > measuredDimensions [ YGDimensionWidth ] , <nl> - layout - > measuredDimensions [ YGDimensionHeight ] , <nl> - reason ) ; <nl> + YGLog ( <nl> + node , <nl> + YGLogLevelVerbose , <nl> + " wm : % s , hm : % s , d : ( % f , % f ) % s \ n " , <nl> + YGMeasureModeName ( widthMeasureMode , performLayout ) , <nl> + YGMeasureModeName ( heightMeasureMode , performLayout ) , <nl> + layout - > measuredDimensions [ YGDimensionWidth ] , <nl> + layout - > measuredDimensions [ YGDimensionHeight ] , <nl> + reason ) ; <nl> } <nl> <nl> layout - > lastParentDirection = parentDirection ; <nl> <nl> - if ( cachedResults = = NULL ) { <nl> + if ( cachedResults = = nullptr ) { <nl> if ( layout - > nextCachedMeasurementsIndex = = YG_MAX_CACHED_RESULT_COUNT ) { <nl> if ( gPrintChanges ) { <nl> YGLog ( node , YGLogLevelVerbose , " Out of cache entries ! \ n " ) ; <nl> bool YGLayoutNodeInternal ( const YGNodeRef node , <nl> <nl> gDepth - - ; <nl> layout - > generationCount = gCurrentGenerationCount ; <nl> - return ( needToVisitNode | | cachedResults = = NULL ) ; <nl> + return ( needToVisitNode | | cachedResults = = nullptr ) ; <nl> } <nl> <nl> void YGConfigSetPointScaleFactor ( const YGConfigRef config , const float pixelsInPoint ) { <nl> void YGNodeCalculateLayout ( const YGNodeRef node , <nl> YGRoundToPixelGrid ( node , node - > config - > pointScaleFactor , 0 . 0f , 0 . 
0f ) ; <nl> <nl> if ( gPrintTree ) { <nl> - YGNodePrint ( node , YGPrintOptionsLayout | YGPrintOptionsChildren | YGPrintOptionsStyle ) ; <nl> + YGNodePrint ( <nl> + node , <nl> + ( YGPrintOptions ) ( <nl> + YGPrintOptionsLayout | YGPrintOptionsChildren | <nl> + YGPrintOptionsStyle ) ) ; <nl> } <nl> } <nl> } <nl> <nl> void YGConfigSetLogger ( const YGConfigRef config , YGLogger logger ) { <nl> - if ( logger ! = NULL ) { <nl> + if ( logger ! = nullptr ) { <nl> config - > logger = logger ; <nl> } else { <nl> # ifdef ANDROID <nl> static void YGVLog ( const YGConfigRef config , <nl> YGLogLevel level , <nl> const char * format , <nl> va_list args ) { <nl> - const YGConfigRef logConfig = config ! = NULL ? config : & gYGConfigDefaults ; <nl> + const YGConfigRef logConfig = config ! = nullptr ? config : & gYGConfigDefaults ; <nl> logConfig - > logger ( logConfig , node , level , format , args ) ; <nl> <nl> if ( level = = YGLogLevelFatal ) { <nl> static void YGVLog ( const YGConfigRef config , <nl> void YGLogWithConfig ( const YGConfigRef config , YGLogLevel level , const char * format , . . . ) { <nl> va_list args ; <nl> va_start ( args , format ) ; <nl> - YGVLog ( config , NULL , level , format , args ) ; <nl> + YGVLog ( config , nullptr , level , format , args ) ; <nl> va_end ( args ) ; <nl> } <nl> <nl> void YGLog ( const YGNodeRef node , YGLogLevel level , const char * format , . . . ) { <nl> va_list args ; <nl> va_start ( args , format ) ; <nl> - YGVLog ( node = = NULL ? NULL : node - > config , node , level , format , args ) ; <nl> + YGVLog ( node = = nullptr ? nullptr : node - > config , node , level , format , args ) ; <nl> va_end ( args ) ; <nl> } <nl> <nl> void YGAssert ( const bool condition , const char * message ) { <nl> if ( ! condition ) { <nl> - YGLog ( NULL , YGLogLevelFatal , " % s \ n " , message ) ; <nl> + YGLog ( nullptr , YGLogLevelFatal , " % s \ n " , message ) ; <nl> } <nl> } <nl> <nl> mmm a / yoga / Yoga . h <nl> ppp b / yoga / Yoga . 
h <nl> typedef void ( * YGNodeClonedFunc ) ( YGNodeRef oldNode , <nl> YGNodeRef parent , <nl> int childIndex ) ; <nl> <nl> - typedef void * ( * YGMalloc ) ( size_t size ) ; <nl> - typedef void * ( * YGCalloc ) ( size_t count , size_t size ) ; <nl> - typedef void * ( * YGRealloc ) ( void * ptr , size_t size ) ; <nl> - typedef void ( * YGFree ) ( void * ptr ) ; <nl> - <nl> / / YGNode <nl> WIN_EXPORT YGNodeRef YGNodeNew ( void ) ; <nl> WIN_EXPORT YGNodeRef YGNodeNewWithConfig ( const YGConfigRef config ) ; <nl>
Change c files to cpp
facebook/yoga
02c00f271108c08e11325001f85c22589eda2bf8
2017-11-21T18:28:51Z
mmm a / aten / src / THC / generic / THCTensorMathMagma . cu <nl> ppp b / aten / src / THC / generic / THCTensorMathMagma . cu <nl> static void THCTensor_ ( copyArray2d ) ( THCState * state , THCTensor * self , real * src , <nl> <nl> static void THCTensor_ ( copyTensor2d ) ( THCState * state , real * dst , THCTensor * self ) <nl> { <nl> - THAssert ( self - > _dim ( ) = = 2 ) ; <nl> + THAssert ( self - > dim ( ) = = 2 ) ; <nl> size_t len = THCTensor_ ( nElement ) ( state , self ) * sizeof ( real ) ; <nl> THCTensor * temp = THCTensor_ ( newTranspose ) ( state , self , 0 , 1 ) ; <nl> THCTensor * selfc = THCTensor_ ( newContiguous ) ( state , temp ) ; <nl> static void THCTensor_ ( copyTensor2d ) ( THCState * state , real * dst , THCTensor * self <nl> <nl> static THCTensor * THCTensor_ ( newColumnMajor ) ( THCState * state , THCTensor * self , THCTensor * src ) <nl> { <nl> - THAssert ( src - > _dim ( ) = = 2 ) ; <nl> + THAssert ( src - > dim ( ) = = 2 ) ; <nl> if ( self = = src & & self - > stride ( 0 ) = = 1 & & self - > stride ( 1 ) = = self - > size ( 0 ) ) <nl> { <nl> THCTensor_ ( retain ) ( state , self ) ; <nl> THC_API void THCTensor_ ( syev ) ( THCState * state , THCTensor * re_ , THCTensor * rv_ , T <nl> THCTensor * input = THCTensor_ ( newColumnMajor ) ( state , rv_ , a ) ; <nl> real * input_data = THCTensor_ ( data ) ( state , input ) ; <nl> <nl> - / / eigen values and workspace <nl> - real * w = th_magma_malloc_pinned < real > ( n ) ; <nl> - real * wA = th_magma_malloc_pinned < real > ( lda * n ) ; <nl> + if ( n > 0 ) { <nl> + / / eigen values and workspace <nl> + real * w = th_magma_malloc_pinned < real > ( n ) ; <nl> + real * wA = th_magma_malloc_pinned < real > ( lda * n ) ; <nl> <nl> - / / compute optimal size of work array <nl> - int info ; <nl> - real lwork ; <nl> - int liwork ; <nl> + / / compute optimal size of work array <nl> + int info ; <nl> + real lwork ; <nl> + int liwork ; <nl> <nl> # if defined ( THC_REAL_IS_FLOAT ) <nl> - magma_ssyevd_gpu ( 
jobz , uplo , n , input_data , lda , w , wA , n , & lwork , - 1 , & liwork , - 1 , & info ) ; <nl> + magma_ssyevd_gpu ( jobz , uplo , n , input_data , lda , w , wA , n , & lwork , - 1 , & liwork , - 1 , & info ) ; <nl> # else <nl> - magma_dsyevd_gpu ( jobz , uplo , n , input_data , lda , w , wA , n , & lwork , - 1 , & liwork , - 1 , & info ) ; <nl> + magma_dsyevd_gpu ( jobz , uplo , n , input_data , lda , w , wA , n , & lwork , - 1 , & liwork , - 1 , & info ) ; <nl> # endif <nl> <nl> - real * work = th_magma_malloc_pinned < real > ( ( size_t ) lwork ) ; <nl> - int * iwork = th_magma_malloc_pinned < int > ( liwork ) ; <nl> + real * work = th_magma_malloc_pinned < real > ( ( size_t ) lwork ) ; <nl> + int * iwork = th_magma_malloc_pinned < int > ( liwork ) ; <nl> <nl> / / compute eigenvalues and , optionally , eigenvectors <nl> # if defined ( THC_REAL_IS_FLOAT ) <nl> - magma_ssyevd_gpu ( jobz , uplo , n , input_data , lda , w , wA , n , work , ( int ) lwork , iwork , liwork , & info ) ; <nl> + magma_ssyevd_gpu ( jobz , uplo , n , input_data , lda , w , wA , n , work , ( int ) lwork , iwork , liwork , & info ) ; <nl> # else <nl> - magma_dsyevd_gpu ( jobz , uplo , n , input_data , lda , w , wA , n , work , ( int ) lwork , iwork , liwork , & info ) ; <nl> + magma_dsyevd_gpu ( jobz , uplo , n , input_data , lda , w , wA , n , work , ( int ) lwork , iwork , liwork , & info ) ; <nl> # endif <nl> <nl> - / / copy eigen values from w to re_ <nl> - if ( info = = 0 ) <nl> - THCTensor_ ( copyArray1d ) ( state , re_ , w , n ) ; <nl> - <nl> - magma_free_pinned ( iwork ) ; <nl> - magma_free_pinned ( work ) ; <nl> - magma_free_pinned ( wA ) ; <nl> - magma_free_pinned ( w ) ; <nl> + / / copy eigen values from w to re_ <nl> + if ( info = = 0 ) <nl> + THCTensor_ ( copyArray1d ) ( state , re_ , w , n ) ; <nl> <nl> - / / check error value <nl> - if ( info > 0 ) <nl> - THError ( " MAGMA syev : Failed to converge . 
% d off - diagonal elements of an didn ' t converge to zero " , info ) ; <nl> - else if ( info < 0 ) <nl> - THError ( " MAGMA syev : Argument % d : illegal value " , - info ) ; <nl> + magma_free_pinned ( iwork ) ; <nl> + magma_free_pinned ( work ) ; <nl> + magma_free_pinned ( wA ) ; <nl> + magma_free_pinned ( w ) ; <nl> <nl> + / / check error value <nl> + if ( info > 0 ) <nl> + THError ( " MAGMA syev : Failed to converge . % d off - diagonal elements of an didn ' t converge to zero " , info ) ; <nl> + else if ( info < 0 ) <nl> + THError ( " MAGMA syev : Argument % d : illegal value " , - info ) ; <nl> + } <nl> THCTensor_ ( freeCopyTo ) ( state , input , rv_ ) ; <nl> # else <nl> THError ( NoMagma ( syev ) ) ; <nl> THC_API void THCTensor_ ( syev ) ( THCState * state , THCTensor * re_ , THCTensor * rv_ , T <nl> THC_API void THCTensor_ ( geev ) ( THCState * state , THCTensor * re_ , THCTensor * rv_ , THCTensor * a_ , const char * jobvrs ) <nl> { <nl> # ifdef USE_MAGMA <nl> - THArgCheck ( ! a_ - > is_empty ( ) & & a_ - > dim ( ) = = 2 , 3 , " A should be ( non - empty ) 2 dimensional " ) ; <nl> + THArgCheck ( a_ - > dim ( ) = = 2 , 3 , " A should be 2 dimensional " ) ; <nl> THArgCheck ( a_ - > size ( 0 ) = = a_ - > size ( 1 ) , 3 , " A should be square " ) ; <nl> <nl> magma_vec_t jobvr = jobvrs [ 0 ] = = ' N ' ? 
MagmaNoVec : MagmaVec ; <nl> THC_API void THCTensor_ ( geev ) ( THCState * state , THCTensor * re_ , THCTensor * rv_ , T <nl> ldvr = n ; <nl> } <nl> <nl> - real wkopt ; <nl> - int info ; <nl> + real * work_data = nullptr ; <nl> <nl> + if ( n > 0 ) { <nl> + int info ; <nl> + real wkopt ; <nl> # if defined ( THC_REAL_IS_FLOAT ) <nl> - magma_sgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , & wkopt , - 1 , & info ) ; <nl> + magma_sgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , & wkopt , - 1 , & info ) ; <nl> # else <nl> - magma_dgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , & wkopt , - 1 , & info ) ; <nl> + magma_dgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , & wkopt , - 1 , & info ) ; <nl> # endif <nl> <nl> - int lwork = ( int ) wkopt ; <nl> - real * work_data = th_magma_malloc_pinned < real > ( lwork ) ; <nl> + int lwork = ( int ) wkopt ; <nl> + work_data = th_magma_malloc_pinned < real > ( lwork ) ; <nl> <nl> # if defined ( THC_REAL_IS_FLOAT ) <nl> - magma_sgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , work_data , lwork , & info ) ; <nl> + magma_sgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , work_data , lwork , & info ) ; <nl> # else <nl> - magma_dgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , work_data , lwork , & info ) ; <nl> + magma_dgeev ( MagmaNoVec , jobvr , n , a_data , n , wr , wi , NULL , 1 , vr_data , ldvr , work_data , lwork , & info ) ; <nl> # endif <nl> <nl> - if ( info > 0 ) <nl> - THError ( " MAGMA geev : Failed to converge . % d off - diagonal elements of an didn ' t converge to zero " , info ) ; <nl> - else if ( info < 0 ) <nl> - THError ( " MAGMA geev : Argument % d : illegal value " , - info ) ; <nl> + if ( info > 0 ) <nl> + THError ( " MAGMA geev : Failed to converge . 
% d off - diagonal elements of an didn ' t converge to zero " , info ) ; <nl> + else if ( info < 0 ) <nl> + THError ( " MAGMA geev : Argument % d : illegal value " , - info ) ; <nl> + } <nl> <nl> { <nl> THCTensor_ ( resize2d ) ( state , re_ , 2 , n ) ; <nl> THCTensor * re = THCTensor_ ( newContiguous ) ( state , re_ ) ; <nl> - THCudaCheck ( cudaMemcpy ( THCStorage_ ( data ) ( state , re - > storage ) + re - > storageOffset , wr , n * sizeof ( real ) , cudaMemcpyHostToDevice ) ) ; <nl> - THCudaCheck ( cudaMemcpy ( THCStorage_ ( data ) ( state , re - > storage ) + re - > storageOffset + n , wi , n * sizeof ( real ) , cudaMemcpyHostToDevice ) ) ; <nl> + if ( n > 0 ) { <nl> + THCudaCheck ( cudaMemcpy ( THCStorage_ ( data ) ( state , re - > storage ) + re - > storageOffset , wr , n * sizeof ( real ) , cudaMemcpyHostToDevice ) ) ; <nl> + THCudaCheck ( cudaMemcpy ( THCStorage_ ( data ) ( state , re - > storage ) + re - > storageOffset + n , wi , n * sizeof ( real ) , cudaMemcpyHostToDevice ) ) ; <nl> + } <nl> THCTensor_ ( freeCopyTo ) ( state , re , re_ ) ; <nl> THCTensor_ ( transpose ) ( state , re_ , NULL , 0 , 1 ) ; <nl> } <nl> mmm a / test / test_torch . py <nl> ppp b / test / test_torch . py <nl> def test_lapack_empty ( self ) : <nl> # numpy / sci often has a direct wrapper ( e . g . lu_factor ) and a wrapper that " does the right thing " <nl> # ( e . g . lu ) . We often name our functions identically to the lapack function , so it will take work <nl> # to name / migrate - to better wrappers . <nl> - <nl> - # FIXME : enable CUDA tests . <nl> - devices = [ ' cpu ' ] # if not torch . cuda . is_available ( ) else [ ' cpu ' , ' cuda ' ] <nl> + devices = [ ' cpu ' ] if not torch . cuda . is_available ( ) else [ ' cpu ' , ' cuda ' ] <nl> for device in devices : <nl> <nl> def fn ( torchfn , * args ) : <nl>
Support ( some ) CUDA Lapack on n - dimensional empty tensors .
pytorch/pytorch
bae156a481351a7f2d0f5f2dbced5dfab2c58f19
2018-07-20T18:40:25Z
mmm a / . gitignore <nl> ppp b / . gitignore <nl> test / test - * <nl> doc / mkdocs / venv / <nl> doc / mkdocs / docs / images <nl> doc / mkdocs / docs / examples <nl> + doc / mkdocs / site <nl> mmm a / doc / mkdocs / Makefile <nl> ppp b / doc / mkdocs / Makefile <nl> <nl> + # serve the site locally <nl> serve : prepare_files <nl> venv / bin / mkdocs serve <nl> <nl> - prepare_files : <nl> + # create files that are not versioned inside the mkdocs folder <nl> + prepare_files : clean <nl> # build Doxygen <nl> $ ( MAKE ) - C . . <nl> - # clean subfolders <nl> - rm - fr docs / images docs / examples <nl> # create subfolders <nl> mkdir docs / images docs / examples <nl> # copy images <nl> prepare_files : <nl> # copy examples <nl> cp - vr . . / examples / * . cpp . . / examples / * . output docs / examples <nl> <nl> + # clean subfolders <nl> + clean : <nl> + rm - fr docs / images docs / examples <nl> + <nl> + # publish site to GitHub pages <nl> publish : prepare_files <nl> venv / bin / mkdocs gh - deploy - - clean - - force <nl> <nl> - install_venv : <nl> + # install a Python virtual environment <nl> + install_venv : requirements . txt <nl> python3 - mvenv venv <nl> venv / bin / pip install - r requirements . txt <nl> <nl> - uninstall_venv : <nl> + # uninstall the virtual environment <nl> + uninstall_venv : clean <nl> rm - fr venv <nl> new file mode 100644 <nl> index 000000000 . . 0cb890b0a <nl> mmm / dev / null <nl> ppp b / doc / mkdocs / docs / home / releases . md <nl> <nl> + # Releases <nl> + <nl> + # # v3 . 7 . 3 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 3 / include . zip ) ( 274 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 3 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 3 / json . 
hpp ) ( 791 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 3 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2019 - 11 - 17 <nl> + SHA - 256 : 3b5d2b8f8282b80557091514d8ab97e27f9574336c804ee666fda673a9b59926 ( json . hpp ) , 87b5884741427220d3a33df1363ae0e8b898099fbc59f1c451113f6732891014 ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes a bug introduced in release 3 . 7 . 2 which could yield quadratic complexity in destructor calls . All changes are backward - compatible . <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Removed ` reserve ( ) ` calls from the destructor which could lead to quadratic complexity . # 1837 # 1838 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . 
html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + <nl> + # # v3 . 7 . 2 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 2 / include . zip ) ( 274 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 2 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 2 / json . hpp ) ( 791 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 2 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2019 - 11 - 10 <nl> + SHA - 256 : 0a65fcbbe1b334d3f45c9498e5ee28c3f3b2428aea98557da4a3ff12f0f14ad6 ( json . hpp ) , 67f69c9a93b7fa0612dc1b6273119d2c560317333581845f358aaa68bff8f087 ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + Project [ bad_json_parsers ] ( https : / / github . com / lovasoa / bad_json_parsers ) tested how JSON parser libraries react on * * deeply nested inputs * * . It turns out that this library segfaulted at a certain nesting depth . This bug was fixed with this release . * * Now the parsing is only bounded by the available memory . * * All changes are backward - compatible . 
<nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + * Fixed a bug that lead to stack overflow for deeply nested JSON values ( objects , array ) by changing the implementation of the destructor from a recursive to an iterative approach . # 832 , # 1419 , # 1835 <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + * Added WhiteStone Bolt . # 1830 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . 
io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 7 . 1 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 1 / include . zip ) ( 273 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 1 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 1 / json . hpp ) ( 789 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 1 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2019 - 11 - 06 <nl> + SHA - 256 : b5ba7228f3c22a882d379e93d08eab4349458ee16fbf45291347994eac7dc7ce ( json . hpp ) , 77b9f54b34e7989e6f402afb516f7ff2830df551c3a36973085e2c7a6b1045fe ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes several small bugs in the library . All changes are backward - compatible . <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Fixed a segmentation fault when serializing ` std : : int64_t ` minimum value . # 1708 # 1722 <nl> + - Fixed the [ ` contains ( ) ` ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_ab23b04802eb9da97dc3f664e54e09cb3 . html # ab23b04802eb9da97dc3f664e54e09cb3 ) function for JSON Pointers . # 1727 # 1741 <nl> + - Fixed too lax SFINAE guard for conversion from ` std : : pair ` and ` std : : tuple ` to ` json ` . # 1805 # 1806 # 1825 # 1826 <nl> + - Fixed some regressions detected by UBSAN . Updated CI to use Clang - Tidy 7 . 1 . 0 . # 1716 # 1728 <nl> + - Fixed integer truncation in ` iteration_proxy ` . # 1797 <nl> + - Updated [ Hedley ] ( https : / / github . com / nemequ / hedley ) to v11 to [ fix a E2512 error ] ( https : / / github . 
com / nemequ / hedley / issues / 28 ) in MSVC . # 1799 <nl> + - Fixed a compile error in enum deserialization of non non - default - constructible types . # 1647 # 1821 <nl> + - Fixed the conversion from ` json ` to ` std : : valarray ` . <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - The [ ` items ( ) ` ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) function can now be used with a custom string type . # 1765 <nl> + - Made [ ` json_pointer : : back ` ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1json__pointer_a213bc67c32a30c68ac6bf06f5195d482 . html # a213bc67c32a30c68ac6bf06f5195d482 ) ` const ` . # 1764 # 1769 <nl> + - Meson is part of the release archive . # 1672 # 1694 <nl> + - Improved documentation on the Meson and Spack package manager . # 1694 # 1720 <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - Added GitHub Workflow with ` ubuntu - latest ` / GCC 7 . 4 . 0 as CI step . <nl> + - Added GCC 9 to Travis CI to compile with C + + 20 support . # 1724 <nl> + - Added MSVC 2019 to the AppVeyor CI . # 1780 <nl> + - Added badge to [ fuzzing status ] ( https : / / bugs . chromium . org / p / oss - fuzz / issues / list ? sort = - opened & can = 1 & q = proj : json ) . <nl> + - Fixed some cppcheck warnings . # 1760 <nl> + - Fixed several typos in the documentation . # 1720 # 1767 # 1803 <nl> + - Added documentation on the ` JSON_THROW_USER ` , ` JSON_TRY_USER ` , and ` JSON_CATCH_USER ` macros to control user - defined exception handling . <nl> + - Used GitHub ' s [ CODEOWNERS ] ( https : / / github . com / nlohmann / json / blob / develop / . github / CODEOWNERS ) and [ SECURITY ] ( https : / / github . com / nlohmann / json / blob / develop / . github / SECURITY . md ) feature . <nl> + - Removed ` GLOB ` from CMake files . # 1779 <nl> + - Updated to [ Doctest ] ( https : / / github . com / onqtam / doctest ) 2 . 3 . 
5 . <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 7 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 0 / include . zip ) ( 143 KB ) <nl> + - [ include . 
zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 0 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 0 / json . hpp ) ( 782 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 7 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2019 - 07 - 28 <nl> + SHA - 256 : a503214947952b69f0062f572cb74c17582a495767446347ce2e452963fc2ca4 ( json . hpp ) , 541c34438fd54182e9cdc68dd20c898d766713ad6d901fb2c6e28ff1f1e7c10d ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release introduces a few convenience functions and performs a lot of house keeping ( bug fixes and small improvements ) . All changes are backward - compatible . <nl> + <nl> + # # # : sparkles : New Features <nl> + <nl> + - Add overload of the * * [ ` contains ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab23b04802eb9da97dc3f664e54e09cb3 . html # ab23b04802eb9da97dc3f664e54e09cb3 ) function * * to check if a JSON pointer is valid without throwing exceptions , just like its [ counterpart for object keys ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9286acdc0578fc66e9346323e69fc0e3 . html # a9286acdc0578fc66e9346323e69fc0e3 ) . # 1600 <nl> + - Add a function * * [ ` to_string ` ] ( http : / / nlohmann . github . io / json / doxygen / namespacenlohmann_a6ce645a0b8717757e096a5b5773b7a16 . html # a6ce645a0b8717757e096a5b5773b7a16 ) * * to allow for generic conversion to strings . # 916 # 1585 <nl> + - Add * * return value for the [ ` emplace_back ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_abf29131f898b05aad2c01a9c80e7a002 . html # abf29131f898b05aad2c01a9c80e7a002 ) function * * , returning a reference to the added element just like C + + 17 is [ introducing this ] ( https : / / en . 
cppreference . com / w / cpp / container / vector / emplace_back ) for ` std : : vector ` . # 1609 <nl> + - Add info how to use the library with the * * [ pacman ] ( https : / / wiki . archlinux . org / index . php / pacman ) package manager * * on MSYS2 . # 1670 <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Fix an issue where typedefs with certain names yielded a compilation error . # 1642 # 1643 <nl> + - Fix a conversion to ` std : : string_view ` in the unit tests . # 1634 # 1639 <nl> + - Fix MSVC Debug build . # 1536 # 1570 # 1608 <nl> + - Fix [ ` get_to ` ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_a65753c68f06639eda0d355f919564e01 . html # a65753c68f06639eda0d355f919564e01 ) method to clear existing content before writing . # 1511 # 1555 <nl> + - Fix a ` - Wc + + 17 - extensions ` warning . ` nodiscard ` attributes are now only used with Clang when ` - std = c + + 17 ` is used . # 1535 # 1551 <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - Switch from [ Catch ] ( https : / / github . com / philsquared / Catch ) to * * [ doctest ] ( https : / / github . com / onqtam / doctest ) * * for the unit tests which speeds up compilation and runtime of the 112 , 112 , 308 tests . <nl> + - Add an explicit section to the [ README ] ( https : / / github . com / nlohmann / json / blob / develop / README . md ) about the * * frequently addressed topics * * [ character encoding ] ( https : / / github . com / nlohmann / json # character - encoding ) , [ comments in JSON ] ( https : / / github . com / nlohmann / json # comments - in - json ) , and the [ order of object keys ] ( https : / / github . com / nlohmann / json # order - of - object - keys ) . <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - Use [ ` GNUInstallDirs ` ] ( https : / / cmake . org / cmake / help / v3 . 0 / module / GNUInstallDirs . html ) to set library install directories . # 1673 <nl> + - Fix links in the [ README ] ( https : / / github . 
com / nlohmann / json / blob / develop / README . md ) . # 1620 # 1621 # 1622 # 1623 # 1625 <nl> + - Mention [ ` json ` type ] ( http : / / nlohmann . github . io / json / doxygen / namespacenlohmann_a2bfd99e845a2e5cd90aeaf1b1431f474 . html # a2bfd99e845a2e5cd90aeaf1b1431f474 ) on the [ documentation start page ] ( http : / / nlohmann . github . io / json / doxygen / index . html ) . # 1616 <nl> + - Complete documentation of [ ` value ( ) ` function ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_adcf8ca5079f5db993820bf50036bf45d . html # adcf8ca5079f5db993820bf50036bf45d ) with respect to ` type_error . 302 ` exception . # 1601 <nl> + - Fix links in the documentation . # 1598 <nl> + - Add regression tests for MSVC . # 1543 # 1570 <nl> + - Use * * [ CircleCI ] ( http : / / circleci . com ) * * for [ continuous integration ] ( https : / / circleci . com / gh / nlohmann / json ) . <nl> + - Use * * [ Doozer ] ( https : / / doozer . io ) * * for [ continuous integration ] ( https : / / doozer . io / nlohmann / json ) on Linux ( CentOS , Raspbian , Fedora ) <nl> + - Add tests to check each CMake flag ( ` JSON_BuildTests ` , ` JSON_Install ` , ` JSON_MultipleHeaders ` , ` JSON_Sanitizer ` , ` JSON_Valgrind ` , ` JSON_NoExceptions ` , ` JSON_Coverage ` ) . <nl> + - Use [ Hedley ] ( https : / / nemequ . github . io / hedley / ) to avoid re - inventing several compiler - agnostic feature macros like ` JSON_DEPRECATED ` , ` JSON_NODISCARD ` , ` JSON_LIKELY ` , ` JSON_UNLIKELY ` , ` JSON_HAS_CPP_14 ` , or ` JSON_HAS_CPP_17 ` . Functions taking or returning pointers are annotated accordingly when a pointer will not be null . <nl> + - Build and run tests on [ AppVeyor ] ( https : / / ci . appveyor . com / project / nlohmann / json ) in DEBUG and RELEASE mode . <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . 
As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 6 . 1 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 1 / include . zip ) ( 136 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 1 / include . zip . 
asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 1 / json . hpp ) ( 711 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 1 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2019 - 03 - 20 <nl> + SHA - 256 : d2eeb25d2e95bffeb08ebb7704cdffd2e8fca7113eba9a0b38d60a5c391ea09a ( json . hpp ) , 69cc88207ce91347ea530b227ff0776db82dcb8de6704e1a3d74f4841bc651cf ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release * * fixes a regression and a bug * * introduced by the earlier 3 . 6 . 0 release . All changes are backward - compatible . <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Fixed regression of # 590 which could lead to compilation errors with GCC 7 and GCC 8 . # 1530 <nl> + - Fixed a compilation error when ` < Windows . h > ` was included . # 1531 <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - Fixed a warning for missing field initializers . # 1527 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . 
html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 6 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 0 / include . zip ) ( 136 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 0 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 0 / json . hpp ) ( 711 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 6 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2019 - 03 - 20 <nl> + SHA - 256 : ce9839370f28094c71107c405affb3b08c4a098154988014cbb0800b1c44a831 ( json . hpp ) , 237c5e66e7f8186a02804ce9dbd5f69ce89fe7424ef84adf6142e973bd9532f4 ( include . zip ) <nl> + <nl> + ℹ ️ * * This release introduced a regression . Please update to [ version 3 . 6 . 1 ] ( https : / / github . com / nlohmann / json / releases / tag / v3 . 6 . 1 ) ! 
* * <nl> + <nl> + # # # Summary <nl> + <nl> + This release adds some * * convenience functions for JSON Pointers * * , introduces a [ ` contains ` ] ( <nl> + http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a0a45fc740637123fdf05fef970f8be47 . html # a0a45fc740637123fdf05fef970f8be47 ) function to check if a key is present in an object , and improves the * * performance of integer serialization * * . Furthermore , a lot of small bug fixes and improvements have been made . All changes are backward - compatible . <nl> + <nl> + # # # : sparkles : New Features <nl> + <nl> + - Overworked the public interface for JSON Pointers . The creation of JSON Pointers is simplified with [ ` operator / ` ] ( <nl> + http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1json__pointer_a90a11fe6c7f37b1746a3ff9cb24b0d53 . html # a90a11fe6c7f37b1746a3ff9cb24b0d53 ) and [ ` operator / = ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1json__pointer_a7395bd0af29ac23fd3f21543c935cdfa . html # a7395bd0af29ac23fd3f21543c935cdfa ) . JSON Pointers can be inspected with [ ` empty ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1json__pointer_a649252bda4a2e75a0915b11a25d8bcc3 . html # a649252bda4a2e75a0915b11a25d8bcc3 ) , [ ` back ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1json__pointer_a6bd5b554c10f15672135c216893eef31 . html # a6bd5b554c10f15672135c216893eef31 ) , and [ ` parent_pointer ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1json__pointer_afdaacce1edb7145e0434e014f0e8685a . html # afdaacce1edb7145e0434e014f0e8685a ) , and manipulated with [ ` push_back ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1json__pointer_a697d12b5bd6205f8866691b166b7c7dc . html # a697d12b5bd6205f8866691b166b7c7dc ) and [ ` pop_back ` ] ( http : / / nlohmann . github . 
io / json / doxygen / classnlohmann_1_1json__pointer_a4b1ee4d511ca195bed896a3da47e264c . html # a4b1ee4d511ca195bed896a3da47e264c ) . # 1434 <nl> + - Added a boolean method [ ` contains ` ] ( <nl> + http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a0a45fc740637123fdf05fef970f8be47 . html # a0a45fc740637123fdf05fef970f8be47 ) to check whether an element exists in a JSON object with a given key . Returns false when called on non - object types . # 1471 # 1474 <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Fixed a compilation issue with libc 2 . 12 . # 1483 # 1514 <nl> + - Fixed endian conversion on PPC64 . # 1489 <nl> + - Fixed library to compile with GCC 9 . # 1472 # 1492 <nl> + - Fixed a compilation issue with GCC 7 on CentOS . # 1496 <nl> + - Fixed an integer overflow . # 1447 <nl> + - Fixed buffer flushing in serializer . # 1445 # 1446 <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - The performance of dumping integers has been greatly improved . # 1411 <nl> + - Added CMake parameter ` JSON_Install ` to control whether the library should be installed ( default : on ) . # 1330 <nl> + - Fixed a lot of compiler and linter warnings . # 1400 # 1435 # 1502 <nl> + - Reduced required CMake version from 3 . 8 to 3 . 1 . # 1409 # 1428 # 1441 # 1498 <nl> + - Added ` nodiscard ` attribute to ` meta ( ) ` , ` array ( ) ` , ` object ( ) ` , ` from_cbor ` , ` from_msgpack ` , ` from_ubjson ` , ` from_bson ` , and ` parse ` . # 1433 <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - Added missing headers . # 1500 <nl> + - Fixed typos and broken links in README . # 1417 # 1423 # 1425 # 1451 # 1455 # 1491 <nl> + - Fixed documentation of parse function . # 1473 <nl> + - Suppressed warning that cannot be fixed inside the library . # 1401 # 1468 <nl> + - Improved package manager support : <nl> + - Updated Buckaroo instructions . # 1495 <nl> + - Improved Meson support . 
# 1463 <nl> + - Added Conda package manager documentation . # 1430 <nl> + - Added NuGet package manager documentation . # 1132 <nl> + - Continuous Integration <nl> + - Removed unstable or deprecated Travis builders ( Xcode 6 . 4 - 8 . 2 ) and added Xcode 10 . 1 builder . <nl> + - Added Clang 7 to Travis CI . <nl> + - Fixed AppVeyor x64 builds . # 1374 # 1414 <nl> + - Updated thirdparty libraries : <nl> + - Catch 1 . 12 . 0 - > 1 . 12 . 2 <nl> + - Google Benchmark 1 . 3 . 0 - > 1 . 4 . 1 <nl> + - Doxygen 1 . 8 . 15 - > 1 . 8 . 16 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . 
io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 5 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 5 . 0 / include . zip ) ( 133 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 5 . 0 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 5 . 0 / json . hpp ) ( 693 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 5 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2018 - 12 - 22 <nl> + SHA - 256 : 8a6dbf3bf01156f438d0ca7e78c2971bca50eec4ca6f0cf59adf3464c43bb9d5 ( json . hpp ) , 3564da9c5b0cf2e032f97c69baedf10ddbc98030c337d0327a215ea72259ea21 ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release introduces the support for * * structured bindings * * and reading from * * ` FILE * ` * * . Besides , a few bugs have been fixed . All changes are backward - compatible . <nl> + <nl> + # # # : sparkles : New Features <nl> + <nl> + - * * Structured bindings * * are now supported for JSON objects and arrays via the [ ` items ( ) ` ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) member function , so finally this code is possible : <nl> + ` ` ` cpp <nl> + for ( auto & [ key , val ] : j . 
items ( ) ) { <nl> + std : : cout < < key < < ' : ' < < val < < ' \ n ' ; <nl> + } <nl> + ` ` ` <nl> + # 1388 # 1391 <nl> + <nl> + - Added support for * * reading from ` FILE * ` * * to support situations in which streams are not available or would require too much RAM . # 1370 # 1392 <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - The ` eofbit ` was not set for input streams when the end of a stream was reached while parsing . # 1340 # 1343 <nl> + - Fixed a bug in the SAX parser for BSON arrays . <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - Added support for Clang 5 . 0 . 1 ( PS4 version ) . # 1341 # 1342 <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - Added a warning for implicit conversions to the documentation : It is not recommended to use implicit conversions when reading * * from * * a JSON value . Details about this recommendation can be found [ here ] ( https : / / www . github . com / nlohmann / json / issues / 958 ) . # 1363 <nl> + - Fixed typos in the documentation . # 1329 # 1380 # 1382 <nl> + - Fixed a C4800 warning . # 1364 <nl> + - Fixed a ` - Wshadow ` warning . # 1346 <nl> + - Wrapped ` std : : snprintf ` calls to avoid error in MSVC . # 1337 <nl> + - Added code to allow installation via Meson . # 1345 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . 
<nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 4 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 4 . 0 / include . zip ) ( 132 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 4 . 0 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 4 . 0 / json . hpp ) ( 689 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 4 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2018 - 10 - 30 <nl> + SHA - 256 : 63da6d1f22b2a7bb9e4ff7d6b255cf691a161ff49532dcc45d398a53e295835f ( json . hpp ) , bfec46fc0cee01c509cf064d2254517e7fa80d1e7647fea37cf81d97c5682bdc ( include . 
zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release introduces three new features : <nl> + <nl> + - * * BSON ( Binary JSON ) * * is next to CBOR , MessagePack , and UBJSON the fourth binary ( de ) serialization format supported by the library . <nl> + - * * Adjustable error handlers for invalid Unicode * * allows to specify the behavior when invalid byte sequences are serialized . <nl> + - * * Simplified enum / JSON mapping * * with a macro in case the default mapping to integers is not desired . <nl> + <nl> + Furthermore , some effort has been invested in improving the * * parse error messages * * . Besides , a few bugs have been fixed . All changes are backward - compatible . <nl> + <nl> + # # # : sparkles : New Features <nl> + <nl> + - The library can read and write a subset of * * [ BSON ] ( http : / / bsonspec . org / ) ( Binary JSON ) * * . All data types known from JSON are supported , whereas other types more tied to MongoDB such as timestamps , object ids , or binary data are currently not implemented . See [ the README ] ( https : / / github . com / nlohmann / json # binary - formats - bson - cbor - messagepack - and - ubjson ) for examples . # 1244 # 1320 <nl> + - The behavior when the library encounters an invalid Unicode sequence during serialization can now be controlled by defining one of three * * Unicode error handlers * * : ( 1 ) throw an exception ( default behavior ) , ( 2 ) replace invalid sequences by the Unicode replacement character ( U + FFFD ) , or ( 3 ) ignore / filter invalid sequences . See the [ documentation of the ` dump ` function ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_a50ec80b02d0f3f51130d4abb5d1cfdc5 . html # a50ec80b02d0f3f51130d4abb5d1cfdc5 ) for examples . # 1198 # 1314 <nl> + - To easily specify a user - defined * * enum / JSON mapping * * , a macro ` NLOHMANN_JSON_SERIALIZE_ENUM ` has been introduced . See the [ README section ] ( https : / / github . 
com / nlohmann / json # specializing - enum - conversion ) for more information . # 1208 # 1323 <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - fixed truncation # 1286 # 1315 <nl> + - fixed an issue with std : : pair # 1299 # 1301 <nl> + - fixed an issue with std : : variant # 1292 # 1294 <nl> + - fixed a bug in the JSON Pointer parser <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - The * * diagnosis messages for parse errors * * have been improved : error messages now indicated line / column positions where possible ( in addition to a byte count ) and also the context in which the error occurred ( e . g . , " while parsing a JSON string " ) . Example : error ` parse error at 2 : syntax error - invalid string : control character must be escaped ; last read : ' < U + 0009 > ' ` is now reported as ` parse error at line 1 , column 2 : syntax error while parsing value - invalid string : control character U + 0009 ( HT ) must be escaped to \ u0009 or \ t ; last read : ' < U + 0009 > ' ` . # 1280 # 1288 # 1303 <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - improved Meson documentation # 1305 <nl> + - fixed some more linter warnings # 1280 <nl> + - fixed Clang detection for third - party Google Benchmark library # 1277 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . 
<nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 3 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 3 . 0 / include . zip ) ( 123 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 3 . 0 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 3 . 0 / json . hpp ) ( 635 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 3 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2018 - 10 - 05 <nl> + SHA - 256 : f1327bb60c58757a3dd2b0c9c45d49503d571337681d950ec621f8374bcc14d4 ( json . hpp ) , 9588d63557333aaa485e92221ec38014a85a6134e7486fe3441e0541a5a89576 ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release adds support for * * GCC 4 . 8 * * . 
Furthermore , it adds a function [ * * ` get_to ` * * ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a8a3db7d78f74232d3a6fb8f1abf69709 . html # a8a3db7d78f74232d3a6fb8f1abf69709 ) to write a JSON value to a passed reference . Another topic of this release was the * * CMake support * * which has been overworked and documented . <nl> + <nl> + Besides , a lot of bugs have been fixed and slight improvements have been made . All changes are backward - compatible . <nl> + <nl> + # # # : sparkles : New Features <nl> + <nl> + - The library can now also built with * * GCC 4 . 8 * * . Though this compiler does not fully support C + + 11 , it can successfully compile and run the test suite . Note that bug [ 57824 ] ( https : / / gcc . gnu . org / bugzilla / show_bug . cgi ? id = 57824 ) in GCC 4 . 8 still forbids to use multiline raw strings in arguments to macros . # 1257 <nl> + - Added new function [ * * ` get_to ` * * ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a8a3db7d78f74232d3a6fb8f1abf69709 . html # a8a3db7d78f74232d3a6fb8f1abf69709 ) to write a JSON value to a passed reference . The destination type is automatically derived which allows more succinct code compared to the ` get ` function . # 1227 # 1231 <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Fixed a bug in the CMake file that made ` target_link_libraries ` to not properly include ` nlohmann_json ` . # 1243 # 1245 # 1260 <nl> + - Fixed a warning in MSVC 2017 complaining about a constexpr if . # 1204 # 1268 # 1272 <nl> + - Fixed a bug that prevented compilation with ICPC . # 755 # 1222 <nl> + - Improved the SFINAE correctness to fix a bug in the conversion operator . # 1237 # 1238 <nl> + - Fixed a ` - Wctor - dtor - privacy ` warning . # 1224 <nl> + - Fixed a warning on a lambda in unevaluated context . # 1225 # 1230 <nl> + - Fixed a bug introduced in version 3 . 2 . 
0 where defining ` JSON_CATCH_USER ` led to duplicate macro definition of ` JSON_INTERNAL_CATCH ` . # 1213 # 1214 <nl> + - Fixed a bug that prevented compilation with Clang 3 . 4 . 2 in RHEL 7 . # 1179 # 1249 <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - Added [ documentation on CMake integration ] ( https : / / github . com / nlohmann / json # cmake ) of the library . # 1270 <nl> + - Changed the CMake file to use ` find_package ( nlohmann_json ) ` without installing the library . # 1202 <nl> + - Improved error messages in case ` operator [ ] ` is used with the wrong combination ( json . exception . type_error . 305 ) of JSON container type and argument type . Example : " cannot use operator [ ] with a string argument " . # 1220 # 1221 <nl> + - Added a license and version information to the Meson build file . # 1252 <nl> + - Removed static assertions to indicated missing ` to_json ` or ` from_json ` functions as such assertions do not play well with SFINAE . These assertions also led to problems with GMock . # 960 # 1212 # 1228 <nl> + - The test suite now does not wait forever if run in a wrong directory and input files are not found . # 1262 <nl> + - The test suite does not show deprecation warnings for deprecated functions which frequently led to confusion . # 1271 <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - GCC 4 . 8 and Xcode 10 were added to the [ continuous integration suite ] ( https : / / travis - ci . org / nlohmann / json ) at Travis . <nl> + - Added [ lgtm ] ( https : / / lgtm . com / projects / g / nlohmann / json / context : cpp ) checks to pull requests . <nl> + - Added tests for CMake integration . # 1260 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 
0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + <nl> + # # v3 . 2 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 2 . 0 / include . zip ) ( 124 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 2 . 0 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 2 . 0 / json . 
hpp ) ( 636 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 2 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2018 - 08 - 20 <nl> + SHA - 256 : ce6b5610a051ec6795fa11c33854abebb086f0fd67c311f5921c3c07f9531b44 ( json . hpp ) , 35ee642558b90e2f9bc758995c4788c4b4d4dec54eef95fb8f38cb4d49c8fc7c ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release introduces a [ * * SAX interface * * ] ( https : / / nlohmann . github . io / json / structnlohmann_1_1json__sax . html ) to the library . While this may be a very special feature used by only few people , it allowed to unify all functions that consumed input and created some kind of JSON value . Internally , now all existing functions like ` parse ` , ` accept ` , ` from_cbor ` , ` from_msgpack ` , and ` from_ubjson ` use the SAX interface with different event processors . This allowed to separate the input processing from the value generation . Furthermore , throwing an exception in case of a parse error is now optional and up to the event processor . Finally , the JSON parser is now non - recursive ( meaning it does not use the call stack , but ` std : : vector < bool > ` to track the hierarchy of structured values ) which allows to process nested input more efficiently . <nl> + <nl> + Furthermore , the library finally is able to parse from * * wide string types * * . This is the first step toward opening the library from UTF - 8 to UTF - 16 and UTF - 32 . <nl> + <nl> + This release further fixes several bugs in the library . All changes are backward - compatible . 
<nl> + <nl> + # # # : sparkles : New Features <nl> + <nl> + - added a parser with a * * SAX interface * * ( # 971 , # 1153 ) <nl> + - support to parse from * * wide string types * * ` std : : wstring ` , ` std : : u16string ` , and ` std : : u32string ` ; the input will be converted to UTF - 8 ( # 1031 ) <nl> + - added support for * * ` std : : string_view ` * * when using C + + 17 ( # 1028 ) <nl> + - allow to * * roundtrip ` std : : map ` and ` std : : unordered_map ` * * from JSON if key type is not convertible to string ; in these cases , values are serialized to arrays of pairs ( # 1079 , # 1089 , # 1133 , # 1138 ) <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - allow to create ` nullptr_t ` from JSON allowing to properly roundtrip ` null ` values ( # 1169 ) <nl> + - allow compare user - defined string types ( # 1130 ) <nl> + - better support for algorithms using iterators from ` items ( ) ` ( # 1045 , # 1134 ) <nl> + - added parameter to avoid compilation error with MSVC 2015 debug builds ( # 1114 ) <nl> + - re - added accidentially skipped unit tests ( # 1176 ) <nl> + - fixed MSVC issue with ` std : : swap ` ( # 1168 ) <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - ` key ( ) ` function for iterators returns a const reference rather than a string copy ( # 1098 ) <nl> + - binary formats CBOR , MessagePack , and UBJSON now supports ` float ` as type for floating - point numbers ( # 1021 ) <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - changed issue templates <nl> + - improved continuous integration : added builders for Xcode 9 . 3 and 9 . 4 , added builders for GCC 8 and Clang 6 , added builder for MinGW , added builders for MSVC targeting x86 <nl> + - required CMake version is now at least 3 . 8 ( # 1040 ) <nl> + - overworked CMake file wrt . 
packaging ( # 1048 ) <nl> + - added package managers : Spack ( # 1041 ) and CocoaPods ( # 1148 ) <nl> + - fixed Meson include directory ( # 1142 ) <nl> + - preprocessor macro ` JSON_SKIP_UNSUPPORTED_COMPILER_CHECK ` can skip the rejection of unsupported compilers - use at your own risk ! ( # 1128 ) <nl> + - preprocessor macro ` JSON_INTERNAL_CATCH ` / ` JSON_INTERNAL_CATCH_USER ` allows to control the behavior of exception handling inside the library ( # 1187 ) <nl> + - added note on ` char ` to JSON conversion <nl> + - added note how to send security - related issue via encrypted email <nl> + - removed dependency to ` std : : stringstream ` ( # 1117 ) <nl> + - added SPDX - License - Identifier <nl> + - added updated JSON Parsing Test Suite , described in [ Parsing JSON is a Minefield 💣 ] ( http : / / seriot . ch / parsing_json . php ) <nl> + - updated to Catch 1 . 12 . 0 <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . 
io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + <nl> + <nl> + # # v3 . 1 . 2 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 2 / include . zip ) ( 115 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 2 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 2 / json . hpp ) ( 582 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 2 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2018 - 03 - 14 <nl> + SHA - 256 : fbdfec4b4cf63b3b565d09f87e6c3c183bdd45c5be1864d3fcb338f6f02c1733 ( json . hpp ) , 495362ee1b9d03d9526ba9ccf1b4a9c37691abe3a642ddbced13e5778c16660c ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes several bugs in the library . All changes are backward - compatible . <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Fixed a * * memory leak * * occurring in the parser callback ( # 1001 ) . <nl> + - Different * * specializations of ` basic_json ` * * ( e . g . , using different template arguments for strings or objects ) can now be used in assignments ( # 972 , # 977 , # 986 ) . 
<nl> + - Fixed a logical error in an iterator range check ( # 992 ) . <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - The parser and the serialization now support * * user - defined string types * * ( # 1006 , # 1009 ) . <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - * * [ Clang Analyzer ] ( http : / / clang - analyzer . llvm . org ) * * is now used as additional static analyzer ; see ` make clang_analyze ` . <nl> + - Overworked [ README ] ( https : / / github . com / nlohmann / json / blob / develop / README . md ) by adding links to the [ documentation ] ( https : / / nlohmann . github . io / json / ) ( # 981 ) . <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . 
Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + <nl> + # # v3 . 1 . 1 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 1 / include . zip ) ( 114 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 1 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 1 / json . hpp ) ( 577 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 1 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2018 - 02 - 13 <nl> + SHA - 256 : e14ce5e33d6a2daf748026bd4947f3d9686ca4cfd53d10c3da46a0a9aceb7f2e ( json . hpp ) , fde771d4b9e4f222965c00758a2bdd627d04fb7b59e09b7f3d1965abdc848505 ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes several bugs in the library . All changes are backward - compatible . <nl> + <nl> + # # # : bug : Bug Fixes <nl> + <nl> + - Fixed parsing of * * CBOR strings with indefinite length * * ( # 961 ) . Earlier versions of this library misinterpreted the CBOR standard and rejected input with the ` 0x7F ` start byte . <nl> + - Fixed user - defined * * conversion to vector type * * ( # 924 , # 969 ) . A wrong SFINAE check rejected code though a user - defined conversion was provided . 
<nl> + - Fixed documentation of the parser behavior for * * objects with duplicate keys * * ( # 963 ) . The exact behavior is not specified by [ RFC 8259 ] ( https : / / tools . ietf . org / html / rfc8259 ) and the library now also provides no guarantee which object key is stored . <nl> + - Added check to detect memory * * overflow when parsing UBJSON containers * * ( # 962 ) . The optimized UBJSON format allowed for specifying an array with billions of ` null ` elements with a few bytes and the library did not check whether this size exceeded ` max_size ( ) ` . <nl> + <nl> + # # # : hammer : Further Changes <nl> + <nl> + - [ Code coverage ] ( https : / / coveralls . io / github / nlohmann / json ) is now calculated for the individual header files , allowing to find uncovered lines more quickly than by browsing through the single header version ( # 953 , # 957 ) . <nl> + - A Makefile target ` run_benchmarks ` was added to quickly build and run the benchmark suite . <nl> + - The documentation was harmonized with respect to the header inclusion ( # 955 ) . Now all examples and the README use ` # include < nlohmann / json . hpp > ` to allow for selecting ` single_include ` or ` include ` or whatever installation folder as include directory . <nl> + - Added note on how to use the library with the [ cget ] ( http : / / cget . readthedocs . io / en / latest / ) package manager ( # 954 ) . <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + This release does not deprecate any functions . As an overview , the following functions have been deprecated in earlier versions and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) are deprecated . Please use the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . 
io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) instead . <nl> + - Functions [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) and [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) are deprecated . Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 1 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ include . zip ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 0 / include . zip ) ( 114 KB ) <nl> + - [ include . zip . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 0 / include . zip . asc ) ( 1 KB ) <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 0 / json . hpp ) ( 577 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 1 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2018 - 02 - 01 <nl> + SHA - 256 : d40f614d10a6e4e4e80dca9463da905285f20e93116c36d97d4dc1aa63d10ba4 ( json . 
hpp ) , 2b7234fca394d1e27b7e017117ed80b7518fafbb4f4c13a7c069624f6f924673 ( include . zip ) <nl> + <nl> + # # # Summary <nl> + <nl> + This release adds support for the [ * * UBJSON * * ] ( http : / / ubjson . org ) format and [ * * JSON Merge Patch * * ] ( https : / / tools . ietf . org / html / rfc7386 ) . It also contains some minor changes and bug fixes . All changes are backward - compatible . <nl> + <nl> + # # # : sparkles : New features <nl> + <nl> + - The library now supports [ * * UBJSON * * ] ( http : / / ubjson . org ) ( Universal Binary JSON Specification ) as binary format to read and write JSON values space - efficiently . See the [ documentation overview ] ( https : / / github . com / nlohmann / json / blob / develop / doc / binary_formats . md ) for a comparison of the different formats CBOR , MessagePack , and UBJSON . <nl> + - [ * * JSON Merge Patch * * ] ( https : / / tools . ietf . org / html / rfc7386 ) ( RFC 7386 ) offers an intuitive means to describe patches between JSON values ( # 876 , # 877 ) . See the documentation of [ ` merge_patch ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a0ec0cd19cce42ae6071f3cc6870ea295 . html # a0ec0cd19cce42ae6071f3cc6870ea295 ) for more information . <nl> + <nl> + # # # : zap : Improvements <nl> + <nl> + - The library now uses the * * Grisu2 algorithm * * for printing floating - point numbers ( based on the reference implementation by Florian Loitsch ) which produces a short representation which is guaranteed to round - trip ( # 360 , # 935 , # 936 ) . <nl> + - The * * UTF - 8 handling * * was further simplified by using the decoder of Björn Hoehrmann in more scenarios . <nl> + <nl> + # # # : truck : Reorganization <nl> + <nl> + - Though the library is released as a single header , its development got more and more complicated . With this release , the header is * * split into several files * * and the single - header file ` json . 
hpp ` can be generated from these development sources . In the repository , folder ` include ` contains the development sources and ` single_include ` contains the single ` json . hpp ` header ( # 700 , # 906 , # 907 , # 910 , # 911 , # 915 , # 920 , # 924 , # 925 , # 928 , # 944 ) . <nl> + - The split further allowed for a * * forward declaration header * * ` include / nlohmann / json_fwd . hpp ` to speed up compilation times ( # 314 ) . <nl> + <nl> + # # # : hammer : Further changes <nl> + <nl> + - [ Google Benchmark ] ( https : / / github . com / google / benchmark ) is now used for micro benchmarks ( see ` benchmarks ` folder , # 921 ) . <nl> + - The serialization ( JSON and binary formats ) now properly work with the libraries string template parameter , allowing for optimized string implementations to be used in constraint environments such as embedded software ( # 941 , # 950 ) . <nl> + - The exceptional behavior can now be overridden by defining macros ` JSON_THROW_USER ` , ` JSON_TRY_USER ` , and ` JSON_CATCH_USER ` , defining the behavior of ` throw ` , ` try ` and ` catch ` , respectively . This allows to switch off C + + ' s exception mechanism yet still execute user - defined code in case an error condition occurs ( # 938 ) . <nl> + - To facilitate the interplay with [ flex ] ( https : / / github . com / westes / flex ) and [ Bison ] ( https : / / www . gnu . org / software / bison / ) , the library does not use the variable name ` yytext ` any more as it could clash with macro definitions ( # 933 ) . <nl> + - The library now defines ` NLOHMANN_JSON_VERSION_MAJOR ` , ` NLOHMANN_JSON_VERSION_MINOR ` , and ` NLOHMANN_JSON_VERSION_PATCH ` to allow for conditional compilation based on the included library version ( # 943 , # 948 ) . <nl> + - A compilation error with ICC has been fixed ( # 947 ) . <nl> + - Typos and links in the documentation have been fixed ( # 900 , # 930 ) . <nl> + - A compiler error related to incomplete types has been fixed ( # 919 ) . 
<nl> + - The tests form the [ UTF - 8 decoder stress test ] ( http : / / www . cl . cam . ac . uk / ~ mgk25 / ucs / examples / UTF - 8 - test . txt ) have been added to the test suite . <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + - Function [ ` iterator_wrapper ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1592a06bc63811886ade4f9d965045e . html # af1592a06bc63811886ade4f9d965045e ) has been deprecated ( # 874 ) . Since its introduction , the name was up for discussion , as it was too technical . We now introduced the member function [ ` items ( ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_afe3e137ace692efa08590d8df40f58dd . html # afe3e137ace692efa08590d8df40f58dd ) with the same semantics . ` iterator_wrapper ` will be removed in the next major version ( i . e . , 4 . 0 . 0 ) . <nl> + <nl> + Furthermore , the following functions are deprecated since version 3 . 0 . 0 and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) <nl> + - [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) <nl> + <nl> + Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . 
io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 0 . 1 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 0 . 1 / json . hpp ) ( 502 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 0 . 1 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2017 - 12 - 29 <nl> + SHA - 256 : c9b3591f1bb94e723a0cd7be861733a3a555b234ef132be1e9027a0364118c4c <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes small issues in the implementation of * * JSON Pointer * * and * * JSON Patch * * . All changes are backward - compatible . <nl> + <nl> + # # # Changes <nl> + <nl> + - : bug : The * * " copy " operation of JSON Patch * * ( [ RFC 6902 ] ( https : / / tools . ietf . org / html / rfc6902 ) ) requests that it is an error if the target path points into a non - existing array or object ( see # 894 for a detailed description ) . This release fixes the implementation to detect such invalid target paths and throw an exception . <nl> + - : bug : An * * array index in a JSON Pointer * * ( [ RFC 6901 ] ( https : / / tools . ietf . org / html / rfc6901 ) ) must be an integer . This release fixes the implementation to throw an exception in case invalid array indices such as ` 10e2 ` are used . <nl> + - : white_check_mark : Added the [ JSON Patch tests ] ( https : / / github . com / json - patch / json - patch - tests ) from Byron Ruth and Mike McCabe . <nl> + - : memo : Fixed the documentation of the [ ` at ( ptr ) ` function with JSON Pointers ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_a8ab61397c10f18b305520da7073b2b45 . html # a8ab61397c10f18b305520da7073b2b45 ) to list all possible exceptions ( see # 888 ) . 
<nl> + - : memo : Updated the [ container overview documentation ] ( https : / / nlohmann . github . io / json / ) ( see # 883 ) . <nl> + - : wrench : The CMake files now respect the [ ` BUILD_TESTING ` ] ( https : / / cmake . org / cmake / help / latest / module / CTest . html ? highlight = build_testing ) option ( see # 846 , # 885 ) <nl> + - : rotating_light : Fixed some compiler warnings ( see # 858 , # 882 ) . <nl> + <nl> + # # # Deprecated functions <nl> + <nl> + : fire : To unify the interfaces and to improve similarity with the STL , the following functions are deprecated since version 3 . 0 . 0 and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) <nl> + - [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . html # a9e06deabe69262c3ffc5533d32856983 ) <nl> + <nl> + Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # v3 . 0 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v3 . 0 . 0 / json . hpp ) ( 501 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . 
com / nlohmann / json / releases / download / v3 . 0 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2017 - 12 - 17 <nl> + SHA - 256 : 076d4a0cb890a3c3d389c68421a11c3d77c64bd788e85d50f1b77ed252f2a462 <nl> + <nl> + # # # Summary <nl> + <nl> + < img src = " https : / / user - images . githubusercontent . com / 159488 / 34072418 - 8f5ba396 - e287 - 11e7 - 9de7 - 8bc7482ac23c . png " align = " right " > <nl> + <nl> + After almost a year , here is finally a new release of JSON for Modern C + + , and it is a major one ! As we adhere to [ semantic versioning ] ( https : / / semver . org ) , this means the release includes some breaking changes , so please read the next section carefully before you update . But don ' t worry , we also added a few new features and put a lot of effort into fixing a lot of bugs and straighten out a few inconsistencies . <nl> + <nl> + # # # : boom : Breaking changes <nl> + <nl> + This section describes changes that change the public API of the library and may require changes in code using a previous version of the library . In section " Moving from 2 . x . x to 3 . 0 . 0 " at the end of the release notes , we describe in detail how existing code needs to be changed . <nl> + <nl> + - The library now uses [ * * user - defined exceptions * * ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9a0aced019cb1d65bb49703406c84970 . html # a9a0aced019cb1d65bb49703406c84970 ) instead of re - using those defined in ` < stdexcept > ` ( # 244 ) . This not only allows to add more information to the exceptions ( every exception now has an identifier , and parse errors contain the position of the error ) , but also to easily catch all library exceptions with a single ` catch ( json : : exception ) ` . <nl> + - When strings with a different encoding as UTF - 8 were stored in JSON values , their serialization could not be parsed by the library itself , as only UTF - 8 is supported . 
To enforce this library limitation and improve consistency , * * non - UTF - 8 encoded strings now yield a ` json : : type_error ` exception during serialization * * ( # 838 ) . The check for valid UTF - 8 is realized with code from [ Björn Hoehrmann ] ( http : / / bjoern . hoehrmann . de / ) . <nl> + - * * NaN and infinity values can now be stored inside the JSON value * * without throwing an exception . They are , however , still serialized as ` null ` ( # 388 ) . <nl> + - The library ' s iterator tag was changed from RandomAccessIterator to * * [ BidirectionalIterator ] ( http : / / en . cppreference . com / w / cpp / concept / BidirectionalIterator ) * * ( # 593 ) . Supporting RandomAccessIterator was incorrect as it assumed an ordering of values in a JSON objects which are unordered by definition . <nl> + - The library does not include the standard headers ` < iostream > ` , ` < ctype > ` , and ` < stdexcept > ` any more . You may need to add these headers to code relying on them . <nl> + - Removed constructor ` explicit basic_json ( std : : istream & i , const parser_callback_t cb = nullptr ) ` which was deprecated in version 2 . 0 . 0 ( # 480 ) . <nl> + <nl> + # # # : fire : Deprecated functions <nl> + <nl> + To unify the interfaces and to improve similarity with the STL , the following functions are now deprecated and will be removed in the next major version ( i . e . , 4 . 0 . 0 ) : <nl> + <nl> + - [ ` friend std : : istream & operator < < ( basic_json & , std : : istream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ab7285a92514fcdbe6de505ebaba92ea3 . html # ab7285a92514fcdbe6de505ebaba92ea3 ) <nl> + - [ ` friend std : : ostream & operator > > ( const basic_json & , std : : ostream & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a9e06deabe69262c3ffc5533d32856983 . 
html # a9e06deabe69262c3ffc5533d32856983 ) <nl> + <nl> + Please use [ ` friend std : : istream & operator > > ( std : : istream & , basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aaf363408931d76472ded14017e59c9e8 . html # aaf363408931d76472ded14017e59c9e8 ) and [ ` friend operator < < ( std : : ostream & , const basic_json & ) ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5e34c5435e557d0bf666bd7311211405 . html # a5e34c5435e557d0bf666bd7311211405 ) instead . <nl> + <nl> + # # # : sparkles : New features <nl> + <nl> + With all this breaking and deprecation out of the way , let ' s talk about features ! <nl> + <nl> + - We improved the * * diagnostic information for syntax errors * * ( # 301 ) . Now , an exception [ ` json : : parse_error ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1efc2468e6022be6e35fc2944cabe4d . html # af1efc2468e6022be6e35fc2944cabe4d ) is thrown which contains a detailed message on the error , but also a member ` byte ` to indicate the byte offset in the input where the error occurred . <nl> + - We added a * * non - throwing syntax check * * ( # 458 ) : The new ` accept ` function returns a Boolean indicating whether the input is proper JSON . We also added a Boolean parameter ` allow_exceptions ` to the existing [ ` parse ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_aa9676414f2e36383c4b181fe856aa3c0 . html # aa9676414f2e36383c4b181fe856aa3c0 ) functions to return a ` discarded ` value in case a syntax error occurs instead of throwing an exception . <nl> + - An [ ` update ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a1cfa9ae5e7c2434cab4cfe69bffffe11 . html # a1cfa9ae5e7c2434cab4cfe69bffffe11 ) function was added to * * merge two JSON objects * * ( # 428 ) . 
In case you are wondering : the name was inspired by [ Python ] ( https : / / docs . python . org / 2 / library / stdtypes . html # dict . update ) . <nl> + - The [ ` insert ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a1b0a4e60d56f1fe80501ed941e122892 . html # a1b0a4e60d56f1fe80501ed941e122892 ) function now also supports an iterator range to add elements to an object . <nl> + - The binary exchange formats * * CBOR and MessagePack can now be parsed from input streams and written to output streams * * ( # 477 ) . <nl> + - Input streams are now only read until the end of a JSON value instead of the end of the input ( # 367 ) . <nl> + - The serialization function [ ` dump ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a5adea76fedba9898d404fef8598aa663 . html # a5adea76fedba9898d404fef8598aa663 ) now has two optional parameters ` ensure_ascii ` to * * escape all non - ASCII characters * * with ` \ uxxxx ` and an ` indent_char ` parameter to choose whether to * * indent with spaces or tabs * * ( # 654 ) . <nl> + - Added * * built - in type support * * for C arrays ( # 502 ) , ` std : : pair ` and ` std : : tuple ` ( # 563 , # 614 ) , ` enum ` and ` enum class ` ( # 545 ) , ` std : : vector < bool > ` ( # 494 ) . Fixed support for ` std : : valarray ` ( # 702 ) , ` std : : array ` ( # 553 ) , and ` std : : map < std : : string , std : : string > ` ( # 600 , # 607 ) . <nl> + <nl> + # # # : hammer : Further changes <nl> + <nl> + Furthermore , there have been a lot of changes under the hood : <nl> + <nl> + - Replaced the [ re2c ] ( http : / / re2c . org ) generated scanner by a self - coded version which allows for a better modularization of the parser and better diagnostics . To test the new scanner , we added millions ( 8 , 860 , 608 to be exact ) of unit tests to check all valid and invalid byte sequences of the Unicode standard . 
<nl> + - Google ' s OSS - Fuzz is still constantly fuzz - testing the library and found several issues that were fixed in this release ( # 497 , # 504 , # 514 , # 516 , # 518 , # 519 , # 575 ) . <nl> + - We now also ignore UTF - 8 byte order marks when parsing from an iterator range ( # 602 ) . <nl> + - Values can be now moved from initializer lists ( # 663 ) . <nl> + - Updated to [ Catch ] ( https : / / github . com / catchorg / Catch2 ) 1 . 9 . 7 . Unfortunately , Catch2 currently has some performance issues . <nl> + - The non - exceptional paths of the library are now annotated with ` __builtin_expect ` to optimize branch prediction as long as no error occurs . <nl> + - MSVC now produces a stack trace in MSVC if a ` from_json ` or ` to_json ` function was not found for a user - defined type . We also added a debug visualizer [ ` nlohmann_json . natvis ` ] ( https : / / github . com / nlohmann / json / blob / develop / nlohmann_json . natvis ) for better debugging in MSVC ( # 844 ) . <nl> + - Overworked the documentation and added even more examples . <nl> + - The build workflow now relies on CMake and CTest . Special flags can be chosen with CMake , including coverage ( ` JSON_Coverage ` ) , compilation without exceptions ( ` JSON_NoExceptions ` ) , LLVM sanitizers ( ` JSON_Sanitizer ` ) , or execution with Valgrind ( ` JSON_Valgrind ` ) . <nl> + - Added support for package managers Meson ( # 576 ) , Conan ( # 566 ) , Hunter ( # 671 , # 829 ) , and vcpkg ( # 753 ) . <nl> + - Added CI builders : Xcode 8 . 3 , 9 . 0 , 9 . 1 , and 9 . 2 ; GCC 7 . 2 ; Clang 3 . 8 , 3 . 9 , 4 . 0 , and 5 . 0 ; Visual Studio 2017 . The library is further built with C + + 17 settings on the latest Clang , GCC , and MSVC version to quickly detect new issues . <nl> + <nl> + # # # Moving from 2 . x . x to 3 . 0 . 0 <nl> + <nl> + # # # # User - defined Exceptions <nl> + <nl> + There are five different exceptions inheriting from [ ` json : : exception ` ] ( http : / / nlohmann . github . 
io / json / doxygen / classnlohmann_1_1basic__json_a9a0aced019cb1d65bb49703406c84970 . html # a9a0aced019cb1d65bb49703406c84970 ) : <nl> + <nl> + - [ ` json : : parse_error ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af1efc2468e6022be6e35fc2944cabe4d . html # af1efc2468e6022be6e35fc2944cabe4d ) for syntax errors ( including the binary formats ) , <nl> + - [ ` json : : invalid_iterator ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_ac13d32f7cbd02d616e71d8dc30dadcbf . html # ac13d32f7cbd02d616e71d8dc30dadcbf ) for errors related to iterators , <nl> + - [ ` json : : type_error ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a4010e8e268fefd86da773c10318f2902 . html # a4010e8e268fefd86da773c10318f2902 ) for errors where functions were called with the wrong JSON type , <nl> + - [ ` json : : out_of_range ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a28f7c2f087274a0012eb7a2333ee1580 . html # a28f7c2f087274a0012eb7a2333ee1580 ) for range errors , and <nl> + - [ ` json : : other_error ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a3333a5a8714912adda33a35b369f7b3d . html # a3333a5a8714912adda33a35b369f7b3d ) for miscellaneous errors . <nl> + <nl> + To support these exception , the ` try ` / ` catch ` blocks of your code need to be adjusted : <nl> + <nl> + | new exception | previous exception | <nl> + | : - - | : - - | <nl> + | parse_error . 101 | invalid_argument | <nl> + | parse_error . 102 | invalid_argument | <nl> + | parse_error . 103 | invalid_argument | <nl> + | parse_error . 104 | invalid_argument | <nl> + | parse_error . 105 | invalid_argument | <nl> + | parse_error . 106 | domain_error | <nl> + | parse_error . 107 | domain_error | <nl> + | parse_error . 108 | domain_error | <nl> + | parse_error . 109 | invalid_argument | <nl> + | parse_error . 
110 | out_of_range | <nl> + | parse_error . 111 | invalid_argument | <nl> + | parse_error . 112 | invalid_argument | <nl> + | invalid_iterator . 201 | domain_error | <nl> + | invalid_iterator . 202 | domain_error | <nl> + | invalid_iterator . 203 | domain_error | <nl> + | invalid_iterator . 204 | out_of_range | <nl> + | invalid_iterator . 205 | out_of_range | <nl> + | invalid_iterator . 206 | domain_error | <nl> + | invalid_iterator . 207 | domain_error | <nl> + | invalid_iterator . 208 | domain_error | <nl> + | invalid_iterator . 209 | domain_error | <nl> + | invalid_iterator . 210 | domain_error | <nl> + | invalid_iterator . 211 | domain_error | <nl> + | invalid_iterator . 212 | domain_error | <nl> + | invalid_iterator . 213 | domain_error | <nl> + | invalid_iterator . 214 | out_of_range | <nl> + | type_error . 301 | domain_error | <nl> + | type_error . 302 | domain_error | <nl> + | type_error . 303 | domain_error | <nl> + | type_error . 304 | domain_error | <nl> + | type_error . 305 | domain_error | <nl> + | type_error . 306 | domain_error | <nl> + | type_error . 307 | domain_error | <nl> + | type_error . 308 | domain_error | <nl> + | type_error . 309 | domain_error | <nl> + | type_error . 310 | domain_error | <nl> + | type_error . 311 | domain_error | <nl> + | type_error . 313 | domain_error | <nl> + | type_error . 314 | domain_error | <nl> + | type_error . 315 | domain_error | <nl> + | out_of_range . 401 | out_of_range | <nl> + | out_of_range . 402 | out_of_range | <nl> + | out_of_range . 403 | out_of_range | <nl> + | out_of_range . 404 | out_of_range | <nl> + | out_of_range . 405 | domain_error | <nl> + | other_error . 501 | domain_error | <nl> + <nl> + # # # # Handling of NaN and INF <nl> + <nl> + - If an overflow occurs during parsing a number from a JSON text , an exception [ ` json : : out_of_range ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a28f7c2f087274a0012eb7a2333ee1580 . 
html # a28f7c2f087274a0012eb7a2333ee1580 ) is thrown so that the overflow is detected early and roundtripping is guaranteed . <nl> + <nl> + - NaN and INF floating - point values can be stored in a JSON value and are not replaced by null . That is , the basic_json class behaves like ` double ` in this regard ( no exception occurs ) . However , NaN and INF are serialized to ` null ` . <nl> + <nl> + # # # # Removal of deprecated functions <nl> + <nl> + Function ` explicit basic_json ( std : : istream & i , const parser_callback_t cb = nullptr ) ` should be replaced by the ` parse ` function : Let ` ss ` be a stream and ` cb ` be a parse callback function . <nl> + <nl> + Old code : <nl> + <nl> + ` ` ` cpp <nl> + json j ( ss , cb ) ; <nl> + ` ` ` <nl> + <nl> + New code : <nl> + <nl> + ` ` ` cpp <nl> + json j = json : : parse ( ss , cb ) ; <nl> + ` ` ` <nl> + <nl> + If no callback function is used , also the following code works : <nl> + <nl> + ` ` ` cpp <nl> + json j ; <nl> + j < < ss ; <nl> + ` ` ` <nl> + <nl> + or <nl> + <nl> + ` ` ` cpp <nl> + json j ; <nl> + ss > > j ; <nl> + ` ` ` <nl> + <nl> + # # v2 . 1 . 1 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 1 . 1 / json . hpp ) ( 437 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 1 . 1 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + Release date : 2017 - 02 - 25 <nl> + SHA - 256 : faa2321beb1aa7416d035e7417fcfa59692ac3d8c202728f9bcc302e2d558f57 <nl> + <nl> + # # # Summary <nl> + <nl> + This release * * fixes a locale - related bug in the parser * * . To do so , the whole number handling ( lexer , parser , and also the serialization ) have been overworked . Furthermore , a lot of small changes added up that were added to this release . All changes are backward - compatible . <nl> + <nl> + # # # Changes <nl> + - : bug : Locales that have a different character than ` . 
` as decimal separator ( e . g . , the Norwegian locale ` nb_NO . UTF - 8 ` ) led to truncated number parsing or parse errors . The library now has been fixed to work with * * any locale * * . Note that ` . ` is still the only valid decimal separator for JSON input . <nl> + - : bug : Numbers like ` 1 . 0 ` were correctly parsed as floating - point number , but serialized as integer ( ` 1 ` ) . Now , * * floating - point numbers correctly round trip * * . <nl> + - : bug : Parsing incorrect JSON numbers with leading 0 ( ` 0123 ` ) could yield a [ buffer overflow ] ( https : / / github . com / nlohmann / json / issues / 452 ) . This is fixed now by detecting such errors directly by the lexer . <nl> + - : bug : Constructing a JSON value from a pointer was incorrectly interpreted as a Boolean ; such code will now yield a compiler error . <nl> + - : bug : Comparing a JSON number with ` 0 ` led to a comparison with ` null ` . This is fixed now . <nl> + - : bug : All throw calls are now wrapped in macros . <nl> + - : lock : Starting during the preparation of this release ( since 8 February 2017 ) , commits and released files are * * cryptographically signed * * with [ this GPG key ] ( https : / / keybase . io / nlohmann / pgp_keys . asc ? fingerprint = 797167ae41c0a6d9232e48457f3cea63ae251b69 ) . Previous releases have also been signed . <nl> + - : sparkles : The parser for MessagePack and CBOR now supports an optional start index parameter to define a byte offset for the parser . <nl> + - : rotating_light : Some more warnings have been fixed . With Clang , the code compiles * * without warnings * * with ` - Weverything ` ( well , it needs ` - Wno - documentation - unknown - command ` and ` - Wno - deprecated - declarations ` , but you get the point ) . <nl> + - : hammer : The code can be compiled easier with many Android NDKs by avoiding macros like ` UINT8_MAX ` which previously required defining a preprocessor macro for compilation . 
<nl> + - : zap : The unit tests now compile two times faster . <nl> + - : heavy_plus_sign : [ Cotire ] ( https : / / github . com / sakra / cotire ) is used to speed up the build . <nl> + - : pencil2 : Fixed a lot of typos in the documentation . <nl> + - : memo : Added a section to the README file that lists all used [ third - party code / tools ] ( https : / / github . com / nlohmann / json # used - third - party - tools ) . <nl> + - : memo : Added a note on constructing a string value vs . parsing . <nl> + - : white_check_mark : The test suite now contains 11202597 unit tests . <nl> + - : memo : Improved the [ Doxygen documentation ] ( https : / / nlohmann . github . io / json / ) by shortening the template parameters of class ` basic_json ` . <nl> + - : construction_worker : Removed Doozer . <nl> + - : construction_worker : Added Codacity . <nl> + - : arrow_up : Upgraded Catch to version 1 . 7 . 2 . <nl> + <nl> + <nl> + # # v2 . 1 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 1 . 0 / json . hpp ) ( 426 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 1 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2017 - 01 - 28 <nl> + - SHA - 256 : a571dee92515b685784fd527e38405cf3f5e13e96edbfe3f03d6df2e363a767b <nl> + <nl> + # # # Summary <nl> + <nl> + This release introduces a means to convert from / to user - defined types . The release is backwards compatible . <nl> + <nl> + ! [ conversion ] ( https : / / cloud . githubusercontent . com / assets / 159488 / 22399173 / aebe8f7a - e597 - 11e6 - 930f - 7494ee615827 . png ) <nl> + <nl> + # # # Changes <nl> + - : sparkles : The library now offers an elegant way to * * convert from and to arbitrary value types * * . All you need to do is to implement two functions : ` to_json ` and ` from_json ` . 
Then , a conversion is as simple as putting a ` = ` between variables . See the [ README ] ( https : / / github . com / nlohmann / json # arbitrary - types - conversions ) for more information and examples . <nl> + - : sparkles : * * Exceptions can now be switched off . * * This can be done by defining the preprocessor symbol ` JSON_NOEXCEPTION ` or by passing ` - fno - exceptions ` to your compiler . In case the code would usually thrown an exception , ` abort ( ) ` is now called . <nl> + - : sparkles : * * Information on the library * * can be queried with the new ( static ) function ` meta ( ) ` which returns a JSON object with information on the version , compiler , and platform . See the [ documentation ] ( ) for an example . <nl> + - : bug : A bug in the CBOR parser was fixed which led to a buffer overflow . <nl> + - : sparkles : The function [ ` type_name ( ) ` ] ( ) is now public . It allows to query the type of a JSON value as string . <nl> + - : white_check_mark : Added the [ Big List of Naughty Strings ] ( https : / / github . com / minimaxir / big - list - of - naughty - strings ) as test case . <nl> + - : arrow_up : Updated to [ Catch v1 . 6 . 0 ] ( https : / / github . com / philsquared / Catch / releases / tag / v1 . 6 . 0 ) . <nl> + - : memo : Some typos in the documentation have been fixed . <nl> + <nl> + <nl> + # # v2 . 0 . 10 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 10 / json . hpp ) ( 409 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 10 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2017 - 01 - 02 <nl> + - SHA - 256 : ec27d4e74e9ce0f78066389a70724afd07f10761009322dc020656704ad5296d <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes several security - relevant bugs in the MessagePack and CBOR parsers . The fixes are backwards compatible . 
<nl> + <nl> + # # # Changes <nl> + - : bug : Fixed a lot of * * bugs in the CBOR and MesssagePack parsers * * . These bugs occurred if invalid input was parsed and then could lead in buffer overflows . These bugs were found with Google ' s [ OSS - Fuzz ] ( https : / / github . com / google / oss - fuzz ) , see # 405 , # 407 , # 408 , # 409 , # 411 , and # 412 for more information . <nl> + - : construction_worker : We now also use the * * [ Doozer ] ( https : / / doozer . io ) continuous integration platform * * . <nl> + - : construction_worker : The complete test suite is now also run with * * Clang ' s address sanitizer and undefined - behavior sanitizer * * . <nl> + - : white_check_mark : Overworked * * fuzz testing * * ; CBOR and MessagePack implementations are now fuzz - tested . Furthermore , all fuzz tests now include a round trip which ensures created output can again be properly parsed and yields the same JSON value . <nl> + - : memo : Clarified documentation of ` find ( ) ` function to always return ` end ( ) ` when called on non - object value types . <nl> + - : hammer : Moved thirdparty test code to ` test / thirdparty ` directory . <nl> + <nl> + # # v2 . 0 . 9 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 9 / json . hpp ) ( 406 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 9 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 12 - 16 <nl> + - SHA - 256 : fbf3396f13e187d6c214c297bddc742d918ea9b55e10bfb3d9f458b9bfdc22e5 <nl> + <nl> + # # # Summary <nl> + <nl> + This release implements with * * [ CBOR ] ( http : / / cbor . io ) * * and * * [ MessagePack ] ( http : / / msgpack . org ) * * two * * binary serialization / deserialization formats * * . It further contains some small fixes and improvements . The fixes are backwards compatible . <nl> + <nl> + ! 
[ cbor ] ( https : / / cloud . githubusercontent . com / assets / 159488 / 22399181 / d4d60d32 - e597 - 11e6 - 8dcb - 825abcf9ac2a . png ) <nl> + <nl> + # # # Changes <nl> + - : sparkles : The library can now read and write the binary formats * * [ CBOR ] ( http : / / cbor . io ) * * ( Concise Binary Object Representation ) and * * [ MessagePack ] ( http : / / msgpack . org ) * * . Both formats are aimed to produce a very compact representation of JSON which can be parsed very efficiently . See the [ README file ] ( https : / / github . com / nlohmann / json # binary - formats - cbor - and - messagepack ) for more information and examples . <nl> + - : fire : simplified the iteration implementation allowing to remove dozens of lines of code <nl> + - : bug : fixed an [ integer overflow error ] ( https : / / github . com / nlohmann / json / issues / 389 ) detected by [ Google ' s OSS - Fuzz ] ( https : / / github . com / google / oss - fuzz ) <nl> + - : bug : suppressed documentation warnings inside the library to facilitate compilation with ` - Wdocumentation ` <nl> + - : bug : fixed an overflow detection error in the number parser <nl> + - : memo : updated [ contribution guidelines ] ( https : / / github . com / nlohmann / json / blob / develop / . github / CONTRIBUTING . md ) to a list of frequentely asked features that will most likely be never added to the library <nl> + - : memo : added a * * table of contents * * to the [ README file ] ( https : / / github . com / nlohmann / json / blob / develop / README . md ) to add some structure <nl> + - : memo : mentioned the many [ examples ] ( https : / / github . com / nlohmann / json / tree / develop / doc / examples ) and the [ documentation ] ( https : / / nlohmann . github . io / json / ) in the [ README file ] ( ) <nl> + - : hammer : split [ unit tests ] ( https : / / github . 
com / nlohmann / json / tree / develop / test / src ) into individual independent binaries to speed up compilation and testing <nl> + - : white_check_mark : the test suite now contains * * 11201886 * * tests <nl> + <nl> + # # v2 . 0 . 8 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 8 / json . hpp ) ( 360 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 8 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 12 - 02 <nl> + - SHA - 256 : b70db0ad34f8e0e61dc3f0cbab88099336c9674c193d8a3439d93d6aca2d7120 <nl> + <nl> + # # # Summary <nl> + <nl> + This release combines a lot of small fixes and improvements . The fixes are backwards compatible . <nl> + <nl> + # # # Changes <nl> + - : bug : fixed a bug that froze the parser if a passed file was not found ( now , ` std : : invalid_argument ` is thrown ) <nl> + - : bug : fixed a bug that lead to an error of a file at EOF was parsed again ( now , ` std : : invalid_argument ` is thrown ) <nl> + - : sparkles : the well known functions [ ` emplace ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_a602f275f0359ab181221384989810604 . html # a602f275f0359ab181221384989810604 ) and [ ` emplace_back ` ] ( http : / / nlohmann . github . io / json / doxygen / classnlohmann_1_1basic__json_af8a435033327d9237da414afc1cce513 . html # af8a435033327d9237da414afc1cce513 ) have been added to JSON values and work as expected <nl> + - : zap : improved the performance of the serialization ( ` dump ` function ) <nl> + - : zap : improved the performance of the deserialization ( parser ) <nl> + - : construction_worker : some continuous integration images at [ Travis ] ( https : / / travis - ci . org / nlohmann / json ) were added and retired ; see [ here ] ( https : / / github . 
com / nlohmann / json # supported - compilers ) for the current continuous integration setup <nl> + - : construction_worker : the [ Coverity scan ] ( https : / / scan . coverity . com / projects / nlohmann - json ) works again <nl> + - : chart_with_upwards_trend : the benchmarking code has been improved to produce more stable results <nl> + - : memo : the [ README ] ( https : / / github . com / nlohmann / json / blob / develop / README . md ) file has been extended and includes more frequently asked examples <nl> + - : white_check_mark : the test suite now contains 8905518 tests <nl> + - : arrow_up : updated [ Catch ] ( https : / / github . com / philsquared / Catch ) to version 1 . 5 . 8 <nl> + <nl> + # # v2 . 0 . 7 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 7 / json . hpp ) ( 355 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 7 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 11 - 02 <nl> + - SHA - 256 : 5545c323670f8165bae90b9dc6078825e86ec310d96cc4e5b47233ea43715bbf <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes a few bugs in the JSON parser found in the [ Parsing JSON is a Minefield 💣 ] ( http : / / seriot . ch / parsing_json . html ) article . The fixes are backwards compatible . <nl> + <nl> + # # # Changes <nl> + - The article [ Parsing JSON is a Minefield 💣 ] ( http : / / seriot . ch / parsing_json . html ) discusses a lot of pitfalls of the JSON specification . When investigating the published test cases , a few bugs in the library were found and fixed : <nl> + - Files with less than 5 bytes can now be parsed without error . <nl> + - The library now properly rejects any file encoding other than UTF - 8 . Furthermore , incorrect surrogate pairs are properly detected and rejected . <nl> + - The library now accepts all but one " yes " test ( y_string_utf16 . 
json ) : UTF - 16 is not supported . <nl> + - The library rejects all but one " no " test ( n_number_then_00 . json ) : Null bytes are treated as end of file instead of an error . This allows to parse input from null - terminated strings . <nl> + - The string length passed to a user - defined string literal is now exploited to choose a more efficient constructor . <nl> + - A few grammar mistakes in the README file have been fixed . <nl> + <nl> + # # v2 . 0 . 6 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 6 / json . hpp ) ( 349 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 6 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 10 - 15 <nl> + - SHA256 : 459cc93d5e2f503e50c6d5876eb86bfea7daf405f5a567c5a2c9abc2383756ae <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes the semantics of ` operator [ ] ` for JSON Pointers ( see below ) . This fix is backwards compatible . <nl> + <nl> + # # # Changes <nl> + - * * ` operator [ ] ` for JSON Pointers * * now behaves like the other versions of ` operator [ ] ` and transforms ` null ` values into objects or arrays if required . This allows to created nested structues like ` j [ " / foo / bar / 2 " ] = 17 ` ( yielding ` { " foo " : " bar " : [ null , null , 17 ] } ` ) without problems . <nl> + - overworked a helper SFINAE function <nl> + - fixed some documentation issues <nl> + - fixed the CMake files to allow to run the test suite outside the main project directory <nl> + - restored test coverage to 100 % . <nl> + <nl> + # # v2 . 0 . 5 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 5 / json . hpp ) ( 347 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 5 / json . hpp . 
asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 09 - 14 <nl> + - SHA - 256 : 8b7565263a44e2b7d3b89808bc73d2d639037ff0c1f379e3d56dbd77e00b98d9 <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes a regression bug in the stream parser ( function ` parse ( ) ` and the ` < < ` / ` > > ` operators ) . This fix is backwards compatible . <nl> + <nl> + # # # Changes <nl> + - * * Bug fix * * : The end of a file stream was not detected properly which led to parse errors . This bug should have been fixed with 2 . 0 . 4 , but there was still a flaw in the code . <nl> + <nl> + # # v2 . 0 . 4 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 4 / json . hpp ) ( 347 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 4 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 09 - 11 <nl> + - SHA - 256 : 632ceec4c25c4e2153f71470d3a2b992c8355f6d8b4d627d05dd16095cd3aeda <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes a bug in the stream parser ( function ` parse ( ) ` and the ` < < ` / ` > > ` operators ) . This fix is backwards compatible . <nl> + <nl> + # # # Changes <nl> + - * * Bug fix * * : The end of a file stream was not detected properly which led to parse errors . <nl> + - Fixed a compiler warning about an unused variable . <nl> + <nl> + # # v2 . 0 . 3 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 3 / json . hpp ) ( 347 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 3 / json . hpp . 
asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 08 - 31 <nl> + - SHA - 256 : 535b73efe5546fde9e763c14aeadfc7b58183c0b3cd43c29741025aba6cf6bd3 <nl> + <nl> + # # # Summary <nl> + <nl> + This release combines a lot of small fixes and improvements . The release is backwards compatible . <nl> + <nl> + # # # Changes <nl> + - The * * parser / deserialization functions have been generalized * * to process any contiguous sequence of 1 - byte elements ( e . g . , ` char ` , ` unsigned char ` , ` uint8_t ` ) . This includes all kind of string representations ( string literals , char arrays , ` std : : string ` , ` const char * ` ) , contiguous containers ( C - style arrays , ` std : : vector ` , ` std : : array ` , ` std : : valarray ` , ` std : : initializer_list ` ) . User - defined containers providing random - access iterator access via ` std : : begin ` and ` std : : end ` can be used as well . See the documentation ( [ 1 ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_ace63ac4eb1dd7251a259d32e397461a3 . html # ace63ac4eb1dd7251a259d32e397461a3 ) , [ 2 ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_a90f05d55d9d0702c075cd281fd0d85ae . html # a90f05d55d9d0702c075cd281fd0d85ae ) , [ 3 ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_aeffd70f622f8f2a51fd3d95af64b63a7 . html # aeffd70f622f8f2a51fd3d95af64b63a7 ) , [ 4 ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_aa8dca2e91a6301c36890f844e64f0023 . html # aa8dca2e91a6301c36890f844e64f0023 ) ) for more information . Note that contiguous storage cannot be checked at compile time ; if any of the parse functions are called with a noncompliant container , the behavior is undefined and will most likely yield segmentation violation . The preconditions are enforced by an assertion unless the library is compiled with preprocessor symbol ` NDEBUG ` . 
<nl> + - As a general remark on * * assertions * * : The library uses assertions to preclude undefined behavior . A [ prominent example ] ( https : / / github . com / nlohmann / json / issues / 289 ) for this is the ` operator [ ] ` for const JSON objects . The behavior of this const version of the operator is undefined if the given key does not exist in the JSON object , because unlike the non - const version , it cannot add a ` null ` value at the given key . Assertions can be switched of by defining the preprocessor symbol ` NDEBUG ` . See the [ documentation of ` assert ` ] ( http : / / en . cppreference . com / w / cpp / error / assert ) for more information . <nl> + - In the course of cleaning up the parser / deserialization functions , the constructor [ ` basic_json ( std : : istream & , const parser_callback_t ) ` ] ( https : / / nlohmann . github . io / json / classnlohmann_1_1basic__json_a32350263eb105764844c5a85e156a255 . html # a32350263eb105764844c5a85e156a255 ) has been * * deprecated * * and will be deleted with the next major release 3 . 0 . 0 to unify the interface of the library . Deserialization will be done by stream operators or by calling one of the ` parse ` functions . That is , calls like ` json j ( i ) ; ` for an input stream ` i ` need to be replaced by ` json j = json : : parse ( i ) ; ` . Compilers will produce a deprecation warning if client code uses this function . <nl> + - Minor improvements : <nl> + - Improved the performance of the serialization by avoiding the re - creation of a locale object . <nl> + - Fixed two MSVC warnings . Compiling the test suite with ` / Wall ` now only warns about non - inlined functions ( C4710 ) and the deprecation of the constructor from input - stream ( C4996 ) . <nl> + - Some project internals : <nl> + - < img align = " right " src = " https : / / bestpractices . coreinfrastructure . org / assets / questions_page_badge - 17b338c0e8528d695d8676e23f39f17ca2b89bb88176370803ee69aeebcb5be4 . 
png " > The project has qualified for the [ Core Infrastructure Initiative Best Practices Badge ] ( https : / / bestpractices . coreinfrastructure . org / projects / 289 ) . While most requirements where already satisfied , some led to a more explicit documentation of quality - ensuring procedures . For instance , static analysis is now executed with every commit on the build server . Furthermore , the [ contribution guidelines document ] ( https : / / github . com / nlohmann / json / blob / develop / . github / CONTRIBUTING . md ) how to communicate security issues privately . <nl> + - The test suite has been overworked and split into several files to allow for faster compilation and analysis . The execute the test suite , simply execute ` make check ` . <nl> + - The continuous integration with [ Travis ] ( https : / / travis - ci . org / nlohmann / json ) was extended with Clang versions 3 . 6 . 0 to 3 . 8 . 1 and now includes 18 different compiler / OS combinations . <nl> + - An 11 - day run of [ American fuzzy lop ] ( http : / / lcamtuf . coredump . cx / afl / ) checked 962 million inputs on the parser and found no issue . <nl> + <nl> + # # v2 . 0 . 2 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 2 / json . hpp ) ( 338 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 2 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 07 - 31 <nl> + - SHA - 256 : 8e97b7965b4594b00998d6704465412360e1a0ed927badb51ded8b82291a8f3d <nl> + <nl> + # # # Summary <nl> + <nl> + This release combines a lot of small fixes and improvements . The release is backwards compatible . <nl> + <nl> + # # # Changes <nl> + - The * * parser * * has been overworked , and a lot of small issues have been fixed : <nl> + - Improved parser performance by avoiding recursion and using move semantics for the return value . 
<nl> + - Unescaped control charaters ` \ x10 ` - ` \ x1f ` are not accepted any more . <nl> + - Fixed a bug in the parser when reading from an input stream . <nl> + - Improved test case coverage for UTF - 8 parsing : now , all valid Unicode code points are tested both escaped and unescaped . <nl> + - The precision of output streams is now preserved by the parser . <nl> + - Started to check the * * code correctness * * by proving termination of important loops . Furthermore , individual assertions have been replaced by a more systematic function which checks the class invariants . Note that assertions should be switched off in production by defining the preprocessor macro ` NDEBUG ` , see the [ documentation of ` assert ` ] ( http : / / en . cppreference . com / w / cpp / error / assert ) . <nl> + - A lot of * * code cleanup * * : removed unused headers , fixed some compiler warnings , and fixed a build error for Windows - based Clang builds . <nl> + - Added some compile - time checks : <nl> + - Unsupported compilers are rejected during compilation with an ` # error ` command . <nl> + - Static assertion prohibits code with incompatible pointer types used in ` get_ptr ( ) ` . <nl> + - Improved the [ documentation ] ( https : / / nlohmann . github . io / json / ) , and adjusted the documentation script to choose the correct version of ` sed ` . <nl> + - Replaced a lot of " raw loops " by STL functions like ` std : : all_of ` , ` std : : for_each ` , or ` std : : accumulate ` . This facilitates reasoning about termination of loops and sometimes allowed to simplify functions to a single return statement . <nl> + - Implemented a ` value ( ) ` function for JSON pointers ( similar to ` at ` function ) . <nl> + - The Homebrew formula ( see [ Integration ] ( https : / / github . com / nlohmann / json # integration ) ) is now tested for all Xcode builds ( 6 . 1 - 8 . x ) with Travis . <nl> + - Avoided output to ` std : : cout ` in the test cases . <nl> + <nl> + # # v2 . 0 . 
1 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 1 / json . hpp ) ( 321 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 1 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 06 - 28 <nl> + - SHA - 256 : ef550fcd7df572555bf068e9ec4e9d3b9e4cdd441cecb0dcea9ea7fd313f72dd <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes a performance regression in the JSON serialization ( function ` dump ( ) ` ) . This fix is backwards compatible . <nl> + <nl> + # # # Changes <nl> + - The locale of the output stream ( or the internal string stream if a JSON value is serialized to a string ) is now adjusted once for the whole serialization instead of for each floating - point number . <nl> + - The locale of an output stream is now correctly reset to the previous value by the JSON library . <nl> + <nl> + <nl> + # # v2 . 0 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 0 / json . hpp ) ( 321 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v2 . 0 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 06 - 24 <nl> + - SHA - 256 : ac9e1fb25c2ac9ca5fc501fcd2fe3281fe04f07018a1b48820e7b1b11491bb6c <nl> + <nl> + # # # Summary <nl> + <nl> + This release adds several features such as JSON Pointers , JSON Patch , or support for 64 bit unsigned integers . Furthermore , several ( subtle ) bugs have been fixed . <nl> + <nl> + As ` noexcept ` and ` constexpr ` specifier have been added to several functions , the public API has effectively been changed in a ( potential ) non - backwards compatible manner . As we adhere to [ Semantic Versioning ] ( http : / / semver . org ) , this calls for a new major version , so say hello to 2 ️ ⃣ . 
0 ️ ⃣ . 0 ️ ⃣ . <nl> + <nl> + # # # Changes <nl> + - 🔟 A JSON value now uses ` uint64_t ` ( default value for template parameter ` NumberUnsignedType ` ) as data type for * * unsigned integer * * values . This type is used automatically when an unsigned number is parsed . Furthermore , constructors , conversion operators and an ` is_number_unsigned ( ) ` test have been added . <nl> + - 👉 * * JSON Pointer * * ( [ RFC 6901 ] ( https : / / tools . ietf . org / html / rfc6901 ) ) support : A JSON Pointer is a string ( similar to an XPath expression ) to address a value inside a structured JSON value . JSON Pointers can be used in ` at ( ) ` and ` operator [ ] ` functions . Furthermore , JSON values can be “ flattened ” to key / value pairs using ` flatten ( ) ` where each key is a JSON Pointer . The original value can be restored by “ unflattening ” the flattened value using ` unflatten ( ) ` . <nl> + - 🏥 * * JSON Patch * * ( [ RFC 6902 ] ( https : / / tools . ietf . org / html / rfc6902 ) ) support . A JSON Patch is a JSON value that describes the required edit operations ( add , change , remove , … ) to transform a JSON value into another one . A JSON Patch can be created with function ` diff ( const basic_json & ) ` and applied with ` patch ( const basic_json & ) ` . Note the created patches use a rather primitive algorithm so far and leave room for improvement . <nl> + - 🇪 🇺 The code is now * * locale - independent * * : Floating - point numbers are always serialized with a period ( ` . ` ) as decimal separator and ignores different settings from the locale . <nl> + - 🍺 * * Homebrew * * support : Install the library with ` brew tap nlohmann / json & & brew install nlohmann_json ` . <nl> + - Added constructor to create a JSON value by parsing a ` std : : istream ` ( e . g . , ` std : : stringstream ` or ` std : : ifstream ` ) . 
<nl> + - Added * * ` noexcept ` * * specifier to ` basic_json ( boolean_t ) ` , ` basic_json ( const number_integer_t ) ` , ` basic_json ( const int ) ` , ` basic_json ( const number_float_t ) ` , iterator functions ( ` begin ( ) ` , ` end ( ) ` , etc . ) <nl> + - When parsing numbers , the sign of ` 0 . 0 ` ( vs . ` - 0 . 0 ` ) is preserved . <nl> + - Improved MSVC 2015 , Android , and MinGW support . See [ README ] ( https : / / github . com / nlohmann / json # supported - compilers ) for more information . <nl> + - Improved test coverage ( added 2 , 225 , 386 tests ) . <nl> + - Removed some misuses of ` std : : move ` . <nl> + - Fixed several compiler warnings . <nl> + - Improved error messages from JSON parser . <nl> + - Updated to [ ` re2c ` ] ( http : / / re2c . org ) to version 0 . 16 to use a minimal DFAs for the lexer . <nl> + - Updated test suite to use [ Catch ] ( https : / / github . com / philsquared / Catch ) version 1 . 5 . 6 . <nl> + - Made type getters ( ` is_number ` , etc . ) and const value access ` constexpr ` . <nl> + - Functions ` push_back ` and ` operator + = ` now work with key / value pairs passed as initializer list , e . g . ` j_object + = { " key " , 1 } ` . <nl> + - Overworked ` CMakeLists . txt ` to make it easier to integrate the library into other projects . <nl> + <nl> + # # # Notes <nl> + - Parser error messages are still very vague and contain no information on the error location . <nl> + - The implemented ` diff ` function is rather primitive and does not create minimal diffs . <nl> + - The name of function ` iteration_wrapper ` may change in the future and the function will be deprecated in the next release . <nl> + - Roundtripping ( i . e . , parsing a JSON value from a string , serializing it , and comparing the strings ) of floating - point numbers is not 100 % accurate . Note that [ RFC 7159 ] ( https : / / tools . ietf . 
org / html / rfc7159 ) defines no format to internally represent numbers and states not requirement for roundtripping . Nevertheless , benchmarks like [ Native JSON Benchmark ] ( https : / / github . com / miloyip / nativejson - benchmark ) treat roundtripping deviations as conformance errors . <nl> + <nl> + <nl> + # # v1 . 1 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v1 . 1 . 0 / json . hpp ) ( 257 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v1 . 1 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2016 - 01 - 24 <nl> + - SHA - 256 : c0cf0e3017798ca6bb18e757ebc570d21a3bdac877845e2b9e9573d183ed2f05 <nl> + <nl> + # # # Summary <nl> + <nl> + This release fixes several small bugs and adds functionality in a backwards - compatible manner . Compared to the [ last version ( 1 . 0 . 0 ) ] ( https : / / github . com / nlohmann / json / releases / tag / v1 . 0 . 0 ) , the following changes have been made : <nl> + <nl> + # # # Changes <nl> + - _Fixed_ : * * Floating - point numbers * * are now serialized and deserialized properly such that rountripping works in more cases . [ # 185 , # 186 , # 190 , # 191 , # 194 ] <nl> + - _Added_ : The code now contains * * assertions * * to detect undefined behavior during development . As the standard function ` assert ` is used , the assertions can be switched off by defining the preprocessor symbol ` NDEBUG ` during compilation . [ # 168 ] <nl> + - _Added_ : It is now possible to get a * * reference * * to the stored values via the newly added function ` get_ref ( ) ` . [ # 128 , # 184 ] <nl> + - _Fixed_ : Access to object values via keys ( * * ` operator [ ] ` * * ) now works with all kind of string representations . [ # 171 , # 189 ] <nl> + - _Fixed_ : The code now compiles again with * * Microsoft Visual Studio 2015 * * . 
[ # 144 , # 167 , # 188 ] <nl> + - _Fixed_ : All required headers are now included . <nl> + - _Fixed_ : Typos and other small issues . [ # 162 , # 166 , # 175 , # 177 , # 179 , # 180 ] <nl> + <nl> + # # # Notes <nl> + <nl> + There are still known open issues ( # 178 , # 187 ) which will be fixed in version 2 . 0 . 0 . However , these fixes will require a small API change and will not be entirely backwards - compatible . <nl> + <nl> + <nl> + # # v1 . 0 . 0 <nl> + <nl> + ! ! ! summary " Files " <nl> + <nl> + - [ json . hpp ] ( https : / / github . com / nlohmann / json / releases / download / v1 . 0 . 0 / json . hpp ) ( 243 KB ) <nl> + - [ json . hpp . asc ] ( https : / / github . com / nlohmann / json / releases / download / v1 . 0 . 0 / json . hpp . asc ) ( 1 KB ) <nl> + <nl> + - Release date : 2015 - 12 - 28 <nl> + - SHA - 256 : 767dc2fab1819d7b9e19b6e456d61e38d21ef7182606ecf01516e3f5230446de <nl> + <nl> + # # # Summary <nl> + <nl> + This is the first official release . Compared to the [ prerelease version 1 . 0 . 0 - rc1 ] ( https : / / github . com / nlohmann / json / releases / tag / v1 . 0 . 0 - rc1 ) , only a few minor improvements have been made : <nl> + <nl> + # # # Changes <nl> + - _Changed_ : A * * UTF - 8 byte order mark * * is silently ignored . <nl> + - _Changed_ : ` sprintf ` is no longer used . <nl> + - _Changed_ : ` iterator_wrapper ` also works for const objects ; note : the name may change ! <nl> + - _Changed_ : * * Error messages * * during deserialization have been improved . <nl> + - _Added_ : The ` parse ` function now also works with type ` std : : istream & & ` . <nl> + - _Added_ : Function ` value ( key , default_value ) ` returns either a copy of an object ' s element at the specified key or a given default value if no element with the key exists . <nl> + - _Added_ : Public functions are tagged with the version they were introduced . This shall allow for better * * versioning * * in the future . 
<nl> + - _Added_ : All public functions and types are * * documented * * ( see http : / / nlohmann . github . io / json / doxygen / ) including executable examples . <nl> + - _Added_ : Allocation of all types ( in particular arrays , strings , and objects ) is now exception - safe . <nl> + - _Added_ : They descriptions of thrown exceptions have been overworked and are part of the tests suite and documentation . <nl> mmm a / doc / mkdocs / mkdocs . yml <nl> ppp b / doc / mkdocs / mkdocs . yml <nl> nav : <nl> - " Code of Conduct " : home / code_of_conduct . md <nl> - " FAQ " : home / faq . md <nl> - home / exceptions . md <nl> + - home / releases . md <nl> - home / design_goals . md <nl> - home / sponsors . md <nl> - Features : <nl>
: memo : add release page
nlohmann/json
3400af21cd066df7dacc4f3bcbdc45747a5aaa13
2020-05-24T11:40:43Z
mmm a / contrib / grpc - cmake / CMakeLists . txt <nl> ppp b / contrib / grpc - cmake / CMakeLists . txt <nl> <nl> cmake_minimum_required ( VERSION 3 . 5 . 1 ) <nl> <nl> + <nl> set ( GRPC_SOURCE_DIR $ { ClickHouse_SOURCE_DIR } / contrib / grpc ) <nl> set ( GRPC_INCLUDE_DIR $ { GRPC_SOURCE_DIR } / include / ) <nl> set ( GRPC_BINARY_DIR $ { ClickHouse_BINARY_DIR } / contrib / grpc ) <nl>
test
ClickHouse/ClickHouse
41d2e9d52da699af1c33148408807a331305e2e9
2020-05-11T12:12:54Z
mmm a / include / swift / Parse / Token . h <nl> ppp b / include / swift / Parse / Token . h <nl> enum class tok { <nl> # undef KEYWORD <nl> <nl> # define PUNCTUATOR ( X , Y ) X , <nl> + PUNCTUATOR ( l_paren_space , " ( " ) <nl> PUNCTUATOR ( l_paren , " ( " ) <nl> PUNCTUATOR ( r_paren , " ) " ) <nl> PUNCTUATOR ( l_brace , " { " ) <nl> mmm a / lib / Parse / Lexer . cpp <nl> ppp b / lib / Parse / Lexer . cpp <nl> void Lexer : : lexDigit ( ) { <nl> void Lexer : : lexImpl ( ) { <nl> assert ( CurPtr > = Buffer - > getBufferStart ( ) & & <nl> CurPtr < = Buffer - > getBufferEnd ( ) & & " Cur Char Pointer out of range ! " ) ; <nl> + <nl> + / / Keep track of the end of the previous token for whitespace sensitive tokens <nl> + / / like ' ( ' . <nl> + const char * EndOfPrevToken = CurPtr ; <nl> + <nl> Restart : <nl> / / Remember the start of the token so we can form the text range . <nl> const char * TokStart = CurPtr ; <nl> void Lexer : : lexImpl ( ) { <nl> / / Otherwise , this is the end of the buffer . Return EOF . <nl> return formToken ( tok : : eof , TokStart ) ; <nl> <nl> - case ' ( ' : return formToken ( tok : : l_paren , TokStart ) ; <nl> + case ' ( ' : { <nl> + / / This is either l_paren or l_paren_space depending on whether there is <nl> + / / whitespace before it and whether there was an operator before it . <nl> + if ( CurPtr - 1 ! = EndOfPrevToken ) <nl> + return formToken ( tok : : l_paren_space , TokStart ) ; / / had whitespace . <nl> + <nl> + / / ' ( ' at the start of a buffer is considered to be the start of an <nl> + / / expression . <nl> + if ( EndOfPrevToken = = Buffer - > getBufferStart ( ) ) <nl> + return formToken ( tok : : l_paren_space , TokStart ) ; <nl> + <nl> + char LastTokenChar = EndOfPrevToken [ - 1 ] ; <nl> + <nl> + / / If this ' ( ' was preceded by operator or some other punctuation ( but not <nl> + / / ' ) ' ) , then it is the start of an expression . 
<nl> + if ( isPunctuationIdentifierChar ( LastTokenChar ) | | <nl> + LastTokenChar = = ' ( ' | | LastTokenChar = = ' { ' | | LastTokenChar = = ' [ ' ) <nl> + return formToken ( tok : : l_paren_space , TokStart ) ; <nl> + <nl> + / / Otherwise , a ( without whitespace . <nl> + return formToken ( tok : : l_paren , TokStart ) ; <nl> + } <nl> case ' ) ' : return formToken ( tok : : r_paren , TokStart ) ; <nl> case ' { ' : return formToken ( tok : : l_brace , TokStart ) ; <nl> case ' } ' : return formToken ( tok : : r_brace , TokStart ) ; <nl> mmm a / lib / Parse / Parser . cpp <nl> ppp b / lib / Parse / Parser . cpp <nl> bool Parser : : parseVarName ( DeclVarName & Name ) { <nl> return false ; <nl> } <nl> <nl> - Name . LPLoc = Tok . getLoc ( ) ; <nl> - if ( parseToken ( tok : : l_paren , " expected identifier or ' ( ' in var name " ) ) <nl> + if ( Tok . isNot ( tok : : l_paren ) & & Tok . isNot ( tok : : l_paren_space ) ) { <nl> + error ( Tok . getLoc ( ) , " expected identifier or ' ( ' in var name " ) ; <nl> return true ; <nl> + } <nl> + Name . LPLoc = consumeToken ( ) ; <nl> <nl> SmallVector < DeclVarName * , 8 > ChildNames ; <nl> <nl> FuncDecl * Parser : : parseDeclFunc ( ) { <nl> } <nl> <nl> / / We force first type of a func declaration to be a tuple for consistency . <nl> - if ( Tok . isNot ( tok : : l_paren ) ) { <nl> + if ( Tok . isNot ( tok : : l_paren ) & & Tok . isNot ( tok : : l_paren_space ) ) { <nl> error ( Tok . getLoc ( ) , " expected ' ( ' in argument list of func declaration " ) ; <nl> return 0 ; <nl> } <nl> bool Parser : : parseType ( Type & Result , const Twine & Message ) { <nl> Result = S . Context . 
TheInt64Type ; <nl> consumeToken ( tok : : kw___builtin_int64_type ) ; <nl> break ; <nl> - case tok : : l_paren : { <nl> - SMLoc LPLoc = consumeToken ( tok : : l_paren ) ; <nl> + case tok : : l_paren : <nl> + case tok : : l_paren_space : { <nl> + SMLoc LPLoc = consumeToken ( ) ; <nl> if ( parseTypeTupleBody ( LPLoc , Result ) ) <nl> return true ; <nl> <nl> bool Parser : : parseTypeOneOfBody ( SMLoc OneOfLoc , const DeclAttributes & Attrs , <nl> <nl> static bool isStartOfExpr ( const Token & Tok , const Token & Next ) { <nl> if ( Tok . is ( tok : : numeric_constant ) | | Tok . is ( tok : : colon ) | | <nl> - Tok . is ( tok : : l_paren ) | | Tok . is ( tok : : dollarident ) | | <nl> + Tok . is ( tok : : l_paren_space ) | | Tok . is ( tok : : dollarident ) | | <nl> Tok . is ( tok : : identifier ) | | Tok . is ( tok : : oper ) ) <nl> return true ; <nl> <nl> / / " func ( " and " func { " are func expressions . " func x " is a func declaration . <nl> if ( Tok . is ( tok : : kw_func ) & & <nl> - ( Next . is ( tok : : l_paren ) | | Next . is ( tok : : l_brace ) ) ) <nl> + ( Next . is ( tok : : l_paren ) | | Next . is ( tok : : l_paren_space ) | | <nl> + Next . is ( tok : : l_brace ) ) ) <nl> return true ; <nl> return false ; <nl> } <nl> bool Parser : : parseExprPrimary ( SmallVectorImpl < Expr * > & ResVec , <nl> break ; <nl> } <nl> <nl> - case tok : : l_paren : <nl> + case tok : : l_paren_space : <nl> Result = parseExprParen ( ) ; <nl> break ; <nl> <nl> bool Parser : : parseExprPrimary ( SmallVectorImpl < Expr * > & ResVec , <nl> while ( 1 ) { <nl> / / Check for a . foo suffix . <nl> SMLoc TokLoc = Tok . getLoc ( ) ; <nl> + <nl> if ( consumeIf ( tok : : period ) ) { <nl> if ( Tok . isNot ( tok : : identifier ) & & Tok . isNot ( tok : : dollarident ) ) { <nl> error ( Tok . 
getLoc ( ) , " expected field name " ) ; <nl> bool Parser : : parseExprPrimary ( SmallVectorImpl < Expr * > & ResVec , <nl> continue ; <nl> } <nl> <nl> + / / Check for a ( ) suffix , which indicates a call . <nl> + if ( Tok . is ( tok : : l_paren ) ) { <nl> + if ( ( Result = parseExprParen ( ) ) ) return true ; <nl> + if ( ! Result . isSemaError ( ) ) <nl> + ResVec . push_back ( Result . get ( ) ) ; <nl> + continue ; <nl> + } <nl> + <nl> / / Check for a [ expr ] suffix . <nl> if ( consumeIf ( tok : : l_square ) ) { <nl> ParseResult < Expr > Idx ; <nl> ParseResult < Expr > Parser : : parseExprIdentifier ( ) { <nl> / / / ( ' . ' identifier ' = ' ) ? expr <nl> / / / <nl> ParseResult < Expr > Parser : : parseExprParen ( ) { <nl> - SMLoc LPLoc = consumeToken ( tok : : l_paren ) ; <nl> + SMLoc LPLoc = consumeToken ( ) ; <nl> <nl> SmallVector < Expr * , 8 > SubExprs ; <nl> SmallVector < Identifier , 8 > SubExprNames ; <nl> ParseResult < Expr > Parser : : parseExprFunc ( ) { <nl> Type Ty ; <nl> if ( Tok . is ( tok : : l_brace ) ) { <nl> Ty = TupleType : : getEmpty ( S . Context ) ; <nl> - } else if ( ! Tok . is ( tok : : l_paren ) ) { <nl> + } else if ( ! Tok . is ( tok : : l_paren ) & & ! Tok . is ( tok : : l_paren_space ) ) { <nl> error ( Tok . getLoc ( ) , " expected ' ( ' in func expression argument list " ) ; <nl> return true ; <nl> } else if ( parseType ( Ty ) ) { <nl>
Make the lexer disambiguate between a ( that is the start of an expression and a ( that is the start of
apple/swift
36f5427de387e8af4b885da7b3c7d6fa8d24d291
2011-08-12T01:46:39Z
mmm a / lib / Parse / ParseExpr . cpp <nl> ppp b / lib / Parse / ParseExpr . cpp <nl> Expr * Parser : : parseExprOperator ( ) { <nl> / / / <nl> / / / expr - identifier : <nl> / / / identifier <nl> - / / / identifier ' : : ' identifier <nl> + / / / scope - qualifier identifier <nl> ParseResult < Expr > Parser : : parseExprIdentifier ( ) { <nl> assert ( Tok . is ( tok : : identifier ) ) ; <nl> SMLoc Loc = Tok . getLoc ( ) ; <nl> mmm a / lib / Parse / ParseType . cpp <nl> ppp b / lib / Parse / ParseType . cpp <nl> bool Parser : : parseType ( Type & Result ) { <nl> / / / type - simple ' - > ' type <nl> / / / <nl> / / / type - simple : <nl> - / / / identifier <nl> + / / / type - identifier <nl> / / / type - tuple <nl> / / / type - oneof <nl> / / / type - protocol <nl> / / / <nl> + / / / type - identifier : <nl> + / / / identifier <nl> + / / / scope - qualifier identifier <nl> + / / / <nl> bool Parser : : parseType ( Type & Result , const Twine & Message ) { <nl> / / Parse type - simple first . <nl> switch ( Tok . getKind ( ) ) { <nl>
Document the scope - qualifier grammar correctly in the parser comments .
apple/swift
8a3f79b5bc3a436d54d6ecad82857ae1b34aa2cf
2011-09-06T21:55:41Z
mmm a / Telegram / SourceFiles / api / api_sending . cpp <nl> ppp b / Telegram / SourceFiles / api / api_sending . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history . h " <nl> # include " history / history_message . h " / / NewMessageFlags . <nl> # include " ui / text / text_entity . h " / / TextWithEntities . <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " mainwidget . h " <nl> # include " apiwrap . h " <nl> <nl> mmm a / Telegram / SourceFiles / apiwrap . cpp <nl> ppp b / Telegram / SourceFiles / apiwrap . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history_item_components . h " <nl> / / # include " history / feed / history_feed_section . h " / / # feed <nl> # include " storage / localstorage . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / stickers_box . h " <nl> # include " boxes / sticker_set_box . h " <nl> bool ApiWrap : : BlockedUsersSlice : : operator ! = ( const BlockedUsersSlice & other ) cons <nl> return ! ( * this = = other ) ; <nl> } <nl> <nl> - ApiWrap : : ApiWrap ( not_null < AuthSession * > session ) <nl> + ApiWrap : : ApiWrap ( not_null < Main : : Session * > session ) <nl> : _session ( session ) <nl> , _messageDataResolveDelayed ( [ = ] { resolveMessageDatas ( ) ; } ) <nl> , _webPagesTimer ( [ = ] { resolveWebPages ( ) ; } ) <nl> ApiWrap : : ApiWrap ( not_null < AuthSession * > session ) <nl> } ) ; <nl> } <nl> <nl> - AuthSession & ApiWrap : : session ( ) const { <nl> + Main : : Session & ApiWrap : : session ( ) const { <nl> return * _session ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / apiwrap . h <nl> ppp b / Telegram / SourceFiles / apiwrap . h <nl> For license and copyright information please follow this link : <nl> # include " data / data_messages . 
h " <nl> <nl> class TaskQueue ; <nl> - class AuthSession ; <nl> struct MessageGroupId ; <nl> struct SendingAlbum ; <nl> enum class SendMediaType ; <nl> struct FileLoadTo ; <nl> class mtpFileLoader ; <nl> <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> + <nl> namespace Data { <nl> struct UpdatedFileReferences ; <nl> class WallPaper ; <nl> class ApiWrap : public MTP : : Sender , private base : : Subscriber { <nl> bool operator ! = ( const BlockedUsersSlice & other ) const ; <nl> } ; <nl> <nl> - explicit ApiWrap ( not_null < AuthSession * > session ) ; <nl> + explicit ApiWrap ( not_null < Main : : Session * > session ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> void applyUpdates ( <nl> const MTPUpdates & updates , <nl> class ApiWrap : public MTP : : Sender , private base : : Subscriber { <nl> <nl> void sendDialogRequests ( ) ; <nl> <nl> - not_null < AuthSession * > _session ; <nl> + not_null < Main : : Session * > _session ; <nl> <nl> base : : flat_map < QString , int > _modifyRequests ; <nl> <nl> mmm a / Telegram / SourceFiles / app . cpp <nl> ppp b / Telegram / SourceFiles / app . cpp <nl> For license and copyright information please follow this link : <nl> # include " apiwrap . h " <nl> # include " numbers . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_overview . h " <nl> # include " styles / style_mediaview . h " <nl> # include " styles / style_chat_helpers . h " <nl> namespace App { <nl> void quit ( ) { <nl> if ( quitting ( ) ) { <nl> return ; <nl> - } else if ( AuthSession : : Exists ( ) <nl> + } else if ( Main : : Session : : Exists ( ) <nl> & & Auth ( ) . data ( ) . exportInProgress ( ) ) { <nl> Auth ( ) . data ( ) . stopExportWithConfirmation ( [ ] { App : : quit ( ) ; } ) ; <nl> return ; <nl> mmm a / Telegram / SourceFiles / boxes / add_contact_box . 
cpp <nl> ppp b / Telegram / SourceFiles / boxes / add_contact_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwindow . h " <nl> # include " apiwrap . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace { <nl> <nl> class RevokePublicLinkBox : : Inner : public TWidget , private MTP : : Sender { <nl> public : <nl> Inner ( <nl> QWidget * parent , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> Fn < void ( ) > revokeCallback ) ; <nl> <nl> protected : <nl> class RevokePublicLinkBox : : Inner : public TWidget , private MTP : : Sender { <nl> void paintChat ( Painter & p , const ChatRow & row , bool selected ) const ; <nl> void updateSelected ( ) ; <nl> <nl> - const not_null < AuthSession * > _session ; <nl> + const not_null < Main : : Session * > _session ; <nl> <nl> PeerData * _selected = nullptr ; <nl> PeerData * _pressed = nullptr ; <nl> class RevokePublicLinkBox : : Inner : public TWidget , private MTP : : Sender { <nl> <nl> AddContactBox : : AddContactBox ( <nl> QWidget * , <nl> - not_null < AuthSession * > session ) <nl> + not_null < Main : : Session * > session ) <nl> : AddContactBox ( nullptr , session , QString ( ) , QString ( ) , QString ( ) ) { <nl> } <nl> <nl> AddContactBox : : AddContactBox ( <nl> QWidget * , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> QString fname , <nl> QString lname , <nl> QString phone ) <nl> void AddContactBox : : updateButtons ( ) { <nl> <nl> GroupInfoBox : : GroupInfoBox ( <nl> QWidget * , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> Type type , <nl> const QString & title , <nl> Fn < void ( not_null < ChannelData * > ) > channelDone ) <nl> bool EditNameBox : : saveSelfFail ( const RPCError & error ) { <nl> <nl> RevokePublicLinkBox : : Inner 
: : Inner ( <nl> QWidget * parent , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> Fn < void ( ) > revokeCallback ) <nl> : TWidget ( parent ) <nl> , _session ( session ) <nl> RevokePublicLinkBox : : Inner : : Inner ( <nl> <nl> RevokePublicLinkBox : : RevokePublicLinkBox ( <nl> QWidget * , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> Fn < void ( ) > revokeCallback ) <nl> : _session ( session ) <nl> , _aboutRevoke ( <nl> mmm a / Telegram / SourceFiles / boxes / add_contact_box . h <nl> ppp b / Telegram / SourceFiles / boxes / add_contact_box . h <nl> For license and copyright information please follow this link : <nl> <nl> class ConfirmBox ; <nl> class PeerListBox ; <nl> - class AuthSession ; <nl> <nl> - constexpr auto kMaxBioLength = 70 ; <nl> - <nl> - style : : InputField CreateBioFieldStyle ( ) ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Ui { <nl> class FlatLabel ; <nl> class LinkButton ; <nl> class UserpicButton ; <nl> } / / namespace Ui <nl> <nl> + constexpr auto kMaxBioLength = 70 ; <nl> + <nl> enum class PeerFloodType { <nl> Send , <nl> InviteGroup , <nl> InviteChannel , <nl> } ; <nl> + <nl> + style : : InputField CreateBioFieldStyle ( ) ; <nl> + <nl> QString PeerFloodErrorText ( PeerFloodType type ) ; <nl> void ShowAddParticipantsError ( <nl> const QString & error , <nl> void ShowAddParticipantsError ( <nl> <nl> class AddContactBox : public BoxContent { <nl> public : <nl> - AddContactBox ( QWidget * , not_null < AuthSession * > session ) ; <nl> + AddContactBox ( QWidget * , not_null < Main : : Session * > session ) ; <nl> AddContactBox ( <nl> QWidget * , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> QString fname , <nl> QString lname , <nl> QString phone ) ; <nl> class AddContactBox : public BoxContent { <nl> void updateButtons ( ) ; <nl> void importDone 
( const MTPcontacts_ImportedContacts & result ) ; <nl> <nl> - const not_null < AuthSession * > _session ; <nl> + const not_null < Main : : Session * > _session ; <nl> <nl> object_ptr < Ui : : InputField > _first ; <nl> object_ptr < Ui : : InputField > _last ; <nl> class GroupInfoBox : public BoxContent , private MTP : : Sender { <nl> } ; <nl> GroupInfoBox ( <nl> QWidget * , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> Type type , <nl> const QString & title = QString ( ) , <nl> Fn < void ( not_null < ChannelData * > ) > channelDone = nullptr ) ; <nl> class GroupInfoBox : public BoxContent , private MTP : : Sender { <nl> void descriptionResized ( ) ; <nl> void updateMaxHeight ( ) ; <nl> <nl> - const not_null < AuthSession * > _session ; <nl> + const not_null < Main : : Session * > _session ; <nl> <nl> Type _type = Type : : Group ; <nl> QString _initialTitle ; <nl> class RevokePublicLinkBox : public BoxContent , public RPCSender { <nl> public : <nl> RevokePublicLinkBox ( <nl> QWidget * , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> Fn < void ( ) > revokeCallback ) ; <nl> <nl> protected : <nl> class RevokePublicLinkBox : public BoxContent , public RPCSender { <nl> void resizeEvent ( QResizeEvent * e ) override ; <nl> <nl> private : <nl> - const not_null < AuthSession * > _session ; <nl> + const not_null < Main : : Session * > _session ; <nl> <nl> object_ptr < Ui : : FlatLabel > _aboutRevoke ; <nl> <nl> mmm a / Telegram / SourceFiles / boxes / auto_download_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / auto_download_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / auto_download_box . h " <nl> <nl> # include " lang / lang_keys . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . 
h " <nl> # include " info / profile / info_profile_button . h " <nl> # include " ui / widgets / continuous_sliders . h " <nl> mmm a / Telegram / SourceFiles / boxes / background_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / background_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " lang / lang_keys . h " <nl> # include " ui / effects / round_checkbox . h " <nl> # include " ui / image / image . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " mtproto / sender . h " <nl> # include " data / data_session . h " <nl> mmm a / Telegram / SourceFiles / boxes / background_preview_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / background_preview_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history . h " <nl> # include " history / history_message . h " <nl> # include " history / view / history_view_message . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " data / data_session . h " <nl> # include " data / data_user . h " <nl> mmm a / Telegram / SourceFiles / boxes / change_phone_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / change_phone_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / text / text_utilities . h " <nl> # include " boxes / confirm_phone_box . h " <nl> # include " boxes / confirm_box . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " styles / style_boxes . h " <nl> <nl> mmm a / Telegram / SourceFiles / boxes / confirm_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / confirm_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_chat . 
h " <nl> # include " data / data_user . h " <nl> # include " base / unixtime . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " observer_peer . h " <nl> <nl> TextParseOptions _confirmBoxTextOptions = { <nl> mmm a / Telegram / SourceFiles / boxes / edit_caption_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / edit_caption_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / edit_caption_box . h " <nl> <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " chat_helpers / emoji_suggestions_widget . h " <nl> # include " chat_helpers / message_field . h " <nl> # include " chat_helpers / tabbed_panel . h " <nl> mmm a / Telegram / SourceFiles / boxes / edit_privacy_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / edit_privacy_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " base / binary_guard . h " <nl> # include " lang / lang_keys . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_user . h " <nl> # include " data / data_chat . h " <nl> # include " data / data_channel . h " <nl> mmm a / Telegram / SourceFiles / boxes / local_storage_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / local_storage_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " lang / lang_keys . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " layout . h " <nl> # include " styles / style_boxes . h " <nl> <nl> mmm a / Telegram / SourceFiles / boxes / mute_settings_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / mute_settings_box . 
cpp <nl> Copyright ( C ) 2017 , Nicholas Guriev < guriev - ns @ ya . ru > <nl> # include " boxes / mute_settings_box . h " <nl> <nl> # include " lang / lang_keys . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " styles / style_boxes . h " <nl> # include " ui / special_buttons . h " <nl> mmm a / Telegram / SourceFiles / boxes / passcode_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / passcode_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / confirm_phone_box . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " storage / localstorage . h " <nl> # include " ui / widgets / buttons . h " <nl> # include " ui / widgets / input_fields . h " <nl> mmm a / Telegram / SourceFiles / boxes / peer_list_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peer_list_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_dialogs . h " <nl> # include " styles / style_widgets . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " mainwidget . h " <nl> # include " ui / widgets / multi_select . h " <nl> # include " ui / widgets / labels . h " <nl> mmm a / Telegram / SourceFiles / boxes / peer_list_controllers . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peer_list_controllers . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / confirm_box . h " <nl> # include " observer_peer . h " <nl> # include " ui / widgets / checkbox . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " data / data_channel . 
h " <nl> # include " data / data_chat . h " <nl> mmm a / Telegram / SourceFiles / boxes / peers / add_participants_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peers / add_participants_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history . h " <nl> # include " dialogs / dialogs_indexed_list . h " <nl> # include " base / unixtime . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> # include " window / window_session_controller . h " <nl> mmm a / Telegram / SourceFiles / boxes / peers / edit_contact_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peers / edit_contact_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " lang / lang_keys . h " <nl> # include " window / window_controller . h " <nl> # include " ui / toast / toast . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_info . h " <nl> mmm a / Telegram / SourceFiles / boxes / peers / edit_linked_chat_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peers / edit_linked_chat_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / add_contact_box . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_info . h " <nl> <nl> mmm a / Telegram / SourceFiles / boxes / peers / edit_participant_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peers / edit_participant_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / core_cloud_password . 
h " <nl> # include " base / unixtime . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_info . h " <nl> <nl> mmm a / Telegram / SourceFiles / boxes / peers / edit_participants_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peers / edit_participants_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / peers / add_participants_box . h " <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / add_contact_box . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " lang / lang_keys . h " <nl> # include " mainwidget . h " <nl> mmm a / Telegram / SourceFiles / boxes / peers / edit_peer_info_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peers / edit_peer_info_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / peers / edit_peer_info_box . h " <nl> <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " boxes / add_contact_box . h " <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / peer_list_controllers . h " <nl> mmm a / Telegram / SourceFiles / boxes / peers / edit_peer_type_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / peers / edit_peer_type_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / peers / edit_peer_type_box . h " <nl> <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " boxes / add_contact_box . h " <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / peer_list_controllers . h " <nl> mmm a / Telegram / SourceFiles / boxes / rate_call_box . 
cpp <nl> ppp b / Telegram / SourceFiles / boxes / rate_call_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / buttons . h " <nl> # include " ui / widgets / input_fields . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> namespace { <nl> mmm a / Telegram / SourceFiles / boxes / self_destruction_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / self_destruction_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / checkbox . h " <nl> # include " ui / widgets / labels . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_boxes . h " <nl> <nl> SelfDestructionBox : : SelfDestructionBox ( <nl> mmm a / Telegram / SourceFiles / boxes / sessions_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / sessions_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / localstorage . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " base / unixtime . h " <nl> # include " boxes / confirm_box . h " <nl> mmm a / Telegram / SourceFiles / boxes / share_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / share_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_user . h " <nl> # include " data / data_session . h " <nl> # include " data / data_folder . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " core / application . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_history . 
h " <nl> <nl> - <nl> class ShareBox : : Inner <nl> : public Ui : : RpWidget <nl> , public RPCSender <nl> void ShareGameScoreByHash ( const QString & hash ) { <nl> } <nl> <nl> auto hashDataInts = reinterpret_cast < int32 * > ( hashData . data ( ) ) ; <nl> - if ( ! AuthSession : : Exists ( ) | | hashDataInts [ 0 ] ! = Auth ( ) . userId ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) | | hashDataInts [ 0 ] ! = Auth ( ) . userId ( ) ) { <nl> Ui : : show ( Box < InformBox > ( tr : : lng_share_wrong_user ( tr : : now ) ) ) ; <nl> return ; <nl> } <nl> mmm a / Telegram / SourceFiles / boxes / sticker_set_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / sticker_set_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " lottie / lottie_animation . h " <nl> # include " window / window_session_controller . h " <nl> # include " base / unixtime . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> mmm a / Telegram / SourceFiles / boxes / stickers_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / stickers_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / input_fields . h " <nl> # include " ui / image / image . h " <nl> # include " window / window_session_controller . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_chat_helpers . h " <nl> <nl> void StickersBox : : saveChanges ( ) { <nl> if ( _someArchivedLoaded ) { <nl> Local : : writeArchivedStickers ( ) ; <nl> } <nl> - if ( AuthSession : : Exists ( ) ) { <nl> + if ( Main : : Session : : Exists ( ) ) { <nl> Auth ( ) . api ( ) . saveStickerSets ( _installed . widget ( ) - > getOrder ( ) , _installed . 
widget ( ) - > getRemovedSets ( ) ) ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / boxes / url_auth_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / url_auth_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / checkbox . h " <nl> # include " ui / widgets / labels . h " <nl> # include " lang / lang_keys . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_boxes . h " <nl> <nl> mmm a / Telegram / SourceFiles / boxes / username_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / username_box . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / input_fields . h " <nl> # include " ui / toast / toast . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " data / data_user . h " <nl> # include " styles / style_boxes . h " <nl> mmm a / Telegram / SourceFiles / calls / calls_box_controller . cpp <nl> ppp b / Telegram / SourceFiles / calls / calls_box_controller . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history . h " <nl> # include " history / history_item . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " data / data_media_types . h " <nl> <nl> mmm a / Telegram / SourceFiles / calls / calls_call . cpp <nl> ppp b / Telegram / SourceFiles / calls / calls_call . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " calls / calls_call . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . 
h " <nl> # include " lang / lang_keys . h " <nl> # include " boxes / confirm_box . h " <nl> mmm a / Telegram / SourceFiles / calls / calls_instance . cpp <nl> ppp b / Telegram / SourceFiles / calls / calls_instance . cpp <nl> For license and copyright information please follow this link : <nl> <nl> # include " mtproto / connection . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " lang / lang_keys . h " <nl> # include " boxes / confirm_box . h " <nl> mmm a / Telegram / SourceFiles / calls / calls_panel . cpp <nl> ppp b / Telegram / SourceFiles / calls / calls_panel . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / application . h " <nl> # include " mainwindow . h " <nl> # include " lang / lang_keys . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " observer_peer . h " <nl> # include " platform / platform_specific . h " <nl> mmm a / Telegram / SourceFiles / chat_helpers / bot_keyboard . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / bot_keyboard . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history_item_components . h " <nl> # include " data / data_user . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_widgets . h " <nl> # include " styles / style_history . h " <nl> <nl> mmm a / Telegram / SourceFiles / chat_helpers / emoji_keywords . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / emoji_keywords . cpp <nl> For license and copyright information please follow this link : <nl> # include " platform / platform_info . h " <nl> # include " ui / emoji_config . h " <nl> # include " main / main_account . 
h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> namespace ChatHelpers { <nl> int EmojiKeywords : : LangPack : : maxQueryLength ( ) const { <nl> <nl> EmojiKeywords : : EmojiKeywords ( ) { <nl> crl : : on_main ( & _guard , [ = ] { <nl> - handleAuthSessionChanges ( ) ; <nl> + handleSessionChanges ( ) ; <nl> } ) ; <nl> } <nl> <nl> void EmojiKeywords : : langPackRefreshed ( ) { <nl> _refreshed . fire ( { } ) ; <nl> } <nl> <nl> - void EmojiKeywords : : handleAuthSessionChanges ( ) { <nl> + void EmojiKeywords : : handleSessionChanges ( ) { <nl> Core : : App ( ) . activeAccount ( ) . sessionValue ( <nl> - ) | rpl : : map ( [ ] ( AuthSession * session ) { <nl> + ) | rpl : : map ( [ ] ( Main : : Session * session ) { <nl> return session ? & session - > api ( ) : nullptr ; <nl> } ) | rpl : : start_with_next ( [ = ] ( ApiWrap * api ) { <nl> apiChanged ( api ) ; <nl> void EmojiKeywords : : refresh ( ) { <nl> } <nl> <nl> std : : vector < QString > EmojiKeywords : : languages ( ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return { } ; <nl> } <nl> refreshInputLanguages ( ) ; <nl> mmm a / Telegram / SourceFiles / chat_helpers / emoji_keywords . h <nl> ppp b / Telegram / SourceFiles / chat_helpers / emoji_keywords . h <nl> class EmojiKeywords final : private details : : EmojiKeywordsLangPackDelegate { <nl> ApiWrap * api ( ) override ; <nl> void langPackRefreshed ( ) override ; <nl> <nl> - void handleAuthSessionChanges ( ) ; <nl> + void handleSessionChanges ( ) ; <nl> void apiChanged ( ApiWrap * api ) ; <nl> void refreshInputLanguages ( ) ; <nl> [ [ nodiscard ] ] std : : vector < QString > languages ( ) ; <nl> mmm a / Telegram / SourceFiles / chat_helpers / emoji_suggestions_widget . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / emoji_suggestions_widget . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " platform / platform_specific . h " <nl> # include " core / application . h " <nl> # include " core / event_filter . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_chat_helpers . h " <nl> <nl> namespace Ui { <nl> mmm a / Telegram / SourceFiles / chat_helpers / field_autocomplete . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / field_autocomplete . cpp <nl> For license and copyright information please follow this link : <nl> # include " lottie / lottie_single_player . h " <nl> # include " ui / widgets / scroll_area . h " <nl> # include " ui / image / image . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " chat_helpers / stickers . h " <nl> # include " base / unixtime . h " <nl> # include " styles / style_history . h " <nl> mmm a / Telegram / SourceFiles / chat_helpers / message_field . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / message_field . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / window_session_controller . h " <nl> # include " lang / lang_keys . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_history . h " <nl> <nl> mmm a / Telegram / SourceFiles / chat_helpers / stickers . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / stickers . cpp <nl> For license and copyright information please follow this link : <nl> # include " apiwrap . h " <nl> # include " storage / localstorage . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " mainwindow . h " <nl> # include " ui / toast / toast . h " <nl> # include " ui / emoji_config . 
h " <nl> auto LottieCachedFromContent ( <nl> Method & & method , <nl> Storage : : Cache : : Key baseKey , <nl> LottieSize sizeTag , <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> const QByteArray & content , <nl> QSize box ) { <nl> const auto key = Storage : : Cache : : Key { <nl> mmm a / Telegram / SourceFiles / chat_helpers / stickers . h <nl> ppp b / Telegram / SourceFiles / chat_helpers / stickers . h <nl> For license and copyright information please follow this link : <nl> # include " ui / image / image_source . h " <nl> <nl> class DocumentData ; <nl> - class AuthSession ; <nl> + <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Storage { <nl> namespace Cache { <nl> mmm a / Telegram / SourceFiles / chat_helpers / stickers_list_widget . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / stickers_list_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / stickers_box . h " <nl> # include " boxes / confirm_box . h " <nl> # include " window / window_session_controller . h " / / GifPauseReason . <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " observer_peer . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_chat_helpers . h " <nl> StickersListWidget : : StickersListWidget ( <nl> } ) ) ; <nl> } <nl> <nl> - AuthSession & StickersListWidget : : session ( ) const { <nl> + Main : : Session & StickersListWidget : : session ( ) const { <nl> return controller ( ) - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / chat_helpers / stickers_list_widget . h <nl> ppp b / Telegram / SourceFiles / chat_helpers / stickers_list_widget . h <nl> For license and copyright information please follow this link : <nl> # include " base / variant . h " <nl> # include " base / timer . 
h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Window { <nl> class SessionController ; <nl> class StickersListWidget <nl> QWidget * parent , <nl> not_null < Window : : SessionController * > controller ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> rpl : : producer < not_null < DocumentData * > > chosen ( ) const ; <nl> rpl : : producer < > scrollUpdated ( ) const ; <nl> mmm a / Telegram / SourceFiles / chat_helpers / tabbed_selector . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / tabbed_selector . cpp <nl> TabbedSelector : : TabbedSelector ( <nl> <nl> TabbedSelector : : ~ TabbedSelector ( ) = default ; <nl> <nl> - AuthSession & TabbedSelector : : session ( ) const { <nl> + Main : : Session & TabbedSelector : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / chat_helpers / tabbed_selector . h <nl> ppp b / Telegram / SourceFiles / chat_helpers / tabbed_selector . h <nl> For license and copyright information please follow this link : <nl> # include " ui / effects / animations . h " <nl> # include " ui / effects / panel_animation . h " <nl> # include " mtproto / sender . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace InlineBots { <nl> class Result ; <nl> class TabbedSelector : public Ui : : RpWidget , private base : : Subscriber { <nl> Mode mode = Mode : : Full ) ; <nl> ~ TabbedSelector ( ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> rpl : : producer < EmojiPtr > emojiChosen ( ) const ; <nl> rpl : : producer < not_null < DocumentData * > > fileChosen ( ) const ; <nl> mmm a / Telegram / SourceFiles / core / application . cpp <nl> ppp b / Telegram / SourceFiles / core / application . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwindow . h " <nl> # include " dialogs / dialogs_entry . h " <nl> # include " history / history . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " calls / calls_instance . h " <nl> # include " lang / lang_file_parser . h " <nl> Application : : ~ Application ( ) { <nl> _window . reset ( ) ; <nl> _mediaView . reset ( ) ; <nl> <nl> - / / This can call writeMap ( ) that serializes AuthSession . <nl> - / / In case it gets called after authSessionDestroy ( ) we get missing data . <nl> + / / This can call writeMap ( ) that serializes Main : : Session . <nl> + / / In case it gets called after destroySession ( ) we get missing data . <nl> Local : : finish ( ) ; <nl> <nl> / / Some MTP requests can be cancelled from data clearing . <nl> mmm a / Telegram / SourceFiles / core / application . h <nl> ppp b / Telegram / SourceFiles / core / application . h <nl> For license and copyright information please follow this link : <nl> # include " mtproto / auth_key . h " <nl> # include " base / timer . h " <nl> <nl> - class AuthSessionSettings ; <nl> class MainWindow ; <nl> class MainWidget ; <nl> class FileUploader ; <nl> class Application final : public QObject , private base : : Subscriber { <nl> return * _account ; <nl> } <nl> <nl> - / / AuthSession component . <nl> + / / Main : : Session component . <nl> int unreadBadge ( ) const ; <nl> bool unreadBadgeMuted ( ) const ; <nl> <nl> mmm a / Telegram / SourceFiles / core / changelogs . cpp <nl> ppp b / Telegram / SourceFiles / core / changelogs . 
cpp <nl> QString FormatVersionPrecise ( int version ) { <nl> <nl> } / / namespace <nl> <nl> - Changelogs : : Changelogs ( not_null < AuthSession * > session , int oldVersion ) <nl> + Changelogs : : Changelogs ( not_null < Main : : Session * > session , int oldVersion ) <nl> : _session ( session ) <nl> , _oldVersion ( oldVersion ) { <nl> _session - > data ( ) . chatsListChanges ( <nl> Changelogs : : Changelogs ( not_null < AuthSession * > session , int oldVersion ) <nl> } <nl> <nl> std : : unique_ptr < Changelogs > Changelogs : : Create ( <nl> - not_null < AuthSession * > session ) { <nl> + not_null < Main : : Session * > session ) { <nl> const auto oldVersion = Local : : oldMapVersion ( ) ; <nl> return ( oldVersion > 0 & & oldVersion < AppVersion ) <nl> ? std : : make_unique < Changelogs > ( session , oldVersion ) <nl> mmm a / Telegram / SourceFiles / core / changelogs . h <nl> ppp b / Telegram / SourceFiles / core / changelogs . h <nl> For license and copyright information please follow this link : <nl> <nl> # include " base / weak_ptr . 
h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Core { <nl> <nl> class Changelogs : public base : : has_weak_ptr , private base : : Subscriber { <nl> public : <nl> - Changelogs ( not_null < AuthSession * > session , int oldVersion ) ; <nl> + Changelogs ( not_null < Main : : Session * > session , int oldVersion ) ; <nl> <nl> static std : : unique_ptr < Changelogs > Create ( <nl> - not_null < AuthSession * > session ) ; <nl> + not_null < Main : : Session * > session ) ; <nl> <nl> private : <nl> void requestCloudLogs ( ) ; <nl> class Changelogs : public base : : has_weak_ptr , private base : : Subscriber { <nl> void addBetaLogs ( ) ; <nl> void addBetaLog ( int changeVersion , const char * changes ) ; <nl> <nl> - const not_null < AuthSession * > _session ; <nl> + const not_null < Main : : Session * > _session ; <nl> const int _oldVersion = 0 ; <nl> rpl : : lifetime _chatsSubscription ; <nl> bool _addedSomeLocal = false ; <nl> mmm a / Telegram / SourceFiles / core / click_handler_types . cpp <nl> ppp b / Telegram / SourceFiles / core / click_handler_types . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / local_url_handlers . h " <nl> # include " core / file_utilities . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " platform / platform_specific . h " <nl> # include " history / view / history_view_element . h " <nl> # include " history / history_item . h " <nl> mmm a / Telegram / SourceFiles / core / file_utilities . cpp <nl> ppp b / Telegram / SourceFiles / core / file_utilities . cpp <nl> QString DefaultDownloadPath ( ) { <nl> return QStandardPaths : : writableLocation ( <nl> QStandardPaths : : DownloadLocation ) <nl> + ' / ' <nl> - + ( AuthSession : : Exists ( ) & & Auth ( ) . supportMode ( ) <nl> + + ( Main : : Session : : Exists ( ) & & Auth ( ) . 
supportMode ( ) <nl> ? " Tsupport Desktop " <nl> : str_const_toString ( AppName ) ) <nl> + ' / ' ; <nl> mmm a / Telegram / SourceFiles / core / local_url_handlers . cpp <nl> ppp b / Telegram / SourceFiles / core / local_url_handlers . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_channel . h " <nl> # include " mainwindow . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> namespace Core { <nl> namespace { <nl> using Match = qthelp : : RegularExpressionMatch ; <nl> <nl> bool JoinGroupByHash ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> const auto hash = match - > captured ( 1 ) ; <nl> bool JoinGroupByHash ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool ShowStickerSet ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> Core : : App ( ) . hideMediaView ( ) ; <nl> bool SetLanguage ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool ShareUrl ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> auto params = url_parse_params ( <nl> bool ShareUrl ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool ConfirmPhone ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! 
Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> auto params = url_parse_params ( <nl> bool ConfirmPhone ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool ShareGameScore ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> const auto params = url_parse_params ( <nl> bool ShowPassport ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool ShowWallPaper ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> const auto params = url_parse_params ( <nl> bool ShowWallPaper ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool ResolveUsername ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> const auto params = url_parse_params ( <nl> bool ResolveUsername ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool ResolvePrivatePost ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> const auto params = url_parse_params ( <nl> bool ResolvePrivatePost ( const Match & match , const QVariant & context ) { <nl> } <nl> <nl> bool HandleUnknown ( const Match & match , const QVariant & context ) { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return false ; <nl> } <nl> const auto request = match - > captured ( 1 ) ; <nl> mmm a / Telegram / SourceFiles / data / data_channel . cpp <nl> ppp b / Telegram / SourceFiles / data / data_channel . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " base / unixtime . h " <nl> # include " history / history . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> namespace { <nl> mmm a / Telegram / SourceFiles / data / data_channel_admins . cpp <nl> ppp b / Telegram / SourceFiles / data / data_channel_admins . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history . h " <nl> # include " data / data_channel . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Data { <nl> <nl> mmm a / Telegram / SourceFiles / data / data_chat . cpp <nl> ppp b / Telegram / SourceFiles / data / data_chat . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_channel . h " <nl> # include " data / data_session . h " <nl> # include " history / history . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " observer_peer . h " <nl> <nl> mmm a / Telegram / SourceFiles / data / data_document . cpp <nl> ppp b / Telegram / SourceFiles / data / data_document . cpp <nl> Data : : Session & DocumentData : : owner ( ) const { <nl> return * _owner ; <nl> } <nl> <nl> - AuthSession & DocumentData : : session ( ) const { <nl> + Main : : Session & DocumentData : : session ( ) const { <nl> return _owner - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / data / data_document . h <nl> ppp b / Telegram / SourceFiles / data / data_document . h <nl> For license and copyright information please follow this link : <nl> # include " data / data_types . h " <nl> # include " ui / image / image . 
h " <nl> <nl> + class mtpFileLoader ; <nl> + <nl> namespace Images { <nl> class Source ; <nl> } / / namespace Images <nl> namespace Data { <nl> class Session ; <nl> } / / namespace Data <nl> <nl> - class AuthSession ; <nl> - class mtpFileLoader ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> inline uint64 mediaMix32To64 ( int32 a , int32 b ) { <nl> return ( uint64 ( * reinterpret_cast < uint32 * > ( & a ) ) < < 32 ) <nl> class DocumentData { <nl> DocumentData ( not_null < Data : : Session * > owner , DocumentId id ) ; <nl> <nl> [ [ nodiscard ] ] Data : : Session & owner ( ) const ; <nl> - [ [ nodiscard ] ] AuthSession & session ( ) const ; <nl> + [ [ nodiscard ] ] Main : : Session & session ( ) const ; <nl> <nl> void setattributes ( <nl> const QVector < MTPDocumentAttribute > & attributes ) ; <nl> mmm a / Telegram / SourceFiles / data / data_document_good_thumbnail . cpp <nl> ppp b / Telegram / SourceFiles / data / data_document_good_thumbnail . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_file_origin . h " <nl> # include " media / clip / media_clip_reader . h " <nl> # include " lottie / lottie_animation . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Data { <nl> namespace { <nl> mmm a / Telegram / SourceFiles / data / data_folder . cpp <nl> ppp b / Telegram / SourceFiles / data / data_folder . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / application . h " <nl> # include " main / main_account . h " <nl> / / # include " storage / storage_feed_messages . h " / / # feed <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " observer_peer . h " <nl> # include " apiwrap . h " <nl> # include " mainwidget . 
h " <nl> constexpr auto kLoadedChatsMinCount = 20 ; <nl> constexpr auto kShowChatNamesCount = 8 ; <nl> <nl> rpl : : producer < int > PinnedDialogsInFolderMaxValue ( <nl> - not_null < AuthSession * > session ) { <nl> + not_null < Main : : Session * > session ) { <nl> return rpl : : single ( <nl> rpl : : empty_value ( ) <nl> ) | rpl : : then ( <nl> mmm a / Telegram / SourceFiles / data / data_folder . h <nl> ppp b / Telegram / SourceFiles / data / data_folder . h <nl> For license and copyright information please follow this link : <nl> # include " data / data_messages . h " <nl> <nl> class ChannelData ; <nl> - class AuthSession ; <nl> + <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Data { <nl> <nl> mmm a / Telegram / SourceFiles / data / data_peer . cpp <nl> ppp b / Telegram / SourceFiles / data / data_peer . cpp <nl> For license and copyright information please follow this link : <nl> # include " observer_peer . h " <nl> # include " apiwrap . h " <nl> # include " boxes / confirm_box . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " core / application . h " <nl> # include " mainwindow . h " <nl> # include " window / window_session_controller . h " <nl> Data : : Session & PeerData : : owner ( ) const { <nl> return * _owner ; <nl> } <nl> <nl> - AuthSession & PeerData : : session ( ) const { <nl> + Main : : Session & PeerData : : session ( ) const { <nl> return _owner - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / data / data_peer . h <nl> ppp b / Telegram / SourceFiles / data / data_peer . h <nl> For license and copyright information please follow this link : <nl> # include " data / data_flags . h " <nl> # include " data / data_notify_settings . 
h " <nl> <nl> - namespace Ui { <nl> - class EmptyUserpic ; <nl> - } / / namespace Ui <nl> - <nl> - class AuthSession ; <nl> class PeerData ; <nl> class UserData ; <nl> class ChatData ; <nl> class ChannelData ; <nl> <nl> + namespace Ui { <nl> + class EmptyUserpic ; <nl> + } / / namespace Ui <nl> + <nl> namespace Main { <nl> class Account ; <nl> + class Session ; <nl> } / / namespace Main <nl> <nl> namespace Data { <nl> class PeerData { <nl> static constexpr auto kServiceNotificationsId = peerFromUser ( 777000 ) ; <nl> <nl> [ [ nodiscard ] ] Data : : Session & owner ( ) const ; <nl> - [ [ nodiscard ] ] AuthSession & session ( ) const ; <nl> + [ [ nodiscard ] ] Main : : Session & session ( ) const ; <nl> [ [ nodiscard ] ] Main : : Account & account ( ) const ; <nl> <nl> [ [ nodiscard ] ] bool isUser ( ) const { <nl> mmm a / Telegram / SourceFiles / data / data_photo . cpp <nl> ppp b / Telegram / SourceFiles / data / data_photo . cpp <nl> Data : : Session & PhotoData : : owner ( ) const { <nl> return * _owner ; <nl> } <nl> <nl> - AuthSession & PhotoData : : session ( ) const { <nl> + Main : : Session & PhotoData : : session ( ) const { <nl> return _owner - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / data / data_photo . h <nl> ppp b / Telegram / SourceFiles / data / data_photo . h <nl> For license and copyright information please follow this link : <nl> <nl> # include " data / data_types . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Data { <nl> class Session ; <nl> class PhotoData { <nl> PhotoData ( not_null < Data : : Session * > owner , PhotoId id ) ; <nl> <nl> [ [ nodiscard ] ] Data : : Session & owner ( ) const ; <nl> - [ [ nodiscard ] ] AuthSession & session ( ) const ; <nl> + [ [ nodiscard ] ] Main : : Session & session ( ) const ; <nl> <nl> void automaticLoad ( <nl> Data : : FileOrigin origin , <nl> mmm a / Telegram / SourceFiles / data / data_poll . 
cpp <nl> ppp b / Telegram / SourceFiles / data / data_poll . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_poll . h " <nl> <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace { <nl> <nl> mmm a / Telegram / SourceFiles / data / data_pts_waiter . cpp <nl> ppp b / Telegram / SourceFiles / data / data_pts_waiter . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_pts_waiter . h " <nl> <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> uint64 PtsWaiter : : ptsKey ( PtsSkippedQueue queue , int32 pts ) { <nl> mmm a / Telegram / SourceFiles / data / data_search_controller . cpp <nl> ppp b / Telegram / SourceFiles / data / data_search_controller . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " data / data_search_controller . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " data / data_messages . h " <nl> # include " data / data_channel . h " <nl> mmm a / Telegram / SourceFiles / data / data_session . cpp <nl> ppp b / Telegram / SourceFiles / data / data_session . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " mainwidget . h " <nl> # include " core / application . 
h " <nl> MTPPhotoSize FindDocumentThumbnail ( const MTPDdocument & data ) { <nl> } <nl> <nl> rpl : : producer < int > PinnedDialogsCountMaxValue ( <nl> - not_null < AuthSession * > session ) { <nl> + not_null < Main : : Session * > session ) { <nl> return rpl : : single ( <nl> rpl : : empty_value ( ) <nl> ) | rpl : : then ( <nl> bool PruneDestroyedAndSet ( <nl> <nl> } / / namespace <nl> <nl> - Session : : Session ( not_null < AuthSession * > session ) <nl> + Session : : Session ( not_null < Main : : Session * > session ) <nl> : _session ( session ) <nl> , _cache ( Core : : App ( ) . databases ( ) . get ( <nl> Local : : cachePath ( ) , <nl> mmm a / Telegram / SourceFiles / data / data_session . h <nl> ppp b / Telegram / SourceFiles / data / data_session . h <nl> class Element ; <nl> class ElementDelegate ; <nl> } / / namespace HistoryView <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Media { <nl> namespace Clip { <nl> class Session final { <nl> QString text ; <nl> } ; <nl> <nl> - explicit Session ( not_null < AuthSession * > session ) ; <nl> + explicit Session ( not_null < Main : : Session * > session ) ; <nl> ~ Session ( ) ; <nl> <nl> - [ [ nodiscard ] ] AuthSession & session ( ) const { <nl> + [ [ nodiscard ] ] Main : : Session & session ( ) const { <nl> return * _session ; <nl> } <nl> <nl> class Session final { <nl> <nl> void setWallpapers ( const QVector < MTPWallPaper > & data , int32 hash ) ; <nl> <nl> - not_null < AuthSession * > _session ; <nl> + not_null < Main : : Session * > _session ; <nl> <nl> Storage : : DatabasePointer _cache ; <nl> Storage : : DatabasePointer _bigFileCache ; <nl> mmm a / Telegram / SourceFiles / data / data_shared_media . cpp <nl> ppp b / Telegram / SourceFiles / data / data_shared_media . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_shared_media . h " <nl> <nl> # include < rpl / combine . 
h > <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " storage / storage_facade . h " <nl> # include " storage / storage_shared_media . h " <nl> mmm a / Telegram / SourceFiles / data / data_types . cpp <nl> ppp b / Telegram / SourceFiles / data / data_types . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / input_fields . h " <nl> # include " storage / cache / storage_cache_types . h " <nl> # include " base / openssl_help . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Data { <nl> namespace { <nl> mmm a / Telegram / SourceFiles / data / data_user_photos . cpp <nl> ppp b / Telegram / SourceFiles / data / data_user_photos . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " data / data_user_photos . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " data / data_session . h " <nl> # include " storage / storage_facade . h " <nl> mmm a / Telegram / SourceFiles / data / data_wall_paper . cpp <nl> ppp b / Telegram / SourceFiles / data / data_wall_paper . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " storage / serialize_common . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Data { <nl> namespace { <nl> mmm a / Telegram / SourceFiles / data / data_web_page . cpp <nl> ppp b / Telegram / SourceFiles / data / data_web_page . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " data / data_web_page . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . 
h " <nl> # include " mainwidget . h " <nl> # include " data / data_session . h " <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_entry . cpp <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_entry . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " data / data_folder . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " history / history_item . h " <nl> # include " history / history . h " <nl> # include " styles / style_dialogs . h " / / st : : dialogsTextWidthMin <nl> Data : : Session & Entry : : owner ( ) const { <nl> return * _owner ; <nl> } <nl> <nl> - AuthSession & Entry : : session ( ) const { <nl> + Main : : Session & Entry : : session ( ) const { <nl> return _owner - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_entry . h <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_entry . h <nl> For license and copyright information please follow this link : <nl> <nl> # include " dialogs / dialogs_key . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Data { <nl> class Session ; <nl> class Entry { <nl> virtual ~ Entry ( ) = default ; <nl> <nl> Data : : Session & owner ( ) const ; <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> PositionChange adjustByPosInChatList ( Mode list ) ; <nl> bool inChatList ( Mode list = Mode : : All ) const { <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_indexed_list . cpp <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_indexed_list . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " dialogs / dialogs_indexed_list . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . 
h " <nl> # include " data / data_session . h " <nl> # include " history / history . h " <nl> <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_inner_widget . cpp <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_inner_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / themes / window_theme . h " <nl> # include " observer_peer . h " <nl> # include " chat_helpers / stickers . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " window / notifications_manager . h " <nl> # include " window / window_session_controller . h " <nl> # include " window / window_peer_menu . h " <nl> InnerWidget : : InnerWidget ( <nl> setupShortcuts ( ) ; <nl> } <nl> <nl> - AuthSession & InnerWidget : : session ( ) const { <nl> + Main : : Session & InnerWidget : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_inner_widget . h <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_inner_widget . h <nl> For license and copyright information please follow this link : <nl> # include " ui / rp_widget . h " <nl> # include " base / flags . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Ui { <nl> class IconButton ; <nl> public slots : <nl> NextOrOriginal , <nl> } ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> void dialogRowReplaced ( Row * oldRow , Row * newRow ) ; <nl> <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_search_from_controllers . cpp <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_search_from_controllers . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_chat . h " <nl> # include " data / data_user . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . 
h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> namespace Dialogs { <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_widget . cpp <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " lang / lang_keys . h " <nl> # include " mainwindow . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " core / application . h " <nl> # include " core / event_filter . h " <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_widget . h <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_widget . h <nl> For license and copyright information please follow this link : <nl> # include " dialogs / dialogs_key . h " <nl> # include " ui / special_buttons . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace HistoryView { <nl> class TopBarWidget ; <nl> mmm a / Telegram / SourceFiles / export / view / export_view_panel_controller . cpp <nl> ppp b / Telegram / SourceFiles / export / view / export_view_panel_controller . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / localstorage . h " <nl> # include " core / file_utilities . h " <nl> # include " platform / platform_info . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " base / unixtime . h " <nl> # include " styles / style_export . h " <nl> mmm a / Telegram / SourceFiles / export / view / export_view_settings . cpp <nl> ppp b / Telegram / SourceFiles / export / view / export_view_settings . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / file_utilities . h " <nl> # include " boxes / calendar_box . 
h " <nl> # include " base / unixtime . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_widgets . h " <nl> # include " styles / style_export . h " <nl> # include " styles / style_boxes . h " <nl> mmm a / Telegram / SourceFiles / facades . cpp <nl> ppp b / Telegram / SourceFiles / facades . cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwindow . h " <nl> # include " mainwidget . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / url_auth_box . h " <nl> # include " window / layer_widget . h " <nl> mmm a / Telegram / SourceFiles / history / admin_log / history_admin_log_inner . cpp <nl> ppp b / Telegram / SourceFiles / history / admin_log / history_admin_log_inner . cpp <nl> For license and copyright information please follow this link : <nl> # include " apiwrap . h " <nl> # include " layout . h " <nl> # include " window / window_session_controller . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " ui / widgets / popup_menu . h " <nl> # include " ui / image / image . h " <nl> # include " ui / text / text_utilities . h " <nl> InnerWidget : : InnerWidget ( <nl> requestAdmins ( ) ; <nl> } <nl> <nl> - AuthSession & InnerWidget : : session ( ) const { <nl> + Main : : Session & InnerWidget : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / history / admin_log / history_admin_log_inner . h <nl> ppp b / Telegram / SourceFiles / history / admin_log / history_admin_log_inner . h <nl> For license and copyright information please follow this link : <nl> # include " mtproto / sender . h " <nl> # include " base / timer . 
h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace HistoryView { <nl> class Element ; <nl> class InnerWidget final <nl> not_null < Window : : SessionController * > controller , <nl> not_null < ChannelData * > channel ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> base : : Observable < void > showSearchSignal ; <nl> base : : Observable < int > scrollToSignal ; <nl> mmm a / Telegram / SourceFiles / history / admin_log / history_admin_log_item . cpp <nl> ppp b / Telegram / SourceFiles / history / admin_log / history_admin_log_item . cpp <nl> For license and copyright information please follow this link : <nl> # include " base / unixtime . h " <nl> # include " core / application . h " <nl> # include " mainwindow . h " / / App : : wnd ( ) - > sessionController <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace AdminLog { <nl> namespace { <nl> mmm a / Telegram / SourceFiles / history / history . cpp <nl> ppp b / Telegram / SourceFiles / history / history . cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " window / notifications_manager . h " <nl> # include " calls / calls_instance . h " <nl> # include " storage / localstorage . h " <nl> mmm a / Telegram / SourceFiles / history / history . h <nl> ppp b / Telegram / SourceFiles / history / history . 
h <nl> class HistoryItem ; <nl> class HistoryMessage ; <nl> class HistoryService ; <nl> class HistoryMedia ; <nl> - class AuthSession ; <nl> + <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Data { <nl> struct Draft ; <nl> mmm a / Telegram / SourceFiles / history / history_inner_widget . cpp <nl> ppp b / Telegram / SourceFiles / history / history_inner_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwindow . h " <nl> # include " mainwidget . h " <nl> # include " layout . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " core / application . h " <nl> # include " apiwrap . h " <nl> # include " platform / platform_info . h " <nl> HistoryInner : : HistoryInner ( <nl> } , lifetime ( ) ) ; <nl> } <nl> <nl> - AuthSession & HistoryInner : : session ( ) const { <nl> + Main : : Session & HistoryInner : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / history / history_inner_widget . h <nl> ppp b / Telegram / SourceFiles / history / history_inner_widget . h <nl> class HistoryInner <nl> Ui : : ScrollArea * scroll , <nl> not_null < History * > history ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> void messagesReceived ( PeerData * peer , const QVector < MTPMessage > & messages ) ; <nl> void messagesReceivedDown ( PeerData * peer , const QVector < MTPMessage > & messages ) ; <nl> mmm a / Telegram / SourceFiles / history / history_item . cpp <nl> ppp b / Telegram / SourceFiles / history / history_item . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / storage_facade . h " <nl> # include " storage / storage_shared_media . h " <nl> / / # include " storage / storage_feed_messages . h " / / # feed <nl> - # include " auth_session . 
h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " media / audio / media_audio . h " <nl> # include " core / application . h " <nl> mmm a / Telegram / SourceFiles / history / history_item_components . cpp <nl> ppp b / Telegram / SourceFiles / history / history_item_components . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " data / data_user . h " <nl> # include " data / data_file_origin . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_widgets . h " <nl> # include " styles / style_history . h " <nl> # include " window / window_session_controller . h " <nl> mmm a / Telegram / SourceFiles / history / history_message . cpp <nl> ppp b / Telegram / SourceFiles / history / history_message . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history_service . h " <nl> # include " history / view / history_view_service_message . h " <nl> # include " history / view / history_view_context_menu . h " / / For CopyPostLink ( ) . <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " boxes / share_box . h " <nl> # include " boxes / confirm_box . h " <nl> # include " ui / toast / toast . h " <nl> mmm a / Telegram / SourceFiles / history / history_service . cpp <nl> ppp b / Telegram / SourceFiles / history / history_service . cpp <nl> For license and copyright information please follow this link : <nl> <nl> # include " lang / lang_keys . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " layout . h " <nl> # include " history / history . h " <nl> mmm a / Telegram / SourceFiles / history / history_widget . cpp <nl> ppp b / Telegram / SourceFiles / history / history_widget . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / popup_menu . h " <nl> # include " ui / text_options . h " <nl> # include " ui / unread_badge . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " window / themes / window_theme . h " <nl> # include " window / notifications_manager . h " <nl> # include " window / window_session_controller . h " <nl> mmm a / Telegram / SourceFiles / history / media / history_media_contact . cpp <nl> ppp b / Telegram / SourceFiles / history / media / history_media_contact . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " data / data_user . h " <nl> # include " data / data_media_types . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_history . h " <nl> <nl> namespace { <nl> mmm a / Telegram / SourceFiles / history / media / history_media_poll . cpp <nl> ppp b / Telegram / SourceFiles / history / media / history_media_poll . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_poll . h " <nl> # include " data / data_session . h " <nl> # include " layout . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_history . h " <nl> # include " styles / style_widgets . h " <nl> mmm a / Telegram / SourceFiles / history / view / history_view_contact_status . cpp <nl> ppp b / Telegram / SourceFiles / history / view / history_view_contact_status . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / window_controller . h " <nl> # include " window / window_session_controller . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . 
h " <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / generic_box . h " / / window - > show ( Box ( InitMethod ( ) ) ) <nl> # include " boxes / peers / edit_contact_box . h " <nl> mmm a / Telegram / SourceFiles / history / view / history_view_context_menu . cpp <nl> ppp b / Telegram / SourceFiles / history / view / history_view_context_menu . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / application . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " / / App : : wnd ( ) - > sessionController <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> namespace HistoryView { <nl> mmm a / Telegram / SourceFiles / history / view / history_view_list_widget . cpp <nl> ppp b / Telegram / SourceFiles / history / view / history_view_list_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " layout . h " <nl> # include " window / window_session_controller . h " <nl> # include " window / window_peer_menu . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " ui / widgets / popup_menu . h " <nl> # include " ui / toast / toast . h " <nl> # include " lang / lang_keys . h " <nl> ListWidget : : ListWidget ( <nl> } ) ; <nl> } <nl> <nl> - AuthSession & ListWidget : : session ( ) const { <nl> + Main : : Session & ListWidget : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / history / view / history_view_list_widget . h <nl> ppp b / Telegram / SourceFiles / history / view / history_view_list_widget . h <nl> For license and copyright information please follow this link : <nl> # include " data / data_messages . h " <nl> # include " history / view / history_view_element . 
h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Ui { <nl> class PopupMenu ; <nl> class ListWidget final <nl> not_null < Window : : SessionController * > controller , <nl> not_null < ListDelegate * > delegate ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> not_null < ListDelegate * > delegate ( ) const ; <nl> <nl> mmm a / Telegram / SourceFiles / history / view / history_view_message . cpp <nl> ppp b / Telegram / SourceFiles / history / view / history_view_message . cpp <nl> For license and copyright information please follow this link : <nl> # include " lang / lang_keys . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " window / window_session_controller . h " <nl> # include " layout . h " <nl> # include " styles / style_widgets . h " <nl> mmm a / Telegram / SourceFiles / history / view / history_view_top_bar_widget . cpp <nl> ppp b / Telegram / SourceFiles / history / view / history_view_top_bar_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / storage_shared_media . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " lang / lang_keys . h " <nl> # include " core / shortcuts . h " <nl> # include " ui / special_buttons . h " <nl> TopBarWidget : : TopBarWidget ( <nl> <nl> TopBarWidget : : ~ TopBarWidget ( ) = default ; <nl> <nl> - AuthSession & TopBarWidget : : session ( ) const { <nl> + Main : : Session & TopBarWidget : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / history / view / history_view_top_bar_widget . 
h <nl> ppp b / Telegram / SourceFiles / history / view / history_view_top_bar_widget . h <nl> For license and copyright information please follow this link : <nl> # include " base / timer . h " <nl> # include " dialogs / dialogs_key . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Ui { <nl> class AbstractButton ; <nl> class TopBarWidget : public Ui : : RpWidget , private base : : Subscriber { <nl> not_null < Window : : SessionController * > controller ) ; <nl> ~ TopBarWidget ( ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> void updateControlsVisibility ( ) ; <nl> void finishAnimating ( ) ; <nl> mmm a / Telegram / SourceFiles / info / common_groups / info_common_groups_widget . cpp <nl> ppp b / Telegram / SourceFiles / info / common_groups / info_common_groups_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / scroll_area . h " <nl> # include " data / data_user . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_info . h " <nl> <nl> namespace Info { <nl> mmm a / Telegram / SourceFiles / info / info_content_widget . cpp <nl> ppp b / Telegram / SourceFiles / info / info_content_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " info / info_controller . h " <nl> # include " boxes / peer_list_box . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_info . h " <nl> # include " styles / style_profile . h " <nl> <nl> mmm a / Telegram / SourceFiles / info / info_controller . cpp <nl> ppp b / Telegram / SourceFiles / info / info_controller . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_channel . h " <nl> # include " data / data_chat . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " window / window_session_controller . h " <nl> <nl> namespace Info { <nl> mmm a / Telegram / SourceFiles / info / info_layer_widget . cpp <nl> ppp b / Telegram / SourceFiles / info / info_layer_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / section_widget . h " <nl> # include " window / window_session_controller . h " <nl> # include " window / main_window . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_info . h " <nl> # include " styles / style_window . h " <nl> # include " styles / style_boxes . h " <nl> mmm a / Telegram / SourceFiles / info / info_memento . cpp <nl> ppp b / Telegram / SourceFiles / info / info_memento . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_channel . h " <nl> # include " data / data_chat . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Info { <nl> <nl> mmm a / Telegram / SourceFiles / info / info_top_bar . cpp <nl> ppp b / Telegram / SourceFiles / info / info_top_bar . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / peer_list_controllers . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " ui / widgets / buttons . h " <nl> # include " ui / widgets / labels . h " <nl> # include " ui / widgets / input_fields . h " <nl> mmm a / Telegram / SourceFiles / info / info_wrap_widget . 
cpp <nl> ppp b / Telegram / SourceFiles / info / info_wrap_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / window_peer_menu . h " <nl> # include " boxes / peer_list_box . h " <nl> # include " boxes / confirm_box . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_session . h " <nl> # include " data / data_user . h " <nl> # include " mainwidget . h " <nl> mmm a / Telegram / SourceFiles / info / media / info_media_list_widget . cpp <nl> ppp b / Telegram / SourceFiles / info / media / info_media_list_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / file_download . h " <nl> # include " ui / widgets / popup_menu . h " <nl> # include " lang / lang_keys . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " mainwidget . h " <nl> # include " window / main_window . h " <nl> # include " styles / style_overview . h " <nl> ListWidget : : ListWidget ( <nl> start ( ) ; <nl> } <nl> <nl> - AuthSession & ListWidget : : session ( ) const { <nl> + Main : : Session & ListWidget : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / info / media / info_media_list_widget . h <nl> ppp b / Telegram / SourceFiles / info / media / info_media_list_widget . h <nl> For license and copyright information please follow this link : <nl> # include " data / data_shared_media . 
h " <nl> <nl> class DeleteMessagesBox ; <nl> - class AuthSession ; <nl> + <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace HistoryView { <nl> struct TextState ; <nl> class ListWidget : public Ui : : RpWidget { <nl> QWidget * parent , <nl> not_null < AbstractController * > controller ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> void restart ( ) ; <nl> <nl> mmm a / Telegram / SourceFiles / info / profile / info_profile_actions . cpp <nl> ppp b / Telegram / SourceFiles / info / profile / info_profile_actions . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / window_peer_menu . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " / / MainWindow : : controller . <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " core / application . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_info . h " <nl> mmm a / Telegram / SourceFiles / info / profile / info_profile_cover . cpp <nl> ppp b / Telegram / SourceFiles / info / profile / info_profile_cover . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / window_session_controller . h " <nl> # include " observer_peer . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> <nl> namespace Info { <nl> mmm a / Telegram / SourceFiles / info / profile / info_profile_inner_widget . cpp <nl> ppp b / Telegram / SourceFiles / info / profile / info_profile_inner_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / confirm_box . h " <nl> # include " boxes / report_box . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . 
h " <nl> # include " apiwrap . h " <nl> # include " window / main_window . h " <nl> # include " window / window_session_controller . h " <nl> mmm a / Telegram / SourceFiles / info / profile / info_profile_members_controllers . cpp <nl> ppp b / Telegram / SourceFiles / info / profile / info_profile_members_controllers . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / popup_menu . h " <nl> # include " lang / lang_keys . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " mainwidget . h " <nl> # include " observer_peer . h " <nl> # include " data / data_channel . h " <nl> mmm a / Telegram / SourceFiles / info / profile / info_profile_values . cpp <nl> ppp b / Telegram / SourceFiles / info / profile / info_profile_values . cpp <nl> For license and copyright information please follow this link : <nl> <nl> # include " observer_peer . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " ui / wrap / slide_wrap . h " <nl> # include " ui / text / text_utilities . h " <nl> # include " lang / lang_keys . h " <nl> mmm a / Telegram / SourceFiles / inline_bots / inline_bot_layout_internal . cpp <nl> ppp b / Telegram / SourceFiles / inline_bots / inline_bot_layout_internal . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / view / history_view_cursor_state . h " <nl> # include " storage / localstorage . h " <nl> # include " ui / image / image . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " lang / lang_keys . h " <nl> <nl> mmm a / Telegram / SourceFiles / inline_bots / inline_bot_result . cpp <nl> ppp b / Telegram / SourceFiles / inline_bots / inline_bot_result . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " core / mime_type . h " <nl> # include " ui / image / image . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace InlineBots { <nl> namespace { <nl> mmm a / Telegram / SourceFiles / inline_bots / inline_results_widget . cpp <nl> ppp b / Telegram / SourceFiles / inline_bots / inline_results_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwindow . h " <nl> # include " apiwrap . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " window / window_session_controller . h " <nl> # include " ui / widgets / scroll_area . h " <nl> # include " ui / widgets / labels . h " <nl> mmm a / Telegram / SourceFiles / intro / introwidget . cpp <nl> ppp b / Telegram / SourceFiles / intro / introwidget . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_user . h " <nl> # include " window / themes / window_theme . h " <nl> # include " lang / lang_cloud_manager . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_intro . h " <nl> # include " styles / style_window . h " <nl> mmm a / Telegram / SourceFiles / lang / lang_cloud_manager . cpp <nl> ppp b / Telegram / SourceFiles / lang / lang_cloud_manager . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / localstorage . h " <nl> # include " core / application . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " boxes / confirm_box . h " <nl> # include " ui / wrap / padding_wrap . h " <nl> # include " ui / widgets / labels . 
h " <nl> void CloudManager : : setSuggestedLanguage ( const QString & langCode ) { <nl> _languageWasSuggested = true ; <nl> _firstLanguageSuggestion . notify ( ) ; <nl> <nl> - if ( AuthSession : : Exists ( ) <nl> + if ( Main : : Session : : Exists ( ) <nl> & & _langpack . id ( ) . isEmpty ( ) <nl> & & ! _suggestedLanguage . isEmpty ( ) ) { <nl> _offerSwitchToId = _suggestedLanguage ; <nl> bool CloudManager : : canApplyWithoutRestart ( const QString & id ) const { <nl> } <nl> <nl> / / We don ' t support instant language switch if the auth session exists : ( <nl> - return ! AuthSession : : Exists ( ) ; <nl> + return ! Main : : Session : : Exists ( ) ; <nl> } <nl> <nl> void CloudManager : : resetToDefault ( ) { <nl> mmm a / Telegram / SourceFiles / main / main_account . cpp <nl> ppp b / Telegram / SourceFiles / main / main_account . cpp <nl> For license and copyright information please follow this link : <nl> # include " media / audio / media_audio . h " <nl> # include " mainwidget . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Main { <nl> <nl> void Account : : createSession ( const MTPUser & user ) { <nl> return true ; <nl> } ) ) ; <nl> <nl> - _session = std : : make_unique < AuthSession > ( this , user ) ; <nl> + _session = std : : make_unique < Session > ( this , user ) ; <nl> _sessionValue = _session . get ( ) ; <nl> } <nl> <nl> void Account : : destroySession ( ) { <nl> - _storedAuthSession . reset ( ) ; <nl> - _authSessionUserId = 0 ; <nl> - _authSessionUserSerialized = { } ; <nl> + _storedSettings . reset ( ) ; <nl> + _sessionUserId = 0 ; <nl> + _sessionUserSerialized = { } ; <nl> if ( ! sessionExists ( ) ) { <nl> return ; <nl> } <nl> bool Account : : sessionExists ( ) const { <nl> return ( _sessionValue . current ( ) ! 
= nullptr ) ; <nl> } <nl> <nl> - AuthSession & Account : : session ( ) { <nl> + Session & Account : : session ( ) { <nl> Expects ( sessionExists ( ) ) ; <nl> <nl> return * _sessionValue . current ( ) ; <nl> } <nl> <nl> - const AuthSession & Account : : session ( ) const { <nl> + const Session & Account : : session ( ) const { <nl> Expects ( sessionExists ( ) ) ; <nl> <nl> return * _sessionValue . current ( ) ; <nl> } <nl> <nl> - rpl : : producer < AuthSession * > Account : : sessionValue ( ) const { <nl> + rpl : : producer < Session * > Account : : sessionValue ( ) const { <nl> return _sessionValue . value ( ) ; <nl> } <nl> <nl> - rpl : : producer < AuthSession * > Account : : sessionChanges ( ) const { <nl> + rpl : : producer < Session * > Account : : sessionChanges ( ) const { <nl> return _sessionValue . changes ( ) ; <nl> } <nl> <nl> QByteArray Account : : serializeMtpAuthorization ( ) const { <nl> return serialize ( _mtpConfig . mainDcId , keys , keysToDestroy ) ; <nl> } <nl> <nl> - void Account : : setAuthSessionUserId ( UserId userId ) { <nl> + void Account : : setSessionUserId ( UserId userId ) { <nl> Expects ( ! sessionExists ( ) ) ; <nl> <nl> - _authSessionUserId = userId ; <nl> + _sessionUserId = userId ; <nl> } <nl> <nl> - void Account : : setAuthSessionFromStorage ( <nl> - std : : unique_ptr < AuthSessionSettings > data , <nl> + void Account : : setSessionFromStorage ( <nl> + std : : unique_ptr < Settings > data , <nl> QByteArray & & selfSerialized , <nl> int32 selfStreamVersion ) { <nl> Expects ( ! sessionExists ( ) ) ; <nl> <nl> - DEBUG_LOG ( ( " authSessionUserSerialized set : % 1 " <nl> + DEBUG_LOG ( ( " sessionUserSerialized set : % 1 " <nl> ) . arg ( selfSerialized . 
size ( ) ) ) ; <nl> <nl> - _storedAuthSession = std : : move ( data ) ; <nl> - _authSessionUserSerialized = std : : move ( selfSerialized ) ; <nl> - _authSessionUserStreamVersion = selfStreamVersion ; <nl> + _storedSettings = std : : move ( data ) ; <nl> + _sessionUserSerialized = std : : move ( selfSerialized ) ; <nl> + _sessionUserStreamVersion = selfStreamVersion ; <nl> } <nl> <nl> - AuthSessionSettings * Account : : getAuthSessionSettings ( ) { <nl> - if ( _authSessionUserId ) { <nl> - return _storedAuthSession ? _storedAuthSession . get ( ) : nullptr ; <nl> + Settings * Account : : getSessionSettings ( ) { <nl> + if ( _sessionUserId ) { <nl> + return _storedSettings ? _storedSettings . get ( ) : nullptr ; <nl> } else if ( sessionExists ( ) ) { <nl> return & session ( ) . settings ( ) ; <nl> } <nl> void Account : : setMtpAuthorization ( const QByteArray & serialized ) { <nl> return ; <nl> } <nl> <nl> - setAuthSessionUserId ( userId ) ; <nl> + setSessionUserId ( userId ) ; <nl> _mtpConfig . mainDcId = mainDcId ; <nl> <nl> const auto readKeys = [ & stream ] ( auto & keys ) { <nl> void Account : : startMtp ( ) { <nl> destroyMtpKeys ( base : : take ( _mtpKeysToDestroy ) ) ; <nl> } <nl> <nl> - if ( _authSessionUserId ) { <nl> - DEBUG_LOG ( ( " authSessionUserSerialized . size : % 1 " <nl> - ) . arg ( _authSessionUserSerialized . size ( ) ) ) ; <nl> - QDataStream peekStream ( _authSessionUserSerialized ) ; <nl> + if ( _sessionUserId ) { <nl> + DEBUG_LOG ( ( " sessionUserSerialized . size : % 1 " <nl> + ) . arg ( _sessionUserSerialized . size ( ) ) ) ; <nl> + QDataStream peekStream ( _sessionUserSerialized ) ; <nl> const auto phone = Serialize : : peekUserPhone ( <nl> - _authSessionUserStreamVersion , <nl> + _sessionUserStreamVersion , <nl> peekStream ) ; <nl> const auto flags = MTPDuser : : Flag : : f_self | ( phone . isEmpty ( ) <nl> ? 
MTPDuser : : Flag ( ) <nl> : MTPDuser : : Flag : : f_phone ) ; <nl> createSession ( MTP_user ( <nl> MTP_flags ( flags ) , <nl> - MTP_int ( base : : take ( _authSessionUserId ) ) , <nl> + MTP_int ( base : : take ( _sessionUserId ) ) , <nl> MTPlong ( ) , / / access_hash <nl> MTPstring ( ) , / / first_name <nl> MTPstring ( ) , / / last_name <nl> void Account : : startMtp ( ) { <nl> MTPstring ( ) , / / bot_inline_placeholder <nl> MTPstring ( ) ) ) ; / / lang_code <nl> Local : : readSelf ( <nl> - base : : take ( _authSessionUserSerialized ) , <nl> - base : : take ( _authSessionUserStreamVersion ) ) ; <nl> + base : : take ( _sessionUserSerialized ) , <nl> + base : : take ( _sessionUserStreamVersion ) ) ; <nl> } <nl> - if ( _storedAuthSession ) { <nl> + if ( _storedSettings ) { <nl> if ( sessionExists ( ) ) { <nl> - session ( ) . moveSettingsFrom ( std : : move ( * _storedAuthSession ) ) ; <nl> + session ( ) . moveSettingsFrom ( std : : move ( * _storedSettings ) ) ; <nl> } <nl> - _storedAuthSession . reset ( ) ; <nl> + _storedSettings . reset ( ) ; <nl> } <nl> <nl> if ( sessionExists ( ) ) { <nl> mmm a / Telegram / SourceFiles / main / main_account . h <nl> ppp b / Telegram / SourceFiles / main / main_account . h <nl> For license and copyright information please follow this link : <nl> # include " mtproto / auth_key . h " <nl> # include " base / weak_ptr . 
h " <nl> <nl> - class AuthSession ; <nl> - class AuthSessionSettings ; <nl> - <nl> namespace Main { <nl> <nl> + class Session ; <nl> + class Settings ; <nl> + <nl> class Account final : public base : : has_weak_ptr { <nl> public : <nl> explicit Account ( const QString & dataName ) ; <nl> class Account final : public base : : has_weak_ptr { <nl> void forcedLogOut ( ) ; <nl> <nl> [ [ nodiscard ] ] bool sessionExists ( ) const ; <nl> - [ [ nodiscard ] ] AuthSession & session ( ) ; <nl> - [ [ nodiscard ] ] const AuthSession & session ( ) const ; <nl> - [ [ nodiscard ] ] rpl : : producer < AuthSession * > sessionValue ( ) const ; <nl> - [ [ nodiscard ] ] rpl : : producer < AuthSession * > sessionChanges ( ) const ; <nl> + [ [ nodiscard ] ] Session & session ( ) ; <nl> + [ [ nodiscard ] ] const Session & session ( ) const ; <nl> + [ [ nodiscard ] ] rpl : : producer < Session * > sessionValue ( ) const ; <nl> + [ [ nodiscard ] ] rpl : : producer < Session * > sessionChanges ( ) const ; <nl> <nl> [ [ nodiscard ] ] MTP : : Instance * mtp ( ) { <nl> return _mtp . get ( ) ; <nl> class Account final : public base : : has_weak_ptr { <nl> / / Set from legacy storage . <nl> void setMtpMainDcId ( MTP : : DcId mainDcId ) ; <nl> void setMtpKey ( MTP : : DcId dcId , const MTP : : AuthKey : : Data & keyData ) ; <nl> - void setAuthSessionUserId ( UserId userId ) ; <nl> - void setAuthSessionFromStorage ( <nl> - std : : unique_ptr < AuthSessionSettings > data , <nl> + void setSessionUserId ( UserId userId ) ; <nl> + void setSessionFromStorage ( <nl> + std : : unique_ptr < Settings > data , <nl> QByteArray & & selfSerialized , <nl> int32 selfStreamVersion ) ; <nl> - [ [ nodiscard ] ] AuthSessionSettings * getAuthSessionSettings ( ) ; <nl> + [ [ nodiscard ] ] Settings * getSessionSettings ( ) ; <nl> <nl> / / Serialization . 
<nl> [ [ nodiscard ] ] QByteArray serializeMtpAuthorization ( ) const ; <nl> class Account final : public base : : has_weak_ptr { <nl> std : : unique_ptr < MTP : : Instance > _mtpForKeysDestroy ; <nl> rpl : : event_stream < > _configUpdates ; <nl> <nl> - std : : unique_ptr < AuthSession > _session ; <nl> - rpl : : variable < AuthSession * > _sessionValue ; <nl> + std : : unique_ptr < Session > _session ; <nl> + rpl : : variable < Session * > _sessionValue ; <nl> <nl> - UserId _authSessionUserId = 0 ; <nl> - QByteArray _authSessionUserSerialized ; <nl> - int32 _authSessionUserStreamVersion = 0 ; <nl> - std : : unique_ptr < AuthSessionSettings > _storedAuthSession ; <nl> + UserId _sessionUserId = 0 ; <nl> + QByteArray _sessionUserSerialized ; <nl> + int32 _sessionUserStreamVersion = 0 ; <nl> + std : : unique_ptr < Settings > _storedSettings ; <nl> MTP : : Instance : : Config _mtpConfig ; <nl> MTP : : AuthKeysList _mtpKeysToDestroy ; <nl> <nl> similarity index 84 % <nl> rename from Telegram / SourceFiles / auth_session . cpp <nl> rename to Telegram / SourceFiles / main / main_session . cpp <nl> mmm a / Telegram / SourceFiles / auth_session . cpp <nl> ppp b / Telegram / SourceFiles / main / main_session . cpp <nl> the official desktop application for the Telegram messaging service . <nl> For license and copyright information please follow this link : <nl> https : / / github . com / telegramdesktop / tdesktop / blob / master / LEGAL <nl> * / <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> # include " apiwrap . h " <nl> # include " core / application . h " <nl> For license and copyright information please follow this link : <nl> # include " support / support_helper . h " <nl> # include " observer_peer . 
h " <nl> <nl> + namespace Main { <nl> namespace { <nl> <nl> constexpr auto kAutoLockTimeoutLateMs = crl : : time ( 3000 ) ; <nl> constexpr auto kLegacyCallsPeerToPeerNobody = 4 ; <nl> <nl> } / / namespace <nl> <nl> - AuthSessionSettings : : Variables : : Variables ( ) <nl> + Settings : : Variables : : Variables ( ) <nl> : sendFilesWay ( SendFilesWay : : Album ) <nl> , selectorTab ( ChatHelpers : : SelectorTab : : Emoji ) <nl> , floatPlayerColumn ( Window : : Column : : Second ) <nl> AuthSessionSettings : : Variables : : Variables ( ) <nl> , supportSwitch ( Support : : SwitchSettings : : Next ) { <nl> } <nl> <nl> - QByteArray AuthSessionSettings : : serialize ( ) const { <nl> + QByteArray Settings : : serialize ( ) const { <nl> const auto autoDownload = _variables . autoDownload . serialize ( ) ; <nl> auto size = sizeof ( qint32 ) * 23 ; <nl> for ( auto i = _variables . soundOverrides . cbegin ( ) , e = _variables . soundOverrides . cend ( ) ; i ! = e ; + + i ) { <nl> QByteArray AuthSessionSettings : : serialize ( ) const { <nl> return result ; <nl> } <nl> <nl> - void AuthSessionSettings : : constructFromSerialized ( const QByteArray & serialized ) { <nl> + void Settings : : constructFromSerialized ( const QByteArray & serialized ) { <nl> if ( serialized . isEmpty ( ) ) { <nl> return ; <nl> } <nl> void AuthSessionSettings : : constructFromSerialized ( const QByteArray & serialized ) <nl> } <nl> if ( stream . status ( ) ! = QDataStream : : Ok ) { <nl> LOG ( ( " App Error : " <nl> - " Bad data for AuthSessionSettings : : constructFromSerialized ( ) " ) ) ; <nl> + " Bad data for Settings : : constructFromSerialized ( ) " ) ) ; <nl> return ; <nl> } <nl> if ( ! autoDownload . isEmpty ( ) <nl> void AuthSessionSettings : : constructFromSerialized ( const QByteArray & serialized ) <nl> _variables . 
skipArchiveInSearch = ( skipArchiveInSearch = = 1 ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setSupportChatsTimeSlice ( int slice ) { <nl> + void Settings : : setSupportChatsTimeSlice ( int slice ) { <nl> _variables . supportChatsTimeSlice = slice ; <nl> } <nl> <nl> - int AuthSessionSettings : : supportChatsTimeSlice ( ) const { <nl> + int Settings : : supportChatsTimeSlice ( ) const { <nl> return _variables . supportChatsTimeSlice . current ( ) ; <nl> } <nl> <nl> - rpl : : producer < int > AuthSessionSettings : : supportChatsTimeSliceValue ( ) const { <nl> + rpl : : producer < int > Settings : : supportChatsTimeSliceValue ( ) const { <nl> return _variables . supportChatsTimeSlice . value ( ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setSupportAllSearchResults ( bool all ) { <nl> + void Settings : : setSupportAllSearchResults ( bool all ) { <nl> _variables . supportAllSearchResults = all ; <nl> } <nl> <nl> - bool AuthSessionSettings : : supportAllSearchResults ( ) const { <nl> + bool Settings : : supportAllSearchResults ( ) const { <nl> return _variables . supportAllSearchResults . current ( ) ; <nl> } <nl> <nl> - rpl : : producer < bool > AuthSessionSettings : : supportAllSearchResultsValue ( ) const { <nl> + rpl : : producer < bool > Settings : : supportAllSearchResultsValue ( ) const { <nl> return _variables . supportAllSearchResults . value ( ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setTabbedSelectorSectionEnabled ( bool enabled ) { <nl> + void Settings : : setTabbedSelectorSectionEnabled ( bool enabled ) { <nl> _variables . 
tabbedSelectorSectionEnabled = enabled ; <nl> if ( enabled ) { <nl> setThirdSectionInfoEnabled ( false ) ; <nl> void AuthSessionSettings : : setTabbedSelectorSectionEnabled ( bool enabled ) { <nl> setTabbedReplacedWithInfo ( false ) ; <nl> } <nl> <nl> - rpl : : producer < bool > AuthSessionSettings : : tabbedReplacedWithInfoValue ( ) const { <nl> + rpl : : producer < bool > Settings : : tabbedReplacedWithInfoValue ( ) const { <nl> return _tabbedReplacedWithInfoValue . events_starting_with ( <nl> tabbedReplacedWithInfo ( ) ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setThirdSectionInfoEnabled ( bool enabled ) { <nl> + void Settings : : setThirdSectionInfoEnabled ( bool enabled ) { <nl> if ( _variables . thirdSectionInfoEnabled ! = enabled ) { <nl> _variables . thirdSectionInfoEnabled = enabled ; <nl> if ( enabled ) { <nl> void AuthSessionSettings : : setThirdSectionInfoEnabled ( bool enabled ) { <nl> } <nl> } <nl> <nl> - rpl : : producer < bool > AuthSessionSettings : : thirdSectionInfoEnabledValue ( ) const { <nl> + rpl : : producer < bool > Settings : : thirdSectionInfoEnabledValue ( ) const { <nl> return _thirdSectionInfoEnabledValue . events_starting_with ( <nl> thirdSectionInfoEnabled ( ) ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setTabbedReplacedWithInfo ( bool enabled ) { <nl> + void Settings : : setTabbedReplacedWithInfo ( bool enabled ) { <nl> if ( _tabbedReplacedWithInfo ! = enabled ) { <nl> _tabbedReplacedWithInfo = enabled ; <nl> _tabbedReplacedWithInfoValue . fire_copy ( enabled ) ; <nl> } <nl> } <nl> <nl> - QString AuthSessionSettings : : getSoundPath ( const QString & key ) const { <nl> + QString Settings : : getSoundPath ( const QString & key ) const { <nl> auto it = _variables . soundOverrides . constFind ( key ) ; <nl> if ( it ! = _variables . soundOverrides . end ( ) ) { <nl> return it . 
value ( ) ; <nl> QString AuthSessionSettings : : getSoundPath ( const QString & key ) const { <nl> return qsl ( " : / sounds / " ) + key + qsl ( " . mp3 " ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setDialogsWidthRatio ( float64 ratio ) { <nl> + void Settings : : setDialogsWidthRatio ( float64 ratio ) { <nl> _variables . dialogsWidthRatio = ratio ; <nl> } <nl> <nl> - float64 AuthSessionSettings : : dialogsWidthRatio ( ) const { <nl> + float64 Settings : : dialogsWidthRatio ( ) const { <nl> return _variables . dialogsWidthRatio . current ( ) ; <nl> } <nl> <nl> - rpl : : producer < float64 > AuthSessionSettings : : dialogsWidthRatioChanges ( ) const { <nl> + rpl : : producer < float64 > Settings : : dialogsWidthRatioChanges ( ) const { <nl> return _variables . dialogsWidthRatio . changes ( ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setThirdColumnWidth ( int width ) { <nl> + void Settings : : setThirdColumnWidth ( int width ) { <nl> _variables . thirdColumnWidth = width ; <nl> } <nl> <nl> - int AuthSessionSettings : : thirdColumnWidth ( ) const { <nl> + int Settings : : thirdColumnWidth ( ) const { <nl> return _variables . thirdColumnWidth . current ( ) ; <nl> } <nl> <nl> - rpl : : producer < int > AuthSessionSettings : : thirdColumnWidthChanges ( ) const { <nl> + rpl : : producer < int > Settings : : thirdColumnWidthChanges ( ) const { <nl> return _variables . thirdColumnWidth . changes ( ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setArchiveCollapsed ( bool collapsed ) { <nl> + void Settings : : setArchiveCollapsed ( bool collapsed ) { <nl> _variables . archiveCollapsed = collapsed ; <nl> } <nl> <nl> - bool AuthSessionSettings : : archiveCollapsed ( ) const { <nl> + bool Settings : : archiveCollapsed ( ) const { <nl> return _variables . archiveCollapsed . 
current ( ) ; <nl> } <nl> <nl> - rpl : : producer < bool > AuthSessionSettings : : archiveCollapsedChanges ( ) const { <nl> + rpl : : producer < bool > Settings : : archiveCollapsedChanges ( ) const { <nl> return _variables . archiveCollapsed . changes ( ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setArchiveInMainMenu ( bool inMainMenu ) { <nl> + void Settings : : setArchiveInMainMenu ( bool inMainMenu ) { <nl> _variables . archiveInMainMenu = inMainMenu ; <nl> } <nl> <nl> - bool AuthSessionSettings : : archiveInMainMenu ( ) const { <nl> + bool Settings : : archiveInMainMenu ( ) const { <nl> return _variables . archiveInMainMenu . current ( ) ; <nl> } <nl> <nl> - rpl : : producer < bool > AuthSessionSettings : : archiveInMainMenuChanges ( ) const { <nl> + rpl : : producer < bool > Settings : : archiveInMainMenuChanges ( ) const { <nl> return _variables . archiveInMainMenu . changes ( ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setNotifyAboutPinned ( bool notify ) { <nl> + void Settings : : setNotifyAboutPinned ( bool notify ) { <nl> _variables . notifyAboutPinned = notify ; <nl> } <nl> <nl> - bool AuthSessionSettings : : notifyAboutPinned ( ) const { <nl> + bool Settings : : notifyAboutPinned ( ) const { <nl> return _variables . notifyAboutPinned . current ( ) ; <nl> } <nl> <nl> - rpl : : producer < bool > AuthSessionSettings : : notifyAboutPinnedChanges ( ) const { <nl> + rpl : : producer < bool > Settings : : notifyAboutPinnedChanges ( ) const { <nl> return _variables . notifyAboutPinned . changes ( ) ; <nl> } <nl> <nl> - void AuthSessionSettings : : setSkipArchiveInSearch ( bool skip ) { <nl> + void Settings : : setSkipArchiveInSearch ( bool skip ) { <nl> _variables . skipArchiveInSearch = skip ; <nl> } <nl> <nl> - bool AuthSessionSettings : : skipArchiveInSearch ( ) const { <nl> + bool Settings : : skipArchiveInSearch ( ) const { <nl> return _variables . skipArchiveInSearch . 
current ( ) ; <nl> } <nl> <nl> - rpl : : producer < bool > AuthSessionSettings : : skipArchiveInSearchChanges ( ) const { <nl> + rpl : : producer < bool > Settings : : skipArchiveInSearchChanges ( ) const { <nl> return _variables . skipArchiveInSearch . changes ( ) ; <nl> } <nl> <nl> - AuthSession & Auth ( ) { <nl> - return Core : : App ( ) . activeAccount ( ) . session ( ) ; <nl> - } <nl> - <nl> - AuthSession : : AuthSession ( <nl> + Session : : Session ( <nl> not_null < Main : : Account * > account , <nl> const MTPUser & user ) <nl> : _account ( account ) <nl> AuthSession : : AuthSession ( <nl> Window : : Theme : : Background ( ) - > start ( ) ; <nl> } <nl> <nl> - AuthSession : : ~ AuthSession ( ) { <nl> + Session : : ~ Session ( ) { <nl> ClickHandler : : clearActive ( ) ; <nl> ClickHandler : : unpressed ( ) ; <nl> } <nl> <nl> - Main : : Account & AuthSession : : account ( ) const { <nl> + Main : : Account & Session : : account ( ) const { <nl> return * _account ; <nl> } <nl> <nl> - bool AuthSession : : Exists ( ) { <nl> + bool Session : : Exists ( ) { <nl> return Core : : IsAppLaunched ( ) <nl> & & Core : : App ( ) . activeAccount ( ) . sessionExists ( ) ; <nl> } <nl> <nl> - base : : Observable < void > & AuthSession : : downloaderTaskFinished ( ) { <nl> + base : : Observable < void > & Session : : downloaderTaskFinished ( ) { <nl> return downloader ( ) . taskFinished ( ) ; <nl> } <nl> <nl> - UserId AuthSession : : userId ( ) const { <nl> + UserId Session : : userId ( ) const { <nl> return _user - > bareId ( ) ; <nl> } <nl> <nl> - PeerId AuthSession : : userPeerId ( ) const { <nl> + PeerId Session : : userPeerId ( ) const { <nl> return _user - > id ; <nl> } <nl> <nl> - bool AuthSession : : validateSelf ( const MTPUser & user ) { <nl> + bool Session : : validateSelf ( const MTPUser & user ) { <nl> if ( user . type ( ) ! = mtpc_user | | ! user . c_user ( ) . is_self ( ) ) { <nl> LOG ( ( " API Error : bad self user received . 
" ) ) ; <nl> return false ; <nl> bool AuthSession : : validateSelf ( const MTPUser & user ) { <nl> return true ; <nl> } <nl> <nl> - void AuthSession : : moveSettingsFrom ( AuthSessionSettings & & other ) { <nl> + void Session : : moveSettingsFrom ( Settings & & other ) { <nl> _settings . moveFrom ( std : : move ( other ) ) ; <nl> if ( _settings . hadLegacyCallsPeerToPeerNobody ( ) ) { <nl> api ( ) . savePrivacy ( <nl> void AuthSession : : moveSettingsFrom ( AuthSessionSettings & & other ) { <nl> } <nl> } <nl> <nl> - void AuthSession : : saveSettingsDelayed ( crl : : time delay ) { <nl> + void Session : : saveSettingsDelayed ( crl : : time delay ) { <nl> Expects ( this = = & Auth ( ) ) ; <nl> <nl> _saveDataTimer . callOnce ( delay ) ; <nl> } <nl> <nl> - not_null < MTP : : Instance * > AuthSession : : mtp ( ) { <nl> + not_null < MTP : : Instance * > Session : : mtp ( ) { <nl> return _account - > mtp ( ) ; <nl> } <nl> <nl> - void AuthSession : : localPasscodeChanged ( ) { <nl> + void Session : : localPasscodeChanged ( ) { <nl> _shouldLockAt = 0 ; <nl> _autoLockTimer . cancel ( ) ; <nl> checkAutoLock ( ) ; <nl> } <nl> <nl> - void AuthSession : : termsDeleteNow ( ) { <nl> + void Session : : termsDeleteNow ( ) { <nl> api ( ) . request ( MTPaccount_DeleteAccount ( <nl> MTP_string ( " Decline ToS update " ) <nl> ) ) . send ( ) ; <nl> } <nl> <nl> - void AuthSession : : checkAutoLock ( ) { <nl> + void Session : : checkAutoLock ( ) { <nl> if ( ! Global : : LocalPasscode ( ) <nl> | | Core : : App ( ) . passcodeLocked ( ) ) { <nl> _shouldLockAt = 0 ; <nl> void AuthSession : : checkAutoLock ( ) { <nl> } <nl> } <nl> <nl> - void AuthSession : : checkAutoLockIn ( crl : : time time ) { <nl> + void Session : : checkAutoLockIn ( crl : : time time ) { <nl> if ( _autoLockTimer . isActive ( ) ) { <nl> auto remain = _autoLockTimer . 
remainingTime ( ) ; <nl> if ( remain > 0 & & remain < = time ) return ; <nl> void AuthSession : : checkAutoLockIn ( crl : : time time ) { <nl> _autoLockTimer . callOnce ( time ) ; <nl> } <nl> <nl> - bool AuthSession : : supportMode ( ) const { <nl> + bool Session : : supportMode ( ) const { <nl> return ( _supportHelper ! = nullptr ) ; <nl> } <nl> <nl> - Support : : Helper & AuthSession : : supportHelper ( ) const { <nl> + Support : : Helper & Session : : supportHelper ( ) const { <nl> Expects ( supportMode ( ) ) ; <nl> <nl> return * _supportHelper ; <nl> } <nl> <nl> - Support : : Templates & AuthSession : : supportTemplates ( ) const { <nl> + Support : : Templates & Session : : supportTemplates ( ) const { <nl> return supportHelper ( ) . templates ( ) ; <nl> } <nl> + <nl> + } / / namespace Main <nl> + <nl> + Main : : Session & Auth ( ) { <nl> + return Core : : App ( ) . activeAccount ( ) . session ( ) ; <nl> + } <nl> similarity index 95 % <nl> rename from Telegram / SourceFiles / auth_session . h <nl> rename to Telegram / SourceFiles / main / main_session . h <nl> mmm a / Telegram / SourceFiles / auth_session . h <nl> ppp b / Telegram / SourceFiles / main / main_session . h <nl> For license and copyright information please follow this link : <nl> class ApiWrap ; <nl> enum class SendFilesWay ; <nl> <nl> - namespace Main { <nl> - class Account ; <nl> - } / / namespace Main <nl> - <nl> namespace Ui { <nl> enum class InputSubmitSettings ; <nl> } / / namespace Ui <nl> namespace Core { <nl> class Changelogs ; <nl> } / / namespace Core <nl> <nl> - class AuthSessionSettings final { <nl> + namespace Main { <nl> + <nl> + class Account ; <nl> + <nl> + class Settings final { <nl> public : <nl> - void moveFrom ( AuthSessionSettings & & other ) { <nl> + void moveFrom ( Settings & & other ) { <nl> _variables = std : : move ( other . 
_variables ) ; <nl> } <nl> QByteArray serialize ( ) const ; <nl> class AuthSessionSettings final { <nl> <nl> } ; <nl> <nl> - class AuthSession ; <nl> - AuthSession & Auth ( ) ; <nl> - <nl> - class AuthSession final <nl> + class Session final <nl> : public base : : has_weak_ptr <nl> , private base : : Subscriber { <nl> public : <nl> - AuthSession ( not_null < Main : : Account * > account , const MTPUser & user ) ; <nl> - ~ AuthSession ( ) ; <nl> + Session ( not_null < Main : : Account * > account , const MTPUser & user ) ; <nl> + ~ Session ( ) ; <nl> <nl> - AuthSession ( const AuthSession & other ) = delete ; <nl> - AuthSession & operator = ( const AuthSession & other ) = delete ; <nl> + Session ( const Session & other ) = delete ; <nl> + Session & operator = ( const Session & other ) = delete ; <nl> <nl> static bool Exists ( ) ; <nl> <nl> class AuthSession final <nl> Data : : Session & data ( ) { <nl> return * _data ; <nl> } <nl> - AuthSessionSettings & settings ( ) { <nl> + Settings & settings ( ) { <nl> return _settings ; <nl> } <nl> - void moveSettingsFrom ( AuthSessionSettings & & other ) ; <nl> + void moveSettingsFrom ( Settings & & other ) ; <nl> void saveSettingsDelayed ( crl : : time delay = kDefaultSaveDelay ) ; <nl> <nl> not_null < MTP : : Instance * > mtp ( ) ; <nl> class AuthSession final <nl> <nl> const not_null < Main : : Account * > _account ; <nl> <nl> - AuthSessionSettings _settings ; <nl> + Settings _settings ; <nl> base : : Timer _saveDataTimer ; <nl> <nl> crl : : time _shouldLockAt = 0 ; <nl> class AuthSession final <nl> rpl : : lifetime _lifetime ; <nl> <nl> } ; <nl> + <nl> + } / / namespace Main <nl> + <nl> + Main : : Session & Auth ( ) ; <nl> mmm a / Telegram / SourceFiles / mainwidget . cpp <nl> ppp b / Telegram / SourceFiles / mainwidget . cpp <nl> For license and copyright information please follow this link : <nl> # include " export / export_settings . h " <nl> # include " export / view / export_view_top_bar . 
h " <nl> # include " export / view / export_view_panel_controller . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " support / support_helper . h " <nl> # include " storage / storage_facade . h " <nl> # include " storage / storage_shared_media . h " <nl> bool HasForceLogoutNotification ( const MTPUpdates & updates ) { <nl> } <nl> <nl> bool ForwardedInfoDataLoaded ( <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> const MTPMessageFwdHeader & header ) { <nl> return header . match ( [ & ] ( const MTPDmessageFwdHeader & data ) { <nl> if ( const auto channelId = data . vchannel_id ( ) ) { <nl> bool ForwardedInfoDataLoaded ( <nl> } <nl> <nl> bool MentionUsersLoaded ( <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> const MTPVector < MTPMessageEntity > & entities ) { <nl> for ( const auto & entity : entities . v ) { <nl> auto type = entity . type ( ) ; <nl> bool MentionUsersLoaded ( <nl> } <nl> <nl> DataIsLoadedResult AllDataLoadedForMessage ( <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> const MTPMessage & message ) { <nl> return message . match ( [ & ] ( const MTPDmessage & message ) { <nl> if ( const auto fromId = message . vfrom_id ( ) ) { <nl> MainWidget : : MainWidget ( <nl> } <nl> } <nl> <nl> - AuthSession & MainWidget : : session ( ) const { <nl> + Main : : Session & MainWidget : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / mainwidget . h <nl> ppp b / Telegram / SourceFiles / mainwidget . h <nl> For license and copyright information please follow this link : <nl> # include " media / player / media_player_float . h " <nl> # include " data / data_pts_waiter . 
h " <nl> <nl> - class AuthSession ; <nl> struct HistoryMessageMarkupButton ; <nl> class MainWindow ; <nl> class ConfirmBox ; <nl> class HistoryWidget ; <nl> class StackItem ; <nl> struct FileLoadResult ; <nl> <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> + <nl> namespace Notify { <nl> struct PeerUpdate ; <nl> } / / namespace Notify <nl> class MainWidget <nl> <nl> MainWidget ( QWidget * parent , not_null < Window : : SessionController * > controller ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> bool isMainSectionShown ( ) const ; <nl> bool isThirdSectionShown ( ) const ; <nl> mmm a / Telegram / SourceFiles / mainwindow . cpp <nl> ppp b / Telegram / SourceFiles / mainwindow . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / shortcuts . h " <nl> # include " core / sandbox . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " intro / introwidget . h " <nl> # include " main / main_account . h " / / Account : : sessionValue . <nl> # include " mainwidget . h " <nl> MainWindow : : MainWindow ( not_null < Window : : Controller * > controller ) <nl> setLocale ( QLocale ( QLocale : : English , QLocale : : UnitedStates ) ) ; <nl> <nl> account ( ) . sessionValue ( <nl> - ) | rpl : : start_with_next ( [ = ] ( AuthSession * session ) { <nl> + ) | rpl : : start_with_next ( [ = ] ( Main : : Session * session ) { <nl> updateGlobalMenu ( ) ; <nl> if ( ! session ) { <nl> _mediaPreview . destroy ( ) ; <nl> mmm a / Telegram / SourceFiles / media / audio / media_audio . cpp <nl> ppp b / Telegram / SourceFiles / media / audio / media_audio . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " platform / platform_audio . h " <nl> # include " core / application . 
h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> # include < AL / al . h > <nl> # include < AL / alc . h > <nl> mmm a / Telegram / SourceFiles / media / player / media_player_float . cpp <nl> ppp b / Telegram / SourceFiles / media / player / media_player_float . cpp <nl> For license and copyright information please follow this link : <nl> # include " media / player / media_player_instance . h " <nl> # include " window / window_session_controller . h " <nl> # include " window / section_widget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_media_player . h " <nl> # include " styles / style_history . h " <nl> <nl> mmm a / Telegram / SourceFiles / media / player / media_player_instance . cpp <nl> ppp b / Telegram / SourceFiles / media / player / media_player_instance . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / application . h " <nl> # include " main / main_account . h " / / Account : : sessionValue . <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Media { <nl> namespace Player { <nl> Instance : : Instance ( ) <nl> handleSongUpdate ( audioId ) ; <nl> } ) ; <nl> <nl> - / / While we have one Media : : Player : : Instance for all authsessions we have to do this . <nl> + / / While we have one Media : : Player : : Instance for all sessions we have to do this . <nl> Core : : App ( ) . activeAccount ( ) . sessionValue ( <nl> - ) | rpl : : start_with_next ( [ = ] ( AuthSession * session ) { <nl> + ) | rpl : : start_with_next ( [ = ] ( Main : : Session * session ) { <nl> if ( session ) { <nl> subscribe ( session - > calls ( ) . currentCallChanged ( ) , [ = ] ( Calls : : Call * call ) { <nl> if ( call ) { <nl> mmm a / Telegram / SourceFiles / media / player / media_player_panel . 
cpp <nl> ppp b / Telegram / SourceFiles / media / player / media_player_panel . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / shadow . h " <nl> # include " ui / widgets / scroll_area . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_overview . h " <nl> # include " styles / style_widgets . h " <nl> # include " styles / style_media_player . h " <nl> mmm a / Telegram / SourceFiles / media / player / media_player_volume_controller . cpp <nl> ppp b / Telegram / SourceFiles / media / player / media_player_volume_controller . cpp <nl> For license and copyright information please follow this link : <nl> # include " styles / style_media_player . h " <nl> # include " styles / style_widgets . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Media { <nl> namespace Player { <nl> mmm a / Telegram / SourceFiles / media / player / media_player_widget . cpp <nl> ppp b / Telegram / SourceFiles / media / player / media_player_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history_item . h " <nl> # include " storage / localstorage . h " <nl> # include " layout . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Media { <nl> namespace Player { <nl> mmm a / Telegram / SourceFiles / media / streaming / media_streaming_loader_mtproto . cpp <nl> ppp b / Telegram / SourceFiles / media / streaming / media_streaming_loader_mtproto . cpp <nl> For license and copyright information please follow this link : <nl> # include " media / streaming / media_streaming_loader_mtproto . h " <nl> <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . 
h " <nl> # include " storage / streamed_file_downloader . h " <nl> # include " storage / cache / storage_cache_types . h " <nl> <nl> mmm a / Telegram / SourceFiles / media / view / media_view_group_thumbs . cpp <nl> ppp b / Telegram / SourceFiles / media / view / media_view_group_thumbs . cpp <nl> For license and copyright information please follow this link : <nl> # include " history / history . h " <nl> # include " history / media / history_media . h " <nl> # include " ui / image / image . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_mediaview . h " <nl> <nl> namespace Media { <nl> mmm a / Telegram / SourceFiles / media / view / media_view_overlay_widget . cpp <nl> ppp b / Telegram / SourceFiles / media / view / media_view_overlay_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " main / main_account . h " / / Account : : sessionValue . <nl> # include " base / unixtime . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " layout . h " <nl> # include " storage / file_download . h " <nl> # include " calls / calls_instance . h " <nl> OverlayWidget : : OverlayWidget ( ) <nl> <nl> connect ( QApplication : : desktop ( ) , SIGNAL ( resized ( int ) ) , this , SLOT ( onScreenResized ( int ) ) ) ; <nl> <nl> - / / While we have one mediaview for all authsessions we have to do this . <nl> + / / While we have one mediaview for all sessions we have to do this . <nl> Core : : App ( ) . activeAccount ( ) . sessionValue ( <nl> - ) | rpl : : start_with_next ( [ = ] ( AuthSession * session ) { <nl> + ) | rpl : : start_with_next ( [ = ] ( Main : : Session * session ) { <nl> if ( session ) { <nl> subscribe ( session - > downloaderTaskFinished ( ) , [ = ] { <nl> if ( ! isHidden ( ) ) { <nl> mmm a / Telegram / SourceFiles / mtproto / dedicated_file_loader . 
cpp <nl> ppp b / Telegram / SourceFiles / mtproto / dedicated_file_loader . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " mtproto / dedicated_file_loader . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " core / application . h " <nl> # include " main / main_account . h " / / Account : : sessionChanges . <nl> <nl> WeakInstance : : WeakInstance ( QPointer < MTP : : Instance > instance ) <nl> die ( ) ; <nl> } ) ; <nl> Core : : App ( ) . activeAccount ( ) . sessionChanges ( <nl> - ) | rpl : : filter ( [ ] ( AuthSession * session ) { <nl> + ) | rpl : : filter ( [ ] ( Main : : Session * session ) { <nl> return ! session ; <nl> } ) | rpl : : start_with_next ( [ = ] { <nl> die ( ) ; <nl> WeakInstance : : WeakInstance ( QPointer < MTP : : Instance > instance ) <nl> } <nl> <nl> bool WeakInstance : : valid ( ) const { <nl> - return ( _instance ! = nullptr ) & & AuthSession : : Exists ( ) ; <nl> + return ( _instance ! = nullptr ) & & Main : : Session : : Exists ( ) ; <nl> } <nl> <nl> QPointer < MTP : : Instance > WeakInstance : : instance ( ) const { <nl> void ResolveChannel ( <nl> ) . arg ( username ) ) ; <nl> fail ( ) ; <nl> } ; <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> failed ( ) ; <nl> return ; <nl> } <nl> <nl> struct ResolveResult { <nl> - base : : weak_ptr < AuthSession > auth ; <nl> + base : : weak_ptr < Main : : Session > auth ; <nl> MTPInputChannel channel ; <nl> } ; <nl> static std : : map < QString , ResolveResult > ResolveCache ; <nl> mmm a / Telegram / SourceFiles / mtproto / mtp_instance . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / mtp_instance . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / localstorage . h " <nl> # include " calls / calls_instance . h " <nl> # include " main / main_account . h " <nl> - # include " auth_session . 
h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " core / application . h " <nl> # include " base / unixtime . h " <nl> bool Instance : : Private : : rpcErrorOccured ( mtpRequestId requestId , const RPCFailHan <nl> } <nl> <nl> bool Instance : : Private : : hasAuthorization ( ) { <nl> - return AuthSession : : Exists ( ) ; <nl> + return Main : : Session : : Exists ( ) ; <nl> } <nl> <nl> void Instance : : Private : : importDone ( const MTPauth_Authorization & result , mtpRequestId requestId ) { <nl> mmm a / Telegram / SourceFiles / passport / passport_form_controller . cpp <nl> ppp b / Telegram / SourceFiles / passport / passport_form_controller . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / window_session_controller . h " <nl> # include " core / click_handler_types . h " <nl> # include " ui / toast / toast . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " storage / localimageloader . h " <nl> # include " storage / localstorage . h " <nl> # include " storage / file_upload . h " <nl> mmm a / Telegram / SourceFiles / passport / passport_panel_details_row . cpp <nl> ppp b / Telegram / SourceFiles / passport / passport_panel_details_row . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / widgets / checkbox . h " <nl> # include " ui / wrap / slide_wrap . h " <nl> # include " ui / countryinput . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_user . h " <nl> # include " data / data_countries . h " <nl> # include " styles / style_boxes . h " <nl> mmm a / Telegram / SourceFiles / platform / mac / mac_touchbar . mm <nl> ppp b / Telegram / SourceFiles / platform / mac / mac_touchbar . mm <nl> <nl> # import < QuartzCore / QuartzCore . h > <nl> <nl> # include " apiwrap . h " <nl> - # include " auth_session . 
h " <nl> + # include " main / main_session . h " <nl> # include " api / api_sending . h " <nl> # include " boxes / confirm_box . h " <nl> # include " chat_helpers / emoji_list_widget . h " <nl> mmm a / Telegram / SourceFiles / platform / mac / main_window_mac . mm <nl> ppp b / Telegram / SourceFiles / platform / mac / main_window_mac . mm <nl> <nl> # include " mainwindow . h " <nl> # include " mainwidget . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " history / history . h " <nl> # include " history / history_widget . h " <nl> # include " history / history_inner_widget . h " <nl> - ( void ) windowWillExitFullScreen : ( NSNotification * ) aNotification { <nl> } <nl> <nl> account ( ) . sessionValue ( <nl> - ) | rpl : : start_with_next ( [ = ] ( AuthSession * session ) { <nl> + ) | rpl : : start_with_next ( [ = ] ( Main : : Session * session ) { <nl> if ( session ) { <nl> / / We need only common pinned dialogs . <nl> if ( ! _private - > _touchBar ) { <nl> mmm a / Telegram / SourceFiles / platform / win / windows_event_filter . cpp <nl> ppp b / Telegram / SourceFiles / platform / win / windows_event_filter . cpp <nl> For license and copyright information please follow this link : <nl> <nl> # include " platform / win / windows_dlls . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Platform { <nl> namespace { <nl> bool EventFilter : : mainWindowEvent ( <nl> switch ( msg ) { <nl> <nl> case WM_TIMECHANGE : { <nl> - if ( AuthSession : : Exists ( ) ) { <nl> + if ( Main : : Session : : Exists ( ) ) { <nl> Auth ( ) . checkAutoLockIn ( 100 ) ; <nl> } <nl> } return false ; <nl> mmm a / Telegram / SourceFiles / profile / profile_block_group_members . cpp <nl> ppp b / Telegram / SourceFiles / profile / profile_block_group_members . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwidget . h " <nl> # include " apiwrap . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " lang / lang_keys . h " <nl> <nl> namespace Profile { <nl> mmm a / Telegram / SourceFiles / profile / profile_block_peer_list . cpp <nl> ppp b / Telegram / SourceFiles / profile / profile_block_peer_list . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / effects / ripple_animation . h " <nl> # include " ui / text_options . h " <nl> # include " data / data_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_profile . h " <nl> # include " styles / style_widgets . h " <nl> <nl> mmm a / Telegram / SourceFiles / settings / settings_advanced . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_advanced . cpp <nl> For license and copyright information please follow this link : <nl> # include " core / application . h " <nl> # include " storage / localstorage . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " layout . h " <nl> # include " styles / style_settings . h " <nl> <nl> mmm a / Telegram / SourceFiles / settings / settings_chat . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_chat . cpp <nl> For license and copyright information please follow this link : <nl> # include " platform / platform_info . h " <nl> # include " support / support_common . h " <nl> # include " support / support_templates . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " mainwidget . h " <nl> # include " styles / style_settings . h " <nl> # include " styles / style_boxes . 
h " <nl> mmm a / Telegram / SourceFiles / settings / settings_codes . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_codes . cpp <nl> auto GenerateCodes ( ) { <nl> } ; <nl> for ( auto & key : audioKeys ) { <nl> codes . emplace ( key , [ audioFilters , key ] { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> return ; <nl> } <nl> <nl> FileDialog : : GetOpenPath ( Core : : App ( ) . getFileDialogParent ( ) , " Open audio file " , audioFilters , [ key ] ( const FileDialog : : OpenResult & result ) { <nl> - if ( AuthSession : : Exists ( ) & & ! result . paths . isEmpty ( ) ) { <nl> + if ( Main : : Session : : Exists ( ) & & ! result . paths . isEmpty ( ) ) { <nl> auto track = Media : : Audio : : Current ( ) . createTrack ( ) ; <nl> track - > fillFromFile ( result . paths . front ( ) ) ; <nl> if ( track - > failed ( ) ) { <nl> auto GenerateCodes ( ) { <nl> } ) ; <nl> } <nl> codes . emplace ( qsl ( " sounds_reset " ) , [ ] { <nl> - if ( AuthSession : : Exists ( ) ) { <nl> + if ( Main : : Session : : Exists ( ) ) { <nl> Auth ( ) . settings ( ) . clearSoundOverrides ( ) ; <nl> Local : : writeUserSettings ( ) ; <nl> Ui : : show ( Box < InformBox > ( " All sound overrides were reset . " ) ) ; <nl> mmm a / Telegram / SourceFiles / settings / settings_common . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_common . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / abstract_box . h " <nl> # include " lang / lang_keys . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_settings . h " <nl> <nl> mmm a / Telegram / SourceFiles / settings / settings_information . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_information . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " info / profile / info_profile_values . h " <nl> # include " info / profile / info_profile_button . h " <nl> # include " lang / lang_keys . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " core / file_utilities . h " <nl> # include " styles / style_boxes . h " <nl> mmm a / Telegram / SourceFiles / settings / settings_main . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_main . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " lang / lang_keys . h " <nl> # include " storage / localstorage . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " window / window_session_controller . h " <nl> # include " core / file_utilities . h " <nl> void SetupHelp ( not_null < Ui : : VerticalLayout * > container ) { <nl> <nl> SetupFaq ( container ) ; <nl> <nl> - if ( AuthSession : : Exists ( ) ) { <nl> + if ( : : Main : : Session : : Exists ( ) ) { <nl> const auto button = AddButton ( <nl> container , <nl> tr : : lng_settings_ask_question ( ) , <nl> mmm a / Telegram / SourceFiles / settings / settings_notifications . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_notifications . cpp <nl> For license and copyright information please follow this link : <nl> # include " platform / platform_info . h " <nl> # include " mainwindow . h " <nl> # include " core / application . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_settings . h " <nl> # include " styles / style_boxes . h " <nl> mmm a / Telegram / SourceFiles / settings / settings_privacy_controllers . 
cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_privacy_controllers . cpp <nl> For license and copyright information please follow this link : <nl> # include " apiwrap . h " <nl> # include " observer_peer . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " storage / localstorage . h " <nl> # include " data / data_user . h " <nl> # include " data / data_session . h " <nl> mmm a / Telegram / SourceFiles / settings / settings_privacy_security . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_privacy_security . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " data / data_chat . h " <nl> # include " data / data_channel . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_settings . h " <nl> # include " styles / style_boxes . h " <nl> bool CheckEditCloudPassword ( ) { <nl> return false ; <nl> } <nl> <nl> - object_ptr < BoxContent > EditCloudPasswordBox ( not_null < AuthSession * > session ) { <nl> + object_ptr < BoxContent > EditCloudPasswordBox ( not_null < Main : : Session * > session ) { <nl> const auto current = session - > api ( ) . passwordStateCurrent ( ) ; <nl> Assert ( current . has_value ( ) ) ; <nl> <nl> mmm a / Telegram / SourceFiles / settings / settings_privacy_security . h <nl> ppp b / Telegram / SourceFiles / settings / settings_privacy_security . 
h <nl> namespace Settings { <nl> int ExceptionUsersCount ( const std : : vector < not_null < PeerData * > > & exceptions ) ; <nl> <nl> bool CheckEditCloudPassword ( ) ; <nl> - object_ptr < BoxContent > EditCloudPasswordBox ( not_null < AuthSession * > session ) ; <nl> + object_ptr < BoxContent > EditCloudPasswordBox ( <nl> + not_null < : : Main : : Session * > session ) ; <nl> void RemoveCloudPassword ( ) ; <nl> object_ptr < BoxContent > CloudPasswordAppOutdatedBox ( ) ; <nl> <nl> mmm a / Telegram / SourceFiles / storage / file_download . cpp <nl> ppp b / Telegram / SourceFiles / storage / file_download . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / localstorage . h " <nl> # include " platform / platform_file_utilities . h " <nl> # include " mtproto / connection . h " / / for MTP : : kAckSendWaiting <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " core / crash_reports . h " <nl> # include " base / bytes . h " <nl> FileLoader : : FileLoader ( <nl> Expects ( ! _filename . isEmpty ( ) | | ( _size < = Storage : : kMaxFileInMemory ) ) ; <nl> } <nl> <nl> - AuthSession & FileLoader : : session ( ) const { <nl> + Main : : Session & FileLoader : : session ( ) const { <nl> return _downloader - > api ( ) . session ( ) ; <nl> } <nl> <nl> void FileLoader : : cancel ( bool fail ) { <nl> _file . setFileName ( _filename ) ; <nl> } <nl> <nl> - / / Current cancel ( ) call could be made from ~ AuthSession ( ) . <nl> + / / Current cancel ( ) call could be made from ~ Main : : Session ( ) . <nl> crl : : on_main ( sessionGuard , [ = ] { LoadNextFromQueue ( queue ) ; } ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / storage / file_download . h <nl> ppp b / Telegram / SourceFiles / storage / file_download . h <nl> For license and copyright information please follow this link : <nl> # include " data / data_file_origin . 
h " <nl> <nl> class ApiWrap ; <nl> - class AuthSession ; <nl> + <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Storage { <nl> namespace Cache { <nl> class FileLoader : public QObject { <nl> bool autoLoading , <nl> uint8 cacheTag ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> bool finished ( ) const { <nl> return _finished ; <nl> mmm a / Telegram / SourceFiles / storage / file_upload . cpp <nl> ppp b / Telegram / SourceFiles / storage / file_upload . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_document . h " <nl> # include " data / data_photo . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Storage { <nl> namespace { <nl> mmm a / Telegram / SourceFiles / storage / localimageloader . cpp <nl> ppp b / Telegram / SourceFiles / storage / localimageloader . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / storage_media_prepare . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace { <nl> <nl> mmm a / Telegram / SourceFiles / storage / localstorage . cpp <nl> ppp b / Telegram / SourceFiles / storage / localstorage . cpp <nl> For license and copyright information please follow this link : <nl> # include " mtproto / dc_options . h " <nl> # include " core / application . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " window / window_session_controller . h " <nl> # include " base / flags . h " <nl> # include " data / data_session . 
h " <nl> enum { <nl> dbiDcOptions = 0x4a , <nl> dbiMtpAuthorization = 0x4b , <nl> dbiLastSeenWarningSeenOld = 0x4c , <nl> - dbiAuthSessionSettings = 0x4d , <nl> + dbiSessionSettings = 0x4d , <nl> dbiLangPackKey = 0x4e , <nl> dbiConnectionType = 0x4f , <nl> dbiStickersFavedLimit = 0x50 , <nl> enum class WriteMapWhen { <nl> Soon , <nl> } ; <nl> <nl> - std : : unique_ptr < AuthSessionSettings > StoredAuthSessionCache ; <nl> - AuthSessionSettings & GetStoredAuthSessionCache ( ) { <nl> - if ( ! StoredAuthSessionCache ) { <nl> - StoredAuthSessionCache = std : : make_unique < AuthSessionSettings > ( ) ; <nl> + std : : unique_ptr < Main : : Settings > StoredSessionSettings ; <nl> + Main : : Settings & GetStoredSessionSettings ( ) { <nl> + if ( ! StoredSessionSettings ) { <nl> + StoredSessionSettings = std : : make_unique < Main : : Settings > ( ) ; <nl> } <nl> - return * StoredAuthSessionCache ; <nl> + return * StoredSessionSettings ; <nl> } <nl> <nl> void _writeMap ( WriteMapWhen when = WriteMapWhen : : Soon ) ; <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> <nl> DEBUG_LOG ( ( " MTP Info : user found , dc % 1 , uid % 2 " ) . arg ( dcId ) . arg ( userId ) ) ; <nl> Core : : App ( ) . activeAccount ( ) . setMtpMainDcId ( dcId ) ; <nl> - Core : : App ( ) . activeAccount ( ) . setAuthSessionUserId ( userId ) ; <nl> + Core : : App ( ) . activeAccount ( ) . setSessionUserId ( userId ) ; <nl> } break ; <nl> <nl> case dbiKey : { <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> if ( ! _checkStreamStatus ( stream ) ) return false ; <nl> <nl> using namespace Data : : AutoDownload ; <nl> - auto & settings = GetStoredAuthSessionCache ( ) . autoDownload ( ) ; <nl> + auto & settings = GetStoredSessionSettings ( ) . autoDownload ( ) ; <nl> const auto disabled = [ ] ( qint32 value , qint32 mask ) { <nl> return ( value & mask ) ! 
= 0 ; <nl> } ; <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> stream > > v ; <nl> if ( ! _checkStreamStatus ( stream ) ) return false ; <nl> <nl> - GetStoredAuthSessionCache ( ) . setIncludeMutedCounter ( v = = 1 ) ; <nl> + GetStoredSessionSettings ( ) . setIncludeMutedCounter ( v = = 1 ) ; <nl> } break ; <nl> <nl> case dbiShowingSavedGifsOld : { <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> stream > > v ; <nl> if ( ! _checkStreamStatus ( stream ) ) return false ; <nl> <nl> - GetStoredAuthSessionCache ( ) . setDialogsWidthRatio ( v / 1000000 . ) ; <nl> + GetStoredSessionSettings ( ) . setDialogsWidthRatio ( v / 1000000 . ) ; <nl> } break ; <nl> <nl> case dbiLastSeenWarningSeenOld : { <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> stream > > v ; <nl> if ( ! _checkStreamStatus ( stream ) ) return false ; <nl> <nl> - GetStoredAuthSessionCache ( ) . setLastSeenWarningSeen ( v = = 1 ) ; <nl> + GetStoredSessionSettings ( ) . setLastSeenWarningSeen ( v = = 1 ) ; <nl> } break ; <nl> <nl> - case dbiAuthSessionSettings : { <nl> + case dbiSessionSettings : { <nl> QByteArray v ; <nl> stream > > v ; <nl> if ( ! _checkStreamStatus ( stream ) ) return false ; <nl> <nl> - GetStoredAuthSessionCache ( ) . constructFromSerialized ( v ) ; <nl> + GetStoredSessionSettings ( ) . constructFromSerialized ( v ) ; <nl> } break ; <nl> <nl> case dbiWorkMode : { <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> & & unchecked ! = SendSettings : : CtrlEnter ) { <nl> return false ; <nl> } <nl> - GetStoredAuthSessionCache ( ) . setSendSubmitWay ( unchecked ) ; <nl> + GetStoredSessionSettings ( ) . 
setSendSubmitWay ( unchecked ) ; <nl> } break ; <nl> <nl> case dbiCatsAndDogs : { / / deprecated <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> stream > > v ; <nl> if ( ! _checkStreamStatus ( stream ) ) return false ; <nl> <nl> - GetStoredAuthSessionCache ( ) . setSendFilesWay ( ( v = = 1 ) <nl> + GetStoredSessionSettings ( ) . setSendFilesWay ( ( v = = 1 ) <nl> ? SendFilesWay : : Album <nl> : SendFilesWay : : Files ) ; <nl> } break ; <nl> void _writeUserSettings ( ) { <nl> recentEmojiPreloadData . push_back ( qMakePair ( item . first - > id ( ) , item . second ) ) ; <nl> } <nl> } <nl> - auto userDataInstance = StoredAuthSessionCache <nl> - ? StoredAuthSessionCache . get ( ) <nl> - : Core : : App ( ) . activeAccount ( ) . getAuthSessionSettings ( ) ; <nl> + auto userDataInstance = StoredSessionSettings <nl> + ? StoredSessionSettings . get ( ) <nl> + : Core : : App ( ) . activeAccount ( ) . getSessionSettings ( ) ; <nl> auto userData = userDataInstance <nl> ? userDataInstance - > serialize ( ) <nl> : QByteArray ( ) ; <nl> void _writeUserSettings ( ) { <nl> data . stream < < quint32 ( dbiUseExternalVideoPlayer ) < < qint32 ( cUseExternalVideoPlayer ( ) ) ; <nl> data . stream < < quint32 ( dbiCacheSettings ) < < qint64 ( _cacheTotalSizeLimit ) < < qint32 ( _cacheTotalTimeLimit ) < < qint64 ( _cacheBigFileTotalSizeLimit ) < < qint32 ( _cacheBigFileTotalTimeLimit ) ; <nl> if ( ! userData . isEmpty ( ) ) { <nl> - data . stream < < quint32 ( dbiAuthSessionSettings ) < < userData ; <nl> + data . stream < < quint32 ( dbiSessionSettings ) < < userData ; <nl> } <nl> data . stream < < quint32 ( dbiPlaybackSpeed ) < < qint32 ( Global : : VoiceMsgPlaybackDoubled ( ) ? 2 : 1 ) ; <nl> <nl> ReadMapState _readMap ( const QByteArray & pass ) { <nl> _readMtpData ( ) ; <nl> <nl> DEBUG_LOG ( ( " selfSerialized set : % 1 " ) . arg ( selfSerialized . size ( ) ) ) ; <nl> - Core : : App ( ) . activeAccount ( ) . 
setAuthSessionFromStorage ( <nl> - std : : move ( StoredAuthSessionCache ) , <nl> + Core : : App ( ) . activeAccount ( ) . setSessionFromStorage ( <nl> + std : : move ( StoredSessionSettings ) , <nl> std : : move ( selfSerialized ) , <nl> _oldMapVersion ) ; <nl> <nl> void _writeMap ( WriteMapWhen when ) { <nl> <nl> uint32 mapSize = 0 ; <nl> const auto self = [ ] { <nl> - if ( ! AuthSession : : Exists ( ) ) { <nl> + if ( ! Main : : Session : : Exists ( ) ) { <nl> DEBUG_LOG ( ( " AuthSelf Warning : Session does not exist . " ) ) ; <nl> return QByteArray ( ) ; <nl> } <nl> void reset ( ) { <nl> _cacheTotalTimeLimit = Database : : Settings ( ) . totalTimeLimit ; <nl> _cacheBigFileTotalSizeLimit = Database : : Settings ( ) . totalSizeLimit ; <nl> _cacheBigFileTotalTimeLimit = Database : : Settings ( ) . totalTimeLimit ; <nl> - StoredAuthSessionCache . reset ( ) ; <nl> + StoredSessionSettings . reset ( ) ; <nl> _mapChanged = true ; <nl> _writeMap ( WriteMapWhen : : Now ) ; <nl> <nl> mmm a / Telegram / SourceFiles / storage / localstorage . h <nl> ppp b / Telegram / SourceFiles / storage / localstorage . h <nl> For license and copyright information please follow this link : <nl> # include " storage / file_download . h " <nl> # include " storage / cache / storage_cache_database . h " <nl> # include " storage / localimageloader . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Data { <nl> class WallPaper ; <nl> mmm a / Telegram / SourceFiles / storage / serialize_common . cpp <nl> ppp b / Telegram / SourceFiles / storage / serialize_common . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " storage / serialize_common . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_channel . h " <nl> # include " data / data_chat . h " <nl> # include " data / data_user . 
h " <nl> mmm a / Telegram / SourceFiles / storage / serialize_document . cpp <nl> ppp b / Telegram / SourceFiles / storage / serialize_document . cpp <nl> For license and copyright information please follow this link : <nl> # include " chat_helpers / stickers . h " <nl> # include " data / data_session . h " <nl> # include " ui / image / image . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace { <nl> <nl> mmm a / Telegram / SourceFiles / support / support_autocomplete . cpp <nl> ppp b / Telegram / SourceFiles / support / support_autocomplete . cpp <nl> For license and copyright information please follow this link : <nl> # include " lang / lang_keys . h " <nl> # include " data / data_session . h " <nl> # include " base / unixtime . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_chat_helpers . h " <nl> # include " styles / style_window . h " <nl> AdminLog : : OwnedItem GenerateContactItem ( <nl> <nl> } / / namespace <nl> <nl> - Autocomplete : : Autocomplete ( QWidget * parent , not_null < AuthSession * > session ) <nl> + Autocomplete : : Autocomplete ( QWidget * parent , not_null < Main : : Session * > session ) <nl> : RpWidget ( parent ) <nl> , _session ( session ) { <nl> setupContent ( ) ; <nl> mmm a / Telegram / SourceFiles / support / support_autocomplete . h <nl> ppp b / Telegram / SourceFiles / support / support_autocomplete . h <nl> For license and copyright information please follow this link : <nl> # include " history / view / history_view_element . h " <nl> # include " history / history . 
h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Ui { <nl> class ScrollArea ; <nl> struct Contact { <nl> <nl> class Autocomplete : public Ui : : RpWidget { <nl> public : <nl> - Autocomplete ( QWidget * parent , not_null < AuthSession * > session ) ; <nl> + Autocomplete ( QWidget * parent , not_null < Main : : Session * > session ) ; <nl> <nl> void activate ( not_null < Ui : : InputField * > field ) ; <nl> void deactivate ( ) ; <nl> class Autocomplete : public Ui : : RpWidget { <nl> void setupContent ( ) ; <nl> void submitValue ( const QString & value ) ; <nl> <nl> - not_null < AuthSession * > _session ; <nl> + not_null < Main : : Session * > _session ; <nl> Fn < void ( ) > _activate ; <nl> Fn < void ( ) > _deactivate ; <nl> Fn < void ( int delta ) > _moveSelection ; <nl> mmm a / Telegram / SourceFiles / support / support_helper . cpp <nl> ppp b / Telegram / SourceFiles / support / support_helper . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / storage_media_prepare . h " <nl> # include " storage / localimageloader . h " <nl> # include " core / sandbox . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " observer_peer . h " <nl> # include " apiwrap . h " <nl> # include " styles / style_boxes . h " <nl> TimeId OccupiedBySomeoneTill ( History * history ) { <nl> <nl> } / / namespace <nl> <nl> - Helper : : Helper ( not_null < AuthSession * > session ) <nl> + Helper : : Helper ( not_null < Main : : Session * > session ) <nl> : _session ( session ) <nl> , _templates ( _session ) <nl> , _reoccupyTimer ( [ = ] { reoccupy ( ) ; } ) <nl> Helper : : Helper ( not_null < AuthSession * > session ) <nl> } ) . 
send ( ) ; <nl> } <nl> <nl> - std : : unique_ptr < Helper > Helper : : Create ( not_null < AuthSession * > session ) { <nl> + std : : unique_ptr < Helper > Helper : : Create ( not_null < Main : : Session * > session ) { <nl> / / return std : : make_unique < Helper > ( session ) ; AssertIsDebug ( ) ; <nl> const auto valid = session - > user ( ) - > phone ( ) . startsWith ( qstr ( " 424 " ) ) ; <nl> return valid ? std : : make_unique < Helper > ( session ) : nullptr ; <nl> mmm a / Telegram / SourceFiles / support / support_helper . h <nl> ppp b / Telegram / SourceFiles / support / support_helper . h <nl> For license and copyright information please follow this link : <nl> # include " support / support_templates . h " <nl> # include " mtproto / sender . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Window { <nl> class SessionController ; <nl> inline bool operator ! = ( const UserInfo & a , const UserInfo & b ) { <nl> <nl> class Helper : private MTP : : Sender { <nl> public : <nl> - explicit Helper ( not_null < AuthSession * > session ) ; <nl> + explicit Helper ( not_null < Main : : Session * > session ) ; <nl> <nl> - static std : : unique_ptr < Helper > Create ( not_null < AuthSession * > session ) ; <nl> + static std : : unique_ptr < Helper > Create ( not_null < Main : : Session * > session ) ; <nl> <nl> void registerWindow ( not_null < Window : : SessionController * > controller ) ; <nl> void cloudDraftChanged ( not_null < History * > history ) ; <nl> class Helper : private MTP : : Sender { <nl> TextWithEntities text , <nl> Fn < void ( bool success ) > done ) ; <nl> <nl> - not_null < AuthSession * > _session ; <nl> + not_null < Main : : Session * > _session ; <nl> Templates _templates ; <nl> QString _supportName ; <nl> QString _supportNameNormalized ; <nl> mmm a / Telegram / SourceFiles / support / support_templates . 
cpp <nl> ppp b / Telegram / SourceFiles / support / support_templates . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / toast / toast . h " <nl> # include " data / data_session . h " <nl> # include " core / shortcuts . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Support { <nl> namespace details { <nl> struct Templates : : Updates { <nl> std : : map < QString , QNetworkReply * > requests ; <nl> } ; <nl> <nl> - Templates : : Templates ( not_null < AuthSession * > session ) : _session ( session ) { <nl> + Templates : : Templates ( not_null < Main : : Session * > session ) : _session ( session ) { <nl> load ( ) ; <nl> Shortcuts : : Requests ( <nl> ) | rpl : : start_with_next ( [ = ] ( not_null < Shortcuts : : Request * > request ) { <nl> mmm a / Telegram / SourceFiles / support / support_templates . h <nl> ppp b / Telegram / SourceFiles / support / support_templates . h <nl> For license and copyright information please follow this link : <nl> <nl> # include " base / binary_guard . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Support { <nl> namespace details { <nl> struct TemplatesIndex { <nl> <nl> class Templates : public base : : has_weak_ptr { <nl> public : <nl> - explicit Templates ( not_null < AuthSession * > session ) ; <nl> + explicit Templates ( not_null < Main : : Session * > session ) ; <nl> <nl> void reload ( ) ; <nl> <nl> class Templates : public base : : has_weak_ptr { <nl> void checkUpdateFinished ( ) ; <nl> void setData ( details : : TemplatesData & & data ) ; <nl> <nl> - not_null < AuthSession * > _session ; <nl> + not_null < Main : : Session * > _session ; <nl> <nl> details : : TemplatesData _data ; <nl> details : : TemplatesIndex _index ; <nl> mmm a / Telegram / SourceFiles / ui / emoji_config . cpp <nl> ppp b / Telegram / SourceFiles / ui / emoji_config . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " base / bytes . h " <nl> # include " base / openssl_help . h " <nl> # include " base / parse_helper . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Ui { <nl> namespace Emoji { <nl> mmm a / Telegram / SourceFiles / ui / image / image . cpp <nl> ppp b / Telegram / SourceFiles / ui / image / image . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_session . h " <nl> # include " data / data_file_origin . h " <nl> # include " chat_helpers / stickers . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> using namespace Images ; <nl> <nl> mmm a / Telegram / SourceFiles / ui / image / image_location . cpp <nl> ppp b / Telegram / SourceFiles / ui / image / image_location . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / serialize_common . h " <nl> # include " data / data_file_origin . h " <nl> # include " base / overload . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace { <nl> <nl> mmm a / Telegram / SourceFiles / ui / image / image_source . cpp <nl> ppp b / Telegram / SourceFiles / ui / image / image_source . cpp <nl> For license and copyright information please follow this link : <nl> # include " storage / cache / storage_cache_database . h " <nl> # include " history / history_item . h " <nl> # include " history / history . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Images { <nl> <nl> mmm a / Telegram / SourceFiles / ui / special_buttons . cpp <nl> ppp b / Telegram / SourceFiles / ui / special_buttons . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / confirm_box . 
h " <nl> # include " window / window_session_controller . h " <nl> # include " lang / lang_keys . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " mainwidget . h " <nl> # include " observer_peer . h " <nl> mmm a / Telegram / SourceFiles / ui / text / text_entity . cpp <nl> ppp b / Telegram / SourceFiles / ui / text / text_entity . cpp <nl> For license and copyright information please follow this link : <nl> * / <nl> # include " ui / text / text_entity . h " <nl> <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " lang / lang_tag . h " <nl> # include " base / qthelp_url . h " <nl> # include " ui / emoji_config . h " <nl> mmm a / Telegram / SourceFiles / ui / widgets / input_fields . cpp <nl> ppp b / Telegram / SourceFiles / ui / widgets / input_fields . cpp <nl> For license and copyright information please follow this link : <nl> # include " data / data_countries . h " / / Data : : ValidPhoneCode <nl> # include " mainwindow . h " <nl> # include " numbers . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " core / application . h " <nl> <nl> namespace Ui { <nl> void PhoneInput : : focusInEvent ( QFocusEvent * e ) { <nl> <nl> void PhoneInput : : clearText ( ) { <nl> QString phone ; <nl> - if ( AuthSession : : Exists ( ) ) { <nl> + if ( Main : : Session : : Exists ( ) ) { <nl> const auto self = Auth ( ) . user ( ) ; <nl> QVector < int > newPattern = phoneNumberParse ( self - > phone ( ) ) ; <nl> if ( ! newPattern . isEmpty ( ) ) { <nl> mmm a / Telegram / SourceFiles / window / layer_widget . cpp <nl> ppp b / Telegram / SourceFiles / window / layer_widget . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / emoji_config . h " <nl> # include " window / window_main_menu . h " <nl> # include " lottie / lottie_single_player . 
h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " chat_helpers / stickers . h " <nl> # include " window / window_session_controller . h " <nl> # include " styles / style_boxes . h " <nl> mmm a / Telegram / SourceFiles / window / main_window . cpp <nl> ppp b / Telegram / SourceFiles / window / main_window . cpp <nl> For license and copyright information please follow this link : <nl> # include " window / window_outdated_bar . h " <nl> # include " window / window_controller . h " <nl> # include " boxes / confirm_box . h " <nl> - # include " main / main_account . h " / / Account : : authSessionValue . <nl> + # include " main / main_account . h " / / Account : : sessionValue . <nl> # include " core / click_handler_types . h " <nl> # include " core / application . h " <nl> # include " core / sandbox . h " <nl> # include " lang / lang_keys . h " <nl> # include " data / data_session . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " mainwindow . h " <nl> # include " styles / style_window . h " <nl> mmm a / Telegram / SourceFiles / window / notifications_manager . cpp <nl> ppp b / Telegram / SourceFiles / window / notifications_manager . cpp <nl> For license and copyright information please follow this link : <nl> # include " mainwindow . h " <nl> # include " mainwidget . h " <nl> # include " apiwrap . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . 
h " <nl> <nl> namespace Window { <nl> namespace Notifications { <nl> constexpr auto kWaitingForAllGroupedDelay = crl : : time ( 1000 ) ; <nl> <nl> } / / namespace <nl> <nl> - System : : System ( not_null < AuthSession * > session ) <nl> + System : : System ( not_null < Main : : Session * > session ) <nl> : _session ( session ) <nl> , _waitTimer ( [ = ] { showNext ( ) ; } ) <nl> , _waitForAllGroupedTimer ( [ = ] { showGrouped ( ) ; } ) { <nl> void System : : schedule ( <nl> not_null < HistoryItem * > item ) { <nl> if ( App : : quitting ( ) <nl> | | ! history - > currentNotification ( ) <nl> - | | ! AuthSession : : Exists ( ) ) return ; <nl> + | | ! Main : : Session : : Exists ( ) ) return ; <nl> <nl> const auto notifyBy = ( ! history - > peer - > isUser ( ) & & item - > mentionsMe ( ) ) <nl> ? item - > from ( ) . get ( ) <nl> mmm a / Telegram / SourceFiles / window / notifications_manager . h <nl> ppp b / Telegram / SourceFiles / window / notifications_manager . h <nl> For license and copyright information please follow this link : <nl> <nl> # include " base / timer . 
h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Platform { <nl> namespace Notifications { <nl> class Manager ; <nl> <nl> class System final : private base : : Subscriber { <nl> public : <nl> - explicit System ( not_null < AuthSession * > session ) ; <nl> + explicit System ( not_null < Main : : Session * > session ) ; <nl> <nl> void createManager ( ) ; <nl> <nl> class System final : private base : : Subscriber { <nl> return _settingsChanged ; <nl> } <nl> <nl> - AuthSession & session ( ) const { <nl> + Main : : Session & session ( ) const { <nl> return * _session ; <nl> } <nl> <nl> class System final : private base : : Subscriber { <nl> void showGrouped ( ) ; <nl> void ensureSoundCreated ( ) ; <nl> <nl> - not_null < AuthSession * > _session ; <nl> + not_null < Main : : Session * > _session ; <nl> <nl> QMap < History * , QMap < MsgId , crl : : time > > _whenMaps ; <nl> <nl> mmm a / Telegram / SourceFiles / window / notifications_manager_default . cpp <nl> ppp b / Telegram / SourceFiles / window / notifications_manager_default . cpp <nl> For license and copyright information please follow this link : <nl> # include " styles / style_boxes . h " <nl> # include " styles / style_window . h " <nl> # include " storage / file_download . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " history / history . h " <nl> # include " history / history_item . h " <nl> # include " platform / platform_specific . h " <nl> mmm a / Telegram / SourceFiles / window / section_widget . cpp <nl> ppp b / Telegram / SourceFiles / window / section_widget . 
cpp <nl> For license and copyright information please follow this link : <nl> <nl> namespace Window { <nl> <nl> - AuthSession & AbstractSectionWidget : : session ( ) const { <nl> + Main : : Session & AbstractSectionWidget : : session ( ) const { <nl> return _controller - > session ( ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / window / section_widget . h <nl> ppp b / Telegram / SourceFiles / window / section_widget . h <nl> For license and copyright information please follow this link : <nl> # include " ui / rp_widget . h " <nl> # include " dialogs / dialogs_key . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Window { <nl> <nl> class AbstractSectionWidget <nl> , _controller ( controller ) { <nl> } <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> / / Float player interface . <nl> virtual bool wheelEventFromFloatPlayer ( QEvent * e ) { <nl> mmm a / Telegram / SourceFiles / window / themes / window_theme . cpp <nl> ppp b / Telegram / SourceFiles / window / themes / window_theme . cpp <nl> For license and copyright information please follow this link : <nl> <nl> # include " window / themes / window_theme_preview . h " <nl> # include " mainwidget . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " storage / localstorage . h " <nl> # include " storage / localimageloader . h " <nl> void ChatBackground : : start ( ) { <nl> } <nl> <nl> Core : : App ( ) . activeAccount ( ) . sessionValue ( <nl> - ) | rpl : : filter ( [ = ] ( AuthSession * session ) { <nl> + ) | rpl : : filter ( [ = ] ( Main : : Session * session ) { <nl> return session ! 
= _session ; <nl> - } ) | rpl : : start_with_next ( [ = ] ( AuthSession * session ) { <nl> + } ) | rpl : : start_with_next ( [ = ] ( Main : : Session * session ) { <nl> _session = session ; <nl> checkUploadWallPaper ( ) ; <nl> } , _lifetime ) ; <nl> mmm a / Telegram / SourceFiles / window / themes / window_theme . h <nl> ppp b / Telegram / SourceFiles / window / themes / window_theme . h <nl> For license and copyright information please follow this link : <nl> <nl> # include " data / data_wall_paper . h " <nl> <nl> - class AuthSession ; <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> <nl> namespace Window { <nl> namespace Theme { <nl> class ChatBackground <nl> friend void KeepApplied ( ) ; <nl> friend bool IsNonDefaultBackground ( ) ; <nl> <nl> - AuthSession * _session = nullptr ; <nl> + Main : : Session * _session = nullptr ; <nl> Data : : WallPaper _paper = Data : : details : : UninitializedWallPaper ( ) ; <nl> std : : optional < QColor > _paperColor ; <nl> QImage _original ; <nl> mmm a / Telegram / SourceFiles / window / window_controller . cpp <nl> ppp b / Telegram / SourceFiles / window / window_controller . cpp <nl> Controller : : Controller ( not_null < Main : : Account * > account ) <nl> : _account ( account ) <nl> , _widget ( this ) { <nl> _account - > sessionValue ( <nl> - ) | rpl : : start_with_next ( [ = ] ( AuthSession * session ) { <nl> + ) | rpl : : start_with_next ( [ = ] ( Main : : Session * session ) { <nl> _sessionController = session <nl> ? std : : make_unique < SessionController > ( session , & _widget ) <nl> : nullptr ; <nl> mmm a / Telegram / SourceFiles / window / window_lock_widgets . cpp <nl> ppp b / Telegram / SourceFiles / window / window_lock_widgets . cpp <nl> For license and copyright information please follow this link : <nl> # include " styles / style_boxes . h " <nl> # include " window / window_slide_animation . h " <nl> # include " window / window_session_controller . 
h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> <nl> namespace Window { <nl> <nl> mmm a / Telegram / SourceFiles / window / window_main_menu . cpp <nl> ppp b / Telegram / SourceFiles / window / window_main_menu . cpp <nl> For license and copyright information please follow this link : <nl> # include " lang / lang_keys . h " <nl> # include " core / click_handler_types . h " <nl> # include " observer_peer . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " data / data_folder . h " <nl> # include " data / data_session . h " <nl> # include " data / data_user . h " <nl> mmm a / Telegram / SourceFiles / window / window_peer_menu . cpp <nl> ppp b / Telegram / SourceFiles / window / window_peer_menu . cpp <nl> For license and copyright information please follow this link : <nl> # include " ui / text / text_utilities . h " <nl> # include " ui / widgets / labels . h " <nl> # include " ui / widgets / checkbox . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> mmm a / Telegram / SourceFiles / window / window_session_controller . cpp <nl> ppp b / Telegram / SourceFiles / window / window_session_controller . cpp <nl> For license and copyright information please follow this link : <nl> # include " boxes / calendar_box . h " <nl> # include " mainwidget . h " <nl> # include " mainwindow . h " <nl> - # include " auth_session . h " <nl> + # include " main / main_session . h " <nl> # include " apiwrap . h " <nl> # include " support / support_helper . h " <nl> # include " styles / style_window . 
h " <nl> void DateClickHandler : : onClick ( ClickContext context ) const { <nl> App : : wnd ( ) - > sessionController ( ) - > showJumpToDate ( _chat , _date ) ; <nl> } <nl> <nl> - SessionNavigation : : SessionNavigation ( not_null < AuthSession * > session ) <nl> + SessionNavigation : : SessionNavigation ( not_null < Main : : Session * > session ) <nl> : _session ( session ) { <nl> } <nl> <nl> - AuthSession & SessionNavigation : : session ( ) const { <nl> + Main : : Session & SessionNavigation : : session ( ) const { <nl> return * _session ; <nl> } <nl> <nl> void SessionNavigation : : showSettings ( const SectionShow & params ) { <nl> } <nl> <nl> SessionController : : SessionController ( <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> not_null < MainWindow * > window ) <nl> : SessionNavigation ( session ) <nl> , _window ( window ) { <nl> mmm a / Telegram / SourceFiles / window / window_session_controller . h <nl> ppp b / Telegram / SourceFiles / window / window_session_controller . h <nl> For license and copyright information please follow this link : <nl> # include " base / observer . h " <nl> # include " dialogs / dialogs_key . 
h " <nl> <nl> - class AuthSession ; <nl> class MainWidget ; <nl> class HistoryMessage ; <nl> class HistoryService ; <nl> <nl> + namespace Main { <nl> + class Session ; <nl> + } / / namespace Main <nl> + <nl> namespace Settings { <nl> enum class Type ; <nl> } / / namespace Settings <nl> class SessionController ; <nl> <nl> class SessionNavigation { <nl> public : <nl> - explicit SessionNavigation ( not_null < AuthSession * > session ) ; <nl> + explicit SessionNavigation ( not_null < Main : : Session * > session ) ; <nl> <nl> - AuthSession & session ( ) const ; <nl> + Main : : Session & session ( ) const ; <nl> <nl> virtual void showSection ( <nl> SectionMemento & & memento , <nl> class SessionNavigation { <nl> virtual ~ SessionNavigation ( ) = default ; <nl> <nl> private : <nl> - const not_null < AuthSession * > _session ; <nl> + const not_null < Main : : Session * > _session ; <nl> <nl> } ; <nl> <nl> class SessionController <nl> , private base : : Subscriber { <nl> public : <nl> SessionController ( <nl> - not_null < AuthSession * > session , <nl> + not_null < Main : : Session * > session , <nl> not_null < MainWindow * > window ) ; <nl> <nl> not_null < MainWindow * > window ( ) const { <nl> mmm a / Telegram / gyp / telegram_sources . txt <nl> ppp b / Telegram / gyp / telegram_sources . txt <nl> <nl> < ( src_loc ) / lang / lang_values . h <nl> < ( src_loc ) / main / main_account . cpp <nl> < ( src_loc ) / main / main_account . h <nl> + < ( src_loc ) / main / main_session . cpp <nl> + < ( src_loc ) / main / main_session . h <nl> < ( src_loc ) / media / audio / media_audio . cpp <nl> < ( src_loc ) / media / audio / media_audio . h <nl> < ( src_loc ) / media / audio / media_audio_capture . cpp <nl> <nl> < ( src_loc ) / apiwrap . h <nl> < ( src_loc ) / app . cpp <nl> < ( src_loc ) / app . h <nl> - < ( src_loc ) / auth_session . cpp <nl> - < ( src_loc ) / auth_session . h <nl> < ( src_loc ) / config . h <nl> < ( src_loc ) / facades . cpp <nl> < ( src_loc ) / facades . h <nl>
Rename AuthSession to Main : : Session .
telegramdesktop/tdesktop
ff44094ded20bdfffa2b411ca0a11333f2752f66
2019-07-24T11:45:24Z
mmm a / test / Driver / advanced_output_file_map . swift <nl> ppp b / test / Driver / advanced_output_file_map . swift <nl> <nl> / / BINDINGS - ENA : # " x86_64 - apple - macosx10 . 9 " - " ld { { ( . exe ) ? } } " , inputs : [ " . / obj / advanced_output_file_map . o " , " . / obj / main . o " , " . / obj / lib . o " , " . / OutputFileMap . swiftmodule " ] , output : { image : " . / advanced_output_file_map . out " } <nl> / / BINDINGS - ENA : # " x86_64 - apple - macosx10 . 9 " - " dsymutil { { ( \ . exe ) ? } } " , inputs : [ " . / advanced_output_file_map . out " ] , output : { dSYM : " . / advanced_output_file_map . out . dSYM " } <nl> <nl> - / / Defaulting to : - disable - only - one - dependency - file <nl> + / / Defaulting to : - enable - only - one - dependency - file <nl> <nl> / / RUN : % swiftc_driver - driver - print - output - file - map - target x86_64 - apple - macosx10 . 9 - emit - executable - emit - module - serialize - diagnostics % / s % / S / Inputs / main . swift % / S / Inputs / lib . swift - g - o . / advanced_output_file_map . out - emit - module - path . / OutputFileMap . swiftmodule - module - name OutputFileMap - output - file - map % t / ofm . json 2 > & 1 | % FileCheck % / s - check - prefix = DUMPOFM - DIS <nl> <nl> <nl> / / RUN : % swiftc_driver - driver - print - bindings - target x86_64 - apple - macosx10 . 9 - emit - executable - emit - module - serialize - diagnostics - emit - dependencies % / s % / S / Inputs / main . swift % / S / Inputs / lib . swift - g - o . / advanced_output_file_map . out - emit - module - path . / OutputFileMap . swiftmodule - module - name OutputFileMap - output - file - map % t / ofm . json 2 > & 1 | % FileCheck % / s - check - prefix = BINDINGS - DIS <nl> / / Should be no dummy files : <nl> / / RUN : test ! - e % t / d / advanced_output_file_map . d <nl> - / / RUN : test ! - e % t / d / main . d <nl> - / / RUN : test ! - e % t / d / lib . d <nl> + / / RUN : test - e % t / d / main . d - a ! 
- s % t / d / main . d <nl> + / / RUN : test - e % t / d / lib . d - a ! - s % t / d / lib . d <nl>
off - by - default & fix tests
apple/swift
78c7eecd9a2c1344a8ff96f5d22b4e73d171862e
2019-12-19T15:38:26Z
mmm a / xbmc / cores / AudioEngine / AEFactory . cpp <nl> ppp b / xbmc / cores / AudioEngine / AEFactory . cpp <nl> <nl> # endif <nl> <nl> # if defined ( HAS_PULSEAUDIO ) <nl> - # include " Engines / PulseAE . h " <nl> + # include " Engines / PulseAE / PulseAE . h " <nl> # endif <nl> <nl> IAE * CAEFactory : : AE = NULL ; <nl>
add missing PulseAE - folder in include
xbmc/xbmc
9471d32b345ed0eaa8d93fdb360aea4bca949f90
2012-05-12T13:05:04Z
new file mode 100755 <nl> index 00000000000 . . a7737d752e6 <nl> mmm / dev / null <nl> ppp b / tools / dockerfile / grpc_csharp_mono / build . sh <nl> <nl> + # ! / bin / bash <nl> + <nl> + cp - R / var / local / git - clone / grpc / var / local / git <nl> + <nl> + make install_grpc_csharp_ext - j12 - C / var / local / git / grpc <nl> + <nl> + cd / var / local / git / grpc / src / csharp & & mono / var / local / NuGet . exe restore Grpc . sln <nl> + <nl> + cd / var / local / git / grpc / src / csharp & & xbuild Grpc . sln <nl> + <nl> new file mode 100755 <nl> index 00000000000 . . fbbc61d5fa4 <nl> mmm / dev / null <nl> ppp b / tools / dockerfile / grpc_php / build . sh <nl> <nl> + # ! / bin / bash <nl> + <nl> + cp - R / var / local / git - clone / grpc / var / local / git <nl> + <nl> + make clean - C / var / local / git / grpc <nl> + <nl> + make install_c - j12 - C / var / local / git / grpc <nl> + <nl> + cd / var / local / git / grpc / src / php / ext / grpc & & git pull & & phpize <nl> + <nl> + cd / var / local / git / grpc / src / php / ext / grpc \ <nl> + & & . / configure \ <nl> + & & make <nl> + <nl> + cd / var / local / git / grpc / src / php & & composer install <nl> + <nl> + cd / var / local / git / grpc / src / php & & protoc - gen - php - i tests / interop / - o tests / interop / tests / interop / test . proto <nl> + <nl>
Merge pull request from donnadionne / test - log
grpc/grpc
5e9757bf0f8ada75068038c3e29d8b1e875d2ce3
2015-05-28T16:51:08Z
mmm a / xbmc / network / AirTunesServer . cpp <nl> ppp b / xbmc / network / AirTunesServer . cpp <nl> bool CAirTunesServer : : Initialize ( const std : : string & password ) <nl> if ( m_pLibShairplay - > Load ( ) ) <nl> { <nl> <nl> - raop_callbacks_t ao ; <nl> + raop_callbacks_t ao = { } ; <nl> ao . cls = m_pPipe ; <nl> ao . audio_init = AudioOutputFunctions : : audio_init ; <nl> ao . audio_set_volume = AudioOutputFunctions : : audio_set_volume ; <nl>
[ AirTunes ] - Don ' t leave audio_flush uninitialised
xbmc/xbmc
c94beb2e9994d1868cacffa55f3bbc877ae5446a
2014-08-15T19:48:02Z
mmm a / xbmc / addons / AddonManager . cpp <nl> ppp b / xbmc / addons / AddonManager . cpp <nl> std : : string CAddonMgr : : GetTranslatedString ( const cp_cfg_element_t * root , const c <nl> if ( ! root ) <nl> return " " ; <nl> <nl> - const cp_cfg_element_t * eng = NULL ; <nl> + std : : map < std : : string , std : : string > translatedValues ; <nl> for ( unsigned int i = 0 ; i < root - > num_children ; i + + ) <nl> { <nl> const cp_cfg_element_t & child = root - > children [ i ] ; <nl> if ( strcmp ( tag , child . name ) = = 0 ) <nl> - { / / see if we have a " lang " attribute <nl> + { <nl> + / / see if we have a " lang " attribute <nl> const char * lang = m_cpluff - > lookup_cfg_value ( ( cp_cfg_element_t * ) & child , " @ lang " ) ; <nl> - if ( lang ! = NULL & & g_langInfo . GetLocale ( ) . Matches ( lang ) ) <nl> - return child . value ? child . value : " " ; <nl> - if ( ! lang | | 0 = = strcmp ( lang , " en " ) ) <nl> - eng = & child ; <nl> + if ( lang ! = NULL & & <nl> + ( g_langInfo . GetLocale ( ) . Matches ( lang ) | | strcmp ( lang , " en " ) = = 0 ) ) <nl> + translatedValues . insert ( std : : make_pair ( lang , child . value ! = NULL ? child . value : " " ) ) ; <nl> + else if ( lang = = NULL ) <nl> + translatedValues . insert ( std : : make_pair ( " en " , child . value ! = NULL ? child . value : " " ) ) ; <nl> } <nl> } <nl> - return ( eng & & eng - > value ) ? eng - > value : " " ; <nl> + <nl> + / / put together a list of languages <nl> + std : : set < std : : string > languages ; <nl> + for ( auto const & translatedValue : translatedValues ) <nl> + languages . insert ( translatedValue . first ) ; <nl> + <nl> + / / find the language from the list that matches the current locale best <nl> + std : : string matchingLanguage = g_langInfo . GetLocale ( ) . FindBestMatch ( languages ) ; <nl> + if ( matchingLanguage . empty ( ) ) <nl> + matchingLanguage = " en " ; <nl> + <nl> + auto const & translatedValue = translatedValues . 
find ( matchingLanguage ) ; <nl> + if ( translatedValue ! = translatedValues . end ( ) ) <nl> + return translatedValue - > second ; <nl> + <nl> + return " " ; <nl> } <nl> <nl> AddonPtr CAddonMgr : : AddonFromProps ( AddonProps & addonProps ) <nl>
addons : find and use the best matching translation for an addon summary / description / disclaimer
xbmc/xbmc
0983bff12b047d6237d094c02c676670fbd0ddf6
2015-03-20T23:06:22Z
mmm a / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . cpp <nl> CDXVAContext : : CDXVAContext ( ) <nl> m_context = NULL ; <nl> m_refCount = 0 ; <nl> m_service = NULL ; <nl> + m_atiWorkaround = false ; <nl> } <nl> <nl> void CDXVAContext : : Release ( CDecoder * decoder ) <nl> bool CDXVAContext : : CreateContext ( ) <nl> { <nl> m_DXVA2CreateVideoService ( g_Windowing . Get3DDevice ( ) , IID_IDirectXVideoDecoderService , ( void * * ) & m_service ) ; <nl> QueryCaps ( ) ; <nl> + <nl> + / / Some older Ati devices can only open a single decoder at a given time <nl> + std : : string renderer = g_Windowing . GetRenderRenderer ( ) ; <nl> + if ( renderer . find ( " Radeon HD 2 " ) ! = std : : string : : npos | | <nl> + renderer . find ( " Radeon HD 3 " ) ! = std : : string : : npos ) <nl> + { <nl> + m_atiWorkaround = true ; <nl> + } <nl> + <nl> return true ; <nl> } <nl> <nl> bool CDXVAContext : : CreateDecoder ( GUID & inGuid , DXVA2_VideoDesc * format , const DX <nl> int retry = 0 ; <nl> while ( retry < 2 ) <nl> { <nl> - HRESULT res = m_service - > CreateVideoDecoder ( inGuid , format , config , surfaces , count , decoder ) ; <nl> - if ( ! FAILED ( res ) ) <nl> + if ( ! m_atiWorkaround | | retry > 0 ) <nl> { <nl> - return true ; <nl> + HRESULT res = m_service - > CreateVideoDecoder ( inGuid , format , config , surfaces , count , decoder ) ; <nl> + if ( ! FAILED ( res ) ) <nl> + { <nl> + return true ; <nl> + } <nl> } <nl> <nl> if ( retry = = 0 ) <nl> { <nl> - CLog : : Log ( LOGERROR , " % s - hw may not support multiple decoders , releasing existing ones " , __FUNCTION__ ) ; <nl> + CLog : : Log ( LOGNOTICE , " % s - hw may not support multiple decoders , releasing existing ones " , __FUNCTION__ ) ; <nl> std : : vector < CDecoder * > : : iterator it ; <nl> for ( it = m_decoders . begin ( ) ; it ! = m_decoders . 
end ( ) ; + + it ) <nl> { <nl> mmm a / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . h <nl> ppp b / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . h <nl> class CDXVAContext <nl> UINT m_input_count ; <nl> GUID * m_input_list ; <nl> std : : vector < CDecoder * > m_decoders ; <nl> + bool m_atiWorkaround ; <nl> } ; <nl> <nl> class CDecoder <nl>
dxva : never try to open more than a single hw decoder on ati HD 2xxx and HD3xxx
xbmc/xbmc
161060ec273937690760e56b756bbf64e86af61f
2014-09-19T13:05:46Z
mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> tf_http_archive ( <nl> name = " llvm " , <nl> build_file = clean_dep ( " / / third_party / llvm : llvm . autogenerated . BUILD " ) , <nl> - sha256 = " f4791ba3e166918bca82df34e2f854e8e188d6055888c64cb28743fd43f2d0d7 " , <nl> - strip_prefix = " llvm - b2a42b2112a511a5077fd747fb21e45349cff08d " , <nl> + sha256 = " ba61783c407c8decb2df1ca1c7019cbd0bb38419509ae8870fa7ce6bc30b6cfb " , <nl> + strip_prefix = " llvm - cdc3118297348c68b25dff92b504581f5dd78000 " , <nl> urls = [ <nl> - " https : / / mirror . bazel . build / github . com / llvm - mirror / llvm / archive / b2a42b2112a511a5077fd747fb21e45349cff08d . tar . gz " , <nl> - " https : / / github . com / llvm - mirror / llvm / archive / b2a42b2112a511a5077fd747fb21e45349cff08d . tar . gz " , <nl> + " https : / / mirror . bazel . build / github . com / llvm - mirror / llvm / archive / cdc3118297348c68b25dff92b504581f5dd78000 . tar . gz " , <nl> + " https : / / github . com / llvm - mirror / llvm / archive / cdc3118297348c68b25dff92b504581f5dd78000 . tar . gz " , <nl> ] , <nl> ) <nl> <nl>
[ TF : XLA ] Bump open source llvm revision to r347980
tensorflow/tensorflow
8f2fd6fde3236b034a3a7edcb48c5858bad0692e
2018-11-30T21:50:03Z
mmm a / modules / java / CMakeLists . txt <nl> ppp b / modules / java / CMakeLists . txt <nl> endif ( ANDROID ) <nl> # workarounding lack of ` __attribute__ ( ( visibility ( " default " ) ) ) ` in jni_md . h / JNIEXPORT <nl> string ( REPLACE " - fvisibility = hidden " " " CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } " ) <nl> <nl> - ocv_add_library ( $ { the_module } SHARED $ { handwritten_h_sources } $ { handwritten_cpp_sources } $ { generated_cpp_sources } <nl> + if ( ANDROID ) <nl> + # Android native code need to link with libopencv_java . so <nl> + ocv_add_library ( $ { the_module } SHARED $ { handwritten_h_sources } $ { handwritten_cpp_sources } $ { generated_cpp_sources } <nl> $ { copied_files } <nl> " $ { JAR_FILE } " " $ { JAR_FILE } . dephelper " ) <nl> + else ( ) <nl> + ocv_add_library ( $ { the_module } MODULE $ { handwritten_h_sources } $ { handwritten_cpp_sources } $ { generated_cpp_sources } <nl> + $ { copied_files } <nl> + " $ { JAR_FILE } " " $ { JAR_FILE } . dephelper " ) <nl> + endif ( ) <nl> <nl> if ( BUILD_FAT_JAVA_LIB ) <nl> set ( __deps $ { OPENCV_MODULE_ $ { the_module } _DEPS } $ { OPENCV_MODULES_BUILD } ) <nl> mmm a / modules / python / common . cmake <nl> ppp b / modules / python / common . cmake <nl> add_custom_command ( <nl> DEPENDS $ { CMAKE_CURRENT_BINARY_DIR } / headers . txt <nl> DEPENDS $ { opencv_hdrs } ) <nl> <nl> - ocv_add_library ( $ { the_module } SHARED $ { PYTHON_SOURCE_DIR } / src2 / cv2 . cpp $ { cv2_generated_hdrs } ) <nl> + ocv_add_library ( $ { the_module } MODULE $ { PYTHON_SOURCE_DIR } / src2 / cv2 . cpp $ { cv2_generated_hdrs } ) <nl> <nl> if ( PYTHON_DEBUG_LIBRARIES AND NOT PYTHON_LIBRARIES MATCHES " optimized . * debug " ) <nl> ocv_target_link_libraries ( $ { the_module } debug $ { PYTHON_DEBUG_LIBRARIES } optimized $ { PYTHON_LIBRARIES } ) <nl>
Merge pull request from alalek : fix_5019
opencv/opencv
aee03cd5e5602b34835142b807414050b48764b8
2015-12-14T19:52:32Z
mmm a / js / apps / system / _admin / aardvark / APP / frontend / js / routers / router . js <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / js / routers / router . js <nl> <nl> if ( this . queryView ) { <nl> this . queryView . resize ( ) ; <nl> } <nl> + if ( this . naviView ) { <nl> + this . naviView . resize ( ) ; <nl> + } <nl> if ( this . graphViewer2 ) { <nl> this . graphViewer2 . resize ( ) ; <nl> } <nl> mmm a / js / apps / system / _admin / aardvark / APP / frontend / js / views / navigationView . js <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / js / views / navigationView . js <nl> <nl> } ) ; <nl> } <nl> <nl> + self . resize ( ) ; <nl> + <nl> return this ; <nl> } , <nl> <nl> + resize : function ( ) { <nl> + / / set menu sizes - responsive <nl> + var height = $ ( window ) . height ( ) - $ ( ' . subMenuEntries ' ) . first ( ) . height ( ) ; <nl> + $ ( ' # navigationBar ' ) . css ( ' min - height ' , height ) ; <nl> + $ ( ' # navigationBar ' ) . css ( ' height ' , height ) ; <nl> + } , <nl> + <nl> navigateBySelect : function ( ) { <nl> var navigateTo = $ ( ' # arangoCollectionSelect ' ) . find ( ' option : selected ' ) . val ( ) ; <nl> window . App . navigate ( navigateTo , { trigger : true } ) ; <nl> mmm a / js / apps / system / _admin / aardvark / APP / frontend / scss / _login . scss <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / scss / _login . scss <nl> <nl> <nl> . wrong - credentials { <nl> color : $ c - negative ; <nl> - margin - top : - 30px ; <nl> + margin - left : - 20px ; <nl> + margin - top : - 24px ; <nl> + position : absolute ; <nl> text - align : center ; <nl> + width : 100 % ; <nl> } <nl> <nl> . login - space { <nl> mmm a / js / apps / system / _admin / aardvark / APP / frontend / scss / _screenSizes . scss <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / scss / _screenSizes . scss <nl> <nl> . 
social - icons { <nl> display : none ; <nl> } <nl> + <nl> + # navigationBar { <nl> + overflow : scroll ; <nl> + overflow - x : hidden ! important ; <nl> + } <nl> } <nl> <nl> @ media ( max - height : 525px ) { <nl>
fixed landscape smartphone navigation bug , improved login screen css
arangodb/arangodb
fe4268b759477b0802c07cb3397ed9a4bb130ee8
2016-07-22T20:12:27Z
mmm a / src / compiler / js - context - specialization . cc <nl> ppp b / src / compiler / js - context - specialization . cc <nl> namespace internal { <nl> namespace compiler { <nl> <nl> Reduction JSContextSpecialization : : Reduce ( Node * node ) { <nl> - DisallowHeapAccess no_heap_access ; <nl> switch ( node - > opcode ( ) ) { <nl> case IrOpcode : : kParameter : <nl> return ReduceParameter ( node ) ; <nl> mmm a / src / compiler / js - heap - broker . cc <nl> ppp b / src / compiler / js - heap - broker . cc <nl> class JSFunctionData : public JSObjectData { <nl> ObjectData * const shared ; <nl> <nl> JSFunctionData ( JSHeapBroker * broker_ , Handle < JSFunction > object_ , <nl> - HeapObjectType type_ ) <nl> - : JSObjectData ( broker_ , object_ , type_ ) , <nl> - global_proxy ( GET_OR_CREATE ( global_proxy ) ) , <nl> - prototype_or_initial_map ( object_ - > map ( ) - > has_prototype_slot ( ) <nl> - ? GET_OR_CREATE ( prototype_or_initial_map ) <nl> - : nullptr ) , <nl> - shared ( GET_OR_CREATE ( shared ) ) { } <nl> + HeapObjectType type_ ) ; <nl> } ; <nl> <nl> class JSRegExpData : public JSObjectData { } ; <nl> class ContextData : public HeapObjectData { <nl> } ; <nl> <nl> # define NATIVE_CONTEXT_DATA ( V ) \ <nl> + V ( array_function ) \ <nl> V ( fast_aliased_arguments_map ) \ <nl> V ( initial_array_iterator_map ) \ <nl> V ( iterator_result_map ) \ <nl> class AllocationSiteData : public HeapObjectData { } ; <nl> <nl> class MapData : public HeapObjectData { <nl> public : <nl> + InstanceType const instance_type ; <nl> int const instance_size ; <nl> byte const bit_field ; <nl> byte const bit_field2 ; <nl> uint32_t const bit_field3 ; <nl> <nl> - MapData ( JSHeapBroker * broker_ , Handle < Map > object_ , HeapObjectType type_ ) <nl> - : HeapObjectData ( broker_ , object_ , type_ ) , <nl> - instance_size ( object_ - > instance_size ( ) ) , <nl> - bit_field ( object_ - > bit_field ( ) ) , <nl> - bit_field2 ( object_ - > bit_field2 ( ) ) , <nl> - bit_field3 ( object_ - > 
bit_field3 ( ) ) { } <nl> + MapData ( JSHeapBroker * broker_ , Handle < Map > object_ , HeapObjectType type_ ) ; <nl> + <nl> + / / Extra information . <nl> + void SerializeElementsKindGeneralizations ( ) ; <nl> + const ZoneVector < MapData * > & elements_kind_generalizations ( ) { <nl> + return elements_kind_generalizations_ ; <nl> + } <nl> + <nl> + private : <nl> + ZoneVector < MapData * > elements_kind_generalizations_ ; <nl> } ; <nl> <nl> + MapData : : MapData ( JSHeapBroker * broker_ , Handle < Map > object_ , <nl> + HeapObjectType type_ ) <nl> + : HeapObjectData ( broker_ , object_ , type_ ) , <nl> + instance_type ( object_ - > instance_type ( ) ) , <nl> + instance_size ( object_ - > instance_size ( ) ) , <nl> + bit_field ( object_ - > bit_field ( ) ) , <nl> + bit_field2 ( object_ - > bit_field2 ( ) ) , <nl> + bit_field3 ( object_ - > bit_field3 ( ) ) , <nl> + elements_kind_generalizations_ ( broker - > zone ( ) ) { } <nl> + <nl> + JSFunctionData : : JSFunctionData ( JSHeapBroker * broker_ , <nl> + Handle < JSFunction > object_ , HeapObjectType type_ ) <nl> + : JSObjectData ( broker_ , object_ , type_ ) , <nl> + global_proxy ( GET_OR_CREATE ( global_proxy ) ) , <nl> + prototype_or_initial_map ( object_ - > map ( ) - > has_prototype_slot ( ) <nl> + ? GET_OR_CREATE ( prototype_or_initial_map ) <nl> + : nullptr ) , <nl> + shared ( GET_OR_CREATE ( shared ) ) { <nl> + if ( prototype_or_initial_map ! = nullptr & & <nl> + prototype_or_initial_map - > IsMap ( ) ) { <nl> + MapData * initial_map = prototype_or_initial_map - > AsMap ( ) ; <nl> + if ( initial_map - > instance_type = = JS_ARRAY_TYPE ) { <nl> + initial_map - > SerializeElementsKindGeneralizations ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void MapData : : SerializeElementsKindGeneralizations ( ) { <nl> + broker - > Trace ( " Computing ElementsKind generalizations of % p . 
\ n " , * object ) ; <nl> + DCHECK_EQ ( instance_type , JS_ARRAY_TYPE ) ; <nl> + MapRef self ( this ) ; <nl> + ElementsKind from_kind = self . elements_kind ( ) ; <nl> + for ( int i = FIRST_FAST_ELEMENTS_KIND ; i < = LAST_FAST_ELEMENTS_KIND ; i + + ) { <nl> + ElementsKind to_kind = static_cast < ElementsKind > ( i ) ; <nl> + if ( IsMoreGeneralElementsKindTransition ( from_kind , to_kind ) ) { <nl> + Handle < Map > target = <nl> + Map : : AsElementsKind ( broker - > isolate ( ) , self . object < Map > ( ) , to_kind ) ; <nl> + elements_kind_generalizations_ . push_back ( <nl> + broker - > GetOrCreateData ( target ) - > AsMap ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> class FixedArrayBaseData : public HeapObjectData { } ; <nl> class FixedArrayData : public FixedArrayBaseData { } ; <nl> class FixedDoubleArrayData : public FixedArrayBaseData { } ; <nl> void JSHeapBroker : : SerializeStandardObjects ( ) { <nl> GetOrCreateData ( b - > builtin_handle ( id ) ) ; <nl> } <nl> } <nl> + <nl> + Trace ( " Finished serializing standard objects . \ n " ) ; <nl> } <nl> <nl> HeapObjectType JSHeapBroker : : HeapObjectTypeFromMap ( Map * map ) const { <nl> void JSFunctionRef : : EnsureHasInitialMap ( ) const { <nl> JSFunction : : EnsureHasInitialMap ( object < JSFunction > ( ) ) ; <nl> } <nl> <nl> - / / TODO ( mslekova ) : Pre - compute these on the main thread . 
<nl> base : : Optional < MapRef > MapRef : : AsElementsKind ( ElementsKind kind ) const { <nl> - AllowHandleAllocation handle_allocation ; <nl> - AllowHeapAllocation heap_allocation ; <nl> - AllowHandleDereference allow_handle_dereference ; <nl> - return MapRef ( broker ( ) , <nl> - Map : : AsElementsKind ( broker ( ) - > isolate ( ) , object < Map > ( ) , kind ) ) ; <nl> + if ( broker ( ) - > mode ( ) = = JSHeapBroker : : kDisabled ) { <nl> + AllowHandleAllocation handle_allocation ; <nl> + AllowHeapAllocation heap_allocation ; <nl> + AllowHandleDereference allow_handle_dereference ; <nl> + return MapRef ( broker ( ) , Map : : AsElementsKind ( broker ( ) - > isolate ( ) , <nl> + object < Map > ( ) , kind ) ) ; <nl> + } else { <nl> + if ( kind = = elements_kind ( ) ) return * this ; <nl> + const ZoneVector < MapData * > & elements_kind_generalizations = <nl> + data ( ) - > AsMap ( ) - > elements_kind_generalizations ( ) ; <nl> + for ( auto data : elements_kind_generalizations ) { <nl> + MapRef map ( data ) ; <nl> + if ( map . elements_kind ( ) = = kind ) return map ; <nl> + } <nl> + return base : : Optional < MapRef > ( ) ; <nl> + } <nl> } <nl> <nl> int JSFunctionRef : : InitialMapInstanceSizeWithMinSlack ( ) const { <nl>
[ turbofan ] Precompute ElementsKind generalizations for initial maps .
v8/v8
53c3c6a9e57487dd3b66563a5871e1f664126ab5
2018-08-16T15:44:45Z
mmm a / xbmc / Application . cpp <nl> ppp b / xbmc / Application . cpp <nl> void CApplication : : Process ( ) <nl> / / ( this can only be done after CServiceBroker : : GetGUI ( ) - > GetWindowManager ( ) . Render ( ) ) <nl> CApplicationMessenger : : GetInstance ( ) . ProcessWindowMessages ( ) ; <nl> <nl> - if ( m_autoExecScriptExecuted ) <nl> - { <nl> - m_autoExecScriptExecuted = false ; <nl> - <nl> - / / autoexec . py - profile <nl> - std : : string strAutoExecPy = CSpecialProtocol : : TranslatePath ( " special : / / profile / autoexec . py " ) ; <nl> - <nl> - if ( XFILE : : CFile : : Exists ( strAutoExecPy ) ) <nl> - CScriptInvocationManager : : GetInstance ( ) . ExecuteAsync ( strAutoExecPy ) ; <nl> - else <nl> - CLog : : Log ( LOGDEBUG , " no profile autoexec . py ( % s ) found , skipping " , strAutoExecPy . c_str ( ) ) ; <nl> - } <nl> - <nl> / / handle any active scripts <nl> <nl> { <nl> void CApplication : : SetLoggingIn ( bool switchingProfiles ) <nl> / / would therefore write the previous skin ' s settings into the new profile <nl> / / instead of into the previous one <nl> m_saveSkinOnUnloading = ! switchingProfiles ; <nl> - <nl> - / / make sure that the autoexec . py script is executed after logging in <nl> - m_autoExecScriptExecuted = true ; <nl> } <nl> <nl> void CApplication : : CloseNetworkShares ( ) <nl> mmm a / xbmc / Application . h <nl> ppp b / xbmc / Application . h <nl> friend class CAppInboundProtocol ; <nl> bool m_ignoreSkinSettingChanges = false ; <nl> <nl> bool m_saveSkinOnUnloading = true ; <nl> - bool m_autoExecScriptExecuted = false ; <nl> <nl> # if defined ( TARGET_DARWIN_IOS ) <nl> friend class CWinEventsIOS ; <nl> mmm a / xbmc / interfaces / python / PythonInvoker . cpp <nl> ppp b / xbmc / interfaces / python / PythonInvoker . cpp <nl> void CPythonInvoker : : onError ( const std : : string & exceptionType / * = " " * / , const <nl> std : : string message ; <nl> if ( m_addon & & ! m_addon - > Name ( ) . 
empty ( ) ) <nl> message = StringUtils : : Format ( g_localizeStrings . Get ( 2102 ) . c_str ( ) , m_addon - > Name ( ) . c_str ( ) ) ; <nl> - else if ( m_sourceFile = = CSpecialProtocol : : TranslatePath ( " special : / / profile / autoexec . py " ) ) <nl> - message = StringUtils : : Format ( g_localizeStrings . Get ( 2102 ) . c_str ( ) , " autoexec . py " ) ; <nl> else <nl> message = g_localizeStrings . Get ( 2103 ) ; <nl> pDlgToast - > QueueNotification ( CGUIDialogKaiToast : : Error , message , g_localizeStrings . Get ( 2104 ) ) ; <nl>
Merge pull request from fuzzard / remove_autoexec
xbmc/xbmc
d26b198a3474ae2d73bf9dd9138254b51aa635d4
2020-09-28T08:56:28Z
mmm a / src / mongo / util / background . cpp <nl> ppp b / src / mongo / util / background . cpp <nl> namespace mongo { <nl> } <nl> <nl> const int ms = timer . millis ( ) ; <nl> - LOG ( ms < = 3 ? 3 : 0 ) < < " task : " < < taskName < < " took : " < < ms < < " ms " < < endl ; <nl> + const int kMinLogMs = 100 ; <nl> + LOG ( ms < = kMinLogMs ? 3 : 0 ) < < " task : " < < taskName < < " took : " < < ms < < " ms " < < endl ; <nl> } <nl> <nl> } / / namespace mongo <nl>
SERVER - 19054 : Don ' t log background tasks taking less than 100ms
mongodb/mongo
d09fdf2b5887bfd6773f1a6da374a1ff7fc0e242
2015-06-19T20:26:40Z
mmm a / src / rdb_protocol / datum . cc <nl> ppp b / src / rdb_protocol / datum . cc <nl> std : : string datum_t : : compose_secondary ( <nl> skey_version , truncated_secondary_key , primary_key_string , tag_string ) ; <nl> } <nl> <nl> - std : : string datum_t : : print_secondary ( reql_version_t rv , <nl> + std : : string datum_t : : print_secondary ( reql_version_t reql_version , <nl> const store_key_t & primary_key , <nl> boost : : optional < uint64_t > tag_num ) const { <nl> std : : string secondary_key_string ; <nl> std : : string datum_t : : print_secondary ( reql_version_t rv , <nl> get_type_name ( ) . c_str ( ) , trunc_print ( ) . c_str ( ) ) ) ; <nl> } <nl> <nl> - switch ( rv ) { <nl> + switch ( reql_version ) { <nl> case reql_version_t : : v1_13 : <nl> break ; <nl> case reql_version_t : : v1_14 : / / v1_15 is the same as v1_14 <nl> std : : string datum_t : : print_secondary ( reql_version_t rv , <nl> } <nl> <nl> return compose_secondary ( <nl> - skey_version_from_reql_version ( rv ) , <nl> + skey_version_from_reql_version ( reql_version ) , <nl> secondary_key_string , primary_key , tag_num ) ; <nl> } <nl> <nl>
Fixed variable name .
rethinkdb/rethinkdb
526859834e6fb9e8b556654d6ac9156934b93fe0
2015-01-21T19:30:36Z
mmm a / src / dialogs / canvasze . c <nl> ppp b / src / dialogs / canvasze . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / canvasze . h <nl> ppp b / src / dialogs / canvasze . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / colsel . c <nl> ppp b / src / dialogs / colsel . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / colsel . h <nl> ppp b / src / dialogs / colsel . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / dmapgen . c <nl> ppp b / src / dialogs / dmapgen . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . 
Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / dmapgen . h <nl> ppp b / src / dialogs / dmapgen . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / dpaledit . c <nl> ppp b / src / dialogs / dpaledit . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / dpaledit . h <nl> ppp b / src / dialogs / dpaledit . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / drawtext . c <nl> ppp b / src / dialogs / drawtext . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / drawtext . h <nl> ppp b / src / dialogs / drawtext . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . 
Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / filesel . h <nl> ppp b / src / dialogs / filesel . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / filmedit . c <nl> ppp b / src / dialogs / filmedit . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / filmedit . h <nl> ppp b / src / dialogs / filmedit . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / maskcol . c <nl> ppp b / src / dialogs / maskcol . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / maskcol . h <nl> ppp b / src / dialogs / maskcol . 
h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / minipal . c <nl> ppp b / src / dialogs / minipal . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 , 2002 , 2003 , 2004 , 2005 , 2007 , <nl> - * 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / minipal . h <nl> ppp b / src / dialogs / minipal . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / options . c <nl> ppp b / src / dialogs / options . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / options . h <nl> ppp b / src / dialogs / options . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . 
Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / playfli . c <nl> ppp b / src / dialogs / playfli . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / playfli . h <nl> ppp b / src / dialogs / playfli . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / quick . c <nl> ppp b / src / dialogs / quick . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / quick . h <nl> ppp b / src / dialogs / quick . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / repo . c <nl> ppp b / src / dialogs / repo . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . 
Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / repo . h <nl> ppp b / src / dialogs / repo . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / tips . c <nl> ppp b / src / dialogs / tips . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / tips . h <nl> ppp b / src / dialogs / tips . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / vectmap . c <nl> ppp b / src / dialogs / vectmap . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / dialogs / vectmap . h <nl> ppp b / src / dialogs / vectmap . 
h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / colcurve . c <nl> ppp b / src / effect / colcurve . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / colcurve . h <nl> ppp b / src / effect / colcurve . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / convmatr . h <nl> ppp b / src / effect / convmatr . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / effect . c <nl> ppp b / src / effect / effect . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / effect . 
h <nl> ppp b / src / effect / effect . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / invrtcol . c <nl> ppp b / src / effect / invrtcol . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / invrtcol . h <nl> ppp b / src / effect / invrtcol . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / median . c <nl> ppp b / src / effect / median . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / median . h <nl> ppp b / src / effect / median . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . 
Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / replcol . c <nl> ppp b / src / effect / replcol . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / effect / replcol . h <nl> ppp b / src / effect / replcol . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / ase_format . c <nl> ppp b / src / file / ase_format . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / bmp_format . c <nl> ppp b / src / file / bmp_format . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / gif_format . c <nl> ppp b / src / file / gif_format . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . 
Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / ico_format . c <nl> ppp b / src / file / ico_format . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / jpeg_format . c <nl> ppp b / src / file / jpeg_format . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / pcx_format . c <nl> ppp b / src / file / pcx_format . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / png_format . c <nl> ppp b / src / file / png_format . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / file / tga_format . c <nl> ppp b / src / file / tga_format . 
c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / intl / intl . c <nl> ppp b / src / intl / intl . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / intl / intl . h <nl> ppp b / src / intl / intl . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / intl / msgids . h <nl> ppp b / src / intl / msgids . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / jinete / jaccel . h <nl> ppp b / src / jinete / jaccel . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jalert . c <nl> ppp b / src / jinete / jalert . 
c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jalert . h <nl> ppp b / src / jinete / jalert . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jbox . c <nl> ppp b / src / jinete / jbox . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jbox . h <nl> ppp b / src / jinete / jbox . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jbutton . c <nl> ppp b / src / jinete / jbutton . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jbutton . h <nl> ppp b / src / jinete / jbutton . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . 
<nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jclipboard . c <nl> ppp b / src / jinete / jclipboard . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jclipboard . h <nl> ppp b / src / jinete / jclipboard . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jcombobox . h <nl> ppp b / src / jinete / jcombobox . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jdraw . c <nl> ppp b / src / jinete / jdraw . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jdraw . h <nl> ppp b / src / jinete / jdraw . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jentry . 
c <nl> ppp b / src / jinete / jentry . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jentry . h <nl> ppp b / src / jinete / jentry . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jfilesel . c <nl> ppp b / src / jinete / jfilesel . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jfilesel . h <nl> ppp b / src / jinete / jfilesel . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jfont . c <nl> ppp b / src / jinete / jfont . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jfont . h <nl> ppp b / src / jinete / jfont . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . 
<nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jfontbmp . c <nl> ppp b / src / jinete / jfontbmp . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jhook . c <nl> ppp b / src / jinete / jhook . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jhook . h <nl> ppp b / src / jinete / jhook . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jintern . c <nl> ppp b / src / jinete / jintern . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jlabel . c <nl> ppp b / src / jinete / jlabel . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . 
<nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jlabel . h <nl> ppp b / src / jinete / jlabel . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jlist . h <nl> ppp b / src / jinete / jlist . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jlistbox . c <nl> ppp b / src / jinete / jlistbox . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jlistbox . h <nl> ppp b / src / jinete / jlistbox . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jmanager . h <nl> ppp b / src / jinete / jmanager . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jmenu . h <nl> ppp b / src / jinete / jmenu . 
h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jmessage . c <nl> ppp b / src / jinete / jmessage . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jmessage . h <nl> ppp b / src / jinete / jmessage . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jmutex . c <nl> ppp b / src / jinete / jmutex . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jmutex . h <nl> ppp b / src / jinete / jmutex . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jpanel . c <nl> ppp b / src / jinete / jpanel . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . 
<nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jpanel . h <nl> ppp b / src / jinete / jpanel . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jquickmenu . c <nl> ppp b / src / jinete / jquickmenu . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jquickmenu . h <nl> ppp b / src / jinete / jquickmenu . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jrect . c <nl> ppp b / src / jinete / jrect . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jrect . h <nl> ppp b / src / jinete / jrect . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jregion . 
h <nl> ppp b / src / jinete / jregion . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jscroll . h <nl> ppp b / src / jinete / jscroll . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jsep . c <nl> ppp b / src / jinete / jsep . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jsep . h <nl> ppp b / src / jinete / jsep . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jslider . c <nl> ppp b / src / jinete / jslider . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jslider . h <nl> ppp b / src / jinete / jslider . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . 
<nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jstream . c <nl> ppp b / src / jinete / jstream . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jstream . h <nl> ppp b / src / jinete / jstream . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jsystem . c <nl> ppp b / src / jinete / jsystem . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jsystem . h <nl> ppp b / src / jinete / jsystem . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jtextbox . h <nl> ppp b / src / jinete / jtextbox . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . 
<nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jtheme . h <nl> ppp b / src / jinete / jtheme . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jthread . c <nl> ppp b / src / jinete / jthread . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jthread . h <nl> ppp b / src / jinete / jthread . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jview . c <nl> ppp b / src / jinete / jview . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jview . h <nl> ppp b / src / jinete / jview . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jwidget . c <nl> ppp b / src / jinete / jwidget . 
c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jwidget . h <nl> ppp b / src / jinete / jwidget . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jxml . c <nl> ppp b / src / jinete / jxml . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / jxml . h <nl> ppp b / src / jinete / jxml . h <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / jinete / themes / pcx2data . c <nl> ppp b / src / jinete / themes / pcx2data . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Read " LICENSE . txt " for more information . <nl> mmm a / src / modules / color . c <nl> ppp b / src / modules / color . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . 
Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / color . h <nl> ppp b / src / modules / color . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / editors . c <nl> ppp b / src / modules / editors . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / editors . h <nl> ppp b / src / modules / editors . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / gfx . c <nl> ppp b / src / modules / gfx . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / gfx . h <nl> ppp b / src / modules / gfx . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . 
Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / gfxdata . c <nl> ppp b / src / modules / gfxdata . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / palette . c <nl> ppp b / src / modules / palette . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / palette . h <nl> ppp b / src / modules / palette . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / recent . c <nl> ppp b / src / modules / recent . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / recent . h <nl> ppp b / src / modules / recent . 
h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / rootmenu . h <nl> ppp b / src / modules / rootmenu . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 , 2002 , 2003 , 2004 , 2005 , 2007 , <nl> - * 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / sprites . h <nl> ppp b / src / modules / sprites . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / tools . c <nl> ppp b / src / modules / tools . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 , 2002 , 2003 , 2004 , 2005 , 2007 , <nl> - * 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / tools . h <nl> ppp b / src / modules / tools . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . 
Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / tools2 . c <nl> ppp b / src / modules / tools2 . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / modules / tools2 . h <nl> ppp b / src / modules / tools2 . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / algo . c <nl> ppp b / src / raster / algo . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / algo . h <nl> ppp b / src / raster / algo . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / blend . c <nl> ppp b / src / raster / blend . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . 
Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / blend . h <nl> ppp b / src / raster / blend . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / brush . c <nl> ppp b / src / raster / brush . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / brush . h <nl> ppp b / src / raster / brush . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / cel . c <nl> ppp b / src / raster / cel . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / cel . h <nl> ppp b / src / raster / cel . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . 
Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / dirty . c <nl> ppp b / src / raster / dirty . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / dirty . h <nl> ppp b / src / raster / dirty . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / gfxobj . c <nl> ppp b / src / raster / gfxobj . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / gfxobj . h <nl> ppp b / src / raster / gfxobj . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / image . c <nl> ppp b / src / raster / image . 
c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / image . h <nl> ppp b / src / raster / image . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / imgalleg . c <nl> ppp b / src / raster / imgalleg . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / imgbit . c <nl> ppp b / src / raster / imgbit . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / imggray . c <nl> ppp b / src / raster / imggray . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / imgindex . 
c <nl> ppp b / src / raster / imgindex . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / layer . h <nl> ppp b / src / raster / layer . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / mask . c <nl> ppp b / src / raster / mask . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / mask . h <nl> ppp b / src / raster / mask . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 , 2002 , 2003 , 2004 , 2005 , 2007 , <nl> - * 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / path . c <nl> ppp b / src / raster / path . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . 
Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / path . h <nl> ppp b / src / raster / path . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / quant . c <nl> ppp b / src / raster / quant . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / quant . h <nl> ppp b / src / raster / quant . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / raster . h <nl> ppp b / src / raster / raster . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / rotate . c <nl> ppp b / src / raster / rotate . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . 
Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / rotate . h <nl> ppp b / src / raster / rotate . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / sprite . c <nl> ppp b / src / raster / sprite . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / sprite . h <nl> ppp b / src / raster / sprite . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / stock . c <nl> ppp b / src / raster / stock . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / stock . h <nl> ppp b / src / raster / stock . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . 
Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / undo . h <nl> ppp b / src / raster / undo . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / raster / x86 / int_mult . s <nl> ppp b / src / raster / x86 / int_mult . s <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / script / bindings . c <nl> ppp b / src / script / bindings . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / script / bindings . h <nl> ppp b / src / script / bindings . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / script / functions . c <nl> ppp b / src / script / functions . 
c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / script / functions . h <nl> ppp b / src / script / functions . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / script / script . c <nl> ppp b / src / script / script . c <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / script / script . h <nl> ppp b / src / script / script . h <nl> <nl> / * ASE - Allegro Sprite Editor <nl> - * Copyright ( C ) 2001 - 2005 , 2007 David A . Capello <nl> + * Copyright ( C ) 2001 - 2008 David A . Capello <nl> * <nl> * This program is free software ; you can redistribute it and / or modify <nl> * it under the terms of the GNU General Public License as published by <nl> mmm a / src / test / jinete / 00hello . c <nl> ppp b / src / test / jinete / 00hello . c <nl> <nl> / * jinete - a GUI library <nl> - * Copyright ( C ) 2003 - 2005 , 2007 by David A . Capello <nl> + * Copyright ( C ) 2003 - 2008 by David A . Capello <nl> * <nl> * Jinete is gift - ware . <nl> * / <nl> mmm a / src / test / jinete / 03slider . c <nl> ppp b / src / test / jinete / 03slider . 
c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / test / jinete / 18tips . c <nl> ppp b / src / test / jinete / 18tips . c <nl> <nl> / * Jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 David A . Capello . <nl> + * Copyright ( C ) 2003 - 2008 David A . Capello . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> mmm a / src / test / jinete / 22xml . c <nl> ppp b / src / test / jinete / 22xml . c <nl> <nl> / * jinete - a GUI library <nl> - * Copyright ( C ) 2003 , 2004 , 2005 , 2007 , 2008 by David A . Capello <nl> + * Copyright ( C ) 2003 - 2008 by David A . Capello . <nl> * <nl> * Jinete is gift - ware . <nl> * / <nl>
Copyright updated .
aseprite/aseprite
90a55ad443fbb353e3b13a849755cf5b2d33889e
2008-02-10T12:52:15Z
mmm a / tensorflow / lite / experimental / micro / tools / make / targets / ecm3531 / README . md <nl> ppp b / tensorflow / lite / experimental / micro / tools / make / targets / ecm3531 / README . md <nl> <nl> - Compiling instructions here ( https : / / github . com / tensorflow / tensorflow / tree / master / tensorflow / lite / experimental / micro ) <nl> + Compiling instructions here https : / / github . com / tensorflow / tensorflow / tree / master / tensorflow / lite / experimental / micro <nl> <nl> <nl> CONTACT INFORMATION : <nl>
edits
tensorflow/tensorflow
753f3755662cf998c27bfacc3dcaabbb66f08d4b
2019-02-08T23:48:42Z
new file mode 100644 <nl> index 0000000000 . . 136795228e <nl> mmm / dev / null <nl> ppp b / code / dynamic_programming / largest_sum_contiguous_subarray / largest_sum_contiguous_subarray . go <nl> <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> + package main <nl> + <nl> + import " fmt " <nl> + <nl> + func max ( n1 , n2 int ) int { <nl> + if n1 > n2 { <nl> + return n1 <nl> + } <nl> + return n2 <nl> + } <nl> + <nl> + func maxSubArraySum ( data [ ] int ) int { <nl> + result : = data [ 0 ] <nl> + current_max : = max ( data [ 0 ] , 0 ) <nl> + <nl> + for i : = 1 ; i < len ( data ) ; i + + { <nl> + current_max + = data [ i ] <nl> + result = max ( result , current_max ) <nl> + current_max = max ( current_max , 0 ) <nl> + } <nl> + <nl> + return result <nl> + } <nl> + <nl> + func main ( ) { <nl> + input : = [ ] int { - 3 , 2 , - 1 , 4 , - 5 } <nl> + input2 : = [ ] int { - 1 , - 2 , - 3 , - 4 , - 5 } <nl> + <nl> + fmt . Printf ( " The max sum of % v is % d \ n " , input , maxSubArraySum ( input ) ) <nl> + fmt . Printf ( " The max sum of % v is % d \ n " , input2 , maxSubArraySum ( input2 ) ) <nl> + } <nl>
provide a implementation of larget_sum with DP in golang
OpenGenus/cosmos
c83959697737f6b4689bfa8cce226a8c8aee92a2
2017-11-09T07:42:59Z
mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> ERROR ( reserved_member_name , none , <nl> " ' foo . % 1 ' expression " , ( DeclName , StringRef ) ) <nl> <nl> ERROR ( invalid_redecl , none , " invalid redeclaration of % 0 " , ( DeclName ) ) <nl> + ERROR ( invalid_redecl_init , none , <nl> + " invalid redeclaration of synthesized % select { | memberwise } 1 % 0 " , <nl> + ( DeclName , bool ) ) <nl> WARNING ( invalid_redecl_swift5_warning , none , <nl> " redeclaration of % 0 is deprecated and will be an error in Swift 5 " , <nl> ( DeclName ) ) <nl> mmm a / lib / Sema / TypeCheckDecl . cpp <nl> ppp b / lib / Sema / TypeCheckDecl . cpp <nl> static void checkRedeclaration ( TypeChecker & tc , ValueDecl * current ) { <nl> current - > getFullName ( ) ) ; <nl> tc . diagnose ( other , diag : : invalid_redecl_prev , other - > getFullName ( ) ) ; <nl> } else { <nl> - tc . diagnose ( current , diag : : invalid_redecl , current - > getFullName ( ) ) ; <nl> - tc . diagnose ( other , diag : : invalid_redecl_prev , other - > getFullName ( ) ) ; <nl> + const auto * otherInit = dyn_cast < ConstructorDecl > ( other ) ; <nl> + / / Provide a better description for implicit initializers . <nl> + if ( otherInit & & otherInit - > isImplicit ( ) ) { <nl> + / / Skip conflicts with inherited initializers , which only happen <nl> + / / when the current declaration is within an extension . The override <nl> + / / checker should have already taken care of emitting a more <nl> + / / productive diagnostic . <nl> + if ( ! other - > getOverriddenDecl ( ) ) <nl> + tc . diagnose ( current , diag : : invalid_redecl_init , <nl> + current - > getFullName ( ) , <nl> + otherInit - > isMemberwiseInitializer ( ) ) ; <nl> + } else { <nl> + tc . diagnose ( current , diag : : invalid_redecl , current - > getFullName ( ) ) ; <nl> + tc . 
diagnose ( other , diag : : invalid_redecl_prev , other - > getFullName ( ) ) ; <nl> + } <nl> markInvalid ( ) ; <nl> } <nl> <nl> mmm a / test / decl / init / basic_init . swift <nl> ppp b / test / decl / init / basic_init . swift <nl> <nl> - / / RUN : % target - typecheck - verify - swift <nl> + / / RUN : % target - typecheck - verify - swift - enable - objc - interop - disable - objc - attr - requires - foundation - module <nl> <nl> class Foo { <nl> func bar ( _ : bar ) { } / / expected - error { { use of undeclared type ' bar ' } } <nl> class C { <nl> typealias t = t / / expected - error { { type alias ' t ' references itself } } <nl> / / expected - note @ - 1 { { type declared here } } <nl> <nl> + extension Foo { <nl> + convenience init ( ) { } / / expected - error { { invalid redeclaration of synthesized ' init ( ) ' } } <nl> + } <nl> <nl> + class InitClass { <nl> + init ( arg : Bool ) { } / / expected - note { { add ' @ objc ' to make this declaration overridable } } <nl> + @ objc init ( baz : Int ) { } / / expected - note { { overridden declaration is here } } <nl> + @ objc dynamic init ( bar : Int ) { } <nl> + } <nl> + class InitSubclass : InitClass { } <nl> + / / expected - note @ - 1 { { ' init ( bar : ) ' previously overridden here } } <nl> + / / expected - note @ - 2 { { ' init ( baz : ) ' previously overridden here } } <nl> + extension InitSubclass { <nl> + convenience init ( arg : Bool ) { } / / expected - error { { overriding non - @ objc declarations from extensions is not supported } } <nl> + convenience override init ( baz : Int ) { } <nl> + / / expected - error @ - 1 { { cannot override a non - dynamic class declaration from an extension } } <nl> + / / expected - error @ - 2 { { ' init ( baz : ) ' has already been overridden } } <nl> + convenience override init ( bar : Int ) { } / / expected - error { { ' init ( bar : ) ' has already been overridden } } <nl> + } <nl> <nl> + struct InitStruct { <nl> + let foo : Int <nl> + } <nl> + extension 
InitStruct { <nl> + init ( foo : Int ) { } / / expected - error { { invalid redeclaration of synthesized memberwise ' init ( foo : ) ' } } <nl> + } <nl> <nl> / / < rdar : / / problem / 17564699 > QoI : Structs should get convenience initializers <nl> struct MyStruct { <nl>
Merge pull request from AnthonyLatsis / synth - init - conflict - diag
apple/swift
79bd38ba44ae784ba1c8ff08733a3b277e907717
2019-04-19T15:51:07Z
mmm a / objectivec / GPBDescriptor . m <nl> ppp b / objectivec / GPBDescriptor . m <nl> - ( BOOL ) isRepeated { <nl> return GPBExtensionIsRepeated ( description_ ) ; <nl> } <nl> <nl> - - ( BOOL ) isMap { <nl> - return ( description_ - > options & GPBFieldMapKeyMask ) ! = 0 ; <nl> - } <nl> - <nl> - ( BOOL ) isPackable { <nl> return GPBExtensionIsPacked ( description_ ) ; <nl> } <nl> mmm a / objectivec / GPBUnknownField . m <nl> ppp b / objectivec / GPBUnknownField . m <nl> - ( BOOL ) isEqual : ( id ) object { <nl> if ( self = = object ) return YES ; <nl> if ( ! [ object isKindOfClass : [ GPBUnknownField class ] ] ) return NO ; <nl> GPBUnknownField * field = ( GPBUnknownField * ) object ; <nl> + if ( number_ ! = field - > number_ ) return NO ; <nl> BOOL equalVarint = <nl> ( mutableVarintList_ . count = = 0 & & field - > mutableVarintList_ . count = = 0 ) | | <nl> [ mutableVarintList_ isEqual : field - > mutableVarintList_ ] ; <nl> - ( size_t ) serializedSizeAsMessageSetExtension { <nl> } <nl> <nl> - ( NSString * ) description { <nl> - NSMutableString * description = [ NSMutableString <nl> - stringWithFormat : @ " < % @ % p > : Field : % d { \ n " , [ self class ] , self , number_ ] ; <nl> + NSMutableString * description = <nl> + [ NSMutableString stringWithFormat : @ " < % @ % p > : Field : % d { \ n " , <nl> + [ self class ] , self , number_ ] ; <nl> [ mutableVarintList_ <nl> enumerateValuesWithBlock : ^ ( uint64_t value , NSUInteger idx , BOOL * stop ) { <nl> # pragma unused ( idx , stop ) <nl> mmm a / objectivec / Tests / GPBArrayTests . m <nl> ppp b / objectivec / Tests / GPBArrayTests . m <nl> <nl> / / To let the testing macros work , add some extra methods to simplify things . 
<nl> @ interface GPBEnumArray ( TestingTweak ) <nl> + ( instancetype ) arrayWithValue : ( int32_t ) value ; <nl> + + ( instancetype ) arrayWithCapacity : ( NSUInteger ) count ; <nl> - ( instancetype ) initWithValues : ( const int32_t [ ] ) values <nl> count : ( NSUInteger ) count ; <nl> @ end <nl> + ( instancetype ) arrayWithValue : ( int32_t ) value { <nl> rawValues : & value <nl> count : 1 ] autorelease ] ; <nl> } <nl> + + ( instancetype ) arrayWithCapacity : ( NSUInteger ) count { <nl> + return [ [ [ self alloc ] initWithValidationFunction : TestingEnum_IsValidValue <nl> + capacity : count ] autorelease ] ; <nl> + } <nl> - ( instancetype ) initWithValues : ( const int32_t [ ] ) values <nl> count : ( NSUInteger ) count { <nl> return [ self initWithValidationFunction : TestingEnum_IsValidValue <nl> - ( instancetype ) initWithValues : ( const int32_t [ ] ) values <nl> / / % XCTAssertNotEqual ( idx , 0U ) ; <nl> / / % + + idx2 ; <nl> / / % } ] ; <nl> + / / % / / Ensure description doesn ' t choke . <nl> + / / % XCTAssertTrue ( array . description . length > 10 ) ; <nl> / / % [ array release ] ; <nl> / / % } <nl> / / % <nl> - ( instancetype ) initWithValues : ( const int32_t [ ] ) values <nl> / / % NAME $ S count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> / / % XCTAssertNotNil ( array3 ) ; <nl> / / % <nl> + / / % / / Identity <nl> + / / % XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / % / / Wrong type doesn ' t blow up . <nl> + / / % XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / % / / 1 / 1Prime should be different objects , but equal . <nl> / / % XCTAssertNotEqual ( array1 , array1prime ) ; <nl> / / % XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( instancetype ) initWithValues : ( const int32_t [ ] ) values <nl> / / % [ array add # # HELPER # # ValuesFromArray : array2 ] ; <nl> / / % XCTAssertEqual ( array . count , 5U ) ; <nl> / / % <nl> + / / % / / Zero / nil inputs do nothing . 
<nl> + / / % [ array addValues : kValues1 count : 0 ] ; <nl> + / / % XCTAssertEqual ( array . count , 5U ) ; <nl> + / / % [ array addValues : NULL count : 5 ] ; <nl> + / / % XCTAssertEqual ( array . count , 5U ) ; <nl> + / / % <nl> / / % XCTAssertEqual ( [ array valueAtIndex : 0 ] , VAL1 ) ; <nl> / / % XCTAssertEqual ( [ array valueAtIndex : 1 ] , VAL2 ) ; <nl> / / % XCTAssertEqual ( [ array valueAtIndex : 2 ] , VAL3 ) ; <nl> - ( instancetype ) initWithValues : ( const int32_t [ ] ) values <nl> / / % - ( void ) testInternalResizing { <nl> / / % const TYPE kValues [ ] = { VAL1 , VAL2 , VAL3 , VAL4 } ; <nl> / / % GPB # # NAME # # Array * array = <nl> - / / % [ [ GPB # # NAME # # Array alloc ] initWithValues : kValues <nl> - / / % NAME $ S count : GPBARRAYSIZE ( kValues ) ] ; <nl> + / / % [ GPB # # NAME # # Array arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> / / % XCTAssertNotNil ( array ) ; <nl> + / / % [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> / / % <nl> / / % / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> / / % for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( instancetype ) initWithValues : ( const int32_t [ ] ) values <nl> / / % XCTAssertEqual ( array . count , 404U ) ; <nl> / / % [ array removeAll ] ; <nl> / / % XCTAssertEqual ( array . count , 0U ) ; <nl> - / / % [ array release ] ; <nl> / / % } <nl> / / % <nl> / / % @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . <nl> + XCTAssertTrue ( array . description . length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . <nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . 
<nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , 1 ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , 2 ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , 3 ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const int32_t kValues [ ] = { 1 , 2 , 3 , 4 } ; <nl> GPBInt32Array * array = <nl> - [ [ GPBInt32Array alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBInt32Array arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . <nl> + XCTAssertTrue ( array . description . length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . <nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . 
<nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , 11U ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , 12U ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , 13U ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const uint32_t kValues [ ] = { 11U , 12U , 13U , 14U } ; <nl> GPBUInt32Array * array = <nl> - [ [ GPBUInt32Array alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBUInt32Array arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . <nl> + XCTAssertTrue ( array . description . length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . 
<nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . <nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , 31LL ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , 32LL ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , 33LL ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const int64_t kValues [ ] = { 31LL , 32LL , 33LL , 34LL } ; <nl> GPBInt64Array * array = <nl> - [ [ GPBInt64Array alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBInt64Array arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . <nl> + XCTAssertTrue ( array . description . 
length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . <nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . <nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , 41ULL ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , 42ULL ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , 43ULL ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const uint64_t kValues [ ] = { 41ULL , 42ULL , 43ULL , 44ULL } ; <nl> GPBUInt64Array * array = <nl> - [ [ GPBUInt64Array alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBUInt64Array arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . 
<nl> + XCTAssertTrue ( array . description . length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . <nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . <nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , 51 . f ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , 52 . f ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , 53 . f ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const float kValues [ ] = { 51 . f , 52 . f , 53 . f , 54 . f } ; <nl> GPBFloatArray * array = <nl> - [ [ GPBFloatArray alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBFloatArray arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . 
count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . <nl> + XCTAssertTrue ( array . description . length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . <nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . <nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , 61 . ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , 62 . ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , 63 . ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const double kValues [ ] = { 61 . , 62 . , 63 . , 64 . } ; <nl> GPBDoubleArray * array = <nl> - [ [ GPBDoubleArray alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBDoubleArray arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . 
count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . <nl> + XCTAssertTrue ( array . description . length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . <nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . <nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , TRUE ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , TRUE ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , FALSE ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const BOOL kValues [ ] = { TRUE , TRUE , FALSE , FALSE } ; <nl> GPBBoolArray * array = <nl> - [ [ GPBBoolArray alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBBoolArray arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . 
<nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> - ( void ) testBasics { <nl> XCTAssertNotEqual ( idx , 0U ) ; <nl> + + idx2 ; <nl> } ] ; <nl> + / / Ensure description doesn ' t choke . <nl> + XCTAssertTrue ( array . description . length > 10 ) ; <nl> [ array release ] ; <nl> } <nl> <nl> - ( void ) testEquality { <nl> count : GPBARRAYSIZE ( kValues3 ) ] ; <nl> XCTAssertNotNil ( array3 ) ; <nl> <nl> + / / Identity <nl> + XCTAssertTrue ( [ array1 isEqual : array1 ] ) ; <nl> + / / Wrong type doesn ' t blow up . <nl> + XCTAssertFalse ( [ array1 isEqual : @ " bogus " ] ) ; <nl> / / 1 / 1Prime should be different objects , but equal . <nl> XCTAssertNotEqual ( array1 , array1prime ) ; <nl> XCTAssertEqualObjects ( array1 , array1prime ) ; <nl> - ( void ) testAdds { <nl> [ array addRawValuesFromArray : array2 ] ; <nl> XCTAssertEqual ( array . count , 5U ) ; <nl> <nl> + / / Zero / nil inputs do nothing . <nl> + [ array addValues : kValues1 count : 0 ] ; <nl> + XCTAssertEqual ( array . count , 5U ) ; <nl> + [ array addValues : NULL count : 5 ] ; <nl> + XCTAssertEqual ( array . 
count , 5U ) ; <nl> + <nl> XCTAssertEqual ( [ array valueAtIndex : 0 ] , 71 ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 1 ] , 72 ) ; <nl> XCTAssertEqual ( [ array valueAtIndex : 2 ] , 73 ) ; <nl> - ( void ) testInplaceMutation { <nl> - ( void ) testInternalResizing { <nl> const int32_t kValues [ ] = { 71 , 72 , 73 , 74 } ; <nl> GPBEnumArray * array = <nl> - [ [ GPBEnumArray alloc ] initWithValues : kValues <nl> - count : GPBARRAYSIZE ( kValues ) ] ; <nl> + [ GPBEnumArray arrayWithCapacity : GPBARRAYSIZE ( kValues ) ] ; <nl> XCTAssertNotNil ( array ) ; <nl> + [ array addValues : kValues count : GPBARRAYSIZE ( kValues ) ] ; <nl> <nl> / / Add / remove to trigger the intneral buffer to grow / shrink . <nl> for ( int i = 0 ; i < 100 ; + + i ) { <nl> - ( void ) testInternalResizing { <nl> XCTAssertEqual ( array . count , 404U ) ; <nl> [ array removeAll ] ; <nl> XCTAssertEqual ( array . count , 0U ) ; <nl> - [ array release ] ; <nl> } <nl> <nl> @ end <nl> mmm a / objectivec / Tests / GPBDescriptorTests . m <nl> ppp b / objectivec / Tests / GPBDescriptorTests . m <nl> <nl> <nl> # import < objc / runtime . h > <nl> <nl> - # import " GPBDescriptor . h " <nl> + # import " GPBDescriptor_PackagePrivate . h " <nl> # import " google / protobuf / Unittest . pbobjc . h " <nl> # import " google / protobuf / UnittestObjc . pbobjc . h " <nl> # import " google / protobuf / Descriptor . pbobjc . h " <nl> - ( void ) testFieldDescriptor { <nl> XCTAssertNotNil ( fieldDescriptorWithNumber . enumDescriptor ) ; <nl> XCTAssertEqualObjects ( fieldDescriptorWithNumber . enumDescriptor . name , <nl> @ " TestAllTypes_NestedEnum " ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . number , fieldDescriptorWithNumber . number ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . 
dataType , GPBDataTypeEnum ) ; <nl> <nl> / / Foreign Enum <nl> fieldDescriptorWithName = [ descriptor fieldWithName : @ " optionalForeignEnum " ] ; <nl> - ( void ) testFieldDescriptor { <nl> XCTAssertNotNil ( fieldDescriptorWithNumber . enumDescriptor ) ; <nl> XCTAssertEqualObjects ( fieldDescriptorWithNumber . enumDescriptor . name , <nl> @ " ForeignEnum " ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . number , fieldDescriptorWithNumber . number ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . dataType , GPBDataTypeEnum ) ; <nl> <nl> / / Import Enum <nl> fieldDescriptorWithName = [ descriptor fieldWithName : @ " optionalImportEnum " ] ; <nl> - ( void ) testFieldDescriptor { <nl> XCTAssertNotNil ( fieldDescriptorWithNumber . enumDescriptor ) ; <nl> XCTAssertEqualObjects ( fieldDescriptorWithNumber . enumDescriptor . name , <nl> @ " ImportEnum " ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . number , fieldDescriptorWithNumber . number ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . dataType , GPBDataTypeEnum ) ; <nl> <nl> / / Nested Message <nl> fieldDescriptorWithName = [ descriptor fieldWithName : @ " optionalNestedMessage " ] ; <nl> - ( void ) testFieldDescriptor { <nl> XCTAssertNotNil ( fieldDescriptorWithNumber ) ; <nl> XCTAssertEqual ( fieldDescriptorWithName , fieldDescriptorWithNumber ) ; <nl> XCTAssertNil ( fieldDescriptorWithNumber . enumDescriptor ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . number , fieldDescriptorWithNumber . number ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . dataType , GPBDataTypeMessage ) ; <nl> <nl> / / Foreign Message <nl> fieldDescriptorWithName = <nl> - ( void ) testFieldDescriptor { <nl> XCTAssertNotNil ( fieldDescriptorWithNumber ) ; <nl> XCTAssertEqual ( fieldDescriptorWithName , fieldDescriptorWithNumber ) ; <nl> XCTAssertNil ( fieldDescriptorWithNumber . enumDescriptor ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . number , fieldDescriptorWithNumber . 
number ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . dataType , GPBDataTypeMessage ) ; <nl> <nl> / / Import Message <nl> fieldDescriptorWithName = [ descriptor fieldWithName : @ " optionalImportMessage " ] ; <nl> - ( void ) testFieldDescriptor { <nl> XCTAssertNotNil ( fieldDescriptorWithNumber ) ; <nl> XCTAssertEqual ( fieldDescriptorWithName , fieldDescriptorWithNumber ) ; <nl> XCTAssertNil ( fieldDescriptorWithNumber . enumDescriptor ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . number , fieldDescriptorWithNumber . number ) ; <nl> + XCTAssertEqual ( fieldDescriptorWithName . dataType , GPBDataTypeMessage ) ; <nl> + <nl> + / / Some failed lookups . <nl> + XCTAssertNil ( [ descriptor fieldWithName : @ " NOT THERE " ] ) ; <nl> + XCTAssertNil ( [ descriptor fieldWithNumber : 9876543 ] ) ; <nl> } <nl> <nl> - ( void ) testEnumDescriptor { <nl> - ( void ) testEnumDescriptor { <nl> XCTAssertNotNil ( enumName ) ; <nl> XCTAssertTrue ( [ descriptor getValue : & value forEnumTextFormatName : @ " FOO " ] ) ; <nl> XCTAssertEqual ( value , TestAllTypes_NestedEnum_Foo ) ; <nl> + XCTAssertNil ( [ descriptor textFormatNameForValue : 99999 ] ) ; <nl> <nl> / / Bad values <nl> enumName = [ descriptor enumNameForValue : 0 ] ; <nl> - ( void ) testOneofDescriptor { <nl> XCTAssertNil ( bazString . containingOneof ) ; <nl> } <nl> <nl> + - ( void ) testExtensiondDescriptor { <nl> + Class msgClass = [ TestAllExtensions class ] ; <nl> + Class packedMsgClass = [ TestPackedExtensions class ] ; <nl> + <nl> + / / Int <nl> + <nl> + GPBExtensionDescriptor * descriptor = [ UnittestRoot optionalInt32Extension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertEqualObjects ( descriptor . defaultValue , @ 0 ) ; <nl> + XCTAssertNil ( descriptor . 
enumDescriptor ) ; <nl> + <nl> + descriptor = [ UnittestRoot defaultInt32Extension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertEqualObjects ( descriptor . defaultValue , @ 41 ) ; <nl> + XCTAssertNil ( descriptor . enumDescriptor ) ; <nl> + <nl> + / / Enum <nl> + <nl> + descriptor = [ UnittestRoot optionalNestedEnumExtension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertEqual ( descriptor . defaultValue , @ 1 ) ; <nl> + XCTAssertEqualObjects ( descriptor . enumDescriptor . name , @ " TestAllTypes_NestedEnum " ) ; <nl> + <nl> + descriptor = [ UnittestRoot defaultNestedEnumExtension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertEqual ( descriptor . defaultValue , @ 2 ) ; <nl> + XCTAssertEqualObjects ( descriptor . enumDescriptor . name , @ " TestAllTypes_NestedEnum " ) ; <nl> + <nl> + / / Message <nl> + <nl> + descriptor = [ UnittestRoot optionalNestedMessageExtension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertNil ( descriptor . defaultValue ) ; <nl> + XCTAssertNil ( descriptor . enumDescriptor ) ; <nl> + <nl> + / / Repeated Int <nl> + <nl> + descriptor = [ UnittestRoot repeatedInt32Extension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertNil ( descriptor . 
defaultValue ) ; <nl> + XCTAssertNil ( descriptor . enumDescriptor ) ; <nl> + <nl> + descriptor = [ UnittestRoot packedInt32Extension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , packedMsgClass ) ; / / ptr equality <nl> + XCTAssertTrue ( descriptor . isPackable ) ; <nl> + XCTAssertNil ( descriptor . defaultValue ) ; <nl> + XCTAssertNil ( descriptor . enumDescriptor ) ; <nl> + <nl> + / / Repeated Enum <nl> + <nl> + descriptor = [ UnittestRoot repeatedNestedEnumExtension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertNil ( descriptor . defaultValue ) ; <nl> + XCTAssertEqualObjects ( descriptor . enumDescriptor . name , @ " TestAllTypes_NestedEnum " ) ; <nl> + <nl> + descriptor = [ UnittestRoot packedEnumExtension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , packedMsgClass ) ; / / ptr equality <nl> + XCTAssertTrue ( descriptor . isPackable ) ; <nl> + XCTAssertNil ( descriptor . defaultValue ) ; <nl> + XCTAssertEqualObjects ( descriptor . enumDescriptor . name , @ " ForeignEnum " ) ; <nl> + <nl> + / / Repeated Message <nl> + <nl> + descriptor = [ UnittestRoot repeatedNestedMessageExtension ] ; <nl> + XCTAssertNotNil ( descriptor ) ; <nl> + XCTAssertEqual ( descriptor . containingMessageClass , msgClass ) ; / / ptr equality <nl> + XCTAssertFalse ( descriptor . isPackable ) ; <nl> + XCTAssertNil ( descriptor . defaultValue ) ; <nl> + XCTAssertNil ( descriptor . enumDescriptor ) ; <nl> + <nl> + / / Compare ( used internally for serialization ) . <nl> + <nl> + GPBExtensionDescriptor * ext1 = [ UnittestRoot optionalInt32Extension ] ; <nl> + XCTAssertEqual ( ext1 . fieldNumber , 1u ) ; <nl> + GPBExtensionDescriptor * ext2 = [ UnittestRoot optionalInt64Extension ] ; <nl> + XCTAssertEqual ( ext2 . 
fieldNumber , 2u ) ; <nl> + <nl> + XCTAssertEqual ( [ ext1 compareByFieldNumber : ext2 ] , NSOrderedAscending ) ; <nl> + XCTAssertEqual ( [ ext2 compareByFieldNumber : ext1 ] , NSOrderedDescending ) ; <nl> + XCTAssertEqual ( [ ext1 compareByFieldNumber : ext1 ] , NSOrderedSame ) ; <nl> + } <nl> + <nl> @ end <nl> mmm a / objectivec / Tests / GPBUnknownFieldSetTest . m <nl> ppp b / objectivec / Tests / GPBUnknownFieldSetTest . m <nl> - ( void ) setUp { <nl> unknownFields_ = emptyMessage_ . unknownFields ; <nl> } <nl> <nl> + - ( void ) testInvalidFieldNumber { <nl> + GPBUnknownFieldSet * set = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * field = [ [ [ GPBUnknownField alloc ] initWithNumber : 0 ] autorelease ] ; <nl> + XCTAssertThrowsSpecificNamed ( [ set addField : field ] , NSException , NSInvalidArgumentException ) ; <nl> + } <nl> + <nl> + - ( void ) testEqualityAndHash { <nl> + / / Empty <nl> + <nl> + GPBUnknownFieldSet * set1 = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + XCTAssertTrue ( [ set1 isEqual : set1 ] ) ; <nl> + XCTAssertFalse ( [ set1 isEqual : @ " foo " ] ) ; <nl> + GPBUnknownFieldSet * set2 = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + XCTAssertEqualObjects ( set1 , set2 ) ; <nl> + XCTAssertEqual ( [ set1 hash ] , [ set2 hash ] ) ; <nl> + <nl> + / / Varint <nl> + <nl> + GPBUnknownField * field1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> + [ field1 addVarint : 1 ] ; <nl> + [ set1 addField : field1 ] ; <nl> + XCTAssertNotEqualObjects ( set1 , set2 ) ; <nl> + GPBUnknownField * field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> + [ field2 addVarint : 1 ] ; <nl> + [ set2 addField : field2 ] ; <nl> + XCTAssertEqualObjects ( set1 , set2 ) ; <nl> + XCTAssertEqual ( [ set1 hash ] , [ set2 hash ] ) ; <nl> + <nl> + / / Fixed32 <nl> + <nl> + field1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 2 ] autorelease ] ; <nl> + [ field1 
addFixed32 : 2 ] ; <nl> + [ set1 addField : field1 ] ; <nl> + XCTAssertNotEqualObjects ( set1 , set2 ) ; <nl> + field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 2 ] autorelease ] ; <nl> + [ field2 addFixed32 : 2 ] ; <nl> + [ set2 addField : field2 ] ; <nl> + XCTAssertEqualObjects ( set1 , set2 ) ; <nl> + XCTAssertEqual ( [ set1 hash ] , [ set2 hash ] ) ; <nl> + <nl> + / / Fixed64 <nl> + <nl> + field1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 3 ] autorelease ] ; <nl> + [ field1 addFixed64 : 3 ] ; <nl> + [ set1 addField : field1 ] ; <nl> + XCTAssertNotEqualObjects ( set1 , set2 ) ; <nl> + field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 3 ] autorelease ] ; <nl> + [ field2 addFixed64 : 3 ] ; <nl> + [ set2 addField : field2 ] ; <nl> + XCTAssertEqualObjects ( set1 , set2 ) ; <nl> + XCTAssertEqual ( [ set1 hash ] , [ set2 hash ] ) ; <nl> + <nl> + / / LengthDelimited <nl> + <nl> + field1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 4 ] autorelease ] ; <nl> + [ field1 addLengthDelimited : DataFromCStr ( " foo " ) ] ; <nl> + [ set1 addField : field1 ] ; <nl> + XCTAssertNotEqualObjects ( set1 , set2 ) ; <nl> + field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 4 ] autorelease ] ; <nl> + [ field2 addLengthDelimited : DataFromCStr ( " foo " ) ] ; <nl> + [ set2 addField : field2 ] ; <nl> + XCTAssertEqualObjects ( set1 , set2 ) ; <nl> + XCTAssertEqual ( [ set1 hash ] , [ set2 hash ] ) ; <nl> + <nl> + / / Group <nl> + <nl> + GPBUnknownFieldSet * group1 = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * fieldGroup1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 10 ] autorelease ] ; <nl> + [ fieldGroup1 addVarint : 1 ] ; <nl> + [ group1 addField : fieldGroup1 ] ; <nl> + GPBUnknownFieldSet * group2 = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * fieldGroup2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 10 ] autorelease ] ; <nl> + [ fieldGroup2 addVarint : 1 ] ; <nl> + [ group2 
addField : fieldGroup2 ] ; <nl> + <nl> + field1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 5 ] autorelease ] ; <nl> + [ field1 addGroup : group1 ] ; <nl> + [ set1 addField : field1 ] ; <nl> + XCTAssertNotEqualObjects ( set1 , set2 ) ; <nl> + field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 5 ] autorelease ] ; <nl> + [ field2 addGroup : group2 ] ; <nl> + [ set2 addField : field2 ] ; <nl> + XCTAssertEqualObjects ( set1 , set2 ) ; <nl> + XCTAssertEqual ( [ set1 hash ] , [ set2 hash ] ) ; <nl> + <nl> + / / Exercise description for completeness . <nl> + XCTAssertTrue ( set1 . description . length > 10 ) ; <nl> + } <nl> + <nl> / / Constructs a protocol buffer which contains fields with all the same <nl> / / numbers as allFieldsData except that each field is some other wire <nl> / / type . <nl> - ( void ) testMergeFrom { <nl> field = [ [ [ GPBUnknownField alloc ] initWithNumber : 3 ] autorelease ] ; <nl> [ field addVarint : 4 ] ; <nl> [ set1 addField : field ] ; <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 4 ] autorelease ] ; <nl> + [ field addFixed32 : 6 ] ; <nl> + [ set1 addField : field ] ; <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 5 ] autorelease ] ; <nl> + [ field addFixed64 : 20 ] ; <nl> + [ set1 addField : field ] ; <nl> field = [ [ [ GPBUnknownField alloc ] initWithNumber : 10 ] autorelease ] ; <nl> [ field addLengthDelimited : DataFromCStr ( " data1 " ) ] ; <nl> [ set1 addField : field ] ; <nl> <nl> + GPBUnknownFieldSet * group1 = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * fieldGroup1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 200 ] autorelease ] ; <nl> + [ fieldGroup1 addVarint : 100 ] ; <nl> + [ group1 addField : fieldGroup1 ] ; <nl> + <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 11 ] autorelease ] ; <nl> + [ field addGroup : group1 ] ; <nl> + [ set1 addField : field ] ; <nl> + <nl> GPBUnknownFieldSet * set2 = [ [ [ GPBUnknownFieldSet alloc ] init ] 
autorelease ] ; <nl> field = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> [ field addVarint : 1 ] ; <nl> - ( void ) testMergeFrom { <nl> field = [ [ [ GPBUnknownField alloc ] initWithNumber : 3 ] autorelease ] ; <nl> [ field addVarint : 3 ] ; <nl> [ set2 addField : field ] ; <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 4 ] autorelease ] ; <nl> + [ field addFixed32 : 7 ] ; <nl> + [ set2 addField : field ] ; <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 5 ] autorelease ] ; <nl> + [ field addFixed64 : 30 ] ; <nl> + [ set2 addField : field ] ; <nl> field = [ [ [ GPBUnknownField alloc ] initWithNumber : 10 ] autorelease ] ; <nl> [ field addLengthDelimited : DataFromCStr ( " data2 " ) ] ; <nl> [ set2 addField : field ] ; <nl> <nl> + GPBUnknownFieldSet * group2 = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * fieldGroup2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 201 ] autorelease ] ; <nl> + [ fieldGroup2 addVarint : 99 ] ; <nl> + [ group2 addField : fieldGroup2 ] ; <nl> + <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 11 ] autorelease ] ; <nl> + [ field addGroup : group2 ] ; <nl> + [ set2 addField : field ] ; <nl> + <nl> GPBUnknownFieldSet * set3 = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> field = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> [ field addVarint : 1 ] ; <nl> - ( void ) testMergeFrom { <nl> [ set3 addField : field ] ; <nl> [ field addVarint : 3 ] ; <nl> [ set3 addField : field ] ; <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 4 ] autorelease ] ; <nl> + [ field addFixed32 : 6 ] ; <nl> + [ field addFixed32 : 7 ] ; <nl> + [ set3 addField : field ] ; <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 5 ] autorelease ] ; <nl> + [ field addFixed64 : 20 ] ; <nl> + [ field addFixed64 : 30 ] ; <nl> + [ set3 addField : field ] ; <nl> field = [ [ [ GPBUnknownField alloc ] 
initWithNumber : 10 ] autorelease ] ; <nl> [ field addLengthDelimited : DataFromCStr ( " data1 " ) ] ; <nl> [ field addLengthDelimited : DataFromCStr ( " data2 " ) ] ; <nl> [ set3 addField : field ] ; <nl> <nl> + GPBUnknownFieldSet * group3a = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * fieldGroup3a1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 200 ] autorelease ] ; <nl> + [ fieldGroup3a1 addVarint : 100 ] ; <nl> + [ group3a addField : fieldGroup3a1 ] ; <nl> + GPBUnknownFieldSet * group3b = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * fieldGroup3b2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 201 ] autorelease ] ; <nl> + [ fieldGroup3b2 addVarint : 99 ] ; <nl> + [ group3b addField : fieldGroup3b2 ] ; <nl> + <nl> + field = [ [ [ GPBUnknownField alloc ] initWithNumber : 11 ] autorelease ] ; <nl> + [ field addGroup : group1 ] ; <nl> + [ field addGroup : group3b ] ; <nl> + [ set3 addField : field ] ; <nl> + <nl> TestEmptyMessage * source1 = [ TestEmptyMessage message ] ; <nl> [ source1 setUnknownFields : set1 ] ; <nl> TestEmptyMessage * source2 = [ TestEmptyMessage message ] ; <nl> - ( void ) testLargeVarint { <nl> XCTAssertEqual ( 0x7FFFFFFFFFFFFFFFULL , [ field2 . varintList valueAtIndex : 0 ] ) ; <nl> } <nl> <nl> + # pragma mark - Field tests <nl> + / / Some tests directly on fields since the dictionary in FieldSet can gate <nl> + / / testing some of these . 
<nl> + <nl> + - ( void ) testFieldEqualityAndHash { <nl> + GPBUnknownField * field1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> + XCTAssertTrue ( [ field1 isEqual : field1 ] ) ; <nl> + XCTAssertFalse ( [ field1 isEqual : @ " foo " ] ) ; <nl> + GPBUnknownField * field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 2 ] autorelease ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + <nl> + field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + <nl> + / / Varint <nl> + <nl> + [ field1 addVarint : 10 ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + [ field2 addVarint : 10 ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + [ field1 addVarint : 11 ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + [ field2 addVarint : 11 ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + <nl> + / / Fixed32 <nl> + <nl> + [ field1 addFixed32 : 20 ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + [ field2 addFixed32 : 20 ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + [ field1 addFixed32 : 21 ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + [ field2 addFixed32 : 21 ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + <nl> + / / Fixed64 <nl> + <nl> + [ field1 addFixed64 : 30 ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + [ field2 addFixed64 : 30 ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + [ field1 addFixed64 : 31 ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 
) ; <nl> + [ field2 addFixed64 : 31 ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + <nl> + / / LengthDelimited <nl> + <nl> + [ field1 addLengthDelimited : DataFromCStr ( " foo " ) ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + [ field2 addLengthDelimited : DataFromCStr ( " foo " ) ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + [ field1 addLengthDelimited : DataFromCStr ( " bar " ) ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + [ field2 addLengthDelimited : DataFromCStr ( " bar " ) ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + <nl> + / / Group <nl> + <nl> + GPBUnknownFieldSet * group = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + GPBUnknownField * fieldGroup = [ [ [ GPBUnknownField alloc ] initWithNumber : 100 ] autorelease ] ; <nl> + [ fieldGroup addVarint : 100 ] ; <nl> + [ group addField : fieldGroup ] ; <nl> + [ field1 addGroup : group ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + group = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + fieldGroup = [ [ [ GPBUnknownField alloc ] initWithNumber : 100 ] autorelease ] ; <nl> + [ fieldGroup addVarint : 100 ] ; <nl> + [ group addField : fieldGroup ] ; <nl> + [ field2 addGroup : group ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + <nl> + group = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> + fieldGroup = [ [ [ GPBUnknownField alloc ] initWithNumber : 101 ] autorelease ] ; <nl> + [ fieldGroup addVarint : 101 ] ; <nl> + [ group addField : fieldGroup ] ; <nl> + [ field1 addGroup : group ] ; <nl> + XCTAssertNotEqualObjects ( field1 , field2 ) ; <nl> + group = [ [ [ GPBUnknownFieldSet alloc ] init ] autorelease ] ; <nl> 
+ fieldGroup = [ [ [ GPBUnknownField alloc ] initWithNumber : 101 ] autorelease ] ; <nl> + [ fieldGroup addVarint : 101 ] ; <nl> + [ group addField : fieldGroup ] ; <nl> + [ field2 addGroup : group ] ; <nl> + XCTAssertEqualObjects ( field1 , field2 ) ; <nl> + XCTAssertEqual ( [ field1 hash ] , [ field2 hash ] ) ; <nl> + <nl> + / / Exercise description for completeness . <nl> + XCTAssertTrue ( field1 . description . length > 10 ) ; <nl> + } <nl> + <nl> - ( void ) testMergingFields { <nl> GPBUnknownField * field1 = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> [ field1 addVarint : 1 ] ; <nl> - ( void ) testMergingFields { <nl> [ field1 addFixed64 : 3 ] ; <nl> [ field1 addLengthDelimited : [ NSData dataWithBytes : " hello " length : 5 ] ] ; <nl> [ field1 addGroup : [ [ unknownFields_ copy ] autorelease ] ] ; <nl> - GPBUnknownField * field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 2 ] autorelease ] ; <nl> + GPBUnknownField * field2 = [ [ [ GPBUnknownField alloc ] initWithNumber : 1 ] autorelease ] ; <nl> [ field2 mergeFromField : field1 ] ; <nl> - XCTAssertEqualObjects ( field1 , field2 ) ; <nl> } <nl> <nl> @ end <nl>
Build out more complete code coverage in the tests .
protocolbuffers/protobuf
a274c67caf9b6f85587ac812bf6e5837280dbaef
2017-10-03T15:54:44Z
mmm a / src / core / loader / nca . cpp <nl> ppp b / src / core / loader / nca . cpp <nl> <nl> / / Refer to the license . txt file included . <nl> <nl> # include < utility > <nl> - # include < vector > <nl> <nl> # include " common / file_util . h " <nl> # include " common / logging / log . h " <nl> - # include " common / string_util . h " <nl> - # include " common / swap . h " <nl> - # include " core / core . h " <nl> # include " core / file_sys / content_archive . h " <nl> - # include " core / file_sys / program_metadata . h " <nl> - # include " core / gdbstub / gdbstub . h " <nl> # include " core / hle / kernel / process . h " <nl> - # include " core / hle / kernel / resource_limit . h " <nl> # include " core / hle / service / filesystem / filesystem . h " <nl> + # include " core / loader / deconstructed_rom_directory . h " <nl> # include " core / loader / nca . h " <nl> - # include " core / loader / nso . h " <nl> - # include " core / memory . h " <nl> <nl> namespace Loader { <nl> <nl> AppLoader_NCA : : AppLoader_NCA ( FileSys : : VirtualFile file_ ) <nl> : AppLoader ( std : : move ( file_ ) ) , nca ( std : : make_unique < FileSys : : NCA > ( file ) ) { } <nl> <nl> + AppLoader_NCA : : ~ AppLoader_NCA ( ) = default ; <nl> + <nl> FileType AppLoader_NCA : : IdentifyType ( const FileSys : : VirtualFile & file ) { <nl> FileSys : : NCA nca ( file ) ; <nl> <nl> ResultStatus AppLoader_NCA : : ReadProgramId ( u64 & out_program_id ) { <nl> return ResultStatus : : Success ; <nl> } <nl> <nl> - AppLoader_NCA : : ~ AppLoader_NCA ( ) = default ; <nl> - <nl> } / / namespace Loader <nl> mmm a / src / core / loader / nca . h <nl> ppp b / src / core / loader / nca . h <nl> <nl> <nl> # pragma once <nl> <nl> - # include < string > <nl> # include " common / common_types . h " <nl> - # include " core / file_sys / content_archive . h " <nl> - # include " core / file_sys / program_metadata . h " <nl> + # include " core / file_sys / vfs . 
h " <nl> # include " core / hle / kernel / object . h " <nl> # include " core / loader / loader . h " <nl> - # include " deconstructed_rom_directory . h " <nl> + <nl> + namespace FileSys { <nl> + class NCA ; <nl> + } <nl> <nl> namespace Loader { <nl> <nl> + class AppLoader_DeconstructedRomDirectory ; <nl> + <nl> / / / Loads an NCA file <nl> class AppLoader_NCA final : public AppLoader { <nl> public : <nl> explicit AppLoader_NCA ( FileSys : : VirtualFile file ) ; <nl> + ~ AppLoader_NCA ( ) override ; <nl> <nl> / * * <nl> * Returns the type of the file <nl> class AppLoader_NCA final : public AppLoader { <nl> ResultStatus ReadRomFS ( FileSys : : VirtualFile & dir ) override ; <nl> ResultStatus ReadProgramId ( u64 & out_program_id ) override ; <nl> <nl> - ~ AppLoader_NCA ( ) ; <nl> - <nl> private : <nl> - FileSys : : ProgramMetadata metadata ; <nl> - <nl> - FileSys : : NCAHeader header ; <nl> std : : unique_ptr < FileSys : : NCA > nca ; <nl> std : : unique_ptr < AppLoader_DeconstructedRomDirectory > directory_loader ; <nl> } ; <nl> mmm a / src / core / loader / xci . cpp <nl> ppp b / src / core / loader / xci . cpp <nl> <nl> <nl> # include < vector > <nl> <nl> - # include " common / file_util . h " <nl> - # include " common / logging / log . h " <nl> - # include " common / string_util . h " <nl> - # include " common / swap . h " <nl> - # include " core / core . h " <nl> + # include " common / common_types . h " <nl> + # include " core / file_sys / card_image . h " <nl> # include " core / file_sys / content_archive . h " <nl> # include " core / file_sys / control_metadata . h " <nl> - # include " core / file_sys / program_metadata . h " <nl> # include " core / file_sys / romfs . h " <nl> - # include " core / gdbstub / gdbstub . h " <nl> # include " core / hle / kernel / process . h " <nl> - # include " core / hle / kernel / resource_limit . h " <nl> - # include " core / hle / service / filesystem / filesystem . h " <nl> - # include " core / loader / nso . 
h " <nl> + # include " core / loader / nca . h " <nl> # include " core / loader / xci . h " <nl> - # include " core / memory . h " <nl> <nl> namespace Loader { <nl> <nl> mmm a / src / core / loader / xci . h <nl> ppp b / src / core / loader / xci . h <nl> <nl> <nl> # include < memory > <nl> # include " common / common_types . h " <nl> - # include " core / file_sys / card_image . h " <nl> + # include " core / file_sys / vfs . h " <nl> # include " core / loader / loader . h " <nl> - # include " core / loader / nca . h " <nl> + <nl> + namespace FileSys { <nl> + class NACP ; <nl> + class XCI ; <nl> + } / / namespace FileSys <nl> <nl> namespace Loader { <nl> <nl> + class AppLoader_NCA ; <nl> + <nl> / / / Loads an XCI file <nl> class AppLoader_XCI final : public AppLoader { <nl> public : <nl> class AppLoader_XCI final : public AppLoader { <nl> ResultStatus ReadTitle ( std : : string & title ) override ; <nl> <nl> private : <nl> - FileSys : : ProgramMetadata metadata ; <nl> - <nl> std : : unique_ptr < FileSys : : XCI > xci ; <nl> std : : unique_ptr < AppLoader_NCA > nca_loader ; <nl> <nl>
Merge pull request from lioncash / include
yuzu-emu/yuzu
24a759de4a62dd39e3a0e97c97f13c6cb98ccd7c
2018-08-16T04:04:25Z
mmm a / spec / api - app - spec . js <nl> ppp b / spec / api - app - spec . js <nl> describe ( ' app module ' , ( ) = > { <nl> let server = null <nl> const socketPath = process . platform = = = ' win32 ' ? ' \ \ \ \ . \ \ pipe \ \ electron - app - relaunch ' : ' / tmp / electron - app - relaunch ' <nl> <nl> + / / TODO ( alexeykuzmin ) : [ Ch68 ] Fails on Linux . <nl> + / / Enable the test back . <nl> + before ( function ( ) { <nl> + if ( process . platform = = = ' linux ' ) { <nl> + this . skip ( ) <nl> + } <nl> + } ) <nl> + <nl> beforeEach ( done = > { <nl> fs . unlink ( socketPath , ( ) = > { <nl> server = net . createServer ( ) <nl>
test : disable the " app . relaunch " test on Linux
electron/electron
21f382fcaec4ca7ae28e09a322809327dad016c1
2018-10-03T22:21:58Z
mmm a / src / python / grpcio_tests / tests_aio / unit / call_test . py <nl> ppp b / src / python / grpcio_tests / tests_aio / unit / call_test . py <nl> async def test_time_remaining ( self ) : <nl> <nl> # Should be around the same as the timeout <nl> remained_time = call . time_remaining ( ) <nl> - self . assertGreater ( remained_time , test_constants . SHORT_TIMEOUT * 3 / / 2 ) <nl> - self . assertLess ( remained_time , test_constants . SHORT_TIMEOUT * 2 ) <nl> + self . assertGreater ( remained_time , test_constants . SHORT_TIMEOUT * 3 / 2 ) <nl> + self . assertLess ( remained_time , test_constants . SHORT_TIMEOUT * 5 / 2 ) <nl> <nl> response = await call . read ( ) <nl> self . assertEqual ( _RESPONSE_PAYLOAD_SIZE , len ( response . payload . body ) ) <nl> <nl> # Should be around the timeout minus a unit of wait time <nl> remained_time = call . time_remaining ( ) <nl> - self . assertGreater ( remained_time , test_constants . SHORT_TIMEOUT / / 2 ) <nl> - self . assertLess ( remained_time , test_constants . SHORT_TIMEOUT * 3 / / 2 ) <nl> + self . assertGreater ( remained_time , test_constants . SHORT_TIMEOUT / 2 ) <nl> + self . assertLess ( remained_time , test_constants . SHORT_TIMEOUT * 3 / 2 ) <nl> <nl> self . assertEqual ( grpc . StatusCode . OK , await call . code ( ) ) <nl> <nl>
Merge pull request from lidizheng / fix - time - remaining
grpc/grpc
086297b1e9efdfd35cdc7b6661a0f1a9e34a5a8b
2020-03-10T22:57:59Z
mmm a / hphp / runtime / base / ref - data . h <nl> ppp b / hphp / runtime / base / ref - data . h <nl> struct RefBits { <nl> * For more info on the PHP extension compatibility layer , check out <nl> * the documentation at " doc / php . extension . compat . layer " . <nl> * / <nl> - struct RefData final : Countable , type_scan : : MarkScannableCountable < RefData > { <nl> + struct RefData final : Countable , type_scan : : MarkScannableCollectable < RefData > { <nl> / * <nl> * Some RefData ' s ( static locals ) are allocated in RDS , and <nl> * live until the end of the request . In this case , we start with a <nl> mmm a / hphp / tools / type - info - gens / gen - type - scanners . cpp <nl> ppp b / hphp / tools / type - info - gens / gen - type - scanners . cpp <nl> <nl> * be conservatively scanned . <nl> * <nl> * - " Collectable " . A type is collectable if MarkCollectable < > or <nl> - * MarkScannableCountable < > is instantiated on it . Type - scanners are never <nl> + * MarkScannableCollectable < > is instantiated on it . Type - scanners are never <nl> * generated for collectable types , it is assumed their scanners will be <nl> - * hand - written . The exception is if MarkScannableCountable < > is used , in <nl> + * hand - written . The exception is if MarkScannableCollectable < > is used , in <nl> * which case they ' ll be scanned if explicitly requested . The point of the <nl> - * type - scanners is to determine how to find pointers to countable types from <nl> - * other types . Collectable types correspond to the set of types in HHVM which <nl> - * are explicitly managed by the GC . <nl> + * type - scanners is to determine how to find pointers to collectable types <nl> + * from other types . Collectable types correspond to the set of types in HHVM <nl> + * which are explicitly managed by the GC . <nl> * <nl> * - " Indexed " . An indexed type is a combination of a type and an action . These <nl> * occur from an instantiation of Indexer < > . 
Any particular type can be part <nl> struct Generator { <nl> const std : : string & name ) ; <nl> <nl> static bool isMarkCollectableName ( const std : : string & ) ; <nl> - static bool isMarkScannableCountableName ( const std : : string & ) ; <nl> + static bool isMarkScannableCollectableName ( const std : : string & ) ; <nl> static bool isIndexerName ( const std : : string & ) ; <nl> static bool isConservativeActionName ( const std : : string & ) ; <nl> static bool isWithSuffixActionName ( const std : : string & ) ; <nl> struct Generator { <nl> <nl> const Object & getObject ( const ObjectType & ) const ; <nl> <nl> - const Object & getMarkedCountable ( const Object & ) const ; <nl> + const Object & getMarkedCollectable ( const Object & ) const ; <nl> <nl> void genLayout ( const Type & , Layout & , size_t , <nl> bool conservative_everything = false ) const ; <nl> struct Generator { <nl> std : : vector < Generator : : IndexedType > <nl> ) const ; <nl> <nl> - bool hasCountableBase ( const Object & object ) const ; <nl> + bool hasCollectableBase ( const Object & object ) const ; <nl> <nl> bool forbiddenTemplateCheck ( const Type & type ) const ; <nl> bool forbiddenTemplateCheck ( const Object & object ) const ; <nl> struct Generator { <nl> / / are discovered ( it must grow monotonically , never removing anything ) . <nl> std : : unordered_set < const Object * > m_ptr_followable ; <nl> std : : unordered_set < const Object * > m_collectable ; <nl> - std : : unordered_set < const Object * > m_scannable_countable ; <nl> + std : : unordered_set < const Object * > m_scannable_collectable ; <nl> <nl> / / List of all layouts . Once computed , the indexed types will have an index <nl> / / into this table for its associated layout . <nl> struct Generator { <nl> / / sync with the types in type - scan . h . 
<nl> static constexpr const char * const s_mark_collectable_name = <nl> " HPHP : : type_scan : : MarkCollectable " ; <nl> - static constexpr const char * const s_mark_scannable_countable_name = <nl> - " HPHP : : type_scan : : MarkScannableCountable " ; <nl> + static constexpr const char * const s_mark_scannable_collectable_name = <nl> + " HPHP : : type_scan : : MarkScannableCollectable " ; <nl> static constexpr const char * const s_indexer_name = <nl> " HPHP : : type_scan : : detail : : Indexer " ; <nl> static constexpr const char * const s_auto_action_name = <nl> Generator : : Generator ( const std : : string & filename , bool skip ) { <nl> <nl> tbb : : concurrent_vector < ObjectType > indexer_types ; <nl> tbb : : concurrent_vector < ObjectType > collectable_markers ; <nl> - tbb : : concurrent_vector < ObjectType > scannable_countable_markers ; <nl> + tbb : : concurrent_vector < ObjectType > scannable_collectable_markers ; <nl> <nl> / / Iterate through all the objects the debug info parser found , storing the <nl> / / MarkCollectable < > markers , and the Indexer < > instances . For everything , <nl> Generator : : Generator ( const std : : string & filename , bool skip ) { <nl> indexer_types . push_back ( type ) ; <nl> } else if ( isMarkCollectableName ( type . name . name ) ) { <nl> collectable_markers . push_back ( type ) ; <nl> - } else if ( isMarkScannableCountableName ( type . name . name ) ) { <nl> + } else if ( isMarkScannableCollectableName ( type . name . name ) ) { <nl> collectable_markers . push_back ( type ) ; <nl> - scannable_countable_markers . push_back ( type ) ; <nl> + scannable_collectable_markers . push_back ( type ) ; <nl> } <nl> <nl> / / Incomplete types are useless for our purposes , so just ignore <nl> Generator : : Generator ( const std : : string & filename , bool skip ) { <nl> " Is debug - info enabled ? 
" < < std : : endl ; <nl> } <nl> <nl> - / / Extract all the types that Mark [ Scannable ] Countable < > was instantiated on <nl> - / / to obtain all the types which are countable . Since all countable types are <nl> - / / automatically pointer followable , mark them as such . <nl> + / / Extract all the types that Mark [ Scannable ] Collectable < > was instantiated on <nl> + / / to obtain all the types which are collectable . Since all collectable types <nl> + / / are automatically pointer followable , mark them as such . <nl> m_collectable = extractFromMarkers < decltype ( m_collectable ) > ( <nl> collectable_markers , <nl> - [ & ] ( const Object & o ) { return & getMarkedCountable ( o ) ; } <nl> + [ & ] ( const Object & o ) { return & getMarkedCollectable ( o ) ; } <nl> ) ; <nl> - m_scannable_countable = extractFromMarkers < decltype ( m_scannable_countable ) > ( <nl> - scannable_countable_markers , <nl> - [ & ] ( const Object & o ) { return & getMarkedCountable ( o ) ; } <nl> + m_scannable_collectable = <nl> + extractFromMarkers < decltype ( m_scannable_collectable ) > ( <nl> + scannable_collectable_markers , <nl> + [ & ] ( const Object & o ) { return & getMarkedCollectable ( o ) ; } <nl> ) ; <nl> for ( const auto * obj : m_collectable ) { <nl> makePtrFollowable ( * obj ) ; <nl> Generator : : Generator ( const std : : string & filename , bool skip ) { <nl> if ( indexed . scan ) continue ; <nl> <nl> / / If the underlying type is an object type , and its associated action is <nl> - / / always non - trivial , or if the object type has a countable type as a base <nl> - / / class , then the object type is always pointer followable . Same logic for <nl> - / / any suffix type . <nl> + / / always non - trivial , or if the object type has a collectable type as a <nl> + / / base class , then the object type is always pointer followable . Same logic <nl> + / / for any suffix type . <nl> if ( const auto * obj = stripModifiers ( * indexed . type ) . 
asObject ( ) ) { <nl> const auto & object = getObject ( * obj ) ; <nl> - if ( getAction ( object ) . isAlwaysNonTrivial ( ) | | hasCountableBase ( object ) ) { <nl> + if ( getAction ( object ) . isAlwaysNonTrivial ( ) | | <nl> + hasCollectableBase ( object ) ) { <nl> makePtrFollowable ( object ) ; <nl> } <nl> } <nl> bool Generator : : isTemplateName ( const std : : string & candidate , <nl> bool Generator : : isMarkCollectableName ( const std : : string & name ) { <nl> return isTemplateName ( name , s_mark_collectable_name ) ; <nl> } <nl> - bool Generator : : isMarkScannableCountableName ( const std : : string & name ) { <nl> - return isTemplateName ( name , s_mark_scannable_countable_name ) ; <nl> + bool Generator : : isMarkScannableCollectableName ( const std : : string & name ) { <nl> + return isTemplateName ( name , s_mark_scannable_collectable_name ) ; <nl> } <nl> bool Generator : : isIndexerName ( const std : : string & name ) { <nl> return isTemplateName ( name , s_indexer_name ) ; <nl> void Generator : : sanityCheckTemplateParams ( const Object & object ) { <nl> } <nl> } <nl> <nl> - / / Given a Mark [ Scannable ] Countable < > marker instantiation , extract the <nl> - / / object - type its marking . Actually very simple , but do a lot of sanity <nl> + / / Given a Mark [ Scannable ] CollectiblCollectable < > marker instantiation , extract <nl> + / / the object - type its marking . Actually very simple , but do a lot of sanity <nl> / / checking on the result . <nl> - const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> + const Object & Generator : : getMarkedCollectable ( const Object & mark ) const { <nl> if ( mark . incomplete ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) is an incomplete type " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) is an incomplete type " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . 
compile_unit_id <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( mark . kind ! = Object : : Kind : : k_class ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) isn ' t a class type " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) isn ' t a class type " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . compile_unit_id <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( ! mark . bases . empty ( ) ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) has base classes " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) has base classes " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . compile_unit_id <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( ! mark . members . empty ( ) ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) has members " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) has members " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . compile_unit_id <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( mark . name . linkage ! = ObjectTypeName : : Linkage : : external ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) does not have external linkage " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) does not have external linkage " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . compile_unit_id <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( mark . template_params . size ( ) ! 
= 1 ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) does not have exactly " <nl> + " Collectable marker ' { } ' at ( { } , { } ) does not have exactly " <nl> " one template parameter " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( ! obj_type ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) is instantiated on type ' { } ' , " <nl> + " Collectable marker ' { } ' at ( { } , { } ) is instantiated on type ' { } ' , " <nl> " which is not an object " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( obj_type - > name . linkage ! = ObjectTypeName : : Linkage : : external ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) is instantiated on object type ' { } ' " <nl> - " at ( { } , { } ) , which does not have external linkage " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) is instantiated on object type ' { } ' " <nl> + " at ( { } , { } ) , which does not have external linkage " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . compile_unit_id , <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( obj . incomplete ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) is instantiated on object type ' { } ' " <nl> - " at ( { } , { } ) , which is an incomplete type " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) is instantiated on object type ' { } ' " <nl> + " at ( { } , { } ) , which is an incomplete type " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . 
compile_unit_id , <nl> const Object & Generator : : getMarkedCountable ( const Object & mark ) const { <nl> if ( obj . kind ! = Object : : Kind : : k_class ) { <nl> throw Exception { <nl> folly : : sformat ( <nl> - " Countable marker ' { } ' at ( { } , { } ) is instantiated on object type ' { } ' " <nl> - " at ( { } , { } ) , which is not a class type " , <nl> + " Collectable marker ' { } ' at ( { } , { } ) is instantiated on object type ' { } ' " <nl> + " at ( { } , { } ) , which is not a class type " , <nl> mark . name . name , <nl> mark . key . object_id , <nl> mark . key . compile_unit_id , <nl> void Generator : : genLayout ( const Object & object , <nl> size_t offset , <nl> bool do_forbidden_check , <nl> bool conservative_everything ) const { <nl> - / / Never generate layout for countable types , unless it was marked as <nl> + / / Never generate layout for collectable types , unless it was marked as <nl> / / scannable . <nl> if ( m_collectable . count ( & object ) > 0 & & <nl> - ! m_scannable_countable . count ( & object ) ) { <nl> + ! m_scannable_collectable . count ( & object ) ) { <nl> return ; <nl> } <nl> <nl> void Generator : : makePtrFollowable ( const Object & obj ) { <nl> } <nl> } <nl> <nl> - / / Recursive function to check if a given object has a countable base somewhere <nl> - / / in its type hierarchy . <nl> - bool Generator : : hasCountableBase ( const Object & object ) const { <nl> + / / Recursive function to check if a given object has a collectable base <nl> + / / somewhere in its type hierarchy . <nl> + bool Generator : : hasCollectableBase ( const Object & object ) const { <nl> if ( m_collectable . count ( & object ) ) return true ; <nl> return std : : any_of ( <nl> object . bases . begin ( ) , <nl> object . bases . end ( ) , <nl> [ this ] ( const Object : : Base & b ) { <nl> - return hasCountableBase ( getObject ( b . type ) ) ; <nl> + return hasCollectableBase ( getObject ( b . 
type ) ) ; <nl> } <nl> ) ; <nl> } <nl> void Generator : : genMetrics ( std : : ostream & os ) const { <nl> os < < " / / unique layouts : " < < m_layouts . size ( ) < < std : : endl ; <nl> os < < " / / indexed types : " < < m_indexed_types . size ( ) < < std : : endl ; <nl> os < < " / / pointer followable types : " < < m_ptr_followable . size ( ) < < std : : endl ; <nl> - os < < " / / countable types : " < < m_collectable . size ( ) < < std : : endl ; <nl> - os < < " / / scannable countable types : " < < m_scannable_countable . size ( ) <nl> + os < < " / / collectable types : " < < m_collectable . size ( ) < < std : : endl ; <nl> + os < < " / / scannable collectable types : " < < m_scannable_collectable . size ( ) <nl> < < std : : endl ; <nl> <nl> size_t conservative_fields { 0 } ; <nl> mmm a / hphp / util / type - scan . h <nl> ppp b / hphp / util / type - scan . h <nl> template < typename T > struct MarkCollectable { } ; <nl> / / requested . However , you may want to scan a countable type in certain contexts <nl> / / ( for example , a countable type which can be both allocated in memory and the <nl> / / stack ) . In that case , use this marker instead . <nl> - template < typename T > struct MarkScannableCountable { } ; <nl> + template < typename T > struct MarkScannableCollectable { } ; <nl> <nl> / / Obtain a type index for the given type T and an optional action . Asserts that <nl> / / this index will be used to scan T , and that T is being allocated here . <nl>
Small codemod : replace MarkScannableCountable
facebook/hhvm
1d13bfdb8fc5fd7314f533e1170dcb30135f97e1
2017-11-06T19:59:47Z
mmm a / R - package / R / xgb . importance . R <nl> ppp b / R - package / R / xgb . importance . R <nl> <nl> # ' @ importFrom Matrix cBind <nl> # ' @ importFrom Matrix sparseVector <nl> # ' <nl> - # ' @ param feature_names names of each feature as a character vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . <nl> - # ' @ param model generated by the \ code { xgb . train } function . Avoid the creation of a dump file . <nl> + # ' @ param feature_names names of each feature as a \ code { character } vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . <nl> + # ' @ param model generated by the \ code { xgb . train } function . <nl> # ' @ param data the dataset used for the training step . Will be used with \ code { label } parameter for co - occurence computation . More information in \ code { Detail } part . This parameter is optional . <nl> # ' @ param label the label vetor used for the training step . Will be used with \ code { data } parameter for co - occurence computation . More information in \ code { Detail } part . This parameter is optional . <nl> # ' @ param target a function which returns \ code { TRUE } or \ code { 1 } when an observation should be count as a co - occurence and \ code { FALSE } or \ code { 0 } otherwise . Default function is provided for computing co - occurences in a binary classification . The \ code { target } function should have only one parameter . This parameter will be used to provide each important feature vector after having applied the split condition , therefore these vector will be only made of 0 and 1 only , whatever was the information before . More information in \ code { Detail } part . This parameter is optional . <nl> <nl> # ' @ return A \ code { data . 
table } of the features used in the model with their average gain ( and their weight for boosted tree model ) in the model . <nl> # ' <nl> # ' @ details <nl> - # ' This is the function to understand the model trained ( and through your model , your data ) . <nl> - # ' <nl> # ' This function is for both linear and tree models . <nl> # ' <nl> # ' \ code { data . table } is returned by the function . <nl> mmm a / R - package / R / xgb . plot . deepness . R <nl> ppp b / R - package / R / xgb . plot . deepness . R <nl> get . paths . to . leaf < - function ( dt . tree ) { <nl> # ' @ importFrom data . table setnames <nl> # ' @ importFrom data . table : = <nl> # ' @ importFrom magrittr % > % <nl> - # ' @ param model dump generated by the \ code { xgb . train } function . Avoid the creation of a dump file . <nl> + # ' @ param model dump generated by the \ code { xgb . train } function . <nl> # ' <nl> # ' @ return Two graphs showing the distribution of the model deepness . <nl> # ' <nl> get . paths . to . leaf < - function ( dt . tree ) { <nl> # ' <nl> # ' \ itemize { <nl> # ' \ item Count : number of leaf per level of deepness ; <nl> - # ' \ item Weighted cover : noramlized weighted cover per Leaf ( weighted number of instances ) . <nl> + # ' \ item Weighted cover : noramlized weighted cover per leaf ( weighted number of instances ) . <nl> # ' } <nl> # ' <nl> # ' This function is inspired by the blog post \ url { http : / / aysent . github . io / 2015 / 11 / 08 / random - forest - leaf - visualization . html } <nl> mmm a / R - package / R / xgb . plot . multi . trees . R <nl> ppp b / R - package / R / xgb . plot . multi . trees . R <nl> <nl> # ' @ importFrom stringr str_detect <nl> # ' @ importFrom stringr str_extract <nl> # ' <nl> - # ' @ param model dump generated by the \ code { xgb . train } function . Avoid the creation of a dump file . <nl> - # ' @ param feature_names names of each feature as a character vector . Can be extracted from a sparse matrix ( see example ) . 
If model dump already contains feature names , this argument should be \ code { NULL } . <nl> + # ' @ param model dump generated by the \ code { xgb . train } function . <nl> + # ' @ param feature_names names of each feature as a \ code { character } vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . <nl> # ' @ param features . keep number of features to keep in each position of the multi trees . <nl> # ' @ param plot . width width in pixels of the graph to produce <nl> # ' @ param plot . height height in pixels of the graph to produce <nl> mmm a / R - package / R / xgb . plot . tree . R <nl> ppp b / R - package / R / xgb . plot . tree . R <nl> <nl> # ' Plot a boosted tree model <nl> # ' <nl> - # ' Read a tree model text dump . <nl> - # ' Plotting only works for boosted tree model ( not linear model ) . <nl> + # ' Read a tree model text dump and plot the model . <nl> # ' <nl> # ' @ importFrom data . table data . table <nl> # ' @ importFrom data . table : = <nl> # ' @ importFrom magrittr % > % <nl> - # ' @ param feature_names names of each feature as a character vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . <nl> + # ' @ param feature_names names of each feature as a \ code { character } vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . <nl> # ' @ param model generated by the \ code { xgb . train } function . Avoid the creation of a dump file . <nl> # ' @ param n_first_tree limit the plot to the n first trees . If \ code { NULL } , all trees of the model are plotted . Performance can be low for huge models . <nl> # ' @ param plot . width the width of the diagram in pixels . 
<nl> <nl> # ' The content of each node is organised that way : <nl> # ' <nl> # ' \ itemize { <nl> - # ' \ item \ code { feature } value ; <nl> - # ' \ item \ code { cover } : the sum of second order gradient of training data classified to the leaf , if it is square loss , this simply corresponds to the number of instances in that branch . Deeper in the tree a node is , lower this metric will be ; <nl> + # ' \ item \ code { feature } value ; <nl> + # ' \ item \ code { cover } : the sum of second order gradient of training data classified to the leaf , if it is square loss , this simply corresponds to the number of instances in that branch . Deeper in the tree a node is , lower this metric will be ; <nl> # ' \ item \ code { gain } : metric the importance of the node in the model . <nl> # ' } <nl> # ' <nl> - # ' Each branch finishes with a leaf . For each leaf , only the \ code { cover } is indicated . <nl> - # ' It uses \ href { http : / / www . graphviz . org / } { GraphViz } library for that purpose . <nl> + # ' The function uses \ href { http : / / www . graphviz . org / } { GraphViz } library for that purpose . <nl> # ' <nl> # ' @ examples <nl> # ' data ( agaricus . train , package = ' xgboost ' ) <nl> # ' <nl> - # ' # Both dataset are list with two items , a sparse matrix and labels <nl> - # ' # ( labels = outcome column which will be learned ) . <nl> - # ' # Each column of the sparse Matrix is a feature in one hot encoding format . <nl> - # ' <nl> # ' bst < - xgboost ( data = agaricus . train $ data , label = agaricus . train $ label , max . depth = 2 , <nl> # ' eta = 1 , nthread = 2 , nround = 2 , objective = " binary : logistic " ) <nl> # ' <nl> - # ' # agaricus . test $ data @ @ Dimnames [ [ 2 ] ] represents the column names of the sparse matrix . <nl> + # ' # agaricus . train $ data @ @ Dimnames [ [ 2 ] ] represents the column names of the sparse matrix . <nl> # ' xgb . plot . tree ( feature_names = agaricus . 
train $ data @ @ Dimnames [ [ 2 ] ] , model = bst ) <nl> # ' <nl> # ' @ export <nl> mmm a / R - package / man / xgb . importance . Rd <nl> ppp b / R - package / man / xgb . importance . Rd <nl> xgb . importance ( feature_names = NULL , model = NULL , data = NULL , <nl> label = NULL , target = function ( x ) ( ( x + label ) = = 2 ) ) <nl> } <nl> \ arguments { <nl> - \ item { feature_names } { names of each feature as a character vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . } <nl> + \ item { feature_names } { names of each feature as a \ code { character } vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . } <nl> <nl> - \ item { model } { generated by the \ code { xgb . train } function . Avoid the creation of a dump file . } <nl> + \ item { model } { generated by the \ code { xgb . train } function . } <nl> <nl> \ item { data } { the dataset used for the training step . Will be used with \ code { label } parameter for co - occurence computation . More information in \ code { Detail } part . This parameter is optional . } <nl> <nl> A \ code { data . table } of the features used in the model with their average gain ( a <nl> Create a \ code { data . table } of the most important features of a model . <nl> } <nl> \ details { <nl> - This is the function to understand the model trained ( and through your model , your data ) . <nl> - <nl> This function is for both linear and tree models . <nl> <nl> \ code { data . table } is returned by the function . <nl> mmm a / R - package / man / xgb . plot . deepness . Rd <nl> ppp b / R - package / man / xgb . plot . deepness . Rd <nl> <nl> xgb . plot . deepness ( model = NULL ) <nl> } <nl> \ arguments { <nl> - \ item { model } { dump generated by the \ code { xgb . train } function . Avoid the creation of a dump file . 
} <nl> + \ item { model } { dump generated by the \ code { xgb . train } function . } <nl> } <nl> \ value { <nl> Two graphs showing the distribution of the model deepness . <nl> The graph is made of two parts : <nl> <nl> \ itemize { <nl> \ item Count : number of leaf per level of deepness ; <nl> - \ item Weighted cover : noramlized weighted cover per Leaf ( weighted number of instances ) . <nl> + \ item Weighted cover : noramlized weighted cover per leaf ( weighted number of instances ) . <nl> } <nl> <nl> This function is inspired by the blog post \ url { http : / / aysent . github . io / 2015 / 11 / 08 / random - forest - leaf - visualization . html } <nl> mmm a / R - package / man / xgb . plot . multi . trees . Rd <nl> ppp b / R - package / man / xgb . plot . multi . trees . Rd <nl> xgb . plot . multi . trees ( model , feature_names = NULL , features . keep = 5 , <nl> plot . width = NULL , plot . height = NULL ) <nl> } <nl> \ arguments { <nl> - \ item { model } { dump generated by the \ code { xgb . train } function . Avoid the creation of a dump file . } <nl> + \ item { model } { dump generated by the \ code { xgb . train } function . } <nl> <nl> - \ item { feature_names } { names of each feature as a character vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . } <nl> + \ item { feature_names } { names of each feature as a \ code { character } vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . } <nl> <nl> \ item { features . keep } { number of features to keep in each position of the multi trees . } <nl> <nl> mmm a / R - package / man / xgb . plot . tree . Rd <nl> ppp b / R - package / man / xgb . plot . tree . Rd <nl> xgb . plot . tree ( feature_names = NULL , model = NULL , n_first_tree = NULL , <nl> plot . width = NULL , plot . 
height = NULL ) <nl> } <nl> \ arguments { <nl> - \ item { feature_names } { names of each feature as a character vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . } <nl> + \ item { feature_names } { names of each feature as a \ code { character } vector . Can be extracted from a sparse matrix ( see example ) . If model dump already contains feature names , this argument should be \ code { NULL } . } <nl> <nl> \ item { model } { generated by the \ code { xgb . train } function . Avoid the creation of a dump file . } <nl> <nl> xgb . plot . tree ( feature_names = NULL , model = NULL , n_first_tree = NULL , <nl> A \ code { DiagrammeR } of the model . <nl> } <nl> \ description { <nl> - Read a tree model text dump . <nl> - Plotting only works for boosted tree model ( not linear model ) . <nl> + Read a tree model text dump and plot the model . <nl> } <nl> \ details { <nl> The content of each node is organised that way : <nl> <nl> \ itemize { <nl> - \ item \ code { feature } value ; <nl> - \ item \ code { cover } : the sum of second order gradient of training data classified to the leaf , if it is square loss , this simply corresponds to the number of instances in that branch . Deeper in the tree a node is , lower this metric will be ; <nl> + \ item \ code { feature } value ; <nl> + \ item \ code { cover } : the sum of second order gradient of training data classified to the leaf , if it is square loss , this simply corresponds to the number of instances in that branch . Deeper in the tree a node is , lower this metric will be ; <nl> \ item \ code { gain } : metric the importance of the node in the model . <nl> } <nl> <nl> - Each branch finishes with a leaf . For each leaf , only the \ code { cover } is indicated . <nl> - It uses \ href { http : / / www . graphviz . org / } { GraphViz } library for that purpose . <nl> + The function uses \ href { http : / / www . 
graphviz . org / } { GraphViz } library for that purpose . <nl> } <nl> \ examples { <nl> data ( agaricus . train , package = ' xgboost ' ) <nl> <nl> - # Both dataset are list with two items , a sparse matrix and labels <nl> - # ( labels = outcome column which will be learned ) . <nl> - # Each column of the sparse Matrix is a feature in one hot encoding format . <nl> - <nl> bst < - xgboost ( data = agaricus . train $ data , label = agaricus . train $ label , max . depth = 2 , <nl> eta = 1 , nthread = 2 , nround = 2 , objective = " binary : logistic " ) <nl> <nl> - # agaricus . test $ data @ Dimnames [ [ 2 ] ] represents the column names of the sparse matrix . <nl> + # agaricus . train $ data @ Dimnames [ [ 2 ] ] represents the column names of the sparse matrix . <nl> xgb . plot . tree ( feature_names = agaricus . train $ data @ Dimnames [ [ 2 ] ] , model = bst ) <nl> <nl> } <nl>
Merge pull request from pommedeterresautee / master
dmlc/xgboost
5a49eb06ca0effc5749cd3712b31035f044a5f49
2015-12-10T17:54:52Z
mmm a / xbmc / pvr / PVRManager . cpp <nl> ppp b / xbmc / pvr / PVRManager . cpp <nl> using namespace KODI : : MESSAGING ; <nl> <nl> using KODI : : MESSAGING : : HELPERS : : DialogResponse ; <nl> <nl> - const int CPVRManager : : m_pvrWindowIds [ 12 ] = { <nl> - WINDOW_TV_CHANNELS , <nl> - WINDOW_TV_GUIDE , <nl> - WINDOW_TV_RECORDINGS , <nl> - WINDOW_TV_SEARCH , <nl> - WINDOW_TV_TIMERS , <nl> - WINDOW_TV_TIMER_RULES , <nl> - WINDOW_RADIO_CHANNELS , <nl> - WINDOW_RADIO_GUIDE , <nl> - WINDOW_RADIO_RECORDINGS , <nl> - WINDOW_RADIO_SEARCH , <nl> - WINDOW_RADIO_TIMERS , <nl> - WINDOW_RADIO_TIMER_RULES <nl> - } ; <nl> - <nl> CPVRManager : : CPVRManager ( void ) : <nl> CThread ( " PVRManager " ) , <nl> m_triggerEvent ( true ) , <nl> bool CPVRManager : : Load ( bool bShowProgress ) <nl> <nl> CLog : : Log ( LOGDEBUG , " PVRManager - % s - active clients found . continue to start " , __FUNCTION__ ) ; <nl> <nl> - / * reset observer for pvr windows * / <nl> - for ( std : : size_t i = 0 ; i ! = ARRAY_SIZE ( m_pvrWindowIds ) ; i + + ) <nl> - { <nl> - CSingleExit exit ( m_critSection ) ; <nl> - CGUIWindowPVRBase * pWindow = ( CGUIWindowPVRBase * ) g_windowManager . GetWindow ( m_pvrWindowIds [ i ] ) ; <nl> - if ( pWindow ) <nl> - pWindow - > ResetObservers ( ) ; <nl> - } <nl> - <nl> / * load all channels and groups * / <nl> if ( bShowProgress ) <nl> ShowProgressDialog ( g_localizeStrings . Get ( 19236 ) , 0 ) ; / / Loading channels from clients <nl> mmm a / xbmc / pvr / PVRManager . h <nl> ppp b / xbmc / pvr / PVRManager . h <nl> namespace PVR <nl> CCriticalSection m_managerStateMutex ; <nl> ManagerState m_managerState ; <nl> std : : unique_ptr < CStopWatch > m_parentalTimer ; <nl> - static const int m_pvrWindowIds [ 12 ] ; <nl> <nl> std : : atomic_bool m_isChannelPreview ; <nl> CEventSource < PVREvent > m_events ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRBase . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRBase . 
cpp <nl> CGUIWindowPVRBase : : CGUIWindowPVRBase ( bool bRadio , int id , const std : : string & xml <nl> { <nl> m_selectedItemPaths [ false ] = " " ; <nl> m_selectedItemPaths [ true ] = " " ; <nl> + <nl> + RegisterObservers ( ) ; <nl> } <nl> <nl> CGUIWindowPVRBase : : ~ CGUIWindowPVRBase ( void ) <nl> { <nl> + UnregisterObservers ( ) ; <nl> } <nl> <nl> void CGUIWindowPVRBase : : SetSelectedItemPath ( bool bRadio , const std : : string & path ) <nl> void CGUIWindowPVRBase : : UpdateSelectedItemPath ( ) <nl> m_selectedItemPaths [ m_bRadio ] = m_viewControl . GetSelectedItemPath ( ) ; <nl> } <nl> <nl> - void CGUIWindowPVRBase : : ResetObservers ( void ) <nl> - { <nl> - UnregisterObservers ( ) ; <nl> - if ( IsActive ( ) ) <nl> - RegisterObservers ( ) ; <nl> - } <nl> - <nl> void CGUIWindowPVRBase : : RegisterObservers ( void ) <nl> { <nl> CSingleLock lock ( m_critSection ) ; <nl> + g_PVRManager . RegisterObserver ( this ) ; <nl> if ( m_channelGroup ) <nl> m_channelGroup - > RegisterObserver ( this ) ; <nl> } ; <nl> void CGUIWindowPVRBase : : UnregisterObservers ( void ) <nl> CSingleLock lock ( m_critSection ) ; <nl> if ( m_channelGroup ) <nl> m_channelGroup - > UnregisterObserver ( this ) ; <nl> + g_PVRManager . UnregisterObserver ( this ) ; <nl> } ; <nl> <nl> void CGUIWindowPVRBase : : Notify ( const Observable & obs , const ObservableMessage msg ) <nl> { <nl> - CGUIMessage m ( GUI_MSG_REFRESH_LIST , GetID ( ) , 0 , msg ) ; <nl> - CApplicationMessenger : : GetInstance ( ) . SendGUIMessage ( m ) ; <nl> + if ( IsActive ( ) ) <nl> + { <nl> + CGUIMessage m ( GUI_MSG_REFRESH_LIST , GetID ( ) , 0 , msg ) ; <nl> + CApplicationMessenger : : GetInstance ( ) . SendGUIMessage ( m ) ; <nl> + } <nl> } <nl> <nl> bool CGUIWindowPVRBase : : OnAction ( const CAction & action ) <nl> void CGUIWindowPVRBase : : OnInitWindow ( void ) <nl> { <nl> SetProperty ( " IsRadio " , m_bRadio ? " true " : " " ) ; <nl> <nl> - g_PVRManager . 
RegisterObserver ( this ) ; <nl> - <nl> if ( InitChannelGroup ( ) ) <nl> { <nl> CGUIMediaWindow : : OnInitWindow ( ) ; <nl> <nl> / / mark item as selected by channel path <nl> m_viewControl . SetSelectedItem ( GetSelectedItemPath ( m_bRadio ) ) ; <nl> - <nl> - RegisterObservers ( ) ; <nl> } <nl> else <nl> { <nl> void CGUIWindowPVRBase : : OnInitWindow ( void ) <nl> void CGUIWindowPVRBase : : OnDeinitWindow ( int nextWindowID ) <nl> { <nl> g_PVRManager . HideProgressDialog ( ) ; <nl> - g_PVRManager . UnregisterObserver ( this ) ; <nl> - UnregisterObservers ( ) ; <nl> UpdateSelectedItemPath ( ) ; <nl> CGUIMediaWindow : : OnDeinitWindow ( nextWindowID ) ; <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRBase . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRBase . h <nl> namespace PVR <nl> virtual void SetInvalid ( ) override ; <nl> virtual bool CanBeActivated ( ) const override ; <nl> <nl> - void ResetObservers ( void ) ; <nl> - <nl> static std : : string GetSelectedItemPath ( bool bRadio ) ; <nl> static void SetSelectedItemPath ( bool bRadio , const std : : string & path ) ; <nl> <nl> namespace PVR <nl> bool OnContextButtonEditTimerRule ( CFileItem * item , CONTEXT_BUTTON button ) ; <nl> bool OnContextButtonDeleteTimerRule ( CFileItem * item , CONTEXT_BUTTON button ) ; <nl> <nl> - virtual void RegisterObservers ( void ) ; <nl> - virtual void UnregisterObservers ( void ) ; <nl> + void RegisterObservers ( void ) ; <nl> + void UnregisterObservers ( void ) ; <nl> <nl> static CCriticalSection m_selectedItemPathsLock ; <nl> static std : : string m_selectedItemPaths [ 2 ] ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRChannels . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRChannels . cpp <nl> CGUIWindowPVRChannels : : CGUIWindowPVRChannels ( bool bRadio ) : <nl> CGUIWindowPVRBase ( bRadio , bRadio ? WINDOW_RADIO_CHANNELS : WINDOW_TV_CHANNELS , " MyPVRChannels . 
xml " ) , <nl> m_bShowHiddenChannels ( false ) <nl> { <nl> - } <nl> - <nl> - void CGUIWindowPVRChannels : : RegisterObservers ( void ) <nl> - { <nl> - CSingleLock lock ( m_critSection ) ; <nl> g_EpgContainer . RegisterObserver ( this ) ; <nl> - g_PVRManager . RegisterObserver ( this ) ; <nl> g_infoManager . RegisterObserver ( this ) ; <nl> - CGUIWindowPVRBase : : RegisterObservers ( ) ; <nl> } <nl> <nl> - void CGUIWindowPVRChannels : : UnregisterObservers ( void ) <nl> + CGUIWindowPVRChannels : : ~ CGUIWindowPVRChannels ( ) <nl> { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - CGUIWindowPVRBase : : UnregisterObservers ( ) ; <nl> g_infoManager . UnregisterObserver ( this ) ; <nl> - g_PVRManager . UnregisterObserver ( this ) ; <nl> g_EpgContainer . UnregisterObserver ( this ) ; <nl> } <nl> <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRChannels . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRChannels . h <nl> namespace PVR <nl> { <nl> public : <nl> CGUIWindowPVRChannels ( bool bRadio ) ; <nl> - virtual ~ CGUIWindowPVRChannels ( void ) { } ; <nl> + virtual ~ CGUIWindowPVRChannels ( void ) ; <nl> <nl> virtual bool OnMessage ( CGUIMessage & message ) override ; <nl> virtual void GetContextButtons ( int itemNumber , CContextButtons & buttons ) override ; <nl> namespace PVR <nl> <nl> protected : <nl> virtual std : : string GetDirectoryPath ( void ) override ; <nl> - virtual void RegisterObservers ( void ) override ; <nl> - virtual void UnregisterObservers ( void ) override ; <nl> <nl> private : <nl> bool OnContextButtonAdd ( CFileItem * item , CONTEXT_BUTTON button ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRGuide . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRGuide . cpp <nl> CGUIWindowPVRGuide : : CGUIWindowPVRGuide ( bool bRadio ) : <nl> m_cachedChannelGroup ( new CPVRChannelGroup ) <nl> { <nl> m_bRefreshTimelineItems = false ; <nl> + g_EpgContainer . 
RegisterObserver ( this ) ; <nl> } <nl> <nl> CGUIWindowPVRGuide : : ~ CGUIWindowPVRGuide ( void ) <nl> { <nl> + g_EpgContainer . UnregisterObserver ( this ) ; <nl> StopRefreshTimelineItemsThread ( ) ; <nl> } <nl> <nl> void CGUIWindowPVRGuide : : StopRefreshTimelineItemsThread ( ) <nl> m_refreshTimelineItemsThread - > StopThread ( false ) ; <nl> } <nl> <nl> - void CGUIWindowPVRGuide : : RegisterObservers ( void ) <nl> - { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - g_EpgContainer . RegisterObserver ( this ) ; <nl> - g_PVRManager . RegisterObserver ( this ) ; <nl> - CGUIWindowPVRBase : : RegisterObservers ( ) ; <nl> - } <nl> - <nl> - void CGUIWindowPVRGuide : : UnregisterObservers ( void ) <nl> - { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - CGUIWindowPVRBase : : UnregisterObservers ( ) ; <nl> - g_PVRManager . UnregisterObserver ( this ) ; <nl> - g_EpgContainer . UnregisterObserver ( this ) ; <nl> - } <nl> - <nl> void CGUIWindowPVRGuide : : Notify ( const Observable & obs , const ObservableMessage msg ) <nl> { <nl> - if ( m_viewControl . GetCurrentControl ( ) = = GUIDE_VIEW_TIMELINE & & <nl> + if ( IsActive ( ) & & <nl> + m_viewControl . GetCurrentControl ( ) = = GUIDE_VIEW_TIMELINE & & <nl> ( msg = = ObservableMessageEpg | | <nl> msg = = ObservableMessageEpgContainer | | <nl> msg = = ObservableMessageChannelGroupReset | | <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRGuide . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRGuide . h <nl> namespace PVR <nl> virtual void UpdateSelectedItemPath ( ) override ; <nl> virtual std : : string GetDirectoryPath ( void ) override { return " " ; } <nl> virtual bool GetDirectory ( const std : : string & strDirectory , CFileItemList & items ) override ; <nl> - virtual void RegisterObservers ( void ) override ; <nl> - virtual void UnregisterObservers ( void ) override ; <nl> <nl> private : <nl> void Init ( ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRRecordings . 
cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRRecordings . cpp <nl> CGUIWindowPVRRecordings : : CGUIWindowPVRRecordings ( bool bRadio ) : <nl> CGUIWindowPVRBase ( bRadio , bRadio ? WINDOW_RADIO_RECORDINGS : WINDOW_TV_RECORDINGS , " MyPVRRecordings . xml " ) , <nl> m_bShowDeletedRecordings ( false ) <nl> { <nl> - } <nl> - <nl> - void CGUIWindowPVRRecordings : : RegisterObservers ( void ) <nl> - { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - g_PVRManager . RegisterObserver ( this ) ; <nl> g_infoManager . RegisterObserver ( this ) ; <nl> - CGUIWindowPVRBase : : RegisterObservers ( ) ; <nl> } <nl> <nl> - void CGUIWindowPVRRecordings : : UnregisterObservers ( void ) <nl> + CGUIWindowPVRRecordings : : ~ CGUIWindowPVRRecordings ( ) <nl> { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - CGUIWindowPVRBase : : UnregisterObservers ( ) ; <nl> g_infoManager . UnregisterObserver ( this ) ; <nl> - g_PVRManager . UnregisterObserver ( this ) ; <nl> } <nl> <nl> void CGUIWindowPVRRecordings : : OnWindowLoaded ( ) <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRRecordings . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRRecordings . h <nl> namespace PVR <nl> { <nl> public : <nl> CGUIWindowPVRRecordings ( bool bRadio ) ; <nl> - virtual ~ CGUIWindowPVRRecordings ( void ) { } ; <nl> + virtual ~ CGUIWindowPVRRecordings ( void ) ; <nl> <nl> static std : : string GetResumeString ( const CFileItem & item ) ; <nl> <nl> namespace PVR <nl> protected : <nl> virtual std : : string GetDirectoryPath ( void ) override ; <nl> virtual void OnPrepareFileItems ( CFileItemList & items ) override ; <nl> - virtual void RegisterObservers ( void ) override ; <nl> - virtual void UnregisterObservers ( void ) override ; <nl> <nl> private : <nl> bool ActionDeleteRecording ( CFileItem * item ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRTimersBase . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRTimersBase . 
cpp <nl> using namespace PVR ; <nl> CGUIWindowPVRTimersBase : : CGUIWindowPVRTimersBase ( bool bRadio , int id , const std : : string & xmlFile ) : <nl> CGUIWindowPVRBase ( bRadio , id , xmlFile ) <nl> { <nl> - } <nl> - <nl> - void CGUIWindowPVRTimersBase : : RegisterObservers ( void ) <nl> - { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - g_PVRManager . RegisterObserver ( this ) ; <nl> g_infoManager . RegisterObserver ( this ) ; <nl> - CGUIWindowPVRBase : : RegisterObservers ( ) ; <nl> } <nl> <nl> - void CGUIWindowPVRTimersBase : : UnregisterObservers ( void ) <nl> + CGUIWindowPVRTimersBase : : ~ CGUIWindowPVRTimersBase ( ) <nl> { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - CGUIWindowPVRBase : : UnregisterObservers ( ) ; <nl> g_infoManager . UnregisterObserver ( this ) ; <nl> - g_PVRManager . UnregisterObserver ( this ) ; <nl> } <nl> <nl> void CGUIWindowPVRTimersBase : : GetContextButtons ( int itemNumber , CContextButtons & buttons ) <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRTimersBase . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRTimersBase . h <nl> namespace PVR <nl> { <nl> public : <nl> CGUIWindowPVRTimersBase ( bool bRadio , int id , const std : : string & xmlFile ) ; <nl> - virtual ~ CGUIWindowPVRTimersBase ( void ) { } ; <nl> + virtual ~ CGUIWindowPVRTimersBase ( void ) ; <nl> <nl> bool OnMessage ( CGUIMessage & message ) ; <nl> bool OnAction ( const CAction & action ) ; <nl> namespace PVR <nl> bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) ; <nl> void UpdateButtons ( void ) ; <nl> <nl> - protected : <nl> - virtual void RegisterObservers ( void ) ; <nl> - virtual void UnregisterObservers ( void ) ; <nl> - <nl> private : <nl> bool ActionDeleteTimer ( CFileItem * item ) ; <nl> bool ActionShowTimer ( CFileItem * item ) ; <nl>
[ PVR ] Simplify and cleanup pvr window ' s oberserver register / unregister .
xbmc/xbmc
10770fb1d2296a7571a3ba7d65a91a9539996b49
2016-09-16T22:25:00Z
mmm a / modules / calib3d / src / triangulate . cpp <nl> ppp b / modules / calib3d / src / triangulate . cpp <nl> cvTriangulatePoints ( CvMat * projMatr1 , CvMat * projMatr2 , CvMat * projPoints1 , CvMa <nl> CV_Error ( CV_StsUnmatchedSizes , " Size of projection matrices must be 3x4 " ) ; <nl> <nl> CvMat matrA ; <nl> - double matrA_dat [ 24 ] ; <nl> - matrA = cvMat ( 6 , 4 , CV_64F , matrA_dat ) ; <nl> + double matrA_dat [ 16 ] ; <nl> + matrA = cvMat ( 4 , 4 , CV_64F , matrA_dat ) ; <nl> <nl> / / CvMat matrU ; <nl> CvMat matrW ; <nl> CvMat matrV ; <nl> / / double matrU_dat [ 9 * 9 ] ; <nl> - double matrW_dat [ 6 * 4 ] ; <nl> + double matrW_dat [ 4 * 4 ] ; <nl> double matrV_dat [ 4 * 4 ] ; <nl> <nl> / / matrU = cvMat ( 6 , 6 , CV_64F , matrU_dat ) ; <nl> - matrW = cvMat ( 6 , 4 , CV_64F , matrW_dat ) ; <nl> + matrW = cvMat ( 4 , 4 , CV_64F , matrW_dat ) ; <nl> matrV = cvMat ( 4 , 4 , CV_64F , matrV_dat ) ; <nl> <nl> CvMat * projPoints [ 2 ] ; <nl> cvTriangulatePoints ( CvMat * projMatr1 , CvMat * projMatr2 , CvMat * projPoints1 , CvMa <nl> y = cvmGet ( projPoints [ j ] , 1 , i ) ; <nl> for ( int k = 0 ; k < 4 ; k + + ) <nl> { <nl> - cvmSet ( & matrA , j * 3 + 0 , k , x * cvmGet ( projMatrs [ j ] , 2 , k ) - cvmGet ( projMatrs [ j ] , 0 , k ) ) ; <nl> - cvmSet ( & matrA , j * 3 + 1 , k , y * cvmGet ( projMatrs [ j ] , 2 , k ) - cvmGet ( projMatrs [ j ] , 1 , k ) ) ; <nl> - cvmSet ( & matrA , j * 3 + 2 , k , x * cvmGet ( projMatrs [ j ] , 1 , k ) - y * cvmGet ( projMatrs [ j ] , 0 , k ) ) ; <nl> + cvmSet ( & matrA , j * 2 + 0 , k , x * cvmGet ( projMatrs [ j ] , 2 , k ) - cvmGet ( projMatrs [ j ] , 0 , k ) ) ; <nl> + cvmSet ( & matrA , j * 2 + 1 , k , y * cvmGet ( projMatrs [ j ] , 2 , k ) - cvmGet ( projMatrs [ j ] , 1 , k ) ) ; <nl> } <nl> } <nl> / * Solve system for current point * / <nl>
Fixed triangulation bug http : / / code . opencv . org / issues / 4334 .
opencv/opencv
242e64c08d83d70b7fa24e1c087647a2aeda6077
2015-05-13T13:47:00Z
mmm a / grpc / src / compiler / go_generator . cc <nl> ppp b / grpc / src / compiler / go_generator . cc <nl> namespace grpc_go_generator { <nl> grpc : : string unexportName ( grpc : : string s ) { <nl> if ( s . empty ( ) ) <nl> return s ; <nl> - s [ 0 ] = std : : tolower ( s [ 0 ] ) ; <nl> + s [ 0 ] = static_cast < char > ( std : : tolower ( s [ 0 ] ) ) ; <nl> return s ; <nl> } <nl> <nl> grpc : : string unexportName ( grpc : : string s ) { <nl> grpc : : string exportName ( grpc : : string s ) { <nl> if ( s . empty ( ) ) <nl> return s ; <nl> - s [ 0 ] = std : : toupper ( s [ 0 ] ) ; <nl> + s [ 0 ] = static_cast < char > ( std : : toupper ( s [ 0 ] ) ) ; <nl> return s ; <nl> } <nl> <nl>
Fix compiler warning on Visual Studio .
google/flatbuffers
4bc4979acccdb6d4c9a43da856f8ef8e5ed23909
2017-01-19T00:09:35Z
mmm a / tensorflow / c / eager / c_api . cc <nl> ppp b / tensorflow / c / eager / c_api . cc <nl> TFE_TensorHandle * TFE_TensorHandleCopyToDevice ( TFE_TensorHandle * h , <nl> return new TFE_TensorHandle ( h - > t , dst_cpu ? nullptr : dstd ) ; <nl> } <nl> tensorflow : : Tensor * src = & ( h - > t ) ; <nl> - if ( ! dst_cpu & & ! tensorflow : : DataTypeCanUseMemcpy ( src - > dtype ( ) ) ) { <nl> + if ( ! dst_cpu & & ( src - > dtype ( ) ! = tensorflow : : DT_VARIANT & & <nl> + ! tensorflow : : DataTypeCanUseMemcpy ( src - > dtype ( ) ) ) ) { <nl> TF_SetStatus ( <nl> status , TF_INVALID_ARGUMENT , <nl> tensorflow : : strings : : StrCat ( " Can ' t copy Tensor with type " , <nl> TFE_TensorHandle * TFE_TensorHandleCopyToDevice ( TFE_TensorHandle * h , <nl> . c_str ( ) ) ; <nl> return nullptr ; <nl> } <nl> - tensorflow : : Tensor dst ( dstd - > GetAllocator ( tensorflow : : AllocatorAttributes ( ) ) , <nl> - src - > dtype ( ) , src - > shape ( ) ) ; <nl> + tensorflow : : AllocatorAttributes attr ; <nl> + if ( src - > dtype ( ) = = tensorflow : : DT_VARIANT ) { <nl> + attr . set_on_host ( true ) ; <nl> + } <nl> + tensorflow : : Tensor dst ( dstd - > GetAllocator ( attr ) , src - > dtype ( ) , src - > shape ( ) ) ; <nl> if ( src - > shape ( ) . num_elements ( ) = = 0 ) { <nl> return new TFE_TensorHandle ( dst , dst_cpu ? nullptr : dstd ) ; <nl> } <nl> mmm a / tensorflow / contrib / cmake / tf_core_ops . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_core_ops . cmake <nl> set ( tf_op_lib_names <nl> " image_ops " <nl> " io_ops " <nl> " linalg_ops " <nl> + " list_ops " <nl> " lookup_ops " <nl> " logging_ops " <nl> " math_ops " <nl> mmm a / tensorflow / contrib / cmake / tf_python . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_python . 
cmake <nl> GENERATE_PYTHON_OP_LIB ( " dataset_ops " ) <nl> GENERATE_PYTHON_OP_LIB ( " image_ops " ) <nl> GENERATE_PYTHON_OP_LIB ( " io_ops " ) <nl> GENERATE_PYTHON_OP_LIB ( " linalg_ops " ) <nl> + GENERATE_PYTHON_OP_LIB ( " list_ops " ) <nl> GENERATE_PYTHON_OP_LIB ( " logging_ops " ) <nl> GENERATE_PYTHON_OP_LIB ( " lookup_ops " ) <nl> GENERATE_PYTHON_OP_LIB ( " nn_ops " ) <nl> mmm a / tensorflow / contrib / cmake / tf_tests . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_tests . cmake <nl> if ( tensorflow_BUILD_PYTHON_TESTS ) <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / as_string_op_test . py " <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / string_to_number_op_test . py " <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / clip_ops_test . py " <nl> + " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / list_ops_test . py " # Needs portpicker . <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / tensor_array_ops_test . py " # Needs portpicker . <nl> # Numerical issues , calculations off . <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / concat_op_test . 
py " <nl> mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> tf_gen_op_libs ( <nl> " image_ops " , <nl> " io_ops " , <nl> " linalg_ops " , <nl> + " list_ops " , <nl> " lookup_ops " , <nl> " logging_ops " , <nl> " math_ops " , <nl> cc_library ( <nl> " : image_ops_op_lib " , <nl> " : io_ops_op_lib " , <nl> " : linalg_ops_op_lib " , <nl> + " : list_ops_op_lib " , <nl> " : logging_ops_op_lib " , <nl> " : lookup_ops_op_lib " , <nl> " : math_ops_op_lib " , <nl> cc_library ( <nl> " / / tensorflow / core / kernels : image " , <nl> " / / tensorflow / core / kernels : io " , <nl> " / / tensorflow / core / kernels : linalg " , <nl> + " / / tensorflow / core / kernels : list_kernels " , <nl> " / / tensorflow / core / kernels : lookup " , <nl> " / / tensorflow / core / kernels : logging " , <nl> " / / tensorflow / core / kernels : math " , <nl> new file mode 100644 <nl> index 0000000000000 . . 2c2ad003d0177 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / api_def / base_api / api_def_EmptyTensorList . pbtxt <nl> <nl> + op { <nl> + graph_op_name : " EmptyTensorList " <nl> + summary : " Creates and returns an empty tensor list . " <nl> + description : < < END <nl> + All list elements must be tensors of dtype element_dtype and shape compatible <nl> + with element_shape . <nl> + <nl> + handle : an empty tensor list . <nl> + element_dtype : the type of elements in the list . <nl> + element_shape : a shape compatible with that of elements in the list . <nl> + END <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 633b7c18019ba <nl> mmm / dev / null <nl> ppp b / tensorflow / core / api_def / base_api / api_def_TensorListFromTensor . pbtxt <nl> <nl> + op { <nl> + graph_op_name : " TensorListFromTensor " <nl> + summary : " Creates a TensorList which , when stacked , has the value of ` tensor ` . " <nl> + description : < < END <nl> + Each tensor in the result list corresponds to one row of the input tensor . 
<nl> + <nl> + tensor : The input tensor . <nl> + output_handle : The list . <nl> + END <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . f450c20f86b34 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / api_def / base_api / api_def_TensorListLength . pbtxt <nl> <nl> + op { <nl> + graph_op_name : " TensorListLength " <nl> + summary : " Returns the number of tensors in the input tensor list . " <nl> + description : < < END <nl> + input_handle : the input list <nl> + length : the number of tensors in the list <nl> + END <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 0f752f9cf45d9 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / api_def / base_api / api_def_TensorListPopBack . pbtxt <nl> <nl> + op { <nl> + graph_op_name : " TensorListPopBack " <nl> + summary : " Returns the last element of the input list as well as a list with all but that element . " <nl> + description : < < END <nl> + Fails if the list is empty . <nl> + <nl> + input_handle : the input list <nl> + tensor : the withdrawn last element of the list <nl> + element_dtype : the type of elements in the list <nl> + element_shape : the shape of the output tensor <nl> + END <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 73297c03003d9 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / api_def / base_api / api_def_TensorListPushBack . pbtxt <nl> <nl> + op { <nl> + graph_op_name : " TensorListPushBack " <nl> + summary : " Returns a list list which has the passed - in ` Tensor ` as last element and the other elements of the given list in ` input_handle ` . " <nl> + description : < < END <nl> + tensor : The tensor to put on the list . <nl> + input_handle : The old list . <nl> + output_handle : A list with the elements of the old list followed by tensor . <nl> + element_dtype : the type of elements in the list . <nl> + element_shape : a shape compatible with that of elements in the list . 
<nl> + END <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 2402875951848 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / api_def / base_api / api_def_TensorListStack . pbtxt <nl> <nl> + op { <nl> + graph_op_name : " TensorListStack " <nl> + summary : " Stacks all tensors in the list . " <nl> + description : < < END <nl> + Requires that all tensors have the same shape . <nl> + <nl> + input_handle : the input list <nl> + tensor : the gathered result <nl> + num_elements : optional . If not - 1 , the number of elements in the list . <nl> + <nl> + END <nl> + } <nl> mmm a / tensorflow / core / framework / variant_tensor_data . cc <nl> ppp b / tensorflow / core / framework / variant_tensor_data . cc <nl> const Tensor & VariantTensorData : : tensors ( int index ) const { <nl> return tensors_ [ index ] ; <nl> } <nl> <nl> - std : : vector < Tensor > VariantTensorData : : tensors ( ) { return tensors_ ; } <nl> + const std : : vector < Tensor > & VariantTensorData : : tensors ( ) const { <nl> + return tensors_ ; <nl> + } <nl> <nl> Tensor * VariantTensorData : : add_tensors ( ) { <nl> tensors_ . emplace_back ( ) ; <nl> mmm a / tensorflow / core / framework / variant_tensor_data . h <nl> ppp b / tensorflow / core / framework / variant_tensor_data . h <nl> class VariantTensorData { <nl> / / Tensors contained within objects being serialized . <nl> int tensors_size ( ) const ; <nl> const Tensor & tensors ( int index ) const ; <nl> - std : : vector < Tensor > tensors ( ) ; <nl> + const std : : vector < Tensor > & tensors ( ) const ; <nl> Tensor * add_tensors ( ) ; <nl> <nl> / / Conversion to and from VariantTensorDataProto <nl> mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> tf_kernel_library ( <nl> ] , <nl> ) <nl> <nl> + tf_kernel_library ( <nl> + name = " list_kernels " , <nl> + srcs = [ " list_kernels . cc " ] , <nl> + hdrs = [ " list_kernels . h " ] , <nl> + gpu_srcs = [ <nl> + " list_kernels . cu . 
cc " , <nl> + " list_kernels . h " , <nl> + ] , <nl> + deps = [ <nl> + " : concat_lib " , <nl> + " / / tensorflow / core : framework " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : list_ops_op_lib " , <nl> + " / / third_party / eigen3 " , <nl> + ] , <nl> + ) <nl> + <nl> tf_kernel_library ( <nl> name = " fact_op " , <nl> prefix = " fact_op " , <nl> new file mode 100644 <nl> index 0000000000000 . . 5e405f16a4d14 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / list_kernels . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include < limits > <nl> + <nl> + # define EIGEN_USE_THREADS <nl> + # if GOOGLE_CUDA <nl> + # define EIGEN_USE_GPU <nl> + # endif / / GOOGLE_CUDA <nl> + <nl> + # include " tensorflow / core / kernels / list_kernels . h " <nl> + <nl> + # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> + # include " tensorflow / core / framework / op_kernel . h " <nl> + # include " tensorflow / core / framework / register_types . h " <nl> + # include " tensorflow / core / framework / tensor_types . h " <nl> + # include " tensorflow / core / framework / variant . 
h " <nl> + # include " tensorflow / core / framework / variant_op_registry . h " <nl> + # include " tensorflow / core / kernels / concat_lib . h " <nl> + # include " tensorflow / core / lib / core / coding . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + # include " tensorflow / core / util / util . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + typedef Eigen : : ThreadPoolDevice CPUDevice ; <nl> + <nl> + / / Variant compatible type for a list of tensors . This is mutable but instances <nl> + / / should never be mutated after stored in a variant tensor . <nl> + TensorList : : TensorList ( const TensorList & other ) <nl> + : tensors ( other . tensors ) , <nl> + element_shape ( other . element_shape ) , <nl> + element_dtype ( other . element_dtype ) { } <nl> + <nl> + void TensorList : : Encode ( VariantTensorData * data ) const { <nl> + data - > set_type_name ( TypeName ( ) ) ; <nl> + for ( const Tensor & t : tensors ) { <nl> + * data - > add_tensors ( ) = t ; <nl> + } <nl> + string metadata ; <nl> + core : : PutVarint64 ( & metadata , static_cast < uint64 > ( element_dtype ) ) ; <nl> + if ( ! element_shape . unknown_rank ( ) ) { <nl> + for ( TensorShapeDim dim : element_shape ) { <nl> + if ( dim . size > 0 ) { <nl> + core : : PutVarint64 ( & metadata , dim . size ) ; <nl> + } else { <nl> + core : : PutVarint64 ( & metadata , std : : numeric_limits < uint64 > : : max ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + data - > set_metadata ( metadata ) ; <nl> + } <nl> + <nl> + static Status TensorListDeviceCopy ( <nl> + const TensorList & from , TensorList * to , <nl> + const UnaryVariantOpRegistry : : AsyncTensorDeviceCopyFn & copy ) { <nl> + to - > element_shape = from . element_shape ; <nl> + to - > element_dtype = from . element_dtype ; <nl> + to - > tensors . reserve ( from . tensors . size ( ) ) ; <nl> + for ( const Tensor & t : from . tensors ) { <nl> + Tensor tmp ( t . 
dtype ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( copy ( t , & tmp ) ) ; <nl> + to - > tensors . push_back ( tmp ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + # define REGISTER_LIST_COPY ( DIRECTION ) \ <nl> + INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION ( \ <nl> + TensorList , DIRECTION , TensorList : : kTypeName , TensorListDeviceCopy ) <nl> + <nl> + REGISTER_LIST_COPY ( VariantDeviceCopyDirection : : HOST_TO_DEVICE ) ; <nl> + REGISTER_LIST_COPY ( VariantDeviceCopyDirection : : DEVICE_TO_HOST ) ; <nl> + REGISTER_LIST_COPY ( VariantDeviceCopyDirection : : DEVICE_TO_DEVICE ) ; <nl> + <nl> + REGISTER_UNARY_VARIANT_DECODE_FUNCTION ( TensorList , TensorList : : kTypeName ) ; <nl> + <nl> + bool TensorList : : Decode ( const VariantTensorData & data ) { <nl> + tensors = data . tensors ( ) ; <nl> + string metadata ; <nl> + data . get_metadata ( & metadata ) ; <nl> + uint64 scratch ; <nl> + StringPiece iter ( metadata ) ; <nl> + core : : GetVarint64 ( & iter , & scratch ) ; <nl> + element_dtype = static_cast < DataType > ( scratch ) ; <nl> + std : : vector < int64 > dims ; <nl> + while ( ! iter . empty ( ) ) { <nl> + core : : GetVarint64 ( & iter , & scratch ) ; <nl> + if ( scratch = = std : : numeric_limits < uint64 > : : max ( ) ) { <nl> + dims . push_back ( - 1 ) ; <nl> + } else { <nl> + dims . push_back ( scratch ) ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + Status TensorShapeFromTensor ( const Tensor & t , PartialTensorShape * out ) { <nl> + if ( t . shape ( ) = = TensorShape ( { } ) ) { <nl> + if ( ( t . dtype ( ) = = DT_INT32 & & t . scalar < int32 > ( ) ( ) = = - 1 ) | | <nl> + ( t . dtype ( ) = = DT_INT64 & & t . scalar < int64 > ( ) ( ) = = - 1 ) ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> + return errors : : InvalidArgument ( <nl> + " The only valid scalar shape tensor is the fully unknown shape " <nl> + " specified as - 1 . " ) ; <nl> + } <nl> + if ( t . 
dtype ( ) = = DT_INT32 ) { <nl> + return PartialTensorShape : : MakePartialShape ( t . vec < int32 > ( ) . data ( ) , <nl> + t . NumElements ( ) , out ) ; <nl> + } else if ( t . dtype ( ) = = DT_INT64 ) { <nl> + return PartialTensorShape : : MakePartialShape ( t . vec < int64 > ( ) . data ( ) , <nl> + t . NumElements ( ) , out ) ; <nl> + } <nl> + return errors : : InvalidArgument ( <nl> + " Expected an int32 or int64 shape tensor ; found " , <nl> + DataTypeString ( t . dtype ( ) ) ) ; <nl> + } <nl> + <nl> + class EmptyTensorList : public OpKernel { <nl> + public : <nl> + explicit EmptyTensorList ( OpKernelConstruction * ctx ) : OpKernel ( ctx ) { <nl> + OP_REQUIRES_OK ( ctx , ctx - > GetAttr ( " element_dtype " , & element_dtype_ ) ) ; <nl> + } <nl> + <nl> + void Compute ( OpKernelContext * ctx ) override { <nl> + Tensor * result ; <nl> + AllocatorAttributes attr ; <nl> + attr . set_on_host ( true ) ; <nl> + OP_REQUIRES_OK ( ctx , ctx - > allocate_output ( 0 , TensorShape { } , & result , attr ) ) ; <nl> + TensorList empty ; <nl> + empty . element_dtype = element_dtype_ ; <nl> + PartialTensorShape element_shape ; <nl> + OP_REQUIRES_OK ( ctx , TensorShapeFromTensor ( ctx - > input ( 0 ) , & element_shape ) ) ; <nl> + empty . element_shape = element_shape ; <nl> + result - > scalar < Variant > ( ) ( ) = std : : move ( empty ) ; <nl> + } <nl> + <nl> + private : <nl> + DataType element_dtype_ ; <nl> + } ; <nl> + <nl> + const char TensorList : : kTypeName [ ] = " tensorflow : : TensorList " ; <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " EmptyTensorList " ) . Device ( DEVICE_CPU ) , <nl> + EmptyTensorList ) ; <nl> + <nl> + # if GOOGLE_CUDA <nl> + <nl> + REGISTER_KERNEL_BUILDER ( <nl> + Name ( " EmptyTensorList " ) . Device ( DEVICE_GPU ) . 
HostMemory ( " element_shape " ) , <nl> + EmptyTensorList ) ; <nl> + <nl> + # endif / / GOOGLE_CUDA <nl> + <nl> + class TensorListPushBack : public OpKernel { <nl> + public : <nl> + explicit TensorListPushBack ( OpKernelConstruction * c ) : OpKernel ( c ) { <nl> + OP_REQUIRES_OK ( c , c - > GetAttr ( " element_dtype " , & element_dtype_ ) ) ; <nl> + } <nl> + <nl> + ~ TensorListPushBack ( ) override { } <nl> + <nl> + void Compute ( OpKernelContext * c ) override { <nl> + const Tensor & input = c - > input ( 1 ) ; <nl> + OP_REQUIRES ( c , element_dtype_ = = input . dtype ( ) , <nl> + errors : : InvalidArgument ( " Invalid data types ; list elements " , <nl> + DataTypeString ( element_dtype_ ) , <nl> + " but tried to append " , <nl> + DataTypeString ( input . dtype ( ) ) ) ) ; <nl> + <nl> + const TensorList * l = c - > input ( 0 ) . scalar < Variant > ( ) ( ) . get < TensorList > ( ) ; <nl> + OP_REQUIRES ( c , l ! = nullptr , <nl> + errors : : InvalidArgument ( <nl> + " Input handle is not a list . Saw : ' " , <nl> + c - > input ( 0 ) . scalar < Variant > ( ) ( ) . DebugString ( ) , " ' " ) ) ; <nl> + OP_REQUIRES ( c , l - > element_shape . IsCompatibleWith ( input . shape ( ) ) , <nl> + errors : : InvalidArgument ( <nl> + " Tried to append a tensor with incompatible shape to a " <nl> + " list . Op element shape : " , <nl> + input . shape ( ) . DebugString ( ) , <nl> + " list shape : " , l - > element_shape . DebugString ( ) ) ) ; <nl> + OP_REQUIRES ( c , element_dtype_ = = l - > element_dtype , <nl> + errors : : InvalidArgument ( " Invalid data types ; op elements " , <nl> + DataTypeString ( element_dtype_ ) , <nl> + " but list elements " , <nl> + DataTypeString ( l - > element_dtype ) ) ) ; <nl> + <nl> + TensorList output ; <nl> + output = * l ; <nl> + output . tensors . push_back ( input ) ; <nl> + Tensor * result ; <nl> + AllocatorAttributes attr ; <nl> + attr . 
set_on_host ( true ) ; <nl> + OP_REQUIRES_OK ( c , c - > allocate_output ( 0 , TensorShape { } , & result , attr ) ) ; <nl> + result - > scalar < Variant > ( ) ( ) = std : : move ( output ) ; <nl> + } <nl> + <nl> + private : <nl> + DataType element_dtype_ ; <nl> + } ; <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListPushBack " ) . Device ( DEVICE_CPU ) , <nl> + TensorListPushBack ) ; <nl> + <nl> + # if GOOGLE_CUDA <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListPushBack " ) . Device ( DEVICE_GPU ) , <nl> + TensorListPushBack ) ; <nl> + <nl> + # endif / / GOOGLE_CUDA <nl> + <nl> + class TensorListLength : public OpKernel { <nl> + public : <nl> + explicit TensorListLength ( OpKernelConstruction * c ) : OpKernel ( c ) { } <nl> + ~ TensorListLength ( ) override { } <nl> + <nl> + void Compute ( OpKernelContext * c ) override { <nl> + const TensorList * l = c - > input ( 0 ) . scalar < Variant > ( ) ( ) . get < TensorList > ( ) ; <nl> + OP_REQUIRES ( <nl> + c , l ! = nullptr , <nl> + errors : : InvalidArgument ( <nl> + " TensorListLength received a variant which is not a list . Saw : ' " , <nl> + c - > input ( 0 ) . scalar < Variant > ( ) ( ) . DebugString ( ) , " ' " ) ) ; <nl> + Tensor * result ; <nl> + OP_REQUIRES_OK ( c , c - > allocate_output ( 0 , TensorShape { } , & result ) ) ; <nl> + result - > scalar < int32 > ( ) ( ) = l - > tensors . size ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListLength " ) . Device ( DEVICE_CPU ) , <nl> + TensorListLength ) ; <nl> + <nl> + # if GOOGLE_CUDA <nl> + <nl> + REGISTER_KERNEL_BUILDER ( <nl> + Name ( " TensorListLength " ) . Device ( DEVICE_GPU ) . 
HostMemory ( " length " ) , <nl> + TensorListLength ) ; <nl> + <nl> + # endif / / GOOGLE_CUDA <nl> + <nl> + class TensorListPopBack : public OpKernel { <nl> + public : <nl> + explicit TensorListPopBack ( OpKernelConstruction * c ) : OpKernel ( c ) { <nl> + OP_REQUIRES_OK ( c , c - > GetAttr ( " element_dtype " , & element_dtype_ ) ) ; <nl> + } <nl> + <nl> + ~ TensorListPopBack ( ) override { } <nl> + <nl> + void Compute ( OpKernelContext * c ) override { <nl> + const TensorList * l = c - > input ( 0 ) . scalar < Variant > ( ) ( ) . get < TensorList > ( ) ; <nl> + OP_REQUIRES ( c , l ! = nullptr , <nl> + errors : : InvalidArgument ( <nl> + " Input handle is not a list . Saw : ' " , <nl> + c - > input ( 0 ) . scalar < Variant > ( ) ( ) . DebugString ( ) , " ' " ) ) ; <nl> + OP_REQUIRES ( c , element_dtype_ = = l - > element_dtype , <nl> + errors : : InvalidArgument ( " Invalid data types ; op elements " , <nl> + DataTypeString ( element_dtype_ ) , <nl> + " but list elements " , <nl> + DataTypeString ( l - > element_dtype ) ) ) ; <nl> + <nl> + OP_REQUIRES ( c , ! l - > tensors . empty ( ) , <nl> + errors : : InvalidArgument ( " Trying to pop from an empty list . " ) ) ; <nl> + <nl> + c - > set_output ( 1 , l - > tensors . back ( ) ) ; <nl> + TensorList output ; <nl> + output = * l ; <nl> + output . tensors . pop_back ( ) ; <nl> + Tensor * result ; <nl> + AllocatorAttributes attr ; <nl> + attr . set_on_host ( true ) ; <nl> + OP_REQUIRES_OK ( c , c - > allocate_output ( 0 , TensorShape { } , & result , attr ) ) ; <nl> + result - > scalar < Variant > ( ) ( ) = std : : move ( output ) ; <nl> + } <nl> + <nl> + private : <nl> + DataType element_dtype_ ; <nl> + } ; <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListPopBack " ) . Device ( DEVICE_CPU ) , <nl> + TensorListPopBack ) ; <nl> + <nl> + # if GOOGLE_CUDA <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListPopBack " ) . 
Device ( DEVICE_GPU ) , <nl> + TensorListPopBack ) ; <nl> + <nl> + # endif / / GOOGLE_CUDA <nl> + <nl> + # define REGISTER_TENSOR_LIST_STACK_CPU ( T ) \ <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListStack " ) \ <nl> + . TypeConstraint < T > ( " element_dtype " ) \ <nl> + . Device ( DEVICE_CPU ) , \ <nl> + TensorListStack < CPUDevice , T > ) <nl> + <nl> + TF_CALL_POD_STRING_TYPES ( REGISTER_TENSOR_LIST_STACK_CPU ) ; <nl> + REGISTER_TENSOR_LIST_STACK_CPU ( quint8 ) ; <nl> + REGISTER_TENSOR_LIST_STACK_CPU ( qint8 ) ; <nl> + REGISTER_TENSOR_LIST_STACK_CPU ( quint16 ) ; <nl> + REGISTER_TENSOR_LIST_STACK_CPU ( qint16 ) ; <nl> + REGISTER_TENSOR_LIST_STACK_CPU ( qint32 ) ; <nl> + REGISTER_TENSOR_LIST_STACK_CPU ( bfloat16 ) ; <nl> + <nl> + # undef REGISTER_TENSOR_LIST_STACK_CPU <nl> + <nl> + # define REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ( T ) \ <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListFromTensor " ) \ <nl> + . TypeConstraint < T > ( " element_dtype " ) \ <nl> + . Device ( DEVICE_CPU ) , \ <nl> + TensorListFromTensor < CPUDevice , T > ) <nl> + <nl> + TF_CALL_POD_STRING_TYPES ( REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ( quint8 ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ( qint8 ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ( quint16 ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ( qint16 ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ( qint32 ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_CPU ( bfloat16 ) ; <nl> + <nl> + # undef REGISTER_TENSOR_LIST_FROM_TENSOR_CPU <nl> + <nl> + REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION ( ADD_VARIANT_BINARY_OP , DEVICE_CPU , <nl> + TensorList , TensorList : : kTypeName , <nl> + TensorListBinaryAdd < CPUDevice > ) ; <nl> + <nl> + REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION ( ZEROS_LIKE_VARIANT_UNARY_OP , <nl> + DEVICE_CPU , TensorList , <nl> + TensorList : : kTypeName , <nl> + TensorListZerosLike < CPUDevice > ) ; <nl> + <nl> + } / / namespace tensorflow <nl> new file mode 100644 
<nl> index 0000000000000 . . 2b9cdcf3bca3f <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / list_kernels . cu . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include < limits > <nl> + <nl> + # define EIGEN_USE_THREADS <nl> + # if GOOGLE_CUDA <nl> + # define EIGEN_USE_GPU <nl> + # endif / / GOOGLE_CUDA <nl> + <nl> + # include " tensorflow / core / kernels / list_kernels . h " <nl> + <nl> + # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> + # include " tensorflow / core / framework / op_kernel . h " <nl> + # include " tensorflow / core / framework / register_types . h " <nl> + # include " tensorflow / core / framework / tensor_types . h " <nl> + # include " tensorflow / core / framework / variant . h " <nl> + # include " tensorflow / core / framework / variant_op_registry . h " <nl> + # include " tensorflow / core / kernels / concat_lib . h " <nl> + # include " tensorflow / core / lib / core / coding . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + # include " tensorflow / core / util / util . 
h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + typedef Eigen : : GpuDevice GPUDevice ; <nl> + <nl> + # define REGISTER_TENSOR_LIST_STACK_GPU ( T ) \ <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListStack " ) \ <nl> + . TypeConstraint < T > ( " element_dtype " ) \ <nl> + . Device ( DEVICE_GPU ) , \ <nl> + TensorListStack < GPUDevice , T > ) <nl> + <nl> + TF_CALL_GPU_NUMBER_TYPES ( REGISTER_TENSOR_LIST_STACK_GPU ) ; <nl> + REGISTER_TENSOR_LIST_STACK_GPU ( bfloat16 ) ; <nl> + TF_CALL_complex64 ( REGISTER_TENSOR_LIST_STACK_GPU ) ; <nl> + TF_CALL_complex128 ( REGISTER_TENSOR_LIST_STACK_GPU ) ; <nl> + TF_CALL_int64 ( REGISTER_TENSOR_LIST_STACK_GPU ) ; <nl> + REGISTER_TENSOR_LIST_STACK_GPU ( bool ) ; <nl> + <nl> + # undef REGISTER_TENSOR_LIST_STACK_GPU <nl> + <nl> + # define REGISTER_TENSOR_LIST_FROM_TENSOR_GPU ( T ) \ <nl> + REGISTER_KERNEL_BUILDER ( Name ( " TensorListFromTensor " ) \ <nl> + . TypeConstraint < T > ( " element_dtype " ) \ <nl> + . Device ( DEVICE_GPU ) \ <nl> + . HostMemory ( " element_shape " ) , \ <nl> + TensorListFromTensor < GPUDevice , T > ) <nl> + <nl> + TF_CALL_GPU_NUMBER_TYPES ( REGISTER_TENSOR_LIST_FROM_TENSOR_GPU ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_GPU ( bfloat16 ) ; <nl> + TF_CALL_complex64 ( REGISTER_TENSOR_LIST_FROM_TENSOR_GPU ) ; <nl> + TF_CALL_complex128 ( REGISTER_TENSOR_LIST_FROM_TENSOR_GPU ) ; <nl> + TF_CALL_int64 ( REGISTER_TENSOR_LIST_FROM_TENSOR_GPU ) ; <nl> + REGISTER_TENSOR_LIST_FROM_TENSOR_GPU ( bool ) ; <nl> + <nl> + # undef REGISTER_TENSOR_LIST_FROM_TENSOR_GPU <nl> + <nl> + REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION ( ADD_VARIANT_BINARY_OP , DEVICE_GPU , <nl> + TensorList , TensorList : : kTypeName , <nl> + TensorListBinaryAdd < GPUDevice > ) ; <nl> + REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION ( ZEROS_LIKE_VARIANT_UNARY_OP , <nl> + DEVICE_GPU , TensorList , <nl> + TensorList : : kTypeName , <nl> + TensorListZerosLike < GPUDevice > ) ; <nl> + <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 
0000000000000 . . 6a2a572b6d747 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / list_kernels . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # ifndef THIRD_PARTY_TENSORFLOW_CORE_KERNELS_LIST_KERNELS_H_ <nl> + # define THIRD_PARTY_TENSORFLOW_CORE_KERNELS_LIST_KERNELS_H_ <nl> + <nl> + # define EIGEN_USE_THREADS <nl> + # if GOOGLE_CUDA <nl> + # define EIGEN_USE_GPU <nl> + # endif / / GOOGLE_CUDA <nl> + <nl> + # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> + # include " tensorflow / core / framework / op_kernel . h " <nl> + # include " tensorflow / core / framework / register_types . h " <nl> + # include " tensorflow / core / framework / tensor . h " <nl> + # include " tensorflow / core / framework / tensor_types . h " <nl> + # include " tensorflow / core / framework / variant . h " <nl> + # include " tensorflow / core / framework / variant_op_registry . h " <nl> + # include " tensorflow / core / kernels / concat_lib . h " <nl> + # include " tensorflow / core / lib / core / coding . h " <nl> + # include " tensorflow / core / lib / core / errors . 
h " <nl> + # include " tensorflow / core / util / util . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + / / Variant compatible type for a list of tensors . This is mutable but instances <nl> + / / should never be mutated after stored in a variant tensor . <nl> + struct TensorList { <nl> + public : <nl> + TensorList ( ) { } <nl> + TensorList ( const TensorList & other ) ; <nl> + <nl> + static const char kTypeName [ ] ; <nl> + string TypeName ( ) const { return kTypeName ; } <nl> + <nl> + void Encode ( VariantTensorData * data ) const ; <nl> + <nl> + bool Decode ( const VariantTensorData & data ) ; <nl> + <nl> + / / TODO ( apassos ) fill this out <nl> + string DebugString ( ) const { return " TensorList " ; } <nl> + <nl> + std : : vector < Tensor > tensors ; <nl> + PartialTensorShape element_shape ; <nl> + DataType element_dtype ; <nl> + } ; <nl> + <nl> + Status TensorShapeFromTensor ( const Tensor & t , PartialTensorShape * out ) ; <nl> + <nl> + template < typename Device , typename T > <nl> + class TensorListStack : public OpKernel { <nl> + public : <nl> + typedef std : : vector < std : : unique_ptr < typename TTypes < T , 2 > : : ConstMatrix > > <nl> + ConstMatrixVector ; <nl> + explicit TensorListStack ( OpKernelConstruction * c ) : OpKernel ( c ) { <nl> + OP_REQUIRES_OK ( c , c - > GetAttr ( " element_dtype " , & element_dtype_ ) ) ; <nl> + OP_REQUIRES_OK ( c , c - > GetAttr ( " num_elements " , & num_elements_ ) ) ; <nl> + } <nl> + <nl> + ~ TensorListStack ( ) { } <nl> + <nl> + void Compute ( OpKernelContext * c ) override { <nl> + const TensorList * l = c - > input ( 0 ) . scalar < Variant > ( ) ( ) . get < TensorList > ( ) ; <nl> + OP_REQUIRES ( c , l ! = nullptr , <nl> + errors : : InvalidArgument ( <nl> + " Input handle is not a list . Saw : ' " , <nl> + c - > input ( 0 ) . scalar < Variant > ( ) ( ) . DebugString ( ) , " ' " ) ) ; <nl> + OP_REQUIRES ( c , l - > element_shape . 
IsFullyDefined ( ) , <nl> + errors : : InvalidArgument ( " Tried to stack elements from a list " <nl> + " with non - fully - defined shape . " ) ) ; <nl> + OP_REQUIRES ( c , element_dtype_ = = l - > element_dtype , <nl> + errors : : InvalidArgument ( " Invalid data types ; op elements " , <nl> + DataTypeString ( element_dtype_ ) , <nl> + " but list elements " , <nl> + DataTypeString ( l - > element_dtype ) ) ) ; <nl> + if ( num_elements_ ! = - 1 ) { <nl> + OP_REQUIRES ( c , l - > tensors . size ( ) = = num_elements_ , <nl> + errors : : InvalidArgument ( " Operation expected a list with " , <nl> + num_elements_ , <nl> + " elements but got a list with " , <nl> + l - > tensors . size ( ) , " elements . " ) ) ; <nl> + } <nl> + TensorShape resulting_shape ; <nl> + resulting_shape . AddDim ( l - > tensors . size ( ) ) ; <nl> + for ( TensorShapeDim s : l - > element_shape ) { <nl> + resulting_shape . AddDim ( s . size ) ; <nl> + } <nl> + Tensor * output ; <nl> + OP_REQUIRES_OK ( c , c - > allocate_output ( 0 , resulting_shape , & output ) ) ; <nl> + <nl> + ConstMatrixVector inputs_flat ; <nl> + inputs_flat . reserve ( l - > tensors . size ( ) ) ; <nl> + for ( const auto & t : l - > tensors ) { <nl> + inputs_flat . emplace_back ( new typename TTypes < T , 2 > : : ConstMatrix ( <nl> + t . shaped < T , 2 > ( { 1 , t . NumElements ( ) } ) ) ) ; <nl> + } <nl> + auto output_flat = <nl> + output - > shaped < T , 2 > ( { 1 , static_cast < int64 > ( l - > tensors . size ( ) ) * <nl> + l - > element_shape . 
num_elements ( ) } ) ; <nl> + <nl> + # if GOOGLE_CUDA <nl> + if ( std : : is_same < Device , Eigen : : GpuDevice > : : value ) { <nl> + ConcatGPU < T > ( c , inputs_flat , output , & output_flat ) ; <nl> + return ; <nl> + } <nl> + # endif / / GOOGLE_CUDA <nl> + ConcatCPU < T > ( c - > device ( ) , inputs_flat , & output_flat ) ; <nl> + } <nl> + <nl> + private : <nl> + int num_elements_ ; <nl> + DataType element_dtype_ ; <nl> + } ; <nl> + <nl> + template < typename Device , typename T > <nl> + class TensorListFromTensor : public OpKernel { <nl> + public : <nl> + TensorListFromTensor ( OpKernelConstruction * c ) : OpKernel ( c ) { } <nl> + <nl> + void Compute ( OpKernelContext * c ) override { <nl> + Tensor * output_tensor ; <nl> + AllocatorAttributes attr ; <nl> + attr . set_on_host ( true ) ; <nl> + OP_REQUIRES_OK ( c , c - > allocate_output ( 0 , { } , & output_tensor , attr ) ) ; <nl> + PartialTensorShape element_shape ; <nl> + OP_REQUIRES_OK ( c , TensorShapeFromTensor ( c - > input ( 1 ) , & element_shape ) ) ; <nl> + TensorList output_list ; <nl> + const Tensor & t = c - > input ( 0 ) ; <nl> + output_list . element_dtype = t . dtype ( ) ; <nl> + TensorShape output_shape ( t . shape ( ) ) ; <nl> + output_shape . RemoveDim ( 0 ) ; <nl> + OP_REQUIRES ( c , element_shape . IsCompatibleWith ( output_shape ) , <nl> + errors : : InvalidArgument ( <nl> + " Specified a list with shape " , element_shape . DebugString ( ) , <nl> + " from a tensor with shape " , output_shape . DebugString ( ) ) ) ; <nl> + output_list . element_shape = element_shape ; <nl> + output_list . tensors . reserve ( t . shape ( ) . dim_size ( 0 ) ) ; <nl> + for ( int i = 0 ; i < t . shape ( ) . dim_size ( 0 ) ; + + i ) { <nl> + Tensor tmp = t . Slice ( i , i + 1 ) ; <nl> + TensorShape tmp_shape = tmp . shape ( ) ; <nl> + tmp_shape . RemoveDim ( 0 ) ; <nl> + OP_REQUIRES ( c , tmp . CopyFrom ( tmp , tmp_shape ) , <nl> + errors : : Unknown ( " Unexpected shape error . " ) ) ; <nl> + if ( tmp . 
IsAligned ( ) | | ! DataTypeCanUseMemcpy ( DataTypeToEnum < T > : : value ) ) { <nl> + output_list . tensors . push_back ( tmp ) ; <nl> + } else { <nl> + Tensor aligned ; <nl> + OP_REQUIRES_OK ( c , c - > allocate_temp ( tmp . dtype ( ) , tmp . shape ( ) , & aligned ) ) ; <nl> + aligned . flat < T > ( ) . device ( c - > eigen_device < Device > ( ) ) = <nl> + tmp . unaligned_flat < T > ( ) ; <nl> + output_list . tensors . push_back ( aligned ) ; <nl> + } <nl> + } <nl> + output_tensor - > scalar < Variant > ( ) ( ) = std : : move ( output_list ) ; <nl> + } <nl> + } ; <nl> + <nl> + template < typename Device > <nl> + Status TensorListBinaryAdd ( OpKernelContext * c , const TensorList & a , <nl> + const TensorList & b , TensorList * out ) { <nl> + if ( a . element_dtype ! = b . element_dtype ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to add two lists of tensors of different dtypes . One is " , <nl> + DataTypeString ( a . element_dtype ) , " and the other is " , <nl> + DataTypeString ( b . element_dtype ) ) ; <nl> + } <nl> + out - > element_dtype = a . element_dtype ; <nl> + if ( ! a . element_shape . IsCompatibleWith ( b . element_shape ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to add two lists of tensors with incompatible element shapes . " <nl> + " One is " , <nl> + a . element_shape . DebugString ( ) , " and the other is " , <nl> + b . element_shape . DebugString ( ) ) ; <nl> + } <nl> + <nl> + TF_RETURN_IF_ERROR ( <nl> + a . element_shape . MergeWith ( b . element_shape , & out - > element_shape ) ) ; <nl> + if ( a . tensors . size ( ) ! = b . tensors . size ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to add two lists of tensors with different lengths . One is " , <nl> + a . tensors . size ( ) , " and the other is " , b . tensors . size ( ) ) ; <nl> + } <nl> + out - > tensors . reserve ( a . tensors . size ( ) ) ; <nl> + for ( int i = 0 ; i < a . tensors . 
size ( ) ; + + i ) { <nl> + const Tensor & a_tensor = a . tensors [ i ] ; <nl> + const Tensor & b_tensor = b . tensors [ i ] ; <nl> + if ( a_tensor . shape ( ) ! = b_tensor . shape ( ) ) { <nl> + / / TODO ( apassos ) support broadcasting additions here ? <nl> + return errors : : InvalidArgument ( <nl> + " Trying to add two tensors with incompatible element shapes . " <nl> + " One is " , <nl> + a_tensor . shape ( ) . DebugString ( ) , " and the other is " , <nl> + b_tensor . shape ( ) . DebugString ( ) ) ; <nl> + } <nl> + Tensor out_tensor ; <nl> + TF_RETURN_IF_ERROR ( <nl> + c - > allocate_temp ( a_tensor . dtype ( ) , a_tensor . shape ( ) , & out_tensor ) ) ; <nl> + switch ( out_tensor . dtype ( ) ) { <nl> + # define DTYPE_CASE ( dtype ) \ <nl> + case DataTypeToEnum < dtype > : : value : \ <nl> + out_tensor . flat < dtype > ( ) . device ( c - > eigen_device < Device > ( ) ) = \ <nl> + a_tensor . flat < dtype > ( ) + b_tensor . flat < dtype > ( ) ; \ <nl> + break ; <nl> + <nl> + TF_CALL_NUMBER_TYPES ( DTYPE_CASE ) <nl> + <nl> + # undef DTYPE_CASE <nl> + default : <nl> + return errors : : InvalidArgument ( " Trying to add unsupported dtype " , <nl> + out_tensor . dtype ( ) ) ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + template < typename Device > <nl> + Status TensorListZerosLike ( OpKernelContext * c , const TensorList & x , <nl> + TensorList * y ) { <nl> + y - > element_dtype = x . element_dtype ; <nl> + y - > element_shape = x . element_shape ; <nl> + y - > tensors . reserve ( x . tensors . size ( ) ) ; <nl> + for ( const Tensor & t : x . tensors ) { <nl> + Tensor out_tensor ; <nl> + TF_RETURN_IF_ERROR ( c - > allocate_temp ( t . dtype ( ) , t . shape ( ) , & out_tensor ) ) ; <nl> + switch ( out_tensor . dtype ( ) ) { <nl> + # define DTYPE_CASE ( dtype ) \ <nl> + case DataTypeToEnum < dtype > : : value : \ <nl> + out_tensor . flat < dtype > ( ) . device ( c - > eigen_device < Device > ( ) ) = \ <nl> + out_tensor . 
flat < dtype > ( ) . constant ( dtype ( 0 ) ) ; \ <nl> + break ; <nl> + <nl> + TF_CALL_NUMBER_TYPES ( DTYPE_CASE ) <nl> + <nl> + # undef DTYPE_CASE <nl> + default : <nl> + return errors : : InvalidArgument ( <nl> + " Trying to compute zeros_like for unsupported dtype " , <nl> + out_tensor . dtype ( ) ) ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / THIRD_PARTY_TENSORFLOW_CORE_KERNELS_LIST_KERNELS_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . db534857720f5 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / ops / list_ops . cc <nl> <nl> + / * Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / framework / common_shape_fns . h " <nl> + # include " tensorflow / core / framework / op . h " <nl> + # include " tensorflow / core / framework / shape_inference . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace { <nl> + <nl> + REGISTER_OP ( " EmptyTensorList " ) <nl> + . Input ( " element_shape : shape_type " ) <nl> + . Output ( " handle : variant " ) <nl> + . Attr ( " element_dtype : type " ) <nl> + . 
Attr ( " shape_type : { int32 , int64 } " ) <nl> + . SetShapeFn ( [ ] ( shape_inference : : InferenceContext * c ) { <nl> + c - > set_output ( 0 , c - > Scalar ( ) ) ; <nl> + DataType t ; <nl> + TF_RETURN_IF_ERROR ( c - > GetAttr ( " element_dtype " , & t ) ) ; <nl> + shape_inference : : ShapeHandle s ; <nl> + TF_RETURN_IF_ERROR ( c - > MakeShapeFromShapeTensor ( 0 , & s ) ) ; <nl> + c - > set_output_handle_shapes_and_types ( <nl> + 0 , std : : vector < shape_inference : : ShapeAndType > { { s , t } } ) ; <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + <nl> + REGISTER_OP ( " TensorListPushBack " ) <nl> + . Input ( " input_handle : variant " ) <nl> + . Input ( " tensor : element_dtype " ) <nl> + . Output ( " output_handle : variant " ) <nl> + . Attr ( " element_dtype : type " ) <nl> + . SetShapeFn ( [ ] ( shape_inference : : InferenceContext * c ) { <nl> + c - > set_output ( 0 , c - > Scalar ( ) ) ; <nl> + DataType t ; <nl> + TF_RETURN_IF_ERROR ( c - > GetAttr ( " element_dtype " , & t ) ) ; <nl> + shape_inference : : ShapeHandle s = c - > UnknownShape ( ) ; <nl> + <nl> + auto * handle_data = c - > input_handle_shapes_and_types ( 0 ) ; <nl> + if ( handle_data ! = nullptr & & handle_data - > size ( ) ! = 1 ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to push to list with wrong variant data . " ) ; <nl> + } <nl> + if ( handle_data ! = nullptr ) { <nl> + const shape_inference : : ShapeAndType & list_shape_type = <nl> + ( * handle_data ) [ 0 ] ; <nl> + if ( list_shape_type . dtype ! = t ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to push to list with wrong element dtype . List has type " , <nl> + DataTypeString ( list_shape_type . dtype ) , <nl> + " but trying to push element with type " , DataTypeString ( t ) ) ; <nl> + } <nl> + shape_inference : : ShapeHandle ignored ; <nl> + TF_RETURN_IF_ERROR ( c - > Merge ( s , list_shape_type . shape , & ignored ) ) ; <nl> + s = list_shape_type . 
shape ; <nl> + } <nl> + c - > set_output_handle_shapes_and_types ( <nl> + 0 , std : : vector < shape_inference : : ShapeAndType > { { s , t } } ) ; <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + <nl> + REGISTER_OP ( " TensorListLength " ) <nl> + . Input ( " input_handle : variant " ) <nl> + . Output ( " length : int32 " ) <nl> + . SetShapeFn ( shape_inference : : ScalarShape ) ; <nl> + <nl> + REGISTER_OP ( " TensorListPopBack " ) <nl> + . Input ( " input_handle : variant " ) <nl> + . Output ( " output_handle : variant " ) <nl> + . Output ( " tensor : element_dtype " ) <nl> + . Attr ( " element_dtype : type " ) <nl> + . SetShapeFn ( [ ] ( shape_inference : : InferenceContext * c ) { <nl> + DataType t ; <nl> + TF_RETURN_IF_ERROR ( c - > GetAttr ( " element_dtype " , & t ) ) ; <nl> + shape_inference : : ShapeHandle s = c - > UnknownShape ( ) ; <nl> + auto * handle_data = c - > input_handle_shapes_and_types ( 0 ) ; <nl> + if ( handle_data ! = nullptr & & handle_data - > size ( ) ! = 1 ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to read from list with invalid variant data . " ) ; <nl> + } <nl> + if ( handle_data ! = nullptr ) { <nl> + const shape_inference : : ShapeAndType & list_shape_type = <nl> + ( * handle_data ) [ 0 ] ; <nl> + if ( list_shape_type . dtype ! = t ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to read from list with wrong element dtype . List has " <nl> + " type " , <nl> + DataTypeString ( list_shape_type . dtype ) , <nl> + " but trying to push element with type " , DataTypeString ( t ) ) ; <nl> + } <nl> + shape_inference : : ShapeHandle ignored ; <nl> + TF_RETURN_IF_ERROR ( c - > Merge ( s , list_shape_type . shape , & ignored ) ) ; <nl> + c - > set_output_handle_shapes_and_types ( 0 , * handle_data ) ; <nl> + s = list_shape_type . 
shape ; <nl> + } <nl> + c - > set_output ( 1 , s ) ; <nl> + c - > set_output ( 0 , c - > Scalar ( ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + <nl> + REGISTER_OP ( " TensorListStack " ) <nl> + . Input ( " input_handle : variant " ) <nl> + . Output ( " tensor : element_dtype " ) <nl> + . Attr ( " element_dtype : type " ) <nl> + . Attr ( " num_elements : int = - 1 " ) <nl> + . SetShapeFn ( [ ] ( shape_inference : : InferenceContext * c ) { <nl> + DataType t ; <nl> + TF_RETURN_IF_ERROR ( c - > GetAttr ( " element_dtype " , & t ) ) ; <nl> + shape_inference : : ShapeHandle s = c - > UnknownShape ( ) ; <nl> + auto * handle_data = c - > input_handle_shapes_and_types ( 0 ) ; <nl> + if ( handle_data ! = nullptr & & handle_data - > size ( ) ! = 1 ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to read from list with wrong variant data . " ) ; <nl> + } <nl> + if ( handle_data ! = nullptr ) { <nl> + const shape_inference : : ShapeAndType & list_shape_type = <nl> + ( * handle_data ) [ 0 ] ; <nl> + if ( list_shape_type . dtype ! = t ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to read from list with wrong element dtype . List has " <nl> + " type " , <nl> + DataTypeString ( list_shape_type . dtype ) , " but expectec type " , <nl> + DataTypeString ( t ) ) ; <nl> + } <nl> + shape_inference : : ShapeHandle ignored ; <nl> + TF_RETURN_IF_ERROR ( c - > Merge ( s , list_shape_type . shape , & ignored ) ) ; <nl> + if ( ! c - > FullyDefined ( s ) | | ! c - > FullyDefined ( list_shape_type . shape ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Can only gather from a list with fully defined shapes . " ) ; <nl> + } <nl> + s = list_shape_type . 
shape ; <nl> + } <nl> + int expected_num_elements = - 1 ; <nl> + TF_RETURN_IF_ERROR ( c - > GetAttr ( " num_elements " , & expected_num_elements ) ) ; <nl> + shape_inference : : ShapeHandle num_elements ; <nl> + if ( expected_num_elements = = - 1 ) { <nl> + num_elements = c - > MakeShape ( { c - > UnknownDim ( ) } ) ; <nl> + } else { <nl> + num_elements = c - > MakeShape ( { expected_num_elements } ) ; <nl> + } <nl> + shape_inference : : ShapeHandle result ; <nl> + TF_RETURN_IF_ERROR ( c - > Concatenate ( num_elements , s , & result ) ) ; <nl> + c - > set_output ( 0 , result ) ; <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + <nl> + REGISTER_OP ( " TensorListFromTensor " ) <nl> + . Input ( " tensor : element_dtype " ) <nl> + . Input ( " element_shape : shape_type " ) <nl> + . Output ( " output_handle : variant " ) <nl> + . Attr ( " element_dtype : type " ) <nl> + . Attr ( " shape_type : { int32 , int64 } " ) <nl> + . SetShapeFn ( [ ] ( shape_inference : : InferenceContext * c ) { <nl> + c - > set_output ( 0 , c - > Scalar ( ) ) ; <nl> + DataType t ; <nl> + TF_RETURN_IF_ERROR ( c - > GetAttr ( " element_dtype " , & t ) ) ; <nl> + shape_inference : : ShapeHandle s = c - > input ( 0 ) ; <nl> + shape_inference : : ShapeHandle o ; <nl> + TF_RETURN_IF_ERROR ( c - > Subshape ( s , 1 , & o ) ) ; <nl> + shape_inference : : ShapeHandle element_shape ; <nl> + TF_RETURN_IF_ERROR ( c - > MakeShapeFromShapeTensor ( 1 , & element_shape ) ) ; <nl> + TF_RETURN_IF_ERROR ( c - > Merge ( o , element_shape , & o ) ) ; <nl> + c - > set_output_handle_shapes_and_types ( <nl> + 0 , std : : vector < shape_inference : : ShapeAndType > { { element_shape , t } } ) ; <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + <nl> + } / / namespace <nl> + } / / namespace tensorflow <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> py_library ( <nl> " : io_ops " , <nl> " : layers " , <nl> " : lib " , <nl> + " : list_ops " , <nl> " : math_ops " , <nl> " : 
metrics " , <nl> " : nn " , <nl> tf_gen_op_wrapper_private_py ( <nl> name = " resource_variable_ops_gen " , <nl> ) <nl> <nl> + tf_gen_op_wrapper_private_py ( <nl> + name = " list_ops_gen " , <nl> + ) <nl> + <nl> tf_gen_op_wrapper_private_py ( <nl> name = " script_ops_gen " , <nl> ) <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> + py_library ( <nl> + name = " list_ops " , <nl> + srcs = [ " ops / list_ops . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : list_ops_gen " , <nl> + ] , <nl> + ) <nl> + <nl> py_library ( <nl> name = " nn " , <nl> srcs = [ <nl> mmm a / tensorflow / python / eager / backprop . py <nl> ppp b / tensorflow / python / eager / backprop . py <nl> def _fast_fill ( value , shape , dtype ) : <nl> <nl> def _zeros ( shape , dtype ) : <nl> " " " Wraps array_ops . zeros to cache last zero for a given shape and dtype . " " " <nl> + if dtype = = dtypes . variant : <nl> + # TODO ( apassos ) : need to save enough information about variant tensors to do <nl> + # a zeros <nl> + return None <nl> if [ shape , dtype ] ! = _last_shape_dtype : <nl> _last_shape_dtype [ : ] = [ shape , dtype ] <nl> _last_zero [ 0 ] = _fast_fill ( 0 , shape , dtype ) <nl> mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> tf_py_test ( <nl> ] , <nl> ) <nl> <nl> + cuda_py_test ( <nl> + name = " list_ops_test " , <nl> + size = " small " , <nl> + srcs = [ " list_ops_test . 
py " ] , <nl> + additional_deps = [ <nl> + " / / third_party / py / numpy " , <nl> + " / / tensorflow / python : list_ops " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + ] , <nl> + grpc_enabled = True , <nl> + ) <nl> + <nl> cuda_py_test ( <nl> name = " benchmark_test " , <nl> size = " small " , <nl> new file mode 100644 <nl> index 0000000000000 . . 8fae044e2e1e8 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / kernel_tests / list_ops_test . py <nl> <nl> + # Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for ops which manipulate lists of tensors . " " " <nl> + <nl> + # pylint : disable = g - bad - name <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import numpy as np # pylint : disable = unused - import <nl> + <nl> + from tensorflow . python . client import session <nl> + from tensorflow . python . 
eager import backprop <nl> + from tensorflow . python . eager import context <nl> + from tensorflow . python . framework import constant_op <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . framework import test_util <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import list_ops <nl> + from tensorflow . python . platform import test <nl> + from tensorflow . python . training import server_lib <nl> + <nl> + <nl> + def scalar_shape ( ) : <nl> + return ops . convert_to_tensor ( [ ] , dtype = dtypes . int32 ) <nl> + <nl> + <nl> + class ListOpsTest ( test_util . TensorFlowTestCase ) : <nl> + <nl> + def testPushPop ( self ) : <nl> + l = list_ops . empty_tensor_list ( element_dtype = dtypes . float32 , <nl> + element_shape = scalar_shape ( ) ) <nl> + l = list_ops . tensor_list_push_back ( l , constant_op . constant ( 1 . 0 ) ) <nl> + l , e = list_ops . tensor_list_pop_back ( l , element_dtype = dtypes . float32 ) <nl> + self . assertAllEqual ( e , 1 . 0 ) <nl> + <nl> + def testPushPopGPU ( self ) : <nl> + if not context . num_gpus ( ) : <nl> + return <nl> + with context . device ( " gpu : 0 " ) : <nl> + self . testPushPop ( ) <nl> + <nl> + def testStack ( self ) : <nl> + l = list_ops . empty_tensor_list ( element_dtype = dtypes . float32 , <nl> + element_shape = scalar_shape ( ) ) <nl> + l = list_ops . tensor_list_push_back ( l , constant_op . constant ( 1 . 0 ) ) <nl> + l = list_ops . tensor_list_push_back ( l , constant_op . constant ( 2 . 0 ) ) <nl> + t = list_ops . tensor_list_stack ( l , element_dtype = dtypes . float32 ) <nl> + self . assertAllEqual ( t , [ 1 . 0 , 2 . 0 ] ) <nl> + <nl> + def testStackGPU ( self ) : <nl> + if not context . num_gpus ( ) : <nl> + return <nl> + with context . device ( " gpu : 0 " ) : <nl> + self . testStack ( ) <nl> + <nl> + def testTensorListFromTensor ( self ) : <nl> + t = constant_op . 
constant ( [ 1 . 0 , 2 . 0 ] ) <nl> + l = list_ops . tensor_list_from_tensor ( t , element_shape = scalar_shape ( ) ) <nl> + l , e = list_ops . tensor_list_pop_back ( l , element_dtype = dtypes . float32 ) <nl> + self . assertAllEqual ( e , 2 . 0 ) <nl> + l , e = list_ops . tensor_list_pop_back ( l , element_dtype = dtypes . float32 ) <nl> + self . assertAllEqual ( e , 1 . 0 ) <nl> + self . assertAllEqual ( list_ops . tensor_list_length ( l ) , 0 ) <nl> + <nl> + def testFromTensorGPU ( self ) : <nl> + if not context . num_gpus ( ) : <nl> + return <nl> + with context . device ( " gpu : 0 " ) : <nl> + self . testTensorListFromTensor ( ) <nl> + <nl> + def testUnknownShape ( self ) : <nl> + l = list_ops . empty_tensor_list ( element_dtype = dtypes . float32 , <nl> + element_shape = - 1 ) <nl> + l = list_ops . tensor_list_push_back ( l , constant_op . constant ( 1 . 0 ) ) <nl> + l = list_ops . tensor_list_push_back ( l , constant_op . constant ( [ 1 . 0 , 2 . 0 ] ) ) <nl> + _ , e = list_ops . tensor_list_pop_back ( l , element_dtype = dtypes . float32 ) <nl> + self . assertAllEqual ( e , [ 1 . 0 , 2 . 0 ] ) <nl> + <nl> + def testCPUGPUCopy ( self ) : <nl> + if not context . num_gpus ( ) : <nl> + return <nl> + t = constant_op . constant ( [ 1 . 0 , 2 . 0 ] ) <nl> + l = list_ops . tensor_list_from_tensor ( t , element_shape = scalar_shape ( ) ) <nl> + with context . device ( " gpu : 0 " ) : <nl> + l_gpu = array_ops . identity ( l ) <nl> + self . assertAllEqual ( <nl> + list_ops . tensor_list_pop_back ( <nl> + l_gpu , element_dtype = dtypes . float32 ) [ 1 ] , <nl> + 2 . 0 ) <nl> + l_cpu = array_ops . identity ( l_gpu ) <nl> + self . assertAllEqual ( <nl> + list_ops . tensor_list_pop_back ( <nl> + l_cpu , element_dtype = dtypes . float32 ) [ 1 ] , <nl> + 2 . 0 ) <nl> + <nl> + def testSerialize ( self ) : <nl> + # pylint : disable = g - import - not - at - top <nl> + try : <nl> + import portpicker <nl> + except ImportError : <nl> + return <nl> + with context . 
graph_mode ( ) : <nl> + worker_port = portpicker . pick_unused_port ( ) <nl> + ps_port = portpicker . pick_unused_port ( ) <nl> + cluster_dict = { <nl> + " worker " : [ " localhost : % s " % worker_port ] , <nl> + " ps " : [ " localhost : % s " % ps_port ] <nl> + } <nl> + cs = server_lib . ClusterSpec ( cluster_dict ) <nl> + <nl> + worker = server_lib . Server ( <nl> + cs , job_name = " worker " , protocol = " grpc " , task_index = 0 , start = True ) <nl> + unused_ps = server_lib . Server ( <nl> + cs , job_name = " ps " , protocol = " grpc " , task_index = 0 , start = True ) <nl> + with ops . Graph ( ) . as_default ( ) , session . Session ( target = worker . target ) : <nl> + with ops . device ( " / job : worker " ) : <nl> + t = constant_op . constant ( [ [ 1 . 0 ] , [ 2 . 0 ] ] ) <nl> + l = list_ops . tensor_list_from_tensor ( t , element_shape = [ 1 ] ) <nl> + with ops . device ( " / job : ps " ) : <nl> + l_ps = array_ops . identity ( l ) <nl> + l_ps , e = list_ops . tensor_list_pop_back ( <nl> + l_ps , element_dtype = dtypes . float32 ) <nl> + with ops . device ( " / job : worker " ) : <nl> + worker_e = array_ops . identity ( e ) <nl> + self . assertAllEqual ( worker_e . eval ( ) , [ 2 . 0 ] ) <nl> + <nl> + def testPushPopGradients ( self ) : <nl> + with backprop . GradientTape ( ) as tape : <nl> + l = list_ops . empty_tensor_list ( element_dtype = dtypes . float32 , <nl> + element_shape = scalar_shape ( ) ) <nl> + c = constant_op . constant ( 1 . 0 ) <nl> + tape . watch ( c ) <nl> + l = list_ops . tensor_list_push_back ( l , c ) <nl> + l , e = list_ops . tensor_list_pop_back ( l , element_dtype = dtypes . float32 ) <nl> + e = 2 * e <nl> + self . assertAllEqual ( tape . gradient ( e , [ c ] ) [ 0 ] , 2 . 0 ) <nl> + <nl> + def testStackFromTensorGradients ( self ) : <nl> + with backprop . GradientTape ( ) as tape : <nl> + c = constant_op . constant ( [ 1 . 0 , 2 . 0 ] ) <nl> + tape . watch ( c ) <nl> + l = list_ops . 
tensor_list_from_tensor ( c , element_shape = scalar_shape ( ) ) <nl> + c2 = list_ops . tensor_list_stack ( <nl> + l , element_dtype = dtypes . float32 ) <nl> + result = c2 * 2 . 0 <nl> + self . assertAllEqual ( tape . gradient ( result , [ c ] ) [ 0 ] , [ 2 . 0 , 2 . 0 ] ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + ops . enable_eager_execution ( ) <nl> + test . main ( ) <nl> new file mode 100644 <nl> index 0000000000000 . . 6b31c0063983d <nl> mmm / dev / null <nl> ppp b / tensorflow / python / ops / list_ops . py <nl> <nl> + # Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Ops to manipulate lists of tensors . " " " <nl> + <nl> + # pylint : disable = g - bad - name <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import gen_list_ops <nl> + # go / tf - wildcard - import <nl> + # pylint : disable = wildcard - import <nl> + from tensorflow . python . ops . 
gen_list_ops import * <nl> + # pylint : enable = wildcard - import <nl> + <nl> + <nl> + @ ops . RegisterGradient ( " TensorListPushBack " ) <nl> + def _PushBackGradient ( op , dresult ) : <nl> + return gen_list_ops . tensor_list_pop_back ( <nl> + dresult , element_dtype = op . get_attr ( " element_dtype " ) ) <nl> + <nl> + <nl> + @ ops . RegisterGradient ( " TensorListPopBack " ) <nl> + def _PopBackGradient ( unused_op , dlist , delement ) : <nl> + if dlist is None : <nl> + dlist = gen_list_ops . empty_tensor_list ( <nl> + element_dtype = delement . dtype , <nl> + element_shape = - 1 ) <nl> + return gen_list_ops . tensor_list_push_back ( dlist , delement ) <nl> + <nl> + <nl> + @ ops . RegisterGradient ( " TensorListStack " ) <nl> + def _TensorListStack ( unused_op , dtensor ) : <nl> + return gen_list_ops . tensor_list_from_tensor ( dtensor , <nl> + element_shape = dtensor . shape [ 1 : ] ) <nl> + <nl> + <nl> + @ ops . RegisterGradient ( " TensorListFromTensor " ) <nl> + def _TensorListFromTensor ( op , dlist ) : <nl> + if op . inputs [ 0 ] . shape [ 0 ] is not None : <nl> + num_elements = op . inputs [ 0 ] . shape [ 0 ] <nl> + else : <nl> + num_elements = None <nl> + if dlist is None : <nl> + dlist = gen_list_ops . empty_tensor_list ( <nl> + element_dtype = op . inputs [ 0 ] . dtype , <nl> + element_shape = - 1 ) <nl> + return gen_list_ops . tensor_list_stack ( <nl> + dlist , element_dtype = op . inputs [ 0 ] . dtype , <nl> + num_elements = num_elements ) <nl>
Immutable differentiable tf lists .
tensorflow/tensorflow
f24322bec4d45ca712c2efa60d84a43ac73d7b8c
2018-01-12T23:53:50Z
mmm a / src / app / ui / color_bar . cpp <nl> ppp b / src / app / ui / color_bar . cpp <nl> ColorBar : : ColorBar ( int align ) <nl> m_paletteButton . DropDownClick . connect ( Bind < void > ( & ColorBar : : onPaletteButtonDropDownClick , this ) ) ; <nl> <nl> onColorButtonChange ( getFgColor ( ) ) ; <nl> + <nl> + UIContext : : instance ( ) - > addObserver ( this ) ; <nl> } <nl> <nl> ColorBar : : ~ ColorBar ( ) <nl> { <nl> + UIContext : : instance ( ) - > removeObserver ( this ) ; <nl> + <nl> set_config_color ( " ColorBar " , " FG " , getFgColor ( ) ) ; <nl> set_config_color ( " ColorBar " , " BG " , getBgColor ( ) ) ; <nl> } <nl> void ColorBar : : setPaletteEditorButtonState ( bool state ) <nl> m_paletteButton . setSelected ( state ) ; <nl> } <nl> <nl> + void ColorBar : : onSetActiveDocument ( doc : : Document * document ) <nl> + { <nl> + destroyRemap ( ) ; <nl> + } <nl> + <nl> / / Switches the palette - editor <nl> void ColorBar : : onPaletteButtonClick ( ) <nl> { <nl> void ColorBar : : onRemapButtonClick ( ) <nl> Transaction transaction ( writer . context ( ) , " Remap Colors " , ModifyDocument ) ; <nl> transaction . execute ( new cmd : : RemapColors ( sprite , * m_remap ) ) ; <nl> transaction . commit ( ) ; <nl> - <nl> - delete m_remap ; <nl> - m_remap = nullptr ; <nl> } <nl> - <nl> update_screen_for_document ( writer . document ( ) ) ; <nl> + destroyRemap ( ) ; <nl> } <nl> catch ( base : : Exception & e ) { <nl> Console : : showException ( e ) ; <nl> } <nl> - <nl> - m_remapButton . setVisible ( false ) ; <nl> - layout ( ) ; <nl> } <nl> <nl> void ColorBar : : onPaletteViewIndexChange ( int index , ui : : MouseButtons buttons ) <nl> void ColorBar : : onColorButtonChange ( const app : : Color & color ) <nl> m_paletteView . selectColor ( color . getIndex ( ) ) ; <nl> } <nl> <nl> + void ColorBar : : destroyRemap ( ) <nl> + { <nl> + if ( ! m_remap ) <nl> + return ; <nl> + <nl> + delete m_remap ; <nl> + m_remap = nullptr ; <nl> + <nl> + m_remapButton . 
setVisible ( false ) ; <nl> + layout ( ) ; <nl> + } <nl> + <nl> } / / namespace app <nl> mmm a / src / app / ui / color_bar . h <nl> ppp b / src / app / ui / color_bar . h <nl> <nl> # include " app / ui / palette_view . h " <nl> # include " base / signal . h " <nl> # include " base / unique_ptr . h " <nl> + # include " doc / context_observer . h " <nl> # include " doc / pixel_format . h " <nl> # include " ui / box . h " <nl> # include " ui / button . h " <nl> namespace app { <nl> class PaletteIndexChangeEvent ; <nl> <nl> class ColorBar : public ui : : Box <nl> - , public PaletteViewDelegate { <nl> + , public PaletteViewDelegate <nl> + , public doc : : ContextObserver { <nl> static ColorBar * m_instance ; <nl> public : <nl> static ColorBar * instance ( ) { return m_instance ; } <nl> namespace app { <nl> / / when the visibility of the dialog changes . <nl> void setPaletteEditorButtonState ( bool state ) ; <nl> <nl> + / / ContextObserver impl <nl> + void onSetActiveDocument ( doc : : Document * document ) override ; <nl> + <nl> / / Signals <nl> Signal1 < void , const app : : Color & > FgColorChange ; <nl> Signal1 < void , const app : : Color & > BgColorChange ; <nl> namespace app { <nl> void onPaletteViewChangeSize ( int boxsize ) override ; <nl> <nl> private : <nl> + void destroyRemap ( ) ; <nl> + <nl> class ScrollableView : public ui : : View { <nl> public : <nl> ScrollableView ( ) ; <nl>
Destroy " remap " data in ColorBar when we change to another document
aseprite/aseprite
d6ecdefe220a1758da4c727c4fd443e906817a22
2015-03-23T17:55:34Z
mmm a / include / nlohmann / detail / meta / type_traits . hpp <nl> ppp b / include / nlohmann / detail / meta / type_traits . hpp <nl> template < typename > struct is_basic_json : std : : false_type { } ; <nl> NLOHMANN_BASIC_JSON_TPL_DECLARATION <nl> struct is_basic_json < NLOHMANN_BASIC_JSON_TPL > : std : : true_type { } ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / <nl> + / / jspn_ref helpers / / <nl> + / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + template < typename > <nl> + class json_ref ; <nl> + <nl> + template < typename > <nl> + struct is_json_ref : std : : false_type { } ; <nl> + <nl> + template < typename T > <nl> + struct is_json_ref < json_ref < T > > : std : : true_type { } ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / aliases for detected / / <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / include / nlohmann / json . hpp <nl> ppp b / include / nlohmann / json . hpp <nl> class basic_json <nl> / / other constructors and destructor / / <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / / @ private <nl> - basic_json ( const detail : : json_ref < basic_json > & ref ) <nl> - : basic_json ( ref . moved_or_copied ( ) ) <nl> - { } <nl> + template < typename JsonRef , <nl> + detail : : enable_if_t < detail : : conjunction < detail : : is_json_ref < JsonRef > , <nl> + std : : is_same < typename JsonRef : : value_type , basic_json > > : : value , int > = 0 > <nl> + basic_json ( const JsonRef & ref ) : basic_json ( ref . moved_or_copied ( ) ) { } <nl> <nl> / * ! <nl> @ brief copy constructor <nl> mmm a / single_include / nlohmann / json . hpp <nl> ppp b / single_include / nlohmann / json . 
hpp <nl> template < typename > struct is_basic_json : std : : false_type { } ; <nl> NLOHMANN_BASIC_JSON_TPL_DECLARATION <nl> struct is_basic_json < NLOHMANN_BASIC_JSON_TPL > : std : : true_type { } ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / <nl> + / / jspn_ref helpers / / <nl> + / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + template < typename > <nl> + class json_ref ; <nl> + <nl> + template < typename > <nl> + struct is_json_ref : std : : false_type { } ; <nl> + <nl> + template < typename T > <nl> + struct is_json_ref < json_ref < T > > : std : : true_type { } ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / aliases for detected / / <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> class basic_json <nl> / / other constructors and destructor / / <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / / @ private <nl> - basic_json ( const detail : : json_ref < basic_json > & ref ) <nl> - : basic_json ( ref . moved_or_copied ( ) ) <nl> - { } <nl> + template < typename JsonRef , <nl> + detail : : enable_if_t < detail : : conjunction < detail : : is_json_ref < JsonRef > , <nl> + std : : is_same < typename JsonRef : : value_type , basic_json > > : : value , int > = 0 > <nl> + basic_json ( const JsonRef & ref ) : basic_json ( ref . moved_or_copied ( ) ) { } <nl> <nl> / * ! <nl> @ brief copy constructor <nl>
Templatize basic_json ctor from json_ref
nlohmann/json
ec955f08b47ab7cb81f6e4a4c3e7b331ddf50f71
2020-04-12T19:32:39Z
mmm a / scripts / eosio_build_darwin . sh <nl> ppp b / scripts / eosio_build_darwin . sh <nl> <nl> fi <nl> <nl> printf " \ tHome Brew installation found . \ n \ n " <nl> - DCOUNT = 0 <nl> COUNT = 1 <nl> PERMISSION_GETTEXT = 0 <nl> DISPLAY = " " <nl> <nl> continue <nl> fi <nl> fi <nl> - let DCOUNT + + <nl> if [ $ brewname = " gettext " ] ; then <nl> PERMISSION_GETTEXT = 1 <nl> fi <nl> <nl> done < scripts / eosio_build_dep <nl> IFS = $ { var_ifs } <nl> <nl> - printf " \ tChecking Python3 installation . . . " <nl> + printf " \ tChecking Python3 . . . " <nl> if [ - z ` python3 - c ' import sys ; print ( sys . version_info . major ) ' 2 > / dev / null ` ] ; then <nl> DEP = $ DEP " python @ 3 " <nl> DISPLAY = " $ { DISPLAY } $ { COUNT } . Python 3 \ n \ t " <nl> <nl> printf " \ t \ t Python3 found \ n " <nl> fi <nl> <nl> - if [ $ DCOUNT - ne 0 ] ; then <nl> + if [ $ COUNT - gt 1 ] ; then <nl> printf " \ n \ tThe following dependencies are required to install EOSIO . \ n " <nl> printf " \ n \ t $ DISPLAY \ n \ n " <nl> echo " Do you wish to install these packages ? " <nl>
darwin : minor logic changes in checking for installed dependencies
EOSIO/eos
af5a07c86c5ebaf5cacba7a2ad20d274b8f58309
2018-04-05T15:10:30Z
mmm a / Makefile . am <nl> ppp b / Makefile . am <nl> java_EXTRA_DIST = <nl> java / core / src / main / java / com / google / protobuf / ByteString . java \ <nl> java / core / src / main / java / com / google / protobuf / CodedInputStream . java \ <nl> java / core / src / main / java / com / google / protobuf / CodedOutputStream . java \ <nl> + java / core / src / main / java / com / google / protobuf / DiscardUnknownFieldsParser . java \ <nl> java / core / src / main / java / com / google / protobuf / Descriptors . java \ <nl> java / core / src / main / java / com / google / protobuf / DoubleArrayList . java \ <nl> java / core / src / main / java / com / google / protobuf / DynamicMessage . java \ <nl> java_EXTRA_DIST = <nl> java / core / src / test / java / com / google / protobuf / CodedOutputStreamTest . java \ <nl> java / core / src / test / java / com / google / protobuf / DeprecatedFieldTest . java \ <nl> java / core / src / test / java / com / google / protobuf / DescriptorsTest . java \ <nl> + java / core / src / test / java / com / google / protobuf / DiscardUnknownFieldsTest . java \ <nl> java / core / src / test / java / com / google / protobuf / DoubleArrayListTest . java \ <nl> java / core / src / test / java / com / google / protobuf / DynamicMessageTest . java \ <nl> java / core / src / test / java / com / google / protobuf / EnumTest . java \ <nl> js_EXTRA_DIST = \ <nl> js / compatibility_tests / v3 . 0 . 0 / testempty . proto \ <nl> js / compatibility_tests / v3 . 0 . 0 / test . proto \ <nl> js / compatibility_tests / v3 . 0 . 0 / test . sh \ <nl> + js / compatibility_tests / v3 . 1 . 0 / testempty . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / testbinary . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / test5 . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / test4 . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / test3 . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / test2 . 
proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / test . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / proto3_test . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / proto3_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / message_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / maps_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / debug_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / data . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / commonjs / test7 / test7 . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / commonjs / test6 / test6 . proto \ <nl> + js / compatibility_tests / v3 . 1 . 0 / binary / writer_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / binary / utils_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / binary / reader_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / binary / proto_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / binary / decoder_test . js \ <nl> + js / compatibility_tests / v3 . 1 . 0 / binary / arith_test . js \ <nl> js / data . proto \ <nl> js / debug . js \ <nl> js / debug_test . js \ <nl>
Add java and JS dist files .
protocolbuffers/protobuf
bf658c28d0eea18b44be19d36c4a86a311c83777
2017-07-19T21:32:22Z
mmm a / third_party / gloo <nl> ppp b / third_party / gloo <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit aad0002fb40612e991390d8e807f247ed23f13c5 <nl> + Subproject commit 69eef748cc1dfbe0fefed69b34e6545495f67ac5 <nl>
Bump gloo submodule ( )
pytorch/pytorch
005eef50277e06270046c1a6c285faa9a6a54f49
2018-06-06T20:31:29Z
mmm a / src / debug / debug - coverage . cc <nl> ppp b / src / debug / debug - coverage . cc <nl> class SharedToCounterMap <nl> base : : KeyEqualityMatcher < Object > , <nl> base : : DefaultAllocationPolicy > { <nl> public : <nl> - typedef base : : TemplateHashMapEntry < SharedFunctionInfo , uint32_t > Entry ; <nl> + using Entry = base : : TemplateHashMapEntry < SharedFunctionInfo , uint32_t > ; <nl> inline void Add ( SharedFunctionInfo key , uint32_t count ) { <nl> Entry * entry = LookupOrInsert ( key , Hash ( key ) , [ ] ( ) { return 0 ; } ) ; <nl> uint32_t old_count = entry - > value ; <nl> mmm a / src / debug / debug - evaluate . cc <nl> ppp b / src / debug / debug - evaluate . cc <nl> bool IntrinsicHasNoSideEffect ( Runtime : : FunctionId id ) { <nl> } <nl> <nl> bool BytecodeHasNoSideEffect ( interpreter : : Bytecode bytecode ) { <nl> - typedef interpreter : : Bytecode Bytecode ; <nl> - typedef interpreter : : Bytecodes Bytecodes ; <nl> + using interpreter : : Bytecode ; <nl> + using interpreter : : Bytecodes ; <nl> if ( Bytecodes : : IsWithoutExternalSideEffects ( bytecode ) ) return true ; <nl> if ( Bytecodes : : IsCallOrConstruct ( bytecode ) ) return true ; <nl> if ( Bytecodes : : IsJumpIfToBoolean ( bytecode ) ) return true ; <nl> DebugInfo : : SideEffectState BuiltinGetSideEffectState ( Builtins : : Name id ) { <nl> } <nl> <nl> bool BytecodeRequiresRuntimeCheck ( interpreter : : Bytecode bytecode ) { <nl> - typedef interpreter : : Bytecode Bytecode ; <nl> + using interpreter : : Bytecode ; <nl> switch ( bytecode ) { <nl> case Bytecode : : kStaNamedProperty : <nl> case Bytecode : : kStaNamedPropertyNoFeedback : <nl> mmm a / src / debug / debug - scopes . h <nl> ppp b / src / debug / debug - scopes . 
h <nl> class ScopeIterator { <nl> <nl> void UnwrapEvaluationContext ( ) ; <nl> <nl> - typedef std : : function < bool ( Handle < String > name , Handle < Object > value ) > <nl> - Visitor ; <nl> + using Visitor = <nl> + std : : function < bool ( Handle < String > name , Handle < Object > value ) > ; <nl> <nl> Handle < JSObject > WithContextExtension ( ) ; <nl> <nl> mmm a / src / debug / debug . h <nl> ppp b / src / debug / debug . h <nl> class V8_EXPORT_PRIVATE Debug { <nl> void ClearAllDebuggerHints ( ) ; <nl> <nl> / / Wraps logic for clearing and maybe freeing all debug infos . <nl> - typedef std : : function < void ( Handle < DebugInfo > ) > DebugInfoClearFunction ; <nl> + using DebugInfoClearFunction = std : : function < void ( Handle < DebugInfo > ) > ; <nl> void ClearAllDebugInfos ( const DebugInfoClearFunction & clear_function ) ; <nl> <nl> void FindDebugInfo ( Handle < DebugInfo > debug_info , DebugInfoListNode * * prev , <nl> mmm a / src / debug / interface - types . h <nl> ppp b / src / debug / interface - types . h <nl> class ConsoleDelegate { <nl> virtual ~ ConsoleDelegate ( ) = default ; <nl> } ; <nl> <nl> - typedef int BreakpointId ; <nl> + using BreakpointId = int ; <nl> <nl> } / / namespace debug <nl> } / / namespace v8 <nl>
[ debug ] [ cleanup ] Using ' using ' instead of ' typedef '
v8/v8
ede4557491e5b5ac42697f63180d409875b65543
2019-04-09T11:02:28Z
mmm a / jstests / aggregation / expressions / date_from_string . js <nl> ppp b / jstests / aggregation / expressions / date_from_string . js <nl> load ( " jstests / aggregation / extras / utils . js " ) ; / / For assertErrorCode <nl> tojson ( testCase ) ) ; <nl> } ) ; <nl> <nl> + / * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm * / <nl> + / * Normal format tests with UTC offset . * / <nl> + <nl> + coll . drop ( ) ; <nl> + assert . writeOK ( coll . insert ( { _id : 0 } ) ) ; <nl> + <nl> + testCases = [ <nl> + { expect : " 2017 - 07 - 04T10 : 56 : 02Z " , inputString : " 2017 - 07 - 04T11 : 56 . 02 " } , <nl> + { expect : " 2017 - 07 - 04T10 : 56 : 02 . 813Z " , inputString : " 2017 - 07 - 04T11 : 56 . 02 . 813 " } , <nl> + { expect : " 2017 - 07 - 04T10 : 56 : 02 . 810Z " , inputString : " 2017 - 07 - 04T11 : 56 . 02 . 81 " } , <nl> + { expect : " 2017 - 07 - 04T10 : 56 : 02 . 800Z " , inputString : " 2017 - 07 - 04T11 : 56 . 02 . 8 " } , <nl> + ] ; <nl> + testCases . forEach ( function ( testCase ) { <nl> + assert . eq ( [ { _id : 0 , date : ISODate ( testCase . expect ) } ] , <nl> + coll . aggregate ( { <nl> + $ project : { <nl> + date : { <nl> + $ dateFromString : <nl> + { dateString : testCase . inputString , timezone : " + 01 : 00 " } <nl> + } <nl> + } <nl> + } ) <nl> + . toArray ( ) , <nl> + tojson ( testCase ) ) ; <nl> + } ) ; <nl> + <nl> / * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm * / <nl> / * Normal format tests from data . * / <nl> <nl> load ( " jstests / aggregation / extras / utils . js " ) ; / / For assertErrorCode <nl> { _id : 3 , dateString : " 1960 - 07 - 10T12 : 35 : 37 . 513 " , timezone : " Europe / London " } , <nl> { _id : 4 , dateString : " 2017 - 07 - 06T12 : 35 : 37 . 513 " , timezone : " America / Los_Angeles " } , <nl> { _id : 5 , dateString : " 2017 - 07 - 06T12 : 35 : 37 . 
513 " , timezone : " Europe / Paris " } , <nl> + { _id : 6 , dateString : " 2017 - 07 - 06T12 : 35 : 37 . 513 " , timezone : " + 04 : 00 " } , <nl> ] ) ) ; <nl> <nl> assert . eq ( <nl> load ( " jstests / aggregation / extras / utils . js " ) ; / / For assertErrorCode <nl> { " _id " : 3 , " date " : ISODate ( " 1960 - 07 - 10T11 : 35 : 37 . 513Z " ) } , <nl> { " _id " : 4 , " date " : ISODate ( " 2017 - 07 - 06T19 : 35 : 37 . 513Z " ) } , <nl> { " _id " : 5 , " date " : ISODate ( " 2017 - 07 - 06T10 : 35 : 37 . 513Z " ) } , <nl> + { " _id " : 6 , " date " : ISODate ( " 2017 - 07 - 06T08 : 35 : 37 . 513Z " ) } , <nl> ] , <nl> coll . aggregate ( [ <nl> { <nl> load ( " jstests / aggregation / extras / utils . js " ) ; / / For assertErrorCode <nl> assert . writeOK ( coll . insert ( [ <nl> { _id : 0 , timezone : " Europe / London " } , <nl> { _id : 1 , timezone : " America / New_York " } , <nl> + { _id : 2 , timezone : " - 05 : 00 " } , <nl> ] ) ) ; <nl> <nl> assert . eq ( <nl> [ <nl> { " _id " : 0 , " date " : ISODate ( " 2017 - 07 - 19T17 : 52 : 35 . 199Z " ) } , <nl> { " _id " : 1 , " date " : ISODate ( " 2017 - 07 - 19T22 : 52 : 35 . 199Z " ) } , <nl> + { " _id " : 2 , " date " : ISODate ( " 2017 - 07 - 19T23 : 52 : 35 . 199Z " ) } , <nl> ] , <nl> coll . aggregate ( [ <nl> { <nl> mmm a / src / mongo / db / pipeline / expression_test . cpp <nl> ppp b / src / mongo / db / pipeline / expression_test . cpp <nl> TEST_F ( ExpressionDateFromStringTest , RejectsTimeZoneInStringAndArgument ) { <nl> ASSERT_THROWS_CODE ( dateExp - > evaluate ( { } ) , UserException , 40554 ) ; <nl> } <nl> <nl> + TEST_F ( ExpressionDateFromStringTest , ReadWithUTCOffset ) { <nl> + auto expCtx = getExpCtx ( ) ; <nl> + <nl> + auto spec = BSON ( " $ dateFromString " < < BSON ( " dateString " <nl> + < < " 2017 - 07 - 28T10 : 47 : 52 . 
912 " <nl> + < < " timezone " <nl> + < < " - 01 : 00 " ) ) ; <nl> + auto dateExp = Expression : : parseExpression ( expCtx , spec , expCtx - > variablesParseState ) ; <nl> + auto dateVal = Date_t : : fromMillisSinceEpoch ( 1501242472912 ) ; <nl> + ASSERT_VALUE_EQ ( Value ( dateVal ) , dateExp - > evaluate ( Document { } ) ) ; <nl> + <nl> + spec = BSON ( " $ dateFromString " < < BSON ( " dateString " <nl> + < < " 2017 - 07 - 28T10 : 47 : 52 . 912 " <nl> + < < " timezone " <nl> + < < " + 01 : 00 " ) ) ; <nl> + dateExp = Expression : : parseExpression ( expCtx , spec , expCtx - > variablesParseState ) ; <nl> + dateVal = Date_t : : fromMillisSinceEpoch ( 1501235272912 ) ; <nl> + ASSERT_VALUE_EQ ( Value ( dateVal ) , dateExp - > evaluate ( Document { } ) ) ; <nl> + <nl> + spec = BSON ( " $ dateFromString " < < BSON ( " dateString " <nl> + < < " 2017 - 07 - 28T10 : 47 : 52 . 912 " <nl> + < < " timezone " <nl> + < < " + 0445 " ) ) ; <nl> + dateExp = Expression : : parseExpression ( expCtx , spec , expCtx - > variablesParseState ) ; <nl> + dateVal = Date_t : : fromMillisSinceEpoch ( 1501221772912 ) ; <nl> + ASSERT_VALUE_EQ ( Value ( dateVal ) , dateExp - > evaluate ( Document { } ) ) ; <nl> + <nl> + spec = BSON ( " $ dateFromString " < < BSON ( " dateString " <nl> + < < " 2017 - 07 - 28T10 : 47 : 52 . 912 " <nl> + < < " timezone " <nl> + < < " + 10 : 45 " ) ) ; <nl> + dateExp = Expression : : parseExpression ( expCtx , spec , expCtx - > variablesParseState ) ; <nl> + dateVal = Date_t : : fromMillisSinceEpoch ( 1501200172912 ) ; <nl> + ASSERT_VALUE_EQ ( Value ( dateVal ) , dateExp - > evaluate ( Document { } ) ) ; <nl> + <nl> + spec = BSON ( " $ dateFromString " < < BSON ( " dateString " <nl> + < < " 1945 - 07 - 28T10 : 47 : 52 . 
912 " <nl> + < < " timezone " <nl> + < < " - 08 : 00 " ) ) ; <nl> + dateExp = Expression : : parseExpression ( expCtx , spec , expCtx - > variablesParseState ) ; <nl> + dateVal = Date_t : : fromMillisSinceEpoch ( - 770879527088 ) ; <nl> + ASSERT_VALUE_EQ ( Value ( dateVal ) , dateExp - > evaluate ( Document { } ) ) ; <nl> + } <nl> + <nl> } / / namespace ExpressionDateFromStringTest <nl> <nl> class All : public Suite { <nl> mmm a / src / mongo / db / query / datetime / date_time_support . cpp <nl> ppp b / src / mongo / db / query / datetime / date_time_support . cpp <nl> Date_t TimeZoneDatabase : : fromString ( StringData dateString , boost : : optional < TimeZ <nl> " at the same time " ) ; <nl> break ; <nl> } <nl> - <nl> - / / _tzInfo , although private , can be accessed because TimeZoneDatabase is a friend of <nl> - / / TimeZone . <nl> - timelib_update_ts ( parsedTime . get ( ) , tz - > _tzInfo . get ( ) ) ; <nl> - } else { <nl> - timelib_update_ts ( parsedTime . get ( ) , nullptr ) ; <nl> } <nl> <nl> - timelib_unixtime2local ( parsedTime . get ( ) , parsedTime - > sse ) ; <nl> + tz - > adjustTimeZone ( parsedTime . get ( ) ) ; <nl> <nl> return Date_t : : fromMillisSinceEpoch ( <nl> durationCount < Milliseconds > ( Seconds ( parsedTime - > sse ) + Microseconds ( parsedTime - > us ) ) ) ; <nl> TimeZone TimeZoneDatabase : : getTimeZone ( StringData timeZoneId ) const { <nl> str : : stream ( ) < < " unrecognized time zone identifier : \ " " < < timeZoneId < < " \ " " ) ; <nl> } <nl> <nl> - void TimeZone : : adjustTimeZone ( timelib_time * t ) const { <nl> - if ( _tzInfo ) { <nl> - timelib_set_timezone ( t , _tzInfo . get ( ) ) ; <nl> - } else if ( durationCount < Seconds > ( _utcOffset ) ) { <nl> - timelib_set_timezone_from_offset ( t , - durationCount < Seconds > ( _utcOffset ) ) ; <nl> + void TimeZone : : adjustTimeZone ( timelib_time * timelibTime ) const { <nl> + if ( isTimeZoneIDZone ( ) ) { <nl> + timelib_set_timezone ( timelibTime , _tzInfo . 
get ( ) ) ; <nl> + } else if ( isUtcOffsetZone ( ) ) { <nl> + timelib_set_timezone_from_offset ( timelibTime , - durationCount < Seconds > ( _utcOffset ) ) ; <nl> } <nl> - timelib_update_ts ( t , nullptr ) ; <nl> - timelib_update_from_sse ( t ) ; <nl> + timelib_update_ts ( timelibTime , nullptr ) ; <nl> + timelib_update_from_sse ( timelibTime ) ; <nl> } <nl> <nl> Date_t TimeZone : : createFromDateParts ( <nl> mmm a / src / mongo / db / query / datetime / date_time_support . h <nl> ppp b / src / mongo / db / query / datetime / date_time_support . h <nl> namespace mongo { <nl> * for the hour , minute , or second of a date , even when given the same date . <nl> * / <nl> class TimeZone { <nl> - friend class TimeZoneDatabase ; <nl> <nl> public : <nl> / * * <nl> class TimeZone { <nl> * Returns whether this is the zone representing UTC . <nl> * / <nl> bool isUtcZone ( ) const { <nl> - return _tzInfo = = nullptr ; <nl> + return ( _tzInfo = = nullptr & & ! durationCount < Seconds > ( _utcOffset ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Returns whether this is a zone representing a UTC offset , like " + 04 : 00 " . <nl> + * / <nl> + bool isUtcOffsetZone ( ) const { <nl> + return durationCount < Seconds > ( _utcOffset ) ! = 0 ; <nl> + } <nl> + <nl> + / * * <nl> + * Returns whether this is a zone representing an Olson time zone , like " Europe / London " . <nl> + * / <nl> + bool isTimeZoneIDZone ( ) const { <nl> + return _tzInfo ! = nullptr ; <nl> } <nl> <nl> / * * <nl> class TimeZone { <nl> * / <nl> Seconds utcOffset ( Date_t ) const ; <nl> <nl> + / * * <nl> + * Adjusts ' timelibTime ' according to this time zone definition . <nl> + * / <nl> + void adjustTimeZone ( timelib_time * timelibTime ) const ; <nl> + <nl> / * * <nl> * Converts a date object to a string according to ' format ' . ' format ' can be any string literal , <nl> * containing 0 or more format specifiers like % Y ( year ) or % d ( day of month ) . 
Callers must pass <nl> class TimeZone { <nl> void operator ( ) ( timelib_tzinfo * tzInfo ) ; <nl> } ; <nl> <nl> - / * * <nl> - * Helper function to apply tzInfo and utcOffset information to the constructed timelib_time * <nl> - * value . <nl> - * / <nl> - void adjustTimeZone ( timelib_time * t ) const ; <nl> - <nl> / / null if this TimeZone represents the default UTC time zone , or a UTC - offset time zone <nl> std : : shared_ptr < timelib_tzinfo > _tzInfo ; <nl> <nl>
SERVER - 29284 Add support for UTC offsets to $ dateFromString
mongodb/mongo
dcb9dbffa65d8ddcf5be80e88868fd11c01397df
2017-07-30T14:40:46Z
mmm a / tools / simulator / frameworks / runtime - src / proj . android / build - cfg . json <nl> ppp b / tools / simulator / frameworks / runtime - src / proj . android / build - cfg . json <nl> <nl> { <nl> " ndk_module_path " : [ <nl> - " . . / . . / cocos2d - x " , <nl> - " . . / . . / cocos2d - x / cocos / " , <nl> - " . . / . . / cocos2d - x / external " , <nl> - " . . / . . / cocos2d - x / cocos / scripting " <nl> + " . . / . . / . . / . . / . . / " , <nl> + " . . / . . / . . / . . / . . / cocos / " , <nl> + " . . / . . / . . / . . / . . / external " , <nl> + " . . / . . / . . / . . / . . / cocos / scripting " <nl> ] , <nl> " copy_resources " : [ <nl> - { <nl> - " from " : " . . / . . / . . / src " , <nl> - " to " : " src " <nl> - } , <nl> - { <nl> - " from " : " . . / . . / . . / res " , <nl> - " to " : " res " <nl> - } <nl> ] , <nl> " must_copy_resources " : [ <nl> { <nl> mmm a / tools / simulator / frameworks / runtime - src / proj . android / jni / Android . mk <nl> ppp b / tools / simulator / frameworks / runtime - src / proj . android / jni / Android . mk <nl> LOCAL_SRC_FILES : = \ <nl> . . / . . / Classes / VisibleRect . cpp \ <nl> . . / . . / Classes / AppDelegate . cpp \ <nl> . . / . . / Classes / ConfigParser . cpp \ <nl> - . . / . . / Classes / ProjectConfig / ProjectConfig . cpp \ <nl> - . . / . . / Classes / ProjectConfig / SimulatorConfig . cpp \ <nl> - . . / . . / Classes / network / CCHTTPRequest . cpp \ <nl> hellolua / Runtime_android . cpp \ <nl> hellolua / main . cpp <nl> <nl> LOCAL_C_INCLUDES : = \ <nl> $ ( LOCAL_PATH ) / . . / . . / Classes / protobuf - lite \ <nl> $ ( LOCAL_PATH ) / . . / . . / Classes / runtime \ <nl> $ ( LOCAL_PATH ) / . . / . . / Classes \ <nl> - $ ( LOCAL_PATH ) / . . / . . / . . / cocos2d - x / external <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . / . . / . . / external \ <nl> + $ ( LOCAL_PATH ) / . . / . . / . . / . . / . . / . . 
/ tools / simulator / libsimulator / lib <nl> <nl> - LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> + LOCAL_STATIC_LIBRARIES : = cocos2d_lua_static <nl> + LOCAL_STATIC_LIBRARIES + = cocos2d_simulator_static <nl> <nl> include $ ( BUILD_SHARED_LIBRARY ) <nl> <nl> $ ( call import - module , scripting / lua - bindings / proj . android ) <nl> + $ ( call import - module , tools / simulator / libsimulator / proj . android ) <nl> mmm a / tools / simulator / frameworks / runtime - src / proj . android / project . properties <nl> ppp b / tools / simulator / frameworks / runtime - src / proj . android / project . properties <nl> <nl> # Project target . <nl> target = android - 10 <nl> <nl> - android . library . reference . 1 = . . / . . / cocos2d - x / cocos / platform / android / java <nl> + android . library . reference . 1 = . . / . . / . . / . . / . . / cocos / platform / android / java <nl> mmm a / tools / simulator / frameworks / runtime - src / proj . ios_mac / simulator . xcodeproj / project . pbxproj <nl> ppp b / tools / simulator / frameworks / runtime - src / proj . ios_mac / simulator . xcodeproj / project . pbxproj <nl> <nl> C00FD49D1938512100C6382D / * Shine_png . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = C00FD4911938512100C6382D / * Shine_png . cpp * / ; } ; <nl> C033B51C191B337200D06937 / * VisibleRect . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = C033B51A191B337200D06937 / * VisibleRect . cpp * / ; } ; <nl> C033B51D191B337200D06937 / * VisibleRect . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = C033B51A191B337200D06937 / * VisibleRect . cpp * / ; } ; <nl> - C03781B918BF655400FE4F13 / * res in Resources * / = { isa = PBXBuildFile ; fileRef = C03781B718BF655400FE4F13 / * res * / ; } ; <nl> - C03781BB18BF655400FE4F13 / * src in Resources * / = { isa = PBXBuildFile ; fileRef = C03781B818BF655400FE4F13 / * src * / ; } ; <nl> C05D1C121923449100B808A4 / * config . 
json in Resources * / = { isa = PBXBuildFile ; fileRef = C05D1C111923449100B808A4 / * config . json * / ; } ; <nl> C05D1C131923449100B808A4 / * config . json in Resources * / = { isa = PBXBuildFile ; fileRef = C05D1C111923449100B808A4 / * config . json * / ; } ; <nl> C0619CD71896894800872C26 / * Runtime_ios - mac . mm in Sources * / = { isa = PBXBuildFile ; fileRef = C0619CD61896894800872C26 / * Runtime_ios - mac . mm * / ; } ; <nl> <nl> 9FD6FC6F1A5D2A820028EDC6 / * ConsoleWindow . xib * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = file . xib ; path = ConsoleWindow . xib ; sourceTree = " < group > " ; } ; <nl> 9FD6FC701A5D2A820028EDC6 / * ConsoleWindowController . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = ConsoleWindowController . h ; sourceTree = " < group > " ; } ; <nl> 9FD6FC711A5D2A820028EDC6 / * ConsoleWindowController . m * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . objc ; path = ConsoleWindowController . m ; sourceTree = " < group > " ; } ; <nl> - 9FFC07051A4A739200AED399 / * cocos2dx_extra . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; name = cocos2dx_extra . h ; path = . . / Classes / cocos2dx_extra . h ; sourceTree = " < group > " ; } ; <nl> 9FFC07061A4A739200AED399 / * lang * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = text ; name = lang ; path = . . / Classes / lang ; sourceTree = " < group > " ; } ; <nl> 9FFC07351A4A764100AED399 / * Base * / = { isa = PBXFileReference ; lastKnownFileType = file . xib ; name = Base ; path = Base . lproj / MainMenu . xib ; sourceTree = " < group > " ; } ; <nl> 9FFC07371A4A765100AED399 / * zh - Hans * / = { isa = PBXFileReference ; lastKnownFileType = file . xib ; name = " zh - Hans " ; path = " zh - Hans . lproj / MainMenu . xib " ; sourceTree = " < group > " ; } ; <nl> <nl> C033B51A191B337200D06937 / * VisibleRect . 
cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; name = VisibleRect . cpp ; path = . . / Classes / VisibleRect . cpp ; sourceTree = " < group > " ; } ; <nl> C033B51B191B337200D06937 / * VisibleRect . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; name = VisibleRect . h ; path = . . / Classes / VisibleRect . h ; sourceTree = " < group > " ; } ; <nl> C03781AE18BF654500FE4F13 / * cocos2d_lua_bindings . xcodeproj * / = { isa = PBXFileReference ; lastKnownFileType = " wrapper . pb - project " ; name = cocos2d_lua_bindings . xcodeproj ; path = " . . / . . / . . / . . / . . / cocos / scripting / lua - bindings / proj . ios_mac / cocos2d_lua_bindings . xcodeproj " ; sourceTree = " < group > " ; } ; <nl> - C03781B718BF655400FE4F13 / * res * / = { isa = PBXFileReference ; lastKnownFileType = folder ; name = res ; path = . . / . . / . . / res ; sourceTree = " < group > " ; } ; <nl> - C03781B818BF655400FE4F13 / * src * / = { isa = PBXFileReference ; lastKnownFileType = folder ; name = src ; path = . . / . . / . . / src ; sourceTree = " < group > " ; } ; <nl> C05D1C111923449100B808A4 / * config . json * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = text . json ; name = config . json ; path = . . / . . / . . / config . json ; sourceTree = " < group > " ; } ; <nl> C0619CD61896894800872C26 / * Runtime_ios - mac . mm * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . objcpp ; path = " Runtime_ios - mac . mm " ; sourceTree = " < group > " ; } ; <nl> C06C3794191A1D1E00617BED / * ConfigParser . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; name = ConfigParser . cpp ; path = . . / Classes / ConfigParser . cpp ; sourceTree = " < group > " ; } ; <nl> <nl> isa = PBXGroup ; <nl> children = ( <nl> 9FFC07381A4A902500AED399 / * CodeIDESupport . 
h * / , <nl> - 9FFC07051A4A739200AED399 / * cocos2dx_extra . h * / , <nl> 9FFC07061A4A739200AED399 / * lang * / , <nl> 15427CE2198F237300DC375D / * lua_module_register . h * / , <nl> C00FD4891938512100C6382D / * runtime * / , <nl> <nl> isa = PBXGroup ; <nl> children = ( <nl> C05D1C111923449100B808A4 / * config . json * / , <nl> - C03781B718BF655400FE4F13 / * res * / , <nl> - C03781B818BF655400FE4F13 / * src * / , <nl> ) ; <nl> name = Resources ; <nl> path = . . / Resources ; <nl> <nl> F293B3C415EB7BE500256477 / * Sources * / , <nl> F293B3C515EB7BE500256477 / * Frameworks * / , <nl> F293B3C615EB7BE500256477 / * Resources * / , <nl> - 3EC988201966433500A45E0E / * ShellScript * / , <nl> ) ; <nl> buildRules = ( <nl> ) ; <nl> <nl> 5023811F17EBBCAC00990C9B / * Icon - 152 . png in Resources * / , <nl> 5023812017EBBCAC00990C9B / * Icon - 57 . png in Resources * / , <nl> 521A8E7019F0C3D200D177D7 / * Default - 667h @ 2x . png in Resources * / , <nl> - C03781B918BF655400FE4F13 / * res in Resources * / , <nl> 5023812217EBBCAC00990C9B / * Icon - 76 . png in Resources * / , <nl> 5091733A17ECE17A00D62437 / * Icon - 80 . png in Resources * / , <nl> 5091733717ECE17A00D62437 / * Icon - 40 . png in Resources * / , <nl> 5023811E17EBBCAC00990C9B / * Icon - 144 . png in Resources * / , <nl> 5023811A17EBBCAC00990C9B / * Default . png in Resources * / , <nl> - C03781BB18BF655400FE4F13 / * src in Resources * / , <nl> 5091733817ECE17A00D62437 / * Icon - 50 . png in Resources * / , <nl> 5023812117EBBCAC00990C9B / * Icon - 72 . png in Resources * / , <nl> C05D1C121923449100B808A4 / * config . 
json in Resources * / , <nl> <nl> } ; <nl> / * End PBXResourcesBuildPhase section * / <nl> <nl> - / * Begin PBXShellScriptBuildPhase section * / <nl> - 3EC988201966433500A45E0E / * ShellScript * / = { <nl> - isa = PBXShellScriptBuildPhase ; <nl> - buildActionMask = 2147483647 ; <nl> - files = ( <nl> - ) ; <nl> - inputPaths = ( <nl> - ) ; <nl> - outputPaths = ( <nl> - ) ; <nl> - runOnlyForDeploymentPostprocessing = 0 ; <nl> - shellPath = / bin / sh ; <nl> - shellScript = " find $ { SRCROOT } / . . / . . / . . / src / - name \ " * \ " - exec touch - cm { } \ \ ; \ nfind $ { SRCROOT } / . . / . . / . . / res / - name \ " * \ " - exec touch - cm { } \ \ ; " ; <nl> - } ; <nl> - / * End PBXShellScriptBuildPhase section * / <nl> - <nl> / * Begin PBXSourcesBuildPhase section * / <nl> 5023813117EBBCE400990C9B / * Sources * / = { <nl> isa = PBXSourcesBuildPhase ; <nl> <nl> GCC_PRECOMPILE_PREFIX_HEADER = YES ; <nl> GCC_PREFIX_HEADER = mac / Prefix . pch ; <nl> GCC_PREPROCESSOR_DEFINITIONS = ( <nl> + " $ ( inherited ) " , <nl> GLFW_EXPOSE_NATIVE_COCOA , <nl> GLFW_EXPOSE_NATIVE_NSGL , <nl> CC_TARGET_OS_MAC , <nl> - " $ ( inherited ) " , <nl> ) ; <nl> HEADER_SEARCH_PATHS = ( <nl> " $ ( SRCROOT ) / . . / Classes / protobuf - lite " , <nl> <nl> GCC_PRECOMPILE_PREFIX_HEADER = YES ; <nl> GCC_PREFIX_HEADER = mac / Prefix . pch ; <nl> GCC_PREPROCESSOR_DEFINITIONS = ( <nl> + " $ ( inherited ) " , <nl> GLFW_EXPOSE_NATIVE_COCOA , <nl> GLFW_EXPOSE_NATIVE_NSGL , <nl> CC_TARGET_OS_MAC , <nl> - " $ ( inherited ) " , <nl> ) ; <nl> HEADER_SEARCH_PATHS = ( <nl> " $ ( SRCROOT ) / . . 
/ Classes / protobuf - lite " , <nl> <nl> USE_FILE32API , <nl> " CC_LUA_ENGINE_ENABLED = 1 " , <nl> " CC_ENABLE_CHIPMUNK_INTEGRATION = 1 " , <nl> - GLFW_EXPOSE_NATIVE_COCOA , <nl> ) ; <nl> GCC_SYMBOLS_PRIVATE_EXTERN = NO ; <nl> GCC_WARN_ABOUT_RETURN_TYPE = YES ; <nl> <nl> CLANG_CXX_LIBRARY = " libc + + " ; <nl> GCC_C_LANGUAGE_STANDARD = c99 ; <nl> GCC_PREPROCESSOR_DEFINITIONS = ( <nl> - NDEBUG , <nl> + DEBUG , <nl> + " COCOS2D_DEBUG = 1 " , <nl> USE_FILE32API , <nl> " CC_LUA_ENGINE_ENABLED = 1 " , <nl> " CC_ENABLE_CHIPMUNK_INTEGRATION = 1 " , <nl> <nl> ) ; <nl> HEADER_SEARCH_PATHS = ( <nl> " $ ( SRCROOT ) / . . / Classes / protobuf - lite " , <nl> - " $ ( SRCROOT ) / . . / Classes / service " , <nl> " $ ( SRCROOT ) / . . / Classes " , <nl> + " $ ( SRCROOT ) / . . / . . / . . / libsimulator / lib " , <nl> ) ; <nl> INFOPLIST_FILE = ios / Info . plist ; <nl> IPHONEOS_DEPLOYMENT_TARGET = 5 . 0 ; <nl> <nl> ) ; <nl> HEADER_SEARCH_PATHS = ( <nl> " $ ( SRCROOT ) / . . / Classes / protobuf - lite " , <nl> - " $ ( SRCROOT ) / . . / Classes / service " , <nl> " $ ( SRCROOT ) / . . / Classes " , <nl> + " $ ( SRCROOT ) / . . / . . / . . / libsimulator / lib " , <nl> ) ; <nl> INFOPLIST_FILE = ios / Info . plist ; <nl> IPHONEOS_DEPLOYMENT_TARGET = 5 . 0 ; <nl>
support android
cocos2d/cocos2d-x
f1f4e5f3c277566ed2fe5c89d46f7dc8cd7dc7f6
2015-01-07T14:19:46Z
mmm a / modules / ocl / src / arithm . cpp <nl> ppp b / modules / ocl / src / arithm . cpp <nl> namespace cv <nl> extern const char * arithm_bitwise_binary_scalar ; <nl> extern const char * arithm_bitwise_binary_scalar_mask ; <nl> extern const char * arithm_bitwise_not ; <nl> - extern const char * arithm_compare_eq ; <nl> - extern const char * arithm_compare_ne ; <nl> + extern const char * arithm_compare ; <nl> extern const char * arithm_transpose ; <nl> extern const char * arithm_flip ; <nl> extern const char * arithm_flip_rc ; <nl> void cv : : ocl : : absdiff ( const oclMat & src1 , const Scalar & src2 , oclMat & dst ) <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / compare / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - static void compare_run ( const oclMat & src1 , const oclMat & src2 , oclMat & dst , string kernelName , const char * * kernelString ) <nl> + <nl> + static void compare_run ( const oclMat & src1 , const oclMat & src2 , oclMat & dst , int cmpOp , <nl> + string kernelName , const char * * kernelString ) <nl> { <nl> - dst . create ( src1 . size ( ) , CV_8UC1 ) ; <nl> - CV_Assert ( src1 . oclchannels ( ) = = 1 ) ; <nl> CV_Assert ( src1 . type ( ) = = src2 . type ( ) ) ; <nl> - Context * clCxt = src1 . clCxt ; <nl> + dst . create ( src1 . size ( ) , CV_8UC1 ) ; <nl> + Context * clCxt = src1 . clCxt ; <nl> + <nl> int depth = src1 . depth ( ) ; <nl> - int vector_lengths [ 7 ] = { 4 , 0 , 4 , 4 , 4 , 4 , 4 } ; <nl> - size_t vector_length = vector_lengths [ depth ] ; <nl> - int offset_cols = ( dst . offset / dst . elemSize1 ( ) ) & ( vector_length - 1 ) ; <nl> - int cols = divUp ( dst . 
cols + offset_cols , vector_length ) ; <nl> size_t localThreads [ 3 ] = { 64 , 4 , 1 } ; <nl> - size_t globalThreads [ 3 ] = { cols , dst . rows , 1 } ; <nl> + size_t globalThreads [ 3 ] = { dst . cols , dst . rows , 1 } ; <nl> + <nl> + int src1step1 = src1 . step1 ( ) , src1offset1 = src1 . offset / src1 . elemSize1 ( ) ; <nl> + int src2step1 = src2 . step1 ( ) , src2offset1 = src2 . offset / src2 . elemSize1 ( ) ; <nl> + int dststep1 = dst . step1 ( ) , dstoffset1 = dst . offset / dst . elemSize1 ( ) ; <nl> + <nl> + const char * const typeMap [ ] = { " uchar " , " char " , " ushort " , " short " , " int " , " float " , " double " } ; <nl> + const char * operationMap [ ] = { " = = " , " > " , " > = " , " < " , " < = " , " ! = " } ; <nl> + std : : string buildOptions = format ( " - D T = % s - D Operation = % s " , typeMap [ depth ] , operationMap [ cmpOp ] ) ; <nl> <nl> - int dst_step1 = dst . cols * dst . elemSize ( ) ; <nl> vector < pair < size_t , const void * > > args ; <nl> args . push_back ( make_pair ( sizeof ( cl_mem ) , ( void * ) & src1 . data ) ) ; <nl> - args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src1 . step ) ) ; <nl> - args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src1 . offset ) ) ; <nl> + args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src1step1 ) ) ; <nl> + args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src1offset1 ) ) ; <nl> args . push_back ( make_pair ( sizeof ( cl_mem ) , ( void * ) & src2 . data ) ) ; <nl> - args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src2 . step ) ) ; <nl> - args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src2 . offset ) ) ; <nl> + args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src2step1 ) ) ; <nl> + args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src2offset1 ) ) ; <nl> args . push_back ( make_pair ( sizeof ( cl_mem ) , ( void * ) & dst . data ) ) ; <nl> - args . 
push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & dst . step ) ) ; <nl> - args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & dst . offset ) ) ; <nl> + args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & dststep1 ) ) ; <nl> + args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & dstoffset1 ) ) ; <nl> + args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src1 . cols ) ) ; <nl> args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & src1 . rows ) ) ; <nl> - args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & cols ) ) ; <nl> - args . push_back ( make_pair ( sizeof ( cl_int ) , ( void * ) & dst_step1 ) ) ; <nl> - openCLExecuteKernel ( clCxt , kernelString , kernelName , globalThreads , localThreads , args , - 1 , depth ) ; <nl> + <nl> + openCLExecuteKernel ( clCxt , kernelString , kernelName , globalThreads , localThreads , <nl> + args , - 1 , - 1 , buildOptions . c_str ( ) ) ; <nl> } <nl> <nl> void cv : : ocl : : compare ( const oclMat & src1 , const oclMat & src2 , oclMat & dst , int cmpOp ) <nl> { <nl> - if ( ! src1 . clCxt - > supportsFeature ( Context : : CL_DOUBLE ) & & src1 . type ( ) = = CV_64F ) <nl> + if ( ! src1 . clCxt - > supportsFeature ( Context : : CL_DOUBLE ) & & src1 . 
depth ( ) = = CV_64F ) <nl> { <nl> cout < < " Selected device do not support double " < < endl ; <nl> return ; <nl> } <nl> - string kernelName ; <nl> - const char * * kernelString = NULL ; <nl> - switch ( cmpOp ) <nl> - { <nl> - case CMP_EQ : <nl> - kernelName = " arithm_compare_eq " ; <nl> - kernelString = & arithm_compare_eq ; <nl> - break ; <nl> - case CMP_GT : <nl> - kernelName = " arithm_compare_gt " ; <nl> - kernelString = & arithm_compare_eq ; <nl> - break ; <nl> - case CMP_GE : <nl> - kernelName = " arithm_compare_ge " ; <nl> - kernelString = & arithm_compare_eq ; <nl> - break ; <nl> - case CMP_NE : <nl> - kernelName = " arithm_compare_ne " ; <nl> - kernelString = & arithm_compare_ne ; <nl> - break ; <nl> - case CMP_LT : <nl> - kernelName = " arithm_compare_lt " ; <nl> - kernelString = & arithm_compare_ne ; <nl> - break ; <nl> - case CMP_LE : <nl> - kernelName = " arithm_compare_le " ; <nl> - kernelString = & arithm_compare_ne ; <nl> - break ; <nl> - default : <nl> - CV_Error ( CV_StsBadArg , " Unknown comparison method " ) ; <nl> - } <nl> - compare_run ( src1 , src2 , dst , kernelName , kernelString ) ; <nl> + <nl> + CV_Assert ( src1 . channels ( ) = = 1 & & src2 . channels ( ) = = 1 ) ; <nl> + CV_Assert ( cmpOp > = CMP_EQ & & cmpOp < = CMP_NE ) ; <nl> + <nl> + compare_run ( src1 , src2 , dst , cmpOp , " arithm_compare " , & arithm_compare ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> new file mode 100644 <nl> index 00000000000 . . d0842db1801 <nl> mmm / dev / null <nl> ppp b / modules / ocl / src / opencl / arithm_compare . cl <nl> <nl> + / * M / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / <nl> + / / IMPORTANT : READ BEFORE DOWNLOADING , COPYING , INSTALLING OR USING . 
<nl> + / / <nl> + / / By downloading , copying , installing or using the software you agree to this license . <nl> + / / If you do not agree to this license , do not download , install , <nl> + / / copy or use the software . <nl> + / / <nl> + / / <nl> + / / License Agreement <nl> + / / For Open Source Computer Vision Library <nl> + / / <nl> + / / Copyright ( C ) 2010 - 2012 , Institute Of Software Chinese Academy Of Science , all rights reserved . <nl> + / / Copyright ( C ) 2010 - 2012 , Advanced Micro Devices , Inc . , all rights reserved . <nl> + / / Third party copyrights are property of their respective owners . <nl> + / / <nl> + / / @ Authors <nl> + / / Jia Haipeng , jiahaipeng95 @ gmail . com <nl> + / / <nl> + / / Redistribution and use in source and binary forms , with or without modification , <nl> + / / are permitted provided that the following conditions are met : <nl> + / / <nl> + / / * Redistribution ' s of source code must retain the above copyright notice , <nl> + / / this list of conditions and the following disclaimer . <nl> + / / <nl> + / / * Redistribution ' s in binary form must reproduce the above copyright notice , <nl> + / / this list of conditions and the following disclaimer in the documentation <nl> + / / and / or other oclMaterials provided with the distribution . <nl> + / / <nl> + / / * The name of the copyright holders may not be used to endorse or promote products <nl> + / / derived from this software without specific prior written permission . <nl> + / / <nl> + / / This software is provided by the copyright holders and contributors as is and <nl> + / / any express or implied warranties , including , but not limited to , the implied <nl> + / / warranties of merchantability and fitness for a particular purpose are disclaimed . 
<nl> + / / In no event shall the Intel Corporation or contributors be liable for any direct , <nl> + / / indirect , incidental , special , exemplary , or consequential damages <nl> + / / ( including , but not limited to , procurement of substitute goods or services ; <nl> + / / loss of use , data , or profits ; or business interruption ) however caused <nl> + / / and on any theory of liability , whether in contract , strict liability , <nl> + / / or tort ( including negligence or otherwise ) arising in any way out of <nl> + / / the use of this software , even if advised of the possibility of such damage . <nl> + / / <nl> + / / M * / <nl> + <nl> + # if defined ( DOUBLE_SUPPORT ) <nl> + # ifdef cl_khr_fp64 <nl> + # pragma OPENCL EXTENSION cl_khr_fp64 : enable <nl> + # elif defined ( cl_amd_fp64 ) <nl> + # pragma OPENCL EXTENSION cl_amd_fp64 : enable <nl> + # endif <nl> + # endif <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / addWeighted / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + __kernel void arithm_compare ( __global T * src1 , int src1_step1 , int src1_offset1 , <nl> + __global T * src2 , int src2_step1 , int src2_offset1 , <nl> + __global uchar * dst , int dst_step1 , int dst_offset1 , <nl> + int cols1 , int rows ) <nl> + { <nl> + int x = get_global_id ( 0 ) ; <nl> + int y = get_global_id ( 1 ) ; <nl> + <nl> + if ( x < cols1 & & y < rows ) <nl> + { <nl> + int src1_index = mad24 ( y , src1_step1 , x + src1_offset1 ) ; <nl> + int 
src2_index = mad24 ( y , src2_step1 , x + src2_offset1 ) ; <nl> + int dst_index = mad24 ( y , dst_step1 , x + dst_offset1 ) ; <nl> + <nl> + dst [ dst_index ] = convert_uchar ( src1 [ src1_index ] Operation src2 [ src2_index ] ? 255 : 0 ) ; <nl> + } <nl> + } <nl> deleted file mode 100644 <nl> index 16a56acef31 . . 00000000000 <nl> mmm a / modules / ocl / src / opencl / arithm_compare_eq . cl <nl> ppp / dev / null <nl> <nl> - / * M / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / <nl> - / / IMPORTANT : READ BEFORE DOWNLOADING , COPYING , INSTALLING OR USING . <nl> - / / <nl> - / / By downloading , copying , installing or using the software you agree to this license . <nl> - / / If you do not agree to this license , do not download , install , <nl> - / / copy or use the software . <nl> - / / <nl> - / / <nl> - / / License Agreement <nl> - / / For Open Source Computer Vision Library <nl> - / / <nl> - / / Copyright ( C ) 2010 - 2012 , Institute Of Software Chinese Academy Of Science , all rights reserved . <nl> - / / Copyright ( C ) 2010 - 2012 , Advanced Micro Devices , Inc . , all rights reserved . <nl> - / / Third party copyrights are property of their respective owners . <nl> - / / <nl> - / / @ Authors <nl> - / / Jiang Liyuan , jlyuan001 . good @ 163 . com <nl> - / / <nl> - / / Redistribution and use in source and binary forms , with or without modification , <nl> - / / are permitted provided that the following conditions are met : <nl> - / / <nl> - / / * Redistribution ' s of source code must retain the above copyright notice , <nl> - / / this list of conditions and the following disclaimer . 
<nl> - / / <nl> - / / * Redistribution ' s in binary form must reproduce the above copyright notice , <nl> - / / this list of conditions and the following disclaimer in the documentation <nl> - / / and / or other oclMaterials provided with the distribution . <nl> - / / <nl> - / / * The name of the copyright holders may not be used to endorse or promote products <nl> - / / derived from this software without specific prior written permission . <nl> - / / <nl> - / / This software is provided by the copyright holders and contributors as is and <nl> - / / any express or implied warranties , including , but not limited to , the implied <nl> - / / warranties of merchantability and fitness for a particular purpose are disclaimed . <nl> - / / In no event shall the Intel Corporation or contributors be liable for any direct , <nl> - / / indirect , incidental , special , exemplary , or consequential damages <nl> - / / ( including , but not limited to , procurement of substitute goods or services ; <nl> - / / loss of use , data , or profits ; or business interruption ) however caused <nl> - / / and on any theory of liability , whether in contract , strict liability , <nl> - / / or tort ( including negligence or otherwise ) arising in any way out of <nl> - / / the use of this software , even if advised of the possibility of such damage . 
<nl> - / / <nl> - / / M * / <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - # ifdef cl_khr_fp64 <nl> - # pragma OPENCL EXTENSION cl_khr_fp64 : enable <nl> - # elif defined ( cl_amd_fp64 ) <nl> - # pragma OPENCL EXTENSION cl_amd_fp64 : enable <nl> - # endif <nl> - # endif <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / Compare EQ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - __kernel void arithm_compare_eq_D0 ( __global uchar * src1 , int src1_step , int src1_offset , <nl> - __global uchar * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( dst_offset & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , x + src1_offset - dst_align ) ; <nl> - int src2_index = mad24 ( y , src2_step , x + src2_offset - dst_align ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - uchar4 src1_data = vload4 ( 0 , src1 + src1_index_fix ) ; <nl> - uchar4 src2_data = vload4 ( 0 , src2 + src2_index_fix ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data = = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - __kernel void arithm_compare_ne_D2 ( __global ushort * src1 , int src1_step , int src1_offset , <nl> - __global ushort * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - ushort4 src1_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - ushort4 src2_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data = = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - __kernel void arithm_compare_eq_D3 ( __global short * src1 , int src1_step , int src1_offset , <nl> - __global short * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - short4 src1_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - short4 src2_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data = = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_eq_D4 ( __global int * src1 , int src1_step , int src1_offset , <nl> - __global int * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - <nl> - int4 src1_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - int4 src2_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data = = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_eq_D5 ( __global float * src1 , int src1_step , int src1_offset , <nl> - __global float * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - float4 src1_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - float4 src2_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src2_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data = = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - __kernel void arithm_compare_eq_D6 ( __global double * src1 , int src1_step , int src1_offset , <nl> - __global double * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 3 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 3 ) + src1_offset - ( dst_align < < 3 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 3 ) + src2_offset - ( dst_align < < 3 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - double4 src1_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - double4 src2_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data = = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - # endif <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Compare GT * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - __kernel void arithm_compare_gt_D0 ( __global uchar * src1 , int src1_step , int src1_offset , <nl> - __global uchar * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( dst_offset & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , x + src1_offset - dst_align ) ; <nl> - int src2_index = mad24 ( y , src2_step , x + src2_offset - dst_align ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - uchar4 src1_data = vload4 ( 0 , src1 + src1_index_fix ) ; <nl> - uchar4 src2_data = vload4 ( 0 , src2 + src2_index_fix ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_gt_D2 ( __global ushort * src1 , int src1_step , int src1_offset , <nl> - __global ushort * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - ushort4 src1_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - ushort4 src2_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_gt_D3 ( __global short * src1 , int src1_step , int src1_offset , <nl> - __global short * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - short4 src1_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - short4 src2_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_gt_D4 ( __global int * src1 , int src1_step , int src1_offset , <nl> - __global int * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - <nl> - int4 src1_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - int4 src2_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_gt_D5 ( __global float * src1 , int src1_step , int src1_offset , <nl> - __global float * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - float4 src1_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - float4 src2_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - __kernel void arithm_compare_gt_D6 ( __global double * src1 , int src1_step , int src1_offset , <nl> - __global double * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 3 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 3 ) + src1_offset - ( dst_align < < 3 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 3 ) + src2_offset - ( dst_align < < 3 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - double4 src1_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - double4 src2_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - # endif <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Compare GE * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - __kernel void arithm_compare_ge_D0 ( __global uchar * src1 , int src1_step , int src1_offset , <nl> - __global uchar * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( dst_offset & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , x + src1_offset - dst_align ) ; <nl> - int src2_index = mad24 ( y , src2_step , x + src2_offset - dst_align ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - uchar4 src1_data = vload4 ( 0 , src1 + src1_index_fix ) ; <nl> - uchar4 src2_data = vload4 ( 0 , src2 + src2_index_fix ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_ge_D2 ( __global ushort * src1 , int src1_step , int src1_offset , <nl> - __global ushort * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - ushort4 src1_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - ushort4 src2_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_ge_D3 ( __global short * src1 , int src1_step , int src1_offset , <nl> - __global short * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - short4 src1_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - short4 src2_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_ge_D4 ( __global int * src1 , int src1_step , int src1_offset , <nl> - __global int * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - <nl> - int4 src1_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - int4 src2_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_ge_D5 ( __global float * src1 , int src1_step , int src1_offset , <nl> - __global float * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - float4 src1_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - float4 src2_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - __kernel void arithm_compare_ge_D6 ( __global double * src1 , int src1_step , int src1_offset , <nl> - __global double * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 3 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 3 ) + src1_offset - ( dst_align < < 3 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 3 ) + src2_offset - ( dst_align < < 3 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - double4 src1_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - double4 src2_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data > = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - # endif <nl> deleted file mode 100644 <nl> index fb5859d3b23 . . 00000000000 <nl> mmm a / modules / ocl / src / opencl / arithm_compare_ne . cl <nl> ppp / dev / null <nl> <nl> - / * M / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / <nl> - / / IMPORTANT : READ BEFORE DOWNLOADING , COPYING , INSTALLING OR USING . <nl> - / / <nl> - / / By downloading , copying , installing or using the software you agree to this license . <nl> - / / If you do not agree to this license , do not download , install , <nl> - / / copy or use the software . <nl> - / / <nl> - / / <nl> - / / License Agreement <nl> - / / For Open Source Computer Vision Library <nl> - / / <nl> - / / Copyright ( C ) 2010 - 2012 , Institute Of Software Chinese Academy Of Science , all rights reserved . <nl> - / / Copyright ( C ) 2010 - 2012 , Advanced Micro Devices , Inc . , all rights reserved . <nl> - / / Third party copyrights are property of their respective owners . <nl> - / / <nl> - / / @ Authors <nl> - / / Jiang Liyuan , jlyuan001 . good @ 163 . 
com <nl> - / / <nl> - / / Redistribution and use in source and binary forms , with or without modification , <nl> - / / are permitted provided that the following conditions are met : <nl> - / / <nl> - / / * Redistribution ' s of source code must retain the above copyright notice , <nl> - / / this list of conditions and the following disclaimer . <nl> - / / <nl> - / / * Redistribution ' s in binary form must reproduce the above copyright notice , <nl> - / / this list of conditions and the following disclaimer in the documentation <nl> - / / and / or other oclMaterials provided with the distribution . <nl> - / / <nl> - / / * The name of the copyright holders may not be used to endorse or promote products <nl> - / / derived from this software without specific prior written permission . <nl> - / / <nl> - / / This software is provided by the copyright holders and contributors as is and <nl> - / / any express or implied warranties , including , but not limited to , the implied <nl> - / / warranties of merchantability and fitness for a particular purpose are disclaimed . <nl> - / / In no event shall the Intel Corporation or contributors be liable for any direct , <nl> - / / indirect , incidental , special , exemplary , or consequential damages <nl> - / / ( including , but not limited to , procurement of substitute goods or services ; <nl> - / / loss of use , data , or profits ; or business interruption ) however caused <nl> - / / and on any theory of liability , whether in contract , strict liability , <nl> - / / or tort ( including negligence or otherwise ) arising in any way out of <nl> - / / the use of this software , even if advised of the possibility of such damage . 
<nl> - / / <nl> - / / M * / <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - # ifdef cl_khr_fp64 <nl> - # pragma OPENCL EXTENSION cl_khr_fp64 : enable <nl> - # elif defined ( cl_amd_fp64 ) <nl> - # pragma OPENCL EXTENSION cl_amd_fp64 : enable <nl> - # endif <nl> - # endif <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Compare NE * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - __kernel void arithm_compare_ne_D0 ( __global uchar * src1 , int src1_step , int src1_offset , <nl> - __global uchar * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( dst_offset & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , x + src1_offset - dst_align ) ; <nl> - int src2_index = mad24 ( y , src2_step , x + src2_offset - dst_align ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - uchar4 src1_data = vload4 ( 0 , src1 + src1_index_fix ) ; <nl> - uchar4 src2_data = vload4 ( 0 , src2 + src2_index_fix ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . 
yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data ! = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_ne_D2 ( __global ushort * src1 , int src1_step , int src1_offset , <nl> - __global ushort * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - ushort4 src1_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - ushort4 src2_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data ! = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_ne_D3 ( __global short * src1 , int src1_step , int src1_offset , <nl> - __global short * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - short4 src1_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - short4 src2_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data ! = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_ne_D4 ( __global int * src1 , int src1_step , int src1_offset , <nl> - __global int * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - <nl> - int4 src1_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - int4 src2_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data ! = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_ne_D5 ( __global float * src1 , int src1_step , int src1_offset , <nl> - __global float * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - float4 src1_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - float4 src2_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data ! = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - __kernel void arithm_compare_ne_D6 ( __global double * src1 , int src1_step , int src1_offset , <nl> - __global double * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 3 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 3 ) + src1_offset - ( dst_align < < 3 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 3 ) + src2_offset - ( dst_align < < 3 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - double4 src1_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - double4 src2_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data ! = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - # endif <nl> - <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Compare LT * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - __kernel void arithm_compare_lt_D0 ( __global uchar * src1 , int src1_step , int src1_offset , <nl> - __global uchar * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( dst_offset & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , x + src1_offset - dst_align ) ; <nl> - int src2_index = mad24 ( y , src2_step , x + src2_offset - dst_align ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - uchar4 src1_data = vload4 ( 0 , src1 + src1_index_fix ) ; <nl> - uchar4 src2_data = vload4 ( 0 , src2 + src2_index_fix ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_lt_D2 ( __global ushort * src1 , int src1_step , int src1_offset , <nl> - __global ushort * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - ushort4 src1_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - ushort4 src2_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_lt_D3 ( __global short * src1 , int src1_step , int src1_offset , <nl> - __global short * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - short4 src1_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - short4 src2_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_lt_D4 ( __global int * src1 , int src1_step , int src1_offset , <nl> - __global int * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - <nl> - int4 src1_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - int4 src2_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_lt_D5 ( __global float * src1 , int src1_step , int src1_offset , <nl> - __global float * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - float4 src1_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - float4 src2_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - __kernel void arithm_compare_lt_D6 ( __global double * src1 , int src1_step , int src1_offset , <nl> - __global double * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 3 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 3 ) + src1_offset - ( dst_align < < 3 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 3 ) + src2_offset - ( dst_align < < 3 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - double4 src1_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - double4 src2_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - # endif <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Compare LE * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - __kernel void arithm_compare_le_D0 ( __global uchar * src1 , int src1_step , int src1_offset , <nl> - __global uchar * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( dst_offset & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , x + src1_offset - dst_align ) ; <nl> - int src2_index = mad24 ( y , src2_step , x + src2_offset - dst_align ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - uchar4 src1_data = vload4 ( 0 , src1 + src1_index_fix ) ; <nl> - uchar4 src2_data = vload4 ( 0 , src2 + src2_index_fix ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - uchar4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_le_D2 ( __global ushort * src1 , int src1_step , int src1_offset , <nl> - __global ushort * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - ushort4 src1_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - ushort4 src2_data = vload4 ( 0 , ( __global ushort * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - ushort4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - <nl> - <nl> - __kernel void arithm_compare_le_D3 ( __global short * src1 , int src1_step , int src1_offset , <nl> - __global short * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 1 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 1 ) + src1_offset - ( dst_align < < 1 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 1 ) + src2_offset - ( dst_align < < 1 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - short4 src1_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - short4 src2_data = vload4 ( 0 , ( __global short * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - short4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_le_D4 ( __global int * src1 , int src1_step , int src1_offset , <nl> - __global int * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - <nl> - int4 src1_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src1 + src1_index ) ) ; <nl> - int4 src2_data = vload4 ( 0 , ( __global int * ) ( ( __global char * ) src2 + src2_index ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - int4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . 
w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - __kernel void arithm_compare_le_D5 ( __global float * src1 , int src1_step , int src1_offset , <nl> - __global float * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 2 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 2 ) + src1_offset - ( dst_align < < 2 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 2 ) + src2_offset - ( dst_align < < 2 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 0 : src2_index ; <nl> - float4 src1_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - float4 src2_data = vload4 ( 0 , ( __global float * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - float4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . 
xyzw ; <nl> - } <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - __kernel void arithm_compare_le_D6 ( __global double * src1 , int src1_step , int src1_offset , <nl> - __global double * src2 , int src2_step , int src2_offset , <nl> - __global uchar * dst , int dst_step , int dst_offset , <nl> - int rows , int cols , int dst_step1 ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = get_global_id ( 1 ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - x = x < < 2 ; <nl> - # ifdef dst_align <nl> - # undef dst_align <nl> - # endif <nl> - # define dst_align ( ( dst_offset > > 3 ) & 3 ) <nl> - int src1_index = mad24 ( y , src1_step , ( x < < 3 ) + src1_offset - ( dst_align < < 3 ) ) ; <nl> - int src2_index = mad24 ( y , src2_step , ( x < < 3 ) + src2_offset - ( dst_align < < 3 ) ) ; <nl> - <nl> - int dst_start = mad24 ( y , dst_step , dst_offset ) ; <nl> - int dst_end = mad24 ( y , dst_step , dst_offset + dst_step1 ) ; <nl> - int dst_index = mad24 ( y , dst_step , dst_offset + x & ( int ) 0xfffffffc ) ; <nl> - int src1_index_fix = src1_index < 0 ? 0 : src1_index ; <nl> - int src2_index_fix = src2_index < 0 ? 
0 : src2_index ; <nl> - double4 src1_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src1 + src1_index_fix ) ) ; <nl> - double4 src2_data = vload4 ( 0 , ( __global double * ) ( ( __global char * ) src2 + src2_index_fix ) ) ; <nl> - if ( src1_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src1_index = = - 2 ) ? src1_data . zwxy : src1_data . yzwx ; <nl> - src1_data . xyzw = ( src1_index = = - 1 ) ? src1_data . wxyz : tmp . xyzw ; <nl> - } <nl> - if ( src2_index < 0 ) <nl> - { <nl> - double4 tmp ; <nl> - tmp . xyzw = ( src2_index = = - 2 ) ? src2_data . zwxy : src2_data . yzwx ; <nl> - src2_data . xyzw = ( src2_index = = - 1 ) ? src2_data . wxyz : tmp . xyzw ; <nl> - } <nl> - <nl> - <nl> - uchar4 dst_data = * ( ( __global uchar4 * ) ( dst + dst_index ) ) ; <nl> - uchar4 tmp_data = convert_uchar4 ( ( src1_data < = src2_data ) ) ; <nl> - <nl> - dst_data . x = ( ( dst_index + 0 > = dst_start ) & & ( dst_index + 0 < dst_end ) ) ? tmp_data . x : dst_data . x ; <nl> - dst_data . y = ( ( dst_index + 1 > = dst_start ) & & ( dst_index + 1 < dst_end ) ) ? tmp_data . y : dst_data . y ; <nl> - dst_data . z = ( ( dst_index + 2 > = dst_start ) & & ( dst_index + 2 < dst_end ) ) ? tmp_data . z : dst_data . z ; <nl> - dst_data . w = ( ( dst_index + 3 > = dst_start ) & & ( dst_index + 3 < dst_end ) ) ? tmp_data . w : dst_data . w ; <nl> - <nl> - * ( ( __global uchar4 * ) ( dst + dst_index ) ) = dst_data ; <nl> - } <nl> - } <nl> - # endif <nl>
refactored and extended ocl : : compare
opencv/opencv
073096357657ceb251303dd5162f4a7eecf48846
2013-09-24T09:58:18Z
mmm a / dbms / src / Interpreters / Join . cpp <nl> ppp b / dbms / src / Interpreters / Join . cpp <nl> void Join : : setSampleBlock ( const Block & block ) <nl> if ( kind ! = ASTTableJoin : : Kind : : Left and kind ! = ASTTableJoin : : Kind : : Inner ) <nl> throw Exception ( " ASOF only supports LEFT and INNER as base joins " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> <nl> - if ( key_columns . back ( ) - > sizeOfValueIfFixed ( ) ! = sizeof ( ASOFTimeType ) ) <nl> + const IColumn * asof_column = key_columns . back ( ) ; <nl> + <nl> + if ( auto t = AsofRowRefs : : getType ( asof_column ) ) <nl> + asof_type = * t ; <nl> + else <nl> { <nl> - std : : string msg = " ASOF join column needs to have size " ; <nl> - msg + = std : : to_string ( sizeof ( ASOFTimeType ) ) ; <nl> + std : : string msg = " ASOF join not supported for type " ; <nl> + msg + = asof_column - > getFamilyName ( ) ; <nl> throw Exception ( msg , ErrorCodes : : BAD_TYPE_OF_FIELD ) ; <nl> } <nl> + <nl> key_columns . pop_back ( ) ; <nl> <nl> if ( key_columns . empty ( ) ) <nl> void Join : : setSampleBlock ( const Block & block ) <nl> / / / Therefore , add it back in such that it can be extracted appropriately from the full stored <nl> / / / key_columns and key_sizes <nl> init ( chooseMethod ( key_columns , key_sizes ) ) ; <nl> - key_sizes . push_back ( sizeof ( ASOFTimeType ) ) ; <nl> + key_sizes . push_back ( AsofRowRefs : : getSize ( asof_type ) ) ; <nl> } <nl> else <nl> { <nl> void Join : : setSampleBlock ( const Block & block ) <nl> convertColumnToNullable ( sample_block_with_columns_to_add . 
getByPosition ( i ) ) ; <nl> } <nl> <nl> - void Join : : TSRowRef : : insert ( Join : : ASOFTimeType t , const Block * block , size_t row_num ) <nl> + void Join : : AsofRowRefs : : AsofLookups : : create ( Join : : AsofRowRefs : : AsofType which ) <nl> + { <nl> + switch ( which ) <nl> + { <nl> + # define M ( NAME , TYPE ) \ <nl> + case AsofType : : NAME : NAME = std : : make_unique < typename decltype ( NAME ) : : element_type > ( ) ; break ; <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> + } <nl> + } <nl> + <nl> + void Join : : AsofRowRefs : : create ( AsofType which ) <nl> { <nl> - ts . insert ( std : : pair ( t , RowRef ( block , row_num ) ) ) ; <nl> + type = which ; <nl> + lookups . create ( which ) ; <nl> } <nl> <nl> - std : : string Join : : TSRowRef : : dumpStructure ( ) const <nl> + template < typename T > <nl> + using AsofGetterType = ColumnsHashing : : HashMethodOneNumber < T , T , T , false > ; <nl> + <nl> + void Join : : AsofRowRefs : : insert ( const IColumn * asof_column , const Block * block , size_t row_num , Arena & pool ) <nl> { <nl> - std : : stringstream ss ; <nl> + assert ( ! sorted ) ; <nl> + switch ( type ) <nl> + { <nl> + # define M ( NAME , TYPE ) \ <nl> + case AsofType : : NAME : { \ <nl> + auto asof_getter = AsofGetterType < TYPE > ( asof_column ) ; \ <nl> + auto entry = AsofEntry < TYPE > ( asof_getter . getKey ( row_num , pool ) , RowRef ( block , row_num ) ) ; \ <nl> + lookups . NAME - > push_back ( entry ) ; \ <nl> + break ; \ <nl> + } <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> + } <nl> + } <nl> + <nl> + const Join : : RowRef * Join : : AsofRowRefs : : findAsof ( const IColumn * asof_column , size_t row_num , Arena & pool ) const <nl> + { <nl> + if ( ! sorted ) <nl> + { <nl> + / / sort whenever needed <nl> + switch ( type ) <nl> + { <nl> + # define M ( NAME , TYPE ) \ <nl> + case AsofType : : NAME : std : : sort ( lookups . NAME - > begin ( ) , lookups . 
NAME - > end ( ) ) ; break ; <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> + } <nl> + sorted = true ; <nl> + } <nl> <nl> - for ( auto const & x : ts ) <nl> + switch ( type ) <nl> { <nl> - ss < < " ( t = " < < x . first < < " row_num = " < < x . second . row_num < < " ptr = " < < x . second . block < < " ) , " ; <nl> + # define M ( NAME , TYPE ) \ <nl> + case AsofType : : NAME : { \ <nl> + auto asof_getter = AsofGetterType < TYPE > ( asof_column ) ; \ <nl> + TYPE key = asof_getter . getKey ( row_num , pool ) ; \ <nl> + auto it = std : : upper_bound ( lookups . NAME - > cbegin ( ) , lookups . NAME - > cend ( ) , AsofEntry < TYPE > ( key ) ) ; \ <nl> + if ( it = = lookups . NAME - > cbegin ( ) ) \ <nl> + return nullptr ; \ <nl> + return & ( ( - - it ) - > row_ref ) ; \ <nl> + } <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> } <nl> <nl> - return ss . str ( ) ; <nl> + __builtin_unreachable ( ) ; <nl> } <nl> - size_t Join : : TSRowRef : : size ( ) const <nl> + <nl> + std : : optional < Join : : AsofRowRefs : : AsofType > Join : : AsofRowRefs : : getType ( const IColumn * asof_column ) <nl> { <nl> - return ts . size ( ) ; <nl> + # define M ( NAME , TYPE ) \ <nl> + if ( strcmp ( # TYPE , asof_column - > getFamilyName ( ) ) = = 0 ) \ <nl> + return AsofType : : NAME ; <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> + return { } ; <nl> } <nl> - std : : optional < std : : pair < Join : : ASOFTimeType , Join : : RowRef > > Join : : TSRowRef : : findAsof ( Join : : ASOFTimeType t ) const <nl> + <nl> + size_t Join : : AsofRowRefs : : getSize ( Join : : AsofRowRefs : : AsofType type ) <nl> { <nl> - auto it = ts . upper_bound ( t ) ; <nl> - if ( it = = ts . 
cbegin ( ) ) <nl> - return { } ; <nl> - return * ( - - it ) ; <nl> + switch ( type ) <nl> + { <nl> + # define M ( NAME , TYPE ) \ <nl> + case AsofType : : NAME : return sizeof ( TYPE ) ; <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> + } <nl> + __builtin_unreachable ( ) ; <nl> } <nl> <nl> + <nl> namespace <nl> { <nl> / / / Inserting an element into a hash table of the form ` key - > reference to a string ` , which will then be used by JOIN . <nl> namespace <nl> template < typename Map , typename KeyGetter > <nl> struct Inserter < ASTTableJoin : : Strictness : : Asof , Map , KeyGetter > <nl> { <nl> - template < typename AsofGetter > <nl> - static ALWAYS_INLINE void insert ( Map & map , KeyGetter & key_getter , AsofGetter & asof_getter , Block * stored_block , size_t i , Arena & pool ) <nl> + static ALWAYS_INLINE void insert ( Map & map , KeyGetter & key_getter , Block * stored_block , size_t i , Arena & pool , const IColumn * asof_column ) <nl> { <nl> auto emplace_result = key_getter . emplaceKey ( map , i , pool ) ; <nl> typename Map : : mapped_type * time_series_map = & emplace_result . getMapped ( ) ; <nl> namespace <nl> if ( emplace_result . isInserted ( ) ) <nl> { <nl> time_series_map = new ( time_series_map ) typename Map : : mapped_type ( ) ; <nl> + / / TODO extract this from either the column type or from the main join object <nl> + time_series_map - > create ( Join : : AsofRowRefs : : AsofType : : key32 ) ; <nl> } <nl> - auto k = asof_getter . 
getKey ( i , pool ) ; <nl> - time_series_map - > insert ( k , stored_block , i ) ; <nl> - / / std : : cout < < " inserted key into time series map = " < < k < < " result = " < < time_series_map - > dumpStructure ( ) < < std : : endl ; <nl> + <nl> + time_series_map - > insert ( asof_column , stored_block , i , pool ) ; <nl> } <nl> } ; <nl> <nl> namespace <nl> continue ; <nl> <nl> if constexpr ( STRICTNESS = = ASTTableJoin : : Strictness : : Asof ) <nl> - { <nl> - auto asof_getter = Join : : AsofGetterType ( asof_column ) ; <nl> - Inserter < STRICTNESS , Map , KeyGetter > : : insert ( map , key_getter , asof_getter , stored_block , i , pool ) ; <nl> - } else <nl> + Inserter < STRICTNESS , Map , KeyGetter > : : insert ( map , key_getter , stored_block , i , pool , asof_column ) ; <nl> + else <nl> Inserter < STRICTNESS , Map , KeyGetter > : : insert ( map , key_getter , stored_block , i , pool ) ; <nl> } <nl> } <nl> void addFoundRow ( const typename Map : : mapped_type & mapped , AddedColumns & added , <nl> } <nl> } ; <nl> <nl> - template < typename Map > <nl> - bool addFoundRowAsof ( const typename Map : : mapped_type & mapped , AddedColumns & added , IColumn : : Offset & current_offset [ [ maybe_unused ] ] , Join : : ASOFTimeType asof_key ) <nl> - { <nl> - if ( auto v = mapped . findAsof ( asof_key ) ) <nl> - { <nl> - std : : pair < Join : : ASOFTimeType , Join : : RowRef > res = * v ; <nl> - / / std : : cout < < " Adder : : addFound " < < " to_add " < < num_columns_to_add < < " i = " < < i < < " asof_key = " < < asof_key < < " found = " < < res . first < < std : : endl ; <nl> - added . appendFromBlock ( * res . second . block , res . second . 
row_num ) ; <nl> - return true ; <nl> - } <nl> - / / std : : cout < < " Adder : : addFound " < < " not found in map " < < num_columns_to_add < < " i = " < < i < < " asof_key = " < < asof_key < < std : : endl ; <nl> - return false ; <nl> - } <nl> - <nl> template < bool _add_missing > <nl> void addNotFoundRow ( AddedColumns & added [ [ maybe_unused ] ] , IColumn : : Offset & current_offset [ [ maybe_unused ] ] ) <nl> { <nl> std : : unique_ptr < IColumn : : Offsets > NO_INLINE joinRightIndexedColumns ( <nl> auto & mapped = find_result . getMapped ( ) ; <nl> <nl> if constexpr ( STRICTNESS = = ASTTableJoin : : Strictness : : Asof ) <nl> - { <nl> - Join : : AsofGetterType asof_getter ( asof_column ) ; <nl> - auto asof_key = asof_getter . getKey ( i , pool ) ; <nl> - bool actually_found = addFoundRowAsof < Map > ( mapped , added_columns , current_offset , asof_key ) ; <nl> - <nl> - if ( actually_found ) <nl> + if ( const Join : : RowRef * found = mapped . findAsof ( asof_column , i , pool ) ) <nl> { <nl> - filter [ i ] = 1 ; <nl> - mapped . setUsed ( ) ; <nl> + filter [ i ] = 1 ; <nl> + mapped . setUsed ( ) ; <nl> + added_columns . appendFromBlock ( * found - > block , found - > row_num ) ; <nl> } <nl> else <nl> addNotFoundRow < _add_missing > ( added_columns , current_offset ) ; <nl> - } <nl> else <nl> { <nl> filter [ i ] = 1 ; <nl> mmm a / dbms / src / Interpreters / Join . h <nl> ppp b / dbms / src / Interpreters / Join . 
h <nl> class Join <nl> RowRefList ( const Block * block_ , size_t row_num_ ) : RowRef ( block_ , row_num_ ) { } <nl> } ; <nl> <nl> - / / / Map for a time series <nl> - using ASOFTimeType = UInt32 ; <nl> - using AsofGetterType = ColumnsHashing : : HashMethodOneNumber < ASOFTimeType , ASOFTimeType , ASOFTimeType , false > ; <nl> - struct TSRowRef <nl> + struct AsofRowRefs <nl> { <nl> - / / TODO use the arena allocator to get memory for this <nl> - / / This would require ditching std : : map because std : : allocator is incompatible with the arena allocator <nl> - std : : map < ASOFTimeType , RowRef > ts ; <nl> - <nl> - TSRowRef ( ) { } <nl> - void insert ( ASOFTimeType t , const Block * block , size_t row_num ) ; <nl> - std : : optional < std : : pair < ASOFTimeType , RowRef > > findAsof ( ASOFTimeType t ) const ; <nl> - std : : string dumpStructure ( ) const ; <nl> - size_t size ( ) const ; <nl> + / / / Different types of asof join keys <nl> + # define APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) \ <nl> + M ( key32 , UInt32 ) \ <nl> + M ( key64 , UInt64 ) <nl> + <nl> + enum class AsofType <nl> + { <nl> + # define M ( NAME , TYPE ) NAME , <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> + } ; <nl> + <nl> + static std : : optional < AsofType > getType ( const IColumn * asof_column ) ; <nl> + static size_t getSize ( AsofType type ) ; <nl> + <nl> + template < typename T > <nl> + struct AsofEntry <nl> + { <nl> + T asof_value ; <nl> + RowRef row_ref ; <nl> + <nl> + AsofEntry ( T v ) : asof_value ( v ) { } <nl> + AsofEntry ( T v , RowRef rr ) : asof_value ( v ) , row_ref ( rr ) { } <nl> + <nl> + bool operator < ( const AsofEntry & o ) const <nl> + { <nl> + return asof_value < o . 
asof_value ; <nl> + } <nl> + } ; <nl> + <nl> + struct AsofLookups <nl> + { <nl> + # define M ( NAME , TYPE ) \ <nl> + std : : unique_ptr < PODArray < AsofEntry < TYPE > > > NAME ; <nl> + APPLY_FOR_ASOF_JOIN_VARIANTS ( M ) <nl> + # undef M <nl> + <nl> + void create ( AsofType which ) ; <nl> + } ; <nl> + <nl> + AsofRowRefs ( ) { } <nl> + <nl> + void create ( AsofType which ) ; <nl> + void insert ( const IColumn * asof_column , const Block * block , size_t row_num , Arena & pool ) ; <nl> + <nl> + const RowRef * findAsof ( const IColumn * asof_column , size_t row_num , Arena & pool ) const ; <nl> + <nl> + private : <nl> + AsofType type ; <nl> + mutable AsofLookups lookups ; <nl> + mutable bool sorted = false ; <nl> } ; <nl> <nl> / * * Depending on template parameter , adds or doesn ' t add a flag , that element was used ( row was joined ) . <nl> class Join <nl> using MapsAnyFull = MapsTemplate < WithFlags < true , false , RowRef > > ; <nl> using MapsAnyFullOverwrite = MapsTemplate < WithFlags < true , true , RowRef > > ; <nl> using MapsAllFull = MapsTemplate < WithFlags < true , false , RowRefList > > ; <nl> - using MapsAsof = MapsTemplate < WithFlags < false , false , TSRowRef > > ; <nl> + using MapsAsof = MapsTemplate < WithFlags < false , false , AsofRowRefs > > ; <nl> <nl> template < ASTTableJoin : : Kind KIND > <nl> struct KindTrait <nl> class Join <nl> <nl> private : <nl> Type type = Type : : EMPTY ; <nl> + AsofRowRefs : : AsofType asof_type ; <nl> <nl> static Type chooseMethod ( const ColumnRawPtrs & key_columns , Sizes & key_sizes ) ; <nl> <nl>
asof join without using std : : map , but still only on u32
ClickHouse/ClickHouse
89515861dff53f74ea2ccd9660bc578373a7f588
2019-03-29T21:20:23Z
mmm a / csharp / ProtocolBuffers . Test / CodedOutputStreamTest . cs <nl> ppp b / csharp / ProtocolBuffers . Test / CodedOutputStreamTest . cs <nl> namespace Google . ProtocolBuffers { <nl> [ TestFixture ] <nl> public class CodedOutputStreamTest { <nl> <nl> - / / / < summary > <nl> - / / / Helper to construct a byte array from a bunch of bytes . The inputs are <nl> - / / / actually ints so that I can use hex notation and not get stupid errors <nl> - / / / about precision . <nl> - / / / < / summary > <nl> - private static byte [ ] Bytes ( params int [ ] bytesAsInts ) { <nl> - byte [ ] bytes = new byte [ bytesAsInts . Length ] ; <nl> - for ( int i = 0 ; i < bytesAsInts . Length ; i + + ) { <nl> - bytes [ i ] = ( byte ) bytesAsInts [ i ] ; <nl> - } <nl> - return bytes ; <nl> - } <nl> - <nl> private static void AssertEqualBytes ( byte [ ] a , byte [ ] b ) { <nl> Assert . AreEqual ( ByteString . CopyFrom ( a ) , ByteString . CopyFrom ( b ) ) ; <nl> } <nl> public class CodedOutputStreamTest { <nl> / / / < / summary > <nl> [ Test ] <nl> public void WriteVarint ( ) { <nl> - AssertWriteVarint ( Bytes ( 0x00 ) , 0 ) ; <nl> - AssertWriteVarint ( Bytes ( 0x01 ) , 1 ) ; <nl> - AssertWriteVarint ( Bytes ( 0x7f ) , 127 ) ; <nl> + AssertWriteVarint ( new byte [ ] { 0x00 } , 0 ) ; <nl> + AssertWriteVarint ( new byte [ ] { 0x01 } , 1 ) ; <nl> + AssertWriteVarint ( new byte [ ] { 0x7f } , 127 ) ; <nl> / / 14882 <nl> - AssertWriteVarint ( Bytes ( 0xa2 , 0x74 ) , ( 0x22 < < 0 ) | ( 0x74 < < 7 ) ) ; <nl> + AssertWriteVarint ( new byte [ ] { 0xa2 , 0x74 } , ( 0x22 < < 0 ) | ( 0x74 < < 7 ) ) ; <nl> / / 2961488830 <nl> - AssertWriteVarint ( Bytes ( 0xbe , 0xf7 , 0x92 , 0x84 , 0x0b ) , <nl> + AssertWriteVarint ( new byte [ ] { 0xbe , 0xf7 , 0x92 , 0x84 , 0x0b } , <nl> ( 0x3e < < 0 ) | ( 0x77 < < 7 ) | ( 0x12 < < 14 ) | ( 0x04 < < 21 ) | <nl> ( 0x0bL < < 28 ) ) ; <nl> <nl> / / 64 - bit <nl> / / 7256456126 <nl> - AssertWriteVarint ( Bytes ( 0xbe , 0xf7 , 0x92 , 0x84 , 0x1b ) , <nl> + 
AssertWriteVarint ( new byte [ ] { 0xbe , 0xf7 , 0x92 , 0x84 , 0x1b } , <nl> ( 0x3e < < 0 ) | ( 0x77 < < 7 ) | ( 0x12 < < 14 ) | ( 0x04 < < 21 ) | <nl> ( 0x1bL < < 28 ) ) ; <nl> / / 41256202580718336 <nl> AssertWriteVarint ( <nl> - Bytes ( 0x80 , 0xe6 , 0xeb , 0x9c , 0xc3 , 0xc9 , 0xa4 , 0x49 ) , <nl> + new byte [ ] { 0x80 , 0xe6 , 0xeb , 0x9c , 0xc3 , 0xc9 , 0xa4 , 0x49 } , <nl> ( 0x00 < < 0 ) | ( 0x66 < < 7 ) | ( 0x6b < < 14 ) | ( 0x1c < < 21 ) | <nl> ( 0x43UL < < 28 ) | ( 0x49L < < 35 ) | ( 0x24UL < < 42 ) | ( 0x49UL < < 49 ) ) ; <nl> / / 11964378330978735131 <nl> AssertWriteVarint ( <nl> - Bytes ( 0x9b , 0xa8 , 0xf9 , 0xc2 , 0xbb , 0xd6 , 0x80 , 0x85 , 0xa6 , 0x01 ) , <nl> + new byte [ ] { 0x9b , 0xa8 , 0xf9 , 0xc2 , 0xbb , 0xd6 , 0x80 , 0x85 , 0xa6 , 0x01 } , <nl> unchecked ( ( ulong ) <nl> ( ( 0x1b < < 0 ) | ( 0x28 < < 7 ) | ( 0x79 < < 14 ) | ( 0x42 < < 21 ) | <nl> ( 0x3bL < < 28 ) | ( 0x56L < < 35 ) | ( 0x00L < < 42 ) | <nl> public class CodedOutputStreamTest { <nl> / / / < / summary > <nl> [ Test ] <nl> public void WriteLittleEndian ( ) { <nl> - AssertWriteLittleEndian32 ( Bytes ( 0x78 , 0x56 , 0x34 , 0x12 ) , 0x12345678 ) ; <nl> - AssertWriteLittleEndian32 ( Bytes ( 0xf0 , 0xde , 0xbc , 0x9a ) , 0x9abcdef0 ) ; <nl> + AssertWriteLittleEndian32 ( new byte [ ] { 0x78 , 0x56 , 0x34 , 0x12 } , 0x12345678 ) ; <nl> + AssertWriteLittleEndian32 ( new byte [ ] { 0xf0 , 0xde , 0xbc , 0x9a } , 0x9abcdef0 ) ; <nl> <nl> AssertWriteLittleEndian64 ( <nl> - Bytes ( 0xf0 , 0xde , 0xbc , 0x9a , 0x78 , 0x56 , 0x34 , 0x12 ) , <nl> + new byte [ ] { 0xf0 , 0xde , 0xbc , 0x9a , 0x78 , 0x56 , 0x34 , 0x12 } , <nl> 0x123456789abcdef0L ) ; <nl> AssertWriteLittleEndian64 ( <nl> - Bytes ( 0x78 , 0x56 , 0x34 , 0x12 , 0xf0 , 0xde , 0xbc , 0x9a ) , <nl> + new byte [ ] { 0x78 , 0x56 , 0x34 , 0x12 , 0xf0 , 0xde , 0xbc , 0x9a } , <nl> 0x9abcdef012345678UL ) ; <nl> } <nl> <nl> mmm a / csharp / ProtocolBuffers . Test / TestUtil . cs <nl> ppp b / csharp / ProtocolBuffers . Test / TestUtil . 
cs <nl> internal static class TestUtil { <nl> registry . Add ( UnitTestProtoFile . DefaultCordExtension ) ; <nl> } <nl> <nl> + internal static string ReadTextFromFile ( string filePath ) { <nl> + return ReadBytesFromFile ( filePath ) . ToStringUtf8 ( ) ; <nl> + } <nl> <nl> internal static ByteString ReadBytesFromFile ( String filename ) { <nl> byte [ ] data = File . ReadAllBytes ( Path . Combine ( TestDataDirectory , filename ) ) ; <nl> internal static class TestUtil { <nl> Assert . AreEqual ( " abc " , message . GetExtension ( UnitTestProtoFile . DefaultStringPieceExtension ) ) ; <nl> Assert . AreEqual ( " 123 " , message . GetExtension ( UnitTestProtoFile . DefaultCordExtension ) ) ; <nl> } <nl> + <nl> + / / / < summary > <nl> + / / / Helper to construct a byte array from a bunch of bytes . <nl> + / / / < / summary > <nl> + internal static byte [ ] Bytes ( params byte [ ] bytesAsInts ) { <nl> + byte [ ] bytes = new byte [ bytesAsInts . Length ] ; <nl> + for ( int i = 0 ; i < bytesAsInts . Length ; i + + ) { <nl> + bytes [ i ] = ( byte ) bytesAsInts [ i ] ; <nl> + } <nl> + return bytes ; <nl> + } <nl> } <nl> } <nl> mmm a / csharp / ProtocolBuffers . Test / TextFormatTest . cs <nl> ppp b / csharp / ProtocolBuffers . Test / TextFormatTest . cs <nl> <nl>  using System ; <nl> - using System . Collections . Generic ; <nl> + using System . IO ; <nl> using System . Text ; <nl> + using Google . ProtocolBuffers . TestProtos ; <nl> using NUnit . Framework ; <nl> <nl> namespace Google . ProtocolBuffers { <nl> [ TestFixture ] <nl> public class TextFormatTest { <nl> + <nl> + / / / < summary > <nl> + / / / A basic string with different escapable characters for testing . <nl> + / / / < / summary > <nl> + private const string EscapeTestString = " \ " A string with ' characters \ n and \ r newlines and \ t tabs and \ 001 " <nl> + + " slashes \ \ " ; <nl> + <nl> + / / / < summary > <nl> + / / / A representation of the above string with all the characters escaped . 
<nl> + / / / < / summary > <nl> + private const string EscapeTestStringEscaped = " \ " \ \ \ " A string with \ \ ' characters \ \ n and \ \ r newlines " <nl> + + " and \ \ t tabs and \ \ 001 slashes \ \ \ \ \ " " ; <nl> + <nl> + private static readonly string AllFieldsSetText = TestUtil . ReadTextFromFile ( " text_format_unittest_data . txt " ) ; <nl> + private static readonly string AllExtensionsSetText = TestUtil . ReadTextFromFile ( " text_format_unittest_extensions_data . txt " ) ; <nl> + <nl> + / / / < summary > <nl> + / / / Note that this is slightly different to the Java - 123 . 0 becomes 123 , and 1 . 23E17 becomes 1 . 23E + 17 . <nl> + / / / Both of these differences can be parsed by the Java and the C + + , and we can parse their output too . <nl> + / / / < / summary > <nl> + private const string ExoticText = <nl> + " repeated_int32 : - 1 \ n " + <nl> + " repeated_int32 : - 2147483648 \ n " + <nl> + " repeated_int64 : - 1 \ n " + <nl> + " repeated_int64 : - 9223372036854775808 \ n " + <nl> + " repeated_uint32 : 4294967295 \ n " + <nl> + " repeated_uint32 : 2147483648 \ n " + <nl> + " repeated_uint64 : 18446744073709551615 \ n " + <nl> + " repeated_uint64 : 9223372036854775808 \ n " + <nl> + " repeated_double : 123 \ n " + <nl> + " repeated_double : 123 . 5 \ n " + <nl> + " repeated_double : 0 . 125 \ n " + <nl> + " repeated_double : 1 . 23E + 17 \ n " + <nl> + " repeated_double : 1 . 235E + 22 \ n " + <nl> + " repeated_double : 1 . 235E - 18 \ n " + <nl> + " repeated_double : 123 . 
456789 \ n " + <nl> + " repeated_double : Infinity \ n " + <nl> + " repeated_double : - Infinity \ n " + <nl> + " repeated_double : NaN \ n " + <nl> + " repeated_string : \ " \ \ 000 \ \ 001 \ \ a \ \ b \ \ f \ \ n \ \ r \ \ t \ \ v \ \ \ \ \ \ ' \ \ \ " " + <nl> + " \ \ 341 \ \ 210 \ \ 264 \ " \ n " + <nl> + " repeated_bytes : \ " \ \ 000 \ \ 001 \ \ a \ \ b \ \ f \ \ n \ \ r \ \ t \ \ v \ \ \ \ \ \ ' \ \ \ " \ \ 376 \ " \ n " ; <nl> + <nl> + private const string MessageSetText = <nl> + " [ protobuf_unittest . TestMessageSetExtension1 ] { \ n " + <nl> + " i : 123 \ n " + <nl> + " } \ n " + <nl> + " [ protobuf_unittest . TestMessageSetExtension2 ] { \ n " + <nl> + " str : \ " foo \ " \ n " + <nl> + " } \ n " ; <nl> + <nl> + / / / < summary > <nl> + / / / Print TestAllTypes and compare with golden file . <nl> + / / / < / summary > <nl> + [ Test ] <nl> + public void PrintMessage ( ) { <nl> + string text = TextFormat . PrintToString ( TestUtil . GetAllSet ( ) ) ; <nl> + Assert . AreEqual ( AllFieldsSetText . Replace ( " \ r \ n " , " \ n " ) , text . Replace ( " \ r \ n " , " \ n " ) ) ; <nl> + } <nl> + <nl> + / / / < summary > <nl> + / / / Print TestAllExtensions and compare with golden file . <nl> + / / / < / summary > <nl> + [ Test ] <nl> + public void PrintExtensions ( ) { <nl> + string text = TextFormat . PrintToString ( TestUtil . GetAllExtensionsSet ( ) ) ; <nl> + <nl> + Assert . AreEqual ( AllExtensionsSetText . Replace ( " \ r \ n " , " \ n " ) , text . Replace ( " \ r \ n " , " \ n " ) ) ; <nl> + } <nl> + <nl> + / / / < summary > <nl> + / / / Test printing of unknown fields in a message . <nl> + / / / < / summary > <nl> + [ Test ] <nl> + public void PrintUnknownFields ( ) { <nl> + TestEmptyMessage message = <nl> + TestEmptyMessage . CreateBuilder ( ) <nl> + . SetUnknownFields ( <nl> + UnknownFieldSet . CreateBuilder ( ) <nl> + . AddField ( 5 , <nl> + UnknownField . CreateBuilder ( ) <nl> + . AddVarint ( 1 ) <nl> + . AddFixed32 ( 2 ) <nl> + . 
AddFixed64 ( 3 ) <nl> + . AddLengthDelimited ( ByteString . CopyFromUtf8 ( " 4 " ) ) <nl> + . AddGroup ( <nl> + UnknownFieldSet . CreateBuilder ( ) <nl> + . AddField ( 10 , <nl> + UnknownField . CreateBuilder ( ) <nl> + . AddVarint ( 5 ) <nl> + . Build ( ) ) <nl> + . Build ( ) ) <nl> + . Build ( ) ) <nl> + . AddField ( 8 , <nl> + UnknownField . CreateBuilder ( ) <nl> + . AddVarint ( 1 ) <nl> + . AddVarint ( 2 ) <nl> + . AddVarint ( 3 ) <nl> + . Build ( ) ) <nl> + . AddField ( 15 , <nl> + UnknownField . CreateBuilder ( ) <nl> + . AddVarint ( 0xABCDEF1234567890L ) <nl> + . AddFixed32 ( 0xABCD1234 ) <nl> + . AddFixed64 ( 0xABCDEF1234567890L ) <nl> + . Build ( ) ) <nl> + . Build ( ) ) <nl> + . Build ( ) ; <nl> + <nl> + Assert . AreEqual ( <nl> + " 5 : 1 \ n " + <nl> + " 5 : 0x00000002 \ n " + <nl> + " 5 : 0x0000000000000003 \ n " + <nl> + " 5 : \ " 4 \ " \ n " + <nl> + " 5 { \ n " + <nl> + " 10 : 5 \ n " + <nl> + " } \ n " + <nl> + " 8 : 1 \ n " + <nl> + " 8 : 2 \ n " + <nl> + " 8 : 3 \ n " + <nl> + " 15 : 12379813812177893520 \ n " + <nl> + " 15 : 0xabcd1234 \ n " + <nl> + " 15 : 0xabcdef1234567890 \ n " , <nl> + TextFormat . PrintToString ( message ) ) ; <nl> + } <nl> + <nl> + / / / < summary > <nl> + / / / Helper to construct a ByteString from a string containing only 8 - bit <nl> + / / / characters . The characters are converted directly to bytes , * not * <nl> + / / / encoded using UTF - 8 . <nl> + / / / < / summary > <nl> + private static ByteString Bytes ( string str ) { <nl> + return ByteString . CopyFrom ( Encoding . GetEncoding ( 28591 ) . GetBytes ( str ) ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + public void PrintExotic ( ) { <nl> + IMessage message = TestAllTypes . CreateBuilder ( ) <nl> + / / Signed vs . unsigned numbers . <nl> + . AddRepeatedInt32 ( - 1 ) <nl> + . AddRepeatedUint32 ( uint . MaxValue ) <nl> + . AddRepeatedInt64 ( - 1 ) <nl> + . AddRepeatedUint64 ( ulong . MaxValue ) <nl> + <nl> + . AddRepeatedInt32 ( 1 < < 31 ) <nl> + . 
AddRepeatedUint32 ( 1U < < 31 ) <nl> + . AddRepeatedInt64 ( 1L < < 63 ) <nl> + . AddRepeatedUint64 ( 1UL < < 63 ) <nl> + <nl> + / / Floats of various precisions and exponents . <nl> + . AddRepeatedDouble ( 123 ) <nl> + . AddRepeatedDouble ( 123 . 5 ) <nl> + . AddRepeatedDouble ( 0 . 125 ) <nl> + . AddRepeatedDouble ( 123e15 ) <nl> + . AddRepeatedDouble ( 123 . 5e20 ) <nl> + . AddRepeatedDouble ( 123 . 5e - 20 ) <nl> + . AddRepeatedDouble ( 123 . 456789 ) <nl> + . AddRepeatedDouble ( Double . PositiveInfinity ) <nl> + . AddRepeatedDouble ( Double . NegativeInfinity ) <nl> + . AddRepeatedDouble ( Double . NaN ) <nl> + <nl> + / / Strings and bytes that needing escaping . <nl> + . AddRepeatedString ( " \ 0 \ u0001 \ u0007 \ b \ f \ n \ r \ t \ v \ \ \ ' \ " \ u1234 " ) <nl> + . AddRepeatedBytes ( Bytes ( " \ 0 \ u0001 \ u0007 \ b \ f \ n \ r \ t \ v \ \ \ ' \ " \ u00fe " ) ) <nl> + . Build ( ) ; <nl> + <nl> + Assert . AreEqual ( ExoticText , message . ToString ( ) ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + public void PrintMessageSet ( ) { <nl> + TestMessageSet messageSet = <nl> + TestMessageSet . CreateBuilder ( ) <nl> + . SetExtension ( <nl> + TestMessageSetExtension1 . MessageSetExtension , <nl> + TestMessageSetExtension1 . CreateBuilder ( ) . SetI ( 123 ) . Build ( ) ) <nl> + . SetExtension ( <nl> + TestMessageSetExtension2 . MessageSetExtension , <nl> + TestMessageSetExtension2 . CreateBuilder ( ) . SetStr ( " foo " ) . Build ( ) ) <nl> + . Build ( ) ; <nl> + <nl> + Assert . AreEqual ( MessageSetText , messageSet . ToString ( ) ) ; <nl> + } <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void Parse ( ) { <nl> + TestAllTypes . Builder builder = TestAllTypes . CreateBuilder ( ) ; <nl> + TextFormat . Merge ( AllFieldsSetText , builder ) ; <nl> + TestUtil . 
AssertAllFieldsSet ( builder . Build ( ) ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void ParseReader ( ) { <nl> + TestAllTypes . Builder builder = TestAllTypes . CreateBuilder ( ) ; <nl> + TextFormat . Merge ( new StringReader ( AllFieldsSetText ) , builder ) ; <nl> + TestUtil . AssertAllFieldsSet ( builder . Build ( ) ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void ParseExtensions ( ) { <nl> + TestAllExtensions . Builder builder = TestAllExtensions . CreateBuilder ( ) ; <nl> + TextFormat . Merge ( AllExtensionsSetText , <nl> + TestUtil . CreateExtensionRegistry ( ) , <nl> + builder ) ; <nl> + TestUtil . AssertAllExtensionsSet ( builder . Build ( ) ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void ParseExotic ( ) { <nl> + TestAllTypes . Builder builder = TestAllTypes . CreateBuilder ( ) ; <nl> + TextFormat . Merge ( ExoticText , builder ) ; <nl> + <nl> + / / Too lazy to check things individually . Don ' t try to debug this <nl> + / / if testPrintExotic ( ) is Assert . Failing . <nl> + Assert . AreEqual ( ExoticText , builder . Build ( ) . ToString ( ) ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void ParseMessageSet ( ) { <nl> + ExtensionRegistry extensionRegistry = ExtensionRegistry . CreateInstance ( ) ; <nl> + extensionRegistry . Add ( TestMessageSetExtension1 . MessageSetExtension ) ; <nl> + extensionRegistry . Add ( TestMessageSetExtension2 . MessageSetExtension ) ; <nl> + <nl> + TestMessageSet . Builder builder = TestMessageSet . CreateBuilder ( ) ; <nl> + TextFormat . Merge ( MessageSetText , extensionRegistry , builder ) ; <nl> + TestMessageSet messageSet = builder . Build ( ) ; <nl> + <nl> + Assert . IsTrue ( messageSet . HasExtension ( TestMessageSetExtension1 . MessageSetExtension ) ) ; <nl> + Assert . AreEqual ( 123 , messageSet . 
GetExtension ( TestMessageSetExtension1 . MessageSetExtension ) . I ) ; <nl> + Assert . IsTrue ( messageSet . HasExtension ( TestMessageSetExtension2 . MessageSetExtension ) ) ; <nl> + Assert . AreEqual ( " foo " , messageSet . GetExtension ( TestMessageSetExtension2 . MessageSetExtension ) . Str ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void ParseNumericEnum ( ) { <nl> + TestAllTypes . Builder builder = TestAllTypes . CreateBuilder ( ) ; <nl> + TextFormat . Merge ( " optional_nested_enum : 2 " , builder ) ; <nl> + Assert . AreEqual ( TestAllTypes . Types . NestedEnum . BAR , builder . OptionalNestedEnum ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void ParseAngleBrackets ( ) { <nl> + TestAllTypes . Builder builder = TestAllTypes . CreateBuilder ( ) ; <nl> + TextFormat . Merge ( " OptionalGroup : < a : 1 > " , builder ) ; <nl> + Assert . IsTrue ( builder . HasOptionalGroup ) ; <nl> + Assert . AreEqual ( 1 , builder . OptionalGroup . A ) ; <nl> + } <nl> + <nl> + private static void AssertParseError ( string error , string text ) { <nl> + TestAllTypes . Builder builder = TestAllTypes . CreateBuilder ( ) ; <nl> + try { <nl> + TextFormat . Merge ( text , TestUtil . CreateExtensionRegistry ( ) , builder ) ; <nl> + Assert . Fail ( " Expected parse exception . " ) ; <nl> + } catch ( FormatException e ) { <nl> + Assert . AreEqual ( error , e . Message ) ; <nl> + } <nl> + } <nl> + <nl> + [ Test ] <nl> + [ Ignore ( " Parsing not implemented " ) ] <nl> + public void ParseErrors ( ) { <nl> + AssertParseError ( <nl> + " 1 : 16 : Expected \ " : \ " . " , <nl> + " optional_int32 123 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 23 : Expected identifier . " , <nl> + " optional_nested_enum : ? 
" ) ; <nl> + AssertParseError ( <nl> + " 1 : 18 : Couldn ' t parse integer : Number must be positive : - 1 " , <nl> + " optional_uint32 : - 1 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 17 : Couldn ' t parse integer : Number out of range for 32 - bit signed " + <nl> + " integer : 82301481290849012385230157 " , <nl> + " optional_int32 : 82301481290849012385230157 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 16 : Expected \ " true \ " or \ " false \ " . " , <nl> + " optional_bool : maybe " ) ; <nl> + AssertParseError ( <nl> + " 1 : 18 : Expected string . " , <nl> + " optional_string : 123 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 18 : string missing ending quote . " , <nl> + " optional_string : \ " ueoauaoe " ) ; <nl> + AssertParseError ( <nl> + " 1 : 18 : string missing ending quote . " , <nl> + " optional_string : \ " ueoauaoe \ n " + <nl> + " optional_int32 : 123 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 18 : Invalid escape sequence : ' \ \ z ' " , <nl> + " optional_string : \ " \ \ z \ " " ) ; <nl> + AssertParseError ( <nl> + " 1 : 18 : string missing ending quote . " , <nl> + " optional_string : \ " ueoauaoe \ n " + <nl> + " optional_int32 : 123 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 2 : Extension \ " nosuchext \ " not found in the ExtensionRegistry . " , <nl> + " [ nosuchext ] : 123 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 20 : Extension \ " protobuf_unittest . optional_int32_extension \ " does " + <nl> + " not extend message type \ " protobuf_unittest . TestAllTypes \ " . " , <nl> + " [ protobuf_unittest . optional_int32_extension ] : 123 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 1 : Message type \ " protobuf_unittest . TestAllTypes \ " has no field " + <nl> + " named \ " nosuchfield \ " . " , <nl> + " nosuchfield : 123 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 21 : Expected \ " > \ " . " , <nl> + " OptionalGroup < a : 1 " ) ; <nl> + AssertParseError ( <nl> + " 1 : 23 : Enum type \ " protobuf_unittest . TestAllTypes . 
NestedEnum \ " has no " + <nl> + " value named \ " NO_SUCH_VALUE \ " . " , <nl> + " optional_nested_enum : NO_SUCH_VALUE " ) ; <nl> + AssertParseError ( <nl> + " 1 : 23 : Enum type \ " protobuf_unittest . TestAllTypes . NestedEnum \ " has no " + <nl> + " value with number 123 . " , <nl> + " optional_nested_enum : 123 " ) ; <nl> + <nl> + / / Delimiters must match . <nl> + AssertParseError ( <nl> + " 1 : 22 : Expected identifier . " , <nl> + " OptionalGroup < a : 1 } " ) ; <nl> + AssertParseError ( <nl> + " 1 : 22 : Expected identifier . " , <nl> + " OptionalGroup { a : 1 > " ) ; <nl> + } <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + private static ByteString Bytes ( params byte [ ] bytes ) { <nl> + return ByteString . CopyFrom ( bytes ) ; <nl> + } <nl> + <nl> + private delegate void FormattingAction ( ) ; <nl> + <nl> + private static void AssertFormatException ( FormattingAction action ) { <nl> + try { <nl> + action ( ) ; <nl> + Assert . Fail ( " Should have thrown an exception . " ) ; <nl> + } catch ( FormatException ) { <nl> + / / success <nl> + } <nl> + } <nl> + <nl> + [ Test ] <nl> + public void Escape ( ) { <nl> + / / Escape sequences . <nl> + Assert . AreEqual ( " \ \ 000 \ \ 001 \ \ a \ \ b \ \ f \ \ n \ \ r \ \ t \ \ v \ \ \ \ \ \ ' \ \ \ " " , <nl> + TextFormat . EscapeBytes ( Bytes ( " \ 0 \ u0001 \ u0007 \ b \ f \ n \ r \ t \ v \ \ \ ' \ " " ) ) ) ; <nl> + Assert . AreEqual ( " \ \ 000 \ \ 001 \ \ a \ \ b \ \ f \ \ n \ \ r \ \ t \ \ v \ \ \ \ \ \ ' \ \ \ " " , <nl> + TextFormat . EscapeText ( " \ 0 \ u0001 \ u0007 \ b \ f \ n \ r \ t \ v \ \ \ ' \ " " ) ) ; <nl> + Assert . AreEqual ( Bytes ( " \ 0 \ u0001 \ u0007 \ b \ f \ n \ r \ t \ v \ \ \ ' \ " " ) , <nl> + TextFormat . UnescapeBytes ( " \ \ 000 \ \ 001 \ \ a \ \ b \ \ f \ \ n \ \ r \ \ t \ \ v \ \ \ \ \ \ ' \ \ \ " " ) ) ; <nl> + Assert . 
AreEqual ( " \ 0 \ u0001 \ u0007 \ b \ f \ n \ r \ t \ v \ \ \ ' \ " " , <nl> + TextFormat . UnescapeText ( " \ \ 000 \ \ 001 \ \ a \ \ b \ \ f \ \ n \ \ r \ \ t \ \ v \ \ \ \ \ \ ' \ \ \ " " ) ) ; <nl> + <nl> + / / Unicode handling . <nl> + Assert . AreEqual ( " \ \ 341 \ \ 210 \ \ 264 " , TextFormat . EscapeText ( " \ u1234 " ) ) ; <nl> + Assert . AreEqual ( " \ \ 341 \ \ 210 \ \ 264 " , TextFormat . EscapeBytes ( Bytes ( 0xe1 , 0x88 , 0xb4 ) ) ) ; <nl> + Assert . AreEqual ( " \ u1234 " , TextFormat . UnescapeText ( " \ \ 341 \ \ 210 \ \ 264 " ) ) ; <nl> + Assert . AreEqual ( Bytes ( 0xe1 , 0x88 , 0xb4 ) , TextFormat . UnescapeBytes ( " \ \ 341 \ \ 210 \ \ 264 " ) ) ; <nl> + Assert . AreEqual ( " \ u1234 " , TextFormat . UnescapeText ( " \ \ xe1 \ \ x88 \ \ xb4 " ) ) ; <nl> + Assert . AreEqual ( Bytes ( 0xe1 , 0x88 , 0xb4 ) , TextFormat . UnescapeBytes ( " \ \ xe1 \ \ x88 \ \ xb4 " ) ) ; <nl> + <nl> + / / Errors . <nl> + AssertFormatException ( ( ) = > TextFormat . UnescapeText ( " \ \ x " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . UnescapeText ( " \ \ z " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . UnescapeText ( " \ \ " ) ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + public void ParseInteger ( ) { <nl> + Assert . AreEqual ( 0 , TextFormat . ParseInt32 ( " 0 " ) ) ; <nl> + Assert . AreEqual ( 1 , TextFormat . ParseInt32 ( " 1 " ) ) ; <nl> + Assert . AreEqual ( - 1 , TextFormat . ParseInt32 ( " - 1 " ) ) ; <nl> + Assert . AreEqual ( 12345 , TextFormat . ParseInt32 ( " 12345 " ) ) ; <nl> + Assert . AreEqual ( - 12345 , TextFormat . ParseInt32 ( " - 12345 " ) ) ; <nl> + Assert . AreEqual ( 2147483647 , TextFormat . ParseInt32 ( " 2147483647 " ) ) ; <nl> + Assert . AreEqual ( - 2147483648 , TextFormat . ParseInt32 ( " - 2147483648 " ) ) ; <nl> + <nl> + Assert . AreEqual ( 0 , TextFormat . ParseUInt32 ( " 0 " ) ) ; <nl> + Assert . AreEqual ( 1 , TextFormat . ParseUInt32 ( " 1 " ) ) ; <nl> + Assert . AreEqual ( 12345 , TextFormat . 
ParseUInt32 ( " 12345 " ) ) ; <nl> + Assert . AreEqual ( 2147483647 , TextFormat . ParseUInt32 ( " 2147483647 " ) ) ; <nl> + Assert . AreEqual ( 2147483648U , TextFormat . ParseUInt32 ( " 2147483648 " ) ) ; <nl> + Assert . AreEqual ( 4294967295U , TextFormat . ParseUInt32 ( " 4294967295 " ) ) ; <nl> + <nl> + Assert . AreEqual ( 0L , TextFormat . ParseInt64 ( " 0 " ) ) ; <nl> + Assert . AreEqual ( 1L , TextFormat . ParseInt64 ( " 1 " ) ) ; <nl> + Assert . AreEqual ( - 1L , TextFormat . ParseInt64 ( " - 1 " ) ) ; <nl> + Assert . AreEqual ( 12345L , TextFormat . ParseInt64 ( " 12345 " ) ) ; <nl> + Assert . AreEqual ( - 12345L , TextFormat . ParseInt64 ( " - 12345 " ) ) ; <nl> + Assert . AreEqual ( 2147483647L , TextFormat . ParseInt64 ( " 2147483647 " ) ) ; <nl> + Assert . AreEqual ( - 2147483648L , TextFormat . ParseInt64 ( " - 2147483648 " ) ) ; <nl> + Assert . AreEqual ( 4294967295L , TextFormat . ParseInt64 ( " 4294967295 " ) ) ; <nl> + Assert . AreEqual ( 4294967296L , TextFormat . ParseInt64 ( " 4294967296 " ) ) ; <nl> + Assert . AreEqual ( 9223372036854775807L , TextFormat . ParseInt64 ( " 9223372036854775807 " ) ) ; <nl> + Assert . AreEqual ( - 9223372036854775808L , TextFormat . ParseInt64 ( " - 9223372036854775808 " ) ) ; <nl> + <nl> + Assert . AreEqual ( 0L , TextFormat . ParseUInt64 ( " 0 " ) ) ; <nl> + Assert . AreEqual ( 1L , TextFormat . ParseUInt64 ( " 1 " ) ) ; <nl> + Assert . AreEqual ( 12345L , TextFormat . ParseUInt64 ( " 12345 " ) ) ; <nl> + Assert . AreEqual ( 2147483647L , TextFormat . ParseUInt64 ( " 2147483647 " ) ) ; <nl> + Assert . AreEqual ( 4294967295L , TextFormat . ParseUInt64 ( " 4294967295 " ) ) ; <nl> + Assert . AreEqual ( 4294967296L , TextFormat . ParseUInt64 ( " 4294967296 " ) ) ; <nl> + Assert . AreEqual ( 9223372036854775807UL , TextFormat . ParseUInt64 ( " 9223372036854775807 " ) ) ; <nl> + Assert . AreEqual ( 9223372036854775808UL , TextFormat . ParseUInt64 ( " 9223372036854775808 " ) ) ; <nl> + Assert . 
AreEqual ( 18446744073709551615UL , TextFormat . ParseUInt64 ( " 18446744073709551615 " ) ) ; <nl> + <nl> + / / Hex <nl> + Assert . AreEqual ( 0x1234abcd , TextFormat . ParseInt32 ( " 0x1234abcd " ) ) ; <nl> + Assert . AreEqual ( - 0x1234abcd , TextFormat . ParseInt32 ( " - 0x1234abcd " ) ) ; <nl> + Assert . AreEqual ( 0xffffffffffffffffUL , TextFormat . ParseUInt64 ( " 0xffffffffffffffff " ) ) ; <nl> + Assert . AreEqual ( 0x7fffffffffffffffL , <nl> + TextFormat . ParseInt64 ( " 0x7fffffffffffffff " ) ) ; <nl> + <nl> + / / Octal <nl> + Assert . AreEqual ( 342391 , TextFormat . ParseInt32 ( " 01234567 " ) ) ; <nl> + <nl> + / / Out - of - range <nl> + AssertFormatException ( ( ) = > TextFormat . ParseInt32 ( " 2147483648 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseInt32 ( " - 2147483649 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseUInt32 ( " 4294967296 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseUInt32 ( " - 1 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseInt64 ( " 9223372036854775808 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseInt64 ( " - 9223372036854775809 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseUInt64 ( " 18446744073709551616 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseUInt64 ( " - 1 " ) ) ; <nl> + AssertFormatException ( ( ) = > TextFormat . ParseInt32 ( " abcd " ) ) ; <nl> + } <nl> } <nl> } <nl> mmm a / csharp / ProtocolBuffers / TextFormat . cs <nl> ppp b / csharp / ProtocolBuffers / TextFormat . 
cs <nl> public static class TextFormat { <nl> } <nl> } <nl> <nl> - internal static ulong ParseUInt64 ( string text ) { <nl> + / / TODO ( jonskeet ) : InternalsVisibleTo <nl> + public static ulong ParseUInt64 ( string text ) { <nl> return ( ulong ) ParseInteger ( text , false , true ) ; <nl> } <nl> <nl> - internal static long ParseInt64 ( string text ) { <nl> + / / TODO ( jonskeet ) : InternalsVisibleTo <nl> + public static long ParseInt64 ( string text ) { <nl> return ParseInteger ( text , true , true ) ; <nl> } <nl> <nl> - internal static uint ParseUInt32 ( string text ) { <nl> + / / TODO ( jonskeet ) : InternalsVisibleTo <nl> + public static uint ParseUInt32 ( string text ) { <nl> return ( uint ) ParseInteger ( text , false , false ) ; <nl> } <nl> <nl> - internal static int ParseInt32 ( string text ) { <nl> + / / TODO ( jonskeet ) : InternalsVisibleTo <nl> + public static int ParseInt32 ( string text ) { <nl> return ( int ) ParseInteger ( text , true , false ) ; <nl> } <nl> <nl> public static class TextFormat { <nl> text = text . Substring ( 2 ) ; <nl> } else if ( text . StartsWith ( " 0 " ) ) { <nl> radix = 8 ; <nl> - text = text . Substring ( 1 ) ; <nl> } <nl> <nl> - ulong result = Convert . ToUInt64 ( text , radix ) ; <nl> + ulong result ; <nl> + try { <nl> + / / Workaround for https : / / connect . microsoft . com / VisualStudio / feedback / ViewFeedback . aspx ? FeedbackID = 278448 <nl> + / / We should be able to use Convert . ToUInt64 for all cases . <nl> + result = radix = = 10 ? ulong . Parse ( text ) : Convert . ToUInt64 ( text , radix ) ; <nl> + } catch ( OverflowException ) { <nl> + / / Convert OverflowException to FormatException so there ' s a single exception type this method can throw . <nl> + throw new FormatException ( " Number of out range : " + original ) ; <nl> + } <nl> <nl> if ( negative ) { <nl> ulong max = isLong ? 
0x8000000000000000UL : 0x80000000L ; <nl> public static class TextFormat { <nl> } <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Unescapes a text string as escaped using < see cref = " EscapeText ( string ) " / > . <nl> + / / / Two - digit hex escapes ( starting with " \ x " are also recognised . <nl> + / / / TODO ( jonskeet ) : InternalsVisibleTo <nl> + / / / < / summary > <nl> + public static string UnescapeText ( string input ) { <nl> + return UnescapeBytes ( input ) . ToStringUtf8 ( ) ; <nl> + } <nl> + <nl> / / / < summary > <nl> / / / Like < see cref = " EscapeBytes " / > but escapes a text string . <nl> / / / The string is first encoded as UTF - 8 , then each byte escaped individually . <nl> / / / The returned value is guaranteed to be entirely ASCII . <nl> + / / / TODO ( jonskeet ) : InternalsVisibleTo <nl> / / / < / summary > <nl> - static String EscapeText ( string input ) { <nl> + public static string EscapeText ( string input ) { <nl> return EscapeBytes ( ByteString . CopyFromUtf8 ( input ) ) ; <nl> } <nl> / / / < summary > <nl> public static class TextFormat { <nl> / / / which no defined short - hand escape sequence is defined will be escaped <nl> / / / using 3 - digit octal sequences . <nl> / / / The returned value is guaranteed to be entirely ASCII . <nl> + / / / TODO ( jonskeet ) : InternalsVisibleTo <nl> / / / < / summary > <nl> - private static String EscapeBytes ( ByteString input ) { <nl> + public static String EscapeBytes ( ByteString input ) { <nl> StringBuilder builder = new StringBuilder ( input . Length ) ; <nl> foreach ( byte b in input ) { <nl> switch ( b ) { <nl> public static class TextFormat { <nl> case ( byte ) ' \ ' ' : builder . Append ( " \ \ \ ' " ) ; break ; <nl> case ( byte ) ' " ' : builder . Append ( " \ \ \ " " ) ; break ; <nl> default : <nl> - if ( b > = 0x20 ) { <nl> + if ( b > = 0x20 & & b < 128 ) { <nl> builder . Append ( ( char ) b ) ; <nl> } else { <nl> builder . 
Append ( ' \ \ ' ) ; <nl> public static class TextFormat { <nl> <nl> / / / < summary > <nl> / / / Performs string unescaping from C style ( octal , hex , form feeds , tab etc ) into a byte string . <nl> + / / / TODO ( jonskeet ) : Make this internal again , and use InternalsVisibleTo . <nl> / / / < / summary > <nl> - internal static ByteString UnescapeBytes ( string input ) { <nl> + public static ByteString UnescapeBytes ( string input ) { <nl> byte [ ] result = new byte [ input . Length ] ; <nl> int pos = 0 ; <nl> for ( int i = 0 ; i < input . Length ; i + + ) { <nl> public static class TextFormat { <nl> <nl> return ByteString . CopyFrom ( result , 0 , pos ) ; <nl> } <nl> + <nl> + public static void Merge ( string text , IBuilder builder ) { <nl> + throw new NotImplementedException ( ) ; <nl> + } <nl> + <nl> + public static void Merge ( TextReader reader , IBuilder builder ) { <nl> + throw new NotImplementedException ( ) ; <nl> + } <nl> + <nl> + public static void Merge ( string text , ExtensionRegistry registry , IBuilder builder ) { <nl> + throw new NotImplementedException ( ) ; <nl> + } <nl> + <nl> + public static void Merge ( TextReader reader , ExtensionRegistry registry , IBuilder builder ) { <nl> + throw new NotImplementedException ( ) ; <nl> + } <nl> } <nl> } <nl>
Lots of text formatting tests , but ignored the parsing ones for the moment .
protocolbuffers/protobuf
feb9385b0441f5047d608fead0c9e79d032ded7a
2008-08-14T19:35:30Z
mmm a / tests / Node . py <nl> ppp b / tests / Node . py <nl> def getEosAccount ( self , name , exitOnError = False ) : <nl> else : <nl> return self . getEosAccountFromDb ( name , exitOnError = exitOnError ) <nl> <nl> - def getEosAccountFromDb ( self , name ) : <nl> + def getEosAccountFromDb ( self , name , exitOnError = False ) : <nl> cmd = " % s % s " % ( Utils . MongoPath , self . mongoEndpointArgs ) <nl> subcommand = ' db . accounts . findOne ( { " name " : " % s " } ) ' % ( name ) <nl> if Utils . Debug : Utils . Print ( " cmd : echo ' % s ' | % s " % ( subcommand , cmd ) ) <nl> try : <nl> - trans = Node . runMongoCmdReturnJson ( cmd . split ( ) , subcommand ) <nl> + trans = Node . runMongoCmdReturnJson ( cmd . split ( ) , subcommand , exitOnError = exitOnError ) <nl> return trans <nl> except subprocess . CalledProcessError as ex : <nl> msg = ex . output . decode ( " utf - 8 " ) <nl> - Utils . Print ( " ERROR : Exception during get account from db . % s " % ( msg ) ) <nl> + if exitOnError : <nl> + Utils . cmdError ( " Exception during get account from db for % s . % s " % ( name , msg ) ) <nl> + errorExit ( " Failed during get account from db for % s . % s " % ( name , msg ) ) <nl> + <nl> + Utils . Print ( " ERROR : Exception during get account from db for % s . % s " % ( name , msg ) ) <nl> return None <nl> <nl> def getTable ( self , contract , scope , table , exitOnError = False ) : <nl>
Fixed use of exitOnError flag . GH
EOSIO/eos
48caac897a4b483224eae0057b8d9c5cb9089440
2018-07-25T16:56:17Z
mmm a / stdlib / core / Reflection . swift <nl> ppp b / stdlib / core / Reflection . swift <nl> public struct _MagicMirrorData { <nl> } <nl> } <nl> <nl> - public / / protocol conformance used in Reflection . mm <nl> struct _OpaqueMirror : MirrorType { <nl> let data : _MagicMirrorData <nl> <nl> - public var value : Any { return data . value } <nl> - public var valueType : Any . Type { return data . valueType } <nl> - public var objectIdentifier : ObjectIdentifier ? { return nil } <nl> - public var count : Int { return 0 } <nl> - public subscript ( i : Int ) - > ( String , MirrorType ) { <nl> + var value : Any { return data . value } <nl> + var valueType : Any . Type { return data . valueType } <nl> + var objectIdentifier : ObjectIdentifier ? { return nil } <nl> + var count : Int { return 0 } <nl> + subscript ( i : Int ) - > ( String , MirrorType ) { <nl> _preconditionFailure ( " no children " ) <nl> } <nl> - public var summary : String { return data . summary } <nl> - public var quickLookObject : QuickLookObject ? { return nil } <nl> - public var disposition : MirrorDisposition { return . Aggregate } <nl> + var summary : String { return data . summary } <nl> + var quickLookObject : QuickLookObject ? { return nil } <nl> + var disposition : MirrorDisposition { return . Aggregate } <nl> } <nl> <nl> - public / / protocol conformance used in Reflection . mm <nl> - struct _TupleMirror : MirrorType { <nl> + internal struct _TupleMirror : MirrorType { <nl> let data : _MagicMirrorData <nl> <nl> - public var value : Any { return data . value } <nl> - public var valueType : Any . Type { return data . valueType } <nl> - public var objectIdentifier : ObjectIdentifier ? { return nil } <nl> - public var count : Int { <nl> + var value : Any { return data . value } <nl> + var valueType : Any . Type { return data . valueType } <nl> + var objectIdentifier : ObjectIdentifier ? 
{ return nil } <nl> + var count : Int { <nl> @ asmname ( " swift_TupleMirror_count " ) get <nl> } <nl> - public subscript ( i : Int ) - > ( String , MirrorType ) { <nl> + subscript ( i : Int ) - > ( String , MirrorType ) { <nl> @ asmname ( " swift_TupleMirror_subscript " ) get <nl> } <nl> - public var summary : String { return " ( \ ( count ) elements ) " } <nl> - public var quickLookObject : QuickLookObject ? { return nil } <nl> - public var disposition : MirrorDisposition { return . Tuple } <nl> + var summary : String { return " ( \ ( count ) elements ) " } <nl> + var quickLookObject : QuickLookObject ? { return nil } <nl> + var disposition : MirrorDisposition { return . Tuple } <nl> } <nl> <nl> - public / / protocol conformance used in Reflection . mm <nl> struct _StructMirror : MirrorType { <nl> let data : _MagicMirrorData <nl> <nl> - public var value : Any { return data . value } <nl> - public var valueType : Any . Type { return data . valueType } <nl> - public var objectIdentifier : ObjectIdentifier ? { return nil } <nl> - public var count : Int { <nl> + var value : Any { return data . value } <nl> + var valueType : Any . Type { return data . valueType } <nl> + var objectIdentifier : ObjectIdentifier ? { return nil } <nl> + var count : Int { <nl> @ asmname ( " swift_StructMirror_count " ) get <nl> } <nl> - public subscript ( i : Int ) - > ( String , MirrorType ) { <nl> + subscript ( i : Int ) - > ( String , MirrorType ) { <nl> @ asmname ( " swift_StructMirror_subscript " ) get <nl> } <nl> <nl> - public var summary : String { <nl> + var summary : String { <nl> return _stdlib_getDemangledTypeName ( value ) <nl> } <nl> - public var quickLookObject : QuickLookObject ? { return nil } <nl> - public var disposition : MirrorDisposition { return . Struct } <nl> + var quickLookObject : QuickLookObject ? { return nil } <nl> + var disposition : MirrorDisposition { return . 
Struct } <nl> } <nl> <nl> @ asmname ( " swift_ClassMirror_count " ) <nl> func _getClassChild ( Int , _MagicMirrorData ) - > ( String , MirrorType ) <nl> @ asmname ( " swift_ClassMirror_quickLookObject " ) public <nl> func _getClassQuickLookObject ( data : _MagicMirrorData ) - > QuickLookObject ? <nl> <nl> - public / / protocol conformance used in Reflection . mm <nl> struct _ClassMirror : MirrorType { <nl> let data : _MagicMirrorData <nl> <nl> - public var value : Any { return data . value } <nl> - public var valueType : Any . Type { return data . valueType } <nl> - public var objectIdentifier : ObjectIdentifier ? { <nl> + var value : Any { return data . value } <nl> + var valueType : Any . Type { return data . valueType } <nl> + var objectIdentifier : ObjectIdentifier ? { <nl> return data . _loadValue ( ) as ObjectIdentifier <nl> } <nl> - public var count : Int { <nl> + var count : Int { <nl> return _getClassCount ( data ) <nl> } <nl> - public subscript ( i : Int ) - > ( String , MirrorType ) { <nl> + subscript ( i : Int ) - > ( String , MirrorType ) { <nl> return _getClassChild ( i , data ) <nl> } <nl> - public var summary : String { <nl> + var summary : String { <nl> return _stdlib_getDemangledTypeName ( value ) <nl> } <nl> - public var quickLookObject : QuickLookObject ? { <nl> + var quickLookObject : QuickLookObject ? { <nl> return _getClassQuickLookObject ( data ) <nl> } <nl> - public var disposition : MirrorDisposition { return . Class } <nl> + var disposition : MirrorDisposition { return . Class } <nl> } <nl> <nl> - public / / protocol conformance used in Reflection . mm <nl> struct _ClassSuperMirror : MirrorType { <nl> let data : _MagicMirrorData <nl> <nl> - public var value : Any { return data . value } <nl> - public var valueType : Any . Type { return data . valueType } <nl> + var value : Any { return data . value } <nl> + var valueType : Any . Type { return data . valueType } <nl> <nl> / / Suppress the value identifier for super mirrors . 
<nl> - public var objectIdentifier : ObjectIdentifier ? { <nl> + var objectIdentifier : ObjectIdentifier ? { <nl> return nil <nl> } <nl> - public var count : Int { <nl> + var count : Int { <nl> return _getClassCount ( data ) <nl> } <nl> - public subscript ( i : Int ) - > ( String , MirrorType ) { <nl> + subscript ( i : Int ) - > ( String , MirrorType ) { <nl> return _getClassChild ( i , data ) <nl> } <nl> - public var summary : String { <nl> + var summary : String { <nl> return _stdlib_getDemangledTypeName ( value ) <nl> } <nl> - public var quickLookObject : QuickLookObject ? { return nil } <nl> - public var disposition : MirrorDisposition { return . Class } <nl> + var quickLookObject : QuickLookObject ? { return nil } <nl> + var disposition : MirrorDisposition { return . Class } <nl> } <nl> <nl>
Revert " [ stdlib ] Make some structs public which adopt MirrorType , because the conformances are accessed in Reflection . nm "
apple/swift
f82dbb8176701d9314057762dfd61754178d29d0
2014-10-27T15:37:33Z
mmm a / tensorflow / core / profiler / utils / xplane_schema . cc <nl> ppp b / tensorflow / core / profiler / utils / xplane_schema . cc <nl> limitations under the License . <nl> namespace tensorflow { <nl> namespace profiler { <nl> <nl> - const absl : : string_view kHostThreads = " Host Threads " ; <nl> - const absl : : string_view kGpuPlanePrefix = " GPU : " ; <nl> + const absl : : string_view kHostThreads = " / host : CPU " ; <nl> + const absl : : string_view kGpuPlanePrefix = " / device : GPU : " ; <nl> const int32 kHostPlaneId = 49 ; <nl> const int32 kGpuPlaneBaseId = 0 ; <nl> <nl>
Change xplane name to old style device name .
tensorflow/tensorflow
d0499921175c93a02a96161747cee991307b8d45
2020-01-29T00:58:12Z
mmm a / src / core / settings . cpp <nl> ppp b / src / core / settings . cpp <nl> void LogSettings ( ) { <nl> log_setting ( " System_RegionIndex " , values . region_index . GetValue ( ) ) ; <nl> log_setting ( " System_TimeZoneIndex " , values . time_zone_index . GetValue ( ) ) ; <nl> log_setting ( " Core_UseMultiCore " , values . use_multi_core . GetValue ( ) ) ; <nl> + log_setting ( " CPU_Accuracy " , values . cpu_accuracy ) ; <nl> log_setting ( " Renderer_UseResolutionFactor " , values . resolution_factor . GetValue ( ) ) ; <nl> log_setting ( " Renderer_UseFrameLimit " , values . use_frame_limit . GetValue ( ) ) ; <nl> log_setting ( " Renderer_FrameLimit " , values . frame_limit . GetValue ( ) ) ; <nl>
Merge pull request from lat9nq / log - cpu - accuracy
yuzu-emu/yuzu
e82997374291f738b013581f0b2833028b25c21f
2020-11-07T22:01:33Z
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> include ( cmake / OpenCVGenConfig . cmake ) <nl> include ( cmake / OpenCVGenInfoPlist . cmake ) <nl> <nl> # Generate environment setup file <nl> - if ( INSTALL_TESTS AND OPENCV_TEST_DATA_PATH AND UNIX AND NOT ANDROID ) <nl> - configure_file ( " $ { CMAKE_CURRENT_SOURCE_DIR } / cmake / templates / opencv_testing . sh . in " <nl> - " $ { CMAKE_BINARY_DIR } / unix - install / opencv_testing . sh " @ ONLY ) <nl> - install ( FILES " $ { CMAKE_BINARY_DIR } / unix - install / opencv_testing . sh " <nl> - DESTINATION / etc / profile . d / COMPONENT tests ) <nl> - configure_file ( " $ { CMAKE_CURRENT_SOURCE_DIR } / cmake / templates / opencv_run_all_tests . sh . in " <nl> - " $ { CMAKE_BINARY_DIR } / unix - install / opencv_run_all_tests . sh " @ ONLY ) <nl> - install ( FILES " $ { CMAKE_BINARY_DIR } / unix - install / opencv_run_all_tests . sh " <nl> - PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ OWNER_EXECUTE GROUP_EXECUTE WORLD_EXECUTE <nl> - DESTINATION $ { OPENCV_TEST_INSTALL_PATH } COMPONENT tests ) <nl> + if ( INSTALL_TESTS AND OPENCV_TEST_DATA_PATH AND UNIX ) <nl> + if ( ANDROID ) <nl> + get_filename_component ( TEST_PATH $ { OPENCV_TEST_INSTALL_PATH } DIRECTORY ) <nl> + configure_file ( " $ { CMAKE_CURRENT_SOURCE_DIR } / cmake / templates / opencv_run_all_tests_android . sh . in " <nl> + " $ { CMAKE_BINARY_DIR } / unix - install / opencv_run_all_tests . sh " @ ONLY ) <nl> + install ( PROGRAMS " $ { CMAKE_BINARY_DIR } / unix - install / opencv_run_all_tests . sh " <nl> + DESTINATION $ { CMAKE_INSTALL_PREFIX } COMPONENT tests ) <nl> + else ( ) <nl> + configure_file ( " $ { CMAKE_CURRENT_SOURCE_DIR } / cmake / templates / opencv_testing . sh . in " <nl> + " $ { CMAKE_BINARY_DIR } / unix - install / opencv_testing . sh " @ ONLY ) <nl> + install ( FILES " $ { CMAKE_BINARY_DIR } / unix - install / opencv_testing . sh " <nl> + DESTINATION / etc / profile . 
d / COMPONENT tests ) <nl> + configure_file ( " $ { CMAKE_CURRENT_SOURCE_DIR } / cmake / templates / opencv_run_all_tests_unix . sh . in " <nl> + " $ { CMAKE_BINARY_DIR } / unix - install / opencv_run_all_tests . sh " @ ONLY ) <nl> + install ( PROGRAMS " $ { CMAKE_BINARY_DIR } / unix - install / opencv_run_all_tests . sh " <nl> + DESTINATION $ { OPENCV_TEST_INSTALL_PATH } COMPONENT tests ) <nl> + <nl> + endif ( ) <nl> endif ( ) <nl> <nl> # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> new file mode 100644 <nl> index 00000000000 . . 93373fa964b <nl> mmm / dev / null <nl> ppp b / cmake / templates / opencv_run_all_tests_android . sh . in <nl> <nl> + # ! / bin / sh <nl> + <nl> + BASE_DIR = ` dirname $ 0 ` <nl> + OPENCV_TEST_PATH = $ BASE_DIR / @ TEST_PATH @ <nl> + OPENCV_TEST_DATA_PATH = $ BASE_DIR / sdk / etc / testdata / <nl> + <nl> + if [ $ # - ne 1 ] ; then <nl> + echo " Device architecture is not preset in command line " <nl> + echo " Tests are available for architectures : ` ls - m $ { OPENCV_TEST_PATH } ` " <nl> + echo " Usage : $ 0 < target_device_arch > " <nl> + return 1 <nl> + else <nl> + TARGET_ARCH = $ 1 <nl> + fi <nl> + <nl> + if [ - z ` which adb ` ] ; then <nl> + echo " adb command was not found in PATH " <nl> + return 1 <nl> + fi <nl> + <nl> + adb push $ OPENCV_TEST_DATA_PATH / sdcard / opencv_testdata <nl> + <nl> + adb shell " mkdir - p / data / local / tmp / opencv_test " <nl> + SUMMARY_STATUS = 0 <nl> + for t in " $ OPENCV_TEST_PATH / $ TARGET_ARCH / " opencv_test_ * " $ OPENCV_TEST_PATH / $ TARGET_ARCH / " opencv_perf_ * ; <nl> + do <nl> + test_name = ` basename " $ t " ` <nl> + report = " $ test_name - ` date - - rfc - 3339 = date ` . 
xml " <nl> + adb push $ t / data / local / tmp / opencv_test / <nl> + adb shell " export OPENCV_TEST_DATA_PATH = / sdcard / opencv_testdata & & / data / local / tmp / opencv_test / $ test_name - - perf_min_samples = 1 - - perf_force_samples = 1 - - gtest_output = xml : / data / local / tmp / opencv_test / $ report " <nl> + adb pull " / data / local / tmp / opencv_test / $ report " $ report <nl> + TEST_STATUS = 0 <nl> + if [ - e $ report ] ; then <nl> + if [ ` grep - c " < fail " $ report ` - ne 0 ] ; then <nl> + TEST_STATUS = 2 <nl> + fi <nl> + else <nl> + TEST_STATUS = 3 <nl> + fi <nl> + if [ $ TEST_STATUS - ne 0 ] ; then <nl> + SUMMARY_STATUS = $ TEST_STATUS <nl> + fi <nl> + done <nl> + <nl> + if [ $ SUMMARY_STATUS - eq 0 ] ; then <nl> + echo " All OpenCV tests finished successfully " <nl> + else <nl> + echo " OpenCV tests finished with status $ SUMMARY_STATUS " <nl> + fi <nl> + <nl> + return $ SUMMARY_STATUS <nl> \ No newline at end of file <nl> similarity index 100 % <nl> rename from cmake / templates / opencv_run_all_tests . sh . in <nl> rename to cmake / templates / opencv_run_all_tests_unix . sh . in <nl>
opencv_run_all_tests . sh implemented for Android SDK .
opencv/opencv
d02c2911607b199e18988c29c3fb9df141555974
2014-02-12T10:21:58Z
mmm a / INSTALL . md <nl> ppp b / INSTALL . md <nl> <nl> See http : / / caffe . berkeleyvision . org / installation . html for the latest <nl> installation instructions . <nl> <nl> - Check the issue tracker in case you need help : <nl> - https : / / github . com / BVLC / caffe / issues <nl> + Check the users group in case you need help : <nl> + https : / / groups . google . com / forum / # ! forum / caffe - users <nl>
Merge pull request from shelhamer / install - caffe - users
BVLC/caffe
fb9754dfb10199436daf0f43f55abce3449c40cf
2015-10-19T18:43:36Z
mmm a / src / app / cmd / flatten_layers . cpp <nl> ppp b / src / app / cmd / flatten_layers . cpp <nl> <nl> # include " app / cmd / flatten_layers . h " <nl> <nl> # include " app / cmd / add_layer . h " <nl> + # include " app / cmd / set_layer_name . h " <nl> # include " app / cmd / configure_background . h " <nl> # include " app / cmd / copy_rect . h " <nl> # include " app / cmd / remove_layer . h " <nl> void FlattenLayers : : onExecute ( ) <nl> sprite - > width ( ) , <nl> sprite - > height ( ) ) ) ; <nl> <nl> - / / If there aren ' t a background layer we must to create the background . <nl> - LayerImage * background = sprite - > backgroundLayer ( ) ; <nl> - bool created = false ; <nl> - if ( ! background ) { <nl> - background = new LayerImage ( sprite ) ; <nl> - executeAndAdd ( new cmd : : AddLayer ( sprite - > folder ( ) , background , nullptr ) ) ; <nl> - executeAndAdd ( new cmd : : ConfigureBackground ( background ) ) ; <nl> - created = true ; <nl> + LayerImage * flatLayer ; / / The layer onto which everything will be flattened . <nl> + color_t bgcolor ; / / The background color to use for flatLayer . <nl> + <nl> + flatLayer = sprite - > backgroundLayer ( ) ; <nl> + if ( flatLayer & & flatLayer - > isVisible ( ) ) <nl> + { <nl> + / / There exists a visible background layer , so we will flatten onto that . <nl> + bgcolor = doc - > bgColor ( flatLayer ) ; <nl> } <nl> - <nl> - color_t bgcolor ; <nl> - if ( created | | ! background - > isVisible ( ) ) <nl> - bgcolor = doc - > bgColor ( background ) ; / / Use color bar background color <nl> else <nl> + { <nl> + / / Create a new transparent layer to flatten everything onto . 
<nl> + flatLayer = new LayerImage ( sprite ) ; <nl> + ASSERT ( flatLayer - > isVisible ( ) ) ; <nl> + executeAndAdd ( new cmd : : AddLayer ( sprite - > folder ( ) , flatLayer , nullptr ) ) ; <nl> + executeAndAdd ( new cmd : : SetLayerName ( flatLayer , " Flattened " ) ) ; <nl> bgcolor = sprite - > transparentColor ( ) ; <nl> + } <nl> <nl> render : : Render render ; <nl> render . setBgType ( render : : BgType : : NONE ) ; <nl> void FlattenLayers : : onExecute ( ) <nl> / / TODO Keep cel links when possible <nl> <nl> ImageRef cel_image ; <nl> - Cel * cel = background - > cel ( frame ) ; <nl> + Cel * cel = flatLayer - > cel ( frame ) ; <nl> if ( cel ) { <nl> if ( cel - > links ( ) ) <nl> executeAndAdd ( new cmd : : UnlinkCel ( cel ) ) ; <nl> void FlattenLayers : : onExecute ( ) <nl> else { <nl> cel_image . reset ( Image : : createCopy ( image . get ( ) ) ) ; <nl> cel = new Cel ( frame , cel_image ) ; <nl> - background - > addCel ( cel ) ; <nl> + flatLayer - > addCel ( cel ) ; <nl> } <nl> } <nl> <nl> - / / Show background if it ' s hidden <nl> - if ( ! background - > isVisible ( ) ) { <nl> - LayerFlags newFlags = LayerFlags ( <nl> - int ( background - > flags ( ) ) | int ( LayerFlags : : Visible ) ) ; <nl> - <nl> - executeAndAdd ( new cmd : : SetLayerFlags ( background , newFlags ) ) ; <nl> - } <nl> - <nl> / / Delete old layers . <nl> LayerList layers = sprite - > folder ( ) - > getLayersList ( ) ; <nl> for ( Layer * layer : layers ) <nl> - if ( layer ! = background ) <nl> + if ( layer ! = flatLayer ) <nl> executeAndAdd ( new cmd : : RemoveLayer ( layer ) ) ; <nl> } <nl> <nl>
Flatten layers command no longer converts result to background when it shouldn ' t . ( fix )
aseprite/aseprite
3eb01db8e1341c3d82d4be08e5d8001636886ce0
2015-09-21T17:19:39Z
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> target_link_libraries ( lib $ { APP_NAME_LC } PUBLIC $ { core_DEPENDS } $ { SYSTEM_LDFLAGS } <nl> set_target_properties ( lib $ { APP_NAME_LC } PROPERTIES PROJECT_LABEL " xbmc " ) <nl> source_group_by_folder ( lib $ { APP_NAME_LC } RELATIVE $ { CMAKE_SOURCE_DIR } / xbmc ) <nl> if ( WIN32 ) <nl> - add_precompiled_header ( lib $ { APP_NAME_LC } pch . h $ { CMAKE_SOURCE_DIR } / xbmc / platform / win32 / pch . cpp PCH_TARGET kodi ) <nl> + add_precompiled_header ( lib $ { APP_NAME_LC } pch . h $ { CMAKE_SOURCE_DIR } / xbmc / platform / win32 / pch . cpp ) <nl> set_language_cxx ( lib $ { APP_NAME_LC } ) <nl> endif ( ) <nl> <nl> whole_archive ( _TEST_LIBRARIES $ { core_DEPENDS } gtest ) <nl> target_link_libraries ( $ { APP_NAME_LC } - test PRIVATE $ { SYSTEM_LDFLAGS } $ { _TEST_LIBRARIES } lib $ { APP_NAME_LC } $ { DEPLIBS } $ { CMAKE_DL_LIBS } ) <nl> unset ( _TEST_LIBRARIES ) <nl> add_dependencies ( $ { APP_NAME_LC } - test $ { APP_NAME_LC } - libraries export - files ) <nl> - if ( WIN32 ) <nl> - add_precompiled_header ( $ { APP_NAME_LC } - test pch . h $ { CMAKE_SOURCE_DIR } / xbmc / platform / win32 / pch . cpp PCH_TARGET kodi ) <nl> - endif ( ) <nl> <nl> # Enable unit - test related targets <nl> if ( CORE_HOST_IS_TARGET ) <nl> mmm a / xbmc / interfaces / swig / AddonModuleXbmc . i <nl> ppp b / xbmc / interfaces / swig / AddonModuleXbmc . i <nl> <nl> % module ( directors = " 1 " ) xbmc <nl> <nl> % { <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " interfaces / legacy / Player . h " <nl> # include " interfaces / legacy / RenderCapture . h " <nl> # include " interfaces / legacy / Keyboard . h " <nl> mmm a / xbmc / interfaces / swig / AddonModuleXbmcaddon . i <nl> ppp b / xbmc / interfaces / swig / AddonModuleXbmcaddon . 
i <nl> <nl> % module xbmcaddon <nl> <nl> % { <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " interfaces / legacy / Addon . h " <nl> <nl> using namespace XBMCAddon ; <nl> mmm a / xbmc / interfaces / swig / AddonModuleXbmcgui . i <nl> ppp b / xbmc / interfaces / swig / AddonModuleXbmcgui . i <nl> <nl> % module ( directors = " 1 " ) xbmcgui <nl> <nl> % { <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " interfaces / legacy / Dialog . h " <nl> # include " interfaces / legacy / ModuleXbmcgui . h " <nl> # include " interfaces / legacy / Control . h " <nl> mmm a / xbmc / interfaces / swig / AddonModuleXbmcplugin . i <nl> ppp b / xbmc / interfaces / swig / AddonModuleXbmcplugin . i <nl> <nl> % module xbmcplugin <nl> <nl> % { <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " interfaces / legacy / ModuleXbmcplugin . h " <nl> <nl> using namespace XBMCAddon ; <nl> mmm a / xbmc / interfaces / swig / AddonModuleXbmcvfs . i <nl> ppp b / xbmc / interfaces / swig / AddonModuleXbmcvfs . i <nl> <nl> % module xbmcvfs <nl> <nl> % { <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " interfaces / legacy / ModuleXbmcvfs . h " <nl> # include " interfaces / legacy / File . h " <nl> # include " interfaces / legacy / Stat . 
h " <nl> mmm a / xbmc / interfaces / swig / AddonModuleXbmcwsgi . i <nl> ppp b / xbmc / interfaces / swig / AddonModuleXbmcwsgi . i <nl> <nl> * / <nl> <nl> % begin % { <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " system . h " <nl> <nl> # ifdef HAS_WEB_SERVER <nl> mmm a / xbmc / interfaces / swig / CMakeLists . txt <nl> ppp b / xbmc / interfaces / swig / CMakeLists . txt <nl> endforeach ( ) <nl> add_library ( python_binding STATIC $ { SOURCES } ) <nl> set_target_properties ( python_binding PROPERTIES POSITION_INDEPENDENT_CODE TRUE <nl> FOLDER " Build Utilities " ) <nl> - set ( core_DEPENDS python_binding $ { core_DEPENDS } CACHE STRING " " FORCE ) <nl> - if ( WIN32 ) <nl> - add_precompiled_header ( python_binding pch . h $ { CMAKE_SOURCE_DIR } / xbmc / platform / win32 / pch . cpp PCH_TARGET kodi ) <nl> - endif ( ) <nl> + set ( core_DEPENDS python_binding $ { core_DEPENDS } CACHE STRING " " FORCE ) <nl> \ No newline at end of file <nl> mmm a / xbmc / network / test / TestWebServer . cpp <nl> ppp b / xbmc / network / test / TestWebServer . cpp <nl> <nl> * <nl> * / <nl> <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include < errno . h > <nl> # include < stdlib . h > <nl> <nl> mmm a / xbmc / utils / test / TestArchive . cpp <nl> ppp b / xbmc / utils / test / TestArchive . cpp <nl> <nl> * <nl> * / <nl> <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " utils / Archive . h " <nl> # include " utils / Variant . 
h " <nl> # include " filesystem / File . h " <nl> mmm a / xbmc / utils / test / TestCPUInfo . cpp <nl> ppp b / xbmc / utils / test / TestCPUInfo . cpp <nl> <nl> * <nl> * / <nl> <nl> + # if defined ( TARGET_WINDOWS ) | | defined ( TARGET_WIN10 ) <nl> + # if ! defined ( WIN32_LEAN_AND_MEAN ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # endif <nl> + # include < windows . h > <nl> + # endif <nl> + <nl> # include " utils / CPUInfo . h " <nl> # include " utils / Temperature . h " <nl> # include " settings / AdvancedSettings . h " <nl>
Merge pull request from Paxxi / pch
xbmc/xbmc
5e3af173c3ec0ad357e1b775644537fb92ef3246
2017-05-20T15:33:08Z
mmm a / contracts / eosiolib / dispatcher . hpp <nl> ppp b / contracts / eosiolib / dispatcher . hpp <nl> namespace eosio { <nl> <nl> template < typename T , typename . . . Args > <nl> bool execute_action ( T * obj , void ( T : : * func ) ( Args . . . ) ) { <nl> - char buffer [ action_data_size ( ) ] ; <nl> - read_action_data ( buffer , sizeof ( buffer ) ) ; <nl> + size_t size = action_data_size ( ) ; <nl> + char default_buffer [ 2048 ] ; <nl> + / / using malloc / free here potentially is not exception - safe , although WASM doesn ' t support exceptions <nl> + char * buffer = size < = sizeof ( default_buffer ) ? default_buffer : ( char * ) malloc ( size ) ; <nl> + read_action_data ( buffer , size ) ; <nl> <nl> - auto args = unpack < std : : tuple < std : : decay_t < Args > . . . > > ( buffer , sizeof ( buffer ) ) ; <nl> + auto args = unpack < std : : tuple < std : : decay_t < Args > . . . > > ( buffer , size ) ; <nl> + <nl> + if ( buffer ! = default_buffer ) { <nl> + free ( buffer ) ; <nl> + } <nl> <nl> auto f2 = [ & ] ( auto . . . a ) { <nl> ( obj - > * func ) ( a . . . ) ; <nl> mmm a / contracts / eosiolib / multi_index . hpp <nl> ppp b / contracts / eosiolib / multi_index . hpp <nl> class multi_index <nl> if ( itr2 ! = _items_vector . rend ( ) ) <nl> return * itr2 - > _item ; <nl> <nl> - auto size = db_get_i64 ( itr , nullptr , 0 ) ; <nl> + size_t size = db_get_i64 ( itr , nullptr , 0 ) ; <nl> eosio_assert ( size > = 0 , " error reading iterator " ) ; <nl> - char tmp [ size ] ; <nl> - db_get_i64 ( itr , tmp , uint32_t ( size ) ) ; <nl> <nl> - datastream < const char * > ds ( tmp , uint32_t ( size ) ) ; <nl> + char default_buffer [ 2048 ] ; <nl> + / / using malloc / free here potentially is not exception - safe , although WASM doesn ' t support exceptions <nl> + char * buffer = size < = sizeof ( default_buffer ) ? 
default_buffer : ( char * ) malloc ( size ) ; <nl> + <nl> + db_get_i64 ( itr , buffer , size ) ; <nl> + <nl> + datastream < const char * > ds ( buffer , uint32_t ( size ) ) ; <nl> + <nl> + if ( buffer = = default_buffer ) { <nl> + free ( buffer ) ; <nl> + } <nl> <nl> auto itm = std : : make_unique < item > ( this , [ & ] ( auto & i ) { <nl> T & val = static_cast < T & > ( i ) ; <nl> class multi_index <nl> T & obj = static_cast < T & > ( i ) ; <nl> constructor ( obj ) ; <nl> <nl> - char tmp [ pack_size ( obj ) ] ; <nl> - datastream < char * > ds ( tmp , sizeof ( tmp ) ) ; <nl> + size_t size = pack_size ( obj ) ; <nl> + <nl> + char default_buffer [ 2048 ] ; <nl> + / / using malloc / free here potentially is not exception - safe , although WASM doesn ' t support exceptions <nl> + char * buffer = size < = sizeof ( default_buffer ) ? default_buffer : ( char * ) malloc ( size ) ; <nl> + <nl> + datastream < char * > ds ( buffer , size ) ; <nl> ds < < obj ; <nl> <nl> auto pk = obj . primary_key ( ) ; <nl> <nl> - i . __primary_itr = db_store_i64 ( _scope , TableName , payer , pk , tmp , sizeof ( tmp ) ) ; <nl> + i . __primary_itr = db_store_i64 ( _scope , TableName , payer , pk , buffer , size ) ; <nl> + <nl> + if ( buffer = = default_buffer ) { <nl> + free ( buffer ) ; <nl> + } <nl> <nl> if ( pk > = _next_primary_key ) <nl> _next_primary_key = ( pk > = no_available_primary_key ) ? no_available_primary_key : ( pk + 1 ) ; <nl> class multi_index <nl> <nl> eosio_assert ( pk = = obj . primary_key ( ) , " updater cannot change primary key when modifying an object " ) ; <nl> <nl> - char tmp [ pack_size ( obj ) ] ; <nl> - datastream < char * > ds ( tmp , sizeof ( tmp ) ) ; <nl> + size_t size = pack_size ( obj ) ; <nl> + char default_buffer [ 2048 ] ; <nl> + / / using malloc / free here potentially is not exception - safe , although WASM doesn ' t support exceptions <nl> + char * buffer = size < = sizeof ( default_buffer ) ? 
default_buffer : ( char * ) malloc ( size ) ; <nl> + <nl> + datastream < char * > ds ( buffer , size ) ; <nl> ds < < obj ; <nl> <nl> - db_update_i64 ( objitem . __primary_itr , payer , tmp , sizeof ( tmp ) ) ; <nl> + db_update_i64 ( objitem . __primary_itr , payer , buffer , size ) ; <nl> + <nl> + if ( buffer = = default_buffer ) { <nl> + free ( buffer ) ; <nl> + } <nl> <nl> if ( pk > = _next_primary_key ) <nl> _next_primary_key = ( pk > = no_available_primary_key ) ? no_available_primary_key : ( pk + 1 ) ; <nl> mmm a / tests / wasm_tests / multisig_tests . cpp <nl> ppp b / tests / wasm_tests / multisig_tests . cpp <nl> <nl> # include < boost / test / unit_test . hpp > <nl> # include < eosio / testing / tester . hpp > <nl> # include < eosio / chain / contracts / abi_serializer . hpp > <nl> + # include < eosio / chain / wast_to_wasm . hpp > <nl> # include < eosio / chain_plugin / chain_plugin . hpp > <nl> <nl> # include < eosio . msig / eosio . msig . wast . hpp > <nl> # include < eosio . msig / eosio . msig . abi . hpp > <nl> <nl> + # include < exchange / exchange . wast . hpp > <nl> + # include < exchange / exchange . abi . hpp > <nl> + <nl> # include < Runtime / Runtime . h > <nl> <nl> # include < fc / variant_object . 
hpp > <nl> transaction eosio_msig_tester : : reqauth ( account_name from , const vector < permissi <nl> ( " region " , 1 ) <nl> ( " ref_block_num " , 2 ) <nl> ( " ref_block_prefix " , 3 ) <nl> - ( " packed_bandwidth_words " , 4 ) <nl> - ( " context_free_cpu_bandwidth " , 5 ) <nl> + ( " net_usage_words " , 4 ) <nl> + ( " kcpu_usage " , 5 ) <nl> + ( " delay_sec " , 0 ) <nl> ( " actions " , fc : : variants ( { <nl> fc : : mutable_variant_object ( ) <nl> ( " account " , name ( config : : system_account_name ) ) <nl> BOOST_FIXTURE_TEST_CASE ( propose_with_wrong_requested_auth , eosio_msig_tester ) <nl> ( " trx " , trx ) <nl> ( " requested " , vector < permission_level > { { N ( alice ) , config : : active_name } } ) <nl> ) ) ; <nl> + } FC_LOG_AND_RETHROW ( ) <nl> + <nl> <nl> + BOOST_FIXTURE_TEST_CASE ( big_transaction , eosio_msig_tester ) try { <nl> + vector < permission_level > perm = { { N ( alice ) , config : : active_name } , { N ( bob ) , config : : active_name } } ; <nl> + auto wasm = wast_to_wasm ( exchange_wast ) ; <nl> + <nl> + variant pretty_trx = fc : : mutable_variant_object ( ) <nl> + ( " expiration " , " 2020 - 01 - 01T00 : 30 " ) <nl> + ( " region " , 1 ) <nl> + ( " ref_block_num " , 2 ) <nl> + ( " ref_block_prefix " , 3 ) <nl> + ( " net_usage_words " , 4 ) <nl> + ( " kcpu_usage " , 10485760 ) <nl> + ( " delay_sec " , 0 ) <nl> + ( " actions " , fc : : variants ( { <nl> + fc : : mutable_variant_object ( ) <nl> + ( " account " , name ( config : : system_account_name ) ) <nl> + ( " name " , " setcode " ) <nl> + ( " authorization " , perm ) <nl> + ( " data " , fc : : mutable_variant_object ( ) <nl> + ( " account " , " alice " ) <nl> + ( " vmtype " , 0 ) <nl> + ( " vmversion " , 0 ) <nl> + ( " code " , bytes ( wasm . begin ( ) , wasm . end ( ) ) ) <nl> + ) <nl> + } ) <nl> + ) ; <nl> <nl> + transaction trx ; <nl> + contracts : : abi_serializer : : from_variant ( pretty_trx , trx , get_resolver ( ) ) ; <nl> + / * <nl> + trx . actions . 
emplace_back ( perm , <nl> + contracts : : setcode { <nl> + . account = N ( alice ) , <nl> + . vmtype = 0 , <nl> + . vmversion = 0 , <nl> + / / . code = bytes ( eosio_msig_wast , eosio_msig_wast + strlen ( eosio_msig_wast ) ) <nl> + . code = bytes ( wasm . begin ( ) , wasm . end ( ) ) <nl> + } ) ; <nl> + * / <nl> + BOOST_REQUIRE_EQUAL ( success ( ) , push_action ( N ( alice ) , N ( propose ) , mvo ( ) <nl> + ( " proposer " , " alice " ) <nl> + ( " proposal_name " , " first " ) <nl> + ( " trx " , trx ) <nl> + ( " requested " , perm ) <nl> + ) ) ; <nl> <nl> + / / approve by alice <nl> + BOOST_REQUIRE_EQUAL ( success ( ) , push_action ( N ( alice ) , N ( approve ) , mvo ( ) <nl> + ( " proposer " , " alice " ) <nl> + ( " proposal_name " , " first " ) <nl> + ( " level " , permission_level { N ( alice ) , config : : active_name } ) <nl> + ) ) ; <nl> + / / approve by bob and execute <nl> + BOOST_REQUIRE_EQUAL ( success ( ) , push_action ( N ( bob ) , N ( approve ) , mvo ( ) <nl> + ( " proposer " , " alice " ) <nl> + ( " proposal_name " , " first " ) <nl> + ( " level " , permission_level { N ( bob ) , config : : active_name } ) <nl> + ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( success ( ) , push_action ( N ( alice ) , N ( exec ) , mvo ( ) <nl> + ( " proposer " , " alice " ) <nl> + ( " proposal_name " , " first " ) <nl> + ( " executer " , " alice " ) <nl> + ) ) ; <nl> + auto traces = control - > push_deferred_transactions ( true ) ; <nl> + BOOST_CHECK_EQUAL ( 1 , traces . size ( ) ) ; <nl> } FC_LOG_AND_RETHROW ( ) <nl> <nl> BOOST_AUTO_TEST_SUITE_END ( ) <nl>
use heap for serialization / desirialization in dispatcher and multi_index in case of big objects , big transaction unit - test for multisig
EOSIO/eos
4014ed4e79722defa15707fe98189c8d5caf9266
2018-04-03T18:21:14Z