diff (stringlengths 41–2.03M) | msg (stringlengths 1–1.5k, ⌀) | repo (stringlengths 5–40) | sha (stringlengths 40–40) | time (stringlengths 20–20) |
---|---|---|---|---|
mmm a / include / swift / AST / Module . h <nl> ppp b / include / swift / AST / Module . h <nl> class ModuleDecl : public DeclContext , public TypeDecl { <nl> <nl> bool isClangModule ( ) const ; <nl> void addFile ( FileUnit & newFile ) ; <nl> - void removeFile ( FileUnit & existingFile ) ; <nl> <nl> / / / Creates a map from \ c # filePath strings to corresponding \ c # file <nl> / / / strings , diagnosing any conflicts . <nl> mmm a / lib / AST / Module . cpp <nl> ppp b / lib / AST / Module . cpp <nl> void ModuleDecl : : addFile ( FileUnit & newFile ) { <nl> clearLookupCache ( ) ; <nl> } <nl> <nl> - void ModuleDecl : : removeFile ( FileUnit & existingFile ) { <nl> - / / Do a reverse search ; usually the file to be deleted will be at the end . <nl> - std : : reverse_iterator < decltype ( Files ) : : iterator > I ( Files . end ( ) ) , <nl> - E ( Files . begin ( ) ) ; <nl> - I = std : : find ( I , E , & existingFile ) ; <nl> - assert ( I ! = E ) ; <nl> - <nl> - / / Adjust for the std : : reverse_iterator offset . <nl> - + + I ; <nl> - Files . erase ( I . base ( ) ) ; <nl> - clearLookupCache ( ) ; <nl> - } <nl> - <nl> # define FORWARD ( name , args ) \ <nl> for ( const FileUnit * file : getFiles ( ) ) \ <nl> file - > name args ; <nl> mmm a / lib / Frontend / Frontend . cpp <nl> ppp b / lib / Frontend / Frontend . cpp <nl> bool CompilerInstance : : loadPartialModulesAndImplicitImports ( ) { <nl> / / Parse all the partial modules first . <nl> for ( auto & PM : PartialModules ) { <nl> assert ( PM . ModuleBuffer ) ; <nl> - if ( ! SML - > loadAST ( * MainModule , SourceLoc ( ) , / * moduleInterfacePath * / " " , <nl> - std : : move ( PM . ModuleBuffer ) , std : : move ( PM . ModuleDocBuffer ) , <nl> - std : : move ( PM . ModuleSourceInfoBuffer ) , / * isFramework * / false ) ) <nl> + auto * file = <nl> + SML - > loadAST ( * MainModule , SourceLoc ( ) , / * moduleInterfacePath * / " " , <nl> + std : : move ( PM . ModuleBuffer ) , std : : move ( PM . ModuleDocBuffer ) , <nl> + std : : move ( PM . ModuleSourceInfoBuffer ) , <nl> + / * isFramework * / false ) ; <nl> + if ( file ) { <nl> + MainModule - > addFile ( * file ) ; <nl> + } else { <nl> hadLoadError = true ; <nl> + } <nl> } <nl> return hadLoadError ; <nl> } <nl> mmm a / lib / Serialization / SerializedModuleLoader . cpp <nl> ppp b / lib / Serialization / SerializedModuleLoader . cpp <nl> FileUnit * SerializedModuleLoaderBase : : loadAST ( <nl> <nl> / / We ' ve loaded the file . Now try to bring it into the AST . <nl> auto fileUnit = new ( Ctx ) SerializedASTFile ( M , * loadedModuleFile ) ; <nl> - M . addFile ( * fileUnit ) ; <nl> if ( extendedInfo . isTestable ( ) ) <nl> M . setTestingEnabled ( ) ; <nl> if ( extendedInfo . arePrivateImportsEnabled ( ) ) <nl> FileUnit * SerializedModuleLoaderBase : : loadAST ( <nl> findOverlayFiles ( diagLoc . getValueOr ( SourceLoc ( ) ) , & M , fileUnit ) ; <nl> return fileUnit ; <nl> } <nl> - <nl> - M . removeFile ( * fileUnit ) ; <nl> } <nl> <nl> / / From here on is the failure path . <nl> SerializedModuleLoaderBase : : loadModule ( SourceLoc importLoc , <nl> StringRef moduleInterfacePathStr = <nl> Ctx . AllocateCopy ( moduleInterfacePath . str ( ) ) ; <nl> <nl> - if ( ! loadAST ( * M , moduleID . Loc , moduleInterfacePathStr , <nl> - std : : move ( moduleInputBuffer ) , std : : move ( moduleDocInputBuffer ) , <nl> - std : : move ( moduleSourceInfoInputBuffer ) , isFramework ) ) { <nl> + auto * file = <nl> + loadAST ( * M , moduleID . 
Loc , moduleInterfacePathStr , <nl> + std : : move ( moduleInputBuffer ) , std : : move ( moduleDocInputBuffer ) , <nl> + std : : move ( moduleSourceInfoInputBuffer ) , isFramework ) ; <nl> + if ( file ) { <nl> + M - > addFile ( * file ) ; <nl> + } else { <nl> M - > setFailedToLoad ( ) ; <nl> } <nl> - <nl> return M ; <nl> } <nl> <nl> MemoryBufferSerializedModuleLoader : : loadModule ( SourceLoc importLoc , <nl> auto * M = ModuleDecl : : create ( moduleID . Item , Ctx ) ; <nl> SWIFT_DEFER { M - > setHasResolvedImports ( ) ; } ; <nl> <nl> - if ( ! loadAST ( * M , moduleID . Loc , / * moduleInterfacePath * / " " , <nl> - std : : move ( moduleInputBuffer ) , { } , { } , isFramework ) ) { <nl> + auto * file = loadAST ( * M , moduleID . Loc , / * moduleInterfacePath * / " " , <nl> + std : : move ( moduleInputBuffer ) , { } , { } , isFramework ) ; <nl> + if ( ! file ) <nl> return nullptr ; <nl> - } <nl> <nl> + M - > addFile ( * file ) ; <nl> Ctx . LoadedModules [ moduleID . Item ] = M ; <nl> return M ; <nl> } <nl> mmm a / tools / SourceKit / lib / SwiftLang / SwiftIndexing . cpp <nl> ppp b / tools / SourceKit / lib / SwiftLang / SwiftIndexing . cpp <nl> static void indexModule ( llvm : : MemoryBuffer * Input , <nl> return ; <nl> } <nl> <nl> + Mod - > addFile ( * FUnit ) ; <nl> Mod - > setHasResolvedImports ( ) ; <nl> } <nl> <nl> | [ Serialization ] Delay adding ModuleFiles until they ' re loaded | apple/swift | 6477cc68ebbb522057097dd56733e8e81f783f61 | 2020-05-28T19:09:10Z |
mmm a / code / online_challenges / src / project_euler / problem_005 / problem_005 . java <nl> ppp b / code / online_challenges / src / project_euler / problem_005 / problem_005 . java <nl> <nl> public class Problem005 { <nl> - public static boolean isDivisible ( long number ) { <nl> - for ( long i = 1 ; i < = 20 ; + + i ) { <nl> + public static boolean isDivisible ( int number ) { <nl> + for ( int i = 1 ; i < = 20 ; + + i ) { <nl> if ( number % i ! = 0 ) <nl> return false ; <nl> } <nl> return true ; <nl> } <nl> public static void main ( String [ ] args ) { <nl> - long number = 1 ; <nl> + int number = 1 ; <nl> while ( ! isDivisible ( number ) ) { <nl> + + number ; <nl> } <nl> | Update problem_005 . java | OpenGenus/cosmos | 3eae3794e34aeb321f35376342cc3bde35342fa1 | 2018-03-06T15:36:21Z |
mmm a / test / Serialization / load - target - normalization . swift <nl> ppp b / test / Serialization / load - target - normalization . swift <nl> <nl> / / RUN : touch % t / ForeignModule . swiftmodule / garbage - garbage - garbage . swiftmodule <nl> <nl> / / SR - 12363 : This test crashes on master - next . <nl> - / / XFAIL : * <nl> + / / XFAIL : asserts <nl> <nl> / / Test format : We try to import ForeignModule with architectures besides <nl> / / garbage - garbage - garbage and check the target triple listed in the error <nl> | Merge pull request from nathawes / restrict - xfail - to - asserts - builds | apple/swift | 24ad1425c6bf26e05c88a61d3d9133cd12e2fa71 | 2020-08-20T17:28:53Z |
mmm a / lib / SILOptimizer / Transforms / SimplifyCFG . cpp <nl> ppp b / lib / SILOptimizer / Transforms / SimplifyCFG . cpp <nl> static SILValue getTerminatorCondition ( TermInst * Term ) { <nl> <nl> / / / Is this basic block jump threadable . <nl> static bool isThreadableBlock ( SILBasicBlock * BB , <nl> - SmallPtrSet < SILBasicBlock * , 32 > & LoopHeaders ) { <nl> + SmallPtrSetImpl < SILBasicBlock * > & LoopHeaders ) { <nl> if ( isa < ReturnInst > ( BB - > getTerminator ( ) ) ) <nl> return false ; <nl> <nl> static SILValue createValueForEdge ( SILInstruction * UserInst , <nl> / / / of the operand of ' DominatingBB ' s terminator . <nl> static bool tryDominatorBasedSimplifications ( <nl> SILBasicBlock * DominatingBB , DominanceInfo * DT , <nl> - SmallPtrSet < SILBasicBlock * , 32 > & LoopHeaders , <nl> + SmallPtrSetImpl < SILBasicBlock * > & LoopHeaders , <nl> SmallVectorImpl < ThreadInfo > & JumpThreadableEdges , <nl> llvm : : DenseSet < std : : pair < SILBasicBlock * , SILBasicBlock * > > <nl> & ThreadedEdgeSet , <nl> bool SimplifyCFG : : simplifyAfterDroppingPredecessor ( SILBasicBlock * BB ) { <nl> <nl> static NullablePtr < EnumElementDecl > <nl> getEnumCaseRecursive ( SILValue Val , SILBasicBlock * UsedInBB , int RecursionDepth , <nl> - llvm : : SmallPtrSet < SILArgument * , 8 > HandledArgs ) { <nl> + llvm : : SmallPtrSetImpl < SILArgument * > & HandledArgs ) { <nl> / / Limit the number of recursions . This is an easy way to cope with cycles <nl> / / in the SSA graph . <nl> if ( RecursionDepth > 3 ) <nl> | Merge pull request from gottesmm / pr - 862698ff9224afe2ccafc3fa43c3016dd857c75d | apple/swift | 53c065a25b70a48d29ca96afb3e5f2a61c9496e2 | 2018-05-01T00:05:02Z |
mmm a / src / arm / code - stubs - arm . cc <nl> ppp b / src / arm / code - stubs - arm . cc <nl> void InternalArrayNArgumentsConstructorStub : : InitializeInterfaceDescriptor ( <nl> } <nl> <nl> <nl> - void UnaryOpStub : : InitializeInterfaceDescriptor ( <nl> - Isolate * isolate , <nl> - CodeStubInterfaceDescriptor * descriptor ) { <nl> - static Register registers [ ] = { r0 } ; <nl> - descriptor - > register_param_count_ = 1 ; <nl> - descriptor - > register_params_ = registers ; <nl> - descriptor - > deoptimization_handler_ = <nl> - FUNCTION_ADDR ( UnaryOpIC_Miss ) ; <nl> - } <nl> - <nl> - <nl> # define __ ACCESS_MASM ( masm ) <nl> <nl> - <nl> static void EmitIdenticalObjectComparison ( MacroAssembler * masm , <nl> Label * slow , <nl> Condition cond ) ; <nl> void StoreBufferOverflowStub : : Generate ( MacroAssembler * masm ) { <nl> } <nl> <nl> <nl> + void UnaryOpStub : : PrintName ( StringStream * stream ) { <nl> + const char * op_name = Token : : Name ( op_ ) ; <nl> + const char * overwrite_name = NULL ; / / Make g + + happy . <nl> + switch ( mode_ ) { <nl> + case UNARY_NO_OVERWRITE : overwrite_name = " Alloc " ; break ; <nl> + case UNARY_OVERWRITE : overwrite_name = " Overwrite " ; break ; <nl> + } <nl> + stream - > Add ( " UnaryOpStub_ % s_ % s_ % s " , <nl> + op_name , <nl> + overwrite_name , <nl> + UnaryOpIC : : GetName ( operand_type_ ) ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . <nl> + void UnaryOpStub : : Generate ( MacroAssembler * masm ) { <nl> + switch ( operand_type_ ) { <nl> + case UnaryOpIC : : UNINITIALIZED : <nl> + GenerateTypeTransition ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : SMI : <nl> + GenerateSmiStub ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : NUMBER : <nl> + GenerateNumberStub ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : GENERIC : <nl> + GenerateGenericStub ( masm ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateTypeTransition ( MacroAssembler * masm ) { <nl> + __ mov ( r3 , Operand ( r0 ) ) ; / / the operand <nl> + __ mov ( r2 , Operand ( Smi : : FromInt ( op_ ) ) ) ; <nl> + __ mov ( r1 , Operand ( Smi : : FromInt ( mode_ ) ) ) ; <nl> + __ mov ( r0 , Operand ( Smi : : FromInt ( operand_type_ ) ) ) ; <nl> + __ Push ( r3 , r2 , r1 , r0 ) ; <nl> + <nl> + __ TailCallExternalReference ( <nl> + ExternalReference ( IC_Utility ( IC : : kUnaryOp_Patch ) , masm - > isolate ( ) ) , 4 , 1 ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . 
<nl> + void UnaryOpStub : : GenerateSmiStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateSmiStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateSmiStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & slow ) ; <nl> + __ bind ( & non_smi ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiStubBitNot ( MacroAssembler * masm ) { <nl> + Label non_smi ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiCodeSub ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label * slow ) { <nl> + __ JumpIfNotSmi ( r0 , non_smi ) ; <nl> + <nl> + / / The result of negating zero or the smallest negative smi is not a smi . <nl> + __ bic ( ip , r0 , Operand ( 0x80000000 ) , SetCC ) ; <nl> + __ b ( eq , slow ) ; <nl> + <nl> + / / Return ' 0 - value ' . <nl> + __ rsb ( r0 , r0 , Operand : : Zero ( ) ) ; <nl> + __ Ret ( ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiCodeBitNot ( MacroAssembler * masm , <nl> + Label * non_smi ) { <nl> + __ JumpIfNotSmi ( r0 , non_smi ) ; <nl> + <nl> + / / Flip bits and revert inverted smi - tag . <nl> + __ mvn ( r0 , Operand ( r0 ) ) ; <nl> + __ bic ( r0 , r0 , Operand ( kSmiTagMask ) ) ; <nl> + __ Ret ( ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . <nl> + void UnaryOpStub : : GenerateNumberStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateNumberStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateNumberStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateNumberStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , slow , call_builtin ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & call_builtin ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeSub ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + __ bind ( & call_builtin ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateNumberStubBitNot ( MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeBitNot ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + void UnaryOpStub : : GenerateHeapNumberCodeSub ( MacroAssembler * masm , <nl> + Label * slow ) { <nl> + EmitCheckForHeapNumber ( masm , r0 , r1 , r6 , slow ) ; <nl> + / / r0 is a heap number . Get a new heap number in r1 . <nl> + if ( mode_ = = UNARY_OVERWRITE ) { <nl> + __ ldr ( r2 , FieldMemOperand ( r0 , HeapNumber : : kExponentOffset ) ) ; <nl> + __ eor ( r2 , r2 , Operand ( HeapNumber : : kSignMask ) ) ; / / Flip sign . 
<nl> + __ str ( r2 , FieldMemOperand ( r0 , HeapNumber : : kExponentOffset ) ) ; <nl> + } else { <nl> + Label slow_allocate_heapnumber , heapnumber_allocated ; <nl> + __ AllocateHeapNumber ( r1 , r2 , r3 , r6 , & slow_allocate_heapnumber ) ; <nl> + __ jmp ( & heapnumber_allocated ) ; <nl> + <nl> + __ bind ( & slow_allocate_heapnumber ) ; <nl> + { <nl> + FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> + __ push ( r0 ) ; <nl> + __ CallRuntime ( Runtime : : kNumberAlloc , 0 ) ; <nl> + __ mov ( r1 , Operand ( r0 ) ) ; <nl> + __ pop ( r0 ) ; <nl> + } <nl> + <nl> + __ bind ( & heapnumber_allocated ) ; <nl> + __ ldr ( r3 , FieldMemOperand ( r0 , HeapNumber : : kMantissaOffset ) ) ; <nl> + __ ldr ( r2 , FieldMemOperand ( r0 , HeapNumber : : kExponentOffset ) ) ; <nl> + __ str ( r3 , FieldMemOperand ( r1 , HeapNumber : : kMantissaOffset ) ) ; <nl> + __ eor ( r2 , r2 , Operand ( HeapNumber : : kSignMask ) ) ; / / Flip sign . <nl> + __ str ( r2 , FieldMemOperand ( r1 , HeapNumber : : kExponentOffset ) ) ; <nl> + __ mov ( r0 , Operand ( r1 ) ) ; <nl> + } <nl> + __ Ret ( ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateHeapNumberCodeBitNot ( MacroAssembler * masm , <nl> + Label * slow ) { <nl> + EmitCheckForHeapNumber ( masm , r0 , r1 , r6 , slow ) ; <nl> + <nl> + / / Convert the heap number in r0 to an untagged integer in r1 . <nl> + __ vldr ( d0 , FieldMemOperand ( r0 , HeapNumber : : kValueOffset ) ) ; <nl> + __ ECMAToInt32 ( r1 , d0 , r2 , r3 , r4 , d1 ) ; <nl> + <nl> + / / Do the bitwise operation and check if the result fits in a smi . <nl> + Label try_float ; <nl> + __ mvn ( r1 , Operand ( r1 ) ) ; <nl> + __ cmn ( r1 , Operand ( 0x40000000 ) ) ; <nl> + __ b ( mi , & try_float ) ; <nl> + <nl> + / / Tag the result as a smi and we ' re done . <nl> + __ SmiTag ( r0 , r1 ) ; <nl> + __ Ret ( ) ; <nl> + <nl> + / / Try to store the result in a heap number . <nl> + __ bind ( & try_float ) ; <nl> + if ( mode_ = = UNARY_NO_OVERWRITE ) { <nl> + Label slow_allocate_heapnumber , heapnumber_allocated ; <nl> + __ AllocateHeapNumber ( r0 , r3 , r4 , r6 , & slow_allocate_heapnumber ) ; <nl> + __ jmp ( & heapnumber_allocated ) ; <nl> + <nl> + __ bind ( & slow_allocate_heapnumber ) ; <nl> + { <nl> + FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> + / / Push the lower bit of the result ( left shifted to look like a smi ) . <nl> + __ mov ( r2 , Operand ( r1 , LSL , 31 ) ) ; <nl> + / / Push the 31 high bits ( bit 0 cleared to look like a smi ) . <nl> + __ bic ( r1 , r1 , Operand ( 1 ) ) ; <nl> + __ Push ( r2 , r1 ) ; <nl> + __ CallRuntime ( Runtime : : kNumberAlloc , 0 ) ; <nl> + __ Pop ( r2 , r1 ) ; / / Restore the result . <nl> + __ orr ( r1 , r1 , Operand ( r2 , LSR , 31 ) ) ; <nl> + } <nl> + __ bind ( & heapnumber_allocated ) ; <nl> + } <nl> + <nl> + __ vmov ( s0 , r1 ) ; <nl> + __ vcvt_f64_s32 ( d0 , s0 ) ; <nl> + __ vstr ( d0 , FieldMemOperand ( r0 , HeapNumber : : kValueOffset ) ) ; <nl> + __ Ret ( ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . 
<nl> + void UnaryOpStub : : GenerateGenericStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateGenericStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateGenericStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & slow ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeSub ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericStubBitNot ( MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeBitNot ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericCodeFallback ( MacroAssembler * masm ) { <nl> + / / Handle the slow case by jumping to the JavaScript builtin . <nl> + __ push ( r0 ) ; <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + __ InvokeBuiltin ( Builtins : : UNARY_MINUS , JUMP_FUNCTION ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + __ InvokeBuiltin ( Builtins : : BIT_NOT , JUMP_FUNCTION ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> / / Generates code to call a C function to do a double operation . <nl> / / This code never falls through , but returns with a heap number containing <nl> / / the result in r0 . <nl> mmm a / src / arm / code - stubs - arm . h <nl> ppp b / src / arm / code - stubs - arm . h <nl> class StoreBufferOverflowStub : public PlatformCodeStub { <nl> } ; <nl> <nl> <nl> + class UnaryOpStub : public PlatformCodeStub { <nl> + public : <nl> + UnaryOpStub ( Token : : Value op , <nl> + UnaryOverwriteMode mode , <nl> + UnaryOpIC : : TypeInfo operand_type = UnaryOpIC : : UNINITIALIZED ) <nl> + : op_ ( op ) , <nl> + mode_ ( mode ) , <nl> + operand_type_ ( operand_type ) { <nl> + } <nl> + <nl> + private : <nl> + Token : : Value op_ ; <nl> + UnaryOverwriteMode mode_ ; <nl> + <nl> + / / Operand type information determined at runtime . <nl> + UnaryOpIC : : TypeInfo operand_type_ ; <nl> + <nl> + virtual void PrintName ( StringStream * stream ) ; <nl> + <nl> + class ModeBits : public BitField < UnaryOverwriteMode , 0 , 1 > { } ; <nl> + class OpBits : public BitField < Token : : Value , 1 , 7 > { } ; <nl> + class OperandTypeInfoBits : public BitField < UnaryOpIC : : TypeInfo , 8 , 3 > { } ; <nl> + <nl> + Major MajorKey ( ) { return UnaryOp ; } <nl> + int MinorKey ( ) { <nl> + return ModeBits : : encode ( mode_ ) <nl> + | OpBits : : encode ( op_ ) <nl> + | OperandTypeInfoBits : : encode ( operand_type_ ) ; <nl> + } <nl> + <nl> + / / Note : A lot of the helper functions below will vanish when we use virtual <nl> + / / function instead of switch more often . 
<nl> + void Generate ( MacroAssembler * masm ) ; <nl> + <nl> + void GenerateTypeTransition ( MacroAssembler * masm ) ; <nl> + <nl> + void GenerateSmiStub ( MacroAssembler * masm ) ; <nl> + void GenerateSmiStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateSmiStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateSmiCodeSub ( MacroAssembler * masm , Label * non_smi , Label * slow ) ; <nl> + void GenerateSmiCodeBitNot ( MacroAssembler * masm , Label * slow ) ; <nl> + <nl> + void GenerateNumberStub ( MacroAssembler * masm ) ; <nl> + void GenerateNumberStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateNumberStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateHeapNumberCodeSub ( MacroAssembler * masm , Label * slow ) ; <nl> + void GenerateHeapNumberCodeBitNot ( MacroAssembler * masm , Label * slow ) ; <nl> + <nl> + void GenerateGenericStub ( MacroAssembler * masm ) ; <nl> + void GenerateGenericStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateGenericStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateGenericCodeFallback ( MacroAssembler * masm ) ; <nl> + <nl> + virtual Code : : Kind GetCodeKind ( ) const { return Code : : UNARY_OP_IC ; } <nl> + <nl> + virtual InlineCacheState GetICState ( ) { <nl> + return UnaryOpIC : : ToState ( operand_type_ ) ; <nl> + } <nl> + <nl> + virtual void FinishCode ( Handle < Code > code ) { <nl> + code - > set_unary_op_type ( operand_type_ ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> class StringHelper : public AllStatic { <nl> public : <nl> / / Generate code for copying characters using a simple loop . This should only <nl> mmm a / src / arm / full - codegen - arm . cc <nl> ppp b / src / arm / full - codegen - arm . cc <nl> void FullCodeGenerator : : EmitUnaryOperation ( UnaryOperation * expr , <nl> const char * comment ) { <nl> / / TODO ( svenpanne ) : Allowing format strings in Comment would be nice here . . . <nl> Comment cmt ( masm_ , comment ) ; <nl> - UnaryOpStub stub ( expr - > op ( ) ) ; <nl> + bool can_overwrite = expr - > expression ( ) - > ResultOverwriteAllowed ( ) ; <nl> + UnaryOverwriteMode overwrite = <nl> + can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE ; <nl> + UnaryOpStub stub ( expr - > op ( ) , overwrite ) ; <nl> / / UnaryOpStub expects the argument to be in the <nl> / / accumulator register r0 . <nl> VisitForAccumulatorValue ( expr - > expression ( ) ) ; <nl> mmm a / src / arm / lithium - arm . cc <nl> ppp b / src / arm / lithium - arm . cc <nl> LInstruction * LChunkBuilder : : DoCheckHeapObject ( HCheckHeapObject * instr ) { <nl> } <nl> <nl> <nl> - LInstruction * LChunkBuilder : : DoCheckSmi ( HCheckSmi * instr ) { <nl> - LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> - return AssignEnvironment ( new ( zone ( ) ) LCheckSmi ( value ) ) ; <nl> - } <nl> - <nl> - <nl> - LInstruction * LChunkBuilder : : DoIsNumberAndBranch ( HIsNumberAndBranch * instr ) { <nl> - return new ( zone ( ) ) <nl> - LIsNumberAndBranch ( UseRegisterOrConstantAtStart ( instr - > value ( ) ) ) ; <nl> - } <nl> - <nl> - <nl> LInstruction * LChunkBuilder : : DoCheckInstanceType ( HCheckInstanceType * instr ) { <nl> LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> LInstruction * result = new ( zone ( ) ) LCheckInstanceType ( value ) ; <nl> mmm a / src / arm / lithium - arm . h <nl> ppp b / src / arm / lithium - arm . 
h <nl> class LCodeGen ; <nl> V ( IsConstructCallAndBranch ) \ <nl> V ( IsObjectAndBranch ) \ <nl> V ( IsStringAndBranch ) \ <nl> - V ( IsNumberAndBranch ) \ <nl> V ( IsSmiAndBranch ) \ <nl> V ( IsUndetectableAndBranch ) \ <nl> V ( Label ) \ <nl> class LIsObjectAndBranch : public LControlInstruction < 1 , 1 > { <nl> } ; <nl> <nl> <nl> - class LIsNumberAndBranch : public LControlInstruction < 1 , 0 > { <nl> - public : <nl> - explicit LIsNumberAndBranch ( LOperand * value ) { <nl> - inputs_ [ 0 ] = value ; <nl> - } <nl> - <nl> - LOperand * value ( ) { return inputs_ [ 0 ] ; } <nl> - <nl> - DECLARE_CONCRETE_INSTRUCTION ( IsNumberAndBranch , " is - number - and - branch " ) <nl> - DECLARE_HYDROGEN_ACCESSOR ( IsNumberAndBranch ) <nl> - } ; <nl> - <nl> - <nl> class LIsStringAndBranch : public LControlInstruction < 1 , 1 > { <nl> public : <nl> LIsStringAndBranch ( LOperand * value , LOperand * temp ) { <nl> mmm a / src / arm / lithium - codegen - arm . cc <nl> ppp b / src / arm / lithium - codegen - arm . cc <nl> int LCodeGen : : GetNextEmittedBlock ( ) const { <nl> <nl> template < class InstrType > <nl> void LCodeGen : : EmitBranch ( InstrType instr , Condition cc ) { <nl> - int left_block = instr - > TrueDestination ( chunk_ ) ; <nl> int right_block = instr - > FalseDestination ( chunk_ ) ; <nl> + int left_block = instr - > TrueDestination ( chunk_ ) ; <nl> <nl> int next_block = GetNextEmittedBlock ( ) ; <nl> <nl> - if ( right_block = = left_block | | cc = = al ) { <nl> + if ( right_block = = left_block ) { <nl> EmitGoto ( left_block ) ; <nl> } else if ( left_block = = next_block ) { <nl> __ b ( NegateCondition ( cc ) , chunk_ - > GetAssemblyLabel ( right_block ) ) ; <nl> void LCodeGen : : DoDebugBreak ( LDebugBreak * instr ) { <nl> } <nl> <nl> <nl> - void LCodeGen : : DoIsNumberAndBranch ( LIsNumberAndBranch * instr ) { <nl> - Representation r = instr - > hydrogen ( ) - > value ( ) - > representation ( ) ; <nl> - if ( r . IsSmiOrInteger32 ( ) | | r . IsDouble ( ) ) { <nl> - EmitBranch ( instr , al ) ; <nl> - } else { <nl> - ASSERT ( r . IsTagged ( ) ) ; <nl> - Register reg = ToRegister ( instr - > value ( ) ) ; <nl> - HType type = instr - > hydrogen ( ) - > value ( ) - > type ( ) ; <nl> - if ( type . IsTaggedNumber ( ) ) { <nl> - EmitBranch ( instr , al ) ; <nl> - } <nl> - __ JumpIfSmi ( reg , instr - > TrueLabel ( chunk_ ) ) ; <nl> - __ ldr ( scratch0 ( ) , FieldMemOperand ( reg , HeapObject : : kMapOffset ) ) ; <nl> - __ CompareRoot ( scratch0 ( ) , Heap : : kHeapNumberMapRootIndex ) ; <nl> - EmitBranch ( instr , eq ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> void LCodeGen : : DoBranch ( LBranch * instr ) { <nl> Representation r = instr - > hydrogen ( ) - > value ( ) - > representation ( ) ; <nl> if ( r . IsInteger32 ( ) | | r . IsSmi ( ) ) { <nl> mmm a / src / code - stubs - hydrogen . cc <nl> ppp b / src / code - stubs - hydrogen . cc <nl> Handle < Code > CompareNilICStub : : GenerateCode ( ) { <nl> } <nl> <nl> <nl> - template < > <nl> - HValue * CodeStubGraphBuilder < UnaryOpStub > : : BuildCodeInitializedStub ( ) { <nl> - UnaryOpStub * stub = casted_stub ( ) ; <nl> - Handle < Type > type = stub - > GetType ( graph ( ) - > isolate ( ) ) ; <nl> - HValue * input = GetParameter ( 0 ) ; <nl> - <nl> - / / Prevent unwanted HChange being inserted to ensure that the stub <nl> - / / deopts on newly encountered types . <nl> - if ( ! 
type - > Maybe ( Type : : Double ( ) ) ) { <nl> - input = AddInstruction ( new ( zone ( ) ) <nl> - HForceRepresentation ( input , Representation : : Smi ( ) ) ) ; <nl> - } <nl> - <nl> - if ( ! type - > Is ( Type : : Number ( ) ) ) { <nl> - / / If we expect to see other things than Numbers , we will create a generic <nl> - / / stub , which handles all numbers and calls into the runtime for the rest . <nl> - IfBuilder if_number ( this ) ; <nl> - if_number . If < HIsNumberAndBranch > ( input ) ; <nl> - if_number . Then ( ) ; <nl> - HInstruction * res = BuildUnaryMathOp ( input , type , stub - > operation ( ) ) ; <nl> - if_number . Return ( AddInstruction ( res ) ) ; <nl> - if_number . Else ( ) ; <nl> - AddInstruction ( new ( zone ( ) ) HPushArgument ( GetParameter ( 0 ) ) ) ; <nl> - if_number . Return ( AddInstruction ( new ( zone ( ) ) HCallConstantFunction ( <nl> - stub - > ToJSFunction ( isolate ( ) ) , 1 ) ) ) ; <nl> - if_number . End ( ) ; <nl> - return graph ( ) - > GetConstantUndefined ( ) ; <nl> - } <nl> - <nl> - return AddInstruction ( BuildUnaryMathOp ( input , type , stub - > operation ( ) ) ) ; <nl> - } <nl> - <nl> - <nl> - Handle < Code > UnaryOpStub : : GenerateCode ( ) { <nl> - return DoGenerateCode ( this ) ; <nl> - } <nl> - <nl> - <nl> template < > <nl> HValue * CodeStubGraphBuilder < ToBooleanStub > : : BuildCodeInitializedStub ( ) { <nl> ToBooleanStub * stub = casted_stub ( ) ; <nl> mmm a / src / code - stubs . cc <nl> ppp b / src / code - stubs . cc <nl> const char * CodeStub : : MajorName ( CodeStub : : Major major_key , <nl> } <nl> } <nl> <nl> - void CodeStub : : PrintBaseName ( StringStream * stream ) { <nl> - stream - > Add ( " % s " , MajorName ( MajorKey ( ) , false ) ) ; <nl> - } <nl> - <nl> <nl> void CodeStub : : PrintName ( StringStream * stream ) { <nl> - PrintBaseName ( stream ) ; <nl> - PrintState ( stream ) ; <nl> - } <nl> - <nl> - <nl> - Builtins : : JavaScript UnaryOpStub : : ToJSBuiltin ( ) { <nl> - switch ( operation_ ) { <nl> - default : <nl> - UNREACHABLE ( ) ; <nl> - case Token : : SUB : <nl> - return Builtins : : UNARY_MINUS ; <nl> - case Token : : BIT_NOT : <nl> - return Builtins : : BIT_NOT ; <nl> - } <nl> - } <nl> - <nl> - <nl> - Handle < JSFunction > UnaryOpStub : : ToJSFunction ( Isolate * isolate ) { <nl> - Handle < JSBuiltinsObject > builtins ( isolate - > js_builtins_object ( ) ) ; <nl> - Object * builtin = builtins - > javascript_builtin ( ToJSBuiltin ( ) ) ; <nl> - return Handle < JSFunction > ( JSFunction : : cast ( builtin ) , isolate ) ; <nl> - } <nl> - <nl> - <nl> - MaybeObject * UnaryOpStub : : Result ( Handle < Object > object , Isolate * isolate ) { <nl> - Handle < JSFunction > builtin_function = ToJSFunction ( isolate ) ; <nl> - bool caught_exception ; <nl> - Handle < Object > result = Execution : : Call ( builtin_function , object , <nl> - 0 , NULL , & caught_exception ) ; <nl> - if ( caught_exception ) { <nl> - return Failure : : Exception ( ) ; <nl> - } <nl> - return * result ; <nl> - } <nl> - <nl> - <nl> - void UnaryOpStub : : UpdateStatus ( Handle < Object > object ) { <nl> - State old_state ( state_ ) ; <nl> - if ( object - > IsSmi ( ) ) { <nl> - state_ . Add ( SMI ) ; <nl> - if ( operation_ = = Token : : SUB & & * object = = 0 ) { <nl> - / / The result ( - 0 ) has to be represented as double . <nl> - state_ . Add ( HEAP_NUMBER ) ; <nl> - } <nl> - } else if ( object - > IsHeapNumber ( ) ) { <nl> - state_ . Add ( HEAP_NUMBER ) ; <nl> - } else { <nl> - state_ . 
Add ( GENERIC ) ; <nl> - } <nl> - TraceTransition ( old_state , state_ ) ; <nl> - } <nl> - <nl> - <nl> - Handle < Type > UnaryOpStub : : GetType ( Isolate * isolate ) { <nl> - if ( state_ . Contains ( GENERIC ) ) { <nl> - return handle ( Type : : Any ( ) , isolate ) ; <nl> - } <nl> - Handle < Type > type = handle ( Type : : None ( ) , isolate ) ; <nl> - if ( state_ . Contains ( SMI ) ) { <nl> - type = handle ( <nl> - Type : : Union ( type , handle ( Type : : Smi ( ) , isolate ) ) , isolate ) ; <nl> - } <nl> - if ( state_ . Contains ( HEAP_NUMBER ) ) { <nl> - type = handle ( <nl> - Type : : Union ( type , handle ( Type : : Double ( ) , isolate ) ) , isolate ) ; <nl> - } <nl> - return type ; <nl> + stream - > Add ( " % s " , MajorName ( MajorKey ( ) , false ) ) ; <nl> } <nl> <nl> <nl> void BinaryOpStub : : GenerateCallRuntime ( MacroAssembler * masm ) { <nl> # undef __ <nl> <nl> <nl> - void UnaryOpStub : : PrintBaseName ( StringStream * stream ) { <nl> - CodeStub : : PrintBaseName ( stream ) ; <nl> - if ( operation_ = = Token : : SUB ) stream - > Add ( " Minus " ) ; <nl> - if ( operation_ = = Token : : BIT_NOT ) stream - > Add ( " Not " ) ; <nl> - } <nl> - <nl> - <nl> - void UnaryOpStub : : PrintState ( StringStream * stream ) { <nl> - state_ . Print ( stream ) ; <nl> - } <nl> - <nl> - <nl> - void UnaryOpStub : : State : : Print ( StringStream * stream ) const { <nl> - stream - > Add ( " ( " ) ; <nl> - SimpleListPrinter printer ( stream ) ; <nl> - if ( IsEmpty ( ) ) printer . Add ( " None " ) ; <nl> - if ( Contains ( GENERIC ) ) printer . Add ( " Generic " ) ; <nl> - if ( Contains ( HEAP_NUMBER ) ) printer . Add ( " HeapNumber " ) ; <nl> - if ( Contains ( SMI ) ) printer . Add ( " Smi " ) ; <nl> - stream - > Add ( " ) " ) ; <nl> - } <nl> - <nl> - <nl> void BinaryOpStub : : PrintName ( StringStream * stream ) { <nl> const char * op_name = Token : : Name ( op_ ) ; <nl> const char * overwrite_name ; <nl> void ICCompareStub : : Generate ( MacroAssembler * masm ) { <nl> } <nl> <nl> <nl> - void CompareNilICStub : : UpdateStatus ( Handle < Object > object ) { <nl> + void CompareNilICStub : : Record ( Handle < Object > object ) { <nl> ASSERT ( state_ ! = State : : Generic ( ) ) ; <nl> - State old_state ( state_ ) ; <nl> if ( object - > IsNull ( ) ) { <nl> state_ . Add ( NULL_TYPE ) ; <nl> } else if ( object - > IsUndefined ( ) ) { <nl> void CompareNilICStub : : UpdateStatus ( Handle < Object > object ) { <nl> } else { <nl> state_ . Add ( MONOMORPHIC_MAP ) ; <nl> } <nl> - TraceTransition ( old_state , state_ ) ; <nl> } <nl> <nl> <nl> - template < class StateType > <nl> - void HydrogenCodeStub : : TraceTransition ( StateType from , StateType to ) { <nl> + void CompareNilICStub : : State : : TraceTransition ( State to ) const { <nl> # ifdef DEBUG <nl> if ( ! FLAG_trace_ic ) return ; <nl> char buffer [ 100 ] ; <nl> NoAllocationStringAllocator allocator ( buffer , <nl> static_cast < unsigned > ( sizeof ( buffer ) ) ) ; <nl> StringStream stream ( & allocator ) ; <nl> - stream . Add ( " [ " ) ; <nl> - PrintBaseName ( & stream ) ; <nl> - stream . Add ( " : " ) ; <nl> - from . Print ( & stream ) ; <nl> + stream . Add ( " [ CompareNilIC : " ) ; <nl> + Print ( & stream ) ; <nl> stream . Add ( " = > " ) ; <nl> to . Print ( & stream ) ; <nl> stream . 
Add ( " ] \ n " ) ; <nl> void HydrogenCodeStub : : TraceTransition ( StateType from , StateType to ) { <nl> # endif <nl> } <nl> <nl> - void CompareNilICStub : : PrintBaseName ( StringStream * stream ) { <nl> - CodeStub : : PrintBaseName ( stream ) ; <nl> - stream - > Add ( ( nil_value_ = = kNullValue ) ? " ( NullValue ) " : <nl> - " ( UndefinedValue ) " ) ; <nl> - } <nl> <nl> - void CompareNilICStub : : PrintState ( StringStream * stream ) { <nl> + void CompareNilICStub : : PrintName ( StringStream * stream ) { <nl> + stream - > Add ( " CompareNilICStub_ " ) ; <nl> state_ . Print ( stream ) ; <nl> + stream - > Add ( ( nil_value_ = = kNullValue ) ? " ( NullValue | " : <nl> + " ( UndefinedValue | " ) ; <nl> } <nl> <nl> <nl> void CallConstructStub : : PrintName ( StringStream * stream ) { <nl> } <nl> <nl> <nl> - bool ToBooleanStub : : UpdateStatus ( Handle < Object > object ) { <nl> + bool ToBooleanStub : : Record ( Handle < Object > object ) { <nl> Types old_types ( types_ ) ; <nl> - bool to_boolean_value = types_ . UpdateStatus ( object ) ; <nl> - TraceTransition ( old_types , types_ ) ; <nl> + bool to_boolean_value = types_ . Record ( object ) ; <nl> + old_types . TraceTransition ( types_ ) ; <nl> return to_boolean_value ; <nl> } <nl> <nl> <nl> - void ToBooleanStub : : PrintState ( StringStream * stream ) { <nl> + void ToBooleanStub : : PrintName ( StringStream * stream ) { <nl> + stream - > Add ( " ToBooleanStub_ " ) ; <nl> types_ . Print ( stream ) ; <nl> } <nl> <nl> void ToBooleanStub : : Types : : Print ( StringStream * stream ) const { <nl> } <nl> <nl> <nl> - bool ToBooleanStub : : Types : : UpdateStatus ( Handle < Object > object ) { <nl> + void ToBooleanStub : : Types : : TraceTransition ( Types to ) const { <nl> + # ifdef DEBUG <nl> + if ( ! FLAG_trace_ic ) return ; <nl> + char buffer [ 100 ] ; <nl> + NoAllocationStringAllocator allocator ( buffer , <nl> + static_cast < unsigned > ( sizeof ( buffer ) ) ) ; <nl> + StringStream stream ( & allocator ) ; <nl> + stream . Add ( " [ ToBooleanIC : " ) ; <nl> + Print ( & stream ) ; <nl> + stream . Add ( " = > " ) ; <nl> + to . Print ( & stream ) ; <nl> + stream . Add ( " ] \ n " ) ; <nl> + stream . OutputToStdOut ( ) ; <nl> + # endif <nl> + } <nl> + <nl> + <nl> + bool ToBooleanStub : : Types : : Record ( Handle < Object > object ) { <nl> if ( object - > IsUndefined ( ) ) { <nl> Add ( UNDEFINED ) ; <nl> return false ; <nl> mmm a / src / code - stubs . h <nl> ppp b / src / code - stubs . h <nl> namespace internal { <nl> <nl> / / Mode to overwrite BinaryExpression values . <nl> enum OverwriteMode { NO_OVERWRITE , OVERWRITE_LEFT , OVERWRITE_RIGHT } ; <nl> + enum UnaryOverwriteMode { UNARY_OVERWRITE , UNARY_NO_OVERWRITE } ; <nl> + <nl> <nl> / / Stub is base classes of all stubs . <nl> class CodeStub BASE_EMBEDDED { <nl> class CodeStub BASE_EMBEDDED { <nl> return - 1 ; <nl> } <nl> <nl> - virtual void PrintName ( StringStream * stream ) ; <nl> - <nl> protected : <nl> static bool CanUseFPRegisters ( ) ; <nl> <nl> class CodeStub BASE_EMBEDDED { <nl> / / a fixed ( non - moveable ) code object . <nl> virtual bool NeedsImmovableCode ( ) { return false ; } <nl> <nl> - / / Returns a name for logging / debugging purposes . <nl> - SmartArrayPointer < const char > GetName ( ) ; <nl> - virtual void PrintBaseName ( StringStream * stream ) ; <nl> - virtual void PrintState ( StringStream * stream ) { } <nl> - <nl> private : <nl> / / Perform bookkeeping required after code generation when stub code is <nl> / / initially generated . 
<nl> class CodeStub BASE_EMBEDDED { <nl> / / If a stub uses a special cache override this . <nl> virtual bool UseSpecialCache ( ) { return false ; } <nl> <nl> + / / Returns a name for logging / debugging purposes . <nl> + SmartArrayPointer < const char > GetName ( ) ; <nl> + virtual void PrintName ( StringStream * stream ) ; <nl> + <nl> / / Computes the key based on major and minor . <nl> uint32_t GetKey ( ) { <nl> ASSERT ( static_cast < int > ( MajorKey ( ) ) < NUMBER_OF_IDS ) ; <nl> class HydrogenCodeStub : public CodeStub { <nl> <nl> Handle < Code > GenerateLightweightMissCode ( Isolate * isolate ) ; <nl> <nl> - template < class StateType > <nl> - void TraceTransition ( StateType from , StateType to ) ; <nl> - <nl> private : <nl> class MinorKeyBits : public BitField < int , 0 , kStubMinorKeyBits - 1 > { } ; <nl> class IsMissBits : public BitField < bool , kStubMinorKeyBits - 1 , 1 > { } ; <nl> class FastNewBlockContextStub : public PlatformCodeStub { <nl> } ; <nl> <nl> <nl> - class UnaryOpStub : public HydrogenCodeStub { <nl> - public : <nl> - / / Stub without type info available - > construct uninitialized <nl> - explicit UnaryOpStub ( Token : : Value operation ) <nl> - : HydrogenCodeStub ( UNINITIALIZED ) , operation_ ( operation ) { } <nl> - explicit UnaryOpStub ( Code : : ExtraICState ic_state ) : <nl> - state_ ( StateBits : : decode ( ic_state ) ) , <nl> - operation_ ( OperatorBits : : decode ( ic_state ) ) { } <nl> - <nl> - virtual void InitializeInterfaceDescriptor ( <nl> - Isolate * isolate , <nl> - CodeStubInterfaceDescriptor * descriptor ) ; <nl> - <nl> - virtual Code : : Kind GetCodeKind ( ) const { return Code : : UNARY_OP_IC ; } <nl> - virtual InlineCacheState GetICState ( ) { <nl> - if ( state_ . Contains ( GENERIC ) ) { <nl> - return MEGAMORPHIC ; <nl> - } else if ( state_ . IsEmpty ( ) ) { <nl> - return PREMONOMORPHIC ; <nl> - } else { <nl> - return MONOMORPHIC ; <nl> - } <nl> - } <nl> - virtual Code : : ExtraICState GetExtraICState ( ) { <nl> - return OperatorBits : : encode ( operation_ ) | <nl> - StateBits : : encode ( state_ . 
ToIntegral ( ) ) ; <nl> - } <nl> - <nl> - Token : : Value operation ( ) { return operation_ ; } <nl> - Handle < JSFunction > ToJSFunction ( Isolate * isolate ) ; <nl> - <nl> - void UpdateStatus ( Handle < Object > object ) ; <nl> - MaybeObject * Result ( Handle < Object > object , Isolate * isolate ) ; <nl> - Handle < Code > GenerateCode ( ) ; <nl> - Handle < Type > GetType ( Isolate * isolate ) ; <nl> - <nl> - protected : <nl> - void PrintState ( StringStream * stream ) ; <nl> - void PrintBaseName ( StringStream * stream ) ; <nl> - <nl> - private : <nl> - Builtins : : JavaScript ToJSBuiltin ( ) ; <nl> - <nl> - enum UnaryOpType { <nl> - SMI , <nl> - HEAP_NUMBER , <nl> - GENERIC , <nl> - NUMBER_OF_TYPES <nl> - } ; <nl> - <nl> - class State : public EnumSet < UnaryOpType , byte > { <nl> - public : <nl> - State ( ) : EnumSet < UnaryOpType , byte > ( ) { } <nl> - explicit State ( byte bits ) : EnumSet < UnaryOpType , byte > ( bits ) { } <nl> - void Print ( StringStream * stream ) const ; <nl> - } ; <nl> - <nl> - class StateBits : public BitField < int , 0 , NUMBER_OF_TYPES > { } ; <nl> - class OperatorBits : public BitField < Token : : Value , NUMBER_OF_TYPES , 8 > { } ; <nl> - <nl> - State state_ ; <nl> - Token : : Value operation_ ; <nl> - <nl> - virtual CodeStub : : Major MajorKey ( ) { return UnaryOp ; } <nl> - virtual int NotMissMinorKey ( ) { return GetExtraICState ( ) ; } <nl> - } ; <nl> - <nl> - <nl> class FastCloneShallowArrayStub : public HydrogenCodeStub { <nl> public : <nl> / / Maximum length of copied elements array . <nl> class CompareNilICStub : public HydrogenCodeStub { <nl> } <nl> <nl> void Print ( StringStream * stream ) const ; <nl> + void TraceTransition ( State to ) const ; <nl> } ; <nl> <nl> static Handle < Type > StateToType ( <nl> class CompareNilICStub : public HydrogenCodeStub { <nl> return NilValueField : : decode ( state ) ; <nl> } <nl> <nl> - void UpdateStatus ( Handle < Object > object ) ; <nl> + void Record ( Handle < Object > object ) ; <nl> <nl> bool IsMonomorphic ( ) const { return state_ . Contains ( MONOMORPHIC_MAP ) ; } <nl> NilValue GetNilValue ( ) const { return nil_value_ ; } <nl> State GetState ( ) const { return state_ ; } <nl> void ClearState ( ) { state_ . RemoveAll ( ) ; } <nl> <nl> - virtual void PrintState ( StringStream * stream ) ; <nl> - virtual void PrintBaseName ( StringStream * stream ) ; <nl> + virtual void PrintName ( StringStream * stream ) ; <nl> <nl> private : <nl> friend class CompareNilIC ; <nl> class ToBooleanStub : public HydrogenCodeStub { <nl> <nl> byte ToByte ( ) const { return ToIntegral ( ) ; } <nl> void Print ( StringStream * stream ) const ; <nl> - bool UpdateStatus ( Handle < Object > object ) ; <nl> + void TraceTransition ( Types to ) const ; <nl> + bool Record ( Handle < Object > object ) ; <nl> bool NeedsMap ( ) const ; <nl> bool CanBeUndetectable ( ) const ; <nl> bool IsGeneric ( ) const { return ToIntegral ( ) = = Generic ( ) . 
ToIntegral ( ) ; } <nl> class ToBooleanStub : public HydrogenCodeStub { <nl> explicit ToBooleanStub ( Code : : ExtraICState state ) <nl> : types_ ( static_cast < byte > ( state ) ) { } <nl> <nl> - bool UpdateStatus ( Handle < Object > object ) ; <nl> + bool Record ( Handle < Object > object ) ; <nl> Types GetTypes ( ) { return types_ ; } <nl> <nl> virtual Handle < Code > GenerateCode ( ) ; <nl> class ToBooleanStub : public HydrogenCodeStub { <nl> CodeStubInterfaceDescriptor * descriptor ) ; <nl> <nl> virtual Code : : Kind GetCodeKind ( ) const { return Code : : TO_BOOLEAN_IC ; } <nl> - virtual void PrintState ( StringStream * stream ) ; <nl> + virtual void PrintName ( StringStream * stream ) ; <nl> <nl> virtual bool SometimesSetsUpAFrame ( ) { return false ; } <nl> <nl> mmm a / src / hydrogen - instructions . cc <nl> ppp b / src / hydrogen - instructions . cc <nl> HType HCheckHeapObject : : CalculateInferredType ( ) { <nl> } <nl> <nl> <nl> - HType HCheckSmi : : CalculateInferredType ( ) { <nl> - return HType : : Smi ( ) ; <nl> - } <nl> - <nl> - <nl> HType HPhi : : CalculateInferredType ( ) { <nl> HType result = HType : : Uninitialized ( ) ; <nl> for ( int i = 0 ; i < OperandCount ( ) ; + + i ) { <nl> mmm a / src / hydrogen - instructions . h <nl> ppp b / src / hydrogen - instructions . h <nl> class LChunkBuilder ; <nl> V ( CheckHeapObject ) \ <nl> V ( CheckInstanceType ) \ <nl> V ( CheckMaps ) \ <nl> - V ( CheckSmi ) \ <nl> V ( CheckPrototypeMaps ) \ <nl> V ( ClampToUint8 ) \ <nl> V ( ClassOfTestAndBranch ) \ <nl> class LChunkBuilder ; <nl> V ( InvokeFunction ) \ <nl> V ( IsConstructCallAndBranch ) \ <nl> V ( IsObjectAndBranch ) \ <nl> - V ( IsNumberAndBranch ) \ <nl> V ( IsStringAndBranch ) \ <nl> V ( IsSmiAndBranch ) \ <nl> V ( IsUndetectableAndBranch ) \ <nl> class HCheckInstanceType : public HUnaryOperation { <nl> } ; <nl> <nl> <nl> - class HCheckSmi : public HUnaryOperation { <nl> - public : <nl> - explicit HCheckSmi ( HValue * value ) : HUnaryOperation ( value ) { <nl> - set_representation ( Representation : : Smi ( ) ) ; <nl> - SetFlag ( kUseGVN ) ; <nl> - } <nl> - <nl> - virtual Representation RequiredInputRepresentation ( int index ) { <nl> - return Representation : : Tagged ( ) ; <nl> - } <nl> - <nl> - virtual HType CalculateInferredType ( ) ; <nl> - <nl> - virtual HValue * Canonicalize ( ) { <nl> - HType value_type = value ( ) - > type ( ) ; <nl> - if ( value_type . IsSmi ( ) ) { <nl> - return NULL ; <nl> - } <nl> - return this ; <nl> - } <nl> - <nl> - DECLARE_CONCRETE_INSTRUCTION ( CheckSmi ) <nl> - <nl> - protected : <nl> - virtual bool DataEquals ( HValue * other ) { return true ; } <nl> - } ; <nl> - <nl> - <nl> - class HIsNumberAndBranch : public HUnaryControlInstruction { <nl> - public : <nl> - explicit HIsNumberAndBranch ( HValue * value ) <nl> - : HUnaryControlInstruction ( value , NULL , NULL ) { <nl> - SetFlag ( kFlexibleRepresentation ) ; <nl> - } <nl> - <nl> - virtual Representation RequiredInputRepresentation ( int index ) { <nl> - return Representation : : None ( ) ; <nl> - } <nl> - <nl> - DECLARE_CONCRETE_INSTRUCTION ( IsNumberAndBranch ) <nl> - } ; <nl> - <nl> - <nl> class HCheckHeapObject : public HUnaryOperation { <nl> public : <nl> explicit HCheckHeapObject ( HValue * value ) : HUnaryOperation ( value ) { <nl> mmm a / src / hydrogen . cc <nl> ppp b / src / hydrogen . 
cc <nl> HReturn * HGraphBuilder : : AddReturn ( HValue * value ) { <nl> } <nl> <nl> <nl> - void HGraphBuilder : : AddSoftDeoptimize ( ) { <nl> - isolate ( ) - > counters ( ) - > soft_deopts_requested ( ) - > Increment ( ) ; <nl> - if ( FLAG_always_opt ) return ; <nl> - if ( current_block ( ) - > IsDeoptimizing ( ) ) return ; <nl> - Add < HSoftDeoptimize > ( ) ; <nl> - isolate ( ) - > counters ( ) - > soft_deopts_inserted ( ) - > Increment ( ) ; <nl> - current_block ( ) - > MarkAsDeoptimizing ( ) ; <nl> - graph ( ) - > set_has_soft_deoptimize ( true ) ; <nl> - } <nl> - <nl> - <nl> HBasicBlock * HGraphBuilder : : CreateBasicBlock ( HEnvironment * env ) { <nl> HBasicBlock * b = graph ( ) - > CreateBasicBlock ( ) ; <nl> b - > SetInitialEnvironment ( env ) ; <nl> HValue * HGraphBuilder : : BuildCloneShallowArray ( HContext * context , <nl> } <nl> <nl> <nl> - HInstruction * HGraphBuilder : : BuildUnaryMathOp ( <nl> - HValue * input , Handle < Type > type , Token : : Value operation ) { <nl> - / / We only handle the numeric cases here <nl> - type = handle ( <nl> - Type : : Intersect ( type , handle ( Type : : Number ( ) , isolate ( ) ) ) , isolate ( ) ) ; <nl> - <nl> - switch ( operation ) { <nl> - default : <nl> - UNREACHABLE ( ) ; <nl> - case Token : : SUB : { <nl> - HInstruction * instr = <nl> - HMul : : New ( zone ( ) , environment ( ) - > LookupContext ( ) , <nl> - input , graph ( ) - > GetConstantMinus1 ( ) ) ; <nl> - Representation rep = Representation : : FromType ( type ) ; <nl> - if ( type - > Is ( Type : : None ( ) ) ) { <nl> - AddSoftDeoptimize ( ) ; <nl> - } <nl> - if ( instr - > IsBinaryOperation ( ) ) { <nl> - HBinaryOperation * binop = HBinaryOperation : : cast ( instr ) ; <nl> - binop - > set_observed_input_representation ( 1 , rep ) ; <nl> - binop - > set_observed_input_representation ( 2 , rep ) ; <nl> - } <nl> - return instr ; <nl> - } <nl> - case Token : : BIT_NOT : <nl> - if ( type - > Is ( Type : : None ( ) ) ) { <nl> - AddSoftDeoptimize ( ) ; <nl> - } <nl> - return new ( zone ( ) ) HBitNot ( input ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> void HGraphBuilder : : BuildCompareNil ( <nl> HValue * value , <nl> Handle < Type > type , <nl> void HOptimizedGraphBuilder : : PushAndAdd ( HInstruction * instr ) { <nl> } <nl> <nl> <nl> + void HOptimizedGraphBuilder : : AddSoftDeoptimize ( ) { <nl> + isolate ( ) - > counters ( ) - > soft_deopts_requested ( ) - > Increment ( ) ; <nl> + if ( FLAG_always_opt ) return ; <nl> + if ( current_block ( ) - > IsDeoptimizing ( ) ) return ; <nl> + Add < HSoftDeoptimize > ( ) ; <nl> + isolate ( ) - > counters ( ) - > soft_deopts_inserted ( ) - > Increment ( ) ; <nl> + current_block ( ) - > MarkAsDeoptimizing ( ) ; <nl> + graph ( ) - > set_has_soft_deoptimize ( true ) ; <nl> + } <nl> + <nl> + <nl> template < class Instruction > <nl> HInstruction * HOptimizedGraphBuilder : : PreProcessCall ( Instruction * call ) { <nl> int count = call - > argument_count ( ) ; <nl> void HOptimizedGraphBuilder : : VisitTypeof ( UnaryOperation * expr ) { <nl> void HOptimizedGraphBuilder : : VisitSub ( UnaryOperation * expr ) { <nl> CHECK_ALIVE ( VisitForValue ( expr - > expression ( ) ) ) ; <nl> HValue * value = Pop ( ) ; <nl> + HValue * context = environment ( ) - > LookupContext ( ) ; <nl> + HInstruction * instr = <nl> + HMul : : New ( zone ( ) , context , value , graph ( ) - > GetConstantMinus1 ( ) ) ; <nl> Handle < Type > operand_type = expr - > expression ( ) - > lower_type ( ) ; <nl> - HInstruction * instr = BuildUnaryMathOp ( value , operand_type , Token : : SUB ) 
; <nl> + Representation rep = ToRepresentation ( operand_type ) ; <nl> + if ( operand_type - > Is ( Type : : None ( ) ) ) { <nl> + AddSoftDeoptimize ( ) ; <nl> + } <nl> + if ( instr - > IsBinaryOperation ( ) ) { <nl> + HBinaryOperation : : cast ( instr ) - > set_observed_input_representation ( 1 , rep ) ; <nl> + HBinaryOperation : : cast ( instr ) - > set_observed_input_representation ( 2 , rep ) ; <nl> + } <nl> return ast_context ( ) - > ReturnInstruction ( instr , expr - > id ( ) ) ; <nl> } <nl> <nl> void HOptimizedGraphBuilder : : VisitBitNot ( UnaryOperation * expr ) { <nl> CHECK_ALIVE ( VisitForValue ( expr - > expression ( ) ) ) ; <nl> HValue * value = Pop ( ) ; <nl> Handle < Type > operand_type = expr - > expression ( ) - > lower_type ( ) ; <nl> - HInstruction * instr = BuildUnaryMathOp ( value , operand_type , Token : : BIT_NOT ) ; <nl> + if ( operand_type - > Is ( Type : : None ( ) ) ) { <nl> + AddSoftDeoptimize ( ) ; <nl> + } <nl> + HInstruction * instr = new ( zone ( ) ) HBitNot ( value ) ; <nl> return ast_context ( ) - > ReturnInstruction ( instr , expr - > id ( ) ) ; <nl> } <nl> <nl> HInstruction * HOptimizedGraphBuilder : : BuildIncrement ( <nl> CountOperation * expr ) { <nl> / / The input to the count operation is on top of the expression stack . <nl> TypeInfo info = expr - > type ( ) ; <nl> - Representation rep = Representation : : FromType ( info ) ; <nl> + Representation rep = ToRepresentation ( info ) ; <nl> if ( rep . IsNone ( ) | | rep . IsTagged ( ) ) { <nl> rep = Representation : : Smi ( ) ; <nl> } <nl> HInstruction * HOptimizedGraphBuilder : : BuildBinaryOperation ( <nl> Handle < Type > right_type = expr - > right ( ) - > lower_type ( ) ; <nl> Handle < Type > result_type = expr - > result_type ( ) ; <nl> Maybe < int > fixed_right_arg = expr - > fixed_right_arg ( ) ; <nl> - Representation left_rep = Representation : : FromType ( left_type ) ; <nl> - Representation right_rep = Representation : : FromType ( right_type ) ; <nl> - Representation result_rep = Representation : : FromType ( result_type ) ; <nl> - <nl> + Representation left_rep = ToRepresentation ( left_type ) ; <nl> + Representation right_rep = ToRepresentation ( right_type ) ; <nl> + Representation result_rep = ToRepresentation ( result_type ) ; <nl> if ( left_type - > Is ( Type : : None ( ) ) ) { <nl> AddSoftDeoptimize ( ) ; <nl> / / TODO ( rossberg ) : we should be able to get rid of non - continuous defaults . <nl> void HOptimizedGraphBuilder : : VisitArithmeticExpression ( BinaryOperation * expr ) { <nl> } <nl> <nl> <nl> + / / TODO ( rossberg ) : this should die eventually . <nl> + Representation HOptimizedGraphBuilder : : ToRepresentation ( TypeInfo info ) { <nl> + if ( info . IsUninitialized ( ) ) return Representation : : None ( ) ; <nl> + / / TODO ( verwaest ) : Return Smi rather than Integer32 . <nl> + if ( info . IsSmi ( ) ) return Representation : : Integer32 ( ) ; <nl> + if ( info . IsInteger32 ( ) ) return Representation : : Integer32 ( ) ; <nl> + if ( info . IsDouble ( ) ) return Representation : : Double ( ) ; <nl> + if ( info . 
IsNumber ( ) ) return Representation : : Double ( ) ; <nl> + return Representation : : Tagged ( ) ; <nl> + } <nl> + <nl> + <nl> + Representation HOptimizedGraphBuilder : : ToRepresentation ( Handle < Type > type ) { <nl> + if ( type - > Is ( Type : : None ( ) ) ) return Representation : : None ( ) ; <nl> + if ( type - > Is ( Type : : Signed32 ( ) ) ) return Representation : : Integer32 ( ) ; <nl> + if ( type - > Is ( Type : : Number ( ) ) ) return Representation : : Double ( ) ; <nl> + return Representation : : Tagged ( ) ; <nl> + } <nl> + <nl> + <nl> void HOptimizedGraphBuilder : : HandleLiteralCompareTypeof ( CompareOperation * expr , <nl> HTypeof * typeof_expr , <nl> Handle < String > check ) { <nl> void HOptimizedGraphBuilder : : VisitCompareOperation ( CompareOperation * expr ) { <nl> Handle < Type > left_type = expr - > left ( ) - > lower_type ( ) ; <nl> Handle < Type > right_type = expr - > right ( ) - > lower_type ( ) ; <nl> Handle < Type > combined_type = expr - > combined_type ( ) ; <nl> - Representation combined_rep = Representation : : FromType ( combined_type ) ; <nl> - Representation left_rep = Representation : : FromType ( left_type ) ; <nl> - Representation right_rep = Representation : : FromType ( right_type ) ; <nl> + Representation combined_rep = ToRepresentation ( combined_type ) ; <nl> + Representation left_rep = ToRepresentation ( left_type ) ; <nl> + Representation right_rep = ToRepresentation ( right_type ) ; <nl> <nl> CHECK_ALIVE ( VisitForValue ( expr - > left ( ) ) ) ; <nl> CHECK_ALIVE ( VisitForValue ( expr - > right ( ) ) ) ; <nl> void HOptimizedGraphBuilder : : VisitCompareOperation ( CompareOperation * expr ) { <nl> result - > set_position ( expr - > position ( ) ) ; <nl> return ast_context ( ) - > ReturnInstruction ( result , expr - > id ( ) ) ; <nl> } else { <nl> - / / TODO ( verwaest ) : Remove once Representation : : FromType properly <nl> - / / returns Smi when the IC measures Smi . <nl> + / / TODO ( verwaest ) : Remove once ToRepresentation properly returns Smi when <nl> + / / the IC measures Smi . <nl> if ( left_type - > Is ( Type : : Smi ( ) ) ) left_rep = Representation : : Smi ( ) ; <nl> if ( right_type - > Is ( Type : : Smi ( ) ) ) right_rep = Representation : : Smi ( ) ; <nl> HCompareIDAndBranch * result = <nl> mmm a / src / hydrogen . h <nl> ppp b / src / hydrogen . h <nl> class HGraphBuilder { <nl> <nl> HLoadNamedField * AddLoadFixedArrayLength ( HValue * object ) ; <nl> <nl> - void AddSoftDeoptimize ( ) ; <nl> - <nl> class IfBuilder { <nl> public : <nl> explicit IfBuilder ( HGraphBuilder * builder , <nl> class HGraphBuilder { <nl> ElementsKind kind , <nl> int length ) ; <nl> <nl> - HInstruction * BuildUnaryMathOp ( <nl> - HValue * value , Handle < Type > type , Token : : Value token ) ; <nl> - <nl> void BuildCompareNil ( <nl> HValue * value , <nl> Handle < Type > type , <nl> class HOptimizedGraphBuilder : public HGraphBuilder , public AstVisitor { <nl> <nl> bool inline_bailout ( ) { return inline_bailout_ ; } <nl> <nl> + void AddSoftDeoptimize ( ) ; <nl> + <nl> void Bailout ( const char * reason ) ; <nl> <nl> HBasicBlock * CreateJoin ( HBasicBlock * first , <nl> class HOptimizedGraphBuilder : public HGraphBuilder , public AstVisitor { <nl> / / to push them as outgoing parameters . 
<nl> template < class Instruction > HInstruction * PreProcessCall ( Instruction * call ) ; <nl> <nl> + static Representation ToRepresentation ( TypeInfo info ) ; <nl> + static Representation ToRepresentation ( Handle < Type > type ) ; <nl> + <nl> void SetUpScope ( Scope * scope ) ; <nl> virtual void VisitStatements ( ZoneList < Statement * > * statements ) ; <nl> <nl> mmm a / src / ia32 / code - stubs - ia32 . cc <nl> ppp b / src / ia32 / code - stubs - ia32 . cc <nl> void ToBooleanStub : : InitializeInterfaceDescriptor ( <nl> } <nl> <nl> <nl> - void UnaryOpStub : : InitializeInterfaceDescriptor ( <nl> - Isolate * isolate , <nl> - CodeStubInterfaceDescriptor * descriptor ) { <nl> - static Register registers [ ] = { eax } ; <nl> - descriptor - > register_param_count_ = 1 ; <nl> - descriptor - > register_params_ = registers ; <nl> - descriptor - > deoptimization_handler_ = <nl> - FUNCTION_ADDR ( UnaryOpIC_Miss ) ; <nl> - } <nl> - <nl> - <nl> # define __ ACCESS_MASM ( masm ) <nl> <nl> <nl> static void ConvertHeapNumberToInt32 ( MacroAssembler * masm , <nl> } <nl> <nl> <nl> + void UnaryOpStub : : PrintName ( StringStream * stream ) { <nl> + const char * op_name = Token : : Name ( op_ ) ; <nl> + const char * overwrite_name = NULL ; / / Make g + + happy . <nl> + switch ( mode_ ) { <nl> + case UNARY_NO_OVERWRITE : overwrite_name = " Alloc " ; break ; <nl> + case UNARY_OVERWRITE : overwrite_name = " Overwrite " ; break ; <nl> + } <nl> + stream - > Add ( " UnaryOpStub_ % s_ % s_ % s " , <nl> + op_name , <nl> + overwrite_name , <nl> + UnaryOpIC : : GetName ( operand_type_ ) ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . <nl> + void UnaryOpStub : : Generate ( MacroAssembler * masm ) { <nl> + switch ( operand_type_ ) { <nl> + case UnaryOpIC : : UNINITIALIZED : <nl> + GenerateTypeTransition ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : SMI : <nl> + GenerateSmiStub ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : NUMBER : <nl> + GenerateNumberStub ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : GENERIC : <nl> + GenerateGenericStub ( masm ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateTypeTransition ( MacroAssembler * masm ) { <nl> + __ pop ( ecx ) ; / / Save return address . <nl> + <nl> + __ push ( eax ) ; / / the operand <nl> + __ push ( Immediate ( Smi : : FromInt ( op_ ) ) ) ; <nl> + __ push ( Immediate ( Smi : : FromInt ( mode_ ) ) ) ; <nl> + __ push ( Immediate ( Smi : : FromInt ( operand_type_ ) ) ) ; <nl> + <nl> + __ push ( ecx ) ; / / Push return address . <nl> + <nl> + / / Patch the caller to an appropriate specialized stub and return the <nl> + / / operation result to the caller of the stub . <nl> + __ TailCallExternalReference ( <nl> + ExternalReference ( IC_Utility ( IC : : kUnaryOp_Patch ) , masm - > isolate ( ) ) , 4 , 1 ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . 
<nl> + void UnaryOpStub : : GenerateSmiStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateSmiStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateSmiStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , undo , slow ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & undo , & slow , <nl> + Label : : kNear , Label : : kNear , Label : : kNear ) ; <nl> + __ bind ( & undo ) ; <nl> + GenerateSmiCodeUndo ( masm ) ; <nl> + __ bind ( & non_smi ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiStubBitNot ( MacroAssembler * masm ) { <nl> + Label non_smi ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiCodeSub ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label * undo , <nl> + Label * slow , <nl> + Label : : Distance non_smi_near , <nl> + Label : : Distance undo_near , <nl> + Label : : Distance slow_near ) { <nl> + / / Check whether the value is a smi . <nl> + __ JumpIfNotSmi ( eax , non_smi , non_smi_near ) ; <nl> + <nl> + / / We can ' t handle - 0 with smis , so use a type transition for that case . <nl> + __ test ( eax , eax ) ; <nl> + __ j ( zero , slow , slow_near ) ; <nl> + <nl> + / / Try optimistic subtraction ' 0 - value ' , saving operand in eax for undo . <nl> + __ mov ( edx , eax ) ; <nl> + __ Set ( eax , Immediate ( 0 ) ) ; <nl> + __ sub ( eax , edx ) ; <nl> + __ j ( overflow , undo , undo_near ) ; <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiCodeBitNot ( <nl> + MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label : : Distance non_smi_near ) { <nl> + / / Check whether the value is a smi . <nl> + __ JumpIfNotSmi ( eax , non_smi , non_smi_near ) ; <nl> + <nl> + / / Flip bits and revert inverted smi - tag . <nl> + __ not_ ( eax ) ; <nl> + __ and_ ( eax , ~ kSmiTagMask ) ; <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiCodeUndo ( MacroAssembler * masm ) { <nl> + __ mov ( eax , edx ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . 
<nl> + void UnaryOpStub : : GenerateNumberStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateNumberStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateNumberStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateNumberStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , undo , slow , call_builtin ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & undo , & call_builtin , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeSub ( masm , & slow ) ; <nl> + __ bind ( & undo ) ; <nl> + GenerateSmiCodeUndo ( masm ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + __ bind ( & call_builtin ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateNumberStubBitNot ( <nl> + MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeBitNot ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateHeapNumberCodeSub ( MacroAssembler * masm , <nl> + Label * slow ) { <nl> + __ mov ( edx , FieldOperand ( eax , HeapObject : : kMapOffset ) ) ; <nl> + __ cmp ( edx , masm - > isolate ( ) - > factory ( ) - > heap_number_map ( ) ) ; <nl> + __ j ( not_equal , slow ) ; <nl> + <nl> + if ( mode_ = = UNARY_OVERWRITE ) { <nl> + __ xor_ ( FieldOperand ( eax , HeapNumber : : kExponentOffset ) , <nl> + Immediate ( HeapNumber : : kSignMask ) ) ; / / Flip sign . <nl> + } else { <nl> + __ mov ( edx , eax ) ; <nl> + / / edx : operand <nl> + <nl> + Label slow_allocate_heapnumber , heapnumber_allocated ; <nl> + __ AllocateHeapNumber ( eax , ebx , ecx , & slow_allocate_heapnumber ) ; <nl> + __ jmp ( & heapnumber_allocated , Label : : kNear ) ; <nl> + <nl> + __ bind ( & slow_allocate_heapnumber ) ; <nl> + { <nl> + FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> + __ push ( edx ) ; <nl> + __ CallRuntime ( Runtime : : kNumberAlloc , 0 ) ; <nl> + __ pop ( edx ) ; <nl> + } <nl> + <nl> + __ bind ( & heapnumber_allocated ) ; <nl> + / / eax : allocated ' empty ' number <nl> + __ mov ( ecx , FieldOperand ( edx , HeapNumber : : kExponentOffset ) ) ; <nl> + __ xor_ ( ecx , HeapNumber : : kSignMask ) ; / / Flip sign . <nl> + __ mov ( FieldOperand ( eax , HeapNumber : : kExponentOffset ) , ecx ) ; <nl> + __ mov ( ecx , FieldOperand ( edx , HeapNumber : : kMantissaOffset ) ) ; <nl> + __ mov ( FieldOperand ( eax , HeapNumber : : kMantissaOffset ) , ecx ) ; <nl> + } <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateHeapNumberCodeBitNot ( MacroAssembler * masm , <nl> + Label * slow ) { <nl> + __ mov ( edx , FieldOperand ( eax , HeapObject : : kMapOffset ) ) ; <nl> + __ cmp ( edx , masm - > isolate ( ) - > factory ( ) - > heap_number_map ( ) ) ; <nl> + __ j ( not_equal , slow ) ; <nl> + <nl> + / / Convert the heap number in eax to an untagged integer in ecx . <nl> + IntegerConvert ( masm , eax , CpuFeatures : : IsSupported ( SSE3 ) , slow ) ; <nl> + <nl> + / / Do the bitwise operation and check if the result fits in a smi . 
<nl> + Label try_float ; <nl> + __ not_ ( ecx ) ; <nl> + __ cmp ( ecx , 0xc0000000 ) ; <nl> + __ j ( sign , & try_float , Label : : kNear ) ; <nl> + <nl> + / / Tag the result as a smi and we ' re done . <nl> + STATIC_ASSERT ( kSmiTagSize = = 1 ) ; <nl> + __ lea ( eax , Operand ( ecx , times_2 , kSmiTag ) ) ; <nl> + __ ret ( 0 ) ; <nl> + <nl> + / / Try to store the result in a heap number . <nl> + __ bind ( & try_float ) ; <nl> + if ( mode_ = = UNARY_NO_OVERWRITE ) { <nl> + Label slow_allocate_heapnumber , heapnumber_allocated ; <nl> + __ mov ( ebx , eax ) ; <nl> + __ AllocateHeapNumber ( eax , edx , edi , & slow_allocate_heapnumber ) ; <nl> + __ jmp ( & heapnumber_allocated ) ; <nl> + <nl> + __ bind ( & slow_allocate_heapnumber ) ; <nl> + { <nl> + FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> + / / Push the original HeapNumber on the stack . The integer value can ' t <nl> + / / be stored since it ' s untagged and not in the smi range ( so we can ' t <nl> + / / smi - tag it ) . We ' ll recalculate the value after the GC instead . <nl> + __ push ( ebx ) ; <nl> + __ CallRuntime ( Runtime : : kNumberAlloc , 0 ) ; <nl> + / / New HeapNumber is in eax . <nl> + __ pop ( edx ) ; <nl> + } <nl> + / / IntegerConvert uses ebx and edi as scratch registers . <nl> + / / This conversion won ' t go slow - case . <nl> + IntegerConvert ( masm , edx , CpuFeatures : : IsSupported ( SSE3 ) , slow ) ; <nl> + __ not_ ( ecx ) ; <nl> + <nl> + __ bind ( & heapnumber_allocated ) ; <nl> + } <nl> + if ( CpuFeatures : : IsSupported ( SSE2 ) ) { <nl> + CpuFeatureScope use_sse2 ( masm , SSE2 ) ; <nl> + __ cvtsi2sd ( xmm0 , ecx ) ; <nl> + __ movdbl ( FieldOperand ( eax , HeapNumber : : kValueOffset ) , xmm0 ) ; <nl> + } else { <nl> + __ push ( ecx ) ; <nl> + __ fild_s ( Operand ( esp , 0 ) ) ; <nl> + __ pop ( ecx ) ; <nl> + __ fstp_d ( FieldOperand ( eax , HeapNumber : : kValueOffset ) ) ; <nl> + } <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . <nl> + void UnaryOpStub : : GenerateGenericStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateGenericStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateGenericStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , undo , slow ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & undo , & slow , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeSub ( masm , & slow ) ; <nl> + __ bind ( & undo ) ; <nl> + GenerateSmiCodeUndo ( masm ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericStubBitNot ( MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeBitNot ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericCodeFallback ( MacroAssembler * masm ) { <nl> + / / Handle the slow case by jumping to the corresponding JavaScript builtin . <nl> + __ pop ( ecx ) ; / / pop return address . 
<nl> + __ push ( eax ) ; <nl> + __ push ( ecx ) ; / / push return address <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + __ InvokeBuiltin ( Builtins : : UNARY_MINUS , JUMP_FUNCTION ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + __ InvokeBuiltin ( Builtins : : BIT_NOT , JUMP_FUNCTION ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> void BinaryOpStub : : Initialize ( ) { <nl> platform_specific_bit_ = CpuFeatures : : IsSupported ( SSE3 ) ; <nl> } <nl> mmm a / src / ia32 / code - stubs - ia32 . h <nl> ppp b / src / ia32 / code - stubs - ia32 . h <nl> class StoreBufferOverflowStub : public PlatformCodeStub { <nl> } ; <nl> <nl> <nl> + class UnaryOpStub : public PlatformCodeStub { <nl> + public : <nl> + UnaryOpStub ( Token : : Value op , <nl> + UnaryOverwriteMode mode , <nl> + UnaryOpIC : : TypeInfo operand_type = UnaryOpIC : : UNINITIALIZED ) <nl> + : op_ ( op ) , <nl> + mode_ ( mode ) , <nl> + operand_type_ ( operand_type ) { <nl> + } <nl> + <nl> + private : <nl> + Token : : Value op_ ; <nl> + UnaryOverwriteMode mode_ ; <nl> + <nl> + / / Operand type information determined at runtime . <nl> + UnaryOpIC : : TypeInfo operand_type_ ; <nl> + <nl> + virtual void PrintName ( StringStream * stream ) ; <nl> + <nl> + class ModeBits : public BitField < UnaryOverwriteMode , 0 , 1 > { } ; <nl> + class OpBits : public BitField < Token : : Value , 1 , 7 > { } ; <nl> + class OperandTypeInfoBits : public BitField < UnaryOpIC : : TypeInfo , 8 , 3 > { } ; <nl> + <nl> + Major MajorKey ( ) { return UnaryOp ; } <nl> + int MinorKey ( ) { <nl> + return ModeBits : : encode ( mode_ ) <nl> + | OpBits : : encode ( op_ ) <nl> + | OperandTypeInfoBits : : encode ( operand_type_ ) ; <nl> + } <nl> + <nl> + / / Note : A lot of the helper functions below will vanish when we use virtual <nl> + / / function instead of switch more often . 
<nl> + void Generate ( MacroAssembler * masm ) ; <nl> + <nl> + void GenerateTypeTransition ( MacroAssembler * masm ) ; <nl> + <nl> + void GenerateSmiStub ( MacroAssembler * masm ) ; <nl> + void GenerateSmiStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateSmiStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateSmiCodeSub ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label * undo , <nl> + Label * slow , <nl> + Label : : Distance non_smi_near = Label : : kFar , <nl> + Label : : Distance undo_near = Label : : kFar , <nl> + Label : : Distance slow_near = Label : : kFar ) ; <nl> + void GenerateSmiCodeBitNot ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label : : Distance non_smi_near = Label : : kFar ) ; <nl> + void GenerateSmiCodeUndo ( MacroAssembler * masm ) ; <nl> + <nl> + void GenerateNumberStub ( MacroAssembler * masm ) ; <nl> + void GenerateNumberStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateNumberStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateHeapNumberCodeSub ( MacroAssembler * masm , Label * slow ) ; <nl> + void GenerateHeapNumberCodeBitNot ( MacroAssembler * masm , Label * slow ) ; <nl> + <nl> + void GenerateGenericStub ( MacroAssembler * masm ) ; <nl> + void GenerateGenericStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateGenericStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateGenericCodeFallback ( MacroAssembler * masm ) ; <nl> + <nl> + virtual Code : : Kind GetCodeKind ( ) const { return Code : : UNARY_OP_IC ; } <nl> + <nl> + virtual InlineCacheState GetICState ( ) { <nl> + return UnaryOpIC : : ToState ( operand_type_ ) ; <nl> + } <nl> + <nl> + virtual void FinishCode ( Handle < Code > code ) { <nl> + code - > set_unary_op_type ( operand_type_ ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> class StringHelper : public AllStatic { <nl> public : <nl> / / Generate code for copying characters using a simple loop . This should only <nl> mmm a / src / ia32 / full - codegen - ia32 . cc <nl> ppp b / src / ia32 / full - codegen - ia32 . cc <nl> void FullCodeGenerator : : VisitUnaryOperation ( UnaryOperation * expr ) { <nl> void FullCodeGenerator : : EmitUnaryOperation ( UnaryOperation * expr , <nl> const char * comment ) { <nl> Comment cmt ( masm_ , comment ) ; <nl> - UnaryOpStub stub ( expr - > op ( ) ) ; <nl> + bool can_overwrite = expr - > expression ( ) - > ResultOverwriteAllowed ( ) ; <nl> + UnaryOverwriteMode overwrite = <nl> + can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE ; <nl> + UnaryOpStub stub ( expr - > op ( ) , overwrite ) ; <nl> / / UnaryOpStub expects the argument to be in the <nl> / / accumulator register eax . <nl> VisitForAccumulatorValue ( expr - > expression ( ) ) ; <nl> mmm a / src / ia32 / lithium - codegen - ia32 . cc <nl> ppp b / src / ia32 / lithium - codegen - ia32 . cc <nl> void LCodeGen : : DoArithmeticD ( LArithmeticD * instr ) { <nl> } <nl> <nl> <nl> - void LCodeGen : : DoNegateNoSSE2D ( LNegateNoSSE2D * instr ) { <nl> - __ push ( Immediate ( - 1 ) ) ; <nl> - __ fild_s ( Operand ( esp , 0 ) ) ; <nl> - __ add ( esp , Immediate ( kPointerSize ) ) ; <nl> - __ fmulp ( ) ; <nl> - CurrentInstructionReturnsX87Result ( ) ; <nl> - } <nl> - <nl> - <nl> - <nl> void LCodeGen : : DoArithmeticT ( LArithmeticT * instr ) { <nl> ASSERT ( ToRegister ( instr - > context ( ) ) . is ( esi ) ) ; <nl> ASSERT ( ToRegister ( instr - > left ( ) ) . 
is ( edx ) ) ; <nl> int LCodeGen : : GetNextEmittedBlock ( ) const { <nl> <nl> template < class InstrType > <nl> void LCodeGen : : EmitBranch ( InstrType instr , Condition cc ) { <nl> - int left_block = instr - > TrueDestination ( chunk_ ) ; <nl> int right_block = instr - > FalseDestination ( chunk_ ) ; <nl> + int left_block = instr - > TrueDestination ( chunk_ ) ; <nl> <nl> int next_block = GetNextEmittedBlock ( ) ; <nl> <nl> - if ( right_block = = left_block | | cc = = no_condition ) { <nl> + if ( right_block = = left_block ) { <nl> EmitGoto ( left_block ) ; <nl> } else if ( left_block = = next_block ) { <nl> __ j ( NegateCondition ( cc ) , chunk_ - > GetAssemblyLabel ( right_block ) ) ; <nl> void LCodeGen : : EmitBranch ( InstrType instr , Condition cc ) { <nl> } <nl> <nl> <nl> - void LCodeGen : : DoIsNumberAndBranch ( LIsNumberAndBranch * instr ) { <nl> - Representation r = instr - > hydrogen ( ) - > value ( ) - > representation ( ) ; <nl> - if ( r . IsSmiOrInteger32 ( ) | | r . IsDouble ( ) ) { <nl> - EmitBranch ( instr , no_condition ) ; <nl> - } else { <nl> - ASSERT ( r . IsTagged ( ) ) ; <nl> - Register reg = ToRegister ( instr - > value ( ) ) ; <nl> - HType type = instr - > hydrogen ( ) - > value ( ) - > type ( ) ; <nl> - if ( type . IsTaggedNumber ( ) ) { <nl> - EmitBranch ( instr , no_condition ) ; <nl> - } <nl> - __ JumpIfSmi ( reg , instr - > TrueLabel ( chunk_ ) ) ; <nl> - __ cmp ( FieldOperand ( reg , HeapObject : : kMapOffset ) , <nl> - factory ( ) - > heap_number_map ( ) ) ; <nl> - EmitBranch ( instr , equal ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> void LCodeGen : : DoBranch ( LBranch * instr ) { <nl> Representation r = instr - > hydrogen ( ) - > value ( ) - > representation ( ) ; <nl> if ( r . IsSmiOrInteger32 ( ) ) { <nl> mmm a / src / ia32 / lithium - ia32 . cc <nl> ppp b / src / ia32 / lithium - ia32 . cc <nl> LInstruction * LChunkBuilder : : DoMul ( HMul * instr ) { <nl> } <nl> return DefineSameAsFirst ( mul ) ; <nl> } else if ( instr - > representation ( ) . IsDouble ( ) ) { <nl> - if ( CpuFeatures : : IsSafeForSnapshot ( SSE2 ) ) { <nl> - return DoArithmeticD ( Token : : MUL , instr ) ; <nl> - } <nl> - ASSERT ( instr - > right ( ) - > IsConstant ( ) & & <nl> - static_cast < HConstant * > ( instr - > right ( ) ) - > DoubleValue ( ) = = - 1 ) ; <nl> - / / TODO ( olivf ) This is currently just a hack to support the UnaryOp Minus <nl> - / / Stub . This will go away once we can use more than one X87 register , <nl> - / / thus fully support binary instructions without SSE2 . <nl> - LOperand * left = UseX87TopOfStack ( instr - > left ( ) ) ; <nl> - LNegateNoSSE2D * result = new ( zone ( ) ) LNegateNoSSE2D ( left ) ; <nl> - return DefineX87TOS ( result ) ; <nl> + return DoArithmeticD ( Token : : MUL , instr ) ; <nl> } else { <nl> ASSERT ( instr - > representation ( ) . 
IsSmiOrTagged ( ) ) ; <nl> return DoArithmeticT ( Token : : MUL , instr ) ; <nl> LInstruction * LChunkBuilder : : DoCheckHeapObject ( HCheckHeapObject * instr ) { <nl> } <nl> <nl> <nl> - LInstruction * LChunkBuilder : : DoCheckSmi ( HCheckSmi * instr ) { <nl> - LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> - return AssignEnvironment ( new ( zone ( ) ) LCheckSmi ( value ) ) ; <nl> - } <nl> - <nl> - <nl> - LInstruction * LChunkBuilder : : DoIsNumberAndBranch ( HIsNumberAndBranch * instr ) { <nl> - return new ( zone ( ) ) <nl> - LIsNumberAndBranch ( UseRegisterOrConstantAtStart ( instr - > value ( ) ) ) ; <nl> - } <nl> - <nl> - <nl> LInstruction * LChunkBuilder : : DoCheckInstanceType ( HCheckInstanceType * instr ) { <nl> LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> LOperand * temp = TempRegister ( ) ; <nl> mmm a / src / ia32 / lithium - ia32 . h <nl> ppp b / src / ia32 / lithium - ia32 . h <nl> class LCodeGen ; <nl> V ( IsObjectAndBranch ) \ <nl> V ( IsStringAndBranch ) \ <nl> V ( IsSmiAndBranch ) \ <nl> - V ( IsNumberAndBranch ) \ <nl> V ( IsUndetectableAndBranch ) \ <nl> V ( Label ) \ <nl> V ( LazyBailout ) \ <nl> class LCodeGen ; <nl> V ( MathTan ) \ <nl> V ( ModI ) \ <nl> V ( MulI ) \ <nl> - V ( NegateNoSSE2D ) \ <nl> V ( NumberTagD ) \ <nl> V ( NumberTagI ) \ <nl> V ( NumberTagU ) \ <nl> class LMathFloorOfDiv : public LTemplateInstruction < 1 , 2 , 1 > { <nl> } ; <nl> <nl> <nl> - class LNegateNoSSE2D : public LTemplateInstruction < 1 , 1 , 0 > { <nl> - public : <nl> - explicit LNegateNoSSE2D ( LOperand * value ) { <nl> - inputs_ [ 0 ] = value ; <nl> - } <nl> - <nl> - LOperand * value ( ) { return inputs_ [ 0 ] ; } <nl> - <nl> - DECLARE_CONCRETE_INSTRUCTION ( NegateNoSSE2D , " negate - no - sse2 - d " ) <nl> - } ; <nl> - <nl> - <nl> class LMulI : public LTemplateInstruction < 1 , 2 , 1 > { <nl> public : <nl> LMulI ( LOperand * left , LOperand * right , LOperand * temp ) { <nl> class LIsObjectAndBranch : public LControlInstruction < 1 , 1 > { <nl> } ; <nl> <nl> <nl> - class LIsNumberAndBranch : public LControlInstruction < 1 , 0 > { <nl> - public : <nl> - explicit LIsNumberAndBranch ( LOperand * value ) { <nl> - inputs_ [ 0 ] = value ; <nl> - } <nl> - <nl> - LOperand * value ( ) { return inputs_ [ 0 ] ; } <nl> - <nl> - DECLARE_CONCRETE_INSTRUCTION ( IsNumberAndBranch , " is - number - and - branch " ) <nl> - DECLARE_HYDROGEN_ACCESSOR ( IsNumberAndBranch ) <nl> - } ; <nl> - <nl> - <nl> class LIsStringAndBranch : public LControlInstruction < 1 , 1 > { <nl> public : <nl> LIsStringAndBranch ( LOperand * value , LOperand * temp ) { <nl> mmm a / src / ic . cc <nl> ppp b / src / ic . 
cc <nl> RUNTIME_FUNCTION ( MaybeObject * , KeyedStoreIC_MissForceGeneric ) { <nl> } <nl> <nl> <nl> + void UnaryOpIC : : patch ( Code * code ) { <nl> + set_target ( code ) ; <nl> + } <nl> + <nl> + <nl> + const char * UnaryOpIC : : GetName ( TypeInfo type_info ) { <nl> + switch ( type_info ) { <nl> + case UNINITIALIZED : return " Uninitialized " ; <nl> + case SMI : return " Smi " ; <nl> + case NUMBER : return " Number " ; <nl> + case GENERIC : return " Generic " ; <nl> + default : return " Invalid " ; <nl> + } <nl> + } <nl> + <nl> + <nl> + UnaryOpIC : : State UnaryOpIC : : ToState ( TypeInfo type_info ) { <nl> + switch ( type_info ) { <nl> + case UNINITIALIZED : <nl> + return v8 : : internal : : UNINITIALIZED ; <nl> + case SMI : <nl> + case NUMBER : <nl> + return MONOMORPHIC ; <nl> + case GENERIC : <nl> + return v8 : : internal : : GENERIC ; <nl> + } <nl> + UNREACHABLE ( ) ; <nl> + return v8 : : internal : : UNINITIALIZED ; <nl> + } <nl> + <nl> + <nl> + Handle < Type > UnaryOpIC : : TypeInfoToType ( TypeInfo type_info , Isolate * isolate ) { <nl> + switch ( type_info ) { <nl> + case UNINITIALIZED : <nl> + return handle ( Type : : None ( ) , isolate ) ; <nl> + case SMI : <nl> + return handle ( Type : : Smi ( ) , isolate ) ; <nl> + case NUMBER : <nl> + return handle ( Type : : Number ( ) , isolate ) ; <nl> + case GENERIC : <nl> + return handle ( Type : : Any ( ) , isolate ) ; <nl> + } <nl> + UNREACHABLE ( ) ; <nl> + return handle ( Type : : Any ( ) , isolate ) ; <nl> + } <nl> + <nl> + <nl> + UnaryOpIC : : TypeInfo UnaryOpIC : : GetTypeInfo ( Handle < Object > operand ) { <nl> + v8 : : internal : : TypeInfo operand_type = <nl> + v8 : : internal : : TypeInfo : : FromValue ( operand ) ; <nl> + if ( operand_type . IsSmi ( ) ) { <nl> + return SMI ; <nl> + } else if ( operand_type . IsNumber ( ) ) { <nl> + return NUMBER ; <nl> + } else { <nl> + return GENERIC ; <nl> + } <nl> + } <nl> + <nl> + <nl> + UnaryOpIC : : TypeInfo UnaryOpIC : : ComputeNewType ( <nl> + TypeInfo current_type , <nl> + TypeInfo previous_type ) { <nl> + switch ( previous_type ) { <nl> + case UNINITIALIZED : <nl> + return current_type ; <nl> + case SMI : <nl> + return ( current_type = = GENERIC ) ? GENERIC : NUMBER ; <nl> + case NUMBER : <nl> + return GENERIC ; <nl> + case GENERIC : <nl> + / / We should never do patching if we are in GENERIC state . <nl> + UNREACHABLE ( ) ; <nl> + return GENERIC ; <nl> + } <nl> + UNREACHABLE ( ) ; <nl> + return GENERIC ; <nl> + } <nl> + <nl> + <nl> void BinaryOpIC : : patch ( Code * code ) { <nl> set_target ( code ) ; <nl> } <nl> void BinaryOpIC : : StubInfoToType ( int minor_key , <nl> } <nl> <nl> <nl> - MaybeObject * UnaryOpIC : : Transition ( Handle < Object > object ) { <nl> - Code : : ExtraICState extra_ic_state = target ( ) - > extended_extra_ic_state ( ) ; <nl> - UnaryOpStub stub ( extra_ic_state ) ; <nl> + RUNTIME_FUNCTION ( MaybeObject * , UnaryOp_Patch ) { <nl> + ASSERT ( args . length ( ) = = 4 ) ; <nl> <nl> - stub . UpdateStatus ( object ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < Object > operand = args . at < Object > ( 0 ) ; <nl> + Token : : Value op = static_cast < Token : : Value > ( args . smi_at ( 1 ) ) ; <nl> + UnaryOverwriteMode mode = static_cast < UnaryOverwriteMode > ( args . smi_at ( 2 ) ) ; <nl> + UnaryOpIC : : TypeInfo previous_type = <nl> + static_cast < UnaryOpIC : : TypeInfo > ( args . smi_at ( 3 ) ) ; <nl> <nl> - Handle < Code > code = stub . 
GetCode ( isolate ( ) ) ; <nl> - set_target ( * code ) ; <nl> + UnaryOpIC : : TypeInfo type = UnaryOpIC : : GetTypeInfo ( operand ) ; <nl> + type = UnaryOpIC : : ComputeNewType ( type , previous_type ) ; <nl> <nl> - return stub . Result ( object , isolate ( ) ) ; <nl> - } <nl> + UnaryOpStub stub ( op , mode , type ) ; <nl> + Handle < Code > code = stub . GetCode ( isolate ) ; <nl> + if ( ! code . is_null ( ) ) { <nl> + if ( FLAG_trace_ic ) { <nl> + PrintF ( " [ UnaryOpIC in " ) ; <nl> + JavaScriptFrame : : PrintTop ( isolate , stdout , false , true ) ; <nl> + PrintF ( " % s = > % s # % s @ % p ] \ n " , <nl> + UnaryOpIC : : GetName ( previous_type ) , <nl> + UnaryOpIC : : GetName ( type ) , <nl> + Token : : Name ( op ) , <nl> + static_cast < void * > ( * code ) ) ; <nl> + } <nl> + UnaryOpIC ic ( isolate ) ; <nl> + ic . patch ( * code ) ; <nl> + } <nl> + <nl> + Handle < JSBuiltinsObject > builtins ( isolate - > js_builtins_object ( ) ) ; <nl> + Object * builtin = NULL ; / / Initialization calms down the compiler . <nl> + switch ( op ) { <nl> + case Token : : SUB : <nl> + builtin = builtins - > javascript_builtin ( Builtins : : UNARY_MINUS ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + builtin = builtins - > javascript_builtin ( Builtins : : BIT_NOT ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> <nl> + Handle < JSFunction > builtin_function ( JSFunction : : cast ( builtin ) , isolate ) ; <nl> <nl> - RUNTIME_FUNCTION ( MaybeObject * , UnaryOpIC_Miss ) { <nl> - HandleScope scope ( isolate ) ; <nl> - Handle < Object > object = args . at < Object > ( 0 ) ; <nl> - UnaryOpIC ic ( isolate ) ; <nl> - return ic . Transition ( object ) ; <nl> + bool caught_exception ; <nl> + Handle < Object > result = Execution : : Call ( builtin_function , operand , 0 , NULL , <nl> + & caught_exception ) ; <nl> + if ( caught_exception ) { <nl> + return Failure : : Exception ( ) ; <nl> + } <nl> + return * result ; <nl> } <nl> <nl> <nl> MaybeObject * CompareNilIC : : CompareNil ( Handle < Object > object ) { <nl> / / types must be supported as a result of the miss . <nl> bool already_monomorphic = stub . IsMonomorphic ( ) ; <nl> <nl> - stub . UpdateStatus ( object ) ; <nl> + CompareNilICStub : : State old_state = stub . GetState ( ) ; <nl> + stub . Record ( object ) ; <nl> + old_state . TraceTransition ( stub . GetState ( ) ) ; <nl> <nl> NilValue nil = stub . GetNilValue ( ) ; <nl> <nl> RUNTIME_FUNCTION ( MaybeObject * , Unreachable ) { <nl> MaybeObject * ToBooleanIC : : ToBoolean ( Handle < Object > object , <nl> Code : : ExtraICState extra_ic_state ) { <nl> ToBooleanStub stub ( extra_ic_state ) ; <nl> - bool to_boolean_value = stub . UpdateStatus ( object ) ; <nl> + bool to_boolean_value = stub . Record ( object ) ; <nl> Handle < Code > code = stub . GetCode ( isolate ( ) ) ; <nl> set_target ( * code ) ; <nl> return Smi : : FromInt ( to_boolean_value ? 1 : 0 ) ; <nl> mmm a / src / ic . h <nl> ppp b / src / ic . 
h <nl> namespace internal { <nl> ICU ( LoadPropertyWithInterceptorForCall ) \ <nl> ICU ( KeyedLoadPropertyWithInterceptor ) \ <nl> ICU ( StoreInterceptorProperty ) \ <nl> + ICU ( UnaryOp_Patch ) \ <nl> ICU ( BinaryOp_Patch ) \ <nl> ICU ( CompareIC_Miss ) \ <nl> ICU ( CompareNilIC_Miss ) \ <nl> class KeyedStoreIC : public StoreIC { <nl> <nl> class UnaryOpIC : public IC { <nl> public : <nl> - explicit UnaryOpIC ( Isolate * isolate ) : IC ( EXTRA_CALL_FRAME , isolate ) { } <nl> + / / sorted : increasingly more unspecific ( ignoring UNINITIALIZED ) <nl> + / / TODO ( svenpanne ) Using enums + switch is an antipattern , use a class instead . <nl> + enum TypeInfo { <nl> + UNINITIALIZED , <nl> + SMI , <nl> + NUMBER , <nl> + GENERIC <nl> + } ; <nl> + <nl> + static Handle < Type > TypeInfoToType ( TypeInfo info , Isolate * isolate ) ; <nl> + <nl> + explicit UnaryOpIC ( Isolate * isolate ) : IC ( NO_EXTRA_FRAME , isolate ) { } <nl> + <nl> + void patch ( Code * code ) ; <nl> + <nl> + static const char * GetName ( TypeInfo type_info ) ; <nl> + <nl> + static State ToState ( TypeInfo type_info ) ; <nl> + <nl> + static TypeInfo GetTypeInfo ( Handle < Object > operand ) ; <nl> <nl> - MUST_USE_RESULT MaybeObject * Transition ( Handle < Object > object ) ; <nl> + static TypeInfo ComputeNewType ( TypeInfo type , TypeInfo previous ) ; <nl> } ; <nl> <nl> <nl> void PatchInlinedSmiCode ( Address address , InlinedSmiCheck check ) ; <nl> <nl> DECLARE_RUNTIME_FUNCTION ( MaybeObject * , KeyedLoadIC_MissFromStubFailure ) ; <nl> DECLARE_RUNTIME_FUNCTION ( MaybeObject * , KeyedStoreIC_MissFromStubFailure ) ; <nl> - DECLARE_RUNTIME_FUNCTION ( MaybeObject * , UnaryOpIC_Miss ) ; <nl> DECLARE_RUNTIME_FUNCTION ( MaybeObject * , CompareNilIC_Miss ) ; <nl> DECLARE_RUNTIME_FUNCTION ( MaybeObject * , ToBooleanIC_Miss ) ; <nl> <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> const char * Code : : StubType2String ( StubType type ) { <nl> <nl> <nl> void Code : : PrintExtraICState ( FILE * out , Kind kind , ExtraICState extra ) { <nl> - PrintF ( out , " extra_ic_state = " ) ; <nl> const char * name = NULL ; <nl> switch ( kind ) { <nl> case CALL_IC : <nl> void Code : : PrintExtraICState ( FILE * out , Kind kind , ExtraICState extra ) { <nl> break ; <nl> } <nl> if ( name ! = NULL ) { <nl> - PrintF ( out , " % s \ n " , name ) ; <nl> + PrintF ( out , " extra_ic_state = % s \ n " , name ) ; <nl> } else { <nl> - PrintF ( out , " % d \ n " , extra ) ; <nl> + PrintF ( out , " extra_ic_state = % d \ n " , extra ) ; <nl> } <nl> } <nl> <nl> void Code : : Disassemble ( const char * name , FILE * out ) { <nl> PrintF ( out , " kind = % s \ n " , Kind2String ( kind ( ) ) ) ; <nl> if ( is_inline_cache_stub ( ) ) { <nl> PrintF ( out , " ic_state = % s \ n " , ICState2String ( ic_state ( ) ) ) ; <nl> - PrintExtraICState ( out , kind ( ) , needs_extended_extra_ic_state ( kind ( ) ) ? <nl> - extended_extra_ic_state ( ) : extra_ic_state ( ) ) ; <nl> + PrintExtraICState ( out , kind ( ) , extra_ic_state ( ) ) ; <nl> if ( ic_state ( ) = = MONOMORPHIC ) { <nl> PrintF ( out , " type = % s \ n " , StubType2String ( type ( ) ) ) ; <nl> } <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class Code : public HeapObject { <nl> / / TODO ( danno ) : This is a bit of a hack right now since there are still <nl> / / clients of this API that pass " extra " values in for argc . These clients <nl> / / should be retrofitted to used ExtendedExtraICState . 
<nl> - return kind = = COMPARE_NIL_IC | | kind = = TO_BOOLEAN_IC | | <nl> - kind = = UNARY_OP_IC ; <nl> + return kind = = COMPARE_NIL_IC | | kind = = TO_BOOLEAN_IC ; <nl> } <nl> <nl> inline StubType type ( ) ; / / Only valid for monomorphic IC stubs . <nl> mmm a / src / property - details . h <nl> ppp b / src / property - details . h <nl> namespace v8 { <nl> namespace internal { <nl> <nl> class Smi ; <nl> - class Type ; <nl> - class TypeInfo ; <nl> <nl> / / Type of properties . <nl> / / Order of properties is significant . <nl> class Representation { <nl> <nl> static Representation FromKind ( Kind kind ) { return Representation ( kind ) ; } <nl> <nl> - / / TODO ( rossberg ) : this should die eventually . <nl> - static Representation FromType ( TypeInfo info ) ; <nl> - static Representation FromType ( Handle < Type > type ) ; <nl> - <nl> bool Equals ( const Representation & other ) const { <nl> return kind_ = = other . kind_ ; <nl> } <nl> mmm a / src / type - info . cc <nl> ppp b / src / type - info . cc <nl> Handle < Type > TypeFeedbackOracle : : UnaryType ( TypeFeedbackId id ) { <nl> } <nl> Handle < Code > code = Handle < Code > : : cast ( object ) ; <nl> ASSERT ( code - > is_unary_op_stub ( ) ) ; <nl> - return UnaryOpStub ( code - > extra_ic_state ( ) ) . GetType ( isolate ( ) ) ; <nl> + return UnaryOpIC : : TypeInfoToType ( <nl> + static_cast < UnaryOpIC : : TypeInfo > ( code - > unary_op_type ( ) ) , isolate ( ) ) ; <nl> } <nl> <nl> <nl> void TypeFeedbackOracle : : SetInfo ( TypeFeedbackId ast_id , Object * target ) { <nl> # endif <nl> } <nl> <nl> - <nl> - Representation Representation : : FromType ( TypeInfo info ) { <nl> - if ( info . IsUninitialized ( ) ) return Representation : : None ( ) ; <nl> - / / TODO ( verwaest ) : Return Smi rather than Integer32 . <nl> - if ( info . IsSmi ( ) ) return Representation : : Integer32 ( ) ; <nl> - if ( info . IsInteger32 ( ) ) return Representation : : Integer32 ( ) ; <nl> - if ( info . IsDouble ( ) ) return Representation : : Double ( ) ; <nl> - if ( info . IsNumber ( ) ) return Representation : : Double ( ) ; <nl> - return Representation : : Tagged ( ) ; <nl> - } <nl> - <nl> - <nl> } } / / namespace v8 : : internal <nl> mmm a / src / types . cc <nl> ppp b / src / types . cc <nl> Type * Type : : Optional ( Handle < Type > type ) { <nl> : Union ( type , Undefined ( ) - > handle_via_isolate_of ( * type ) ) ; <nl> } <nl> <nl> - <nl> - Representation Representation : : FromType ( Handle < Type > type ) { <nl> - if ( type - > Is ( Type : : None ( ) ) ) return Representation : : None ( ) ; <nl> - if ( type - > Is ( Type : : Signed32 ( ) ) ) return Representation : : Integer32 ( ) ; <nl> - if ( type - > Is ( Type : : Number ( ) ) ) return Representation : : Double ( ) ; <nl> - return Representation : : Tagged ( ) ; <nl> - } <nl> - <nl> - <nl> } } / / namespace v8 : : internal <nl> mmm a / src / x64 / code - stubs - x64 . cc <nl> ppp b / src / x64 / code - stubs - x64 . 
cc <nl> void ToBooleanStub : : InitializeInterfaceDescriptor ( <nl> descriptor - > deoptimization_handler_ = <nl> FUNCTION_ADDR ( ToBooleanIC_Miss ) ; <nl> descriptor - > SetMissHandler ( <nl> - ExternalReference ( IC_Utility ( IC : : kToBooleanIC_Miss ) , isolate ) ) ; <nl> - } <nl> - <nl> - <nl> - void UnaryOpStub : : InitializeInterfaceDescriptor ( <nl> - Isolate * isolate , <nl> - CodeStubInterfaceDescriptor * descriptor ) { <nl> - static Register registers [ ] = { rax } ; <nl> - descriptor - > register_param_count_ = 1 ; <nl> - descriptor - > register_params_ = registers ; <nl> - descriptor - > deoptimization_handler_ = <nl> - FUNCTION_ADDR ( UnaryOpIC_Miss ) ; <nl> + ExternalReference ( IC_Utility ( IC : : kToBooleanIC_Miss ) , isolate ) ) ; <nl> } <nl> <nl> <nl> void IntegerConvert ( MacroAssembler * masm , <nl> } <nl> <nl> <nl> + void UnaryOpStub : : Generate ( MacroAssembler * masm ) { <nl> + switch ( operand_type_ ) { <nl> + case UnaryOpIC : : UNINITIALIZED : <nl> + GenerateTypeTransition ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : SMI : <nl> + GenerateSmiStub ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : NUMBER : <nl> + GenerateNumberStub ( masm ) ; <nl> + break ; <nl> + case UnaryOpIC : : GENERIC : <nl> + GenerateGenericStub ( masm ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateTypeTransition ( MacroAssembler * masm ) { <nl> + __ pop ( rcx ) ; / / Save return address . <nl> + <nl> + __ push ( rax ) ; / / the operand <nl> + __ Push ( Smi : : FromInt ( op_ ) ) ; <nl> + __ Push ( Smi : : FromInt ( mode_ ) ) ; <nl> + __ Push ( Smi : : FromInt ( operand_type_ ) ) ; <nl> + <nl> + __ push ( rcx ) ; / / Push return address . <nl> + <nl> + / / Patch the caller to an appropriate specialized stub and return the <nl> + / / operation result to the caller of the stub . <nl> + __ TailCallExternalReference ( <nl> + ExternalReference ( IC_Utility ( IC : : kUnaryOp_Patch ) , masm - > isolate ( ) ) , 4 , 1 ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . 
<nl> + void UnaryOpStub : : GenerateSmiStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateSmiStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateSmiStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiStubSub ( MacroAssembler * masm ) { <nl> + Label slow ; <nl> + GenerateSmiCodeSub ( masm , & slow , & slow , Label : : kNear , Label : : kNear ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiStubBitNot ( MacroAssembler * masm ) { <nl> + Label non_smi ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiCodeSub ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label * slow , <nl> + Label : : Distance non_smi_near , <nl> + Label : : Distance slow_near ) { <nl> + Label done ; <nl> + __ JumpIfNotSmi ( rax , non_smi , non_smi_near ) ; <nl> + __ SmiNeg ( rax , rax , & done , Label : : kNear ) ; <nl> + __ jmp ( slow , slow_near ) ; <nl> + __ bind ( & done ) ; <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateSmiCodeBitNot ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label : : Distance non_smi_near ) { <nl> + __ JumpIfNotSmi ( rax , non_smi , non_smi_near ) ; <nl> + __ SmiNot ( rax , rax ) ; <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . <nl> + void UnaryOpStub : : GenerateNumberStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateNumberStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateNumberStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateNumberStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , slow , call_builtin ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & call_builtin , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeSub ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + __ bind ( & call_builtin ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateNumberStubBitNot ( <nl> + MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeBitNot ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateTypeTransition ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateHeapNumberCodeSub ( MacroAssembler * masm , <nl> + Label * slow ) { <nl> + / / Check if the operand is a heap number . <nl> + __ CompareRoot ( FieldOperand ( rax , HeapObject : : kMapOffset ) , <nl> + Heap : : kHeapNumberMapRootIndex ) ; <nl> + __ j ( not_equal , slow ) ; <nl> + <nl> + / / Operand is a float , negate its value by flipping the sign bit . 
<nl> + if ( mode_ = = UNARY_OVERWRITE ) { <nl> + __ Set ( kScratchRegister , 0x01 ) ; <nl> + __ shl ( kScratchRegister , Immediate ( 63 ) ) ; <nl> + __ xor_ ( FieldOperand ( rax , HeapNumber : : kValueOffset ) , kScratchRegister ) ; <nl> + } else { <nl> + / / Allocate a heap number before calculating the answer , <nl> + / / so we don ' t have an untagged double around during GC . <nl> + Label slow_allocate_heapnumber , heapnumber_allocated ; <nl> + __ AllocateHeapNumber ( rcx , rbx , & slow_allocate_heapnumber ) ; <nl> + __ jmp ( & heapnumber_allocated ) ; <nl> + <nl> + __ bind ( & slow_allocate_heapnumber ) ; <nl> + { <nl> + FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> + __ push ( rax ) ; <nl> + __ CallRuntime ( Runtime : : kNumberAlloc , 0 ) ; <nl> + __ movq ( rcx , rax ) ; <nl> + __ pop ( rax ) ; <nl> + } <nl> + __ bind ( & heapnumber_allocated ) ; <nl> + / / rcx : allocated ' empty ' number <nl> + <nl> + / / Copy the double value to the new heap number , flipping the sign . <nl> + __ movq ( rdx , FieldOperand ( rax , HeapNumber : : kValueOffset ) ) ; <nl> + __ Set ( kScratchRegister , 0x01 ) ; <nl> + __ shl ( kScratchRegister , Immediate ( 63 ) ) ; <nl> + __ xor_ ( rdx , kScratchRegister ) ; / / Flip sign . <nl> + __ movq ( FieldOperand ( rcx , HeapNumber : : kValueOffset ) , rdx ) ; <nl> + __ movq ( rax , rcx ) ; <nl> + } <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateHeapNumberCodeBitNot ( MacroAssembler * masm , <nl> + Label * slow ) { <nl> + / / Check if the operand is a heap number . <nl> + __ CompareRoot ( FieldOperand ( rax , HeapObject : : kMapOffset ) , <nl> + Heap : : kHeapNumberMapRootIndex ) ; <nl> + __ j ( not_equal , slow ) ; <nl> + <nl> + / / Convert the heap number in rax to an untagged integer in rcx . <nl> + IntegerConvert ( masm , rax , rax ) ; <nl> + <nl> + / / Do the bitwise operation and smi tag the result . <nl> + __ notl ( rax ) ; <nl> + __ Integer32ToSmi ( rax , rax ) ; <nl> + __ ret ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + / / TODO ( svenpanne ) : Use virtual functions instead of switch . <nl> + void UnaryOpStub : : GenerateGenericStub ( MacroAssembler * masm ) { <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + GenerateGenericStubSub ( masm ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + GenerateGenericStubBitNot ( masm ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericStubSub ( MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeSub ( masm , & non_smi , & slow , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeSub ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericStubBitNot ( MacroAssembler * masm ) { <nl> + Label non_smi , slow ; <nl> + GenerateSmiCodeBitNot ( masm , & non_smi , Label : : kNear ) ; <nl> + __ bind ( & non_smi ) ; <nl> + GenerateHeapNumberCodeBitNot ( masm , & slow ) ; <nl> + __ bind ( & slow ) ; <nl> + GenerateGenericCodeFallback ( masm ) ; <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : GenerateGenericCodeFallback ( MacroAssembler * masm ) { <nl> + / / Handle the slow case by jumping to the JavaScript builtin . 
<nl> + __ pop ( rcx ) ; / / pop return address <nl> + __ push ( rax ) ; <nl> + __ push ( rcx ) ; / / push return address <nl> + switch ( op_ ) { <nl> + case Token : : SUB : <nl> + __ InvokeBuiltin ( Builtins : : UNARY_MINUS , JUMP_FUNCTION ) ; <nl> + break ; <nl> + case Token : : BIT_NOT : <nl> + __ InvokeBuiltin ( Builtins : : BIT_NOT , JUMP_FUNCTION ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void UnaryOpStub : : PrintName ( StringStream * stream ) { <nl> + const char * op_name = Token : : Name ( op_ ) ; <nl> + const char * overwrite_name = NULL ; / / Make g + + happy . <nl> + switch ( mode_ ) { <nl> + case UNARY_NO_OVERWRITE : overwrite_name = " Alloc " ; break ; <nl> + case UNARY_OVERWRITE : overwrite_name = " Overwrite " ; break ; <nl> + } <nl> + stream - > Add ( " UnaryOpStub_ % s_ % s_ % s " , <nl> + op_name , <nl> + overwrite_name , <nl> + UnaryOpIC : : GetName ( operand_type_ ) ) ; <nl> + } <nl> + <nl> + <nl> void BinaryOpStub : : Initialize ( ) { } <nl> <nl> <nl> mmm a / src / x64 / code - stubs - x64 . h <nl> ppp b / src / x64 / code - stubs - x64 . h <nl> class StoreBufferOverflowStub : public PlatformCodeStub { <nl> } ; <nl> <nl> <nl> + class UnaryOpStub : public PlatformCodeStub { <nl> + public : <nl> + UnaryOpStub ( Token : : Value op , <nl> + UnaryOverwriteMode mode , <nl> + UnaryOpIC : : TypeInfo operand_type = UnaryOpIC : : UNINITIALIZED ) <nl> + : op_ ( op ) , <nl> + mode_ ( mode ) , <nl> + operand_type_ ( operand_type ) { <nl> + } <nl> + <nl> + private : <nl> + Token : : Value op_ ; <nl> + UnaryOverwriteMode mode_ ; <nl> + <nl> + / / Operand type information determined at runtime . <nl> + UnaryOpIC : : TypeInfo operand_type_ ; <nl> + <nl> + virtual void PrintName ( StringStream * stream ) ; <nl> + <nl> + class ModeBits : public BitField < UnaryOverwriteMode , 0 , 1 > { } ; <nl> + class OpBits : public BitField < Token : : Value , 1 , 7 > { } ; <nl> + class OperandTypeInfoBits : public BitField < UnaryOpIC : : TypeInfo , 8 , 3 > { } ; <nl> + <nl> + Major MajorKey ( ) { return UnaryOp ; } <nl> + int MinorKey ( ) { <nl> + return ModeBits : : encode ( mode_ ) <nl> + | OpBits : : encode ( op_ ) <nl> + | OperandTypeInfoBits : : encode ( operand_type_ ) ; <nl> + } <nl> + <nl> + / / Note : A lot of the helper functions below will vanish when we use virtual <nl> + / / function instead of switch more often . 
<nl> + void Generate ( MacroAssembler * masm ) ; <nl> + <nl> + void GenerateTypeTransition ( MacroAssembler * masm ) ; <nl> + <nl> + void GenerateSmiStub ( MacroAssembler * masm ) ; <nl> + void GenerateSmiStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateSmiStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateSmiCodeSub ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label * slow , <nl> + Label : : Distance non_smi_near = Label : : kFar , <nl> + Label : : Distance slow_near = Label : : kFar ) ; <nl> + void GenerateSmiCodeBitNot ( MacroAssembler * masm , <nl> + Label * non_smi , <nl> + Label : : Distance non_smi_near ) ; <nl> + <nl> + void GenerateNumberStub ( MacroAssembler * masm ) ; <nl> + void GenerateNumberStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateNumberStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateHeapNumberCodeSub ( MacroAssembler * masm , Label * slow ) ; <nl> + void GenerateHeapNumberCodeBitNot ( MacroAssembler * masm , Label * slow ) ; <nl> + <nl> + void GenerateGenericStub ( MacroAssembler * masm ) ; <nl> + void GenerateGenericStubSub ( MacroAssembler * masm ) ; <nl> + void GenerateGenericStubBitNot ( MacroAssembler * masm ) ; <nl> + void GenerateGenericCodeFallback ( MacroAssembler * masm ) ; <nl> + <nl> + virtual Code : : Kind GetCodeKind ( ) const { return Code : : UNARY_OP_IC ; } <nl> + <nl> + virtual InlineCacheState GetICState ( ) { <nl> + return UnaryOpIC : : ToState ( operand_type_ ) ; <nl> + } <nl> + <nl> + virtual void FinishCode ( Handle < Code > code ) { <nl> + code - > set_unary_op_type ( operand_type_ ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> class StringHelper : public AllStatic { <nl> public : <nl> / / Generate code for copying characters using a simple loop . This should only <nl> mmm a / src / x64 / full - codegen - x64 . cc <nl> ppp b / src / x64 / full - codegen - x64 . cc <nl> void FullCodeGenerator : : EmitUnaryOperation ( UnaryOperation * expr , <nl> const char * comment ) { <nl> / / TODO ( svenpanne ) : Allowing format strings in Comment would be nice here . . . <nl> Comment cmt ( masm_ , comment ) ; <nl> - UnaryOpStub stub ( expr - > op ( ) ) ; <nl> + bool can_overwrite = expr - > expression ( ) - > ResultOverwriteAllowed ( ) ; <nl> + UnaryOverwriteMode overwrite = <nl> + can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE ; <nl> + UnaryOpStub stub ( expr - > op ( ) , overwrite ) ; <nl> / / UnaryOpStub expects the argument to be in the <nl> / / accumulator register rax . <nl> VisitForAccumulatorValue ( expr - > expression ( ) ) ; <nl> mmm a / src / x64 / lithium - codegen - x64 . cc <nl> ppp b / src / x64 / lithium - codegen - x64 . 
cc <nl> int LCodeGen : : GetNextEmittedBlock ( ) const { <nl> <nl> template < class InstrType > <nl> void LCodeGen : : EmitBranch ( InstrType instr , Condition cc ) { <nl> - int left_block = instr - > TrueDestination ( chunk_ ) ; <nl> int right_block = instr - > FalseDestination ( chunk_ ) ; <nl> + int left_block = instr - > TrueDestination ( chunk_ ) ; <nl> <nl> int next_block = GetNextEmittedBlock ( ) ; <nl> <nl> - if ( right_block = = left_block | | cc = = no_condition ) { <nl> + if ( right_block = = left_block ) { <nl> EmitGoto ( left_block ) ; <nl> } else if ( left_block = = next_block ) { <nl> __ j ( NegateCondition ( cc ) , chunk_ - > GetAssemblyLabel ( right_block ) ) ; <nl> void LCodeGen : : DoDebugBreak ( LDebugBreak * instr ) { <nl> } <nl> <nl> <nl> - void LCodeGen : : DoIsNumberAndBranch ( LIsNumberAndBranch * instr ) { <nl> - Representation r = instr - > hydrogen ( ) - > value ( ) - > representation ( ) ; <nl> - if ( r . IsSmiOrInteger32 ( ) | | r . IsDouble ( ) ) { <nl> - EmitBranch ( instr , no_condition ) ; <nl> - } else { <nl> - ASSERT ( r . IsTagged ( ) ) ; <nl> - Register reg = ToRegister ( instr - > value ( ) ) ; <nl> - HType type = instr - > hydrogen ( ) - > value ( ) - > type ( ) ; <nl> - if ( type . IsTaggedNumber ( ) ) { <nl> - EmitBranch ( instr , no_condition ) ; <nl> - } <nl> - __ JumpIfSmi ( reg , instr - > TrueLabel ( chunk_ ) ) ; <nl> - __ CompareRoot ( FieldOperand ( reg , HeapObject : : kMapOffset ) , <nl> - Heap : : kHeapNumberMapRootIndex ) ; <nl> - EmitBranch ( instr , equal ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> void LCodeGen : : DoBranch ( LBranch * instr ) { <nl> Representation r = instr - > hydrogen ( ) - > value ( ) - > representation ( ) ; <nl> if ( r . IsInteger32 ( ) ) { <nl> mmm a / src / x64 / lithium - x64 . cc <nl> ppp b / src / x64 / lithium - x64 . cc <nl> LInstruction * LChunkBuilder : : DoCheckHeapObject ( HCheckHeapObject * instr ) { <nl> } <nl> <nl> <nl> - LInstruction * LChunkBuilder : : DoCheckSmi ( HCheckSmi * instr ) { <nl> - LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> - return AssignEnvironment ( new ( zone ( ) ) LCheckSmi ( value ) ) ; <nl> - } <nl> - <nl> - <nl> - LInstruction * LChunkBuilder : : DoIsNumberAndBranch ( HIsNumberAndBranch * instr ) { <nl> - return new ( zone ( ) ) LIsNumberAndBranch ( <nl> - UseRegisterOrConstantAtStart ( instr - > value ( ) ) ) ; <nl> - } <nl> - <nl> - <nl> LInstruction * LChunkBuilder : : DoCheckInstanceType ( HCheckInstanceType * instr ) { <nl> LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> LCheckInstanceType * result = new ( zone ( ) ) LCheckInstanceType ( value ) ; <nl> mmm a / src / x64 / lithium - x64 . h <nl> ppp b / src / x64 / lithium - x64 . 
h <nl> class LCodeGen ; <nl> V ( IsObjectAndBranch ) \ <nl> V ( IsStringAndBranch ) \ <nl> V ( IsSmiAndBranch ) \ <nl> - V ( IsNumberAndBranch ) \ <nl> V ( IsUndetectableAndBranch ) \ <nl> V ( Label ) \ <nl> V ( LazyBailout ) \ <nl> class LIsObjectAndBranch : public LControlInstruction < 1 , 0 > { <nl> } ; <nl> <nl> <nl> - class LIsNumberAndBranch : public LControlInstruction < 1 , 0 > { <nl> - public : <nl> - explicit LIsNumberAndBranch ( LOperand * value ) { <nl> - inputs_ [ 0 ] = value ; <nl> - } <nl> - <nl> - LOperand * value ( ) { return inputs_ [ 0 ] ; } <nl> - <nl> - DECLARE_CONCRETE_INSTRUCTION ( IsNumberAndBranch , " is - number - and - branch " ) <nl> - DECLARE_HYDROGEN_ACCESSOR ( IsNumberAndBranch ) <nl> - } ; <nl> - <nl> - <nl> class LIsStringAndBranch : public LControlInstruction < 1 , 1 > { <nl> public : <nl> explicit LIsStringAndBranch ( LOperand * value , LOperand * temp ) { <nl> | Revert " Convert UnaryOpStub to a HydrogenCodeStub " | v8/v8 | 950a372834ad4252cade7c92e6565012a9d50865 | 2013-07-03T19:57:25Z |
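The revert recorded above restores the hand-written UnaryOpStub, whose heap-number SUB path negates a double by XOR-ing the IEEE-754 sign bit (the `shl kScratchRegister, 63` / `xor_` sequence in GenerateHeapNumberCodeSub, or flipping HeapNumber::kSignMask on ia32). Below is a minimal standalone C++ sketch of that sign-flip trick only — it is not V8 code and uses no MacroAssembler:

```cpp
// Standalone illustration (not V8 code): negate a double by flipping the
// IEEE-754 sign bit, the same trick the stub emits on the stored value.
#include <cstdint>
#include <cstdio>
#include <cstring>

static double FlipSign(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // type-pun safely via memcpy
  bits ^= uint64_t{1} << 63;                // flip only the sign bit
  std::memcpy(&value, &bits, sizeof bits);
  return value;
}

int main() {
  std::printf("%g %g %g\n", FlipSign(1.5), FlipSign(-0.25), FlipSign(0.0));
  // prints: -1.5 0.25 -0
  return 0;
}
```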
mmm a / src / rdb_protocol / term_walker . cc <nl> ppp b / src / rdb_protocol / term_walker . cc <nl> class term_walker_t { <nl> <nl> if ( type = = Term : : ASC | | type = = Term : : DESC ) { <nl> rcheck_src ( bt , <nl> - prev_frame = = nullptr | | prev_frame - > type = = Term : : ORDER_BY , <nl> + prev_frame ! = nullptr & & prev_frame - > type = = Term : : ORDER_BY , <nl> base_exc_t : : LOGIC , <nl> strprintf ( " % s may only be used as an argument to ORDER_BY . " , <nl> ( type = = Term : : ASC ? " ASC " : " DESC " ) ) ) ; <nl> | Fix | rethinkdb/rethinkdb | 74c80d946db3897bf7338d19c5bdd26b83c6ad31 | 2015-10-14T18:58:26Z |
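The one-line fix above tightens the ASC/DESC check from `prev_frame == nullptr || ...` to `prev_frame != nullptr && ...`, so a bare top-level ASC/DESC (no parent frame) is now rejected instead of slipping through. A self-contained sketch of the two predicates — `Frame` and `TermType` here are stand-ins, not RethinkDB types:

```cpp
// Standalone illustration of the old vs. new predicate from the diff above.
#include <cstdio>

enum class TermType { ORDER_BY, OTHER };
struct Frame { TermType type; };

static bool old_check(const Frame* prev) {
  return prev == nullptr || prev->type == TermType::ORDER_BY;  // pre-fix
}
static bool new_check(const Frame* prev) {
  return prev != nullptr && prev->type == TermType::ORDER_BY;  // post-fix
}

int main() {
  Frame order_by{TermType::ORDER_BY}, other{TermType::OTHER};
  std::printf("no parent:  old=%d new=%d\n", old_check(nullptr), new_check(nullptr));     // 1 0
  std::printf("ORDER_BY:   old=%d new=%d\n", old_check(&order_by), new_check(&order_by)); // 1 1
  std::printf("other term: old=%d new=%d\n", old_check(&other), new_check(&other));       // 0 0
  return 0;
}
```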
mmm a / Tests / EndToEndTests / EvalClientTests / CNTKLibraryCSEvalExamplesTest / MemoryTests . cs <nl> ppp b / Tests / EndToEndTests / EvalClientTests / CNTKLibraryCSEvalExamplesTest / MemoryTests . cs <nl> <nl> <nl> namespace CNTKLibraryCSEvalExamples <nl> { <nl> - class MemoryTests <nl> + internal sealed class MemoryTests <nl> { <nl> public static DeviceDescriptor Device0 ; <nl> public static Axis Axis0 ; <nl> class MemoryTests <nl> <nl> public static void ValidateObjectReferences ( DeviceDescriptor device ) <nl> { <nl> - var test = new SetupMemoeryTests ( ) ; <nl> - test . SetupUsingResetModel ( device ) ; <nl> + using ( var test = new SetupMemoeryTests ( ) ) <nl> + { <nl> + test . SetupUsingResetModel ( device ) ; <nl> + } <nl> <nl> Console . WriteLine ( " \ n1 . Run : Test saved object references . \ n " ) ; <nl> WriteOutputs ( ) ; <nl> mmm a / Tests / EndToEndTests / EvalClientTests / CNTKLibraryCSEvalExamplesTest / SetupMemoeryTests . cs <nl> ppp b / Tests / EndToEndTests / EvalClientTests / CNTKLibraryCSEvalExamplesTest / SetupMemoeryTests . cs <nl> <nl> <nl> namespace CNTKLibraryCSEvalExamples <nl> { <nl> - public class SetupMemoeryTests <nl> + internal sealed class SetupMemoeryTests : IDisposable <nl> { <nl> + public void Dispose ( ) <nl> + { <nl> + } <nl> + <nl> public void SetupUsingResetModel ( DeviceDescriptor device ) <nl> { <nl> try <nl> mmm a / bindings / csharp / Swig / cntk_cs . i <nl> ppp b / bindings / csharp / Swig / cntk_cs . i <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> public static System . Collections . Generic . IList < DeviceDescriptor > AllDevices ( ) <nl> { <nl> var deviceVector = GetAllDevices ( ) ; <nl> + / / The CopyTo is to ensure the elements in the deviceVector can live beyond deviceVector itself . <nl> var deviceArray = new DeviceDescriptor [ deviceVector . Count ] ; <nl> deviceVector . CopyTo ( deviceArray ) ; <nl> var deviceList = new System . Collections . Generic . List < DeviceDescriptor > ( deviceArray ) ; <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> return this . GetDeviceType ( ) . GetHashCode ( ) ; <nl> } <nl> <nl> - public static void SetExcludedDevices ( System . Collections . Generic . IList < DeviceDescriptor > excluded ) <nl> + public static void SetExcludedDevices ( System . Collections . Generic . IEnumerable < DeviceDescriptor > excluded ) <nl> { <nl> - var excludeVector = new DeviceDescriptorVector ( excluded . Count ) ; <nl> + var excludeVector = new DeviceDescriptorVector ( ) ; <nl> foreach ( DeviceDescriptor element in excluded ) <nl> { <nl> excludeVector . Add ( element ) ; <nl> | fix errors ; add dispose ( ) to SetupMemoryTests | microsoft/CNTK | d01c4c79e1812cb3eb5d9cfb8c5db73be9c9348d | 2017-03-27T05:52:51Z |
mmm a / xbmc / interfaces / json - rpc / ApplicationOperations . cpp <nl> ppp b / xbmc / interfaces / json - rpc / ApplicationOperations . cpp <nl> JSONRPC_STATUS CApplicationOperations : : GetPropertyValue ( const std : : string & prope <nl> result [ " revision " ] = CCompileInfo : : GetSCMID ( ) ; <nl> std : : string tag = CCompileInfo : : GetSuffix ( ) ; <nl> if ( StringUtils : : StartsWithNoCase ( tag , " alpha " ) ) <nl> + { <nl> result [ " tag " ] = " alpha " ; <nl> + result [ " tagversion " ] = StringUtils : : Mid ( tag , 5 ) ; <nl> + } <nl> else if ( StringUtils : : StartsWithNoCase ( tag , " beta " ) ) <nl> + { <nl> result [ " tag " ] = " beta " ; <nl> + result [ " tagversion " ] = StringUtils : : Mid ( tag , 4 ) ; <nl> + } <nl> else if ( StringUtils : : StartsWithNoCase ( tag , " rc " ) ) <nl> + { <nl> result [ " tag " ] = " releasecandidate " ; <nl> + result [ " tagversion " ] = StringUtils : : Mid ( tag , 2 ) ; <nl> + } <nl> else if ( tag . empty ( ) ) <nl> result [ " tag " ] = " stable " ; <nl> else <nl> mmm a / xbmc / interfaces / json - rpc / schema / types . json <nl> ppp b / xbmc / interfaces / json - rpc / schema / types . json <nl> <nl> " major " : { " type " : " integer " , " minimum " : 0 , " required " : true } , <nl> " minor " : { " type " : " integer " , " minimum " : 0 , " required " : true } , <nl> " revision " : { " type " : [ " string " , " integer " ] } , <nl> - " tag " : { " type " : " string " , " enum " : [ " prealpha " , " alpha " , " beta " , " releasecandidate " , " stable " ] , " required " : true } <nl> + " tag " : { " type " : " string " , " enum " : [ " prealpha " , " alpha " , " beta " , " releasecandidate " , " stable " ] , " required " : true } , <nl> + " tagversion " : { " type " : " string " } <nl> } <nl> } <nl> } <nl> mmm a / xbmc / interfaces / json - rpc / schema / version . txt <nl> ppp b / xbmc / interfaces / json - rpc / schema / version . txt <nl> @ @ - 1 + 1 @ @ <nl> - 6 . 17 . 0 <nl> + 6 . 18 . 0 <nl> | jsonrpc : add " tagversion " property to Application . GetProperties ' " version " property | xbmc/xbmc | 3b9d078cc9c3c84a1606ff752850d787f84aca43 | 2014-08-22T11:05:03Z |
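The Kodi change above adds a "tagversion" property by taking the remainder of the version suffix with StringUtils::Mid at offsets 5, 4 and 2 — the lengths of "alpha", "beta" and "rc". A standalone sketch of the same split using std::string::substr; the case-insensitive matching is omitted, and the fallback for an unrecognised non-empty suffix is not shown in the diff, so it is only guessed here from the schema enum:

```cpp
// Standalone illustration (not Kodi code) of deriving tag/tagversion.
#include <cstdio>
#include <initializer_list>
#include <string>

struct TagInfo { std::string tag, tagversion; };

static bool starts_with(const std::string& s, const std::string& p) {
  return s.compare(0, p.size(), p) == 0;  // case handling omitted in this sketch
}

static TagInfo SplitSuffix(const std::string& suffix) {
  if (starts_with(suffix, "alpha")) return {"alpha", suffix.substr(5)};
  if (starts_with(suffix, "beta"))  return {"beta", suffix.substr(4)};
  if (starts_with(suffix, "rc"))    return {"releasecandidate", suffix.substr(2)};
  if (suffix.empty())               return {"stable", ""};
  return {"prealpha", ""};  // assumption: fallback branch is not shown in the diff above
}

int main() {
  for (const char* s : {"alpha1", "beta2", "rc3", ""}) {
    TagInfo info = SplitSuffix(s);
    std::printf("'%s' -> tag=%s tagversion=%s\n", s, info.tag.c_str(), info.tagversion.c_str());
  }
  return 0;
}
```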
mmm a / src / mongo / db / catalog / index_catalog . cpp <nl> ppp b / src / mongo / db / catalog / index_catalog . cpp <nl> namespace mongo { <nl> return _details - > getCompletedIndexCount ( ) ; <nl> } <nl> <nl> + bool IndexCatalog : : haveIdIndex ( ) const { <nl> + return _details - > isSystemFlagSet ( NamespaceDetails : : Flag_HaveIdIndex ) <nl> + | | findIdIndex ( ) ! = NULL ; <nl> + } <nl> + <nl> IndexDescriptor * IndexCatalog : : findIdIndex ( ) const { <nl> IndexIterator ii = getIndexIterator ( false ) ; <nl> while ( ii . more ( ) ) { <nl> mmm a / src / mongo / db / catalog / index_catalog . h <nl> ppp b / src / mongo / db / catalog / index_catalog . h <nl> namespace mongo { <nl> * in which case everything from this tree has to go away <nl> * / <nl> <nl> + bool haveIdIndex ( ) const ; <nl> + <nl> IndexDescriptor * findIdIndex ( ) const ; <nl> <nl> / * * <nl> mmm a / src / mongo / db / namespace_details . h <nl> ppp b / src / mongo / db / namespace_details . h <nl> namespace mongo { <nl> <nl> void syncUserFlags ( const string & ns ) ; <nl> <nl> - / * @ return - 1 = not found <nl> - generally id is first index , so not that expensive an operation ( assuming present ) . <nl> - * / <nl> - int findIdIndex ( ) { <nl> - IndexIterator i = ii ( ) ; <nl> - while ( i . more ( ) ) { <nl> - if ( i . next ( ) . isIdIndex ( ) ) <nl> - return i . pos ( ) - 1 ; <nl> - } <nl> - return - 1 ; <nl> - } <nl> - <nl> - bool haveIdIndex ( ) { <nl> - return isSystemFlagSet ( NamespaceDetails : : Flag_HaveIdIndex ) | | findIdIndex ( ) > = 0 ; <nl> - } <nl> - <nl> / * return which " deleted bucket " for this size object * / <nl> static int bucket ( int size ) { <nl> for ( int i = 0 ; i < Buckets ; i + + ) { <nl> mmm a / src / mongo / db / ops / update . cpp <nl> ppp b / src / mongo / db / ops / update . cpp <nl> namespace mongo { <nl> uasserted ( 16837 , status . reason ( ) ) ; <nl> } <nl> <nl> - const bool idRequired = collection - > details ( ) - > haveIdIndex ( ) ; <nl> + const bool idRequired = collection - > getIndexCatalog ( ) - > haveIdIndex ( ) ; <nl> <nl> / / Move _id as first element <nl> mb : : Element idElem = mb : : findFirstChildNamed ( doc . root ( ) , idFieldName ) ; <nl> namespace mongo { <nl> } <nl> <nl> / / If the collection doesn ' t exist or has an _id index , then an _id is required <nl> - const bool idRequired = collection ? collection - > details ( ) - > haveIdIndex ( ) : true ; <nl> + const bool idRequired = collection ? collection - > getIndexCatalog ( ) - > haveIdIndex ( ) : true ; <nl> <nl> mb : : Element idElem = mb : : findFirstChildNamed ( doc . root ( ) , idFieldName ) ; <nl> <nl> mmm a / src / mongo / db / repl / oplog . cpp <nl> ppp b / src / mongo / db / repl / oplog . cpp <nl> namespace mongo { <nl> Lock : : assertWriteLocked ( ns ) ; <nl> <nl> Collection * collection = cc ( ) . database ( ) - > getCollection ( ns ) ; <nl> - NamespaceDetails * nsd = collection = = NULL ? NULL : collection - > details ( ) ; <nl> + IndexCatalog * indexCatalog = collection = = NULL ? NULL : collection - > getIndexCatalog ( ) ; <nl> <nl> / / operation type - - see logOp ( ) comments for types <nl> const char * opType = fieldOp . valuestrsafe ( ) ; <nl> namespace mongo { <nl> else { <nl> / / probably don ' t need this since all replicated colls have _id indexes now <nl> / / but keep it just in case <nl> - RARELY if ( nsd & & ! nsd - > isCapped ( ) ) { <nl> - collection - > getIndexCatalog ( ) - > ensureHaveIdIndex ( ) ; <nl> + RARELY if ( indexCatalog & & ! 
collection - > isCapped ( ) ) { <nl> + indexCatalog - > ensureHaveIdIndex ( ) ; <nl> } <nl> <nl> / * todo : it may be better to do an insert here , and then catch the dup key exception and do update <nl> namespace mongo { <nl> <nl> / / probably don ' t need this since all replicated colls have _id indexes now <nl> / / but keep it just in case <nl> - RARELY if ( nsd & & ! nsd - > isCapped ( ) ) { <nl> - collection - > getIndexCatalog ( ) - > ensureHaveIdIndex ( ) ; <nl> + RARELY if ( indexCatalog & & ! collection - > isCapped ( ) ) { <nl> + indexCatalog - > ensureHaveIdIndex ( ) ; <nl> } <nl> <nl> OpDebug debug ; <nl> namespace mongo { <nl> / / { _id : . . . , { x : { $ size : . . . } } <nl> / / thus this is not ideal . <nl> else { <nl> - if ( nsd = = NULL | | <nl> - ( nsd - > findIdIndex ( ) > = 0 & & Helpers : : findById ( collection , updateCriteria ) . isNull ( ) ) | | <nl> + if ( collection = = NULL | | <nl> + ( indexCatalog - > haveIdIndex ( ) & & Helpers : : findById ( collection , updateCriteria ) . isNull ( ) ) | | <nl> / / capped collections won ' t have an _id index <nl> - ( nsd - > findIdIndex ( ) < 0 & & Helpers : : findOne ( ns , updateCriteria , false ) . isNull ( ) ) ) { <nl> + ( ! indexCatalog - > haveIdIndex ( ) & & Helpers : : findOne ( ns , updateCriteria , false ) . isNull ( ) ) ) { <nl> failedUpdate = true ; <nl> log ( ) < < " replication couldn ' t find doc : " < < op . toString ( ) < < endl ; <nl> } <nl> mmm a / src / mongo / dbtests / namespacetests . cpp <nl> ppp b / src / mongo / dbtests / namespacetests . cpp <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> / / Find the indexNamespace name and indexNsd metadata pointer . <nl> - int idIndexNo = nsd ( ) - > findIdIndex ( ) ; <nl> - IndexDetails & idx = nsd ( ) - > idx ( idIndexNo ) ; <nl> - string indexNamespace = idx . indexNamespace ( ) ; <nl> - ASSERT ( ! NamespaceString : : normal ( indexNamespace . c_str ( ) ) ) ; <nl> - NamespaceDetails * indexNsd = nsdetails ( indexNamespace . c_str ( ) ) ; <nl> + IndexDescriptor * desc = indexCatalog ( ) - > findIdIndex ( ) ; <nl> + string indexNamespace = desc - > indexNamespace ( ) ; <nl> + ASSERT ( ! NamespaceString : : normal ( indexNamespace ) ) ; <nl> + NamespaceDetails * indexNsd = nsdetails ( indexNamespace ) ; <nl> <nl> / / Check that no quantization is performed . <nl> DiskLoc actualLocation = indexNsd - > alloc ( indexNamespace . c_str ( ) , 300 ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> / / Find the indexNamespace name and indexNsd metadata pointer . <nl> - int idIndexNo = nsd ( ) - > findIdIndex ( ) ; <nl> - IndexDetails & idx = nsd ( ) - > idx ( idIndexNo ) ; <nl> - string indexNamespace = idx . indexNamespace ( ) ; <nl> + IndexDescriptor * desc = indexCatalog ( ) - > findIdIndex ( ) ; <nl> + string indexNamespace = desc - > indexNamespace ( ) ; <nl> ASSERT ( ! NamespaceString : : normal ( indexNamespace . c_str ( ) ) ) ; <nl> NamespaceDetails * indexNsd = nsdetails ( indexNamespace . c_str ( ) ) ; <nl> <nl> mmm a / src / mongo / dbtests / replsettests . cpp <nl> ppp b / src / mongo / dbtests / replsettests . cpp <nl> namespace ReplSetTests { <nl> <nl> / / check _id index created <nl> Client : : Context ctx ( cappedNs ( ) ) ; <nl> - NamespaceDetails * nsd = nsdetails ( cappedNs ( ) ) ; <nl> - verify ( nsd - > findIdIndex ( ) > - 1 ) ; <nl> + Collection * collection = ctx . 
db ( ) - > getCollection ( cappedNs ( ) ) ; <nl> + verify ( collection - > getIndexCatalog ( ) - > findIdIndex ( ) ) ; <nl> } <nl> } ; <nl> <nl> namespace ReplSetTests { <nl> / / this changed in 2 . 1 . 2 <nl> / / we now have indexes on capped collections <nl> Client : : Context ctx ( cappedNs ( ) ) ; <nl> - NamespaceDetails * nsd = nsdetails ( cappedNs ( ) ) ; <nl> - verify ( nsd - > findIdIndex ( ) > = 0 ) ; <nl> + Collection * collection = ctx . db ( ) - > getCollection ( cappedNs ( ) ) ; <nl> + verify ( collection - > getIndexCatalog ( ) - > findIdIndex ( ) ) ; <nl> } <nl> } ; <nl> <nl> | SERVER - 12213 : remove NamespaceDetais : : findIdIndex and NamespaceDetails : : haveIdIndex , replace to IndexCatalog | mongodb/mongo | e2161d0d4e668bb51b13cd38992c7f6c65e21f94 | 2014-01-09T19:21:49Z |
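The mongodb row above moves `findIdIndex()` / `haveIdIndex()` from `NamespaceDetails` to `IndexCatalog`, where `haveIdIndex()` first checks a cached flag and otherwise scans the catalog. A rough sketch of that lookup pattern is below; `IndexEntry`, the `_id_` name check, and the container are simplified stand-ins, not MongoDB's real types.

```cpp
#include <string>
#include <vector>

// Simplified stand-in for an index catalog entry.
struct IndexEntry {
    std::string name;
    bool isIdIndex() const { return name == "_id_"; }  // real check is more involved
};

class IndexCatalogSketch {
public:
    // Mirrors the diff: prefer the cached "have _id index" flag, fall back to a scan.
    bool haveIdIndex() const { return haveIdFlag_ || findIdIndex() != nullptr; }

    // Linear scan, as in the original findIdIndex loop over the index iterator.
    const IndexEntry* findIdIndex() const {
        for (const IndexEntry& e : entries_)
            if (e.isIdIndex()) return &e;
        return nullptr;
    }

private:
    bool haveIdFlag_ = false;
    std::vector<IndexEntry> entries_;
};
```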
mmm a / modules / planning / lattice / trajectory_generation / BUILD <nl> ppp b / modules / planning / lattice / trajectory_generation / BUILD <nl> cc_library ( <nl> " / / modules / planning / lattice / trajectory_generation : lattice_trajectory1d " , <nl> " / / modules / planning / math / curve1d : quartic_polynomial_curve1d " , <nl> " / / modules / planning / math / curve1d : quintic_polynomial_curve1d " , <nl> - " / / modules / planning / proto : lattice_sampling_config_proto " , <nl> " / / modules / planning / proto : lattice_structure_proto " , <nl> ] , <nl> ) <nl> cc_library ( <nl> " / / modules / planning / lattice / behavior : path_time_graph " , <nl> " / / modules / planning / lattice / trajectory_generation : piecewise_braking_trajectory_generator " , <nl> " / / modules / planning / math / curve1d " , <nl> - " / / modules / planning / proto : lattice_sampling_config_proto " , <nl> ] , <nl> ) <nl> <nl> mmm a / modules / planning / proto / BUILD <nl> ppp b / modules / planning / proto / BUILD <nl> proto_library ( <nl> ] , <nl> ) <nl> <nl> - cc_proto_library ( <nl> - name = " lattice_sampling_config_proto " , <nl> - deps = [ <nl> - " : lattice_sampling_config_proto_lib " , <nl> - ] , <nl> - ) <nl> - <nl> - proto_library ( <nl> - name = " lattice_sampling_config_proto_lib " , <nl> - srcs = [ " lattice_sampling_config . proto " ] , <nl> - deps = [ <nl> - " / / modules / common / proto : pnc_point_proto_lib " , <nl> - ] , <nl> - ) <nl> - <nl> cc_proto_library ( <nl> name = " lattice_structure_proto " , <nl> deps = [ <nl> proto_library ( <nl> name = " lattice_structure_proto_lib " , <nl> srcs = [ " lattice_structure . proto " ] , <nl> deps = [ <nl> - " : lattice_sampling_config_proto_lib " , <nl> " / / modules / common / proto : pnc_point_proto_lib " , <nl> ] , <nl> ) <nl> py_proto ( <nl> ] , <nl> ) <nl> <nl> - py_proto ( <nl> - name = " lattice_sampling_config_pb2 " , <nl> - src = " lattice_sampling_config . proto " , <nl> - ) <nl> - <nl> py_proto ( <nl> name = " reference_line_smoother_config_pb2 " , <nl> src = " reference_line_smoother_config . proto " , <nl> deleted file mode 100644 <nl> index 60d8c0c7d26 . . 00000000000 <nl> mmm a / modules / planning / proto / lattice_sampling_config . proto <nl> ppp / dev / null <nl> <nl> - syntax = " proto2 " ; <nl> - <nl> - package apollo . planning ; <nl> - <nl> - message LonCondition { <nl> - optional double s = 1 [ default = 0 . 0 ] ; <nl> - optional double ds = 2 [ default = 0 . 0 ] ; <nl> - optional double dds = 3 [ default = 0 . 0 ] ; <nl> - } <nl> - <nl> - message LatCondition { <nl> - optional double l = 1 [ default = 0 . 0 ] ; <nl> - optional double dl = 2 [ default = 0 . 0 ] ; <nl> - optional double ddl = 3 [ default = 0 . 0 ] ; <nl> - } <nl> - <nl> - message TStrategy { <nl> - repeated double t_markers = 1 ; <nl> - optional double t_step = 2 [ default = 0 . 5 ] ; <nl> - optional string strategy = 3 ; <nl> - } <nl> - <nl> - message SStrategy { <nl> - repeated double s_markers = 1 ; <nl> - optional double s_step = 2 [ default = 0 . 
5 ] ; <nl> - optional string strategy = 3 ; <nl> - } <nl> - <nl> - message LonSampleConfig { <nl> - optional LonCondition lon_end_condition = 1 ; <nl> - optional TStrategy t_strategy = 2 ; <nl> - } <nl> - <nl> - message LatSampleConfig { <nl> - optional LatCondition lat_end_condition = 1 ; <nl> - optional SStrategy s_strategy = 2 ; <nl> - } <nl> - <nl> - message LatticeSamplingConfig { <nl> - optional LonSampleConfig lon_sample_config = 1 ; <nl> - optional LatSampleConfig lat_sample_config = 2 ; <nl> - } <nl> \ No newline at end of file <nl> | planning : delete lattice_sampling_config . proto not in use | ApolloAuto/apollo | acb330cd3f8c890bfcccfa789f241733572286c0 | 2020-07-08T02:29:26Z |
mmm a / src / server / master . cc <nl> ppp b / src / server / master . cc <nl> int swServer_master_send ( swServer * serv , swSendData * _send ) <nl> { <nl> if ( serv - > send_yield ) <nl> { <nl> - SwooleG . error = SW_ERROR_OUTPUT_BUFFER_OVERFLOW ; <nl> + SwooleG . error = SW_ERROR_OUTPUT_SEND_YIELD ; <nl> } <nl> else <nl> { <nl> int swServer_master_send ( swServer * serv , swSendData * _send ) <nl> { <nl> if ( serv - > send_yield ) <nl> { <nl> - SwooleG . error = SW_ERROR_OUTPUT_BUFFER_OVERFLOW ; <nl> + SwooleG . error = SW_ERROR_OUTPUT_SEND_YIELD ; <nl> } <nl> else <nl> { <nl> mmm a / tests / swoole_http_server / send_yield . phpt <nl> ppp b / tests / swoole_http_server / send_yield . phpt <nl> foreach ( [ SWOOLE_BASE , SWOOLE_PROCESS ] as $ mode ) { <nl> $ http = new Swoole \ Http \ Server ( ' 127 . 0 . 0 . 1 ' , $ pm - > getFreePort ( ) , $ mode ) ; <nl> $ http - > set ( [ <nl> ' log_file ' = > ' / dev / null ' , <nl> - ' send_yield ' = > true <nl> + ' send_yield ' = > true , <nl> + ' http_compression ' = > false <nl> ] ) ; <nl> $ http - > on ( ' workerStart ' , function ( ) use ( $ pm ) { <nl> $ pm - > wakeup ( ) ; <nl> | Fix send_yield in BASE mode | swoole/swoole-src | d2c08218b6ca07b3217996960cf006b400915dca | 2020-01-13T11:14:14Z |
mmm a / hphp / runtime / base / smart - ptr . h <nl> ppp b / hphp / runtime / base / smart - ptr . h <nl> class SmartPtr { <nl> } <nl> <nl> / / Move ctor <nl> - SmartPtr ( SmartPtr & & src ) : m_px ( src . get ( ) ) { <nl> + SmartPtr ( SmartPtr & & src ) noexcept : m_px ( src . get ( ) ) { <nl> src . m_px = nullptr ; <nl> } <nl> <nl> class SmartPtr { <nl> * Magic delegation . <nl> * / <nl> T * operator - > ( ) const { <nl> - if ( UNLIKELY ( ! m_px ) ) throw_null_pointer_exception ( ) ; <nl> - return m_px ; <nl> + return m_px ; / / intentionally skip nullptr check . <nl> } <nl> <nl> / * * <nl> | Remove nullptr check from SmartPtr : : operator - > ( ) | facebook/hhvm | 6d89a9359553314ea6251f9910e792fbe326f85f | 2014-11-17T22:00:28Z |
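The hhvm row above makes SmartPtr's move constructor `noexcept` and removes the null check from `operator->`. A minimal sketch of those two pieces in isolation follows; `SmartPtrSketch` is a hypothetical reduced type and omits the reference counting the real class performs.

```cpp
// Reduced smart-pointer sketch: only the two members touched by the diff.
template <typename T>
class SmartPtrSketch {
public:
    SmartPtrSketch() = default;
    explicit SmartPtrSketch(T* p) : m_px(p) {}

    // noexcept move: lets standard containers move elements instead of copying
    // them when they reallocate.
    SmartPtrSketch(SmartPtrSketch&& src) noexcept : m_px(src.m_px) {
        src.m_px = nullptr;
    }

    // Intentionally no nullptr check, matching the diff; dereferencing a null
    // pointer is left to the caller.
    T* operator->() const { return m_px; }

private:
    T* m_px = nullptr;
};
```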
mmm a / docs / CHANGELOG . txt <nl> ppp b / docs / CHANGELOG . txt <nl> Other Changes : <nl> - Misc : Windows : Do not use _wfopen ( ) if IMGUI_DISABLE_WIN32_FUNCTIONS is defined . ( # 2815 ) <nl> - Misc : Windows : Disabled win32 function by default when building with UWP . ( # 2892 , # 2895 ) <nl> - Misc : Using static_assert ( ) when using C + + 11 , instead of our own construct ( avoid zealous Clang warnings ) . <nl> + - Misc : Added IMGUI_DISABLE_FILE_FUNCTIONS / IMGUI_DISABLE_DEFAULT_FILE_FUNCTION to nullify or disable <nl> + default implementationof ImFileXXX functions linking with fopen / fclose / fread / fwrite . ( # 2734 ) <nl> - Docs : Improved and moved FAQ to docs / FAQ . md so it can be readable on the web . [ @ ButternCream , @ ocornut ] <nl> - Docs : Added permanent redirect from https : / / www . dearimgui . org / faq to FAQ page . <nl> - Demo : Added simple item reordering demo in Widgets - > Drag and Drop section . ( # 2823 , # 143 ) [ @ rokups ] <nl> mmm a / examples / example_emscripten / Makefile <nl> ppp b / examples / example_emscripten / Makefile <nl> EMS = - s USE_SDL = 2 - s WASM = 1 <nl> EMS + = - s ALLOW_MEMORY_GROWTH = 1 <nl> EMS + = - s DISABLE_EXCEPTION_CATCHING = 1 - s NO_EXIT_RUNTIME = 0 <nl> EMS + = - s ASSERTIONS = 1 <nl> + EMS + = - s NO_FILESYSTEM = 1 - DIMGUI_DISABLE_FILE_FUNCTIONS <nl> # Uncomment next line to fix possible rendering bugs with emscripten version older then 1 . 39 . 0 ( https : / / github . com / ocornut / imgui / issues / 2877 ) <nl> # EMS + = - s BINARYEN_TRAP_MODE = clamp <nl> - # EMS + = - s NO_FILESYSTEM = 1 # # Getting " error : undefined symbol : $ FS " if filesystem is removed <nl> # EMS + = - s SAFE_HEAP = 1 # # Adds overhead <nl> <nl> CPPFLAGS = - I . . / - I . . / . . / <nl> mmm a / imconfig . h <nl> ppp b / imconfig . h <nl> <nl> / / # define IMGUI_ENABLE_OSX_DEFAULT_CLIPBOARD_FUNCTIONS / / [ OSX ] Implement default OSX clipboard handler ( need to link with ' - framework ApplicationServices ' , this is why this is not the default ) . <nl> / / # define IMGUI_DISABLE_DEFAULT_FORMAT_STRING_FUNCTIONS / / Don ' t implement ImFormatString / ImFormatStringV so you can implement them yourself ( e . g . if you don ' t want to link with vsnprintf ) <nl> / / # define IMGUI_DISABLE_DEFAULT_MATH_FUNCTIONS / / Don ' t implement ImFabs / ImSqrt / ImPow / ImFmod / ImCos / ImSin / ImAcos / ImAtan2 so you can implement them yourself . <nl> + / / # define IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS / / Don ' t implement ImFileOpen / ImFileClose / ImFileRead / ImFileWrite so you can implement them yourself if you don ' t want to link with fopen / fclose / fread / fwrite . This will also disable the LogToTTY ( ) function . <nl> / / # define IMGUI_DISABLE_DEFAULT_ALLOCATORS / / Don ' t implement default allocators calling malloc ( ) / free ( ) to avoid linking with them . You will need to call ImGui : : SetAllocatorFunctions ( ) . <nl> <nl> / / mmm - Include imgui_user . h at the end of imgui . h as a convenience <nl> mmm a / imgui . cpp <nl> ppp b / imgui . 
cpp <nl> CODE <nl> / / [ SECTION ] CONTEXT AND MEMORY ALLOCATORS <nl> / / [ SECTION ] MAIN USER FACING STRUCTURES ( ImGuiStyle , ImGuiIO ) <nl> / / [ SECTION ] MISC HELPERS / UTILITIES ( Maths , String , Format , Hash , File functions ) <nl> + / / [ SECTION ] MISC HELPERS / UTILITIES ( File functions ) <nl> / / [ SECTION ] MISC HELPERS / UTILITIES ( ImText * functions ) <nl> / / [ SECTION ] MISC HELPERS / UTILITIES ( Color functions ) <nl> / / [ SECTION ] ImGuiStorage <nl> ImU32 ImHashStr ( const char * data_p , size_t data_size , ImU32 seed ) <nl> return ~ crc ; <nl> } <nl> <nl> - FILE * ImFileOpen ( const char * filename , const char * mode ) <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / [ SECTION ] MISC HELPERS / UTILITIES ( File functions ) <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> + / / Default file functions <nl> + # ifndef IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS <nl> + ImFileHandle ImFileOpen ( const char * filename , const char * mode ) <nl> { <nl> # if defined ( _WIN32 ) & & ! defined ( IMGUI_DISABLE_WIN32_FUNCTIONS ) & & ! defined ( __CYGWIN__ ) & & ! defined ( __GNUC__ ) <nl> - / / We need a fopen ( ) wrapper because MSVC / Windows fopen doesn ' t handle UTF - 8 filenames . Converting both strings from UTF - 8 to wchar format ( using a single allocation , because we can ) <nl> + / / We need a fopen ( ) wrapper because MSVC / Windows fopen doesn ' t handle UTF - 8 filenames . <nl> const int filename_wsize = ImTextCountCharsFromUtf8 ( filename , NULL ) + 1 ; <nl> const int mode_wsize = ImTextCountCharsFromUtf8 ( mode , NULL ) + 1 ; <nl> ImVector < ImWchar > buf ; <nl> FILE * ImFileOpen ( const char * filename , const char * mode ) <nl> return fopen ( filename , mode ) ; <nl> # endif <nl> } <nl> + int ImFileClose ( ImFileHandle f ) { return fclose ( f ) ; } <nl> + size_t ImFileGetSize ( ImFileHandle f ) { long off = 0 , sz = 0 ; return ( ( off = ftell ( f ) ) ! = - 1 & & ! fseek ( f , 0 , SEEK_END ) & & ( sz = ftell ( f ) ) ! = - 1 & & ! fseek ( f , off , SEEK_SET ) ) ? 
( size_t ) sz : ( size_t ) - 1 ; } <nl> + size_t ImFileRead ( void * data , size_t sz , size_t count , ImFileHandle f ) { return fread ( data , sz , count , f ) ; } <nl> + size_t ImFileWrite ( const void * data , size_t sz , size_t count , ImFileHandle f ) { return fwrite ( data , sz , count , f ) ; } <nl> + # endif / / # ifndef IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS <nl> <nl> - / / Load file content into memory <nl> + / / Helper : Load file content into memory <nl> / / Memory allocated with IM_ALLOC ( ) , must be freed by user using IM_FREE ( ) = = ImGui : : MemFree ( ) <nl> - void * ImFileLoadToMemory ( const char * filename , const char * file_open_mode , size_t * out_file_size , int padding_bytes ) <nl> + void * ImFileLoadToMemory ( const char * filename , const char * mode , size_t * out_file_size , int padding_bytes ) <nl> { <nl> - IM_ASSERT ( filename & & file_open_mode ) ; <nl> + IM_ASSERT ( filename & & mode ) ; <nl> if ( out_file_size ) <nl> * out_file_size = 0 ; <nl> <nl> - FILE * f ; <nl> - if ( ( f = ImFileOpen ( filename , file_open_mode ) ) = = NULL ) <nl> + ImFileHandle f ; <nl> + if ( ( f = ImFileOpen ( filename , mode ) ) = = NULL ) <nl> return NULL ; <nl> <nl> - long file_size_signed ; <nl> - if ( fseek ( f , 0 , SEEK_END ) | | ( file_size_signed = ftell ( f ) ) = = - 1 | | fseek ( f , 0 , SEEK_SET ) ) <nl> + size_t file_size = ImFileGetSize ( f ) ; <nl> + if ( file_size = = ( size_t ) - 1 ) <nl> { <nl> - fclose ( f ) ; <nl> + ImFileClose ( f ) ; <nl> return NULL ; <nl> } <nl> <nl> - size_t file_size = ( size_t ) file_size_signed ; <nl> void * file_data = IM_ALLOC ( file_size + padding_bytes ) ; <nl> if ( file_data = = NULL ) <nl> { <nl> - fclose ( f ) ; <nl> + ImFileClose ( f ) ; <nl> return NULL ; <nl> } <nl> - if ( fread ( file_data , 1 , file_size , f ) ! = file_size ) <nl> + if ( ImFileRead ( file_data , 1 , file_size , f ) ! = file_size ) <nl> { <nl> - fclose ( f ) ; <nl> + ImFileClose ( f ) ; <nl> IM_FREE ( file_data ) ; <nl> return NULL ; <nl> } <nl> if ( padding_bytes > 0 ) <nl> memset ( ( void * ) ( ( ( char * ) file_data ) + file_size ) , 0 , ( size_t ) padding_bytes ) ; <nl> <nl> - fclose ( f ) ; <nl> + ImFileClose ( f ) ; <nl> if ( out_file_size ) <nl> * out_file_size = file_size ; <nl> <nl> void ImGui : : Shutdown ( ImGuiContext * context ) <nl> g . SettingsWindows . clear ( ) ; <nl> g . SettingsHandlers . clear ( ) ; <nl> <nl> - if ( g . LogFile & & g . LogFile ! = stdout ) <nl> + if ( g . LogFile ) <nl> { <nl> - fclose ( g . LogFile ) ; <nl> + # ifndef IMGUI_DISABLE_TTY_FUNCTIONS <nl> + if ( g . LogFile ! = stdout ) <nl> + # endif <nl> + ImFileClose ( g . LogFile ) ; <nl> g . LogFile = NULL ; <nl> } <nl> g . LogBuffer . clear ( ) ; <nl> void ImGui : : LogText ( const char * fmt , . . . ) <nl> va_list args ; <nl> va_start ( args , fmt ) ; <nl> if ( g . LogFile ) <nl> - vfprintf ( g . LogFile , fmt , args ) ; <nl> + { <nl> + g . LogBuffer . Buf . resize ( 0 ) ; <nl> + g . LogBuffer . appendfv ( fmt , args ) ; <nl> + ImFileWrite ( g . LogBuffer . c_str ( ) , sizeof ( char ) , ( size_t ) g . LogBuffer . size ( ) , g . LogFile ) ; <nl> + } <nl> else <nl> + { <nl> g . LogBuffer . appendfv ( fmt , args ) ; <nl> + } <nl> va_end ( args ) ; <nl> } <nl> <nl> void ImGui : : LogToTTY ( int auto_open_depth ) <nl> ImGuiContext & g = * GImGui ; <nl> if ( g . LogEnabled ) <nl> return ; <nl> + IM_UNUSED ( auto_open_depth ) ; <nl> + # ifndef IMGUI_DISABLE_TTY_FUNCTIONS <nl> LogBegin ( ImGuiLogType_TTY , auto_open_depth ) ; <nl> g . 
LogFile = stdout ; <nl> + # endif <nl> } <nl> <nl> / / Start logging / capturing text output to given file <nl> void ImGui : : LogToFile ( int auto_open_depth , const char * filename ) <nl> filename = g . IO . LogFilename ; <nl> if ( ! filename | | ! filename [ 0 ] ) <nl> return ; <nl> - FILE * f = ImFileOpen ( filename , " ab " ) ; <nl> - if ( f = = NULL ) <nl> + ImFileHandle f = ImFileOpen ( filename , " ab " ) ; <nl> + if ( ! f ) <nl> { <nl> IM_ASSERT ( 0 ) ; <nl> return ; <nl> void ImGui : : LogFinish ( ) <nl> switch ( g . LogType ) <nl> { <nl> case ImGuiLogType_TTY : <nl> + # ifndef IMGUI_DISABLE_TTY_FUNCTIONS <nl> fflush ( g . LogFile ) ; <nl> + # endif <nl> break ; <nl> case ImGuiLogType_File : <nl> - fclose ( g . LogFile ) ; <nl> + ImFileClose ( g . LogFile ) ; <nl> break ; <nl> case ImGuiLogType_Buffer : <nl> break ; <nl> void ImGui : : LogButtons ( ) <nl> ImGuiContext & g = * GImGui ; <nl> <nl> PushID ( " LogButtons " ) ; <nl> + # ifndef IMGUI_DISABLE_TTY_FUNCTIONS <nl> const bool log_to_tty = Button ( " Log To TTY " ) ; SameLine ( ) ; <nl> + # else <nl> + const bool log_to_tty = false ; <nl> + # endif <nl> const bool log_to_file = Button ( " Log To File " ) ; SameLine ( ) ; <nl> const bool log_to_clipboard = Button ( " Log To Clipboard " ) ; SameLine ( ) ; <nl> PushAllowKeyboardFocus ( false ) ; <nl> void ImGui : : SaveIniSettingsToDisk ( const char * ini_filename ) <nl> <nl> size_t ini_data_size = 0 ; <nl> const char * ini_data = SaveIniSettingsToMemory ( & ini_data_size ) ; <nl> - FILE * f = ImFileOpen ( ini_filename , " wt " ) ; <nl> + ImFileHandle f = ImFileOpen ( ini_filename , " wt " ) ; <nl> if ( ! f ) <nl> return ; <nl> - fwrite ( ini_data , sizeof ( char ) , ini_data_size , f ) ; <nl> - fclose ( f ) ; <nl> + ImFileWrite ( ini_data , sizeof ( char ) , ini_data_size , f ) ; <nl> + ImFileClose ( f ) ; <nl> } <nl> <nl> / / Call registered handlers ( e . g . SettingsHandlerWindow_WriteAll ( ) + custom handlers ) to write their stuff into a text buffer <nl> mmm a / imgui_demo . cpp <nl> ppp b / imgui_demo . cpp <nl> void ImGui : : ShowAboutWindow ( bool * p_open ) <nl> # ifdef IMGUI_DISABLE_DEFAULT_MATH_FUNCTIONS <nl> ImGui : : Text ( " define : IMGUI_DISABLE_DEFAULT_MATH_FUNCTIONS " ) ; <nl> # endif <nl> + # ifdef IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS <nl> + ImGui : : Text ( " define : IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS " ) ; <nl> + # endif <nl> + # ifdef IMGUI_DISABLE_FILE_FUNCTIONS <nl> + ImGui : : Text ( " define : IMGUI_DISABLE_FILE_FUNCTIONS " ) ; <nl> + # endif <nl> # ifdef IMGUI_DISABLE_DEFAULT_ALLOCATORS <nl> ImGui : : Text ( " define : IMGUI_DISABLE_DEFAULT_ALLOCATORS " ) ; <nl> # endif <nl> mmm a / imgui_internal . h <nl> ppp b / imgui_internal . h <nl> extern IMGUI_API ImGuiContext * GImGui ; / / Current implicit context pointer <nl> # endif <nl> <nl> / / Helpers : Misc <nl> - IMGUI_API void * ImFileLoadToMemory ( const char * filename , const char * file_open_mode , size_t * out_file_size = NULL , int padding_bytes = 0 ) ; <nl> - IMGUI_API FILE * ImFileOpen ( const char * filename , const char * file_open_mode ) ; <nl> # define ImQsort qsort <nl> IMGUI_API ImU32 ImHashData ( const void * data , size_t data_size , ImU32 seed = 0 ) ; <nl> IMGUI_API ImU32 ImHashStr ( const char * data , size_t data_size = 0 , ImU32 seed = 0 ) ; <nl> static inline ImVec4 operator - ( const ImVec4 & lhs , const ImVec4 & rhs ) <nl> static inline ImVec4 operator * ( const ImVec4 & lhs , const ImVec4 & rhs ) { return ImVec4 ( lhs . x * rhs . x , lhs . y * rhs . y , lhs . z * rhs . 
z , lhs . w * rhs . w ) ; } <nl> # endif <nl> <nl> + / / Helpers : File System <nl> + # if defined ( __EMSCRIPTEN__ ) & & ! defined ( IMGUI_DISABLE_FILE_FUNCTIONS ) <nl> + # define IMGUI_DISABLE_FILE_FUNCTIONS <nl> + # endif <nl> + # ifdef IMGUI_DISABLE_FILE_FUNCTIONS <nl> + # define IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS <nl> + typedef void * ImFileHandle ; <nl> + static inline ImFileHandle ImFileOpen ( const char * , const char * ) { return NULL ; } <nl> + static inline int ImFileClose ( ImFileHandle ) { return - 1 ; } <nl> + static inline size_t ImFileGetSize ( ImFileHandle ) { return ( size_t ) - 1 ; } <nl> + static inline size_t ImFileRead ( void * , size_t , size_t , ImFileHandle ) { return 0 ; } <nl> + static inline size_t ImFileWrite ( const void * , size_t , size_t , ImFileHandle ) { return 0 ; } <nl> + # endif <nl> + <nl> + # ifndef IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS <nl> + typedef FILE * ImFileHandle ; <nl> + IMGUI_API ImFileHandle ImFileOpen ( const char * filename , const char * mode ) ; <nl> + IMGUI_API int ImFileClose ( ImFileHandle file ) ; <nl> + IMGUI_API size_t ImFileGetSize ( ImFileHandle file ) ; <nl> + IMGUI_API size_t ImFileRead ( void * data , size_t size , size_t count , ImFileHandle file ) ; <nl> + IMGUI_API size_t ImFileWrite ( const void * data , size_t size , size_t count , ImFileHandle file ) ; <nl> + # else <nl> + # define IMGUI_DISABLE_TTY_FUNCTIONS / / Can ' t use stdout , fflush if we are not using default file functions <nl> + # endif <nl> + IMGUI_API void * ImFileLoadToMemory ( const char * filename , const char * mode , size_t * out_file_size = NULL , int padding_bytes = 0 ) ; <nl> + <nl> / / Helpers : Maths <nl> / / - Wrapper for standard libs functions . ( Note that imgui_demo . cpp does _not_ use them to keep the code easy to copy ) <nl> # ifndef IMGUI_DISABLE_DEFAULT_MATH_FUNCTIONS <nl> struct ImGuiContext <nl> / / Capture / Logging <nl> bool LogEnabled ; <nl> ImGuiLogType LogType ; <nl> - FILE * LogFile ; / / If ! = NULL log to stdout / file <nl> + ImFileHandle LogFile ; / / If ! = NULL log to stdout / file <nl> ImGuiTextBuffer LogBuffer ; / / Accumulation buffer when log to clipboard . This is pointer so our GImGui static constructor doesn ' t call heap allocators . <nl> float LogLinePosY ; <nl> bool LogLineFirstItem ; <nl> | Added IMGUI_DISABLE_DEFAULT_FILE_FUNCTIONS / IMGUI_DISABLE_FILE_FUNCTIONS ) | ocornut/imgui | 4e90906b04be67591bf7632c882d09f7bc5f00c3 | 2019-11-19T20:14:44Z |
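The imgui row above routes file access through `ImFileOpen` / `ImFileClose` / `ImFileRead` / `ImFileWrite` / `ImFileGetSize` so the fopen-based defaults can be compiled out. A compressed sketch of that compile-time switch is below; the macro and function names are shortened stand-ins for the `IMGUI_DISABLE_FILE_FUNCTIONS` / `ImFileXXX` names in the diff, and the default bodies follow the same fopen/ftell/fseek approach shown there.

```cpp
#include <cstddef>
#include <cstdio>

#ifdef DISABLE_FILE_FUNCTIONS   // stand-in for IMGUI_DISABLE_FILE_FUNCTIONS
// No-op stubs so callers still link when file IO is compiled out.
typedef void* FileHandle;
static inline FileHandle FileOpen(const char*, const char*)                 { return nullptr; }
static inline int        FileClose(FileHandle)                              { return -1; }
static inline size_t     FileGetSize(FileHandle)                            { return (size_t)-1; }
static inline size_t     FileRead(void*, size_t, size_t, FileHandle)        { return 0; }
static inline size_t     FileWrite(const void*, size_t, size_t, FileHandle) { return 0; }
#else
// Default implementations backed by the C standard library.
typedef FILE* FileHandle;
inline FileHandle FileOpen(const char* name, const char* mode) { return fopen(name, mode); }
inline int        FileClose(FileHandle f)                      { return fclose(f); }
inline size_t     FileRead(void* p, size_t sz, size_t n, FileHandle f)        { return fread(p, sz, n, f); }
inline size_t     FileWrite(const void* p, size_t sz, size_t n, FileHandle f) { return fwrite(p, sz, n, f); }
// Same trick as the diff: remember the current offset, seek to the end, restore.
inline size_t FileGetSize(FileHandle f) {
    long off = ftell(f);
    if (off == -1 || fseek(f, 0, SEEK_END) != 0) return (size_t)-1;
    long sz = ftell(f);
    fseek(f, off, SEEK_SET);
    return (sz == -1) ? (size_t)-1 : (size_t)sz;
}
#endif
```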
mmm a / tools / compute_image_mean . cpp <nl> ppp b / tools / compute_image_mean . cpp <nl> <nl> <nl> using caffe : : Datum ; <nl> using caffe : : BlobProto ; <nl> + using std : : max ; <nl> <nl> int main ( int argc , char * * argv ) { <nl> : : google : : InitGoogleLogging ( argv [ 0 ] ) ; <nl> int main ( int argc , char * * argv ) { <nl> sum_blob . set_height ( datum . height ( ) ) ; <nl> sum_blob . set_width ( datum . width ( ) ) ; <nl> const int data_size = datum . channels ( ) * datum . height ( ) * datum . width ( ) ; <nl> - for ( int i = 0 ; i < datum . data ( ) . size ( ) ; + + i ) { <nl> + int size_in_datum = std : : max < int > ( datum . data ( ) . size ( ) , datum . float_data_size ( ) ) ; <nl> + for ( int i = 0 ; i < size_in_datum ; + + i ) { <nl> sum_blob . add_data ( 0 . ) ; <nl> } <nl> LOG ( INFO ) < < " Starting Iteration " ; <nl> int main ( int argc , char * * argv ) { <nl> / / just a dummy operation <nl> datum . ParseFromString ( it - > value ( ) . ToString ( ) ) ; <nl> const string & data = datum . data ( ) ; <nl> - CHECK_EQ ( data . size ( ) , data_size ) < < " Incorrect data field size " <nl> - < < data . size ( ) ; <nl> - for ( int i = 0 ; i < data . size ( ) ; + + i ) { <nl> - sum_blob . set_data ( i , sum_blob . data ( i ) + ( uint8_t ) data [ i ] ) ; <nl> + size_in_datum = std : : max < int > ( datum . data ( ) . size ( ) , datum . float_data_size ( ) ) ; <nl> + CHECK_EQ ( size_in_datum , data_size ) < < " Incorrect data field size " < < size_in_datum ; <nl> + if ( data . size ( ) ! = 0 ) { <nl> + for ( int i = 0 ; i < size_in_datum ; + + i ) { <nl> + sum_blob . set_data ( i , sum_blob . data ( i ) + ( uint8_t ) data [ i ] ) ; <nl> + } <nl> + } else { <nl> + for ( int i = 0 ; i < size_in_datum ; + + i ) { <nl> + sum_blob . set_data ( i , sum_blob . data ( i ) + ( float ) datum . float_data ( i ) ) ; <nl> + } <nl> } <nl> + + count ; <nl> if ( count % 10000 = = 0 ) { <nl> | Merge pull request from mavenlin / computemean | BVLC/caffe | 9caae608ccbf9a9b692cc4cb884db659334ce480 | 2014-03-11T01:31:38Z |
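The caffe row above fixes compute_image_mean so it accumulates from either the byte `data` field or the `float_data` field of a Datum, sizing the running sum by whichever is populated. A small sketch of that accumulation rule follows; `DatumSketch` and `accumulate` are stand-ins, not the protobuf-generated Datum or the tool's actual loop.

```cpp
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// Stand-in for the protobuf Datum: in practice one of `data` / `float_data` is filled.
struct DatumSketch {
    std::string data;               // packed uint8 pixels
    std::vector<float> float_data;  // or float pixels
};

// Mirrors the diff: size_in_datum = max(data.size(), float_data.size()),
// then add either the byte values or the float values into the sum.
void accumulate(const DatumSketch& d, std::vector<double>& sum) {
    const size_t n = std::max(d.data.size(), d.float_data.size());
    if (sum.empty()) sum.assign(n, 0.0);
    if (!d.data.empty()) {
        for (size_t i = 0; i < n; ++i) sum[i] += (uint8_t)d.data[i];
    } else {
        for (size_t i = 0; i < n; ++i) sum[i] += d.float_data[i];
    }
}
```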
mmm a / tensorflow / tools / ci_build / Dockerfile . cpu . ppc64le <nl> ppp b / tensorflow / tools / ci_build / Dockerfile . cpu . ppc64le <nl> RUN / install / install_bazel_from_source . sh <nl> RUN / install / install_proto3 . sh <nl> RUN / install / install_buildifier_from_source . sh <nl> RUN / install / install_auditwheel . sh <nl> - RUN / install / install_golang_ppc64el . sh <nl> + RUN / install / install_golang_ppc64le . sh <nl> <nl> # Set up the master bazelrc configuration file . <nl> COPY install / . bazelrc / etc / bazel . bazelrc <nl> mmm a / tensorflow / tools / ci_build / Dockerfile . gpu . ppc64le <nl> ppp b / tensorflow / tools / ci_build / Dockerfile . gpu . ppc64le <nl> RUN / install / install_deb_packages . sh <nl> RUN apt - get update & & apt - get install - y libopenblas - dev <nl> RUN / install / install_pip_packages . sh <nl> RUN / install / install_bazel_from_source . sh <nl> - RUN / install / install_golang_ppc64el . sh <nl> + RUN / install / install_golang_ppc64le . sh <nl> <nl> # Set up the master bazelrc configuration file . <nl> COPY install / . bazelrc / etc / bazel . bazelrc <nl> similarity index 100 % <nl> rename from tensorflow / tools / ci_build / install / install_golang_ppc64el . sh <nl> rename to tensorflow / tools / ci_build / install / install_golang_ppc64le . sh <nl> | Fix golang_ppc64le filename | tensorflow/tensorflow | 339477aa8ad9abe17190a978dcfa2f0aaf8b3de5 | 2018-06-18T19:28:09Z |
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> else ( ) <nl> include ( CPack ) <nl> endif ( ) <nl> <nl> + set ( BUILD_FLOWBENCH OFF CACHE BOOL " Build microbenchmark program ( builds google microbenchmark dependency ) " ) <nl> + if ( BUILD_FLOWBENCH ) <nl> + add_subdirectory ( flowbench ) <nl> + endif ( ) <nl> + <nl> if ( CMAKE_SYSTEM_NAME STREQUAL " FreeBSD " ) <nl> add_link_options ( - lexecinfo ) <nl> endif ( ) <nl> mmm a / bindings / c / test / mako / mako . c <nl> ppp b / bindings / c / test / mako / mako . c <nl> void * worker_thread ( void * thread_args ) { <nl> int worker_process_main ( mako_args_t * args , int worker_id , mako_shmhdr_t * shm , pid_t * pid_main ) { <nl> int i ; <nl> pthread_t network_thread ; / * handle for thread which invoked fdb_run_network ( ) * / <nl> - pthread_t * worker_threads ; <nl> + pthread_t * worker_threads = NULL ; <nl> # if FDB_API_VERSION < 610 <nl> FDBCluster * cluster ; <nl> # endif <nl> process_info_t process ; <nl> - thread_args_t * thread_args ; <nl> + thread_args_t * thread_args = NULL ; <nl> int rc ; <nl> fdb_error_t err ; <nl> <nl> int worker_process_main ( mako_args_t * args , int worker_id , mako_shmhdr_t * shm , pi <nl> <nl> / * Everything starts from here * / <nl> err = fdb_select_api_version ( args - > api_version ) ; <nl> - check_fdb_error ( err ) ; <nl> + if ( err ) { <nl> + fprintf ( stderr , " ERROR : Failed at % s : % d ( % s ) \ n " , __FILE__ , __LINE__ , fdb_get_error ( err ) ) ; <nl> + return - 1 ; <nl> + } <nl> + <nl> <nl> / * enable flatbuffers if specified * / <nl> if ( args - > flatbuffers ) { <nl> int worker_process_main ( mako_args_t * args , int worker_id , mako_shmhdr_t * shm , pi <nl> / * Network thread must be setup before doing anything * / <nl> fprintf ( debugme , " DEBUG : fdb_setup_network \ n " ) ; <nl> err = fdb_setup_network ( ) ; <nl> - check_fdb_error ( err ) ; <nl> + if ( err ) { <nl> + fprintf ( stderr , " ERROR : Failed at % s : % d ( % s ) \ n " , __FILE__ , __LINE__ , fdb_get_error ( err ) ) ; <nl> + return - 1 ; <nl> + } <nl> + <nl> <nl> / * Each worker process will have its own network thread * / <nl> fprintf ( debugme , " DEBUG : creating network thread \ n " ) ; <nl> int main ( int argc , char * argv [ ] ) { <nl> int rc ; <nl> mako_args_t args ; <nl> int p ; <nl> - pid_t * worker_pids ; <nl> + pid_t * worker_pids = NULL ; <nl> proc_type_t proc_type = proc_master ; <nl> int worker_id ; <nl> pid_t pid ; <nl> int main ( int argc , char * argv [ ] ) { <nl> / * allocate * / <nl> shmsize = sizeof ( mako_shmhdr_t ) + ( sizeof ( mako_stats_t ) * args . num_processes * args . num_threads ) ; <nl> if ( ftruncate ( shmfd , shmsize ) < 0 ) { <nl> + shm = MAP_FAILED ; <nl> fprintf ( stderr , " ERROR : ftruncate ( fd : % d size : % llu ) failed \ n " , shmfd , ( unsigned long long ) shmsize ) ; <nl> goto failExit ; <nl> } <nl> mmm a / bindings / flow / tester / Tester . actor . cpp <nl> ppp b / bindings / flow / tester / Tester . actor . cpp <nl> const char * StartThreadFunc : : name = " START_THREAD " ; <nl> REGISTER_INSTRUCTION_FUNC ( StartThreadFunc ) ; <nl> <nl> ACTOR template < class Function > <nl> - Future < decltype ( fake < Function > ( ) ( Reference < ReadTransaction > ( ) ) . getValue ( ) ) > read ( Reference < Database > db , <nl> - Function func ) { <nl> + Future < decltype ( std : : declval < Function > ( ) ( Reference < ReadTransaction > ( ) ) . 
getValue ( ) ) > read ( Reference < Database > db , <nl> + Function func ) { <nl> state Reference < ReadTransaction > tr = db - > createTransaction ( ) ; <nl> loop { <nl> try { <nl> - state decltype ( fake < Function > ( ) ( Reference < ReadTransaction > ( ) ) . getValue ( ) ) result = wait ( func ( tr ) ) ; <nl> + state decltype ( std : : declval < Function > ( ) ( Reference < ReadTransaction > ( ) ) . getValue ( ) ) result = wait ( func ( tr ) ) ; <nl> return result ; <nl> } catch ( Error & e ) { <nl> wait ( tr - > onError ( e ) ) ; <nl> mmm a / bindings / flow / tester / Tester . actor . h <nl> ppp b / bindings / flow / tester / Tester . actor . h <nl> <nl> <nl> # pragma once <nl> <nl> + # include < utility > <nl> + <nl> # include " flow / IDispatched . h " <nl> # include " bindings / flow / fdb_flow . h " <nl> # include " bindings / flow / IDirectory . h " <nl> struct FlowTesterStack { <nl> void push ( Future < Standalone < StringRef > > value ) { <nl> data . push_back ( StackItem ( index , value ) ) ; <nl> } <nl> - <nl> + <nl> void push ( Standalone < StringRef > value ) { <nl> push ( Future < Standalone < StringRef > > ( value ) ) ; <nl> } <nl> struct FlowTesterStack { <nl> items . push_back ( data . back ( ) ) ; <nl> data . pop_back ( ) ; <nl> count - - ; <nl> - } <nl> + } <nl> return items ; <nl> } <nl> - <nl> + <nl> Future < std : : vector < FDB : : Tuple > > waitAndPop ( int count ) ; <nl> Future < FDB : : Tuple > waitAndPop ( ) ; <nl> <nl> struct FlowTesterStack { <nl> <nl> struct InstructionData : public ReferenceCounted < InstructionData > { <nl> bool isDatabase ; <nl> - bool isSnapshot ; <nl> + bool isSnapshot ; <nl> StringRef instruction ; <nl> Reference < FDB : : Transaction > tr ; <nl> <nl> struct DirectoryOrSubspace { <nl> return " DirectorySubspace " ; <nl> } <nl> else if ( directory . present ( ) ) { <nl> - return " IDirectory " ; <nl> + return " IDirectory " ; <nl> } <nl> else if ( subspace . present ( ) ) { <nl> return " Subspace " ; <nl> struct DirectoryTesterData { <nl> int directoryListIndex ; <nl> int directoryErrorIndex ; <nl> <nl> - Reference < FDB : : IDirectory > directory ( ) { <nl> + Reference < FDB : : IDirectory > directory ( ) { <nl> ASSERT ( directoryListIndex < directoryList . size ( ) ) ; <nl> ASSERT ( directoryList [ directoryListIndex ] . directory . present ( ) ) ; <nl> - return directoryList [ directoryListIndex ] . directory . get ( ) ; <nl> + return directoryList [ directoryListIndex ] . directory . get ( ) ; <nl> } <nl> <nl> FDB : : Subspace * subspace ( ) { <nl> struct FlowTesterData : public ReferenceCounted < FlowTesterData > { <nl> std : : string tupleToString ( FDB : : Tuple const & tuple ) ; <nl> <nl> ACTOR template < class F > <nl> - Future < decltype ( fake < F > ( ) ( ) . getValue ( ) ) > executeMutation ( Reference < InstructionData > instruction , F func ) { <nl> + Future < decltype ( std : : declval < F > ( ) ( ) . getValue ( ) ) > executeMutation ( Reference < InstructionData > instruction , F func ) { <nl> loop { <nl> try { <nl> - state decltype ( fake < F > ( ) ( ) . getValue ( ) ) result = wait ( func ( ) ) ; <nl> + state decltype ( std : : declval < F > ( ) ( ) . getValue ( ) ) result = wait ( func ( ) ) ; <nl> if ( instruction - > isDatabase ) { <nl> wait ( instruction - > tr - > commit ( ) ) ; <nl> } <nl> mmm a / bindings / go / src / fdb / transaction . go <nl> ppp b / bindings / go / src / fdb / transaction . 
go <nl> func ( t * transaction ) getEstimatedRangeSizeBytes ( beginKey Key , endKey Key ) Futur <nl> <nl> / / GetEstimatedRangeSizeBytes will get an estimate for the number of bytes <nl> / / stored in the given range . <nl> + / / Note : the estimated size is calculated based on the sampling done by FDB server . The sampling <nl> + / / algorithm works roughly in this way : the larger the key - value pair is , the more likely it would <nl> + / / be sampled and the more accurate its sampled size would be . And due to <nl> + / / that reason it is recommended to use this API to query against large ranges for accuracy considerations . <nl> + / / For a rough reference , if the returned size is larger than 3MB , one can consider the size to be <nl> + / / accurate . <nl> func ( t Transaction ) GetEstimatedRangeSizeBytes ( r ExactRange ) FutureInt64 { <nl> beginKey , endKey : = r . FDBRangeKeys ( ) <nl> return t . getEstimatedRangeSizeBytes ( <nl> mmm a / bindings / java / JavaWorkload . cpp <nl> ppp b / bindings / java / JavaWorkload . cpp <nl> void printTrace ( JNIEnv * env , jclass , jlong logger , jint severity , jstring messag <nl> sev = FDBSeverity : : Warn ; <nl> } else if ( severity < 40 ) { <nl> sev = FDBSeverity : : WarnAlways ; <nl> + } else { <nl> + assert ( false ) ; <nl> + std : : abort ( ) ; <nl> } <nl> log - > trace ( sev , msg , detailsMap ) ; <nl> if ( isCopy ) { <nl> mmm a / bindings / java / src / main / com / apple / foundationdb / ReadTransaction . java <nl> ppp b / bindings / java / src / main / com / apple / foundationdb / ReadTransaction . java <nl> <nl> * whether read conflict ranges are omitted for any reads done through this { @ code ReadTransaction } . <nl> * < br > <nl> * For more information about how to use snapshot reads correctly , see <nl> - * < a href = " / foundationdb / developer - guide . html # using - snapshot - reads " target = " _blank " > Using snapshot reads < / a > . <nl> + * < a href = " / foundationdb / developer - guide . html # snapshot - reads " target = " _blank " > Using snapshot reads < / a > . <nl> * <nl> * @ return whether this is a snapshot view of the database with relaxed isolation properties <nl> * @ see # snapshot ( ) <nl> <nl> / * * <nl> * Return a special - purpose , read - only view of the database . Reads done through this interface are known as " snapshot reads " . <nl> * Snapshot reads selectively relax FoundationDB ' s isolation property , reducing <nl> - * < a href = " / foundationdb / developer - guide . html # transaction - conflicts " target = " _blank " > Transaction conflicts < / a > <nl> + * < a href = " / foundationdb / developer - guide . html # conflict - ranges " target = " _blank " > Transaction conflicts < / a > <nl> * but making reasoning about concurrency harder . < br > <nl> * < br > <nl> * For more information about how to use snapshot reads correctly , see <nl> - * < a href = " / foundationdb / developer - guide . html # using - snapshot - reads " target = " _blank " > Using snapshot reads < / a > . <nl> + * < a href = " / foundationdb / developer - guide . html # snapshot - reads " target = " _blank " > Using snapshot reads < / a > . <nl> * <nl> * @ return a read - only view of this { @ code ReadTransaction } with relaxed isolation properties <nl> * / <nl> <nl> <nl> / * * <nl> * Gets an estimate for the number of bytes stored in the given range . <nl> + * Note : the estimated size is calculated based on the sampling done by FDB server . 
The sampling <nl> + * algorithm works roughly in this way : the larger the key - value pair is , the more likely it would <nl> + * be sampled and the more accurate its sampled size would be . And due to <nl> + * that reason it is recommended to use this API to query against large ranges for accuracy considerations . <nl> + * For a rough reference , if the returned size is larger than 3MB , one can consider the size to be <nl> + * accurate . <nl> * <nl> * @ param begin the beginning of the range ( inclusive ) <nl> * @ param end the end of the range ( exclusive ) <nl> <nl> <nl> / * * <nl> * Gets an estimate for the number of bytes stored in the given range . <nl> - * <nl> + * Note : the estimated size is calculated based on the sampling done by FDB server . The sampling <nl> + * algorithm works roughly in this way : the larger the key - value pair is , the more likely it would <nl> + * be sampled and the more accurate its sampled size would be . And due to <nl> + * that reason it is recommended to use this API to query against large ranges for accuracy considerations . <nl> + * For a rough reference , if the returned size is larger than 3MB , one can consider the size to be <nl> + * accurate . <nl> * @ param range the range of the keys <nl> * <nl> * @ return a handle to access the results of the asynchronous call <nl> mmm a / cmake / CompileBoost . cmake <nl> ppp b / cmake / CompileBoost . cmake <nl> else ( ) <nl> <nl> add_library ( boost_target INTERFACE ) <nl> add_dependencies ( boost_target boostProject ) <nl> - target_include_directories ( boost_target INTERFACE $ { BOOST_INCLUDE_DIR } ) <nl> + target_include_directories ( boost_target SYSTEM INTERFACE $ { BOOST_INCLUDE_DIR } ) <nl> endif ( ) <nl> mmm a / cmake / CompileRocksDB . cmake <nl> ppp b / cmake / CompileRocksDB . cmake <nl> if ( RocksDB_FOUND ) <nl> DOWNLOAD_COMMAND " " <nl> CMAKE_ARGS - DUSE_RTTI = 1 - DPORTABLE = $ { PORTABLE_ROCKSDB } <nl> - DCMAKE_CXX_STANDARD = $ { CMAKE_CXX_STANDARD } <nl> + - DCMAKE_CXX_COMPILER = $ { CMAKE_CXX_COMPILER } <nl> - DCMAKE_BUILD_TYPE = $ { CMAKE_BUILD_TYPE } <nl> - DWITH_GFLAGS = OFF <nl> - DWITH_TESTS = OFF <nl> else ( ) <nl> URL_HASH SHA256 = d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee <nl> CMAKE_ARGS - DUSE_RTTI = 1 - DPORTABLE = $ { PORTABLE_ROCKSDB } <nl> - DCMAKE_CXX_STANDARD = $ { CMAKE_CXX_STANDARD } <nl> + - DCMAKE_CXX_COMPILER = $ { CMAKE_CXX_COMPILER } <nl> - DCMAKE_BUILD_TYPE = $ { CMAKE_BUILD_TYPE } <nl> - DWITH_GFLAGS = OFF <nl> - DWITH_TESTS = OFF <nl> mmm a / cmake / ConfigureCompiler . cmake <nl> ppp b / cmake / ConfigureCompiler . 
cmake <nl> else ( ) <nl> - Wno - unknown - attributes ) <nl> endif ( ) <nl> add_compile_options ( <nl> - - Wno - unknown - warning - option <nl> - - Wno - dangling - else <nl> - - Wno - sign - compare <nl> + - Wall - Wextra <nl> + # Here ' s the current set of warnings we need to explicitly disable to compile warning - free with clang 10 <nl> - Wno - comment <nl> - - Wno - unknown - pragmas <nl> + - Wno - dangling - else <nl> - Wno - delete - non - virtual - dtor <nl> + - Wno - format <nl> + - Wno - mismatched - tags <nl> + - Wno - missing - field - initializers <nl> + - Wno - overloaded - virtual <nl> + - Wno - reorder <nl> + - Wno - reorder - ctor <nl> + - Wno - sign - compare <nl> + - Wno - tautological - pointer - compare <nl> - Wno - undefined - var - template <nl> - Wno - tautological - pointer - compare <nl> - - Wno - format <nl> - Wredundant - move <nl> - Wpessimizing - move <nl> - Woverloaded - virtual <nl> + - Wno - unknown - pragmas <nl> + - Wno - unknown - warning - option <nl> + - Wno - unused - function <nl> + - Wno - unused - local - typedef <nl> + - Wno - unused - parameter <nl> + - Wno - unused - value <nl> + - Wno - self - assign <nl> ) <nl> if ( USE_CCACHE ) <nl> add_compile_options ( <nl> mmm a / contrib / TestHarness / CMakeLists . txt <nl> ppp b / contrib / TestHarness / CMakeLists . txt <nl> set ( SRCS <nl> Properties / AssemblyInfo . cs ) <nl> <nl> set ( TEST_HARNESS_REFERENCES <nl> - " - r : System , System . Core , System . Xml . Linq , System . Data . DataSetExtensions , Microsoft . CSharp , System . Data , System . Xml , $ { TraceLogHelperDll } " ) <nl> + " - r : System , System . Core , System . Xml . Linq , System . Data . DataSetExtensions , Microsoft . CSharp , System . Data , System . Xml , System . Runtime . Serialization , $ { TraceLogHelperDll } " ) <nl> <nl> set ( out_file $ { CMAKE_BINARY_DIR } / packages / bin / TestHarness . exe ) <nl> <nl> mmm a / contrib / TestHarness / Program . cs <nl> ppp b / contrib / TestHarness / Program . cs <nl> <nl> using System . ComponentModel ; <nl> using System . Runtime . InteropServices ; <nl> using System . Xml ; <nl> + using System . Runtime . Serialization . Json ; <nl> <nl> namespace SummarizeTest <nl> { <nl> static int Run ( string fdbserverName , string tlsPluginFile , string testFolder , st <nl> testFile = random . Choice ( uniqueFiles ) ; <nl> string oldBinaryVersionLowerBound = " 0 . 0 . 0 " ; <nl> string lastFolderName = Path . GetFileName ( Path . GetDirectoryName ( testFile ) ) ; <nl> - if ( lastFolderName . Contains ( " from_ " ) ) / / Only perform upgrade tests from certain versions <nl> + if ( lastFolderName . Contains ( " from_ " ) | | lastFolderName . Contains ( " to_ " ) ) / / Only perform upgrade / downgrade tests from certain versions <nl> { <nl> oldBinaryVersionLowerBound = lastFolderName . Split ( ' _ ' ) . Last ( ) ; <nl> } <nl> static int Run ( string fdbserverName , string tlsPluginFile , string testFolder , st <nl> <nl> if ( testDir . EndsWith ( " restarting " ) ) <nl> { <nl> + bool isDowngrade = Path . GetFileName ( Path . GetDirectoryName ( testFile ) ) . Contains ( " to_ " ) ; <nl> + string firstServerName = isDowngrade ? fdbserverName : oldServerName ; <nl> + string secondServerName = isDowngrade ? oldServerName : fdbserverName ; <nl> int expectedUnseed = - 1 ; <nl> int unseed ; <nl> string uid = Guid . NewGuid ( ) . ToString ( ) ; <nl> - bool useNewPlugin = oldServerName = = fdbserverName | | versionGreaterThanOrEqual ( oldServerName . Split ( ' - ' ) . Last ( ) , " 5 . 2 . 
0 " ) ; <nl> - result = RunTest ( oldServerName , useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1 , summaryFileName , errorFileName , seed , buggify , testFile + " - 1 . txt " , runDir , uid , expectedUnseed , out unseed , out retryableError , logOnRetryableError , useValgrind , false , true , oldServerName , traceToStdout ) ; <nl> + bool useNewPlugin = ( oldServerName = = fdbserverName ) | | versionGreaterThanOrEqual ( oldServerName . Split ( ' - ' ) . Last ( ) , " 5 . 2 . 0 " ) ; <nl> + result = RunTest ( firstServerName , useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1 , summaryFileName , errorFileName , seed , buggify , testFile + " - 1 . txt " , runDir , uid , expectedUnseed , out unseed , out retryableError , logOnRetryableError , useValgrind , false , true , oldServerName , traceToStdout ) ; <nl> if ( result = = 0 ) <nl> { <nl> - result = RunTest ( fdbserverName , tlsPluginFile , summaryFileName , errorFileName , seed + 1 , buggify , testFile + " - 2 . txt " , runDir , uid , expectedUnseed , out unseed , out retryableError , logOnRetryableError , useValgrind , true , false , oldServerName , traceToStdout ) ; <nl> + result = RunTest ( secondServerName , tlsPluginFile , summaryFileName , errorFileName , seed + 1 , buggify , testFile + " - 2 . txt " , runDir , uid , expectedUnseed , out unseed , out retryableError , logOnRetryableError , useValgrind , true , false , oldServerName , traceToStdout ) ; <nl> } <nl> } <nl> else <nl> static int Run ( string fdbserverName , string tlsPluginFile , string testFolder , st <nl> { <nl> ErrorOutputListener errorListener = new ErrorOutputListener ( ) ; <nl> process . StartInfo . UseShellExecute = false ; <nl> + string tlsPluginArg = " " ; <nl> if ( tlsPluginFile . Length > 0 ) { <nl> process . StartInfo . EnvironmentVariables [ " FDB_TLS_PLUGIN " ] = tlsPluginFile ; <nl> + tlsPluginArg = " - - tls_plugin = " + tlsPluginFile ; <nl> } <nl> process . StartInfo . RedirectStandardOutput = true ; <nl> var args = " " ; <nl> if ( willRestart & & oldBinaryName . EndsWith ( " alpha6 " ) ) <nl> { <nl> - args = string . Format ( " - Rs 1000000000 - r simulation { 0 } - s { 1 } - f \ " { 2 } \ " - b { 3 } - - tls_plugin = { 4 } - - crash " , <nl> - IsRunningOnMono ( ) ? " " : " - q " , seed , testFile , buggify ? " on " : " off " , tlsPluginFile ) ; <nl> + args = string . Format ( " - Rs 1000000000 - r simulation { 0 } - s { 1 } - f \ " { 2 } \ " - b { 3 } { 4 } - - crash " , <nl> + IsRunningOnMono ( ) ? " " : " - q " , seed , testFile , buggify ? " on " : " off " , tlsPluginArg ) ; <nl> } <nl> else <nl> { <nl> - args = string . Format ( " - Rs 1GB - r simulation { 0 } - s { 1 } - f \ " { 2 } \ " - b { 3 } - - tls_plugin = { 4 } - - crash " , <nl> - IsRunningOnMono ( ) ? " " : " - q " , seed , testFile , buggify ? " on " : " off " , tlsPluginFile ) ; <nl> + args = string . Format ( " - Rs 1GB - r simulation { 0 } - s { 1 } - f \ " { 2 } \ " - b { 3 } { 4 } - - crash " , <nl> + IsRunningOnMono ( ) ? " " : " - q " , seed , testFile , buggify ? " on " : " off " , tlsPluginArg ) ; <nl> } <nl> if ( restarting ) args = args + " - - restarting " ; <nl> if ( useValgrind & & ! willRestart ) <nl> static int Run ( string fdbserverName , string tlsPluginFile , string testFolder , st <nl> memCheckThread . Join ( ) ; <nl> consoleThread . Join ( ) ; <nl> <nl> - var traceFiles = Directory . GetFiles ( tempPath , " trace * . xml " ) ; <nl> + var traceFiles = Directory . GetFiles ( tempPath , " trace * . * " ) . Where ( s = > s . EndsWith ( " . xml " ) | | s . EndsWith ( " . 
json " ) ) . ToArray ( ) ; <nl> if ( traceFiles . Length = = 0 ) <nl> { <nl> if ( ! traceToStdout ) <nl> static string [ ] ParseValgrindOutput ( string valgrindOutputFileName , bool traceToS <nl> return whats . ToArray ( ) ; <nl> } <nl> <nl> + delegate IEnumerable < Magnesium . Event > parseDelegate ( System . IO . Stream stream , string file , <nl> + bool keepOriginalElement = false , double startTime = - 1 , double endTime = Double . MaxValue , <nl> + double samplingFactor = 1 . 0 ) ; <nl> + <nl> static int Summarize ( string [ ] traceFiles , string summaryFileName , <nl> string errorFileName , bool ? killed , List < string > outputErrors , int ? exitCode , long ? peakMemory , <nl> string uid , string valgrindOutputFileName , int expectedUnseed , out int unseed , out bool retryableError , bool logOnRetryableError , <nl> static string [ ] ParseValgrindOutput ( string valgrindOutputFileName , bool traceToS <nl> { <nl> try <nl> { <nl> - foreach ( var ev in Magnesium . XmlParser . Parse ( traceFile , traceFileName ) ) <nl> + parseDelegate parse ; <nl> + if ( traceFileName . EndsWith ( " . json " ) ) <nl> + parse = Magnesium . JsonParser . Parse ; <nl> + else <nl> + parse = Magnesium . XmlParser . Parse ; <nl> + foreach ( var ev in parse ( traceFile , traceFileName ) ) <nl> { <nl> Magnesium . Severity newSeverity ; <nl> if ( severityMap . TryGetValue ( new KeyValuePair < string , Magnesium . Severity > ( ev . Type , ev . Severity ) , out newSeverity ) ) <nl> private static void AppendToErrorSummary ( string summaryFileName , List < XElement > <nl> <nl> private static void AppendToSummary ( string summaryFileName , XElement xout , bool traceToStdout = false , bool shouldLock = true ) <nl> { <nl> + bool useXml = true ; <nl> + if ( summaryFileName ! = null & & summaryFileName . EndsWith ( " . json " ) ) { <nl> + useXml = false ; <nl> + } <nl> + <nl> if ( traceToStdout ) <nl> { <nl> - using ( var wr = System . Xml . XmlWriter . Create ( Console . OpenStandardOutput ( ) , new System . Xml . XmlWriterSettings ( ) { OmitXmlDeclaration = true , Encoding = new System . Text . UTF8Encoding ( false ) } ) ) <nl> - xout . WriteTo ( wr ) ; <nl> + if ( useXml ) { <nl> + using ( var wr = System . Xml . XmlWriter . Create ( Console . OpenStandardOutput ( ) , new System . Xml . XmlWriterSettings ( ) { OmitXmlDeclaration = true , Encoding = new System . Text . UTF8Encoding ( false ) } ) ) <nl> + xout . WriteTo ( wr ) ; <nl> + } else { <nl> + using ( var wr = System . Runtime . Serialization . Json . JsonReaderWriterFactory . CreateJsonWriter ( Console . OpenStandardOutput ( ) ) ) <nl> + xout . WriteTo ( wr ) ; <nl> + } <nl> Console . WriteLine ( ) ; <nl> return ; <nl> } <nl> private static void AppendToSummary ( string summaryFileName , XElement xout , bool <nl> takeLock ( summaryFileName ) ; <nl> try <nl> { <nl> - <nl> using ( var f = System . IO . File . Open ( summaryFileName , System . IO . FileMode . Append , System . IO . FileAccess . Write ) ) <nl> { <nl> if ( f . Length = = 0 ) <nl> private static void AppendToSummary ( string summaryFileName , XElement xout , bool <nl> byte [ ] bytes = Encoding . UTF8 . GetBytes ( " < Trace > " ) ; <nl> f . Write ( bytes , 0 , bytes . Length ) ; <nl> } <nl> - using ( var wr = System . Xml . XmlWriter . Create ( f , new System . Xml . XmlWriterSettings ( ) { OmitXmlDeclaration = true } ) ) <nl> - xout . Save ( wr ) ; <nl> + if ( useXml ) { <nl> + using ( var wr = System . Xml . XmlWriter . Create ( f , new System . Xml . 
XmlWriterSettings ( ) { OmitXmlDeclaration = true } ) ) <nl> + xout . Save ( wr ) ; <nl> + } else { <nl> + using ( var wr = System . Runtime . Serialization . Json . JsonReaderWriterFactory . CreateJsonWriter ( f ) ) <nl> + xout . WriteTo ( wr ) ; <nl> + } <nl> var endl = Encoding . UTF8 . GetBytes ( Environment . NewLine ) ; <nl> f . Write ( endl , 0 , endl . Length ) ; <nl> } <nl> private static void AppendToSummary ( string summaryFileName , XElement xout , bool <nl> releaseLock ( summaryFileName ) ; <nl> } <nl> } <nl> + <nl> private static void AppendXmlMessageToSummary ( string summaryFileName , XElement xout , bool traceToStdout = false , string testFile = null , <nl> int ? seed = null , bool ? buggify = null , bool ? determinismCheck = null , string oldBinaryName = null ) <nl> { <nl> mmm a / contrib / TraceLogHelper / JsonParser . cs <nl> ppp b / contrib / TraceLogHelper / JsonParser . cs <nl> public static class JsonParser <nl> } <nl> catch ( Exception e ) <nl> { <nl> - throw new Exception ( string . Format ( " Failed to parse { 0 } " , root ) , e ) ; <nl> + throw new Exception ( string . Format ( " Failed to parse JSON { 0 } " , root ) , e ) ; <nl> } <nl> if ( ev ! = null ) yield return ev ; <nl> } <nl> private static Event ParseEvent ( XElement xEvent , string file , bool keepOriginalE <nl> TraceFile = file , <nl> DDetails = xEvent . Elements ( ) <nl> . Where ( a = > a . Name ! = " Type " & & a . Name ! = " Time " & & a . Name ! = " Machine " & & a . Name ! = " ID " & & a . Name ! = " Severity " & & ( ! rolledEvent | | a . Name ! = " OriginalTime " ) ) <nl> - . ToDictionary ( a = > string . Intern ( a . Name . LocalName ) , a = > ( object ) a . Value ) , <nl> - original = keepOriginalElement ? xEvent : null , <nl> + / / When the key contains a colon character , it gets parsed as a : item <nl> + . ToDictionary ( a = > a . Name . LocalName = = " item " ? a . Attribute ( " item " ) . Value : string . Intern ( a . Name . LocalName ) , a = > ( object ) a . Value ) , <nl> + original = keepOriginalElement ? xEvent : null <nl> } ; <nl> } <nl> <nl> mmm a / contrib / TraceLogHelper / XmlParser . cs <nl> ppp b / contrib / TraceLogHelper / XmlParser . cs <nl> public static class XmlParser <nl> } <nl> catch ( Exception e ) <nl> { <nl> - throw new Exception ( string . Format ( " Failed to parse { 0 } " , xev ) , e ) ; <nl> + throw new Exception ( string . Format ( " Failed to parse XML { 0 } " , xev ) , e ) ; <nl> } <nl> if ( ev ! = null ) yield return ev ; <nl> } <nl> new file mode 100644 <nl> index 0000000000 . . a9c6f89acb <nl> mmm / dev / null <nl> ppp b / contrib / fdbcstat / README . md <nl> <nl> + # fdbcstat <nl> + ` fdbcstat ` is a FoundationDB client monitoring tool which collects and displays transaction operation statistics inside the C API library ( ` libfdb_c . so ` ) . <nl> + <nl> + # # How it works <nl> + ` fdbcstat ` utilizes [ eBPF / bcc ] ( https : / / github . com / iovisor / bcc ) to attach to ` libfdb_c . so ` shared library and insert special instructions to collect statistics in several common ` fdb_transaction_ * ` calls , then it periodically displays the aggregated statistics . <nl> + <nl> + # # How to use <nl> + <nl> + # # # Syntax <nl> + ` fdbcstat < full path to libfdb_c . so > < options . . . 
> ` <nl> + <nl> + # # # Options <nl> + - ` - p ` or ` - - pid ` : Only capture statistics for the functions called by the specified process <nl> + - ` - i ` or ` - - interval ` : Specify the time interval in seconds between 2 outputs ( Default : 1 ) <nl> + - ` - d ` or ` - - duration ` : Specify the total duration in seconds ` fdbcstats ` will run ( Default : Unset / Forever ) <nl> + - ` - f ` or ` - - functions ` : Specify the comma - separated list of functions to monitor ( Default : Unset / All supported functions ) <nl> + <nl> + # # # Supported Functions <nl> + - get <nl> + - get_range <nl> + - get_read_version <nl> + - set <nl> + - clear <nl> + - clear_range <nl> + - commit <nl> + <nl> + # # # Examples <nl> + # # # # # Collect all statistics and display every second <nl> + ` fdbcstat / usr / lib64 / libfdb_c . so ` <nl> + # # # # # Collect all statistics for PID 12345 for 60 seconds with 10 second interval <nl> + ` fdbcstat / usr / lib64 / libfdb_c . so - p 12345 - d 60 - i 10 ` <nl> + # # # # # Collect statitics only for get and commit <nl> + ` fdbcstat / usr / lib64 / libfdb_c . so - f get , commit ` <nl> + <nl> + # # Output Format <nl> + Each line contains multiple fields . The first field is the timestamp . Other fields are the statistics for each operation . Each operation field contains the following statistics in a slash ( / ) separated format . <nl> + <nl> + - Function <nl> + - Number of calls per second <nl> + - Average latency in microseconds ( us ) <nl> + - Maximum latency in microseconds ( us ) <nl> + <nl> + * * Note * * : The latency is computed as the time difference between the start time and the end time of the ` fdb_transaction_ * ` function call except for ` get ` , ` get_range ` , ` get_read_version ` and ` commit ` . For those 4 functions , the latency is the time difference between the start time of the function and the end time of the following ` fdb_future_block_until_ready ` call . <nl> + <nl> + # # Sample Output <nl> + ` ` ` <nl> + . . . <nl> + 15 : 05 : 31 clear / 22426 / 2 / 34 commit / 18290 / 859 / 15977 get / 56230 / 1110 / 12748 get_range / 14141 / 23 / 75 set / 6276 / 3 / 19 <nl> + 15 : 05 : 41 clear / 24147 / 2 / 38 commit / 18259 / 894 / 44259 get / 57978 / 1098 / 15636 get_range / 13171 / 23 / 90 set / 6564 / 3 / 15 <nl> + 15 : 05 : 51 clear / 21287 / 2 / 34 commit / 18386 / 876 / 17824 get / 58318 / 1106 / 30539 get_range / 13018 / 23 / 68 set / 6559 / 3 / 13 <nl> + . . . <nl> + ` ` ` <nl> new file mode 100755 <nl> index 0000000000 . . 5feca920a6 <nl> mmm / dev / null <nl> ppp b / contrib / fdbcstat / fdbcstat <nl> <nl> + # ! / usr / bin / env python <nl> + <nl> + from __future__ import print_function <nl> + from bcc import BPF <nl> + from time import sleep , strftime , time <nl> + import argparse <nl> + import signal <nl> + <nl> + description = " " " The fdbcstat utility displays FDB C API statistics on terminal <nl> + that include calls - per - second , average latency and maximum latency <nl> + within the given time interval . <nl> + <nl> + Each field in the output represents the following elements <nl> + in a slash - separated format : <nl> + - Operation type <nl> + - Number of calls per second <nl> + - Average latency in microseconds ( us ) <nl> + - Maximum latency in microseconds ( us ) <nl> + " " " <nl> + <nl> + # supported APIs <nl> + # note : the array index is important here . <nl> + # it ' s used in BPF as the funciton identifier . 
<nl> + # 0 : get <nl> + # 1 : get_range <nl> + # 2 : get_read_version <nl> + # 3 : set <nl> + # 4 : clear <nl> + # 5 : clear_range <nl> + # 6 : commit <nl> + fdbfuncs = [ <nl> + { " name " : " get " , " waitfuture " : True , " enabled " : True } , <nl> + { " name " : " get_range " , " waitfuture " : True , " enabled " : True } , <nl> + { " name " : " get_read_version " , " waitfuture " : True , " enabled " : True } , <nl> + { " name " : " set " , " waitfuture " : False , " enabled " : True } , <nl> + { " name " : " clear " , " waitfuture " : False , " enabled " : True } , <nl> + { " name " : " clear_range " , " waitfuture " : False , " enabled " : True } , <nl> + { " name " : " commit " , " waitfuture " : True , " enabled " : True } <nl> + ] <nl> + <nl> + # arguments <nl> + parser = argparse . ArgumentParser ( <nl> + description = " FoundationDB client statistics collector " , <nl> + formatter_class = argparse . RawTextHelpFormatter , <nl> + epilog = description ) <nl> + parser . add_argument ( " - p " , " - - pid " , type = int , <nl> + help = " Capture for this PID only " ) <nl> + parser . add_argument ( " - i " , " - - interval " , type = int , <nl> + help = " Print interval in seconds ( Default : 1 second ) " ) <nl> + parser . add_argument ( " - d " , " - - duration " , type = int , <nl> + help = " Duration in seconds ( Default : unset ) " ) <nl> + parser . add_argument ( " - f " , " - - functions " , type = str , <nl> + help = ' ' ' Capture for specific functions ( comma - separated ) ( Default : unset ) <nl> + Supported functions : get , get_range , get_read_version , <nl> + set , clear , clear_range , commit ' ' ' ) <nl> + parser . add_argument ( " libpath " , <nl> + help = " Full path to libfdb_c . so " ) <nl> + args = parser . parse_args ( ) <nl> + <nl> + if not args . interval : <nl> + args . interval = 1 <nl> + <nl> + if args . functions : <nl> + # reset all <nl> + idx = 0 <nl> + while idx < len ( fdbfuncs ) : <nl> + fdbfuncs [ idx ] [ ' enabled ' ] = False <nl> + idx + = 1 <nl> + <nl> + # enable specified functions <nl> + for f in args . functions . split ( ' , ' ) : <nl> + idx = 0 <nl> + while idx < len ( fdbfuncs ) : <nl> + if fdbfuncs [ idx ] [ ' name ' ] = = f : <nl> + fdbfuncs [ idx ] [ ' enabled ' ] = True <nl> + idx + = 1 <nl> + <nl> + # check for libfdb_c . so <nl> + libpath = BPF . find_library ( args . libpath ) or BPF . find_exe ( args . libpath ) <nl> + if libpath is None : <nl> + print ( " Error : Can ' t find % s " % args . libpath ) <nl> + exit ( 1 ) <nl> + <nl> + # main BPF program <nl> + # we do not rely on PT_REGS_IP ( ) and BPF . sym ( ) to retrieve the symbol name <nl> + # because some " backward - compatible " symbols do not get resolved through BPF . sym ( ) . <nl> + bpf_text = " " " <nl> + # include < uapi / linux / ptrace .
h > <nl> + <nl> + typedef struct _stats_key_t { <nl> + u32 pid ; <nl> + u32 func ; <nl> + } stats_key_t ; <nl> + <nl> + typedef struct _stats_val_t { <nl> + u64 cnt ; <nl> + u64 total ; <nl> + u64 max ; <nl> + } stats_val_t ; <nl> + <nl> + BPF_HASH ( starttime , u32 , u64 ) ; <nl> + BPF_HASH ( startfunc , u32 , u32 ) ; <nl> + BPF_HASH ( stats , stats_key_t , stats_val_t ) ; <nl> + <nl> + static int trace_common_entry ( struct pt_regs * ctx , u32 func ) <nl> + { <nl> + u64 pid_tgid = bpf_get_current_pid_tgid ( ) ; <nl> + u32 pid = pid_tgid ; / * lower 32 - bit = Process ID ( Thread ID ) * / <nl> + u32 tgid = pid_tgid > > 32 ; / * upper 32 - bit = Thread Group ID ( Process ID ) * / <nl> + <nl> + / * if PID is specified , we ' ll filter by tgid here * / <nl> + FILTERPID <nl> + <nl> + / * start time in ns * / <nl> + u64 ts = bpf_ktime_get_ns ( ) ; <nl> + <nl> + / * function type * / <nl> + u32 f = func ; <nl> + startfunc . update ( & pid , & f ) ; <nl> + <nl> + / * update start time * / <nl> + starttime . update ( & pid , & ts ) ; <nl> + <nl> + return 0 ; <nl> + } <nl> + <nl> + int trace_get_entry ( struct pt_regs * ctx ) <nl> + { <nl> + return trace_common_entry ( ctx , 0 ) ; <nl> + } <nl> + <nl> + int trace_get_range_entry ( struct pt_regs * ctx ) <nl> + { <nl> + return trace_common_entry ( ctx , 1 ) ; <nl> + } <nl> + <nl> + int trace_get_read_version_entry ( struct pt_regs * ctx ) <nl> + { <nl> + return trace_common_entry ( ctx , 2 ) ; <nl> + } <nl> + <nl> + int trace_set_entry ( struct pt_regs * ctx ) <nl> + { <nl> + return trace_common_entry ( ctx , 3 ) ; <nl> + } <nl> + <nl> + int trace_clear_entry ( struct pt_regs * ctx ) <nl> + { <nl> + return trace_common_entry ( ctx , 4 ) ; <nl> + } <nl> + <nl> + int trace_clear_range_entry ( struct pt_regs * ctx ) <nl> + { <nl> + return trace_common_entry ( ctx , 5 ) ; <nl> + } <nl> + <nl> + int trace_commit_entry ( struct pt_regs * ctx ) <nl> + { <nl> + return trace_common_entry ( ctx , 6 ) ; <nl> + } <nl> + <nl> + int trace_func_return ( struct pt_regs * ctx ) <nl> + { <nl> + u64 * st ; / * start time * / <nl> + u64 duration ; <nl> + u64 pid_tgid = bpf_get_current_pid_tgid ( ) ; <nl> + u32 pid = pid_tgid ; <nl> + u32 tgid = pid_tgid > > 32 ; <nl> + <nl> + / * if PID is specified , we ' ll filter by tgid here * / <nl> + FILTERPID <nl> + <nl> + / * calculate duration in ns * / <nl> + st = starttime . lookup ( & pid ) ; <nl> + if ( ! st | | st = = 0 ) { <nl> + return 0 ; / * missed start * / <nl> + } <nl> + / * duration in ns * / <nl> + duration = bpf_ktime_get_ns ( ) - * st ; <nl> + starttime . delete ( & pid ) ; <nl> + <nl> + / * update stats * / <nl> + u32 func , * funcp = startfunc . lookup ( & pid ) ; <nl> + if ( funcp ) { <nl> + func = * funcp ; <nl> + stats_key_t key ; <nl> + stats_val_t * prev ; <nl> + stats_val_t cur ; <nl> + key . pid = pid ; / * pid here is the thread ID in user space * / <nl> + key . func = func ; <nl> + prev = stats . lookup ( & key ) ; <nl> + if ( prev ) { <nl> + cur . cnt = prev - > cnt + 1 ; <nl> + cur . total = prev - > total + duration ; <nl> + cur . max = ( duration > prev - > max ) ? duration : prev - > max ; <nl> + stats . update ( & key , & cur ) ; <nl> + } else { <nl> + cur . cnt = 1 ; <nl> + cur . total = duration ; <nl> + cur . max = duration ; <nl> + stats . insert ( & key , & cur ) ; <nl> + } <nl> + startfunc . delete ( & pid ) ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + " " " <nl> + <nl> + # If PID is specified , insert the PID filter <nl> + if args . pid : <nl> + bpf_text = bpf_text . 
replace ( ' FILTERPID ' , <nl> + ' if ( tgid ! = % d ) { return 0 ; } ' % args . pid ) <nl> + else : <nl> + bpf_text = bpf_text . replace ( ' FILTERPID ' , ' ' ) <nl> + <nl> + # signal handler <nl> + def signal_ignore ( signal , frame ) : <nl> + pass <nl> + <nl> + # load BPF program <nl> + b = BPF ( text = bpf_text ) <nl> + <nl> + # attach probes <nl> + waitfuture = False ; <nl> + for f in fdbfuncs : <nl> + <nl> + # skip disabled functions <nl> + if not f [ ' enabled ' ] : <nl> + continue <nl> + <nl> + # attach the entry point <nl> + b . attach_uprobe ( name = libpath , sym = ' fdb_transaction_ ' + f [ ' name ' ] , <nl> + fn_name = ' trace_ ' + f [ ' name ' ] + ' _entry ' , pid = args . pid or - 1 ) <nl> + if f [ ' waitfuture ' ] : <nl> + waitfuture = True <nl> + else : <nl> + b . attach_uretprobe ( name = libpath , sym = ' fdb_transaction_ ' + f [ ' name ' ] , <nl> + fn_name = " trace_func_return " , pid = args . pid or - 1 ) <nl> + if waitfuture : <nl> + b . attach_uretprobe ( name = libpath , sym = ' fdb_future_block_until_ready ' , <nl> + fn_name = " trace_func_return " , pid = args . pid or - 1 ) <nl> + <nl> + # open uprobes <nl> + matched = b . num_open_uprobes ( ) <nl> + <nl> + if matched = = 0 : <nl> + print ( " 0 functions matched . . . Exiting . " ) <nl> + exit ( ) <nl> + <nl> + stats = b . get_table ( " stats " ) <nl> + <nl> + # aggregated stats dictionary <nl> + agg = { } <nl> + <nl> + exiting = 0 <nl> + seconds = 0 <nl> + prev = 0 . 0 <nl> + now = 0 . 0 <nl> + <nl> + # main loop <nl> + while ( 1 ) : <nl> + try : <nl> + sleep ( args . interval ) <nl> + seconds + = args . interval <nl> + prev = now <nl> + now = time ( ) <nl> + if prev = = 0 : <nl> + stats . clear ( ) <nl> + continue <nl> + except KeyboardInterrupt : <nl> + exiting = 1 <nl> + signal . signal ( signal . SIGINT , signal_ignore ) <nl> + <nl> + if args . duration and seconds > = args . duration : <nl> + exiting = 1 <nl> + <nl> + # walk through the stats and aggregate by the functions <nl> + for k , v in stats . items ( ) : <nl> + f = fdbfuncs [ k . func ] [ ' name ' ] <nl> + if f in agg : <nl> + # update an exiting entry <nl> + agg [ f ] [ ' cnt ' ] = agg [ f ] [ ' cnt ' ] + v . cnt <nl> + agg [ f ] [ ' total ' ] = agg [ f ] [ ' total ' ] + v . total ; <nl> + if v . cnt > agg [ f ] [ ' max ' ] : <nl> + agg [ f ] [ ' max ' ] = v . cnt <nl> + else : <nl> + # insert a new entry <nl> + agg [ f ] = { ' cnt ' : v . cnt , ' total ' : v . total , ' max ' : v . max } <nl> + <nl> + # print out aggregated stats <nl> + print ( " % - 8s " % ( strftime ( " % H : % M : % S " ) ) , end = " " , flush = True ) <nl> + for f in sorted ( agg ) : <nl> + print ( " % s / % d / % d / % d " % ( f , <nl> + agg [ f ] [ ' cnt ' ] / ( now - prev ) , <nl> + agg [ f ] [ ' total ' ] / agg [ f ] [ ' cnt ' ] / 1000 , # us <nl> + agg [ f ] [ ' max ' ] / 1000 ) , # us <nl> + end = " " ) <nl> + print ( ) <nl> + <nl> + stats . clear ( ) <nl> + agg . clear ( ) <nl> + <nl> + if exiting : <nl> + exit ( ) <nl> mmm a / design / special - key - space . md <nl> ppp b / design / special - key - space . md <nl> Consequently , the special - key - space framework wants to integrate all client func <nl> If your feature is exposing information to clients and the results are easily formatted as key - value pairs , then you can use special - key - space to implement your client function . 
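From the client's point of view, a registered module is consumed with ordinary reads; nothing beyond the normal transaction API is involved. The sketch below is illustrative only: it assumes the FoundationDB Python bindings, a cluster reachable through the default cluster file, and that the `\xff\xff/example/` range used in the next section has actually been registered as a module.

```python
# Illustrative sketch only: reading special keys from a client with the
# Python bindings. Assumes a reachable cluster and that the
# \xff\xff/example/ module from the example below is registered.
import fdb

fdb.api_version(630)
db = fdb.open()  # default cluster file


@fdb.transactional
def read_special_keys(tr):
    # A single special key is materialized when read.
    status = tr[b'\xff\xff/status/json']
    # A module is read as a range, like any other key range.
    capitals = {kv.key: kv.value
                for kv in tr.get_range(b'\xff\xff/example/',
                                       b'\xff\xff/example/\xff')}
    return status, capitals


status_json, capitals = read_special_keys(db)
```

Any binding that can issue range reads can consume a module the same way, which is what makes the key-value framing attractive for exposing client functions.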
<nl> <nl> # # How <nl> - If you choose to use , you need to implement a function class that inherits from ` SpecialKeyRangeBaseImpl ` , which has an abstract method ` Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) ` . <nl> + If you choose to use , you need to implement a function class that inherits from ` SpecialKeyRangeReadImpl ` , which has an abstract method ` Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) ` . <nl> This method can be treated as a callback , whose implementation details are determined by the developer . <nl> Once you fill out the method , register the function class to the corresponding key range . <nl> Below is a detailed example . <nl> ` ` ` c + + <nl> / / Implement the function class , <nl> / / the corresponding key range is [ \ xff \ xff / example / , \ xff \ xff / example / \ xff ) <nl> - class SKRExampleImpl : public SpecialKeyRangeBaseImpl { <nl> + class SKRExampleImpl : public SpecialKeyRangeReadImpl { <nl> public : <nl> - explicit SKRExampleImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { <nl> + explicit SKRExampleImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { <nl> / / Our implementation is quite simple here , the key - value pairs are formatted as : <nl> / / \ xff \ xff / example / < country_name > : < capital_city_name > <nl> CountryToCapitalCity [ LiteralStringRef ( " USA " ) ] = LiteralStringRef ( " Washington , D . C . " ) ; <nl> mmm a / documentation / sphinx / source / administration . rst <nl> ppp b / documentation / sphinx / source / administration . rst <nl> If you interrupt the exclude command with Ctrl - C after seeing the " waiting for s <nl> <nl> 7 ) If you ever want to add a removed machine back to the cluster , you will have to take it off the excluded servers list to which it was added in step 3 . This can be done using the ` ` include ` ` command of ` ` fdbcli ` ` . If attempting to re - include a failed server , this can be done using the ` ` include failed ` ` command of ` ` fdbcli ` ` . Typing ` ` exclude ` ` with no parameters will tell you the current list of excluded and failed machines . <nl> <nl> + As of api version 700 , excluding servers can be done with the : ref : ` special key space management module < special - key - space - management - module > ` as well . <nl> + <nl> Moving a cluster <nl> = = = = = = = = = = = = = = = = <nl> <nl> mmm a / documentation / sphinx / source / api - c . rst <nl> ppp b / documentation / sphinx / source / api - c . rst <nl> Applications must provide error handling and an appropriate retry loop around th <nl> <nl> . . function : : FDBFuture * fdb_transaction_get_estimated_range_size_bytes ( FDBTransaction * tr , uint8_t const * begin_key_name , int begin_key_name_length , uint8_t const * end_key_name , int end_key_name_length ) <nl> Returns an estimated byte size of the key range . <nl> + . . note : : The estimated size is calculated based on the sampling done by FDB server . The sampling algorithm works roughly in this way : the larger the key - value pair is , the more likely it would be sampled and the more accurate its sampled size would be . And due to that reason it is recommended to use this API to query against large ranges for accuracy considerations . For a rough reference , if the returned size is larger than 3MB , one can consider the size to be accurate . <nl> <nl> | future - return0 | the estimated size of the key range given . 
| future - return1 | call : func : ` fdb_future_get_int64 ( ) ` to extract the size , | future - return2 | <nl> <nl> mmm a / documentation / sphinx / source / api - error - codes . rst <nl> ppp b / documentation / sphinx / source / api - error - codes . rst <nl> FoundationDB may return the following error codes from API functions . If you nee <nl> | special_keys_no_module_found | 2113 | Special key space range read does not intersect a module . | <nl> | | | Refer to the ` ` SPECIAL_KEY_SPACE_RELAXED ` ` transaction option for more details . | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + mmm - - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + <nl> + | special_keys_write_disabled | 2114 | Special key space is not allowed to write by default . Refer | <nl> + | | | to the ` ` SPECIAL_KEY_SPACE_ENABLE_WRITES ` ` transaction option for more details . | <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + mmm - - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + <nl> + | special_keys_no_write_module_found | 2115 | Special key space key or keyrange in set or clear does not intersect a module . | <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + mmm - - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + <nl> + | special_keys_cross_module_write | 2116 | Special key space clear crosses modules | <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + mmm - - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + <nl> + | special_keys_api_failure | 2117 | Api call through special keys failed . For more information , read the | <nl> + | | | ` ` 0xff0xff / error_message ` ` key | <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + mmm - - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + <nl> | api_version_unset | 2200 | API version is not set | <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + mmm - - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + <nl> | api_version_already_set | 2201 | API version may be set only once | <nl> mmm a / documentation / sphinx / source / api - python . rst <nl> ppp b / documentation / sphinx / source / api - python . rst <nl> Transaction misc functions <nl> . . method : : Transaction . get_estimated_range_size_bytes ( begin_key , end_key ) <nl> <nl> Get the estimated byte size of the given key range . Returns a : class : ` FutureInt64 ` . <nl> + . . note : : The estimated size is calculated based on the sampling done by FDB server . The sampling algorithm works roughly in this way : the larger the key - value pair is , the more likely it would be sampled and the more accurate its sampled size would be . And due to that reason it is recommended to use this API to query against large ranges for accuracy considerations . For a rough reference , if the returned size is larger than 3MB , one can consider the size to be accurate . <nl> <nl> . . _api - python - transaction - options : <nl> <nl> mmm a / documentation / sphinx / source / api - ruby . rst <nl> ppp b / documentation / sphinx / source / api - ruby . rst <nl> Transaction misc functions <nl> . . method : : Transaction . get_estimated_range_size_bytes ( begin_key , end_key ) <nl> <nl> Get the estimated byte size of the given key range . Returns a : class : ` Int64Future ` . <nl> + . . 
note : : The estimated size is calculated based on the sampling done by FDB server . The sampling algorithm works roughly in this way : the larger the key - value pair is , the more likely it would be sampled and the more accurate its sampled size would be . And due to that reason it is recommended to use this API to query against large ranges for accuracy considerations . For a rough reference , if the returned size is larger than 3MB , one can consider the size to be accurate . <nl> <nl> . . method : : Transaction . get_approximate_size ( ) - > Int64Future <nl> <nl> mmm a / documentation / sphinx / source / developer - guide . rst <nl> ppp b / documentation / sphinx / source / developer - guide . rst <nl> Special keys <nl> Keys starting with the bytes ` ` \ xff \ xff ` ` are called " special " keys , and they are materialized when read . : doc : ` \ \ xff \ \ xff / status / json < mr - status > ` is an example of a special key . <nl> As of api version 630 , additional features have been exposed as special keys and are available to read as ranges instead of just individual keys . Additionally , the special keys are now organized into " modules " . <nl> <nl> - Modules <nl> mmmmmm - - <nl> + Read - only modules <nl> + mmmmmmmmmmmmmmm - - <nl> <nl> A module is loosely defined as a key range in the special key space where a user can expect similar behavior from reading any key in that range . <nl> By default , users will see a ` ` special_keys_no_module_found ` ` error if they read from a range not contained in a module . <nl> Caveats <nl> <nl> # . ` ` \ xff \ xff / metrics / health / ` ` These keys may return data that ' s several seconds old , and the data may not be available for a brief period during recovery . This will be indicated by the keys being absent . <nl> <nl> + <nl> + Read / write modules <nl> + mmmmmmmmmmmmmmmmmm <nl> + <nl> + As of api version 700 , some modules in the special key space allow writes as <nl> + well as reads . In these modules , a user can expect that mutations ( i . e . sets , <nl> + clears , etc ) do not have side - effects outside of the current transaction <nl> + until commit is called ( the same is true for writes to the normal key space ) . <nl> + A user can also expect the effects on commit to be atomic . Reads to <nl> + special keys may require reading system keys ( whose format is an implementation <nl> + detail ) , and for those reads appropriate read conflict ranges are added on <nl> + the underlying system keys . <nl> + <nl> + Writes to read / write modules in the special key space are disabled by <nl> + default . Use the ` ` special_key_space_enable_writes ` ` transaction option to <nl> + enable them [ # special_key_space_enable_writes ] _ . <nl> + <nl> + <nl> + . . _special - key - space - management - module : <nl> + <nl> + Management module <nl> + ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> + <nl> + The management module is for temporary cluster configuration changes . For <nl> + example , in order to safely remove a process from the cluster , one can add an <nl> + exclusion to the ` ` \ xff \ xff / management / excluded / ` ` key prefix that matches <nl> + that process , and wait for necessary data to be moved away . <nl> + <nl> + # . ` ` \ xff \ xff / management / excluded / < exclusion > ` ` Read / write . Indicates that the cluster should move data away from processes matching ` ` < exclusion > ` ` , so that they can be safely removed . 
See : ref : ` removing machines from a cluster < removing - machines - from - a - cluster > ` for documentation for the corresponding fdbcli command . <nl> + # . ` ` \ xff \ xff / management / failed / < exclusion > ` ` Read / write . Indicates that the cluster should consider matching processes as permanently failed . This allows the cluster to avoid maintaining extra state and doing extra work in the hope that these processes come back . See : ref : ` removing machines from a cluster < removing - machines - from - a - cluster > ` for documentation for the corresponding fdbcli command . <nl> + # . ` ` \ xff \ xff / management / inProgressExclusion / < address > ` ` Read - only . Indicates that the process matching ` ` < address > ` ` matches an exclusion , but still has necessary data and can ' t yet be safely removed . <nl> + # . ` ` \ xff \ xff / management / options / excluded / force ` ` Read / write . Setting this key disables safety checks for writes to ` ` \ xff \ xff / management / excluded / < exclusion > ` ` . Setting this key only has an effect in the current transaction and is not persisted on commit . <nl> + # . ` ` \ xff \ xff / management / options / failed / force ` ` Read / write . Setting this key disables safety checks for writes to ` ` \ xff \ xff / management / failed / < exclusion > ` ` . Setting this key only has an effect in the current transaction and is not persisted on commit . <nl> + <nl> + An exclusion is syntactically either an ip address ( e . g . ` ` 127 . 0 . 0 . 1 ` ` ) , or <nl> + an ip address and port ( e . g . ` ` 127 . 0 . 0 . 1 : 4500 ` ` ) . If no port is specified , <nl> + then all processes on that host match the exclusion . <nl> + <nl> + Error message module <nl> + ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> + <nl> + Each module written to validates the transaction before committing , and this <nl> + validation failing is indicated by a ` ` special_keys_api_failure ` ` error . <nl> + More detailed information about why this validation failed can be accessed through the ` ` \ xff \ xff / error_message ` ` key , whose value is a json document with the following schema . <nl> + <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + * * Field * * * * Type * * * * Description * * <nl> + mmmmmmmmmmmmmmmmmmmmmmmm - - mmmmmm - - mmmmmmmmmmmmmmm <nl> + retriable boolean Whether or not this operation might succeed if retried <nl> + command string The fdbcli command corresponding to this operation <nl> + message string Help text explaining the reason this operation failed <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> Performance considerations <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> At a first glance this looks very similar to an ` ` commit_unknown_result ` ` . Howev <nl> <nl> . . [ # conflicting_keys ] In practice , the transaction probably committed successfully . However , if you ' re running multiple resolvers then it ' s possible for a transaction to cause another to abort even if it doesn ' t commit successfully . <nl> . . [ # max_read_transaction_life_versions ] The number 5000000 comes from the server knob MAX_READ_TRANSACTION_LIFE_VERSIONS <nl> + . . [ # special_key_space_enable_writes ] Enabling this option enables other transaction options , such as ` ` ACCESS_SYSTEM_KEYS ` ` . This may change in the future . <nl> mmm a / documentation / sphinx / source / downloads . 
rst <nl> ppp b / documentation / sphinx / source / downloads . rst <nl> macOS <nl> <nl> The macOS installation package is supported on macOS 10 . 7 + . It includes the client and ( optionally ) the server . <nl> <nl> - * ` FoundationDB - 6 . 3 . 3 . pkg < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / macOS / installers / FoundationDB - 6 . 3 . 3 . pkg > ` _ <nl> + * ` FoundationDB - 6 . 3 . 4 . pkg < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / macOS / installers / FoundationDB - 6 . 3 . 4 . pkg > ` _ <nl> <nl> Ubuntu <nl> mmmmmm <nl> <nl> The Ubuntu packages are supported on 64 - bit Ubuntu 12 . 04 + , but beware of the Linux kernel bug in Ubuntu 12 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 3 . 3 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / ubuntu / installers / foundationdb - clients_6 . 3 . 3 - 1_amd64 . deb > ` _ <nl> - * ` foundationdb - server - 6 . 3 . 3 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / ubuntu / installers / foundationdb - server_6 . 3 . 3 - 1_amd64 . deb > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 3 . 4 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / ubuntu / installers / foundationdb - clients_6 . 3 . 4 - 1_amd64 . deb > ` _ <nl> + * ` foundationdb - server - 6 . 3 . 4 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / ubuntu / installers / foundationdb - server_6 . 3 . 4 - 1_amd64 . deb > ` _ ( depends on the clients package ) <nl> <nl> RHEL / CentOS EL6 <nl> mmmmmmmmmmmmmmm <nl> <nl> The RHEL / CentOS EL6 packages are supported on 64 - bit RHEL / CentOS 6 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel6 / installers / foundationdb - clients - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm > ` _ <nl> - * ` foundationdb - server - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel6 / installers / foundationdb - server - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 3 . 4 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / rhel6 / installers / foundationdb - clients - 6 . 3 . 4 - 1 . el6 . x86_64 . rpm > ` _ <nl> + * ` foundationdb - server - 6 . 3 . 4 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / rhel6 / installers / foundationdb - server - 6 . 3 . 4 - 1 . el6 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> <nl> RHEL / CentOS EL7 <nl> mmmmmmmmmmmmmmm <nl> <nl> The RHEL / CentOS EL7 packages are supported on 64 - bit RHEL / CentOS 7 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 3 . 3 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel7 / installers / foundationdb - clients - 6 . 3 . 3 - 1 . el7 . x86_64 . rpm > ` _ <nl> - * ` foundationdb - server - 6 . 3 . 3 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel7 / installers / foundationdb - server - 6 . 3 . 3 - 1 . el7 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 3 . 4 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / rhel7 / installers / foundationdb - clients - 6 . 3 . 4 - 1 . el7 . x86_64 . 
rpm > ` _ <nl> + * ` foundationdb - server - 6 . 3 . 4 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / rhel7 / installers / foundationdb - server - 6 . 3 . 4 - 1 . el7 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> <nl> Windows <nl> mmmmmm - <nl> <nl> The Windows installer is supported on 64 - bit Windows XP and later . It includes the client and ( optionally ) the server . <nl> <nl> - * ` foundationdb - 6 . 3 . 3 - x64 . msi < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / windows / installers / foundationdb - 6 . 3 . 3 - x64 . msi > ` _ <nl> + * ` foundationdb - 6 . 3 . 4 - x64 . msi < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / windows / installers / foundationdb - 6 . 3 . 4 - x64 . msi > ` _ <nl> <nl> API Language Bindings <nl> = = = = = = = = = = = = = = = = = = = = = <nl> On macOS and Windows , the FoundationDB Python API bindings are installed as part <nl> <nl> If you need to use the FoundationDB Python API from other Python installations or paths , use the Python package manager ` ` pip ` ` ( ` ` pip install foundationdb ` ` ) or download the Python package : <nl> <nl> - * ` foundationdb - 6 . 3 . 3 . tar . gz < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / python / foundationdb - 6 . 3 . 3 . tar . gz > ` _ <nl> + * ` foundationdb - 6 . 3 . 4 . tar . gz < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / bindings / python / foundationdb - 6 . 3 . 4 . tar . gz > ` _ <nl> <nl> Ruby 1 . 9 . 3 / 2 . 0 . 0 + <nl> mmmmmmmmmmmmmmm - - <nl> <nl> - * ` fdb - 6 . 3 . 3 . gem < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / ruby / fdb - 6 . 3 . 3 . gem > ` _ <nl> + * ` fdb - 6 . 3 . 4 . gem < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / bindings / ruby / fdb - 6 . 3 . 4 . gem > ` _ <nl> <nl> Java 8 + <nl> mmmmmm - <nl> <nl> - * ` fdb - java - 6 . 3 . 3 . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / java / fdb - java - 6 . 3 . 3 . jar > ` _ <nl> - * ` fdb - java - 6 . 3 . 3 - javadoc . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / java / fdb - java - 6 . 3 . 3 - javadoc . jar > ` _ <nl> + * ` fdb - java - 6 . 3 . 4 . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / bindings / java / fdb - java - 6 . 3 . 4 . jar > ` _ <nl> + * ` fdb - java - 6 . 3 . 4 - javadoc . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 4 / bindings / java / fdb - java - 6 . 3 . 4 - javadoc . jar > ` _ <nl> <nl> Go 1 . 11 + <nl> mmmmmm - - <nl> mmm a / documentation / sphinx / source / release - notes / release - notes - 620 . rst <nl> ppp b / documentation / sphinx / source / release - notes / release - notes - 620 . rst <nl> <nl> Release Notes <nl> # # # # # # # # # # # # # <nl> <nl> + 6 . 2 . 24 <nl> + = = = = = = <nl> + <nl> + Features <nl> + mmmmmm - - <nl> + <nl> + * Added the ` ` suspend ` ` command to ` ` fdbcli ` ` which kills a process and prevents it from rejoining the cluster for a specified duration . ` ( PR # 3550 ) < https : / / github . com / apple / foundationdb / pull / 3550 > ` _ <nl> + <nl> 6 . 2 . 23 <nl> = = = = = = <nl> <nl> mmm a / documentation / sphinx / source / release - notes / release - notes - 630 . rst <nl> ppp b / documentation / sphinx / source / release - notes / release - notes - 630 . rst <nl> <nl> Release Notes <nl> # # # # # # # # # # # # # <nl> <nl> - 6 . 3 . 3 <nl> + 6 . 3 . 
5 <nl> + = = = = = <nl> + <nl> + * Fix an issue where ` ` fdbcli - - exec ' exclude no_wait . . . ' ` ` would incorrectly report that processes can safely be removed from the cluster . ` ( PR # 3566 ) < https : / / github . com / apple / foundationdb / pull / 3566 > ` _ <nl> + <nl> + 6 . 3 . 4 <nl> = = = = = <nl> <nl> Features <nl> Performance <nl> * Reduced the number of comparisons used by various map implementations . ` ( PR # 2882 ) < https : / / github . com / apple / foundationdb / pull / 2882 > ` _ <nl> * Reduced the serialized size of empty strings . ` ( PR # 3063 ) < https : / / github . com / apple / foundationdb / pull / 3063 > ` _ <nl> * Reduced the serialized size of various interfaces by 10x . ` ( PR # 3068 ) < https : / / github . com / apple / foundationdb / pull / 3068 > ` _ <nl> + * TLS handshakes can now be done in a background thread pool . ` ( PR # 3403 ) < https : / / github . com / apple / foundationdb / pull / 3403 > ` _ <nl> <nl> Reliability <nl> mmmmmmmmm - - <nl> Fixes <nl> * Transaction logs configured to spill by reference had an unintended delay between each spilled batch . ` ( PR # 3153 ) < https : / / github . com / apple / foundationdb / pull / 3153 > ` _ <nl> * Added guards to honor ` ` DISABLE_POSIX_KERNEL_AIO ` ` . ` ( PR # 2888 ) < https : / / github . com / apple / foundationdb / pull / 2888 > ` _ <nl> * Prevent blob upload timeout if request timeout is lower than expected request time . ` ( PR # 3533 ) < https : / / github . com / apple / foundationdb / pull / 3533 > ` _ <nl> + * In very rare scenarios , the data distributor process would crash when being shutdown . ` ( PR # 3530 ) < https : / / github . com / apple / foundationdb / pull / 3530 > ` _ <nl> + * The master would die immediately if it did not have the correct cluster controller interface when recruited . [ 6 . 3 . 4 ] ` ( PR # 3537 ) < https : / / github . com / apple / foundationdb / pull / 3537 > ` _ <nl> <nl> Status <nl> mmmmmm <nl> mmm a / fdbbackup / FileConverter . actor . cpp <nl> ppp b / fdbbackup / FileConverter . actor . cpp <nl> struct MutationFilesReadProgress : public ReferenceCounted < MutationFilesReadProg <nl> <nl> void dumpProgress ( std : : string msg ) { <nl> std : : cout < < msg < < " \ n " ; <nl> - for ( const auto fp : fileProgress ) { <nl> + for ( const auto & fp : fileProgress ) { <nl> std : : cout < < fp - > fd - > getFilename ( ) < < " " < < fp - > mutations . size ( ) < < " mutations " ; <nl> if ( fp - > mutations . size ( ) > 0 ) { <nl> std : : cout < < " , range " < < fp - > mutations [ 0 ] . version . toString ( ) < < " " <nl> mmm a / fdbbackup / backup . actor . cpp <nl> ppp b / fdbbackup / backup . actor . cpp <nl> CSimpleOpt : : SOption g_rgBackupListOptions [ ] = { <nl> SO_END_OF_OPTIONS <nl> } ; <nl> <nl> + / / g_rgRestoreOptions is used by fdbrestore and fastrestore_tool <nl> CSimpleOpt : : SOption g_rgRestoreOptions [ ] = { <nl> # ifdef _WIN32 <nl> { OPT_PARENTPID , " - - parentpid " , SO_REQ_SEP } , <nl> static void printRestoreUsage ( bool devhelp ) { <nl> printf ( " Prefix to add to the restored keys \ n " ) ; <nl> printf ( " - n , - - dryrun Perform a trial run with no changes made . \ n " ) ; <nl> printf ( " - - log Enables trace file logging for the CLI session . \ n " <nl> - " - - logdir PATH Specifes the output directory for trace files . If \ n " <nl> - " unspecified , defaults to the current directory . Has \ n " <nl> - " no effect unless - - log is specified . 
\ n " ) ; <nl> + " - - logdir PATH Specifies the output directory for trace files . If \ n " <nl> + " unspecified , defaults to the current directory . Has \ n " <nl> + " no effect unless - - log is specified . \ n " ) ; <nl> printf ( " - - loggroup LOG_GROUP \ n " <nl> " Sets the LogGroup field with the specified value for all \ n " <nl> " events in the trace output ( defaults to ` default ' ) . \ n " ) ; <nl> static void printRestoreUsage ( bool devhelp ) { <nl> } <nl> <nl> static void printFastRestoreUsage ( bool devhelp ) { <nl> - printf ( " FoundationDB " FDB_VT_PACKAGE_NAME " ( v " FDB_VT_VERSION " ) \ n " ) ; <nl> - printf ( " Usage : % s ( start | status | abort | wait ) [ OPTIONS ] \ n \ n " , exeRestore . toString ( ) . c_str ( ) ) ; <nl> - / / printf ( " FOLDERS Paths to folders containing the backup files . \ n " ) ; <nl> - printf ( " Options for all commands : \ n \ n " ) ; <nl> - printf ( " - C CONNFILE The path of a file containing the connection string for the \ n " <nl> - " FoundationDB cluster . The default is first the value of the \ n " <nl> - " FDB_CLUSTER_FILE environment variable , then ` . / fdb . cluster ' , \ n " <nl> - " then ` % s ' . \ n " , <nl> - platform : : getDefaultClusterFilePath ( ) . c_str ( ) ) ; <nl> - printf ( " - t TAGNAME The restore tag to act on . Default is ' default ' \ n " ) ; <nl> - printf ( " - - tagname TAGNAME \ n \ n " ) ; <nl> - printf ( " Options for start : \ n \ n " ) ; <nl> - printf ( " - r URL The Backup URL for the restore to read from . \ n " ) ; <nl> - printBackupContainerInfo ( ) ; <nl> - printf ( " - w Wait for the restore to complete before exiting . Prints progress updates . \ n " ) ; <nl> - printf ( " - - waitfordone \ n " ) ; <nl> - printf ( " - k KEYS List of key ranges from the backup to restore \ n " ) ; <nl> - printf ( " - - remove_prefix PREFIX prefix to remove from the restored keys \ n " ) ; <nl> - printf ( " - - add_prefix PREFIX prefix to add to the restored keys \ n " ) ; <nl> - printf ( " - n , - - dry - run Perform a trial run with no changes made . \ n " ) ; <nl> - printf ( " - v DBVERSION The version at which the database will be restored . \ n " ) ; <nl> - printf ( " - h , - - help Display this help and exit . \ n " ) ; <nl> - printf ( " NOTE : Fast restore is still under development . The options may not be fully supported . \ n " ) ; <nl> - <nl> - if ( devhelp ) { <nl> - # ifdef _WIN32 <nl> - printf ( " - q Disable error dialog on crash . \ n " ) ; <nl> - printf ( " - - parentpid PID \ n " ) ; <nl> - printf ( " Specify a process after whose termination to exit . \ n " ) ; <nl> - # endif <nl> - } <nl> - <nl> - printf ( " \ n " <nl> - " KEYS FORMAT : \ " < BEGINKEY > < ENDKEY > \ " [ . . . ] \ n " ) ; <nl> - printf ( " \ n " ) ; <nl> - puts ( BlobCredentialInfo ) ; <nl> - <nl> + printf ( " NOTE : Fast restore aims to support the same fdbrestore option list . \ n " ) ; <nl> + printf ( " But fast restore is still under development . The options may not be fully supported . \ n " ) ; <nl> + printf ( " Supported options are : - - dest_cluster_file , - r , - - waitfordone , - - logdir \ n " ) ; <nl> + printRestoreUsage ( devhelp ) ; <nl> return ; <nl> } <nl> <nl> ACTOR Future < Void > runFastRestoreTool ( Database db , std : : string tagName , std : : str <nl> <nl> printf ( " [ INFO ] runFastRestoreTool : restore_ranges : % d first range : % s \ n " , ranges . size ( ) , <nl> ranges . front ( ) . toString ( ) . c_str ( ) ) ; <nl> + TraceEvent ev ( " FastRestoreTool " ) ; <nl> + ev . 
detail ( " RestoreRanges " , ranges . size ( ) ) ; <nl> + for ( int i = 0 ; i < ranges . size ( ) ; + + i ) { <nl> + ev . detail ( format ( " Range % d " , i ) , ranges [ i ] ) ; <nl> + } <nl> <nl> if ( performRestore ) { <nl> if ( dbVersion = = invalidVersion ) { <nl> int main ( int argc , char * argv [ ] ) { <nl> break ; <nl> <nl> case EXE_FASTRESTORE_TOOL : <nl> - fprintf ( stderr , " ERROR : FDB Fast Restore Agent does not support argument value ` % s ' \ n " , <nl> + fprintf ( stderr , " ERROR : FDB Fast Restore Tool does not support argument value ` % s ' \ n " , <nl> args - > File ( argLoop ) ) ; <nl> printHelpTeaser ( argv [ 0 ] ) ; <nl> return FDB_EXIT_ERROR ; <nl> int main ( int argc , char * argv [ ] ) { <nl> } <nl> break ; <nl> case EXE_FASTRESTORE_TOOL : <nl> - / / TODO : We have not implmented the code commented out in this case <nl> - if ( ! initCluster ( ) ) return FDB_EXIT_ERROR ; <nl> + / / Support - - dest_cluster_file option as fdbrestore does <nl> + if ( dryRun ) { <nl> + if ( restoreType ! = RESTORE_START ) { <nl> + fprintf ( stderr , " Restore dry run only works for ' start ' command \ n " ) ; <nl> + return FDB_EXIT_ERROR ; <nl> + } <nl> + <nl> + / / Must explicitly call trace file options handling if not calling Database : : createDatabase ( ) <nl> + initTraceFile ( ) ; <nl> + } else { <nl> + if ( restoreClusterFileDest . empty ( ) ) { <nl> + fprintf ( stderr , " Restore destination cluster file must be specified explicitly . \ n " ) ; <nl> + return FDB_EXIT_ERROR ; <nl> + } <nl> + <nl> + if ( ! fileExists ( restoreClusterFileDest ) ) { <nl> + fprintf ( stderr , " Restore destination cluster file ' % s ' does not exist . \ n " , <nl> + restoreClusterFileDest . c_str ( ) ) ; <nl> + return FDB_EXIT_ERROR ; <nl> + } <nl> + <nl> + try { <nl> + db = Database : : createDatabase ( restoreClusterFileDest , Database : : API_VERSION_LATEST ) ; <nl> + } catch ( Error & e ) { <nl> + fprintf ( stderr , " Restore destination cluster file ' % s ' invalid : % s \ n " , <nl> + restoreClusterFileDest . c_str ( ) , e . what ( ) ) ; <nl> + return FDB_EXIT_ERROR ; <nl> + } <nl> + } <nl> + / / TODO : We have not implemented the code commented out in this case <nl> switch ( restoreType ) { <nl> case RESTORE_START : <nl> f = stopAfter ( runFastRestoreTool ( db , tagName , restoreContainer , backupKeys , restoreVersion , ! dryRun , <nl> - ! quietDisplay , waitForDone ) ) ; <nl> + ! quietDisplay , waitForDone ) ) ; <nl> break ; <nl> case RESTORE_WAIT : <nl> printf ( " [ TODO ] [ ERROR ] FastRestore does not support RESTORE_WAIT yet ! \ n " ) ; <nl> int main ( int argc , char * argv [ ] ) { <nl> printf ( " [ TODO ] [ ERROR ] FastRestore does not support RESTORE_ABORT yet ! \ n " ) ; <nl> throw restore_error ( ) ; <nl> / / f = stopAfter ( map ( ba . abortRestore ( db , KeyRef ( tagName ) ) , <nl> - / / [ tagName ] ( FileBackupAgent : : ERestoreState s ) - > Void { printf ( " Tag : % s State : % s \ n " , tagName . c_str ( ) , <nl> - / / FileBackupAgent : : restoreStateText ( s ) . toString ( ) . c_str ( ) ) ; return Void ( ) ; <nl> + / / [ tagName ] ( FileBackupAgent : : ERestoreState s ) - > Void { printf ( " Tag : % s State : % s \ n " , <nl> + / / tagName . c_str ( ) , <nl> + / / FileBackupAgent : : restoreStateText ( s ) . toString ( ) . 
c_str ( ) ) ; return Void ( ) ; <nl> / / } ) ) ; <nl> break ; <nl> case RESTORE_STATUS : <nl> int main ( int argc , char * argv [ ] ) { <nl> < < FastAllocator < 1024 > : : pageCount < < " " <nl> < < FastAllocator < 2048 > : : pageCount < < " " <nl> < < FastAllocator < 4096 > : : pageCount < < " " <nl> - < < FastAllocator < 8192 > : : pageCount < < endl ; <nl> + < < FastAllocator < 8192 > : : pageCount < < " " <nl> + < < FastAllocator < 16384 > : : pageCount < < endl ; <nl> <nl> vector < std : : pair < std : : string , const char * > > typeNames ; <nl> for ( auto i = allocInstr . begin ( ) ; i ! = allocInstr . end ( ) ; + + i ) { <nl> mmm a / fdbcli / fdbcli . actor . cpp <nl> ppp b / fdbcli / fdbcli . actor . cpp <nl> void initHelp ( ) { <nl> " kill all | list | < ADDRESS . . . > " , <nl> " attempts to kill one or more processes in the cluster " , <nl> " If no addresses are specified , populates the list of processes which can be killed . Processes cannot be killed before this list has been populated . \ n \ nIf ` all ' is specified , attempts to kill all known processes . \ n \ nIf ` list ' is specified , displays all known processes . This is only useful when the database is unresponsive . \ n \ nFor each IP : port pair in < ADDRESS . . . > , attempt to kill the specified process . " ) ; <nl> + helpMap [ " suspend " ] = CommandHelp ( <nl> + " suspend < SECONDS > < ADDRESS . . . > " , <nl> + " attempts to suspend one or more processes in the cluster " , <nl> + " If no parameters are specified , populates the list of processes which can be suspended . Processes cannot be suspended before this list has been populated . \ n \ nFor each IP : port pair in < ADDRESS . . . > , attempt to suspend the processes for the specified SECONDS after which the process will die . " ) ; <nl> helpMap [ " profile " ] = CommandHelp ( <nl> " profile < client | list | flow | heap > < action > < ARGS > " , <nl> " namespace for all the profiling - related commands . " , <nl> void initHelp ( ) { <nl> " view and control throttled tags " , <nl> " Use ` on ' and ` off ' to manually throttle or unthrottle tags . Use ` enable auto ' or ` disable auto ' to enable or disable automatic tag throttling . Use ` list ' to print the list of throttled tags . \ n " <nl> ) ; <nl> + helpMap [ " cache_range " ] = CommandHelp ( <nl> + " cache_range < set | clear > < BEGINKEY > < ENDKEY > " , <nl> + " Mark a key range to add to or remove from storage caches . " , <nl> + " Use the storage caches to assist in balancing hot read shards . Set the appropriate ranges when experiencing heavy load , and clear them when they are no longer necessary . " <nl> + ) ; <nl> helpMap [ " lock " ] = CommandHelp ( <nl> " lock " , <nl> " lock the database with a randomly generated lockUID " , <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> <nl> return false ; <nl> } else { <nl> - state std : : vector < AddressExclusion > addresses ; <nl> - state std : : set < AddressExclusion > exclusions ; <nl> + state std : : vector < AddressExclusion > exclusionVector ; <nl> + state std : : set < AddressExclusion > exclusionSet ; <nl> bool force = false ; <nl> state bool waitForAllExcluded = true ; <nl> state bool markFailed = false ; <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> printf ( " Do not include the ` : tls ' suffix when naming a process \ n " ) ; <nl> return true ; <nl> } <nl> - addresses . push_back ( a ) ; <nl> - exclusions . 
insert ( a ) ; <nl> + exclusionVector . push_back ( a ) ; <nl> + exclusionSet . insert ( a ) ; <nl> } <nl> } <nl> <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> if ( markFailed ) { <nl> state bool safe ; <nl> try { <nl> - bool _safe = wait ( makeInterruptable ( checkSafeExclusions ( db , addresses ) ) ) ; <nl> + bool _safe = wait ( makeInterruptable ( checkSafeExclusions ( db , exclusionVector ) ) ) ; <nl> safe = _safe ; <nl> } catch ( Error & e ) { <nl> + if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> TraceEvent ( " CheckSafeExclusionsError " ) . error ( e ) ; <nl> safe = false ; <nl> } <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> return true ; <nl> } <nl> NetworkAddress addr = NetworkAddress : : parse ( addrStr ) ; <nl> - bool excluded = ( process . has ( " excluded " ) & & process . last ( ) . get_bool ( ) ) | | addressExcluded ( exclusions , addr ) ; <nl> + bool excluded = <nl> + ( process . has ( " excluded " ) & & process . last ( ) . get_bool ( ) ) | | addressExcluded ( exclusionSet , addr ) ; <nl> ssTotalCount + + ; <nl> if ( excluded ) <nl> ssExcludedCount + + ; <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> } <nl> } <nl> <nl> - wait ( makeInterruptable ( excludeServers ( db , addresses , markFailed ) ) ) ; <nl> + wait ( makeInterruptable ( excludeServers ( db , exclusionVector , markFailed ) ) ) ; <nl> <nl> if ( waitForAllExcluded ) { <nl> printf ( " Waiting for state to be removed from all excluded servers . This may take a while . \ n " ) ; <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> warn . cancel ( ) ; <nl> <nl> state std : : set < NetworkAddress > notExcludedServers = <nl> - wait ( makeInterruptable ( checkForExcludingServers ( db , addresses , waitForAllExcluded ) ) ) ; <nl> + wait ( makeInterruptable ( checkForExcludingServers ( db , exclusionVector , waitForAllExcluded ) ) ) ; <nl> std : : vector < ProcessData > workers = wait ( makeInterruptable ( getWorkers ( db ) ) ) ; <nl> std : : map < IPAddress , std : : set < uint16_t > > workerPorts ; <nl> for ( auto addr : workers ) <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> <nl> / / Print a list of all excluded addresses that don ' t have a corresponding worker <nl> std : : set < AddressExclusion > absentExclusions ; <nl> - for ( auto addr : addresses ) { <nl> + for ( const auto & addr : exclusionVector ) { <nl> auto worker = workerPorts . find ( addr . ip ) ; <nl> if ( worker = = workerPorts . end ( ) ) <nl> absentExclusions . insert ( addr ) ; <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> absentExclusions . insert ( addr ) ; <nl> } <nl> <nl> - for ( auto addr : addresses ) { <nl> - NetworkAddress _addr ( addr . ip , addr . port ) ; <nl> - if ( absentExclusions . find ( addr ) ! = absentExclusions . end ( ) ) { <nl> - if ( addr . port = = 0 ) <nl> + for ( const auto & exclusion : exclusionVector ) { <nl> + if ( absentExclusions . find ( exclusion ) ! = absentExclusions . end ( ) ) { <nl> + if ( exclusion . port = = 0 ) { <nl> printf ( " % s ( Whole machine ) mmm - WARNING : Missing from cluster ! Be sure that you excluded the " <nl> " correct machines before removing them from the cluster ! \ n " , <nl> - addr . ip . toString ( ) . 
c_str ( ) ) ; <nl> - else <nl> + exclusion . ip . toString ( ) . c_str ( ) ) ; <nl> + } else { <nl> printf ( " % s mmm - WARNING : Missing from cluster ! Be sure that you excluded the correct processes " <nl> " before removing them from the cluster ! \ n " , <nl> - addr . toString ( ) . c_str ( ) ) ; <nl> - } else if ( notExcludedServers . find ( _addr ) ! = notExcludedServers . end ( ) ) { <nl> - if ( addr . port = = 0 ) <nl> + exclusion . toString ( ) . c_str ( ) ) ; <nl> + } <nl> + } else if ( std : : any_of ( notExcludedServers . begin ( ) , notExcludedServers . end ( ) , <nl> + [ & ] ( const NetworkAddress & a ) { return addressExcluded ( { exclusion } , a ) ; } ) ) { <nl> + if ( exclusion . port = = 0 ) { <nl> printf ( " % s ( Whole machine ) mmm - WARNING : Exclusion in progress ! It is not safe to remove this " <nl> " machine from the cluster \ n " , <nl> - addr . ip . toString ( ) . c_str ( ) ) ; <nl> - else <nl> + exclusion . ip . toString ( ) . c_str ( ) ) ; <nl> + } else { <nl> printf ( " % s mmm - WARNING : Exclusion in progress ! It is not safe to remove this process from the " <nl> " cluster \ n " , <nl> - addr . toString ( ) . c_str ( ) ) ; <nl> + exclusion . toString ( ) . c_str ( ) ) ; <nl> + } <nl> } else { <nl> - if ( addr . port = = 0 ) <nl> + if ( exclusion . port = = 0 ) { <nl> printf ( " % s ( Whole machine ) mmm - Successfully excluded . It is now safe to remove this machine " <nl> " from the cluster . \ n " , <nl> - addr . ip . toString ( ) . c_str ( ) ) ; <nl> - else <nl> + exclusion . ip . toString ( ) . c_str ( ) ) ; <nl> + } else { <nl> printf ( <nl> " % s mmm - Successfully excluded . It is now safe to remove this process from the cluster . \ n " , <nl> - addr . toString ( ) . c_str ( ) ) ; <nl> + exclusion . toString ( ) . c_str ( ) ) ; <nl> + } <nl> } <nl> } <nl> <nl> bool foundCoordinator = false ; <nl> auto ccs = ClusterConnectionFile ( ccf - > getFilename ( ) ) . getConnectionString ( ) ; <nl> for ( auto & c : ccs . coordinators ( ) ) { <nl> - if ( std : : count ( addresses . begin ( ) , addresses . end ( ) , AddressExclusion ( c . ip , c . port ) ) | | <nl> - std : : count ( addresses . begin ( ) , addresses . end ( ) , AddressExclusion ( c . ip ) ) ) { <nl> + if ( std : : count ( exclusionVector . begin ( ) , exclusionVector . end ( ) , AddressExclusion ( c . ip , c . port ) ) | | <nl> + std : : count ( exclusionVector . begin ( ) , exclusionVector . end ( ) , AddressExclusion ( c . ip ) ) ) { <nl> printf ( " WARNING : % s is a coordinator ! \ n " , c . toString ( ) . c_str ( ) ) ; <nl> foundCoordinator = true ; <nl> } <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> printf ( " \ n " ) ; <nl> } else if ( tokencmp ( tokens [ 1 ] , " all " ) ) { <nl> for ( auto it : address_interface ) { <nl> - tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_worker " ) , it . second . first ) ; <nl> + if ( db - > apiVersionAtLeast ( 700 ) ) <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( it . second . first , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( ) ) ; <nl> + else <nl> + tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_worker " ) , it . second . first ) ; <nl> } <nl> if ( address_interface . size ( ) = = 0 ) { <nl> printf ( " ERROR : no processes to kill . You must run the ` kill ’ command before running ` kill all ’ . \ n " ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> <nl> if ( ! is_error ) { <nl> for ( int i = 1 ; i < tokens . 
size ( ) ; i + + ) { <nl> - tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_worker " ) , address_interface [ tokens [ i ] ] . first ) ; <nl> + if ( db - > apiVersionAtLeast ( 700 ) ) <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( <nl> + address_interface [ tokens [ i ] ] . first , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( ) ) ; <nl> + else <nl> + tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_worker " ) , <nl> + address_interface [ tokens [ i ] ] . first ) ; <nl> } <nl> printf ( " Attempted to kill % zu processes \ n " , tokens . size ( ) - 1 ) ; <nl> } <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> continue ; <nl> } <nl> <nl> + if ( tokencmp ( tokens [ 0 ] , " suspend " ) ) { <nl> + getTransaction ( db , tr , options , intrans ) ; <nl> + if ( tokens . size ( ) = = 1 ) { <nl> + Standalone < RangeResultRef > kvs = wait ( <nl> + makeInterruptable ( tr - > getRange ( KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) , <nl> + LiteralStringRef ( " \ xff \ xff / worker_interfaces0 " ) ) , <nl> + CLIENT_KNOBS - > TOO_MANY ) ) ) ; <nl> + ASSERT ( ! kvs . more ) ; <nl> + Reference < FlowLock > connectLock ( new FlowLock ( CLIENT_KNOBS - > CLI_CONNECT_PARALLELISM ) ) ; <nl> + std : : vector < Future < Void > > addInterfs ; <nl> + for ( auto it : kvs ) { <nl> + addInterfs . push_back ( addInterface ( & address_interface , connectLock , it ) ) ; <nl> + } <nl> + wait ( waitForAll ( addInterfs ) ) ; <nl> + if ( address_interface . size ( ) = = 0 ) { <nl> + printf ( " \ nNo addresses can be suspended . \ n " ) ; <nl> + } else if ( address_interface . size ( ) = = 1 ) { <nl> + printf ( " \ nThe following address can be suspended : \ n " ) ; <nl> + } else { <nl> + printf ( " \ nThe following % zu addresses can be suspended : \ n " , address_interface . size ( ) ) ; <nl> + } <nl> + for ( auto it : address_interface ) { <nl> + printf ( " % s \ n " , printable ( it . first ) . c_str ( ) ) ; <nl> + } <nl> + printf ( " \ n " ) ; <nl> + } else if ( tokens . size ( ) = = 2 ) { <nl> + printUsage ( tokens [ 0 ] ) ; <nl> + is_error = true ; <nl> + } else { <nl> + for ( int i = 2 ; i < tokens . size ( ) ; i + + ) { <nl> + if ( ! address_interface . count ( tokens [ i ] ) ) { <nl> + printf ( " ERROR : process ` % s ' not recognized . \ n " , printable ( tokens [ i ] ) . c_str ( ) ) ; <nl> + is_error = true ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( ! is_error ) { <nl> + double seconds ; <nl> + int n = 0 ; <nl> + auto secondsStr = tokens [ 1 ] . toString ( ) ; <nl> + if ( sscanf ( secondsStr . c_str ( ) , " % lf % n " , & seconds , & n ) ! = 1 | | n ! = secondsStr . size ( ) ) { <nl> + printUsage ( tokens [ 0 ] ) ; <nl> + is_error = true ; <nl> + } else { <nl> + int64_t timeout_ms = seconds * 1000 ; <nl> + tr - > setOption ( FDBTransactionOptions : : TIMEOUT , StringRef ( ( uint8_t * ) & timeout_ms , sizeof ( int64_t ) ) ) ; <nl> + for ( int i = 2 ; i < tokens . size ( ) ; i + + ) { <nl> + if ( db - > apiVersionAtLeast ( 700 ) ) <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( <nl> + address_interface [ tokens [ i ] ] . first , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( false , false , seconds ) ) ; <nl> + else <nl> + tr - > set ( LiteralStringRef ( " \ xff \ xff / suspend_worker " ) , <nl> + address_interface [ tokens [ i ] ] . first ) ; <nl> + } <nl> + printf ( " Attempted to suspend % zu processes \ n " , tokens . 
size ( ) - 2 ) ; <nl> + } <nl> + } <nl> + } <nl> + continue ; <nl> + } <nl> + <nl> if ( tokencmp ( tokens [ 0 ] , " force_recovery_with_data_loss " ) ) { <nl> if ( tokens . size ( ) ! = 2 ) { <nl> printUsage ( tokens [ 0 ] ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> continue ; <nl> } <nl> getTransaction ( db , tr , options , intrans ) ; <nl> - Standalone < RangeResultRef > kvs = wait ( makeInterruptable ( <nl> - tr - > getRange ( KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / worker_interfaces " ) , <nl> - LiteralStringRef ( " \ xff \ xff \ xff " ) ) , <nl> - 1 ) ) ) ; <nl> + Standalone < RangeResultRef > kvs = wait ( <nl> + makeInterruptable ( tr - > getRange ( KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) , <nl> + LiteralStringRef ( " \ xff \ xff / worker_interfaces0 " ) ) , <nl> + CLIENT_KNOBS - > TOO_MANY ) ) ) ; <nl> + ASSERT ( ! kvs . more ) ; <nl> std : : map < Key , ClientWorkerInterface > interfaces ; <nl> for ( const auto & pair : kvs ) { <nl> - auto ip_port = pair . key . endsWith ( LiteralStringRef ( " : tls " ) ) ? pair . key . removeSuffix ( LiteralStringRef ( " : tls " ) ) : pair . key ; <nl> + auto ip_port = ( pair . key . endsWith ( LiteralStringRef ( " : tls " ) ) <nl> + ? pair . key . removeSuffix ( LiteralStringRef ( " : tls " ) ) <nl> + : pair . key ) <nl> + . removePrefix ( LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) ) ; <nl> interfaces . emplace ( ip_port , BinaryReader : : fromStringRef < ClientWorkerInterface > ( pair . value , IncludeVersion ( ) ) ) ; <nl> } <nl> state Key ip_port = tokens [ 2 ] ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> if ( tokencmp ( tokens [ 0 ] , " expensive_data_check " ) ) { <nl> getTransaction ( db , tr , options , intrans ) ; <nl> if ( tokens . size ( ) = = 1 ) { <nl> - Standalone < RangeResultRef > kvs = wait ( makeInterruptable ( tr - > getRange ( KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / worker_interfaces " ) , LiteralStringRef ( " \ xff \ xff \ xff " ) ) , 1 ) ) ) ; <nl> + Standalone < RangeResultRef > kvs = wait ( <nl> + makeInterruptable ( tr - > getRange ( KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) , <nl> + LiteralStringRef ( " \ xff \ xff / worker_interfaces0 " ) ) , <nl> + CLIENT_KNOBS - > TOO_MANY ) ) ) ; <nl> + ASSERT ( ! kvs . more ) ; <nl> Reference < FlowLock > connectLock ( new FlowLock ( CLIENT_KNOBS - > CLI_CONNECT_PARALLELISM ) ) ; <nl> std : : vector < Future < Void > > addInterfs ; <nl> for ( auto it : kvs ) { <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> printf ( " \ n " ) ; <nl> } else if ( tokencmp ( tokens [ 1 ] , " all " ) ) { <nl> for ( auto it : address_interface ) { <nl> - tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_and_check_worker " ) , it . second . first ) ; <nl> + if ( db - > apiVersionAtLeast ( 700 ) ) <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( it . second . first , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( false , true ) ) ; <nl> + else <nl> + tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_and_check_worker " ) , it . second . first ) ; <nl> } <nl> if ( address_interface . size ( ) = = 0 ) { <nl> printf ( " ERROR : no processes to check . You must run the ` expensive_data_check ’ command before running ` expensive_data_check all ’ . \ n " ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> <nl> if ( ! 
is_error ) { <nl> for ( int i = 1 ; i < tokens . size ( ) ; i + + ) { <nl> - tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_and_check_worker " ) , address_interface [ tokens [ i ] ] . first ) ; <nl> + if ( db - > apiVersionAtLeast ( 700 ) ) <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( <nl> + address_interface [ tokens [ i ] ] . first , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( false , true ) ) ; <nl> + else <nl> + tr - > set ( LiteralStringRef ( " \ xff \ xff / reboot_and_check_worker " ) , <nl> + address_interface [ tokens [ i ] ] . first ) ; <nl> } <nl> printf ( " Attempted to kill and check % zu processes \ n " , tokens . size ( ) - 1 ) ; <nl> } <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> } <nl> continue ; <nl> } <nl> + if ( tokencmp ( tokens [ 0 ] , " cache_range " ) ) { <nl> + if ( tokens . size ( ) ! = 4 ) { <nl> + printUsage ( tokens [ 0 ] ) ; <nl> + is_error = true ; <nl> + continue ; <nl> + } <nl> + KeyRangeRef cacheRange ( tokens [ 2 ] , tokens [ 3 ] ) ; <nl> + if ( tokencmp ( tokens [ 1 ] , " set " ) ) { <nl> + wait ( makeInterruptable ( addCachedRange ( db , cacheRange ) ) ) ; <nl> + } else if ( tokencmp ( tokens [ 1 ] , " clear " ) ) { <nl> + wait ( makeInterruptable ( removeCachedRange ( db , cacheRange ) ) ) ; <nl> + } else { <nl> + printUsage ( tokens [ 0 ] ) ; <nl> + is_error = true ; <nl> + } <nl> + continue ; <nl> + } <nl> + <nl> <nl> printf ( " ERROR : Unknown command ` % s ' . Try ` help ' ? \ n " , formatStringRef ( tokens [ 0 ] ) . c_str ( ) ) ; <nl> is_error = true ; <nl> mmm a / fdbclient / AsyncFileBlobStore . actor . h <nl> ppp b / fdbclient / AsyncFileBlobStore . actor . h <nl> class AsyncFileBlobStoreWrite : public IAsyncFile , public ReferenceCounted < Async <nl> virtual void delref ( ) { ReferenceCounted < AsyncFileBlobStoreWrite > : : delref ( ) ; } <nl> <nl> struct Part : ReferenceCounted < Part > { <nl> - Part ( int n ) : number ( n ) , writer ( content . getWriteBuffer ( ) , NULL , Unversioned ( ) ) , length ( 0 ) { <nl> + Part ( int n , int minSize ) : number ( n ) , writer ( content . getWriteBuffer ( minSize ) , NULL , Unversioned ( ) ) , length ( 0 ) { <nl> etag = std : : string ( ) ; <nl> : : MD5_Init ( & content_md5_buf ) ; <nl> } <nl> class AsyncFileBlobStoreWrite : public IAsyncFile , public ReferenceCounted < Async <nl> <nl> / / Make a new part to write to <nl> if ( startNew ) <nl> - f - > m_parts . push_back ( Reference < Part > ( new Part ( f - > m_parts . size ( ) + 1 ) ) ) ; <nl> + f - > m_parts . push_back ( Reference < Part > ( new Part ( f - > m_parts . size ( ) + 1 , f - > m_bstore - > knobs . multipart_min_part_size ) ) ) ; <nl> <nl> return Void ( ) ; <nl> } <nl> class AsyncFileBlobStoreWrite : public IAsyncFile , public ReferenceCounted < Async <nl> : m_bstore ( bstore ) , m_bucket ( bucket ) , m_object ( object ) , m_cursor ( 0 ) , m_concurrentUploads ( bstore - > knobs . concurrent_writes_per_file ) { <nl> <nl> / / Add first part <nl> - m_parts . push_back ( Reference < Part > ( new Part ( 1 ) ) ) ; <nl> + m_parts . push_back ( Reference < Part > ( new Part ( 1 , m_bstore - > knobs . multipart_min_part_size ) ) ) ; <nl> } <nl> <nl> } ; <nl> mmm a / fdbclient / BackupAgent . actor . h <nl> ppp b / fdbclient / BackupAgent . actor . h <nl> struct StringRefReader { <nl> <nl> / / Functions for consuming big endian ( network byte order ) integers . <nl> / / Consumes a big endian number , swaps it to little endian , and returns it . 
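// --- Illustrative sketch (not part of the recorded diff above): how network-byte-order
// --- (big-endian) integers such as those returned by consumeNetworkInt32 / consumeNetworkUInt32
// --- can be decoded portably. The helper names below are hypothetical; this is not the
// --- flow/FDB implementation and assumes only standard C++ with <cstdint>.
#include <cstdint>

// Reinterpret 4 big-endian bytes as a host-order uint32_t, independent of host endianness.
inline uint32_t fromBigEndian32(const unsigned char* p) {
    return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
           (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
}

// Consume a big-endian int32 from a buffer and advance the cursor,
// mirroring the reader pattern of the StringRefReader methods shown above.
inline int32_t consumeNetworkInt32Sketch(const unsigned char*& cursor) {
    int32_t v = int32_t(fromBigEndian32(cursor));
    cursor += sizeof(int32_t);
    return v;
}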
<nl> - const int32_t consumeNetworkInt32 ( ) { return ( int32_t ) bigEndian32 ( ( uint32_t ) consume < int32_t > ( ) ) ; } <nl> - const uint32_t consumeNetworkUInt32 ( ) { return bigEndian32 ( consume < uint32_t > ( ) ) ; } <nl> + int32_t consumeNetworkInt32 ( ) { return ( int32_t ) bigEndian32 ( ( uint32_t ) consume < int32_t > ( ) ) ; } <nl> + uint32_t consumeNetworkUInt32 ( ) { return bigEndian32 ( consume < uint32_t > ( ) ) ; } <nl> <nl> / / Convert big Endian value ( e . g . , encoded in log file ) into a littleEndian uint64_t value . <nl> int64_t consumeNetworkInt64 ( ) { return ( int64_t ) bigEndian64 ( ( uint32_t ) consume < int64_t > ( ) ) ; } <nl> mmm a / fdbclient / BackupContainer . actor . cpp <nl> ppp b / fdbclient / BackupContainer . actor . cpp <nl> class BackupContainerFileSystem : public IBackupContainer { <nl> <nl> state std : : vector < LogFile > logs ; <nl> state std : : vector < LogFile > plogs ; <nl> + TraceEvent ( " BackupContainerListFiles " ) . detail ( " URL " , bc - > getURL ( ) ) ; <nl> + <nl> wait ( store ( logs , bc - > listLogFiles ( scanBegin , scanEnd , false ) ) & & <nl> store ( plogs , bc - > listLogFiles ( scanBegin , scanEnd , true ) ) & & <nl> store ( desc . snapshots , bc - > listKeyspaceSnapshots ( ) ) ) ; <nl> <nl> + TraceEvent ( " BackupContainerListFiles " ) <nl> + . detail ( " URL " , bc - > getURL ( ) ) <nl> + . detail ( " LogFiles " , logs . size ( ) ) <nl> + . detail ( " PLogsFiles " , plogs . size ( ) ) <nl> + . detail ( " Snapshots " , desc . snapshots . size ( ) ) ; <nl> + <nl> if ( plogs . size ( ) > 0 ) { <nl> desc . partitioned = true ; <nl> logs . swap ( plogs ) ; <nl> class BackupContainerFileSystem : public IBackupContainer { <nl> } <nl> <nl> / / for each range in tags , check all tags from 1 are continouous <nl> - for ( const auto [ beginEnd , count ] : tags ) { <nl> + for ( const auto & [ beginEnd , count ] : tags ) { <nl> for ( int i = 1 ; i < count ; i + + ) { <nl> if ( ! isContinuous ( files , tagIndices [ i ] , beginEnd . first , std : : min ( beginEnd . second - 1 , end ) , nullptr ) ) { <nl> TraceEvent ( SevWarn , " BackupFileNotContinuous " ) <nl> class BackupContainerFileSystem : public IBackupContainer { <nl> <nl> / / for each range in tags , check all partitions from 1 are continouous <nl> Version lastEnd = begin ; <nl> - for ( const auto [ beginEnd , count ] : tags ) { <nl> + for ( const auto & [ beginEnd , count ] : tags ) { <nl> Version tagEnd = beginEnd . second ; / / This range ' s minimum continous partition version <nl> for ( int i = 1 ; i < count ; i + + ) { <nl> std : : map < std : : pair < Version , Version > , int > rangeTags ; <nl> class BackupContainerLocalDirectory : public BackupContainerFileSystem , Referenc <nl> std : : string uniquePath = fullPath + " . " + deterministicRandom ( ) - > randomUniqueID ( ) . toString ( ) + " . lnk " ; <nl> unlink ( uniquePath . c_str ( ) ) ; <nl> ASSERT ( symlink ( basename ( path ) . c_str ( ) , uniquePath . c_str ( ) ) = = 0 ) ; <nl> - fullPath = uniquePath = uniquePath ; <nl> + fullPath = uniquePath ; <nl> } <nl> / / Opening cached mode forces read / write mode at a lower level , overriding the readonly request . So cached mode <nl> / / can ' t be used because backup files are read - only . Cached mode can only help during restore task retries handled <nl> mmm a / fdbclient / BlobStore . actor . cpp <nl> ppp b / fdbclient / BlobStore . actor . 
cpp <nl> ACTOR Future < Void > writeEntireFileFromBuffer_impl ( Reference < BlobStoreEndpoint > b <nl> <nl> ACTOR Future < Void > writeEntireFile_impl ( Reference < BlobStoreEndpoint > bstore , std : : string bucket , std : : string object , std : : string content ) { <nl> state UnsentPacketQueue packets ; <nl> - PacketWriter pw ( packets . getWriteBuffer ( ) , NULL , Unversioned ( ) ) ; <nl> + PacketWriter pw ( packets . getWriteBuffer ( content . size ( ) ) , NULL , Unversioned ( ) ) ; <nl> pw . serializeBytes ( content ) ; <nl> if ( content . size ( ) > bstore - > knobs . multipart_max_part_size ) <nl> throw file_too_large ( ) ; <nl> ACTOR Future < Void > finishMultiPartUpload_impl ( Reference < BlobStoreEndpoint > bstor <nl> <nl> std : : string resource = format ( " / % s / % s ? uploadId = % s " , bucket . c_str ( ) , object . c_str ( ) , uploadID . c_str ( ) ) ; <nl> HTTP : : Headers headers ; <nl> - PacketWriter pw ( part_list . getWriteBuffer ( ) , NULL , Unversioned ( ) ) ; <nl> + PacketWriter pw ( part_list . getWriteBuffer ( manifest . size ( ) ) , NULL , Unversioned ( ) ) ; <nl> pw . serializeBytes ( manifest ) ; <nl> Reference < HTTP : : Response > r = wait ( bstore - > doRequest ( " POST " , resource , headers , & part_list , manifest . size ( ) , { 200 } ) ) ; <nl> / / TODO : In the event that the client times out just before the request completes ( so the client is unaware ) then the next retry <nl> mmm a / fdbclient / CMakeLists . txt <nl> ppp b / fdbclient / CMakeLists . txt <nl> set ( FDBCLIENT_SRCS <nl> TagThrottle . h <nl> TaskBucket . actor . cpp <nl> TaskBucket . h <nl> - ThreadSafeTransaction . actor . cpp <nl> + ThreadSafeTransaction . cpp <nl> ThreadSafeTransaction . h <nl> Tuple . cpp <nl> Tuple . h <nl> mmm a / fdbclient / ClientWorkerInterface . h <nl> ppp b / fdbclient / ClientWorkerInterface . h <nl> struct RebootRequest { <nl> constexpr static FileIdentifier file_identifier = 11913957 ; <nl> bool deleteData ; <nl> bool checkData ; <nl> - uint32_t waitForDuration ; <nl> + uint32_t waitForDuration ; / / seconds <nl> <nl> explicit RebootRequest ( bool deleteData = false , bool checkData = false , uint32_t waitForDuration = 0 ) <nl> : deleteData ( deleteData ) , checkData ( checkData ) , waitForDuration ( waitForDuration ) { } <nl> mmm a / fdbclient / CommitTransaction . h <nl> ppp b / fdbclient / CommitTransaction . h <nl> struct MutationRef { <nl> <nl> MutationRef ( ) { } <nl> MutationRef ( Type t , StringRef a , StringRef b ) : type ( t ) , param1 ( a ) , param2 ( b ) { } <nl> + MutationRef ( Arena & to , Type t , StringRef a , StringRef b ) : type ( t ) , param1 ( to , a ) , param2 ( to , b ) { } <nl> MutationRef ( Arena & to , const MutationRef & from ) : type ( from . type ) , param1 ( to , from . param1 ) , param2 ( to , from . param2 ) { } <nl> int totalSize ( ) const { return OVERHEAD_BYTES + param1 . size ( ) + param2 . size ( ) ; } <nl> int expectedSize ( ) const { return param1 . size ( ) + param2 . size ( ) ; } <nl> mmm a / fdbclient / DatabaseContext . h <nl> ppp b / fdbclient / DatabaseContext . 
h <nl> class DatabaseContext : public ReferenceCounted < DatabaseContext > , public FastAll <nl> double detailedHealthMetricsLastUpdated ; <nl> <nl> UniqueOrderedOptionList < FDBTransactionOptions > transactionDefaults ; <nl> + <nl> Future < Void > cacheListMonitor ; <nl> AsyncTrigger updateCache ; <nl> - std : : vector < std : : unique_ptr < SpecialKeyRangeBaseImpl > > specialKeySpaceModules ; <nl> + std : : vector < std : : unique_ptr < SpecialKeyRangeReadImpl > > specialKeySpaceModules ; <nl> std : : unique_ptr < SpecialKeySpace > specialKeySpace ; <nl> - void registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE module , std : : unique_ptr < SpecialKeyRangeBaseImpl > impl ) ; <nl> + void registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE module , SpecialKeySpace : : IMPLTYPE type , <nl> + std : : unique_ptr < SpecialKeyRangeReadImpl > impl ) ; <nl> <nl> static bool debugUseTags ; <nl> static const std : : vector < std : : string > debugTransactionTagChoices ; <nl> mmm a / fdbclient / FDBTypes . h <nl> ppp b / fdbclient / FDBTypes . h <nl> struct TLogVersion { <nl> V2 = 2 , / / 6 . 0 <nl> V3 = 3 , / / 6 . 1 <nl> V4 = 4 , / / 6 . 2 <nl> - V5 = 5 , / / 7 . 0 <nl> + V5 = 5 , / / 6 . 3 <nl> MIN_SUPPORTED = V2 , <nl> MAX_SUPPORTED = V5 , <nl> - MIN_RECRUITABLE = V3 , <nl> - DEFAULT = V4 , <nl> + MIN_RECRUITABLE = V4 , <nl> + DEFAULT = V5 , <nl> } version ; <nl> <nl> TLogVersion ( ) : version ( UNSET ) { } <nl> mmm a / fdbclient / HTTP . actor . cpp <nl> ppp b / fdbclient / HTTP . actor . cpp <nl> namespace HTTP { <nl> send_start = timer ( ) ; <nl> <nl> loop { <nl> - wait ( conn - > onWritable ( ) ) ; <nl> - wait ( delay ( 0 , TaskPriority : : WriteSocket ) ) ; <nl> - <nl> / / If we already got a response , before finishing sending the request , then close the connection , <nl> / / set the Connection header to " close " as a hint to the caller that this connection can ' t be used <nl> / / again , and break out of the send loop . <nl> namespace HTTP { <nl> pContent - > sent ( len ) ; <nl> if ( pContent - > empty ( ) ) <nl> break ; <nl> + <nl> + wait ( conn - > onWritable ( ) ) ; <nl> + wait ( yield ( TaskPriority : : WriteSocket ) ) ; <nl> } <nl> <nl> wait ( responseReading ) ; <nl> mmm a / fdbclient / Knobs . cpp <nl> ppp b / fdbclient / Knobs . cpp <nl> void ClientKnobs : : initialize ( bool randomize ) { <nl> init ( STORAGE_METRICS_TOO_MANY_SHARDS_DELAY , 15 . 0 ) ; <nl> init ( AGGREGATE_HEALTH_METRICS_MAX_STALENESS , 0 . 5 ) ; <nl> init ( DETAILED_HEALTH_METRICS_MAX_STALENESS , 5 . 0 ) ; <nl> - init ( TAG_ENCODE_KEY_SERVERS , false ) ; if ( randomize & & BUGGIFY ) TAG_ENCODE_KEY_SERVERS = true ; <nl> + init ( TAG_ENCODE_KEY_SERVERS , true ) ; if ( randomize & & BUGGIFY ) TAG_ENCODE_KEY_SERVERS = false ; <nl> <nl> / / KeyRangeMap <nl> init ( KRM_GET_RANGE_LIMIT , 1e5 ) ; if ( randomize & & BUGGIFY ) KRM_GET_RANGE_LIMIT = 10 ; <nl> mmm a / fdbclient / ManagementAPI . actor . cpp <nl> ppp b / fdbclient / ManagementAPI . actor . cpp <nl> <nl> # include " fdbrpc / Replication . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> - ACTOR static Future < vector < AddressExclusion > > getExcludedServers ( Transaction * tr ) ; <nl> <nl> bool isInteger ( const std : : string & s ) { <nl> if ( s . 
empty ( ) ) return false ; <nl> struct AutoQuorumChange : IQuorumChange { <nl> } ; <nl> Reference < IQuorumChange > autoQuorumChange ( int desired ) { return Reference < IQuorumChange > ( new AutoQuorumChange ( desired ) ) ; } <nl> <nl> - ACTOR Future < Void > excludeServers ( Database cx , vector < AddressExclusion > servers , bool failed ) { <nl> - state Transaction tr ( cx ) ; <nl> - state Key versionKey = BinaryWriter : : toValue ( deterministicRandom ( ) - > randomUniqueID ( ) , Unversioned ( ) ) ; <nl> - state std : : string excludeVersionKey = deterministicRandom ( ) - > randomUniqueID ( ) . toString ( ) ; <nl> + void excludeServers ( Transaction & tr , vector < AddressExclusion > & servers , bool failed ) { <nl> + tr . setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> + tr . setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; <nl> + tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> + tr . setOption ( FDBTransactionOptions : : USE_PROVISIONAL_PROXIES ) ; <nl> + std : : string excludeVersionKey = deterministicRandom ( ) - > randomUniqueID ( ) . toString ( ) ; <nl> + auto serversVersionKey = failed ? failedServersVersionKey : excludedServersVersionKey ; <nl> + tr . addReadConflictRange ( singleKeyRange ( serversVersionKey ) ) ; / / To conflict with parallel includeServers <nl> + tr . set ( serversVersionKey , excludeVersionKey ) ; <nl> + for ( auto & s : servers ) { <nl> + if ( failed ) { <nl> + tr . set ( encodeFailedServersKey ( s ) , StringRef ( ) ) ; <nl> + } else { <nl> + tr . set ( encodeExcludedServersKey ( s ) , StringRef ( ) ) ; <nl> + } <nl> + } <nl> + TraceEvent ( " ExcludeServersCommit " ) . detail ( " Servers " , describe ( servers ) ) . detail ( " ExcludeFailed " , failed ) ; <nl> + } <nl> <nl> - loop { <nl> - try { <nl> - tr . setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> - tr . setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; <nl> - tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> - tr . setOption ( FDBTransactionOptions : : USE_PROVISIONAL_PROXIES ) ; <nl> - auto serversVersionKey = failed ? failedServersVersionKey : excludedServersVersionKey ; <nl> - tr . addReadConflictRange ( singleKeyRange ( serversVersionKey ) ) ; / / To conflict with parallel includeServers <nl> - tr . set ( serversVersionKey , excludeVersionKey ) ; <nl> - for ( auto & s : servers ) { <nl> - if ( failed ) { <nl> - tr . set ( encodeFailedServersKey ( s ) , StringRef ( ) ) ; <nl> - } else { <nl> - tr . set ( encodeExcludedServersKey ( s ) , StringRef ( ) ) ; <nl> + ACTOR Future < Void > excludeServers ( Database cx , vector < AddressExclusion > servers , bool failed ) { <nl> + if ( cx - > apiVersionAtLeast ( 700 ) ) { <nl> + state ReadYourWritesTransaction ryw ( cx ) ; <nl> + loop { <nl> + try { <nl> + ryw . setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> + ryw . set ( SpecialKeySpace : : getManagementApiCommandOptionSpecialKey ( failed ? " failed " : " excluded " , " force " ) , ValueRef ( ) ) ; <nl> + for ( auto & s : servers ) { <nl> + Key addr = failed ? SpecialKeySpace : : getManagementApiCommandPrefix ( " failed " ) . withSuffix ( s . toString ( ) ) <nl> + : SpecialKeySpace : : getManagementApiCommandPrefix ( " exclude " ) . withSuffix ( s . toString ( ) ) ; <nl> + ryw . set ( addr , ValueRef ( ) ) ; <nl> } <nl> + TraceEvent ( " ExcludeServersSpecialKeySpaceCommit " ) . detail ( " Servers " , describe ( servers ) ) . 
detail ( " ExcludeFailed " , failed ) ; <nl> + wait ( ryw . commit ( ) ) ; <nl> + return Void ( ) ; <nl> + } catch ( Error & e ) { <nl> + wait ( ryw . onError ( e ) ) ; <nl> + } <nl> + } <nl> + } else { <nl> + state Transaction tr ( cx ) ; <nl> + loop { <nl> + try { <nl> + excludeServers ( tr , servers , failed ) ; <nl> + wait ( tr . commit ( ) ) ; <nl> + return Void ( ) ; <nl> + } catch ( Error & e ) { <nl> + wait ( tr . onError ( e ) ) ; <nl> } <nl> - <nl> - TraceEvent ( " ExcludeServersCommit " ) . detail ( " Servers " , describe ( servers ) ) . detail ( " ExcludeFailed " , failed ) ; <nl> - <nl> - wait ( tr . commit ( ) ) ; <nl> - return Void ( ) ; <nl> - } catch ( Error & e ) { <nl> - wait ( tr . onError ( e ) ) ; <nl> } <nl> } <nl> } <nl> <nl> ACTOR Future < Void > includeServers ( Database cx , vector < AddressExclusion > servers , bool failed ) { <nl> - state Transaction tr ( cx ) ; <nl> state std : : string versionKey = deterministicRandom ( ) - > randomUniqueID ( ) . toString ( ) ; <nl> - loop { <nl> - try { <nl> - tr . setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> - tr . setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; <nl> - tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> - tr . setOption ( FDBTransactionOptions : : USE_PROVISIONAL_PROXIES ) ; <nl> - <nl> - / / includeServers might be used in an emergency transaction , so make sure it is retry - self - conflicting and CAUSAL_WRITE_RISKY <nl> - tr . setOption ( FDBTransactionOptions : : CAUSAL_WRITE_RISKY ) ; <nl> - if ( failed ) { <nl> - tr . addReadConflictRange ( singleKeyRange ( failedServersVersionKey ) ) ; <nl> - tr . set ( failedServersVersionKey , versionKey ) ; <nl> - } else { <nl> - tr . addReadConflictRange ( singleKeyRange ( excludedServersVersionKey ) ) ; <nl> - tr . set ( excludedServersVersionKey , versionKey ) ; <nl> - } <nl> - <nl> - for ( auto & s : servers ) { <nl> - if ( ! s . isValid ( ) ) { <nl> - if ( failed ) { <nl> - tr . clear ( failedServersKeys ) ; <nl> + if ( cx - > apiVersionAtLeast ( 700 ) ) { <nl> + state ReadYourWritesTransaction ryw ( cx ) ; <nl> + loop { <nl> + try { <nl> + ryw . setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> + for ( auto & s : servers ) { <nl> + if ( ! s . isValid ( ) ) { <nl> + if ( failed ) { <nl> + ryw . clear ( SpecialKeySpace : : getManamentApiCommandRange ( " failed " ) ) ; <nl> + } else { <nl> + ryw . clear ( SpecialKeySpace : : getManamentApiCommandRange ( " exclude " ) ) ; <nl> + } <nl> } else { <nl> - tr . clear ( excludedServersKeys ) ; <nl> + Key addr = failed ? SpecialKeySpace : : getManagementApiCommandPrefix ( " failed " ) . withSuffix ( s . toString ( ) ) <nl> + : SpecialKeySpace : : getManagementApiCommandPrefix ( " exclude " ) . withSuffix ( s . toString ( ) ) ; <nl> + ryw . clear ( addr ) ; <nl> + / / Eliminate both any ip - level exclusion ( 1 . 2 . 3 . 4 ) and any <nl> + / / port - level exclusions ( 1 . 2 . 3 . 4 : 5 ) <nl> + / / The range [ ' IP ' , ' IP ; ' ] was originally deleted . ' ; ' is <nl> + / / char ( ' : ' + 1 ) . This does not work , as other for all <nl> + / / x between 0 and 9 , ' IPx ' will also be in this range . <nl> + / / <nl> + / / This is why we now make two clears : first only of the ip <nl> + / / address , the second will delete all ports . <nl> + if ( s . isWholeMachine ( ) ) <nl> + ryw . clear ( KeyRangeRef ( addr . withSuffix ( LiteralStringRef ( " : " ) ) , addr . withSuffix ( LiteralStringRef ( " ; " ) ) ) ) ; <nl> } <nl> - } else if ( s . 
isWholeMachine ( ) ) { <nl> - / / Eliminate both any ip - level exclusion ( 1 . 2 . 3 . 4 ) and any <nl> - / / port - level exclusions ( 1 . 2 . 3 . 4 : 5 ) <nl> - / / The range [ ' IP ' , ' IP ; ' ] was originally deleted . ' ; ' is <nl> - / / char ( ' : ' + 1 ) . This does not work , as other for all <nl> - / / x between 0 and 9 , ' IPx ' will also be in this range . <nl> - / / <nl> - / / This is why we now make two clears : first only of the ip <nl> - / / address , the second will delete all ports . <nl> - auto addr = failed ? encodeFailedServersKey ( s ) : encodeExcludedServersKey ( s ) ; <nl> - tr . clear ( singleKeyRange ( addr ) ) ; <nl> - tr . clear ( KeyRangeRef ( addr + ' : ' , addr + char ( ' : ' + 1 ) ) ) ; <nl> + } <nl> + TraceEvent ( " IncludeServersCommit " ) . detail ( " Servers " , describe ( servers ) ) . detail ( " Failed " , failed ) ; <nl> + <nl> + wait ( ryw . commit ( ) ) ; <nl> + return Void ( ) ; <nl> + } catch ( Error & e ) { <nl> + TraceEvent ( " IncludeServersError " ) . error ( e , true ) ; <nl> + wait ( ryw . onError ( e ) ) ; <nl> + } <nl> + } <nl> + } else { <nl> + state Transaction tr ( cx ) ; <nl> + loop { <nl> + try { <nl> + tr . setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> + tr . setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; <nl> + tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> + tr . setOption ( FDBTransactionOptions : : USE_PROVISIONAL_PROXIES ) ; <nl> + <nl> + / / includeServers might be used in an emergency transaction , so make sure it is retry - self - conflicting and CAUSAL_WRITE_RISKY <nl> + tr . setOption ( FDBTransactionOptions : : CAUSAL_WRITE_RISKY ) ; <nl> + if ( failed ) { <nl> + tr . addReadConflictRange ( singleKeyRange ( failedServersVersionKey ) ) ; <nl> + tr . set ( failedServersVersionKey , versionKey ) ; <nl> } else { <nl> - if ( failed ) { <nl> - tr . clear ( encodeFailedServersKey ( s ) ) ; <nl> + tr . addReadConflictRange ( singleKeyRange ( excludedServersVersionKey ) ) ; <nl> + tr . set ( excludedServersVersionKey , versionKey ) ; <nl> + } <nl> + <nl> + for ( auto & s : servers ) { <nl> + if ( ! s . isValid ( ) ) { <nl> + if ( failed ) { <nl> + tr . clear ( failedServersKeys ) ; <nl> + } else { <nl> + tr . clear ( excludedServersKeys ) ; <nl> + } <nl> + } else if ( s . isWholeMachine ( ) ) { <nl> + / / Eliminate both any ip - level exclusion ( 1 . 2 . 3 . 4 ) and any <nl> + / / port - level exclusions ( 1 . 2 . 3 . 4 : 5 ) <nl> + / / The range [ ' IP ' , ' IP ; ' ] was originally deleted . ' ; ' is <nl> + / / char ( ' : ' + 1 ) . This does not work , as other for all <nl> + / / x between 0 and 9 , ' IPx ' will also be in this range . <nl> + / / <nl> + / / This is why we now make two clears : first only of the ip <nl> + / / address , the second will delete all ports . <nl> + auto addr = failed ? encodeFailedServersKey ( s ) : encodeExcludedServersKey ( s ) ; <nl> + tr . clear ( singleKeyRange ( addr ) ) ; <nl> + tr . clear ( KeyRangeRef ( addr + ' : ' , addr + char ( ' : ' + 1 ) ) ) ; <nl> } else { <nl> - tr . clear ( encodeExcludedServersKey ( s ) ) ; <nl> + if ( failed ) { <nl> + tr . clear ( encodeFailedServersKey ( s ) ) ; <nl> + } else { <nl> + tr . clear ( encodeExcludedServersKey ( s ) ) ; <nl> + } <nl> } <nl> } <nl> - } <nl> + <nl> + TraceEvent ( " IncludeServersCommit " ) . detail ( " Servers " , describe ( servers ) ) . detail ( " Failed " , failed ) ; <nl> <nl> - TraceEvent ( " IncludeServersCommit " ) . detail ( " Servers " , describe ( servers ) ) . 
detail ( " Failed " , failed ) ; <nl> - <nl> - wait ( tr . commit ( ) ) ; <nl> - return Void ( ) ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " IncludeServersError " ) . error ( e , true ) ; <nl> - wait ( tr . onError ( e ) ) ; <nl> + wait ( tr . commit ( ) ) ; <nl> + return Void ( ) ; <nl> + } catch ( Error & e ) { <nl> + TraceEvent ( " IncludeServersError " ) . error ( e , true ) ; <nl> + wait ( tr . onError ( e ) ) ; <nl> + } <nl> } <nl> } <nl> } <nl> ACTOR Future < Void > setClass ( Database cx , AddressExclusion server , ProcessClass <nl> } <nl> } <nl> <nl> - ACTOR static Future < vector < AddressExclusion > > getExcludedServers ( Transaction * tr ) { <nl> + ACTOR Future < vector < AddressExclusion > > getExcludedServers ( Transaction * tr ) { <nl> state Standalone < RangeResultRef > r = wait ( tr - > getRange ( excludedServersKeys , CLIENT_KNOBS - > TOO_MANY ) ) ; <nl> ASSERT ( ! r . more & & r . size ( ) < CLIENT_KNOBS - > TOO_MANY ) ; <nl> state Standalone < RangeResultRef > r2 = wait ( tr - > getRange ( failedServersKeys , CLIENT_KNOBS - > TOO_MANY ) ) ; <nl> ACTOR Future < int > setDDMode ( Database cx , int mode ) { <nl> } <nl> } <nl> <nl> + ACTOR Future < bool > checkForExcludingServersTxActor ( ReadYourWritesTransaction * tr , <nl> + std : : set < AddressExclusion > * exclusions , <nl> + std : : set < NetworkAddress > * inProgressExclusion ) { <nl> + / / TODO : replace using ExclusionInProgressRangeImpl in special key space <nl> + ASSERT ( inProgressExclusion - > size ( ) = = 0 ) ; / / Make sure every time it is cleared beforehand <nl> + if ( ! exclusions - > size ( ) ) return true ; <nl> + <nl> + tr - > setOption ( FDBTransactionOptions : : READ_SYSTEM_KEYS ) ; <nl> + tr - > setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; / / necessary ? <nl> + tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> + <nl> + / / Just getting a consistent read version proves that a set of tlogs satisfying the exclusions has completed recovery <nl> + <nl> + / / Check that there aren ' t any storage servers with addresses violating the exclusions <nl> + Standalone < RangeResultRef > serverList = wait ( tr - > getRange ( serverListKeys , CLIENT_KNOBS - > TOO_MANY ) ) ; <nl> + ASSERT ( ! serverList . more & & serverList . size ( ) < CLIENT_KNOBS - > TOO_MANY ) ; <nl> + <nl> + state bool ok = true ; <nl> + for ( auto & s : serverList ) { <nl> + auto addresses = decodeServerListValue ( s . value ) . getKeyValues . getEndpoint ( ) . addresses ; <nl> + if ( addressExcluded ( * exclusions , addresses . address ) ) { <nl> + ok = false ; <nl> + inProgressExclusion - > insert ( addresses . address ) ; <nl> + } <nl> + if ( addresses . secondaryAddress . present ( ) & & addressExcluded ( * exclusions , addresses . secondaryAddress . get ( ) ) ) { <nl> + ok = false ; <nl> + inProgressExclusion - > insert ( addresses . secondaryAddress . get ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ok ) { <nl> + Optional < Standalone < StringRef > > value = wait ( tr - > get ( logsKey ) ) ; <nl> + ASSERT ( value . present ( ) ) ; <nl> + auto logs = decodeLogsValue ( value . get ( ) ) ; <nl> + for ( auto const & log : logs . first ) { <nl> + if ( log . second = = NetworkAddress ( ) | | addressExcluded ( * exclusions , log . second ) ) { <nl> + ok = false ; <nl> + inProgressExclusion - > insert ( log . second ) ; <nl> + } <nl> + } <nl> + for ( auto const & log : logs . second ) { <nl> + if ( log . second = = NetworkAddress ( ) | | addressExcluded ( * exclusions , log . 
second ) ) { <nl> + ok = false ; <nl> + inProgressExclusion - > insert ( log . second ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + return ok ; <nl> + } <nl> + <nl> ACTOR Future < std : : set < NetworkAddress > > checkForExcludingServers ( Database cx , vector < AddressExclusion > excl , <nl> bool waitForAllExcluded ) { <nl> state std : : set < AddressExclusion > exclusions ( excl . begin ( ) , excl . end ( ) ) ; <nl> state std : : set < NetworkAddress > inProgressExclusion ; <nl> <nl> - if ( ! excl . size ( ) ) return inProgressExclusion ; <nl> - <nl> loop { <nl> - state Transaction tr ( cx ) ; <nl> - <nl> + state ReadYourWritesTransaction tr ( cx ) ; <nl> + inProgressExclusion . clear ( ) ; <nl> try { <nl> - tr . setOption ( FDBTransactionOptions : : READ_SYSTEM_KEYS ) ; <nl> - tr . setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; / / necessary ? <nl> - tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> - <nl> - / / Just getting a consistent read version proves that a set of tlogs satisfying the exclusions has completed recovery <nl> - <nl> - / / Check that there aren ' t any storage servers with addresses violating the exclusions <nl> - Standalone < RangeResultRef > serverList = wait ( tr . getRange ( serverListKeys , CLIENT_KNOBS - > TOO_MANY ) ) ; <nl> - ASSERT ( ! serverList . more & & serverList . size ( ) < CLIENT_KNOBS - > TOO_MANY ) ; <nl> - <nl> - state bool ok = true ; <nl> - inProgressExclusion . clear ( ) ; <nl> - for ( auto & s : serverList ) { <nl> - auto addresses = decodeServerListValue ( s . value ) . getKeyValues . getEndpoint ( ) . addresses ; <nl> - if ( addressExcluded ( exclusions , addresses . address ) ) { <nl> - ok = false ; <nl> - inProgressExclusion . insert ( addresses . address ) ; <nl> - } <nl> - if ( addresses . secondaryAddress . present ( ) & & addressExcluded ( exclusions , addresses . secondaryAddress . get ( ) ) ) { <nl> - ok = false ; <nl> - inProgressExclusion . insert ( addresses . secondaryAddress . get ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - if ( ok ) { <nl> - Optional < Standalone < StringRef > > value = wait ( tr . get ( logsKey ) ) ; <nl> - ASSERT ( value . present ( ) ) ; <nl> - auto logs = decodeLogsValue ( value . get ( ) ) ; <nl> - for ( auto const & log : logs . first ) { <nl> - if ( log . second = = NetworkAddress ( ) | | addressExcluded ( exclusions , log . second ) ) { <nl> - ok = false ; <nl> - inProgressExclusion . insert ( log . second ) ; <nl> - } <nl> - } <nl> - for ( auto const & log : logs . second ) { <nl> - if ( log . second = = NetworkAddress ( ) | | addressExcluded ( exclusions , log . second ) ) { <nl> - ok = false ; <nl> - inProgressExclusion . insert ( log . second ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> + bool ok = wait ( checkForExcludingServersTxActor ( & tr , & exclusions , & inProgressExclusion ) ) ; <nl> if ( ok ) return inProgressExclusion ; <nl> if ( ! waitForAllExcluded ) break ; <nl> <nl> ACTOR Future < std : : set < NetworkAddress > > checkForExcludingServers ( Database cx , vec <nl> wait ( tr . onError ( e ) ) ; <nl> } <nl> } <nl> - <nl> return inProgressExclusion ; <nl> } <nl> <nl> mmm a / fdbclient / ManagementAPI . actor . h <nl> ppp b / fdbclient / ManagementAPI . actor . h <nl> Reference < IQuorumChange > nameQuorumChange ( std : : string const & name , Reference < IQu <nl> / / Exclude the given set of servers from use as state servers . Returns as soon as the change is durable , without necessarily waiting for <nl> / / the servers to be evacuated . 
A NetworkAddress with a port of 0 means all servers on the given IP . <nl> ACTOR Future < Void > excludeServers ( Database cx , vector < AddressExclusion > servers , bool failed = false ) ; <nl> + void excludeServers ( Transaction & tr , vector < AddressExclusion > & servers , bool failed = false ) ; <nl> <nl> / / Remove the given servers from the exclusion list . A NetworkAddress with a port of 0 means all servers on the given IP . A NetworkAddress ( ) means <nl> / / all servers ( don ' t exclude anything ) <nl> ACTOR Future < Void > setClass ( Database cx , AddressExclusion server , ProcessClas <nl> <nl> / / Get the current list of excluded servers <nl> ACTOR Future < vector < AddressExclusion > > getExcludedServers ( Database cx ) ; <nl> + ACTOR Future < vector < AddressExclusion > > getExcludedServers ( Transaction * tr ) ; <nl> <nl> / / Check for the given , previously excluded servers to be evacuated ( no longer used for state ) . If waitForExclusion is <nl> / / true , this actor returns once it is safe to shut down all such machines without impacting fault tolerance , until and <nl> / / unless any of them are explicitly included with includeServers ( ) <nl> ACTOR Future < std : : set < NetworkAddress > > checkForExcludingServers ( Database cx , vector < AddressExclusion > servers , <nl> bool waitForAllExcluded ) ; <nl> + ACTOR Future < bool > checkForExcludingServersTxActor ( ReadYourWritesTransaction * tr , <nl> + std : : set < AddressExclusion > * exclusions , <nl> + std : : set < NetworkAddress > * inProgressExclusion ) ; <nl> <nl> / / Gets a list of all workers in the cluster ( excluding testers ) <nl> ACTOR Future < vector < ProcessData > > getWorkers ( Database cx ) ; <nl> mmm a / fdbclient / MasterProxyInterface . h <nl> ppp b / fdbclient / MasterProxyInterface . h <nl> struct MasterProxyInterface { <nl> <nl> RequestStream < ReplyPromise < Void > > waitFailure ; <nl> <nl> - RequestStream < struct GetRawCommittedVersionRequest > getRawCommittedVersion ; <nl> RequestStream < struct TxnStateRequest > txnState ; <nl> RequestStream < struct GetHealthMetricsRequest > getHealthMetrics ; <nl> RequestStream < struct ProxySnapRequest > proxySnapReq ; <nl> struct MasterProxyInterface { <nl> getKeyServersLocations = RequestStream < struct GetKeyServerLocationsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 2 ) ) ; <nl> getStorageServerRejoinInfo = RequestStream < struct GetStorageServerRejoinInfoRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 3 ) ) ; <nl> waitFailure = RequestStream < ReplyPromise < Void > > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 4 ) ) ; <nl> - getRawCommittedVersion = RequestStream < struct GetRawCommittedVersionRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 5 ) ) ; <nl> - txnState = RequestStream < struct TxnStateRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 6 ) ) ; <nl> - getHealthMetrics = RequestStream < struct GetHealthMetricsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 7 ) ) ; <nl> - proxySnapReq = RequestStream < struct ProxySnapRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 8 ) ) ; <nl> - exclusionSafetyCheckReq = RequestStream < struct ExclusionSafetyCheckRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 9 ) ) ; <nl> - getDDMetrics = RequestStream < struct GetDDMetricsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 10 ) ) ; <nl> + txnState = RequestStream < struct TxnStateRequest > ( commit . getEndpoint ( ) . 
getAdjustedEndpoint ( 5 ) ) ; <nl> + getHealthMetrics = RequestStream < struct GetHealthMetricsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 6 ) ) ; <nl> + proxySnapReq = RequestStream < struct ProxySnapRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 7 ) ) ; <nl> + exclusionSafetyCheckReq = RequestStream < struct ExclusionSafetyCheckRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 8 ) ) ; <nl> + getDDMetrics = RequestStream < struct GetDDMetricsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 9 ) ) ; <nl> } <nl> } <nl> <nl> struct MasterProxyInterface { <nl> streams . push_back ( getKeyServersLocations . getReceiver ( TaskPriority : : ReadSocket ) ) ; / / priority lowered to TaskPriority : : DefaultEndpoint on the proxy <nl> streams . push_back ( getStorageServerRejoinInfo . getReceiver ( TaskPriority : : ProxyStorageRejoin ) ) ; <nl> streams . push_back ( waitFailure . getReceiver ( ) ) ; <nl> - streams . push_back ( getRawCommittedVersion . getReceiver ( TaskPriority : : ProxyGetRawCommittedVersion ) ) ; <nl> streams . push_back ( txnState . getReceiver ( ) ) ; <nl> streams . push_back ( getHealthMetrics . getReceiver ( ) ) ; <nl> streams . push_back ( proxySnapReq . getReceiver ( ) ) ; <nl> struct ProxySnapRequest <nl> { <nl> constexpr static FileIdentifier file_identifier = 5427684 ; <nl> Arena arena ; <nl> - StringRef snapPayload ; <nl> + StringRef snapPayload ; / / command used to snapshot the data folder <nl> UID snapUID ; <nl> ReplyPromise < Void > reply ; <nl> Optional < UID > debugID ; <nl> mmm a / fdbclient / MultiVersionTransaction . actor . cpp <nl> ppp b / fdbclient / MultiVersionTransaction . actor . cpp <nl> void MultiVersionDatabase : : Connector : : connect ( ) { <nl> } <nl> <nl> tr = candidateDatabase - > createTransaction ( ) ; <nl> - return ErrorOr < ThreadFuture < Void > > ( mapThreadFuture < Version , Void > ( tr - > getReadVersion ( ) , [ this ] ( ErrorOr < Version > v ) { <nl> - / / If the version attempt returns an error , we regard that as a connection ( except operation_cancelled ) <nl> - if ( v . isError ( ) & & v . getError ( ) . code ( ) = = error_code_operation_cancelled ) { <nl> - return ErrorOr < Void > ( v . getError ( ) ) ; <nl> - } <nl> - else { <nl> - return ErrorOr < Void > ( Void ( ) ) ; <nl> - } <nl> - } ) ) ; <nl> + return ErrorOr < ThreadFuture < Void > > ( <nl> + mapThreadFuture < Version , Void > ( tr - > getReadVersion ( ) , [ ] ( ErrorOr < Version > v ) { <nl> + / / If the version attempt returns an error , we regard that as a connection ( except <nl> + / / operation_cancelled ) <nl> + if ( v . isError ( ) & & v . getError ( ) . code ( ) = = error_code_operation_cancelled ) { <nl> + return ErrorOr < Void > ( v . getError ( ) ) ; <nl> + } else { <nl> + return ErrorOr < Void > ( Void ( ) ) ; <nl> + } <nl> + } ) ) ; <nl> } ) ; <nl> <nl> <nl> void MultiVersionApi : : setSupportedClientVersions ( Standalone < StringRef > versions ) <nl> } , NULL ) ; <nl> <nl> if ( ! bypassMultiClientApi ) { <nl> - runOnExternalClients ( [ this , versions ] ( Reference < ClientInfo > client ) { <nl> + runOnExternalClients ( [ versions ] ( Reference < ClientInfo > client ) { <nl> client - > api - > setNetworkOption ( FDBNetworkOptions : : SUPPORTED_CLIENT_VERSIONS , versions ) ; <nl> } ) ; <nl> } <nl> void MultiVersionApi : : setNetworkOptionInternal ( FDBNetworkOptions : : Option option , <nl> <nl> if ( ! 
bypassMultiClientApi ) { <nl> if ( networkSetup ) { <nl> - runOnExternalClients ( [ this , option , value ] ( Reference < ClientInfo > client ) { <nl> - client - > api - > setNetworkOption ( option , value ) ; <nl> - } ) ; <nl> + runOnExternalClients ( <nl> + [ option , value ] ( Reference < ClientInfo > client ) { client - > api - > setNetworkOption ( option , value ) ; } ) ; <nl> } <nl> else { <nl> options . push_back ( std : : make_pair ( option , value . castTo < Standalone < StringRef > > ( ) ) ) ; <nl> mmm a / fdbclient / NativeAPI . actor . cpp <nl> ppp b / fdbclient / NativeAPI . actor . cpp <nl> Future < HealthMetrics > DatabaseContext : : getHealthMetrics ( bool detailed = false ) { <nl> return getHealthMetricsActor ( this , detailed ) ; <nl> } <nl> <nl> - void DatabaseContext : : registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE module , std : : unique_ptr < SpecialKeyRangeBaseImpl > impl ) { <nl> - specialKeySpace - > registerKeyRange ( module , impl - > getKeyRange ( ) , impl . get ( ) ) ; <nl> + void DatabaseContext : : registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE module , SpecialKeySpace : : IMPLTYPE type , <nl> + std : : unique_ptr < SpecialKeyRangeReadImpl > impl ) { <nl> + specialKeySpace - > registerKeyRange ( module , type , impl - > getKeyRange ( ) , impl . get ( ) ) ; <nl> specialKeySpaceModules . push_back ( std : : move ( impl ) ) ; <nl> } <nl> <nl> ACTOR Future < Standalone < RangeResultRef > > getWorkerInterfaces ( Reference < ClusterConnectionFile > clusterFile ) ; <nl> ACTOR Future < Optional < Value > > getJSON ( Database db ) ; <nl> <nl> - struct WorkerInterfacesSpecialKeyImpl : SpecialKeyRangeBaseImpl { <nl> + struct WorkerInterfacesSpecialKeyImpl : SpecialKeyRangeReadImpl { <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override { <nl> if ( ryw - > getDatabase ( ) . getPtr ( ) & & ryw - > getDatabase ( ) - > getConnectionFile ( ) ) { <nl> Key prefix = Key ( getKeyRange ( ) . begin ) ; <nl> struct WorkerInterfacesSpecialKeyImpl : SpecialKeyRangeBaseImpl { <nl> } <nl> } <nl> <nl> - explicit WorkerInterfacesSpecialKeyImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { } <nl> + explicit WorkerInterfacesSpecialKeyImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { } <nl> } ; <nl> <nl> - struct SingleSpecialKeyImpl : SpecialKeyRangeBaseImpl { <nl> + struct SingleSpecialKeyImpl : SpecialKeyRangeReadImpl { <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override { <nl> ASSERT ( kr . contains ( k ) ) ; <nl> return map ( f ( ryw ) , [ k = k ] ( Optional < Value > v ) { <nl> struct SingleSpecialKeyImpl : SpecialKeyRangeBaseImpl { <nl> } <nl> <nl> SingleSpecialKeyImpl ( KeyRef k , const std : : function < Future < Optional < Value > > ( ReadYourWritesTransaction * ) > & f ) <nl> - : SpecialKeyRangeBaseImpl ( singleKeyRange ( k ) ) , k ( k ) , f ( f ) { } <nl> + : SpecialKeyRangeReadImpl ( singleKeyRange ( k ) ) , k ( k ) , f ( f ) { } <nl> <nl> private : <nl> Key k ; <nl> DatabaseContext : : DatabaseContext ( Reference < AsyncVar < Reference < ClusterConnectionF <nl> monitorProxiesInfoChange = monitorProxiesChange ( clientInfo , & proxiesChangeTrigger ) ; <nl> clientStatusUpdater . 
actor = clientStatusUpdateActor ( this ) ; <nl> cacheListMonitor = monitorCacheList ( this ) ; <nl> + if ( apiVersionAtLeast ( 700 ) ) { <nl> + registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : ERRORMSG , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> + std : : make_unique < SingleSpecialKeyImpl > ( <nl> + SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : ERRORMSG ) . begin , <nl> + [ ] ( ReadYourWritesTransaction * ryw ) - > Future < Optional < Value > > { <nl> + if ( ryw - > getSpecialKeySpaceErrorMsg ( ) . present ( ) ) <nl> + return Optional < Value > ( ryw - > getSpecialKeySpaceErrorMsg ( ) . get ( ) ) ; <nl> + else <nl> + return Optional < Value > ( ) ; <nl> + } ) ) ; <nl> + registerSpecialKeySpaceModule ( <nl> + SpecialKeySpace : : MODULE : : MANAGEMENT , SpecialKeySpace : : IMPLTYPE : : READWRITE , <nl> + std : : make_unique < ManagementCommandsOptionsImpl > ( <nl> + KeyRangeRef ( LiteralStringRef ( " options / " ) , LiteralStringRef ( " options0 " ) ) <nl> + . withPrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . begin ) ) ) ; <nl> + registerSpecialKeySpaceModule ( <nl> + SpecialKeySpace : : MODULE : : MANAGEMENT , SpecialKeySpace : : IMPLTYPE : : READWRITE , <nl> + std : : make_unique < ExcludeServersRangeImpl > ( SpecialKeySpace : : getManamentApiCommandRange ( " exclude " ) ) ) ; <nl> + registerSpecialKeySpaceModule ( <nl> + SpecialKeySpace : : MODULE : : MANAGEMENT , SpecialKeySpace : : IMPLTYPE : : READWRITE , <nl> + std : : make_unique < FailedServersRangeImpl > ( SpecialKeySpace : : getManamentApiCommandRange ( " failed " ) ) ) ; <nl> + registerSpecialKeySpaceModule ( <nl> + SpecialKeySpace : : MODULE : : MANAGEMENT , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> + std : : make_unique < ExclusionInProgressRangeImpl > ( <nl> + KeyRangeRef ( LiteralStringRef ( " inProgressExclusion / " ) , LiteralStringRef ( " inProgressExclusion0 " ) ) <nl> + . withPrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . 
begin ) ) ) ; <nl> + } <nl> if ( apiVersionAtLeast ( 630 ) ) { <nl> - registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , std : : make_unique < ConflictingKeysImpl > ( conflictingKeysRange ) ) ; <nl> - registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , std : : make_unique < ReadConflictRangeImpl > ( readConflictRangeKeysRange ) ) ; <nl> - registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , std : : make_unique < WriteConflictRangeImpl > ( writeConflictRangeKeysRange ) ) ; <nl> - registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : METRICS , <nl> + registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> + std : : make_unique < ConflictingKeysImpl > ( conflictingKeysRange ) ) ; <nl> + registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> + std : : make_unique < ReadConflictRangeImpl > ( readConflictRangeKeysRange ) ) ; <nl> + registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> + std : : make_unique < WriteConflictRangeImpl > ( writeConflictRangeKeysRange ) ) ; <nl> + registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : METRICS , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> std : : make_unique < DDStatsRangeImpl > ( ddStatsRange ) ) ; <nl> registerSpecialKeySpaceModule ( <nl> - SpecialKeySpace : : MODULE : : METRICS , <nl> + SpecialKeySpace : : MODULE : : METRICS , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> std : : make_unique < HealthMetricsRangeImpl > ( KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / metrics / health / " ) , <nl> LiteralStringRef ( " \ xff \ xff / metrics / health0 " ) ) ) ) ; <nl> - registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : WORKERINTERFACE , std : : make_unique < WorkerInterfacesSpecialKeyImpl > ( KeyRangeRef ( <nl> - LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) , LiteralStringRef ( " \ xff \ xff / worker_interfaces0 " ) ) ) ) ; <nl> registerSpecialKeySpaceModule ( <nl> - SpecialKeySpace : : MODULE : : STATUSJSON , <nl> + SpecialKeySpace : : MODULE : : WORKERINTERFACE , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> + std : : make_unique < WorkerInterfacesSpecialKeyImpl > ( KeyRangeRef ( <nl> + LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) , LiteralStringRef ( " \ xff \ xff / worker_interfaces0 " ) ) ) ) ; <nl> + registerSpecialKeySpaceModule ( <nl> + SpecialKeySpace : : MODULE : : STATUSJSON , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> std : : make_unique < SingleSpecialKeyImpl > ( LiteralStringRef ( " \ xff \ xff / status / json " ) , <nl> [ ] ( ReadYourWritesTransaction * ryw ) - > Future < Optional < Value > > { <nl> if ( ryw - > getDatabase ( ) . 
getPtr ( ) & & <nl> DatabaseContext : : DatabaseContext ( Reference < AsyncVar < Reference < ClusterConnectionF <nl> } <nl> } ) ) ; <nl> registerSpecialKeySpaceModule ( <nl> - SpecialKeySpace : : MODULE : : CLUSTERFILEPATH , <nl> + SpecialKeySpace : : MODULE : : CLUSTERFILEPATH , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> std : : make_unique < SingleSpecialKeyImpl > ( <nl> LiteralStringRef ( " \ xff \ xff / cluster_file_path " ) , <nl> [ ] ( ReadYourWritesTransaction * ryw ) - > Future < Optional < Value > > { <nl> DatabaseContext : : DatabaseContext ( Reference < AsyncVar < Reference < ClusterConnectionF <nl> } ) ) ; <nl> <nl> registerSpecialKeySpaceModule ( <nl> - SpecialKeySpace : : MODULE : : CONNECTIONSTRING , <nl> + SpecialKeySpace : : MODULE : : CONNECTIONSTRING , SpecialKeySpace : : IMPLTYPE : : READONLY , <nl> std : : make_unique < SingleSpecialKeyImpl > ( <nl> LiteralStringRef ( " \ xff \ xff / connection_string " ) , <nl> [ ] ( ReadYourWritesTransaction * ryw ) - > Future < Optional < Value > > { <nl> void Transaction : : set ( const KeyRef & key , const ValueRef & value , bool addConflic <nl> auto & t = req . transaction ; <nl> auto r = singleKeyRange ( key , req . arena ) ; <nl> auto v = ValueRef ( req . arena , value ) ; <nl> - t . mutations . push_back ( req . arena , MutationRef ( MutationRef : : SetValue , r . begin , v ) ) ; <nl> + t . mutations . emplace_back ( req . arena , MutationRef : : SetValue , r . begin , v ) ; <nl> <nl> if ( addConflictRange ) { <nl> t . write_conflict_ranges . push_back ( req . arena , r ) ; <nl> void Transaction : : atomicOp ( const KeyRef & key , const ValueRef & operand , MutationR <nl> auto r = singleKeyRange ( key , req . arena ) ; <nl> auto v = ValueRef ( req . arena , operand ) ; <nl> <nl> - t . mutations . push_back ( req . arena , MutationRef ( operationType , r . begin , v ) ) ; <nl> + t . mutations . emplace_back ( req . arena , operationType , r . begin , v ) ; <nl> <nl> if ( addConflictRange & & operationType ! = MutationRef : : SetVersionstampedKey ) <nl> t . write_conflict_ranges . push_back ( req . arena , r ) ; <nl> void Transaction : : clear ( const KeyRangeRef & range , bool addConflictRange ) { <nl> auto r = KeyRangeRef ( req . arena , KeyRangeRef ( begin , end ) ) ; <nl> if ( r . empty ( ) ) return ; <nl> <nl> - t . mutations . push_back ( req . arena , MutationRef ( MutationRef : : ClearRange , r . begin , r . end ) ) ; <nl> + t . mutations . emplace_back ( req . arena , MutationRef : : ClearRange , r . begin , r . end ) ; <nl> <nl> if ( addConflictRange ) <nl> t . write_conflict_ranges . push_back ( req . arena , r ) ; <nl> void Transaction : : clear ( const KeyRef & key , bool addConflictRange ) { <nl> uint8_t * data = new ( req . arena ) uint8_t [ key . size ( ) + 1 ] ; <nl> memcpy ( data , key . begin ( ) , key . size ( ) ) ; <nl> data [ key . size ( ) ] = 0 ; <nl> - t . mutations . push_back ( req . arena , MutationRef ( MutationRef : : ClearRange , KeyRef ( data , key . size ( ) ) , KeyRef ( data , key . size ( ) + 1 ) ) ) ; <nl> + t . mutations . emplace_back ( req . arena , MutationRef : : ClearRange , KeyRef ( data , key . size ( ) ) , <nl> + KeyRef ( data , key . size ( ) + 1 ) ) ; <nl> <nl> if ( addConflictRange ) <nl> - t . write_conflict_ranges . push_back ( req . arena , KeyRangeRef ( KeyRef ( data , key . size ( ) ) , KeyRef ( data , key . size ( ) + 1 ) ) ) ; <nl> + t . write_conflict_ranges . emplace_back ( req . arena , KeyRef ( data , key . size ( ) ) , KeyRef ( data , key . 
size ( ) + 1 ) ) ; <nl> } <nl> void Transaction : : addWriteConflictRange ( const KeyRangeRef & keys ) { <nl> ASSERT ( ! keys . empty ( ) ) ; <nl> Future < Void > Transaction : : commitMutations ( ) { <nl> bool isCheckingWrites = options . checkWritesEnabled & & deterministicRandom ( ) - > random01 ( ) < 0 . 01 ; <nl> for ( int i = 0 ; i < extraConflictRanges . size ( ) ; i + + ) <nl> if ( extraConflictRanges [ i ] . isReady ( ) & & extraConflictRanges [ i ] . get ( ) . first < extraConflictRanges [ i ] . get ( ) . second ) <nl> - tr . transaction . read_conflict_ranges . push_back ( tr . arena , KeyRangeRef ( extraConflictRanges [ i ] . get ( ) . first , extraConflictRanges [ i ] . get ( ) . second ) ) ; <nl> + tr . transaction . read_conflict_ranges . emplace_back ( tr . arena , extraConflictRanges [ i ] . get ( ) . first , <nl> + extraConflictRanges [ i ] . get ( ) . second ) ; <nl> <nl> if ( ! options . causalWriteRisky & & ! intersects ( tr . transaction . write_conflict_ranges , tr . transaction . read_conflict_ranges ) . present ( ) ) <nl> makeSelfConflicting ( ) ; <nl> ACTOR Future < Void > readVersionBatcher ( DatabaseContext * cx , FutureStream < Databas <nl> } <nl> } <nl> <nl> - ACTOR Future < Version > extractReadVersion ( SpanID parentSpan , DatabaseContext * cx , TransactionPriority priority , <nl> + ACTOR Future < Version > extractReadVersion ( Location location , SpanID spanContext , SpanID parent , DatabaseContext * cx , TransactionPriority priority , <nl> Reference < TransactionLogInfo > trLogInfo , Future < GetReadVersionReply > f , <nl> bool lockAware , double startTime , Promise < Optional < Value > > metadataVersion , <nl> TagSet tags ) { <nl> - / / parentSpan here is only used to keep the parent alive until the request completes <nl> + state Span span ( spanContext , location , { parent } ) ; <nl> GetReadVersionReply rep = wait ( f ) ; <nl> double latency = now ( ) - startTime ; <nl> cx - > GRVLatencies . addSample ( latency ) ; <nl> Future < Version > Transaction : : getReadVersion ( uint32_t flags ) { <nl> batcher . actor = readVersionBatcher ( cx . getPtr ( ) , batcher . stream . getFuture ( ) , options . priority , flags ) ; <nl> } <nl> <nl> - Span span ( " NAPI : getReadVersion " _loc , info . spanID ) ; <nl> - auto const req = DatabaseContext : : VersionRequest ( span . context , options . tags , info . debugID ) ; <nl> + Location location = " NAPI : getReadVersion " _loc ; <nl> + UID spanContext = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> + auto const req = DatabaseContext : : VersionRequest ( spanContext , options . tags , info . debugID ) ; <nl> batcher . stream . send ( req ) ; <nl> startTime = now ( ) ; <nl> - readVersion = extractReadVersion ( span . context , cx . getPtr ( ) , options . priority , trLogInfo , req . reply . getFuture ( ) , <nl> - options . lockAware , startTime , metadataVersion , options . tags ) ; <nl> + readVersion = extractReadVersion ( location , spanContext , info . spanID , cx . getPtr ( ) , options . priority , trLogInfo , <nl> + req . reply . getFuture ( ) , options . lockAware , startTime , metadataVersion , options . tags ) ; <nl> } <nl> return readVersion ; <nl> } <nl> mmm a / fdbclient / ReadYourWrites . actor . cpp <nl> ppp b / fdbclient / ReadYourWrites . actor . cpp <nl> class RYWImpl { <nl> try { <nl> ryw - > commitStarted = true ; <nl> <nl> + if ( ryw - > options . 
specialKeySpaceChangeConfiguration ) <nl> + wait ( ryw - > getDatabase ( ) - > specialKeySpace - > commit ( ryw ) ) ; <nl> + <nl> Future < Void > ready = ryw - > reading ; <nl> wait ( ryw - > resetPromise . getFuture ( ) | | ready ) ; <nl> <nl> class RYWImpl { <nl> <nl> ReadYourWritesTransaction : : ReadYourWritesTransaction ( Database const & cx ) <nl> : cache ( & arena ) , writes ( & arena ) , tr ( cx ) , retries ( 0 ) , approximateSize ( 0 ) , creationTime ( now ( ) ) , commitStarted ( false ) , <nl> - options ( tr ) , deferredError ( cx - > deferredError ) , versionStampFuture ( tr . getVersionstamp ( ) ) { <nl> + options ( tr ) , deferredError ( cx - > deferredError ) , versionStampFuture ( tr . getVersionstamp ( ) ) , <nl> + specialKeySpaceWriteMap ( std : : make_pair ( false , Optional < Value > ( ) ) , specialKeys . end ) { <nl> std : : copy ( cx . getTransactionDefaults ( ) . begin ( ) , cx . getTransactionDefaults ( ) . end ( ) , <nl> std : : back_inserter ( persistentOptions ) ) ; <nl> applyPersistentOptions ( ) ; <nl> Future < Standalone < VectorRef < const char * > > > ReadYourWritesTransaction : : getAddre <nl> return result ; <nl> } <nl> <nl> - Future < int64_t > ReadYourWritesTransaction : : getEstimatedRangeSizeBytes ( const KeyRangeRef & keys ) { <nl> + Future < int64_t > ReadYourWritesTransaction : : getEstimatedRangeSizeBytes ( const KeyRange & keys ) { <nl> if ( checkUsedDuringCommit ( ) ) { <nl> throw used_during_commit ( ) ; <nl> } <nl> void ReadYourWritesTransaction : : atomicOp ( const KeyRef & key , const ValueRef & ope <nl> } <nl> <nl> void ReadYourWritesTransaction : : set ( const KeyRef & key , const ValueRef & value ) { <nl> - if ( key = = LiteralStringRef ( " \ xff \ xff / reboot_worker " ) ) { <nl> - BinaryReader : : fromStringRef < ClientWorkerInterface > ( value , IncludeVersion ( ) ) . reboot . send ( RebootRequest ( ) ) ; <nl> - return ; <nl> - } <nl> - if ( key = = LiteralStringRef ( " \ xff \ xff / reboot_and_check_worker " ) ) { <nl> - BinaryReader : : fromStringRef < ClientWorkerInterface > ( value , IncludeVersion ( ) ) . reboot . send ( RebootRequest ( false , true ) ) ; <nl> - return ; <nl> - } <nl> if ( key = = metadataVersionKey ) { <nl> throw client_invalid_operation ( ) ; <nl> } <nl> <nl> + if ( specialKeys . contains ( key ) ) { <nl> + if ( getDatabase ( ) - > apiVersionAtLeast ( 700 ) ) { <nl> + return getDatabase ( ) - > specialKeySpace - > set ( this , key , value ) ; <nl> + } else { <nl> + / / These three special keys are deprecated in 7 . 0 and an alternative C API is added <nl> + / / TODO : Rewrite related code using C api <nl> + if ( key = = LiteralStringRef ( " \ xff \ xff / reboot_worker " ) ) { <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( value , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( ) ) ; <nl> + return ; <nl> + } <nl> + if ( key = = LiteralStringRef ( " \ xff \ xff / suspend_worker " ) ) { <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( value , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( false , false , options . timeoutInSeconds ) ) ; <nl> + return ; <nl> + } <nl> + if ( key = = LiteralStringRef ( " \ xff \ xff / reboot_and_check_worker " ) ) { <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( value , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( false , true ) ) ; <nl> + return ; <nl> + } <nl> + } <nl> + } <nl> + <nl> bool addWriteConflict = ! options . 
getAndResetWriteConflictDisabled ( ) ; <nl> <nl> if ( checkUsedDuringCommit ( ) ) { <nl> void ReadYourWritesTransaction : : clear ( const KeyRangeRef & range ) { <nl> throw used_during_commit ( ) ; <nl> } <nl> <nl> + if ( specialKeys . contains ( range ) ) { <nl> + if ( getDatabase ( ) - > apiVersionAtLeast ( 700 ) ) { <nl> + return getDatabase ( ) - > specialKeySpace - > clear ( this , range ) ; <nl> + } <nl> + } <nl> + <nl> KeyRef maxKey = getMaxWriteKey ( ) ; <nl> if ( range . begin > maxKey | | range . end > maxKey ) <nl> throw key_outside_legal_range ( ) ; <nl> void ReadYourWritesTransaction : : clear ( const KeyRef & key ) { <nl> throw used_during_commit ( ) ; <nl> } <nl> <nl> + if ( specialKeys . contains ( key ) ) { <nl> + if ( getDatabase ( ) - > apiVersionAtLeast ( 700 ) ) { <nl> + return getDatabase ( ) - > specialKeySpace - > clear ( this , key ) ; <nl> + } <nl> + } <nl> + <nl> if ( key > = getMaxWriteKey ( ) ) <nl> throw key_outside_legal_range ( ) ; <nl> <nl> void ReadYourWritesTransaction : : setOptionImpl ( FDBTransactionOptions : : Option opt <nl> case FDBTransactionOptions : : SPECIAL_KEY_SPACE_RELAXED : <nl> validateOptionValue ( value , false ) ; <nl> options . specialKeySpaceRelaxed = true ; <nl> + break ; <nl> + case FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES : <nl> + validateOptionValue ( value , false ) ; <nl> + options . specialKeySpaceChangeConfiguration = true ; <nl> + / / By default , it allows to read system keys <nl> + / / More options will be implicitly enabled if needed when doing set or clear <nl> + options . readSystemKeys = true ; <nl> + break ; <nl> default : <nl> break ; <nl> } <nl> void ReadYourWritesTransaction : : operator = ( ReadYourWritesTransaction & & r ) noexcep <nl> nativeReadRanges = std : : move ( r . nativeReadRanges ) ; <nl> nativeWriteRanges = std : : move ( r . nativeWriteRanges ) ; <nl> versionStampKeys = std : : move ( r . versionStampKeys ) ; <nl> + specialKeySpaceWriteMap = std : : move ( r . specialKeySpaceWriteMap ) ; <nl> } <nl> <nl> ReadYourWritesTransaction : : ReadYourWritesTransaction ( ReadYourWritesTransaction & & r ) noexcept <nl> ReadYourWritesTransaction : : ReadYourWritesTransaction ( ReadYourWritesTransaction & & <nl> nativeReadRanges = std : : move ( r . nativeReadRanges ) ; <nl> nativeWriteRanges = std : : move ( r . nativeWriteRanges ) ; <nl> versionStampKeys = std : : move ( r . versionStampKeys ) ; <nl> + specialKeySpaceWriteMap = std : : move ( r . specialKeySpaceWriteMap ) ; <nl> } <nl> <nl> Future < Void > ReadYourWritesTransaction : : onError ( Error const & e ) { <nl> void ReadYourWritesTransaction : : resetRyow ( ) { <nl> versionStampKeys = VectorRef < KeyRef > ( ) ; <nl> nativeReadRanges = Standalone < VectorRef < KeyRangeRef > > ( ) ; <nl> nativeWriteRanges = Standalone < VectorRef < KeyRangeRef > > ( ) ; <nl> + specialKeySpaceWriteMap = <nl> + KeyRangeMap < std : : pair < bool , Optional < Value > > > ( std : : make_pair ( false , Optional < Value > ( ) ) , specialKeys . end ) ; <nl> + specialKeySpaceErrorMsg . reset ( ) ; <nl> watchMap . clear ( ) ; <nl> reading = AndFuture ( ) ; <nl> approximateSize = 0 ; <nl> mmm a / fdbclient / ReadYourWrites . h <nl> ppp b / fdbclient / ReadYourWrites . 
h <nl> struct ReadYourWritesTransactionOptions { <nl> bool debugRetryLogging : 1 ; <nl> bool disableUsedDuringCommitProtection : 1 ; <nl> bool specialKeySpaceRelaxed : 1 ; <nl> + bool specialKeySpaceChangeConfiguration : 1 ; <nl> double timeoutInSeconds ; <nl> int maxRetries ; <nl> int snapshotRywEnabled ; <nl> class ReadYourWritesTransaction : NonCopyable , public ReferenceCounted < ReadYourW <nl> } <nl> <nl> [ [ nodiscard ] ] Future < Standalone < VectorRef < const char * > > > getAddressesForKey ( const Key & key ) ; <nl> - Future < int64_t > getEstimatedRangeSizeBytes ( const KeyRangeRef & keys ) ; <nl> + Future < int64_t > getEstimatedRangeSizeBytes ( const KeyRange & keys ) ; <nl> <nl> void addReadConflictRange ( KeyRangeRef const & keys ) ; <nl> void makeSelfConflicting ( ) { tr . makeSelfConflicting ( ) ; } <nl> class ReadYourWritesTransaction : NonCopyable , public ReferenceCounted < ReadYourW <nl> Standalone < RangeResultRef > getWriteConflictRangeIntersecting ( KeyRangeRef kr ) ; <nl> <nl> bool specialKeySpaceRelaxed ( ) const { return options . specialKeySpaceRelaxed ; } <nl> + bool specialKeySpaceChangeConfiguration ( ) const { return options . specialKeySpaceChangeConfiguration ; } <nl> + <nl> + KeyRangeMap < std : : pair < bool , Optional < Value > > > & getSpecialKeySpaceWriteMap ( ) { return specialKeySpaceWriteMap ; } <nl> + bool readYourWritesDisabled ( ) const { return options . readYourWritesDisabled ; } <nl> + const Optional < std : : string > & getSpecialKeySpaceErrorMsg ( ) { return specialKeySpaceErrorMsg ; } <nl> + void setSpecialKeySpaceErrorMsg ( const std : : string & msg ) { specialKeySpaceErrorMsg = msg ; } <nl> + Transaction & getTransaction ( ) { return tr ; } <nl> <nl> private : <nl> friend class RYWImpl ; <nl> class ReadYourWritesTransaction : NonCopyable , public ReferenceCounted < ReadYourW <nl> <nl> Reference < TransactionDebugInfo > transactionDebugInfo ; <nl> <nl> + KeyRangeMap < std : : pair < bool , Optional < Value > > > specialKeySpaceWriteMap ; <nl> + Optional < std : : string > specialKeySpaceErrorMsg ; <nl> + <nl> void resetTimeout ( ) ; <nl> void updateConflictMap ( KeyRef const & key , WriteMap : : iterator & it ) ; / / pre : it . segmentContains ( key ) <nl> void updateConflictMap ( KeyRangeRef const & keys , WriteMap : : iterator & it ) ; / / pre : it . segmentContains ( keys . begin ) , keys are already inside this - > arena <nl> mmm a / fdbclient / RunTransaction . actor . h <nl> ppp b / fdbclient / RunTransaction . actor . h <nl> <nl> # elif ! defined ( FDBCLIENT_RUNTRANSACTION_ACTOR_H ) <nl> # define FDBCLIENT_RUNTRANSACTION_ACTOR_H <nl> <nl> + # include < utility > <nl> + <nl> # include " flow / flow . h " <nl> # include " fdbclient / ReadYourWrites . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> - ACTOR template < class Function > <nl> - Future < decltype ( fake < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) > <nl> - runRYWTransaction ( Database cx , Function func ) { <nl> + ACTOR template < class Function > <nl> + Future < decltype ( std : : declval < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . 
getValue ( ) ) > runRYWTransaction ( <nl> + Database cx , Function func ) { <nl> state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ; <nl> loop { <nl> try { <nl> / / func should be idempotent ; otherwise , retry will get undefined result <nl> - state decltype ( fake < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) result = wait ( func ( tr ) ) ; <nl> + state decltype ( std : : declval < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) result = <nl> + wait ( func ( tr ) ) ; <nl> wait ( tr - > commit ( ) ) ; <nl> return result ; <nl> } <nl> runRYWTransaction ( Database cx , Function func ) { <nl> } <nl> } <nl> <nl> - ACTOR template < class Function > <nl> - Future < decltype ( fake < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) > <nl> + ACTOR template < class Function > <nl> + Future < decltype ( std : : declval < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) > <nl> runRYWTransactionFailIfLocked ( Database cx , Function func ) { <nl> state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ; <nl> loop { <nl> try { <nl> - state decltype ( fake < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) result = wait ( func ( tr ) ) ; <nl> + state decltype ( std : : declval < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) result = <nl> + wait ( func ( tr ) ) ; <nl> wait ( tr - > commit ( ) ) ; <nl> return result ; <nl> } <nl> runRYWTransactionFailIfLocked ( Database cx , Function func ) { <nl> } <nl> } <nl> <nl> - ACTOR template < class Function > <nl> - Future < decltype ( fake < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) > <nl> - runRYWTransactionNoRetry ( Database cx , Function func ) { <nl> + ACTOR template < class Function > <nl> + Future < decltype ( std : : declval < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) > runRYWTransactionNoRetry ( <nl> + Database cx , Function func ) { <nl> state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ; <nl> - state decltype ( fake < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) result = wait ( func ( tr ) ) ; <nl> + state decltype ( std : : declval < Function > ( ) ( Reference < ReadYourWritesTransaction > ( ) ) . getValue ( ) ) result = wait ( func ( tr ) ) ; <nl> wait ( tr - > commit ( ) ) ; <nl> return result ; <nl> } <nl> mmm a / fdbclient / Schemas . cpp <nl> ppp b / fdbclient / Schemas . cpp <nl> const KeyRef JSONSchemas : : aggregateHealthSchema = LiteralStringRef ( R " " " ( <nl> " worst_log_queue " : 156 <nl> } <nl> ) " " " ) ; <nl> + <nl> + const KeyRef JSONSchemas : : managementApiErrorSchema = LiteralStringRef ( R " " " ( <nl> + { <nl> + " retriable " : false , <nl> + " command " : " exclude " , <nl> + " message " : " The reason for the error " <nl> + } <nl> + ) " " " ) ; <nl> mmm a / fdbclient / Schemas . h <nl> ppp b / fdbclient / Schemas . h <nl> struct JSONSchemas { <nl> static const KeyRef logHealthSchema ; <nl> static const KeyRef storageHealthSchema ; <nl> static const KeyRef aggregateHealthSchema ; <nl> + static const KeyRef managementApiErrorSchema ; <nl> } ; <nl> <nl> # endif / * FDBCLIENT_SCHEMAS_H * / <nl> mmm a / fdbclient / SpecialKeySpace . actor . cpp <nl> ppp b / fdbclient / SpecialKeySpace . actor .
cpp <nl> <nl> <nl> # include " fdbclient / SpecialKeySpace . actor . h " <nl> # include " flow / UnitTest . h " <nl> + # include " fdbclient / ManagementAPI . actor . h " <nl> + # include " fdbclient / StatusClient . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> std : : unordered_map < SpecialKeySpace : : MODULE , KeyRange > SpecialKeySpace : : moduleToBoundary = { <nl> std : : unordered_map < SpecialKeySpace : : MODULE , KeyRange > SpecialKeySpace : : moduleToB <nl> { SpecialKeySpace : : MODULE : : CONNECTIONSTRING , singleKeyRange ( LiteralStringRef ( " \ xff \ xff / connection_string " ) ) } , <nl> { SpecialKeySpace : : MODULE : : CLUSTERFILEPATH , singleKeyRange ( LiteralStringRef ( " \ xff \ xff / cluster_file_path " ) ) } , <nl> { SpecialKeySpace : : MODULE : : METRICS , <nl> - KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / metrics / " ) , LiteralStringRef ( " \ xff \ xff / metrics0 " ) ) } <nl> + KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / metrics / " ) , LiteralStringRef ( " \ xff \ xff / metrics0 " ) ) } , <nl> + { SpecialKeySpace : : MODULE : : MANAGEMENT , <nl> + KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / management / " ) , LiteralStringRef ( " \ xff \ xff / management0 " ) ) } , <nl> + { SpecialKeySpace : : MODULE : : ERRORMSG , singleKeyRange ( LiteralStringRef ( " \ xff \ xff / error_message " ) ) } <nl> } ; <nl> <nl> + std : : unordered_map < std : : string , KeyRange > SpecialKeySpace : : managementApiCommandToRange = { <nl> + { " exclude " , KeyRangeRef ( LiteralStringRef ( " excluded / " ) , LiteralStringRef ( " excluded0 " ) ) <nl> + . withPrefix ( moduleToBoundary [ MODULE : : MANAGEMENT ] . begin ) } , <nl> + { " failed " , KeyRangeRef ( LiteralStringRef ( " failed / " ) , LiteralStringRef ( " failed0 " ) ) <nl> + . withPrefix ( moduleToBoundary [ MODULE : : MANAGEMENT ] . begin ) } <nl> + } ; <nl> + <nl> + std : : set < std : : string > SpecialKeySpace : : options = { " excluded / force " , " failed / force " } ; <nl> + <nl> / / This function will move the given KeySelector as far as possible to the standard form : <nl> / / orEqual = = false & & offset = = 1 ( Standard form ) <nl> / / If the corresponding key is not in the underlying key range , it will move over the range <nl> / / The cache object is used to cache the first read result from the rpc call during the key resolution , <nl> / / then when we need to do key resolution or result filtering , <nl> / / we , instead of rpc call , read from this cache object have consistent results <nl> - ACTOR Future < Void > moveKeySelectorOverRangeActor ( const SpecialKeyRangeBaseImpl * skrImpl , ReadYourWritesTransaction * ryw , <nl> + ACTOR Future < Void > moveKeySelectorOverRangeActor ( const SpecialKeyRangeReadImpl * skrImpl , ReadYourWritesTransaction * ryw , <nl> KeySelector * ks , Optional < Standalone < RangeResultRef > > * cache ) { <nl> ASSERT ( ! ks - > orEqual ) ; / / should be removed before calling <nl> ASSERT ( ks - > offset ! = 1 ) ; / / never being called if KeySelector is already normalized <nl> ACTOR Future < Void > moveKeySelectorOverRangeActor ( const SpecialKeyRangeBaseImpl * <nl> ks - > setKey ( KeyRef ( ks - > arena ( ) , result [ ks - > offset - 1 ] . key ) ) ; <nl> ks - > offset = 1 ; <nl> } else { <nl> - ks - > setKey ( KeyRef ( ks - > arena ( ) , keyAfter ( result [ result . size ( ) - 1 ] . key ) ) ) ; <nl> + ks - > setKey ( KeyRef ( <nl> + ks - > arena ( ) , <nl> + keyAfter ( result [ result . size ( ) - 1 ] . 
key ) ) ) ; / / TODO : the keyAfter will just return if key = = \ xff \ xff <nl> ks - > offset - = result . size ( ) ; <nl> } <nl> } <nl> ACTOR Future < Void > normalizeKeySelectorActor ( SpecialKeySpace * sks , ReadYourWrite <nl> KeyRangeRef boundary , int * actualOffset , <nl> Standalone < RangeResultRef > * result , <nl> Optional < Standalone < RangeResultRef > > * cache ) { <nl> - state RangeMap < Key , SpecialKeyRangeBaseImpl * , KeyRangeRef > : : iterator iter = <nl> - ks - > offset < 1 ? sks - > getImpls ( ) . rangeContainingKeyBefore ( ks - > getKey ( ) ) <nl> - : sks - > getImpls ( ) . rangeContaining ( ks - > getKey ( ) ) ; <nl> + state RangeMap < Key , SpecialKeyRangeReadImpl * , KeyRangeRef > : : iterator iter = <nl> + ks - > offset < 1 ? sks - > getReadImpls ( ) . rangeContainingKeyBefore ( ks - > getKey ( ) ) <nl> + : sks - > getReadImpls ( ) . rangeContaining ( ks - > getKey ( ) ) ; <nl> while ( ( ks - > offset < 1 & & iter - > begin ( ) > boundary . begin ) | | ( ks - > offset > 1 & & iter - > begin ( ) < boundary . end ) ) { <nl> if ( iter - > value ( ) ! = nullptr ) { <nl> wait ( moveKeySelectorOverRangeActor ( iter - > value ( ) , ryw , ks , cache ) ) ; <nl> ACTOR Future < Void > normalizeKeySelectorActor ( SpecialKeySpace * sks , ReadYourWrite <nl> return Void ( ) ; <nl> } <nl> <nl> + SpecialKeySpace : : SpecialKeySpace ( KeyRef spaceStartKey , KeyRef spaceEndKey , bool testOnly ) <nl> + : range ( KeyRangeRef ( spaceStartKey , spaceEndKey ) ) , readImpls ( nullptr , spaceEndKey ) , writeImpls ( nullptr , spaceEndKey ) , <nl> + modules ( testOnly ? SpecialKeySpace : : MODULE : : TESTONLY : SpecialKeySpace : : MODULE : : UNKNOWN , spaceEndKey ) { <nl> + / / Default begin of KeyRangeMap is Key ( ) , insert the range to update start key <nl> + readImpls . insert ( range , nullptr ) ; <nl> + writeImpls . insert ( range , nullptr ) ; <nl> + if ( ! testOnly ) modulesBoundaryInit ( ) ; / / testOnly is used in the correctness workload <nl> + } <nl> + <nl> + void SpecialKeySpace : : modulesBoundaryInit ( ) { <nl> + for ( const auto & pair : moduleToBoundary ) { <nl> + ASSERT ( range . contains ( pair . second ) ) ; <nl> + / / Make sure the module is not overlapping with any registered read modules <nl> + / / Note : same like ranges , one module ' s end cannot be another module ' s start , relax the condition if needed <nl> + ASSERT ( modules . rangeContaining ( pair . second . begin ) = = modules . rangeContaining ( pair . second . end ) & & <nl> + modules [ pair . second . begin ] = = SpecialKeySpace : : MODULE : : UNKNOWN ) ; <nl> + modules . insert ( pair . second , pair . first ) ; <nl> + / / Note : Due to underlying implementation , the insertion here is important to make cross_module_read being <nl> + / / handled correctly <nl> + readImpls . insert ( pair . second , nullptr ) ; <nl> + writeImpls . insert ( pair . 
second , nullptr ) ; <nl> + } <nl> + } <nl> + <nl> ACTOR Future < Standalone < RangeResultRef > > SpecialKeySpace : : checkRYWValid ( SpecialKeySpace * sks , <nl> ReadYourWritesTransaction * ryw , <nl> KeySelector begin , KeySelector end , <nl> ACTOR Future < Standalone < RangeResultRef > > SpecialKeySpace : : getRangeAggregationAct <nl> / / KeySelector , GetRangeLimits and reverse are all handled here <nl> state Standalone < RangeResultRef > result ; <nl> state Standalone < RangeResultRef > pairs ; <nl> - state RangeMap < Key , SpecialKeyRangeBaseImpl * , KeyRangeRef > : : iterator iter ; <nl> + state RangeMap < Key , SpecialKeyRangeReadImpl * , KeyRangeRef > : : iterator iter ; <nl> state int actualBeginOffset ; <nl> state int actualEndOffset ; <nl> state KeyRangeRef moduleBoundary ; <nl> ACTOR Future < Standalone < RangeResultRef > > SpecialKeySpace : : getRangeAggregationAct <nl> TEST ( true ) ; <nl> return result ; <nl> } <nl> - state RangeMap < Key , SpecialKeyRangeBaseImpl * , KeyRangeRef > : : Ranges ranges = <nl> - sks - > impls . intersectingRanges ( KeyRangeRef ( begin . getKey ( ) , end . getKey ( ) ) ) ; <nl> + state RangeMap < Key , SpecialKeyRangeReadImpl * , KeyRangeRef > : : Ranges ranges = <nl> + sks - > getReadImpls ( ) . intersectingRanges ( KeyRangeRef ( begin . getKey ( ) , end . getKey ( ) ) ) ; <nl> / / TODO : workaround to write this two together to make the code compact <nl> / / The issue here is boost : : iterator_range < > doest not provide rbegin ( ) , rend ( ) <nl> iter = reverse ? ranges . end ( ) : ranges . begin ( ) ; <nl> ACTOR Future < Standalone < RangeResultRef > > SpecialKeySpace : : getRangeAggregationAct <nl> result . arena ( ) . dependsOn ( pairs . arena ( ) ) ; <nl> / / limits handler <nl> for ( int i = pairs . size ( ) - 1 ; i > = 0 ; - - i ) { <nl> + ASSERT ( iter - > range ( ) . contains ( pairs [ i ] . key ) ) ; <nl> result . push_back ( result . arena ( ) , pairs [ i ] ) ; <nl> / / Note : behavior here is even the last k - v pair makes total bytes larger than specified , it ' s still <nl> / / returned . In other words , the total size of the returned value ( less the last entry ) will be less <nl> ACTOR Future < Standalone < RangeResultRef > > SpecialKeySpace : : getRangeAggregationAct <nl> result . arena ( ) . dependsOn ( pairs . arena ( ) ) ; <nl> / / limits handler <nl> for ( int i = 0 ; i < pairs . size ( ) ; + + i ) { <nl> + ASSERT ( iter - > range ( ) . contains ( pairs [ i ] . key ) ) ; <nl> result . push_back ( result . arena ( ) , pairs [ i ] ) ; <nl> / / Note : behavior here is even the last k - v pair makes total bytes larger than specified , it ' s still <nl> / / returned . In other words , the total size of the returned value ( less the last entry ) will be less <nl> Future < Optional < Value > > SpecialKeySpace : : get ( ReadYourWritesTransaction * ryw , con <nl> return getActor ( this , ryw , key ) ; <nl> } <nl> <nl> - ReadConflictRangeImpl : : ReadConflictRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { } <nl> + void SpecialKeySpace : : set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) { <nl> + if ( ! ryw - > specialKeySpaceChangeConfiguration ( ) ) throw special_keys_write_disabled ( ) ; <nl> + auto impl = writeImpls [ key ] ; <nl> + if ( impl = = nullptr ) { <nl> + TraceEvent ( SevDebug , " SpecialKeySpaceNoWriteModuleFound " ) <nl> + . detail ( " Key " , key . toString ( ) ) <nl> + . detail ( " Value " , value . 
toString ( ) ) ; <nl> + throw special_keys_no_write_module_found ( ) ; <nl> + } <nl> + return impl - > set ( ryw , key , value ) ; <nl> + } <nl> + <nl> + void SpecialKeySpace : : clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) { <nl> + if ( ! ryw - > specialKeySpaceChangeConfiguration ( ) ) throw special_keys_write_disabled ( ) ; <nl> + if ( range . empty ( ) ) return ; <nl> + auto begin = writeImpls [ range . begin ] ; <nl> + auto end = writeImpls . rangeContainingKeyBefore ( range . end ) - > value ( ) ; <nl> + if ( begin ! = end ) { <nl> + TraceEvent ( SevDebug , " SpecialKeySpaceCrossModuleClear " ) . detail ( " Range " , range . toString ( ) ) ; <nl> + throw special_keys_cross_module_clear ( ) ; / / ban cross module clear <nl> + } else if ( begin = = nullptr ) { <nl> + TraceEvent ( SevDebug , " SpecialKeySpaceNoWriteModuleFound " ) . detail ( " Range " , range . toString ( ) ) ; <nl> + throw special_keys_no_write_module_found ( ) ; <nl> + } <nl> + return begin - > clear ( ryw , range ) ; <nl> + } <nl> + <nl> + void SpecialKeySpace : : clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) { <nl> + if ( ! ryw - > specialKeySpaceChangeConfiguration ( ) ) throw special_keys_write_disabled ( ) ; <nl> + auto impl = writeImpls [ key ] ; <nl> + if ( impl = = nullptr ) throw special_keys_no_write_module_found ( ) ; <nl> + return impl - > clear ( ryw , key ) ; <nl> + } <nl> + <nl> + void SpecialKeySpace : : registerKeyRange ( SpecialKeySpace : : MODULE module , SpecialKeySpace : : IMPLTYPE type , <nl> + const KeyRangeRef & kr , SpecialKeyRangeReadImpl * impl ) { <nl> + / / module boundary check <nl> + if ( module = = SpecialKeySpace : : MODULE : : TESTONLY ) <nl> + ASSERT ( normalKeys . contains ( kr ) ) ; <nl> + else <nl> + ASSERT ( moduleToBoundary . at ( module ) . contains ( kr ) ) ; <nl> + / / make sure the registered range is not overlapping with existing ones <nl> + / / Note : kr . end should not be the same as another range ' s begin , although it should work even they are the same <nl> + for ( auto iter = readImpls . rangeContaining ( kr . begin ) ; true ; + + iter ) { <nl> + ASSERT ( iter - > value ( ) = = nullptr ) ; <nl> + if ( iter = = readImpls . rangeContaining ( kr . end ) ) <nl> + break ; / / Note : relax the condition that the end can be another range ' s start , if needed <nl> + } <nl> + readImpls . insert ( kr , impl ) ; <nl> + / / if rw , it means the module can do both read and write <nl> + if ( type = = SpecialKeySpace : : IMPLTYPE : : READWRITE ) { <nl> + / / since write impls are always subset of read impls , <nl> + / / no need to check overlapped registration <nl> + auto rwImpl = dynamic_cast < SpecialKeyRangeRWImpl * > ( impl ) ; <nl> + ASSERT ( rwImpl ) ; <nl> + writeImpls . insert ( kr , rwImpl ) ; <nl> + } <nl> + } <nl> + <nl> + Key SpecialKeySpace : : decode ( const KeyRef & key ) { <nl> + auto impl = writeImpls [ key ] ; <nl> + ASSERT ( impl ! = nullptr ) ; <nl> + return impl - > decode ( key ) ; <nl> + } <nl> + <nl> + KeyRange SpecialKeySpace : : decode ( const KeyRangeRef & kr ) { <nl> + / / Only allow to decode key range in the same underlying impl range <nl> + auto begin = writeImpls . rangeContaining ( kr . begin ) ; <nl> + ASSERT ( begin - > value ( ) ! = nullptr ) ; <nl> + auto end = writeImpls . rangeContainingKeyBefore ( kr . end ) ; <nl> + ASSERT ( begin = = end ) ; <nl> + return KeyRangeRef ( begin - > value ( ) - > decode ( kr . begin ) , begin - > value ( ) - > decode ( kr . 
end ) ) ; <nl> + } <nl> + <nl> + ACTOR Future < Void > commitActor ( SpecialKeySpace * sks , ReadYourWritesTransaction * ryw ) { <nl> + state RangeMap < Key , std : : pair < bool , Optional < Value > > , KeyRangeRef > : : Ranges ranges = <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . containedRanges ( specialKeys ) ; <nl> + state RangeMap < Key , std : : pair < bool , Optional < Value > > , KeyRangeRef > : : iterator iter = ranges . begin ( ) ; <nl> + state std : : set < SpecialKeyRangeRWImpl * > writeModulePtrs ; <nl> + while ( iter ! = ranges . end ( ) ) { <nl> + std : : pair < bool , Optional < Value > > entry = iter - > value ( ) ; <nl> + if ( entry . first ) { <nl> + auto modulePtr = sks - > getRWImpls ( ) . rangeContaining ( iter - > begin ( ) ) - > value ( ) ; <nl> + writeModulePtrs . insert ( modulePtr ) ; <nl> + } <nl> + + + iter ; <nl> + } <nl> + state std : : set < SpecialKeyRangeRWImpl * > : : const_iterator it ; <nl> + for ( it = writeModulePtrs . begin ( ) ; it ! = writeModulePtrs . end ( ) ; + + it ) { <nl> + Optional < std : : string > msg = wait ( ( * it ) - > commit ( ryw ) ) ; <nl> + if ( msg . present ( ) ) { <nl> + ryw - > setSpecialKeySpaceErrorMsg ( msg . get ( ) ) ; <nl> + TraceEvent ( SevDebug , " SpecialKeySpaceManagementAPIError " ) <nl> + . detail ( " Reason " , msg . get ( ) ) <nl> + . detail ( " Range " , ( * it ) - > getKeyRange ( ) . toString ( ) ) ; <nl> + throw special_keys_api_failure ( ) ; <nl> + } <nl> + } <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + Future < Void > SpecialKeySpace : : commit ( ReadYourWritesTransaction * ryw ) { <nl> + return commitActor ( this , ryw ) ; <nl> + } <nl> + <nl> + ReadConflictRangeImpl : : ReadConflictRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { } <nl> <nl> ACTOR static Future < Standalone < RangeResultRef > > getReadConflictRangeImpl ( ReadYourWritesTransaction * ryw , KeyRange kr ) { <nl> wait ( ryw - > pendingReads ( ) ) ; <nl> Future < Standalone < RangeResultRef > > ReadConflictRangeImpl : : getRange ( ReadYourWrite <nl> return getReadConflictRangeImpl ( ryw , kr ) ; <nl> } <nl> <nl> - WriteConflictRangeImpl : : WriteConflictRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { } <nl> + WriteConflictRangeImpl : : WriteConflictRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { } <nl> <nl> Future < Standalone < RangeResultRef > > WriteConflictRangeImpl : : getRange ( ReadYourWritesTransaction * ryw , <nl> KeyRangeRef kr ) const { <nl> return ryw - > getWriteConflictRangeIntersecting ( kr ) ; <nl> } <nl> <nl> - ConflictingKeysImpl : : ConflictingKeysImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { } <nl> + ConflictingKeysImpl : : ConflictingKeysImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { } <nl> <nl> Future < Standalone < RangeResultRef > > ConflictingKeysImpl : : getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const { <nl> Standalone < RangeResultRef > result ; <nl> Future < Standalone < RangeResultRef > > ConflictingKeysImpl : : getRange ( ReadYourWritesT <nl> if ( beginIter - > begin ( ) ! = kr . begin ) + + beginIter ; <nl> auto endIter = krMapPtr - > rangeContaining ( kr . end ) ; <nl> for ( auto it = beginIter ; it ! = endIter ; + + it ) { <nl> - / / it - > begin ( ) is stored in the CoalescedKeyRangeMap in TransactionInfo <nl> - / / it - > value ( ) is always constants in SystemData . cpp <nl> - / / Thus , push_back ( ) can be used <nl> - result . push_back ( result .
arena ( ) , KeyValueRef ( it - > begin ( ) , it - > value ( ) ) ) ; <nl> + result . push_back_deep ( result . arena ( ) , KeyValueRef ( it - > begin ( ) , it - > value ( ) ) ) ; <nl> } <nl> if ( endIter - > begin ( ) ! = kr . end ) <nl> - result . push_back ( result . arena ( ) , KeyValueRef ( endIter - > begin ( ) , endIter - > value ( ) ) ) ; <nl> + result . push_back_deep ( result . arena ( ) , KeyValueRef ( endIter - > begin ( ) , endIter - > value ( ) ) ) ; <nl> } <nl> return result ; <nl> } <nl> DDStatsRangeImpl : : DDStatsRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeAsyncImpl ( kr <nl> Future < Standalone < RangeResultRef > > DDStatsRangeImpl : : getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const { <nl> return ddMetricsGetRangeActor ( ryw , kr ) ; <nl> } <nl> + <nl> + Key SpecialKeySpace : : getManagementApiCommandOptionSpecialKey ( const std : : string & command , const std : : string & option ) { <nl> + Key prefix = LiteralStringRef ( " options / " ) . withPrefix ( moduleToBoundary [ MODULE : : MANAGEMENT ] . begin ) ; <nl> + auto pair = command + " / " + option ; <nl> + ASSERT ( options . find ( pair ) ! = options . end ( ) ) ; <nl> + return prefix . withSuffix ( pair ) ; <nl> + } <nl> + <nl> + ManagementCommandsOptionsImpl : : ManagementCommandsOptionsImpl ( KeyRangeRef kr ) : SpecialKeyRangeRWImpl ( kr ) { } <nl> + <nl> + Future < Standalone < RangeResultRef > > ManagementCommandsOptionsImpl : : getRange ( ReadYourWritesTransaction * ryw , <nl> + KeyRangeRef kr ) const { <nl> + Standalone < RangeResultRef > result ; <nl> + / / Since we only have limit number of options , a brute force loop here is enough <nl> + for ( const auto & option : SpecialKeySpace : : getManagementApiOptionsSet ( ) ) { <nl> + auto key = getKeyRange ( ) . begin . withSuffix ( option ) ; <nl> + / / ignore all invalid keys <nl> + auto r = ryw - > getSpecialKeySpaceWriteMap ( ) [ key ] ; <nl> + if ( kr . contains ( key ) & & r . first & & r . second . present ( ) ) { <nl> + result . push_back ( result . arena ( ) , KeyValueRef ( key , ValueRef ( ) ) ) ; <nl> + result . arena ( ) . dependsOn ( key . arena ( ) ) ; <nl> + } <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + void ManagementCommandsOptionsImpl : : set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) { <nl> + std : : string option = key . removePrefix ( getKeyRange ( ) . begin ) . toString ( ) ; <nl> + / / ignore all invalid keys <nl> + if ( SpecialKeySpace : : getManagementApiOptionsSet ( ) . find ( option ) ! = <nl> + SpecialKeySpace : : getManagementApiOptionsSet ( ) . end ( ) ) { <nl> + TraceEvent ( SevDebug , " ManagementApiOption " ) . detail ( " Option " , option ) . detail ( " Key " , key ) ; <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . insert ( key , std : : make_pair ( true , Optional < Value > ( value ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + void ManagementCommandsOptionsImpl : : clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . rawErase ( range ) ; <nl> + } <nl> + <nl> + void ManagementCommandsOptionsImpl : : clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) { <nl> + std : : string option = key . removePrefix ( getKeyRange ( ) . begin ) . toString ( ) ; <nl> + / / ignore all invalid keys <nl> + if ( SpecialKeySpace : : getManagementApiOptionsSet ( ) . find ( option ) ! = <nl> + SpecialKeySpace : : getManagementApiOptionsSet ( ) . end ( ) ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . 
rawErase ( singleKeyRange ( key ) ) ; <nl> + } <nl> + } <nl> + <nl> + Key ManagementCommandsOptionsImpl : : decode ( const KeyRef & key ) const { <nl> + / / Should never be used <nl> + ASSERT ( false ) ; <nl> + return key ; <nl> + } <nl> + <nl> + Key ManagementCommandsOptionsImpl : : encode ( const KeyRef & key ) const { <nl> + / / Should never be used <nl> + ASSERT ( false ) ; <nl> + return key ; <nl> + } <nl> + <nl> + Future < Optional < std : : string > > ManagementCommandsOptionsImpl : : commit ( ReadYourWritesTransaction * ryw ) { <nl> + / / Nothing to do , keys should be used by other impls ' commit callback <nl> + return Optional < std : : string > ( ) ; <nl> + } <nl> + <nl> + / / read from rwModule <nl> + ACTOR Future < Standalone < RangeResultRef > > rwModuleGetRangeActor ( ReadYourWritesTransaction * ryw , <nl> + const SpecialKeyRangeRWImpl * impl , KeyRangeRef kr ) { <nl> + state KeyRangeRef range = impl - > getKeyRange ( ) ; <nl> + Standalone < RangeResultRef > resultWithoutPrefix = <nl> + wait ( ryw - > getRange ( ryw - > getDatabase ( ) - > specialKeySpace - > decode ( kr ) , CLIENT_KNOBS - > TOO_MANY ) ) ; <nl> + ASSERT ( ! resultWithoutPrefix . more & & resultWithoutPrefix . size ( ) < CLIENT_KNOBS - > TOO_MANY ) ; <nl> + Standalone < RangeResultRef > result ; <nl> + if ( ryw - > readYourWritesDisabled ( ) ) { <nl> + for ( const KeyValueRef & kv : resultWithoutPrefix ) <nl> + result . push_back_deep ( result . arena ( ) , KeyValueRef ( impl - > encode ( kv . key ) , kv . value ) ) ; <nl> + } else { <nl> + RangeMap < Key , std : : pair < bool , Optional < Value > > , KeyRangeRef > : : Ranges ranges = <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . containedRanges ( range ) ; <nl> + RangeMap < Key , std : : pair < bool , Optional < Value > > , KeyRangeRef > : : iterator iter = ranges . begin ( ) ; <nl> + int index = 0 ; <nl> + while ( iter ! = ranges . end ( ) ) { <nl> + / / add all previous entries into result <nl> + Key rk = impl - > encode ( resultWithoutPrefix [ index ] . key ) ; <nl> + while ( index < resultWithoutPrefix . size ( ) & & rk < iter - > begin ( ) ) { <nl> + result . push_back_deep ( result . arena ( ) , KeyValueRef ( rk , resultWithoutPrefix [ index ] . value ) ) ; <nl> + + + index ; <nl> + } <nl> + std : : pair < bool , Optional < Value > > entry = iter - > value ( ) ; <nl> + if ( entry . first ) { <nl> + / / add the written entries if they exist <nl> + if ( entry . second . present ( ) ) { <nl> + result . push_back_deep ( result . arena ( ) , KeyValueRef ( iter - > begin ( ) , entry . second . get ( ) ) ) ; <nl> + } <nl> + / / move index to skip all entries in the iter - > range <nl> + while ( index < resultWithoutPrefix . size ( ) & & <nl> + iter - > range ( ) . contains ( impl - > encode ( resultWithoutPrefix [ index ] . key ) ) ) <nl> + + + index ; <nl> + } <nl> + + + iter ; <nl> + } <nl> + / / add all remaining entries into result <nl> + while ( index < resultWithoutPrefix . size ( ) ) { <nl> + const KeyValueRef & kv = resultWithoutPrefix [ index ] ; <nl> + result . push_back_deep ( result . arena ( ) , KeyValueRef ( impl - > encode ( kv . key ) , kv .
value ) ) ; <nl> + + + index ; <nl> + } <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + ExcludeServersRangeImpl : : ExcludeServersRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeRWImpl ( kr ) { } <nl> + <nl> + Future < Standalone < RangeResultRef > > ExcludeServersRangeImpl : : getRange ( ReadYourWritesTransaction * ryw , <nl> + KeyRangeRef kr ) const { <nl> + return rwModuleGetRangeActor ( ryw , this , kr ) ; <nl> + } <nl> + <nl> + void ExcludeServersRangeImpl : : set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . insert ( key , std : : make_pair ( true , Optional < Value > ( value ) ) ) ; <nl> + } <nl> + <nl> + void ExcludeServersRangeImpl : : clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . insert ( key , std : : make_pair ( true , Optional < Value > ( ) ) ) ; <nl> + } <nl> + <nl> + void ExcludeServersRangeImpl : : clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . insert ( range , std : : make_pair ( true , Optional < Value > ( ) ) ) ; <nl> + } <nl> + <nl> + Key ExcludeServersRangeImpl : : decode ( const KeyRef & key ) const { <nl> + return key . removePrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . begin ) <nl> + . withPrefix ( LiteralStringRef ( " \ xff / conf / " ) ) ; <nl> + } <nl> + <nl> + Key ExcludeServersRangeImpl : : encode ( const KeyRef & key ) const { <nl> + return key . removePrefix ( LiteralStringRef ( " \ xff / conf / " ) ) <nl> + . withPrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . begin ) ; <nl> + } <nl> + <nl> + bool parseNetWorkAddrFromKeys ( ReadYourWritesTransaction * ryw , bool failed , std : : vector < AddressExclusion > & addresses , <nl> + std : : set < AddressExclusion > & exclusions , Optional < std : : string > & msg ) { <nl> + KeyRangeRef range = failed ? SpecialKeySpace : : getManamentApiCommandRange ( " failed " ) <nl> + : SpecialKeySpace : : getManamentApiCommandRange ( " exclude " ) ; <nl> + auto ranges = ryw - > getSpecialKeySpaceWriteMap ( ) . containedRanges ( range ) ; <nl> + auto iter = ranges . begin ( ) ; <nl> + while ( iter ! = ranges . end ( ) ) { <nl> + auto entry = iter - > value ( ) ; <nl> + / / only check for exclude ( set ) operation , include ( clear ) are not checked <nl> + TraceEvent ( SevInfo , " ParseNetworkAddress " ) <nl> + . detail ( " Valid " , entry . first ) <nl> + . detail ( " Set " , entry . second . present ( ) ) <nl> + . detail ( " Key " , iter - > begin ( ) . toString ( ) ) ; <nl> + if ( entry . first & & entry . second . present ( ) ) { <nl> + Key address = iter - > begin ( ) . removePrefix ( range . begin ) ; <nl> + auto a = AddressExclusion : : parse ( address ) ; <nl> + if ( ! a . isValid ( ) ) { <nl> + std : : string error = " ERROR : \ ' " + address . toString ( ) + " \ ' is not a valid network endpoint address \ n " ; <nl> + if ( address . toString ( ) . find ( " : tls " ) ! = std : : string : : npos ) <nl> + error + = " Do not include the ` : tls ' suffix when naming a process \ n " ; <nl> + msg = ManagementAPIError : : toJsonString ( <nl> + false , entry . second . present ( ) ? ( failed ? " exclude failed " : " exclude " ) : " include " , error ) ; <nl> + return false ; <nl> + } <nl> + addresses . push_back ( a ) ; <nl> + exclusions . 
insert ( a ) ; <nl> + } <nl> + + + iter ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + ACTOR Future < bool > checkExclusion ( Database db , std : : vector < AddressExclusion > * addresses , <nl> + std : : set < AddressExclusion > * exclusions , bool markFailed , Optional < std : : string > * msg ) { <nl> + <nl> + if ( markFailed ) { <nl> + state bool safe ; <nl> + try { <nl> + bool _safe = wait ( checkSafeExclusions ( db , * addresses ) ) ; <nl> + safe = _safe ; <nl> + } catch ( Error & e ) { <nl> + if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> + TraceEvent ( " CheckSafeExclusionsError " ) . error ( e ) ; <nl> + safe = false ; <nl> + } <nl> + if ( ! safe ) { <nl> + std : : string temp = " ERROR : It is unsafe to exclude the specified servers at this time . \ n " <nl> + " Please check that this exclusion does not bring down an entire storage team . \ n " <nl> + " Please also ensure that the exclusion will keep a majority of coordinators alive . \ n " <nl> + " You may add more storage processes or coordinators to make the operation safe . \ n " <nl> + " Call set ( \ " 0xff0xff / management / failed / < ADDRESS . . . > \ " , . . . ) to exclude without " <nl> + " performing safety checks . \ n " ; <nl> + * msg = ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , temp ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + StatusObject status = wait ( StatusClient : : statusFetcher ( db ) ) ; <nl> + state std : : string errorString = <nl> + " ERROR : Could not calculate the impact of this exclude on the total free space in the cluster . \ n " <nl> + " Please try the exclude again in 30 seconds . \ n " <nl> + " Call set ( \ " 0xff0xff / management / options / exclude / force \ " , . . . ) first to exclude without checking free " <nl> + " space . \ n " ; <nl> + <nl> + StatusObjectReader statusObj ( status ) ; <nl> + <nl> + StatusObjectReader statusObjCluster ; <nl> + if ( ! statusObj . get ( " cluster " , statusObjCluster ) ) { <nl> + * msg = ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , errorString ) ; <nl> + return false ; <nl> + } <nl> + <nl> + StatusObjectReader processesMap ; <nl> + if ( ! statusObjCluster . get ( " processes " , processesMap ) ) { <nl> + * msg = ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , errorString ) ; <nl> + return false ; <nl> + } <nl> + <nl> + state int ssTotalCount = 0 ; <nl> + state int ssExcludedCount = 0 ; <nl> + state double worstFreeSpaceRatio = 1 . 0 ; <nl> + try { <nl> + for ( auto proc : processesMap . obj ( ) ) { <nl> + bool storageServer = false ; <nl> + StatusArray rolesArray = proc . second . get_obj ( ) [ " roles " ] . get_array ( ) ; <nl> + for ( StatusObjectReader role : rolesArray ) { <nl> + if ( role [ " role " ] . get_str ( ) = = " storage " ) { <nl> + storageServer = true ; <nl> + break ; <nl> + } <nl> + } <nl> + / / Skip non - storage servers in free space calculation <nl> + if ( ! storageServer ) continue ; <nl> + <nl> + StatusObjectReader process ( proc . second ) ; <nl> + std : : string addrStr ; <nl> + if ( ! process . get ( " address " , addrStr ) ) { <nl> + * msg = ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , errorString ) ; <nl> + return false ; <nl> + } <nl> + NetworkAddress addr = NetworkAddress : : parse ( addrStr ) ; <nl> + bool excluded = <nl> + ( process . has ( " excluded " ) & & process . last ( ) . 
get_bool ( ) ) | | addressExcluded ( * exclusions , addr ) ; <nl> + ssTotalCount + + ; <nl> + if ( excluded ) ssExcludedCount + + ; <nl> + <nl> + if ( ! excluded ) { <nl> + StatusObjectReader disk ; <nl> + if ( ! process . get ( " disk " , disk ) ) { <nl> + * msg = <nl> + ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , errorString ) ; <nl> + return false ; <nl> + } <nl> + <nl> + int64_t total_bytes ; <nl> + if ( ! disk . get ( " total_bytes " , total_bytes ) ) { <nl> + * msg = <nl> + ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , errorString ) ; <nl> + return false ; <nl> + } <nl> + <nl> + int64_t free_bytes ; <nl> + if ( ! disk . get ( " free_bytes " , free_bytes ) ) { <nl> + * msg = <nl> + ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , errorString ) ; <nl> + return false ; <nl> + } <nl> + <nl> + worstFreeSpaceRatio = std : : min ( worstFreeSpaceRatio , double ( free_bytes ) / total_bytes ) ; <nl> + } <nl> + } <nl> + } catch ( . . . ) / / std : : exception <nl> + { <nl> + * msg = ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , errorString ) ; <nl> + return false ; <nl> + } <nl> + <nl> + if ( ssExcludedCount = = ssTotalCount | | <nl> + ( 1 - worstFreeSpaceRatio ) * ssTotalCount / ( ssTotalCount - ssExcludedCount ) > 0 . 9 ) { <nl> + std : : string temp = " ERROR : This exclude may cause the total free space in the cluster to drop below 10 % . \ n " <nl> + " Call set ( \ " 0xff0xff / management / options / exclude / force \ " , . . . ) first to exclude without " <nl> + " checking free space . \ n " ; <nl> + * msg = ManagementAPIError : : toJsonString ( false , markFailed ? " exclude failed " : " exclude " , temp ) ; <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + void includeServers ( ReadYourWritesTransaction * ryw ) { <nl> + ryw - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> + ryw - > setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; <nl> + ryw - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> + ryw - > setOption ( FDBTransactionOptions : : USE_PROVISIONAL_PROXIES ) ; <nl> + / / includeServers might be used in an emergency transaction , so make sure it is retry - self - conflicting and <nl> + / / CAUSAL_WRITE_RISKY <nl> + ryw - > setOption ( FDBTransactionOptions : : CAUSAL_WRITE_RISKY ) ; <nl> + std : : string versionKey = deterministicRandom ( ) - > randomUniqueID ( ) . toString ( ) ; <nl> + / / for excluded servers <nl> + auto ranges = <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . containedRanges ( SpecialKeySpace : : getManamentApiCommandRange ( " exclude " ) ) ; <nl> + auto iter = ranges . begin ( ) ; <nl> + Transaction & tr = ryw - > getTransaction ( ) ; <nl> + while ( iter ! = ranges . end ( ) ) { <nl> + auto entry = iter - > value ( ) ; <nl> + if ( entry . first & & ! entry . second . present ( ) ) { <nl> + tr . addReadConflictRange ( singleKeyRange ( excludedServersVersionKey ) ) ; <nl> + tr . set ( excludedServersVersionKey , versionKey ) ; <nl> + tr . clear ( ryw - > getDatabase ( ) - > specialKeySpace - > decode ( iter - > range ( ) ) ) ; <nl> + } <nl> + + + iter ; <nl> + } <nl> + / / for failed servers <nl> + ranges = ryw - > getSpecialKeySpaceWriteMap ( ) . containedRanges ( SpecialKeySpace : : getManamentApiCommandRange ( " failed " ) ) ; <nl> + iter = ranges . begin ( ) ; <nl> + while ( iter ! = ranges .
end ( ) ) { <nl> + auto entry = iter - > value ( ) ; <nl> + if ( entry . first & & ! entry . second . present ( ) ) { <nl> + tr . addReadConflictRange ( singleKeyRange ( failedServersVersionKey ) ) ; <nl> + tr . set ( failedServersVersionKey , versionKey ) ; <nl> + tr . clear ( ryw - > getDatabase ( ) - > specialKeySpace - > decode ( iter - > range ( ) ) ) ; <nl> + } <nl> + + + iter ; <nl> + } <nl> + } <nl> + <nl> + ACTOR Future < Optional < std : : string > > excludeCommitActor ( ReadYourWritesTransaction * ryw , bool failed ) { <nl> + / / parse network addresses <nl> + state Optional < std : : string > result ; <nl> + state std : : vector < AddressExclusion > addresses ; <nl> + state std : : set < AddressExclusion > exclusions ; <nl> + if ( ! parseNetWorkAddrFromKeys ( ryw , failed , addresses , exclusions , result ) ) return result ; <nl> + / / If force option is not set , we need to do safety check <nl> + auto force = ryw - > getSpecialKeySpaceWriteMap ( ) [ SpecialKeySpace : : getManagementApiCommandOptionSpecialKey ( <nl> + failed ? " failed " : " excluded " , " force " ) ] ; <nl> + / / only do safety check when we have servers to be excluded and the force option key is not set <nl> + if ( addresses . size ( ) & & ! ( force . first & & force . second . present ( ) ) ) { <nl> + bool safe = wait ( checkExclusion ( ryw - > getDatabase ( ) , & addresses , & exclusions , failed , & result ) ) ; <nl> + if ( ! safe ) return result ; <nl> + } <nl> + excludeServers ( ryw - > getTransaction ( ) , addresses , failed ) ; <nl> + includeServers ( ryw ) ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + Future < Optional < std : : string > > ExcludeServersRangeImpl : : commit ( ReadYourWritesTransaction * ryw ) { <nl> + return excludeCommitActor ( ryw , false ) ; <nl> + } <nl> + <nl> + FailedServersRangeImpl : : FailedServersRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeRWImpl ( kr ) { } <nl> + <nl> + Future < Standalone < RangeResultRef > > FailedServersRangeImpl : : getRange ( ReadYourWritesTransaction * ryw , <nl> + KeyRangeRef kr ) const { <nl> + return rwModuleGetRangeActor ( ryw , this , kr ) ; <nl> + } <nl> + <nl> + void FailedServersRangeImpl : : set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . insert ( key , std : : make_pair ( true , Optional < Value > ( value ) ) ) ; <nl> + } <nl> + <nl> + void FailedServersRangeImpl : : clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . insert ( key , std : : make_pair ( true , Optional < Value > ( ) ) ) ; <nl> + } <nl> + <nl> + void FailedServersRangeImpl : : clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) { <nl> + ryw - > getSpecialKeySpaceWriteMap ( ) . insert ( range , std : : make_pair ( true , Optional < Value > ( ) ) ) ; <nl> + } <nl> + <nl> + Key FailedServersRangeImpl : : decode ( const KeyRef & key ) const { <nl> + return key . removePrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . begin ) <nl> + . withPrefix ( LiteralStringRef ( " \ xff / conf / " ) ) ; <nl> + } <nl> + <nl> + Key FailedServersRangeImpl : : encode ( const KeyRef & key ) const { <nl> + return key . removePrefix ( LiteralStringRef ( " \ xff / conf / " ) ) <nl> + . withPrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . 
begin ) ; <nl> + } <nl> + <nl> + Future < Optional < std : : string > > FailedServersRangeImpl : : commit ( ReadYourWritesTransaction * ryw ) { <nl> + return excludeCommitActor ( ryw , true ) ; <nl> + } <nl> + <nl> + ACTOR Future < Standalone < RangeResultRef > > ExclusionInProgressActor ( ReadYourWritesTransaction * ryw , KeyRef prefix , <nl> + KeyRangeRef kr ) { <nl> + state Standalone < RangeResultRef > result ; <nl> + state Transaction & tr = ryw - > getTransaction ( ) ; <nl> + tr . setOption ( FDBTransactionOptions : : READ_SYSTEM_KEYS ) ; <nl> + tr . setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; / / necessary ? <nl> + tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> + <nl> + state std : : vector < AddressExclusion > excl = wait ( ( getExcludedServers ( & tr ) ) ) ; <nl> + state std : : set < AddressExclusion > exclusions ( excl . begin ( ) , excl . end ( ) ) ; <nl> + state std : : set < NetworkAddress > inProgressExclusion ; <nl> + / / Just getting a consistent read version proves that a set of tlogs satisfying the exclusions has completed <nl> + / / recovery Check that there aren ' t any storage servers with addresses violating the exclusions <nl> + state Standalone < RangeResultRef > serverList = wait ( tr . getRange ( serverListKeys , CLIENT_KNOBS - > TOO_MANY ) ) ; <nl> + ASSERT ( ! serverList . more & & serverList . size ( ) < CLIENT_KNOBS - > TOO_MANY ) ; <nl> + <nl> + for ( auto & s : serverList ) { <nl> + auto addresses = decodeServerListValue ( s . value ) . getKeyValues . getEndpoint ( ) . addresses ; <nl> + if ( addressExcluded ( exclusions , addresses . address ) ) { <nl> + inProgressExclusion . insert ( addresses . address ) ; <nl> + } <nl> + if ( addresses . secondaryAddress . present ( ) & & addressExcluded ( exclusions , addresses . secondaryAddress . get ( ) ) ) { <nl> + inProgressExclusion . insert ( addresses . secondaryAddress . get ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + Optional < Standalone < StringRef > > value = wait ( tr . get ( logsKey ) ) ; <nl> + ASSERT ( value . present ( ) ) ; <nl> + auto logs = decodeLogsValue ( value . get ( ) ) ; <nl> + for ( auto const & log : logs . first ) { <nl> + if ( log . second = = NetworkAddress ( ) | | addressExcluded ( exclusions , log . second ) ) { <nl> + inProgressExclusion . insert ( log . second ) ; <nl> + } <nl> + } <nl> + for ( auto const & log : logs . second ) { <nl> + if ( log . second = = NetworkAddress ( ) | | addressExcluded ( exclusions , log . second ) ) { <nl> + inProgressExclusion . insert ( log . second ) ; <nl> + } <nl> + } <nl> + <nl> + for ( auto const & address : inProgressExclusion ) { <nl> + Key addrKey = prefix . withSuffix ( address . toString ( ) ) ; <nl> + if ( kr . contains ( addrKey ) ) { <nl> + result . push_back ( result . arena ( ) , KeyValueRef ( addrKey , ValueRef ( ) ) ) ; <nl> + result . arena ( ) . dependsOn ( addrKey . arena ( ) ) ; <nl> + } <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + ExclusionInProgressRangeImpl : : ExclusionInProgressRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeAsyncImpl ( kr ) { } <nl> + <nl> + Future < Standalone < RangeResultRef > > ExclusionInProgressRangeImpl : : getRange ( ReadYourWritesTransaction * ryw , <nl> + KeyRangeRef kr ) const { <nl> + return ExclusionInProgressActor ( ryw , getKeyRange ( ) . begin , kr ) ; <nl> + } <nl> mmm a / fdbclient / SpecialKeySpace . actor . h <nl> ppp b / fdbclient / SpecialKeySpace . actor . h <nl> <nl> # include " fdbclient / ReadYourWrites . 
h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> - class SpecialKeyRangeBaseImpl { <nl> + class SpecialKeyRangeReadImpl { <nl> public : <nl> / / Each derived class only needs to implement this simple version of getRange <nl> virtual Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const = 0 ; <nl> <nl> - explicit SpecialKeyRangeBaseImpl ( KeyRangeRef kr ) : range ( kr ) { } <nl> + explicit SpecialKeyRangeReadImpl ( KeyRangeRef kr ) : range ( kr ) { } <nl> KeyRangeRef getKeyRange ( ) const { return range ; } <nl> / / true if the getRange call can emit more than one rpc calls , <nl> / / we cache the results to keep consistency in the same getrange lifetime <nl> / / TODO : give this function a more descriptive name <nl> virtual bool isAsync ( ) const { return false ; } <nl> <nl> - virtual ~ SpecialKeyRangeBaseImpl ( ) { } <nl> + virtual ~ SpecialKeyRangeReadImpl ( ) { } <nl> <nl> protected : <nl> KeyRange range ; / / underlying key range for this function <nl> } ; <nl> <nl> - class SpecialKeyRangeAsyncImpl : public SpecialKeyRangeBaseImpl { <nl> + class ManagementAPIError { <nl> public : <nl> - explicit SpecialKeyRangeAsyncImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { } <nl> + static std : : string toJsonString ( bool retriable , const std : : string & command , const std : : string & msg ) { <nl> + json_spirit : : mObject errorObj ; <nl> + errorObj [ " retriable " ] = retriable ; <nl> + errorObj [ " command " ] = command ; <nl> + errorObj [ " message " ] = msg ; <nl> + return json_spirit : : write_string ( json_spirit : : mValue ( errorObj ) , json_spirit : : Output_options : : raw_utf8 ) ; <nl> + } <nl> + <nl> + private : <nl> + ManagementAPIError ( ) { } ; <nl> + } ; <nl> + <nl> + class SpecialKeyRangeRWImpl : public SpecialKeyRangeReadImpl { <nl> + public : <nl> + virtual void set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) = 0 ; <nl> + virtual void clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) = 0 ; <nl> + virtual void clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) = 0 ; <nl> + virtual Future < Optional < std : : string > > commit ( <nl> + ReadYourWritesTransaction * ryw ) = 0 ; / / all delayed async operations of writes in special - key - space <nl> + / / Given the special key to write , return the real key that needs to be modified <nl> + virtual Key decode ( const KeyRef & key ) const = 0 ; <nl> + / / Given the read key , return the corresponding special key <nl> + virtual Key encode ( const KeyRef & key ) const = 0 ; <nl> + <nl> + explicit SpecialKeyRangeRWImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { } <nl> + <nl> + virtual ~ SpecialKeyRangeRWImpl ( ) { } <nl> + } ; <nl> + <nl> + class SpecialKeyRangeAsyncImpl : public SpecialKeyRangeReadImpl { <nl> + public : <nl> + explicit SpecialKeyRangeAsyncImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { } <nl> <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const = 0 ; <nl> <nl> class SpecialKeyRangeAsyncImpl : public SpecialKeyRangeBaseImpl { <nl> <nl> bool isAsync ( ) const override { return true ; } <nl> <nl> - ACTOR static Future < Standalone < RangeResultRef > > getRangeAsyncActor ( const SpecialKeyRangeBaseImpl * skrAyncImpl , <nl> + ACTOR static Future < Standalone < RangeResultRef > > getRangeAsyncActor ( const SpecialKeyRangeReadImpl * skrAyncImpl , <nl> 
ReadYourWritesTransaction * ryw , KeyRangeRef kr , <nl> Optional < Standalone < RangeResultRef > > * cache ) { <nl> ASSERT ( skrAyncImpl - > getKeyRange ( ) . contains ( kr ) ) ; <nl> class SpecialKeySpace { <nl> enum class MODULE { <nl> CLUSTERFILEPATH , <nl> CONNECTIONSTRING , <nl> + ERRORMSG , / / A single key space contains a json string which describes the last error in special - key - space <nl> + MANAGEMENT , / / Management - API <nl> METRICS , / / data - distribution metrics <nl> TESTONLY , / / only used by correctness tests <nl> TRANSACTION , / / transaction related info , conflicting keys , read / write conflict range <nl> class SpecialKeySpace { <nl> WORKERINTERFACE , <nl> } ; <nl> <nl> + enum class IMPLTYPE { <nl> + READONLY , / / The underlying special key range can only be called with get and getRange <nl> + READWRITE / / The underlying special key range can be called with get , getRange , set , clear <nl> + } ; <nl> + <nl> + SpecialKeySpace ( KeyRef spaceStartKey = Key ( ) , KeyRef spaceEndKey = normalKeys . end , bool testOnly = true ) ; <nl> + <nl> Future < Optional < Value > > get ( ReadYourWritesTransaction * ryw , const Key & key ) ; <nl> <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeySelector begin , KeySelector end , <nl> GetRangeLimits limits , bool reverse = false ) ; <nl> <nl> - SpecialKeySpace ( KeyRef spaceStartKey = Key ( ) , KeyRef spaceEndKey = normalKeys . end , bool testOnly = true ) <nl> - : range ( KeyRangeRef ( spaceStartKey , spaceEndKey ) ) , impls ( nullptr , spaceEndKey ) , <nl> - modules ( testOnly ? SpecialKeySpace : : MODULE : : TESTONLY : SpecialKeySpace : : MODULE : : UNKNOWN , spaceEndKey ) { <nl> - / / Default begin of KeyRangeMap is Key ( ) , insert the range to update start key if needed <nl> - impls . insert ( range , nullptr ) ; <nl> - if ( ! testOnly ) modulesBoundaryInit ( ) ; / / testOnly is used in the correctness workload <nl> - } <nl> - / / Initialize module boundaries , used to handle cross_module_read <nl> - void modulesBoundaryInit ( ) { <nl> - for ( const auto & pair : moduleToBoundary ) { <nl> - ASSERT ( range . contains ( pair . second ) ) ; <nl> - / / Make sure the module is not overlapping with any registered modules <nl> - / / Note : same like ranges , one module ' s end cannot be another module ' s start , relax the condition if needed <nl> - ASSERT ( modules . rangeContaining ( pair . second . begin ) = = modules . rangeContaining ( pair . second . end ) & & <nl> - modules [ pair . second . begin ] = = SpecialKeySpace : : MODULE : : UNKNOWN ) ; <nl> - modules . insert ( pair . second , pair . first ) ; <nl> - impls . insert ( pair . second , nullptr ) ; / / Note : Due to underlying implementation , the insertion here is <nl> - / / important to make cross_module_read being handled correctly <nl> - } <nl> - } <nl> - void registerKeyRange ( SpecialKeySpace : : MODULE module , const KeyRangeRef & kr , SpecialKeyRangeBaseImpl * impl ) { <nl> - / / module boundary check <nl> - if ( module = = SpecialKeySpace : : MODULE : : TESTONLY ) <nl> - ASSERT ( normalKeys . contains ( kr ) ) ; <nl> - else <nl> - ASSERT ( moduleToBoundary . at ( module ) . contains ( kr ) ) ; <nl> - / / make sure the registered range is not overlapping with existing ones <nl> - / / Note : kr . end should not be the same as another range ' s begin , although it should work even they are the same <nl> - for ( auto iter = impls . rangeContaining ( kr . 
begin ) ; true ; + + iter ) { <nl> - ASSERT ( iter - > value ( ) = = nullptr ) ; <nl> - if ( iter = = impls . rangeContaining ( kr . end ) ) <nl> - break ; / / relax the condition that the end can be another range ' s start , if needed <nl> - } <nl> - impls . insert ( kr , impl ) ; <nl> - } <nl> + void set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) ; <nl> + <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) ; <nl> + <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) ; <nl> + <nl> + Future < Void > commit ( ReadYourWritesTransaction * ryw ) ; <nl> <nl> - KeyRangeMap < SpecialKeyRangeBaseImpl * > & getImpls ( ) { return impls ; } <nl> + void registerKeyRange ( SpecialKeySpace : : MODULE module , SpecialKeySpace : : IMPLTYPE type , const KeyRangeRef & kr , <nl> + SpecialKeyRangeReadImpl * impl ) ; <nl> + <nl> + Key decode ( const KeyRef & key ) ; <nl> + KeyRange decode ( const KeyRangeRef & kr ) ; <nl> + <nl> + KeyRangeMap < SpecialKeyRangeReadImpl * > & getReadImpls ( ) { return readImpls ; } <nl> + KeyRangeMap < SpecialKeyRangeRWImpl * > & getRWImpls ( ) { return writeImpls ; } <nl> KeyRangeMap < SpecialKeySpace : : MODULE > & getModules ( ) { return modules ; } <nl> KeyRangeRef getKeyRange ( ) const { return range ; } <nl> + static KeyRangeRef getModuleRange ( SpecialKeySpace : : MODULE module ) { return moduleToBoundary . at ( module ) ; } <nl> + static KeyRangeRef getManamentApiCommandRange ( const std : : string & command ) { <nl> + return managementApiCommandToRange . at ( command ) ; <nl> + } <nl> + static KeyRef getManagementApiCommandPrefix ( const std : : string & command ) { <nl> + return managementApiCommandToRange . at ( command ) . begin ; <nl> + } <nl> + static Key getManagementApiCommandOptionSpecialKey ( const std : : string & command , const std : : string & option ) ; <nl> + static const std : : set < std : : string > & getManagementApiOptionsSet ( ) { return options ; } <nl> <nl> private : <nl> ACTOR static Future < Optional < Value > > getActor ( SpecialKeySpace * sks , ReadYourWritesTransaction * ryw , KeyRef key ) ; <nl> <nl> - ACTOR static Future < Standalone < RangeResultRef > > checkRYWValid ( SpecialKeySpace * sks , <nl> - ReadYourWritesTransaction * ryw , KeySelector begin , <nl> - KeySelector end , GetRangeLimits limits , <nl> - bool reverse ) ; <nl> + ACTOR static Future < Standalone < RangeResultRef > > checkRYWValid ( SpecialKeySpace * sks , ReadYourWritesTransaction * ryw , <nl> + KeySelector begin , KeySelector end , <nl> + GetRangeLimits limits , bool reverse ) ; <nl> ACTOR static Future < Standalone < RangeResultRef > > getRangeAggregationActor ( SpecialKeySpace * sks , <nl> ReadYourWritesTransaction * ryw , <nl> KeySelector begin , KeySelector end , <nl> GetRangeLimits limits , bool reverse ) ; <nl> - KeyRange range ; <nl> - KeyRangeMap < SpecialKeyRangeBaseImpl * > impls ; <nl> + <nl> + KeyRangeMap < SpecialKeyRangeReadImpl * > readImpls ; <nl> KeyRangeMap < SpecialKeySpace : : MODULE > modules ; <nl> + KeyRangeMap < SpecialKeyRangeRWImpl * > writeImpls ; <nl> + KeyRange range ; / / key space range , ( \ xff \ xff , \ xff \ xff \ xff ) in prod and ( , \ xff ) in test <nl> <nl> static std : : unordered_map < SpecialKeySpace : : MODULE , KeyRange > moduleToBoundary ; <nl> + static std : : unordered_map < std : : string , KeyRange > <nl> + managementApiCommandToRange ; / / management command to its special keys ' range <nl> + static std : : set < std : : string > 
options ; / / " < command > / < option > " <nl> + <nl> + / / Initialize module boundaries , used to handle cross_module_read <nl> + void modulesBoundaryInit ( ) ; <nl> } ; <nl> <nl> / / Use special key prefix " \ xff \ xff / transaction / conflicting_keys / < some_key > " , <nl> class SpecialKeySpace { <nl> / / prefix / < key1 > : ' 1 ' - any keys equal or larger than this key are ( probably ) conflicting keys <nl> / / prefix / < key2 > : ' 0 ' - any keys equal or larger than this key are ( definitely ) not conflicting keys <nl> / / Currently , the conflicting keyranges returned are original read_conflict_ranges or union of them . <nl> - class ConflictingKeysImpl : public SpecialKeyRangeBaseImpl { <nl> + class ConflictingKeysImpl : public SpecialKeyRangeReadImpl { <nl> public : <nl> explicit ConflictingKeysImpl ( KeyRangeRef kr ) ; <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> } ; <nl> <nl> - class ReadConflictRangeImpl : public SpecialKeyRangeBaseImpl { <nl> + class ReadConflictRangeImpl : public SpecialKeyRangeReadImpl { <nl> public : <nl> explicit ReadConflictRangeImpl ( KeyRangeRef kr ) ; <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> } ; <nl> <nl> - class WriteConflictRangeImpl : public SpecialKeyRangeBaseImpl { <nl> + class WriteConflictRangeImpl : public SpecialKeyRangeReadImpl { <nl> public : <nl> explicit WriteConflictRangeImpl ( KeyRangeRef kr ) ; <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> class DDStatsRangeImpl : public SpecialKeyRangeAsyncImpl { <nl> Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> } ; <nl> <nl> + class ManagementCommandsOptionsImpl : public SpecialKeyRangeRWImpl { <nl> + public : <nl> + explicit ManagementCommandsOptionsImpl ( KeyRangeRef kr ) ; <nl> + Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> + void set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) override ; <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) override ; <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) override ; <nl> + Key decode ( const KeyRef & key ) const override ; <nl> + Key encode ( const KeyRef & key ) const override ; <nl> + Future < Optional < std : : string > > commit ( ReadYourWritesTransaction * ryw ) override ; <nl> + } ; <nl> + <nl> + class ExcludeServersRangeImpl : public SpecialKeyRangeRWImpl { <nl> + public : <nl> + explicit ExcludeServersRangeImpl ( KeyRangeRef kr ) ; <nl> + Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> + void set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) override ; <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) override ; <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) override ; <nl> + Key decode ( const KeyRef & key ) const override ; <nl> + Key encode ( const KeyRef & key ) const override ; <nl> + Future < Optional < std : : string > > commit ( ReadYourWritesTransaction * ryw ) override ; <nl> + } ; <nl> + <nl> + class FailedServersRangeImpl : public SpecialKeyRangeRWImpl { <nl> + 
public : <nl> + explicit FailedServersRangeImpl ( KeyRangeRef kr ) ; <nl> + Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> + void set ( ReadYourWritesTransaction * ryw , const KeyRef & key , const ValueRef & value ) override ; <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRangeRef & range ) override ; <nl> + void clear ( ReadYourWritesTransaction * ryw , const KeyRef & key ) override ; <nl> + Key decode ( const KeyRef & key ) const override ; <nl> + Key encode ( const KeyRef & key ) const override ; <nl> + Future < Optional < std : : string > > commit ( ReadYourWritesTransaction * ryw ) override ; <nl> + } ; <nl> + <nl> + class ExclusionInProgressRangeImpl : public SpecialKeyRangeAsyncImpl { <nl> + public : <nl> + explicit ExclusionInProgressRangeImpl ( KeyRangeRef kr ) ; <nl> + Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const override ; <nl> + } ; <nl> + <nl> # include " flow / unactorcompiler . h " <nl> # endif <nl> mmm a / fdbclient / TagThrottle . actor . cpp <nl> ppp b / fdbclient / TagThrottle . actor . cpp <nl> namespace ThrottleApi { <nl> loop { <nl> try { <nl> Optional < Value > value = wait ( tr . get ( tagThrottleAutoEnabledKey ) ) ; <nl> - if ( ! value . present ( ) | | ( enabled & & value . get ( ) ! = LiteralStringRef ( " 1 " ) | | ( ! enabled & & value . get ( ) ! = LiteralStringRef ( " 0 " ) ) ) ) { <nl> + if ( ! value . present ( ) | | ( enabled & & value . get ( ) ! = LiteralStringRef ( " 1 " ) ) | | ( ! enabled & & value . get ( ) ! = LiteralStringRef ( " 0 " ) ) ) { <nl> tr . set ( tagThrottleAutoEnabledKey , LiteralStringRef ( enabled ? " 1 " : " 0 " ) ) ; <nl> signalThrottleChange ( tr ) ; <nl> <nl> namespace ThrottleApi { <nl> } <nl> } <nl> } <nl> - } <nl> \ No newline at end of file <nl> + } <nl> similarity index 99 % <nl> rename from fdbclient / ThreadSafeTransaction . actor . cpp <nl> rename to fdbclient / ThreadSafeTransaction . cpp <nl> mmm a / fdbclient / ThreadSafeTransaction . actor . cpp <nl> ppp b / fdbclient / ThreadSafeTransaction . cpp <nl> <nl> / * <nl> - * ThreadSafeTransaction . actor . cpp <nl> + * ThreadSafeTransaction . cpp <nl> * <nl> * This source file is part of the FoundationDB open source project <nl> * <nl> mmm a / fdbclient / vexillographer / fdb . options <nl> ppp b / fdbclient / vexillographer / fdb . options <nl> description is not currently required but encouraged . <nl> description = " The transaction can retrieve keys that are conflicting with other transactions . " / > <nl> < Option name = " special_key_space_relaxed " code = " 713 " <nl> description = " By default , the special key space will only allow users to read from exactly one module ( a subspace in the special key space ) . Use this option to allow reading from zero or more modules . Users who set this option should be prepared for new modules , which may have different behaviors than the modules they ' re currently reading . For example , a new module might block or return an error . " / > <nl> + < Option name = " special_key_space_enable_writes " code = " 714 " <nl> + description = " By default , users are not allowed to write to special keys . Enable this option will implicitly enable all options required to achieve the configuration change . " / > <nl> < Option name = " tag " code = " 800 " paramType = " String " paramDescription = " String identifier used to associated this transaction with a throttling group . 
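The special-key-space pieces above compose as follows from a client's point of view. This is a minimal sketch, not part of the patch: it assumes the generated option constant SPECIAL_KEY_SPACE_ENABLE_WRITES (derived from the fdb.options entry above) and a hypothetical exclude prefix under \xff\xff/management/; the exact key layout of the management module is an assumption here.

ACTOR Future<Void> excludeViaSpecialKeys(Database cx, std::string address) {
	state ReadYourWritesTransaction tr(cx);
	loop {
		try {
			// Writes into \xff\xff are rejected unless this option is set (code 714 above).
			tr.setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
			// Hypothetical key: the management-module exclude prefix plus the address to exclude.
			tr.set(Key("\xff\xff/management/excluded/" + address), ValueRef());
			// SpecialKeySpace::commit() dispatches to the commit hooks of the registered
			// SpecialKeyRangeRWImpl instances (e.g. ExcludeServersRangeImpl above).
			wait(tr.commit());
			return Void();
		} catch (Error& e) {
			wait(tr.onError(e));
		}
	}
}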
Must not exceed 16 characters . " <nl> description = " Adds a tag to the transaction that can be used to apply manual targeted throttling . At most 5 tags can be set on a transaction . " / > <nl> < Option name = " auto_throttle_tag " code = " 801 " paramType = " String " paramDescription = " String identifier used to associated this transaction with a throttling group . Must not exceed 16 characters . " <nl> mmm a / fdbrpc / FlowTransport . actor . cpp <nl> ppp b / fdbrpc / FlowTransport . actor . cpp <nl> ACTOR Future < Void > connectionWriter ( Reference < Peer > self , Reference < IConnection <nl> loop { <nl> lastWriteTime = now ( ) ; <nl> <nl> - int sent = conn - > write ( self - > unsent . getUnsent ( ) , / * limit = * / FLOW_KNOBS - > MAX_PACKET_SEND_BYTES ) ; <nl> - if ( sent ) { <nl> + int sent = conn - > write ( self - > unsent . getUnsent ( ) , FLOW_KNOBS - > MAX_PACKET_SEND_BYTES ) ; <nl> + <nl> + if ( sent ! = 0 ) { <nl> self - > transport - > bytesSent + = sent ; <nl> self - > unsent . sent ( sent ) ; <nl> } <nl> - if ( self - > unsent . empty ( ) ) break ; <nl> + <nl> + if ( self - > unsent . empty ( ) ) { <nl> + break ; <nl> + } <nl> <nl> TEST ( true ) ; / / We didn ' t write everything , so apparently the write buffer is full . Wait for it to be nonfull . <nl> wait ( conn - > onWritable ( ) ) ; <nl> mmm a / fdbrpc / sim2 . actor . cpp <nl> ppp b / fdbrpc / sim2 . actor . cpp <nl> class Sim2 : public ISimulator , public INetworkConnections { <nl> if ( tooManyDead ) { <nl> newKt = Reboot ; <nl> canSurvive = false ; <nl> - TraceEvent ( " KillChanged " ) . detail ( " KillType " , kt ) . detail ( " NewKillType " , newKt ) . detail ( " TLogPolicy " , tLogPolicy - > info ( ) ) . detail ( " Reason " , " tLogPolicy validates against dead processes . " ) ; <nl> + TraceEvent ( " KillChanged " ) <nl> + . detail ( " KillType " , kt ) <nl> + . detail ( " NewKillType " , newKt ) <nl> + . detail ( " TLogPolicy " , tLogPolicy - > info ( ) ) <nl> + . detail ( " Reason " , " Too many dead processes that cannot satisfy tLogPolicy . " ) ; <nl> } <nl> / / Reboot and Delete if remaining machines do NOT fulfill policies <nl> else if ( ( kt < RebootAndDelete ) & & notEnoughLeft ) { <nl> newKt = RebootAndDelete ; <nl> canSurvive = false ; <nl> - TraceEvent ( " KillChanged " ) . detail ( " KillType " , kt ) . detail ( " NewKillType " , newKt ) . detail ( " TLogPolicy " , tLogPolicy - > info ( ) ) . detail ( " Reason " , " tLogPolicy does not validates against remaining processes . " ) ; <nl> + TraceEvent ( " KillChanged " ) <nl> + . detail ( " KillType " , kt ) <nl> + . detail ( " NewKillType " , newKt ) <nl> + . detail ( " TLogPolicy " , tLogPolicy - > info ( ) ) <nl> + . detail ( " Reason " , " Not enough tLog left to satisfy tLogPolicy . " ) ; <nl> } <nl> else if ( ( kt < RebootAndDelete ) & & ( nQuorum > uniqueMachines . size ( ) ) ) { <nl> newKt = RebootAndDelete ; <nl> canSurvive = false ; <nl> - TraceEvent ( " KillChanged " ) . detail ( " KillType " , kt ) . detail ( " NewKillType " , newKt ) . detail ( " StoragePolicy " , storagePolicy - > info ( ) ) . detail ( " Quorum " , nQuorum ) . detail ( " Machines " , uniqueMachines . size ( ) ) . detail ( " Reason " , " Not enough unique machines to perform auto configuration of coordinators . " ) ; <nl> + TraceEvent ( " KillChanged " ) <nl> + . detail ( " KillType " , kt ) <nl> + . detail ( " NewKillType " , newKt ) <nl> + . detail ( " StoragePolicy " , storagePolicy - > info ( ) ) <nl> + . detail ( " Quorum " , nQuorum ) <nl> + . 
detail ( " Machines " , uniqueMachines . size ( ) ) <nl> + . detail ( " Reason " , " Not enough unique machines to perform auto configuration of coordinators . " ) ; <nl> } <nl> else { <nl> - TraceEvent ( " CanSurviveKills " ) . detail ( " KillType " , kt ) . detail ( " TLogPolicy " , tLogPolicy - > info ( ) ) . detail ( " StoragePolicy " , storagePolicy - > info ( ) ) . detail ( " Quorum " , nQuorum ) . detail ( " Machines " , uniqueMachines . size ( ) ) ; <nl> + TraceEvent ( " CanSurviveKills " ) <nl> + . detail ( " KillType " , kt ) <nl> + . detail ( " TLogPolicy " , tLogPolicy - > info ( ) ) <nl> + . detail ( " StoragePolicy " , storagePolicy - > info ( ) ) <nl> + . detail ( " Quorum " , nQuorum ) <nl> + . detail ( " Machines " , uniqueMachines . size ( ) ) ; <nl> } <nl> } <nl> if ( newKillType ) * newKillType = newKt ; <nl> mmm a / fdbserver / ApplyMetadataMutation . cpp <nl> ppp b / fdbserver / ApplyMetadataMutation . cpp <nl> Reference < StorageInfo > getStorageInfo ( UID id , std : : map < UID , Reference < StorageInf <nl> / / It is incredibly important that any modifications to txnStateStore are done in such a way that <nl> / / the same operations will be done on all proxies at the same time . Otherwise , the data stored in <nl> / / txnStateStore will become corrupted . <nl> - void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRef > const & mutations , IKeyValueStore * txnStateStore , LogPushData * toCommit , bool * confChange , Reference < ILogSystem > logSystem , Version popVersion , <nl> - KeyRangeMap < std : : set < Key > > * vecBackupKeys , KeyRangeMap < ServerCacheInfo > * keyInfo , KeyRangeMap < bool > * cacheInfo , std : : map < Key , applyMutationsData > * uid_applyMutationsData , RequestStream < CommitTransactionRequest > commit , <nl> - Database cx , NotifiedVersion * commitVersion , std : : map < UID , Reference < StorageInfo > > * storageCache , std : : map < Tag , Version > * tag_popped , bool initialCommit ) { <nl> + void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRef > const & mutations , <nl> + IKeyValueStore * txnStateStore , LogPushData * toCommit , bool & confChange , <nl> + Reference < ILogSystem > logSystem , Version popVersion , <nl> + KeyRangeMap < std : : set < Key > > * vecBackupKeys , KeyRangeMap < ServerCacheInfo > * keyInfo , <nl> + KeyRangeMap < bool > * cacheInfo , std : : map < Key , ApplyMutationsData > * uid_applyMutationsData , <nl> + RequestStream < CommitTransactionRequest > commit , Database cx , NotifiedVersion * commitVersion , <nl> + std : : map < UID , Reference < StorageInfo > > * storageCache , std : : map < Tag , Version > * tag_popped , <nl> + bool initialCommit ) { <nl> / / std : : map < keyRef , vector < uint16_t > > cacheRangeInfo ; <nl> std : : map < KeyRef , MutationRef > cachedRangeInfo ; <nl> for ( auto const & m : mutations ) { <nl> void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRe <nl> . detail ( " M " , m . toString ( ) ) <nl> . detail ( " PrevValue " , t . present ( ) ? t . get ( ) : LiteralStringRef ( " ( none ) " ) ) <nl> . detail ( " ToCommit " , toCommit ! = nullptr ) ; <nl> - if ( confChange ) * confChange = true ; <nl> + confChange = true ; <nl> } <nl> } <nl> if ( ! initialCommit ) txnStateStore - > set ( KeyValueRef ( m . param1 , m . 
param2 ) ) ; <nl> void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRe <nl> Version requested = BinaryReader : : fromStringRef < Version > ( m . param2 , Unversioned ( ) ) ; <nl> TraceEvent ( " MinRequiredCommitVersion " , dbgid ) . detail ( " Min " , requested ) . detail ( " Current " , popVersion ) . detail ( " HasConf " , ! ! confChange ) ; <nl> if ( ! initialCommit ) txnStateStore - > set ( KeyValueRef ( m . param1 , m . param2 ) ) ; <nl> - if ( confChange ) * confChange = true ; <nl> + confChange = true ; <nl> TEST ( true ) ; / / Recovering at a higher version . <nl> } <nl> } <nl> void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRe <nl> if ( ! initialCommit ) txnStateStore - > clear ( range & configKeys ) ; <nl> if ( ! excludedServersKeys . contains ( range ) & & ! failedServersKeys . contains ( range ) ) { <nl> TraceEvent ( " MutationRequiresRestart " , dbgid ) . detail ( " M " , m . toString ( ) ) ; <nl> - if ( confChange ) * confChange = true ; <nl> + confChange = true ; <nl> } <nl> } <nl> if ( serverListKeys . intersects ( range ) ) { <nl> void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRe <nl> auto serverKeysCleared = txnStateStore - > readRange ( range & serverTagKeys ) . get ( ) ; / / read is expected to be immediately available <nl> for ( auto & kv : serverKeysCleared ) { <nl> Tag tag = decodeServerTagValue ( kv . value ) ; <nl> - TraceEvent ( " ServerTagRemove " ) . detail ( " PopVersion " , popVersion ) . detail ( " Tag " , tag . toString ( ) ) . detail ( " Server " , decodeServerTagKey ( kv . key ) ) ; <nl> - logSystem - > pop ( popVersion , decodeServerTagValue ( kv . value ) ) ; <nl> + TraceEvent ( " ServerTagRemove " ) <nl> + . detail ( " PopVersion " , popVersion ) <nl> + . detail ( " Tag " , tag . toString ( ) ) <nl> + . detail ( " Server " , decodeServerTagKey ( kv . key ) ) ; <nl> + logSystem - > pop ( popVersion , decodeServerTagValue ( kv . value ) ) ; <nl> ( * tag_popped ) [ tag ] = popVersion ; <nl> <nl> - if ( toCommit ) { <nl> + if ( toCommit ) { <nl> MutationRef privatized = m ; <nl> privatized . param1 = kv . key . withPrefix ( systemKeys . begin , arena ) ; <nl> privatized . param2 = keyAfter ( kv . key , arena ) . withPrefix ( systemKeys . begin , arena ) ; <nl> void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRe <nl> } <nl> } <nl> } <nl> + <nl> + void applyMetadataMutations ( ProxyCommitData & proxyCommitData , Arena & arena , Reference < ILogSystem > logSystem , <nl> + const VectorRef < MutationRef > & mutations , LogPushData * toCommit , bool & confChange , <nl> + Version popVersion , bool initialCommit ) { <nl> + <nl> + std : : map < Key , ApplyMutationsData > * uid_applyMutationsData = nullptr ; <nl> + if ( proxyCommitData . firstProxy ) { <nl> + uid_applyMutationsData = & proxyCommitData . uid_applyMutationsData ; <nl> + } <nl> + <nl> + applyMetadataMutations ( proxyCommitData . dbgid , arena , mutations , proxyCommitData . txnStateStore , toCommit , confChange , <nl> + logSystem , popVersion , & proxyCommitData . vecBackupKeys , & proxyCommitData . keyInfo , <nl> + & proxyCommitData . cacheInfo , uid_applyMutationsData , proxyCommitData . commit , <nl> + proxyCommitData . cx , & proxyCommitData . committedVersion , & proxyCommitData . storageCache , <nl> + & proxyCommitData . 
tag_popped , initialCommit ) ; <nl> + } <nl> + <nl> + void applyMetadataMutations ( const UID & dbgid , Arena & arena , const VectorRef < MutationRef > & mutations , <nl> + IKeyValueStore * txnStateStore ) { <nl> + <nl> + bool confChange ; / / Dummy variable , not used . <nl> + <nl> + applyMetadataMutations ( dbgid , arena , mutations , txnStateStore , / * toCommit = * / nullptr , confChange , <nl> + Reference < ILogSystem > ( ) , / * popVersion = * / 0 , / * vecBackupKeys = * / nullptr , <nl> + / * keyInfo = * / nullptr , / * cacheInfo = * / nullptr , / * uid_applyMutationsData = * / nullptr , <nl> + RequestStream < CommitTransactionRequest > ( ) , Database ( ) , / * commitVersion = * / nullptr , <nl> + / * storageCache = * / nullptr , / * tag_popped = * / nullptr , / * initialCommit = * / false ) ; <nl> + } <nl> \ No newline at end of file <nl> mmm a / fdbserver / ApplyMetadataMutation . h <nl> ppp b / fdbserver / ApplyMetadataMutation . h <nl> <nl> # include " fdbserver / IKeyValueStore . h " <nl> # include " fdbserver / LogSystem . h " <nl> # include " fdbserver / LogProtocolMessage . h " <nl> + # include " fdbserver / ProxyCommitData . actor . h " <nl> <nl> inline bool isMetadataMutation ( MutationRef const & m ) { <nl> / / FIXME : This is conservative - not everything in system keyspace is necessarily processed by applyMetadataMutations <nl> inline bool isMetadataMutation ( MutationRef const & m ) { <nl> ( m . type = = MutationRef : : ClearRange & & m . param2 . size ( ) & & m . param2 [ 0 ] = = systemKeys . begin [ 0 ] & & ! nonMetadataSystemKeys . contains ( KeyRangeRef ( m . param1 , m . param2 ) ) ) ; <nl> } <nl> <nl> - struct applyMutationsData { <nl> - Future < Void > worker ; <nl> - Version endVersion ; <nl> - Reference < KeyRangeMap < Version > > keyVersion ; <nl> - } ; <nl> - <nl> Reference < StorageInfo > getStorageInfo ( UID id , std : : map < UID , Reference < StorageInfo > > * storageCache , IKeyValueStore * txnStateStore ) ; <nl> <nl> - void applyMetadataMutations ( UID const & dbgid , Arena & arena , VectorRef < MutationRef > const & mutations , IKeyValueStore * txnStateStore , LogPushData * toCommit , bool * confChange , Reference < ILogSystem > logSystem = Reference < ILogSystem > ( ) , Version popVersion = 0 , <nl> - KeyRangeMap < std : : set < Key > > * vecBackupKeys = nullptr , KeyRangeMap < ServerCacheInfo > * keyInfo = nullptr , KeyRangeMap < bool > * cacheInfo = nullptr , std : : map < Key , applyMutationsData > * uid_applyMutationsData = nullptr , RequestStream < CommitTransactionRequest > commit = RequestStream < CommitTransactionRequest > ( ) , <nl> - Database cx = Database ( ) , NotifiedVersion * commitVersion = nullptr , std : : map < UID , Reference < StorageInfo > > * storageCache = nullptr , std : : map < Tag , Version > * tag_popped = nullptr , bool initialCommit = false ) ; <nl> + void applyMetadataMutations ( ProxyCommitData & proxyCommitData , Arena & arena , Reference < ILogSystem > logSystem , <nl> + const VectorRef < MutationRef > & mutations , LogPushData * pToCommit , bool & confChange , <nl> + Version popVersion , bool initialCommit ) ; <nl> + void applyMetadataMutations ( const UID & dbgid , Arena & arena , const VectorRef < MutationRef > & mutations , <nl> + IKeyValueStore * txnStateStore ) ; <nl> <nl> # endif <nl> mmm a / fdbserver / BackupProgress . actor . cpp <nl> ppp b / fdbserver / BackupProgress . actor . 
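For reference, a rough sketch of the call sites the two new applyMetadataMutations overloads are intended to support; the surrounding variable names (mutations, toCommit, popVersion, recoveryArena, and so on) are illustrative rather than taken from the proxy or master code.

// Proxy commit path: most parameters now come from ProxyCommitData, and confChange is a plain bool
// passed by reference instead of an optional pointer.
bool confChanged = false;
applyMetadataMutations(proxyCommitData, arena, logSystem, mutations, &toCommit, confChanged, popVersion,
                       /*initialCommit=*/false);
if (confChanged) {
	// A configuration key was modified; the caller reacts, e.g. by forcing a recovery.
}

// Recovery path: when replaying metadata mutations into the transaction state store alone,
// the txnStateStore-only overload defaults every other argument away.
applyMetadataMutations(dbgid, recoveryArena, mutations, txnStateStore);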
cpp <nl> TEST_CASE ( " / BackupProgress / Unfinished " ) { <nl> std : : map < std : : tuple < LogEpoch , Version , int > , std : : map < Tag , Version > > unfinished = progress . getUnfinishedBackup ( ) ; <nl> <nl> ASSERT ( unfinished . size ( ) = = 1 ) ; <nl> - for ( const auto [ epochVersionCount , tagVersion ] : unfinished ) { <nl> + for ( const auto & [ epochVersionCount , tagVersion ] : unfinished ) { <nl> ASSERT ( std : : get < 0 > ( epochVersionCount ) = = epoch1 & & std : : get < 1 > ( epochVersionCount ) = = end1 & & <nl> std : : get < 2 > ( epochVersionCount ) = = 1 ) ; <nl> ASSERT ( tagVersion . size ( ) = = 1 & & tagVersion . begin ( ) - > first = = tag1 & & tagVersion . begin ( ) - > second = = begin1 ) ; <nl> TEST_CASE ( " / BackupProgress / Unfinished " ) { <nl> progress . addBackupStatus ( status1 ) ; <nl> unfinished = progress . getUnfinishedBackup ( ) ; <nl> ASSERT ( unfinished . size ( ) = = 1 ) ; <nl> - for ( const auto [ epochVersionCount , tagVersion ] : unfinished ) { <nl> + for ( const auto & [ epochVersionCount , tagVersion ] : unfinished ) { <nl> ASSERT ( std : : get < 0 > ( epochVersionCount ) = = epoch1 & & std : : get < 1 > ( epochVersionCount ) = = end1 & & <nl> std : : get < 2 > ( epochVersionCount ) = = 1 ) ; <nl> ASSERT ( tagVersion . size ( ) = = 1 & & tagVersion . begin ( ) - > first = = tag1 & & tagVersion . begin ( ) - > second = = saved1 + 1 ) ; <nl> } <nl> <nl> return Void ( ) ; <nl> - } <nl> \ No newline at end of file <nl> + } <nl> mmm a / fdbserver / BackupWorker . actor . cpp <nl> ppp b / fdbserver / BackupWorker . actor . cpp <nl> struct VersionedMessage { <nl> <nl> VersionedMessage ( LogMessageVersion v , StringRef m , const VectorRef < Tag > & t , const Arena & a ) <nl> : version ( v ) , message ( m ) , tags ( t ) , arena ( a ) , bytes ( a . getSize ( ) ) { } <nl> - const Version getVersion ( ) const { return version . version ; } <nl> - const uint32_t getSubVersion ( ) const { return version . sub ; } <nl> + Version getVersion ( ) const { return version . version ; } <nl> + uint32_t getSubVersion ( ) const { return version . sub ; } <nl> <nl> / / Returns true if the message is a mutation that should be backuped , i . e . , <nl> / / either key is not in system key space or is not a metadataVersionKey . <nl> struct BackupData { <nl> bool modified = false ; <nl> bool minVersionChanged = false ; <nl> Version minVersion = std : : numeric_limits < Version > : : max ( ) ; <nl> - for ( const auto [ uid , version ] : uidVersions ) { <nl> + for ( const auto & [ uid , version ] : uidVersions ) { <nl> auto it = backups . find ( uid ) ; <nl> if ( it = = backups . end ( ) ) { <nl> modified = true ; <nl> mmm a / fdbserver / CMakeLists . txt <nl> ppp b / fdbserver / CMakeLists . txt <nl> set ( FDBSERVER_SRCS <nl> OldTLogServer_6_0 . actor . cpp <nl> OldTLogServer_6_2 . actor . cpp <nl> Orderer . actor . h <nl> + ProxyCommitData . actor . h <nl> pubsub . actor . cpp <nl> pubsub . h <nl> QuietDatabase . actor . cpp <nl> set ( FDBSERVER_SRCS <nl> workloads / DDMetricsExclude . actor . cpp <nl> workloads / DiskDurability . actor . cpp <nl> workloads / DiskDurabilityTest . actor . cpp <nl> + workloads / Downgrade . actor . cpp <nl> workloads / DummyWorkload . actor . cpp <nl> workloads / ExternalWorkload . actor . cpp <nl> workloads / FastTriggeredWatches . actor . cpp <nl> mmm a / fdbserver / ClusterController . actor . cpp <nl> ppp b / fdbserver / ClusterController . actor . 
cpp <nl> class ClusterControllerData { <nl> std : : set < Optional < Key > > dcIds = std : : set < Optional < Key > > ( ) , <nl> std : : vector < UID > exclusionWorkerIds = { } ) { <nl> TraceEvent ( " TL1 " ) <nl> - . detail ( " Conf " , conf . toString ( ) ) <nl> - . detail ( " L1 " , policy - > info ( ) ) <nl> - . detail ( " L2 " , id_used . size ( ) ) <nl> - . detail ( " L3 " , checkStable ) <nl> - . detail ( " L4 " , dcIds . size ( ) ) <nl> - . detail ( " L5 " , exclusionWorkerIds . size ( ) ) ; <nl> + . detail ( " Conf " , conf . toString ( ) ) <nl> + . detail ( " L1 " , policy - > info ( ) ) <nl> + . detail ( " L2 " , id_used . size ( ) ) <nl> + . detail ( " L3 " , checkStable ) <nl> + . detail ( " L4 " , dcIds . size ( ) ) <nl> + . detail ( " L5 " , exclusionWorkerIds . size ( ) ) ; <nl> for ( auto & pair : id_used ) { <nl> TraceEvent ( " IdUsed " ) <nl> - . detail ( " Key " , pair . first . present ( ) ? pair . first . get ( ) . toString ( ) : " absent " ) <nl> - . detail ( " Value " , pair . second ) ; <nl> + . detail ( " Key " , pair . first . present ( ) ? pair . first . get ( ) . toString ( ) : " absent " ) <nl> + . detail ( " Value " , pair . second ) ; <nl> } <nl> for ( auto & pair : dcIds ) { <nl> TraceEvent ( " DcIds " ) . detail ( " Key " , pair . present ( ) ? pair . get ( ) . toString ( ) : " absent " ) ; <nl> } <nl> - <nl> std : : map < std : : pair < ProcessClass : : Fitness , bool > , vector < WorkerDetails > > fitness_workers ; <nl> std : : vector < WorkerDetails > results ; <nl> std : : vector < LocalityData > unavailableLocals ; <nl> class ClusterControllerData { <nl> bCompleted = true ; <nl> break ; <nl> } <nl> - TraceEvent ( SevWarn , " GWFTADNotAcceptable " , id ) . detail ( " Fitness " , fitness ) . detail ( " Processes " , logServerSet - > size ( ) ) . detail ( " Required " , required ) . detail ( " TLogPolicy " , policy - > info ( ) ) . detail ( " DesiredLogs " , desired ) . detail ( " AddingDegraded " , addingDegraded ) ; <nl> + TraceEvent ( SevWarn , " GWFTADNotAcceptable " , id ) <nl> + . detail ( " Fitness " , fitness ) <nl> + . detail ( " Processes " , logServerSet - > size ( ) ) <nl> + . detail ( " Required " , required ) <nl> + . detail ( " TLogPolicy " , policy - > info ( ) ) <nl> + . detail ( " DesiredLogs " , desired ) <nl> + . detail ( " AddingDegraded " , addingDegraded ) ; <nl> } <nl> / / Try to select the desired size , if larger <nl> else { <nl> class ClusterControllerData { <nl> tLocalities . push_back ( object - > interf . locality ) ; <nl> } <nl> <nl> - TraceEvent ( SevWarn , " GetTLogTeamFailed " ) . detail ( " Policy " , policy - > info ( ) ) . detail ( " Processes " , logServerSet - > size ( ) ) . detail ( " Workers " , id_worker . size ( ) ) . detail ( " FitnessGroups " , fitness_workers . size ( ) ) <nl> - . detail ( " TLogZones " , : : describeZones ( tLocalities ) ) . detail ( " TLogDataHalls " , : : describeDataHalls ( tLocalities ) ) . detail ( " MissingZones " , : : describeZones ( unavailableLocals ) ) <nl> - . detail ( " MissingDataHalls " , : : describeDataHalls ( unavailableLocals ) ) . detail ( " Required " , required ) . detail ( " DesiredLogs " , desired ) . detail ( " RatingTests " , SERVER_KNOBS - > POLICY_RATING_TESTS ) <nl> - . detail ( " CheckStable " , checkStable ) . detail ( " NumExclusionWorkers " , exclusionWorkerIds . size ( ) ) . detail ( " PolicyGenerations " , SERVER_KNOBS - > POLICY_GENERATIONS ) . backtrace ( ) ; <nl> + TraceEvent ( SevWarn , " GetTLogTeamFailed " ) <nl> + . 
detail ( " Policy " , policy - > info ( ) ) <nl> + . detail ( " Processes " , logServerSet - > size ( ) ) <nl> + . detail ( " Workers " , id_worker . size ( ) ) <nl> + . detail ( " FitnessGroups " , fitness_workers . size ( ) ) <nl> + . detail ( " TLogZones " , : : describeZones ( tLocalities ) ) <nl> + . detail ( " TLogDataHalls " , : : describeDataHalls ( tLocalities ) ) <nl> + . detail ( " MissingZones " , : : describeZones ( unavailableLocals ) ) <nl> + . detail ( " MissingDataHalls " , : : describeDataHalls ( unavailableLocals ) ) <nl> + . detail ( " Required " , required ) <nl> + . detail ( " DesiredLogs " , desired ) <nl> + . detail ( " RatingTests " , SERVER_KNOBS - > POLICY_RATING_TESTS ) <nl> + . detail ( " CheckStable " , checkStable ) <nl> + . detail ( " NumExclusionWorkers " , exclusionWorkerIds . size ( ) ) <nl> + . detail ( " PolicyGenerations " , SERVER_KNOBS - > POLICY_GENERATIONS ) <nl> + . backtrace ( ) ; <nl> <nl> logServerSet - > clear ( ) ; <nl> logServerSet . clear ( ) ; <nl> mmm a / fdbserver / DataDistribution . actor . cpp <nl> ppp b / fdbserver / DataDistribution . actor . cpp <nl> class TCMachineTeamInfo : public ReferenceCounted < TCMachineTeamInfo > { <nl> <nl> / / TeamCollection ' s server team info . <nl> class TCTeamInfo : public ReferenceCounted < TCTeamInfo > , public IDataDistributionTeam { <nl> - public : <nl> vector < Reference < TCServerInfo > > servers ; <nl> vector < UID > serverIDs ; <nl> - Reference < TCMachineTeamInfo > machineTeam ; <nl> - Future < Void > tracker ; <nl> bool healthy ; <nl> bool wrongConfiguration ; / / True if any of the servers in the team have the wrong configuration <nl> int priority ; <nl> <nl> + public : <nl> + Reference < TCMachineTeamInfo > machineTeam ; <nl> + Future < Void > tracker ; <nl> + <nl> explicit TCTeamInfo ( vector < Reference < TCServerInfo > > const & servers ) <nl> : servers ( servers ) , healthy ( true ) , priority ( SERVER_KNOBS - > PRIORITY_TEAM_HEALTHY ) , wrongConfiguration ( false ) { <nl> if ( servers . empty ( ) ) { <nl> class TCTeamInfo : public ReferenceCounted < TCTeamInfo > , public IDataDistribution <nl> } <nl> } <nl> <nl> - virtual vector < StorageServerInterface > getLastKnownServerInterfaces ( ) { <nl> - vector < StorageServerInterface > v ; <nl> - v . reserve ( servers . size ( ) ) ; <nl> - for ( int i = 0 ; i < servers . size ( ) ; i + + ) <nl> - v . push_back ( servers [ i ] - > lastKnownInterface ) ; <nl> + vector < StorageServerInterface > getLastKnownServerInterfaces ( ) const override { <nl> + vector < StorageServerInterface > v ( servers . size ( ) ) ; <nl> + for ( const auto & server : servers ) v . push_back ( server - > lastKnownInterface ) ; <nl> return v ; <nl> } <nl> - virtual int size ( ) { <nl> + int size ( ) const override { <nl> ASSERT ( servers . size ( ) = = serverIDs . size ( ) ) ; <nl> return servers . size ( ) ; <nl> } <nl> - virtual vector < UID > const & getServerIDs ( ) { return serverIDs ; } <nl> + vector < UID > const & getServerIDs ( ) const override { return serverIDs ; } <nl> const vector < Reference < TCServerInfo > > & getServers ( ) { return servers ; } <nl> <nl> - virtual std : : string getServerIDsStr ( ) { <nl> + std : : string getServerIDsStr ( ) const { <nl> std : : stringstream ss ; <nl> <nl> if ( serverIDs . empty ( ) ) return " [ unset ] " ; <nl> class TCTeamInfo : public ReferenceCounted < TCTeamInfo > , public IDataDistribution <nl> return ss . 
str ( ) ; <nl> } <nl> <nl> - virtual void addDataInFlightToTeam ( int64_t delta ) { <nl> + void addDataInFlightToTeam ( int64_t delta ) override { <nl> for ( int i = 0 ; i < servers . size ( ) ; i + + ) <nl> servers [ i ] - > dataInFlightToServer + = delta ; <nl> } <nl> - virtual int64_t getDataInFlightToTeam ( ) { <nl> + int64_t getDataInFlightToTeam ( ) const override { <nl> int64_t dataInFlight = 0 . 0 ; <nl> for ( int i = 0 ; i < servers . size ( ) ; i + + ) <nl> dataInFlight + = servers [ i ] - > dataInFlightToServer ; <nl> return dataInFlight ; <nl> } <nl> <nl> - virtual int64_t getLoadBytes ( bool includeInFlight = true , double inflightPenalty = 1 . 0 ) { <nl> + int64_t getLoadBytes ( bool includeInFlight = true , double inflightPenalty = 1 . 0 ) const override { <nl> int64_t physicalBytes = getLoadAverage ( ) ; <nl> double minAvailableSpaceRatio = getMinAvailableSpaceRatio ( includeInFlight ) ; <nl> int64_t inFlightBytes = includeInFlight ? getDataInFlightToTeam ( ) / servers . size ( ) : 0 ; <nl> class TCTeamInfo : public ReferenceCounted < TCTeamInfo > , public IDataDistribution <nl> return ( physicalBytes + ( inflightPenalty * inFlightBytes ) ) * availableSpaceMultiplier ; <nl> } <nl> <nl> - virtual int64_t getMinAvailableSpace ( bool includeInFlight = true ) { <nl> + int64_t getMinAvailableSpace ( bool includeInFlight = true ) const override { <nl> int64_t minAvailableSpace = std : : numeric_limits < int64_t > : : max ( ) ; <nl> - for ( int i = 0 ; i < servers . size ( ) ; i + + ) { <nl> - if ( servers [ i ] - > serverMetrics . present ( ) ) { <nl> - auto & replyValue = servers [ i ] - > serverMetrics . get ( ) ; <nl> + for ( const auto & server : servers ) { <nl> + if ( server - > serverMetrics . present ( ) ) { <nl> + auto & replyValue = server - > serverMetrics . get ( ) ; <nl> <nl> ASSERT ( replyValue . available . bytes > = 0 ) ; <nl> ASSERT ( replyValue . capacity . bytes > = 0 ) ; <nl> <nl> int64_t bytesAvailable = replyValue . available . bytes ; <nl> if ( includeInFlight ) { <nl> - bytesAvailable - = servers [ i ] - > dataInFlightToServer ; <nl> + bytesAvailable - = server - > dataInFlightToServer ; <nl> } <nl> <nl> minAvailableSpace = std : : min ( bytesAvailable , minAvailableSpace ) ; <nl> class TCTeamInfo : public ReferenceCounted < TCTeamInfo > , public IDataDistribution <nl> return minAvailableSpace ; / / Could be negative <nl> } <nl> <nl> - virtual double getMinAvailableSpaceRatio ( bool includeInFlight = true ) { <nl> + double getMinAvailableSpaceRatio ( bool includeInFlight = true ) const override { <nl> double minRatio = 1 . 0 ; <nl> - for ( int i = 0 ; i < servers . size ( ) ; i + + ) { <nl> - if ( servers [ i ] - > serverMetrics . present ( ) ) { <nl> - auto & replyValue = servers [ i ] - > serverMetrics . get ( ) ; <nl> + for ( const auto & server : servers ) { <nl> + if ( server - > serverMetrics . present ( ) ) { <nl> + auto & replyValue = server - > serverMetrics . get ( ) ; <nl> <nl> ASSERT ( replyValue . available . bytes > = 0 ) ; <nl> ASSERT ( replyValue . capacity . bytes > = 0 ) ; <nl> <nl> int64_t bytesAvailable = replyValue . available . bytes ; <nl> if ( includeInFlight ) { <nl> - bytesAvailable = std : : max ( ( int64_t ) 0 , bytesAvailable - servers [ i ] - > dataInFlightToServer ) ; <nl> + bytesAvailable = std : : max ( ( int64_t ) 0 , bytesAvailable - server - > dataInFlightToServer ) ; <nl> } <nl> <nl> if ( replyValue . capacity . 
bytes = = 0 ) <nl> class TCTeamInfo : public ReferenceCounted < TCTeamInfo > , public IDataDistribution <nl> return minRatio ; <nl> } <nl> <nl> - virtual bool hasHealthyAvailableSpace ( double minRatio ) { <nl> + bool hasHealthyAvailableSpace ( double minRatio ) const override { <nl> return getMinAvailableSpaceRatio ( ) > = minRatio & & getMinAvailableSpace ( ) > SERVER_KNOBS - > MIN_AVAILABLE_SPACE ; <nl> } <nl> <nl> - virtual Future < Void > updateStorageMetrics ( ) { <nl> - return doUpdateStorageMetrics ( this ) ; <nl> - } <nl> + Future < Void > updateStorageMetrics ( ) override { return doUpdateStorageMetrics ( this ) ; } <nl> <nl> - virtual bool isOptimal ( ) { <nl> - for ( int i = 0 ; i < servers . size ( ) ; i + + ) { <nl> - if ( servers [ i ] - > lastKnownClass . machineClassFitness ( ProcessClass : : Storage ) > ProcessClass : : UnsetFit ) { <nl> + bool isOptimal ( ) const override { <nl> + for ( const auto & server : servers ) { <nl> + if ( server - > lastKnownClass . machineClassFitness ( ProcessClass : : Storage ) > ProcessClass : : UnsetFit ) { <nl> return false ; <nl> } <nl> } <nl> return true ; <nl> } <nl> <nl> - virtual bool isWrongConfiguration ( ) { return wrongConfiguration ; } <nl> - virtual void setWrongConfiguration ( bool wrongConfiguration ) { this - > wrongConfiguration = wrongConfiguration ; } <nl> - virtual bool isHealthy ( ) { return healthy ; } <nl> - virtual void setHealthy ( bool h ) { healthy = h ; } <nl> - virtual int getPriority ( ) { return priority ; } <nl> - virtual void setPriority ( int p ) { priority = p ; } <nl> + bool isWrongConfiguration ( ) const override { return wrongConfiguration ; } <nl> + void setWrongConfiguration ( bool wrongConfiguration ) override { this - > wrongConfiguration = wrongConfiguration ; } <nl> + bool isHealthy ( ) const override { return healthy ; } <nl> + void setHealthy ( bool h ) override { healthy = h ; } <nl> + int getPriority ( ) const override { return priority ; } <nl> + void setPriority ( int p ) override { priority = p ; } <nl> virtual void addref ( ) { ReferenceCounted < TCTeamInfo > : : addref ( ) ; } <nl> virtual void delref ( ) { ReferenceCounted < TCTeamInfo > : : delref ( ) ; } <nl> <nl> class TCTeamInfo : public ReferenceCounted < TCTeamInfo > , public IDataDistribution <nl> <nl> private : <nl> / / Calculate an " average " of the metrics replies that we received . Penalize teams from which we did not receive all replies . <nl> - int64_t getLoadAverage ( ) { <nl> + int64_t getLoadAverage ( ) const { <nl> int64_t bytesSum = 0 ; <nl> int added = 0 ; <nl> for ( int i = 0 ; i < servers . size ( ) ; i + + ) <nl> mmm a / fdbserver / DataDistribution . actor . h <nl> ppp b / fdbserver / DataDistribution . actor . h <nl> struct RelocateShard { <nl> } ; <nl> <nl> struct IDataDistributionTeam { <nl> - virtual vector < StorageServerInterface > getLastKnownServerInterfaces ( ) = 0 ; <nl> - virtual int size ( ) = 0 ; <nl> - virtual vector < UID > const & getServerIDs ( ) = 0 ; <nl> + virtual vector < StorageServerInterface > getLastKnownServerInterfaces ( ) const = 0 ; <nl> + virtual int size ( ) const = 0 ; <nl> + virtual vector < UID > const & getServerIDs ( ) const = 0 ; <nl> virtual void addDataInFlightToTeam ( int64_t delta ) = 0 ; <nl> - virtual int64_t getDataInFlightToTeam ( ) = 0 ; <nl> - virtual int64_t getLoadBytes ( bool includeInFlight = true , double inflightPenalty = 1 . 
0 ) = 0 ; <nl> - virtual int64_t getMinAvailableSpace ( bool includeInFlight = true ) = 0 ; <nl> - virtual double getMinAvailableSpaceRatio ( bool includeInFlight = true ) = 0 ; <nl> - virtual bool hasHealthyAvailableSpace ( double minRatio ) = 0 ; <nl> + virtual int64_t getDataInFlightToTeam ( ) const = 0 ; <nl> + virtual int64_t getLoadBytes ( bool includeInFlight = true , double inflightPenalty = 1 . 0 ) const = 0 ; <nl> + virtual int64_t getMinAvailableSpace ( bool includeInFlight = true ) const = 0 ; <nl> + virtual double getMinAvailableSpaceRatio ( bool includeInFlight = true ) const = 0 ; <nl> + virtual bool hasHealthyAvailableSpace ( double minRatio ) const = 0 ; <nl> virtual Future < Void > updateStorageMetrics ( ) = 0 ; <nl> virtual void addref ( ) = 0 ; <nl> virtual void delref ( ) = 0 ; <nl> - virtual bool isHealthy ( ) = 0 ; <nl> + virtual bool isHealthy ( ) const = 0 ; <nl> virtual void setHealthy ( bool ) = 0 ; <nl> - virtual int getPriority ( ) = 0 ; <nl> + virtual int getPriority ( ) const = 0 ; <nl> virtual void setPriority ( int ) = 0 ; <nl> - virtual bool isOptimal ( ) = 0 ; <nl> - virtual bool isWrongConfiguration ( ) = 0 ; <nl> + virtual bool isOptimal ( ) const = 0 ; <nl> + virtual bool isWrongConfiguration ( ) const = 0 ; <nl> virtual void setWrongConfiguration ( bool ) = 0 ; <nl> virtual void addServers ( const vector < UID > & servers ) = 0 ; <nl> <nl> - std : : string getDesc ( ) { <nl> + std : : string getDesc ( ) const { <nl> const auto & servers = getLastKnownServerInterfaces ( ) ; <nl> std : : string s = format ( " Size % d ; " , servers . size ( ) ) ; <nl> for ( int i = 0 ; i < servers . size ( ) ; i + + ) { <nl> mmm a / fdbserver / DataDistributionQueue . actor . cpp <nl> ppp b / fdbserver / DataDistributionQueue . actor . cpp <nl> struct RelocateData { <nl> } ; <nl> <nl> class ParallelTCInfo : public ReferenceCounted < ParallelTCInfo > , public IDataDistributionTeam { <nl> - public : <nl> vector < Reference < IDataDistributionTeam > > teams ; <nl> - vector < UID > tempServerIDs ; <nl> - <nl> - ParallelTCInfo ( ) { } <nl> - <nl> - void addTeam ( Reference < IDataDistributionTeam > team ) { <nl> - teams . push_back ( team ) ; <nl> - } <nl> <nl> - void clear ( ) { <nl> - teams . clear ( ) ; <nl> - } <nl> - <nl> - int64_t sum ( std : : function < int64_t ( Reference < IDataDistributionTeam > ) > func ) { <nl> + int64_t sum ( std : : function < int64_t ( IDataDistributionTeam const & ) > func ) const { <nl> int64_t result = 0 ; <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - result + = func ( * it ) ; <nl> + for ( const auto & team : teams ) { <nl> + result + = func ( * team ) ; <nl> } <nl> return result ; <nl> } <nl> <nl> - template < class T > <nl> - vector < T > collect ( std : : function < vector < T > ( Reference < IDataDistributionTeam > ) > func ) { <nl> - vector < T > result ; <nl> + template < class T > <nl> + vector < T > collect ( std : : function < vector < T > ( IDataDistributionTeam const & ) > func ) const { <nl> + vector < T > result ( teams . size ( ) ) ; <nl> <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - vector < T > newItems = func ( * it ) ; <nl> + for ( const auto & team : teams ) { <nl> + vector < T > newItems = func ( * team ) ; <nl> result . insert ( result . end ( ) , newItems . begin ( ) , newItems . 
end ( ) ) ; <nl> } <nl> return result ; <nl> } <nl> <nl> - bool any ( std : : function < bool ( Reference < IDataDistributionTeam > ) > func ) { <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - if ( func ( * it ) ) { <nl> + bool any ( std : : function < bool ( IDataDistributionTeam const & ) > func ) const { <nl> + for ( const auto & team : teams ) { <nl> + if ( func ( * team ) ) { <nl> return true ; <nl> } <nl> } <nl> return false ; <nl> } <nl> <nl> - bool all ( std : : function < bool ( Reference < IDataDistributionTeam > ) > func ) { <nl> - return ! any ( [ func ] ( Reference < IDataDistributionTeam > team ) { <nl> - return ! func ( team ) ; <nl> - } ) ; <nl> + public : <nl> + ParallelTCInfo ( ) = default ; <nl> + <nl> + void addTeam ( Reference < IDataDistributionTeam > team ) { teams . push_back ( team ) ; } <nl> + <nl> + void clear ( ) { teams . clear ( ) ; } <nl> + <nl> + bool all ( std : : function < bool ( IDataDistributionTeam const & ) > func ) const { <nl> + return ! any ( [ func ] ( IDataDistributionTeam const & team ) { return ! func ( team ) ; } ) ; <nl> } <nl> <nl> - virtual vector < StorageServerInterface > getLastKnownServerInterfaces ( ) { <nl> - return collect < StorageServerInterface > ( [ ] ( Reference < IDataDistributionTeam > team ) { <nl> - return team - > getLastKnownServerInterfaces ( ) ; <nl> - } ) ; <nl> + vector < StorageServerInterface > getLastKnownServerInterfaces ( ) const override { <nl> + return collect < StorageServerInterface > ( <nl> + [ ] ( IDataDistributionTeam const & team ) { return team . getLastKnownServerInterfaces ( ) ; } ) ; <nl> } <nl> <nl> - virtual int size ( ) { <nl> + int size ( ) const override { <nl> int totalSize = 0 ; <nl> for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> totalSize + = ( * it ) - > size ( ) ; <nl> class ParallelTCInfo : public ReferenceCounted < ParallelTCInfo > , public IDataDist <nl> return totalSize ; <nl> } <nl> <nl> - virtual vector < UID > const & getServerIDs ( ) { <nl> + vector < UID > const & getServerIDs ( ) const override { <nl> + static vector < UID > tempServerIDs ; <nl> tempServerIDs . clear ( ) ; <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - vector < UID > const & childIDs = ( * it ) - > getServerIDs ( ) ; <nl> + for ( const auto & team : teams ) { <nl> + vector < UID > const & childIDs = team - > getServerIDs ( ) ; <nl> tempServerIDs . insert ( tempServerIDs . end ( ) , childIDs . begin ( ) , childIDs . end ( ) ) ; <nl> } <nl> return tempServerIDs ; <nl> } <nl> <nl> - virtual void addDataInFlightToTeam ( int64_t delta ) { <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - ( * it ) - > addDataInFlightToTeam ( delta ) ; <nl> + void addDataInFlightToTeam ( int64_t delta ) override { <nl> + for ( auto & team : teams ) { <nl> + team - > addDataInFlightToTeam ( delta ) ; <nl> } <nl> } <nl> <nl> - virtual int64_t getDataInFlightToTeam ( ) { <nl> - return sum ( [ ] ( Reference < IDataDistributionTeam > team ) { <nl> - return team - > getDataInFlightToTeam ( ) ; <nl> - } ) ; <nl> + int64_t getDataInFlightToTeam ( ) const override { <nl> + return sum ( [ ] ( IDataDistributionTeam const & team ) { return team . getDataInFlightToTeam ( ) ; } ) ; <nl> } <nl> <nl> - virtual int64_t getLoadBytes ( bool includeInFlight = true , double inflightPenalty = 1 . 
0 ) { <nl> - return sum ( [ includeInFlight , inflightPenalty ] ( Reference < IDataDistributionTeam > team ) { <nl> - return team - > getLoadBytes ( includeInFlight , inflightPenalty ) ; <nl> + int64_t getLoadBytes ( bool includeInFlight = true , double inflightPenalty = 1 . 0 ) const override { <nl> + return sum ( [ includeInFlight , inflightPenalty ] ( IDataDistributionTeam const & team ) { <nl> + return team . getLoadBytes ( includeInFlight , inflightPenalty ) ; <nl> } ) ; <nl> } <nl> <nl> - virtual int64_t getMinAvailableSpace ( bool includeInFlight = true ) { <nl> + int64_t getMinAvailableSpace ( bool includeInFlight = true ) const override { <nl> int64_t result = std : : numeric_limits < int64_t > : : max ( ) ; <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - result = std : : min ( result , ( * it ) - > getMinAvailableSpace ( includeInFlight ) ) ; <nl> + for ( const auto & team : teams ) { <nl> + result = std : : min ( result , team - > getMinAvailableSpace ( includeInFlight ) ) ; <nl> } <nl> return result ; <nl> } <nl> <nl> - virtual double getMinAvailableSpaceRatio ( bool includeInFlight = true ) { <nl> + double getMinAvailableSpaceRatio ( bool includeInFlight = true ) const override { <nl> double result = std : : numeric_limits < double > : : max ( ) ; <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - result = std : : min ( result , ( * it ) - > getMinAvailableSpaceRatio ( includeInFlight ) ) ; <nl> + for ( const auto & team : teams ) { <nl> + result = std : : min ( result , team - > getMinAvailableSpaceRatio ( includeInFlight ) ) ; <nl> } <nl> return result ; <nl> } <nl> <nl> - virtual bool hasHealthyAvailableSpace ( double minRatio ) { <nl> - return all ( [ minRatio ] ( Reference < IDataDistributionTeam > team ) { <nl> - return team - > hasHealthyAvailableSpace ( minRatio ) ; <nl> - } ) ; <nl> + bool hasHealthyAvailableSpace ( double minRatio ) const { <nl> + return all ( [ minRatio ] ( IDataDistributionTeam const & team ) { return team . hasHealthyAvailableSpace ( minRatio ) ; } ) ; <nl> } <nl> <nl> virtual Future < Void > updateStorageMetrics ( ) { <nl> vector < Future < Void > > futures ; <nl> <nl> - for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> - futures . push_back ( ( * it ) - > updateStorageMetrics ( ) ) ; <nl> + for ( auto & team : teams ) { <nl> + futures . push_back ( team - > updateStorageMetrics ( ) ) ; <nl> } <nl> return waitForAll ( futures ) ; <nl> } <nl> <nl> - virtual bool isOptimal ( ) { <nl> - return all ( [ ] ( Reference < IDataDistributionTeam > team ) { <nl> - return team - > isOptimal ( ) ; <nl> - } ) ; <nl> + bool isOptimal ( ) const override { <nl> + return all ( [ ] ( IDataDistributionTeam const & team ) { return team . isOptimal ( ) ; } ) ; <nl> } <nl> <nl> - virtual bool isWrongConfiguration ( ) { <nl> - return any ( [ ] ( Reference < IDataDistributionTeam > team ) { <nl> - return team - > isWrongConfiguration ( ) ; <nl> - } ) ; <nl> + bool isWrongConfiguration ( ) const override { <nl> + return any ( [ ] ( IDataDistributionTeam const & team ) { return team . isWrongConfiguration ( ) ; } ) ; <nl> } <nl> - virtual void setWrongConfiguration ( bool wrongConfiguration ) { <nl> + void setWrongConfiguration ( bool wrongConfiguration ) override { <nl> for ( auto it = teams . begin ( ) ; it ! = teams . 
end ( ) ; it + + ) { <nl> ( * it ) - > setWrongConfiguration ( wrongConfiguration ) ; <nl> } <nl> } <nl> <nl> - virtual bool isHealthy ( ) { <nl> - return all ( [ ] ( Reference < IDataDistributionTeam > team ) { <nl> - return team - > isHealthy ( ) ; <nl> - } ) ; <nl> + bool isHealthy ( ) const override { <nl> + return all ( [ ] ( IDataDistributionTeam const & team ) { return team . isHealthy ( ) ; } ) ; <nl> } <nl> <nl> - virtual void setHealthy ( bool h ) { <nl> + void setHealthy ( bool h ) override { <nl> for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> ( * it ) - > setHealthy ( h ) ; <nl> } <nl> } <nl> <nl> - virtual int getPriority ( ) { <nl> + int getPriority ( ) const override { <nl> int priority = 0 ; <nl> for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> priority = std : : max ( priority , ( * it ) - > getPriority ( ) ) ; <nl> class ParallelTCInfo : public ReferenceCounted < ParallelTCInfo > , public IDataDist <nl> return priority ; <nl> } <nl> <nl> - virtual void setPriority ( int p ) { <nl> + void setPriority ( int p ) override { <nl> for ( auto it = teams . begin ( ) ; it ! = teams . end ( ) ; it + + ) { <nl> ( * it ) - > setPriority ( p ) ; <nl> } <nl> class ParallelTCInfo : public ReferenceCounted < ParallelTCInfo > , public IDataDist <nl> virtual void addref ( ) { ReferenceCounted < ParallelTCInfo > : : addref ( ) ; } <nl> virtual void delref ( ) { ReferenceCounted < ParallelTCInfo > : : delref ( ) ; } <nl> <nl> - virtual void addServers ( const std : : vector < UID > & servers ) { <nl> + void addServers ( const std : : vector < UID > & servers ) override { <nl> ASSERT ( ! teams . empty ( ) ) ; <nl> teams [ 0 ] - > addServers ( servers ) ; <nl> } <nl> mmm a / fdbserver / DiskQueue . actor . cpp <nl> ppp b / fdbserver / DiskQueue . actor . cpp <nl> class RawDiskQueue_TwoFiles : public Tracked < RawDiskQueue_TwoFiles > { <nl> void dispose ( ) { shutdown ( this , true ) ; } <nl> void close ( ) { shutdown ( this , false ) ; } <nl> <nl> - StorageBytes getStorageBytes ( ) { <nl> + StorageBytes getStorageBytes ( ) const { <nl> int64_t free ; <nl> int64_t total ; <nl> <nl> class DiskQueue : public IDiskQueue , public Tracked < DiskQueue > { <nl> { <nl> } <nl> <nl> - virtual location push ( StringRef contents ) { <nl> + location push ( StringRef contents ) override { <nl> ASSERT ( recovered ) ; <nl> uint8_t const * begin = contents . begin ( ) ; <nl> uint8_t const * end = contents . end ( ) ; <nl> class DiskQueue : public IDiskQueue , public Tracked < DiskQueue > { <nl> return endLocation ( ) ; <nl> } <nl> <nl> - virtual void pop ( location upTo ) { <nl> + void pop ( location upTo ) override { <nl> ASSERT ( ! upTo . hi ) ; <nl> ASSERT ( ! recovered | | upTo . lo < = endLocation ( ) ) ; <nl> <nl> class DiskQueue : public IDiskQueue , public Tracked < DiskQueue > { <nl> } <nl> } <nl> <nl> - virtual Future < Standalone < StringRef > > read ( location from , location to , CheckHashes ch ) { return read ( this , from , to , ch ) ; } <nl> - <nl> - int getMaxPayload ( ) { <nl> - return Page : : maxPayload ; <nl> + Future < Standalone < StringRef > > read ( location from , location to , CheckHashes ch ) override { <nl> + return read ( this , from , to , ch ) ; <nl> } <nl> <nl> + int getMaxPayload ( ) const { return Page : : maxPayload ; } <nl> + <nl> / / Always commit an entire page . 
Commit overhead is the unused space in a to - be - committed page <nl> - virtual int getCommitOverhead ( ) { <nl> + int getCommitOverhead ( ) const override { <nl> if ( ! pushedPageCount ( ) ) { <nl> if ( ! anyPopped ) <nl> return 0 ; <nl> class DiskQueue : public IDiskQueue , public Tracked < DiskQueue > { <nl> return backPage ( ) . remainingCapacity ( ) ; <nl> } <nl> <nl> - virtual Future < Void > commit ( ) { <nl> + Future < Void > commit ( ) override { <nl> ASSERT ( recovered ) ; <nl> if ( ! pushedPageCount ( ) ) { <nl> if ( ! anyPopped ) return Void ( ) ; <nl> class DiskQueue : public IDiskQueue , public Tracked < DiskQueue > { <nl> rawQueue - > stall ( ) ; <nl> } <nl> <nl> - virtual Future < bool > initializeRecovery ( location recoverAt ) { return initializeRecovery ( this , recoverAt ) ; } <nl> - virtual Future < Standalone < StringRef > > readNext ( int bytes ) { return readNext ( this , bytes ) ; } <nl> + Future < bool > initializeRecovery ( location recoverAt ) override { return initializeRecovery ( this , recoverAt ) ; } <nl> + Future < Standalone < StringRef > > readNext ( int bytes ) override { return readNext ( this , bytes ) ; } <nl> <nl> / / FIXME : getNextReadLocation should ASSERT ( initialized ) , but the memory storage engine needs <nl> / / to be changed to understand the new intiailizeRecovery protocol . <nl> - virtual location getNextReadLocation ( ) { return nextReadLocation ; } <nl> - virtual location getNextCommitLocation ( ) { ASSERT ( initialized ) ; return lastCommittedSeq + sizeof ( Page ) ; } <nl> - virtual location getNextPushLocation ( ) { ASSERT ( initialized ) ; return endLocation ( ) ; } <nl> + location getNextReadLocation ( ) const override { return nextReadLocation ; } <nl> + location getNextCommitLocation ( ) const override { <nl> + ASSERT ( initialized ) ; <nl> + return lastCommittedSeq + sizeof ( Page ) ; <nl> + } <nl> + location getNextPushLocation ( ) const override { <nl> + ASSERT ( initialized ) ; <nl> + return endLocation ( ) ; <nl> + } <nl> <nl> - virtual Future < Void > getError ( ) { return rawQueue - > getError ( ) ; } <nl> - virtual Future < Void > onClosed ( ) { return rawQueue - > onClosed ( ) ; } <nl> + Future < Void > getError ( ) override { return rawQueue - > getError ( ) ; } <nl> + Future < Void > onClosed ( ) override { return rawQueue - > onClosed ( ) ; } <nl> <nl> - virtual void dispose ( ) { <nl> + void dispose ( ) override { <nl> TraceEvent ( " DQDestroy " , dbgid ) . detail ( " LastPoppedSeq " , lastPoppedSeq ) . detail ( " PoppedSeq " , poppedSeq ) . detail ( " NextPageSeq " , nextPageSeq ) . detail ( " File0Name " , rawQueue - > files [ 0 ] . dbgFilename ) ; <nl> dispose ( this ) ; <nl> } <nl> - ACTOR static void dispose ( DiskQueue * self ) { <nl> - wait ( self - > onSafeToDestruct ( ) ) ; <nl> - TraceEvent ( " DQDestroyDone " , self - > dbgid ) . detail ( " File0Name " , self - > rawQueue - > files [ 0 ] . dbgFilename ) ; <nl> - self - > rawQueue - > dispose ( ) ; <nl> - delete self ; <nl> - } <nl> <nl> - virtual void close ( ) { <nl> + void close ( ) override { <nl> TraceEvent ( " DQClose " , dbgid ) <nl> . detail ( " LastPoppedSeq " , lastPoppedSeq ) <nl> . detail ( " PoppedSeq " , poppedSeq ) <nl> class DiskQueue : public IDiskQueue , public Tracked < DiskQueue > { <nl> . detail ( " File0Name " , rawQueue - > files [ 0 ] . 
dbgFilename ) ; <nl> close ( this ) ; <nl> } <nl> + <nl> + StorageBytes getStorageBytes ( ) const override { return rawQueue - > getStorageBytes ( ) ; } <nl> + <nl> + private : <nl> + ACTOR static void dispose ( DiskQueue * self ) { <nl> + wait ( self - > onSafeToDestruct ( ) ) ; <nl> + TraceEvent ( " DQDestroyDone " , self - > dbgid ) . detail ( " File0Name " , self - > rawQueue - > files [ 0 ] . dbgFilename ) ; <nl> + self - > rawQueue - > dispose ( ) ; <nl> + delete self ; <nl> + } <nl> + <nl> ACTOR static void close ( DiskQueue * self ) { <nl> wait ( self - > onSafeToDestruct ( ) ) ; <nl> TraceEvent ( " DQCloseDone " , self - > dbgid ) . detail ( " File0Name " , self - > rawQueue - > files [ 0 ] . dbgFilename ) ; <nl> class DiskQueue : public IDiskQueue , public Tracked < DiskQueue > { <nl> delete self ; <nl> } <nl> <nl> - virtual StorageBytes getStorageBytes ( ) { <nl> - return rawQueue - > getStorageBytes ( ) ; <nl> - } <nl> - <nl> - private : <nl> # pragma pack ( push , 1 ) <nl> struct PageHeader { <nl> union { <nl> class DiskQueue_PopUncommitted : public IDiskQueue { <nl> Future < bool > initializeRecovery ( location recoverAt ) { return queue - > initializeRecovery ( recoverAt ) ; } <nl> Future < Standalone < StringRef > > readNext ( int bytes ) { return readNext ( this , bytes ) ; } <nl> <nl> - virtual location getNextReadLocation ( ) { return queue - > getNextReadLocation ( ) ; } <nl> - <nl> - virtual Future < Standalone < StringRef > > read ( location start , location end , CheckHashes ch ) { return queue - > read ( start , end , ch ) ; } <nl> - virtual location getNextCommitLocation ( ) { return queue - > getNextCommitLocation ( ) ; } <nl> - virtual location getNextPushLocation ( ) { return queue - > getNextPushLocation ( ) ; } <nl> + location getNextReadLocation ( ) const override { return queue - > getNextReadLocation ( ) ; } <nl> <nl> + Future < Standalone < StringRef > > read ( location start , location end , CheckHashes ch ) override { <nl> + return queue - > read ( start , end , ch ) ; <nl> + } <nl> + location getNextCommitLocation ( ) const override { return queue - > getNextCommitLocation ( ) ; } <nl> + location getNextPushLocation ( ) const override { return queue - > getNextPushLocation ( ) ; } <nl> <nl> - virtual location push ( StringRef contents ) { <nl> + location push ( StringRef contents ) override { <nl> pushed = queue - > push ( contents ) ; <nl> return pushed ; <nl> } <nl> <nl> - virtual void pop ( location upTo ) { <nl> + void pop ( location upTo ) override { <nl> popped = std : : max ( popped , upTo ) ; <nl> ASSERT_WE_THINK ( committed > = popped ) ; <nl> queue - > pop ( std : : min ( committed , popped ) ) ; <nl> } <nl> <nl> - virtual int getCommitOverhead ( ) { <nl> + int getCommitOverhead ( ) const override { <nl> return queue - > getCommitOverhead ( ) + ( popped > committed ? queue - > getMaxPayload ( ) : 0 ) ; <nl> } <nl> <nl> - Future < Void > commit ( ) { <nl> + Future < Void > commit ( ) override { <nl> location pushLocation = pushed ; <nl> location popLocation = popped ; <nl> <nl> class DiskQueue_PopUncommitted : public IDiskQueue { <nl> return commitFuture ; <nl> } <nl> <nl> - virtual StorageBytes getStorageBytes ( ) { return queue - > getStorageBytes ( ) ; } <nl> + StorageBytes getStorageBytes ( ) const override { return queue - > getStorageBytes ( ) ; } <nl> <nl> private : <nl> DiskQueue * queue ; <nl> mmm a / fdbserver / IDiskQueue . h <nl> ppp b / fdbserver / IDiskQueue . 
h <nl> class IDiskQueue : public IClosable { <nl> / / Before calling push or commit , the caller * must * perform recovery by calling readNext ( ) until it returns less than the requested number of bytes . <nl> / / Thereafter it may not be called again . <nl> virtual Future < Standalone < StringRef > > readNext ( int bytes ) = 0 ; / / Return the next bytes in the queue ( beginning , the first time called , with the first unpopped byte ) <nl> - virtual location getNextReadLocation ( ) = 0 ; / / Returns a location > = the location of all bytes previously returned by readNext ( ) , and < = the location of all bytes subsequently returned <nl> - virtual location getNextCommitLocation ( ) = 0 ; / / If commit ( ) were to be called , all buffered writes would be written starting at ` location ` . <nl> - virtual location getNextPushLocation ( ) = 0 ; / / If push ( ) were to be called , the pushed data would be written starting at ` location ` . <nl> + virtual location getNextReadLocation ( ) <nl> + const = 0 ; / / Returns a location > = the location of all bytes previously returned by readNext ( ) , and < = the <nl> + / / location of all bytes subsequently returned <nl> + virtual location getNextCommitLocation ( ) <nl> + const = 0 ; / / If commit ( ) were to be called , all buffered writes would be written starting at ` location ` . <nl> + virtual location getNextPushLocation ( ) <nl> + const = 0 ; / / If push ( ) were to be called , the pushed data would be written starting at ` location ` . <nl> <nl> virtual Future < Standalone < StringRef > > read ( location start , location end , CheckHashes vc ) = 0 ; <nl> virtual location push ( StringRef contents ) = 0 ; / / Appends the given bytes to the byte stream . Returns a location token representing the * end * of the contents . <nl> virtual void pop ( location upTo ) = 0 ; / / Removes all bytes before the given location token from the byte stream . <nl> virtual Future < Void > commit ( ) = 0 ; / / returns when all prior pushes and pops are durable . If commit does not return ( due to close or a crash ) , any prefix of the pushed bytes and any prefix of the popped bytes may be durable . <nl> <nl> - virtual int getCommitOverhead ( ) = 0 ; / / returns the amount of unused space that would be written by a commit that immediately followed this call <nl> + virtual int getCommitOverhead ( ) const = 0 ; / / returns the amount of unused space that would be written by a commit <nl> + / / that immediately followed this call <nl> <nl> - virtual StorageBytes getStorageBytes ( ) = 0 ; <nl> + virtual StorageBytes getStorageBytes ( ) const = 0 ; <nl> } ; <nl> <nl> template < > <nl> mmm a / fdbserver / KeyValueStoreRocksDB . actor . cpp <nl> ppp b / fdbserver / KeyValueStoreRocksDB . actor . cpp <nl> StringRef toStringRef ( rocksdb : : Slice s ) { <nl> return StringRef ( reinterpret_cast < const uint8_t * > ( s . data ( ) ) , s . size ( ) ) ; <nl> } <nl> <nl> - rocksdb : : Options getOptions ( const std : : string & path ) { <nl> + rocksdb : : Options getOptions ( ) { <nl> rocksdb : : Options options ; <nl> - bool exists = directoryExists ( path ) ; <nl> - options . create_if_missing = ! exists ; <nl> + options . 
create_if_missing = true ; <nl> return options ; <nl> } <nl> <nl> struct RocksDBKeyValueStore : IKeyValueStore { <nl> std : : vector < rocksdb : : ColumnFamilyDescriptor > defaultCF = { rocksdb : : ColumnFamilyDescriptor { <nl> " default " , getCFOptions ( ) } } ; <nl> std : : vector < rocksdb : : ColumnFamilyHandle * > handle ; <nl> - auto status = rocksdb : : DB : : Open ( getOptions ( a . path ) , a . path , defaultCF , & handle , & db ) ; <nl> + auto status = rocksdb : : DB : : Open ( getOptions ( ) , a . path , defaultCF , & handle , & db ) ; <nl> if ( ! status . ok ( ) ) { <nl> TraceEvent ( SevError , " RocksDBError " ) . detail ( " Error " , status . ToString ( ) ) . detail ( " Method " , " Open " ) ; <nl> a . done . sendError ( statusToError ( status ) ) ; <nl> struct RocksDBKeyValueStore : IKeyValueStore { <nl> if ( a . deleteOnClose ) { <nl> std : : vector < rocksdb : : ColumnFamilyDescriptor > defaultCF = { rocksdb : : ColumnFamilyDescriptor { <nl> " default " , getCFOptions ( ) } } ; <nl> - rocksdb : : DestroyDB ( a . path , getOptions ( a . path ) , defaultCF ) ; <nl> + rocksdb : : DestroyDB ( a . path , getOptions ( ) , defaultCF ) ; <nl> } <nl> a . done . send ( Void ( ) ) ; <nl> } <nl> struct RocksDBKeyValueStore : IKeyValueStore { <nl> struct Reader : IThreadPoolReceiver { <nl> DB & db ; <nl> rocksdb : : ReadOptions readOptions ; <nl> - std : : unique_ptr < rocksdb : : Iterator > cursor = nullptr ; <nl> <nl> explicit Reader ( DB & db ) : db ( db ) { } <nl> <nl> struct RocksDBKeyValueStore : IKeyValueStore { <nl> virtual double getTimeEstimate ( ) { return SERVER_KNOBS - > READ_RANGE_TIME_ESTIMATE ; } <nl> } ; <nl> void action ( ReadRangeAction & a ) { <nl> - if ( cursor = = nullptr ) { <nl> - cursor = std : : unique_ptr < rocksdb : : Iterator > ( db - > NewIterator ( readOptions ) ) ; <nl> - } else { <nl> - cursor - > Refresh ( ) ; <nl> - } <nl> + auto cursor = std : : unique_ptr < rocksdb : : Iterator > ( db - > NewIterator ( readOptions ) ) ; <nl> Standalone < RangeResultRef > result ; <nl> int accumulatedBytes = 0 ; <nl> if ( a . rowLimit > = 0 ) { <nl> struct RocksDBKeyValueStore : IKeyValueStore { <nl> if ( ! s . ok ( ) ) { <nl> TraceEvent ( SevError , " RocksDBError " ) . detail ( " Error " , s . ToString ( ) ) . detail ( " Method " , " ReadRange " ) ; <nl> } <nl> - result . more = ( result . size ( ) = = a . rowLimit ) ; <nl> + result . more = <nl> + ( result . size ( ) = = a . rowLimit ) | | ( result . size ( ) = = - a . rowLimit ) | | ( accumulatedBytes > = a . byteLimit ) ; <nl> if ( result . more ) { <nl> result . readThrough = result [ result . size ( ) - 1 ] . 
key ; <nl> } <nl> struct RocksDBKeyValueStore : IKeyValueStore { <nl> DB db = nullptr ; <nl> std : : string path ; <nl> UID id ; <nl> - size_t diskBytesUsed = 0 ; <nl> Reference < IThreadPool > writeThread ; <nl> Reference < IThreadPool > readThreads ; <nl> unsigned nReaders = 16 ; <nl> struct RocksDBKeyValueStore : IKeyValueStore { <nl> int64_t free ; <nl> int64_t total ; <nl> <nl> + uint64_t sstBytes = 0 ; <nl> + ASSERT ( db - > GetIntProperty ( rocksdb : : DB : : Properties : : kTotalSstFilesSize , & sstBytes ) ) ; <nl> + uint64_t memtableBytes = 0 ; <nl> + ASSERT ( db - > GetIntProperty ( rocksdb : : DB : : Properties : : kSizeAllMemTables , & memtableBytes ) ) ; <nl> g_network - > getDiskBytes ( path , free , total ) ; <nl> <nl> - return StorageBytes ( free , total , diskBytesUsed , free ) ; <nl> + return StorageBytes ( free , total , sstBytes + memtableBytes , free ) ; <nl> } <nl> } ; <nl> <nl> mmm a / fdbserver / Knobs . cpp <nl> ppp b / fdbserver / Knobs . cpp <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( MAX_PROXY_COMPUTE , 2 . 0 ) ; <nl> init ( PROXY_COMPUTE_BUCKETS , 20000 ) ; <nl> init ( PROXY_COMPUTE_GROWTH_RATE , 0 . 01 ) ; <nl> - init ( TXN_STATE_SEND_AMOUNT , 2 ) ; <nl> - init ( ASK_READ_VERSION_FROM_MASTER , true ) ; <nl> + init ( TXN_STATE_SEND_AMOUNT , 4 ) ; <nl> <nl> / / Master Server <nl> / / masterCommitter ( ) in the master server will allow lower priority tasks ( e . g . DataDistibution ) <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( PROVISIONAL_START_DELAY , 1 . 0 ) ; <nl> init ( PROVISIONAL_MAX_DELAY , 60 . 0 ) ; <nl> init ( PROVISIONAL_DELAY_GROWTH , 1 . 5 ) ; <nl> - init ( SECONDS_BEFORE_RECRUIT_BACKUP_WORKER , 4 . 0 ) ; if ( randomize & & BUGGIFY ) SECONDS_BEFORE_RECRUIT_BACKUP_WORKER = deterministicRandom ( ) - > random01 ( ) * 8 ; <nl> + init ( SECONDS_BEFORE_RECRUIT_BACKUP_WORKER , 4 . 0 ) ; if ( randomize & & BUGGIFY ) SECONDS_BEFORE_RECRUIT_BACKUP_WORKER = deterministicRandom ( ) - > random01 ( ) * 8 ; <nl> + init ( CC_INTERFACE_TIMEOUT , 10 . 0 ) ; if ( randomize & & BUGGIFY ) CC_INTERFACE_TIMEOUT = 0 . 0 ; <nl> <nl> / / Resolver <nl> init ( SAMPLE_OFFSET_PER_KEY , 100 ) ; <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( MIN_TAG_PAGES_READ_RATE , 1 . 0e4 ) ; if ( randomize & & BUGGIFY ) MIN_TAG_PAGES_READ_RATE = 0 ; <nl> init ( READ_TAG_MEASUREMENT_INTERVAL , 30 . 0 ) ; if ( randomize & & BUGGIFY ) READ_TAG_MEASUREMENT_INTERVAL = 1 . 
0 ; <nl> init ( OPERATION_COST_BYTE_FACTOR , 16384 ) ; if ( randomize & & BUGGIFY ) OPERATION_COST_BYTE_FACTOR = 4096 ; <nl> - init ( PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS , false ) ; if ( randomize & & BUGGIFY ) PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS = true ; <nl> + init ( PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS , true ) ; if ( randomize & & BUGGIFY ) PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS = false ; <nl> <nl> / / Wait Failure <nl> init ( MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS , 250 ) ; if ( randomize & & BUGGIFY ) MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS = 2 ; <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( FASTRESTORE_SAMPLING_PERCENT , 100 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_SAMPLING_PERCENT = deterministicRandom ( ) - > random01 ( ) * 100 ; } <nl> init ( FASTRESTORE_NUM_LOADERS , 2 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_NUM_LOADERS = deterministicRandom ( ) - > random01 ( ) * 10 + 1 ; } <nl> init ( FASTRESTORE_NUM_APPLIERS , 3 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_NUM_APPLIERS = deterministicRandom ( ) - > random01 ( ) * 10 + 1 ; } <nl> - init ( FASTRESTORE_TXN_BATCH_MAX_BYTES , 1048576 . 0 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_TXN_BATCH_MAX_BYTES = deterministicRandom ( ) - > random01 ( ) * 1024 . 0 * 1024 . 0 + 1 . 0 ; } <nl> - init ( FASTRESTORE_VERSIONBATCH_MAX_BYTES , 10 . 0 * 1024 . 0 * 1024 . 0 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_VERSIONBATCH_MAX_BYTES = deterministicRandom ( ) - > random01 ( ) * 10 . 0 * 1024 . 0 * 1024 . 0 * 1024 . 0 ; } <nl> + init ( FASTRESTORE_TXN_BATCH_MAX_BYTES , 1024 . 0 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_TXN_BATCH_MAX_BYTES = deterministicRandom ( ) - > random01 ( ) * 1024 . 0 * 1024 . 0 + 1 . 0 ; } <nl> + init ( FASTRESTORE_VERSIONBATCH_MAX_BYTES , 2 . 0 * 1024 . 0 * 1024 . 0 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_VERSIONBATCH_MAX_BYTES = deterministicRandom ( ) - > random01 ( ) * 10 . 0 * 1024 . 0 * 1024 . 0 * 1024 . 0 ; } <nl> init ( FASTRESTORE_VB_PARALLELISM , 5 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_VB_PARALLELISM = deterministicRandom ( ) - > random01 ( ) * 20 + 1 ; } <nl> init ( FASTRESTORE_VB_MONITOR_DELAY , 30 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_VB_MONITOR_DELAY = deterministicRandom ( ) - > random01 ( ) * 20 + 1 ; } <nl> init ( FASTRESTORE_VB_LAUNCH_DELAY , 5 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_VB_LAUNCH_DELAY = deterministicRandom ( ) - > random01 ( ) * 60 + 1 ; } <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( FASTRESTORE_TXN_CLEAR_MAX , 100 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_TXN_CLEAR_MAX = deterministicRandom ( ) - > random01 ( ) * 100 + 1 ; } <nl> init ( FASTRESTORE_TXN_RETRY_MAX , 10 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_TXN_RETRY_MAX = deterministicRandom ( ) - > random01 ( ) * 100 + 1 ; } <nl> init ( FASTRESTORE_TXN_EXTRA_DELAY , 0 . 1 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_TXN_EXTRA_DELAY = deterministicRandom ( ) - > random01 ( ) * 1 + 0 . 
001 ; } <nl> + init ( FASTRESTORE_NOT_WRITE_DB , false ) ; / / Perf test only : set it to true will cause simulation failure <nl> + init ( FASTRESTORE_USE_RANGE_FILE , true ) ; / / Perf test only : set it to false will cause simulation failure <nl> + init ( FASTRESTORE_USE_LOG_FILE , true ) ; / / Perf test only : set it to false will cause simulation failure <nl> <nl> init ( REDWOOD_DEFAULT_PAGE_SIZE , 4096 ) ; <nl> init ( REDWOOD_KVSTORE_CONCURRENT_READS , 64 ) ; <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( REDWOOD_LAZY_CLEAR_BATCH_SIZE_PAGES , 10 ) ; <nl> init ( REDWOOD_LAZY_CLEAR_MIN_PAGES , 0 ) ; <nl> init ( REDWOOD_LAZY_CLEAR_MAX_PAGES , 1e6 ) ; <nl> - init ( REDWOOD_REMAP_CLEANUP_BATCH_SIZE , 5000 ) ; <nl> - init ( REDWOOD_REMAP_CLEANUP_VERSION_LAG_MIN , 4 ) ; <nl> - init ( REDWOOD_REMAP_CLEANUP_VERSION_LAG_MAX , 15 ) ; <nl> + init ( REDWOOD_REMAP_CLEANUP_WINDOW , 50 ) ; <nl> + init ( REDWOOD_REMAP_CLEANUP_LAG , 0 . 1 ) ; <nl> init ( REDWOOD_LOGGING_INTERVAL , 5 . 0 ) ; <nl> <nl> / / Server request latency measurement <nl> mmm a / fdbserver / Knobs . h <nl> ppp b / fdbserver / Knobs . h <nl> class ServerKnobs : public Knobs { <nl> int PROXY_COMPUTE_BUCKETS ; <nl> double PROXY_COMPUTE_GROWTH_RATE ; <nl> int TXN_STATE_SEND_AMOUNT ; <nl> - bool ASK_READ_VERSION_FROM_MASTER ; <nl> <nl> / / Master Server <nl> double COMMIT_SLEEP_TIME ; <nl> class ServerKnobs : public Knobs { <nl> double PROVISIONAL_DELAY_GROWTH ; <nl> double PROVISIONAL_MAX_DELAY ; <nl> double SECONDS_BEFORE_RECRUIT_BACKUP_WORKER ; <nl> + double CC_INTERFACE_TIMEOUT ; <nl> <nl> / / Resolver <nl> int64_t KEY_BYTES_PER_SAMPLE ; <nl> class ServerKnobs : public Knobs { <nl> int FASTRESTORE_TXN_CLEAR_MAX ; / / threshold to start tracking each clear op in a txn <nl> int FASTRESTORE_TXN_RETRY_MAX ; / / threshold to start output error on too many retries <nl> double FASTRESTORE_TXN_EXTRA_DELAY ; / / extra delay to avoid overwhelming fdb <nl> + bool FASTRESTORE_NOT_WRITE_DB ; / / do not write result to DB . Only for dev testing <nl> + bool FASTRESTORE_USE_RANGE_FILE ; / / use range file in backup <nl> + bool FASTRESTORE_USE_LOG_FILE ; / / use log file in backup <nl> <nl> int REDWOOD_DEFAULT_PAGE_SIZE ; / / Page size for new Redwood files <nl> int REDWOOD_KVSTORE_CONCURRENT_READS ; / / Max number of simultaneous point or range reads in progress . <nl> class ServerKnobs : public Knobs { <nl> int REDWOOD_LAZY_CLEAR_BATCH_SIZE_PAGES ; / / Number of pages to try to pop from the lazy delete queue and process at once <nl> int REDWOOD_LAZY_CLEAR_MIN_PAGES ; / / Minimum number of pages to free before ending a lazy clear cycle , unless the queue is empty <nl> int REDWOOD_LAZY_CLEAR_MAX_PAGES ; / / Maximum number of pages to free before ending a lazy clear cycle , unless the queue is empty <nl> - int REDWOOD_REMAP_CLEANUP_BATCH_SIZE ; / / Number of queue entries for remap cleanup to process and potentially coalesce at once . 
<nl> - int REDWOOD_REMAP_CLEANUP_VERSION_LAG_MIN ; / / Number of versions between head of remap queue and oldest retained version before remap cleanup starts <nl> - int REDWOOD_REMAP_CLEANUP_VERSION_LAG_MAX ; / / Number of versions between head of remap queue and oldest retained version before remap cleanup may stop <nl> + int64_t REDWOOD_REMAP_CLEANUP_WINDOW ; / / Remap remover lag interval in which to coalesce page writes <nl> + double REDWOOD_REMAP_CLEANUP_LAG ; / / Maximum allowed remap remover lag behind the cleanup window as a multiple of the window size <nl> double REDWOOD_LOGGING_INTERVAL ; <nl> - <nl> + <nl> / / Server request latency measurement <nl> int LATENCY_SAMPLE_SIZE ; <nl> double LATENCY_METRICS_LOGGING_INTERVAL ; <nl> mmm a / fdbserver / LogSystemDiskQueueAdapter . actor . cpp <nl> ppp b / fdbserver / LogSystemDiskQueueAdapter . actor . cpp <nl> Future < Standalone < StringRef > > LogSystemDiskQueueAdapter : : readNext ( int bytes ) { <nl> return LogSystemDiskQueueAdapterImpl : : readNext ( this , bytes ) ; <nl> } <nl> <nl> - IDiskQueue : : location LogSystemDiskQueueAdapter : : getNextReadLocation ( ) { <nl> + IDiskQueue : : location LogSystemDiskQueueAdapter : : getNextReadLocation ( ) const { <nl> return IDiskQueue : : location ( 0 , recoveryQueueLoc ) ; <nl> } <nl> <nl> mmm a / fdbserver / LogSystemDiskQueueAdapter . h <nl> ppp b / fdbserver / LogSystemDiskQueueAdapter . h <nl> class LogSystemDiskQueueAdapter : public IDiskQueue { <nl> Future < CommitMessage > getCommitMessage ( ) ; <nl> <nl> / / IClosable interface <nl> - virtual Future < Void > getError ( ) ; <nl> - virtual Future < Void > onClosed ( ) ; <nl> - virtual void dispose ( ) ; <nl> - virtual void close ( ) ; <nl> + Future < Void > getError ( ) override ; <nl> + Future < Void > onClosed ( ) override ; <nl> + void dispose ( ) override ; <nl> + void close ( ) override ; <nl> <nl> / / IDiskQueue interface <nl> - virtual Future < bool > initializeRecovery ( location recoverAt ) { return false ; } <nl> - virtual Future < Standalone < StringRef > > readNext ( int bytes ) ; <nl> - virtual IDiskQueue : : location getNextReadLocation ( ) ; <nl> - virtual IDiskQueue : : location getNextCommitLocation ( ) { ASSERT ( false ) ; throw internal_error ( ) ; } <nl> - virtual IDiskQueue : : location getNextPushLocation ( ) { ASSERT ( false ) ; throw internal_error ( ) ; } <nl> - virtual Future < Standalone < StringRef > > read ( location start , location end , CheckHashes ch ) { ASSERT ( false ) ; throw internal_error ( ) ; } <nl> - virtual IDiskQueue : : location push ( StringRef contents ) ; <nl> - virtual void pop ( IDiskQueue : : location upTo ) ; <nl> - virtual Future < Void > commit ( ) ; <nl> - virtual StorageBytes getStorageBytes ( ) { ASSERT ( false ) ; throw internal_error ( ) ; } <nl> - virtual int getCommitOverhead ( ) { return 0 ; } / / SOMEDAY : could this be more accurate ? 
<nl> + Future < bool > initializeRecovery ( location recoverAt ) override { return false ; } <nl> + Future < Standalone < StringRef > > readNext ( int bytes ) override ; <nl> + IDiskQueue : : location getNextReadLocation ( ) const override ; <nl> + IDiskQueue : : location getNextCommitLocation ( ) const override { <nl> + ASSERT ( false ) ; <nl> + throw internal_error ( ) ; <nl> + } <nl> + IDiskQueue : : location getNextPushLocation ( ) const override { <nl> + ASSERT ( false ) ; <nl> + throw internal_error ( ) ; <nl> + } <nl> + Future < Standalone < StringRef > > read ( location start , location end , CheckHashes ch ) override { <nl> + ASSERT ( false ) ; <nl> + throw internal_error ( ) ; <nl> + } <nl> + IDiskQueue : : location push ( StringRef contents ) override ; <nl> + void pop ( IDiskQueue : : location upTo ) override ; <nl> + Future < Void > commit ( ) override ; <nl> + StorageBytes getStorageBytes ( ) const override { <nl> + ASSERT ( false ) ; <nl> + throw internal_error ( ) ; <nl> + } <nl> + int getCommitOverhead ( ) const override { return 0 ; } / / SOMEDAY : could this be more accurate ? <nl> <nl> private : <nl> Reference < AsyncVar < PeekTxsInfo > > peekLocality ; <nl> mmm a / fdbserver / MasterProxyServer . actor . cpp <nl> ppp b / fdbserver / MasterProxyServer . actor . cpp <nl> <nl> * limitations under the License . <nl> * / <nl> <nl> + # include < algorithm > <nl> # include < tuple > <nl> + <nl> # include < fdbclient / DatabaseContext . h > <nl> # include " fdbclient / Atomic . h " <nl> # include " fdbclient / DatabaseConfiguration . h " <nl> <nl> # include " fdbserver / LogSystemDiskQueueAdapter . h " <nl> # include " fdbserver / MasterInterface . h " <nl> # include " fdbserver / MutationTracking . h " <nl> + # include " fdbserver / ProxyCommitData . actor . h " <nl> # include " fdbserver / RecoveryState . h " <nl> # include " fdbserver / ServerDBInfo . h " <nl> # include " fdbserver / WaitFailure . h " <nl> <nl> # include " flow / Knobs . h " <nl> # include " flow / TDMetric . actor . h " <nl> # include " flow / Tracing . h " <nl> + <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> ACTOR Future < Void > broadcastTxnRequest ( TxnStateRequest req , int sendAmount , bool sendReply ) { <nl> ACTOR Future < Void > broadcastTxnRequest ( TxnStateRequest req , int sendAmount , bool <nl> return Void ( ) ; <nl> } <nl> <nl> - struct ProxyStats { <nl> - CounterCollection cc ; <nl> - <nl> - Counter txnCommitIn , txnCommitVersionAssigned , txnCommitResolving , txnCommitResolved , txnCommitOut , txnCommitOutSuccess , txnCommitErrors ; <nl> - Counter txnConflicts ; <nl> - Counter commitBatchIn , commitBatchOut ; <nl> - Counter mutationBytes ; <nl> - Counter mutations ; <nl> - Counter conflictRanges ; <nl> - Counter keyServerLocationIn , keyServerLocationOut , keyServerLocationErrors ; <nl> - Version lastCommitVersionAssigned ; <nl> - <nl> - LatencySample commitLatencySample ; <nl> - LatencyBands commitLatencyBands ; <nl> - <nl> - Future < Void > logger ; <nl> - <nl> - int recentRequests ; <nl> - Deque < int > requestBuckets ; <nl> - double lastBucketBegin ; <nl> - double bucketInterval ; <nl> - <nl> - void updateRequestBuckets ( ) { <nl> - while ( now ( ) - lastBucketBegin > bucketInterval ) { <nl> - lastBucketBegin + = bucketInterval ; <nl> - recentRequests - = requestBuckets . front ( ) ; <nl> - requestBuckets . pop_front ( ) ; <nl> - requestBuckets . 
push_back ( 0 ) ; <nl> - } <nl> - } <nl> - <nl> - void addRequest ( ) { <nl> - updateRequestBuckets ( ) ; <nl> - + + recentRequests ; <nl> - + + requestBuckets . back ( ) ; <nl> - } <nl> - <nl> - int getRecentRequests ( ) { <nl> - updateRequestBuckets ( ) ; <nl> - return recentRequests * FLOW_KNOBS - > BASIC_LOAD_BALANCE_UPDATE_RATE / ( FLOW_KNOBS - > BASIC_LOAD_BALANCE_UPDATE_RATE - ( lastBucketBegin + bucketInterval - now ( ) ) ) ; <nl> - } <nl> - <nl> - explicit ProxyStats ( UID id , Version * pVersion , NotifiedVersion * pCommittedVersion , <nl> - int64_t * commitBatchesMemBytesCountPtr ) <nl> - : cc ( " ProxyStats " , id . toString ( ) ) , recentRequests ( 0 ) , lastBucketBegin ( now ( ) ) , <nl> - bucketInterval ( FLOW_KNOBS - > BASIC_LOAD_BALANCE_UPDATE_RATE / FLOW_KNOBS - > BASIC_LOAD_BALANCE_BUCKETS ) , <nl> - txnCommitIn ( " TxnCommitIn " , cc ) , txnCommitVersionAssigned ( " TxnCommitVersionAssigned " , cc ) , <nl> - txnCommitResolving ( " TxnCommitResolving " , cc ) , txnCommitResolved ( " TxnCommitResolved " , cc ) , <nl> - txnCommitOut ( " TxnCommitOut " , cc ) , txnCommitOutSuccess ( " TxnCommitOutSuccess " , cc ) , <nl> - txnCommitErrors ( " TxnCommitErrors " , cc ) , txnConflicts ( " TxnConflicts " , cc ) , commitBatchIn ( " CommitBatchIn " , cc ) , <nl> - commitBatchOut ( " CommitBatchOut " , cc ) , mutationBytes ( " MutationBytes " , cc ) , mutations ( " Mutations " , cc ) , <nl> - conflictRanges ( " ConflictRanges " , cc ) , keyServerLocationIn ( " KeyServerLocationIn " , cc ) , <nl> - keyServerLocationOut ( " KeyServerLocationOut " , cc ) , keyServerLocationErrors ( " KeyServerLocationErrors " , cc ) , <nl> - lastCommitVersionAssigned ( 0 ) , <nl> - commitLatencySample ( " CommitLatencyMetrics " , id , SERVER_KNOBS - > LATENCY_METRICS_LOGGING_INTERVAL , <nl> - SERVER_KNOBS - > LATENCY_SAMPLE_SIZE ) , <nl> - commitLatencyBands ( " CommitLatencyBands " , id , SERVER_KNOBS - > STORAGE_LOGGING_DELAY ) { <nl> - specialCounter ( cc , " LastAssignedCommitVersion " , [ this ] ( ) { return this - > lastCommitVersionAssigned ; } ) ; <nl> - specialCounter ( cc , " Version " , [ pVersion ] ( ) { return * pVersion ; } ) ; <nl> - specialCounter ( cc , " CommittedVersion " , [ pCommittedVersion ] ( ) { return pCommittedVersion - > get ( ) ; } ) ; <nl> - specialCounter ( cc , " CommitBatchesMemBytesCount " , [ commitBatchesMemBytesCountPtr ] ( ) { return * commitBatchesMemBytesCountPtr ; } ) ; <nl> - logger = traceCounters ( " ProxyMetrics " , id , SERVER_KNOBS - > WORKER_LOGGING_INTERVAL , & cc , " ProxyMetrics " ) ; <nl> - for ( int i = 0 ; i < FLOW_KNOBS - > BASIC_LOAD_BALANCE_BUCKETS ; i + + ) { <nl> - requestBuckets . push_back ( 0 ) ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> struct TransactionRateInfo { <nl> double rate ; <nl> double limit ; <nl> struct TransactionRateInfo { <nl> Smoother smoothRate ; <nl> Smoother smoothReleased ; <nl> <nl> - TransactionRateInfo ( double rate ) : rate ( rate ) , limit ( 0 ) , budget ( 0 ) , disabled ( true ) , smoothRate ( SERVER_KNOBS - > START_TRANSACTION_RATE_WINDOW ) , <nl> + TransactionRateInfo ( double rate ) : rate ( rate ) , limit ( 0 ) , budget ( 0 ) , disabled ( true ) , smoothRate ( SERVER_KNOBS - > START_TRANSACTION_RATE_WINDOW ) , <nl> smoothReleased ( SERVER_KNOBS - > START_TRANSACTION_RATE_WINDOW ) { } <nl> <nl> void reset ( ) { <nl> struct TransactionRateInfo { <nl> / / have started but didn ' t , and making that our limit . 
More precisely , we track a smoothed rate limit and release rate , <nl> / / the difference of which is the rate of additional transactions that we could have released based on that window . <nl> / / Then we multiply by the window size to get a number of transactions . <nl> - / / <nl> + / / <nl> / / Limit can be negative in the event that we are releasing more transactions than we are allowed ( due to the use of <nl> / / our budget or because of higher priority transactions ) . <nl> double releaseRate = smoothRate . smoothTotal ( ) - smoothReleased . smoothRate ( ) ; <nl> struct TransactionRateInfo { <nl> void updateBudget ( int64_t numStartedAtPriority , bool queueEmptyAtPriority , double elapsed ) { <nl> / / Update the budget to accumulate any extra capacity available or remove any excess that was used . <nl> / / The actual delta is the portion of the limit we didn ' t use multiplied by the fraction of the window that elapsed . <nl> - / / <nl> - / / We may have exceeded our limit due to the budget or because of higher priority transactions , in which case this <nl> - / / delta will be negative . The delta can also be negative in the event that our limit was negative , which can happen <nl> + / / <nl> + / / We may have exceeded our limit due to the budget or because of higher priority transactions , in which case this <nl> + / / delta will be negative . The delta can also be negative in the event that our limit was negative , which can happen <nl> / / if we had already started more transactions in our window than our rate would have allowed . <nl> / / <nl> / / This budget has the property that when the budget is required to start transactions ( because batches are big ) , <nl> / / the sum limit + budget will increase linearly from 0 to the batch size over time and decrease by the batch size <nl> - / / upon starting a batch . In other words , this works equivalently to a model where we linearly accumulate budget over <nl> + / / upon starting a batch . In other words , this works equivalently to a model where we linearly accumulate budget over <nl> / / time in the case that our batches are too big to take advantage of the window based limits . <nl> budget = std : : max ( 0 . 0 , budget + elapsed * ( limit - numStartedAtPriority ) / SERVER_KNOBS - > START_TRANSACTION_RATE_WINDOW ) ; <nl> <nl> ACTOR void discardCommit ( UID id , Future < LogSystemDiskQueueAdapter : : CommitMessage <nl> ASSERT ( dummyCommitState . 
isReady ( ) ) ; <nl> } <nl> <nl> - DESCR struct SingleKeyMutation { <nl> - Standalone < StringRef > shardBegin ; <nl> - Standalone < StringRef > shardEnd ; <nl> - int64_t tag1 ; <nl> - int64_t tag2 ; <nl> - int64_t tag3 ; <nl> - } ; <nl> - <nl> - struct ProxyCommitData { <nl> - UID dbgid ; <nl> - int64_t commitBatchesMemBytesCount ; <nl> - ProxyStats stats ; <nl> - MasterInterface master ; <nl> - vector < ResolverInterface > resolvers ; <nl> - LogSystemDiskQueueAdapter * logAdapter ; <nl> - Reference < ILogSystem > logSystem ; <nl> - IKeyValueStore * txnStateStore ; <nl> - NotifiedVersion committedVersion ; / / Provided that this recovery has succeeded or will succeed , this version is fully committed ( durable ) <nl> - Version minKnownCommittedVersion ; / / No version smaller than this one will be used as the known committed version <nl> - / / during recovery <nl> - Version version ; / / The version at which txnStateStore is up to date <nl> - Promise < Void > validState ; / / Set once txnStateStore and version are valid <nl> - double lastVersionTime ; <nl> - KeyRangeMap < std : : set < Key > > vecBackupKeys ; <nl> - uint64_t commitVersionRequestNumber ; <nl> - uint64_t mostRecentProcessedRequestNumber ; <nl> - KeyRangeMap < Deque < std : : pair < Version , int > > > keyResolvers ; <nl> - KeyRangeMap < ServerCacheInfo > keyInfo ; <nl> - KeyRangeMap < bool > cacheInfo ; <nl> - std : : map < Key , applyMutationsData > uid_applyMutationsData ; <nl> - bool firstProxy ; <nl> - double lastCoalesceTime ; <nl> - bool locked ; <nl> - Optional < Value > metadataVersion ; <nl> - double commitBatchInterval ; <nl> - <nl> - int64_t localCommitBatchesStarted ; <nl> - NotifiedVersion latestLocalCommitBatchResolving ; <nl> - NotifiedVersion latestLocalCommitBatchLogging ; <nl> - <nl> - RequestStream < GetReadVersionRequest > getConsistentReadVersion ; <nl> - RequestStream < CommitTransactionRequest > commit ; <nl> - Database cx ; <nl> - Reference < AsyncVar < ServerDBInfo > > db ; <nl> - EventMetricHandle < SingleKeyMutation > singleKeyMutationEvent ; <nl> - <nl> - std : : map < UID , Reference < StorageInfo > > storageCache ; <nl> - std : : map < Tag , Version > tag_popped ; <nl> - Deque < std : : pair < Version , Version > > txsPopVersions ; <nl> - Version lastTxsPop ; <nl> - bool popRemoteTxs ; <nl> - vector < Standalone < StringRef > > whitelistedBinPathVec ; <nl> - <nl> - Optional < LatencyBandConfig > latencyBandConfig ; <nl> - double lastStartCommit ; <nl> - double lastCommitLatency ; <nl> - int updateCommitRequests = 0 ; <nl> - NotifiedDouble lastCommitTime ; <nl> - <nl> - vector < double > commitComputePerOperation ; <nl> - TransactionTagMap < TransactionCommitCostEstimation > transactionTagCommitCostEst ; <nl> - <nl> - / / The tag related to a storage server rarely change , so we keep a vector of tags for each key range to be slightly more CPU efficient . <nl> - / / When a tag related to a storage server does change , we empty out all of these vectors to signify they must be repopulated . <nl> - / / We do not repopulate them immediately to avoid a slow task . <nl> - const vector < Tag > & tagsForKey ( StringRef key ) { <nl> - auto & tags = keyInfo [ key ] . tags ; <nl> - if ( ! tags . size ( ) ) { <nl> - auto & r = keyInfo . rangeContaining ( key ) . value ( ) ; <nl> - for ( auto info : r . src_info ) { <nl> - r . tags . push_back ( info - > tag ) ; <nl> - } <nl> - for ( auto info : r . dest_info ) { <nl> - r . tags . push_back ( info - > tag ) ; <nl> - } <nl> - uniquify ( r . 
tags ) ; <nl> - return r . tags ; <nl> - } <nl> - return tags ; <nl> - } <nl> - <nl> - const bool needsCacheTag ( KeyRangeRef range ) { <nl> - auto ranges = cacheInfo . intersectingRanges ( range ) ; <nl> - for ( auto r : ranges ) { <nl> - if ( r . value ( ) ) { <nl> - return true ; <nl> - } <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> - void updateLatencyBandConfig ( Optional < LatencyBandConfig > newLatencyBandConfig ) { <nl> - if ( newLatencyBandConfig . present ( ) ! = latencyBandConfig . present ( ) <nl> - | | ( newLatencyBandConfig . present ( ) & & newLatencyBandConfig . get ( ) . commitConfig ! = latencyBandConfig . get ( ) . commitConfig ) ) <nl> - { <nl> - TraceEvent ( " LatencyBandCommitUpdatingConfig " ) . detail ( " Present " , newLatencyBandConfig . present ( ) ) ; <nl> - stats . commitLatencyBands . clearBands ( ) ; <nl> - if ( newLatencyBandConfig . present ( ) ) { <nl> - for ( auto band : newLatencyBandConfig . get ( ) . commitConfig . bands ) { <nl> - stats . commitLatencyBands . addThreshold ( band ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - latencyBandConfig = newLatencyBandConfig ; <nl> - } <nl> - <nl> - ProxyCommitData ( UID dbgid , MasterInterface master , RequestStream < GetReadVersionRequest > getConsistentReadVersion , Version recoveryTransactionVersion , RequestStream < CommitTransactionRequest > commit , Reference < AsyncVar < ServerDBInfo > > db , bool firstProxy ) <nl> - : dbgid ( dbgid ) , stats ( dbgid , & version , & committedVersion , & commitBatchesMemBytesCount ) , master ( master ) , <nl> - logAdapter ( NULL ) , txnStateStore ( NULL ) , popRemoteTxs ( false ) , <nl> - committedVersion ( recoveryTransactionVersion ) , version ( 0 ) , minKnownCommittedVersion ( 0 ) , <nl> - lastVersionTime ( 0 ) , commitVersionRequestNumber ( 1 ) , mostRecentProcessedRequestNumber ( 0 ) , <nl> - getConsistentReadVersion ( getConsistentReadVersion ) , commit ( commit ) , lastCoalesceTime ( 0 ) , <nl> - localCommitBatchesStarted ( 0 ) , locked ( false ) , commitBatchInterval ( SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_MIN ) , <nl> - firstProxy ( firstProxy ) , cx ( openDBOnServer ( db , TaskPriority : : DefaultEndpoint , true , true ) ) , db ( db ) , <nl> - singleKeyMutationEvent ( LiteralStringRef ( " SingleKeyMutation " ) ) , commitBatchesMemBytesCount ( 0 ) , lastTxsPop ( 0 ) , lastStartCommit ( 0 ) , lastCommitLatency ( SERVER_KNOBS - > REQUIRED_MIN_RECOVERY_DURATION ) , lastCommitTime ( 0 ) <nl> - { <nl> - commitComputePerOperation . resize ( SERVER_KNOBS - > PROXY_COMPUTE_BUCKETS , 0 . 0 ) ; <nl> - } <nl> - } ; <nl> - <nl> struct ResolutionRequestBuilder { <nl> ProxyCommitData * self ; <nl> vector < ResolveTransactionBatchRequest > requests ; <nl> struct ResolutionRequestBuilder { <nl> resolversUsed . push_back ( r ) ; <nl> outTr [ r ] - > report_conflicting_keys = trIn . report_conflicting_keys ; <nl> } <nl> - transactionResolverMap . push_back ( std : : move ( resolversUsed ) ) ; <nl> + transactionResolverMap . emplace_back ( std : : move ( resolversUsed ) ) ; <nl> } <nl> } ; <nl> <nl> ACTOR Future < Void > commitBatcher ( ProxyCommitData * commitData , PromiseStream < std : <nl> out . send ( { std : : move ( batch ) , batchBytes } ) ; <nl> lastBatch = now ( ) ; <nl> timeout = delayJittered ( commitData - > commitBatchInterval , TaskPriority : : ProxyCommitBatcher ) ; <nl> - batch = std : : vector < CommitTransactionRequest > ( ) ; <nl> + batch . 
clear ( ) ; <nl> batchBytes = 0 ; <nl> } <nl> <nl> ACTOR Future < Void > addBackupMutations ( ProxyCommitData * self , std : : map < Key , Mutat <nl> } <nl> <nl> Key val = valueWriter . toValue ( ) ; <nl> - <nl> + <nl> BinaryWriter wr ( Unversioned ( ) ) ; <nl> <nl> / / Serialize the log destination <nl> ACTOR Future < Void > addBackupMutations ( ProxyCommitData * self , std : : map < Key , Mutat <nl> / / Define the mutation type and and location <nl> backupMutation . param1 = wr . toValue ( ) ; <nl> ASSERT ( backupMutation . param1 . startsWith ( logRangeMutation - > first ) ) ; / / We are writing into the configured destination <nl> - <nl> + <nl> auto & tags = self - > tagsForKey ( backupMutation . param1 ) ; <nl> toCommit - > addTags ( tags ) ; <nl> toCommit - > addTypedMessage ( backupMutation ) ; <nl> ACTOR Future < Void > releaseResolvingAfter ( ProxyCommitData * self , Future < Void > rel <nl> return Void ( ) ; <nl> } <nl> <nl> - / / Commit one batch of transactions trs <nl> - ACTOR Future < Void > commitBatch ( <nl> - ProxyCommitData * self , <nl> - vector < CommitTransactionRequest > * pTrs , <nl> - int currentBatchMemBytesCount ) <nl> - { <nl> - / / WARNING : this code is run at a high priority ( until the first delay ( 0 ) ) , so it needs to do as little work as possible <nl> - state std : : vector < CommitTransactionRequest > trs ( std : : move ( * pTrs ) ) ; <nl> - state int64_t localBatchNumber = + + self - > localCommitBatchesStarted ; <nl> - state LogPushData toCommit ( self - > logSystem ) ; <nl> - state double t1 = now ( ) ; <nl> - state Optional < UID > debugID ; <nl> - state bool forceRecovery = false ; <nl> - state int batchOperations = 0 ; <nl> - state Span span ( " MP : commitBatch " _loc ) ; <nl> + namespace CommitBatch { <nl> + <nl> + struct CommitBatchContext { <nl> + using StoreCommit_t = std : : vector < std : : pair < Future < LogSystemDiskQueueAdapter : : CommitMessage > , Future < Void > > > ; <nl> + <nl> + ProxyCommitData * const pProxyCommitData ; <nl> + std : : vector < CommitTransactionRequest > trs ; <nl> + int currentBatchMemBytesCount ; <nl> + <nl> + double startTime ; <nl> + <nl> + Optional < UID > debugID ; <nl> + <nl> + bool forceRecovery = false ; <nl> + <nl> + int64_t localBatchNumber ; <nl> + LogPushData toCommit ; <nl> + <nl> + int batchOperations = 0 ; <nl> + <nl> + Span span = Span ( " MP : commitBatch " _loc ) ; <nl> + <nl> int64_t batchBytes = 0 ; <nl> - for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> - batchOperations + = trs [ t ] . transaction . mutations . size ( ) ; <nl> - batchBytes + = trs [ t ] . transaction . mutations . expectedSize ( ) ; <nl> - } <nl> - state int latencyBucket = batchOperations = = 0 ? 0 : std : : min < int > ( SERVER_KNOBS - > PROXY_COMPUTE_BUCKETS - 1 , SERVER_KNOBS - > PROXY_COMPUTE_BUCKETS * batchBytes / ( batchOperations * ( CLIENT_KNOBS - > VALUE_SIZE_LIMIT + CLIENT_KNOBS - > KEY_SIZE_LIMIT ) ) ) ; <nl> <nl> - ASSERT ( SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS < = SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT ) ; / / since we are using just the former to limit the number of versions actually in flight ! 
<nl> + int latencyBucket = 0 ; <nl> <nl> - / / Active load balancing runs at a very high priority ( to obtain accurate estimate of memory used by commit batches ) so we need to downgrade here <nl> - wait ( delay ( 0 , TaskPriority : : ProxyCommit ) ) ; <nl> + Version commitVersion ; <nl> + Version prevVersion ; <nl> + <nl> + int64_t maxTransactionBytes ; <nl> + std : : vector < std : : vector < int > > transactionResolverMap ; <nl> + std : : vector < std : : vector < std : : vector < int > > > txReadConflictRangeIndexMap ; <nl> <nl> - self - > lastVersionTime = t1 ; <nl> + Future < Void > releaseDelay ; <nl> + Future < Void > releaseFuture ; <nl> <nl> - + + self - > stats . commitBatchIn ; <nl> + std : : vector < ResolveTransactionBatchReply > resolution ; <nl> <nl> - for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> - if ( trs [ t ] . debugID . present ( ) ) { <nl> - if ( ! debugID . present ( ) ) <nl> + double computeStart ; <nl> + double computeDuration = 0 ; <nl> + <nl> + Arena arena ; <nl> + <nl> + / / / true if the batch is the 1st batch for this proxy , additional metadata <nl> + / / / processing is involved for this batch . <nl> + bool isMyFirstBatch ; <nl> + bool firstStateMutations ; <nl> + <nl> + Optional < Value > oldCoordinators ; <nl> + <nl> + StoreCommit_t storeCommits ; <nl> + <nl> + std : : vector < uint8_t > committed ; <nl> + <nl> + Optional < Key > lockedKey ; <nl> + bool locked ; <nl> + <nl> + int commitCount = 0 ; <nl> + <nl> + std : : vector < int > nextTr ; <nl> + <nl> + bool lockedAfter ; <nl> + <nl> + Optional < Value > metadataVersionAfter ; <nl> + <nl> + int mutationCount = 0 ; <nl> + int mutationBytes = 0 ; <nl> + <nl> + std : : map < Key , MutationListRef > logRangeMutations ; <nl> + Arena logRangeMutationsArena ; <nl> + <nl> + int transactionNum = 0 ; <nl> + int yieldBytes = 0 ; <nl> + <nl> + LogSystemDiskQueueAdapter : : CommitMessage msg ; <nl> + <nl> + Future < Version > loggingComplete ; <nl> + <nl> + double commitStartTime ; <nl> + <nl> + CommitBatchContext ( ProxyCommitData * , const std : : vector < CommitTransactionRequest > * , const int ) ; <nl> + <nl> + void setupTraceBatch ( ) ; <nl> + <nl> + private : <nl> + void evaluateBatchSize ( ) ; <nl> + } ; <nl> + <nl> + CommitBatchContext : : CommitBatchContext ( ProxyCommitData * const pProxyCommitData_ , <nl> + const std : : vector < CommitTransactionRequest > * trs_ , <nl> + const int currentBatchMemBytesCount ) <nl> + : <nl> + <nl> + pProxyCommitData ( pProxyCommitData_ ) , trs ( std : : move ( * const_cast < std : : vector < CommitTransactionRequest > * > ( trs_ ) ) ) , <nl> + currentBatchMemBytesCount ( currentBatchMemBytesCount ) , <nl> + <nl> + startTime ( g_network - > now ( ) ) , <nl> + <nl> + localBatchNumber ( + + pProxyCommitData - > localCommitBatchesStarted ) , toCommit ( pProxyCommitData - > logSystem ) , <nl> + <nl> + committed ( trs . size ( ) ) { <nl> + <nl> + evaluateBatchSize ( ) ; <nl> + <nl> + if ( batchOperations ! = 0 ) { <nl> + latencyBucket = std : : min < int > ( <nl> + SERVER_KNOBS - > PROXY_COMPUTE_BUCKETS - 1 , <nl> + SERVER_KNOBS - > PROXY_COMPUTE_BUCKETS * batchBytes / <nl> + ( batchOperations * ( <nl> + CLIENT_KNOBS - > VALUE_SIZE_LIMIT + <nl> + CLIENT_KNOBS - > KEY_SIZE_LIMIT <nl> + ) ) <nl> + ) ; <nl> + } <nl> + <nl> + / / since we are using just the former to limit the number of versions actually in flight ! 
<nl> + ASSERT ( SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS < = SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT ) ; <nl> + } <nl> + <nl> + void CommitBatchContext : : setupTraceBatch ( ) { <nl> + for ( const auto & tr : trs ) { <nl> + if ( tr . debugID . present ( ) ) { <nl> + if ( ! debugID . present ( ) ) { <nl> debugID = nondeterministicRandom ( ) - > randomUniqueID ( ) ; <nl> - g_traceBatch . addAttach ( " CommitAttachID " , trs [ t ] . debugID . get ( ) . first ( ) , debugID . get ( ) . first ( ) ) ; <nl> + } <nl> + <nl> + g_traceBatch . addAttach ( <nl> + " CommitAttachID " , <nl> + tr . debugID . get ( ) . first ( ) , <nl> + debugID . get ( ) . first ( ) <nl> + ) ; <nl> } <nl> - span . addParent ( trs [ t ] . spanContext ) ; <nl> + span . addParent ( tr . spanContext ) ; <nl> } <nl> <nl> - if ( localBatchNumber = = 2 & & ! debugID . present ( ) & & self - > firstProxy & & ! g_network - > isSimulated ( ) ) { <nl> - debugID = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> - TraceEvent ( " SecondCommitBatch " , self - > dbgid ) . detail ( " DebugID " , debugID . get ( ) ) ; <nl> + if ( debugID . present ( ) ) { <nl> + g_traceBatch . addEvent ( <nl> + " CommitDebug " , <nl> + debugID . get ( ) . first ( ) , <nl> + " MasterProxyServer . commitBatch . Before " <nl> + ) ; <nl> } <nl> + } <nl> <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " CommitDebug " , debugID . get ( ) . first ( ) , " MasterProxyServer . commitBatch . Before " ) ; <nl> + void CommitBatchContext : : evaluateBatchSize ( ) { <nl> + for ( const auto & tr : trs ) { <nl> + const auto & mutations = tr . transaction . mutations ; <nl> + batchOperations + = mutations . size ( ) ; <nl> + batchBytes + = mutations . expectedSize ( ) ; <nl> + } <nl> + } <nl> <nl> - / / / / / / / Phase 1 : Pre - resolution processing ( CPU bound except waiting for a version # which is separately pipelined and * should * be available by now ( unless empty commit ) ; ordered ; currently atomic but could yield ) <nl> + ACTOR Future < Void > preresolutionProcessing ( CommitBatchContext * self ) { <nl> + <nl> + state ProxyCommitData * const pProxyCommitData = self - > pProxyCommitData ; <nl> + state std : : vector < CommitTransactionRequest > & trs = self - > trs ; <nl> + state const int64_t localBatchNumber = self - > localBatchNumber ; <nl> + state const int latencyBucket = self - > latencyBucket ; <nl> + state const Optional < UID > & debugID = self - > debugID ; <nl> + <nl> + / / Pre - resolution the commits <nl> + TEST ( pProxyCommitData - > latestLocalCommitBatchResolving . get ( ) < localBatchNumber - 1 ) ; <nl> + wait ( pProxyCommitData - > latestLocalCommitBatchResolving . whenAtLeast ( localBatchNumber - 1 ) ) ; <nl> + self - > releaseDelay = delay ( <nl> + std : : min ( SERVER_KNOBS - > MAX_PROXY_COMPUTE , <nl> + self - > batchOperations * pProxyCommitData - > commitComputePerOperation [ latencyBucket ] ) , <nl> + TaskPriority : : ProxyMasterVersionReply <nl> + ) ; <nl> + <nl> + if ( debugID . present ( ) ) { <nl> + g_traceBatch . addEvent ( <nl> + " CommitDebug " , debugID . get ( ) . first ( ) , <nl> + " MasterProxyServer . commitBatch . GettingCommitVersion " <nl> + ) ; <nl> + } <nl> <nl> - / / Queuing pre - resolution commit processing <nl> - TEST ( self - > latestLocalCommitBatchResolving . get ( ) < localBatchNumber - 1 ) ; <nl> - wait ( self - > latestLocalCommitBatchResolving . 
whenAtLeast ( localBatchNumber - 1 ) ) ; <nl> - state Future < Void > releaseDelay = delay ( std : : min ( SERVER_KNOBS - > MAX_PROXY_COMPUTE , batchOperations * self - > commitComputePerOperation [ latencyBucket ] ) , TaskPriority : : ProxyMasterVersionReply ) ; <nl> + GetCommitVersionRequest req ( self - > span . context , pProxyCommitData - > commitVersionRequestNumber + + , <nl> + pProxyCommitData - > mostRecentProcessedRequestNumber , pProxyCommitData - > dbgid ) ; <nl> + GetCommitVersionReply versionReply = wait ( brokenPromiseToNever ( <nl> + pProxyCommitData - > master . getCommitVersion . getReply ( <nl> + req , TaskPriority : : ProxyMasterVersionReply <nl> + ) <nl> + ) ) ; <nl> <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " CommitDebug " , debugID . get ( ) . first ( ) , " MasterProxyServer . commitBatch . GettingCommitVersion " ) ; <nl> + pProxyCommitData - > mostRecentProcessedRequestNumber = versionReply . requestNum ; <nl> <nl> - GetCommitVersionRequest req ( span . context , self - > commitVersionRequestNumber + + , self - > mostRecentProcessedRequestNumber , self - > dbgid ) ; <nl> - GetCommitVersionReply versionReply = wait ( brokenPromiseToNever ( self - > master . getCommitVersion . getReply ( req , TaskPriority : : ProxyMasterVersionReply ) ) ) ; <nl> - self - > mostRecentProcessedRequestNumber = versionReply . requestNum ; <nl> + pProxyCommitData - > stats . txnCommitVersionAssigned + = trs . size ( ) ; <nl> + pProxyCommitData - > stats . lastCommitVersionAssigned = versionReply . version ; <nl> <nl> - self - > stats . txnCommitVersionAssigned + = trs . size ( ) ; <nl> - self - > stats . lastCommitVersionAssigned = versionReply . version ; <nl> - <nl> - state Version commitVersion = versionReply . version ; <nl> - state Version prevVersion = versionReply . prevVersion ; <nl> + self - > commitVersion = versionReply . version ; <nl> + self - > prevVersion = versionReply . prevVersion ; <nl> <nl> for ( auto it : versionReply . resolverChanges ) { <nl> - auto rs = self - > keyResolvers . modify ( it . range ) ; <nl> + auto rs = pProxyCommitData - > keyResolvers . modify ( it . range ) ; <nl> for ( auto r = rs . begin ( ) ; r ! = rs . end ( ) ; + + r ) <nl> r - > value ( ) . emplace_back ( versionReply . resolverChangesVersion , it . dest ) ; <nl> } <nl> <nl> - / / TraceEvent ( " ProxyGotVer " , self - > dbgid ) . detail ( " Commit " , commitVersion ) . detail ( " Prev " , prevVersion ) ; <nl> + / / TraceEvent ( " ProxyGotVer " , pProxyContext - > dbgid ) . detail ( " Commit " , commitVersion ) . detail ( " Prev " , prevVersion ) ; <nl> <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " CommitDebug " , debugID . get ( ) . first ( ) , " MasterProxyServer . commitBatch . GotCommitVersion " ) ; <nl> + if ( debugID . present ( ) ) { <nl> + g_traceBatch . addEvent ( <nl> + " CommitDebug " , debugID . get ( ) . first ( ) , <nl> + " MasterProxyServer . commitBatch . 
GotCommitVersion " <nl> + ) ; <nl> + } <nl> <nl> - ResolutionRequestBuilder requests ( self , commitVersion , prevVersion , self - > version , span ) ; <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + ACTOR Future < Void > getResolution ( CommitBatchContext * self ) { <nl> + / / Sending these requests is the fuzzy border between phase 1 and phase 2 ; it could conceivably overlap with <nl> + / / resolution processing but is still using CPU <nl> + ProxyCommitData * pProxyCommitData = self - > pProxyCommitData ; <nl> + std : : vector < CommitTransactionRequest > & trs = self - > trs ; <nl> + <nl> + ResolutionRequestBuilder requests ( <nl> + pProxyCommitData , <nl> + self - > commitVersion , <nl> + self - > prevVersion , <nl> + pProxyCommitData - > version , <nl> + self - > span <nl> + ) ; <nl> int conflictRangeCount = 0 ; <nl> - state int64_t maxTransactionBytes = 0 ; <nl> - for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> + self - > maxTransactionBytes = 0 ; <nl> + for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> requests . addTransaction ( trs [ t ] . transaction , t ) ; <nl> - conflictRangeCount + = trs [ t ] . transaction . read_conflict_ranges . size ( ) + trs [ t ] . transaction . write_conflict_ranges . size ( ) ; <nl> + conflictRangeCount + = <nl> + trs [ t ] . transaction . read_conflict_ranges . size ( ) + trs [ t ] . transaction . write_conflict_ranges . size ( ) ; <nl> / / TraceEvent ( " MPTransactionDump " , self - > dbgid ) . detail ( " Snapshot " , trs [ t ] . transaction . read_snapshot ) ; <nl> / / for ( auto & m : trs [ t ] . transaction . mutations ) <nl> - maxTransactionBytes = std : : max < int64_t > ( maxTransactionBytes , trs [ t ] . transaction . expectedSize ( ) ) ; <nl> + self - > maxTransactionBytes = std : : max < int64_t > ( <nl> + self - > maxTransactionBytes , trs [ t ] . transaction . expectedSize ( ) <nl> + ) ; <nl> / / TraceEvent ( " MPTransactionsDump " , self - > dbgid ) . detail ( " Mutation " , m . toString ( ) ) ; <nl> } <nl> - self - > stats . conflictRanges + = conflictRangeCount ; <nl> - <nl> - for ( int r = 1 ; r < self - > resolvers . size ( ) ; r + + ) <nl> - ASSERT ( requests . requests [ r ] . txnStateTransactions . size ( ) = = requests . requests [ 0 ] . txnStateTransactions . size ( ) ) ; <nl> - <nl> - / / Sending these requests is the fuzzy border between phase 1 and phase 2 ; it could conceivably overlap with resolution processing but is still using CPU <nl> - self - > stats . txnCommitResolving + = trs . size ( ) ; <nl> - vector < Future < ResolveTransactionBatchReply > > replies ; <nl> - for ( int r = 0 ; r < self - > resolvers . size ( ) ; r + + ) { <nl> - requests . requests [ r ] . debugID = debugID ; <nl> - replies . push_back ( brokenPromiseToNever ( self - > resolvers [ r ] . resolve . getReply ( requests . requests [ r ] , TaskPriority : : ProxyResolverReply ) ) ) ; <nl> + pProxyCommitData - > stats . conflictRanges + = conflictRangeCount ; <nl> + <nl> + for ( int r = 1 ; r < pProxyCommitData - > resolvers . size ( ) ; r + + ) <nl> + ASSERT ( requests . requests [ r ] . txnStateTransactions . size ( ) = = <nl> + requests . requests [ 0 ] . txnStateTransactions . size ( ) ) ; <nl> + <nl> + pProxyCommitData - > stats . txnCommitResolving + = trs . size ( ) ; <nl> + std : : vector < Future < ResolveTransactionBatchReply > > replies ; <nl> + for ( int r = 0 ; r < pProxyCommitData - > resolvers . size ( ) ; r + + ) { <nl> + requests . requests [ r ] . debugID = self - > debugID ; <nl> + replies . 
push_back ( brokenPromiseToNever ( <nl> + pProxyCommitData - > resolvers [ r ] . resolve . getReply ( <nl> + requests . requests [ r ] , TaskPriority : : ProxyResolverReply ) ) ) ; <nl> } <nl> <nl> - state vector < vector < int > > transactionResolverMap = std : : move ( requests . transactionResolverMap ) ; <nl> - state std : : vector < std : : vector < std : : vector < int > > > txReadConflictRangeIndexMap = <nl> - std : : move ( requests . txReadConflictRangeIndexMap ) ; / / used to report conflicting keys <nl> - state Future < Void > releaseFuture = releaseResolvingAfter ( self , releaseDelay , localBatchNumber ) ; <nl> - <nl> - / / / / / / / Phase 2 : Resolution ( waiting on the network ; pipelined ) <nl> - state vector < ResolveTransactionBatchReply > resolution = wait ( getAll ( replies ) ) ; <nl> - <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " CommitDebug " , debugID . get ( ) . first ( ) , " MasterProxyServer . commitBatch . AfterResolution " ) ; <nl> - <nl> - / / / / / / Phase 3 : Post - resolution processing ( CPU bound except for very rare situations ; ordered ; currently atomic but doesn ' t need to be ) <nl> - TEST ( self - > latestLocalCommitBatchLogging . get ( ) < localBatchNumber - 1 ) ; / / Queuing post - resolution commit processing <nl> - wait ( self - > latestLocalCommitBatchLogging . whenAtLeast ( localBatchNumber - 1 ) ) ; <nl> - wait ( yield ( TaskPriority : : ProxyCommitYield1 ) ) ; <nl> - <nl> - state double computeStart = g_network - > timer ( ) ; <nl> - state double computeDuration = 0 ; <nl> - self - > stats . txnCommitResolved + = trs . size ( ) ; <nl> - <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " CommitDebug " , debugID . get ( ) . first ( ) , " MasterProxyServer . commitBatch . ProcessingMutations " ) ; <nl> + self - > transactionResolverMap . swap ( requests . transactionResolverMap ) ; <nl> + / / Used to report conflicting keys <nl> + self - > txReadConflictRangeIndexMap . swap ( requests . txReadConflictRangeIndexMap ) ; <nl> + self - > releaseFuture = releaseResolvingAfter ( <nl> + pProxyCommitData , self - > releaseDelay , self - > localBatchNumber <nl> + ) ; <nl> + <nl> + / / Wait for the final resolution <nl> + std : : vector < ResolveTransactionBatchReply > resolutionResp = wait ( getAll ( replies ) ) ; <nl> + self - > resolution . swap ( * const_cast < std : : vector < ResolveTransactionBatchReply > * > ( & resolutionResp ) ) ; <nl> + <nl> + if ( self - > debugID . present ( ) ) { <nl> + g_traceBatch . addEvent ( <nl> + " CommitDebug " , self - > debugID . get ( ) . first ( ) , <nl> + " MasterProxyServer . commitBatch . AfterResolution " <nl> + ) ; <nl> + } <nl> <nl> - state Arena arena ; <nl> - state bool isMyFirstBatch = ! self - > version ; <nl> - state Optional < Value > oldCoordinators = self - > txnStateStore - > readValue ( coordinatorsKey ) . get ( ) ; <nl> + return Void ( ) ; <nl> + } <nl> <nl> - / / TraceEvent ( " ResolutionResult " , self - > dbgid ) . detail ( " Sequence " , sequence ) . detail ( " Version " , commitVersion ) . detail ( " StateMutationProxies " , resolution [ 0 ] . stateMutations . size ( ) ) . detail ( " WaitForResolution " , now ( ) - t1 ) . detail ( " R0Committed " , resolution [ 0 ] . committed . size ( ) ) <nl> - / / . detail ( " Transactions " , trs . size ( ) ) ; <nl> + void assertResolutionStateMutationsSizeConsistent ( <nl> + const std : : vector < ResolveTransactionBatchReply > & resolution ) { <nl> <nl> - for ( int r = 1 ; r < resolution . 
size ( ) ; r + + ) { <nl> - ASSERT ( resolution [ r ] . stateMutations . size ( ) = = resolution [ 0 ] . stateMutations . size ( ) ) ; <nl> - for ( int s = 0 ; s < resolution [ r ] . stateMutations . size ( ) ; s + + ) <nl> - ASSERT ( resolution [ r ] . stateMutations [ s ] . size ( ) = = resolution [ 0 ] . stateMutations [ s ] . size ( ) ) ; <nl> + for ( int r = 1 ; r < resolution . size ( ) ; r + + ) { <nl> + ASSERT ( resolution [ r ] . stateMutations . size ( ) = = resolution [ 0 ] . stateMutations . size ( ) ) ; <nl> + for ( int s = 0 ; s < resolution [ r ] . stateMutations . size ( ) ; s + + ) { <nl> + ASSERT ( resolution [ r ] . stateMutations [ s ] . size ( ) = = resolution [ 0 ] . stateMutations [ s ] . size ( ) ) ; <nl> + } <nl> } <nl> + } <nl> <nl> - / / Compute and apply " metadata " effects of each other proxy ' s most recent batch <nl> - bool initialState = isMyFirstBatch ; <nl> - state bool firstStateMutations = isMyFirstBatch ; <nl> - state vector < std : : pair < Future < LogSystemDiskQueueAdapter : : CommitMessage > , Future < Void > > > storeCommits ; <nl> - for ( int versionIndex = 0 ; versionIndex < resolution [ 0 ] . stateMutations . size ( ) ; versionIndex + + ) { <nl> - / / self - > logAdapter - > setNextVersion ( ? ? ? ) ; < < Ideally we would be telling the log adapter that the pushes in this commit will be in the version at which these state mutations were committed by another proxy , but at present we don ' t have that information here . So the disk queue may be unnecessarily conservative about popping . <nl> + / / Compute and apply " metadata " effects of each other proxy ' s most recent batch <nl> + void applyMetadataEffect ( CommitBatchContext * self ) { <nl> + bool initialState = self - > isMyFirstBatch ; <nl> + self - > firstStateMutations = self - > isMyFirstBatch ; <nl> + for ( int versionIndex = 0 ; versionIndex < self - > resolution [ 0 ] . stateMutations . size ( ) ; versionIndex + + ) { <nl> + / / pProxyCommitData - > logAdapter - > setNextVersion ( ? ? ? ) ; < < Ideally we would be telling the log adapter that the pushes in this commit will be in the version at which these state mutations were committed by another proxy , but at present we don ' t have that information here . So the disk queue may be unnecessarily conservative about popping . <nl> <nl> - for ( int transactionIndex = 0 ; transactionIndex < resolution [ 0 ] . stateMutations [ versionIndex ] . size ( ) & & ! forceRecovery ; transactionIndex + + ) { <nl> + for ( int transactionIndex = 0 ; transactionIndex < self - > resolution [ 0 ] . stateMutations [ versionIndex ] . size ( ) & & ! self - > forceRecovery ; transactionIndex + + ) { <nl> bool committed = true ; <nl> - for ( int resolver = 0 ; resolver < resolution . size ( ) ; resolver + + ) <nl> - committed = committed & & resolution [ resolver ] . stateMutations [ versionIndex ] [ transactionIndex ] . committed ; <nl> - if ( committed ) <nl> - applyMetadataMutations ( self - > dbgid , arena , resolution [ 0 ] . stateMutations [ versionIndex ] [ transactionIndex ] . mutations , self - > txnStateStore , nullptr , & forceRecovery , self - > logSystem , 0 , & self - > vecBackupKeys , & self - > keyInfo , & self - > cacheInfo , self - > firstProxy ? & self - > uid_applyMutationsData : nullptr , self - > commit , self - > cx , & self - > committedVersion , & self - > storageCache , & self - > tag_popped ) ; <nl> - <nl> - if ( resolution [ 0 ] . stateMutations [ versionIndex ] [ transactionIndex ] . mutations . 
size ( ) & & firstStateMutations ) { <nl> + for ( int resolver = 0 ; resolver < self - > resolution . size ( ) ; resolver + + ) <nl> + committed = committed & & self - > resolution [ resolver ] . stateMutations [ versionIndex ] [ transactionIndex ] . committed ; <nl> + if ( committed ) { <nl> + applyMetadataMutations ( * self - > pProxyCommitData , self - > arena , self - > pProxyCommitData - > logSystem , <nl> + self - > resolution [ 0 ] . stateMutations [ versionIndex ] [ transactionIndex ] . mutations , <nl> + / * pToCommit = * / nullptr , self - > forceRecovery , <nl> + / * popVersion = * / 0 , / * initialCommit * / false ) ; <nl> + } <nl> + if ( self - > resolution [ 0 ] . stateMutations [ versionIndex ] [ transactionIndex ] . mutations . size ( ) & & self - > firstStateMutations ) { <nl> ASSERT ( committed ) ; <nl> - firstStateMutations = false ; <nl> - forceRecovery = false ; <nl> + self - > firstStateMutations = false ; <nl> + self - > forceRecovery = false ; <nl> } <nl> - / / TraceEvent ( " MetadataTransaction " , self - > dbgid ) . detail ( " Committed " , committed ) . detail ( " Mutations " , resolution [ 0 ] . stateMutations [ versionIndex ] [ transactionIndex ] . second . size ( ) ) . detail ( " R1Mutations " , resolution . back ( ) . stateMutations [ versionIndex ] [ transactionIndex ] . second . size ( ) ) ; <nl> } <nl> - / / TraceEvent ( " MetadataBatch " , self - > dbgid ) . detail ( " Transactions " , resolution [ 0 ] . stateMutations [ versionIndex ] . size ( ) ) ; <nl> <nl> / / These changes to txnStateStore will be committed by the other proxy , so we simply discard the commit message <nl> - auto fcm = self - > logAdapter - > getCommitMessage ( ) ; <nl> - storeCommits . emplace_back ( fcm , self - > txnStateStore - > commit ( ) ) ; <nl> - / / discardCommit ( dbgid , fcm , txnStateStore - > commit ( ) ) ; <nl> + auto fcm = self - > pProxyCommitData - > logAdapter - > getCommitMessage ( ) ; <nl> + self - > storeCommits . emplace_back ( fcm , self - > pProxyCommitData - > txnStateStore - > commit ( ) ) ; <nl> <nl> if ( initialState ) { <nl> - / / TraceEvent ( " ResyncLog " , dbgid ) ; <nl> initialState = false ; <nl> - forceRecovery = false ; <nl> - self - > txnStateStore - > resyncLog ( ) ; <nl> + self - > forceRecovery = false ; <nl> + self - > pProxyCommitData - > txnStateStore - > resyncLog ( ) ; <nl> <nl> - for ( auto & p : storeCommits ) { <nl> + for ( auto & p : self - > storeCommits ) { <nl> ASSERT ( ! p . second . isReady ( ) ) ; <nl> p . first . get ( ) . acknowledge . send ( Void ( ) ) ; <nl> ASSERT ( p . second . isReady ( ) ) ; <nl> } <nl> - storeCommits . clear ( ) ; <nl> + self - > storeCommits . clear ( ) ; <nl> } <nl> } <nl> + } <nl> <nl> - / / Determine which transactions actually committed ( conservatively ) by combining results from the resolvers <nl> - state vector < uint8_t > committed ( trs . size ( ) ) ; <nl> - ASSERT ( transactionResolverMap . size ( ) = = committed . size ( ) ) ; <nl> + / / / Determine which transactions actually committed ( conservatively ) by combining results from the resolvers <nl> + void determineCommittedTransactions ( CommitBatchContext * self ) { <nl> + auto pProxyCommitData = self - > pProxyCommitData ; <nl> + const auto & trs = self - > trs ; <nl> + <nl> + ASSERT ( self - > transactionResolverMap . size ( ) = = self - > committed . 
size ( ) ) ; <nl> / / For each commitTransactionRef , it is only sent to resolvers specified in transactionResolverMap <nl> / / Thus , we use this nextTr to track the correct transaction index on each resolver . <nl> - state vector < int > nextTr ( resolution . size ( ) ) ; <nl> - for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> + self - > nextTr . resize ( self - > resolution . size ( ) ) ; <nl> + for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> uint8_t commit = ConflictBatch : : TransactionCommitted ; <nl> - for ( int r : transactionResolverMap [ t ] ) <nl> - { <nl> - commit = std : : min ( resolution [ r ] . committed [ nextTr [ r ] + + ] , commit ) ; <nl> + for ( int r : self - > transactionResolverMap [ t ] ) { <nl> + commit = std : : min ( self - > resolution [ r ] . committed [ self - > nextTr [ r ] + + ] , commit ) ; <nl> } <nl> - committed [ t ] = commit ; <nl> + self - > committed [ t ] = commit ; <nl> } <nl> - for ( int r = 0 ; r < resolution . size ( ) ; r + + ) <nl> - ASSERT ( nextTr [ r ] = = resolution [ r ] . committed . size ( ) ) ; <nl> + for ( int r = 0 ; r < self - > resolution . size ( ) ; r + + ) <nl> + ASSERT ( self - > nextTr [ r ] = = self - > resolution [ r ] . committed . size ( ) ) ; <nl> <nl> - self - > logAdapter - > setNextVersion ( commitVersion ) ; <nl> + pProxyCommitData - > logAdapter - > setNextVersion ( self - > commitVersion ) ; <nl> <nl> - state Optional < Key > lockedKey = self - > txnStateStore - > readValue ( databaseLockedKey ) . get ( ) ; <nl> - state bool locked = lockedKey . present ( ) & & lockedKey . get ( ) . size ( ) ; <nl> + self - > lockedKey = pProxyCommitData - > txnStateStore - > readValue ( databaseLockedKey ) . get ( ) ; <nl> + self - > locked = self - > lockedKey . present ( ) & & self - > lockedKey . get ( ) . size ( ) ; <nl> <nl> - state Optional < Key > mustContainSystemKey = self - > txnStateStore - > readValue ( mustContainSystemMutationsKey ) . get ( ) ; <nl> - if ( mustContainSystemKey . present ( ) & & mustContainSystemKey . get ( ) . size ( ) ) { <nl> - for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> - if ( committed [ t ] = = ConflictBatch : : TransactionCommitted ) { <nl> + const auto & mustContainSystemKey = pProxyCommitData - > txnStateStore - > readValue ( mustContainSystemMutationsKey ) . get ( ) ; <nl> + if ( mustContainSystemKey . present ( ) & & mustContainSystemKey . get ( ) . size ( ) ) { <nl> + for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> + if ( self - > committed [ t ] = = ConflictBatch : : TransactionCommitted ) { <nl> bool foundSystem = false ; <nl> for ( auto & m : trs [ t ] . transaction . mutations ) { <nl> if ( ( m . type = = MutationRef : : ClearRange ? m . param2 : m . param1 ) > = nonMetadataSystemKeys . end ) { <nl> ACTOR Future < Void > commitBatch ( <nl> } <nl> } <nl> if ( ! 
foundSystem ) { <nl> - committed [ t ] = ConflictBatch : : TransactionConflict ; <nl> + self - > committed [ t ] = ConflictBatch : : TransactionConflict ; <nl> } <nl> } <nl> } <nl> } <nl> + } <nl> <nl> - if ( forceRecovery ) { <nl> - wait ( Future < Void > ( Never ( ) ) ) ; <nl> - } <nl> + / / This first pass through committed transactions deals with " metadata " effects ( modifications of txnStateStore , changes to storage servers ' responsibilities ) <nl> + ACTOR Future < Void > applyMetadataToCommittedTransactions ( CommitBatchContext * self ) { <nl> + auto pProxyCommitData = self - > pProxyCommitData ; <nl> + const auto & trs = self - > trs ; <nl> <nl> - / / This first pass through committed transactions deals with " metadata " effects ( modifications of txnStateStore , changes to storage servers ' responsibilities ) <nl> int t ; <nl> - state int commitCount = 0 ; <nl> - for ( t = 0 ; t < trs . size ( ) & & ! forceRecovery ; t + + ) <nl> - { <nl> - if ( committed [ t ] = = ConflictBatch : : TransactionCommitted & & ( ! locked | | trs [ t ] . isLockAware ( ) ) ) { <nl> - commitCount + + ; <nl> - applyMetadataMutations ( self - > dbgid , arena , trs [ t ] . transaction . mutations , self - > txnStateStore , & toCommit , & forceRecovery , self - > logSystem , commitVersion + 1 , & self - > vecBackupKeys , & self - > keyInfo , & self - > cacheInfo , self - > firstProxy ? & self - > uid_applyMutationsData : NULL , self - > commit , self - > cx , & self - > committedVersion , & self - > storageCache , & self - > tag_popped ) ; <nl> + for ( t = 0 ; t < trs . size ( ) & & ! self - > forceRecovery ; t + + ) { <nl> + if ( self - > committed [ t ] = = ConflictBatch : : TransactionCommitted & & ( ! self - > locked | | trs [ t ] . isLockAware ( ) ) ) { <nl> + self - > commitCount + + ; <nl> + applyMetadataMutations ( * pProxyCommitData , self - > arena , pProxyCommitData - > logSystem , <nl> + trs [ t ] . transaction . mutations , & self - > toCommit , self - > forceRecovery , <nl> + self - > commitVersion + 1 , / * initialCommit = * / false ) ; <nl> } <nl> - if ( firstStateMutations ) { <nl> - ASSERT ( committed [ t ] = = ConflictBatch : : TransactionCommitted ) ; <nl> - firstStateMutations = false ; <nl> - forceRecovery = false ; <nl> + if ( self - > firstStateMutations ) { <nl> + ASSERT ( self - > committed [ t ] = = ConflictBatch : : TransactionCommitted ) ; <nl> + self - > firstStateMutations = false ; <nl> + self - > forceRecovery = false ; <nl> } <nl> } <nl> - if ( forceRecovery ) { <nl> - for ( ; t < trs . size ( ) ; t + + ) <nl> - committed [ t ] = ConflictBatch : : TransactionConflict ; <nl> - TraceEvent ( SevWarn , " RestartingTxnSubsystem " , self - > dbgid ) . detail ( " Stage " , " AwaitCommit " ) ; <nl> + if ( self - > forceRecovery ) { <nl> + for ( ; t < trs . size ( ) ; t + + ) <nl> + self - > committed [ t ] = ConflictBatch : : TransactionConflict ; <nl> + TraceEvent ( SevWarn , " RestartingTxnSubsystem " , pProxyCommitData - > dbgid ) . detail ( " Stage " , " AwaitCommit " ) ; <nl> } <nl> <nl> - lockedKey = self - > txnStateStore - > readValue ( databaseLockedKey ) . get ( ) ; <nl> - state bool lockedAfter = lockedKey . present ( ) & & lockedKey . get ( ) . size ( ) ; <nl> + self - > lockedKey = pProxyCommitData - > txnStateStore - > readValue ( databaseLockedKey ) . get ( ) ; <nl> + self - > lockedAfter = self - > lockedKey . present ( ) & & self - > lockedKey . get ( ) . 
size ( ) ; <nl> <nl> - state Optional < Value > metadataVersionAfter = self - > txnStateStore - > readValue ( metadataVersionKey ) . get ( ) ; <nl> + self - > metadataVersionAfter = pProxyCommitData - > txnStateStore - > readValue ( metadataVersionKey ) . get ( ) ; <nl> <nl> - auto fcm = self - > logAdapter - > getCommitMessage ( ) ; <nl> - storeCommits . emplace_back ( fcm , self - > txnStateStore - > commit ( ) ) ; <nl> - self - > version = commitVersion ; <nl> - if ( ! self - > validState . isSet ( ) ) self - > validState . send ( Void ( ) ) ; <nl> - ASSERT ( commitVersion ) ; <nl> + auto fcm = pProxyCommitData - > logAdapter - > getCommitMessage ( ) ; <nl> + self - > storeCommits . emplace_back ( fcm , pProxyCommitData - > txnStateStore - > commit ( ) ) ; <nl> + pProxyCommitData - > version = self - > commitVersion ; <nl> + if ( ! pProxyCommitData - > validState . isSet ( ) ) pProxyCommitData - > validState . send ( Void ( ) ) ; <nl> + ASSERT ( self - > commitVersion ) ; <nl> <nl> - if ( ! isMyFirstBatch & & self - > txnStateStore - > readValue ( coordinatorsKey ) . get ( ) . get ( ) ! = oldCoordinators . get ( ) ) { <nl> - wait ( brokenPromiseToNever ( self - > master . changeCoordinators . getReply ( ChangeCoordinatorsRequest ( self - > txnStateStore - > readValue ( coordinatorsKey ) . get ( ) . get ( ) ) ) ) ) ; <nl> + if ( ! self - > isMyFirstBatch & & pProxyCommitData - > txnStateStore - > readValue ( coordinatorsKey ) . get ( ) . get ( ) ! = self - > oldCoordinators . get ( ) ) { <nl> + wait ( brokenPromiseToNever ( pProxyCommitData - > master . changeCoordinators . getReply ( ChangeCoordinatorsRequest ( pProxyCommitData - > txnStateStore - > readValue ( coordinatorsKey ) . get ( ) . get ( ) ) ) ) ) ; <nl> ASSERT ( false ) ; / / ChangeCoordinatorsRequest should always throw <nl> } <nl> <nl> - / / This second pass through committed transactions assigns the actual mutations to the appropriate storage servers ' tags <nl> - state int mutationCount = 0 ; <nl> - state int mutationBytes = 0 ; <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + / / / This second pass through committed transactions assigns the actual mutations to the appropriate storage servers ' tags <nl> + ACTOR Future < Void > assignMutationsToStorageServers ( CommitBatchContext * self ) { <nl> + state ProxyCommitData * const pProxyCommitData = self - > pProxyCommitData ; <nl> + state std : : vector < CommitTransactionRequest > & trs = self - > trs ; <nl> <nl> - state std : : map < Key , MutationListRef > logRangeMutations ; <nl> - state Arena logRangeMutationsArena ; <nl> - state int transactionNum = 0 ; <nl> - state int yieldBytes = 0 ; <nl> + for ( ; self - > transactionNum < trs . size ( ) ; self - > transactionNum + + ) { <nl> + if ( ! ( self - > committed [ self - > transactionNum ] = = ConflictBatch : : TransactionCommitted & & ( ! self - > locked | | trs [ self - > transactionNum ] . isLockAware ( ) ) ) ) { <nl> + continue ; <nl> + } <nl> <nl> - for ( ; transactionNum < trs . size ( ) ; transactionNum + + ) { <nl> - if ( committed [ transactionNum ] = = ConflictBatch : : TransactionCommitted & & ( ! locked | | trs [ transactionNum ] . isLockAware ( ) ) ) { <nl> - state int mutationNum = 0 ; <nl> - state VectorRef < MutationRef > * pMutations = & trs [ transactionNum ] . transaction . 
mutations ; <nl> - for ( ; mutationNum < pMutations - > size ( ) ; mutationNum + + ) { <nl> - if ( yieldBytes > SERVER_KNOBS - > DESIRED_TOTAL_BYTES ) { <nl> - yieldBytes = 0 ; <nl> - if ( g_network - > check_yield ( TaskPriority : : ProxyCommitYield1 ) ) { <nl> - computeDuration + = g_network - > timer ( ) - computeStart ; <nl> - wait ( delay ( 0 , TaskPriority : : ProxyCommitYield1 ) ) ; <nl> - computeStart = g_network - > timer ( ) ; <nl> - } <nl> + state int mutationNum = 0 ; <nl> + state VectorRef < MutationRef > * pMutations = & trs [ self - > transactionNum ] . transaction . mutations ; <nl> + for ( ; mutationNum < pMutations - > size ( ) ; mutationNum + + ) { <nl> + if ( self - > yieldBytes > SERVER_KNOBS - > DESIRED_TOTAL_BYTES ) { <nl> + self - > yieldBytes = 0 ; <nl> + if ( g_network - > check_yield ( TaskPriority : : ProxyCommitYield1 ) ) { <nl> + self - > computeDuration + = g_network - > timer ( ) - self - > computeStart ; <nl> + wait ( delay ( 0 , TaskPriority : : ProxyCommitYield1 ) ) ; <nl> + self - > computeStart = g_network - > timer ( ) ; <nl> } <nl> + } <nl> <nl> - auto & m = ( * pMutations ) [ mutationNum ] ; <nl> - mutationCount + + ; <nl> - mutationBytes + = m . expectedSize ( ) ; <nl> - yieldBytes + = m . expectedSize ( ) ; <nl> - / / Determine the set of tags ( responsible storage servers ) for the mutation , splitting it <nl> - / / if necessary . Serialize ( splits of ) the mutation into the message buffer and add the tags . <nl> - <nl> - if ( isSingleKeyMutation ( ( MutationRef : : Type ) m . type ) ) { <nl> - auto & tags = self - > tagsForKey ( m . param1 ) ; <nl> - <nl> - if ( self - > singleKeyMutationEvent - > enabled ) { <nl> - KeyRangeRef shard = self - > keyInfo . rangeContaining ( m . param1 ) . range ( ) ; <nl> - self - > singleKeyMutationEvent - > tag1 = ( int64_t ) tags [ 0 ] . id ; <nl> - self - > singleKeyMutationEvent - > tag2 = ( int64_t ) tags [ 1 ] . id ; <nl> - self - > singleKeyMutationEvent - > tag3 = ( int64_t ) tags [ 2 ] . id ; <nl> - self - > singleKeyMutationEvent - > shardBegin = shard . begin ; <nl> - self - > singleKeyMutationEvent - > shardEnd = shard . end ; <nl> - self - > singleKeyMutationEvent - > log ( ) ; <nl> - } <nl> + auto & m = ( * pMutations ) [ mutationNum ] ; <nl> + self - > mutationCount + + ; <nl> + self - > mutationBytes + = m . expectedSize ( ) ; <nl> + self - > yieldBytes + = m . expectedSize ( ) ; <nl> + / / Determine the set of tags ( responsible storage servers ) for the mutation , splitting it <nl> + / / if necessary . Serialize ( splits of ) the mutation into the message buffer and add the tags . <nl> + <nl> + if ( isSingleKeyMutation ( ( MutationRef : : Type ) m . type ) ) { <nl> + auto & tags = pProxyCommitData - > tagsForKey ( m . param1 ) ; <nl> + <nl> + if ( pProxyCommitData - > singleKeyMutationEvent - > enabled ) { <nl> + KeyRangeRef shard = pProxyCommitData - > keyInfo . rangeContaining ( m . param1 ) . range ( ) ; <nl> + pProxyCommitData - > singleKeyMutationEvent - > tag1 = ( int64_t ) tags [ 0 ] . id ; <nl> + pProxyCommitData - > singleKeyMutationEvent - > tag2 = ( int64_t ) tags [ 1 ] . id ; <nl> + pProxyCommitData - > singleKeyMutationEvent - > tag3 = ( int64_t ) tags [ 2 ] . id ; <nl> + pProxyCommitData - > singleKeyMutationEvent - > shardBegin = shard . begin ; <nl> + pProxyCommitData - > singleKeyMutationEvent - > shardEnd = shard . end ; <nl> + pProxyCommitData - > singleKeyMutationEvent - > log ( ) ; <nl> + } <nl> <nl> - DEBUG_MUTATION ( " ProxyCommit " , commitVersion , m ) . 
detail ( " Dbgid " , self - > dbgid ) . detail ( " To " , tags ) . detail ( " Mutation " , m ) ; <nl> - <nl> - toCommit . addTags ( tags ) ; <nl> - if ( self - > cacheInfo [ m . param1 ] ) { <nl> - toCommit . addTag ( cacheTag ) ; <nl> - } <nl> - toCommit . addTypedMessage ( m ) ; <nl> + DEBUG_MUTATION ( " ProxyCommit " , self - > commitVersion , m ) . detail ( " Dbgid " , pProxyCommitData - > dbgid ) . detail ( " To " , tags ) . detail ( " Mutation " , m ) ; <nl> + self - > toCommit . addTags ( tags ) ; <nl> + if ( pProxyCommitData - > cacheInfo [ m . param1 ] ) { <nl> + self - > toCommit . addTag ( cacheTag ) ; <nl> + } <nl> + self - > toCommit . addTypedMessage ( m ) ; <nl> + } <nl> + else if ( m . type = = MutationRef : : ClearRange ) { <nl> + KeyRangeRef clearRange ( KeyRangeRef ( m . param1 , m . param2 ) ) ; <nl> + auto ranges = pProxyCommitData - > keyInfo . intersectingRanges ( clearRange ) ; <nl> + auto firstRange = ranges . begin ( ) ; <nl> + + + firstRange ; <nl> + if ( firstRange = = ranges . end ( ) ) { <nl> + / / Fast path <nl> + DEBUG_MUTATION ( " ProxyCommit " , self - > commitVersion , m ) . detail ( " Dbgid " , pProxyCommitData - > dbgid ) . detail ( " To " , ranges . begin ( ) . value ( ) . tags ) . detail ( " Mutation " , m ) ; <nl> + <nl> + ranges . begin ( ) . value ( ) . populateTags ( ) ; <nl> + self - > toCommit . addTags ( ranges . begin ( ) . value ( ) . tags ) ; <nl> } <nl> - else if ( m . type = = MutationRef : : ClearRange ) { <nl> - KeyRangeRef clearRange ( KeyRangeRef ( m . param1 , m . param2 ) ) ; <nl> - auto ranges = self - > keyInfo . intersectingRanges ( clearRange ) ; <nl> - auto firstRange = ranges . begin ( ) ; <nl> - + + firstRange ; <nl> - if ( firstRange = = ranges . end ( ) ) { <nl> - / / Fast path <nl> - DEBUG_MUTATION ( " ProxyCommit " , commitVersion , m ) . detail ( " Dbgid " , self - > dbgid ) . detail ( " To " , ranges . begin ( ) . value ( ) . tags ) . detail ( " Mutation " , m ) ; <nl> - <nl> - ranges . begin ( ) . value ( ) . populateTags ( ) ; <nl> - toCommit . addTags ( ranges . begin ( ) . value ( ) . tags ) ; <nl> + else { <nl> + TEST ( true ) ; / / A clear range extends past a shard boundary <nl> + std : : set < Tag > allSources ; <nl> + for ( auto r : ranges ) { <nl> + r . value ( ) . populateTags ( ) ; <nl> + allSources . insert ( r . value ( ) . tags . begin ( ) , r . value ( ) . tags . end ( ) ) ; <nl> } <nl> - else { <nl> - TEST ( true ) ; / / A clear range extends past a shard boundary <nl> - std : : set < Tag > allSources ; <nl> - for ( auto r : ranges ) { <nl> - r . value ( ) . populateTags ( ) ; <nl> - allSources . insert ( r . value ( ) . tags . begin ( ) , r . value ( ) . tags . end ( ) ) ; <nl> - } <nl> - DEBUG_MUTATION ( " ProxyCommit " , commitVersion , m ) . detail ( " Dbgid " , self - > dbgid ) . detail ( " To " , allSources ) . detail ( " Mutation " , m ) ; <nl> + DEBUG_MUTATION ( " ProxyCommit " , self - > commitVersion , m ) . detail ( " Dbgid " , pProxyCommitData - > dbgid ) . detail ( " To " , allSources ) . detail ( " Mutation " , m ) ; <nl> <nl> - toCommit . addTags ( allSources ) ; <nl> - } <nl> - if ( self - > needsCacheTag ( clearRange ) ) { <nl> - toCommit . addTag ( cacheTag ) ; <nl> - } <nl> - toCommit . addTypedMessage ( m ) ; <nl> - } else <nl> - UNREACHABLE ( ) ; <nl> + self - > toCommit . addTags ( allSources ) ; <nl> + } <nl> <nl> + if ( pProxyCommitData - > needsCacheTag ( clearRange ) ) { <nl> + self - > toCommit . addTag ( cacheTag ) ; <nl> + } <nl> + self - > toCommit . 
addTypedMessage ( m ) ; <nl> + } else { <nl> + UNREACHABLE ( ) ; <nl> + } <nl> <nl> - / / Check on backing up key , if backup ranges are defined and a normal key <nl> - if ( self - > vecBackupKeys . size ( ) > 1 & & ( normalKeys . contains ( m . param1 ) | | m . param1 = = metadataVersionKey ) ) { <nl> - if ( m . type ! = MutationRef : : Type : : ClearRange ) { <nl> - / / Add the mutation to the relevant backup tag <nl> - for ( auto backupName : self - > vecBackupKeys [ m . param1 ] ) { <nl> - logRangeMutations [ backupName ] . push_back_deep ( logRangeMutationsArena , m ) ; <nl> - } <nl> - } <nl> - else { <nl> - KeyRangeRef mutationRange ( m . param1 , m . param2 ) ; <nl> - KeyRangeRef intersectionRange ; <nl> + / / Check on backing up key , if backup ranges are defined and a normal key <nl> + if ( ! ( pProxyCommitData - > vecBackupKeys . size ( ) > 1 & & ( normalKeys . contains ( m . param1 ) | | m . param1 = = metadataVersionKey ) ) ) { <nl> + continue ; <nl> + } <nl> <nl> - / / Identify and add the intersecting ranges of the mutation to the array of mutations to serialize <nl> - for ( auto backupRange : self - > vecBackupKeys . intersectingRanges ( mutationRange ) ) <nl> - { <nl> - / / Get the backup sub range <nl> - const auto & backupSubrange = backupRange . range ( ) ; <nl> + if ( m . type ! = MutationRef : : Type : : ClearRange ) { <nl> + / / Add the mutation to the relevant backup tag <nl> + for ( auto backupName : pProxyCommitData - > vecBackupKeys [ m . param1 ] ) { <nl> + self - > logRangeMutations [ backupName ] . push_back_deep ( self - > logRangeMutationsArena , m ) ; <nl> + } <nl> + } <nl> + else { <nl> + KeyRangeRef mutationRange ( m . param1 , m . param2 ) ; <nl> + KeyRangeRef intersectionRange ; <nl> <nl> - / / Determine the intersecting range <nl> - intersectionRange = mutationRange & backupSubrange ; <nl> + / / Identify and add the intersecting ranges of the mutation to the array of mutations to serialize <nl> + for ( auto backupRange : pProxyCommitData - > vecBackupKeys . intersectingRanges ( mutationRange ) ) <nl> + { <nl> + / / Get the backup sub range <nl> + const auto & backupSubrange = backupRange . range ( ) ; <nl> <nl> - / / Create the custom mutation for the specific backup tag <nl> - MutationRef backupMutation ( MutationRef : : Type : : ClearRange , intersectionRange . begin , intersectionRange . end ) ; <nl> + / / Determine the intersecting range <nl> + intersectionRange = mutationRange & backupSubrange ; <nl> <nl> - / / Add the mutation to the relevant backup tag <nl> - for ( auto backupName : backupRange . value ( ) ) { <nl> - logRangeMutations [ backupName ] . push_back_deep ( logRangeMutationsArena , backupMutation ) ; <nl> - } <nl> - } <nl> + / / Create the custom mutation for the specific backup tag <nl> + MutationRef backupMutation ( MutationRef : : Type : : ClearRange , intersectionRange . begin , intersectionRange . end ) ; <nl> + <nl> + / / Add the mutation to the relevant backup tag <nl> + for ( auto backupName : backupRange . value ( ) ) { <nl> + self - > logRangeMutations [ backupName ] . 
push_back_deep ( self - > logRangeMutationsArena , backupMutation ) ; <nl> } <nl> } <nl> } <nl> } <nl> } <nl> <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + ACTOR Future < Void > postResolution ( CommitBatchContext * self ) { <nl> + state ProxyCommitData * const pProxyCommitData = self - > pProxyCommitData ; <nl> + state std : : vector < CommitTransactionRequest > & trs = self - > trs ; <nl> + state const int64_t localBatchNumber = self - > localBatchNumber ; <nl> + state const Optional < UID > & debugID = self - > debugID ; <nl> + <nl> + TEST ( pProxyCommitData - > latestLocalCommitBatchLogging . get ( ) < localBatchNumber - 1 ) ; / / Queuing post - resolution commit processing <nl> + wait ( pProxyCommitData - > latestLocalCommitBatchLogging . whenAtLeast ( localBatchNumber - 1 ) ) ; <nl> + wait ( yield ( TaskPriority : : ProxyCommitYield1 ) ) ; <nl> + <nl> + self - > computeStart = g_network - > now ( ) ; <nl> + <nl> + pProxyCommitData - > stats . txnCommitResolved + = trs . size ( ) ; <nl> + <nl> + if ( debugID . present ( ) ) { <nl> + g_traceBatch . addEvent ( <nl> + " CommitDebug " , debugID . get ( ) . first ( ) , <nl> + " MasterProxyServer . commitBatch . ProcessingMutations " <nl> + ) ; <nl> + } <nl> + <nl> + self - > isMyFirstBatch = ! pProxyCommitData - > version ; <nl> + self - > oldCoordinators = pProxyCommitData - > txnStateStore - > readValue ( coordinatorsKey ) . get ( ) ; <nl> + <nl> + assertResolutionStateMutationsSizeConsistent ( self - > resolution ) ; <nl> + <nl> + applyMetadataEffect ( self ) ; <nl> + <nl> + determineCommittedTransactions ( self ) ; <nl> + <nl> + if ( self - > forceRecovery ) { <nl> + wait ( Future < Void > ( Never ( ) ) ) ; <nl> + } <nl> + <nl> + / / First pass <nl> + wait ( applyMetadataToCommittedTransactions ( self ) ) ; <nl> + <nl> + / / Second pass <nl> + wait ( assignMutationsToStorageServers ( self ) ) ; <nl> + <nl> / / Serialize and backup the mutations as a single mutation <nl> - if ( ( self - > vecBackupKeys . size ( ) > 1 ) & & logRangeMutations . size ( ) ) { <nl> - wait ( addBackupMutations ( self , & logRangeMutations , & toCommit , commitVersion , & computeDuration , & computeStart ) ) ; <nl> + if ( ( pProxyCommitData - > vecBackupKeys . size ( ) > 1 ) & & self - > logRangeMutations . size ( ) ) { <nl> + wait ( addBackupMutations ( pProxyCommitData , & self - > logRangeMutations , & self - > toCommit , self - > commitVersion , & self - > computeDuration , & self - > computeStart ) ) ; <nl> } <nl> <nl> - self - > stats . mutations + = mutationCount ; <nl> - self - > stats . mutationBytes + = mutationBytes ; <nl> + pProxyCommitData - > stats . mutations + = self - > mutationCount ; <nl> + pProxyCommitData - > stats . mutationBytes + = self - > mutationBytes ; <nl> <nl> / / Storage servers mustn ' t make durable versions which are not fully committed ( because then they are impossible to roll back ) <nl> / / We prevent this by limiting the number of versions which are semi - committed but not fully committed to be less than the MVCC window <nl> - if ( self - > committedVersion . get ( ) < commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) { <nl> - computeDuration + = g_network - > timer ( ) - computeStart ; <nl> + if ( pProxyCommitData - > committedVersion . get ( ) < self - > commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) { <nl> + self - > computeDuration + = g_network - > timer ( ) - self - > computeStart ; <nl> state Span waitVersionSpan ; <nl> - while ( self - > committedVersion . 
get ( ) < commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) { <nl> + while ( pProxyCommitData - > committedVersion . get ( ) < self - > commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) { <nl> / / This should be * extremely * rare in the real world , but knob buggification should make it happen in simulation <nl> TEST ( true ) ; / / Semi - committed pipeline limited by MVCC window <nl> - / / TraceEvent ( " ProxyWaitingForCommitted " , self - > dbgid ) . detail ( " CommittedVersion " , self - > committedVersion . get ( ) ) . detail ( " NeedToCommit " , commitVersion ) ; <nl> - waitVersionSpan = Span ( deterministicRandom ( ) - > randomUniqueID ( ) , " MP : overMaxReadTransactionLifeVersions " _loc , { span . context } ) ; <nl> + / / TraceEvent ( " ProxyWaitingForCommitted " , pProxyCommitData - > dbgid ) . detail ( " CommittedVersion " , pProxyCommitData - > committedVersion . get ( ) ) . detail ( " NeedToCommit " , commitVersion ) ; <nl> + waitVersionSpan = Span ( deterministicRandom ( ) - > randomUniqueID ( ) , " MP : overMaxReadTransactionLifeVersions " _loc , { self - > span . context } ) ; <nl> choose { <nl> - when ( wait ( self - > committedVersion . whenAtLeast ( commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) ) ) { <nl> + when ( wait ( pProxyCommitData - > committedVersion . whenAtLeast ( self - > commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) ) ) { <nl> wait ( yield ( ) ) ; <nl> - break ; <nl> + break ; <nl> } <nl> - when ( wait ( self - > cx - > onProxiesChanged ( ) ) ) { } <nl> - when ( GetRawCommittedVersionReply v = wait ( self - > master . getLiveCommittedVersion . getReply ( <nl> + when ( wait ( pProxyCommitData - > cx - > onProxiesChanged ( ) ) ) { } <nl> + when ( GetRawCommittedVersionReply v = wait ( pProxyCommitData - > master . getLiveCommittedVersion . getReply ( <nl> GetRawCommittedVersionRequest ( waitVersionSpan . context , debugID ) , TaskPriority : : GetLiveCommittedVersionReply ) ) ) { <nl> - if ( v . version > self - > committedVersion . get ( ) ) { <nl> - self - > locked = v . locked ; <nl> - self - > metadataVersion = v . metadataVersion ; <nl> - self - > committedVersion . set ( v . version ) ; <nl> + if ( v . version > pProxyCommitData - > committedVersion . get ( ) ) { <nl> + pProxyCommitData - > locked = v . locked ; <nl> + pProxyCommitData - > metadataVersion = v . metadataVersion ; <nl> + pProxyCommitData - > committedVersion . set ( v . version ) ; <nl> } <nl> <nl> - if ( self - > committedVersion . get ( ) < commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) <nl> + if ( pProxyCommitData - > committedVersion . get ( ) < self - > commitVersion - SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) <nl> wait ( delay ( SERVER_KNOBS - > PROXY_SPIN_DELAY ) ) ; <nl> } <nl> } <nl> } <nl> waitVersionSpan = Span { } ; <nl> - computeStart = g_network - > timer ( ) ; <nl> + self - > computeStart = g_network - > timer ( ) ; <nl> } <nl> <nl> - state LogSystemDiskQueueAdapter : : CommitMessage msg = storeCommits . back ( ) . first . get ( ) ; <nl> + self - > msg = self - > storeCommits . back ( ) . first . get ( ) ; <nl> <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " CommitDebug " , debugID . get ( ) . first ( ) , " MasterProxyServer . commitBatch . AfterStoreCommits " ) ; <nl> + if ( self - > debugID . present ( ) ) <nl> + g_traceBatch . addEvent ( " CommitDebug " , self - > debugID . get ( ) . first ( ) , " MasterProxyServer . 
commitBatch . AfterStoreCommits " ) ; <nl> <nl> / / txnState ( transaction subsystem state ) tag : message extracted from log adapter <nl> bool firstMessage = true ; <nl> - for ( auto m : msg . messages ) { <nl> + for ( auto m : self - > msg . messages ) { <nl> if ( firstMessage ) { <nl> - toCommit . addTxsTag ( ) ; <nl> + self - > toCommit . addTxsTag ( ) ; <nl> } <nl> - toCommit . addMessage ( StringRef ( m . begin ( ) , m . size ( ) ) , ! firstMessage ) ; <nl> + self - > toCommit . addMessage ( StringRef ( m . begin ( ) , m . size ( ) ) , ! firstMessage ) ; <nl> firstMessage = false ; <nl> } <nl> <nl> - if ( prevVersion & & commitVersion - prevVersion < SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT / 2 ) <nl> - debug_advanceMaxCommittedVersion ( UID ( ) , commitVersion ) ; / / < Is this valid ? <nl> + if ( self - > prevVersion & & self - > commitVersion - self - > prevVersion < SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT / 2 ) <nl> + debug_advanceMaxCommittedVersion ( UID ( ) , self - > commitVersion ) ; / / < Is this valid ? <nl> <nl> - / / TraceEvent ( " ProxyPush " , self - > dbgid ) . detail ( " PrevVersion " , prevVersion ) . detail ( " Version " , commitVersion ) <nl> + / / TraceEvent ( " ProxyPush " , pProxyCommitData - > dbgid ) . detail ( " PrevVersion " , prevVersion ) . detail ( " Version " , commitVersion ) <nl> / / . detail ( " TransactionsSubmitted " , trs . size ( ) ) . detail ( " TransactionsCommitted " , commitCount ) . detail ( " TxsPopTo " , msg . popTo ) ; <nl> <nl> - if ( prevVersion & & commitVersion - prevVersion < SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT / 2 ) <nl> - debug_advanceMaxCommittedVersion ( UID ( ) , commitVersion ) ; <nl> + if ( self - > prevVersion & & self - > commitVersion - self - > prevVersion < SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT / 2 ) <nl> + debug_advanceMaxCommittedVersion ( UID ( ) , self - > commitVersion ) ; <nl> <nl> - state double commitStartTime = now ( ) ; <nl> - self - > lastStartCommit = commitStartTime ; <nl> - Future < Version > loggingComplete = self - > logSystem - > push ( prevVersion , commitVersion , self - > committedVersion . get ( ) , self - > minKnownCommittedVersion , toCommit , debugID ) ; <nl> + self - > commitStartTime = now ( ) ; <nl> + pProxyCommitData - > lastStartCommit = self - > commitStartTime ; <nl> + self - > loggingComplete = pProxyCommitData - > logSystem - > push ( self - > prevVersion , self - > commitVersion , pProxyCommitData - > committedVersion . get ( ) , pProxyCommitData - > minKnownCommittedVersion , self - > toCommit , self - > debugID ) ; <nl> <nl> - if ( ! forceRecovery ) { <nl> - ASSERT ( self - > latestLocalCommitBatchLogging . get ( ) = = localBatchNumber - 1 ) ; <nl> - self - > latestLocalCommitBatchLogging . set ( localBatchNumber ) ; <nl> + if ( ! self - > forceRecovery ) { <nl> + ASSERT ( pProxyCommitData - > latestLocalCommitBatchLogging . get ( ) = = self - > localBatchNumber - 1 ) ; <nl> + pProxyCommitData - > latestLocalCommitBatchLogging . 
set ( self - > localBatchNumber ) ; <nl> } <nl> <nl> - computeDuration + = g_network - > timer ( ) - computeStart ; <nl> - if ( computeDuration > SERVER_KNOBS - > MIN_PROXY_COMPUTE & & batchOperations > 0 ) { <nl> - double computePerOperation = computeDuration / batchOperations ; <nl> - if ( computePerOperation < = self - > commitComputePerOperation [ latencyBucket ] ) { <nl> - self - > commitComputePerOperation [ latencyBucket ] = computePerOperation ; <nl> + self - > computeDuration + = g_network - > timer ( ) - self - > computeStart ; <nl> + if ( self - > computeDuration > SERVER_KNOBS - > MIN_PROXY_COMPUTE & & self - > batchOperations > 0 ) { <nl> + double computePerOperation = self - > computeDuration / self - > batchOperations ; <nl> + if ( computePerOperation < = pProxyCommitData - > commitComputePerOperation [ self - > latencyBucket ] ) { <nl> + pProxyCommitData - > commitComputePerOperation [ self - > latencyBucket ] = computePerOperation ; <nl> } else { <nl> - self - > commitComputePerOperation [ latencyBucket ] = SERVER_KNOBS - > PROXY_COMPUTE_GROWTH_RATE * computePerOperation + ( ( 1 . 0 - SERVER_KNOBS - > PROXY_COMPUTE_GROWTH_RATE ) * self - > commitComputePerOperation [ latencyBucket ] ) ; <nl> + pProxyCommitData - > commitComputePerOperation [ self - > latencyBucket ] = SERVER_KNOBS - > PROXY_COMPUTE_GROWTH_RATE * computePerOperation + ( ( 1 . 0 - SERVER_KNOBS - > PROXY_COMPUTE_GROWTH_RATE ) * pProxyCommitData - > commitComputePerOperation [ self - > latencyBucket ] ) ; <nl> } <nl> } <nl> <nl> - / / / / / / / Phase 4 : Logging ( network bound ; pipelined up to MAX_READ_TRANSACTION_LIFE_VERSIONS ( limited by loop above ) ) <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + ACTOR Future < Void > transactionLogging ( CommitBatchContext * self ) { <nl> + state ProxyCommitData * const pProxyCommitData = self - > pProxyCommitData ; <nl> <nl> try { <nl> choose { <nl> - when ( Version ver = wait ( loggingComplete ) ) { <nl> - self - > minKnownCommittedVersion = std : : max ( self - > minKnownCommittedVersion , ver ) ; <nl> + when ( Version ver = wait ( self - > loggingComplete ) ) { <nl> + pProxyCommitData - > minKnownCommittedVersion = std : : max ( pProxyCommitData - > minKnownCommittedVersion , ver ) ; <nl> } <nl> - when ( wait ( self - > committedVersion . whenAtLeast ( commitVersion + 1 ) ) ) { } <nl> + when ( wait ( pProxyCommitData - > committedVersion . whenAtLeast ( self - > commitVersion + 1 ) ) ) { } <nl> } <nl> } catch ( Error & e ) { <nl> if ( e . code ( ) = = error_code_broken_promise ) { <nl> ACTOR Future < Void > commitBatch ( <nl> throw ; <nl> } <nl> <nl> - self - > lastCommitLatency = now ( ) - commitStartTime ; <nl> - self - > lastCommitTime = std : : max ( self - > lastCommitTime . get ( ) , commitStartTime ) ; <nl> + pProxyCommitData - > lastCommitLatency = now ( ) - self - > commitStartTime ; <nl> + pProxyCommitData - > lastCommitTime = std : : max ( pProxyCommitData - > lastCommitTime . get ( ) , self - > commitStartTime ) ; <nl> + <nl> wait ( yield ( TaskPriority : : ProxyCommitYield2 ) ) ; <nl> <nl> - if ( self - > popRemoteTxs & & msg . popTo > ( self - > txsPopVersions . size ( ) ? self - > txsPopVersions . back ( ) . second : self - > lastTxsPop ) ) { <nl> - if ( self - > txsPopVersions . size ( ) > = SERVER_KNOBS - > MAX_TXS_POP_VERSION_HISTORY ) { <nl> + if ( pProxyCommitData - > popRemoteTxs & & self - > msg . popTo > ( pProxyCommitData - > txsPopVersions . size ( ) ? pProxyCommitData - > txsPopVersions . back ( ) . 
second : pProxyCommitData - > lastTxsPop ) ) { <nl> + if ( pProxyCommitData - > txsPopVersions . size ( ) > = SERVER_KNOBS - > MAX_TXS_POP_VERSION_HISTORY ) { <nl> TraceEvent ( SevWarnAlways , " DiscardingTxsPopHistory " ) . suppressFor ( 1 . 0 ) ; <nl> - self - > txsPopVersions . pop_front ( ) ; <nl> + pProxyCommitData - > txsPopVersions . pop_front ( ) ; <nl> } <nl> <nl> - self - > txsPopVersions . emplace_back ( commitVersion , msg . popTo ) ; <nl> + pProxyCommitData - > txsPopVersions . emplace_back ( self - > commitVersion , self - > msg . popTo ) ; <nl> } <nl> - self - > logSystem - > popTxs ( msg . popTo ) ; <nl> + pProxyCommitData - > logSystem - > popTxs ( self - > msg . popTo ) ; <nl> <nl> - / / / / / / / Phase 5 : Replies ( CPU bound ; no particular order required , though ordered execution would be best for latency ) <nl> - if ( prevVersion & & commitVersion - prevVersion < SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT / 2 ) <nl> - debug_advanceMinCommittedVersion ( UID ( ) , commitVersion ) ; <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + ACTOR Future < Void > reply ( CommitBatchContext * self ) { <nl> + state ProxyCommitData * const pProxyCommitData = self - > pProxyCommitData ; <nl> + <nl> + const Optional < UID > & debugID = self - > debugID ; <nl> <nl> - / / TraceEvent ( " ProxyPushed " , self - > dbgid ) . detail ( " PrevVersion " , prevVersion ) . detail ( " Version " , commitVersion ) ; <nl> + if ( self - > prevVersion & & self - > commitVersion - self - > prevVersion < SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT / 2 ) <nl> + debug_advanceMinCommittedVersion ( UID ( ) , self - > commitVersion ) ; <nl> + <nl> + / / TraceEvent ( " ProxyPushed " , pProxyCommitData - > dbgid ) . detail ( " PrevVersion " , prevVersion ) . detail ( " Version " , commitVersion ) ; <nl> if ( debugID . present ( ) ) <nl> g_traceBatch . addEvent ( " CommitDebug " , debugID . get ( ) . first ( ) , " MasterProxyServer . commitBatch . AfterLogPush " ) ; <nl> <nl> - for ( auto & p : storeCommits ) { <nl> + for ( auto & p : self - > storeCommits ) { <nl> ASSERT ( ! p . second . isReady ( ) ) ; <nl> p . first . get ( ) . acknowledge . send ( Void ( ) ) ; <nl> ASSERT ( p . second . isReady ( ) ) ; <nl> ACTOR Future < Void > commitBatch ( <nl> / / up - to - date live committed version . We also maintain the invariant that master ' s committed version > = self - > committedVersion <nl> / / by reporting commit version first before updating self - > committedVersion . Otherwise , a client may get a commit <nl> / / version that the master is not aware of , and next GRV request may get a version less than self - > committedVersion . <nl> - TEST ( self - > committedVersion . get ( ) > commitVersion ) ; / / A later version was reported committed first <nl> - if ( SERVER_KNOBS - > ASK_READ_VERSION_FROM_MASTER & & commitVersion > = self - > committedVersion . get ( ) ) { <nl> - wait ( self - > master . reportLiveCommittedVersion . getReply ( ReportRawCommittedVersionRequest ( commitVersion , lockedAfter , metadataVersionAfter , self - > minKnownCommittedVersion ) , TaskPriority : : ProxyMasterVersionReply ) ) ; <nl> + TEST ( pProxyCommitData - > committedVersion . get ( ) > self - > commitVersion ) ; / / A later version was reported committed first <nl> + if ( self - > commitVersion > pProxyCommitData - > committedVersion . get ( ) ) { <nl> + wait ( pProxyCommitData - > master . reportLiveCommittedVersion . 
getReply ( <nl> + ReportRawCommittedVersionRequest ( <nl> + self - > commitVersion , <nl> + self - > lockedAfter , <nl> + self - > metadataVersionAfter ) , TaskPriority : : ProxyMasterVersionReply ) ) ; <nl> } <nl> - if ( commitVersion > self - > committedVersion . get ( ) ) { <nl> - self - > locked = lockedAfter ; <nl> - self - > metadataVersion = metadataVersionAfter ; <nl> - self - > committedVersion . set ( commitVersion ) ; <nl> + if ( self - > commitVersion > pProxyCommitData - > committedVersion . get ( ) ) { <nl> + pProxyCommitData - > locked = self - > lockedAfter ; <nl> + pProxyCommitData - > metadataVersion = self - > metadataVersionAfter ; <nl> + pProxyCommitData - > committedVersion . set ( self - > commitVersion ) ; <nl> } <nl> <nl> - if ( forceRecovery ) { <nl> - TraceEvent ( SevWarn , " RestartingTxnSubsystem " , self - > dbgid ) . detail ( " Stage " , " ProxyShutdown " ) ; <nl> + if ( self - > forceRecovery ) { <nl> + TraceEvent ( SevWarn , " RestartingTxnSubsystem " , pProxyCommitData - > dbgid ) . detail ( " Stage " , " ProxyShutdown " ) ; <nl> throw worker_removed ( ) ; <nl> } <nl> <nl> / / Send replies to clients <nl> double endTime = g_network - > timer ( ) ; <nl> / / Reset all to zero , used to track the correct index of each commitTransacitonRef on each resolver <nl> - std : : fill ( nextTr . begin ( ) , nextTr . end ( ) , 0 ) ; <nl> - for ( int t = 0 ; t < trs . size ( ) ; t + + ) { <nl> - if ( committed [ t ] = = ConflictBatch : : TransactionCommitted & & ( ! locked | | trs [ t ] . isLockAware ( ) ) ) { <nl> - ASSERT_WE_THINK ( commitVersion ! = invalidVersion ) ; <nl> - trs [ t ] . reply . send ( CommitID ( commitVersion , t , metadataVersionAfter ) ) ; <nl> + <nl> + std : : fill ( self - > nextTr . begin ( ) , self - > nextTr . end ( ) , 0 ) ; <nl> + for ( int t = 0 ; t < self - > trs . size ( ) ; t + + ) { <nl> + auto & tr = self - > trs [ t ] ; <nl> + if ( self - > committed [ t ] = = ConflictBatch : : TransactionCommitted & & ( ! self - > locked | | tr . isLockAware ( ) ) ) { <nl> + ASSERT_WE_THINK ( self - > commitVersion ! = invalidVersion ) ; <nl> + tr . reply . send ( CommitID ( self - > commitVersion , t , self - > metadataVersionAfter ) ) ; <nl> + <nl> / / aggregate commit cost estimation if committed <nl> - ASSERT ( trs [ t ] . commitCostEstimation . present ( ) = = trs [ t ] . tagSet . present ( ) ) ; <nl> - if ( trs [ t ] . tagSet . present ( ) ) { <nl> - TransactionCommitCostEstimation & costEstimation = trs [ t ] . commitCostEstimation . get ( ) ; <nl> - for ( auto & tag : trs [ t ] . tagSet . get ( ) ) { <nl> - self - > transactionTagCommitCostEst [ tag ] + = costEstimation ; <nl> + ASSERT ( tr . commitCostEstimation . present ( ) = = tr . tagSet . present ( ) ) ; <nl> + if ( tr . tagSet . present ( ) ) { <nl> + TransactionCommitCostEstimation & costEstimation = tr . commitCostEstimation . get ( ) ; <nl> + for ( auto & tag : tr . tagSet . get ( ) ) { <nl> + pProxyCommitData - > transactionTagCommitCostEst [ tag ] + = costEstimation ; <nl> } <nl> } <nl> } <nl> - else if ( committed [ t ] = = ConflictBatch : : TransactionTooOld ) { <nl> - trs [ t ] . reply . sendError ( transaction_too_old ( ) ) ; <nl> + else if ( self - > committed [ t ] = = ConflictBatch : : TransactionTooOld ) { <nl> + tr . reply . sendError ( transaction_too_old ( ) ) ; <nl> } <nl> else { <nl> / / If enable the option to report conflicting keys from resolvers , we send back all keyranges ' indices <nl> / / through CommitID <nl> - if ( trs [ t ] . transaction . 
report_conflicting_keys ) { <nl> + if ( tr . transaction . report_conflicting_keys ) { <nl> Standalone < VectorRef < int > > conflictingKRIndices ; <nl> - for ( int resolverInd : transactionResolverMap [ t ] ) { <nl> + for ( int resolverInd : self - > transactionResolverMap [ t ] ) { <nl> auto const & cKRs = <nl> - resolution [ resolverInd ] <nl> - . conflictingKeyRangeMap [ nextTr [ resolverInd ] ] ; / / nextTr [ resolverInd ] - > index of this trs [ t ] <nl> + self - > resolution [ resolverInd ] <nl> + . conflictingKeyRangeMap [ self - > nextTr [ resolverInd ] ] ; / / nextTr [ resolverInd ] - > index of this trs [ t ] <nl> / / on the resolver <nl> for ( auto const & rCRIndex : cKRs ) <nl> / / read_conflict_range can change when sent to resolvers , mapping the index from resolver - side <nl> / / to original index in commitTransactionRef <nl> conflictingKRIndices . push_back ( conflictingKRIndices . arena ( ) , <nl> - txReadConflictRangeIndexMap [ t ] [ resolverInd ] [ rCRIndex ] ) ; <nl> + self - > txReadConflictRangeIndexMap [ t ] [ resolverInd ] [ rCRIndex ] ) ; <nl> } <nl> / / At least one keyRange index should be returned <nl> ASSERT ( conflictingKRIndices . size ( ) ) ; <nl> - trs [ t ] . reply . send ( CommitID ( invalidVersion , t , Optional < Value > ( ) , <nl> + tr . reply . send ( CommitID ( invalidVersion , t , Optional < Value > ( ) , <nl> Optional < Standalone < VectorRef < int > > > ( conflictingKRIndices ) ) ) ; <nl> } else { <nl> - trs [ t ] . reply . sendError ( not_committed ( ) ) ; <nl> + tr . reply . sendError ( not_committed ( ) ) ; <nl> } <nl> } <nl> <nl> / / Update corresponding transaction indices on each resolver <nl> - for ( int resolverInd : transactionResolverMap [ t ] ) nextTr [ resolverInd ] + + ; <nl> + for ( int resolverInd : self - > transactionResolverMap [ t ] ) self - > nextTr [ resolverInd ] + + ; <nl> <nl> / / TODO : filter if pipelined with large commit <nl> - double duration = endTime - trs [ t ] . requestTime ( ) ; <nl> - self - > stats . commitLatencySample . addMeasurement ( duration ) ; <nl> - if ( self - > latencyBandConfig . present ( ) ) { <nl> - bool filter = maxTransactionBytes > self - > latencyBandConfig . get ( ) . commitConfig . maxCommitBytes . orDefault ( std : : numeric_limits < int > : : max ( ) ) ; <nl> - self - > stats . commitLatencyBands . addMeasurement ( duration , filter ) ; <nl> + const double duration = endTime - tr . requestTime ( ) ; <nl> + pProxyCommitData - > stats . commitLatencySample . addMeasurement ( duration ) ; <nl> + if ( pProxyCommitData - > latencyBandConfig . present ( ) ) { <nl> + bool filter = self - > maxTransactionBytes > pProxyCommitData - > latencyBandConfig . get ( ) . commitConfig . maxCommitBytes . orDefault ( std : : numeric_limits < int > : : max ( ) ) ; <nl> + pProxyCommitData - > stats . commitLatencyBands . addMeasurement ( duration , filter ) ; <nl> } <nl> } <nl> <nl> - + + self - > stats . commitBatchOut ; <nl> - self - > stats . txnCommitOut + = trs . size ( ) ; <nl> - self - > stats . txnConflicts + = trs . size ( ) - commitCount ; <nl> - self - > stats . txnCommitOutSuccess + = commitCount ; <nl> + + + pProxyCommitData - > stats . commitBatchOut ; <nl> + pProxyCommitData - > stats . txnCommitOut + = self - > trs . size ( ) ; <nl> + pProxyCommitData - > stats . txnConflicts + = self - > trs . size ( ) - self - > commitCount ; <nl> + pProxyCommitData - > stats . 
txnCommitOutSuccess + = self - > commitCount ; <nl> <nl> - if ( now ( ) - self - > lastCoalesceTime > SERVER_KNOBS - > RESOLVER_COALESCE_TIME ) { <nl> - self - > lastCoalesceTime = now ( ) ; <nl> - int lastSize = self - > keyResolvers . size ( ) ; <nl> - auto rs = self - > keyResolvers . ranges ( ) ; <nl> - Version oldestVersion = prevVersion - SERVER_KNOBS - > MAX_WRITE_TRANSACTION_LIFE_VERSIONS ; <nl> + if ( now ( ) - pProxyCommitData - > lastCoalesceTime > SERVER_KNOBS - > RESOLVER_COALESCE_TIME ) { <nl> + pProxyCommitData - > lastCoalesceTime = now ( ) ; <nl> + int lastSize = pProxyCommitData - > keyResolvers . size ( ) ; <nl> + auto rs = pProxyCommitData - > keyResolvers . ranges ( ) ; <nl> + Version oldestVersion = self - > prevVersion - SERVER_KNOBS - > MAX_WRITE_TRANSACTION_LIFE_VERSIONS ; <nl> for ( auto r = rs . begin ( ) ; r ! = rs . end ( ) ; + + r ) { <nl> while ( r - > value ( ) . size ( ) > 1 & & r - > value ( ) [ 1 ] . first < oldestVersion ) <nl> r - > value ( ) . pop_front ( ) ; <nl> if ( r - > value ( ) . size ( ) & & r - > value ( ) . front ( ) . first < oldestVersion ) <nl> r - > value ( ) . front ( ) . first = 0 ; <nl> } <nl> - self - > keyResolvers . coalesce ( allKeys ) ; <nl> - if ( self - > keyResolvers . size ( ) ! = lastSize ) <nl> - TraceEvent ( " KeyResolverSize " , self - > dbgid ) . detail ( " Size " , self - > keyResolvers . size ( ) ) ; <nl> + pProxyCommitData - > keyResolvers . coalesce ( allKeys ) ; <nl> + if ( pProxyCommitData - > keyResolvers . size ( ) ! = lastSize ) <nl> + TraceEvent ( " KeyResolverSize " , pProxyCommitData - > dbgid ) . detail ( " Size " , pProxyCommitData - > keyResolvers . size ( ) ) ; <nl> } <nl> <nl> / / Dynamic batching for commits <nl> - double target_latency = ( now ( ) - t1 ) * SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION ; <nl> - self - > commitBatchInterval = std : : max ( <nl> + double target_latency = ( now ( ) - self - > startTime ) * SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION ; <nl> + pProxyCommitData - > commitBatchInterval = std : : max ( <nl> SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_MIN , <nl> std : : min ( SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_MAX , <nl> target_latency * SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_SMOOTHER_ALPHA + <nl> - self - > commitBatchInterval * ( 1 - SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_SMOOTHER_ALPHA ) ) ) ; <nl> + pProxyCommitData - > commitBatchInterval * ( 1 - SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_SMOOTHER_ALPHA ) ) ) ; <nl> + <nl> + pProxyCommitData - > commitBatchesMemBytesCount - = self - > currentBatchMemBytesCount ; <nl> + ASSERT_ABORT ( pProxyCommitData - > commitBatchesMemBytesCount > = 0 ) ; <nl> + wait ( self - > releaseFuture ) ; <nl> + <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + } / / namespace CommitBatch <nl> + <nl> + / / Commit one batch of transactions trs <nl> + ACTOR Future < Void > commitBatch ( <nl> + ProxyCommitData * self , <nl> + vector < CommitTransactionRequest > * trs , <nl> + int currentBatchMemBytesCount ) { <nl> + / / WARNING : this code is run at a high priority ( until the first delay ( 0 ) ) , so it needs to do as little work as possible <nl> + state CommitBatch : : CommitBatchContext context ( self , trs , currentBatchMemBytesCount ) ; <nl> + <nl> + / / Active load balancing runs at a very high priority ( to obtain accurate estimate of memory used by commit batches ) so we need to downgrade here <nl> + wait ( delay ( 0 , TaskPriority : : ProxyCommit 
) ) ; <nl> + <nl> + context . pProxyCommitData - > lastVersionTime = context . startTime ; <nl> + + + context . pProxyCommitData - > stats . commitBatchIn ; <nl> + <nl> + / / / / / / / Phase 1 : Pre - resolution processing ( CPU bound except waiting for a version # which is separately pipelined and * should * be available by now ( unless empty commit ) ; ordered ; currently atomic but could yield ) <nl> + wait ( CommitBatch : : preresolutionProcessing ( & context ) ) ; <nl> + <nl> + / / / / / / / Phase 2 : Resolution ( waiting on the network ; pipelined ) <nl> + wait ( CommitBatch : : getResolution ( & context ) ) ; <nl> + <nl> + / / / / / / Phase 3 : Post - resolution processing ( CPU bound except for very rare situations ; ordered ; currently atomic but doesn ' t need to be ) <nl> + wait ( CommitBatch : : postResolution ( & context ) ) ; <nl> + <nl> + / / / / / / / Phase 4 : Logging ( network bound ; pipelined up to MAX_READ_TRANSACTION_LIFE_VERSIONS ( limited by loop above ) ) <nl> + wait ( CommitBatch : : transactionLogging ( & context ) ) ; <nl> + <nl> + / / / / / / / Phase 5 : Replies ( CPU bound ; no particular order required , though ordered execution would be best for latency ) <nl> + wait ( CommitBatch : : reply ( & context ) ) ; <nl> <nl> - self - > commitBatchesMemBytesCount - = currentBatchMemBytesCount ; <nl> - ASSERT_ABORT ( self - > commitBatchesMemBytesCount > = 0 ) ; <nl> - wait ( releaseFuture ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> ACTOR Future < Void > masterProxyServerCore ( <nl> keyInfoData . emplace_back ( MapPair < Key , ServerCacheInfo > ( k , info ) , 1 ) ; <nl> } <nl> } else { <nl> - mutations . push_back ( mutations . arena ( ) , MutationRef ( MutationRef : : SetValue , kv . key , kv . value ) ) ; <nl> + mutations . emplace_back ( mutations . arena ( ) , MutationRef : : SetValue , kv . key , kv . value ) ; <nl> } <nl> } <nl> <nl> ACTOR Future < Void > masterProxyServerCore ( <nl> <nl> Arena arena ; <nl> bool confChanges ; <nl> - applyMetadataMutations ( commitData . dbgid , arena , mutations , commitData . txnStateStore , nullptr , & confChanges , Reference < ILogSystem > ( ) , 0 , & commitData . vecBackupKeys , & commitData . keyInfo , & commitData . cacheInfo , commitData . firstProxy ? & commitData . uid_applyMutationsData : nullptr , commitData . commit , commitData . cx , & commitData . committedVersion , & commitData . storageCache , & commitData . tag_popped , true ) ; <nl> + applyMetadataMutations ( commitData , arena , Reference < ILogSystem > ( ) , mutations , <nl> + / * pToCommit = * / nullptr , confChanges , <nl> + / * popVersion = * / 0 , / * initialCommit = * / true ) ; <nl> } <nl> <nl> auto lockedKey = commitData . txnStateStore - > readValue ( databaseLockedKey ) . get ( ) ; <nl> new file mode 100644 <nl> index 0000000000 . . 8c20951a4f <nl> mmm / dev / null <nl> ppp b / fdbserver / ProxyCommitData . actor . h <nl> <nl> + / * <nl> + * ProxyCommitData . h <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # pragma once <nl> + # if defined ( NO_INTELLISENSE ) & & ! defined ( FDBSERVER_PROXYCOMMITDATA_ACTOR_G_H ) <nl> + # define FDBSERVER_PROXYCOMMITDATA_ACTOR_G_H <nl> + # include " fdbserver / ProxyCommitData . actor . g . h " <nl> + # elif ! defined ( FDBSERVER_PROXYCOMMITDATA_ACTOR_H ) <nl> + # define FDBSERVER_PROXYCOMMITDATA_ACTOR_H <nl> + <nl> + # include " fdbclient / FDBTypes . h " <nl> + # include " fdbrpc / Stats . h " <nl> + # include " fdbserver / Knobs . h " <nl> + # include " fdbserver / LogSystemDiskQueueAdapter . h " <nl> + # include " flow / IRandom . h " <nl> + <nl> + # include " flow / actorcompiler . h " / / This must be the last # include . <nl> + <nl> + DESCR struct SingleKeyMutation { <nl> + Standalone < StringRef > shardBegin ; <nl> + Standalone < StringRef > shardEnd ; <nl> + int64_t tag1 ; <nl> + int64_t tag2 ; <nl> + int64_t tag3 ; <nl> + } ; <nl> + <nl> + struct ApplyMutationsData { <nl> + Future < Void > worker ; <nl> + Version endVersion ; <nl> + Reference < KeyRangeMap < Version > > keyVersion ; <nl> + } ; <nl> + <nl> + struct ProxyStats { <nl> + CounterCollection cc ; <nl> + Counter txnCommitIn , txnCommitVersionAssigned , txnCommitResolving , txnCommitResolved , txnCommitOut , <nl> + txnCommitOutSuccess , txnCommitErrors ; <nl> + Counter txnConflicts ; <nl> + Counter commitBatchIn , commitBatchOut ; <nl> + Counter mutationBytes ; <nl> + Counter mutations ; <nl> + Counter conflictRanges ; <nl> + Counter keyServerLocationIn , keyServerLocationOut , keyServerLocationErrors ; <nl> + Version lastCommitVersionAssigned ; <nl> + <nl> + LatencySample commitLatencySample ; <nl> + LatencyBands commitLatencyBands ; <nl> + <nl> + Future < Void > logger ; <nl> + <nl> + int recentRequests ; <nl> + Deque < int > requestBuckets ; <nl> + double lastBucketBegin ; <nl> + double bucketInterval ; <nl> + <nl> + void updateRequestBuckets ( ) { <nl> + while ( now ( ) - lastBucketBegin > bucketInterval ) { <nl> + lastBucketBegin + = bucketInterval ; <nl> + recentRequests - = requestBuckets . front ( ) ; <nl> + requestBuckets . pop_front ( ) ; <nl> + requestBuckets . push_back ( 0 ) ; <nl> + } <nl> + } <nl> + <nl> + void addRequest ( ) { <nl> + updateRequestBuckets ( ) ; <nl> + + + recentRequests ; <nl> + + + requestBuckets . back ( ) ; <nl> + } <nl> + <nl> + int getRecentRequests ( ) { <nl> + updateRequestBuckets ( ) ; <nl> + return recentRequests * FLOW_KNOBS - > BASIC_LOAD_BALANCE_UPDATE_RATE / <nl> + ( FLOW_KNOBS - > BASIC_LOAD_BALANCE_UPDATE_RATE - ( lastBucketBegin + bucketInterval - now ( ) ) ) ; <nl> + } <nl> + <nl> + explicit ProxyStats ( UID id , Version * pVersion , NotifiedVersion * pCommittedVersion , <nl> + int64_t * commitBatchesMemBytesCountPtr ) <nl> + : cc ( " ProxyStats " , id . 
toString ( ) ) , recentRequests ( 0 ) , lastBucketBegin ( now ( ) ) , <nl> + bucketInterval ( FLOW_KNOBS - > BASIC_LOAD_BALANCE_UPDATE_RATE / FLOW_KNOBS - > BASIC_LOAD_BALANCE_BUCKETS ) , <nl> + txnCommitIn ( " TxnCommitIn " , cc ) , <nl> + txnCommitVersionAssigned ( " TxnCommitVersionAssigned " , cc ) , txnCommitResolving ( " TxnCommitResolving " , cc ) , <nl> + txnCommitResolved ( " TxnCommitResolved " , cc ) , txnCommitOut ( " TxnCommitOut " , cc ) , <nl> + txnCommitOutSuccess ( " TxnCommitOutSuccess " , cc ) , txnCommitErrors ( " TxnCommitErrors " , cc ) , <nl> + txnConflicts ( " TxnConflicts " , cc ) , commitBatchIn ( " CommitBatchIn " , cc ) , <nl> + commitBatchOut ( " CommitBatchOut " , cc ) , mutationBytes ( " MutationBytes " , cc ) , mutations ( " Mutations " , cc ) , <nl> + conflictRanges ( " ConflictRanges " , cc ) , keyServerLocationIn ( " KeyServerLocationIn " , cc ) , <nl> + keyServerLocationOut ( " KeyServerLocationOut " , cc ) , keyServerLocationErrors ( " KeyServerLocationErrors " , cc ) , <nl> + lastCommitVersionAssigned ( 0 ) , <nl> + commitLatencySample ( " CommitLatencyMetrics " , id , SERVER_KNOBS - > LATENCY_METRICS_LOGGING_INTERVAL , <nl> + SERVER_KNOBS - > LATENCY_SAMPLE_SIZE ) , <nl> + commitLatencyBands ( " CommitLatencyMetrics " , id , SERVER_KNOBS - > STORAGE_LOGGING_DELAY ) { <nl> + specialCounter ( cc , " LastAssignedCommitVersion " , [ this ] ( ) { return this - > lastCommitVersionAssigned ; } ) ; <nl> + specialCounter ( cc , " Version " , [ pVersion ] ( ) { return * pVersion ; } ) ; <nl> + specialCounter ( cc , " CommittedVersion " , [ pCommittedVersion ] ( ) { return pCommittedVersion - > get ( ) ; } ) ; <nl> + specialCounter ( cc , " CommitBatchesMemBytesCount " , <nl> + [ commitBatchesMemBytesCountPtr ] ( ) { return * commitBatchesMemBytesCountPtr ; } ) ; <nl> + logger = traceCounters ( " ProxyMetrics " , id , SERVER_KNOBS - > WORKER_LOGGING_INTERVAL , & cc , " ProxyMetrics " ) ; <nl> + for ( int i = 0 ; i < FLOW_KNOBS - > BASIC_LOAD_BALANCE_BUCKETS ; i + + ) { <nl> + requestBuckets . 
push_back ( 0 ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + struct ProxyCommitData { <nl> + UID dbgid ; <nl> + int64_t commitBatchesMemBytesCount ; <nl> + ProxyStats stats ; <nl> + MasterInterface master ; <nl> + vector < ResolverInterface > resolvers ; <nl> + LogSystemDiskQueueAdapter * logAdapter ; <nl> + Reference < ILogSystem > logSystem ; <nl> + IKeyValueStore * txnStateStore ; <nl> + NotifiedVersion committedVersion ; / / Provided that this recovery has succeeded or will succeed , this version is <nl> + / / fully committed ( durable ) <nl> + Version minKnownCommittedVersion ; / / No version smaller than this one will be used as the known committed version <nl> + / / during recovery <nl> + Version version ; / / The version at which txnStateStore is up to date <nl> + Promise < Void > validState ; / / Set once txnStateStore and version are valid <nl> + double lastVersionTime ; <nl> + KeyRangeMap < std : : set < Key > > vecBackupKeys ; <nl> + uint64_t commitVersionRequestNumber ; <nl> + uint64_t mostRecentProcessedRequestNumber ; <nl> + KeyRangeMap < Deque < std : : pair < Version , int > > > keyResolvers ; <nl> + KeyRangeMap < ServerCacheInfo > keyInfo ; <nl> + KeyRangeMap < bool > cacheInfo ; <nl> + std : : map < Key , ApplyMutationsData > uid_applyMutationsData ; <nl> + bool firstProxy ; <nl> + double lastCoalesceTime ; <nl> + bool locked ; <nl> + Optional < Value > metadataVersion ; <nl> + double commitBatchInterval ; <nl> + <nl> + int64_t localCommitBatchesStarted ; <nl> + NotifiedVersion latestLocalCommitBatchResolving ; <nl> + NotifiedVersion latestLocalCommitBatchLogging ; <nl> + <nl> + RequestStream < GetReadVersionRequest > getConsistentReadVersion ; <nl> + RequestStream < CommitTransactionRequest > commit ; <nl> + Database cx ; <nl> + Reference < AsyncVar < ServerDBInfo > > db ; <nl> + EventMetricHandle < SingleKeyMutation > singleKeyMutationEvent ; <nl> + <nl> + std : : map < UID , Reference < StorageInfo > > storageCache ; <nl> + std : : map < Tag , Version > tag_popped ; <nl> + Deque < std : : pair < Version , Version > > txsPopVersions ; <nl> + Version lastTxsPop ; <nl> + bool popRemoteTxs ; <nl> + vector < Standalone < StringRef > > whitelistedBinPathVec ; <nl> + <nl> + Optional < LatencyBandConfig > latencyBandConfig ; <nl> + double lastStartCommit ; <nl> + double lastCommitLatency ; <nl> + int updateCommitRequests = 0 ; <nl> + NotifiedDouble lastCommitTime ; <nl> + <nl> + vector < double > commitComputePerOperation ; <nl> + TransactionTagMap < TransactionCommitCostEstimation > transactionTagCommitCostEst ; <nl> + <nl> + / / The tag related to a storage server rarely change , so we keep a vector of tags for each key range to be slightly <nl> + / / more CPU efficient . When a tag related to a storage server does change , we empty out all of these vectors to <nl> + / / signify they must be repopulated . We do not repopulate them immediately to avoid a slow task . <nl> + const vector < Tag > & tagsForKey ( StringRef key ) { <nl> + auto & tags = keyInfo [ key ] . tags ; <nl> + if ( ! tags . size ( ) ) { <nl> + auto & r = keyInfo . rangeContaining ( key ) . value ( ) ; <nl> + for ( auto info : r . src_info ) { <nl> + r . tags . push_back ( info - > tag ) ; <nl> + } <nl> + for ( auto info : r . dest_info ) { <nl> + r . tags . push_back ( info - > tag ) ; <nl> + } <nl> + uniquify ( r . tags ) ; <nl> + return r . tags ; <nl> + } <nl> + return tags ; <nl> + } <nl> + <nl> + bool needsCacheTag ( KeyRangeRef range ) { <nl> + auto ranges = cacheInfo . 
intersectingRanges ( range ) ; <nl> + for ( auto r : ranges ) { <nl> + if ( r . value ( ) ) { <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void updateLatencyBandConfig ( Optional < LatencyBandConfig > newLatencyBandConfig ) { <nl> + if ( newLatencyBandConfig . present ( ) ! = latencyBandConfig . present ( ) | | <nl> + ( newLatencyBandConfig . present ( ) & & <nl> + newLatencyBandConfig . get ( ) . commitConfig ! = latencyBandConfig . get ( ) . commitConfig ) ) { <nl> + TraceEvent ( " LatencyBandCommitUpdatingConfig " ) . detail ( " Present " , newLatencyBandConfig . present ( ) ) ; <nl> + stats . commitLatencyBands . clearBands ( ) ; <nl> + if ( newLatencyBandConfig . present ( ) ) { <nl> + for ( auto band : newLatencyBandConfig . get ( ) . commitConfig . bands ) { <nl> + stats . commitLatencyBands . addThreshold ( band ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + latencyBandConfig = newLatencyBandConfig ; <nl> + } <nl> + <nl> + ProxyCommitData ( UID dbgid , MasterInterface master , RequestStream < GetReadVersionRequest > getConsistentReadVersion , <nl> + Version recoveryTransactionVersion , RequestStream < CommitTransactionRequest > commit , <nl> + Reference < AsyncVar < ServerDBInfo > > db , bool firstProxy ) <nl> + : dbgid ( dbgid ) , stats ( dbgid , & version , & committedVersion , & commitBatchesMemBytesCount ) , master ( master ) , <nl> + logAdapter ( NULL ) , txnStateStore ( NULL ) , popRemoteTxs ( false ) , committedVersion ( recoveryTransactionVersion ) , <nl> + version ( 0 ) , minKnownCommittedVersion ( 0 ) , lastVersionTime ( 0 ) , commitVersionRequestNumber ( 1 ) , <nl> + mostRecentProcessedRequestNumber ( 0 ) , getConsistentReadVersion ( getConsistentReadVersion ) , commit ( commit ) , <nl> + lastCoalesceTime ( 0 ) , localCommitBatchesStarted ( 0 ) , locked ( false ) , <nl> + commitBatchInterval ( SERVER_KNOBS - > COMMIT_TRANSACTION_BATCH_INTERVAL_MIN ) , firstProxy ( firstProxy ) , <nl> + cx ( openDBOnServer ( db , TaskPriority : : DefaultEndpoint , true , true ) ) , db ( db ) , <nl> + singleKeyMutationEvent ( LiteralStringRef ( " SingleKeyMutation " ) ) , commitBatchesMemBytesCount ( 0 ) , lastTxsPop ( 0 ) , <nl> + lastStartCommit ( 0 ) , lastCommitLatency ( SERVER_KNOBS - > REQUIRED_MIN_RECOVERY_DURATION ) , lastCommitTime ( 0 ) { <nl> + commitComputePerOperation . resize ( SERVER_KNOBS - > PROXY_COMPUTE_BUCKETS , 0 . 0 ) ; <nl> + } <nl> + } ; <nl> + <nl> + # include " flow / unactorcompiler . h " <nl> + # endif / / FDBSERVER_PROXYCOMMITDATA_H <nl> \ No newline at end of file <nl> mmm a / fdbserver / RestoreApplier . actor . cpp <nl> ppp b / fdbserver / RestoreApplier . actor . cpp <nl> <nl> # include " fdbserver / RestoreApplier . actor . h " <nl> <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> + # include " flow / network . h " <nl> <nl> ACTOR static Future < Void > handleSendMutationVectorRequest ( RestoreSendVersionedMutationsRequest req , <nl> Reference < RestoreApplierData > self ) ; <nl> ACTOR Future < Void > restoreApplierCore ( RestoreApplierInterface applierInterf , int <nl> return Void ( ) ; <nl> } <nl> <nl> - / / The actor may be invovked multiple times and executed async . <nl> + / / The actor may be invoked multiple times and executed async . <nl> / / No race condition as long as we do not wait or yield when operate the shared <nl> / / data . Multiple such actors can run on different fileIDs . 
<nl> / / Different files may contain mutations of the same commit versions , but with <nl> ACTOR static Future < Void > handleSendMutationVectorRequest ( RestoreSendVersionedMu <nl> . detail ( " Version " , versionedMutation . version . toString ( ) ) <nl> . detail ( " Index " , mIndex ) <nl> . detail ( " MutationReceived " , versionedMutation . mutation . toString ( ) ) ; <nl> + batchData - > receivedBytes + = versionedMutation . mutation . totalSize ( ) ; <nl> batchData - > counters . receivedBytes + = versionedMutation . mutation . totalSize ( ) ; <nl> batchData - > counters . receivedWeightedBytes + = <nl> versionedMutation . mutation . weightedTotalSize ( ) ; / / atomicOp will be amplified <nl> ACTOR static Future < Void > applyClearRangeMutations ( Standalone < VectorRef < KeyRange <nl> . detail ( " BatchIndex " , batchIndex ) <nl> . detail ( " Ranges " , ranges . size ( ) ) <nl> . detail ( " DelayTime " , delayTime ) ; <nl> + if ( SERVER_KNOBS - > FASTRESTORE_NOT_WRITE_DB ) { <nl> + TraceEvent ( " FastRestoreApplierClearRangeMutationsNotWriteDB " , applierID ) <nl> + . detail ( " BatchIndex " , batchIndex ) <nl> + . detail ( " Ranges " , ranges . size ( ) ) ; <nl> + ASSERT ( ! g_network - > isSimulated ( ) ) ; <nl> + return Void ( ) ; <nl> + } <nl> + <nl> loop { <nl> try { <nl> tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> ACTOR static Future < Void > getAndComputeStagingKeys ( <nl> state UID randomID = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> <nl> wait ( delay ( delayTime + deterministicRandom ( ) - > random01 ( ) * delayTime ) ) ; <nl> + <nl> + if ( SERVER_KNOBS - > FASTRESTORE_NOT_WRITE_DB ) { / / Get dummy value to short - circuit DB <nl> + TraceEvent ( " FastRestoreApplierGetAndComputeStagingKeysStartNotUseDB " , applierID ) <nl> + . detail ( " RandomUID " , randomID ) <nl> + . detail ( " BatchIndex " , batchIndex ) <nl> + . detail ( " GetKeys " , incompleteStagingKeys . size ( ) ) <nl> + . detail ( " DelayTime " , delayTime ) ; <nl> + ASSERT ( ! g_network - > isSimulated ( ) ) ; <nl> + int i = 0 ; <nl> + for ( auto & key : incompleteStagingKeys ) { <nl> + MutationRef m ( MutationRef : : SetValue , key . first , LiteralStringRef ( " 0 " ) ) ; <nl> + key . second - > second . add ( m , LogMessageVersion ( 1 ) ) ; <nl> + key . second - > second . precomputeResult ( " GetAndComputeStagingKeys " , applierID , batchIndex ) ; <nl> + i + + ; <nl> + } <nl> + return Void ( ) ; <nl> + } <nl> + <nl> TraceEvent ( " FastRestoreApplierGetAndComputeStagingKeysStart " , applierID ) <nl> . detail ( " RandomUID " , randomID ) <nl> . detail ( " BatchIndex " , batchIndex ) <nl> ACTOR static Future < Void > applyStagingKeysBatch ( std : : map < Key , StagingKey > : : itera <nl> std : : map < Key , StagingKey > : : iterator end , Database cx , <nl> FlowLock * applyStagingKeysBatchLock , UID applierID , <nl> ApplierBatchData : : Counters * cc ) { <nl> + if ( SERVER_KNOBS - > FASTRESTORE_NOT_WRITE_DB ) { <nl> + TraceEvent ( " FastRestoreApplierPhaseApplyStagingKeysBatchSkipped " , applierID ) . detail ( " Begin " , begin - > first ) ; <nl> + ASSERT ( ! g_network - > isSimulated ( ) ) ; <nl> + return Void ( ) ; <nl> + } <nl> wait ( applyStagingKeysBatchLock - > take ( TaskPriority : : RestoreApplierWriteDB ) ) ; / / Q : Do we really need the lock ?
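// A minimal sketch (hypothetical actor, not part of this commit) of the FlowLock pattern used on the
// surrounding lines: the lock is a counting semaphore that bounds how many applyStagingKeysBatch
// transactions are in flight, and the Releaser hands the permit back on every exit path. Only the
// FlowLock/TaskPriority API already used in this file is assumed.
ACTOR static Future<Void> boundedWorkExample(FlowLock* lock) {
	wait(lock->take(TaskPriority::RestoreApplierWriteDB)); // blocks until one of the lock's permits is free
	state FlowLock::Releaser releaser(*lock); // RAII: the permit is returned on return or on a thrown error
	wait(delay(0.1)); // stand-in for building and committing the actual transaction
	return Void();
}
// The lock's capacity, chosen when the FlowLock is constructed, is what limits concurrent DB writes; the
// FASTRESTORE_NOT_WRITE_DB early returns added above bypass this path entirely so restore throughput can be
// profiled without touching the database.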
<nl> state FlowLock : : Releaser releaser ( * applyStagingKeysBatchLock ) ; <nl> state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ; <nl> ACTOR static Future < Void > applyStagingKeys ( Reference < ApplierBatchData > batchData <nl> fBatches . push_back ( applyStagingKeysBatch ( begin , cur , cx , & batchData - > applyStagingKeysBatchLock , applierID , <nl> & batchData - > counters ) ) ; <nl> batchData - > counters . appliedBytes + = txnSize ; <nl> + batchData - > appliedBytes + = txnSize ; <nl> begin = cur ; <nl> txnSize = 0 ; <nl> txnBatches + + ; <nl> ACTOR static Future < Void > applyStagingKeys ( Reference < ApplierBatchData > batchData <nl> fBatches . push_back ( applyStagingKeysBatch ( begin , cur , cx , & batchData - > applyStagingKeysBatchLock , applierID , <nl> & batchData - > counters ) ) ; <nl> batchData - > counters . appliedBytes + = txnSize ; <nl> + batchData - > appliedBytes + = txnSize ; <nl> txnBatches + + ; <nl> } <nl> <nl> mmm a / fdbserver / RestoreApplier . actor . h <nl> ppp b / fdbserver / RestoreApplier . actor . h <nl> struct ApplierBatchData : public ReferenceCounted < ApplierBatchData > { <nl> <nl> long receiveMutationReqs ; <nl> <nl> + / / Stats <nl> + double receivedBytes ; <nl> + double appliedBytes ; <nl> + <nl> / / Status counters <nl> struct Counters { <nl> CounterCollection cc ; <nl> struct RestoreApplierData : RestoreRoleData , public ReferenceCounted < RestoreAppl <nl> / / even when no version batch has been started . <nl> int getVersionBatchState ( int batchIndex ) final { <nl> std : : map < int , Reference < ApplierBatchData > > : : iterator item = batch . find ( batchIndex ) ; <nl> - if ( item = = batch . end ( ) ) { / / Simply caller ' s effort in when it can call this func . <nl> + if ( item = = batch . end ( ) ) { / / Batch has not been initialized when we blindly profile the state <nl> return ApplierVersionBatchState : : INVALID ; <nl> } else { <nl> return item - > second - > vbState . get ( ) ; <nl> mmm a / fdbserver / RestoreController . actor . cpp <nl> ppp b / fdbserver / RestoreController . actor . cpp <nl> <nl> # include " fdbserver / RestoreApplier . actor . h " <nl> # include " fdbserver / RestoreLoader . actor . h " <nl> <nl> + # include " flow / Platform . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> ACTOR static Future < Void > clearDB ( Database cx ) ; <nl> void splitKeyRangeForAppliers ( Reference < ControllerBatchData > batchData , <nl> ASSERT ( batchData - > samplesSize > = 0 ) ; <nl> / / Sanity check : samples should not be used after freed <nl> ASSERT ( ( batchData - > samplesSize > 0 & & ! batchData - > samples . empty ( ) ) | | <nl> - batchData - > samplesSize = = 0 & & batchData - > samples . empty ( ) ) ; <nl> + ( batchData - > samplesSize = = 0 & & batchData - > samples . empty ( ) ) ) ; <nl> int numAppliers = appliersInterf . size ( ) ; <nl> double slotSize = std : : max ( batchData - > samplesSize / numAppliers , 1 . 0 ) ; <nl> double cumulativeSize = slotSize ; <nl> ACTOR static Future < Version > collectBackupFiles ( Reference < IBackupContainer > bc , <nl> <nl> TraceEvent ( " FastRestoreControllerPhaseCollectBackupFilesStart " ) <nl> . detail ( " TargetVersion " , request . targetVersion ) <nl> - . detail ( " BackupDesc " , desc . toString ( ) ) ; <nl> + . detail ( " BackupDesc " , desc . toString ( ) ) <nl> + . detail ( " UseRangeFile " , SERVER_KNOBS - > FASTRESTORE_USE_RANGE_FILE ) <nl> + . 
detail ( " UseLogFile " , SERVER_KNOBS - > FASTRESTORE_USE_LOG_FILE ) ; <nl> if ( g_network - > isSimulated ( ) ) { <nl> std : : cout < < " Restore to version : " < < request . targetVersion < < " \ nBackupDesc : \ n " < < desc . toString ( ) < < " \ n \ n " ; <nl> } <nl> ACTOR static Future < Version > collectBackupFiles ( Reference < IBackupContainer > bc , <nl> <nl> std : : set < RestoreFileFR > uniqueRangeFiles ; <nl> std : : set < RestoreFileFR > uniqueLogFiles ; <nl> + double rangeSize = 0 ; <nl> + double logSize = 0 ; <nl> * minRangeVersion = MAX_VERSION ; <nl> - for ( const RangeFile & f : restorable . get ( ) . ranges ) { <nl> - TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) . detail ( " RangeFile " , f . toString ( ) ) ; <nl> - if ( f . fileSize < = 0 ) { <nl> - continue ; <nl> + if ( SERVER_KNOBS - > FASTRESTORE_USE_RANGE_FILE ) { <nl> + for ( const RangeFile & f : restorable . get ( ) . ranges ) { <nl> + TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) <nl> + . detail ( " RangeFile " , f . toString ( ) ) ; <nl> + if ( f . fileSize < = 0 ) { <nl> + continue ; <nl> + } <nl> + RestoreFileFR file ( f ) ; <nl> + TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) <nl> + . detail ( " RangeFileFR " , file . toString ( ) ) ; <nl> + uniqueRangeFiles . insert ( file ) ; <nl> + rangeSize + = file . fileSize ; <nl> + * minRangeVersion = std : : min ( * minRangeVersion , file . version ) ; <nl> } <nl> - RestoreFileFR file ( f ) ; <nl> - TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) <nl> - . detail ( " RangeFileFR " , file . toString ( ) ) ; <nl> - uniqueRangeFiles . insert ( file ) ; <nl> - * minRangeVersion = std : : min ( * minRangeVersion , file . version ) ; <nl> } <nl> - for ( const LogFile & f : restorable . get ( ) . logs ) { <nl> - TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) . detail ( " LogFile " , f . toString ( ) ) ; <nl> - if ( f . fileSize < = 0 ) { <nl> - continue ; <nl> + <nl> + if ( SERVER_KNOBS - > FASTRESTORE_USE_LOG_FILE ) { <nl> + for ( const LogFile & f : restorable . get ( ) . logs ) { <nl> + TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) . detail ( " LogFile " , f . toString ( ) ) ; <nl> + if ( f . fileSize < = 0 ) { <nl> + continue ; <nl> + } <nl> + RestoreFileFR file ( f ) ; <nl> + TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) <nl> + . detail ( " LogFileFR " , file . toString ( ) ) ; <nl> + logFiles - > push_back ( file ) ; <nl> + uniqueLogFiles . insert ( file ) ; <nl> + logSize + = file . fileSize ; <nl> } <nl> - RestoreFileFR file ( f ) ; <nl> - TraceEvent ( SevFRDebugInfo , " FastRestoreControllerPhaseCollectBackupFiles " ) . detail ( " LogFileFR " , file . toString ( ) ) ; <nl> - logFiles - > push_back ( file ) ; <nl> - uniqueLogFiles . insert ( file ) ; <nl> } <nl> + <nl> / / Assign unique range files and log files to output <nl> rangeFiles - > assign ( uniqueRangeFiles . begin ( ) , uniqueRangeFiles . end ( ) ) ; <nl> logFiles - > assign ( uniqueLogFiles . begin ( ) , uniqueLogFiles . end ( ) ) ; <nl> ACTOR static Future < Version > collectBackupFiles ( Reference < IBackupContainer > bc , <nl> TraceEvent ( " FastRestoreControllerPhaseCollectBackupFilesDone " ) <nl> . detail ( " BackupDesc " , desc . toString ( ) ) <nl> . detail ( " RangeFiles " , rangeFiles - > size ( ) ) <nl> - . 
detail ( " LogFiles " , logFiles - > size ( ) ) ; <nl> + . detail ( " LogFiles " , logFiles - > size ( ) ) <nl> + . detail ( " RangeFileBytes " , rangeSize ) <nl> + . detail ( " LogFileBytes " , logSize ) <nl> + . detail ( " UseRangeFile " , SERVER_KNOBS - > FASTRESTORE_USE_RANGE_FILE ) <nl> + . detail ( " UseLogFile " , SERVER_KNOBS - > FASTRESTORE_USE_LOG_FILE ) ; <nl> return request . targetVersion ; <nl> } <nl> <nl> ACTOR static Future < Void > initializeVersionBatch ( std : : map < UID , RestoreApplierInt <nl> } <nl> wait ( sendBatchRequests ( & RestoreLoaderInterface : : initVersionBatch , loadersInterf , requestsToLoaders ) ) ; <nl> <nl> - TraceEvent ( " FastRestoreControllerPhaseInitVersionBatchForLoadersDone " ) . detail ( " BatchIndex " , batchIndex ) ; <nl> + TraceEvent ( " FastRestoreControllerPhaseInitVersionBatchForAppliersDone " ) . detail ( " BatchIndex " , batchIndex ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> mmm a / fdbserver / RestoreController . actor . h <nl> ppp b / fdbserver / RestoreController . actor . h <nl> struct RestoreControllerData : RestoreRoleData , public ReferenceCounted < RestoreC <nl> <nl> void dumpVersionBatches ( const std : : map < Version , VersionBatch > & versionBatches ) { <nl> int i = 1 ; <nl> + double rangeFiles = 0 ; <nl> + double rangeSize = 0 ; <nl> + double logFiles = 0 ; <nl> + double logSize = 0 ; <nl> for ( auto & vb : versionBatches ) { <nl> TraceEvent ( " FastRestoreVersionBatches " ) <nl> . detail ( " BatchIndex " , vb . second . batchIndex ) <nl> struct RestoreControllerData : RestoreRoleData , public ReferenceCounted < RestoreC <nl> TraceEvent ( invalidVersion ? SevError : SevInfo , " FastRestoreVersionBatches " ) <nl> . detail ( " BatchIndex " , i ) <nl> . detail ( " RangeFile " , f . toString ( ) ) ; <nl> + rangeSize + = f . fileSize ; <nl> + rangeFiles + + ; <nl> } <nl> for ( auto & f : vb . second . logFiles ) { <nl> bool outOfRange = ( f . beginVersion > = vb . second . endVersion | | f . endVersion < = vb . second . beginVersion ) ; <nl> TraceEvent ( outOfRange ? SevError : SevInfo , " FastRestoreVersionBatches " ) <nl> . detail ( " BatchIndex " , i ) <nl> . detail ( " LogFile " , f . toString ( ) ) ; <nl> + logSize + = f . fileSize ; <nl> + logFiles + + ; <nl> } <nl> + + i ; <nl> } <nl> + <nl> + TraceEvent ( " FastRestoreVersionBatchesSummary " ) <nl> + . detail ( " LogFiles " , logFiles ) <nl> + . detail ( " RangeFiles " , rangeFiles ) <nl> + . detail ( " LogBytes " , logSize ) <nl> + . detail ( " RangeBytes " , rangeSize ) ; <nl> } <nl> <nl> / / Input : Get the size of data in backup files in version range [ prevVersion , nextVersion ) <nl> struct RestoreControllerData : RestoreRoleData , public ReferenceCounted < RestoreC <nl> if ( bcUrl = = url & & bc . isValid ( ) ) { <nl> return ; <nl> } <nl> - printf ( " initBackupContainer , url : % s \ n " , url . toString ( ) . c_str ( ) ) ; <nl> + TraceEvent ( " FastRestoreControllerInitBackupContainer " ) . detail ( " URL " , url ) ; <nl> bcUrl = url ; <nl> bc = IBackupContainer : : openContainer ( url . toString ( ) ) ; <nl> } <nl> mmm a / fdbserver / RestoreLoader . actor . cpp <nl> ppp b / fdbserver / RestoreLoader . actor . cpp <nl> void _parseSerializedMutation ( KeyRangeMap < Version > * pRangeVersions , <nl> } <nl> } <nl> <nl> - cc - > sampledLogBytes + = mutation . totalSize ( ) ; <nl> + cc - > loadedLogBytes + = mutation . totalSize ( ) ; <nl> <nl> TraceEvent ( SevFRMutationInfo , " FastRestoreDecodeLogFile " ) <nl> . 
detail ( " CommitVersion " , commitVersion ) <nl> void _parseSerializedMutation ( KeyRangeMap < Version > * pRangeVersions , <nl> <nl> / / Sampling ( FASTRESTORE_SAMPLING_PERCENT % ) data <nl> if ( deterministicRandom ( ) - > random01 ( ) * 100 < SERVER_KNOBS - > FASTRESTORE_SAMPLING_PERCENT ) { <nl> + cc - > sampledLogBytes + = mutation . totalSize ( ) ; <nl> samples . push_back_deep ( samples . arena ( ) , mutation ) ; <nl> } <nl> ASSERT_WE_THINK ( kLen > = 0 & & kLen < val . size ( ) ) ; <nl> ACTOR static Future < Void > _parseLogFileToMutationsOnLoader ( NotifiedVersion * pPro <nl> <nl> if ( pProcessedFileOffset - > get ( ) = = asset . offset ) { <nl> for ( const KeyValueRef & kv : data ) { <nl> - / / Concatenate the backuped param1 and param2 ( KV ) at the same version . <nl> + / / Concatenate the backup param1 and param2 ( KV ) at the same version . <nl> concatenateBackupMutationForLogFile ( pMutationMap , kv . key , kv . value , asset ) ; <nl> } <nl> pProcessedFileOffset - > set ( asset . offset + asset . len ) ; <nl> mmm a / fdbserver / RestoreLoader . actor . h <nl> ppp b / fdbserver / RestoreLoader . actor . h <nl> struct RestoreLoaderData : RestoreRoleData , public ReferenceCounted < RestoreLoade <nl> <nl> int getVersionBatchState ( int batchIndex ) final { <nl> std : : map < int , Reference < LoaderBatchData > > : : iterator item = batch . find ( batchIndex ) ; <nl> - if ( item ! = batch . end ( ) ) { / / Simply caller ' s effort in when it can call this func . <nl> + if ( item = = batch . end ( ) ) { / / Batch has not been initialized when we blindly profile the state <nl> return LoaderVersionBatchState : : INVALID ; <nl> } else { <nl> return item - > second - > vbState . get ( ) ; <nl> mmm a / fdbserver / RestoreRoleCommon . actor . cpp <nl> ppp b / fdbserver / RestoreRoleCommon . actor . cpp <nl> ACTOR Future < Void > traceProcessMetrics ( Reference < RestoreRoleData > self , std : : str <nl> . detail ( " Role " , role ) <nl> . detail ( " PipelinedMaxVersionBatchIndex " , self - > versionBatchId . get ( ) ) <nl> . detail ( " FinishedVersionBatchIndex " , self - > finishedBatch . get ( ) ) <nl> + . detail ( " CurrentVersionBatchPhase " , self - > getVersionBatchState ( self - > finishedBatch . get ( ) + 1 ) ) <nl> . detail ( " CpuUsage " , self - > cpuUsage ) <nl> . detail ( " UsedMemory " , self - > memory ) <nl> . detail ( " ResidentMemory " , self - > residentMemory ) ; <nl> mmm a / fdbserver / StorageCache . actor . cpp <nl> ppp b / fdbserver / StorageCache . actor . 
cpp <nl> struct CacheRangeInfo : ReferenceCounted < CacheRangeInfo > , NonCopyable { <nl> } <nl> } ; <nl> <nl> - const int VERSION_OVERHEAD = 64 + sizeof ( Version ) + sizeof ( Standalone < VersionUpdateRef > ) + / / mutationLog , 64b overhead for map <nl> - 2 * ( 64 + sizeof ( Version ) + sizeof ( Reference < VersionedMap < KeyRef , <nl> - ValueOrClearToRef > : : PTreeT > ) ) ; / / versioned map [ x2 for createNewVersion ( version + 1 ) ] , 64b overhead for map <nl> + const int VERSION_OVERHEAD = <nl> + 64 + sizeof ( Version ) + sizeof ( Standalone < VerUpdateRef > ) + / / mutationLog , 64b overhead for map <nl> + 2 * ( 64 + sizeof ( Version ) + <nl> + sizeof ( <nl> + Reference < VersionedMap < KeyRef , <nl> + ValueOrClearToRef > : : PTreeT > ) ) ; / / versioned map [ x2 for createNewVersion ( version + 1 ) <nl> + / / ] , 64b overhead for map <nl> static int mvccStorageBytes ( MutationRef const & m ) { return VersionedMap < KeyRef , ValueOrClearToRef > : : overheadPerItem * 2 + ( MutationRef : : OVERHEAD_BYTES + m . param1 . size ( ) + m . param2 . size ( ) ) * 2 ; } <nl> <nl> struct FetchInjectionInfo { <nl> struct StorageCacheData { <nl> VersionedData versionedData ; <nl> / / in - memory mutationLog that the versionedData contains references to <nl> / / TODO change it to a deque , already contains mutations in version order <nl> - std : : map < Version , Standalone < VersionUpdateRef > > mutationLog ; / / versions ( durableVersion , version ] <nl> + std : : map < Version , Standalone < VerUpdateRef > > mutationLog ; / / versions ( durableVersion , version ] <nl> <nl> public : <nl> UID thisServerID ; / / unique id <nl> struct StorageCacheData { <nl> } <nl> <nl> Arena lastArena ; <nl> - std : : map < Version , Standalone < VersionUpdateRef > > const & getMutationLog ( ) const { return mutationLog ; } <nl> - std : : map < Version , Standalone < VersionUpdateRef > > & getMutableMutationLog ( ) { return mutationLog ; } <nl> + std : : map < Version , Standalone < VerUpdateRef > > const & getMutationLog ( ) const { return mutationLog ; } <nl> + std : : map < Version , Standalone < VerUpdateRef > > & getMutableMutationLog ( ) { return mutationLog ; } <nl> VersionedData const & data ( ) const { return versionedData ; } <nl> VersionedData & mutableData ( ) { return versionedData ; } <nl> <nl> - Standalone < VersionUpdateRef > & addVersionToMutationLog ( Version v ) { <nl> + Standalone < VerUpdateRef > & addVersionToMutationLog ( Version v ) { <nl> / / return existing version . . . <nl> auto m = mutationLog . find ( v ) ; <nl> if ( m ! = mutationLog . end ( ) ) <nl> struct StorageCacheData { <nl> return u ; <nl> } <nl> <nl> - MutationRef addMutationToMutationLog ( Standalone < VersionUpdateRef > & mLV , MutationRef const & m ) { <nl> + MutationRef addMutationToMutationLog ( Standalone < VerUpdateRef > & mLV , MutationRef const & m ) { <nl> + / / TODO find out more <nl> + / / byteSampleApplyMutation ( m , mLV . version ) ; <nl> counters . bytesInput + = mvccStorageBytes ( m ) ; <nl> - return mLV . mutations . push_back_deep ( mLV . arena ( ) , m ) ; <nl> + return mLV . push_back_deep ( mLV . arena ( ) , m ) ; <nl> } <nl> - <nl> } ; <nl> void applyMutation ( StorageCacheUpdater * updater , StorageCacheData * data , MutationRef const & mutation , Version version ) ; <nl> <nl> void StorageCacheData : : addMutation ( KeyRangeRef const & cachedKeyRange , Version ve <nl> / / mutableData ( ) . 
printTree ( version ) ; <nl> } <nl> <nl> - void removeDataRange ( StorageCacheData * sc , Standalone < VersionUpdateRef > & mLV , KeyRangeMap < Reference < CacheRangeInfo > > & cacheRanges , KeyRangeRef range ) { <nl> + void removeDataRange ( StorageCacheData * sc , Standalone < VerUpdateRef > & mLV , KeyRangeMap < Reference < CacheRangeInfo > > & cacheRanges , KeyRangeRef range ) { <nl> / / modify the latest version of data to remove all sets and trim all clears to exclude range . <nl> / / Add a clear to mLV ( mutationLog [ data . getLatestVersion ( ) ] ) that ensures all keys in range are removed from the disk when this latest version becomes durable <nl> / / mLV is also modified if necessary to ensure that split clears can be forgotten <nl> mmm a / fdbserver / TLogInterface . h <nl> ppp b / fdbserver / TLogInterface . h <nl> struct TLogConfirmRunningRequest { <nl> } <nl> } ; <nl> <nl> - struct VersionUpdateRef { <nl> - Version version ; <nl> - MutationListRef mutations ; <nl> - bool isPrivateData ; <nl> - <nl> - VersionUpdateRef ( ) : isPrivateData ( false ) , version ( invalidVersion ) { } <nl> - VersionUpdateRef ( Arena & to , const VersionUpdateRef & from ) : version ( from . version ) , mutations ( to , from . mutations ) , isPrivateData ( from . isPrivateData ) { } <nl> - int totalSize ( ) const { return mutations . totalSize ( ) ; } <nl> - int expectedSize ( ) const { return mutations . expectedSize ( ) ; } <nl> - <nl> - template < class Ar > <nl> - void serialize ( Ar & ar ) { <nl> - serializer ( ar , version , mutations , isPrivateData ) ; <nl> - } <nl> - } ; <nl> - <nl> struct VerUpdateRef { <nl> Version version ; <nl> VectorRef < MutationRef > mutations ; <nl> struct VerUpdateRef { <nl> VerUpdateRef ( Arena & to , const VerUpdateRef & from ) : version ( from . version ) , mutations ( to , from . mutations ) , isPrivateData ( from . isPrivateData ) { } <nl> int expectedSize ( ) const { return mutations . expectedSize ( ) ; } <nl> <nl> + MutationRef push_back_deep ( Arena & arena , const MutationRef & m ) { <nl> + mutations . push_back_deep ( arena , m ) ; <nl> + return mutations . back ( ) ; <nl> + } <nl> + <nl> template < class Ar > <nl> void serialize ( Ar & ar ) { <nl> serializer ( ar , version , mutations , isPrivateData ) ; <nl> mmm a / fdbserver / VersionedBTree . actor . cpp <nl> ppp b / fdbserver / VersionedBTree . actor . cpp <nl> class DWALPager : public IPager2 { <nl> public : <nl> typedef FastAllocatedPage Page ; <nl> typedef FIFOQueue < LogicalPageID > LogicalPageQueueT ; <nl> + typedef std : : map < Version , LogicalPageID > VersionToPageMapT ; <nl> + typedef std : : unordered_map < LogicalPageID , VersionToPageMapT > PageToVersionedMapT ; <nl> <nl> # pragma pack ( push , 1 ) <nl> struct DelayedFreePage { <nl> class DWALPager : public IPager2 { <nl> LogicalPageID originalPageID ; <nl> LogicalPageID newPageID ; <nl> <nl> + bool isFree ( ) const { <nl> + return newPageID = = invalidLogicalPageID ; <nl> + } <nl> + <nl> bool operator < ( const RemappedPage & rhs ) { return version < rhs . version ; } <nl> <nl> std : : string toString ( ) const { <nl> class DWALPager : public IPager2 { <nl> / / If the file already exists , pageSize might be different than desiredPageSize <nl> / / Use pageCacheSizeBytes = = 0 to use default from flow knobs <nl> / / If filename is empty , the pager will exist only in memory and once the cache is full writes will fail . 
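// A rough sketch (hypothetical helper, not from this commit) of how the remapCleanupWindow constructor
// parameter introduced in the following hunks is used by remapCleanup(): remapped pages are only unmapped
// once they fall a full window behind the oldest retained version, and a stop request is honored only once
// the queue has been drained to within an allowed lag of that cutoff. The lagFactor parameter stands in for
// SERVER_KNOBS->REDWOOD_REMAP_CLEANUP_LAG.
struct RemapCleanupThresholdsExample {
	Version cutoff; // remap queue entries with version <= cutoff are eligible to be popped
	Version minStopVersion; // cleanup keeps going past a stop request until it reaches this version
};
RemapCleanupThresholdsExample computeRemapThresholds(Version oldestRetainedVersion, Version remapCleanupWindow,
                                                     double lagFactor) {
	RemapCleanupThresholdsExample r;
	r.cutoff = oldestRetainedVersion - remapCleanupWindow;
	r.minStopVersion = r.cutoff - static_cast<Version>(remapCleanupWindow * lagFactor);
	return r;
}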
<nl> - DWALPager ( int desiredPageSize , std : : string filename , int64_t pageCacheSizeBytes , bool memoryOnly = false ) <nl> + DWALPager ( int desiredPageSize , std : : string filename , int64_t pageCacheSizeBytes , Version remapCleanupWindow , bool memoryOnly = false ) <nl> : desiredPageSize ( desiredPageSize ) , filename ( filename ) , pHeader ( nullptr ) , pageCacheBytes ( pageCacheSizeBytes ) , <nl> - memoryOnly ( memoryOnly ) { <nl> + memoryOnly ( memoryOnly ) , remapCleanupWindow ( remapCleanupWindow ) { <nl> <nl> if ( ! g_redwoodMetricsActor . isValid ( ) ) { <nl> g_redwoodMetricsActor = redwoodMetricsLogger ( ) ; <nl> } <nl> <nl> - if ( pageCacheBytes = = 0 ) { <nl> - pageCacheBytes = g_network - > isSimulated ( ) <nl> - ? ( BUGGIFY ? FLOW_KNOBS - > BUGGIFY_SIM_PAGE_CACHE_4K : FLOW_KNOBS - > SIM_PAGE_CACHE_4K ) <nl> - : FLOW_KNOBS - > PAGE_CACHE_4K ; <nl> - } <nl> commitFuture = Void ( ) ; <nl> recoverFuture = forwardError ( recover ( this ) , errorPromise ) ; <nl> } <nl> class DWALPager : public IPager2 { <nl> <nl> Standalone < VectorRef < RemappedPage > > remaps = wait ( self - > remapQueue . peekAll ( ) ) ; <nl> for ( auto & r : remaps ) { <nl> - if ( r . newPageID ! = invalidLogicalPageID ) { <nl> - self - > remappedPages [ r . originalPageID ] [ r . version ] = r . newPageID ; <nl> - } <nl> + self - > remappedPages [ r . originalPageID ] [ r . version ] = r . newPageID ; <nl> } <nl> <nl> / / If the header was recovered from the backup at Page 1 then write and sync it to Page 0 before continuing . <nl> class DWALPager : public IPager2 { <nl> void freePage ( LogicalPageID pageID , Version v ) override { <nl> / / If pageID has been remapped , then it can ' t be freed until all existing remaps for that page have been undone , <nl> / / so queue it for later deletion <nl> - if ( remappedPages . find ( pageID ) ! = remappedPages . end ( ) ) { <nl> + auto i = remappedPages . find ( pageID ) ; <nl> + if ( i ! = remappedPages . end ( ) ) { <nl> debug_printf ( " DWALPager ( % s ) op = freeRemapped % s @ % " PRId64 " oldestVersion = % " PRId64 " \ n " , filename . c_str ( ) , <nl> toString ( pageID ) . c_str ( ) , v , pLastCommittedHeader - > oldestVersion ) ; <nl> remapQueue . pushBack ( RemappedPage { v , pageID , invalidLogicalPageID } ) ; <nl> + i - > second [ v ] = invalidLogicalPageID ; <nl> return ; <nl> } <nl> <nl> class DWALPager : public IPager2 { <nl> debug_printf ( " DWALPager ( % s ) read % s @ % " PRId64 " - > % s \ n " , filename . c_str ( ) , toString ( pageID ) . c_str ( ) , <nl> v , toString ( j - > second ) . c_str ( ) ) ; <nl> pageID = j - > second ; <nl> + ASSERT ( pageID ! = invalidLogicalPageID ) ; <nl> } <nl> } else { <nl> debug_printf ( " DWALPager ( % s ) read % s @ % " PRId64 " ( not remapped ) \ n " , filename . c_str ( ) , <nl> class DWALPager : public IPager2 { <nl> return std : : min ( pLastCommittedHeader - > oldestVersion , snapshots . front ( ) . version ) ; <nl> } <nl> <nl> - ACTOR static Future < Void > remapCopyAndFree ( DWALPager * self , RemappedPage m ) { <nl> - debug_printf ( " DWALPager ( % s ) remapCleanup copyAndFree % s \ n " , self - > filename . c_str ( ) , m . toString ( ) . c_str ( ) ) ; <nl> + ACTOR static Future < Void > remapCopyAndFree ( DWALPager * self , RemappedPage p , VersionToPageMapT * m , VersionToPageMapT : : iterator i ) { <nl> + debug_printf ( " DWALPager ( % s ) remapCleanup copyAndFree % s \ n " , self - > filename . c_str ( ) , p . toString ( ) . 
c_str ( ) ) ; <nl> <nl> / / Read the data from the page that the original was mapped to <nl> - Reference < IPage > data = wait ( self - > readPage ( m . newPageID , false ) ) ; <nl> + Reference < IPage > data = wait ( self - > readPage ( p . newPageID , false ) ) ; <nl> <nl> / / Write the data to the original page so it can be read using its original pageID <nl> - self - > updatePage ( m . originalPageID , data ) ; <nl> + self - > updatePage ( p . originalPageID , data ) ; <nl> + + g_redwoodMetrics . pagerRemapCopy ; <nl> <nl> - / / Remove all remaps for the original page ID up through version <nl> - auto i = self - > remappedPages . find ( m . originalPageID ) ; <nl> - i - > second . erase ( i - > second . begin ( ) , i - > second . upper_bound ( m . version ) ) ; <nl> - / / If the version map for this page is now empty , erase it <nl> - if ( i - > second . empty ( ) ) { <nl> - self - > remappedPages . erase ( i ) ; <nl> - } <nl> - <nl> - / / Now that the remap has been undone nothing will read this page so it can be freed as of the next <nl> - / / commit . <nl> - self - > freeUnmappedPage ( m . newPageID , 0 ) ; <nl> + / / Now that the page data has been copied to the original page , the versioned page map entry is no longer <nl> + / / needed and the new page ID can be freed as of the next commit . <nl> + m - > erase ( i ) ; <nl> + self - > freeUnmappedPage ( p . newPageID , 0 ) ; <nl> + + g_redwoodMetrics . pagerRemapFree ; <nl> <nl> return Void ( ) ; <nl> } <nl> <nl> - ACTOR static Future < Version > getRemapLag ( DWALPager * self ) { <nl> - Optional < RemappedPage > head = wait ( self - > remapQueue . peek ( ) ) ; <nl> - if ( head . present ( ) ) { <nl> - return self - > effectiveOldestVersion ( ) - head . get ( ) . version ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> ACTOR static Future < Void > remapCleanup ( DWALPager * self ) { <nl> + state ActorCollection copies ( true ) ; <nl> + state Promise < Void > signal ; <nl> + copies . add ( signal . getFuture ( ) ) ; <nl> + <nl> self - > remapCleanupStop = false ; <nl> <nl> + / / The oldest retained version cannot change during the cleanup run as this would allow multiple read / copy <nl> + / / operations with the same original page ID destination to be started and they could complete out of order . <nl> + state Version oldestRetainedVersion = self - > effectiveOldestVersion ( ) ; <nl> + <nl> / / Cutoff is the version we can pop to <nl> state RemappedPage cutoff ; <nl> - cutoff . version = self - > effectiveOldestVersion ( ) ; <nl> - <nl> - / / Each page is only updated at most once per version , so in order to coalesce multiple updates <nl> - / / to the same page and skip some page writes we have to accumulate multiple versions worth of <nl> - / / poppable entries . <nl> - Version lag = wait ( getRemapLag ( self ) ) ; <nl> - debug_printf ( " DWALPager ( % s ) remapCleanup versionLag = % " PRId64 " \ n " , self - > filename . c_str ( ) , lag ) ; <nl> - if ( lag < SERVER_KNOBS - > REDWOOD_REMAP_CLEANUP_VERSION_LAG_MIN ) { <nl> - debug_printf ( " DWALPager ( % s ) not starting , lag too low \ n " , self - > filename . c_str ( ) ) ; <nl> - return Void ( ) ; <nl> - } <nl> + cutoff . version = oldestRetainedVersion - self - > remapCleanupWindow ; <nl> <nl> - loop { <nl> - / / Pop up to the pop size limit from the queue , but only keep the latest remap queue entry per <nl> - / / original page ID . This will coalesce multiple remaps of the same LogicalPageID within the <nl> - / / interval of pages being unmapped to a single page copy . 
<nl> - state int toPop = SERVER_KNOBS - > REDWOOD_REMAP_CLEANUP_BATCH_SIZE ; <nl> - state std : : unordered_map < LogicalPageID , RemappedPage > toCopy ; <nl> - toCopy . reserve ( toPop ) ; <nl> - <nl> - / / Take up to batch size pages from front of queue <nl> - while ( toPop > 0 ) { <nl> - state Optional < RemappedPage > p = wait ( self - > remapQueue . pop ( cutoff ) ) ; <nl> - debug_printf ( " DWALPager ( % s ) remapCleanup popped % s \ n " , self - > filename . c_str ( ) , : : toString ( p ) . c_str ( ) ) ; <nl> - if ( ! p . present ( ) ) { <nl> - break ; <nl> - } <nl> + / / Minimum version we must pop to before obeying stop command . <nl> + state Version minStopVersion = cutoff . version - ( self - > remapCleanupWindow * SERVER_KNOBS - > REDWOOD_REMAP_CLEANUP_LAG ) ; <nl> <nl> - / / Get the existing remap entry for the original page , which could be newly initialized <nl> - auto & m = toCopy [ p . get ( ) . originalPageID ] ; <nl> - / / If version is invalid then this is a newly constructed RemappedPage , so copy p . get ( ) over it <nl> - if ( m . version ! = invalidVersion ) { <nl> - ASSERT ( m . version < p . get ( ) . version ) ; <nl> - ASSERT ( m . newPageID ! = invalidLogicalPageID ) ; <nl> - / / We ' re replacing a previously popped item so we can avoid copying it over the original . <nl> - debug_printf ( " DWALPager ( % s ) remapCleanup elided % s \ n " , self - > filename . c_str ( ) , <nl> - m . toString ( ) . c_str ( ) ) ; <nl> - / / The remapped pages entries will be cleaned up below . <nl> - self - > freeUnmappedPage ( m . newPageID , 0 ) ; <nl> - + + g_redwoodMetrics . pagerRemapFree ; <nl> - + + g_redwoodMetrics . pagerRemapSkip ; <nl> - } <nl> - m = p . get ( ) ; <nl> - <nl> - - - toPop ; <nl> + loop { <nl> + state Optional < RemappedPage > p = wait ( self - > remapQueue . pop ( cutoff ) ) ; <nl> + debug_printf ( " DWALPager ( % s ) remapCleanup popped % s \ n " , self - > filename . c_str ( ) , : : toString ( p ) . c_str ( ) ) ; <nl> + if ( ! p . present ( ) ) { <nl> + break ; <nl> } <nl> <nl> - std : : vector < Future < Void > > copies ; <nl> - <nl> - for ( auto & e : toCopy ) { <nl> - const RemappedPage & m = e . second ; <nl> - / / If newPageID is invalid , originalPageID page was freed at version , not remapped <nl> - if ( m . newPageID = = invalidLogicalPageID ) { <nl> - debug_printf ( " DWALPager ( % s ) remapCleanup freeNoCopy % s \ n " , self - > filename . c_str ( ) , <nl> - m . toString ( ) . c_str ( ) ) ; <nl> - self - > remappedPages . erase ( m . originalPageID ) ; <nl> - self - > freeUnmappedPage ( m . originalPageID , 0 ) ; <nl> + / / Get iterator to the versioned page map entry for the original page <nl> + auto iPageMapPair = self - > remappedPages . find ( p . get ( ) . originalPageID ) ; <nl> + / / The iterator must be valid and not empty and its first page map entry must match p ' s version <nl> + ASSERT ( iPageMapPair ! = self - > remappedPages . end ( ) ) ; <nl> + ASSERT ( ! iPageMapPair - > second . empty ( ) ) ; <nl> + auto iVersionPagePair = iPageMapPair - > second . begin ( ) ; <nl> + ASSERT ( iVersionPagePair - > first = = p . get ( ) . version ) ; <nl> + <nl> + / / If this is a free page entry then free the original page ID <nl> + if ( p . get ( ) . isFree ( ) ) { <nl> + debug_printf ( " DWALPager ( % s ) remapCleanup free % s \ n " , self - > filename . c_str ( ) , <nl> + p . get ( ) . toString ( ) . c_str ( ) ) ; <nl> + self - > freeUnmappedPage ( p . get ( ) . originalPageID , 0 ) ; <nl> + + + g_redwoodMetrics . 
pagerRemapFree ; <nl> + <nl> + / / There can ' t be any more entries in the page map after this one so verify that <nl> + / / the map size is 1 and erase the map for p ' s original page ID . <nl> + ASSERT ( iPageMapPair - > second . size ( ) = = 1 ) ; <nl> + self - > remappedPages . erase ( iPageMapPair ) ; <nl> + } <nl> + else { <nl> + / / If there is no next page map entry or there is but it is after the oldest retained version <nl> + / / then p must be copied to unmap it . <nl> + auto iNextVersionPagePair = iVersionPagePair ; <nl> + + + iNextVersionPagePair ; <nl> + if ( iNextVersionPagePair = = iPageMapPair - > second . end ( ) | | iNextVersionPagePair - > first > oldestRetainedVersion ) { <nl> + / / Copy the remapped page to the original so it can be freed . <nl> + copies . add ( remapCopyAndFree ( self , p . get ( ) , & iPageMapPair - > second , iVersionPagePair ) ) ; <nl> + } <nl> + else { <nl> + debug_printf ( " DWALPager ( % s ) remapCleanup skipAndFree % s \ n " , self - > filename . c_str ( ) , p . get ( ) . toString ( ) . c_str ( ) ) ; <nl> + self - > freeUnmappedPage ( p . get ( ) . newPageID , 0 ) ; <nl> + + g_redwoodMetrics . pagerRemapFree ; <nl> - } else { <nl> - copies . push_back ( remapCopyAndFree ( self , m ) ) ; <nl> + + + g_redwoodMetrics . pagerRemapSkip ; <nl> + iPageMapPair - > second . erase ( iVersionPagePair ) ; <nl> } <nl> } <nl> <nl> - wait ( waitForAll ( copies ) ) ; <nl> - <nl> - / / Stop if there was nothing more that could be popped <nl> - if ( toPop > 0 ) { <nl> + / / If the stop flag is set and we ' ve reached the minimum stop version according to the allowed lag then stop . <nl> + if ( self - > remapCleanupStop & & p . get ( ) . version > = minStopVersion ) { <nl> break ; <nl> } <nl> - <nl> - / / If the stop flag is set then stop but only if the remap lag is below the maximum allowed <nl> - if ( self - > remapCleanupStop ) { <nl> - Version lag = wait ( getRemapLag ( self ) ) ; <nl> - if ( lag < = SERVER_KNOBS - > REDWOOD_REMAP_CLEANUP_VERSION_LAG_MAX ) { <nl> - break ; <nl> - } else { <nl> - debug_printf ( " DWALPager ( % s ) remapCleanup refusing to stop , versionLag = % " PRId64 " \ n " , <nl> - self - > filename . c_str ( ) , lag ) ; <nl> - } <nl> - } <nl> } <nl> <nl> debug_printf ( " DWALPager ( % s ) remapCleanup stopped ( stop = % d ) \ n " , self - > filename . c_str ( ) , self - > remapCleanupStop ) ; <nl> + signal . send ( Void ( ) ) ; <nl> + wait ( copies . getResult ( ) ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> class DWALPager : public IPager2 { <nl> Future < int64_t > getUserPageCount ( ) override { <nl> return map ( getUserPageCount_cleanup ( this ) , [ = ] ( Void ) { <nl> int64_t userPages = pHeader - > pageCount - 2 - freeList . numPages - freeList . numEntries - <nl> - delayedFreeList . numPages - delayedFreeList . numEntries - remapQueue . numPages ; <nl> + delayedFreeList . numPages - delayedFreeList . numEntries - remapQueue . numPages <nl> + - remapQueue .
numEntries ; <nl> + <nl> debug_printf ( " DWALPager ( % s ) userPages = % " PRId64 " totalPageCount = % " PRId64 " freeQueuePages = % " PRId64 <nl> " freeQueueCount = % " PRId64 " delayedFreeQueuePages = % " PRId64 " delayedFreeQueueCount = % " PRId64 <nl> " remapQueuePages = % " PRId64 " remapQueueCount = % " PRId64 " \ n " , <nl> class DWALPager : public IPager2 { <nl> DelayedFreePageQueueT delayedFreeList ; <nl> <nl> RemapQueueT remapQueue ; <nl> + Version remapCleanupWindow ; <nl> <nl> struct SnapshotEntry { <nl> Version version ; <nl> class DWALPager : public IPager2 { <nl> } ; <nl> <nl> / / TODO : Better data structure <nl> - std : : unordered_map < LogicalPageID , std : : map < Version , LogicalPageID > > remappedPages ; <nl> + PageToVersionedMapT remappedPages ; <nl> <nl> std : : deque < SnapshotEntry > snapshots ; <nl> } ; <nl> struct RedwoodRecordRef { <nl> inline RedwoodRecordRef withoutValue ( ) const { return RedwoodRecordRef ( key , version ) ; } <nl> <nl> inline RedwoodRecordRef withMaxPageID ( ) const { <nl> - return RedwoodRecordRef ( key , version , StringRef ( ( uint8_t * ) & maxPageID , sizeof ( maxPageID ) ) ) ; <nl> + return RedwoodRecordRef ( key , version , StringRef ( ( uint8_t * ) & maxPageID , sizeof ( maxPageID ) ) ) ; <nl> } <nl> <nl> / / Truncate ( key , version , part ) tuple to len bytes . <nl> class VersionedBTree : public IVersionedStore { <nl> ACTOR static Future < Void > destroyAndCheckSanity_impl ( VersionedBTree * self ) { <nl> ASSERT ( g_network - > isSimulated ( ) ) ; <nl> <nl> - / / This isn ' t pretty but remap cleanup is controlled by knobs and for this test we need the entire remap queue <nl> - / / to be processed . <nl> - const_cast < ServerKnobs * > ( SERVER_KNOBS ) - > REDWOOD_REMAP_CLEANUP_VERSION_LAG_MIN = 0 ; <nl> - const_cast < ServerKnobs * > ( SERVER_KNOBS ) - > REDWOOD_REMAP_CLEANUP_VERSION_LAG_MAX = 0 ; <nl> - <nl> debug_printf ( " Clearing tree . \ n " ) ; <nl> self - > setWriteVersion ( self - > getLatestVersion ( ) + 1 ) ; <nl> self - > clear ( KeyRangeRef ( dbBegin . key , dbEnd . key ) ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> debug_printf ( " move % s ( ) first loop cursor = % s \ n " , forward ? " Next " : " Prev " , self - > toString ( ) . c_str ( ) ) ; <nl> auto & entry = self - > path . back ( ) ; <nl> bool success ; <nl> - if ( entry . cursor . valid ( ) ) { <nl> + if ( entry . cursor . valid ( ) ) { <nl> success = forward ? entry . cursor . moveNext ( ) : entry . cursor . movePrev ( ) ; <nl> } else { <nl> success = forward ? entry . cursor . moveFirst ( ) : false ; <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> KeyValueStoreRedwoodUnversioned ( std : : string filePrefix , UID logID ) <nl> : m_filePrefix ( filePrefix ) , m_concurrentReads ( new FlowLock ( SERVER_KNOBS - > REDWOOD_KVSTORE_CONCURRENT_READS ) ) { <nl> / / TODO : This constructor should really just take an IVersionedStore <nl> - IPager2 * pager = new DWALPager ( SERVER_KNOBS - > REDWOOD_DEFAULT_PAGE_SIZE , filePrefix , 0 ) ; <nl> + <nl> + int64_t pageCacheBytes = g_network - > isSimulated ( ) <nl> + ? ( BUGGIFY ? FLOW_KNOBS - > BUGGIFY_SIM_PAGE_CACHE_4K : FLOW_KNOBS - > SIM_PAGE_CACHE_4K ) <nl> + : FLOW_KNOBS - > PAGE_CACHE_4K ; <nl> + Version remapCleanupWindow = BUGGIFY ? 
deterministicRandom ( ) - > randomInt64 ( 0 , 1000 ) : SERVER_KNOBS - > REDWOOD_REMAP_CLEANUP_WINDOW ; <nl> + <nl> + IPager2 * pager = new DWALPager ( SERVER_KNOBS - > REDWOOD_DEFAULT_PAGE_SIZE , filePrefix , pageCacheBytes , remapCleanupWindow ) ; <nl> m_tree = new VersionedBTree ( pager , filePrefix ) ; <nl> m_init = catchError ( init_impl ( this ) ) ; <nl> } <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> / / Read page contents without using waits <nl> bool isRoot = cur . inRoot ( ) ; <nl> BTreePage : : BinaryTree : : Cursor leafCursor = cur . popPath ( ) ; <nl> - while ( leafCursor . valid ( ) ) { <nl> + while ( leafCursor . valid ( ) ) { <nl> KeyValueRef kv = leafCursor . get ( ) . toKeyValueRef ( ) ; <nl> - if ( kv . key > = keys . end ) { <nl> + if ( kv . key > = keys . end ) { <nl> break ; <nl> } <nl> accumulatedBytes + = kv . expectedSize ( ) ; <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> } <nl> / / Stop if the leaf cursor is still valid which means we hit a key or size limit or <nl> / / if we started in the root page <nl> - if ( leafCursor . valid ( ) | | isRoot ) { <nl> + if ( leafCursor . valid ( ) | | isRoot ) { <nl> break ; <nl> } <nl> wait ( cur . moveNext ( ) ) ; <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> / / Read page contents without using waits <nl> bool isRoot = cur . inRoot ( ) ; <nl> BTreePage : : BinaryTree : : Cursor leafCursor = cur . popPath ( ) ; <nl> - while ( leafCursor . valid ( ) ) { <nl> + while ( leafCursor . valid ( ) ) { <nl> KeyValueRef kv = leafCursor . get ( ) . toKeyValueRef ( ) ; <nl> - if ( kv . key < keys . begin ) { <nl> + if ( kv . key < keys . begin ) { <nl> break ; <nl> } <nl> accumulatedBytes + = kv . expectedSize ( ) ; <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> } <nl> / / Stop if the leaf cursor is still valid which means we hit a key or size limit or <nl> / / if we started in the root page <nl> - if ( leafCursor . valid ( ) | | isRoot ) { <nl> + if ( leafCursor . valid ( ) | | isRoot ) { <nl> break ; <nl> } <nl> wait ( cur . movePrev ( ) ) ; <nl> ACTOR Future < int > seekAll ( VersionedBTree * btree , Version v , <nl> <nl> / / Verify the result of point reads for every set or cleared key at the given version <nl> ACTOR Future < int > seekAllBTreeCursor ( VersionedBTree * btree , Version v , <nl> - std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > * written , int * pErrorCount ) { <nl> + std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > * written , <nl> + int * pErrorCount ) { <nl> state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > : : const_iterator i = written - > cbegin ( ) ; <nl> state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > : : const_iterator iEnd = written - > cend ( ) ; <nl> state int errors = 0 ; <nl> ACTOR Future < int > seekAllBTreeCursor ( VersionedBTree * btree , Version v , <nl> if ( ! foundKey ) { <nl> printf ( " Verify ERROR : key_not_found : ' % s ' - > ' % s ' @ % " PRId64 " \ n " , key . c_str ( ) , <nl> val . get ( ) . c_str ( ) , ver ) ; <nl> - } <nl> - else if ( ! hasValue ) { <nl> + } else if ( ! hasValue ) { <nl> printf ( " Verify ERROR : value_not_found : ' % s ' - > ' % s ' @ % " PRId64 " \ n " , key . c_str ( ) , <nl> val . get ( ) . c_str ( ) , ver ) ; <nl> - } <nl> - else if ( ! valueMatch ) { <nl> + } else if ( ! 
valueMatch ) { <nl> printf ( " Verify ERROR : value_incorrect : for ' % s ' found ' % s ' expected ' % s ' @ % " PRId64 " \ n " , <nl> - key . c_str ( ) , cur . get ( ) . value . get ( ) . toString ( ) . c_str ( ) , val . get ( ) . c_str ( ) , <nl> - ver ) ; <nl> + key . c_str ( ) , cur . get ( ) . value . get ( ) . toString ( ) . c_str ( ) , val . get ( ) . c_str ( ) , ver ) ; <nl> } <nl> } <nl> } else if ( foundKey & & hasValue ) { <nl> + + errors ; <nl> + + * pErrorCount ; <nl> printf ( " Verify ERROR : cleared_key_found : ' % s ' - > ' % s ' @ % " PRId64 " \ n " , key . c_str ( ) , <nl> - cur . get ( ) . value . get ( ) . toString ( ) . c_str ( ) , ver ) ; <nl> + cur . get ( ) . value . get ( ) . toString ( ) . c_str ( ) , ver ) ; <nl> } <nl> } <nl> + + i ; <nl> ACTOR Future < Void > verify ( VersionedBTree * btree , FutureStream < Version > vStream , <nl> state Reference < IStoreCursor > cur = btree - > readAtVersion ( v ) ; <nl> <nl> debug_printf ( " Verifying entire key range at version % " PRId64 " \ n " , v ) ; <nl> - if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> - fRangeAll = verifyRange ( btree , LiteralStringRef ( " " ) , LiteralStringRef ( " \ xff \ xff " ) , v , written , <nl> - pErrorCount ) ; <nl> + if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> + fRangeAll = <nl> + verifyRange ( btree , LiteralStringRef ( " " ) , LiteralStringRef ( " \ xff \ xff " ) , v , written , pErrorCount ) ; <nl> } else { <nl> - fRangeAll = verifyRangeBTreeCursor ( btree , LiteralStringRef ( " " ) , LiteralStringRef ( " \ xff \ xff " ) , v , written , <nl> - pErrorCount ) ; <nl> + fRangeAll = verifyRangeBTreeCursor ( btree , LiteralStringRef ( " " ) , LiteralStringRef ( " \ xff \ xff " ) , v , <nl> + written , pErrorCount ) ; <nl> } <nl> if ( serial ) { <nl> wait ( success ( fRangeAll ) ) ; <nl> ACTOR Future < Void > verify ( VersionedBTree * btree , FutureStream < Version > vStream , <nl> Key end = randomKV ( ) . key ; <nl> debug_printf ( " Verifying range ( % s , % s ) at version % " PRId64 " \ n " , toString ( begin ) . c_str ( ) , <nl> toString ( end ) . c_str ( ) , v ) ; <nl> - if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> + if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> fRangeRandom = verifyRange ( btree , begin , end , v , written , pErrorCount ) ; <nl> } else { <nl> fRangeRandom = verifyRangeBTreeCursor ( btree , begin , end , v , written , pErrorCount ) ; <nl> ACTOR Future < Void > verify ( VersionedBTree * btree , FutureStream < Version > vStream , <nl> } <nl> <nl> debug_printf ( " Verifying seeks to each changed key at version % " PRId64 " \ n " , v ) ; <nl> - if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> + if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> fSeekAll = seekAll ( btree , v , written , pErrorCount ) ; <nl> } else { <nl> fSeekAll = seekAllBTreeCursor ( btree , v , written , pErrorCount ) ; <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> state int maxKeySize = deterministicRandom ( ) - > randomInt ( 1 , pageSize * 2 ) ; <nl> state int maxValueSize = randomSize ( pageSize * 25 ) ; <nl> state int maxCommitSize = shortTest ? 1000 : randomSize ( std : : min < int > ( ( maxKeySize + maxValueSize ) * 20000 , 10e6 ) ) ; <nl> - state int mutationBytesTarget = shortTest ? 100000 : randomSize ( std : : min < int > ( maxCommitSize * 100 , pageSize * 100000 ) ) ; <nl> + state int mutationBytesTarget = <nl> + shortTest ? 
100000 : randomSize ( std : : min < int > ( maxCommitSize * 100 , pageSize * 100000 ) ) ; <nl> state double clearProbability = deterministicRandom ( ) - > random01 ( ) * . 1 ; <nl> state double clearSingleKeyProbability = deterministicRandom ( ) - > random01 ( ) ; <nl> state double clearPostSetProbability = deterministicRandom ( ) - > random01 ( ) * . 1 ; <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> state double maxDuration = 60 ; <nl> state int64_t cacheSizeBytes = <nl> pagerMemoryOnly ? 2e9 : ( BUGGIFY ? deterministicRandom ( ) - > randomInt ( 1 , 10 * pageSize ) : 0 ) ; <nl> + state Version versionIncrement = deterministicRandom ( ) - > randomInt64 ( 1 , 1e8 ) ; <nl> + state Version remapCleanupWindow = deterministicRandom ( ) - > randomInt64 ( 0 , versionIncrement * 50 ) ; <nl> <nl> printf ( " \ n " ) ; <nl> printf ( " pagerMemoryOnly : % d \ n " , pagerMemoryOnly ) ; <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> printf ( " coldStartProbability : % f \ n " , coldStartProbability ) ; <nl> printf ( " advanceOldVersionProbability : % f \ n " , advanceOldVersionProbability ) ; <nl> printf ( " cacheSizeBytes : % s \ n " , cacheSizeBytes = = 0 ? " default " : format ( " % " PRId64 , cacheSizeBytes ) . c_str ( ) ) ; <nl> + printf ( " versionIncrement : % " PRId64 " \ n " , versionIncrement ) ; <nl> + printf ( " remapCleanupWindow : % " PRId64 " \ n " , remapCleanupWindow ) ; <nl> printf ( " \ n " ) ; <nl> <nl> printf ( " Deleting existing test data . . . \ n " ) ; <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> printf ( " Initializing . . . \ n " ) ; <nl> state double startTime = now ( ) ; <nl> <nl> - pager = new DWALPager ( pageSize , pagerFile , cacheSizeBytes , pagerMemoryOnly ) ; <nl> + pager = new DWALPager ( pageSize , pagerFile , cacheSizeBytes , remapCleanupWindow , pagerMemoryOnly ) ; <nl> state VersionedBTree * btree = new VersionedBTree ( pager , pagerFile ) ; <nl> wait ( btree - > init ( ) ) ; <nl> <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> mutationBytesTarget = mutationBytes . get ( ) ; <nl> } <nl> <nl> - / / Sometimes advance the version <nl> + / / Sometimes increment the version <nl> if ( deterministicRandom ( ) - > random01 ( ) < 0 . 10 ) { <nl> + + version ; <nl> btree - > setWriteVersion ( version ) ; <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> / / amount . <nl> if ( deterministicRandom ( ) - > random01 ( ) < advanceOldVersionProbability ) { <nl> btree - > setOldestVersion ( btree - > getLastCommittedVersion ( ) - <nl> - deterministicRandom ( ) - > randomInt ( 0 , btree - > getLastCommittedVersion ( ) - <nl> + deterministicRandom ( ) - > randomInt64 ( 0 , btree - > getLastCommittedVersion ( ) - <nl> btree - > getOldestVersion ( ) + 1 ) ) ; <nl> } <nl> <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> wait ( closedFuture ) ; <nl> <nl> printf ( " Reopening btree from disk . \ n " ) ; <nl> - IPager2 * pager = new DWALPager ( pageSize , pagerFile , 0 ) ; <nl> + IPager2 * pager = new DWALPager ( pageSize , pagerFile , cacheSizeBytes , remapCleanupWindow ) ; <nl> btree = new VersionedBTree ( pager , pagerFile ) ; <nl> wait ( btree - > init ( ) ) ; <nl> <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> randomTask = randomReader ( btree ) | | btree - > getError ( ) ; <nl> } <nl> <nl> - + + version ; <nl> + version + = versionIncrement ; <nl> btree - > setWriteVersion ( version ) ; <nl> } <nl> <nl> TEST_CASE ( " ! 
/ redwood / correctness / pager / cow " ) { <nl> deleteFile ( pagerFile ) ; <nl> <nl> int pageSize = 4096 ; <nl> - state IPager2 * pager = new DWALPager ( pageSize , pagerFile , 0 ) ; <nl> + state IPager2 * pager = new DWALPager ( pageSize , pagerFile , 0 , 0 ) ; <nl> <nl> wait ( success ( pager - > init ( ) ) ) ; <nl> state LogicalPageID id = wait ( pager - > newPageID ( ) ) ; <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> <nl> state int pageSize = SERVER_KNOBS - > REDWOOD_DEFAULT_PAGE_SIZE ; <nl> state int64_t pageCacheBytes = FLOW_KNOBS - > PAGE_CACHE_4K ; <nl> - DWALPager * pager = new DWALPager ( pageSize , pagerFile , pageCacheBytes ) ; <nl> - state VersionedBTree * btree = new VersionedBTree ( pager , pagerFile ) ; <nl> - wait ( btree - > init ( ) ) ; <nl> - <nl> state int nodeCount = 1e9 ; <nl> state int maxRecordsPerCommit = 20000 ; <nl> state int maxKVBytesPerCommit = 20e6 ; <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> state int maxConsecutiveRun = 10 ; <nl> state char firstKeyChar = ' a ' ; <nl> state char lastKeyChar = ' m ' ; <nl> + state Version remapCleanupWindow = SERVER_KNOBS - > REDWOOD_REMAP_CLEANUP_WINDOW ; <nl> <nl> printf ( " pageSize : % d \ n " , pageSize ) ; <nl> printf ( " pageCacheBytes : % " PRId64 " \ n " , pageCacheBytes ) ; <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> printf ( " maxCommitSize : % d \ n " , maxKVBytesPerCommit ) ; <nl> printf ( " kvBytesTarget : % " PRId64 " \ n " , kvBytesTarget ) ; <nl> printf ( " KeyLexicon ' % c ' to ' % c ' \ n " , firstKeyChar , lastKeyChar ) ; <nl> + printf ( " remapCleanupWindow : % " PRId64 " \ n " , remapCleanupWindow ) ; <nl> + <nl> + DWALPager * pager = new DWALPager ( pageSize , pagerFile , pageCacheBytes , remapCleanupWindow ) ; <nl> + state VersionedBTree * btree = new VersionedBTree ( pager , pagerFile ) ; <nl> + wait ( btree - > init ( ) ) ; <nl> <nl> state int64_t kvBytesThisCommit = 0 ; <nl> state int64_t kvBytesTotal = 0 ; <nl> mmm a / fdbserver / WorkerInterface . actor . h <nl> ppp b / fdbserver / WorkerInterface . actor . h <nl> ACTOR Future < Void > storageServer ( IKeyValueStore * persistentData , StorageServerIn <nl> Reference < AsyncVar < ServerDBInfo > > db , std : : string folder , <nl> Promise < Void > recovered , <nl> Reference < ClusterConnectionFile > connFile ) ; / / changes pssi - > id ( ) to be the recovered ID ) ; / / changes pssi - > id ( ) to be the recovered ID <nl> - ACTOR Future < Void > masterServer ( MasterInterface mi , Reference < AsyncVar < ServerDBInfo > > db , <nl> + ACTOR Future < Void > masterServer ( MasterInterface mi , Reference < AsyncVar < ServerDBInfo > > db , Reference < AsyncVar < Optional < ClusterControllerFullInterface > > > ccInterface , <nl> ServerCoordinators serverCoordinators , LifetimeToken lifetime , bool forceRecovery ) ; <nl> ACTOR Future < Void > masterProxyServer ( MasterProxyInterface proxy , InitializeMasterProxyRequest req , <nl> Reference < AsyncVar < ServerDBInfo > > db , std : : string whitelistBinPaths ) ; <nl> mmm a / fdbserver / fdbserver . actor . cpp <nl> ppp b / fdbserver / fdbserver . actor . 
cpp <nl> int main ( int argc , char * argv [ ] ) { <nl> < < FastAllocator < 1024 > : : pageCount < < " " <nl> < < FastAllocator < 2048 > : : pageCount < < " " <nl> < < FastAllocator < 4096 > : : pageCount < < " " <nl> - < < FastAllocator < 8192 > : : pageCount < < std : : endl ; <nl> + < < FastAllocator < 8192 > : : pageCount < < " " <nl> + < < FastAllocator < 16384 > : : pageCount < < std : : endl ; <nl> <nl> vector < std : : pair < std : : string , const char * > > typeNames ; <nl> for ( auto i = allocInstr . begin ( ) ; i ! = allocInstr . end ( ) ; + + i ) { <nl> mmm a / fdbserver / masterserver . actor . cpp <nl> ppp b / fdbserver / masterserver . actor . cpp <nl> <nl> * limitations under the License . <nl> * / <nl> <nl> - # include " flow / ActorCollection . h " <nl> - # include " fdbrpc / PerfMetric . h " <nl> - # include " flow / Trace . h " <nl> - # include " fdbrpc / FailureMonitor . h " <nl> + # include < iterator > <nl> + <nl> # include " fdbclient / NativeAPI . actor . h " <nl> # include " fdbclient / Notified . h " <nl> # include " fdbclient / SystemData . h " <nl> - # include " fdbserver / ConflictSet . h " <nl> - # include " fdbserver / DataDistribution . actor . h " <nl> - # include " fdbserver / Knobs . h " <nl> - # include < iterator > <nl> + # include " fdbrpc / FailureMonitor . h " <nl> + # include " fdbrpc / PerfMetric . h " <nl> + # include " fdbrpc / sim_validation . h " <nl> + # include " fdbserver / ApplyMetadataMutation . h " <nl> # include " fdbserver / BackupProgress . actor . h " <nl> - # include " fdbserver / MasterInterface . h " <nl> - # include " fdbserver / WaitFailure . h " <nl> - # include " fdbserver / WorkerInterface . actor . h " <nl> - # include " fdbserver / ServerDBInfo . h " <nl> + # include " fdbserver / ConflictSet . h " <nl> # include " fdbserver / CoordinatedState . h " <nl> # include " fdbserver / CoordinationInterface . h " / / copy constructors for ServerCoordinators class <nl> - # include " fdbrpc / sim_validation . h " <nl> # include " fdbserver / DBCoreState . h " <nl> + # include " fdbserver / DataDistribution . actor . h " <nl> + # include " fdbserver / IKeyValueStore . h " <nl> + # include " fdbserver / Knobs . h " <nl> # include " fdbserver / LogSystem . h " <nl> # include " fdbserver / LogSystemDiskQueueAdapter . h " <nl> - # include " fdbserver / IKeyValueStore . h " <nl> - # include " fdbserver / ApplyMetadataMutation . h " <nl> + # include " fdbserver / MasterInterface . h " <nl> + # include " fdbserver / ProxyCommitData . actor . h " <nl> # include " fdbserver / RecoveryState . h " <nl> + # include " fdbserver / ServerDBInfo . h " <nl> + # include " fdbserver / WaitFailure . h " <nl> + # include " fdbserver / WorkerInterface . actor . h " <nl> + # include " flow / ActorCollection . h " <nl> + # include " flow / Trace . h " <nl> + <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> using std : : vector ; <nl> ACTOR Future < Void > masterCore ( Reference < MasterData > self ) { <nl> } <nl> } <nl> <nl> - applyMetadataMutations ( self - > dbgid , recoveryCommitRequest . arena , tr . mutations . slice ( mmApplied , tr . mutations . size ( ) ) , self - > txnStateStore , nullptr , nullptr ) ; <nl> + applyMetadataMutations ( self - > dbgid , recoveryCommitRequest . arena , tr . mutations . slice ( mmApplied , tr . mutations . size ( ) ) , <nl> + self - > txnStateStore ) ; <nl> mmApplied = tr . mutations . size ( ) ; <nl> <nl> tr . 
read_snapshot = self - > recoveryTransactionVersion ; / / lastEpochEnd would make more sense , but isn ' t in the initial window of the resolver ( s ) <nl> ACTOR Future < Void > masterCore ( Reference < MasterData > self ) { <nl> throw internal_error ( ) ; <nl> } <nl> <nl> - ACTOR Future < Void > masterServer ( MasterInterface mi , Reference < AsyncVar < ServerDBInfo > > db , ServerCoordinators coordinators , LifetimeToken lifetime , bool forceRecovery ) <nl> + ACTOR Future < Void > masterServer ( MasterInterface mi , Reference < AsyncVar < ServerDBInfo > > db , Reference < AsyncVar < Optional < ClusterControllerFullInterface > > > ccInterface , ServerCoordinators coordinators , LifetimeToken lifetime , bool forceRecovery ) <nl> { <nl> + state Future < Void > ccTimeout = delay ( SERVER_KNOBS - > CC_INTERFACE_TIMEOUT ) ; <nl> + while ( ! ccInterface - > get ( ) . present ( ) | | db - > get ( ) . clusterInterface ! = ccInterface - > get ( ) . get ( ) ) { <nl> + wait ( ccInterface - > onChange ( ) | | db - > onChange ( ) | | ccTimeout ) ; <nl> + if ( ccTimeout . isReady ( ) ) { <nl> + TraceEvent ( " MasterTerminated " , mi . id ( ) ) . detail ( " Reason " , " Timeout " ) <nl> + . detail ( " CCInterface " , ccInterface - > get ( ) . present ( ) ? ccInterface - > get ( ) . get ( ) . id ( ) : UID ( ) ) <nl> + . detail ( " DBInfoInterface " , db - > get ( ) . clusterInterface . id ( ) ) ; <nl> + return Void ( ) ; <nl> + } <nl> + } <nl> + <nl> state Future < Void > onDBChange = Void ( ) ; <nl> state PromiseStream < Future < Void > > addActor ; <nl> state Reference < MasterData > self ( new MasterData ( db , mi , coordinators , db - > get ( ) . clusterInterface , LiteralStringRef ( " " ) , addActor , forceRecovery ) ) ; <nl> mmm a / fdbserver / networktest . actor . cpp <nl> ppp b / fdbserver / networktest . actor . cpp <nl> <nl> # include " fdbserver / NetworkTest . h " <nl> # include " flow / Knobs . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> + # include " flow / ActorCollection . h " <nl> + # include " flow / UnitTest . h " <nl> + # include < inttypes . h > <nl> <nl> UID WLTOKEN_NETWORKTEST ( - 1 , 2 ) ; <nl> <nl> ACTOR Future < Void > networkTestClient ( std : : string testServers ) { <nl> wait ( waitForAll ( clients ) ) ; <nl> return Void ( ) ; <nl> } <nl> + <nl> + struct RandomIntRange { <nl> + int min ; <nl> + int max ; <nl> + <nl> + RandomIntRange ( int low = 0 , int high = 0 ) : min ( low ) , max ( high ) { <nl> + } <nl> + <nl> + / / Accepts strings of the form " min : max " or " N " <nl> + / / where N will be used for both min and max <nl> + RandomIntRange ( std : : string str ) { <nl> + StringRef high = str ; <nl> + StringRef low = high . eat ( " : " ) ; <nl> + if ( high . size ( ) = = 0 ) { <nl> + high = low ; <nl> + } <nl> + min = low . size ( ) = = 0 ? 0 : atol ( low . toString ( ) . c_str ( ) ) ; <nl> + max = high . size ( ) = = 0 ? 0 : atol ( high . toString ( ) . c_str ( ) ) ; <nl> + if ( min > max ) { <nl> + std : : swap ( min , max ) ; <nl> + } <nl> + } <nl> + <nl> + int get ( ) const { <nl> + return ( max = = 0 ) ? 
0 : nondeterministicRandom ( ) - > randomInt ( min , max + 1 ) ; <nl> + } <nl> + <nl> + std : : string toString ( ) const { <nl> + return format ( " % d : % d " , min , max ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct P2PNetworkTest { <nl> + / / Addresses to listen on <nl> + std : : vector < Reference < IListener > > listeners ; <nl> + / / Addresses to randomly connect to <nl> + std : : vector < NetworkAddress > remotes ; <nl> + / / Number of outgoing connections to maintain <nl> + int connectionsOut ; <nl> + / / Message size range to send on outgoing established connections <nl> + RandomIntRange requestBytes ; <nl> + / / Message size to reply with on incoming established connections <nl> + RandomIntRange replyBytes ; <nl> + / / Number of requests / replies per session <nl> + RandomIntRange requests ; <nl> + / / Delay after message send and receive are complete before closing connection <nl> + RandomIntRange idleMilliseconds ; <nl> + / / Random delay before socket reads <nl> + RandomIntRange waitReadMilliseconds ; <nl> + / / Random delay before socket writes <nl> + RandomIntRange waitWriteMilliseconds ; <nl> + <nl> + double startTime ; <nl> + int64_t bytesSent ; <nl> + int64_t bytesReceived ; <nl> + int sessionsIn ; <nl> + int sessionsOut ; <nl> + int connectErrors ; <nl> + int acceptErrors ; <nl> + int sessionErrors ; <nl> + <nl> + Standalone < StringRef > msgBuffer ; <nl> + <nl> + std : : string statsString ( ) { <nl> + double elapsed = now ( ) - startTime ; <nl> + std : : string s = format ( " % . 2f MB / s bytes in % . 2f MB / s bytes out % . 2f / s completed sessions in % . 2f / s completed sessions out " , <nl> + bytesReceived / elapsed / 1e6 , bytesSent / elapsed / 1e6 , sessionsIn / elapsed , sessionsOut / elapsed ) ; <nl> + s + = format ( " Total Errors % d connect = % d accept = % d session = % d " , <nl> + connectErrors + acceptErrors + sessionErrors , connectErrors , acceptErrors , sessionErrors ) ; <nl> + bytesSent = 0 ; <nl> + bytesReceived = 0 ; <nl> + sessionsIn = 0 ; <nl> + sessionsOut = 0 ; <nl> + startTime = now ( ) ; <nl> + return s ; <nl> + } <nl> + <nl> + P2PNetworkTest ( ) { } <nl> + <nl> + P2PNetworkTest ( std : : string listenerAddresses , std : : string remoteAddresses , int connectionsOut , RandomIntRange sendMsgBytes , RandomIntRange recvMsgBytes , RandomIntRange requests , RandomIntRange idleMilliseconds , RandomIntRange waitReadMilliseconds , RandomIntRange waitWriteMilliseconds ) <nl> + : connectionsOut ( connectionsOut ) , requestBytes ( sendMsgBytes ) , replyBytes ( recvMsgBytes ) , requests ( requests ) , idleMilliseconds ( idleMilliseconds ) , waitReadMilliseconds ( waitReadMilliseconds ) , waitWriteMilliseconds ( waitWriteMilliseconds ) { <nl> + bytesSent = 0 ; <nl> + bytesReceived = 0 ; <nl> + sessionsIn = 0 ; <nl> + sessionsOut = 0 ; <nl> + connectErrors = 0 ; <nl> + acceptErrors = 0 ; <nl> + sessionErrors = 0 ; <nl> + msgBuffer = makeString ( std : : max ( sendMsgBytes . max , recvMsgBytes . max ) ) ; <nl> + <nl> + if ( ! remoteAddresses . empty ( ) ) { <nl> + remotes = NetworkAddress : : parseList ( remoteAddresses ) ; <nl> + } <nl> + <nl> + if ( ! listenerAddresses . empty ( ) ) { <nl> + for ( auto a : NetworkAddress : : parseList ( listenerAddresses ) ) { <nl> + listeners . push_back ( INetworkConnections : : net ( ) - > listen ( a ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + NetworkAddress randomRemote ( ) { <nl> + return remotes [ nondeterministicRandom ( ) - > randomInt ( 0 , remotes . 
size ( ) ) ] ; <nl> + } <nl> + <nl> + ACTOR static Future < Standalone < StringRef > > readMsg ( P2PNetworkTest * self , Reference < IConnection > conn ) { <nl> + state Standalone < StringRef > buffer = makeString ( sizeof ( int ) ) ; <nl> + state int writeOffset = 0 ; <nl> + state bool gotHeader = false ; <nl> + <nl> + / / Read a 4 byte length header into buffer first , then reallocate buffer to the payload size <nl> + / / and keep reading until the payload is complete , returning it once fully received . <nl> + loop { <nl> + int stutter = self - > waitReadMilliseconds . get ( ) ; <nl> + if ( stutter > 0 ) { <nl> + wait ( delay ( stutter / 1e3 ) ) ; <nl> + } <nl> + <nl> + int len = conn - > read ( ( uint8_t * ) buffer . begin ( ) + writeOffset , ( uint8_t * ) buffer . end ( ) ) ; <nl> + writeOffset + = len ; <nl> + self - > bytesReceived + = len ; <nl> + <nl> + / / If buffer is complete , either process it as a header or return it <nl> + if ( writeOffset = = buffer . size ( ) ) { <nl> + if ( gotHeader ) { <nl> + return buffer ; <nl> + } else { <nl> + gotHeader = true ; <nl> + int msgSize = * ( int * ) buffer . begin ( ) ; <nl> + if ( msgSize = = 0 ) { <nl> + return Standalone < StringRef > ( ) ; <nl> + } <nl> + buffer = makeString ( msgSize ) ; <nl> + writeOffset = 0 ; <nl> + } <nl> + } <nl> + <nl> + if ( len = = 0 ) { <nl> + wait ( conn - > onReadable ( ) ) ; <nl> + wait ( delay ( 0 , TaskPriority : : ReadSocket ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + ACTOR static Future < Void > writeMsg ( P2PNetworkTest * self , Reference < IConnection > conn , StringRef msg ) { <nl> + state UnsentPacketQueue packets ; <nl> + PacketWriter writer ( packets . getWriteBuffer ( msg . size ( ) ) , nullptr , Unversioned ( ) ) ; <nl> + writer . serializeBinaryItem ( ( int ) msg . size ( ) ) ; <nl> + writer . serializeBytes ( msg ) ; <nl> + <nl> + loop { <nl> + int stutter = self - > waitWriteMilliseconds . get ( ) ; <nl> + if ( stutter > 0 ) { <nl> + wait ( delay ( stutter / 1e3 ) ) ; <nl> + } <nl> + int sent = conn - > write ( packets . getUnsent ( ) , FLOW_KNOBS - > MAX_PACKET_SEND_BYTES ) ; <nl> + <nl> + if ( sent ! = 0 ) { <nl> + self - > bytesSent + = sent ; <nl> + packets . sent ( sent ) ; <nl> + } <nl> + <nl> + if ( packets . empty ( ) ) { <nl> + break ; <nl> + } <nl> + <nl> + wait ( conn - > onWritable ( ) ) ; <nl> + wait ( yield ( TaskPriority : : WriteSocket ) ) ; <nl> + } <nl> + <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + ACTOR static Future < Void > doSession ( P2PNetworkTest * self , Reference < IConnection > conn , bool incoming ) { <nl> + state int numRequests ; <nl> + <nl> + try { <nl> + if ( incoming ) { <nl> + wait ( conn - > acceptHandshake ( ) ) ; <nl> + <nl> + / / Read the number of requests for the session <nl> + Standalone < StringRef > buf = wait ( readMsg ( self , conn ) ) ; <nl> + ASSERT ( buf . size ( ) = = sizeof ( int ) ) ; <nl> + numRequests = * ( int * ) buf . begin ( ) ; <nl> + } else { <nl> + wait ( conn - > connectHandshake ( ) ) ; <nl> + <nl> + / / Pick the number of requests for the session and send it to remote <nl> + numRequests = self - > requests . get ( ) ; <nl> + wait ( writeMsg ( self , conn , StringRef ( ( const uint8_t * ) & numRequests , sizeof ( int ) ) ) ) ; <nl> + } <nl> + <nl> + while ( numRequests > 0 ) { <nl> + if ( incoming ) { <nl> + / / Wait for a request <nl> + wait ( success ( readMsg ( self , conn ) ) ) ; <nl> + / / Send a reply <nl> + wait ( writeMsg ( self , conn , self - > msgBuffer . substr ( 0 , self - > replyBytes . 
get ( ) ) ) ) ; <nl> + } <nl> + else { <nl> + / / Send a request <nl> + wait ( writeMsg ( self , conn , self - > msgBuffer . substr ( 0 , self - > requestBytes . get ( ) ) ) ) ; <nl> + / / Wait for a reply <nl> + wait ( success ( readMsg ( self , conn ) ) ) ; <nl> + } <nl> + <nl> + if ( - - numRequests = = 0 ) { <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + wait ( delay ( self - > idleMilliseconds . get ( ) / 1e3 ) ) ; <nl> + conn - > close ( ) ; <nl> + <nl> + if ( incoming ) { <nl> + + + self - > sessionsIn ; <nl> + } else { <nl> + + + self - > sessionsOut ; <nl> + } <nl> + } catch ( Error & e ) { <nl> + + + self - > sessionErrors ; <nl> + TraceEvent ( SevError , incoming ? " P2PIncomingSessionError " : " P2POutgoingSessionError " ) <nl> + . detail ( " Remote " , conn - > getPeerAddress ( ) ) <nl> + . error ( e ) ; <nl> + } <nl> + <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + ACTOR static Future < Void > outgoing ( P2PNetworkTest * self ) { <nl> + loop { <nl> + wait ( delay ( 0 , TaskPriority : : WriteSocket ) ) ; <nl> + state NetworkAddress remote = self - > randomRemote ( ) ; <nl> + <nl> + try { <nl> + state Reference < IConnection > conn = wait ( INetworkConnections : : net ( ) - > connect ( remote ) ) ; <nl> + / / printf ( " Connected to % s \ n " , remote . toString ( ) . c_str ( ) ) ; <nl> + wait ( doSession ( self , conn , false ) ) ; <nl> + } catch ( Error & e ) { <nl> + + + self - > connectErrors ; <nl> + TraceEvent ( SevError , " P2POutgoingError " ) <nl> + . detail ( " Remote " , remote ) <nl> + . error ( e ) ; <nl> + wait ( delay ( 1 ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + ACTOR static Future < Void > incoming ( P2PNetworkTest * self , Reference < IListener > listener ) { <nl> + state ActorCollection sessions ( false ) ; <nl> + <nl> + loop { <nl> + wait ( delay ( 0 , TaskPriority : : AcceptSocket ) ) ; <nl> + <nl> + try { <nl> + state Reference < IConnection > conn = wait ( listener - > accept ( ) ) ; <nl> + / / printf ( " Connected from % s \ n " , conn - > getPeerAddress ( ) . toString ( ) . c_str ( ) ) ; <nl> + sessions . add ( doSession ( self , conn , true ) ) ; <nl> + } catch ( Error & e ) { <nl> + + + self - > acceptErrors ; <nl> + TraceEvent ( SevError , " P2PIncomingError " ) <nl> + . detail ( " Listener " , listener - > getListenAddress ( ) ) <nl> + . error ( e ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + ACTOR static Future < Void > run_impl ( P2PNetworkTest * self ) { <nl> + state ActorCollection actors ( false ) ; <nl> + <nl> + self - > startTime = now ( ) ; <nl> + <nl> + printf ( " % d listeners , % d remotes , % d outgoing connections \ n " , ( int ) self - > listeners . size ( ) , ( int ) self - > remotes . size ( ) , self - > connectionsOut ) ; <nl> + printf ( " Request size : % s \ n " , self - > requestBytes . toString ( ) . c_str ( ) ) ; <nl> + printf ( " Response size : % s \ n " , self - > replyBytes . toString ( ) . c_str ( ) ) ; <nl> + printf ( " Requests per outgoing session : % s \ n " , self - > requests . toString ( ) . c_str ( ) ) ; <nl> + printf ( " Delay before socket read : % s \ n " , self - > waitReadMilliseconds . toString ( ) . c_str ( ) ) ; <nl> + printf ( " Delay before socket write : % s \ n " , self - > waitWriteMilliseconds . toString ( ) . c_str ( ) ) ; <nl> + printf ( " Delay before session close : % s \ n " , self - > idleMilliseconds . toString ( ) . 
c_str ( ) ) ; <nl> + printf ( " Send / Recv size % d bytes \ n " , FLOW_KNOBS - > MAX_PACKET_SEND_BYTES ) ; <nl> + <nl> + for ( auto n : self - > remotes ) { <nl> + printf ( " Remote : % s \ n " , n . toString ( ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + for ( auto el : self - > listeners ) { <nl> + printf ( " Listener : % s \ n " , el - > getListenAddress ( ) . toString ( ) . c_str ( ) ) ; <nl> + actors . add ( incoming ( self , el ) ) ; <nl> + } <nl> + <nl> + if ( ! self - > remotes . empty ( ) ) { <nl> + for ( int i = 0 ; i < self - > connectionsOut ; + + i ) { <nl> + actors . add ( outgoing ( self ) ) ; <nl> + } <nl> + } <nl> + <nl> + loop { <nl> + wait ( delay ( 1 . 0 , TaskPriority : : Max ) ) ; <nl> + printf ( " % s \ n " , self - > statsString ( ) . c_str ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + Future < Void > run ( ) { <nl> + return run_impl ( this ) ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + int getEnvInt ( const char * name , int defaultValue = 0 ) { <nl> + const char * val = getenv ( name ) ; <nl> + return val ! = nullptr ? atol ( val ) : defaultValue ; <nl> + } <nl> + <nl> + std : : string getEnvStr ( const char * name , std : : string defaultValue = " " ) { <nl> + const char * val = getenv ( name ) ; <nl> + return val ! = nullptr ? val : defaultValue ; <nl> + } <nl> + <nl> + / / TODO : Remove this hacky thing and make a " networkp2ptest " role in fdbserver <nl> + TEST_CASE ( " ! p2ptest " ) { <nl> + state P2PNetworkTest p2p ( <nl> + getEnvStr ( " listenerAddresses " , " " ) , <nl> + getEnvStr ( " remoteAddresses " , " " ) , <nl> + getEnvInt ( " connectionsOut " , 0 ) , <nl> + getEnvStr ( " requestBytes " , " 0 " ) , <nl> + getEnvStr ( " replyBytes " , " 0 " ) , <nl> + getEnvStr ( " requests " , " 0 " ) , <nl> + getEnvStr ( " idleMilliseconds " , " 0 " ) , <nl> + getEnvStr ( " waitReadMilliseconds " , " 0 " ) , <nl> + getEnvStr ( " waitWriteMilliseconds " , " 0 " ) <nl> + ) ; <nl> + <nl> + wait ( p2p . run ( ) ) ; <nl> + return Void ( ) ; <nl> + } <nl> mmm a / fdbserver / storageserver . actor . cpp <nl> ppp b / fdbserver / storageserver . actor . 
cpp <nl> struct StorageServerDisk { <nl> struct StorageServer * data ; <nl> IKeyValueStore * storage ; <nl> <nl> - void writeMutations ( MutationListRef mutations , Version debugVersion , const char * debugContext ) ; <nl> + void writeMutations ( const VectorRef < MutationRef > & mutations , Version debugVersion , const char * debugContext ) ; <nl> <nl> ACTOR static Future < Key > readFirstKey ( IKeyValueStore * storage , KeyRangeRef range ) { <nl> Standalone < RangeResultRef > r = wait ( storage - > readRange ( range , 1 ) ) ; <nl> struct UpdateEagerReadInfo { <nl> } <nl> } ; <nl> <nl> - const int VERSION_OVERHEAD = 64 + sizeof ( Version ) + sizeof ( Standalone < VersionUpdateRef > ) + / / mutationLog , 64b overhead for map <nl> - 2 * ( 64 + sizeof ( Version ) + sizeof ( Reference < VersionedMap < KeyRef , ValueOrClearToRef > : : PTreeT > ) ) ; / / versioned map [ x2 for createNewVersion ( version + 1 ) ] , 64b overhead for map <nl> + const int VERSION_OVERHEAD = <nl> + 64 + sizeof ( Version ) + sizeof ( Standalone < VerUpdateRef > ) + / / mutationLog , 64b overhead for map <nl> + 2 * ( 64 + sizeof ( Version ) + <nl> + sizeof ( Reference < VersionedMap < KeyRef , ValueOrClearToRef > : : PTreeT > ) ) ; / / versioned map [ x2 for <nl> + / / createNewVersion ( version + 1 ) ] , 64b <nl> + / / overhead for map <nl> static int mvccStorageBytes ( MutationRef const & m ) { return VersionedMap < KeyRef , ValueOrClearToRef > : : overheadPerItem * 2 + ( MutationRef : : OVERHEAD_BYTES + m . param1 . size ( ) + m . param2 . size ( ) ) * 2 ; } <nl> <nl> struct FetchInjectionInfo { <nl> struct StorageServer { <nl> / / at older versions may contain older items which are also in storage ( this is OK because of idempotency ) <nl> <nl> VersionedData versionedData ; <nl> - std : : map < Version , Standalone < VersionUpdateRef > > mutationLog ; / / versions ( durableVersion , version ] <nl> + std : : map < Version , Standalone < VerUpdateRef > > mutationLog ; / / versions ( durableVersion , version ] <nl> <nl> public : <nl> Tag tag ; <nl> struct StorageServer { <nl> double cpuUsage ; <nl> double diskUsage ; <nl> <nl> - std : : map < Version , Standalone < VersionUpdateRef > > const & getMutationLog ( ) const { return mutationLog ; } <nl> - std : : map < Version , Standalone < VersionUpdateRef > > & getMutableMutationLog ( ) { return mutationLog ; } <nl> + std : : map < Version , Standalone < VerUpdateRef > > const & getMutationLog ( ) const { return mutationLog ; } <nl> + std : : map < Version , Standalone < VerUpdateRef > > & getMutableMutationLog ( ) { return mutationLog ; } <nl> VersionedData const & data ( ) const { return versionedData ; } <nl> VersionedData & mutableData ( ) { return versionedData ; } <nl> <nl> struct StorageServer { <nl> } <nl> } <nl> <nl> - Standalone < VersionUpdateRef > & addVersionToMutationLog ( Version v ) { <nl> + Standalone < VerUpdateRef > & addVersionToMutationLog ( Version v ) { <nl> / / return existing version . . . <nl> auto m = mutationLog . find ( v ) ; <nl> if ( m ! = mutationLog . end ( ) ) <nl> struct StorageServer { <nl> return u ; <nl> } <nl> <nl> - MutationRef addMutationToMutationLog ( Standalone < VersionUpdateRef > & mLV , MutationRef const & m ) { <nl> + MutationRef addMutationToMutationLog ( Standalone < VerUpdateRef > & mLV , MutationRef const & m ) { <nl> byteSampleApplyMutation ( m , mLV . version ) ; <nl> counters . bytesInput + = mvccStorageBytes ( m ) ; <nl> - return mLV . mutations . push_back_deep ( mLV . arena ( ) , m ) ; <nl> + return mLV . 
push_back_deep ( mLV . arena ( ) , m ) ; <nl> } <nl> <nl> StorageServerDisk storage ; <nl> ACTOR Future < GetKeyValuesReply > readRange ( StorageServer * data , Version version , <nl> while ( vCurrent & & vCurrent . key ( ) < range . end & & ! vCurrent - > isClearTo ( ) & & vCount < limit & & <nl> vSize < * pLimitBytes ) { <nl> / / Store the versionedData results in resultCache <nl> - resultCache . push_back ( result . arena , KeyValueRef ( vCurrent . key ( ) , vCurrent - > getValue ( ) ) ) ; <nl> + resultCache . emplace_back ( result . arena , vCurrent . key ( ) , vCurrent - > getValue ( ) ) ; <nl> vSize + = sizeof ( KeyValueRef ) + resultCache . cback ( ) . expectedSize ( ) ; <nl> + + vCount ; <nl> + + vCurrent ; <nl> ACTOR Future < GetKeyValuesReply > readRange ( StorageServer * data , Version version , <nl> while ( vCurrent & & vCurrent . key ( ) > = range . begin & & ! vCurrent - > isClearTo ( ) & & vCount < - limit & & <nl> vSize < * pLimitBytes ) { <nl> / / Store the versionedData results in resultCache <nl> - resultCache . push_back ( result . arena , KeyValueRef ( vCurrent . key ( ) , vCurrent - > getValue ( ) ) ) ; <nl> + resultCache . emplace_back ( result . arena , vCurrent . key ( ) , vCurrent - > getValue ( ) ) ; <nl> vSize + = sizeof ( KeyValueRef ) + resultCache . cback ( ) . expectedSize ( ) ; <nl> + + vCount ; <nl> - - vCurrent ; <nl> bool changeDurableVersion ( StorageServer * data , Version desiredDurableVersion ) <nl> verData . createNewVersion ( data - > version . get ( ) + 1 ) ; <nl> <nl> int64_t bytesDurable = VERSION_OVERHEAD ; <nl> - for ( auto m = v . mutations . begin ( ) ; m ; + + m ) { <nl> - bytesDurable + = mvccStorageBytes ( * m ) ; <nl> - auto i = verData . atLatest ( ) . find ( m - > param1 ) ; <nl> + for ( const auto & m : v . mutations ) { <nl> + bytesDurable + = mvccStorageBytes ( m ) ; <nl> + auto i = verData . atLatest ( ) . find ( m . param1 ) ; <nl> if ( i ) { <nl> - ASSERT ( i . key ( ) = = m - > param1 ) ; <nl> + ASSERT ( i . key ( ) = = m . param1 ) ; <nl> ASSERT ( i . insertVersion ( ) > = nextDurableVersion ) ; <nl> if ( i . insertVersion ( ) = = nextDurableVersion ) <nl> verData . erase ( i ) ; <nl> } <nl> - if ( m - > type = = MutationRef : : SetValue ) { <nl> + if ( m . type = = MutationRef : : SetValue ) { <nl> / / A set can split a clear , so there might be another entry immediately after this one that should also be cleaned up <nl> - i = verData . atLatest ( ) . upper_bound ( m - > param1 ) ; <nl> + i = verData . atLatest ( ) . upper_bound ( m . param1 ) ; <nl> if ( i ) { <nl> ASSERT ( i . insertVersion ( ) > = nextDurableVersion ) ; <nl> if ( i . insertVersion ( ) = = nextDurableVersion ) <nl> void applyMutation ( StorageServer * self , MutationRef const & m , Arena & arena , Sto <nl> <nl> } <nl> <nl> - void removeDataRange ( StorageServer * ss , Standalone < VersionUpdateRef > & mLV , KeyRangeMap < Reference < ShardInfo > > & shards , KeyRangeRef range ) { <nl> + void removeDataRange ( StorageServer * ss , Standalone < VerUpdateRef > & mLV , KeyRangeMap < Reference < ShardInfo > > & shards , <nl> + KeyRangeRef range ) { <nl> / / modify the latest version of data to remove all sets and trim all clears to exclude range . <nl> / / Add a clear to mLV ( mutationLog [ data . 
getLatestVersion ( ) ] ) that ensures all keys in range are removed from the disk when this latest version becomes durable <nl> / / mLV is also modified if necessary to ensure that split clears can be forgotten <nl> void StorageServer : : addMutation ( Version version , MutationRef const & mutation , Ke <nl> } <nl> <nl> struct OrderByVersion { <nl> - bool operator ( ) ( const VersionUpdateRef & a , const VersionUpdateRef & b ) { <nl> + bool operator ( ) ( const VerUpdateRef & a , const VerUpdateRef & b ) { <nl> if ( a . version ! = b . version ) return a . version < b . version ; <nl> if ( a . isPrivateData ! = b . isPrivateData ) return a . isPrivateData ; <nl> return false ; <nl> void StorageServerDisk : : writeMutation ( MutationRef mutation ) { <nl> ASSERT ( false ) ; <nl> } <nl> <nl> - void StorageServerDisk : : writeMutations ( MutationListRef mutations , Version debugVersion , const char * debugContext ) { <nl> - for ( auto m = mutations . begin ( ) ; m ; + + m ) { <nl> - DEBUG_MUTATION ( debugContext , debugVersion , * m ) . detail ( " UID " , data - > thisServerID ) ; <nl> - if ( m - > type = = MutationRef : : SetValue ) { <nl> - storage - > set ( KeyValueRef ( m - > param1 , m - > param2 ) ) ; <nl> - } else if ( m - > type = = MutationRef : : ClearRange ) { <nl> - storage - > clear ( KeyRangeRef ( m - > param1 , m - > param2 ) ) ; <nl> + void StorageServerDisk : : writeMutations ( const VectorRef < MutationRef > & mutations , Version debugVersion , <nl> + const char * debugContext ) { <nl> + for ( const auto & m : mutations ) { <nl> + DEBUG_MUTATION ( debugContext , debugVersion , m ) . detail ( " UID " , data - > thisServerID ) ; <nl> + if ( m . type = = MutationRef : : SetValue ) { <nl> + storage - > set ( KeyValueRef ( m . param1 , m . param2 ) ) ; <nl> + } else if ( m . type = = MutationRef : : ClearRange ) { <nl> + storage - > clear ( KeyRangeRef ( m . param1 , m . param2 ) ) ; <nl> } <nl> } <nl> } <nl> bool StorageServerDisk : : makeVersionMutationsDurable ( Version & prevStorageVersion <nl> / / Apply mutations from the mutationLog <nl> auto u = data - > getMutationLog ( ) . upper_bound ( prevStorageVersion ) ; <nl> if ( u ! = data - > getMutationLog ( ) . end ( ) & & u - > first < = newStorageVersion ) { <nl> - VersionUpdateRef const & v = u - > second ; <nl> + VerUpdateRef const & v = u - > second ; <nl> ASSERT ( v . version > prevStorageVersion & & v . version < = newStorageVersion ) ; <nl> / / TODO ( alexmiller ) : Update to version tracking . <nl> DEBUG_KEY_RANGE ( " makeVersionMutationsDurable " , v . version , KeyRangeRef ( ) ) ; <nl> writeMutations ( v . mutations , v . version , " makeVersionDurable " ) ; <nl> - for ( auto m = v . mutations . begin ( ) ; m ; + + m ) <nl> - bytesLeft - = mvccStorageBytes ( * m ) ; <nl> + for ( const auto & m : v . mutations ) bytesLeft - = mvccStorageBytes ( m ) ; <nl> prevStorageVersion = v . version ; <nl> return false ; <nl> } else { <nl> mmm a / fdbserver / worker . actor . cpp <nl> ppp b / fdbserver / worker . actor . cpp <nl> ACTOR Future < Void > workerServer ( <nl> DUMPTOKEN ( recruited . notifyBackupWorkerDone ) ; <nl> <nl> / / printf ( " Recruited as masterServer \ n " ) ; <nl> - Future < Void > masterProcess = masterServer ( recruited , dbInfo , ServerCoordinators ( connFile ) , req . lifetime , req . forceRecovery ) ; <nl> + Future < Void > masterProcess = masterServer ( recruited , dbInfo , ccInterface , ServerCoordinators ( connFile ) , req . lifetime , req . forceRecovery ) ; <nl> errorForwarders . 
add ( zombie ( recruited , forwardError ( errors , Role : : MASTER , recruited . id ( ) , masterProcess ) ) ) ; <nl> req . reply . send ( recruited ) ; <nl> } <nl> ACTOR Future < Void > workerServer ( <nl> DUMPTOKEN ( recruited . getKeyServersLocations ) ; <nl> DUMPTOKEN ( recruited . getStorageServerRejoinInfo ) ; <nl> DUMPTOKEN ( recruited . waitFailure ) ; <nl> - DUMPTOKEN ( recruited . getRawCommittedVersion ) ; <nl> DUMPTOKEN ( recruited . txnState ) ; <nl> <nl> / / printf ( " Recruited as masterProxyServer \ n " ) ; <nl> mmm a / fdbserver / workloads / ApiCorrectness . actor . cpp <nl> ppp b / fdbserver / workloads / ApiCorrectness . actor . cpp <nl> <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> / / An enum of API operation types used in the random test <nl> - enum OperationType { <nl> - SET , <nl> - GET , <nl> - GET_RANGE , <nl> - GET_RANGE_SELECTOR , <nl> - GET_KEY , <nl> - CLEAR , <nl> - CLEAR_RANGE <nl> - } ; <nl> + enum OperationType { SET , GET , GET_RANGE , GET_RANGE_SELECTOR , GET_KEY , CLEAR , CLEAR_RANGE , UNINITIALIZED } ; <nl> <nl> / / A workload that executes the NativeAPIs functions and verifies that their outcomes are correct <nl> struct ApiCorrectnessWorkload : ApiWorkload { <nl> struct ApiCorrectnessWorkload : ApiWorkload { <nl> int pdfArray [ ] = { 0 , ( int ) ( 100 * setProbability ) , 100 , 50 , 50 , 20 , ( int ) ( 100 * ( 1 - setProbability ) ) , ( int ) ( 10 * ( 1 - setProbability ) ) } ; <nl> vector < int > pdf = vector < int > ( pdfArray , pdfArray + 8 ) ; <nl> <nl> - OperationType operation ; <nl> + OperationType operation = UNINITIALIZED ; <nl> <nl> / / Choose a random operation type ( SET , GET , GET_RANGE , GET_RANGE_SELECTOR , GET_KEY , CLEAR , CLEAR_RANGE ) . <nl> int totalDensity = 0 ; <nl> struct ApiCorrectnessWorkload : ApiWorkload { <nl> <nl> cumulativeDensity + = pdf [ i ] ; <nl> } <nl> + ASSERT ( operation ! = UNINITIALIZED ) ; <nl> <nl> + + self - > numRandomOperations ; <nl> <nl> mmm a / fdbserver / workloads / AsyncFileCorrectness . actor . cpp <nl> ppp b / fdbserver / workloads / AsyncFileCorrectness . actor . cpp <nl> struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload <nl> { <nl> int64_t maxOffset ; <nl> <nl> - / / Reads should not exceed the extent of written data <nl> - if ( info . operation = = READ ) <nl> - { <nl> + / / Reads should not exceed the extent of written data <nl> + if ( info . operation = = READ ) { <nl> maxOffset = fileSize - 1 ; <nl> - if ( maxOffset < 0 ) <nl> - info . operation = WRITE ; <nl> + if ( maxOffset < 0 ) info . operation = WRITE ; <nl> + / / Only allow reads once the file has gotten large enough ( to prevent blocking on locks ) <nl> + if ( maxOffset < targetFileSize / 2 ) info . operation = WRITE ; <nl> } <nl> <nl> - / / Only allow reads once the file has gotten large enough ( to prevent blocking on locks ) <nl> - if ( maxOffset < targetFileSize / 2 ) <nl> - info . operation = WRITE ; <nl> - <nl> / / Writes can be up to the target file size or the current file size ( the current file size could be larger than the target as a result of a truncate ) <nl> if ( info . operation = = WRITE ) <nl> maxOffset = std : : max ( fileSize , targetFileSize ) - 1 ; <nl> mmm a / fdbserver / workloads / BackupToDBCorrectness . actor . cpp <nl> ppp b / fdbserver / workloads / BackupToDBCorrectness . actor . 
cpp <nl> struct BackupToDBCorrectnessWorkload : TestWorkload { <nl> state Transaction tr3 ( cx ) ; <nl> loop { <nl> try { <nl> + / / Run on the first proxy to ensure data is cleared <nl> + / / when submitting the backup request below . <nl> + tr3 . setOption ( FDBTransactionOptions : : COMMIT_ON_FIRST_PROXY ) ; <nl> for ( auto r : self - > backupRanges ) { <nl> if ( ! r . empty ( ) ) { <nl> tr3 . addReadConflictRange ( r ) ; <nl> mmm a / fdbserver / workloads / Cycle . actor . cpp <nl> ppp b / fdbserver / workloads / Cycle . actor . cpp <nl> struct CycleWorkload : TestWorkload { <nl> } <nl> ACTOR Future < bool > cycleCheck ( Database cx , CycleWorkload * self , bool ok ) { <nl> if ( self - > transactions . getMetric ( ) . value ( ) < self - > testDuration * self - > minExpectedTransactionsPerSecond ) { <nl> - TraceEvent ( SevWarnAlways , " TestFailure " ) . detail ( " Reason " , " Rate below desired rate " ) . detail ( " Details " , format ( " % . 2f " , self - > transactions . getMetric ( ) . value ( ) / ( self - > transactionsPerSecond * self - > testDuration ) ) ) <nl> + TraceEvent ( SevWarnAlways , " TestFailure " ) <nl> + . detail ( " Reason " , " Rate below desired rate " ) <nl> + . detail ( " File " , __FILE__ ) <nl> + . detail ( " Details " , format ( " % . 2f " , self - > transactions . getMetric ( ) . value ( ) / ( self - > transactionsPerSecond * self - > testDuration ) ) ) <nl> . detail ( " TransactionsAchieved " , self - > transactions . getMetric ( ) . value ( ) ) <nl> . detail ( " MinTransactionsExpected " , self - > testDuration * self - > minExpectedTransactionsPerSecond ) <nl> . detail ( " TransactionGoal " , self - > transactionsPerSecond * self - > testDuration ) ; <nl> new file mode 100644 <nl> index 0000000000 . . 4ec5704f6f <nl> mmm / dev / null <nl> ppp b / fdbserver / workloads / Downgrade . actor . cpp <nl> <nl> + / * <nl> + * Downgrade . actor . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " fdbclient / NativeAPI . actor . h " <nl> + # include " fdbserver / TesterInterface . actor . h " <nl> + # include " fdbserver / workloads / workloads . actor . h " <nl> + # include " flow / serialize . h " <nl> + # include " flow / actorcompiler . h " / / This must be the last # include . 
<nl> + <nl> + struct DowngradeWorkload : TestWorkload { <nl> + <nl> + static constexpr const char * NAME = " Downgrade " ; <nl> + Key oldKey , newKey ; <nl> + int numObjects ; <nl> + <nl> + DowngradeWorkload ( WorkloadContext const & wcx ) : TestWorkload ( wcx ) { <nl> + oldKey = getOption ( options , LiteralStringRef ( " oldKey " ) , LiteralStringRef ( " oldKey " ) ) ; <nl> + newKey = getOption ( options , LiteralStringRef ( " newKey " ) , LiteralStringRef ( " newKey " ) ) ; <nl> + numObjects = getOption ( options , LiteralStringRef ( " numOptions " ) , deterministicRandom ( ) - > randomInt ( 0 , 100 ) ) ; <nl> + } <nl> + <nl> + struct _Struct { <nl> + static constexpr FileIdentifier file_identifier = 2340487 ; <nl> + int oldField = 0 ; <nl> + } ; <nl> + <nl> + struct OldStruct : public _Struct { <nl> + void setFields ( ) { oldField = 1 ; } <nl> + bool isSet ( ) const { return oldField = = 1 ; } <nl> + <nl> + template < class Archive > <nl> + void serialize ( Archive & ar ) { <nl> + serializer ( ar , oldField ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct NewStruct : public _Struct { <nl> + int newField = 0 ; <nl> + <nl> + bool isSet ( ) const { <nl> + return oldField = = 1 & & newField = = 2 ; <nl> + } <nl> + void setFields ( ) { <nl> + oldField = 1 ; <nl> + newField = 2 ; <nl> + } <nl> + <nl> + template < class Archive > <nl> + void serialize ( Archive & ar ) { <nl> + serializer ( ar , oldField , newField ) ; <nl> + } <nl> + } ; <nl> + <nl> + ACTOR static Future < Void > writeOld ( Database cx , int numObjects , Key key ) { <nl> + BinaryWriter writer ( IncludeVersion ( currentProtocolVersion ) ) ; <nl> + std : : vector < OldStruct > data ( numObjects ) ; <nl> + for ( auto & oldObject : data ) { <nl> + oldObject . setFields ( ) ; <nl> + } <nl> + writer < < data ; <nl> + state Value value = writer . toValue ( ) ; <nl> + <nl> + state Transaction tr ( cx ) ; <nl> + loop { <nl> + try { <nl> + tr . set ( key , value ) ; <nl> + wait ( tr . commit ( ) ) ; <nl> + return Void ( ) ; <nl> + } catch ( Error & e ) { <nl> + wait ( tr . onError ( e ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + ACTOR static Future < Void > writeNew ( Database cx , int numObjects , Key key ) { <nl> + ProtocolVersion protocolVersion = currentProtocolVersion ; <nl> + protocolVersion . addObjectSerializerFlag ( ) ; <nl> + ObjectWriter writer ( IncludeVersion ( protocolVersion ) ) ; <nl> + std : : vector < NewStruct > data ( numObjects ) ; <nl> + for ( auto & newObject : data ) { <nl> + newObject . setFields ( ) ; <nl> + } <nl> + writer . serialize ( data ) ; <nl> + state Value value = writer . toStringRef ( ) ; <nl> + <nl> + state Transaction tr ( cx ) ; <nl> + loop { <nl> + try { <nl> + tr . set ( key , value ) ; <nl> + wait ( tr . commit ( ) ) ; <nl> + return Void ( ) ; <nl> + } catch ( Error & e ) { <nl> + wait ( tr . onError ( e ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + ACTOR static Future < Void > readData ( Database cx , int numObjects , Key key ) { <nl> + state Transaction tr ( cx ) ; <nl> + state Value value ; <nl> + <nl> + loop { <nl> + try { <nl> + Optional < Value > _value = wait ( tr . get ( key ) ) ; <nl> + ASSERT ( _value . present ( ) ) ; <nl> + value = _value . get ( ) ; <nl> + break ; <nl> + } catch ( Error & e ) { <nl> + wait ( tr . onError ( e ) ) ; <nl> + } <nl> + } <nl> + <nl> + { <nl> + / / use BinaryReader <nl> + BinaryReader reader ( value , IncludeVersion ( ) ) ; <nl> + std : : vector < OldStruct > data ; <nl> + reader > > data ; <nl> + ASSERT ( data . 
size ( ) = = numObjects ) ; <nl> + for ( const auto & oldObject : data ) { <nl> + ASSERT ( oldObject . isSet ( ) ) ; <nl> + } <nl> + } <nl> + { <nl> + / / use ArenaReader <nl> + ArenaReader reader ( Arena ( ) , value , IncludeVersion ( ) ) ; <nl> + std : : vector < OldStruct > data ; <nl> + reader > > data ; <nl> + ASSERT ( data . size ( ) = = numObjects ) ; <nl> + for ( const auto & oldObject : data ) { <nl> + ASSERT ( oldObject . isSet ( ) ) ; <nl> + } <nl> + } <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + std : : string description ( ) override { return NAME ; } <nl> + <nl> + Future < Void > setup ( Database const & cx ) override { <nl> + return clientId ? Void ( ) : ( writeOld ( cx , numObjects , oldKey ) & & writeNew ( cx , numObjects , newKey ) ) ; <nl> + } <nl> + <nl> + Future < Void > start ( Database const & cx ) override { <nl> + return clientId ? Void ( ) : ( readData ( cx , numObjects , oldKey ) & & readData ( cx , numObjects , newKey ) ) ; <nl> + } <nl> + <nl> + Future < bool > check ( Database const & cx ) override { <nl> + / / Failures are checked with assertions <nl> + return true ; <nl> + } <nl> + void getMetrics ( vector < PerfMetric > & m ) override { } <nl> + } ; <nl> + <nl> + WorkloadFactory < DowngradeWorkload > DowngradeWorkloadFactory ( DowngradeWorkload : : NAME ) ; <nl> mmm a / fdbserver / workloads / FuzzApiCorrectness . actor . cpp <nl> ppp b / fdbserver / workloads / FuzzApiCorrectness . actor . cpp <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> unsigned int operationId ; <nl> int64_t maximumTotalData ; <nl> bool specialKeysRelaxed ; <nl> + bool specialKeysWritesEnabled ; <nl> <nl> bool success ; <nl> <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> useSystemKeys = deterministicRandom ( ) - > coinflip ( ) ; <nl> initialKeyDensity = deterministicRandom ( ) - > random01 ( ) ; / / This fraction of keys are present before the first transaction ( and after an unknown result ) <nl> specialKeysRelaxed = deterministicRandom ( ) - > coinflip ( ) ; <nl> + / / Only enable special keys writes when allowed to access system keys <nl> + specialKeysWritesEnabled = useSystemKeys & & deterministicRandom ( ) - > coinflip ( ) ; <nl> <nl> / / See https : / / github . com / apple / foundationdb / issues / 2424 <nl> if ( BUGGIFY ) { <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> . detail ( " ValueSizeRange " , valueSizeRange . second ) <nl> . detail ( " MaxClearSize " , maxClearSize ) <nl> . detail ( " UseSystemKeys " , useSystemKeys ) <nl> - . detail ( " SpecialKeysRelaxed " , specialKeysRelaxed ) ; <nl> + . detail ( " SpecialKeysRelaxed " , specialKeysRelaxed ) <nl> + . detail ( " SpecialKeysWritesEnabled " , specialKeysWritesEnabled ) ; <nl> <nl> TraceEvent ( " RemapEventSeverity " ) . detail ( " TargetEvent " , " LargePacketSent " ) . detail ( " OriginalSeverity " , SevWarnAlways ) . detail ( " NewSeverity " , SevInfo ) ; <nl> TraceEvent ( " RemapEventSeverity " ) . detail ( " TargetEvent " , " LargePacketReceived " ) . detail ( " OriginalSeverity " , SevWarnAlways ) . 
detail ( " NewSeverity " , SevInfo ) ; <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> if ( self - > useSystemKeys ) <nl> tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> if ( self - > specialKeysRelaxed ) tr - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_RELAXED ) ; <nl> + if ( self - > specialKeysWritesEnabled ) <nl> + tr - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> <nl> int end = std : : min ( self - > nodes , i + keysPerBatch ) ; <nl> tr - > clear ( KeyRangeRef ( self - > getKeyForIndex ( i ) , self - > getKeyForIndex ( end ) ) ) ; <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> if ( self - > specialKeysRelaxed ) { <nl> tr - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_RELAXED ) ; <nl> } <nl> + if ( self - > specialKeysWritesEnabled ) { <nl> + tr - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> + } <nl> tr - > addWriteConflictRange ( self - > conflictRange ) ; <nl> <nl> try { <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> key = makeKey ( ) ; <nl> } <nl> value = makeValue ( ) ; <nl> - contract = { <nl> - std : : make_pair ( error_code_key_too_large , ExceptionContract : : requiredIf ( key . size ( ) > ( key . startsWith ( systemKeys . begin ) ? CLIENT_KNOBS - > SYSTEM_KEY_SIZE_LIMIT : CLIENT_KNOBS - > KEY_SIZE_LIMIT ) ) ) , <nl> - std : : make_pair ( error_code_value_too_large , ExceptionContract : : requiredIf ( value . size ( ) > CLIENT_KNOBS - > VALUE_SIZE_LIMIT ) ) , <nl> - std : : make_pair ( error_code_key_outside_legal_range , ExceptionContract : : requiredIf ( <nl> - ( key > = ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) ) ) <nl> - } ; <nl> + contract = { std : : make_pair ( <nl> + error_code_key_too_large , <nl> + ExceptionContract : : requiredIf ( key . size ( ) > ( key . startsWith ( systemKeys . begin ) <nl> + ? CLIENT_KNOBS - > SYSTEM_KEY_SIZE_LIMIT <nl> + : CLIENT_KNOBS - > KEY_SIZE_LIMIT ) ) ) , <nl> + std : : make_pair ( error_code_value_too_large , <nl> + ExceptionContract : : requiredIf ( value . size ( ) > CLIENT_KNOBS - > VALUE_SIZE_LIMIT ) ) , <nl> + std : : make_pair ( error_code_key_outside_legal_range , <nl> + ExceptionContract : : requiredIf ( <nl> + ( key > = ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) & & <nl> + ! specialKeys . contains ( key ) ) ) , <nl> + std : : make_pair ( error_code_special_keys_write_disabled , <nl> + ExceptionContract : : requiredIf ( specialKeys . contains ( key ) & & <nl> + ! workload - > specialKeysWritesEnabled ) ) , <nl> + std : : make_pair ( error_code_special_keys_no_write_module_found , <nl> + ExceptionContract : : possibleIf ( specialKeys . contains ( key ) & & <nl> + workload - > specialKeysWritesEnabled ) ) } ; <nl> } <nl> <nl> void callback ( Reference < ITransaction > tr ) { <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> key1 = makeKey ( ) ; <nl> key2 = makeKey ( ) ; <nl> } <nl> + <nl> + bool isSpecialKeyRange = specialKeys . contains ( key1 ) & & key2 < = specialKeys . end ; <nl> + <nl> contract = { <nl> - std : : make_pair ( error_code_inverted_range , ExceptionContract : : requiredIf ( key1 > key2 ) ) , <nl> - std : : make_pair ( error_code_key_outside_legal_range , ExceptionContract : : requiredIf ( <nl> - ( key1 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) | | <nl> - ( key2 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . 
end ) ) ) ) <nl> + std : : make_pair ( error_code_inverted_range , ExceptionContract : : requiredIf ( key1 > key2 ) ) , <nl> + std : : make_pair ( error_code_key_outside_legal_range , <nl> + ExceptionContract : : requiredIf ( <nl> + ( ( key1 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) | | <nl> + ( key2 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) ) & & <nl> + ! isSpecialKeyRange ) ) , <nl> + std : : make_pair ( error_code_special_keys_write_disabled , <nl> + ExceptionContract : : requiredIf ( isSpecialKeyRange & & ! workload - > specialKeysWritesEnabled ) ) , <nl> + std : : make_pair ( error_code_special_keys_cross_module_clear , <nl> + ExceptionContract : : possibleIf ( isSpecialKeyRange & & workload - > specialKeysWritesEnabled ) ) , <nl> + std : : make_pair ( error_code_special_keys_no_write_module_found , <nl> + ExceptionContract : : possibleIf ( isSpecialKeyRange & & workload - > specialKeysWritesEnabled ) ) <nl> } ; <nl> } <nl> <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> key1 = makeKey ( ) ; <nl> key2 = makeKey ( ) ; <nl> } <nl> + <nl> + bool isSpecialKeyRange = specialKeys . contains ( key1 ) & & key2 < = specialKeys . end ; <nl> + <nl> contract = { <nl> - std : : make_pair ( error_code_inverted_range , ExceptionContract : : requiredIf ( key1 > key2 ) ) , <nl> - std : : make_pair ( error_code_key_outside_legal_range , ExceptionContract : : requiredIf ( <nl> - ( key1 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) | | <nl> - ( key2 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) ) ) <nl> + std : : make_pair ( error_code_inverted_range , ExceptionContract : : requiredIf ( key1 > key2 ) ) , <nl> + std : : make_pair ( error_code_key_outside_legal_range , <nl> + ExceptionContract : : requiredIf ( <nl> + ( ( key1 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) | | <nl> + ( key2 > ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) ) & & <nl> + ! isSpecialKeyRange ) ) , <nl> + std : : make_pair ( error_code_special_keys_write_disabled , <nl> + ExceptionContract : : requiredIf ( isSpecialKeyRange & & ! workload - > specialKeysWritesEnabled ) ) , <nl> + std : : make_pair ( error_code_special_keys_cross_module_clear , <nl> + ExceptionContract : : possibleIf ( isSpecialKeyRange & & workload - > specialKeysWritesEnabled ) ) , <nl> + std : : make_pair ( error_code_special_keys_no_write_module_found , <nl> + ExceptionContract : : possibleIf ( isSpecialKeyRange & & workload - > specialKeysWritesEnabled ) ) <nl> } ; <nl> } <nl> <nl> struct FuzzApiCorrectnessWorkload : TestWorkload { <nl> while ( isProtectedKey ( key ) ) { <nl> key = makeKey ( ) ; <nl> } <nl> - contract = { <nl> - std : : make_pair ( error_code_key_outside_legal_range , ExceptionContract : : requiredIf ( <nl> - key > = ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) ) <nl> - } ; <nl> + contract = { std : : make_pair ( error_code_key_outside_legal_range , <nl> + ExceptionContract : : requiredIf ( <nl> + key > = ( workload - > useSystemKeys ? systemKeys . end : normalKeys . end ) ) ) , <nl> + std : : make_pair ( error_code_special_keys_write_disabled , <nl> + ExceptionContract : : requiredIf ( specialKeys . contains ( key ) & & <nl> + ! workload - > specialKeysWritesEnabled ) ) , <nl> + std : : make_pair ( error_code_special_keys_no_write_module_found , <nl> + ExceptionContract : : possibleIf ( specialKeys . 
contains ( key ) & & <nl> + workload - > specialKeysWritesEnabled ) ) } ; <nl> } <nl> <nl> void callback ( Reference < ITransaction > tr ) { <nl> mmm a / fdbserver / workloads / Increment . actor . cpp <nl> ppp b / fdbserver / workloads / Increment . actor . cpp <nl> struct Increment : TestWorkload { <nl> } <nl> ACTOR Future < bool > incrementCheck ( Database cx , Increment * self , bool ok ) { <nl> if ( self - > transactions . getMetric ( ) . value ( ) < self - > testDuration * self - > minExpectedTransactionsPerSecond ) { <nl> - TraceEvent ( SevWarnAlways , " TestFailure " ) . detail ( " Reason " , " Rate below desired rate " ) . detail ( " Details " , format ( " % . 2f " , self - > transactions . getMetric ( ) . value ( ) / ( self - > transactionsPerSecond * self - > testDuration ) ) ) <nl> + TraceEvent ( SevWarnAlways , " TestFailure " ) <nl> + . detail ( " Reason " , " Rate below desired rate " ) <nl> + . detail ( " File " , __FILE__ ) <nl> + . detail ( " Details " , format ( " % . 2f " , self - > transactions . getMetric ( ) . value ( ) / ( self - > transactionsPerSecond * self - > testDuration ) ) ) <nl> . detail ( " TransactionsAchieved " , self - > transactions . getMetric ( ) . value ( ) ) <nl> . detail ( " MinTransactionsExpected " , self - > testDuration * self - > minExpectedTransactionsPerSecond ) <nl> . detail ( " TransactionGoal " , self - > transactionsPerSecond * self - > testDuration ) ; <nl> mmm a / fdbserver / workloads / RemoveServersSafely . actor . cpp <nl> ppp b / fdbserver / workloads / RemoveServersSafely . actor . cpp <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> return processAddrs ; <nl> } <nl> <nl> + / / Get the list of processes whose ip : port or ip matches netAddrs . <nl> + / / Note : item in netAddrs may be ip ( representing a machine ) or ip : port ( representing a process ) <nl> virtual std : : vector < ISimulator : : ProcessInfo * > getProcesses ( std : : set < AddressExclusion > const & netAddrs ) <nl> { <nl> std : : vector < ISimulator : : ProcessInfo * > processes ; <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> auto processNet = AddressExclusion ( processInfo - > address . ip , processInfo - > address . port ) ; <nl> if ( processAddrs . find ( processNet ) ! = processAddrs . end ( ) ) { <nl> processes . push_back ( processInfo ) ; <nl> - TraceEvent ( " RemoveAndKill " , functionId ) . detail ( " Step " , " getProcessItem " ) . detail ( " ProcessAddress " , processInfo - > address ) . detail ( " Process " , describe ( * processInfo ) ) . detail ( " Failed " , processInfo - > failed ) . detail ( " Excluded " , processInfo - > excluded ) . detail ( " Rebooting " , processInfo - > rebooting ) . detail ( " Protected " , g_simulator . protectedAddresses . count ( processInfo - > address ) ) ; <nl> + TraceEvent ( " RemoveAndKill " , functionId ) <nl> + . detail ( " Step " , " ProcessToKill " ) <nl> + . detail ( " ProcessAddress " , processInfo - > address ) <nl> + . detail ( " Process " , describe ( * processInfo ) ) <nl> + . detail ( " Failed " , processInfo - > failed ) <nl> + . detail ( " Excluded " , processInfo - > excluded ) <nl> + . detail ( " Rebooting " , processInfo - > rebooting ) <nl> + . detail ( " Protected " , g_simulator . protectedAddresses . count ( processInfo - > address ) ) ; <nl> } <nl> else { <nl> - TraceEvent ( " RemoveAndKill " , functionId ) . detail ( " Step " , " getProcessNoItem " ) . detail ( " ProcessAddress " , processInfo - > address ) . detail ( " Process " , describe ( * processInfo ) ) . 
detail ( " Failed " , processInfo - > failed ) . detail ( " Excluded " , processInfo - > excluded ) . detail ( " Rebooting " , processInfo - > rebooting ) . detail ( " Protected " , g_simulator . protectedAddresses . count ( processInfo - > address ) ) ; <nl> + TraceEvent ( " RemoveAndKill " , functionId ) <nl> + . detail ( " Step " , " ProcessNotToKill " ) <nl> + . detail ( " ProcessAddress " , processInfo - > address ) <nl> + . detail ( " Process " , describe ( * processInfo ) ) <nl> + . detail ( " Failed " , processInfo - > failed ) <nl> + . detail ( " Excluded " , processInfo - > excluded ) <nl> + . detail ( " Rebooting " , processInfo - > rebooting ) <nl> + . detail ( " Protected " , g_simulator . protectedAddresses . count ( processInfo - > address ) ) ; <nl> + } <nl> + } <nl> + TraceEvent ( " RemoveAndKill " , functionId ) . detail ( " Step " , " getProcesses " ) <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> / / Identify the largest set of processes which can be killed <nl> int randomIndex ; <nl> bool bCanKillProcess ; <nl> - ISimulator : : ProcessInfo * randomProcess ; <nl> + ISimulator : : ProcessInfo * randomProcess ; <nl> <nl> for ( int killsLeft = killProcArray . size ( ) ; killsLeft > 0 ; killsLeft - - ) <nl> { <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> killableProcesses . push_back ( randomProcess ) ; <nl> killableAddrs . push_back ( AddressExclusion ( randomProcess - > address . ip , randomProcess - > address . port ) ) ; <nl> TraceEvent ( " RemoveAndKill " ) <nl> - . detail ( " Step " , " identifyVictim " ) <nl> + . detail ( " Step " , " IdentifyVictim " ) <nl> . detail ( " VictimCount " , killableAddrs . size ( ) ) <nl> . detail ( " Victim " , randomProcess - > toString ( ) ) <nl> . detail ( " Victims " , describe ( killableAddrs ) ) ; <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> return killableProcesses ; <nl> } <nl> <nl> + / / toKill1 and toKill2 are two random subsets of all processes . If we simply killed every process in toKill1 or toKill2 , <nl> + / / we could take down too many processes at once , leaving the cluster unavailable and stuck . <nl> ACTOR static Future < Void > workloadMain ( RemoveServersSafelyWorkload * self , Database cx , double waitSeconds , <nl> std : : set < AddressExclusion > toKill1 , std : : set < AddressExclusion > toKill2 ) { <nl> wait ( delay ( waitSeconds ) ) ; <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> state std : : vector < ISimulator : : ProcessInfo * > killProcArray ; <nl> state bool bClearedFirst ; <nl> <nl> - TraceEvent ( " RemoveAndKill " ) . detail ( " Step " , " exclude list first " ) . detail ( " ToKill " , describe ( toKill1 ) ) . detail ( " KillTotal " , toKill1 . size ( ) ) . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> + TraceEvent ( " RemoveAndKill " ) <nl> + . detail ( " Step " , " exclude list first " ) <nl> + . detail ( " ToKill " , describe ( toKill1 ) ) <nl> + . detail ( " KillTotal " , toKill1 . size ( ) ) <nl> + . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> + <nl> + / / toKill1 may kill too many servers to make cluster unavailable . <nl> + / / Get the processes in toKill1 that are safe to kill <nl> + killProcArray = self - > protectServers ( toKill1 ) ; <nl> + / / Update the kill networks to the killable processes <nl> + toKill1 = self - > getNetworks ( killProcArray ) ; <nl> + TraceEvent ( " RemoveAndKill " ) <nl> + . detail ( " Step " , " exclude list first " ) <nl> + . 
detail ( " ToKillModified " , describe ( toKill1 ) ) <nl> + . detail ( " KillTotalModified " , toKill1 . size ( ) ) <nl> + . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> + <nl> self - > excludeAddresses ( toKill1 ) ; <nl> <nl> Optional < Void > result = wait ( timeout ( removeAndKill ( self , cx , toKill1 , NULL , false ) , self - > kill1Timeout ) ) ; <nl> <nl> bClearedFirst = result . present ( ) ; <nl> - TraceEvent ( " RemoveAndKill " ) . detail ( " Step " , " excluded list first " ) . detail ( " Excluderesult " , bClearedFirst ? " succeeded " : " failed " ) . detail ( " KillTotal " , toKill1 . size ( ) ) . detail ( " Processes " , killProcArray . size ( ) ) . detail ( " ToKill1 " , describe ( toKill1 ) ) . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> + TraceEvent ( " RemoveAndKill " ) <nl> + . detail ( " Step " , " excluded list first " ) <nl> + . detail ( " ExcludeResult " , bClearedFirst ? " succeeded " : " failed " ) <nl> + . detail ( " KillTotal " , toKill1 . size ( ) ) <nl> + . detail ( " Processes " , killProcArray . size ( ) ) <nl> + . detail ( " ToKill1 " , describe ( toKill1 ) ) <nl> + . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> <nl> / / Include the servers , if unable to exclude <nl> / / Reinclude when buggify is on to increase the surface area of the next set of excludes <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> self - > includeAddresses ( toKill1 ) ; <nl> } <nl> <nl> - / / Get the list of protected servers <nl> + / / toKill2 may kill too many servers to make cluster unavailable . <nl> + / / Get the processes in toKill2 that are safe to kill <nl> killProcArray = self - > protectServers ( toKill2 ) ; <nl> <nl> / / Update the kill networks to the killable processes <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> state UID functionId = nondeterministicRandom ( ) - > randomUniqueID ( ) ; <nl> <nl> / / First clear the exclusion list and exclude the given list <nl> - TraceEvent ( " RemoveAndKill " , functionId ) . detail ( " Step " , " include all " ) . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> + TraceEvent ( " RemoveAndKill " , functionId ) <nl> + . detail ( " Step " , " Including all " ) <nl> + . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) <nl> + . detail ( " MarkExcludeAsFailed " , markExcludeAsFailed ) ; <nl> wait ( includeServers ( cx , vector < AddressExclusion > ( 1 ) ) ) ; <nl> - TraceEvent ( " RemoveAndKill " , functionId ) . detail ( " Step " , " included all " ) . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> + TraceEvent ( " RemoveAndKill " , functionId ) <nl> + . detail ( " Step " , " Included all " ) <nl> + . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) <nl> + . detail ( " MarkExcludeAsFailed " , markExcludeAsFailed ) ; <nl> / / Reinclude the addresses that were excluded , if present <nl> if ( pIncAddrs ) { <nl> self - > includeAddresses ( * pIncAddrs ) ; <nl> struct RemoveServersSafelyWorkload : TestWorkload { <nl> . detail ( " ToKill " , describe ( toKill ) ) <nl> . detail ( " Addresses " , describe ( toKillArray ) ) <nl> . detail ( " FailedAddresses " , describe ( toKillMarkFailedArray ) ) <nl> - . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) ; <nl> + . detail ( " ClusterAvailable " , g_simulator . isAvailable ( ) ) <nl> + . 
detail ( " MarkExcludeAsFailed " , markExcludeAsFailed ) ; <nl> if ( markExcludeAsFailed ) { <nl> wait ( excludeServers ( cx , toKillMarkFailedArray , true ) ) ; <nl> } <nl> mmm a / fdbserver / workloads / ReportConflictingKeys . actor . cpp <nl> ppp b / fdbserver / workloads / ReportConflictingKeys . actor . cpp <nl> struct ReportConflictingKeysWorkload : TestWorkload { <nl> } while ( deterministicRandom ( ) - > random01 ( ) < addWriteConflictRangeProb ) ; <nl> } <nl> <nl> - void emptyConflictingKeysTest ( Reference < ReadYourWritesTransaction > ryw ) { <nl> + void emptyConflictingKeysTest ( const Reference < ReadYourWritesTransaction > & ryw ) { <nl> / / This test is called when you want to make sure there is no conflictingKeys , <nl> / / which means you will get an empty result form getRange ( \ xff \ xff / transaction / conflicting_keys / , <nl> / / \ xff \ xff / transaction / conflicting_keys0 ) <nl> struct ReportConflictingKeysWorkload : TestWorkload { <nl> <nl> ACTOR Future < Void > conflictingClient ( Database cx , ReportConflictingKeysWorkload * self ) { <nl> <nl> - state ReadYourWritesTransaction tr1 ( cx ) ; <nl> - state ReadYourWritesTransaction tr2 ( cx ) ; <nl> + state Reference < ReadYourWritesTransaction > tr1 ( new ReadYourWritesTransaction ( cx ) ) ; <nl> + state Reference < ReadYourWritesTransaction > tr2 ( new ReadYourWritesTransaction ( cx ) ) ; <nl> state std : : vector < KeyRange > readConflictRanges ; <nl> state std : : vector < KeyRange > writeConflictRanges ; <nl> <nl> loop { <nl> try { <nl> / / set the flag for empty key range testing <nl> - tr1 . setOption ( FDBTransactionOptions : : REPORT_CONFLICTING_KEYS ) ; <nl> + tr1 - > setOption ( FDBTransactionOptions : : REPORT_CONFLICTING_KEYS ) ; <nl> / / tr1 should never have conflicting keys , the result should always be empty <nl> - self - > emptyConflictingKeysTest ( Reference < ReadYourWritesTransaction > : : addRef ( & tr1 ) ) ; <nl> + self - > emptyConflictingKeysTest ( tr1 ) ; <nl> <nl> - tr2 . setOption ( FDBTransactionOptions : : REPORT_CONFLICTING_KEYS ) ; <nl> + tr2 - > setOption ( FDBTransactionOptions : : REPORT_CONFLICTING_KEYS ) ; <nl> / / If READ_YOUR_WRITES_DISABLE set , it behaves like native transaction object <nl> / / where overlapped conflict ranges are not merged . <nl> - if ( deterministicRandom ( ) - > coinflip ( ) ) tr1 . setOption ( FDBTransactionOptions : : READ_YOUR_WRITES_DISABLE ) ; <nl> - if ( deterministicRandom ( ) - > coinflip ( ) ) tr2 . setOption ( FDBTransactionOptions : : READ_YOUR_WRITES_DISABLE ) ; <nl> + if ( deterministicRandom ( ) - > coinflip ( ) ) tr1 - > setOption ( FDBTransactionOptions : : READ_YOUR_WRITES_DISABLE ) ; <nl> + if ( deterministicRandom ( ) - > coinflip ( ) ) tr2 - > setOption ( FDBTransactionOptions : : READ_YOUR_WRITES_DISABLE ) ; <nl> / / We have the two tx with same grv , then commit the first <nl> / / If the second one is not able to commit due to conflicts , verify the returned conflicting keys <nl> / / Otherwise , there is no conflicts between tr1 ' s writeConflictRange and tr2 ' s readConflictRange <nl> - Version readVersion = wait ( tr1 . getReadVersion ( ) ) ; <nl> - tr2 . setVersion ( readVersion ) ; <nl> - self - > addRandomReadConflictRange ( & tr1 , nullptr ) ; <nl> - self - > addRandomWriteConflictRange ( & tr1 , & writeConflictRanges ) ; <nl> + Version readVersion = wait ( tr1 - > getReadVersion ( ) ) ; <nl> + tr2 - > setVersion ( readVersion ) ; <nl> + self - > addRandomReadConflictRange ( tr1 . 
getPtr ( ) , nullptr ) ; <nl> + self - > addRandomWriteConflictRange ( tr1 . getPtr ( ) , & writeConflictRanges ) ; <nl> + + self - > commits ; <nl> - wait ( tr1 . commit ( ) ) ; <nl> + wait ( tr1 - > commit ( ) ) ; <nl> + + self - > xacts ; <nl> / / tr1 should never have conflicting keys , test again after the commit <nl> - self - > emptyConflictingKeysTest ( Reference < ReadYourWritesTransaction > : : addRef ( & tr1 ) ) ; <nl> + self - > emptyConflictingKeysTest ( tr1 ) ; <nl> <nl> state bool foundConflict = false ; <nl> try { <nl> - self - > addRandomReadConflictRange ( & tr2 , & readConflictRanges ) ; <nl> - self - > addRandomWriteConflictRange ( & tr2 , nullptr ) ; <nl> + self - > addRandomReadConflictRange ( tr2 . getPtr ( ) , & readConflictRanges ) ; <nl> + self - > addRandomWriteConflictRange ( tr2 . getPtr ( ) , nullptr ) ; <nl> + + self - > commits ; <nl> - wait ( tr2 . commit ( ) ) ; <nl> + wait ( tr2 - > commit ( ) ) ; <nl> + + self - > xacts ; <nl> } catch ( Error & e ) { <nl> if ( e . code ( ) ! = error_code_not_committed ) throw e ; <nl> struct ReportConflictingKeysWorkload : TestWorkload { <nl> / / The getRange here using the special key prefix " \ xff \ xff / transaction / conflicting_keys / " happens <nl> / / locally Thus , the error handling is not needed here <nl> Future < Standalone < RangeResultRef > > conflictingKeyRangesFuture = <nl> - tr2 . getRange ( ckr , CLIENT_KNOBS - > TOO_MANY ) ; <nl> + tr2 - > getRange ( ckr , CLIENT_KNOBS - > TOO_MANY ) ; <nl> ASSERT ( conflictingKeyRangesFuture . isReady ( ) ) ; <nl> + <nl> + tr2 = Reference < ReadYourWritesTransaction > ( new ReadYourWritesTransaction ( cx ) ) ; <nl> + <nl> const Standalone < RangeResultRef > conflictingKeyRanges = conflictingKeyRangesFuture . get ( ) ; <nl> ASSERT ( conflictingKeyRanges . size ( ) & & <nl> ( conflictingKeyRanges . size ( ) < = readConflictRanges . size ( ) * 2 ) ) ; <nl> struct ReportConflictingKeysWorkload : TestWorkload { <nl> } <nl> } catch ( Error & e ) { <nl> state Error e2 = e ; <nl> - wait ( tr1 . onError ( e2 ) ) ; <nl> - wait ( tr2 . onError ( e2 ) ) ; <nl> + wait ( tr1 - > onError ( e2 ) ) ; <nl> + wait ( tr2 - > onError ( e2 ) ) ; <nl> } <nl> readConflictRanges . clear ( ) ; <nl> writeConflictRanges . clear ( ) ; <nl> - tr1 . reset ( ) ; <nl> - tr2 . reset ( ) ; <nl> + tr1 - > reset ( ) ; <nl> + tr2 - > reset ( ) ; <nl> } <nl> } <nl> } ; <nl> mmm a / fdbserver / workloads / SpecialKeySpaceCorrectness . actor . cpp <nl> ppp b / fdbserver / workloads / SpecialKeySpaceCorrectness . actor . cpp <nl> <nl> * limitations under the License . <nl> * / <nl> <nl> + # include " fdbclient / ManagementAPI . actor . h " <nl> # include " fdbclient / NativeAPI . actor . h " <nl> # include " fdbclient / ReadYourWrites . h " <nl> + # include " fdbclient / Schemas . h " <nl> # include " fdbclient / SpecialKeySpace . actor . h " <nl> # include " fdbserver / TesterInterface . actor . h " <nl> # include " fdbserver / workloads / workloads . actor . h " <nl> # include " flow / actorcompiler . h " <nl> <nl> - class SKSCTestImpl : public SpecialKeyRangeBaseImpl { <nl> + class SKSCTestImpl : public SpecialKeyRangeReadImpl { <nl> public : <nl> - explicit SKSCTestImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { } <nl> + explicit SKSCTestImpl ( KeyRangeRef kr ) : SpecialKeyRangeReadImpl ( kr ) { } <nl> virtual Future < Standalone < RangeResultRef > > getRange ( ReadYourWritesTransaction * ryw , KeyRangeRef kr ) const { <nl> ASSERT ( range . 
contains ( kr ) ) ; <nl> auto resultFuture = ryw - > getRange ( kr , CLIENT_KNOBS - > TOO_MANY ) ; <nl> struct SpecialKeySpaceCorrectnessWorkload : TestWorkload { <nl> self - > keys . push_back_deep ( self - > keys . arena ( ) , KeyRangeRef ( startKey , endKey ) ) ; <nl> self - > impls . push_back ( std : : make_shared < SKSCTestImpl > ( KeyRangeRef ( startKey , endKey ) ) ) ; <nl> / / Although there are already ranges registered , the testing range will replace them <nl> - cx - > specialKeySpace - > registerKeyRange ( SpecialKeySpace : : MODULE : : TESTONLY , self - > keys . back ( ) , <nl> + cx - > specialKeySpace - > registerKeyRange ( SpecialKeySpace : : MODULE : : TESTONLY , <nl> + SpecialKeySpace : : IMPLTYPE : : READONLY , self - > keys . back ( ) , <nl> self - > impls . back ( ) . get ( ) ) ; <nl> / / generate keys in each key range <nl> int keysInRange = deterministicRandom ( ) - > randomInt ( self - > minKeysPerRange , self - > maxKeysPerRange + 1 ) ; <nl> struct SpecialKeySpaceCorrectnessWorkload : TestWorkload { <nl> } <nl> ACTOR Future < Void > _start ( Database cx , SpecialKeySpaceCorrectnessWorkload * self ) { <nl> testRywLifetime ( cx ) ; <nl> - wait ( timeout ( self - > testModuleRangeReadErrors ( cx , self ) & & self - > getRangeCallActor ( cx , self ) & & <nl> - testConflictRanges ( cx , / * read * / true , self ) & & testConflictRanges ( cx , / * read * / false , self ) , <nl> + wait ( timeout ( self - > testSpecialKeySpaceErrors ( cx , self ) & & self - > getRangeCallActor ( cx , self ) & & <nl> + testConflictRanges ( cx , / * read * / true , self ) & & testConflictRanges ( cx , / * read * / false , self ) & & <nl> + self - > managementApiCorrectnessActor ( cx , self ) , <nl> self - > testDuration , Void ( ) ) ) ; <nl> return Void ( ) ; <nl> } <nl> struct SpecialKeySpaceCorrectnessWorkload : TestWorkload { <nl> return GetRangeLimits ( rowLimits , byteLimits ) ; <nl> } <nl> <nl> - ACTOR Future < Void > testModuleRangeReadErrors ( Database cx_ , SpecialKeySpaceCorrectnessWorkload * self ) { <nl> + ACTOR Future < Void > testSpecialKeySpaceErrors ( Database cx_ , SpecialKeySpaceCorrectnessWorkload * self ) { <nl> Database cx = cx_ - > clone ( ) ; <nl> state Reference < ReadYourWritesTransaction > tx = Reference ( new ReadYourWritesTransaction ( cx ) ) ; <nl> / / begin key outside module range <nl> struct SpecialKeySpaceCorrectnessWorkload : TestWorkload { <nl> } catch ( Error & e ) { <nl> throw ; <nl> } <nl> + / / Errors introduced by SpecialKeyRangeRWImpl <nl> + / / Writes are disabled by default <nl> + try { <nl> + tx - > set ( LiteralStringRef ( " \ xff \ xff / I_am_not_a_range_can_be_written " ) , ValueRef ( ) ) ; <nl> + } catch ( Error & e ) { <nl> + if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> + ASSERT ( e . code ( ) = = error_code_special_keys_write_disabled ) ; <nl> + tx - > reset ( ) ; <nl> + } <nl> + / / The special key is not in a range that can be called with set <nl> + try { <nl> + tx - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> + tx - > set ( LiteralStringRef ( " \ xff \ xff / I_am_not_a_range_can_be_written " ) , ValueRef ( ) ) ; <nl> + ASSERT ( false ) ; <nl> + } catch ( Error & e ) { <nl> + if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> + ASSERT ( e . 
code ( ) = = error_code_special_keys_no_write_module_found ) ; <nl> + tx - > reset ( ) ; <nl> + } <nl> + / / A clear cross two ranges are forbidden <nl> + try { <nl> + tx - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> + tx - > clear ( KeyRangeRef ( SpecialKeySpace : : getManamentApiCommandRange ( " exclude " ) . begin , <nl> + SpecialKeySpace : : getManamentApiCommandRange ( " failed " ) . end ) ) ; <nl> + ASSERT ( false ) ; <nl> + } catch ( Error & e ) { <nl> + if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> + ASSERT ( e . code ( ) = = error_code_special_keys_cross_module_clear ) ; <nl> + tx - > reset ( ) ; <nl> + } <nl> + / / Management api error , and error message shema check <nl> + try { <nl> + tx - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> + tx - > set ( LiteralStringRef ( " Invalid_Network_Address " ) <nl> + . withPrefix ( SpecialKeySpace : : getManagementApiCommandPrefix ( " exclude " ) ) , <nl> + ValueRef ( ) ) ; <nl> + wait ( tx - > commit ( ) ) ; <nl> + ASSERT ( false ) ; <nl> + } catch ( Error & e ) { <nl> + if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> + ASSERT ( e . code ( ) = = error_code_special_keys_api_failure ) ; <nl> + Optional < Value > errorMsg = <nl> + wait ( tx - > get ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : ERRORMSG ) . begin ) ) ; <nl> + ASSERT ( errorMsg . present ( ) ) ; <nl> + std : : string errorStr ; <nl> + auto valueObj = readJSONStrictly ( errorMsg . get ( ) . toString ( ) ) . get_obj ( ) ; <nl> + auto schema = readJSONStrictly ( JSONSchemas : : managementApiErrorSchema . toString ( ) ) . get_obj ( ) ; <nl> + / / special_key_space_management_api_error_msg schema validation <nl> + ASSERT ( schemaMatch ( schema , valueObj , errorStr , SevError , true ) ) ; <nl> + tx - > reset ( ) ; <nl> + } <nl> <nl> return Void ( ) ; <nl> } <nl> struct SpecialKeySpaceCorrectnessWorkload : TestWorkload { <nl> } <nl> return Void ( ) ; <nl> } <nl> + <nl> + ACTOR Future < Void > managementApiCorrectnessActor ( Database cx_ , SpecialKeySpaceCorrectnessWorkload * self ) { <nl> + / / All management api related tests <nl> + Database cx = cx_ - > clone ( ) ; <nl> + state Reference < ReadYourWritesTransaction > tx = Reference ( new ReadYourWritesTransaction ( cx ) ) ; <nl> + / / test ordered option keys <nl> + { <nl> + tx - > setOption ( FDBTransactionOptions : : SPECIAL_KEY_SPACE_ENABLE_WRITES ) ; <nl> + for ( const std : : string & option : SpecialKeySpace : : getManagementApiOptionsSet ( ) ) { <nl> + tx - > set ( LiteralStringRef ( " options / " ) <nl> + . withPrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . begin ) <nl> + . withSuffix ( option ) , <nl> + ValueRef ( ) ) ; <nl> + } <nl> + Standalone < RangeResultRef > res = wait ( tx - > getRange ( <nl> + KeyRangeRef ( LiteralStringRef ( " options / " ) , LiteralStringRef ( " options0 " ) ) <nl> + . withPrefix ( SpecialKeySpace : : getModuleRange ( SpecialKeySpace : : MODULE : : MANAGEMENT ) . begin ) , <nl> + CLIENT_KNOBS - > TOO_MANY ) ) ; <nl> + ASSERT ( res . size ( ) = = SpecialKeySpace : : getManagementApiOptionsSet ( ) . size ( ) ) ; <nl> + for ( int i = 0 ; i < res . size ( ) - 1 ; + + i ) ASSERT ( res [ i ] . key < res [ i + 1 ] . 
key ) ; <nl> + tx - > reset ( ) ; <nl> + } <nl> + return Void ( ) ; <nl> + } <nl> } ; <nl> <nl> WorkloadFactory < SpecialKeySpaceCorrectnessWorkload > SpecialKeySpaceCorrectnessFactory ( " SpecialKeySpaceCorrectness " ) ; <nl> mmm a / fdbserver / workloads / TriggerRecovery . actor . cpp <nl> ppp b / fdbserver / workloads / TriggerRecovery . actor . cpp <nl> struct TriggerRecoveryLoopWorkload : TestWorkload { <nl> address_interface [ ip_port ] = it . value ; <nl> } <nl> for ( auto it : address_interface ) { <nl> - tr . set ( LiteralStringRef ( " \ xff \ xff / reboot_worker " ) , it . second ) ; <nl> + if ( cx - > apiVersionAtLeast ( 700 ) ) <nl> + BinaryReader : : fromStringRef < ClientWorkerInterface > ( it . second , IncludeVersion ( ) ) <nl> + . reboot . send ( RebootRequest ( ) ) ; <nl> + else <nl> + tr . set ( LiteralStringRef ( " \ xff \ xff / reboot_worker " ) , it . second ) ; <nl> } <nl> TraceEvent ( SevInfo , " TriggerRecoveryLoop_AttempedKillAll " ) ; <nl> return Void ( ) ; <nl> mmm a / flow / Arena . cpp <nl> ppp b / flow / Arena . cpp <nl> void testIteratorIncrement ( ) { <nl> int i = 0 ; <nl> for ( auto iter = xs . begin ( ) ; iter < xs . end ( ) ; ) { <nl> ASSERT ( * iter = = StringRef ( std : : to_string ( i ) ) ) ; <nl> - iter + = 2 ; <nl> - i + = 2 ; <nl> + iter + = 1 ; <nl> + i + = 1 ; <nl> } <nl> } <nl> - { <nl> + if ( size > 0 ) { <nl> int i = xs . size ( ) - 1 ; <nl> for ( auto iter = xs . end ( ) - 1 ; iter > = xs . begin ( ) ; ) { <nl> ASSERT ( * iter = = StringRef ( std : : to_string ( i ) ) ) ; <nl> - iter - = 2 ; <nl> - i - = 2 ; <nl> + iter - = 1 ; <nl> + i - = 1 ; <nl> } <nl> } <nl> { <nl> int i = 0 ; <nl> for ( auto iter = xs . begin ( ) ; iter < xs . end ( ) ; ) { <nl> ASSERT ( * iter = = StringRef ( std : : to_string ( i ) ) ) ; <nl> - iter = iter + 2 ; <nl> - i + = 2 ; <nl> + iter = iter + 1 ; <nl> + i + = 1 ; <nl> } <nl> } <nl> - { <nl> + if ( size > 0 ) { <nl> int i = xs . size ( ) - 1 ; <nl> for ( auto iter = xs . end ( ) - 1 ; iter > = xs . begin ( ) ; ) { <nl> ASSERT ( * iter = = StringRef ( std : : to_string ( i ) ) ) ; <nl> - iter = iter - 2 ; <nl> - i - = 2 ; <nl> + iter = iter - 1 ; <nl> + i - = 1 ; <nl> } <nl> } <nl> } <nl> mmm a / flow / Arena . h <nl> ppp b / flow / Arena . h <nl> class VectorRef : public ComposedIdentifier < T , 3 > , public VectorRefPreserializer <nl> VPS : : add ( * ptr ) ; <nl> m_size + + ; <nl> } <nl> + <nl> + template < class . . . Us > <nl> + T & emplace_back ( Arena & p , Us & & . . . args ) { <nl> + if ( m_size + 1 > m_capacity ) reallocate ( p , m_size + 1 ) ; <nl> + auto ptr = new ( & data [ m_size ] ) T ( std : : forward < Us > ( args ) . . . ) ; <nl> + VPS : : add ( * ptr ) ; <nl> + m_size + + ; <nl> + return * ptr ; <nl> + } <nl> + <nl> / / invokes the " Deep copy constructor " T ( Arena & , const T & ) moving T entirely into arena <nl> void push_back_deep ( Arena & p , const T & value ) { <nl> if ( m_size + 1 > m_capacity ) reallocate ( p , m_size + 1 ) ; <nl> class VectorRef : public ComposedIdentifier < T , 3 > , public VectorRefPreserializer <nl> VPS : : add ( * ptr ) ; <nl> m_size + + ; <nl> } <nl> + <nl> + / / invokes the " Deep copy constructor " T ( Arena & , U & & ) moving T entirely into arena <nl> + template < class . . . Us > <nl> + T & emplace_back_deep ( Arena & p , Us & & . . . args ) { <nl> + if ( m_size + 1 > m_capacity ) reallocate ( p , m_size + 1 ) ; <nl> + auto ptr = new ( & data [ m_size ] ) T ( p , std : : forward < Us > ( args ) . . . 
) ; <nl> + VPS : : add ( * ptr ) ; <nl> + m_size + + ; <nl> + return * ptr ; <nl> + } <nl> + <nl> template < class It > <nl> void append ( Arena & p , It begin , int count ) { <nl> if ( m_size + count > m_capacity ) reallocate ( p , m_size + count ) ; <nl> class SmallVectorRef { <nl> res . idx - = diff ; <nl> return res ; <nl> } <nl> - friend self_t operator - ( difference_type diff , const self_t & lhs ) { <nl> - auto res = lhs ; <nl> - res . idx - = diff ; <nl> - return res ; <nl> - } <nl> friend difference_type operator - ( const self_t & lhs , const self_t & rhs ) { <nl> ASSERT ( lhs . vec = = rhs . vec ) ; <nl> return lhs . idx - rhs . idx ; <nl> mmm a / flow / AsioReactor . h <nl> ppp b / flow / AsioReactor . h <nl> class ASIOReactor { <nl> # ifdef __linux__ <nl> class EventFD : public IEventFD { <nl> int fd ; <nl> - ASIOReactor * reactor ; <nl> boost : : asio : : posix : : stream_descriptor sd ; <nl> int64_t fdVal ; <nl> <nl> class ASIOReactor { <nl> } <nl> <nl> public : <nl> - EventFD ( ASIOReactor * reactor ) : reactor ( reactor ) , sd ( reactor - > ios , open ( ) ) { } <nl> + EventFD ( ASIOReactor * reactor ) : sd ( reactor - > ios , open ( ) ) { } <nl> ~ EventFD ( ) { <nl> sd . close ( ) ; / / Also closes the fd , I assume . . . <nl> } <nl> mmm a / flow / FastAlloc . cpp <nl> ppp b / flow / FastAlloc . cpp <nl> <nl> # include " flow / crc32c . h " <nl> # include " flow / flow . h " <nl> <nl> + # include < atomic > <nl> # include < cstdint > <nl> # include < unordered_map > <nl> <nl> void * FastAllocator < Size > : : freelist = nullptr ; <nl> typedef void ( * ThreadInitFunction ) ( ) ; <nl> <nl> ThreadInitFunction threadInitFunction = 0 ; / / See ThreadCleanup . cpp in the C binding <nl> - void setFastAllocatorThreadInitFunction ( ThreadInitFunction f ) { <nl> + void setFastAllocatorThreadInitFunction ( ThreadInitFunction f ) { <nl> ASSERT ( ! threadInitFunction ) ; <nl> - threadInitFunction = f ; <nl> + threadInitFunction = f ; <nl> } <nl> <nl> std : : atomic < int64_t > g_hugeArenaMemory ( 0 ) ; <nl> struct FastAllocator < Size > : : GlobalData { <nl> CRITICAL_SECTION mutex ; <nl> std : : vector < void * > magazines ; / / These magazines are always exactly magazine_size ( " full " ) <nl> std : : vector < std : : pair < int , void * > > partial_magazines ; / / Magazines that are not " full " and their counts . Only created by releaseThreadMagazines ( ) . <nl> - long long totalMemory ; <nl> + std : : atomic < long long > totalMemory ; <nl> long long partialMagazineUnallocatedMemory ; <nl> - long long activeThreads ; <nl> - GlobalData ( ) : totalMemory ( 0 ) , partialMagazineUnallocatedMemory ( 0 ) , activeThreads ( 0 ) { <nl> + std : : atomic < long long > activeThreads ; <nl> + GlobalData ( ) : totalMemory ( 0 ) , partialMagazineUnallocatedMemory ( 0 ) , activeThreads ( 0 ) { <nl> InitializeCriticalSection ( & mutex ) ; <nl> } <nl> } ; <nl> <nl> template < int Size > <nl> long long FastAllocator < Size > : : getTotalMemory ( ) { <nl> - return globalData ( ) - > totalMemory ; <nl> + return globalData ( ) - > totalMemory . load ( ) ; <nl> } <nl> <nl> / / This does not include memory held by various threads that ' s available for allocation <nl> template < int Size > <nl> long long FastAllocator < Size > : : getApproximateMemoryUnused ( ) { <nl> - return globalData ( ) - > magazines . 
size ( ) * magazine_size * Size + globalData ( ) - > partialMagazineUnallocatedMemory ; <nl> + EnterCriticalSection ( & globalData ( ) - > mutex ) ; <nl> + long long unused = <nl> + globalData ( ) - > magazines . size ( ) * magazine_size * Size + globalData ( ) - > partialMagazineUnallocatedMemory ; <nl> + LeaveCriticalSection ( & globalData ( ) - > mutex ) ; <nl> + return unused ; <nl> } <nl> <nl> template < int Size > <nl> long long FastAllocator < Size > : : getActiveThreads ( ) { <nl> - return globalData ( ) - > activeThreads ; <nl> + return globalData ( ) - > activeThreads . load ( ) ; <nl> } <nl> <nl> # if FAST_ALLOCATOR_DEBUG <nl> void FastAllocator < Size > : : initThread ( ) { <nl> threadInitFunction ( ) ; <nl> } <nl> <nl> - EnterCriticalSection ( & globalData ( ) - > mutex ) ; <nl> - + + globalData ( ) - > activeThreads ; <nl> - LeaveCriticalSection ( & globalData ( ) - > mutex ) ; <nl> + globalData ( ) - > activeThreads . fetch_add ( 1 ) ; <nl> <nl> threadData . freelist = nullptr ; <nl> threadData . alternate = nullptr ; <nl> void FastAllocator < Size > : : getMagazine ( ) { <nl> threadData . count = p . first ; <nl> return ; <nl> } <nl> - globalData ( ) - > totalMemory + = magazine_size * Size ; <nl> + globalData ( ) - > totalMemory . fetch_add ( magazine_size * Size ) ; <nl> LeaveCriticalSection ( & globalData ( ) - > mutex ) ; <nl> <nl> / / Allocate a new page of data from the system allocator <nl> void FastAllocator < Size > : : getMagazine ( ) { <nl> # if FAST_ALLOCATOR_DEBUG <nl> # ifdef WIN32 <nl> static int alt = 0 ; alt + + ; <nl> - block = ( void * * ) VirtualAllocEx ( GetCurrentProcess ( ) , <nl> - ( void * ) ( ( ( getSizeCode ( Size ) < < 11 ) + alt ) * magazine_size * Size ) , magazine_size * Size , MEM_COMMIT | MEM_RESERVE , PAGE_READWRITE ) ; <nl> + block = <nl> + ( void * * ) VirtualAllocEx ( GetCurrentProcess ( ) , ( void * ) ( ( ( getSizeCode ( Size ) < < 11 ) + alt ) * magazine_size * Size ) , <nl> + magazine_size * Size , MEM_COMMIT | MEM_RESERVE , PAGE_READWRITE ) ; <nl> # else <nl> static int alt = 0 ; alt + + ; <nl> void * desiredBlock = ( void * ) ( ( ( getSizeCode ( Size ) < < 11 ) + alt ) * magazine_size * Size ) ; <nl> void FastAllocator < Size > : : getMagazine ( ) { <nl> block [ i * PSize + 1 ] = block [ i * PSize ] = & block [ ( i + 1 ) * PSize ] ; <nl> check ( & block [ i * PSize ] , false ) ; <nl> } <nl> - <nl> + <nl> block [ ( magazine_size - 1 ) * PSize + 1 ] = block [ ( magazine_size - 1 ) * PSize ] = nullptr ; <nl> check ( & block [ ( magazine_size - 1 ) * PSize ] , false ) ; <nl> threadData . freelist = block ; <nl> void FastAllocator < Size > : : releaseThreadMagazines ( ) { <nl> globalData ( ) - > magazines . push_back ( thr . alternate ) ; <nl> } <nl> } <nl> - - - globalData ( ) - > activeThreads ; <nl> + globalData ( ) - > activeThreads . fetch_add ( - 1 ) ; <nl> LeaveCriticalSection ( & globalData ( ) - > mutex ) ; <nl> <nl> thr . 
count = 0 ; <nl> void releaseAllThreadMagazines ( ) { <nl> FastAllocator < 2048 > : : releaseThreadMagazines ( ) ; <nl> FastAllocator < 4096 > : : releaseThreadMagazines ( ) ; <nl> FastAllocator < 8192 > : : releaseThreadMagazines ( ) ; <nl> + FastAllocator < 16384 > : : releaseThreadMagazines ( ) ; <nl> } <nl> <nl> int64_t getTotalUnusedAllocatedMemory ( ) { <nl> int64_t getTotalUnusedAllocatedMemory ( ) { <nl> unusedMemory + = FastAllocator < 2048 > : : getApproximateMemoryUnused ( ) ; <nl> unusedMemory + = FastAllocator < 4096 > : : getApproximateMemoryUnused ( ) ; <nl> unusedMemory + = FastAllocator < 8192 > : : getApproximateMemoryUnused ( ) ; <nl> + unusedMemory + = FastAllocator < 16384 > : : getApproximateMemoryUnused ( ) ; <nl> <nl> return unusedMemory ; <nl> } <nl> template class FastAllocator < 1024 > ; <nl> template class FastAllocator < 2048 > ; <nl> template class FastAllocator < 4096 > ; <nl> template class FastAllocator < 8192 > ; <nl> + template class FastAllocator < 16384 > ; <nl> mmm a / flow / FastAlloc . h <nl> ppp b / flow / FastAlloc . h <nl> class FastAllocator { <nl> } ; <nl> static thread_local ThreadData threadData ; <nl> static thread_local bool threadInitialized ; <nl> - static GlobalData * globalData ( ) { <nl> + static GlobalData * globalData ( ) noexcept { <nl> # ifdef VALGRIND <nl> ANNOTATE_RWLOCK_ACQUIRED ( vLock , 1 ) ; <nl> # endif <nl> class FastAllocated { <nl> if ( size < = 2048 ) return FastAllocator < 2048 > : : allocate ( ) ; <nl> if ( size < = 4096 ) return FastAllocator < 4096 > : : allocate ( ) ; <nl> if ( size < = 8192 ) return FastAllocator < 8192 > : : allocate ( ) ; <nl> + if ( size < = 16384 ) return FastAllocator < 16384 > : : allocate ( ) ; <nl> return new uint8_t [ size ] ; <nl> } <nl> <nl> inline void freeFast ( int size , void * ptr ) { <nl> if ( size < = 2048 ) return FastAllocator < 2048 > : : release ( ptr ) ; <nl> if ( size < = 4096 ) return FastAllocator < 4096 > : : release ( ptr ) ; <nl> if ( size < = 8192 ) return FastAllocator < 8192 > : : release ( ptr ) ; <nl> + if ( size < = 16384 ) return FastAllocator < 16384 > : : release ( ptr ) ; <nl> delete [ ] ( uint8_t * ) ptr ; <nl> } <nl> <nl> mmm a / flow / FileIdentifier . h <nl> ppp b / flow / FileIdentifier . h <nl> <nl> <nl> using FileIdentifier = uint32_t ; <nl> <nl> - struct Empty { } ; <nl> - <nl> template < typename T , typename = int > <nl> struct HasFileIdentifierMember : std : : false_type { } ; <nl> <nl> mmm a / flow / IRandom . h <nl> ppp b / flow / IRandom . h <nl> <nl> # include < unordered_map > <nl> # endif <nl> # include < functional > <nl> + # include < utility > <nl> <nl> / / Until we move to C + + 20 , we ' ll need something to take the place of operator < = > . <nl> / / This is as good a place as any , I guess . <nl> class IRandom { <nl> <nl> / / The following functions have fixed implementations for now : <nl> template < class C > <nl> - decltype ( ( fake < const C > ( ) [ 0 ] ) ) randomChoice ( const C & c ) { return c [ randomInt ( 0 , ( int ) c . size ( ) ) ] ; } <nl> + decltype ( ( std : : declval < const C > ( ) [ 0 ] ) ) randomChoice ( const C & c ) { <nl> + return c [ randomInt ( 0 , ( int ) c . size ( ) ) ] ; <nl> + } <nl> <nl> template < class C > <nl> void randomShuffle ( C & container ) { <nl> extern FILE * randLog ; <nl> / / Sets the seed for the deterministic random number generator on the current thread <nl> void setThreadLocalDeterministicRandomSeed ( uint32_t seed ) ; <nl> <nl> - / / Returns the random number generator that can be seeded . 
This generator should only <nl> + / / Returns the random number generator that can be seeded . This generator should only <nl> / / be used in contexts where the choice to call it is deterministic . <nl> / / <nl> / / This generator is only deterministic if given a seed using setThreadLocalDeterministicRandomSeed <nl> Reference < IRandom > deterministicRandom ( ) ; <nl> <nl> - / / A random number generator that cannot be manually seeded and may be called in <nl> + / / A random number generator that cannot be manually seeded and may be called in <nl> / / non - deterministic contexts . <nl> Reference < IRandom > nondeterministicRandom ( ) ; <nl> <nl> mmm a / flow / IThreadPool . cpp <nl> ppp b / flow / IThreadPool . cpp <nl> class ThreadPool : public IThreadPool , public ReferenceCounted < ThreadPool > { <nl> threadUserObject = userObject ; <nl> try { <nl> userObject - > init ( ) ; <nl> - while ( pool - > ios . run_one ( ) & & ! pool - > mode ) ; <nl> + while ( pool - > ios . run_one ( ) & & ( pool - > mode = = Mode : : Run ) ) ; <nl> } catch ( Error & e ) { <nl> TraceEvent ( SevError , " ThreadPoolError " ) . error ( e ) ; <nl> } <nl> - delete userObject ; userObject = 0 ; <nl> + delete userObject ; <nl> + userObject = nullptr ; <nl> stopped . set ( ) ; <nl> } <nl> static void dispatch ( PThreadAction action ) { <nl> class ThreadPool : public IThreadPool , public ReferenceCounted < ThreadPool > { <nl> boost : : asio : : io_service : : work dontstop ; <nl> enum Mode { Run = 0 , Shutdown = 2 } ; <nl> volatile int mode ; <nl> + int stackSize ; <nl> <nl> struct ActionWrapper { <nl> PThreadAction action ; <nl> class ThreadPool : public IThreadPool , public ReferenceCounted < ThreadPool > { <nl> ActionWrapper & operator = ( ActionWrapper const & ) ; <nl> } ; <nl> public : <nl> - ThreadPool ( ) : dontstop ( ios ) , mode ( Run ) { } <nl> + ThreadPool ( int stackSize ) : dontstop ( ios ) , mode ( Run ) , stackSize ( stackSize ) { } <nl> ~ ThreadPool ( ) { } <nl> Future < Void > stop ( Error const & e = success ( ) ) { <nl> if ( mode = = Shutdown ) return Void ( ) ; <nl> class ThreadPool : public IThreadPool , public ReferenceCounted < ThreadPool > { <nl> virtual void delref ( ) { if ( ReferenceCounted < ThreadPool > : : delref_no_destroy ( ) ) stop ( ) ; } <nl> void addThread ( IThreadPoolReceiver * userData ) { <nl> threads . push_back ( new Thread ( this , userData ) ) ; <nl> - startThread ( start , threads . back ( ) ) ; <nl> + startThread ( start , threads . back ( ) , stackSize ) ; <nl> } <nl> void post ( PThreadAction action ) { <nl> ios . post ( ActionWrapper ( action ) ) ; <nl> class ThreadPool : public IThreadPool , public ReferenceCounted < ThreadPool > { <nl> } ; <nl> <nl> <nl> - Reference < IThreadPool > createGenericThreadPool ( ) <nl> + Reference < IThreadPool > createGenericThreadPool ( int stackSize ) <nl> { <nl> - return Reference < IThreadPool > ( new ThreadPool ) ; <nl> + return Reference < IThreadPool > ( new ThreadPool ( stackSize ) ) ; <nl> } <nl> <nl> thread_local IThreadPoolReceiver * ThreadPool : : Thread : : threadUserObject ; <nl> mmm a / flow / IThreadPool . h <nl> ppp b / flow / IThreadPool . h <nl> class ThreadReturnPromise : NonCopyable { <nl> Promise < T > promise ; <nl> } ; <nl> <nl> - Reference < IThreadPool > createGenericThreadPool ( ) ; <nl> + Reference < IThreadPool > createGenericThreadPool ( int stackSize = 0 ) ; <nl> <nl> class DummyThreadPool : public IThreadPool , ReferenceCounted < DummyThreadPool > { <nl> public : <nl> mmm a / flow / Knobs . 
cpp <nl> ppp b / flow / Knobs . cpp <nl> void FlowKnobs : : initialize ( bool randomize , bool isSimulated ) { <nl> init ( TLS_SERVER_CONNECTION_THROTTLE_TIMEOUT , 9 . 0 ) ; <nl> init ( TLS_CLIENT_CONNECTION_THROTTLE_TIMEOUT , 11 . 0 ) ; <nl> init ( TLS_SERVER_CONNECTION_THROTTLE_ATTEMPTS , 1 ) ; <nl> - init ( TLS_CLIENT_CONNECTION_THROTTLE_ATTEMPTS , 0 ) ; <nl> + init ( TLS_CLIENT_CONNECTION_THROTTLE_ATTEMPTS , 1 ) ; <nl> + init ( TLS_CLIENT_HANDSHAKE_THREADS , 0 ) ; <nl> + init ( TLS_SERVER_HANDSHAKE_THREADS , 1000 ) ; <nl> + init ( TLS_HANDSHAKE_THREAD_STACKSIZE , 64 * 1024 ) ; <nl> + init ( TLS_MALLOC_ARENA_MAX , 6 ) ; <nl> + init ( TLS_HANDSHAKE_LIMIT , 1000 ) ; <nl> <nl> init ( NETWORK_TEST_CLIENT_COUNT , 30 ) ; <nl> init ( NETWORK_TEST_REPLY_SIZE , 600e3 ) ; <nl> void FlowKnobs : : initialize ( bool randomize , bool isSimulated ) { <nl> init ( PACKET_LIMIT , 100LL < < 20 ) ; <nl> init ( PACKET_WARNING , 2LL < < 20 ) ; / / 2MB packet warning quietly allows for 1MB system messages <nl> init ( TIME_OFFSET_LOGGING_INTERVAL , 60 . 0 ) ; <nl> - init ( MAX_PACKET_SEND_BYTES , 256 * 1024 ) ; <nl> + init ( MAX_PACKET_SEND_BYTES , 128 * 1024 ) ; <nl> init ( MIN_PACKET_BUFFER_BYTES , 4 * 1024 ) ; <nl> init ( MIN_PACKET_BUFFER_FREE_BYTES , 256 ) ; <nl> init ( FLOW_TCP_NODELAY , 1 ) ; <nl> init ( FLOW_TCP_QUICKACK , 0 ) ; <nl> - init ( UNRESTRICTED_HANDSHAKE_LIMIT , 15 ) ; <nl> - init ( BOUNDED_HANDSHAKE_LIMIT , 400 ) ; <nl> <nl> / / Sim2 <nl> init ( MIN_OPEN_TIME , 0 . 0002 ) ; <nl> mmm a / flow / Knobs . h <nl> ppp b / flow / Knobs . h <nl> class FlowKnobs : public Knobs { <nl> double TLS_CLIENT_CONNECTION_THROTTLE_TIMEOUT ; <nl> int TLS_SERVER_CONNECTION_THROTTLE_ATTEMPTS ; <nl> int TLS_CLIENT_CONNECTION_THROTTLE_ATTEMPTS ; <nl> + int TLS_CLIENT_HANDSHAKE_THREADS ; <nl> + int TLS_SERVER_HANDSHAKE_THREADS ; <nl> + int TLS_HANDSHAKE_THREAD_STACKSIZE ; <nl> + int TLS_MALLOC_ARENA_MAX ; <nl> + int TLS_HANDSHAKE_LIMIT ; <nl> <nl> int NETWORK_TEST_CLIENT_COUNT ; <nl> int NETWORK_TEST_REPLY_SIZE ; <nl> class FlowKnobs : public Knobs { <nl> int MIN_PACKET_BUFFER_FREE_BYTES ; <nl> int FLOW_TCP_NODELAY ; <nl> int FLOW_TCP_QUICKACK ; <nl> - int UNRESTRICTED_HANDSHAKE_LIMIT ; <nl> - int BOUNDED_HANDSHAKE_LIMIT ; <nl> <nl> / / Sim2 <nl> / / FIMXE : more parameters could be factored out <nl> mmm a / flow / Net2 . actor . cpp <nl> ppp b / flow / Net2 . actor . cpp <nl> <nl> # include " flow / ProtocolVersion . h " <nl> # include " flow / TLSConfig . actor . h " <nl> # include " flow / genericactors . actor . h " <nl> + # include " flow / Util . h " <nl> <nl> / / See the comment in TLSConfig . actor . h for the explanation of why this module breaking include was done . <nl> # include " fdbrpc / IAsyncFile . 
h " <nl> class Net2 sealed : public INetwork , public INetworkConnections { <nl> <nl> public : <nl> Net2 ( const TLSConfig & tlsConfig , bool useThreadPool , bool useMetrics ) ; <nl> - void initTLS ( ) ; <nl> + void initTLS ( ETLSInitState targetState ) ; <nl> void run ( ) ; <nl> void initMetrics ( ) ; <nl> <nl> class Net2 sealed : public INetwork , public INetworkConnections { <nl> virtual bool checkRunnable ( ) override ; <nl> <nl> bool useThreadPool ; <nl> + <nl> / / private : <nl> <nl> ASIOReactor reactor ; <nl> # ifndef TLS_DISABLED <nl> AsyncVar < Reference < ReferencedObject < boost : : asio : : ssl : : context > > > sslContextVar ; <nl> + Reference < IThreadPool > sslHandshakerPool ; <nl> + int sslHandshakerThreadsStarted ; <nl> + int sslPoolHandshakesInProgress ; <nl> # endif <nl> TLSConfig tlsConfig ; <nl> Future < Void > backgroundCertRefresh ; <nl> - bool tlsInitialized ; <nl> + ETLSInitState tlsInitializedState ; <nl> <nl> INetworkConnections * network ; / / initially this , but can be changed <nl> <nl> class Listener : public IListener , ReferenceCounted < Listener > { <nl> # ifndef TLS_DISABLED <nl> typedef boost : : asio : : ssl : : stream < boost : : asio : : ip : : tcp : : socket & > ssl_socket ; <nl> <nl> + struct SSLHandshakerThread : IThreadPoolReceiver { <nl> + SSLHandshakerThread ( ) { } <nl> + virtual void init ( ) { } <nl> + <nl> + struct Handshake : TypedAction < SSLHandshakerThread , Handshake > { <nl> + Handshake ( ssl_socket & socket , ssl_socket : : handshake_type type ) : socket ( socket ) , type ( type ) { <nl> + } <nl> + virtual double getTimeEstimate ( ) { return 0 . 001 ; } <nl> + <nl> + ThreadReturnPromise < Void > done ; <nl> + ssl_socket & socket ; <nl> + ssl_socket : : handshake_type type ; <nl> + boost : : system : : error_code err ; <nl> + } ; <nl> + <nl> + void action ( Handshake & h ) { <nl> + try { <nl> + h . socket . next_layer ( ) . non_blocking ( false , h . err ) ; <nl> + if ( ! h . err . failed ( ) ) { <nl> + h . socket . handshake ( h . type , h . err ) ; <nl> + } <nl> + if ( ! h . err . failed ( ) ) { <nl> + h . socket . next_layer ( ) . non_blocking ( true , h . err ) ; <nl> + } <nl> + if ( h . err . failed ( ) ) { <nl> + TraceEvent ( SevWarn , h . type = = ssl_socket : : handshake_type : : client ? " N2_ConnectHandshakeError " : " N2_AcceptHandshakeError " ) <nl> + . detail ( " ErrorCode " , h . err . value ( ) ) <nl> + . detail ( " ErrorMsg " , h . err . message ( ) . c_str ( ) ) <nl> + . detail ( " BackgroundThread " , true ) ; <nl> + h . done . sendError ( connection_failed ( ) ) ; <nl> + } else { <nl> + h . done . send ( Void ( ) ) ; <nl> + } <nl> + } catch ( . . . ) { <nl> + TraceEvent ( SevWarn , h . type = = ssl_socket : : handshake_type : : client ? " N2_ConnectHandshakeUnknownError " : " N2_AcceptHandshakeUnknownError " ) <nl> + . detail ( " BackgroundThread " , true ) ; <nl> + h . done . 
sendError ( connection_failed ( ) ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> class SSLConnection : public IConnection , ReferenceCounted < SSLConnection > { <nl> public : <nl> virtual void addref ( ) { ReferenceCounted < SSLConnection > : : addref ( ) ; } <nl> class SSLConnection : public IConnection , ReferenceCounted < SSLConnection > { <nl> } <nl> <nl> ACTOR static void doAcceptHandshake ( Reference < SSLConnection > self , Promise < Void > connected ) { <nl> + state std : : pair < IPAddress , uint16_t > peerIP ; <nl> + state Hold < int > holder ; <nl> + <nl> try { <nl> - state std : : pair < IPAddress , uint16_t > peerIP = std : : make_pair ( self - > getPeerAddress ( ) . ip , static_cast < uint16_t > ( 0 ) ) ; <nl> + peerIP = std : : make_pair ( self - > getPeerAddress ( ) . ip , static_cast < uint16_t > ( 0 ) ) ; <nl> auto iter ( g_network - > networkInfo . serverTLSConnectionThrottler . find ( peerIP ) ) ; <nl> if ( iter ! = g_network - > networkInfo . serverTLSConnectionThrottler . end ( ) ) { <nl> if ( now ( ) < iter - > second . second ) { <nl> class SSLConnection : public IConnection , ReferenceCounted < SSLConnection > { <nl> } <nl> } <nl> <nl> - int64_t permitNumber = wait ( g_network - > networkInfo . handshakeLock - > take ( ) ) ; <nl> - state BoundedFlowLock : : Releaser releaser ( g_network - > networkInfo . handshakeLock , permitNumber ) ; <nl> + wait ( g_network - > networkInfo . handshakeLock - > take ( ) ) ; <nl> + state FlowLock : : Releaser releaser ( * g_network - > networkInfo . handshakeLock ) ; <nl> <nl> - BindPromise p ( " N2_AcceptHandshakeError " , UID ( ) ) ; <nl> - auto onHandshook = p . getFuture ( ) ; <nl> - self - > getSSLSocket ( ) . async_handshake ( boost : : asio : : ssl : : stream_base : : server , std : : move ( p ) ) ; <nl> + Future < Void > onHandshook ; <nl> + <nl> + / / If the background handshakers are not all busy , use one <nl> + if ( N2 : : g_net2 - > sslPoolHandshakesInProgress < N2 : : g_net2 - > sslHandshakerThreadsStarted ) { <nl> + holder = Hold ( & N2 : : g_net2 - > sslPoolHandshakesInProgress ) ; <nl> + auto handshake = new SSLHandshakerThread : : Handshake ( self - > ssl_sock , boost : : asio : : ssl : : stream_base : : server ) ; <nl> + onHandshook = handshake - > done . getFuture ( ) ; <nl> + N2 : : g_net2 - > sslHandshakerPool - > post ( handshake ) ; <nl> + } <nl> + else { <nl> + / / Otherwise use flow network thread <nl> + BindPromise p ( " N2_AcceptHandshakeError " , UID ( ) ) ; <nl> + onHandshook = p . getFuture ( ) ; <nl> + self - > ssl_sock . async_handshake ( boost : : asio : : ssl : : stream_base : : server , std : : move ( p ) ) ; <nl> + } <nl> wait ( onHandshook ) ; <nl> wait ( delay ( 0 , TaskPriority : : Handshake ) ) ; <nl> connected . send ( Void ( ) ) ; <nl> class SSLConnection : public IConnection , ReferenceCounted < SSLConnection > { <nl> } <nl> <nl> ACTOR static void doConnectHandshake ( Reference < SSLConnection > self , Promise < Void > connected ) { <nl> - try { <nl> - int64_t permitNumber = wait ( g_network - > networkInfo . handshakeLock - > take ( ) ) ; <nl> - state BoundedFlowLock : : Releaser releaser ( g_network - > networkInfo . handshakeLock , permitNumber ) ; <nl> + state Hold < int > holder ; <nl> <nl> - BindPromise p ( " N2_ConnectHandshakeError " , self - > id ) ; <nl> - Future < Void > onHandshook = p . getFuture ( ) ; <nl> - self - > ssl_sock . async_handshake ( boost : : asio : : ssl : : stream_base : : client , std : : move ( p ) ) ; <nl> + try { <nl> + wait ( g_network - > networkInfo . 
handshakeLock - > take ( ) ) ; <nl> + state FlowLock : : Releaser releaser ( * g_network - > networkInfo . handshakeLock ) ; <nl> + <nl> + Future < Void > onHandshook ; <nl> + / / If the background handshakers are not all busy , use one <nl> + if ( N2 : : g_net2 - > sslPoolHandshakesInProgress < N2 : : g_net2 - > sslHandshakerThreadsStarted ) { <nl> + holder = Hold ( & N2 : : g_net2 - > sslPoolHandshakesInProgress ) ; <nl> + auto handshake = new SSLHandshakerThread : : Handshake ( self - > ssl_sock , boost : : asio : : ssl : : stream_base : : client ) ; <nl> + onHandshook = handshake - > done . getFuture ( ) ; <nl> + N2 : : g_net2 - > sslHandshakerPool - > post ( handshake ) ; <nl> + } <nl> + else { <nl> + / / Otherwise use flow network thread <nl> + BindPromise p ( " N2_ConnectHandshakeError " , self - > id ) ; <nl> + onHandshook = p . getFuture ( ) ; <nl> + self - > ssl_sock . async_handshake ( boost : : asio : : ssl : : stream_base : : client , std : : move ( p ) ) ; <nl> + } <nl> wait ( onHandshook ) ; <nl> wait ( delay ( 0 , TaskPriority : : Handshake ) ) ; <nl> connected . send ( Void ( ) ) ; <nl> Net2 : : Net2 ( const TLSConfig & tlsConfig , bool useThreadPool , bool useMetrics ) <nl> tscBegin ( 0 ) , tscEnd ( 0 ) , taskBegin ( 0 ) , currentTaskID ( TaskPriority : : DefaultYield ) , <nl> numYields ( 0 ) , <nl> lastPriorityStats ( nullptr ) , <nl> - tlsInitialized ( false ) , <nl> + tlsInitializedState ( ETLSInitState : : NONE ) , <nl> tlsConfig ( tlsConfig ) , <nl> started ( false ) <nl> # ifndef TLS_DISABLED <nl> - , sslContextVar ( { ReferencedObject < boost : : asio : : ssl : : context > : : from ( boost : : asio : : ssl : : context ( boost : : asio : : ssl : : context : : tls ) ) } ) <nl> + , sslContextVar ( { ReferencedObject < boost : : asio : : ssl : : context > : : from ( boost : : asio : : ssl : : context ( boost : : asio : : ssl : : context : : tls ) ) } ) , <nl> + sslPoolHandshakesInProgress ( 0 ) , sslHandshakerThreadsStarted ( 0 ) <nl> # endif <nl> <nl> { <nl> ACTOR static Future < Void > reloadCertificatesOnChange ( TLSConfig config , std : : fun <nl> } <nl> # endif <nl> <nl> - void Net2 : : initTLS ( ) { <nl> - if ( tlsInitialized ) { <nl> + void Net2 : : initTLS ( ETLSInitState targetState ) { <nl> + if ( tlsInitializedState > = targetState ) { <nl> return ; <nl> } <nl> # ifndef TLS_DISABLED <nl> - auto onPolicyFailure = [ this ] ( ) { this - > countTLSPolicyFailures + + ; } ; <nl> - try { <nl> - boost : : asio : : ssl : : context newContext ( boost : : asio : : ssl : : context : : tls ) ; <nl> - const LoadedTLSConfig & loaded = tlsConfig . loadSync ( ) ; <nl> - TraceEvent ( " Net2TLSConfig " ) <nl> - . detail ( " CAPath " , tlsConfig . getCAPathSync ( ) ) <nl> - . detail ( " CertificatePath " , tlsConfig . getCertificatePathSync ( ) ) <nl> - . detail ( " KeyPath " , tlsConfig . getKeyPathSync ( ) ) <nl> - . detail ( " HasPassword " , ! loaded . getPassword ( ) . empty ( ) ) <nl> - . detail ( " VerifyPeers " , boost : : algorithm : : join ( loaded . getVerifyPeers ( ) , " | " ) ) ; <nl> - ConfigureSSLContext ( tlsConfig . loadSync ( ) , & newContext , onPolicyFailure ) ; <nl> - sslContextVar . set ( ReferencedObject < boost : : asio : : ssl : : context > : : from ( std : : move ( newContext ) ) ) ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " Net2TLSInitError " ) . 
error ( e ) ; <nl> - } <nl> - backgroundCertRefresh = reloadCertificatesOnChange ( tlsConfig , onPolicyFailure , & sslContextVar ) ; <nl> + / / Any target state must be higher than NONE so if the current state is NONE <nl> + / / then initialize the TLS config <nl> + if ( tlsInitializedState = = ETLSInitState : : NONE ) { <nl> + auto onPolicyFailure = [ this ] ( ) { this - > countTLSPolicyFailures + + ; } ; <nl> + try { <nl> + boost : : asio : : ssl : : context newContext ( boost : : asio : : ssl : : context : : tls ) ; <nl> + const LoadedTLSConfig & loaded = tlsConfig . loadSync ( ) ; <nl> + TraceEvent ( " Net2TLSConfig " ) <nl> + . detail ( " CAPath " , tlsConfig . getCAPathSync ( ) ) <nl> + . detail ( " CertificatePath " , tlsConfig . getCertificatePathSync ( ) ) <nl> + . detail ( " KeyPath " , tlsConfig . getKeyPathSync ( ) ) <nl> + . detail ( " HasPassword " , ! loaded . getPassword ( ) . empty ( ) ) <nl> + . detail ( " VerifyPeers " , boost : : algorithm : : join ( loaded . getVerifyPeers ( ) , " | " ) ) ; <nl> + ConfigureSSLContext ( tlsConfig . loadSync ( ) , & newContext , onPolicyFailure ) ; <nl> + sslContextVar . set ( ReferencedObject < boost : : asio : : ssl : : context > : : from ( std : : move ( newContext ) ) ) ; <nl> + } catch ( Error & e ) { <nl> + TraceEvent ( " Net2TLSInitError " ) . error ( e ) ; <nl> + } <nl> + backgroundCertRefresh = reloadCertificatesOnChange ( tlsConfig , onPolicyFailure , & sslContextVar ) ; <nl> + } <nl> + <nl> + / / If a TLS connection is actually going to be used then start background threads if configured <nl> + if ( targetState > ETLSInitState : : CONFIG ) { <nl> + int threadsToStart ; <nl> + switch ( targetState ) { <nl> + case ETLSInitState : : CONNECT : <nl> + threadsToStart = FLOW_KNOBS - > TLS_CLIENT_HANDSHAKE_THREADS ; <nl> + break ; <nl> + case ETLSInitState : : LISTEN : <nl> + threadsToStart = FLOW_KNOBS - > TLS_SERVER_HANDSHAKE_THREADS ; <nl> + break ; <nl> + default : <nl> + threadsToStart = 0 ; <nl> + } ; <nl> + threadsToStart - = sslHandshakerThreadsStarted ; <nl> + <nl> + if ( threadsToStart > 0 ) { <nl> + if ( sslHandshakerThreadsStarted = = 0 ) { <nl> + # if defined ( __linux__ ) <nl> + if ( mallopt ( M_ARENA_MAX , FLOW_KNOBS - > TLS_MALLOC_ARENA_MAX ) ! = 1 ) { <nl> + TraceEvent ( SevWarn , " TLSMallocSetMaxArenasFailure " ) . detail ( " MaxArenas " , FLOW_KNOBS - > TLS_MALLOC_ARENA_MAX ) ; <nl> + } ; <nl> + # endif <nl> + sslHandshakerPool = createGenericThreadPool ( FLOW_KNOBS - > TLS_HANDSHAKE_THREAD_STACKSIZE ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < threadsToStart ; + + i ) { <nl> + + + sslHandshakerThreadsStarted ; <nl> + sslHandshakerPool - > addThread ( new SSLHandshakerThread ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> # endif <nl> - tlsInitialized = true ; <nl> + <nl> + tlsInitializedState = targetState ; <nl> } <nl> <nl> ACTOR Future < Void > Net2 : : logTimeOffset ( ) { <nl> THREAD_HANDLE Net2 : : startThread ( THREAD_FUNC_RETURN ( * func ) ( void * ) , void * arg ) <nl> <nl> Future < Reference < IConnection > > Net2 : : connect ( NetworkAddress toAddr , std : : string host ) { <nl> # ifndef TLS_DISABLED <nl> - initTLS ( ) ; <nl> + initTLS ( ETLSInitState : : CONNECT ) ; <nl> if ( toAddr . isTLS ( ) ) { <nl> return SSLConnection : : connect ( & this - > reactor . ios , this - > sslContextVar . 
get ( ) , toAddr ) ; <nl> } <nl> bool Net2 : : isAddressOnThisHost ( NetworkAddress const & addr ) const { <nl> Reference < IListener > Net2 : : listen ( NetworkAddress localAddr ) { <nl> try { <nl> # ifndef TLS_DISABLED <nl> - initTLS ( ) ; <nl> + initTLS ( ETLSInitState : : LISTEN ) ; <nl> if ( localAddr . isTLS ( ) ) { <nl> return Reference < IListener > ( new SSLListener ( reactor . ios , & this - > sslContextVar , localAddr ) ) ; <nl> } <nl> mmm a / flow / Net2Packet . cpp <nl> ppp b / flow / Net2Packet . cpp <nl> void PacketWriter : : serializeBytesAcrossBoundary ( const void * data , int bytes ) { <nl> if ( ! bytes ) break ; <nl> <nl> data = ( uint8_t * ) data + b ; <nl> - nextBuffer ( ) ; <nl> + nextBuffer ( bytes ) ; <nl> } <nl> } <nl> <nl> void PacketWriter : : nextBuffer ( size_t size ) { <nl> } <nl> } <nl> <nl> + / / Adds exactly bytes of unwritten length to the buffer , possibly across packet buffer boundaries , <nl> + / / and initializes buf to point to the packet buffer ( s ) that contain the unwritten space <nl> void PacketWriter : : writeAhead ( int bytes , struct SplitBuffer * buf ) { <nl> if ( bytes < = buffer - > bytes_unwritten ( ) ) { <nl> buf - > begin = buffer - > data ( ) + buffer - > bytes_written ; <nl> void PacketWriter : : writeAhead ( int bytes , struct SplitBuffer * buf ) { <nl> buf - > begin = buffer - > data ( ) + buffer - > bytes_written ; <nl> buf - > first_length = buffer - > bytes_unwritten ( ) ; <nl> buffer - > bytes_written = buffer - > size ( ) ; <nl> - nextBuffer ( ) ; <nl> + size_t remaining = bytes - buf - > first_length ; <nl> + nextBuffer ( remaining ) ; <nl> buf - > next = buffer - > data ( ) ; <nl> - buffer - > bytes_written = bytes - buf - > first_length ; <nl> + buffer - > bytes_written = remaining ; <nl> } <nl> } <nl> <nl> PacketBuffer * ReliablePacketList : : compact ( PacketBuffer * into , PacketBuffer * end ) <nl> if ( c - > buffer = = end / * & & c - > begin > = c - > buffer - > bytes_written * / ) / / quit when we hit the unsent range <nl> return into ; <nl> if ( into - > bytes_written = = into - > size ( ) ) { <nl> - into - > next = PacketBuffer : : create ( ) ; <nl> + into - > next = PacketBuffer : : create ( into - > size ( ) ) ; <nl> into = into - > nextPacketBuffer ( ) ; <nl> } <nl> <nl> mmm a / flow / Net2Packet . h <nl> ppp b / flow / Net2Packet . h <nl> class UnsentPacketQueue : NonCopyable { <nl> ~ UnsentPacketQueue ( ) { discardAll ( ) ; } <nl> <nl> / / Get a PacketBuffer to write new packets into <nl> - PacketBuffer * getWriteBuffer ( ) { <nl> + PacketBuffer * getWriteBuffer ( size_t sizeHint = 0 ) { <nl> if ( ! unsent_last ) { <nl> ASSERT ( ! unsent_first ) ; <nl> - unsent_first = unsent_last = PacketBuffer : : create ( ) ; <nl> + unsent_first = unsent_last = PacketBuffer : : create ( sizeHint ) ; <nl> } ; <nl> return unsent_last ; <nl> } <nl> mmm a / flow / ObjectSerializer . h <nl> ppp b / flow / ObjectSerializer . h <nl> struct SaveContext { <nl> <nl> template < class ReaderImpl > <nl> class _ObjectReader { <nl> + protected : <nl> ProtocolVersion mProtocolVersion ; <nl> public : <nl> <nl> class _ObjectReader { <nl> const uint8_t * data = static_cast < ReaderImpl * > ( this ) - > data ( ) ; <nl> LoadContext < ReaderImpl > context ( static_cast < ReaderImpl * > ( this ) ) ; <nl> if ( read_file_identifier ( data ) ! = file_identifier ) { <nl> - TraceEvent ( SevError , " MismatchedFileIdentifier " ) . detail ( " Expected " , file_identifier ) . 
detail ( " Read " , read_file_identifier ( data ) ) ; <nl> - ASSERT ( false ) ; <nl> + / / Some file identifiers are changed in 7 . 0 , so file identifier mismatches <nl> + / / are expected during a downgrade from 7 . 0 to 6 . 3 <nl> + bool expectMismatch = mProtocolVersion > = ProtocolVersion ( 0x0FDB00B070000000LL ) ; <nl> + { <nl> + TraceEvent te ( expectMismatch ? SevInfo : SevError , " MismatchedFileIdentifier " ) ; <nl> + if ( expectMismatch ) { <nl> + te . suppressFor ( 1 . 0 ) ; <nl> + } <nl> + te . detail ( " Expected " , file_identifier ) . detail ( " Read " , read_file_identifier ( data ) ) ; <nl> + } <nl> + if ( ! expectMismatch ) { <nl> + ASSERT ( false ) ; <nl> + } <nl> } <nl> load_members ( data , context , items . . . ) ; <nl> } <nl> mmm a / flow / Platform . actor . cpp <nl> ppp b / flow / Platform . actor . cpp <nl> void setCloseOnExec ( int fd ) { <nl> } / / namespace platform <nl> <nl> # ifdef _WIN32 <nl> - THREAD_HANDLE startThread ( void ( * func ) ( void * ) , void * arg ) { <nl> - return ( void * ) _beginthread ( func , 0 , arg ) ; <nl> + THREAD_HANDLE startThread ( void ( * func ) ( void * ) , void * arg , int stackSize ) { <nl> + return ( void * ) _beginthread ( func , stackSize , arg ) ; <nl> } <nl> # elif ( defined ( __linux__ ) | | defined ( __APPLE__ ) | | defined ( __FreeBSD__ ) ) <nl> - THREAD_HANDLE startThread ( void * ( * func ) ( void * ) , void * arg ) { <nl> + THREAD_HANDLE startThread ( void * ( * func ) ( void * ) , void * arg , int stackSize ) { <nl> pthread_t t ; <nl> - pthread_create ( & t , NULL , func , arg ) ; <nl> + pthread_attr_t attr ; <nl> + <nl> + pthread_attr_init ( & attr ) ; <nl> + if ( stackSize ! = 0 ) { <nl> + if ( pthread_attr_setstacksize ( & attr , stackSize ) ! = 0 ) { <nl> + / / If setting the stack size fails the default stack size will be used , so failure to set <nl> + / / the stack size is treated as a warning . <nl> + / / Logging a trace event here is a bit risky because startThread ( ) could be used early <nl> + / / enough that TraceEvent can ' t be used yet , though currently it is not used with a nonzero <nl> + / / stack size that early in execution . <nl> + TraceEvent ( SevWarnAlways , " StartThreadInvalidStackSize " ) . detail ( " StackSize " , stackSize ) ; <nl> + } ; <nl> + } <nl> + <nl> + pthread_create ( & t , & attr , func , arg ) ; <nl> + pthread_attr_destroy ( & attr ) ; <nl> + <nl> return t ; <nl> } <nl> # else <nl> mmm a / flow / Platform . h <nl> ppp b / flow / Platform . h <nl> do { \ <nl> # include < functional > <nl> # endif <nl> <nl> - / / fake < T > ( ) is for use in decltype expressions only - there is no implementation <nl> - template < class T > T fake ( ) ; <nl> - <nl> / / g + + requires that non - dependent names have to be looked up at <nl> / / template definition , which makes circular dependencies a royal <nl> / / pain . 
( For whatever it ' s worth , g + + appears to be adhering to spec <nl> inline static T & makeDependent ( T & value ) { return value ; } <nl> # define THREAD_FUNC static void __cdecl <nl> # define THREAD_FUNC_RETURN void <nl> # define THREAD_HANDLE void * <nl> - THREAD_HANDLE startThread ( void ( func ) ( void * ) , void * arg ) ; <nl> + THREAD_HANDLE startThread ( void ( func ) ( void * ) , void * arg , int stackSize = 0 ) ; <nl> # define THREAD_RETURN return <nl> # elif defined ( __unixish__ ) <nl> # define THREAD_FUNC static void * <nl> # define THREAD_FUNC_RETURN void * <nl> # define THREAD_HANDLE pthread_t <nl> - THREAD_HANDLE startThread ( void * ( func ) ( void * ) , void * arg ) ; <nl> + THREAD_HANDLE startThread ( void * ( func ) ( void * ) , void * arg , int stackSize = 0 ) ; <nl> # define THREAD_RETURN return NULL <nl> # else <nl> # error How do I start a new thread on this platform ? <nl> mmm a / flow / ProtocolVersion . h <nl> ppp b / flow / ProtocolVersion . h <nl> constexpr ProtocolVersion currentProtocolVersion ( 0x0FDB00B070010001LL ) ; <nl> / / This assert is intended to help prevent incrementing the leftmost digits accidentally . It will probably need to <nl> / / change when we reach version 10 . <nl> static_assert ( currentProtocolVersion . version ( ) < 0x0FDB00B100000000LL , " Unexpected protocol version " ) ; <nl> + <nl> + / / Downgrades are only supported for one minor version <nl> + constexpr ProtocolVersion minInvalidProtocolVersion ( 0x0FDB00B072000000LL ) ; <nl> mmm a / flow / ThreadHelper . actor . h <nl> ppp b / flow / ThreadHelper . actor . h <nl> <nl> # elif ! defined ( FLOW_THREADHELPER_ACTOR_H ) <nl> # define FLOW_THREADHELPER_ACTOR_H <nl> <nl> + # include < utility > <nl> + <nl> # include " flow / flow . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> class ThreadSingleAssignmentVarBase { <nl> Error error ; <nl> ThreadCallback * callback ; <nl> <nl> - bool isReady ( ) { <nl> + bool isReady ( ) { <nl> ThreadSpinLockHolder holder ( mutex ) ; <nl> return isReadyUnsafe ( ) ; <nl> } <nl> <nl> - bool isError ( ) { <nl> + bool isError ( ) { <nl> ThreadSpinLockHolder holder ( mutex ) ; <nl> return isErrorUnsafe ( ) ; <nl> } <nl> class ThreadSingleAssignmentVarBase { <nl> return error . code ( ) ; <nl> } <nl> <nl> - bool canBeSet ( ) { <nl> + bool canBeSet ( ) { <nl> ThreadSpinLockHolder holder ( mutex ) ; <nl> return canBeSetUnsafe ( ) ; <nl> } <nl> class ThreadSingleAssignmentVarBase { <nl> } <nl> <nl> ThreadSingleAssignmentVarBase ( ) : status ( Unset ) , callback ( NULL ) , valueReferenceCount ( 0 ) { } / / , referenceCount ( 1 ) { } <nl> - ~ ThreadSingleAssignmentVarBase ( ) { <nl> - this - > mutex . assertNotEntered ( ) ; <nl> + ~ ThreadSingleAssignmentVarBase ( ) { <nl> + this - > mutex . assertNotEntered ( ) ; <nl> <nl> if ( callback ) <nl> callback - > destroy ( ) ; <nl> class ThreadSingleAssignmentVarBase { <nl> ASSERT ( false ) ; / / Promise fulfilled twice <nl> } <nl> error = err ; <nl> - status = ErrorSet ; <nl> + status = ErrorSet ; <nl> if ( ! callback ) { <nl> this - > mutex . leave ( ) ; <nl> return ; <nl> ACTOR template < class F > void doOnMainThreadVoid ( Future < Void > signal , F f , Erro <nl> } <nl> } <nl> <nl> - template < class F > ThreadFuture < decltype ( fake < F > ( ) ( ) . getValue ( ) ) > onMainThread ( F f ) { <nl> + template < class F > <nl> + ThreadFuture < decltype ( std : : declval < F > ( ) ( ) . 
getValue ( ) ) > onMainThread ( F f ) { <nl> Promise < Void > signal ; <nl> - auto returnValue = new ThreadSingleAssignmentVar < decltype ( fake < F > ( ) ( ) . getValue ( ) ) > ( ) ; <nl> + auto returnValue = new ThreadSingleAssignmentVar < decltype ( std : : declval < F > ( ) ( ) . getValue ( ) ) > ( ) ; <nl> returnValue - > addref ( ) ; / / For the ThreadFuture we return <nl> - Future < Void > cancelFuture = doOnMainThread < decltype ( fake < F > ( ) ( ) . getValue ( ) ) , F > ( signal . getFuture ( ) , f , returnValue ) ; <nl> + Future < Void > cancelFuture = <nl> + doOnMainThread < decltype ( std : : declval < F > ( ) ( ) . getValue ( ) ) , F > ( signal . getFuture ( ) , f , returnValue ) ; <nl> returnValue - > setCancel ( std : : move ( cancelFuture ) ) ; <nl> g_network - > onMainThread ( std : : move ( signal ) , TaskPriority : : DefaultOnMainThread ) ; <nl> - return ThreadFuture < decltype ( fake < F > ( ) ( ) . getValue ( ) ) > ( returnValue ) ; <nl> + return ThreadFuture < decltype ( std : : declval < F > ( ) ( ) . getValue ( ) ) > ( returnValue ) ; <nl> } <nl> <nl> template < class V > <nl> mmm a / flow / ThreadPrimitives . h <nl> ppp b / flow / ThreadPrimitives . h <nl> <nl> # pragma once <nl> <nl> # include < atomic > <nl> + # include < array > <nl> <nl> # include " flow / Error . h " <nl> # include " flow / Trace . h " <nl> <nl> # include < drd . h > <nl> # endif <nl> <nl> - class ThreadSpinLock { <nl> + / / TODO : We should make this dependent on the CPU . Maybe cmake <nl> + / / can set this variable properly ? <nl> + constexpr size_t CACHE_LINE_SIZE = 64 ; <nl> + <nl> + class alignas ( CACHE_LINE_SIZE ) ThreadSpinLock { <nl> public : <nl> / / # ifdef _WIN32 <nl> ThreadSpinLock ( ) { <nl> class ThreadSpinLock { <nl> ThreadSpinLock ( const ThreadSpinLock & ) ; <nl> void operator = ( const ThreadSpinLock & ) ; <nl> std : : atomic_flag isLocked = ATOMIC_FLAG_INIT ; <nl> + / / We want a spin lock to occupy a cache line in order to <nl> + / / prevent false sharing . <nl> + std : : array < uint8_t , CACHE_LINE_SIZE - sizeof ( isLocked ) > padding ; <nl> } ; <nl> <nl> class ThreadSpinLockHolder { <nl> mmm a / flow / TreeBenchmark . h <nl> ppp b / flow / TreeBenchmark . h <nl> void treeBenchmark ( T & tree , F generateKey ) { <nl> keys . resize ( std : : unique ( keys . begin ( ) , keys . end ( ) ) - keys . begin ( ) ) ; <nl> <nl> auto iter = tree . lower_bound ( * keys . begin ( ) ) ; <nl> - timedRun ( " scan " , keys , [ & tree , & iter ] ( key const & k ) { <nl> + timedRun ( " scan " , keys , [ & iter ] ( key const & k ) { <nl> ASSERT ( k = = * iter ) ; <nl> + + iter ; <nl> } ) ; <nl> static inline int randomInt ( ) { <nl> return deterministicRandom ( ) - > randomInt ( 0 , INT32_MAX ) ; <nl> } <nl> <nl> - # endif / / FLOW_TREEBENCHMARK_H <nl> \ No newline at end of file <nl> + # endif / / FLOW_TREEBENCHMARK_H <nl> mmm a / flow / Util . h <nl> ppp b / flow / Util . h <nl> void swapAndPop ( C * container , int index ) { <nl> container - > pop_back ( ) ; <nl> } <nl> <nl> + / / Adds n to pCount upon construction , subtracts in upon destruction <nl> + template < typename T > <nl> + struct Hold { <nl> + Hold ( T * pCount = nullptr , T n = 1 ) : pCount ( pCount ) , n ( n ) { <nl> + if ( pCount ! = nullptr ) { <nl> + * pCount + = n ; <nl> + } <nl> + } <nl> + ~ Hold ( ) { <nl> + if ( pCount ! = nullptr ) { <nl> + * pCount - = n ; <nl> + } <nl> + } <nl> + <nl> + Hold ( Hold & & other ) { <nl> + pCount = other . pCount ; <nl> + other . pCount = nullptr ; <nl> + n = other . 
n ; <nl> + } <nl> + <nl> + Hold & operator = ( Hold & & other ) { <nl> + if ( pCount ! = nullptr ) { <nl> + * pCount - = n ; <nl> + } <nl> + pCount = other . pCount ; <nl> + other . pCount = nullptr ; <nl> + n = other . n ; <nl> + return * this ; <nl> + } ; <nl> + <nl> + void release ( ) { <nl> + if ( pCount ! = nullptr ) { <nl> + * pCount - = n ; <nl> + pCount = nullptr ; <nl> + } <nl> + } <nl> + <nl> + T * pCount ; <nl> + T n ; <nl> + <nl> + void operator = ( const Hold & other ) = delete ; <nl> + } ; <nl> + <nl> # endif / / _FLOW_UTIL_H_ <nl> mmm a / flow / actorcompiler / ActorCompiler . cs <nl> ppp b / flow / actorcompiler / ActorCompiler . cs <nl> void CompileStatement ( RangeForStatement stmt , Context cx ) <nl> } <nl> <nl> var iter = getIteratorName ( cx ) ; <nl> - state . Add ( new StateVar { SourceLine = stmt . FirstSourceLine , name = iter , type = " decltype ( std : : begin ( fake < " + container . type + " > ( ) ) ) " , initializer = null } ) ; <nl> + state . Add ( new StateVar { SourceLine = stmt . FirstSourceLine , name = iter , type = " decltype ( std : : begin ( std : : declval < " + container . type + " > ( ) ) ) " , initializer = null } ) ; <nl> var equivalent = new ForStatement { <nl> initExpression = iter + " = std : : begin ( " + stmt . rangeExpression + " ) " , <nl> condExpression = iter + " ! = std : : end ( " + stmt . rangeExpression + " ) " , <nl> mmm a / flow / error_definitions . h <nl> ppp b / flow / error_definitions . h <nl> ERROR ( tag_too_long , 2110 , " Tag set on transaction is too long " ) <nl> ERROR ( too_many_tag_throttles , 2111 , " Too many tag throttles have been created " ) <nl> ERROR ( special_keys_cross_module_read , 2112 , " Special key space range read crosses modules . Refer to the ` special_key_space_relaxed ' transaction option for more details . " ) <nl> ERROR ( special_keys_no_module_found , 2113 , " Special key space range read does not intersect a module . Refer to the ` special_key_space_relaxed ' transaction option for more details . " ) <nl> + ERROR ( special_keys_write_disabled , 2114 , " Special Key space is not allowed to write by default . Refer to the ` special_key_space_enable_writes ` transaction option for more details . " ) <nl> + ERROR ( special_keys_no_write_module_found , 2115 , " Special key space key or keyrange in set or clear does not intersect a module " ) <nl> + ERROR ( special_keys_cross_module_clear , 2116 , " Special key space clear crosses modules " ) <nl> + ERROR ( special_keys_api_failure , 2117 , " Api call through special keys failed . For more information , call get on special key 0xff0xff / error_message to get a json string of the error message . " ) <nl> <nl> / / 2200 - errors from bindings and official APIs <nl> ERROR ( api_version_unset , 2200 , " API version is not set " ) <nl> mmm a / flow / flow . h <nl> ppp b / flow / flow . h <nl> class FutureStream { <nl> } ; <nl> <nl> template < class Request > <nl> - decltype ( fake < Request > ( ) . reply ) const & getReplyPromise ( Request const & r ) { return r . reply ; } <nl> - <nl> - <nl> + decltype ( std : : declval < Request > ( ) . reply ) const & getReplyPromise ( Request const & r ) { <nl> + return r . reply ; <nl> + } <nl> <nl> / / Neither of these implementations of REPLY_TYPE ( ) works on both MSVC and g + + , so . . . <nl> # ifdef __GNUG__ <nl> - # define REPLY_TYPE ( RequestType ) decltype ( getReplyPromise ( fake < RequestType > ( ) ) . getFuture ( ) . 
getValue ( ) ) <nl> - / / # define REPLY_TYPE ( RequestType ) decltype ( getReplyFuture ( fake < RequestType > ( ) ) . getValue ( ) ) <nl> + # define REPLY_TYPE ( RequestType ) decltype ( getReplyPromise ( std : : declval < RequestType > ( ) ) . getFuture ( ) . getValue ( ) ) <nl> + / / # define REPLY_TYPE ( RequestType ) decltype ( getReplyFuture ( std : : declval < RequestType > ( ) ) . getValue ( ) ) <nl> # else <nl> template < class T > <nl> struct ReplyType { <nl> / / Doing this calculation directly in the return value declaration for PromiseStream < T > : : getReply ( ) <nl> / / breaks IntelliSense in VS2010 ; this is a workaround . <nl> - typedef decltype ( fake < T > ( ) . reply . getFuture ( ) . getValue ( ) ) Type ; <nl> + typedef decltype ( std : : declval < T > ( ) . reply . getFuture ( ) . getValue ( ) ) Type ; <nl> } ; <nl> template < class T > class ReplyPromise ; <nl> template < class T > <nl> mmm a / flow / genericactors . actor . h <nl> ppp b / flow / genericactors . actor . h <nl> <nl> # define GENERICACTORS_ACTOR_H <nl> <nl> # include < list > <nl> + # include < utility > <nl> <nl> # include " flow / flow . h " <nl> # include " flow / Knobs . h " <nl> Future < Void > storeOrThrow ( T & out , Future < Optional < T > > what , Error e = key_not_fo <nl> } <nl> <nl> / / Waits for a future to be ready , and then applies an asynchronous function to it . <nl> - ACTOR template < class T , class F , class U = decltype ( fake < F > ( ) ( fake < T > ( ) ) . getValue ( ) ) > <nl> - Future < U > mapAsync ( Future < T > what , F actorFunc ) <nl> - { <nl> + ACTOR template < class T , class F , class U = decltype ( std : : declval < F > ( ) ( std : : declval < T > ( ) ) . getValue ( ) ) > <nl> + Future < U > mapAsync ( Future < T > what , F actorFunc ) { <nl> T val = wait ( what ) ; <nl> U ret = wait ( actorFunc ( val ) ) ; <nl> return ret ; <nl> std : : vector < Future < std : : invoke_result_t < F , T > > > mapAsync ( std : : vector < Future < T > > <nl> } <nl> <nl> / / maps a stream with an asynchronous function <nl> - ACTOR template < class T , class F , class U = decltype ( fake < F > ( ) ( fake < T > ( ) ) . getValue ( ) ) > <nl> - Future < Void > mapAsync ( FutureStream < T > input , F actorFunc , PromiseStream < U > output ) { <nl> + ACTOR template < class T , class F , class U = decltype ( std : : declval < F > ( ) ( std : : declval < T > ( ) ) . getValue ( ) ) > <nl> + Future < Void > mapAsync ( FutureStream < T > input , F actorFunc , PromiseStream < U > output ) { <nl> state Deque < Future < U > > futures ; <nl> <nl> loop { <nl> Future < T > ioTimeoutError ( Future < T > what , double time ) { <nl> Future < Void > end = lowPriorityDelay ( time ) ; <nl> choose { <nl> when ( T t = wait ( what ) ) { return t ; } <nl> - when ( wait ( end ) ) { <nl> + when ( wait ( end ) ) { <nl> Error err = io_timeout ( ) ; <nl> if ( g_network - > isSimulated ( ) ) { <nl> err = err . asInjectedFault ( ) ; <nl> struct NotifiedInt { <nl> NotifiedInt ( int64_t val = 0 ) : val ( val ) { } <nl> <nl> Future < Void > whenAtLeast ( int64_t limit ) { <nl> - if ( val > = limit ) <nl> - return Void ( ) ; <nl> + if ( val > = limit ) return Void ( ) ; <nl> Promise < Void > p ; <nl> waiting . push ( std : : make_pair ( limit , p ) ) ; <nl> return p . getFuture ( ) ; <nl> mmm a / flow / network . cpp <nl> ppp b / flow / network . 
cpp <nl> TEST_CASE ( " / flow / network / ipaddress " ) { <nl> return Void ( ) ; <nl> } <nl> <nl> - NetworkInfo : : NetworkInfo ( ) : handshakeLock ( new BoundedFlowLock ( FLOW_KNOBS - > UNRESTRICTED_HANDSHAKE_LIMIT , FLOW_KNOBS - > BOUNDED_HANDSHAKE_LIMIT ) ) { } <nl> + NetworkInfo : : NetworkInfo ( ) : handshakeLock ( new FlowLock ( FLOW_KNOBS - > TLS_HANDSHAKE_LIMIT ) ) { } <nl> mmm a / flow / network . h <nl> ppp b / flow / network . h <nl> struct NetworkMetrics { <nl> } <nl> } ; <nl> <nl> - struct BoundedFlowLock ; <nl> + struct FlowLock ; <nl> <nl> struct NetworkInfo { <nl> NetworkMetrics metrics ; <nl> struct NetworkInfo { <nl> double lastAlternativesFailureSkipDelay = 0 ; <nl> <nl> std : : map < std : : pair < IPAddress , uint16_t > , std : : pair < int , double > > serverTLSConnectionThrottler ; <nl> - BoundedFlowLock * handshakeLock ; <nl> + FlowLock * handshakeLock ; <nl> <nl> NetworkInfo ( ) ; <nl> } ; <nl> class INetwork { <nl> virtual void initMetrics ( ) { } <nl> / / Metrics must be initialized after FlowTransport : : createInstance has been called <nl> <nl> - virtual void initTLS ( ) { } <nl> / / TLS must be initialized before using the network <nl> + enum ETLSInitState { NONE = 0 , CONFIG = 1 , CONNECT = 2 , LISTEN = 3 } ; <nl> + virtual void initTLS ( ETLSInitState targetState = CONFIG ) { } <nl> <nl> virtual const TLSConfig & getTLSConfig ( ) const = 0 ; <nl> / / Return the TLS Configuration <nl> mmm a / flow / serialize . h <nl> ppp b / flow / serialize . h <nl> inline typename Archive : : WRITER & operator < < ( Archive & ar , const Item & item ) { <nl> <nl> template < class Archive , class Item > <nl> inline typename Archive : : READER & operator > > ( Archive & ar , Item & item ) { <nl> - load ( ar , item ) ; <nl> + ar . deserialize ( item ) ; <nl> return ar ; <nl> } <nl> <nl> struct _IncludeVersion { <nl> TraceEvent ( SevWarnAlways , " InvalidSerializationVersion " ) . error ( err ) . detailf ( " Version " , " % llx " , v . versionWithFlags ( ) ) ; <nl> throw err ; <nl> } <nl> - if ( v > currentProtocolVersion ) { <nl> - / / For now , no forward compatibility whatsoever is supported . In the future , this check may be weakened for <nl> - / / particular data structures ( e . g . to support mismatches between client and server versions when the client <nl> - / / must deserialize zookeeper and database structures ) <nl> + if ( v > = minInvalidProtocolVersion ) { <nl> + / / Downgrades are only supported for one minor version <nl> auto err = incompatible_protocol_version ( ) ; <nl> TraceEvent ( SevError , " FutureProtocolVersion " ) . error ( err ) . detailf ( " Version " , " % llx " , v . 
versionWithFlags ( ) ) ; <nl> throw err ; <nl> class OverWriter { <nl> } <nl> } ; <nl> <nl> - <nl> - class ArenaReader { <nl> + template < class Impl > <nl> + class _Reader { <nl> public : <nl> static const int isDeserializing = 1 ; <nl> static constexpr bool isSerializing = false ; <nl> - typedef ArenaReader READER ; <nl> - <nl> - const void * readBytes ( int bytes ) { <nl> - const char * b = begin ; <nl> - const char * e = b + bytes ; <nl> - ASSERT ( e < = end ) ; <nl> - begin = e ; <nl> - return b ; <nl> - } <nl> + using READER = Impl ; <nl> <nl> - const void * peekBytes ( int bytes ) const { <nl> - ASSERT ( begin + bytes < = end ) ; <nl> + const void * peekBytes ( int bytes ) const { <nl> + ASSERT ( begin + bytes < = end ) ; <nl> return begin ; <nl> } <nl> <nl> - void serializeBytes ( void * data , int bytes ) { <nl> - memcpy ( data , readBytes ( bytes ) , bytes ) ; <nl> - } <nl> - <nl> - const uint8_t * arenaRead ( int bytes ) { <nl> - return ( const uint8_t * ) readBytes ( bytes ) ; <nl> - } <nl> - <nl> - StringRef arenaReadAll ( ) const { <nl> - return StringRef ( reinterpret_cast < const uint8_t * > ( begin ) , end - begin ) ; <nl> + void serializeBytes ( void * data , int bytes ) { <nl> + memcpy ( data , static_cast < Impl * > ( this ) - > readBytes ( bytes ) , bytes ) ; <nl> } <nl> <nl> template < class T > <nl> void serializeBinaryItem ( T & t ) { <nl> - t = * ( T * ) readBytes ( sizeof ( T ) ) ; <nl> - } <nl> - <nl> - template < class VersionOptions > <nl> - ArenaReader ( Arena const & arena , const StringRef & input , VersionOptions vo ) : m_pool ( arena ) , check ( NULL ) { <nl> - begin = ( const char * ) input . begin ( ) ; <nl> - end = begin + input . size ( ) ; <nl> - vo . read ( * this ) ; <nl> + t = * ( T * ) ( static_cast < Impl * > ( this ) - > readBytes ( sizeof ( T ) ) ) ; <nl> } <nl> <nl> - Arena & arena ( ) { return m_pool ; } <nl> + Arena & arena ( ) { return m_pool ; } <nl> <nl> ProtocolVersion protocolVersion ( ) const { return m_protocolVersion ; } <nl> void setProtocolVersion ( ProtocolVersion pv ) { m_protocolVersion = pv ; } <nl> class ArenaReader { <nl> } <nl> <nl> void rewind ( ) { <nl> - ASSERT ( check ! = NULL ) ; <nl> + ASSERT ( check ! 
= nullptr ) ; <nl> begin = check ; <nl> - check = NULL ; <nl> + check = nullptr ; <nl> } <nl> <nl> - private : <nl> - const char * begin , * end , * check ; <nl> + protected : <nl> + _Reader ( const char * begin , const char * end ) : begin ( begin ) , end ( end ) { } <nl> + _Reader ( const char * begin , const char * end , const Arena & arena ) : begin ( begin ) , end ( end ) , m_pool ( arena ) { } <nl> + <nl> + const char * begin , * end ; <nl> + const char * check = nullptr ; <nl> Arena m_pool ; <nl> ProtocolVersion m_protocolVersion ; <nl> } ; <nl> <nl> - class BinaryReader { <nl> + class ArenaReader : public _Reader < ArenaReader > { <nl> + Optional < ArenaObjectReader > arenaObjectReader ; <nl> + <nl> public : <nl> - static const int isDeserializing = 1 ; <nl> - static constexpr bool isSerializing = false ; <nl> - typedef BinaryReader READER ; <nl> + const void * readBytes ( int bytes ) { <nl> + const char * b = begin ; <nl> + const char * e = b + bytes ; <nl> + ASSERT ( e < = end ) ; <nl> + begin = e ; <nl> + return b ; <nl> + } <nl> <nl> - const void * readBytes ( int bytes ) ; <nl> + const uint8_t * arenaRead ( int bytes ) { <nl> + return ( const uint8_t * ) readBytes ( bytes ) ; <nl> + } <nl> <nl> const void * peekBytes ( int bytes ) const { <nl> ASSERT ( begin + bytes < = end ) ; <nl> return begin ; <nl> } <nl> <nl> - void serializeBytes ( void * data , int bytes ) { <nl> - memcpy ( data , readBytes ( bytes ) , bytes ) ; <nl> + StringRef arenaReadAll ( ) const { <nl> + return StringRef ( reinterpret_cast < const uint8_t * > ( begin ) , end - begin ) ; <nl> } <nl> - <nl> + <nl> + template < class VersionOptions > <nl> + ArenaReader ( Arena const & arena , const StringRef & input , VersionOptions vo ) <nl> + : _Reader ( reinterpret_cast < const char * > ( input . begin ( ) ) , reinterpret_cast < const char * > ( input . end ( ) ) , arena ) { <nl> + vo . read ( * this ) ; <nl> + if ( m_protocolVersion . hasObjectSerializerFlag ( ) ) { <nl> + arenaObjectReader = ArenaObjectReader ( arena , input , vo ) ; <nl> + } <nl> + } <nl> + <nl> template < class T > <nl> - void serializeBinaryItem ( T & t ) { <nl> - t = * ( T * ) readBytes ( sizeof ( T ) ) ; <nl> + void deserialize ( T & t ) { <nl> + if constexpr ( HasFileIdentifier < T > : : value ) { <nl> + if ( arenaObjectReader . present ( ) ) { <nl> + arenaObjectReader . get ( ) . deserialize ( t ) ; <nl> + } else { <nl> + load ( * this , t ) ; <nl> + } <nl> + } else { <nl> + load ( * this , t ) ; <nl> + } <nl> } <nl> + } ; <nl> + <nl> + class BinaryReader : public _Reader < BinaryReader > { <nl> + Optional < ObjectReader > objectReader ; <nl> + <nl> + public : <nl> + const void * readBytes ( int bytes ) ; <nl> <nl> const uint8_t * arenaRead ( int bytes ) { <nl> / / Reads and returns the next bytes . <nl> / / The returned pointer has the lifetime of this . arena ( ) <nl> / / Could be implemented zero - copy if [ begin , end ) was in this . arena ( ) already ; for now is a copy <nl> - if ( ! bytes ) return NULL ; <nl> + if ( ! bytes ) return nullptr ; <nl> uint8_t * dat = new ( arena ( ) ) uint8_t [ bytes ] ; <nl> serializeBytes ( dat , bytes ) ; <nl> return dat ; <nl> } <nl> <nl> - template < class VersionOptions > <nl> - BinaryReader ( const void * data , int length , VersionOptions vo ) { <nl> - begin = ( const char * ) data ; <nl> - end = begin + length ; <nl> - check = nullptr ; <nl> - vo . 
read ( * this ) ; <nl> - } <nl> - template < class VersionOptions > <nl> - BinaryReader ( const StringRef & s , VersionOptions vo ) { begin = ( const char * ) s . begin ( ) ; end = begin + s . size ( ) ; vo . read ( * this ) ; } <nl> - template < class VersionOptions > <nl> - BinaryReader ( const std : : string & v , VersionOptions vo ) { begin = v . c_str ( ) ; end = begin + v . size ( ) ; vo . read ( * this ) ; } <nl> - <nl> - Arena & arena ( ) { return m_pool ; } <nl> - <nl> template < class T , class VersionOptions > <nl> static T fromStringRef ( StringRef sr , VersionOptions vo ) { <nl> T t ; <nl> class BinaryReader { <nl> <nl> bool empty ( ) const { return begin = = end ; } <nl> <nl> - void checkpoint ( ) { <nl> - check = begin ; <nl> + template < class VersionOptions > <nl> + BinaryReader ( const void * data , int length , VersionOptions vo ) <nl> + : _Reader ( reinterpret_cast < const char * > ( data ) , reinterpret_cast < const char * > ( data ) + length ) { <nl> + readVersion ( vo ) ; <nl> } <nl> - <nl> - void rewind ( ) { <nl> - ASSERT ( check ! = nullptr ) ; <nl> - begin = check ; <nl> - check = nullptr ; <nl> + template < class VersionOptions > <nl> + BinaryReader ( const StringRef & s , VersionOptions vo ) <nl> + : _Reader ( reinterpret_cast < const char * > ( s . begin ( ) ) , reinterpret_cast < const char * > ( s . end ( ) ) ) { <nl> + readVersion ( vo ) ; <nl> + } <nl> + template < class VersionOptions > <nl> + BinaryReader ( const std : : string & s , VersionOptions vo ) : _Reader ( s . c_str ( ) , s . c_str ( ) + s . size ( ) ) { <nl> + readVersion ( vo ) ; <nl> } <nl> <nl> + template < class T > <nl> + void deserialize ( T & t ) { <nl> + if constexpr ( HasFileIdentifier < T > : : value ) { <nl> + if ( objectReader . present ( ) ) { <nl> + objectReader . get ( ) . deserialize ( t ) ; <nl> + } else { <nl> + load ( * this , t ) ; <nl> + } <nl> + } else { <nl> + load ( * this , t ) ; <nl> + } <nl> + } <nl> <nl> private : <nl> - const char * begin , * end , * check ; <nl> - Arena m_pool ; <nl> - ProtocolVersion m_protocolVersion ; <nl> + template < class VersionOptions > <nl> + void readVersion ( VersionOptions vo ) { <nl> + vo . read ( * this ) ; <nl> + if ( m_protocolVersion . 
hasObjectSerializerFlag ( ) ) { <nl> + objectReader = ObjectReader ( reinterpret_cast < const uint8_t * > ( begin ) , AssumeVersion ( m_protocolVersion ) ) ; <nl> + } <nl> + } <nl> } ; <nl> <nl> class SendBuffer { <nl> class SendBuffer { <nl> inline uint8_t * data ( ) { return _data ; } <nl> SendBuffer * next ; <nl> int bytes_written , bytes_sent ; <nl> + int bytes_unsent ( ) const { <nl> + return bytes_written - bytes_sent ; <nl> + } <nl> } ; <nl> <nl> struct PacketBuffer : SendBuffer { <nl> private : <nl> int reference_count ; <nl> uint32_t size_ ; <nl> + static constexpr size_t PACKET_BUFFER_MIN_SIZE = 16384 ; <nl> static constexpr size_t PACKET_BUFFER_OVERHEAD = 32 ; <nl> <nl> public : <nl> struct PacketBuffer : SendBuffer { <nl> <nl> public : <nl> static PacketBuffer * create ( size_t size = 0 ) { <nl> - size = std : : max ( size , 4096 - PACKET_BUFFER_OVERHEAD ) ; <nl> - if ( size = = 4096 - PACKET_BUFFER_OVERHEAD ) { <nl> - return new ( FastAllocator < 4096 > : : allocate ( ) ) PacketBuffer { size } ; <nl> + size = std : : max ( size , PACKET_BUFFER_MIN_SIZE - PACKET_BUFFER_OVERHEAD ) ; <nl> + if ( size = = PACKET_BUFFER_MIN_SIZE - PACKET_BUFFER_OVERHEAD ) { <nl> + return new ( FastAllocator < PACKET_BUFFER_MIN_SIZE > : : allocate ( ) ) PacketBuffer { size } ; <nl> } <nl> uint8_t * mem = new uint8_t [ size + PACKET_BUFFER_OVERHEAD ] ; <nl> return new ( mem ) PacketBuffer { size } ; <nl> struct PacketBuffer : SendBuffer { <nl> void addref ( ) { + + reference_count ; } <nl> void delref ( ) { <nl> if ( ! - - reference_count ) { <nl> - if ( size_ = = 4096 - PACKET_BUFFER_OVERHEAD ) { <nl> - FastAllocator < 4096 > : : release ( this ) ; <nl> + if ( size_ = = PACKET_BUFFER_MIN_SIZE - PACKET_BUFFER_OVERHEAD ) { <nl> + FastAllocator < PACKET_BUFFER_MIN_SIZE > : : release ( this ) ; <nl> } else { <nl> delete [ ] this ; <nl> } <nl> new file mode 100644 <nl> index 0000000000 . . 19a3219287 <nl> mmm / dev / null <nl> ppp b / flowbench / BenchIterate . cpp <nl> <nl> + / * <nl> + * BenchIterate . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " benchmark / benchmark . h " <nl> + <nl> + # include " fdbclient / CommitTransaction . h " <nl> + # include " fdbclient / FDBTypes . h " <nl> + # include " fdbclient / MutationList . h " <nl> + # include " flow / Arena . h " <nl> + # include " flow / FastAlloc . h " <nl> + # include " flowbench / GlobalData . h " <nl> + <nl> + void populate ( Standalone < VectorRef < MutationRef > > & mutations , size_t items , size_t size , KeyRef key , ValueRef value ) { <nl> + mutations = Standalone < VectorRef < MutationRef > > { } ; <nl> + mutations . reserve ( mutations . arena ( ) , items ) ; <nl> + for ( int i = 0 ; i < items ; + + i ) { <nl> + mutations . 
emplace_back_deep ( mutations . arena ( ) , MutationRef : : Type : : SetValue , key , value ) ; <nl> + } <nl> + } <nl> + <nl> + void populate ( MutationList & mutations , size_t items , size_t size , KeyRef key , ValueRef value ) { <nl> + mutations = MutationList { } ; <nl> + for ( int i = 0 ; i < items ; + + i ) { <nl> + mutations . push_back_deep ( mutations . arena ( ) , MutationRef ( MutationRef : : Type : : SetValue , key , value ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / Benchmarks iteration over a list of mutations <nl> + template < class ListImpl > <nl> + static void bench_iterate ( benchmark : : State & state ) { <nl> + size_t items = state . range ( 0 ) ; <nl> + size_t size = state . range ( 1 ) ; <nl> + auto kv = getKV ( size , size ) ; <nl> + ListImpl mutations ; <nl> + populate ( mutations , items , size , kv . key , kv . value ) ; <nl> + while ( state . KeepRunning ( ) ) { <nl> + for ( const auto & mutation : mutations ) { <nl> + benchmark : : DoNotOptimize ( mutation ) ; <nl> + } <nl> + } <nl> + state . SetItemsProcessed ( items * static_cast < long > ( state . iterations ( ) ) ) ; <nl> + } <nl> + <nl> + BENCHMARK_TEMPLATE ( bench_iterate , Standalone < VectorRef < MutationRef > > ) <nl> + - > Ranges ( { { 1 , 1 < < 20 } , { 1 , 1 < < 9 } } ) <nl> + - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK_TEMPLATE ( bench_iterate , MutationList ) - > Ranges ( { { 1 , 1 < < 20 } , { 1 , 1 < < 9 } } ) - > ReportAggregatesOnly ( true ) ; <nl> new file mode 100644 <nl> index 0000000000 . . 0223b4e98e <nl> mmm / dev / null <nl> ppp b / flowbench / BenchPopulate . cpp <nl> <nl> + / * <nl> + * BenchPopulate . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " benchmark / benchmark . h " <nl> + <nl> + # include " fdbclient / CommitTransaction . h " <nl> + # include " fdbclient / FDBTypes . h " <nl> + # include " flow / Arena . h " <nl> + # include " flow / FastAlloc . h " <nl> + # include " flowbench / GlobalData . h " <nl> + <nl> + static constexpr bool EMPLACE_BACK = true ; <nl> + static constexpr bool PUSH_BACK = false ; <nl> + <nl> + / / Benchmarks the population of a VectorRef < MutationRef > <nl> + template < bool emplace > <nl> + static void bench_populate ( benchmark : : State & state ) { <nl> + size_t items = state . range ( 0 ) ; <nl> + size_t size = state . range ( 1 ) ; <nl> + auto kv = getKV ( size , size ) ; <nl> + while ( state . KeepRunning ( ) ) { <nl> + Standalone < VectorRef < MutationRef > > mutations ; <nl> + mutations . reserve ( mutations . arena ( ) , items ) ; <nl> + for ( int i = 0 ; i < items ; + + i ) { <nl> + if constexpr ( emplace ) { <nl> + mutations . emplace_back_deep ( mutations . arena ( ) , MutationRef : : Type : : SetValue , kv . key , kv . 
value ) ; <nl> + } else { <nl> + mutations . push_back_deep ( mutations . arena ( ) , MutationRef ( MutationRef : : Type : : SetValue , kv . key , kv . value ) ) ; <nl> + } <nl> + } <nl> + benchmark : : DoNotOptimize ( mutations ) ; <nl> + } <nl> + state . SetItemsProcessed ( items * static_cast < long > ( state . iterations ( ) ) ) ; <nl> + } <nl> + <nl> + BENCHMARK_TEMPLATE ( bench_populate , EMPLACE_BACK ) - > Ranges ( { { 1 , 1 < < 20 } , { 1 , 512 } } ) - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK_TEMPLATE ( bench_populate , PUSH_BACK ) - > Ranges ( { { 1 , 1 < < 20 } , { 1 , 512 } } ) - > ReportAggregatesOnly ( true ) ; <nl> new file mode 100644 <nl> index 0000000000 . . e2d114b93c <nl> mmm / dev / null <nl> ppp b / flowbench / BenchRandom . cpp <nl> <nl> + / * <nl> + * BenchRandom . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " benchmark / benchmark . h " <nl> + <nl> + # include " flow / IRandom . h " <nl> + <nl> + static void bench_random ( benchmark : : State & state ) { <nl> + while ( state . KeepRunning ( ) ) { <nl> + double r = deterministicRandom ( ) - > random01 ( ) ; <nl> + benchmark : : DoNotOptimize ( r ) ; <nl> + } <nl> + state . SetItemsProcessed ( static_cast < long > ( state . iterations ( ) ) ) ; <nl> + } <nl> + <nl> + BENCHMARK ( bench_random ) - > ReportAggregatesOnly ( true ) ; <nl> new file mode 100644 <nl> index 0000000000 . . 4987c4f857 <nl> mmm / dev / null <nl> ppp b / flowbench / BenchRef . cpp <nl> <nl> + / * <nl> + * BenchRef . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " benchmark / benchmark . h " <nl> + <nl> + # include " flow / FastAlloc . h " <nl> + # include " flow / FastRef . 
h " <nl> + <nl> + # include < memory > <nl> + <nl> + struct Empty : public ReferenceCounted < Empty > , public FastAllocated < Empty > { } ; <nl> + <nl> + enum class RefType { <nl> + RawPointer , <nl> + UniquePointer , <nl> + SharedPointer , <nl> + FlowReference , <nl> + } ; <nl> + <nl> + template < RefType refType > <nl> + class Factory { } ; <nl> + <nl> + template < > <nl> + struct Factory < RefType : : RawPointer > { <nl> + static Empty * create ( ) { return new Empty { } ; } <nl> + static void cleanup ( Empty * empty ) { delete empty ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct Factory < RefType : : UniquePointer > { <nl> + static std : : unique_ptr < Empty > create ( ) { return std : : make_unique < Empty > ( ) ; } <nl> + static void cleanup ( const std : : unique_ptr < Empty > & ) { } <nl> + } ; <nl> + <nl> + template < > <nl> + struct Factory < RefType : : SharedPointer > { <nl> + static std : : shared_ptr < Empty > create ( ) { return std : : make_shared < Empty > ( ) ; } <nl> + static void cleanup ( const std : : shared_ptr < Empty > & ) { } <nl> + } ; <nl> + <nl> + template < > <nl> + struct Factory < RefType : : FlowReference > { <nl> + static Reference < Empty > create ( ) { return Reference < Empty > ( new Empty { } ) ; } <nl> + static void cleanup ( const Reference < Empty > & ) { } <nl> + } ; <nl> + <nl> + template < RefType refType > <nl> + static void bench_ref_create_and_destroy ( benchmark : : State & state ) { <nl> + while ( state . KeepRunning ( ) ) { <nl> + auto ptr = Factory < refType > : : create ( ) ; <nl> + benchmark : : DoNotOptimize ( ptr ) ; <nl> + Factory < refType > : : cleanup ( ptr ) ; <nl> + } <nl> + state . SetItemsProcessed ( static_cast < long > ( state . iterations ( ) ) ) ; <nl> + } <nl> + <nl> + template < RefType refType > <nl> + static void bench_ref_copy ( benchmark : : State & state ) { <nl> + auto ptr = Factory < refType > : : create ( ) ; <nl> + while ( state . KeepRunning ( ) ) { <nl> + auto ptr2 = ptr ; <nl> + benchmark : : DoNotOptimize ( ptr2 ) ; <nl> + } <nl> + Factory < refType > : : cleanup ( ptr ) ; <nl> + state . SetItemsProcessed ( static_cast < long > ( state . iterations ( ) ) ) ; <nl> + } <nl> + <nl> + BENCHMARK_TEMPLATE ( bench_ref_create_and_destroy , RefType : : RawPointer ) - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK_TEMPLATE ( bench_ref_create_and_destroy , RefType : : UniquePointer ) - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK_TEMPLATE ( bench_ref_create_and_destroy , RefType : : SharedPointer ) - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK_TEMPLATE ( bench_ref_create_and_destroy , RefType : : FlowReference ) - > ReportAggregatesOnly ( true ) ; <nl> + <nl> + BENCHMARK_TEMPLATE ( bench_ref_copy , RefType : : RawPointer ) - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK_TEMPLATE ( bench_ref_copy , RefType : : SharedPointer ) - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK_TEMPLATE ( bench_ref_copy , RefType : : FlowReference ) - > ReportAggregatesOnly ( true ) ; <nl> new file mode 100644 <nl> index 0000000000 . . 6f77f7ca6f <nl> mmm / dev / null <nl> ppp b / flowbench / BenchStream . actor . cpp <nl> <nl> + / * <nl> + * BenchStream . actor . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . 
<nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " benchmark / benchmark . h " <nl> + <nl> + # include " fdbclient / FDBTypes . h " <nl> + # include " flow / flow . h " <nl> + # include " flow / TLSConfig . actor . h " <nl> + # include " flow / ThreadHelper . actor . h " <nl> + # include " flow / network . h " <nl> + # include " flowbench / GlobalData . h " <nl> + <nl> + # include < thread > <nl> + <nl> + # include " flow / actorcompiler . h " / / This must be the last # include . <nl> + <nl> + ACTOR static Future < Void > benchStreamActor ( benchmark : : State * benchState ) { <nl> + state size_t items = benchState - > range ( 0 ) ; <nl> + size_t size = benchState - > range ( 1 ) ; <nl> + state KeyRef key = getKey ( size ) ; <nl> + state PromiseStream < Key > stream ; <nl> + state int i ; <nl> + while ( benchState - > KeepRunning ( ) ) { <nl> + for ( i = 0 ; i < items ; + + i ) { <nl> + stream . send ( key ) ; <nl> + } <nl> + for ( i = 0 ; i < items ; + + i ) { <nl> + Key receivedKey = waitNext ( stream . getFuture ( ) ) ; <nl> + benchmark : : DoNotOptimize ( receivedKey ) ; <nl> + } <nl> + } <nl> + benchState - > SetItemsProcessed ( items * static_cast < long > ( benchState - > iterations ( ) ) ) ; <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + static void bench_stream ( benchmark : : State & benchState ) { <nl> + onMainThread ( [ & benchState ] ( ) { return benchStreamActor ( & benchState ) ; } ) . blockUntilReady ( ) ; <nl> + } <nl> + <nl> + BENCHMARK ( bench_stream ) - > Ranges ( { { 1 , 1 < < 16 } , { 1 , 1 < < 16 } } ) - > ReportAggregatesOnly ( true ) ; <nl> new file mode 100644 <nl> index 0000000000 . . c97bf66304 <nl> mmm / dev / null <nl> ppp b / flowbench / BenchTimer . cpp <nl> <nl> + / * <nl> + * BenchTimer . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " benchmark / benchmark . h " <nl> + <nl> + # include " flow / Platform . h " <nl> + <nl> + static void bench_timer ( benchmark : : State & state ) { <nl> + while ( state . KeepRunning ( ) ) { <nl> + double time = timer ( ) ; <nl> + benchmark : : DoNotOptimize ( time ) ; <nl> + } <nl> + state . SetItemsProcessed ( static_cast < long > ( state . 
iterations ( ) ) ) ; <nl> + } <nl> + <nl> + static void bench_timer_monotonic ( benchmark : : State & state ) { <nl> + while ( state . KeepRunning ( ) ) { <nl> + double time = timer_monotonic ( ) ; <nl> + benchmark : : DoNotOptimize ( time ) ; <nl> + } <nl> + state . SetItemsProcessed ( static_cast < long > ( state . iterations ( ) ) ) ; <nl> + } <nl> + <nl> + BENCHMARK ( bench_timer ) - > ReportAggregatesOnly ( true ) ; <nl> + BENCHMARK ( bench_timer_monotonic ) - > ReportAggregatesOnly ( true ) ; <nl> new file mode 100644 <nl> index 0000000000 . . 2c73605cc5 <nl> mmm / dev / null <nl> ppp b / flowbench / CMakeLists . txt <nl> <nl> + set ( FLOWBENCH_SRCS <nl> + flowbench . actor . cpp <nl> + BenchIterate . cpp <nl> + BenchPopulate . cpp <nl> + BenchRandom . cpp <nl> + BenchRef . cpp <nl> + BenchStream . actor . cpp <nl> + BenchTimer . cpp <nl> + GlobalData . h <nl> + GlobalData . cpp ) <nl> + <nl> + project ( flowbench ) <nl> + # include the configurations from benchmark . cmake <nl> + configure_file ( benchmark . cmake googlebenchmark - download / CMakeLists . txt ) <nl> + # executing the configuration step <nl> + execute_process ( <nl> + COMMAND $ { CMAKE_COMMAND } - G " $ { CMAKE_GENERATOR } " . <nl> + RESULT_VARIABLE results <nl> + WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - download <nl> + ) <nl> + # checking if the configuration step passed <nl> + if ( results ) <nl> + message ( FATAL_ERROR " Configuration step for Benchmark has Failed . $ { results } " ) <nl> + endif ( ) <nl> + # executing the build step <nl> + execute_process ( <nl> + COMMAND $ { CMAKE_COMMAND } - - build . - - config Release <nl> + RESULT_VARIABLE results <nl> + WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - download <nl> + ) <nl> + # checking if the build step passed <nl> + if ( results ) <nl> + message ( FATAL_ERROR " Build step for Benchmark has Failed . $ { results } " ) <nl> + endif ( ) <nl> + add_subdirectory ( <nl> + $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - src <nl> + $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - build <nl> + EXCLUDE_FROM_ALL <nl> + ) <nl> + include_directories ( $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - src / include ) <nl> + add_flow_target ( EXECUTABLE NAME flowbench SRCS $ { FLOWBENCH_SRCS } ) <nl> + target_link_libraries ( flowbench benchmark pthread flow fdbclient ) <nl> new file mode 100644 <nl> index 0000000000 . . 659d4150e6 <nl> mmm / dev / null <nl> ppp b / flowbench / GlobalData . cpp <nl> <nl> + / * <nl> + * GlobalData . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " fdbclient / FDBTypes . 
h " <nl> + <nl> + static constexpr size_t globalDataSize = 1 < < 16 ; <nl> + static const uint8_t * globalData = nullptr ; <nl> + <nl> + static inline void initGlobalData ( ) { <nl> + if ( ! globalData ) { <nl> + globalData = static_cast < const uint8_t * > ( allocateFast ( globalDataSize ) ) ; <nl> + } <nl> + } <nl> + <nl> + KeyValueRef getKV ( size_t keySize , size_t valueSize ) { <nl> + initGlobalData ( ) ; <nl> + ASSERT ( keySize + valueSize < = globalDataSize ) ; <nl> + return KeyValueRef ( KeyRef ( globalData , keySize ) , ValueRef ( globalData + keySize , valueSize ) ) ; <nl> + } <nl> + <nl> + KeyRef getKey ( size_t keySize ) { <nl> + initGlobalData ( ) ; <nl> + ASSERT ( keySize ) ; <nl> + return KeyRef ( globalData , keySize ) ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . 11b956c029 <nl> mmm / dev / null <nl> ppp b / flowbench / GlobalData . h <nl> <nl> + / * <nl> + * GlobalData . h <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # ifndef __FDBBENCH_GLOBALDATA_H__ <nl> + # define __FDBBENCH_GLOBALDATA_H__ <nl> + <nl> + # pragma once <nl> + <nl> + # include " fdbclient / FDBTypes . h " <nl> + <nl> + KeyValueRef getKV ( size_t keySize , size_t valueSize ) ; <nl> + KeyRef getKey ( size_t keySize ) ; <nl> + <nl> + # endif <nl> new file mode 100644 <nl> index 0000000000 . . 1f823b2d4d <nl> mmm / dev / null <nl> ppp b / flowbench / README . md <nl> <nl> + Summary <nl> + = = = = = = = <nl> + <nl> + ` flowbench ` is an executable that can be used to microbenchmark parts of the FoundationDB code . The goal is to make it easy to test the performance of various sub - millisecond operations using ` flow ` and ` fdbrpc ` . Specifically , this tool can be used to : <nl> + <nl> + - Test the performance effects of changes to the actor compiler or to the ` flow ` and ` fdbrpc ` libraries <nl> + - Test the performance of various uses of the ` flow ` and ` fdbrpc ` libraries <nl> + - Find areas for improvement in the ` flow ` and ` fdbrpc ` libraries <nl> + - Compare ` flow ` / ` fdbrpc ` primitives to alternatives provided by the standard library or other third - party libraries . <nl> + <nl> + Usage <nl> + = = = = = <nl> + <nl> + - To build the ` flowbench ` executable , add ` - DBUILD_FLOWBENCH = ON ` to your cmake command . <nl> + - Then you can run ` bin / flowbench - - help ` to see possible uses of ` flowbench ` . <nl> + - Running ` bin / flowbench ` directly will run all registered benchmarks , but you may want to limit your run to a subset of benchmarks . 
This can be done by running ` bin / flowbench - - benchmark_filter = < regex > ` <nl> + - All benchmark names can be listed with ` bin / flowbench - - benchmark_list_tests ` <nl> + - Example output : <nl> + <nl> + ` ` ` <nl> + $ bin / flowbench - - benchmark_filter = bench_ref <nl> + 2020 - 08 - 04 21 : 49 : 40 <nl> + Running bin / flowbench <nl> + Run on ( 7 X 2904 MHz CPU s ) <nl> + CPU Caches : <nl> + L1 Data 32 KiB ( x7 ) <nl> + L1 Instruction 32 KiB ( x7 ) <nl> + L2 Unified 256 KiB ( x7 ) <nl> + L3 Unified 12288 KiB ( x1 ) <nl> + Load Average : 0 . 15 , 0 . 15 , 0 . 72 <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + Benchmark Time CPU Iterations UserCounters . . . <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + bench_ref_create_and_destroy < RefType : : RawPointer > 4 . 90 ns 4 . 90 ns 116822124 items_per_second = 203 . 88M / s <nl> + bench_ref_create_and_destroy < RefType : : UniquePointer > 4 . 94 ns 4 . 94 ns 141101924 items_per_second = 202 . 555M / s <nl> + bench_ref_create_and_destroy < RefType : : SharedPointer > 42 . 5 ns 42 . 5 ns 13802909 items_per_second = 23 . 531M / s <nl> + bench_ref_create_and_destroy < RefType : : FlowReference > 5 . 05 ns 5 . 05 ns 100000000 items_per_second = 197 . 955M / s <nl> + bench_ref_copy < RefType : : RawPointer > 1 . 15 ns 1 . 15 ns 612121585 items_per_second = 871 . 218M / s <nl> + bench_ref_copy < RefType : : SharedPointer > 10 . 0 ns 10 . 0 ns 67553102 items_per_second = 99 . 8113M / s <nl> + bench_ref_copy < RefType : : FlowReference > 2 . 33 ns 2 . 33 ns 292317474 items_per_second = 428 . 507M / s <nl> + ` ` ` <nl> + - More detailed documentation can be found at https : / / github . com / google / benchmark <nl> + <nl> + Existing Benchmarks <nl> + = = = = = = = = = = = = = = = = = = = <nl> + - ` bench_populate ` measures the population of a vector of mutations <nl> + - ` bench_ref ` compares the performance of the ` flow ` ` Reference ` type to other pointer types <nl> + - ` bench_iterate ` measures iteration over a list of mutations <nl> + - ` bench_stream ` measures the performance of writing to and reading from a ` PromiseStream ` <nl> + - ` bench_random ` measures the performance of ` DeterministicRandom ` . <nl> + - ` bench_timer ` measures the perforamnce of FoundationDB timers . <nl> + <nl> + Future use cases <nl> + = = = = = = = = = = = = = = = = <nl> + <nl> + - Benchmark the overhead of sending and receiving messages through ` FlowTransport ` <nl> + - Benchmark the performance of serializing / deserializing various types <nl> new file mode 100644 <nl> index 0000000000 . . c981e61433 <nl> mmm / dev / null <nl> ppp b / flowbench / benchmark . cmake <nl> <nl> + project ( googlebenchmark - download NONE ) <nl> + <nl> + include ( ExternalProject ) <nl> + ExternalProject_Add ( googlebenchmark <nl> + GIT_REPOSITORY https : / / github . com / google / benchmark . git <nl> + GIT_TAG 8039b4030795b1c9b8cedb78e3a2a6fb89574b6e # v1 . 5 . 
1 <nl> + SOURCE_DIR " $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - src " <nl> + BINARY_DIR " $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - build " <nl> + CMAKE_ARGS " - DCMAKE_BUILD_TYPE = Release - DBENCHMARK_ENABLE_LTO = true " <nl> + CONFIGURE_COMMAND " " <nl> + BUILD_COMMAND " " <nl> + INSTALL_COMMAND " " <nl> + TEST_COMMAND " " <nl> + ) <nl> + <nl> + include ( ExternalProject ) <nl> + ExternalProject_Add ( googletest DEPENDS googlebenchmark <nl> + GIT_REPOSITORY https : / / github . com / google / googletest . git <nl> + GIT_TAG 2fe3bd994b3189899d93f1d5a881e725e046fdc2 # release - 1 . 8 . 1 <nl> + SOURCE_DIR " $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - src / googletest " <nl> + BINARY_DIR " $ { CMAKE_CURRENT_BINARY_DIR } / googlebenchmark - build / googletest " <nl> + CONFIGURE_COMMAND " " <nl> + BUILD_COMMAND " " <nl> + INSTALL_COMMAND " " <nl> + TEST_COMMAND " " <nl> + ) <nl> new file mode 100644 <nl> index 0000000000 . . 951bfb789f <nl> mmm / dev / null <nl> ppp b / flowbench / flowbench . actor . cpp <nl> <nl> + / * <nl> + * flowbench . actor . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include " benchmark / benchmark . h " <nl> + # include " fdbclient / NativeAPI . actor . h " <nl> + # include " fdbclient / ThreadSafeTransaction . h " <nl> + # include " flow / ThreadHelper . actor . h " <nl> + # include < thread > <nl> + <nl> + ACTOR template < class T > <nl> + Future < T > stopNetworkAfter ( Future < T > what ) { <nl> + try { <nl> + T t = wait ( what ) ; <nl> + g_network - > stop ( ) ; <nl> + return t ; <nl> + } catch ( . . . ) { <nl> + g_network - > stop ( ) ; <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + int main ( int argc , char * * argv ) { <nl> + benchmark : : Initialize ( & argc , argv ) ; <nl> + if ( benchmark : : ReportUnrecognizedArguments ( argc , argv ) ) { <nl> + return 1 ; <nl> + } <nl> + setupNetwork ( ) ; <nl> + Promise < Void > benchmarksDone ; <nl> + std : : thread benchmarkThread ( [ & ] ( ) { <nl> + benchmark : : RunSpecifiedBenchmarks ( ) ; <nl> + onMainThreadVoid ( [ & ] ( ) { benchmarksDone . send ( Void ( ) ) ; } , nullptr ) ; <nl> + } ) ; <nl> + auto f = stopNetworkAfter ( benchmarksDone . getFuture ( ) ) ; <nl> + runNetwork ( ) ; <nl> + benchmarkThread . join ( ) ; <nl> + } <nl> mmm a / packaging / msi / FDBInstaller . wxs <nl> ppp b / packaging / msi / FDBInstaller . wxs <nl> <nl> <nl> < Wix xmlns = ' http : / / schemas . microsoft . com / wix / 2006 / wi ' > <nl> < Product Name = ' $ ( var . Title ) ' <nl> - Id = ' { C2791390 - 0993 - 4F6B - 9708 - ED2A4558A013 } ' <nl> + Id = ' { 409BDCD0 - ECF7 - 4CCA - A3F9 - EEEAF0C79A42 } ' <nl> UpgradeCode = ' { A95EA002 - 686E - 4164 - 8356 - C715B7F8B1C8 } ' <nl> Version = ' $ ( var . 
Version ) ' <nl> Manufacturer = ' $ ( var . Manufacturer ) ' <nl> mmm a / tests / CMakeLists . txt <nl> ppp b / tests / CMakeLists . txt <nl> if ( WITH_PYTHON ) <nl> add_fdb_test ( TEST_FILES rare / ConflictRangeRYOWCheck . toml ) <nl> add_fdb_test ( TEST_FILES rare / CycleRollbackClogged . toml ) <nl> add_fdb_test ( TEST_FILES rare / CycleWithKills . toml ) <nl> + add_fdb_test ( TEST_FILES rare / Downgrade . toml ) <nl> add_fdb_test ( TEST_FILES rare / FuzzTest . toml ) <nl> add_fdb_test ( TEST_FILES rare / InventoryTestHeavyWrites . toml ) <nl> add_fdb_test ( TEST_FILES rare / LargeApiCorrectness . toml ) <nl> if ( WITH_PYTHON ) <nl> TEST_FILES restarting / from_7 . 0 . 0 / ConfigureTestRestart - 1 . txt <nl> restarting / from_7 . 0 . 0 / ConfigureTestRestart - 2 . txt ) <nl> add_fdb_test ( <nl> - TEST_FILES restarting / CycleTestRestart - 1 . txt <nl> - restarting / CycleTestRestart - 2 . txt ) <nl> + TEST_FILES restarting / from_5 . 0 . 0 / CycleTestRestart - 1 . txt <nl> + restarting / from_5 . 0 . 0 / CycleTestRestart - 2 . txt ) <nl> add_fdb_test ( <nl> - TEST_FILES restarting / StorefrontTestRestart - 1 . txt <nl> - restarting / StorefrontTestRestart - 2 . txt ) <nl> + TEST_FILES restarting / from_5 . 0 . 0 / StorefrontTestRestart - 1 . txt <nl> + restarting / from_5 . 0 . 0 / StorefrontTestRestart - 2 . txt ) <nl> add_fdb_test ( <nl> TEST_FILES restarting / from_6 . 2 . 0 / SnapTestAttrition - 1 . txt <nl> restarting / from_6 . 2 . 0 / SnapTestAttrition - 2 . txt ) <nl> if ( WITH_PYTHON ) <nl> add_fdb_test ( <nl> TEST_FILES restarting / from_5 . 2 . 0 / ClientTransactionProfilingCorrectness - 1 . txt <nl> restarting / from_5 . 2 . 0 / ClientTransactionProfilingCorrectness - 2 . txt ) <nl> + add_fdb_test ( <nl> + TEST_FILES restarting / to_6 . 3 . 5 / CycleTestRestart - 1 . txt <nl> + restarting / to_6 . 3 . 5 / CycleTestRestart - 2 . txt ) <nl> add_fdb_test ( TEST_FILES slow / ApiCorrectness . toml ) <nl> add_fdb_test ( TEST_FILES slow / ApiCorrectnessAtomicRestore . toml ) <nl> add_fdb_test ( TEST_FILES slow / ApiCorrectnessSwitchover . toml ) <nl> new file mode 100644 <nl> index 0000000000 . . 9b18ea40f4 <nl> mmm / dev / null <nl> ppp b / tests / rare / Downgrade . toml <nl> <nl> + [ [ test ] ] <nl> + testTitle = ' Downgrade ' <nl> + <nl> + [ [ test . workload ] ] <nl> + testName = ' Downgrade ' <nl> + oldKey = ' oldKey ' <nl> + newKey = ' newKey ' <nl> similarity index 100 % <nl> rename from tests / restarting / CycleTestRestart - 1 . txt <nl> rename to tests / restarting / from_5 . 0 . 0 / CycleTestRestart - 1 . txt <nl> similarity index 100 % <nl> rename from tests / restarting / CycleTestRestart - 2 . txt <nl> rename to tests / restarting / from_5 . 0 . 0 / CycleTestRestart - 2 . txt <nl> similarity index 100 % <nl> rename from tests / restarting / StorefrontTestRestart - 1 . txt <nl> rename to tests / restarting / from_5 . 0 . 0 / StorefrontTestRestart - 1 . txt <nl> similarity index 100 % <nl> rename from tests / restarting / StorefrontTestRestart - 2 . txt <nl> rename to tests / restarting / from_5 . 0 . 0 / StorefrontTestRestart - 2 . txt <nl> new file mode 100644 <nl> index 0000000000 . . 647c2f3fe3 <nl> mmm / dev / null <nl> ppp b / tests / restarting / to_6 . 3 . 5 / CycleTestRestart - 1 . txt <nl> <nl> + testTitle = Clogged <nl> + clearAfterTest = false <nl> + testName = Cycle <nl> + transactionsPerSecond = 500 . 0 <nl> + nodeCount = 2500 <nl> + testDuration = 10 . 0 <nl> + expectedRate = 0 <nl> + <nl> + testName = RandomClogging <nl> + testDuration = 10 . 
0 <nl> + <nl> + testName = Rollback <nl> + meanDelay = 10 . 0 <nl> + testDuration = 10 . 0 <nl> + <nl> + testName = Attrition <nl> + machinesToKill = 10 <nl> + machinesToLeave = 3 <nl> + reboot = true <nl> + testDuration = 10 . 0 <nl> + <nl> + testName = Attrition <nl> + machinesToKill = 10 <nl> + machinesToLeave = 3 <nl> + reboot = true <nl> + testDuration = 10 . 0 <nl> + <nl> + testName = SaveAndKill <nl> + restartInfoLocation = simfdb / restartInfo . ini <nl> + testDuration = 10 . 0 <nl> new file mode 100644 <nl> index 0000000000 . . 7d498f2be1 <nl> mmm / dev / null <nl> ppp b / tests / restarting / to_6 . 3 . 5 / CycleTestRestart - 2 . txt <nl> <nl> + testTitle = Clogged <nl> + runSetup = false <nl> + testName = Cycle <nl> + transactionsPerSecond = 2500 . 0 <nl> + nodeCount = 2500 <nl> + testDuration = 10 . 0 <nl> + expectedRate = 0 <nl> + <nl> + testName = RandomClogging <nl> + testDuration = 10 . 0 <nl> + <nl> + testName = Rollback <nl> + meanDelay = 10 . 0 <nl> + testDuration = 10 . 0 <nl> + <nl> + testName = Attrition <nl> + machinesToKill = 10 <nl> + machinesToLeave = 3 <nl> + reboot = true <nl> + testDuration = 10 . 0 <nl> + <nl> + testName = Attrition <nl> + machinesToKill = 10 <nl> + machinesToLeave = 3 <nl> + reboot = true <nl> + testDuration = 10 . 0 <nl> new file mode 100644 <nl> index 0000000000 . . 4c151da97a <nl> mmm / dev / null <nl> ppp b / versions . target <nl> <nl> + < ? xml version = " 1 . 0 " ? > <nl> + < Project xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < Version > 6 . 2 . 25 < / Version > <nl> + < PackageName > 6 . 2 < / PackageName > <nl> + < / PropertyGroup > <nl> + < / Project > <nl> | merge master branch | apple/foundationdb | 79ce16650d58f274589cb49ce824d5576ef3fbd4 | 2020-08-12T02:22:10Z |
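The flow/ changes in the apple/foundationdb merge row above lean on two low-level threading patterns: a spin lock padded out to a full cache line so neighbouring locks do not false-share, and a `startThread()` overload that forwards an explicit stack size to pthreads (falling back to the default if setting it fails). The following is a minimal standalone sketch of those two patterns, not the repository's actual code; `CACHE_LINE_SIZE`, `SpinLock`, and `start_thread` are illustrative names, and the 64-byte line size is an assumption.

```cpp
// Sketch only: cache-line-padded spin lock + pthread stack-size plumbing.
#include <atomic>
#include <array>
#include <cstddef>
#include <cstdint>
#include <pthread.h>

constexpr std::size_t CACHE_LINE_SIZE = 64;  // assumed; ideally derived from the target CPU

class alignas(CACHE_LINE_SIZE) SpinLock {
public:
    void enter() { while (flag.test_and_set(std::memory_order_acquire)) {} }
    void leave() { flag.clear(std::memory_order_release); }

private:
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
    // Pad the object to a full cache line so two locks touched by different
    // threads never invalidate each other's line (false sharing).
    std::array<uint8_t, CACHE_LINE_SIZE - sizeof(std::atomic_flag)> padding;
};

// Start a thread with an optional non-default stack size; stackSize == 0
// keeps the platform default. Failure to set the size is non-fatal: the
// thread is still created with the default stack.
pthread_t start_thread(void* (*func)(void*), void* arg, int stackSize = 0) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    if (stackSize != 0) {
        pthread_attr_setstacksize(&attr, static_cast<std::size_t>(stackSize));
    }
    pthread_t t;
    pthread_create(&t, &attr, func, arg);
    pthread_attr_destroy(&attr);
    return t;
}
```

The padding member sizes the lock to exactly one cache line, which is the same intent as the `alignas`/padding added to `ThreadSpinLock` in the diff above.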
mmm a / src / mark - compact . cc <nl> ppp b / src / mark - compact . cc <nl> void MarkCompactCollector : : Prepare ( ) { <nl> space - > PrepareForMarkCompact ( compacting_collection_ ) ; <nl> } <nl> <nl> - Counters : : global_objects . Set ( 0 ) ; <nl> - <nl> # ifdef DEBUG <nl> live_bytes_ = 0 ; <nl> live_young_objects_ = 0 ; <nl> class MarkingVisitor : public ObjectVisitor { <nl> void VisitUnmarkedObject ( HeapObject * obj ) { <nl> # ifdef DEBUG <nl> ASSERT ( Heap : : Contains ( obj ) ) ; <nl> - MarkCompactCollector : : UpdateLiveObjectCount ( obj ) ; <nl> ASSERT ( ! obj - > IsMarked ( ) ) ; <nl> # endif <nl> Map * map = obj - > map ( ) ; <nl> - obj - > SetMark ( ) ; <nl> - MarkCompactCollector : : tracer ( ) - > increment_marked_count ( ) ; <nl> + MarkCompactCollector : : SetMark ( obj ) ; <nl> / / Mark the map pointer and the body . <nl> MarkCompactCollector : : MarkObject ( map ) ; <nl> obj - > IterateBody ( map - > instance_type ( ) , obj - > SizeFromMap ( map ) , this ) ; <nl> class RootMarkingVisitor : public ObjectVisitor { <nl> HeapObject * object = ShortCircuitConsString ( p ) ; <nl> if ( object - > IsMarked ( ) ) return ; <nl> <nl> - # ifdef DEBUG <nl> - MarkCompactCollector : : UpdateLiveObjectCount ( object ) ; <nl> - # endif <nl> Map * map = object - > map ( ) ; <nl> / / Mark the object . <nl> - object - > SetMark ( ) ; <nl> - MarkCompactCollector : : tracer ( ) - > increment_marked_count ( ) ; <nl> + MarkCompactCollector : : SetMark ( object ) ; <nl> / / Mark the map pointer and body , and push them on the marking stack . <nl> MarkCompactCollector : : MarkObject ( map ) ; <nl> object - > IterateBody ( map - > instance_type ( ) , object - > SizeFromMap ( map ) , <nl> class SymbolTableCleaner : public ObjectVisitor { <nl> <nl> <nl> void MarkCompactCollector : : MarkUnmarkedObject ( HeapObject * object ) { <nl> - # ifdef DEBUG <nl> - UpdateLiveObjectCount ( object ) ; <nl> - # endif <nl> ASSERT ( ! object - > IsMarked ( ) ) ; <nl> - if ( object - > IsJSGlobalObject ( ) ) Counters : : global_objects . Increment ( ) ; <nl> - <nl> - tracer_ - > increment_marked_count ( ) ; <nl> ASSERT ( Heap : : Contains ( object ) ) ; <nl> if ( object - > IsMap ( ) ) { <nl> Map * map = Map : : cast ( object ) ; <nl> if ( FLAG_cleanup_caches_in_maps_at_gc ) { <nl> map - > ClearCodeCache ( ) ; <nl> } <nl> - map - > SetMark ( ) ; <nl> + SetMark ( map ) ; <nl> if ( FLAG_collect_maps & & <nl> map - > instance_type ( ) > = FIRST_JS_OBJECT_TYPE & & <nl> map - > instance_type ( ) < = JS_FUNCTION_TYPE ) { <nl> void MarkCompactCollector : : MarkUnmarkedObject ( HeapObject * object ) { <nl> marking_stack . Push ( map ) ; <nl> } <nl> } else { <nl> - object - > SetMark ( ) ; <nl> + SetMark ( object ) ; <nl> marking_stack . Push ( object ) ; <nl> } <nl> } <nl> void MarkCompactCollector : : MarkDescriptorArray ( <nl> if ( descriptors - > IsMarked ( ) ) return ; <nl> / / Empty descriptor array is marked as a root before any maps are marked . <nl> ASSERT ( descriptors ! = Heap : : empty_descriptor_array ( ) ) ; <nl> - <nl> - tracer_ - > increment_marked_count ( ) ; <nl> - # ifdef DEBUG <nl> - UpdateLiveObjectCount ( descriptors ) ; <nl> - # endif <nl> - descriptors - > SetMark ( ) ; <nl> + SetMark ( descriptors ) ; <nl> <nl> FixedArray * contents = reinterpret_cast < FixedArray * > ( <nl> descriptors - > get ( DescriptorArray : : kContentArrayIndex ) ) ; <nl> void MarkCompactCollector : : MarkDescriptorArray ( <nl> ASSERT ( ! 
contents - > IsMarked ( ) ) ; <nl> ASSERT ( contents - > IsFixedArray ( ) ) ; <nl> ASSERT ( contents - > length ( ) > = 2 ) ; <nl> - tracer_ - > increment_marked_count ( ) ; <nl> - # ifdef DEBUG <nl> - UpdateLiveObjectCount ( contents ) ; <nl> - # endif <nl> - contents - > SetMark ( ) ; <nl> + SetMark ( contents ) ; <nl> / / Contents contains ( value , details ) pairs . If the details say <nl> / / that the type of descriptor is MAP_TRANSITION , CONSTANT_TRANSITION , <nl> / / or NULL_DESCRIPTOR , we don ' t mark the value as live . Only for <nl> void MarkCompactCollector : : MarkDescriptorArray ( <nl> if ( details . type ( ) < FIRST_PHANTOM_PROPERTY_TYPE ) { <nl> HeapObject * object = reinterpret_cast < HeapObject * > ( contents - > get ( i ) ) ; <nl> if ( object - > IsHeapObject ( ) & & ! object - > IsMarked ( ) ) { <nl> - tracer_ - > increment_marked_count ( ) ; <nl> - # ifdef DEBUG <nl> - UpdateLiveObjectCount ( object ) ; <nl> - # endif <nl> - object - > SetMark ( ) ; <nl> + SetMark ( object ) ; <nl> marking_stack . Push ( object ) ; <nl> } <nl> } <nl> void MarkCompactCollector : : ProcessRoots ( RootMarkingVisitor * visitor ) { <nl> SymbolTable * symbol_table = SymbolTable : : cast ( Heap : : symbol_table ( ) ) ; <nl> / / 1 . Mark the prefix of the symbol table gray . <nl> symbol_table - > IteratePrefix ( visitor ) ; <nl> - # ifdef DEBUG <nl> - UpdateLiveObjectCount ( symbol_table ) ; <nl> - # endif <nl> / / 2 . Mark the symbol table black ( ie , do not push it on the marking stack <nl> / / or mark it overflowed ) . <nl> - symbol_table - > SetMark ( ) ; <nl> - tracer_ - > increment_marked_count ( ) ; <nl> + SetMark ( symbol_table ) ; <nl> <nl> / / There may be overflowed objects in the heap . Visit them now . <nl> while ( marking_stack . overflowed ( ) ) { <nl> mmm a / src / mark - compact . h <nl> ppp b / src / mark - compact . h <nl> class MarkCompactCollector : public AllStatic { <nl> static void MarkUnmarkedObject ( HeapObject * obj ) ; <nl> <nl> static inline void MarkObject ( HeapObject * obj ) { <nl> - if ( ! obj - > IsMarked ( ) ) MarkUnmarkedObject ( obj ) ; <nl> + if ( ! obj - > IsMarked ( ) ) MarkUnmarkedObject ( obj ) ; <nl> + } <nl> + <nl> + static inline void SetMark ( HeapObject * obj ) { <nl> + tracer_ - > increment_marked_count ( ) ; <nl> + # ifdef DEBUG <nl> + UpdateLiveObjectCount ( obj ) ; <nl> + # endif <nl> + obj - > SetMark ( ) ; <nl> } <nl> <nl> / / Creates back pointers for all map transitions , stores them in <nl> mmm a / src / v8 - counters . h <nl> ppp b / src / v8 - counters . h <nl> namespace v8 { namespace internal { <nl> # define STATS_COUNTER_LIST_1 ( SC ) \ <nl> / * Global Handle Count * / \ <nl> SC ( global_handles , V8 . GlobalHandles ) \ <nl> - / * Global Object Count * / \ <nl> - SC ( global_objects , V8 . GlobalObjects ) \ <nl> / * Mallocs from PCRE * / \ <nl> SC ( pcre_mallocs , V8 . PcreMallocCount ) \ <nl> / * OS Memory allocated * / \ <nl> | Code cleanup & simplification . | v8/v8 | eb9c7f326586a3d6ebca405d3e56296664f8dea7 | 2008-11-04T13:05:56Z |
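The V8 row above replaces every repeated "bump the marked counter, update the debug-only live-object count, flip the mark bit" sequence with a single MarkCompactCollector::SetMark helper. A minimal sketch of that same refactoring pattern is below; it is illustrative only and uses made-up class names, not V8's real marking machinery.

```cpp
// Minimal sketch (not V8's actual classes): hoist duplicated marking
// bookkeeping into one helper, as the diff above does with SetMark().
#include <cassert>
#include <cstddef>

struct HeapObject {
  bool marked = false;
  void SetMarkBit() { marked = true; }
  bool IsMarked() const { return marked; }
};

class Collector {
 public:
  // Call sites no longer repeat the counter update, the debug-only
  // accounting and the bit flip; they all funnel through SetMark().
  void SetMark(HeapObject* obj) {
    ++marked_count_;  // stands in for tracer()->increment_marked_count()
#ifdef DEBUG
    UpdateLiveObjectCount(obj);
#endif
    obj->SetMarkBit();
  }

  void MarkObject(HeapObject* obj) {
    if (!obj->IsMarked()) SetMark(obj);
  }

  size_t marked_count() const { return marked_count_; }

 private:
#ifdef DEBUG
  void UpdateLiveObjectCount(HeapObject*) { ++live_objects_; }
  size_t live_objects_ = 0;
#endif
  size_t marked_count_ = 0;
};

int main() {
  Collector c;
  HeapObject o;
  c.MarkObject(&o);
  c.MarkObject(&o);  // second call is a no-op: object already marked
  assert(c.marked_count() == 1);
}
```

The design benefit shown in the commit is that the counter and debug accounting can no longer drift out of sync with the mark bit, because there is only one place that sets it.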
mmm a / arangod / Indexes / IndexIterator . cpp <nl> ppp b / arangod / Indexes / IndexIterator . cpp <nl> bool IndexIterator : : hasExtra ( ) const { <nl> bool IndexIterator : : nextExtra ( ExtraCallback const & , size_t ) { <nl> TRI_ASSERT ( ! hasExtra ( ) ) ; <nl> THROW_ARANGO_EXCEPTION_MESSAGE ( TRI_ERROR_NOT_IMPLEMENTED , <nl> - " Request extra values from an index that " <nl> + " Requested extra values from an index that " <nl> " does not support it . This seems to be a bug " <nl> " in ArangoDB . Please report the query you are " <nl> " using + the indexes you have defined on the " <nl> | adjusted error message | arangodb/arangodb | 686a9647182ca292f818d1f5fbb17933fe4c7f2d | 2017-05-22T11:53:22Z |
mmm a / api / envoy / api / v2 / endpoint . proto <nl> ppp b / api / envoy / api / v2 / endpoint . proto <nl> message ClusterLoadAssignment { <nl> / / localities as endpoints become unhealthy . Otherwise Envoy will perform <nl> / / graceful failover as : ref : ` overprovisioning factor <nl> / / < arch_overview_load_balancing_overprovisioning_factor > ` suggests . <nl> - / / [ # next - major - version : Unify with overprovisioning config as a single message . ] <nl> / / [ # not - implemented - hide : ] <nl> - bool disable_overprovisioning = 5 ; <nl> + bool disable_overprovisioning = 5 [ deprecated = true ] ; <nl> } <nl> <nl> / / Name of the cluster . This will be the : ref : ` service_name <nl> mmm a / api / envoy / config / endpoint / v3 / endpoint . proto <nl> ppp b / api / envoy / config / endpoint / v3 / endpoint . proto <nl> message ClusterLoadAssignment { <nl> type . v3 . FractionalPercent drop_percentage = 2 ; <nl> } <nl> <nl> - reserved 1 ; <nl> + reserved 1 , 5 ; <nl> + <nl> + reserved " disable_overprovisioning " ; <nl> <nl> / / Action to trim the overall incoming traffic to protect the upstream <nl> / / hosts . This action allows protection in case the hosts are unable to <nl> message ClusterLoadAssignment { <nl> / / are considered stale and should be marked unhealthy . <nl> / / Defaults to 0 which means endpoints never go stale . <nl> google . protobuf . Duration endpoint_stale_after = 4 [ ( validate . rules ) . duration = { gt { } } ] ; <nl> - <nl> - / / The flag to disable overprovisioning . If it is set to true , <nl> - / / : ref : ` overprovisioning factor <nl> - / / < arch_overview_load_balancing_overprovisioning_factor > ` will be ignored <nl> - / / and Envoy will not perform graceful failover between priority levels or <nl> - / / localities as endpoints become unhealthy . Otherwise Envoy will perform <nl> - / / graceful failover as : ref : ` overprovisioning factor <nl> - / / < arch_overview_load_balancing_overprovisioning_factor > ` suggests . <nl> - / / [ # next - major - version : Unify with overprovisioning config as a single message . ] <nl> - / / [ # not - implemented - hide : ] <nl> - bool disable_overprovisioning = 5 ; <nl> } <nl> <nl> / / Name of the cluster . This will be the : ref : ` service_name <nl> mmm a / docs / root / api / client_features . rst <nl> ppp b / docs / root / api / client_features . rst <nl> Client features use reverse DNS naming scheme , for example ` com . acme . feature ` . <nl> Currently Defined Client Features <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - + <nl> - | Client Feature Name | Description | <nl> - + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = + <nl> - | envoy . config . require - any - fields - contain - struct | This feature indicates that xDS client requires | <nl> - | | that the configuration entries of type | <nl> - | | * google . protobuf . Any * contain messages of type | <nl> - | | * udpa . type . v1 . TypedStruct * only | <nl> - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - + <nl> + - * * envoy . config . require - any - fields - contain - struct * * : This feature indicates that xDS client <nl> + requires that the configuration entries of type * google . protobuf . 
Any * contain messages of type <nl> + * udpa . type . v1 . TypedStruct * only . <nl> + - * * envoy . lb . does_not_support_overprovisioning * * : This feature indicates that the client does not <nl> + support overprovisioning for priority failover and locality weighting as configured by the <nl> + : ref : ` overprovisioning_factor < envoy_api_field_ClusterLoadAssignment . Policy . overprovisioning_factor > ` <nl> + field . If graceful failover functionality is required , it must be supplied by the management <nl> + server . <nl> mmm a / generated_api_shadow / envoy / api / v2 / endpoint . proto <nl> ppp b / generated_api_shadow / envoy / api / v2 / endpoint . proto <nl> message ClusterLoadAssignment { <nl> / / localities as endpoints become unhealthy . Otherwise Envoy will perform <nl> / / graceful failover as : ref : ` overprovisioning factor <nl> / / < arch_overview_load_balancing_overprovisioning_factor > ` suggests . <nl> - / / [ # next - major - version : Unify with overprovisioning config as a single message . ] <nl> / / [ # not - implemented - hide : ] <nl> - bool disable_overprovisioning = 5 ; <nl> + bool disable_overprovisioning = 5 [ deprecated = true ] ; <nl> } <nl> <nl> / / Name of the cluster . This will be the : ref : ` service_name <nl> mmm a / generated_api_shadow / envoy / config / endpoint / v3 / endpoint . proto <nl> ppp b / generated_api_shadow / envoy / config / endpoint / v3 / endpoint . proto <nl> message ClusterLoadAssignment { <nl> / / localities as endpoints become unhealthy . Otherwise Envoy will perform <nl> / / graceful failover as : ref : ` overprovisioning factor <nl> / / < arch_overview_load_balancing_overprovisioning_factor > ` suggests . <nl> - / / [ # next - major - version : Unify with overprovisioning config as a single message . ] <nl> / / [ # not - implemented - hide : ] <nl> - bool disable_overprovisioning = 5 ; <nl> + bool hidden_envoy_deprecated_disable_overprovisioning = 5 [ deprecated = true ] ; <nl> } <nl> <nl> / / Name of the cluster . This will be the : ref : ` service_name <nl> | Add client capability for not supporting overprovisioning . ( ) | envoyproxy/envoy | 2fcd75f1c3295d667c4bbac9fd2c0ba430662d44 | 2020-02-26T03:41:15Z |
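The envoy row above documents client features as opaque reverse-DNS strings (for example `envoy.lb.does_not_support_overprovisioning`) that an xDS client advertises so the management server can adapt its responses. The sketch below is a hedged illustration of that idea only; the NodeInfo struct and HasFeature helper are hypothetical and are not Envoy's or gRPC's real API.

```cpp
// Illustrative only: a client advertises capability strings, and a
// management server keys behavior off their presence.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct NodeInfo {
  std::string id;
  std::vector<std::string> client_features;  // reverse-DNS capability names
};

bool HasFeature(const NodeInfo& node, const std::string& feature) {
  return std::find(node.client_features.begin(), node.client_features.end(),
                   feature) != node.client_features.end();
}

int main() {
  NodeInfo node{"client-1",
                {"envoy.lb.does_not_support_overprovisioning"}};
  // Per the docs change above, a server seeing this feature knows the
  // client ignores overprovisioning_factor, so graceful failover must be
  // supplied by the management server instead.
  if (HasFeature(node, "envoy.lb.does_not_support_overprovisioning")) {
    std::cout << "compute failover weights on the server\n";
  }
}
```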
mmm a / eosio_build . sh <nl> ppp b / eosio_build . sh <nl> <nl> C_COMPILER = gcc <nl> export LLVM_DIR = $ { HOME } / opt / wasm / lib / cmake / llvm <nl> ; ; <nl> + " Linux Mint " ) <nl> + FILE = $ { WORK_DIR } / scripts / eosio_build_ubuntu . sh <nl> + CXX_COMPILER = clang + + - 4 . 0 <nl> + C_COMPILER = clang - 4 . 0 <nl> + ; ; <nl> " CentOS Linux " ) <nl> FILE = $ { WORK_DIR } / scripts / eosio_build_centos . sh <nl> export CMAKE = $ { HOME } / opt / cmake / bin / cmake <nl> mmm a / scripts / eosio_build_ubuntu . sh <nl> ppp b / scripts / eosio_build_ubuntu . sh <nl> <nl> exit 1 <nl> fi <nl> <nl> - if [ $ OS_MIN - lt 4 ] ; then <nl> - printf " \ tYou must be running Ubuntu 16 . 04 . x or higher to install EOSIO . \ n " <nl> - printf " \ tExiting now . \ n " <nl> - exit 1 <nl> - fi <nl> + case $ OS_NAME in <nl> + " Linux Mint " ) <nl> + if [ $ OS_MAJ - lt 18 ] ; then <nl> + printf " \ tYou must be running Linux Mint 18 . x or higher to install EOSIO . \ n " <nl> + printf " \ tExiting now . \ n " <nl> + exit 1 <nl> + fi <nl> + ; ; <nl> + " Ubuntu " ) <nl> + if [ $ OS_MIN - lt 4 ] ; then <nl> + printf " \ tYou must be running Ubuntu 16 . 04 . x or higher to install EOSIO . \ n " <nl> + printf " \ tExiting now . \ n " <nl> + exit 1 <nl> + fi <nl> + ; ; <nl> + esac <nl> <nl> if [ $ DISK_AVAIL - lt $ DISK_MIN ] ; then <nl> printf " \ tYou must have at least $ { DISK_MIN } GB of available storage to install EOSIO . \ n " <nl> | Add support for Linux Mint ( as an ubuntu variant ) to the build scripts | EOSIO/eos | f668736d7af06c93da8757590a69de2e74f03ba4 | 2018-03-08T21:41:51Z |
mmm a / BUILD . gn <nl> ppp b / BUILD . gn <nl> v8_source_set ( " v8_base_without_compiler " ) { <nl> " src / objects / api - callbacks . h " , <nl> " src / objects / arguments - inl . h " , <nl> " src / objects / arguments . h " , <nl> - " src / objects / backing - store . cc " , <nl> - " src / objects / backing - store . h " , <nl> " src / objects / bigint . cc " , <nl> " src / objects / bigint . h " , <nl> " src / objects / cell - inl . h " , <nl> v8_source_set ( " v8_base_without_compiler " ) { <nl> " src / wasm / wasm - js . h " , <nl> " src / wasm / wasm - limits . h " , <nl> " src / wasm / wasm - linkage . h " , <nl> + " src / wasm / wasm - memory . cc " , <nl> + " src / wasm / wasm - memory . h " , <nl> " src / wasm / wasm - module - builder . cc " , <nl> " src / wasm / wasm - module - builder . h " , <nl> " src / wasm / wasm - module . cc " , <nl> mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> class V8_EXPORT ArrayBuffer : public Object { <nl> * <nl> * The Data pointer of ArrayBuffer : : Contents must be freed using the provided <nl> * deleter , which will call ArrayBuffer : : Allocator : : Free if the buffer <nl> - * was allocated with ArrayBuffer : : Allocator : : Allocate . <nl> + * was allocated with ArraryBuffer : : Allocator : : Allocate . <nl> * / <nl> Contents Externalize ( ) ; <nl> <nl> class V8_EXPORT ArrayBuffer : public Object { <nl> private : <nl> ArrayBuffer ( ) ; <nl> static void CheckCast ( Value * obj ) ; <nl> - Contents GetContents ( bool externalize ) ; <nl> } ; <nl> <nl> <nl> class V8_EXPORT SharedArrayBuffer : public Object { <nl> private : <nl> SharedArrayBuffer ( ) ; <nl> static void CheckCast ( Value * obj ) ; <nl> - Contents GetContents ( bool externalize ) ; <nl> } ; <nl> <nl> <nl> mmm a / src / api / api . cc <nl> ppp b / src / api / api . cc <nl> <nl> # endif / / V8_TARGET_ARCH_X64 <nl> # endif / / V8_OS_WIN <nl> <nl> - # define TRACE_BS ( . . . ) / * PrintF ( __VA_ARGS__ ) * / <nl> - <nl> namespace v8 { <nl> <nl> / * <nl> bool v8 : : ArrayBuffer : : IsDetachable ( ) const { <nl> return Utils : : OpenHandle ( this ) - > is_detachable ( ) ; <nl> } <nl> <nl> - namespace { <nl> - / / The backing store deleter just deletes the indirection , which downrefs <nl> - / / the shared pointer . It will get collected normally . <nl> - void BackingStoreDeleter ( void * buffer , size_t length , void * info ) { <nl> - auto bs_indirection = <nl> - reinterpret_cast < std : : shared_ptr < i : : BackingStore > * > ( info ) ; <nl> - if ( bs_indirection ) { <nl> - auto backing_store = bs_indirection - > get ( ) ; <nl> - TRACE_BS ( " API : delete bs = % p mem = % p ( % zu bytes ) \ n " , backing_store , <nl> - backing_store - > buffer_start ( ) , backing_store - > byte_length ( ) ) ; <nl> - USE ( backing_store ) ; <nl> - } <nl> - delete bs_indirection ; <nl> - } <nl> - <nl> - void * MakeDeleterData ( std : : shared_ptr < i : : BackingStore > backing_store ) { <nl> - if ( ! backing_store ) return nullptr ; <nl> - TRACE_BS ( " API : extern bs = % p mem = % p ( % zu bytes ) \ n " , backing_store . get ( ) , <nl> - backing_store - > buffer_start ( ) , backing_store - > byte_length ( ) ) ; <nl> - return new std : : shared_ptr < i : : BackingStore > ( backing_store ) ; <nl> - } <nl> + v8 : : ArrayBuffer : : Contents v8 : : ArrayBuffer : : Externalize ( ) { <nl> + i : : Handle < i : : JSArrayBuffer > self = Utils : : OpenHandle ( this ) ; <nl> + i : : Isolate * isolate = self - > GetIsolate ( ) ; <nl> + Utils : : ApiCheck ( ! 
self - > is_external ( ) , " v8_ArrayBuffer_Externalize " , <nl> + " ArrayBuffer already externalized " ) ; <nl> + self - > set_is_external ( true ) ; <nl> <nl> - std : : shared_ptr < i : : BackingStore > LookupOrCreateBackingStore ( <nl> - i : : Isolate * i_isolate , void * data , size_t byte_length , i : : SharedFlag shared , <nl> - ArrayBufferCreationMode mode ) { <nl> - / / " internalized " means that the storage was allocated by the <nl> - / / ArrayBufferAllocator and thus should be freed upon destruction . <nl> - bool free_on_destruct = mode = = ArrayBufferCreationMode : : kInternalized ; <nl> - <nl> - / / Try to lookup a previously - registered backing store in the global <nl> - / / registry . If found , use that instead of wrapping an embedder allocation . <nl> - std : : shared_ptr < i : : BackingStore > backing_store = <nl> - i : : GlobalBackingStoreRegistry : : Lookup ( data , byte_length ) ; <nl> - <nl> - if ( backing_store ) { <nl> - / / Check invariants for a previously - found backing store . <nl> - <nl> - / / 1 . We cannot allow an embedder to first allocate a backing store that <nl> - / / should not be freed upon destruct , and then allocate an alias that should <nl> - / / destruct it . The other order is fine . <nl> - bool changing_destruct_mode = <nl> - free_on_destruct & & ! backing_store - > free_on_destruct ( ) ; <nl> - Utils : : ApiCheck ( <nl> - ! changing_destruct_mode , " v8_ [ Shared ] ArrayBuffer_New " , <nl> - " previous backing store found that should not be freed on destruct " ) ; <nl> + const v8 : : ArrayBuffer : : Contents contents = GetContents ( ) ; <nl> + isolate - > heap ( ) - > UnregisterArrayBuffer ( * self ) ; <nl> <nl> - / / 2 . We cannot allow embedders to use the same backing store for both <nl> - / / SharedArrayBuffers and regular ArrayBuffers . <nl> - bool changing_shared_flag = <nl> - ( shared = = i : : SharedFlag : : kShared ) ! = backing_store - > is_shared ( ) ; <nl> - Utils : : ApiCheck ( <nl> - ! changing_shared_flag , " v8_ [ Shared ] ArrayBuffer_New " , <nl> - " previous backing store found that does not match shared flag " ) ; <nl> - } else { <nl> - / / No previous backing store found . <nl> - backing_store = i : : BackingStore : : WrapAllocation ( <nl> - i_isolate , data , byte_length , shared , free_on_destruct ) ; <nl> - <nl> - if ( free_on_destruct ) { <nl> - / / The embedder requested free - on - destruct . They already have a <nl> - / / direct pointer to the buffer start , so globally register the backing <nl> - / / store in case they come back with the same buffer start . <nl> - i : : GlobalBackingStoreRegistry : : Register ( backing_store ) ; <nl> - } <nl> - } <nl> - return backing_store ; <nl> + / / A regular copy is good enough . No move semantics needed . 
<nl> + return contents ; <nl> } <nl> - } / / namespace <nl> <nl> v8 : : ArrayBuffer : : Contents : : Contents ( void * data , size_t byte_length , <nl> void * allocation_base , <nl> v8 : : ArrayBuffer : : Contents : : Contents ( void * data , size_t byte_length , <nl> DCHECK_LE ( byte_length_ , allocation_length_ ) ; <nl> } <nl> <nl> - v8 : : ArrayBuffer : : Contents v8 : : ArrayBuffer : : Externalize ( ) { <nl> - return GetContents ( true ) ; <nl> + void WasmMemoryDeleter ( void * buffer , size_t lenght , void * info ) { <nl> + internal : : wasm : : WasmEngine * engine = <nl> + reinterpret_cast < internal : : wasm : : WasmEngine * > ( info ) ; <nl> + CHECK ( engine - > memory_tracker ( ) - > FreeWasmMemory ( nullptr , buffer ) ) ; <nl> } <nl> <nl> - v8 : : ArrayBuffer : : Contents v8 : : ArrayBuffer : : GetContents ( ) { <nl> - return GetContents ( false ) ; <nl> + void ArrayBufferDeleter ( void * buffer , size_t length , void * info ) { <nl> + v8 : : ArrayBuffer : : Allocator * allocator = <nl> + reinterpret_cast < v8 : : ArrayBuffer : : Allocator * > ( info ) ; <nl> + allocator - > Free ( buffer , length ) ; <nl> } <nl> <nl> - v8 : : ArrayBuffer : : Contents v8 : : ArrayBuffer : : GetContents ( bool externalize ) { <nl> - / / TODO ( titzer ) : reduce duplication between shared / unshared GetContents ( ) <nl> - using BufferType = v8 : : ArrayBuffer ; <nl> - <nl> + v8 : : ArrayBuffer : : Contents v8 : : ArrayBuffer : : GetContents ( ) { <nl> i : : Handle < i : : JSArrayBuffer > self = Utils : : OpenHandle ( this ) ; <nl> - <nl> - std : : shared_ptr < i : : BackingStore > backing_store = self - > GetBackingStore ( ) ; <nl> - <nl> - void * deleter_data = nullptr ; <nl> - if ( externalize ) { <nl> - Utils : : ApiCheck ( ! self - > is_external ( ) , " v8_ArrayBuffer_Externalize " , <nl> - " ArrayBuffer already externalized " ) ; <nl> - self - > set_is_external ( true ) ; <nl> - / / When externalizing , upref the shared pointer to the backing store <nl> - / / and store that as the deleter data . When the embedder calls the deleter <nl> - / / callback , we will delete the additional ( on - heap ) shared_ptr . <nl> - deleter_data = MakeDeleterData ( backing_store ) ; <nl> - } <nl> - <nl> - if ( ! backing_store ) { <nl> - / / If the array buffer has zero length or was detached , return empty <nl> - / / contents . <nl> - DCHECK_EQ ( 0 , self - > byte_length ( ) ) ; <nl> - BufferType : : Contents contents ( <nl> - nullptr , 0 , nullptr , 0 , <nl> - v8 : : ArrayBuffer : : Allocator : : AllocationMode : : kNormal , <nl> - BackingStoreDeleter , deleter_data ) ; <nl> - return contents ; <nl> - } <nl> - <nl> - / / Backing stores that given to the embedder might be passed back through <nl> - / / the API using only the start of the buffer . We need to find such <nl> - / / backing stores using global registration until the API is changed . <nl> - i : : GlobalBackingStoreRegistry : : Register ( backing_store ) ; <nl> - <nl> - auto allocation_mode = <nl> - backing_store - > is_wasm_memory ( ) <nl> - ? 
v8 : : ArrayBuffer : : Allocator : : AllocationMode : : kReservation <nl> - : v8 : : ArrayBuffer : : Allocator : : AllocationMode : : kNormal ; <nl> - <nl> - BufferType : : Contents contents ( backing_store - > buffer_start ( ) , / / - - <nl> - backing_store - > byte_length ( ) , / / - - <nl> - backing_store - > buffer_start ( ) , / / - - <nl> - backing_store - > byte_length ( ) , / / - - <nl> - allocation_mode , / / - - <nl> - BackingStoreDeleter , / / - - <nl> - deleter_data ) ; <nl> + Contents contents ( <nl> + self - > backing_store ( ) , self - > byte_length ( ) , self - > allocation_base ( ) , <nl> + self - > allocation_length ( ) , <nl> + self - > is_wasm_memory ( ) ? Allocator : : AllocationMode : : kReservation <nl> + : Allocator : : AllocationMode : : kNormal , <nl> + self - > is_wasm_memory ( ) ? WasmMemoryDeleter : ArrayBufferDeleter , <nl> + self - > is_wasm_memory ( ) <nl> + ? static_cast < void * > ( self - > GetIsolate ( ) - > wasm_engine ( ) ) <nl> + : static_cast < void * > ( self - > GetIsolate ( ) - > array_buffer_allocator ( ) ) ) ; <nl> return contents ; <nl> } <nl> <nl> Local < ArrayBuffer > v8 : : ArrayBuffer : : New ( Isolate * isolate , size_t byte_length ) { <nl> i : : Isolate * i_isolate = reinterpret_cast < i : : Isolate * > ( isolate ) ; <nl> LOG_API ( i_isolate , ArrayBuffer , New ) ; <nl> ENTER_V8_NO_SCRIPT_NO_EXCEPTION ( i_isolate ) ; <nl> - i : : MaybeHandle < i : : JSArrayBuffer > result = <nl> - i_isolate - > factory ( ) - > NewJSArrayBufferAndBackingStore ( <nl> - byte_length , i : : InitializedFlag : : kZeroInitialized ) ; <nl> - <nl> - i : : Handle < i : : JSArrayBuffer > array_buffer ; <nl> - if ( ! result . ToHandle ( & array_buffer ) ) { <nl> - / / TODO ( jbroman ) : It may be useful in the future to provide a MaybeLocal <nl> - / / version that throws an exception or otherwise does not crash . <nl> + i : : Handle < i : : JSArrayBuffer > obj = <nl> + i_isolate - > factory ( ) - > NewJSArrayBuffer ( i : : SharedFlag : : kNotShared ) ; <nl> + / / TODO ( jbroman ) : It may be useful in the future to provide a MaybeLocal <nl> + / / version that throws an exception or otherwise does not crash . <nl> + if ( ! 
i : : JSArrayBuffer : : SetupAllocatingData ( obj , i_isolate , byte_length ) ) { <nl> i : : FatalProcessOutOfMemory ( i_isolate , " v8 : : ArrayBuffer : : New " ) ; <nl> } <nl> - <nl> - return Utils : : ToLocal ( array_buffer ) ; <nl> + return Utils : : ToLocal ( obj ) ; <nl> } <nl> <nl> Local < ArrayBuffer > v8 : : ArrayBuffer : : New ( Isolate * isolate , void * data , <nl> Local < ArrayBuffer > v8 : : ArrayBuffer : : New ( Isolate * isolate , void * data , <nl> i : : Isolate * i_isolate = reinterpret_cast < i : : Isolate * > ( isolate ) ; <nl> LOG_API ( i_isolate , ArrayBuffer , New ) ; <nl> ENTER_V8_NO_SCRIPT_NO_EXCEPTION ( i_isolate ) ; <nl> - <nl> - std : : shared_ptr < i : : BackingStore > backing_store = LookupOrCreateBackingStore ( <nl> - i_isolate , data , byte_length , i : : SharedFlag : : kNotShared , mode ) ; <nl> - <nl> - i : : Handle < i : : JSArrayBuffer > obj = i_isolate - > factory ( ) - > NewJSArrayBuffer ( ) ; <nl> - obj - > Attach ( std : : move ( backing_store ) ) ; <nl> - if ( mode = = ArrayBufferCreationMode : : kExternalized ) { <nl> - obj - > set_is_external ( true ) ; <nl> - } <nl> + i : : Handle < i : : JSArrayBuffer > obj = <nl> + i_isolate - > factory ( ) - > NewJSArrayBuffer ( i : : SharedFlag : : kNotShared ) ; <nl> + i : : JSArrayBuffer : : Setup ( obj , i_isolate , <nl> + mode = = ArrayBufferCreationMode : : kExternalized , data , <nl> + byte_length ) ; <nl> return Utils : : ToLocal ( obj ) ; <nl> } <nl> <nl> size_t v8 : : ArrayBufferView : : CopyContents ( void * dest , size_t byte_length ) { <nl> <nl> bool v8 : : ArrayBufferView : : HasBuffer ( ) const { <nl> i : : Handle < i : : JSArrayBufferView > self = Utils : : OpenHandle ( this ) ; <nl> - if ( ! self - > IsJSTypedArray ( ) ) return true ; <nl> - auto typed_array = i : : Handle < i : : JSTypedArray > : : cast ( self ) ; <nl> - return ! typed_array - > is_on_heap ( ) ; <nl> + i : : Handle < i : : JSArrayBuffer > buffer ( i : : JSArrayBuffer : : cast ( self - > buffer ( ) ) , <nl> + self - > GetIsolate ( ) ) ; <nl> + return buffer - > backing_store ( ) ! 
= nullptr ; <nl> } <nl> <nl> size_t v8 : : ArrayBufferView : : ByteOffset ( ) { <nl> i : : Handle < i : : JSArrayBuffer > SetupSharedArrayBuffer ( <nl> i : : Isolate * i_isolate = reinterpret_cast < i : : Isolate * > ( isolate ) ; <nl> LOG_API ( i_isolate , SharedArrayBuffer , New ) ; <nl> ENTER_V8_NO_SCRIPT_NO_EXCEPTION ( i_isolate ) ; <nl> - <nl> - std : : shared_ptr < i : : BackingStore > backing_store = LookupOrCreateBackingStore ( <nl> - i_isolate , data , byte_length , i : : SharedFlag : : kShared , mode ) ; <nl> - <nl> i : : Handle < i : : JSArrayBuffer > obj = <nl> - i_isolate - > factory ( ) - > NewJSSharedArrayBuffer ( ) ; <nl> - <nl> - obj - > Attach ( backing_store ) ; <nl> - if ( mode = = ArrayBufferCreationMode : : kExternalized ) { <nl> - obj - > set_is_external ( true ) ; <nl> - } <nl> + i_isolate - > factory ( ) - > NewJSArrayBuffer ( i : : SharedFlag : : kShared ) ; <nl> + bool is_wasm_memory = <nl> + i_isolate - > wasm_engine ( ) - > memory_tracker ( ) - > IsWasmMemory ( data ) ; <nl> + i : : JSArrayBuffer : : Setup ( obj , i_isolate , <nl> + mode = = ArrayBufferCreationMode : : kExternalized , data , <nl> + byte_length , i : : SharedFlag : : kShared , is_wasm_memory ) ; <nl> return obj ; <nl> } <nl> <nl> bool v8 : : SharedArrayBuffer : : IsExternal ( ) const { <nl> return Utils : : OpenHandle ( this ) - > is_external ( ) ; <nl> } <nl> <nl> + v8 : : SharedArrayBuffer : : Contents v8 : : SharedArrayBuffer : : Externalize ( ) { <nl> + i : : Handle < i : : JSArrayBuffer > self = Utils : : OpenHandle ( this ) ; <nl> + i : : Isolate * isolate = self - > GetIsolate ( ) ; <nl> + Utils : : ApiCheck ( ! self - > is_external ( ) , " v8_SharedArrayBuffer_Externalize " , <nl> + " SharedArrayBuffer already externalized " ) ; <nl> + self - > set_is_external ( true ) ; <nl> + <nl> + const v8 : : SharedArrayBuffer : : Contents contents = GetContents ( ) ; <nl> + isolate - > heap ( ) - > UnregisterArrayBuffer ( * self ) ; <nl> + <nl> + / / A regular copy is good enough . No move semantics needed . <nl> + return contents ; <nl> + } <nl> + <nl> v8 : : SharedArrayBuffer : : Contents : : Contents ( <nl> void * data , size_t byte_length , void * allocation_base , <nl> size_t allocation_length , Allocator : : AllocationMode allocation_mode , <nl> v8 : : SharedArrayBuffer : : Contents : : Contents ( <nl> DCHECK_LE ( byte_length_ , allocation_length_ ) ; <nl> } <nl> <nl> - v8 : : SharedArrayBuffer : : Contents v8 : : SharedArrayBuffer : : Externalize ( ) { <nl> - return GetContents ( true ) ; <nl> - } <nl> - <nl> v8 : : SharedArrayBuffer : : Contents v8 : : SharedArrayBuffer : : GetContents ( ) { <nl> - return GetContents ( false ) ; <nl> - } <nl> - <nl> - v8 : : SharedArrayBuffer : : Contents v8 : : SharedArrayBuffer : : GetContents ( <nl> - bool externalize ) { <nl> - / / TODO ( titzer ) : reduce duplication between shared / unshared GetContents ( ) <nl> - using BufferType = v8 : : SharedArrayBuffer ; <nl> - <nl> i : : Handle < i : : JSArrayBuffer > self = Utils : : OpenHandle ( this ) ; <nl> - <nl> - std : : shared_ptr < i : : BackingStore > backing_store = self - > GetBackingStore ( ) ; <nl> - <nl> - void * deleter_data = nullptr ; <nl> - if ( externalize ) { <nl> - Utils : : ApiCheck ( ! self - > is_external ( ) , " v8_SharedArrayBuffer_Externalize " , <nl> - " SharedArrayBuffer already externalized " ) ; <nl> - self - > set_is_external ( true ) ; <nl> - / / When externalizing , upref the shared pointer to the backing store <nl> - / / and store that as the deleter data . 
When the embedder calls the deleter <nl> - / / callback , we will delete the additional ( on - heap ) shared_ptr . <nl> - deleter_data = MakeDeleterData ( backing_store ) ; <nl> - } <nl> - <nl> - if ( ! backing_store ) { <nl> - / / If the array buffer has zero length or was detached , return empty <nl> - / / contents . <nl> - DCHECK_EQ ( 0 , self - > byte_length ( ) ) ; <nl> - BufferType : : Contents contents ( <nl> - nullptr , 0 , nullptr , 0 , <nl> - v8 : : ArrayBuffer : : Allocator : : AllocationMode : : kNormal , <nl> - BackingStoreDeleter , deleter_data ) ; <nl> - return contents ; <nl> - } <nl> - <nl> - / / Backing stores that given to the embedder might be passed back through <nl> - / / the API using only the start of the buffer . We need to find such <nl> - / / backing stores using global registration until the API is changed . <nl> - i : : GlobalBackingStoreRegistry : : Register ( backing_store ) ; <nl> - <nl> - auto allocation_mode = <nl> - backing_store - > is_wasm_memory ( ) <nl> - ? v8 : : ArrayBuffer : : Allocator : : AllocationMode : : kReservation <nl> - : v8 : : ArrayBuffer : : Allocator : : AllocationMode : : kNormal ; <nl> - <nl> - BufferType : : Contents contents ( backing_store - > buffer_start ( ) , / / - - <nl> - backing_store - > byte_length ( ) , / / - - <nl> - backing_store - > buffer_start ( ) , / / - - <nl> - backing_store - > byte_length ( ) , / / - - <nl> - allocation_mode , / / - - <nl> - BackingStoreDeleter , / / - - <nl> - deleter_data ) ; <nl> + Contents contents ( <nl> + self - > backing_store ( ) , self - > byte_length ( ) , self - > allocation_base ( ) , <nl> + self - > allocation_length ( ) , <nl> + self - > is_wasm_memory ( ) <nl> + ? ArrayBuffer : : Allocator : : AllocationMode : : kReservation <nl> + : ArrayBuffer : : Allocator : : AllocationMode : : kNormal , <nl> + self - > is_wasm_memory ( ) <nl> + ? reinterpret_cast < Contents : : DeleterCallback > ( WasmMemoryDeleter ) <nl> + : reinterpret_cast < Contents : : DeleterCallback > ( ArrayBufferDeleter ) , <nl> + self - > is_wasm_memory ( ) <nl> + ? static_cast < void * > ( self - > GetIsolate ( ) - > wasm_engine ( ) ) <nl> + : static_cast < void * > ( self - > GetIsolate ( ) - > array_buffer_allocator ( ) ) ) ; <nl> return contents ; <nl> } <nl> <nl> Local < SharedArrayBuffer > v8 : : SharedArrayBuffer : : New ( Isolate * isolate , <nl> i : : Isolate * i_isolate = reinterpret_cast < i : : Isolate * > ( isolate ) ; <nl> LOG_API ( i_isolate , SharedArrayBuffer , New ) ; <nl> ENTER_V8_NO_SCRIPT_NO_EXCEPTION ( i_isolate ) ; <nl> - <nl> - std : : unique_ptr < i : : BackingStore > backing_store = <nl> - i : : BackingStore : : Allocate ( i_isolate , byte_length , i : : SharedFlag : : kShared , <nl> - i : : InitializedFlag : : kZeroInitialized ) ; <nl> - <nl> - if ( ! backing_store ) { <nl> - / / TODO ( jbroman ) : It may be useful in the future to provide a MaybeLocal <nl> - / / version that throws an exception or otherwise does not crash . <nl> + i : : Handle < i : : JSArrayBuffer > obj = <nl> + i_isolate - > factory ( ) - > NewJSArrayBuffer ( i : : SharedFlag : : kShared ) ; <nl> + / / TODO ( jbroman ) : It may be useful in the future to provide a MaybeLocal <nl> + / / version that throws an exception or otherwise does not crash . <nl> + if ( ! 
i : : JSArrayBuffer : : SetupAllocatingData ( obj , i_isolate , byte_length , true , <nl> + i : : SharedFlag : : kShared ) ) { <nl> i : : FatalProcessOutOfMemory ( i_isolate , " v8 : : SharedArrayBuffer : : New " ) ; <nl> } <nl> - <nl> - i : : Handle < i : : JSArrayBuffer > obj = <nl> - i_isolate - > factory ( ) - > NewJSSharedArrayBuffer ( ) ; <nl> - obj - > Attach ( std : : move ( backing_store ) ) ; <nl> return Utils : : ToLocalShared ( obj ) ; <nl> } <nl> <nl> void InvokeFunctionCallback ( const v8 : : FunctionCallbackInfo < v8 : : Value > & info , <nl> <nl> } / / namespace internal <nl> } / / namespace v8 <nl> - <nl> - # undef TRACE_BS <nl> mmm a / src / asmjs / asm - js . cc <nl> ppp b / src / asmjs / asm - js . cc <nl> MaybeHandle < Object > AsmJs : : InstantiateAsmWasm ( Isolate * isolate , <nl> ReportInstantiationFailure ( script , position , " Requires heap buffer " ) ; <nl> return MaybeHandle < Object > ( ) ; <nl> } <nl> - / / Mark the buffer as being used as an asm . js memory . This implies two <nl> - / / things : 1 ) if the buffer is from a Wasm memory , that memory can no longer <nl> - / / be grown , since that would detach this buffer , and 2 ) the buffer cannot <nl> - / / be postMessage ( ) ' d , as that also detaches the buffer . <nl> - memory - > set_is_asmjs_memory ( true ) ; <nl> - memory - > set_is_detachable ( false ) ; <nl> + wasm_engine - > memory_tracker ( ) - > MarkWasmMemoryNotGrowable ( memory ) ; <nl> size_t size = memory - > byte_length ( ) ; <nl> / / Check the asm . js heap size against the valid limits . <nl> if ( ! IsValidAsmjsMemorySize ( size ) ) { <nl> mmm a / src / builtins / builtins - arraybuffer . cc <nl> ppp b / src / builtins / builtins - arraybuffer . cc <nl> namespace { <nl> <nl> Object ConstructBuffer ( Isolate * isolate , Handle < JSFunction > target , <nl> Handle < JSReceiver > new_target , Handle < Object > length , <nl> - InitializedFlag initialized ) { <nl> + bool initialize ) { <nl> Handle < JSObject > result ; <nl> ASSIGN_RETURN_FAILURE_ON_EXCEPTION ( <nl> isolate , result , <nl> JSObject : : New ( target , new_target , Handle < AllocationSite > : : null ( ) ) ) ; <nl> - auto array_buffer = Handle < JSArrayBuffer > : : cast ( result ) ; <nl> - SharedFlag shared = ( * target ! = target - > native_context ( ) . array_buffer_fun ( ) ) <nl> - ? SharedFlag : : kShared <nl> - : SharedFlag : : kNotShared ; <nl> - <nl> size_t byte_length ; <nl> if ( ! TryNumberToSize ( * length , & byte_length ) | | <nl> byte_length > JSArrayBuffer : : kMaxByteLength ) { <nl> - / / ToNumber failed . <nl> - array_buffer - > SetupEmpty ( shared ) ; <nl> + JSArrayBuffer : : SetupAsEmpty ( Handle < JSArrayBuffer > : : cast ( result ) , isolate ) ; <nl> THROW_NEW_ERROR_RETURN_FAILURE ( <nl> isolate , NewRangeError ( MessageTemplate : : kInvalidArrayBufferLength ) ) ; <nl> } <nl> - <nl> - auto backing_store = <nl> - BackingStore : : Allocate ( isolate , byte_length , shared , initialized ) ; <nl> - if ( backing_store ) { <nl> - array_buffer - > Attach ( std : : move ( backing_store ) ) ; <nl> - return * array_buffer ; <nl> + SharedFlag shared_flag = <nl> + ( * target = = target - > native_context ( ) . array_buffer_fun ( ) ) <nl> + ? SharedFlag : : kNotShared <nl> + : SharedFlag : : kShared ; <nl> + if ( ! 
JSArrayBuffer : : SetupAllocatingData ( Handle < JSArrayBuffer > : : cast ( result ) , <nl> + isolate , byte_length , initialize , <nl> + shared_flag ) ) { <nl> + THROW_NEW_ERROR_RETURN_FAILURE ( <nl> + isolate , NewRangeError ( MessageTemplate : : kArrayBufferAllocationFailed ) ) ; <nl> } <nl> - <nl> - / / Allocation of backing store failed . <nl> - array_buffer - > SetupEmpty ( shared ) ; <nl> - THROW_NEW_ERROR_RETURN_FAILURE ( <nl> - isolate , NewRangeError ( MessageTemplate : : kArrayBufferAllocationFailed ) ) ; <nl> + return * result ; <nl> } <nl> <nl> } / / namespace <nl> BUILTIN ( ArrayBufferConstructor ) { <nl> isolate , NewRangeError ( MessageTemplate : : kInvalidArrayBufferLength ) ) ; <nl> } <nl> <nl> - return ConstructBuffer ( isolate , target , new_target , number_length , <nl> - InitializedFlag : : kZeroInitialized ) ; <nl> + return ConstructBuffer ( isolate , target , new_target , number_length , true ) ; <nl> } <nl> <nl> / / This is a helper to construct an ArrayBuffer with uinitialized memory . <nl> BUILTIN ( ArrayBufferConstructor_DoNotInitialize ) { <nl> Handle < JSFunction > target ( isolate - > native_context ( ) - > array_buffer_fun ( ) , <nl> isolate ) ; <nl> Handle < Object > length = args . atOrUndefined ( isolate , 1 ) ; <nl> - return ConstructBuffer ( isolate , target , target , length , <nl> - InitializedFlag : : kUninitialized ) ; <nl> + return ConstructBuffer ( isolate , target , target , length , false ) ; <nl> } <nl> <nl> / / ES6 section 24 . 1 . 4 . 1 get ArrayBuffer . prototype . byteLength <nl> mmm a / src / d8 / d8 . cc <nl> ppp b / src / d8 / d8 . cc <nl> <nl> # define CHECK ( condition ) assert ( condition ) <nl> # endif <nl> <nl> - # define TRACE_BS ( . . . ) / * PrintF ( __VA_ARGS__ ) * / <nl> - <nl> namespace v8 { <nl> <nl> namespace { <nl> class Serializer : public ValueSerializer : : Delegate { <nl> std : : unique_ptr < SerializationData > Release ( ) { return std : : move ( data_ ) ; } <nl> <nl> void AppendExternalizedContentsTo ( std : : vector < ExternalizedContents > * to ) { <nl> - for ( auto & contents : externalized_contents_ ) { <nl> - auto bs_indirection = reinterpret_cast < std : : shared_ptr < i : : BackingStore > * > ( <nl> - contents . DeleterData ( ) ) ; <nl> - if ( bs_indirection ) { <nl> - auto backing_store = bs_indirection - > get ( ) ; <nl> - TRACE_BS ( " d8 : append bs = % p mem = % p ( % zu bytes ) \ n " , backing_store , <nl> - backing_store - > buffer_start ( ) , backing_store - > byte_length ( ) ) ; <nl> - USE ( backing_store ) ; <nl> - } <nl> - } <nl> - <nl> to - > insert ( to - > end ( ) , <nl> std : : make_move_iterator ( externalized_contents_ . begin ( ) ) , <nl> std : : make_move_iterator ( externalized_contents_ . end ( ) ) ) ; <nl> int main ( int argc , char * argv [ ] ) { return v8 : : Shell : : Main ( argc , argv ) ; } <nl> <nl> # undef CHECK <nl> # undef DCHECK <nl> - # undef TRACE_BS <nl> mmm a / src / d8 / d8 . h <nl> ppp b / src / d8 / d8 . h <nl> class ExternalizedContents { <nl> } <nl> ~ ExternalizedContents ( ) ; <nl> <nl> - void * DeleterData ( ) { return deleter_data_ ; } <nl> - <nl> private : <nl> void * data_ ; <nl> size_t length_ ; <nl> mmm a / src / diagnostics / objects - printer . cc <nl> ppp b / src / diagnostics / objects - printer . 
cc <nl> void JSArrayBuffer : : JSArrayBufferPrint ( std : : ostream & os ) { / / NOLINT <nl> if ( is_detachable ( ) ) os < < " \ n - detachable " ; <nl> if ( was_detached ( ) ) os < < " \ n - detached " ; <nl> if ( is_shared ( ) ) os < < " \ n - shared " ; <nl> + if ( is_wasm_memory ( ) ) os < < " \ n - is_wasm_memory " ; <nl> JSObjectPrintBody ( os , * this , ! was_detached ( ) ) ; <nl> } <nl> <nl> mmm a / src / execution / isolate . cc <nl> ppp b / src / execution / isolate . cc <nl> <nl> # include " src / logging / counters . h " <nl> # include " src / logging / log . h " <nl> # include " src / numbers / hash - seed - inl . h " <nl> - # include " src / objects / backing - store . h " <nl> # include " src / objects / elements . h " <nl> # include " src / objects / frame - array - inl . h " <nl> # include " src / objects / hash - table - inl . h " <nl> void Isolate : : Deinit ( ) { <nl> optimizing_compile_dispatcher_ = nullptr ; <nl> } <nl> <nl> - BackingStore : : RemoveSharedWasmMemoryObjects ( this ) ; <nl> + wasm_engine ( ) - > memory_tracker ( ) - > DeleteSharedMemoryObjectsOnIsolate ( this ) ; <nl> <nl> heap_ . mark_compact_collector ( ) - > EnsureSweepingCompleted ( ) ; <nl> heap_ . memory_allocator ( ) - > unmapper ( ) - > EnsureUnmappingCompleted ( ) ; <nl> void Isolate : : AddDetachedContext ( Handle < Context > context ) { <nl> heap ( ) - > set_detached_contexts ( * detached_contexts ) ; <nl> } <nl> <nl> - void Isolate : : AddSharedWasmMemory ( Handle < WasmMemoryObject > memory_object ) { <nl> - HandleScope scope ( this ) ; <nl> - Handle < WeakArrayList > shared_wasm_memories = <nl> - factory ( ) - > shared_wasm_memories ( ) ; <nl> - shared_wasm_memories = WeakArrayList : : AddToEnd ( <nl> - this , shared_wasm_memories , MaybeObjectHandle : : Weak ( memory_object ) ) ; <nl> - heap ( ) - > set_shared_wasm_memories ( * shared_wasm_memories ) ; <nl> - } <nl> - <nl> void Isolate : : CheckDetachedContextsAfterGC ( ) { <nl> HandleScope scope ( this ) ; <nl> Handle < WeakArrayList > detached_contexts = factory ( ) - > detached_contexts ( ) ; <nl> mmm a / src / execution / isolate . h <nl> ppp b / src / execution / isolate . h <nl> class Isolate final : private HiddenFactory { <nl> void AddDetachedContext ( Handle < Context > context ) ; <nl> void CheckDetachedContextsAfterGC ( ) ; <nl> <nl> - void AddSharedWasmMemory ( Handle < WasmMemoryObject > memory_object ) ; <nl> - <nl> std : : vector < Object > * partial_snapshot_cache ( ) { <nl> return & partial_snapshot_cache_ ; <nl> } <nl> mmm a / src / execution / stack - guard . cc <nl> ppp b / src / execution / stack - guard . cc <nl> <nl> # include " src / execution / runtime - profiler . h " <nl> # include " src / execution / simulator . h " <nl> # include " src / logging / counters . h " <nl> - # include " src / objects / backing - store . h " <nl> # include " src / roots / roots - inl . h " <nl> # include " src / utils / memcopy . h " <nl> # include " src / wasm / wasm - engine . h " <nl> Object StackGuard : : HandleInterrupts ( ) { <nl> if ( TestAndClear ( & interrupt_flags , GROW_SHARED_MEMORY ) ) { <nl> TRACE_EVENT0 ( TRACE_DISABLED_BY_DEFAULT ( " v8 . wasm " ) , <nl> " V8 . WasmGrowSharedMemory " ) ; <nl> - BackingStore : : UpdateSharedWasmMemoryObjects ( isolate_ ) ; <nl> + isolate_ - > wasm_engine ( ) - > memory_tracker ( ) - > UpdateSharedMemoryInstances ( <nl> + isolate_ ) ; <nl> } <nl> <nl> if ( TestAndClear ( & interrupt_flags , DEOPT_MARKED_ALLOCATION_SITES ) ) { <nl> mmm a / src / extensions / free - buffer - extension . 
cc <nl> ppp b / src / extensions / free - buffer - extension . cc <nl> void FreeBufferExtension : : FreeBuffer ( <nl> const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> v8 : : Local < v8 : : ArrayBuffer > arrayBuffer = args [ 0 ] . As < v8 : : ArrayBuffer > ( ) ; <nl> v8 : : ArrayBuffer : : Contents contents = arrayBuffer - > Externalize ( ) ; <nl> - contents . Deleter ( ) ( contents . Data ( ) , contents . ByteLength ( ) , <nl> - contents . DeleterData ( ) ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( args . GetIsolate ( ) ) ; <nl> + isolate - > array_buffer_allocator ( ) - > Free ( contents . Data ( ) , <nl> + contents . ByteLength ( ) ) ; <nl> } <nl> <nl> } / / namespace internal <nl> mmm a / src / heap / array - buffer - collector . cc <nl> ppp b / src / heap / array - buffer - collector . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> + namespace { <nl> + <nl> + void FreeAllocationsHelper ( <nl> + Heap * heap , const std : : vector < JSArrayBuffer : : Allocation > & allocations ) { <nl> + for ( JSArrayBuffer : : Allocation alloc : allocations ) { <nl> + JSArrayBuffer : : FreeBackingStore ( heap - > isolate ( ) , alloc ) ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> void ArrayBufferCollector : : QueueOrFreeGarbageAllocations ( <nl> - std : : vector < std : : shared_ptr < BackingStore > > backing_stores ) { <nl> + std : : vector < JSArrayBuffer : : Allocation > allocations ) { <nl> if ( heap_ - > ShouldReduceMemory ( ) ) { <nl> - / / Destruct the vector , which destructs the std : : shared_ptrs , freeing <nl> - / / the backing stores . <nl> - backing_stores . clear ( ) ; <nl> + FreeAllocationsHelper ( heap_ , allocations ) ; <nl> } else { <nl> base : : MutexGuard guard ( & allocations_mutex_ ) ; <nl> - allocations_ . push_back ( std : : move ( backing_stores ) ) ; <nl> + allocations_ . push_back ( std : : move ( allocations ) ) ; <nl> } <nl> } <nl> <nl> void ArrayBufferCollector : : PerformFreeAllocations ( ) { <nl> base : : MutexGuard guard ( & allocations_mutex_ ) ; <nl> - / / Destruct the vector , which destructs the vecotr of std : : shared_ptrs , <nl> - / / freeing the backing stores if their refcount drops to zero . <nl> + for ( const std : : vector < JSArrayBuffer : : Allocation > & allocations : <nl> + allocations_ ) { <nl> + FreeAllocationsHelper ( heap_ , allocations ) ; <nl> + } <nl> allocations_ . clear ( ) ; <nl> } <nl> <nl> mmm a / src / heap / array - buffer - collector . h <nl> ppp b / src / heap / array - buffer - collector . h <nl> class ArrayBufferCollector { <nl> / / <nl> / / FreeAllocations ( ) potentially triggers a background task for processing . <nl> void QueueOrFreeGarbageAllocations ( <nl> - std : : vector < std : : shared_ptr < BackingStore > > allocations ) ; <nl> + std : : vector < JSArrayBuffer : : Allocation > allocations ) ; <nl> <nl> / / Calls FreeAllocations ( ) on a background thread . <nl> void FreeAllocations ( ) ; <nl> class ArrayBufferCollector { <nl> <nl> Heap * const heap_ ; <nl> base : : Mutex allocations_mutex_ ; <nl> - std : : vector < std : : vector < std : : shared_ptr < BackingStore > > > allocations_ ; <nl> + std : : vector < std : : vector < JSArrayBuffer : : Allocation > > allocations_ ; <nl> } ; <nl> <nl> } / / namespace internal <nl> mmm a / src / heap / array - buffer - tracker - inl . h <nl> ppp b / src / heap / array - buffer - tracker - inl . h <nl> <nl> # include " src / objects / js - array - buffer - inl . h " <nl> # include " src / objects / objects . 
h " <nl> <nl> - # define TRACE_BS ( . . . ) / * PrintF ( __VA_ARGS__ ) * / <nl> - <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - inline size_t PerIsolateAccountingLength ( JSArrayBuffer buffer ) { <nl> - / / TODO ( titzer ) : SharedArrayBuffers and shared WasmMemorys cause problems with <nl> - / / accounting for per - isolate external memory . In particular , sharing the same <nl> - / / array buffer or memory multiple times , which happens in stress tests , can <nl> - / / cause overcounting , leading to GC thrashing . Fix with global accounting ? <nl> - return buffer . is_shared ( ) ? 0 : buffer . byte_length ( ) ; <nl> - } <nl> - <nl> - void ArrayBufferTracker : : RegisterNew ( <nl> - Heap * heap , JSArrayBuffer buffer , <nl> - std : : shared_ptr < BackingStore > backing_store ) { <nl> - if ( ! backing_store ) return ; <nl> + void ArrayBufferTracker : : RegisterNew ( Heap * heap , JSArrayBuffer buffer ) { <nl> + if ( buffer . backing_store ( ) = = nullptr ) return ; <nl> <nl> / / ArrayBuffer tracking works only for small objects . <nl> DCHECK ( ! heap - > IsLargeObject ( buffer ) ) ; <nl> - DCHECK_EQ ( backing_store - > buffer_start ( ) , buffer . backing_store ( ) ) ; <nl> <nl> + const size_t length = buffer . byte_length ( ) ; <nl> Page * page = Page : : FromHeapObject ( buffer ) ; <nl> { <nl> base : : MutexGuard guard ( page - > mutex ( ) ) ; <nl> void ArrayBufferTracker : : RegisterNew ( <nl> tracker = page - > local_tracker ( ) ; <nl> } <nl> DCHECK_NOT_NULL ( tracker ) ; <nl> - TRACE_BS ( " ABT : reg bs = % p mem = % p ( % zu bytes ) cnt = % zu \ n " , <nl> - backing_store . get ( ) , backing_store - > buffer_start ( ) , <nl> - backing_store - > byte_length ( ) , backing_store . use_count ( ) ) ; <nl> - tracker - > Add ( buffer , std : : move ( backing_store ) ) ; <nl> + tracker - > Add ( buffer , length ) ; <nl> } <nl> <nl> / / TODO ( wez ) : Remove backing - store from external memory accounting . <nl> / / We may go over the limit of externally allocated memory here . We call the <nl> / / api function to trigger a GC in this case . <nl> - const size_t length = PerIsolateAccountingLength ( buffer ) ; <nl> reinterpret_cast < v8 : : Isolate * > ( heap - > isolate ( ) ) <nl> - > AdjustAmountOfExternalAllocatedMemory ( length ) ; <nl> } <nl> <nl> - std : : shared_ptr < BackingStore > ArrayBufferTracker : : Unregister ( <nl> - Heap * heap , JSArrayBuffer buffer ) { <nl> - std : : shared_ptr < BackingStore > backing_store ; <nl> + void ArrayBufferTracker : : Unregister ( Heap * heap , JSArrayBuffer buffer ) { <nl> + if ( buffer . backing_store ( ) = = nullptr ) return ; <nl> <nl> - const size_t length = PerIsolateAccountingLength ( buffer ) ; <nl> Page * page = Page : : FromHeapObject ( buffer ) ; <nl> + const size_t length = buffer . byte_length ( ) ; <nl> { <nl> base : : MutexGuard guard ( page - > mutex ( ) ) ; <nl> LocalArrayBufferTracker * tracker = page - > local_tracker ( ) ; <nl> DCHECK_NOT_NULL ( tracker ) ; <nl> - backing_store = tracker - > Remove ( buffer ) ; <nl> + tracker - > Remove ( buffer , length ) ; <nl> } <nl> <nl> / / TODO ( wez ) : Remove backing - store from external memory accounting . <nl> heap - > update_external_memory ( - static_cast < intptr_t > ( length ) ) ; <nl> - return backing_store ; <nl> - } <nl> - <nl> - std : : shared_ptr < BackingStore > ArrayBufferTracker : : Lookup ( Heap * heap , <nl> - JSArrayBuffer buffer ) { <nl> - if ( buffer . 
backing_store ( ) = = nullptr ) return { } ; <nl> - <nl> - Page * page = Page : : FromHeapObject ( buffer ) ; <nl> - base : : MutexGuard guard ( page - > mutex ( ) ) ; <nl> - LocalArrayBufferTracker * tracker = page - > local_tracker ( ) ; <nl> - DCHECK_NOT_NULL ( tracker ) ; <nl> - return tracker - > Lookup ( buffer ) ; <nl> } <nl> <nl> template < typename Callback > <nl> void LocalArrayBufferTracker : : Free ( Callback should_free ) { <nl> size_t freed_memory = 0 ; <nl> + Isolate * isolate = page_ - > heap ( ) - > isolate ( ) ; <nl> for ( TrackingData : : iterator it = array_buffers_ . begin ( ) ; <nl> it ! = array_buffers_ . end ( ) ; ) { <nl> / / Unchecked cast because the map might already be dead at this point . <nl> JSArrayBuffer buffer = JSArrayBuffer : : unchecked_cast ( it - > first ) ; <nl> - const size_t length = PerIsolateAccountingLength ( buffer ) ; <nl> + const size_t length = it - > second . length ; <nl> <nl> if ( should_free ( buffer ) ) { <nl> - / / Destroy the shared pointer , ( perhaps ) freeing the backing store . <nl> - TRACE_BS ( " ABT : die bs = % p mem = % p ( % zu bytes ) cnt = % zu \ n " , it - > second . get ( ) , <nl> - it - > second - > buffer_start ( ) , it - > second - > byte_length ( ) , <nl> - it - > second . use_count ( ) ) ; <nl> + JSArrayBuffer : : FreeBackingStore ( isolate , it - > second ) ; <nl> it = array_buffers_ . erase ( it ) ; <nl> freed_memory + = length ; <nl> } else { <nl> void ArrayBufferTracker : : FreeDead ( Page * page , MarkingState * marking_state ) { <nl> } <nl> } <nl> <nl> - void LocalArrayBufferTracker : : Add ( JSArrayBuffer buffer , <nl> - std : : shared_ptr < BackingStore > backing_store ) { <nl> - auto length = PerIsolateAccountingLength ( buffer ) ; <nl> + void LocalArrayBufferTracker : : Add ( JSArrayBuffer buffer , size_t length ) { <nl> page_ - > IncrementExternalBackingStoreBytes ( <nl> ExternalBackingStoreType : : kArrayBuffer , length ) ; <nl> <nl> - AddInternal ( buffer , std : : move ( backing_store ) ) ; <nl> + AddInternal ( buffer , length ) ; <nl> } <nl> <nl> - void LocalArrayBufferTracker : : AddInternal ( <nl> - JSArrayBuffer buffer , std : : shared_ptr < BackingStore > backing_store ) { <nl> - auto ret = array_buffers_ . insert ( { buffer , std : : move ( backing_store ) } ) ; <nl> + void LocalArrayBufferTracker : : AddInternal ( JSArrayBuffer buffer , size_t length ) { <nl> + auto ret = array_buffers_ . insert ( <nl> + { buffer , <nl> + { buffer . backing_store ( ) , length , buffer . backing_store ( ) , <nl> + buffer . is_wasm_memory ( ) } } ) ; <nl> USE ( ret ) ; <nl> / / Check that we indeed inserted a new value and did not overwrite an existing <nl> / / one ( which would be a bug ) . <nl> DCHECK ( ret . second ) ; <nl> } <nl> <nl> - std : : shared_ptr < BackingStore > LocalArrayBufferTracker : : Remove ( <nl> - JSArrayBuffer buffer ) { <nl> - TrackingData : : iterator it = array_buffers_ . find ( buffer ) ; <nl> - <nl> - / / Check that we indeed find a key to remove . <nl> - DCHECK ( it ! = array_buffers_ . end ( ) ) ; <nl> - <nl> - / / Steal the underlying shared pointer before erasing the entry . <nl> - std : : shared_ptr < BackingStore > backing_store = std : : move ( it - > second ) ; <nl> - <nl> - TRACE_BS ( " ABT : rm bs = % p mem = % p ( % zu bytes ) cnt = % zu \ n " , backing_store . get ( ) , <nl> - backing_store - > buffer_start ( ) , backing_store - > byte_length ( ) , <nl> - backing_store . use_count ( ) ) ; <nl> - <nl> - / / Erase the entry . <nl> - array_buffers_ . 
erase ( it ) ; <nl> - <nl> - / / Update accounting . <nl> - auto length = PerIsolateAccountingLength ( buffer ) ; <nl> + void LocalArrayBufferTracker : : Remove ( JSArrayBuffer buffer , size_t length ) { <nl> page_ - > DecrementExternalBackingStoreBytes ( <nl> ExternalBackingStoreType : : kArrayBuffer , length ) ; <nl> <nl> - return backing_store ; <nl> - } <nl> - <nl> - std : : shared_ptr < BackingStore > LocalArrayBufferTracker : : Lookup ( <nl> - JSArrayBuffer buffer ) { <nl> TrackingData : : iterator it = array_buffers_ . find ( buffer ) ; <nl> - if ( it ! = array_buffers_ . end ( ) ) { <nl> - return it - > second ; <nl> - } <nl> - return { } ; <nl> + / / Check that we indeed find a key to remove . <nl> + DCHECK ( it ! = array_buffers_ . end ( ) ) ; <nl> + DCHECK_EQ ( length , it - > second . length ) ; <nl> + array_buffers_ . erase ( it ) ; <nl> } <nl> <nl> - # undef TRACE_BS <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / heap / array - buffer - tracker . cc <nl> ppp b / src / heap / array - buffer - tracker . cc <nl> <nl> # include " src / heap / heap . h " <nl> # include " src / heap / spaces . h " <nl> <nl> - # define TRACE_BS ( . . . ) / * PrintF ( __VA_ARGS__ ) * / <nl> - <nl> namespace v8 { <nl> namespace internal { <nl> <nl> LocalArrayBufferTracker : : ~ LocalArrayBufferTracker ( ) { <nl> <nl> template < typename Callback > <nl> void LocalArrayBufferTracker : : Process ( Callback callback ) { <nl> - std : : vector < std : : shared_ptr < BackingStore > > backing_stores_to_free ; <nl> + std : : vector < JSArrayBuffer : : Allocation > backing_stores_to_free ; <nl> TrackingData kept_array_buffers ; <nl> <nl> JSArrayBuffer new_buffer ; <nl> void LocalArrayBufferTracker : : Process ( Callback callback ) { <nl> DCHECK_EQ ( page_ , Page : : FromHeapObject ( old_buffer ) ) ; <nl> const CallbackResult result = callback ( old_buffer , & new_buffer ) ; <nl> if ( result = = kKeepEntry ) { <nl> - kept_array_buffers . insert ( std : : move ( * it ) ) ; <nl> + kept_array_buffers . insert ( * it ) ; <nl> } else if ( result = = kUpdateEntry ) { <nl> - DCHECK_EQ ( old_buffer . byte_length ( ) , new_buffer . byte_length ( ) ) ; <nl> DCHECK ( ! new_buffer . is_null ( ) ) ; <nl> Page * target_page = Page : : FromHeapObject ( new_buffer ) ; <nl> { <nl> void LocalArrayBufferTracker : : Process ( Callback callback ) { <nl> tracker = target_page - > local_tracker ( ) ; <nl> } <nl> DCHECK_NOT_NULL ( tracker ) ; <nl> - const size_t length = PerIsolateAccountingLength ( old_buffer ) ; <nl> + const size_t length = it - > second . length ; <nl> / / We should decrement before adding to avoid potential overflows in <nl> / / the external memory counters . <nl> - tracker - > AddInternal ( new_buffer , std : : move ( it - > second ) ) ; <nl> + DCHECK_EQ ( it - > first . is_wasm_memory ( ) , it - > second . is_wasm_memory ) ; <nl> + tracker - > AddInternal ( new_buffer , length ) ; <nl> MemoryChunk : : MoveExternalBackingStoreBytes ( <nl> ExternalBackingStoreType : : kArrayBuffer , <nl> static_cast < MemoryChunk * > ( page_ ) , <nl> static_cast < MemoryChunk * > ( target_page ) , length ) ; <nl> } <nl> } else if ( result = = kRemoveEntry ) { <nl> - freed_memory + = PerIsolateAccountingLength ( old_buffer ) ; <nl> - auto backing_store = std : : move ( it - > second ) ; <nl> - TRACE_BS ( " ABT : queue bs = % p mem = % p ( % zu bytes ) cnt = % zu \ n " , <nl> - backing_store . 
get ( ) , backing_store - > buffer_start ( ) , <nl> - backing_store - > byte_length ( ) , backing_store . use_count ( ) ) ; <nl> - if ( ! backing_store - > is_shared ( ) ) { <nl> - / / Only retain non - shared backing stores . For shared backing stores , <nl> - / / drop the shared_ptr right away , since this should be cheap , <nl> - / / as it only updates a refcount , except that last , which will <nl> - / / destruct it , which is rare . <nl> - backing_stores_to_free . push_back ( backing_store ) ; <nl> - } <nl> + freed_memory + = it - > second . length ; <nl> + / / We pass backing_store ( ) and stored length to the collector for freeing <nl> + / / the backing store . Wasm allocations will go through their own tracker <nl> + / / based on the backing store . <nl> + backing_stores_to_free . push_back ( it - > second ) ; <nl> } else { <nl> UNREACHABLE ( ) ; <nl> } <nl> void ArrayBufferTracker : : TearDown ( Heap * heap ) { <nl> <nl> } / / namespace internal <nl> } / / namespace v8 <nl> - # undef TRACE_BS <nl> mmm a / src / heap / array - buffer - tracker . h <nl> ppp b / src / heap / array - buffer - tracker . h <nl> <nl> <nl> # include " src / base / platform / mutex . h " <nl> # include " src / common / globals . h " <nl> - # include " src / objects / backing - store . h " <nl> # include " src / objects / js - array - buffer . h " <nl> # include " src / utils / allocation . h " <nl> <nl> class ArrayBufferTracker : public AllStatic { <nl> <nl> / / Register / unregister a new JSArrayBuffer | buffer | for tracking . Guards all <nl> / / access to the tracker by taking the page lock for the corresponding page . <nl> - inline static void RegisterNew ( Heap * heap , JSArrayBuffer buffer , <nl> - std : : shared_ptr < BackingStore > ) ; <nl> - inline static std : : shared_ptr < BackingStore > Unregister ( Heap * heap , <nl> - JSArrayBuffer buffer ) ; <nl> - inline static std : : shared_ptr < BackingStore > Lookup ( Heap * heap , <nl> - JSArrayBuffer buffer ) ; <nl> + inline static void RegisterNew ( Heap * heap , JSArrayBuffer buffer ) ; <nl> + inline static void Unregister ( Heap * heap , JSArrayBuffer buffer ) ; <nl> <nl> / / Identifies all backing store pointers for dead JSArrayBuffers in new space . <nl> / / Does not take any locks and can only be called during Scavenge . <nl> class LocalArrayBufferTracker { <nl> explicit LocalArrayBufferTracker ( Page * page ) : page_ ( page ) { } <nl> ~ LocalArrayBufferTracker ( ) ; <nl> <nl> - inline void Add ( JSArrayBuffer buffer , <nl> - std : : shared_ptr < BackingStore > backing_store ) ; <nl> - inline std : : shared_ptr < BackingStore > Remove ( JSArrayBuffer buffer ) ; <nl> - inline std : : shared_ptr < BackingStore > Lookup ( JSArrayBuffer buffer ) ; <nl> + inline void Add ( JSArrayBuffer buffer , size_t length ) ; <nl> + inline void Remove ( JSArrayBuffer buffer , size_t length ) ; <nl> <nl> / / Frees up array buffers . <nl> / / <nl> class LocalArrayBufferTracker { <nl> } <nl> } ; <nl> <nl> + / / Keep track of the backing store and the corresponding length at time of <nl> + / / registering . The length is accessed from JavaScript and can be a <nl> + / / HeapNumber . The reason for tracking the length is that in the case of <nl> + / / length being a HeapNumber , the buffer and its length may be stored on <nl> + / / different memory pages , making it impossible to guarantee order of freeing . 
<nl> using TrackingData = <nl> - std : : unordered_map < JSArrayBuffer , std : : shared_ptr < BackingStore > , Hasher > ; <nl> + std : : unordered_map < JSArrayBuffer , JSArrayBuffer : : Allocation , Hasher > ; <nl> <nl> / / Internal version of add that does not update counters . Requires separate <nl> / / logic for updating external memory counters . <nl> - inline void AddInternal ( JSArrayBuffer buffer , <nl> - std : : shared_ptr < BackingStore > backing_store ) ; <nl> + inline void AddInternal ( JSArrayBuffer buffer , size_t length ) ; <nl> <nl> Page * page_ ; <nl> / / The set contains raw heap pointers which are removed by the GC upon <nl> mmm a / src / heap / factory . cc <nl> ppp b / src / heap / factory . cc <nl> void initialize_length < PropertyArray > ( Handle < PropertyArray > array , int length ) { <nl> array - > initialize_length ( length ) ; <nl> } <nl> <nl> - inline void ZeroEmbedderFields ( i : : Handle < i : : JSObject > obj ) { <nl> - auto count = obj - > GetEmbedderFieldCount ( ) ; <nl> - for ( int i = 0 ; i < count ; i + + ) { <nl> - obj - > SetEmbedderField ( i , Smi : : kZero ) ; <nl> - } <nl> - } <nl> - <nl> } / / namespace <nl> <nl> template < typename T > <nl> Handle < SyntheticModule > Factory : : NewSyntheticModule ( <nl> return module ; <nl> } <nl> <nl> - Handle < JSArrayBuffer > Factory : : NewJSArrayBuffer ( AllocationType allocation ) { <nl> - Handle < Map > map ( isolate ( ) - > native_context ( ) - > array_buffer_fun ( ) . initial_map ( ) , <nl> - isolate ( ) ) ; <nl> - auto result = <nl> - Handle < JSArrayBuffer > : : cast ( NewJSObjectFromMap ( map , allocation ) ) ; <nl> - ZeroEmbedderFields ( result ) ; <nl> - result - > SetupEmpty ( SharedFlag : : kNotShared ) ; <nl> - return result ; <nl> - } <nl> - <nl> - MaybeHandle < JSArrayBuffer > Factory : : NewJSArrayBufferAndBackingStore ( <nl> - size_t byte_length , InitializedFlag initialized , <nl> - AllocationType allocation ) { <nl> - / / TODO ( titzer ) : Don ' t bother allocating a 0 - length backing store . <nl> - / / This is currently required because the embedder API for <nl> - / / TypedArray : : HasBuffer ( ) checks if the backing store is nullptr . <nl> - / / That check should be changed . <nl> - <nl> - std : : unique_ptr < BackingStore > backing_store = BackingStore : : Allocate ( <nl> - isolate ( ) , byte_length , SharedFlag : : kNotShared , initialized ) ; <nl> - if ( ! backing_store ) return MaybeHandle < JSArrayBuffer > ( ) ; <nl> - Handle < Map > map ( isolate ( ) - > native_context ( ) - > array_buffer_fun ( ) . initial_map ( ) , <nl> - isolate ( ) ) ; <nl> - auto array_buffer = <nl> - Handle < JSArrayBuffer > : : cast ( NewJSObjectFromMap ( map , allocation ) ) ; <nl> - array_buffer - > Attach ( std : : move ( backing_store ) ) ; <nl> - ZeroEmbedderFields ( array_buffer ) ; <nl> - return array_buffer ; <nl> - } <nl> - <nl> - Handle < JSArrayBuffer > Factory : : NewJSSharedArrayBuffer ( <nl> - AllocationType allocation ) { <nl> - Handle < Map > map ( <nl> - isolate ( ) - > native_context ( ) - > shared_array_buffer_fun ( ) . initial_map ( ) , <nl> + Handle < JSArrayBuffer > Factory : : NewJSArrayBuffer ( SharedFlag shared , <nl> + AllocationType allocation ) { <nl> + Handle < JSFunction > array_buffer_fun ( <nl> + shared = = SharedFlag : : kShared <nl> + ? 
isolate ( ) - > native_context ( ) - > shared_array_buffer_fun ( ) <nl> + : isolate ( ) - > native_context ( ) - > array_buffer_fun ( ) , <nl> isolate ( ) ) ; <nl> - auto result = <nl> - Handle < JSArrayBuffer > : : cast ( NewJSObjectFromMap ( map , allocation ) ) ; <nl> - ZeroEmbedderFields ( result ) ; <nl> - result - > SetupEmpty ( SharedFlag : : kShared ) ; <nl> - return result ; <nl> + Handle < Map > map ( array_buffer_fun - > initial_map ( ) , isolate ( ) ) ; <nl> + return Handle < JSArrayBuffer > : : cast ( NewJSObjectFromMap ( map , allocation ) ) ; <nl> } <nl> <nl> Handle < JSIteratorResult > Factory : : NewJSIteratorResult ( Handle < Object > value , <nl> Handle < JSArrayBufferView > Factory : : NewJSArrayBufferView ( <nl> array_buffer_view - > set_buffer ( * buffer ) ; <nl> array_buffer_view - > set_byte_offset ( byte_offset ) ; <nl> array_buffer_view - > set_byte_length ( byte_length ) ; <nl> - ZeroEmbedderFields ( array_buffer_view ) ; <nl> + for ( int i = 0 ; i < v8 : : ArrayBufferView : : kEmbedderFieldCount ; i + + ) { <nl> + array_buffer_view - > SetEmbedderField ( i , Smi : : kZero ) ; <nl> + } <nl> DCHECK_EQ ( array_buffer_view - > GetEmbedderFieldCount ( ) , <nl> v8 : : ArrayBufferView : : kEmbedderFieldCount ) ; <nl> return array_buffer_view ; <nl> Handle < JSPromise > Factory : : NewJSPromiseWithoutHook ( AllocationType allocation ) { <nl> NewJSObject ( isolate ( ) - > promise_function ( ) , allocation ) ) ; <nl> promise - > set_reactions_or_result ( Smi : : kZero ) ; <nl> promise - > set_flags ( 0 ) ; <nl> - ZeroEmbedderFields ( promise ) ; <nl> + for ( int i = 0 ; i < v8 : : Promise : : kEmbedderFieldCount ; i + + ) { <nl> + promise - > SetEmbedderField ( i , Smi : : kZero ) ; <nl> + } <nl> return promise ; <nl> } <nl> <nl> mmm a / src / heap / factory . h <nl> ppp b / src / heap / factory . h <nl> class WeakCell ; <nl> struct SourceRange ; <nl> template < typename T > <nl> class ZoneVector ; <nl> - enum class SharedFlag : uint8_t ; <nl> - enum class InitializedFlag : uint8_t ; <nl> + enum class SharedFlag : uint32_t ; <nl> <nl> enum FunctionMode { <nl> kWithNameBit = 1 < < 0 , <nl> class V8_EXPORT_PRIVATE Factory { <nl> v8 : : Module : : SyntheticModuleEvaluationSteps evaluation_steps ) ; <nl> <nl> Handle < JSArrayBuffer > NewJSArrayBuffer ( <nl> - AllocationType allocation = AllocationType : : kYoung ) ; <nl> - <nl> - MaybeHandle < JSArrayBuffer > NewJSArrayBufferAndBackingStore ( <nl> - size_t byte_length , InitializedFlag initialized , <nl> - AllocationType allocation = AllocationType : : kYoung ) ; <nl> - <nl> - Handle < JSArrayBuffer > NewJSSharedArrayBuffer ( <nl> - AllocationType allocation = AllocationType : : kYoung ) ; <nl> + SharedFlag shared , AllocationType allocation = AllocationType : : kYoung ) ; <nl> <nl> static void TypeAndSizeForElementsKind ( ElementsKind kind , <nl> ExternalArrayType * array_type , <nl> mmm a / src / heap / heap . cc <nl> ppp b / src / heap / heap . 
cc <nl> HeapObject Heap : : AlignWithFiller ( HeapObject object , int object_size , <nl> return object ; <nl> } <nl> <nl> - void Heap : : RegisterBackingStore ( JSArrayBuffer buffer , <nl> - std : : shared_ptr < BackingStore > backing_store ) { <nl> - ArrayBufferTracker : : RegisterNew ( this , buffer , std : : move ( backing_store ) ) ; <nl> + void Heap : : RegisterNewArrayBuffer ( JSArrayBuffer buffer ) { <nl> + ArrayBufferTracker : : RegisterNew ( this , buffer ) ; <nl> } <nl> <nl> - std : : shared_ptr < BackingStore > Heap : : UnregisterBackingStore ( <nl> - JSArrayBuffer buffer ) { <nl> - return ArrayBufferTracker : : Unregister ( this , buffer ) ; <nl> - } <nl> - <nl> - std : : shared_ptr < BackingStore > Heap : : LookupBackingStore ( JSArrayBuffer buffer ) { <nl> - return ArrayBufferTracker : : Lookup ( this , buffer ) ; <nl> + void Heap : : UnregisterArrayBuffer ( JSArrayBuffer buffer ) { <nl> + ArrayBufferTracker : : Unregister ( this , buffer ) ; <nl> } <nl> <nl> void Heap : : ConfigureInitialOldGenerationSize ( ) { <nl> mmm a / src / heap / heap . h <nl> ppp b / src / heap / heap . h <nl> class TestMemoryAllocatorScope ; <nl> } / / namespace heap <nl> <nl> class IncrementalMarking ; <nl> - class BackingStore ; <nl> class JSArrayBuffer ; <nl> using v8 : : MemoryPressureLevel ; <nl> <nl> class Heap { <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / ArrayBuffer tracking . = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - void RegisterBackingStore ( JSArrayBuffer buffer , <nl> - std : : shared_ptr < BackingStore > backing_store ) ; <nl> - std : : shared_ptr < BackingStore > UnregisterBackingStore ( JSArrayBuffer buffer ) ; <nl> - std : : shared_ptr < BackingStore > LookupBackingStore ( JSArrayBuffer buffer ) ; <nl> + <nl> + / / TODO ( gc ) : API usability : encapsulate mutation of JSArrayBuffer : : is_external <nl> + / / in the registration / unregistration APIs . Consider dropping the " New " from <nl> + / / " RegisterNewArrayBuffer " because one can re - register a previously <nl> + / / unregistered buffer , too , and the name is confusing . <nl> + void RegisterNewArrayBuffer ( JSArrayBuffer buffer ) ; <nl> + void UnregisterArrayBuffer ( JSArrayBuffer buffer ) ; <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / Allocation site tracking . = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> mmm a / src / heap / setup - heap - internal . cc <nl> ppp b / src / heap / setup - heap - internal . cc <nl> void Heap : : CreateInitialObjects ( ) { <nl> <nl> set_feedback_vectors_for_profiling_tools ( roots . undefined_value ( ) ) ; <nl> set_pending_optimize_for_test_bytecode ( roots . undefined_value ( ) ) ; <nl> - set_shared_wasm_memories ( roots . empty_weak_array_list ( ) ) ; <nl> <nl> set_script_list ( roots . empty_weak_array_list ( ) ) ; <nl> <nl> mmm a / src / heap / spaces . cc <nl> ppp b / src / heap / spaces . cc <nl> <nl> # include " src / base / platform / semaphore . h " <nl> # include " src / base / template - utils . 
h " <nl> # include " src / execution / vm - state - inl . h " <nl> - # include " src / heap / array - buffer - tracker - inl . h " <nl> + # include " src / heap / array - buffer - tracker . h " <nl> # include " src / heap / combined - heap . h " <nl> # include " src / heap / concurrent - marking . h " <nl> # include " src / heap / gc - tracer . h " <nl> void PagedSpace : : Verify ( Isolate * isolate , ObjectVisitor * visitor ) { <nl> } else if ( object . IsJSArrayBuffer ( ) ) { <nl> JSArrayBuffer array_buffer = JSArrayBuffer : : cast ( object ) ; <nl> if ( ArrayBufferTracker : : IsTracked ( array_buffer ) ) { <nl> - size_t size = PerIsolateAccountingLength ( array_buffer ) ; <nl> + size_t size = array_buffer . byte_length ( ) ; <nl> external_page_bytes [ ExternalBackingStoreType : : kArrayBuffer ] + = size ; <nl> } <nl> } <nl> void NewSpace : : Verify ( Isolate * isolate ) { <nl> } else if ( object . IsJSArrayBuffer ( ) ) { <nl> JSArrayBuffer array_buffer = JSArrayBuffer : : cast ( object ) ; <nl> if ( ArrayBufferTracker : : IsTracked ( array_buffer ) ) { <nl> - size_t size = PerIsolateAccountingLength ( array_buffer ) ; <nl> + size_t size = array_buffer . byte_length ( ) ; <nl> external_space_bytes [ ExternalBackingStoreType : : kArrayBuffer ] + = size ; <nl> } <nl> } <nl> deleted file mode 100644 <nl> index 43b25ff9fcd . . 00000000000 <nl> mmm a / src / objects / backing - store . cc <nl> ppp / dev / null <nl> <nl> - / / Copyright 2019 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - # include " src / objects / backing - store . h " <nl> - # include " src / execution / isolate . h " <nl> - # include " src / handles / global - handles . h " <nl> - # include " src / logging / counters . h " <nl> - # include " src / wasm / wasm - engine . h " <nl> - # include " src / wasm / wasm - limits . h " <nl> - # include " src / wasm / wasm - objects - inl . h " <nl> - <nl> - # define TRACE_BS ( . . . ) / * PrintF ( __VA_ARGS__ ) ; * / <nl> - <nl> - namespace v8 { <nl> - namespace internal { <nl> - <nl> - namespace { <nl> - # if V8_TARGET_ARCH_64_BIT <nl> - constexpr bool kUseGuardRegions = true ; <nl> - # else <nl> - constexpr bool kUseGuardRegions = false ; <nl> - # endif <nl> - <nl> - # if V8_TARGET_ARCH_MIPS64 <nl> - / / MIPS64 has a user space of 2 ^ 40 bytes on most processors , <nl> - / / address space limits needs to be smaller . <nl> - constexpr size_t kAddressSpaceLimit = 0x4000000000L ; / / 256 GiB <nl> - # elif V8_TARGET_ARCH_64_BIT <nl> - constexpr size_t kAddressSpaceLimit = 0x10100000000L ; / / 1 TiB + 4 GiB <nl> - # else <nl> - constexpr size_t kAddressSpaceLimit = 0xC0000000 ; / / 3 GiB <nl> - # endif <nl> - <nl> - constexpr uint64_t GB = 1024 * 1024 * 1024 ; <nl> - constexpr uint64_t kNegativeGuardSize = 2 * GB ; <nl> - constexpr uint64_t kFullGuardSize = 10 * GB ; <nl> - <nl> - std : : atomic < uint64_t > reserved_address_space_ { 0 } ; <nl> - <nl> - / / Allocation results are reported to UMA <nl> - / / <nl> - / / See wasm_memory_allocation_result in counters . 
h <nl> - enum class AllocationStatus { <nl> - kSuccess , / / Succeeded on the first try <nl> - <nl> - kSuccessAfterRetry , / / Succeeded after garbage collection <nl> - <nl> - kAddressSpaceLimitReachedFailure , / / Failed because Wasm is at its address <nl> - / / space limit <nl> - <nl> - kOtherFailure / / Failed for an unknown reason <nl> - } ; <nl> - <nl> - base : : AddressRegion GetGuardedRegion ( void * buffer_start , size_t byte_length ) { <nl> - / / Guard regions always look like this : <nl> - / / | xxx ( 2GiB ) xxx | . . . . . . . ( 4GiB ) . . xxxxx | xxxxxx ( 4GiB ) xxxxxx | <nl> - / / ^ buffer_start <nl> - / / ^ byte_length <nl> - / / ^ negative guard region ^ positive guard region <nl> - <nl> - Address start = reinterpret_cast < Address > ( buffer_start ) ; <nl> - DCHECK_EQ ( 8 , sizeof ( size_t ) ) ; / / only use on 64 - bit <nl> - DCHECK_EQ ( 0 , start % AllocatePageSize ( ) ) ; <nl> - return base : : AddressRegion ( start - ( 2 * GB ) , <nl> - static_cast < size_t > ( kFullGuardSize ) ) ; <nl> - } <nl> - <nl> - void RecordStatus ( Isolate * isolate , AllocationStatus status ) { <nl> - isolate - > counters ( ) - > wasm_memory_allocation_result ( ) - > AddSample ( <nl> - static_cast < int > ( status ) ) ; <nl> - } <nl> - <nl> - inline void DebugCheckZero ( void * start , size_t byte_length ) { <nl> - # if DEBUG <nl> - / / Double check memory is zero - initialized . <nl> - const byte * bytes = reinterpret_cast < const byte * > ( start ) ; <nl> - for ( size_t i = 0 ; i < byte_length ; i + + ) { <nl> - DCHECK_EQ ( 0 , bytes [ i ] ) ; <nl> - } <nl> - # endif <nl> - } <nl> - } / / namespace <nl> - <nl> - bool BackingStore : : ReserveAddressSpace ( uint64_t num_bytes ) { <nl> - uint64_t reservation_limit = kAddressSpaceLimit ; <nl> - while ( true ) { <nl> - uint64_t old_count = reserved_address_space_ . load ( ) ; <nl> - if ( old_count > reservation_limit ) return false ; <nl> - if ( reservation_limit - old_count < num_bytes ) return false ; <nl> - if ( reserved_address_space_ . compare_exchange_weak ( old_count , <nl> - old_count + num_bytes ) ) { <nl> - return true ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void BackingStore : : ReleaseReservation ( uint64_t num_bytes ) { <nl> - uint64_t old_reserved = reserved_address_space_ . fetch_sub ( num_bytes ) ; <nl> - USE ( old_reserved ) ; <nl> - DCHECK_LE ( num_bytes , old_reserved ) ; <nl> - } <nl> - <nl> - / / The backing store for a Wasm shared memory remembers all the isolates <nl> - / / with which it has been shared . <nl> - struct SharedWasmMemoryData { <nl> - std : : vector < Isolate * > isolates_ ; <nl> - } ; <nl> - <nl> - void BackingStore : : Clear ( ) { <nl> - buffer_start_ = nullptr ; <nl> - byte_length_ = 0 ; <nl> - has_guard_regions_ = false ; <nl> - type_specific_data_ . v8_api_array_buffer_allocator = nullptr ; <nl> - } <nl> - <nl> - BackingStore : : ~ BackingStore ( ) { <nl> - if ( globally_registered_ ) { <nl> - GlobalBackingStoreRegistry : : Unregister ( this ) ; <nl> - globally_registered_ = false ; <nl> - } <nl> - <nl> - if ( buffer_start_ = = nullptr ) return ; / / nothing to deallocate <nl> - <nl> - if ( is_wasm_memory_ ) { <nl> - TRACE_BS ( " BSw : free bs = % p mem = % p ( % zu bytes ) \ n " , this , buffer_start_ , <nl> - byte_capacity_ ) ; <nl> - if ( is_shared_ ) { <nl> - / / Deallocate the list of attached memory objects . <nl> - SharedWasmMemoryData * shared_data = get_shared_wasm_memory_data ( ) ; <nl> - delete shared_data ; <nl> - type_specific_data_ . 
shared_wasm_memory_data = nullptr ; <nl> - } <nl> - <nl> - / / Wasm memories are always allocated through the page allocator . <nl> - auto region = <nl> - has_guard_regions_ <nl> - ? GetGuardedRegion ( buffer_start_ , byte_length_ ) <nl> - : base : : AddressRegion ( reinterpret_cast < Address > ( buffer_start_ ) , <nl> - byte_capacity_ ) ; <nl> - bool pages_were_freed = <nl> - region . size ( ) = = 0 / * no need to free any pages * / | | <nl> - FreePages ( GetPlatformPageAllocator ( ) , <nl> - reinterpret_cast < void * > ( region . begin ( ) ) , region . size ( ) ) ; <nl> - CHECK ( pages_were_freed ) ; <nl> - BackingStore : : ReleaseReservation ( has_guard_regions_ ? kFullGuardSize <nl> - : byte_capacity_ ) ; <nl> - Clear ( ) ; <nl> - return ; <nl> - } <nl> - if ( free_on_destruct_ ) { <nl> - / / JSArrayBuffer backing store . Deallocate through the embedder ' s allocator . <nl> - auto allocator = reinterpret_cast < v8 : : ArrayBuffer : : Allocator * > ( <nl> - get_v8_api_array_buffer_allocator ( ) ) ; <nl> - TRACE_BS ( " BS : free bs = % p mem = % p ( % zu bytes ) \ n " , this , buffer_start_ , <nl> - byte_capacity_ ) ; <nl> - allocator - > Free ( buffer_start_ , byte_length_ ) ; <nl> - } <nl> - Clear ( ) ; <nl> - } <nl> - <nl> - / / Allocate a backing store using the array buffer allocator from the embedder . <nl> - std : : unique_ptr < BackingStore > BackingStore : : Allocate ( <nl> - Isolate * isolate , size_t byte_length , SharedFlag shared , <nl> - InitializedFlag initialized ) { <nl> - void * buffer_start = nullptr ; <nl> - auto allocator = isolate - > array_buffer_allocator ( ) ; <nl> - CHECK_NOT_NULL ( allocator ) ; <nl> - if ( byte_length ! = 0 ) { <nl> - auto counters = isolate - > counters ( ) ; <nl> - int mb_length = static_cast < int > ( byte_length / MB ) ; <nl> - if ( mb_length > 0 ) { <nl> - counters - > array_buffer_big_allocations ( ) - > AddSample ( mb_length ) ; <nl> - } <nl> - if ( shared = = SharedFlag : : kShared ) { <nl> - counters - > shared_array_allocations ( ) - > AddSample ( mb_length ) ; <nl> - } <nl> - if ( initialized = = InitializedFlag : : kZeroInitialized ) { <nl> - buffer_start = allocator - > Allocate ( byte_length ) ; <nl> - if ( buffer_start ) { <nl> - / / TODO ( wasm ) : node does not implement the zero - initialization API . <nl> - / / Reenable this debug check when node does implement it properly . <nl> - constexpr bool <nl> - kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true ; <nl> - if ( ( ! ( kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI ) ) & & <nl> - ! FLAG_mock_arraybuffer_allocator ) { <nl> - DebugCheckZero ( buffer_start , byte_length ) ; <nl> - } <nl> - } <nl> - } else { <nl> - buffer_start = allocator - > AllocateUninitialized ( byte_length ) ; <nl> - } <nl> - if ( buffer_start = = nullptr ) { <nl> - / / Allocation failed . <nl> - counters - > array_buffer_new_size_failures ( ) - > AddSample ( mb_length ) ; <nl> - return { } ; <nl> - } <nl> - } <nl> - <nl> - auto result = new BackingStore ( buffer_start , / / start <nl> - byte_length , / / length <nl> - byte_length , / / capacity <nl> - shared , / / shared <nl> - false , / / is_wasm_memory <nl> - true , / / free_on_destruct <nl> - false ) ; / / has_guard_regions <nl> - <nl> - TRACE_BS ( " BS : alloc bs = % p mem = % p ( % zu bytes ) \ n " , result , <nl> - result - > buffer_start ( ) , byte_length ) ; <nl> - result - > type_specific_data_ . 
v8_api_array_buffer_allocator = allocator ; <nl> - return std : : unique_ptr < BackingStore > ( result ) ; <nl> - } <nl> - <nl> - / / Allocate a backing store for a Wasm memory . Always use the page allocator <nl> - / / and add guard regions . <nl> - std : : unique_ptr < BackingStore > BackingStore : : TryAllocateWasmMemory ( <nl> - Isolate * isolate , size_t initial_pages , size_t maximum_pages , <nl> - SharedFlag shared ) { <nl> - bool guards = kUseGuardRegions ; <nl> - <nl> - / / For accounting purposes , whether a GC was necessary . <nl> - bool did_retry = false ; <nl> - <nl> - / / A helper to try running a function up to 3 times , executing a GC <nl> - / / if the first and second attempts failed . <nl> - auto gc_retry = [ & ] ( const std : : function < bool ( ) > & fn ) { <nl> - for ( int i = 0 ; i < 3 ; i + + ) { <nl> - if ( fn ( ) ) return true ; <nl> - / / Collect garbage and retry . <nl> - did_retry = true ; <nl> - / / TODO ( wasm ) : try Heap : : EagerlyFreeExternalMemory ( ) first ? <nl> - isolate - > heap ( ) - > MemoryPressureNotification ( <nl> - MemoryPressureLevel : : kCritical , true ) ; <nl> - } <nl> - return false ; <nl> - } ; <nl> - <nl> - / / Compute size of reserved memory . <nl> - size_t reservation_size = 0 ; <nl> - size_t byte_capacity = 0 ; <nl> - <nl> - if ( guards ) { <nl> - reservation_size = static_cast < size_t > ( kFullGuardSize ) ; <nl> - byte_capacity = <nl> - static_cast < size_t > ( wasm : : kV8MaxWasmMemoryPages * wasm : : kWasmPageSize ) ; <nl> - } else { <nl> - reservation_size = std : : min ( maximum_pages , wasm : : kV8MaxWasmMemoryPages ) * <nl> - wasm : : kWasmPageSize ; <nl> - byte_capacity = reservation_size ; <nl> - } <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / 1 . Enforce maximum address space reservation per engine . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - auto reserve_memory_space = [ & ] { <nl> - return BackingStore : : ReserveAddressSpace ( reservation_size ) ; <nl> - } ; <nl> - <nl> - if ( ! gc_retry ( reserve_memory_space ) ) { <nl> - / / Crash on out - of - memory if the correctness fuzzer is running . <nl> - if ( FLAG_correctness_fuzzer_suppressions ) { <nl> - FATAL ( " could not allocate wasm memory backing store " ) ; <nl> - } <nl> - RecordStatus ( isolate , AllocationStatus : : kAddressSpaceLimitReachedFailure ) ; <nl> - return { } ; <nl> - } <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / 2 . Allocate pages ( inaccessible by default ) . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - void * allocation_base = nullptr ; <nl> - auto allocate_pages = [ & ] { <nl> - allocation_base = <nl> - AllocatePages ( GetPlatformPageAllocator ( ) , nullptr , reservation_size , <nl> - wasm : : kWasmPageSize , PageAllocator : : kNoAccess ) ; <nl> - return allocation_base ! = nullptr ; <nl> - } ; <nl> - if ( ! gc_retry ( allocate_pages ) ) { <nl> - / / Page allocator could not reserve enough pages . <nl> - BackingStore : : ReleaseReservation ( reservation_size ) ; <nl> - RecordStatus ( isolate , AllocationStatus : : kOtherFailure ) ; <nl> - return { } ; <nl> - } <nl> - <nl> - / / Get a pointer to the start of the buffer , skipping negative guard region <nl> - / / if necessary . <nl> - byte * buffer_start = reinterpret_cast < byte * > ( allocation_base ) + <nl> - ( guards ? 
kNegativeGuardSize : 0 ) ; <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / 3 . Commit the initial pages ( allow read / write ) . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - size_t byte_length = initial_pages * wasm : : kWasmPageSize ; <nl> - auto commit_memory = [ & ] { <nl> - return byte_length = = 0 | | <nl> - SetPermissions ( GetPlatformPageAllocator ( ) , buffer_start , byte_length , <nl> - PageAllocator : : kReadWrite ) ; <nl> - } ; <nl> - if ( ! gc_retry ( commit_memory ) ) { <nl> - / / SetPermissions put us over the process memory limit . <nl> - V8 : : FatalProcessOutOfMemory ( nullptr , " BackingStore : : AllocateWasmMemory ( ) " ) ; <nl> - } <nl> - <nl> - DebugCheckZero ( buffer_start , byte_length ) ; / / touch the bytes . <nl> - <nl> - RecordStatus ( isolate , did_retry ? AllocationStatus : : kSuccessAfterRetry <nl> - : AllocationStatus : : kSuccess ) ; <nl> - <nl> - auto result = new BackingStore ( buffer_start , / / start <nl> - byte_length , / / length <nl> - byte_capacity , / / capacity <nl> - shared , / / shared <nl> - true , / / is_wasm_memory <nl> - true , / / free_on_destruct <nl> - guards ) ; / / has_guard_regions <nl> - <nl> - TRACE_BS ( " BSw : alloc bs = % p mem = % p ( % zu bytes ) \ n " , result , <nl> - result - > buffer_start ( ) , byte_length ) ; <nl> - <nl> - / / Shared Wasm memories need an anchor for the memory object list . <nl> - if ( shared = = SharedFlag : : kShared ) { <nl> - result - > type_specific_data_ . shared_wasm_memory_data = <nl> - new SharedWasmMemoryData ( ) ; <nl> - } <nl> - <nl> - return std : : unique_ptr < BackingStore > ( result ) ; <nl> - } <nl> - <nl> - / / Allocate a backing store for a Wasm memory . Always use the page allocator <nl> - / / and add guard regions . <nl> - std : : unique_ptr < BackingStore > BackingStore : : AllocateWasmMemory ( <nl> - Isolate * isolate , size_t initial_pages , size_t maximum_pages , <nl> - SharedFlag shared ) { <nl> - / / Wasm pages must be a multiple of the allocation page size . <nl> - DCHECK_EQ ( 0 , wasm : : kWasmPageSize % AllocatePageSize ( ) ) ; <nl> - <nl> - / / Enforce engine limitation on the maximum number of pages . <nl> - if ( initial_pages > wasm : : kV8MaxWasmMemoryPages ) return nullptr ; <nl> - <nl> - auto backing_store = <nl> - TryAllocateWasmMemory ( isolate , initial_pages , maximum_pages , shared ) ; <nl> - if ( ! backing_store & & maximum_pages > initial_pages ) { <nl> - / / If allocating the maximum failed , try allocating with maximum set to <nl> - / / initial <nl> - backing_store = <nl> - TryAllocateWasmMemory ( isolate , initial_pages , initial_pages , shared ) ; <nl> - } <nl> - return backing_store ; <nl> - } <nl> - <nl> - std : : unique_ptr < BackingStore > BackingStore : : CopyWasmMemory ( <nl> - Isolate * isolate , std : : shared_ptr < BackingStore > old , <nl> - size_t new_byte_length ) { <nl> - DCHECK_GE ( new_byte_length , old - > byte_length ( ) ) ; <nl> - / / Note that we could allocate uninitialized to save initialization cost here , <nl> - / / but since Wasm memories are allocated by the page allocator , the zeroing <nl> - / / cost is already built - in . <nl> - / / TODO ( titzer ) : should we use a suitable maximum here ? <nl> - auto new_backing_store = BackingStore : : AllocateWasmMemory ( <nl> - isolate , new_byte_length / wasm : : kWasmPageSize , <nl> - new_byte_length / wasm : : kWasmPageSize , <nl> - old - > is_shared ( ) ? 
SharedFlag : : kShared : SharedFlag : : kNotShared ) ; <nl> - <nl> - if ( ! new_backing_store | | <nl> - new_backing_store - > has_guard_regions ( ) ! = old - > has_guard_regions ( ) ) { <nl> - return { } ; <nl> - } <nl> - <nl> - size_t old_size = old - > byte_length ( ) ; <nl> - if ( old_size > 0 ) { <nl> - memcpy ( new_backing_store - > buffer_start ( ) , old - > buffer_start ( ) , old_size ) ; <nl> - } <nl> - <nl> - return new_backing_store ; <nl> - } <nl> - <nl> - / / Try to grow the size of a wasm memory in place , without realloc + copy . <nl> - bool BackingStore : : GrowWasmMemoryInPlace ( Isolate * isolate , <nl> - size_t new_byte_length ) { <nl> - DCHECK ( is_wasm_memory_ ) ; <nl> - DCHECK_EQ ( 0 , new_byte_length % wasm : : kWasmPageSize ) ; <nl> - if ( new_byte_length < = byte_length_ ) { <nl> - return true ; / / already big enough . <nl> - } <nl> - if ( byte_capacity_ < new_byte_length ) { <nl> - return false ; / / not enough capacity . <nl> - } <nl> - / / Try to adjust the guard regions . <nl> - DCHECK_NOT_NULL ( buffer_start_ ) ; <nl> - / / If adjusting permissions fails , propagate error back to return <nl> - / / failure to grow . <nl> - if ( ! i : : SetPermissions ( GetPlatformPageAllocator ( ) , buffer_start_ , <nl> - new_byte_length , PageAllocator : : kReadWrite ) ) { <nl> - return false ; <nl> - } <nl> - reinterpret_cast < v8 : : Isolate * > ( isolate ) <nl> - - > AdjustAmountOfExternalAllocatedMemory ( new_byte_length - byte_length_ ) ; <nl> - byte_length_ = new_byte_length ; <nl> - return true ; <nl> - } <nl> - <nl> - void BackingStore : : AttachSharedWasmMemoryObject ( <nl> - Isolate * isolate , Handle < WasmMemoryObject > memory_object ) { <nl> - DCHECK ( is_wasm_memory_ ) ; <nl> - DCHECK ( is_shared_ ) ; <nl> - / / We need to take the global registry lock for this operation . <nl> - GlobalBackingStoreRegistry : : AddSharedWasmMemoryObject ( isolate , this , <nl> - memory_object ) ; <nl> - } <nl> - <nl> - void BackingStore : : BroadcastSharedWasmMemoryGrow ( <nl> - Isolate * isolate , std : : shared_ptr < BackingStore > backing_store , <nl> - size_t new_size ) { <nl> - / / requires the global registry lock . <nl> - GlobalBackingStoreRegistry : : BroadcastSharedWasmMemoryGrow ( <nl> - isolate , backing_store , new_size ) ; <nl> - } <nl> - <nl> - void BackingStore : : RemoveSharedWasmMemoryObjects ( Isolate * isolate ) { <nl> - / / requires the global registry lock . <nl> - GlobalBackingStoreRegistry : : Purge ( isolate ) ; <nl> - } <nl> - <nl> - void BackingStore : : UpdateSharedWasmMemoryObjects ( Isolate * isolate ) { <nl> - / / requires the global registry lock . <nl> - GlobalBackingStoreRegistry : : UpdateSharedWasmMemoryObjects ( isolate ) ; <nl> - } <nl> - <nl> - std : : unique_ptr < BackingStore > BackingStore : : WrapAllocation ( <nl> - Isolate * isolate , void * allocation_base , size_t allocation_length , <nl> - SharedFlag shared , bool free_on_destruct ) { <nl> - auto result = <nl> - new BackingStore ( allocation_base , allocation_length , allocation_length , <nl> - shared , false , free_on_destruct , false ) ; <nl> - result - > type_specific_data_ . v8_api_array_buffer_allocator = <nl> - isolate - > array_buffer_allocator ( ) ; <nl> - TRACE_BS ( " BS : wrap bs = % p mem = % p ( % zu bytes ) \ n " , result , <nl> - result - > buffer_start ( ) , result - > byte_length ( ) ) ; <nl> - return std : : unique_ptr < BackingStore > ( result ) ; <nl> - } <nl> - <nl> - void * BackingStore : : get_v8_api_array_buffer_allocator ( ) { <nl> - CHECK ( ! 
is_wasm_memory_ ) ; <nl> - auto array_buffer_allocator = <nl> - type_specific_data_ . v8_api_array_buffer_allocator ; <nl> - CHECK_NOT_NULL ( array_buffer_allocator ) ; <nl> - return array_buffer_allocator ; <nl> - } <nl> - <nl> - SharedWasmMemoryData * BackingStore : : get_shared_wasm_memory_data ( ) { <nl> - CHECK ( is_wasm_memory_ & & is_shared_ ) ; <nl> - auto shared_wasm_memory_data = type_specific_data_ . shared_wasm_memory_data ; <nl> - CHECK ( shared_wasm_memory_data ) ; <nl> - return shared_wasm_memory_data ; <nl> - } <nl> - <nl> - namespace { <nl> - / / Implementation details of GlobalBackingStoreRegistry . <nl> - struct GlobalBackingStoreRegistryImpl { <nl> - GlobalBackingStoreRegistryImpl ( ) { } <nl> - base : : Mutex mutex_ ; <nl> - std : : unordered_map < const void * , std : : weak_ptr < BackingStore > > map_ ; <nl> - } ; <nl> - base : : LazyInstance < GlobalBackingStoreRegistryImpl > : : type global_registry_impl_ = <nl> - LAZY_INSTANCE_INITIALIZER ; <nl> - inline GlobalBackingStoreRegistryImpl * impl ( ) { <nl> - return global_registry_impl_ . Pointer ( ) ; <nl> - } <nl> - } / / namespace <nl> - <nl> - void GlobalBackingStoreRegistry : : Register ( <nl> - std : : shared_ptr < BackingStore > backing_store ) { <nl> - if ( ! backing_store ) return ; <nl> - <nl> - base : : MutexGuard scope_lock ( & impl ( ) - > mutex_ ) ; <nl> - if ( backing_store - > globally_registered_ ) return ; <nl> - TRACE_BS ( " BS : reg bs = % p mem = % p ( % zu bytes ) \ n " , backing_store . get ( ) , <nl> - backing_store - > buffer_start ( ) , backing_store - > byte_length ( ) ) ; <nl> - std : : weak_ptr < BackingStore > weak = backing_store ; <nl> - auto result = impl ( ) - > map_ . insert ( { backing_store - > buffer_start ( ) , weak } ) ; <nl> - CHECK ( result . second ) ; <nl> - backing_store - > globally_registered_ = true ; <nl> - } <nl> - <nl> - void GlobalBackingStoreRegistry : : Unregister ( BackingStore * backing_store ) { <nl> - if ( ! backing_store - > globally_registered_ ) return ; <nl> - <nl> - base : : MutexGuard scope_lock ( & impl ( ) - > mutex_ ) ; <nl> - const auto & result = impl ( ) - > map_ . find ( backing_store - > buffer_start ( ) ) ; <nl> - if ( result ! = impl ( ) - > map_ . end ( ) ) { <nl> - auto shared = result - > second . lock ( ) ; <nl> - if ( shared ) { <nl> - DCHECK_EQ ( backing_store , shared . get ( ) ) ; <nl> - } <nl> - impl ( ) - > map_ . erase ( result ) ; <nl> - } <nl> - backing_store - > globally_registered_ = false ; <nl> - } <nl> - <nl> - std : : shared_ptr < BackingStore > GlobalBackingStoreRegistry : : Lookup ( <nl> - void * buffer_start , size_t length ) { <nl> - base : : MutexGuard scope_lock ( & impl ( ) - > mutex_ ) ; <nl> - TRACE_BS ( " BS : lookup mem = % p ( % zu bytes ) \ n " , buffer_start , length ) ; <nl> - const auto & result = impl ( ) - > map_ . find ( buffer_start ) ; <nl> - if ( result = = impl ( ) - > map_ . end ( ) ) { <nl> - return std : : shared_ptr < BackingStore > ( ) ; <nl> - } <nl> - auto backing_store = result - > second . lock ( ) ; <nl> - DCHECK_EQ ( buffer_start , backing_store - > buffer_start ( ) ) ; <nl> - DCHECK_EQ ( length , backing_store - > byte_length ( ) ) ; <nl> - return backing_store ; <nl> - } <nl> - <nl> - void GlobalBackingStoreRegistry : : Purge ( Isolate * isolate ) { <nl> - base : : MutexGuard scope_lock ( & impl ( ) - > mutex_ ) ; <nl> - / / Purge all entries in the map that refer to the given isolate . <nl> - for ( auto & entry : impl ( ) - > map_ ) { <nl> - auto backing_store = entry . second . 
lock ( ) ; <nl> - if ( ! backing_store ) continue ; / / skip entries where weak ptr is null <nl> - if ( ! backing_store - > is_wasm_memory ( ) ) continue ; / / skip non - wasm memory <nl> - SharedWasmMemoryData * shared_data = <nl> - backing_store - > get_shared_wasm_memory_data ( ) ; <nl> - / / Remove this isolate from the isolates list . <nl> - auto & isolates = shared_data - > isolates_ ; <nl> - for ( size_t i = 0 ; i < isolates . size ( ) ; i + + ) { <nl> - if ( isolates [ i ] = = isolate ) isolates [ i ] = nullptr ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void GlobalBackingStoreRegistry : : AddSharedWasmMemoryObject ( <nl> - Isolate * isolate , BackingStore * backing_store , <nl> - Handle < WasmMemoryObject > memory_object ) { <nl> - / / Add to the weak array list of shared memory objects in the isolate . <nl> - isolate - > AddSharedWasmMemory ( memory_object ) ; <nl> - <nl> - / / Add the isolate to the list of isolates sharing this backing store . <nl> - base : : MutexGuard scope_lock ( & impl ( ) - > mutex_ ) ; <nl> - SharedWasmMemoryData * shared_data = <nl> - backing_store - > get_shared_wasm_memory_data ( ) ; <nl> - auto & isolates = shared_data - > isolates_ ; <nl> - int free_entry = - 1 ; <nl> - for ( size_t i = 0 ; i < isolates . size ( ) ; i + + ) { <nl> - if ( isolates [ i ] = = isolate ) return ; <nl> - if ( isolates [ i ] = = nullptr ) free_entry = static_cast < int > ( i ) ; <nl> - } <nl> - if ( free_entry > = 0 ) <nl> - isolates [ free_entry ] = isolate ; <nl> - else <nl> - isolates . push_back ( isolate ) ; <nl> - } <nl> - <nl> - void GlobalBackingStoreRegistry : : BroadcastSharedWasmMemoryGrow ( <nl> - Isolate * isolate , std : : shared_ptr < BackingStore > backing_store , <nl> - size_t new_size ) { <nl> - { <nl> - / / The global lock protects the list of isolates per backing store . <nl> - base : : MutexGuard scope_lock ( & impl ( ) - > mutex_ ) ; <nl> - SharedWasmMemoryData * shared_data = <nl> - backing_store - > get_shared_wasm_memory_data ( ) ; <nl> - for ( Isolate * other : shared_data - > isolates_ ) { <nl> - if ( other & & other ! = isolate ) { <nl> - other - > stack_guard ( ) - > RequestGrowSharedMemory ( ) ; <nl> - } <nl> - } <nl> - } <nl> - / / Update memory objects in this isolate . <nl> - UpdateSharedWasmMemoryObjects ( isolate ) ; <nl> - } <nl> - <nl> - void GlobalBackingStoreRegistry : : UpdateSharedWasmMemoryObjects ( <nl> - Isolate * isolate ) { <nl> - HandleScope scope ( isolate ) ; <nl> - Handle < WeakArrayList > shared_wasm_memories = <nl> - isolate - > factory ( ) - > shared_wasm_memories ( ) ; <nl> - <nl> - for ( int i = 0 ; i < shared_wasm_memories - > length ( ) ; i + + ) { <nl> - HeapObject obj ; <nl> - if ( ! shared_wasm_memories - > Get ( i ) . GetHeapObject ( & obj ) ) continue ; <nl> - <nl> - Handle < WasmMemoryObject > memory_object ( WasmMemoryObject : : cast ( obj ) , <nl> - isolate ) ; <nl> - Handle < JSArrayBuffer > old_buffer ( memory_object - > array_buffer ( ) , isolate ) ; <nl> - std : : shared_ptr < BackingStore > backing_store = old_buffer - > GetBackingStore ( ) ; <nl> - <nl> - if ( old_buffer - > byte_length ( ) ! 
= backing_store - > byte_length ( ) ) { <nl> - Handle < JSArrayBuffer > new_buffer = <nl> - isolate - > factory ( ) - > NewJSSharedArrayBuffer ( ) ; <nl> - new_buffer - > Attach ( backing_store ) ; <nl> - memory_object - > update_instances ( isolate , new_buffer ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - } / / namespace internal <nl> - } / / namespace v8 <nl> - <nl> - # undef TRACE_BS <nl> deleted file mode 100644 <nl> index 0ba8c4448d6 . . 00000000000 <nl> mmm a / src / objects / backing - store . h <nl> ppp / dev / null <nl> <nl> - / / Copyright 2019 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - # ifndef V8_OBJECTS_BACKING_STORE_H_ <nl> - # define V8_OBJECTS_BACKING_STORE_H_ <nl> - <nl> - # include " src / handles / handles . h " <nl> - <nl> - namespace v8 { <nl> - namespace internal { <nl> - <nl> - class Isolate ; <nl> - class WasmMemoryObject ; <nl> - <nl> - / / Whether the backing store is shared or not . <nl> - enum class SharedFlag : uint8_t { kNotShared , kShared } ; <nl> - <nl> - / / Whether the backing store memory is initialied to zero or not . <nl> - enum class InitializedFlag : uint8_t { kUninitialized , kZeroInitialized } ; <nl> - <nl> - / / Internal information for shared wasm memories . E . g . contains <nl> - / / a list of all memory objects ( across all isolates ) that share this <nl> - / / backing store . <nl> - struct SharedWasmMemoryData ; <nl> - <nl> - / / The { BackingStore } data structure stores all the low - level details about the <nl> - / / backing store of an array buffer or Wasm memory , including its base address <nl> - / / and length , whether it is shared , provided by the embedder , has guard <nl> - / / regions , etc . Instances of this classes * own * the underlying memory <nl> - / / when they are created through one of the { Allocate ( ) } methods below , <nl> - / / and the destructor frees the memory ( and page allocation if necessary ) . <nl> - / / Backing stores can also * wrap * embedder - allocated memory . In this case , <nl> - / / they do not own the memory , and upon destruction , they do not deallocate it . <nl> - class V8_EXPORT_PRIVATE BackingStore { <nl> - public : <nl> - ~ BackingStore ( ) ; <nl> - <nl> - / / Allocate an array buffer backing store using the default method , <nl> - / / which currently is the embedder - provided array buffer allocator . <nl> - static std : : unique_ptr < BackingStore > Allocate ( Isolate * isolate , <nl> - size_t byte_length , <nl> - SharedFlag shared , <nl> - InitializedFlag initialized ) ; <nl> - <nl> - / / Allocate the backing store for a Wasm memory . <nl> - static std : : unique_ptr < BackingStore > AllocateWasmMemory ( Isolate * isolate , <nl> - size_t initial_pages , <nl> - size_t maximum_pages , <nl> - SharedFlag shared ) ; <nl> - <nl> - / / Allocate a new , larger , backing store for a Wasm memory and copy the <nl> - / / contents of this backing store into it . <nl> - static std : : unique_ptr < BackingStore > CopyWasmMemory ( <nl> - Isolate * isolate , std : : shared_ptr < BackingStore > old , <nl> - size_t new_byte_length ) ; <nl> - <nl> - / / Reallocate the backing store for a Wasm memory . Either readjust the <nl> - / / size of the given backing store or allocate a new one and copy . 
<nl> - static std : : unique_ptr < BackingStore > ReallocateWasmMemory ( <nl> - std : : unique_ptr < BackingStore > existing , size_t new_byte_length ) ; <nl> - <nl> - / / Create a backing store that wraps existing allocated memory . <nl> - / / If { free_on_destruct } is { true } , the memory will be freed using the <nl> - / / ArrayBufferAllocator : : Free ( ) callback when this backing store is <nl> - / / destructed . Otherwise destructing the backing store will do nothing <nl> - / / to the allocated memory . <nl> - static std : : unique_ptr < BackingStore > WrapAllocation ( Isolate * isolate , <nl> - void * allocation_base , <nl> - size_t allocation_length , <nl> - SharedFlag shared , <nl> - bool free_on_destruct ) ; <nl> - <nl> - / / Accessors . <nl> - void * buffer_start ( ) const { return buffer_start_ ; } <nl> - size_t byte_length ( ) const { return byte_length_ ; } <nl> - size_t byte_capacity ( ) const { return byte_length_ ; } <nl> - bool is_shared ( ) const { return is_shared_ ; } <nl> - bool is_wasm_memory ( ) const { return is_wasm_memory_ ; } <nl> - bool has_guard_regions ( ) const { return has_guard_regions_ ; } <nl> - bool free_on_destruct ( ) const { return free_on_destruct_ ; } <nl> - <nl> - / / Attempt to grow this backing store in place . <nl> - bool GrowWasmMemoryInPlace ( Isolate * isolate , size_t new_byte_length ) ; <nl> - <nl> - / / Attach the given memory object to this backing store . The memory object <nl> - / / will be updated if this backing store is grown . <nl> - void AttachSharedWasmMemoryObject ( Isolate * isolate , <nl> - Handle < WasmMemoryObject > memory_object ) ; <nl> - <nl> - / / Send asynchronous updates to attached memory objects in other isolates <nl> - / / after the backing store has been grown . Memory objects in this <nl> - / / isolate are updated synchronously . <nl> - static void BroadcastSharedWasmMemoryGrow ( Isolate * isolate , <nl> - std : : shared_ptr < BackingStore > , <nl> - size_t new_size ) ; <nl> - <nl> - / / TODO ( wasm ) : address space limitations should be enforced in page alloc . <nl> - / / These methods enforce a limit on the total amount of address space , <nl> - / / which is used for both backing stores and wasm memory . <nl> - static bool ReserveAddressSpace ( uint64_t num_bytes ) ; <nl> - static void ReleaseReservation ( uint64_t num_bytes ) ; <nl> - <nl> - / / Remove all memory objects in the given isolate that refer to this <nl> - / / backing store . <nl> - static void RemoveSharedWasmMemoryObjects ( Isolate * isolate ) ; <nl> - <nl> - / / Update all shared memory objects in this isolate ( after a grow operation ) . <nl> - static void UpdateSharedWasmMemoryObjects ( Isolate * isolate ) ; <nl> - <nl> - private : <nl> - friend class GlobalBackingStoreRegistry ; <nl> - <nl> - BackingStore ( void * buffer_start , size_t byte_length , size_t byte_capacity , <nl> - SharedFlag shared , bool is_wasm_memory , bool free_on_destruct , <nl> - bool has_guard_regions ) <nl> - : buffer_start_ ( buffer_start ) , <nl> - byte_length_ ( byte_length ) , <nl> - byte_capacity_ ( byte_capacity ) , <nl> - is_shared_ ( shared = = SharedFlag : : kShared ) , <nl> - is_wasm_memory_ ( is_wasm_memory ) , <nl> - free_on_destruct_ ( free_on_destruct ) , <nl> - has_guard_regions_ ( has_guard_regions ) , <nl> - globally_registered_ ( false ) { <nl> - type_specific_data_ . 
v8_api_array_buffer_allocator = nullptr ; <nl> - } <nl> - <nl> - void * buffer_start_ = nullptr ; <nl> - size_t byte_length_ = 0 ; <nl> - size_t byte_capacity_ = 0 ; <nl> - bool is_shared_ : 1 ; <nl> - bool is_wasm_memory_ : 1 ; <nl> - bool free_on_destruct_ : 1 ; <nl> - bool has_guard_regions_ : 1 ; <nl> - bool globally_registered_ : 1 ; <nl> - <nl> - union { <nl> - / / If this backing store was allocated through the ArrayBufferAllocator API , <nl> - / / this is a direct pointer to the API object for freeing the backing <nl> - / / store . <nl> - / / Note : we use { void * } here because we cannot forward - declare an inner <nl> - / / class from the API . <nl> - void * v8_api_array_buffer_allocator ; <nl> - <nl> - / / For shared Wasm memories , this is a list of all the attached memory <nl> - / / objects , which is needed to grow shared backing stores . <nl> - SharedWasmMemoryData * shared_wasm_memory_data ; <nl> - } type_specific_data_ ; <nl> - <nl> - / / Accessors for type - specific data . <nl> - void * get_v8_api_array_buffer_allocator ( ) ; <nl> - SharedWasmMemoryData * get_shared_wasm_memory_data ( ) ; <nl> - <nl> - void Clear ( ) ; / / Internally clears fields after deallocation . <nl> - static std : : unique_ptr < BackingStore > TryAllocateWasmMemory ( <nl> - Isolate * isolate , size_t initial_pages , size_t maximum_pages , <nl> - SharedFlag shared ) ; <nl> - <nl> - DISALLOW_COPY_AND_ASSIGN ( BackingStore ) ; <nl> - } ; <nl> - <nl> - / / A global , per - process mapping from buffer addresses to backing stores . <nl> - / / This is generally only used for dealing with an embedder that has not <nl> - / / migrated to the new API which should use proper pointers to manage <nl> - / / backing stores . <nl> - class GlobalBackingStoreRegistry { <nl> - public : <nl> - / / Register a backing store in the global registry . A mapping from the <nl> - / / { buffer_start } to the backing store object will be added . The backing <nl> - / / store will automatically unregister itself upon destruction . <nl> - static void Register ( std : : shared_ptr < BackingStore > backing_store ) ; <nl> - <nl> - / / Look up a backing store based on the { buffer_start } pointer . <nl> - static std : : shared_ptr < BackingStore > Lookup ( void * buffer_start , <nl> - size_t length ) ; <nl> - <nl> - private : <nl> - friend class BackingStore ; <nl> - / / Unregister a backing store in the global registry . <nl> - static void Unregister ( BackingStore * backing_store ) ; <nl> - <nl> - / / Adds the given memory object to the backing store ' s weak list <nl> - / / of memory objects ( under the registry lock ) . <nl> - static void AddSharedWasmMemoryObject ( Isolate * isolate , <nl> - BackingStore * backing_store , <nl> - Handle < WasmMemoryObject > memory_object ) ; <nl> - <nl> - / / Purge any shared wasm memory lists that refer to this isolate . <nl> - static void Purge ( Isolate * isolate ) ; <nl> - <nl> - / / Broadcast updates to all attached memory objects . <nl> - static void BroadcastSharedWasmMemoryGrow ( <nl> - Isolate * isolate , std : : shared_ptr < BackingStore > backing_store , <nl> - size_t new_size ) ; <nl> - <nl> - / / Update all shared memory objects in the given isolate . <nl> - static void UpdateSharedWasmMemoryObjects ( Isolate * isolate ) ; <nl> - } ; <nl> - <nl> - } / / namespace internal <nl> - } / / namespace v8 <nl> - <nl> - # endif / / V8_OBJECTS_BACKING_STORE_H_ <nl> mmm a / src / objects / js - array - buffer - inl . h <nl> ppp b / src / objects / js - array - buffer - inl . 
h <nl> size_t JSArrayBuffer : : allocation_length ( ) const { <nl> if ( backing_store ( ) = = nullptr ) { <nl> return 0 ; <nl> } <nl> + / / If this buffer is managed by the WasmMemoryTracker <nl> + if ( is_wasm_memory ( ) ) { <nl> + const auto * data = <nl> + GetIsolate ( ) - > wasm_engine ( ) - > memory_tracker ( ) - > FindAllocationData ( <nl> + backing_store ( ) ) ; <nl> + DCHECK_NOT_NULL ( data ) ; <nl> + return data - > allocation_length ; <nl> + } <nl> return byte_length ( ) ; <nl> } <nl> <nl> void * JSArrayBuffer : : allocation_base ( ) const { <nl> if ( backing_store ( ) = = nullptr ) { <nl> return nullptr ; <nl> } <nl> + / / If this buffer is managed by the WasmMemoryTracker <nl> + if ( is_wasm_memory ( ) ) { <nl> + const auto * data = <nl> + GetIsolate ( ) - > wasm_engine ( ) - > memory_tracker ( ) - > FindAllocationData ( <nl> + backing_store ( ) ) ; <nl> + DCHECK_NOT_NULL ( data ) ; <nl> + return data - > allocation_base ; <nl> + } <nl> return backing_store ( ) ; <nl> } <nl> <nl> + bool JSArrayBuffer : : is_wasm_memory ( ) const { <nl> + return IsWasmMemoryBit : : decode ( bit_field ( ) ) ; <nl> + } <nl> + <nl> + void JSArrayBuffer : : set_is_wasm_memory ( bool is_wasm_memory ) { <nl> + set_bit_field ( IsWasmMemoryBit : : update ( bit_field ( ) , is_wasm_memory ) ) ; <nl> + } <nl> + <nl> void JSArrayBuffer : : clear_padding ( ) { <nl> if ( FIELD_SIZE ( kOptionalPaddingOffset ) ! = 0 ) { <nl> DCHECK_EQ ( 4 , FIELD_SIZE ( kOptionalPaddingOffset ) ) ; <nl> BIT_FIELD_ACCESSORS ( JSArrayBuffer , bit_field , is_detachable , <nl> JSArrayBuffer : : IsDetachableBit ) <nl> BIT_FIELD_ACCESSORS ( JSArrayBuffer , bit_field , was_detached , <nl> JSArrayBuffer : : WasDetachedBit ) <nl> - BIT_FIELD_ACCESSORS ( JSArrayBuffer , bit_field , is_asmjs_memory , <nl> - JSArrayBuffer : : IsAsmJsMemoryBit ) <nl> BIT_FIELD_ACCESSORS ( JSArrayBuffer , bit_field , is_shared , <nl> JSArrayBuffer : : IsSharedBit ) <nl> <nl> mmm a / src / objects / js - array - buffer . cc <nl> ppp b / src / objects / js - array - buffer . cc <nl> bool CanonicalNumericIndexString ( Isolate * isolate , Handle < Object > s , <nl> * index = result ; <nl> return true ; <nl> } <nl> - } / / anonymous namespace <nl> <nl> - void JSArrayBuffer : : SetupEmpty ( SharedFlag shared ) { <nl> - clear_padding ( ) ; <nl> - set_bit_field ( 0 ) ; <nl> - set_is_shared ( shared = = SharedFlag : : kShared ) ; <nl> - set_is_detachable ( shared ! = SharedFlag : : kShared ) ; <nl> - set_backing_store ( nullptr ) ; <nl> - set_byte_length ( 0 ) ; <nl> + inline int ConvertToMb ( size_t size ) { <nl> + return static_cast < int > ( size / static_cast < size_t > ( MB ) ) ; <nl> } <nl> <nl> - std : : shared_ptr < BackingStore > JSArrayBuffer : : Detach ( <nl> - bool force_for_wasm_memory ) { <nl> - if ( was_detached ( ) ) return nullptr ; <nl> - <nl> - if ( force_for_wasm_memory ) { <nl> - / / Skip the is_detachable ( ) check . <nl> - } else if ( ! is_detachable ( ) ) { <nl> - / / Not detachable , do nothing . <nl> - return nullptr ; <nl> - } <nl> + } / / anonymous namespace <nl> <nl> + void JSArrayBuffer : : Detach ( ) { <nl> + CHECK ( is_detachable ( ) ) ; <nl> + CHECK ( ! was_detached ( ) ) ; <nl> + CHECK ( is_external ( ) ) ; <nl> + set_backing_store ( nullptr ) ; <nl> + set_byte_length ( 0 ) ; <nl> + set_was_detached ( true ) ; <nl> + set_is_detachable ( false ) ; <nl> + / / Invalidate the detaching protector . 
<nl> Isolate * const isolate = GetIsolate ( ) ; <nl> - auto backing_store = isolate - > heap ( ) - > UnregisterBackingStore ( * this ) ; <nl> - CHECK_IMPLIES ( force_for_wasm_memory & & backing_store , <nl> - backing_store - > is_wasm_memory ( ) ) ; <nl> - <nl> if ( isolate - > IsArrayBufferDetachingIntact ( ) ) { <nl> isolate - > InvalidateArrayBufferDetachingProtector ( ) ; <nl> } <nl> + } <nl> <nl> - DCHECK ( ! is_shared ( ) ) ; <nl> - DCHECK ( ! is_asmjs_memory ( ) ) ; <nl> + void JSArrayBuffer : : FreeBackingStoreFromMainThread ( ) { <nl> + if ( allocation_base ( ) = = nullptr ) { <nl> + return ; <nl> + } <nl> + FreeBackingStore ( GetIsolate ( ) , { allocation_base ( ) , allocation_length ( ) , <nl> + backing_store ( ) , is_wasm_memory ( ) } ) ; <nl> + / / Zero out the backing store and allocation base to avoid dangling <nl> + / / pointers . <nl> set_backing_store ( nullptr ) ; <nl> - set_byte_length ( 0 ) ; <nl> - set_was_detached ( true ) ; <nl> - <nl> - return backing_store ; <nl> } <nl> <nl> - void JSArrayBuffer : : Attach ( std : : shared_ptr < BackingStore > backing_store ) { <nl> - SetupEmpty ( backing_store - > is_shared ( ) ? SharedFlag : : kShared <nl> - : SharedFlag : : kNotShared ) ; <nl> - <nl> - if ( backing_store - > is_wasm_memory ( ) ) set_is_detachable ( false ) ; <nl> - <nl> - set_backing_store ( backing_store - > buffer_start ( ) ) ; <nl> - set_byte_length ( backing_store - > byte_length ( ) ) ; <nl> - if ( ! backing_store - > free_on_destruct ( ) ) set_is_external ( true ) ; <nl> - <nl> - GetIsolate ( ) - > heap ( ) - > RegisterBackingStore ( * this , std : : move ( backing_store ) ) ; <nl> + / / static <nl> + void JSArrayBuffer : : FreeBackingStore ( Isolate * isolate , Allocation allocation ) { <nl> + if ( allocation . is_wasm_memory ) { <nl> + wasm : : WasmMemoryTracker * memory_tracker = <nl> + isolate - > wasm_engine ( ) - > memory_tracker ( ) ; <nl> + memory_tracker - > FreeWasmMemory ( isolate , allocation . backing_store ) ; <nl> + } else { <nl> + isolate - > array_buffer_allocator ( ) - > Free ( allocation . allocation_base , <nl> + allocation . length ) ; <nl> + } <nl> } <nl> <nl> - std : : shared_ptr < BackingStore > JSArrayBuffer : : GetBackingStore ( ) { <nl> - return GetIsolate ( ) - > heap ( ) - > LookupBackingStore ( * this ) ; <nl> + void JSArrayBuffer : : Setup ( Handle < JSArrayBuffer > array_buffer , Isolate * isolate , <nl> + bool is_external , void * data , size_t byte_length , <nl> + SharedFlag shared_flag , bool is_wasm_memory ) { <nl> + DCHECK_EQ ( array_buffer - > GetEmbedderFieldCount ( ) , <nl> + v8 : : ArrayBuffer : : kEmbedderFieldCount ) ; <nl> + DCHECK_LE ( byte_length , JSArrayBuffer : : kMaxByteLength ) ; <nl> + for ( int i = 0 ; i < v8 : : ArrayBuffer : : kEmbedderFieldCount ; i + + ) { <nl> + array_buffer - > SetEmbedderField ( i , Smi : : kZero ) ; <nl> + } <nl> + array_buffer - > set_byte_length ( byte_length ) ; <nl> + array_buffer - > set_bit_field ( 0 ) ; <nl> + array_buffer - > clear_padding ( ) ; <nl> + array_buffer - > set_is_external ( is_external ) ; <nl> + array_buffer - > set_is_detachable ( shared_flag = = SharedFlag : : kNotShared ) ; <nl> + array_buffer - > set_is_shared ( shared_flag = = SharedFlag : : kShared ) ; <nl> + array_buffer - > set_is_wasm_memory ( is_wasm_memory ) ; <nl> + / / Initialize backing store at last to avoid handling of | JSArrayBuffers | that <nl> + / / are currently being constructed in the | ArrayBufferTracker | . 
The <nl> + / / registration method below handles the case of registering a buffer that has <nl> + / / already been promoted . <nl> + array_buffer - > set_backing_store ( data ) ; <nl> + <nl> + if ( data & & ! is_external ) { <nl> + isolate - > heap ( ) - > RegisterNewArrayBuffer ( * array_buffer ) ; <nl> + } <nl> } <nl> <nl> - Handle < JSArrayBuffer > JSTypedArray : : GetBuffer ( ) { <nl> - Isolate * isolate = GetIsolate ( ) ; <nl> - Handle < JSTypedArray > self ( * this , isolate ) ; <nl> - DCHECK ( IsTypedArrayElementsKind ( self - > GetElementsKind ( ) ) ) ; <nl> + void JSArrayBuffer : : SetupAsEmpty ( Handle < JSArrayBuffer > array_buffer , <nl> + Isolate * isolate ) { <nl> + Setup ( array_buffer , isolate , false , nullptr , 0 , SharedFlag : : kNotShared ) ; <nl> + } <nl> <nl> - Handle < JSArrayBuffer > array_buffer ( JSArrayBuffer : : cast ( self - > buffer ( ) ) , <nl> - isolate ) ; <nl> - if ( ! is_on_heap ( ) ) { <nl> - / / Already is off heap , so return the existing buffer . <nl> - return array_buffer ; <nl> + bool JSArrayBuffer : : SetupAllocatingData ( Handle < JSArrayBuffer > array_buffer , <nl> + Isolate * isolate , <nl> + size_t allocated_length , <nl> + bool initialize , <nl> + SharedFlag shared_flag ) { <nl> + void * data ; <nl> + CHECK_NOT_NULL ( isolate - > array_buffer_allocator ( ) ) ; <nl> + if ( allocated_length ! = 0 ) { <nl> + if ( allocated_length > = MB ) <nl> + isolate - > counters ( ) - > array_buffer_big_allocations ( ) - > AddSample ( <nl> + ConvertToMb ( allocated_length ) ) ; <nl> + if ( shared_flag = = SharedFlag : : kShared ) <nl> + isolate - > counters ( ) - > shared_array_allocations ( ) - > AddSample ( <nl> + ConvertToMb ( allocated_length ) ) ; <nl> + if ( initialize ) { <nl> + data = isolate - > array_buffer_allocator ( ) - > Allocate ( allocated_length ) ; <nl> + } else { <nl> + data = isolate - > array_buffer_allocator ( ) - > AllocateUninitialized ( <nl> + allocated_length ) ; <nl> + } <nl> + if ( data = = nullptr ) { <nl> + isolate - > counters ( ) - > array_buffer_new_size_failures ( ) - > AddSample ( <nl> + ConvertToMb ( allocated_length ) ) ; <nl> + SetupAsEmpty ( array_buffer , isolate ) ; <nl> + return false ; <nl> + } <nl> + } else { <nl> + data = nullptr ; <nl> } <nl> <nl> - / / The existing array buffer should be empty . <nl> - DCHECK_NULL ( array_buffer - > backing_store ( ) ) ; <nl> + const bool is_external = false ; <nl> + JSArrayBuffer : : Setup ( array_buffer , isolate , is_external , data , <nl> + allocated_length , shared_flag ) ; <nl> + return true ; <nl> + } <nl> <nl> - / / Allocate a new backing store and attach it to the existing array buffer . <nl> - size_t byte_length = self - > byte_length ( ) ; <nl> - auto backing_store = <nl> - BackingStore : : Allocate ( isolate , byte_length , SharedFlag : : kNotShared , <nl> - InitializedFlag : : kUninitialized ) ; <nl> + Handle < JSArrayBuffer > JSTypedArray : : MaterializeArrayBuffer ( <nl> + Handle < JSTypedArray > typed_array ) { <nl> + DCHECK ( typed_array - > is_on_heap ( ) ) ; <nl> <nl> - if ( ! backing_store ) { <nl> - isolate - > heap ( ) - > FatalProcessOutOfMemory ( " JSTypedArray : : GetBuffer " ) ; <nl> - } <nl> + Isolate * isolate = typed_array - > GetIsolate ( ) ; <nl> <nl> - / / Copy the elements into the backing store of the array buffer . 
<nl> - if ( byte_length > 0 ) { <nl> - memcpy ( backing_store - > buffer_start ( ) , self - > DataPtr ( ) , byte_length ) ; <nl> - } <nl> + DCHECK ( IsTypedArrayElementsKind ( typed_array - > GetElementsKind ( ) ) ) ; <nl> <nl> - / / Attach the backing store to the array buffer . <nl> - array_buffer - > Attach ( std : : move ( backing_store ) ) ; <nl> + Handle < JSArrayBuffer > buffer ( JSArrayBuffer : : cast ( typed_array - > buffer ( ) ) , <nl> + isolate ) ; <nl> + / / This code does not know how to materialize from wasm buffers . <nl> + DCHECK ( ! buffer - > is_wasm_memory ( ) ) ; <nl> <nl> - / / Clear the elements of the typed array . <nl> - self - > set_elements ( ReadOnlyRoots ( isolate ) . empty_byte_array ( ) ) ; <nl> - self - > set_external_pointer ( array_buffer - > backing_store ( ) ) ; <nl> - self - > set_base_pointer ( Smi : : kZero ) ; <nl> - DCHECK ( ! self - > is_on_heap ( ) ) ; <nl> + void * backing_store = <nl> + isolate - > array_buffer_allocator ( ) - > AllocateUninitialized ( <nl> + typed_array - > byte_length ( ) ) ; <nl> + if ( backing_store = = nullptr ) { <nl> + isolate - > heap ( ) - > FatalProcessOutOfMemory ( <nl> + " JSTypedArray : : MaterializeArrayBuffer " ) ; <nl> + } <nl> + buffer - > set_is_external ( false ) ; <nl> + DCHECK_EQ ( buffer - > byte_length ( ) , typed_array - > byte_length ( ) ) ; <nl> + / / Initialize backing store at last to avoid handling of | JSArrayBuffers | that <nl> + / / are currently being constructed in the | ArrayBufferTracker | . The <nl> + / / registration method below handles the case of registering a buffer that has <nl> + / / already been promoted . <nl> + buffer - > set_backing_store ( backing_store ) ; <nl> + / / RegisterNewArrayBuffer expects a valid length for adjusting counters . <nl> + isolate - > heap ( ) - > RegisterNewArrayBuffer ( * buffer ) ; <nl> + memcpy ( buffer - > backing_store ( ) , typed_array - > DataPtr ( ) , <nl> + typed_array - > byte_length ( ) ) ; <nl> + <nl> + typed_array - > set_elements ( ReadOnlyRoots ( isolate ) . empty_byte_array ( ) ) ; <nl> + typed_array - > set_external_pointer ( backing_store ) ; <nl> + typed_array - > set_base_pointer ( Smi : : kZero ) ; <nl> + DCHECK ( ! typed_array - > is_on_heap ( ) ) ; <nl> + <nl> + return buffer ; <nl> + } <nl> <nl> - return array_buffer ; <nl> + Handle < JSArrayBuffer > JSTypedArray : : GetBuffer ( ) { <nl> + if ( ! is_on_heap ( ) ) { <nl> + Handle < JSArrayBuffer > array_buffer ( JSArrayBuffer : : cast ( buffer ( ) ) , <nl> + GetIsolate ( ) ) ; <nl> + return array_buffer ; <nl> + } <nl> + Handle < JSTypedArray > self ( * this , GetIsolate ( ) ) ; <nl> + return MaterializeArrayBuffer ( self ) ; <nl> } <nl> <nl> / / ES # sec - integer - indexed - exotic - objects - defineownproperty - p - desc <nl> mmm a / src / objects / js - array - buffer . h <nl> ppp b / src / objects / js - array - buffer . h <nl> <nl> # ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_ <nl> # define V8_OBJECTS_JS_ARRAY_BUFFER_H_ <nl> <nl> - # include " src / objects / backing - store . h " <nl> # include " src / objects / js - objects . h " <nl> <nl> / / Has to be the last include ( doesn ' t have include guards ) : <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> + / / Whether a JSArrayBuffer is a SharedArrayBuffer or not . <nl> + enum class SharedFlag : uint32_t { kNotShared , kShared } ; <nl> + <nl> class JSArrayBuffer : public JSObject { <nl> public : <nl> / / The maximum length for JSArrayBuffer ' s supported by V8 . 
<nl> class JSArrayBuffer : public JSObject { <nl> V ( IsExternalBit , bool , 1 , _ ) \ <nl> V ( IsDetachableBit , bool , 1 , _ ) \ <nl> V ( WasDetachedBit , bool , 1 , _ ) \ <nl> - V ( IsAsmJsMemoryBit , bool , 1 , _ ) \ <nl> - V ( IsSharedBit , bool , 1 , _ ) <nl> + V ( IsSharedBit , bool , 1 , _ ) \ <nl> + V ( IsWasmMemoryBit , bool , 1 , _ ) <nl> DEFINE_BIT_FIELDS ( JS_ARRAY_BUFFER_BIT_FIELD_FIELDS ) <nl> # undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS <nl> <nl> class JSArrayBuffer : public JSObject { <nl> / / memory block once all ArrayBuffers referencing it are collected by the GC . <nl> DECL_BOOLEAN_ACCESSORS ( is_external ) <nl> <nl> - / / [ is_detachable ] : false = > this buffer cannot be detached . <nl> + / / [ is_detachable ] : false indicates that this buffer cannot be detached . <nl> DECL_BOOLEAN_ACCESSORS ( is_detachable ) <nl> <nl> - / / [ was_detached ] : true = > the buffer was previously detached . <nl> + / / [ was_detached ] : true if the buffer was previously detached . <nl> DECL_BOOLEAN_ACCESSORS ( was_detached ) <nl> <nl> - / / [ is_asmjs_memory ] : true = > this buffer was once used as asm . js memory . <nl> - DECL_BOOLEAN_ACCESSORS ( is_asmjs_memory ) <nl> - <nl> / / [ is_shared ] : tells whether this is an ArrayBuffer or a SharedArrayBuffer . <nl> DECL_BOOLEAN_ACCESSORS ( is_shared ) <nl> <nl> + / / [ is_wasm_memory ] : whether the buffer is tracked by the WasmMemoryTracker . <nl> + DECL_BOOLEAN_ACCESSORS ( is_wasm_memory ) <nl> + <nl> DECL_CAST ( JSArrayBuffer ) <nl> <nl> - / / Immediately after creating an array buffer , the internal untagged fields <nl> - / / are garbage . They need to be initialized with either { SetupEmpty ( ) } or <nl> - / / have a backing store attached via { Attach ( ) } . <nl> - <nl> - / / Setup an array buffer with no backing store . <nl> - V8_EXPORT_PRIVATE void SetupEmpty ( SharedFlag shared ) ; <nl> - <nl> - / / Attach a backing store to this array buffer . <nl> - / / ( note : this registers it with src / heap / array - buffer - tracker . h ) <nl> - V8_EXPORT_PRIVATE void Attach ( std : : shared_ptr < BackingStore > backing_store ) ; <nl> - <nl> - / / Detach the backing store from this array buffer if it is detachable <nl> - / / and return a reference to the backing store object . This sets the <nl> - / / internal pointer and length to 0 and unregisters the backing store <nl> - / / from the array buffer tracker . <nl> - / / If the array buffer is not detachable , this is a nop . <nl> - / / <nl> - / / Array buffers that wrap wasm memory objects are special in that they <nl> - / / are normally not detachable , but can become detached as a side effect <nl> - / / of growing the underlying memory object . The { force_for_wasm_memory } flag <nl> - / / is used by the implementation of Wasm memory growth in order to bypass the <nl> - / / non - detachable check . <nl> - V8_EXPORT_PRIVATE std : : shared_ptr < BackingStore > Detach ( <nl> - bool force_for_wasm_memory = false ) ; <nl> - <nl> - / / Get a reference to backing store of this array buffer , if there is a <nl> - / / backing store . Returns nullptr if there is no backing store ( e . g . detached <nl> - / / or a zero - length array buffer ) . 
<nl> - std : : shared_ptr < BackingStore > GetBackingStore ( ) ; <nl> + void Detach ( ) ; <nl> + <nl> + struct Allocation { <nl> + Allocation ( void * allocation_base , size_t length , void * backing_store , <nl> + bool is_wasm_memory ) <nl> + : allocation_base ( allocation_base ) , <nl> + length ( length ) , <nl> + backing_store ( backing_store ) , <nl> + is_wasm_memory ( is_wasm_memory ) { } <nl> + <nl> + void * allocation_base ; <nl> + size_t length ; <nl> + void * backing_store ; <nl> + bool is_wasm_memory ; <nl> + } ; <nl> + <nl> + V8_EXPORT_PRIVATE void FreeBackingStoreFromMainThread ( ) ; <nl> + V8_EXPORT_PRIVATE static void FreeBackingStore ( Isolate * isolate , <nl> + Allocation allocation ) ; <nl> + <nl> + V8_EXPORT_PRIVATE static void Setup ( <nl> + Handle < JSArrayBuffer > array_buffer , Isolate * isolate , bool is_external , <nl> + void * data , size_t allocated_length , <nl> + SharedFlag shared_flag = SharedFlag : : kNotShared , <nl> + bool is_wasm_memory = false ) ; <nl> + <nl> + / / Initialize the object as empty one to avoid confusing heap verifier if <nl> + / / the failure happened in the middle of JSArrayBuffer construction . <nl> + V8_EXPORT_PRIVATE static void SetupAsEmpty ( Handle < JSArrayBuffer > array_buffer , <nl> + Isolate * isolate ) ; <nl> + <nl> + / / Returns false if array buffer contents could not be allocated . <nl> + / / In this case , | array_buffer | will not be set up . <nl> + V8_EXPORT_PRIVATE static bool SetupAllocatingData ( <nl> + Handle < JSArrayBuffer > array_buffer , Isolate * isolate , <nl> + size_t allocated_length , bool initialize = true , <nl> + SharedFlag shared_flag = SharedFlag : : kNotShared ) V8_WARN_UNUSED_RESULT ; <nl> <nl> / / Dispatched behavior . <nl> DECL_PRINTER ( JSArrayBuffer ) <nl> class JSTypedArray : public JSArrayBufferView { <nl> # endif <nl> <nl> private : <nl> + static Handle < JSArrayBuffer > MaterializeArrayBuffer ( <nl> + Handle < JSTypedArray > typed_array ) ; <nl> + <nl> OBJECT_CONSTRUCTORS ( JSTypedArray , JSArrayBufferView ) ; <nl> } ; <nl> <nl> mmm a / src / objects / value - serializer . cc <nl> ppp b / src / objects / value - serializer . cc <nl> Maybe < bool > ValueSerializer : : WriteWasmMemory ( Handle < WasmMemoryObject > object ) { <nl> return Nothing < bool > ( ) ; <nl> } <nl> <nl> - GlobalBackingStoreRegistry : : Register ( <nl> - object - > array_buffer ( ) . GetBackingStore ( ) ) ; <nl> + isolate_ - > wasm_engine ( ) - > memory_tracker ( ) - > RegisterWasmMemoryAsShared ( <nl> + object , isolate_ ) ; <nl> <nl> WriteTag ( SerializationTag : : kWasmMemoryTransfer ) ; <nl> WriteZigZag < int32_t > ( object - > maximum_pages ( ) ) ; <nl> MaybeHandle < JSArrayBuffer > ValueDeserializer : : ReadJSArrayBuffer ( <nl> byte_length > static_cast < size_t > ( end_ - position_ ) ) { <nl> return MaybeHandle < JSArrayBuffer > ( ) ; <nl> } <nl> - MaybeHandle < JSArrayBuffer > result = <nl> - isolate_ - > factory ( ) - > NewJSArrayBufferAndBackingStore ( <nl> - byte_length , InitializedFlag : : kUninitialized , allocation_ ) ; <nl> - Handle < JSArrayBuffer > array_buffer ; <nl> - if ( ! result . ToHandle ( & array_buffer ) ) return result ; <nl> - <nl> + const bool should_initialize = false ; <nl> + Handle < JSArrayBuffer > array_buffer = isolate_ - > factory ( ) - > NewJSArrayBuffer ( <nl> + SharedFlag : : kNotShared , allocation_ ) ; <nl> + if ( ! 
JSArrayBuffer : : SetupAllocatingData ( array_buffer , isolate_ , byte_length , <nl> + should_initialize ) ) { <nl> + return MaybeHandle < JSArrayBuffer > ( ) ; <nl> + } <nl> if ( byte_length > 0 ) { <nl> memcpy ( array_buffer - > backing_store ( ) , position_ , byte_length ) ; <nl> } <nl> MaybeHandle < WasmMemoryObject > ValueDeserializer : : ReadWasmMemory ( ) { <nl> Handle < WasmMemoryObject > result = <nl> WasmMemoryObject : : New ( isolate_ , buffer , maximum_pages ) ; <nl> <nl> + isolate_ - > wasm_engine ( ) - > memory_tracker ( ) - > RegisterWasmMemoryAsShared ( <nl> + result , isolate_ ) ; <nl> + <nl> AddObjectWithID ( id , result ) ; <nl> return result ; <nl> } <nl> mmm a / src / roots / roots . h <nl> ppp b / src / roots / roots . h <nl> class Symbol ; <nl> V ( HeapObject , weak_refs_keep_during_job , WeakRefsKeepDuringJob ) \ <nl> V ( HeapObject , interpreter_entry_trampoline_for_profiling , \ <nl> InterpreterEntryTrampolineForProfiling ) \ <nl> - V ( Object , pending_optimize_for_test_bytecode , \ <nl> - PendingOptimizeForTestBytecode ) \ <nl> - V ( WeakArrayList , shared_wasm_memories , SharedWasmMemories ) <nl> + V ( Object , pending_optimize_for_test_bytecode , PendingOptimizeForTestBytecode ) <nl> <nl> / / Entries in this list are limited to Smis and are not visited during GC . <nl> # define SMI_ROOT_LIST ( V ) \ <nl> mmm a / src / runtime / runtime - test . cc <nl> ppp b / src / runtime / runtime - test . cc <nl> RUNTIME_FUNCTION ( Runtime_SerializeWasmModule ) { <nl> <nl> wasm : : NativeModule * native_module = module_obj - > native_module ( ) ; <nl> wasm : : WasmSerializer wasm_serializer ( native_module ) ; <nl> - size_t byte_length = wasm_serializer . GetSerializedNativeModuleSize ( ) ; <nl> - <nl> - MaybeHandle < JSArrayBuffer > result = <nl> - isolate - > factory ( ) - > NewJSArrayBufferAndBackingStore ( <nl> - byte_length , InitializedFlag : : kUninitialized ) ; <nl> - <nl> - Handle < JSArrayBuffer > array_buffer ; <nl> - if ( result . ToHandle ( & array_buffer ) & & <nl> - wasm_serializer . SerializeNativeModule ( <nl> - { reinterpret_cast < uint8_t * > ( array_buffer - > backing_store ( ) ) , <nl> - byte_length } ) ) { <nl> - return * array_buffer ; <nl> + size_t compiled_size = wasm_serializer . GetSerializedNativeModuleSize ( ) ; <nl> + void * array_data = isolate - > array_buffer_allocator ( ) - > Allocate ( compiled_size ) ; <nl> + Handle < JSArrayBuffer > array_buffer = <nl> + isolate - > factory ( ) - > NewJSArrayBuffer ( SharedFlag : : kNotShared ) ; <nl> + JSArrayBuffer : : Setup ( array_buffer , isolate , false , array_data , compiled_size ) ; <nl> + if ( ! array_data | | <nl> + ! wasm_serializer . SerializeNativeModule ( <nl> + { reinterpret_cast < uint8_t * > ( array_data ) , compiled_size } ) ) { <nl> + return ReadOnlyRoots ( isolate ) . undefined_value ( ) ; <nl> } <nl> - <nl> - / / Error . Return undefined . <nl> - return ReadOnlyRoots ( isolate ) . undefined_value ( ) ; <nl> + return * array_buffer ; <nl> } <nl> <nl> / / Take an array buffer and attempt to reconstruct a compiled wasm module . <nl> mmm a / src / runtime / runtime - typedarray . cc <nl> ppp b / src / runtime / runtime - typedarray . cc <nl> RUNTIME_FUNCTION ( Runtime_ArrayBufferDetach ) { <nl> isolate , NewTypeError ( MessageTemplate : : kNotTypedArray ) ) ; <nl> } <nl> Handle < JSArrayBuffer > array_buffer = Handle < JSArrayBuffer > : : cast ( argument ) ; <nl> + if ( ! array_buffer - > is_detachable ( ) ) { <nl> + return ReadOnlyRoots ( isolate ) . 
undefined_value ( ) ; <nl> + } <nl> + if ( array_buffer - > backing_store ( ) = = nullptr ) { <nl> + CHECK_EQ ( 0 , array_buffer - > byte_length ( ) ) ; <nl> + return ReadOnlyRoots ( isolate ) . undefined_value ( ) ; <nl> + } <nl> + / / Shared array buffers should never be detached . <nl> + CHECK ( ! array_buffer - > is_shared ( ) ) ; <nl> + DCHECK ( ! array_buffer - > is_external ( ) ) ; <nl> + void * backing_store = array_buffer - > backing_store ( ) ; <nl> + size_t byte_length = array_buffer - > byte_length ( ) ; <nl> + array_buffer - > set_is_external ( true ) ; <nl> + isolate - > heap ( ) - > UnregisterArrayBuffer ( * array_buffer ) ; <nl> array_buffer - > Detach ( ) ; <nl> + isolate - > array_buffer_allocator ( ) - > Free ( backing_store , byte_length ) ; <nl> return ReadOnlyRoots ( isolate ) . undefined_value ( ) ; <nl> } <nl> <nl> mmm a / src / snapshot / deserializer . cc <nl> ppp b / src / snapshot / deserializer . cc <nl> HeapObject Deserializer : : PostProcessNewObject ( HeapObject obj , <nl> if ( ! typed_array . is_on_heap ( ) ) { <nl> Smi store_index ( <nl> reinterpret_cast < Address > ( typed_array . external_pointer ( ) ) ) ; <nl> - auto backing_store = backing_stores_ [ store_index . value ( ) ] ; <nl> - auto start = backing_store <nl> - ? reinterpret_cast < byte * > ( backing_store - > buffer_start ( ) ) <nl> - : nullptr ; <nl> - typed_array . set_external_pointer ( start + typed_array . byte_offset ( ) ) ; <nl> + byte * backing_store = off_heap_backing_stores_ [ store_index . value ( ) ] + <nl> + typed_array . byte_offset ( ) ; <nl> + typed_array . set_external_pointer ( backing_store ) ; <nl> } <nl> } else if ( obj . IsJSArrayBuffer ( ) ) { <nl> JSArrayBuffer buffer = JSArrayBuffer : : cast ( obj ) ; <nl> / / Only fixup for the off - heap case . <nl> if ( buffer . backing_store ( ) ! = nullptr ) { <nl> Smi store_index ( reinterpret_cast < Address > ( buffer . backing_store ( ) ) ) ; <nl> - auto backing_store = backing_stores_ [ store_index . value ( ) ] ; <nl> - if ( backing_store ) { <nl> - buffer . Attach ( backing_store ) ; <nl> - } else { <nl> - buffer . SetupEmpty ( SharedFlag : : kNotShared ) ; <nl> - } <nl> + void * backing_store = off_heap_backing_stores_ [ store_index . value ( ) ] ; <nl> + <nl> + buffer . set_backing_store ( backing_store ) ; <nl> + isolate_ - > heap ( ) - > RegisterNewArrayBuffer ( buffer ) ; <nl> } <nl> } else if ( obj . IsBytecodeArray ( ) ) { <nl> / / TODO ( mythria ) : Remove these once we store the default values for these <nl> bool Deserializer : : ReadData ( TSlot current , TSlot limit , <nl> <nl> case kOffHeapBackingStore : { <nl> int byte_length = source_ . GetInt ( ) ; <nl> - std : : unique_ptr < BackingStore > backing_store = <nl> - BackingStore : : Allocate ( isolate , byte_length , SharedFlag : : kNotShared , <nl> - InitializedFlag : : kUninitialized ) ; <nl> + byte * backing_store = static_cast < byte * > ( <nl> + isolate - > array_buffer_allocator ( ) - > AllocateUninitialized ( <nl> + byte_length ) ) ; <nl> CHECK_NOT_NULL ( backing_store ) ; <nl> - source_ . CopyRaw ( backing_store - > buffer_start ( ) , byte_length ) ; <nl> - backing_stores_ . push_back ( std : : move ( backing_store ) ) ; <nl> + source_ . CopyRaw ( backing_store , byte_length ) ; <nl> + off_heap_backing_stores_ . push_back ( backing_store ) ; <nl> break ; <nl> } <nl> <nl> mmm a / src / snapshot / deserializer . h <nl> ppp b / src / snapshot / deserializer . h <nl> <nl> <nl> # include " src / objects / allocation - site . 
h " <nl> # include " src / objects / api - callbacks . h " <nl> - # include " src / objects / backing - store . h " <nl> # include " src / objects / code . h " <nl> # include " src / objects / js - array . h " <nl> # include " src / objects / map . h " <nl> class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer { <nl> allocator ( ) - > DecodeReservation ( data - > Reservations ( ) ) ; <nl> / / We start the indices here at 1 , so that we can distinguish between an <nl> / / actual index and a nullptr in a deserialized object requiring fix - up . <nl> - backing_stores_ . push_back ( { } ) ; <nl> + off_heap_backing_stores_ . push_back ( nullptr ) ; <nl> } <nl> <nl> void Initialize ( Isolate * isolate ) ; <nl> class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer { <nl> std : : vector < CallHandlerInfo > call_handler_infos_ ; <nl> std : : vector < Handle < String > > new_internalized_strings_ ; <nl> std : : vector < Handle < Script > > new_scripts_ ; <nl> - std : : vector < std : : shared_ptr < BackingStore > > backing_stores_ ; <nl> + std : : vector < byte * > off_heap_backing_stores_ ; <nl> <nl> DeserializerAllocator allocator_ ; <nl> const bool deserializing_user_code_ ; <nl> mmm a / src / wasm / c - api . cc <nl> ppp b / src / wasm / c - api . cc <nl> auto Memory : : make ( Store * store_abs , const MemoryType * type ) - > own < Memory * > { <nl> if ( maximum < minimum ) return nullptr ; <nl> if ( maximum > i : : wasm : : kSpecMaxWasmMemoryPages ) return nullptr ; <nl> } <nl> - / / TODO ( wasm + ) : Support shared memory . <nl> - i : : SharedFlag shared = i : : SharedFlag : : kNotShared ; <nl> + bool is_shared = false ; / / TODO ( wasm + ) : Support shared memory . <nl> i : : Handle < i : : WasmMemoryObject > memory_obj ; <nl> - if ( ! i : : WasmMemoryObject : : New ( isolate , minimum , maximum , shared ) <nl> + if ( ! i : : WasmMemoryObject : : New ( isolate , minimum , maximum , is_shared ) <nl> . ToHandle ( & memory_obj ) ) { <nl> return own < Memory * > ( ) ; <nl> } <nl> mmm a / src / wasm / module - compiler . cc <nl> ppp b / src / wasm / module - compiler . cc <nl> <nl> # include " src / wasm / wasm - import - wrapper - cache . h " <nl> # include " src / wasm / wasm - js . h " <nl> # include " src / wasm / wasm - limits . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> # include " src / wasm / wasm - objects - inl . h " <nl> # include " src / wasm / wasm - opcodes . h " <nl> # include " src / wasm / wasm - result . h " <nl> mmm a / src / wasm / module - instantiate . cc <nl> ppp b / src / wasm / module - instantiate . cc <nl> class InstanceBuilder { <nl> InstanceBuilder ( Isolate * isolate , ErrorThrower * thrower , <nl> Handle < WasmModuleObject > module_object , <nl> MaybeHandle < JSReceiver > ffi , <nl> - MaybeHandle < JSArrayBuffer > memory_buffer ) ; <nl> + MaybeHandle < JSArrayBuffer > memory ) ; <nl> <nl> / / Build an instance , in all of its glory . 
<nl> MaybeHandle < WasmInstanceObject > Build ( ) ; <nl> class InstanceBuilder { <nl> ErrorThrower * thrower_ ; <nl> Handle < WasmModuleObject > module_object_ ; <nl> MaybeHandle < JSReceiver > ffi_ ; <nl> - MaybeHandle < JSArrayBuffer > memory_buffer_ ; <nl> - Handle < WasmMemoryObject > memory_object_ ; <nl> + MaybeHandle < JSArrayBuffer > memory_ ; <nl> Handle < JSArrayBuffer > untagged_globals_ ; <nl> Handle < FixedArray > tagged_globals_ ; <nl> std : : vector < Handle < WasmExceptionObject > > exception_wrappers_ ; <nl> class InstanceBuilder { <nl> <nl> void SanitizeImports ( ) ; <nl> <nl> - / / Find the imported memory if there is one . <nl> - bool FindImportedMemory ( ) ; <nl> - <nl> - / / Allocate the memory . <nl> - bool AllocateMemory ( ) ; <nl> + / / Find the imported memory buffer if there is one . This is used to see if we <nl> + / / need to recompile with bounds checks before creating the instance . <nl> + MaybeHandle < JSArrayBuffer > FindImportedMemoryBuffer ( ) const ; <nl> <nl> / / Processes a single imported function . <nl> bool ProcessImportedFunction ( Handle < WasmInstanceObject > instance , <nl> class InstanceBuilder { <nl> / / Process initialization of globals . <nl> void InitGlobals ( Handle < WasmInstanceObject > instance ) ; <nl> <nl> + / / Allocate memory for a module instance as a new JSArrayBuffer . <nl> + Handle < JSArrayBuffer > AllocateMemory ( uint32_t initial_pages , <nl> + uint32_t maximum_pages ) ; <nl> <nl> bool NeedsWrappers ( ) const ; <nl> <nl> class InstanceBuilder { <nl> MaybeHandle < WasmInstanceObject > InstantiateToInstanceObject ( <nl> Isolate * isolate , ErrorThrower * thrower , <nl> Handle < WasmModuleObject > module_object , MaybeHandle < JSReceiver > imports , <nl> - MaybeHandle < JSArrayBuffer > memory_buffer ) { <nl> - InstanceBuilder builder ( isolate , thrower , module_object , imports , <nl> - memory_buffer ) ; <nl> + MaybeHandle < JSArrayBuffer > memory ) { <nl> + InstanceBuilder builder ( isolate , thrower , module_object , imports , memory ) ; <nl> auto instance = builder . Build ( ) ; <nl> if ( ! instance . is_null ( ) & & builder . ExecuteStartFunction ( ) ) { <nl> return instance ; <nl> MaybeHandle < WasmInstanceObject > InstantiateToInstanceObject ( <nl> InstanceBuilder : : InstanceBuilder ( Isolate * isolate , ErrorThrower * thrower , <nl> Handle < WasmModuleObject > module_object , <nl> MaybeHandle < JSReceiver > ffi , <nl> - MaybeHandle < JSArrayBuffer > memory_buffer ) <nl> + MaybeHandle < JSArrayBuffer > memory ) <nl> : isolate_ ( isolate ) , <nl> enabled_ ( module_object - > native_module ( ) - > enabled_features ( ) ) , <nl> module_ ( module_object - > module ( ) ) , <nl> thrower_ ( thrower ) , <nl> module_object_ ( module_object ) , <nl> ffi_ ( ffi ) , <nl> - memory_buffer_ ( memory_buffer ) { <nl> + memory_ ( memory ) { <nl> sanitized_imports_ . reserve ( module_ - > import_table . size ( ) ) ; <nl> } <nl> <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> NativeModule * native_module = module_object_ - > native_module ( ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Set up the memory buffer and memory objects . <nl> + / / Allocate the memory array buffer . 
<nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> uint32_t initial_pages = module_ - > initial_pages ; <nl> auto initial_pages_counter = SELECT_WASM_COUNTER ( <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> isolate_ - > counters ( ) - > wasm_wasm_max_mem_pages_count ( ) ; <nl> max_pages_counter - > AddSample ( module_ - > maximum_pages ) ; <nl> } <nl> - <nl> - if ( memory_buffer_ . is_null ( ) ) { <nl> - / / Search for imported memory first . <nl> - if ( ! FindImportedMemory ( ) ) { <nl> - if ( ! AllocateMemory ( ) ) { <nl> - DCHECK ( isolate_ - > has_pending_exception ( ) | | thrower_ - > error ( ) ) ; <nl> - return { } ; <nl> - } <nl> + / / Asm . js has memory_ already set at this point , so we don ' t want to <nl> + / / overwrite it . <nl> + if ( memory_ . is_null ( ) ) { <nl> + memory_ = FindImportedMemoryBuffer ( ) ; <nl> + } <nl> + if ( ! memory_ . is_null ( ) ) { <nl> + / / Set externally passed ArrayBuffer non detachable . <nl> + Handle < JSArrayBuffer > memory = memory_ . ToHandleChecked ( ) ; <nl> + memory - > set_is_detachable ( false ) ; <nl> + <nl> + DCHECK_IMPLIES ( native_module - > use_trap_handler ( ) , <nl> + module_ - > origin = = kAsmJsOrigin | | <nl> + memory - > is_wasm_memory ( ) | | <nl> + memory - > backing_store ( ) = = nullptr ) ; <nl> + } else if ( initial_pages > 0 | | native_module - > use_trap_handler ( ) ) { <nl> + / / We need to unconditionally create a guard region if using trap handlers , <nl> + / / even when the size is zero to prevent null - dereference issues <nl> + / / ( e . g . https : / / crbug . com / 769637 ) . <nl> + / / Allocate memory if the initial size is more than 0 pages . <nl> + memory_ = AllocateMemory ( initial_pages , module_ - > maximum_pages ) ; <nl> + if ( memory_ . is_null ( ) ) { <nl> + / / failed to allocate memory <nl> + DCHECK ( isolate_ - > has_pending_exception ( ) | | thrower_ - > error ( ) ) ; <nl> + return { } ; <nl> } <nl> - } else { <nl> - / / Asm . js has { memory_buffer_ } already set at this point . <nl> - DCHECK_EQ ( kAsmJsOrigin , module_ - > origin ) ; <nl> - / / asm . js instantiation should have set these flags . <nl> - DCHECK ( ! memory_buffer_ . ToHandleChecked ( ) - > is_detachable ( ) ) ; <nl> - DCHECK ( memory_buffer_ . ToHandleChecked ( ) - > is_asmjs_memory ( ) ) ; <nl> - memory_object_ = <nl> - WasmMemoryObject : : New ( isolate_ , memory_buffer_ , kV8MaxWasmMemoryPages ) ; <nl> } <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> TRACE ( " New module instantiation for % p \ n " , native_module ) ; <nl> Handle < WasmInstanceObject > instance = <nl> WasmInstanceObject : : New ( isolate_ , module_object_ ) ; <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Attach the memory to the instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - if ( module_ - > has_memory ) { <nl> - DCHECK ( ! memory_object_ . is_null ( ) ) ; <nl> - if ( ! instance - > has_memory_object ( ) ) { <nl> - instance - > set_memory_object ( * memory_object_ ) ; <nl> - } <nl> - / / Add the instance object to the list of instances for this memory . <nl> - WasmMemoryObject : : AddInstance ( isolate_ , memory_object_ , instance ) ; <nl> - <nl> - / / Double - check the { memory } array buffer matches the instance . 
<nl> - Handle < JSArrayBuffer > memory = memory_buffer_ . ToHandleChecked ( ) ; <nl> - CHECK_EQ ( instance - > memory_size ( ) , memory - > byte_length ( ) ) ; <nl> - CHECK_EQ ( instance - > memory_start ( ) , memory - > backing_store ( ) ) ; <nl> - } <nl> + NativeModuleModificationScope native_modification_scope ( native_module ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Set up the globals for the new instance . <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> uint32_t untagged_globals_buffer_size = module_ - > untagged_globals_buffer_size ; <nl> if ( untagged_globals_buffer_size > 0 ) { <nl> - MaybeHandle < JSArrayBuffer > result = <nl> - isolate_ - > factory ( ) - > NewJSArrayBufferAndBackingStore ( <nl> - untagged_globals_buffer_size , InitializedFlag : : kZeroInitialized , <nl> - AllocationType : : kOld ) ; <nl> - <nl> - if ( ! result . ToHandle ( & untagged_globals_ ) ) { <nl> + void * backing_store = isolate_ - > array_buffer_allocator ( ) - > Allocate ( <nl> + untagged_globals_buffer_size ) ; <nl> + if ( backing_store = = nullptr ) { <nl> + thrower_ - > RangeError ( " Out of memory : wasm globals " ) ; <nl> + return { } ; <nl> + } <nl> + untagged_globals_ = isolate_ - > factory ( ) - > NewJSArrayBuffer ( <nl> + SharedFlag : : kNotShared , AllocationType : : kOld ) ; <nl> + constexpr bool is_external = false ; <nl> + constexpr bool is_wasm_memory = false ; <nl> + JSArrayBuffer : : Setup ( untagged_globals_ , isolate_ , is_external , <nl> + backing_store , untagged_globals_buffer_size , <nl> + SharedFlag : : kNotShared , is_wasm_memory ) ; <nl> + if ( untagged_globals_ . is_null ( ) ) { <nl> thrower_ - > RangeError ( " Out of memory : wasm globals " ) ; <nl> return { } ; <nl> } <nl> - <nl> - instance - > set_untagged_globals_buffer ( * untagged_globals_ ) ; <nl> instance - > set_globals_start ( <nl> reinterpret_cast < byte * > ( untagged_globals_ - > backing_store ( ) ) ) ; <nl> + instance - > set_untagged_globals_buffer ( * untagged_globals_ ) ; <nl> } <nl> <nl> uint32_t tagged_globals_buffer_size = module_ - > tagged_globals_buffer_size ; <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> instance - > set_indirect_function_tables ( * tables ) ; <nl> } <nl> <nl> - NativeModuleModificationScope native_modification_scope ( native_module ) ; <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Process the imports for the module . <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> InitializeExceptions ( instance ) ; <nl> } <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Create the WebAssembly . Memory object . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + if ( module_ - > has_memory ) { <nl> + if ( ! instance - > has_memory_object ( ) ) { <nl> + / / No memory object exists . Create one . <nl> + Handle < WasmMemoryObject > memory_object = WasmMemoryObject : : New ( <nl> + isolate_ , memory_ , <nl> + module_ - > maximum_pages ! = 0 ? module_ - > maximum_pages : - 1 ) ; <nl> + instance - > set_memory_object ( * memory_object ) ; <nl> + } <nl> + <nl> + / / Add the instance object to the list of instances for this memory . 
<nl> + Handle < WasmMemoryObject > memory_object ( instance - > memory_object ( ) , isolate_ ) ; <nl> + WasmMemoryObject : : AddInstance ( isolate_ , memory_object , instance ) ; <nl> + <nl> + if ( ! memory_ . is_null ( ) ) { <nl> + / / Double - check the { memory } array buffer matches the instance . <nl> + Handle < JSArrayBuffer > memory = memory_ . ToHandleChecked ( ) ; <nl> + CHECK_EQ ( instance - > memory_size ( ) , memory - > byte_length ( ) ) ; <nl> + CHECK_EQ ( instance - > memory_start ( ) , memory - > backing_store ( ) ) ; <nl> + } <nl> + } <nl> + <nl> / / The bulk memory proposal changes the MVP behavior here ; the segments are <nl> / / written as if ` memory . init ` and ` table . init ` are executed directly , and <nl> / / not bounds checked ahead of time . <nl> void InstanceBuilder : : SanitizeImports ( ) { <nl> } <nl> } <nl> <nl> - bool InstanceBuilder : : FindImportedMemory ( ) { <nl> + MaybeHandle < JSArrayBuffer > InstanceBuilder : : FindImportedMemoryBuffer ( ) const { <nl> DCHECK_EQ ( module_ - > import_table . size ( ) , sanitized_imports_ . size ( ) ) ; <nl> for ( size_t index = 0 ; index < module_ - > import_table . size ( ) ; index + + ) { <nl> - WasmImport import = module_ - > import_table [ index ] ; <nl> + const WasmImport & import = module_ - > import_table [ index ] ; <nl> <nl> if ( import . kind = = kExternalMemory ) { <nl> - auto & value = sanitized_imports_ [ index ] . value ; <nl> - if ( ! value - > IsWasmMemoryObject ( ) ) return false ; <nl> - memory_object_ = Handle < WasmMemoryObject > : : cast ( value ) ; <nl> - memory_buffer_ = <nl> - Handle < JSArrayBuffer > ( memory_object_ - > array_buffer ( ) , isolate_ ) ; <nl> - return true ; <nl> + const auto & value = sanitized_imports_ [ index ] . value ; <nl> + if ( ! value - > IsWasmMemoryObject ( ) ) { <nl> + return { } ; <nl> + } <nl> + auto memory = Handle < WasmMemoryObject > : : cast ( value ) ; <nl> + Handle < JSArrayBuffer > buffer ( memory - > array_buffer ( ) , isolate_ ) ; <nl> + return buffer ; <nl> } <nl> } <nl> - return false ; <nl> + return { } ; <nl> } <nl> <nl> bool InstanceBuilder : : ProcessImportedFunction ( <nl> bool InstanceBuilder : : ProcessImportedMemory ( Handle < WasmInstanceObject > instance , <nl> Handle < String > module_name , <nl> Handle < String > import_name , <nl> Handle < Object > value ) { <nl> + / / Validation should have failed if more than one memory object was <nl> + / / provided . <nl> + DCHECK ( ! instance - > has_memory_object ( ) ) ; <nl> if ( ! value - > IsWasmMemoryObject ( ) ) { <nl> ReportLinkError ( " memory import must be a WebAssembly . Memory object " , <nl> import_index , module_name , import_name ) ; <nl> return false ; <nl> } <nl> - auto memory_object = Handle < WasmMemoryObject > : : cast ( value ) ; <nl> - <nl> - / / The imported memory should have been already set up early . <nl> - CHECK_EQ ( instance - > memory_object ( ) , * memory_object ) ; <nl> - <nl> - Handle < JSArrayBuffer > buffer ( memory_object_ - > array_buffer ( ) , isolate_ ) ; <nl> + auto memory = Handle < WasmMemoryObject > : : cast ( value ) ; <nl> + instance - > set_memory_object ( * memory ) ; <nl> + Handle < JSArrayBuffer > buffer ( memory - > array_buffer ( ) , isolate_ ) ; <nl> / / memory_ should have already been assigned in Build ( ) . <nl> - DCHECK_EQ ( * memory_buffer_ . ToHandleChecked ( ) , * buffer ) ; <nl> + DCHECK_EQ ( * memory_ . 
ToHandleChecked ( ) , * buffer ) ; <nl> uint32_t imported_cur_pages = <nl> static_cast < uint32_t > ( buffer - > byte_length ( ) / kWasmPageSize ) ; <nl> if ( imported_cur_pages < module_ - > initial_pages ) { <nl> bool InstanceBuilder : : ProcessImportedMemory ( Handle < WasmInstanceObject > instance , <nl> imported_cur_pages ) ; <nl> return false ; <nl> } <nl> - int32_t imported_maximum_pages = memory_object_ - > maximum_pages ( ) ; <nl> + int32_t imported_maximum_pages = memory - > maximum_pages ( ) ; <nl> if ( module_ - > has_maximum_pages ) { <nl> if ( imported_maximum_pages < 0 ) { <nl> thrower_ - > LinkError ( <nl> void InstanceBuilder : : InitGlobals ( Handle < WasmInstanceObject > instance ) { <nl> } <nl> <nl> / / Allocate memory for a module instance as a new JSArrayBuffer . <nl> - bool InstanceBuilder : : AllocateMemory ( ) { <nl> - auto initial_pages = module_ - > initial_pages ; <nl> - auto maximum_pages = module_ - > has_maximum_pages ? module_ - > maximum_pages : - 1 ; <nl> + Handle < JSArrayBuffer > InstanceBuilder : : AllocateMemory ( uint32_t initial_pages , <nl> + uint32_t maximum_pages ) { <nl> if ( initial_pages > max_mem_pages ( ) ) { <nl> thrower_ - > RangeError ( " Out of memory : wasm memory too large " ) ; <nl> - return false ; <nl> - } <nl> - auto shared = ( module_ - > has_shared_memory & & enabled_ . threads ) <nl> - ? SharedFlag : : kShared <nl> - : SharedFlag : : kNotShared ; <nl> - <nl> - MaybeHandle < WasmMemoryObject > result = <nl> - WasmMemoryObject : : New ( isolate_ , initial_pages , maximum_pages , shared ) ; <nl> - <nl> - if ( ! result . ToHandle ( & memory_object_ ) ) { <nl> - thrower_ - > RangeError ( " Out of memory : wasm memory " ) ; <nl> - return false ; <nl> + return Handle < JSArrayBuffer > : : null ( ) ; <nl> + } <nl> + const bool is_shared_memory = module_ - > has_shared_memory & & enabled_ . threads ; <nl> + Handle < JSArrayBuffer > mem_buffer ; <nl> + if ( is_shared_memory ) { <nl> + if ( ! NewSharedArrayBuffer ( isolate_ , initial_pages * kWasmPageSize , <nl> + maximum_pages * kWasmPageSize ) <nl> + . ToHandle ( & mem_buffer ) ) { <nl> + thrower_ - > RangeError ( " Out of memory : wasm shared memory " ) ; <nl> + } <nl> + } else { <nl> + if ( ! NewArrayBuffer ( isolate_ , initial_pages * kWasmPageSize ) <nl> + . ToHandle ( & mem_buffer ) ) { <nl> + thrower_ - > RangeError ( " Out of memory : wasm memory " ) ; <nl> + } <nl> } <nl> - memory_buffer_ = <nl> - Handle < JSArrayBuffer > ( memory_object_ - > array_buffer ( ) , isolate_ ) ; <nl> - return true ; <nl> + return mem_buffer ; <nl> } <nl> <nl> bool InstanceBuilder : : NeedsWrappers ( ) const { <nl> mmm a / src / wasm / wasm - code - manager . cc <nl> ppp b / src / wasm / wasm - code - manager . cc <nl> NativeModule : : ~ NativeModule ( ) { <nl> import_wrapper_cache_ . reset ( ) ; <nl> } <nl> <nl> - WasmCodeManager : : WasmCodeManager ( size_t max_committed ) <nl> - : max_committed_code_space_ ( max_committed ) , <nl> + WasmCodeManager : : WasmCodeManager ( WasmMemoryTracker * memory_tracker , <nl> + size_t max_committed ) <nl> + : memory_tracker_ ( memory_tracker ) , <nl> + max_committed_code_space_ ( max_committed ) , <nl> # if defined ( V8_OS_WIN_X64 ) <nl> is_win64_unwind_info_disabled_for_testing_ ( false ) , <nl> # endif <nl> VirtualMemory WasmCodeManager : : TryAllocate ( size_t size , void * hint ) { <nl> DCHECK_GT ( size , 0 ) ; <nl> size_t allocate_page_size = page_allocator - > AllocatePageSize ( ) ; <nl> size = RoundUp ( size , allocate_page_size ) ; <nl> - if ( ! 
BackingStore : : ReserveAddressSpace ( size ) ) return { } ; <nl> + if ( ! memory_tracker_ - > ReserveAddressSpace ( size ) ) return { } ; <nl> if ( hint = = nullptr ) hint = page_allocator - > GetRandomMmapAddr ( ) ; <nl> <nl> VirtualMemory mem ( page_allocator , size , hint , allocate_page_size ) ; <nl> if ( ! mem . IsReserved ( ) ) { <nl> - BackingStore : : ReleaseReservation ( size ) ; <nl> + memory_tracker_ - > ReleaseReservation ( size ) ; <nl> return { } ; <nl> } <nl> TRACE_HEAP ( " VMem alloc : 0x % " PRIxPTR " : 0x % " PRIxPTR " ( % zu ) \ n " , mem . address ( ) , <nl> void WasmCodeManager : : FreeNativeModule ( Vector < VirtualMemory > owned_code_space , <nl> # endif <nl> <nl> lookup_map_ . erase ( code_space . address ( ) ) ; <nl> - BackingStore : : ReleaseReservation ( code_space . size ( ) ) ; <nl> + memory_tracker_ - > ReleaseReservation ( code_space . size ( ) ) ; <nl> code_space . Free ( ) ; <nl> DCHECK ( ! code_space . IsReserved ( ) ) ; <nl> } <nl> mmm a / src / wasm / wasm - code - manager . h <nl> ppp b / src / wasm / wasm - code - manager . h <nl> class NativeModule ; <nl> class WasmCodeManager ; <nl> struct WasmCompilationResult ; <nl> class WasmEngine ; <nl> + class WasmMemoryTracker ; <nl> class WasmImportWrapperCache ; <nl> struct WasmModule ; <nl> <nl> class V8_EXPORT_PRIVATE NativeModule final { <nl> <nl> class V8_EXPORT_PRIVATE WasmCodeManager final { <nl> public : <nl> - explicit WasmCodeManager ( size_t max_committed ) ; <nl> + explicit WasmCodeManager ( WasmMemoryTracker * memory_tracker , <nl> + size_t max_committed ) ; <nl> <nl> # ifdef DEBUG <nl> ~ WasmCodeManager ( ) { <nl> class V8_EXPORT_PRIVATE WasmCodeManager final { <nl> <nl> void AssignRange ( base : : AddressRegion , NativeModule * ) ; <nl> <nl> + WasmMemoryTracker * const memory_tracker_ ; <nl> + <nl> size_t max_committed_code_space_ ; <nl> <nl> # if defined ( V8_OS_WIN_X64 ) <nl> mmm a / src / wasm / wasm - engine . cc <nl> ppp b / src / wasm / wasm - engine . cc <nl> struct WasmEngine : : NativeModuleInfo { <nl> int8_t num_code_gcs_triggered = 0 ; <nl> } ; <nl> <nl> - WasmEngine : : WasmEngine ( ) : code_manager_ ( FLAG_wasm_max_code_space * MB ) { } <nl> + WasmEngine : : WasmEngine ( ) <nl> + : code_manager_ ( & memory_tracker_ , FLAG_wasm_max_code_space * MB ) { } <nl> <nl> WasmEngine : : ~ WasmEngine ( ) { <nl> / / Synchronize on all background compile tasks . <nl> mmm a / src / wasm / wasm - engine . h <nl> ppp b / src / wasm / wasm - engine . h <nl> <nl> <nl> # include " src / tasks / cancelable - task . h " <nl> # include " src / wasm / wasm - code - manager . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> # include " src / wasm / wasm - tier . h " <nl> # include " src / zone / accounting - allocator . h " <nl> <nl> class CompilationStatistics ; <nl> class HeapNumber ; <nl> class WasmInstanceObject ; <nl> class WasmModuleObject ; <nl> - class JSArrayBuffer ; <nl> <nl> namespace wasm { <nl> <nl> class V8_EXPORT_PRIVATE WasmEngine { <nl> <nl> WasmCodeManager * code_manager ( ) { return & code_manager_ ; } <nl> <nl> + WasmMemoryTracker * memory_tracker ( ) { return & memory_tracker_ ; } <nl> + <nl> AccountingAllocator * allocator ( ) { return & allocator_ ; } <nl> <nl> / / Compilation statistics for TurboFan compilations . <nl> class V8_EXPORT_PRIVATE WasmEngine { <nl> / / calling this method . 
<nl> void PotentiallyFinishCurrentGC ( ) ; <nl> <nl> + WasmMemoryTracker memory_tracker_ ; <nl> WasmCodeManager code_manager_ ; <nl> AccountingAllocator allocator_ ; <nl> <nl> mmm a / src / wasm / wasm - js . cc <nl> ppp b / src / wasm / wasm - js . cc <nl> <nl> # include " src / wasm / streaming - decoder . h " <nl> # include " src / wasm / wasm - engine . h " <nl> # include " src / wasm / wasm - limits . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> # include " src / wasm / wasm - objects - inl . h " <nl> # include " src / wasm / wasm - serialization . h " <nl> <nl> void WebAssemblyMemory ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> return ; <nl> } <nl> <nl> - auto shared = i : : SharedFlag : : kNotShared ; <nl> + bool is_shared_memory = false ; <nl> auto enabled_features = i : : wasm : : WasmFeaturesFromIsolate ( i_isolate ) ; <nl> if ( enabled_features . threads ) { <nl> / / Shared property of descriptor <nl> void WebAssemblyMemory ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> descriptor - > Get ( context , shared_key ) ; <nl> v8 : : Local < v8 : : Value > value ; <nl> if ( maybe_value . ToLocal ( & value ) ) { <nl> - shared = value - > BooleanValue ( isolate ) ? i : : SharedFlag : : kShared <nl> - : i : : SharedFlag : : kNotShared ; <nl> + is_shared_memory = value - > BooleanValue ( isolate ) ; <nl> } <nl> / / Throw TypeError if shared is true , and the descriptor has no " maximum " <nl> - if ( shared = = i : : SharedFlag : : kShared & & maximum = = - 1 ) { <nl> + if ( is_shared_memory & & maximum = = - 1 ) { <nl> thrower . TypeError ( <nl> " If shared is true , maximum property should be defined . " ) ; <nl> return ; <nl> void WebAssemblyMemory ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> <nl> i : : Handle < i : : JSObject > memory_obj ; <nl> if ( ! i : : WasmMemoryObject : : New ( i_isolate , static_cast < uint32_t > ( initial ) , <nl> - static_cast < uint32_t > ( maximum ) , shared ) <nl> + static_cast < uint32_t > ( maximum ) , <nl> + is_shared_memory ) <nl> . ToHandle ( & memory_obj ) ) { <nl> thrower . RangeError ( " could not allocate memory " ) ; <nl> return ; <nl> } <nl> - if ( shared = = i : : SharedFlag : : kShared ) { <nl> + if ( is_shared_memory ) { <nl> i : : Handle < i : : JSArrayBuffer > buffer ( <nl> i : : Handle < i : : WasmMemoryObject > : : cast ( memory_obj ) - > array_buffer ( ) , <nl> i_isolate ) ; <nl> new file mode 100644 <nl> index 00000000000 . . f2036495425 <nl> mmm / dev / null <nl> ppp b / src / wasm / wasm - memory . cc <nl> <nl> + / / Copyright 2017 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # include < limits > <nl> + <nl> + # include " src / heap / heap - inl . h " <nl> + # include " src / logging / counters . h " <nl> + # include " src / objects / js - array - buffer - inl . h " <nl> + # include " src / objects / objects - inl . h " <nl> + # include " src / wasm / wasm - engine . h " <nl> + # include " src / wasm / wasm - limits . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> + # include " src / wasm / wasm - module . 
h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + namespace wasm { <nl> + <nl> + namespace { <nl> + <nl> + constexpr size_t kNegativeGuardSize = 1u < < 31 ; / / 2GiB <nl> + <nl> + void AddAllocationStatusSample ( Isolate * isolate , <nl> + WasmMemoryTracker : : AllocationStatus status ) { <nl> + isolate - > counters ( ) - > wasm_memory_allocation_result ( ) - > AddSample ( <nl> + static_cast < int > ( status ) ) ; <nl> + } <nl> + <nl> + bool RunWithGCAndRetry ( const std : : function < bool ( ) > & fn , Heap * heap , <nl> + bool * did_retry ) { <nl> + / / Try up to three times ; getting rid of dead JSArrayBuffer allocations might <nl> + / / require two GCs because the first GC may be incremental and may have <nl> + / / floating garbage . <nl> + static constexpr int kAllocationRetries = 2 ; <nl> + <nl> + for ( int trial = 0 ; ; + + trial ) { <nl> + if ( fn ( ) ) return true ; <nl> + / / { fn } failed . If { kAllocationRetries } is reached , fail . <nl> + * did_retry = true ; <nl> + if ( trial = = kAllocationRetries ) return false ; <nl> + / / Otherwise , collect garbage and retry . <nl> + / / TODO ( wasm ) : Since reservation limits are engine - wide , we should do an <nl> + / / engine - wide GC here ( i . e . trigger a GC in each isolate using the engine , <nl> + / / and wait for them all to finish ) . See https : / / crbug . com / v8 / 9405 . <nl> + heap - > MemoryPressureNotification ( MemoryPressureLevel : : kCritical , true ) ; <nl> + } <nl> + } <nl> + <nl> + void * TryAllocateBackingStore ( WasmMemoryTracker * memory_tracker , Heap * heap , <nl> + size_t size , size_t max_size , <nl> + void * * allocation_base , <nl> + size_t * allocation_length ) { <nl> + using AllocationStatus = WasmMemoryTracker : : AllocationStatus ; <nl> + # if V8_TARGET_ARCH_64_BIT <nl> + constexpr bool kRequireFullGuardRegions = true ; <nl> + # else <nl> + constexpr bool kRequireFullGuardRegions = false ; <nl> + # endif <nl> + / / Let the WasmMemoryTracker know we are going to reserve a bunch of <nl> + / / address space . <nl> + size_t reservation_size = std : : max ( max_size , size ) ; <nl> + bool did_retry = false ; <nl> + <nl> + auto reserve_memory_space = [ & ] { <nl> + / / For guard regions , we always allocate the largest possible offset <nl> + / / into the heap , so the addressable memory after the guard page can <nl> + / / be made inaccessible . <nl> + / / <nl> + / / To protect against 32 - bit integer overflow issues , we also <nl> + / / protect the 2GiB before the valid part of the memory buffer . <nl> + * allocation_length = <nl> + kRequireFullGuardRegions <nl> + ? RoundUp ( kWasmMaxHeapOffset + kNegativeGuardSize , CommitPageSize ( ) ) <nl> + : RoundUp ( base : : bits : : RoundUpToPowerOfTwo ( reservation_size ) , <nl> + kWasmPageSize ) ; <nl> + DCHECK_GE ( * allocation_length , size ) ; <nl> + DCHECK_GE ( * allocation_length , kWasmPageSize ) ; <nl> + <nl> + return memory_tracker - > ReserveAddressSpace ( * allocation_length ) ; <nl> + } ; <nl> + if ( ! RunWithGCAndRetry ( reserve_memory_space , heap , & did_retry ) ) { <nl> + / / Reset reservation_size to initial size so that at least the initial size <nl> + / / can be allocated if maximum size reservation is not possible . <nl> + reservation_size = size ; <nl> + <nl> + / / We are over the address space limit . Fail . <nl> + / / <nl> + / / When running under the correctness fuzzer ( i . e .
<nl> + / / - - correctness - fuzzer - suppressions is preset ) , we crash <nl> + / / instead so it is not incorrectly reported as a correctness <nl> + / / violation . See https : / / crbug . com / 828293 # c4 <nl> + if ( FLAG_correctness_fuzzer_suppressions ) { <nl> + FATAL ( " could not allocate wasm memory " ) ; <nl> + } <nl> + AddAllocationStatusSample ( <nl> + heap - > isolate ( ) , AllocationStatus : : kAddressSpaceLimitReachedFailure ) ; <nl> + return nullptr ; <nl> + } <nl> + <nl> + / / The Reserve makes the whole region inaccessible by default . <nl> + DCHECK_NULL ( * allocation_base ) ; <nl> + auto allocate_pages = [ & ] { <nl> + * allocation_base = <nl> + AllocatePages ( GetPlatformPageAllocator ( ) , nullptr , * allocation_length , <nl> + kWasmPageSize , PageAllocator : : kNoAccess ) ; <nl> + return * allocation_base ! = nullptr ; <nl> + } ; <nl> + if ( ! RunWithGCAndRetry ( allocate_pages , heap , & did_retry ) ) { <nl> + memory_tracker - > ReleaseReservation ( * allocation_length ) ; <nl> + AddAllocationStatusSample ( heap - > isolate ( ) , AllocationStatus : : kOtherFailure ) ; <nl> + return nullptr ; <nl> + } <nl> + <nl> + byte * memory = reinterpret_cast < byte * > ( * allocation_base ) ; <nl> + if ( kRequireFullGuardRegions ) { <nl> + memory + = kNegativeGuardSize ; <nl> + } <nl> + <nl> + / / Make the part we care about accessible . <nl> + auto commit_memory = [ & ] { <nl> + return size = = 0 | | SetPermissions ( GetPlatformPageAllocator ( ) , memory , <nl> + RoundUp ( size , kWasmPageSize ) , <nl> + PageAllocator : : kReadWrite ) ; <nl> + } ; <nl> + / / SetPermissions commits the extra memory , which may put us over the <nl> + / / process memory limit . If so , report this as an OOM . <nl> + if ( ! RunWithGCAndRetry ( commit_memory , heap , & did_retry ) ) { <nl> + V8 : : FatalProcessOutOfMemory ( nullptr , " TryAllocateBackingStore " ) ; <nl> + } <nl> + <nl> + memory_tracker - > RegisterAllocation ( heap - > isolate ( ) , * allocation_base , <nl> + * allocation_length , memory , size ) ; <nl> + AddAllocationStatusSample ( heap - > isolate ( ) , <nl> + did_retry ? AllocationStatus : : kSuccessAfterRetry <nl> + : AllocationStatus : : kSuccess ) ; <nl> + return memory ; <nl> + } <nl> + <nl> + # if V8_TARGET_ARCH_MIPS64 <nl> + / / MIPS64 has a user space of 2 ^ 40 bytes on most processors , so the <nl> + / / address space limit needs to be smaller . <nl> + constexpr size_t kAddressSpaceLimit = 0x8000000000L ; / / 512 GiB <nl> + # elif V8_TARGET_ARCH_64_BIT <nl> + constexpr size_t kAddressSpaceLimit = 0x10100000000L ; / / 1 TiB + 4 GiB <nl> + # else <nl> + constexpr size_t kAddressSpaceLimit = 0xC0000000 ; / / 3 GiB <nl> + # endif <nl> + <nl> + } / / namespace <nl> + <nl> + WasmMemoryTracker : : ~ WasmMemoryTracker ( ) { <nl> + / / All reserved address space should be released before the allocation tracker <nl> + / / is destroyed . <nl> + DCHECK_EQ ( reserved_address_space_ , 0u ) ; <nl> + DCHECK_EQ ( allocated_address_space_ , 0u ) ; <nl> + DCHECK ( allocations_ .
empty ( ) ) ; <nl> + } <nl> + <nl> + void * WasmMemoryTracker : : TryAllocateBackingStoreForTesting ( <nl> + Heap * heap , size_t size , void * * allocation_base , <nl> + size_t * allocation_length ) { <nl> + return TryAllocateBackingStore ( this , heap , size , size , allocation_base , <nl> + allocation_length ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : FreeBackingStoreForTesting ( base : : AddressRegion memory , <nl> + void * buffer_start ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + ReleaseAllocation_Locked ( nullptr , buffer_start ) ; <nl> + CHECK ( FreePages ( GetPlatformPageAllocator ( ) , <nl> + reinterpret_cast < void * > ( memory . begin ( ) ) , memory . size ( ) ) ) ; <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : ReserveAddressSpace ( size_t num_bytes ) { <nl> + size_t reservation_limit = kAddressSpaceLimit ; <nl> + while ( true ) { <nl> + size_t old_count = reserved_address_space_ . load ( ) ; <nl> + if ( old_count > reservation_limit ) return false ; <nl> + if ( reservation_limit - old_count < num_bytes ) return false ; <nl> + if ( reserved_address_space_ . compare_exchange_weak ( old_count , <nl> + old_count + num_bytes ) ) { <nl> + return true ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void WasmMemoryTracker : : ReleaseReservation ( size_t num_bytes ) { <nl> + size_t const old_reserved = reserved_address_space_ . fetch_sub ( num_bytes ) ; <nl> + USE ( old_reserved ) ; <nl> + DCHECK_LE ( num_bytes , old_reserved ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : RegisterAllocation ( Isolate * isolate , <nl> + void * allocation_base , <nl> + size_t allocation_length , <nl> + void * buffer_start , <nl> + size_t buffer_length ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + <nl> + allocated_address_space_ + = allocation_length ; <nl> + / / Report address space usage in MiB so the full range fits in an int on all <nl> + / / platforms . <nl> + isolate - > counters ( ) - > wasm_address_space_usage_mb ( ) - > AddSample ( <nl> + static_cast < int > ( allocated_address_space_ / MB ) ) ; <nl> + <nl> + allocations_ . emplace ( buffer_start , <nl> + AllocationData { allocation_base , allocation_length , <nl> + buffer_start , buffer_length } ) ; <nl> + } <nl> + <nl> + WasmMemoryTracker : : AllocationData WasmMemoryTracker : : ReleaseAllocation_Locked ( <nl> + Isolate * isolate , const void * buffer_start ) { <nl> + auto find_result = allocations_ . find ( buffer_start ) ; <nl> + CHECK_NE ( find_result , allocations_ . end ( ) ) ; <nl> + <nl> + size_t num_bytes = find_result - > second . allocation_length ; <nl> + DCHECK_LE ( num_bytes , reserved_address_space_ ) ; <nl> + DCHECK_LE ( num_bytes , allocated_address_space_ ) ; <nl> + reserved_address_space_ - = num_bytes ; <nl> + allocated_address_space_ - = num_bytes ; <nl> + <nl> + AllocationData allocation_data = find_result - > second ; <nl> + allocations_ . erase ( find_result ) ; <nl> + return allocation_data ; <nl> + } <nl> + <nl> + const WasmMemoryTracker : : AllocationData * WasmMemoryTracker : : FindAllocationData ( <nl> + const void * buffer_start ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + const auto & result = allocations_ . find ( buffer_start ) ; <nl> + if ( result ! = allocations_ . end ( ) ) { <nl> + return & result - > second ; <nl> + } <nl> + return nullptr ; <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : IsWasmMemory ( const void * buffer_start ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + return allocations_ . find ( buffer_start ) ! 
= allocations_ . end ( ) ; <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : IsWasmSharedMemory ( const void * buffer_start ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + const auto & result = allocations_ . find ( buffer_start ) ; <nl> + / / Should be a wasm allocation , and registered as a shared allocation . <nl> + return ( result ! = allocations_ . end ( ) & & result - > second . is_shared ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : MarkWasmMemoryNotGrowable ( <nl> + Handle < JSArrayBuffer > buffer ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + const auto & allocation = allocations_ . find ( buffer - > backing_store ( ) ) ; <nl> + if ( allocation = = allocations_ . end ( ) ) return ; <nl> + allocation - > second . is_growable = false ; <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : IsWasmMemoryGrowable ( Handle < JSArrayBuffer > buffer ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + if ( buffer - > backing_store ( ) = = nullptr ) return true ; <nl> + const auto & allocation = allocations_ . find ( buffer - > backing_store ( ) ) ; <nl> + if ( allocation = = allocations_ . end ( ) ) return false ; <nl> + return allocation - > second . is_growable ; <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : FreeWasmMemory ( Isolate * isolate , <nl> + const void * buffer_start ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + const auto & result = allocations_ . find ( buffer_start ) ; <nl> + if ( result = = allocations_ . end ( ) ) return false ; <nl> + if ( result - > second . is_shared ) { <nl> + / / This is a shared WebAssembly . Memory allocation <nl> + FreeMemoryIfNotShared_Locked ( isolate , buffer_start ) ; <nl> + return true ; <nl> + } <nl> + / / This is a WebAssembly . Memory allocation <nl> + const AllocationData allocation = <nl> + ReleaseAllocation_Locked ( isolate , buffer_start ) ; <nl> + CHECK ( FreePages ( GetPlatformPageAllocator ( ) , allocation . allocation_base , <nl> + allocation . allocation_length ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : RegisterWasmMemoryAsShared ( <nl> + Handle < WasmMemoryObject > object , Isolate * isolate ) { <nl> + / / Only register with the tracker if shared grow is enabled . <nl> + if ( ! FLAG_wasm_grow_shared_memory ) return ; <nl> + const void * backing_store = object - > array_buffer ( ) . backing_store ( ) ; <nl> + / / TODO ( V8 : 8810 ) : This should be a DCHECK , currently some tests do not <nl> + / / use a full WebAssembly . Memory , and fail on registering so return early . <nl> + if ( ! IsWasmMemory ( backing_store ) ) return ; <nl> + { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + / / Register as shared allocation when it is post messaged . This happens only <nl> + / / the first time a buffer is shared over Postmessage , and track all the <nl> + / / memory objects that are associated with this backing store . <nl> + RegisterSharedWasmMemory_Locked ( object , isolate ) ; <nl> + / / Add isolate to backing store mapping . <nl> + isolates_per_buffer_ [ backing_store ] . emplace ( isolate ) ; <nl> + } <nl> + } <nl> + <nl> + void WasmMemoryTracker : : SetPendingUpdateOnGrow ( Handle < JSArrayBuffer > old_buffer , <nl> + size_t new_size ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + / / Keep track of the new size of the buffer associated with each backing <nl> + / / store . 
<nl> + AddBufferToGrowMap_Locked ( old_buffer , new_size ) ; <nl> + / / Request a GROW_SHARED_MEMORY interrupt on all other isolates <nl> + TriggerSharedGrowInterruptOnAllIsolates_Locked ( old_buffer ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : UpdateSharedMemoryInstances ( Isolate * isolate ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + / / For every buffer in the grow_update_map_ , update the size for all the <nl> + / / memory objects associated with this isolate . <nl> + for ( auto it = grow_update_map_ . begin ( ) ; it ! = grow_update_map_ . end ( ) ; ) { <nl> + UpdateSharedMemoryStateOnInterrupt_Locked ( isolate , it - > first , it - > second ) ; <nl> + / / If all the isolates that share this buffer have hit a stack check , their <nl> + / / memory objects are updated , and this grow entry can be erased . <nl> + if ( AreAllIsolatesUpdated_Locked ( it - > first ) ) { <nl> + it = grow_update_map_ . erase ( it ) ; <nl> + } else { <nl> + it + + ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void WasmMemoryTracker : : RegisterSharedWasmMemory_Locked ( <nl> + Handle < WasmMemoryObject > object , Isolate * isolate ) { <nl> + DCHECK ( object - > array_buffer ( ) . is_shared ( ) ) ; <nl> + <nl> + void * backing_store = object - > array_buffer ( ) . backing_store ( ) ; <nl> + / / The allocation of a WasmMemoryObject should always be registered with the <nl> + / / WasmMemoryTracker . <nl> + const auto & result = allocations_ . find ( backing_store ) ; <nl> + if ( result = = allocations_ . end ( ) ) return ; <nl> + <nl> + / / Register the allocation as shared , if not already marked as shared . <nl> + if ( ! result - > second . is_shared ) result - > second . is_shared = true ; <nl> + <nl> + / / Create persistent global handles for the memory objects that are shared <nl> + GlobalHandles * global_handles = isolate - > global_handles ( ) ; <nl> + object = global_handles - > Create ( * object ) ; <nl> + <nl> + / / Add to memory_object_vector to track memory objects , instance objects <nl> + / / that will need to be updated on a Grow call <nl> + result - > second . memory_object_vector . push_back ( <nl> + SharedMemoryObjectState ( object , isolate ) ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : AddBufferToGrowMap_Locked ( <nl> + Handle < JSArrayBuffer > old_buffer , size_t new_size ) { <nl> + void * backing_store = old_buffer - > backing_store ( ) ; <nl> + auto entry = grow_update_map_ . find ( old_buffer - > backing_store ( ) ) ; <nl> + if ( entry = = grow_update_map_ . end ( ) ) { <nl> + / / No pending grow for this backing store , add to map . <nl> + grow_update_map_ . emplace ( backing_store , new_size ) ; <nl> + return ; <nl> + } <nl> + / / If grow on the same buffer is requested before the update is complete , <nl> + / / the new_size should always be greater than or equal to the old_size . Equal <nl> + / / in the case that grow ( 0 ) is called , but new buffer handles are mandated <nl> + / / by the Spec . <nl> + CHECK_LE ( entry - > second , new_size ) ; <nl> + entry - > second = new_size ; <nl> + / / Flush instances_updated every time a new grow size needs to be updated . <nl> + ClearUpdatedInstancesOnPendingGrow_Locked ( backing_store ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : TriggerSharedGrowInterruptOnAllIsolates_Locked ( <nl> + Handle < JSArrayBuffer > old_buffer ) { <nl> + / / Request a GrowSharedMemory interrupt on all the isolates that share <nl> + / / the backing store . <nl> + const auto & isolates = isolates_per_buffer_ .
find ( old_buffer - > backing_store ( ) ) ; <nl> + for ( const auto & isolate : isolates - > second ) { <nl> + isolate - > stack_guard ( ) - > RequestGrowSharedMemory ( ) ; <nl> + } <nl> + } <nl> + <nl> + void WasmMemoryTracker : : UpdateSharedMemoryStateOnInterrupt_Locked ( <nl> + Isolate * isolate , void * backing_store , size_t new_size ) { <nl> + / / Update objects only if there are memory objects that share this backing <nl> + / / store , and this isolate is marked as one of the isolates that shares this <nl> + / / buffer . <nl> + if ( MemoryObjectsNeedUpdate_Locked ( isolate , backing_store ) ) { <nl> + UpdateMemoryObjectsForIsolate_Locked ( isolate , backing_store , new_size ) ; <nl> + / / As the memory objects are updated , add this isolate to a set of isolates <nl> + / / that are updated on grow . This state is maintained to track if all the <nl> + / / isolates that share the backing store have hit a StackCheck . <nl> + isolates_updated_on_grow_ [ backing_store ] . emplace ( isolate ) ; <nl> + } <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : AreAllIsolatesUpdated_Locked ( <nl> + const void * backing_store ) { <nl> + const auto & buffer_isolates = isolates_per_buffer_ . find ( backing_store ) ; <nl> + / / No isolates share this buffer . <nl> + if ( buffer_isolates = = isolates_per_buffer_ . end ( ) ) return true ; <nl> + const auto & updated_isolates = isolates_updated_on_grow_ . find ( backing_store ) ; <nl> + / / Some isolates share the buffer , but no isolates have been updated yet . <nl> + if ( updated_isolates = = isolates_updated_on_grow_ . end ( ) ) return false ; <nl> + if ( buffer_isolates - > second = = updated_isolates - > second ) { <nl> + / / If all the isolates that share this backing_store have hit a stack check , <nl> + / / and the memory objects have been updated , remove the entry from the <nl> + / / update map , and return true . <nl> + isolates_updated_on_grow_ . erase ( backing_store ) ; <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : ClearUpdatedInstancesOnPendingGrow_Locked ( <nl> + const void * backing_store ) { <nl> + / / On multiple grows to the same buffer , the entries for that buffer should be <nl> + / / flushed . This is done so that any consecutive grows to the same buffer will <nl> + / / update all instances that share this buffer . <nl> + const auto & value = isolates_updated_on_grow_ . find ( backing_store ) ; <nl> + if ( value ! = isolates_updated_on_grow_ . end ( ) ) { <nl> + value - > second . clear ( ) ; <nl> + } <nl> + } <nl> + <nl> + void WasmMemoryTracker : : UpdateMemoryObjectsForIsolate_Locked ( <nl> + Isolate * isolate , void * backing_store , size_t new_size ) { <nl> + const auto & result = allocations_ . find ( backing_store ) ; <nl> + if ( result = = allocations_ . end ( ) | | ! result - > second . is_shared ) return ; <nl> + for ( const auto & memory_obj_state : result - > second . memory_object_vector ) { <nl> + DCHECK_NE ( memory_obj_state . isolate , nullptr ) ; <nl> + if ( isolate = = memory_obj_state . isolate ) { <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < WasmMemoryObject > memory_object = memory_obj_state . memory_object ; <nl> + DCHECK ( memory_object - > IsWasmMemoryObject ( ) ) ; <nl> + DCHECK ( memory_object - > array_buffer ( ) . is_shared ( ) ) ; <nl> + / / Permissions are already adjusted , but create a new buffer with the new <nl> + / / size and old attributes . The buffer has already been allocated , <nl> + / / just create a new buffer with the same backing store .
<nl> + bool is_external = memory_object - > array_buffer ( ) . is_external ( ) ; <nl> + Handle < JSArrayBuffer > new_buffer = SetupArrayBuffer ( <nl> + isolate , backing_store , new_size , is_external , SharedFlag : : kShared ) ; <nl> + memory_obj_state . memory_object - > update_instances ( isolate , new_buffer ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : MemoryObjectsNeedUpdate_Locked ( <nl> + Isolate * isolate , const void * backing_store ) { <nl> + / / Return true if this buffer has memory_objects it needs to update . <nl> + const auto & result = allocations_ . find ( backing_store ) ; <nl> + if ( result = = allocations_ . end ( ) | | ! result - > second . is_shared ) return false ; <nl> + / / Only update if the buffer has memory objects that need to be updated . <nl> + if ( result - > second . memory_object_vector . empty ( ) ) return false ; <nl> + const auto & isolate_entry = isolates_per_buffer_ . find ( backing_store ) ; <nl> + return ( isolate_entry ! = isolates_per_buffer_ . end ( ) & & <nl> + isolate_entry - > second . count ( isolate ) ! = 0 ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : FreeMemoryIfNotShared_Locked ( <nl> + Isolate * isolate , const void * backing_store ) { <nl> + RemoveSharedBufferState_Locked ( isolate , backing_store ) ; <nl> + if ( CanFreeSharedMemory_Locked ( backing_store ) ) { <nl> + const AllocationData allocation = <nl> + ReleaseAllocation_Locked ( isolate , backing_store ) ; <nl> + CHECK ( FreePages ( GetPlatformPageAllocator ( ) , allocation . allocation_base , <nl> + allocation . allocation_length ) ) ; <nl> + } <nl> + } <nl> + <nl> + bool WasmMemoryTracker : : CanFreeSharedMemory_Locked ( const void * backing_store ) { <nl> + const auto & value = isolates_per_buffer_ . find ( backing_store ) ; <nl> + / / If no isolates share this buffer , backing store can be freed . <nl> + / / Erase the buffer entry . <nl> + if ( value = = isolates_per_buffer_ . end ( ) | | value - > second . empty ( ) ) return true ; <nl> + return false ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : RemoveSharedBufferState_Locked ( <nl> + Isolate * isolate , const void * backing_store ) { <nl> + if ( isolate ! = nullptr ) { <nl> + DestroyMemoryObjectsAndRemoveIsolateEntry_Locked ( isolate , backing_store ) ; <nl> + RemoveIsolateFromBackingStore_Locked ( isolate , backing_store ) ; <nl> + } else { <nl> + / / This happens for externalized contents cleanup shared memory state <nl> + / / associated with this buffer across isolates . <nl> + DestroyMemoryObjectsAndRemoveIsolateEntry_Locked ( backing_store ) ; <nl> + } <nl> + } <nl> + <nl> + void WasmMemoryTracker : : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked ( <nl> + const void * backing_store ) { <nl> + const auto & result = allocations_ . find ( backing_store ) ; <nl> + CHECK ( result ! = allocations_ . end ( ) & & result - > second . is_shared ) ; <nl> + auto & object_vector = result - > second . memory_object_vector ; <nl> + if ( object_vector . empty ( ) ) return ; <nl> + for ( const auto & mem_obj_state : object_vector ) { <nl> + GlobalHandles : : Destroy ( mem_obj_state . memory_object . location ( ) ) ; <nl> + } <nl> + object_vector . clear ( ) ; <nl> + / / Remove isolate from backing store map . <nl> + isolates_per_buffer_ . 
erase ( backing_store ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked ( <nl> + Isolate * isolate , const void * backing_store ) { <nl> + / / This gets called when an internal handle to the ArrayBuffer should be <nl> + / / freed , on heap tear down for that isolate , remove the memory objects <nl> + / / that are associated with this buffer and isolate . <nl> + const auto & result = allocations_ . find ( backing_store ) ; <nl> + CHECK ( result ! = allocations_ . end ( ) & & result - > second . is_shared ) ; <nl> + auto & object_vector = result - > second . memory_object_vector ; <nl> + if ( object_vector . empty ( ) ) return ; <nl> + for ( auto it = object_vector . begin ( ) ; it ! = object_vector . end ( ) ; ) { <nl> + if ( isolate = = it - > isolate ) { <nl> + GlobalHandles : : Destroy ( it - > memory_object . location ( ) ) ; <nl> + it = object_vector . erase ( it ) ; <nl> + } else { <nl> + + + it ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void WasmMemoryTracker : : RemoveIsolateFromBackingStore_Locked ( <nl> + Isolate * isolate , const void * backing_store ) { <nl> + const auto & isolates = isolates_per_buffer_ . find ( backing_store ) ; <nl> + if ( isolates = = isolates_per_buffer_ . end ( ) | | isolates - > second . empty ( ) ) <nl> + return ; <nl> + isolates - > second . erase ( isolate ) ; <nl> + } <nl> + <nl> + void WasmMemoryTracker : : DeleteSharedMemoryObjectsOnIsolate ( Isolate * isolate ) { <nl> + base : : MutexGuard scope_lock ( & mutex_ ) ; <nl> + / / This is possible for buffers that are externalized , and their handles have <nl> + / / been freed , the backing store wasn ' t released because externalized contents <nl> + / / were using it . <nl> + if ( isolates_per_buffer_ . empty ( ) ) return ; <nl> + for ( auto & entry : isolates_per_buffer_ ) { <nl> + if ( entry . second . find ( isolate ) = = entry . second . end ( ) ) continue ; <nl> + const void * backing_store = entry . first ; <nl> + entry . second . erase ( isolate ) ; <nl> + DestroyMemoryObjectsAndRemoveIsolateEntry_Locked ( isolate , backing_store ) ; <nl> + } <nl> + for ( auto & buffer_isolates : isolates_updated_on_grow_ ) { <nl> + auto & isolates = buffer_isolates . second ; <nl> + isolates . erase ( isolate ) ; <nl> + } <nl> + } <nl> + <nl> + Handle < JSArrayBuffer > SetupArrayBuffer ( Isolate * isolate , void * backing_store , <nl> + size_t size , bool is_external , <nl> + SharedFlag shared ) { <nl> + Handle < JSArrayBuffer > buffer = <nl> + isolate - > factory ( ) - > NewJSArrayBuffer ( shared , AllocationType : : kOld ) ; <nl> + constexpr bool is_wasm_memory = true ; <nl> + JSArrayBuffer : : Setup ( buffer , isolate , is_external , backing_store , size , <nl> + shared , is_wasm_memory ) ; <nl> + buffer - > set_is_detachable ( false ) ; <nl> + return buffer ; <nl> + } <nl> + <nl> + MaybeHandle < JSArrayBuffer > AllocateAndSetupArrayBuffer ( Isolate * isolate , <nl> + size_t size , <nl> + size_t maximum_size , <nl> + SharedFlag shared ) { <nl> + / / Enforce flag - limited maximum allocation size . 
<nl> + if ( size > max_mem_bytes ( ) ) return { } ; <nl> + <nl> + WasmMemoryTracker * memory_tracker = isolate - > wasm_engine ( ) - > memory_tracker ( ) ; <nl> + <nl> + / / Set by TryAllocateBackingStore or GetEmptyBackingStore <nl> + void * allocation_base = nullptr ; <nl> + size_t allocation_length = 0 ; <nl> + <nl> + void * memory = TryAllocateBackingStore ( memory_tracker , isolate - > heap ( ) , size , <nl> + maximum_size , & allocation_base , <nl> + & allocation_length ) ; <nl> + if ( memory = = nullptr ) return { } ; <nl> + <nl> + # if DEBUG <nl> + / / Double check the API allocator actually zero - initialized the memory . <nl> + const byte * bytes = reinterpret_cast < const byte * > ( memory ) ; <nl> + for ( size_t i = 0 ; i < size ; + + i ) { <nl> + DCHECK_EQ ( 0 , bytes [ i ] ) ; <nl> + } <nl> + # endif <nl> + <nl> + reinterpret_cast < v8 : : Isolate * > ( isolate ) <nl> + - > AdjustAmountOfExternalAllocatedMemory ( size ) ; <nl> + <nl> + constexpr bool is_external = false ; <nl> + return SetupArrayBuffer ( isolate , memory , size , is_external , shared ) ; <nl> + } <nl> + <nl> + MaybeHandle < JSArrayBuffer > NewArrayBuffer ( Isolate * isolate , size_t size ) { <nl> + return AllocateAndSetupArrayBuffer ( isolate , size , size , <nl> + SharedFlag : : kNotShared ) ; <nl> + } <nl> + <nl> + MaybeHandle < JSArrayBuffer > NewSharedArrayBuffer ( Isolate * isolate , <nl> + size_t initial_size , <nl> + size_t max_size ) { <nl> + return AllocateAndSetupArrayBuffer ( isolate , initial_size , max_size , <nl> + SharedFlag : : kShared ) ; <nl> + } <nl> + <nl> + void DetachMemoryBuffer ( Isolate * isolate , Handle < JSArrayBuffer > buffer , <nl> + bool free_memory ) { <nl> + if ( buffer - > is_shared ( ) ) return ; / / Detaching shared buffers is impossible . <nl> + DCHECK ( ! buffer - > is_detachable ( ) ) ; <nl> + <nl> + const bool is_external = buffer - > is_external ( ) ; <nl> + DCHECK ( ! buffer - > is_detachable ( ) ) ; <nl> + if ( ! is_external ) { <nl> + buffer - > set_is_external ( true ) ; <nl> + isolate - > heap ( ) - > UnregisterArrayBuffer ( * buffer ) ; <nl> + if ( free_memory ) { <nl> + / / We need to free the memory before detaching the buffer because <nl> + / / FreeBackingStore reads buffer - > allocation_base ( ) , which is nulled out <nl> + / / by Detach . This means there is a dangling pointer until we detach the <nl> + / / buffer . Since there is no way for the user to directly call <nl> + / / FreeBackingStore , we can ensure this is safe . <nl> + buffer - > FreeBackingStoreFromMainThread ( ) ; <nl> + } <nl> + } <nl> + <nl> + DCHECK ( buffer - > is_external ( ) ) ; <nl> + buffer - > set_is_wasm_memory ( false ) ; <nl> + buffer - > set_is_detachable ( true ) ; <nl> + buffer - > Detach ( ) ; <nl> + } <nl> + <nl> + } / / namespace wasm <nl> + } / / namespace internal <nl> + } / / namespace v8 <nl> new file mode 100644 <nl> index 00000000000 . . ecb6203ac5a <nl> mmm / dev / null <nl> ppp b / src / wasm / wasm - memory . h <nl> <nl> + / / Copyright 2017 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # ifndef V8_WASM_WASM_MEMORY_H_ <nl> + # define V8_WASM_WASM_MEMORY_H_ <nl> + <nl> + # include < atomic > <nl> + # include < unordered_map > <nl> + # include < unordered_set > <nl> + <nl> + # include " src / base / platform / mutex . h " <nl> + # include " src / flags / flags . h " <nl> + # include " src / handles / handles . 
h " <nl> + # include " src / objects / js - array - buffer . h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + namespace wasm { <nl> + <nl> + / / The { WasmMemoryTracker } tracks reservations and allocations for wasm memory <nl> + / / and wasm code . There is an upper limit on the total reserved memory which is <nl> + / / checked by this class . Allocations are stored so we can look them up when an <nl> + / / array buffer dies and figure out the reservation and allocation bounds for <nl> + / / that buffer . <nl> + class WasmMemoryTracker { <nl> + public : <nl> + WasmMemoryTracker ( ) = default ; <nl> + V8_EXPORT_PRIVATE ~ WasmMemoryTracker ( ) ; <nl> + <nl> + / / ReserveAddressSpace attempts to increase the reserved address space counter <nl> + / / by { num_bytes } . Returns true if successful ( meaning it is okay to go ahead <nl> + / / and reserve { num_bytes } bytes ) , false otherwise . <nl> + bool ReserveAddressSpace ( size_t num_bytes ) ; <nl> + <nl> + void RegisterAllocation ( Isolate * isolate , void * allocation_base , <nl> + size_t allocation_length , void * buffer_start , <nl> + size_t buffer_length ) ; <nl> + <nl> + struct SharedMemoryObjectState { <nl> + Handle < WasmMemoryObject > memory_object ; <nl> + Isolate * isolate ; <nl> + <nl> + SharedMemoryObjectState ( ) = default ; <nl> + SharedMemoryObjectState ( Handle < WasmMemoryObject > memory_object , <nl> + Isolate * isolate ) <nl> + : memory_object ( memory_object ) , isolate ( isolate ) { } <nl> + } ; <nl> + <nl> + struct AllocationData { <nl> + void * allocation_base = nullptr ; <nl> + size_t allocation_length = 0 ; <nl> + void * buffer_start = nullptr ; <nl> + size_t buffer_length = 0 ; <nl> + bool is_shared = false ; <nl> + / / Wasm memories are growable by default , this will be false only when <nl> + / / shared with an asmjs module . <nl> + bool is_growable = true ; <nl> + <nl> + / / Track Wasm Memory instances across isolates , this is populated on <nl> + / / PostMessage using persistent handles for memory objects . <nl> + std : : vector < WasmMemoryTracker : : SharedMemoryObjectState > <nl> + memory_object_vector ; <nl> + <nl> + private : <nl> + AllocationData ( ) = default ; <nl> + AllocationData ( void * allocation_base , size_t allocation_length , <nl> + void * buffer_start , size_t buffer_length ) <nl> + : allocation_base ( allocation_base ) , <nl> + allocation_length ( allocation_length ) , <nl> + buffer_start ( buffer_start ) , <nl> + buffer_length ( buffer_length ) { <nl> + DCHECK_LE ( reinterpret_cast < uintptr_t > ( allocation_base ) , <nl> + reinterpret_cast < uintptr_t > ( buffer_start ) ) ; <nl> + DCHECK_GE ( <nl> + reinterpret_cast < uintptr_t > ( allocation_base ) + allocation_length , <nl> + reinterpret_cast < uintptr_t > ( buffer_start ) ) ; <nl> + DCHECK_GE ( <nl> + reinterpret_cast < uintptr_t > ( allocation_base ) + allocation_length , <nl> + reinterpret_cast < uintptr_t > ( buffer_start ) + buffer_length ) ; <nl> + } <nl> + <nl> + friend WasmMemoryTracker ; <nl> + } ; <nl> + <nl> + / / Allow tests to allocate a backing store the same way as we do it for <nl> + / / WebAssembly memory . This is used in unit tests for trap handler to <nl> + / / generate the same signals / exceptions for invalid memory accesses as <nl> + / / we would get with WebAssembly memory . 
<nl> + V8_EXPORT_PRIVATE void * TryAllocateBackingStoreForTesting ( <nl> + Heap * heap , size_t size , void * * allocation_base , <nl> + size_t * allocation_length ) ; <nl> + <nl> + / / Free memory allocated with TryAllocateBackingStoreForTesting . <nl> + V8_EXPORT_PRIVATE void FreeBackingStoreForTesting ( base : : AddressRegion memory , <nl> + void * buffer_start ) ; <nl> + <nl> + / / Decreases the amount of reserved address space . <nl> + void ReleaseReservation ( size_t num_bytes ) ; <nl> + <nl> + V8_EXPORT_PRIVATE bool IsWasmMemory ( const void * buffer_start ) ; <nl> + <nl> + bool IsWasmSharedMemory ( const void * buffer_start ) ; <nl> + <nl> + / / Returns a pointer to a Wasm buffer ' s allocation data , or nullptr if the <nl> + / / buffer is not tracked . <nl> + V8_EXPORT_PRIVATE const AllocationData * FindAllocationData ( <nl> + const void * buffer_start ) ; <nl> + <nl> + / / Free Memory allocated by the Wasm memory tracker <nl> + bool FreeWasmMemory ( Isolate * isolate , const void * buffer_start ) ; <nl> + <nl> + void MarkWasmMemoryNotGrowable ( Handle < JSArrayBuffer > buffer ) ; <nl> + <nl> + bool IsWasmMemoryGrowable ( Handle < JSArrayBuffer > buffer ) ; <nl> + <nl> + / / When WebAssembly . Memory is transferred over PostMessage , register the <nl> + / / allocation as shared and track the memory objects that will need <nl> + / / updating if memory is resized . <nl> + void RegisterWasmMemoryAsShared ( Handle < WasmMemoryObject > object , <nl> + Isolate * isolate ) ; <nl> + <nl> + / / This method is called when the underlying backing store is grown , but <nl> + / / instances that share the backing_store have not yet been updated . <nl> + void SetPendingUpdateOnGrow ( Handle < JSArrayBuffer > old_buffer , <nl> + size_t new_size ) ; <nl> + <nl> + / / Interrupt handler for GROW_SHARED_MEMORY interrupt . Update memory objects <nl> + / / and instances that share the memory objects after a Grow call . <nl> + void UpdateSharedMemoryInstances ( Isolate * isolate ) ; <nl> + <nl> + / / Due to timing of when buffers are garbage collected , vs . when isolate <nl> + / / object handles are destroyed , it is possible to leak global handles . To <nl> + / / avoid this , cleanup any global handles on isolate destruction if any exist . <nl> + void DeleteSharedMemoryObjectsOnIsolate ( Isolate * isolate ) ; <nl> + <nl> + / / Allocation results are reported to UMA <nl> + / / <nl> + / / See wasm_memory_allocation_result in counters . h <nl> + enum class AllocationStatus { <nl> + kSuccess , / / Succeeded on the first try <nl> + <nl> + kSuccessAfterRetry , / / Succeeded after garbage collection <nl> + <nl> + kAddressSpaceLimitReachedFailure , / / Failed because Wasm is at its address <nl> + / / space limit <nl> + <nl> + kOtherFailure / / Failed for an unknown reason <nl> + } ; <nl> + <nl> + private : <nl> + / / Helper methods to free memory only if not shared by other isolates , memory <nl> + / / objects . <nl> + void FreeMemoryIfNotShared_Locked ( Isolate * isolate , <nl> + const void * backing_store ) ; <nl> + bool CanFreeSharedMemory_Locked ( const void * backing_store ) ; <nl> + void RemoveSharedBufferState_Locked ( Isolate * isolate , <nl> + const void * backing_store ) ; <nl> + <nl> + / / Registers the allocation as shared , and tracks all the memory objects <nl> + / / associates with this allocation across isolates . 
<nl> + void RegisterSharedWasmMemory_Locked ( Handle < WasmMemoryObject > object , <nl> + Isolate * isolate ) ; <nl> + <nl> + / / Map the new size after grow to the buffer backing store , so that instances <nl> + / / and memory objects that share the WebAssembly . Memory across isolates can <nl> + / / be updated . . <nl> + void AddBufferToGrowMap_Locked ( Handle < JSArrayBuffer > old_buffer , <nl> + size_t new_size ) ; <nl> + <nl> + / / Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory <nl> + / / objects that share this buffer . <nl> + void TriggerSharedGrowInterruptOnAllIsolates_Locked ( <nl> + Handle < JSArrayBuffer > old_buffer ) ; <nl> + <nl> + / / When isolates hit a stack check , update the memory objects associated with <nl> + / / that isolate . <nl> + void UpdateSharedMemoryStateOnInterrupt_Locked ( Isolate * isolate , <nl> + void * backing_store , <nl> + size_t new_size ) ; <nl> + <nl> + / / Check if all the isolates that share a backing_store have hit a stack <nl> + / / check . If a stack check is hit , and the backing store is pending grow , <nl> + / / this isolate will have updated memory objects . <nl> + bool AreAllIsolatesUpdated_Locked ( const void * backing_store ) ; <nl> + <nl> + / / If a grow call is made to a buffer with a pending grow , and all the <nl> + / / isolates that share this buffer have not hit a StackCheck , clear the set of <nl> + / / already updated instances so they can be updated with the new size on the <nl> + / / most recent grow call . <nl> + void ClearUpdatedInstancesOnPendingGrow_Locked ( const void * backing_store ) ; <nl> + <nl> + / / Helper functions to update memory objects on grow , and maintain state for <nl> + / / which isolates hit a stack check . <nl> + void UpdateMemoryObjectsForIsolate_Locked ( Isolate * isolate , <nl> + void * backing_store , <nl> + size_t new_size ) ; <nl> + bool MemoryObjectsNeedUpdate_Locked ( Isolate * isolate , <nl> + const void * backing_store ) ; <nl> + <nl> + / / Destroy global handles to memory objects , and remove backing store from <nl> + / / isolates_per_buffer on Free . <nl> + void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked ( <nl> + Isolate * isolate , const void * backing_store ) ; <nl> + void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked ( <nl> + const void * backing_store ) ; <nl> + <nl> + void RemoveIsolateFromBackingStore_Locked ( Isolate * isolate , <nl> + const void * backing_store ) ; <nl> + <nl> + / / Removes an allocation from the tracker . <nl> + AllocationData ReleaseAllocation_Locked ( Isolate * isolate , <nl> + const void * buffer_start ) ; <nl> + <nl> + / / Clients use a two - part process . First they " reserve " the address space , <nl> + / / which signifies an intent to actually allocate it . This determines whether <nl> + / / doing the allocation would put us over our limit . Once there is a <nl> + / / reservation , clients can do the allocation and register the result . <nl> + / / <nl> + / / We should always have : <nl> + / / allocated_address_space_ < = reserved_address_space_ < = kAddressSpaceLimit <nl> + std : : atomic < size_t > reserved_address_space_ { 0 } ; <nl> + <nl> + / / Used to protect access to the allocated address space counter and <nl> + / / allocation map . This is needed because Wasm memories can be freed on <nl> + / / another thread by the ArrayBufferTracker . 
<nl> + base : : Mutex mutex_ ; <nl> + <nl> + size_t allocated_address_space_ = 0 ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / Protected by { mutex_ } : <nl> + <nl> + / / Track Wasm memory allocation information . This is keyed by the start of the <nl> + / / buffer , rather than by the start of the allocation . <nl> + std : : unordered_map < const void * , AllocationData > allocations_ ; <nl> + <nl> + / / Maps each buffer to the isolates that share the backing store . <nl> + std : : unordered_map < const void * , std : : unordered_set < Isolate * > > <nl> + isolates_per_buffer_ ; <nl> + <nl> + / / Maps which isolates have had a grow interrupt handled on the buffer . This <nl> + / / is maintained to ensure that the instances are updated with the right size <nl> + / / on Grow . <nl> + std : : unordered_map < const void * , std : : unordered_set < Isolate * > > <nl> + isolates_updated_on_grow_ ; <nl> + <nl> + / / Maps backing stores ( void * ) to the size of the underlying memory in <nl> + / / ( size_t ) . An entry to this map is made on a grow call to the corresponding <nl> + / / backing store . On consecutive grow calls to the same backing store , <nl> + / / the size entry is updated . This entry is made right after the mprotect <nl> + / / call to change the protections on a backing_store , so the memory objects <nl> + / / have not been updated yet . The backing store entry in this map is erased <nl> + / / when all the memory objects , or instances that share this backing store <nl> + / / have their bounds updated . <nl> + std : : unordered_map < void * , size_t > grow_update_map_ ; <nl> + <nl> + / / End of fields protected by { mutex_ } . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + DISALLOW_COPY_AND_ASSIGN ( WasmMemoryTracker ) ; <nl> + } ; <nl> + <nl> + / / Attempts to allocate an array buffer with guard regions suitable for trap <nl> + / / handling . If address space is not available , it will return a buffer with <nl> + / / mini - guards that will require bounds checks . <nl> + V8_EXPORT_PRIVATE MaybeHandle < JSArrayBuffer > NewArrayBuffer ( Isolate * , <nl> + size_t size ) ; <nl> + <nl> + / / Attempts to allocate a SharedArrayBuffer with guard regions suitable for <nl> + / / trap handling . If address space is not available , it will try to reserve <nl> + / / up to the maximum for that memory . If all else fails , it will return a <nl> + / / buffer with mini - guards of initial size . <nl> + V8_EXPORT_PRIVATE MaybeHandle < JSArrayBuffer > NewSharedArrayBuffer ( <nl> + Isolate * , size_t initial_size , size_t max_size ) ; <nl> + <nl> + Handle < JSArrayBuffer > SetupArrayBuffer ( <nl> + Isolate * , void * backing_store , size_t size , bool is_external , <nl> + SharedFlag shared = SharedFlag : : kNotShared ) ; <nl> + <nl> + V8_EXPORT_PRIVATE void DetachMemoryBuffer ( Isolate * isolate , <nl> + Handle < JSArrayBuffer > buffer , <nl> + bool free_memory ) ; <nl> + <nl> + } / / namespace wasm <nl> + } / / namespace internal <nl> + } / / namespace v8 <nl> + <nl> + # endif / / V8_WASM_WASM_MEMORY_H_ <nl> mmm a / src / wasm / wasm - module . cc <nl> ppp b / src / wasm / wasm - module . cc <nl> Handle < JSArray > GetCustomSections ( Isolate * isolate , <nl> <nl> / / Make a copy of the payload data in the section . 
<nl> size_t size = section . payload . length ( ) ; <nl> - MaybeHandle < JSArrayBuffer > result = <nl> - isolate - > factory ( ) - > NewJSArrayBufferAndBackingStore ( <nl> - size , InitializedFlag : : kUninitialized ) ; <nl> - Handle < JSArrayBuffer > array_buffer ; <nl> - if ( ! result . ToHandle ( & array_buffer ) ) { <nl> + void * memory = <nl> + size = = 0 ? nullptr : isolate - > array_buffer_allocator ( ) - > Allocate ( size ) ; <nl> + <nl> + if ( size & & ! memory ) { <nl> thrower - > RangeError ( " out of memory allocating custom section data " ) ; <nl> return Handle < JSArray > ( ) ; <nl> } <nl> - memcpy ( array_buffer - > backing_store ( ) , <nl> - wire_bytes . begin ( ) + section . payload . offset ( ) , <nl> + Handle < JSArrayBuffer > buffer = <nl> + isolate - > factory ( ) - > NewJSArrayBuffer ( SharedFlag : : kNotShared ) ; <nl> + constexpr bool is_external = false ; <nl> + JSArrayBuffer : : Setup ( buffer , isolate , is_external , memory , size ) ; <nl> + memcpy ( memory , wire_bytes . begin ( ) + section . payload . offset ( ) , <nl> section . payload . length ( ) ) ; <nl> <nl> - matching_sections . push_back ( array_buffer ) ; <nl> + matching_sections . push_back ( buffer ) ; <nl> } <nl> <nl> int num_custom_sections = static_cast < int > ( matching_sections . size ( ) ) ; <nl> mmm a / src / wasm / wasm - objects . cc <nl> ppp b / src / wasm / wasm - objects . cc <nl> <nl> # include " src / wasm / wasm - code - manager . h " <nl> # include " src / wasm / wasm - engine . h " <nl> # include " src / wasm / wasm - limits . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> # include " src / wasm / wasm - module . h " <nl> # include " src / wasm / wasm - objects - inl . h " <nl> # include " src / wasm / wasm - text . h " <nl> void WasmIndirectFunctionTable : : Resize ( Isolate * isolate , <nl> } <nl> <nl> namespace { <nl> + bool AdjustBufferPermissions ( Isolate * isolate , Handle < JSArrayBuffer > old_buffer , <nl> + size_t new_size ) { <nl> + if ( new_size > old_buffer - > allocation_length ( ) ) return false ; <nl> + void * old_mem_start = old_buffer - > backing_store ( ) ; <nl> + size_t old_size = old_buffer - > byte_length ( ) ; <nl> + if ( old_size ! = new_size ) { <nl> + DCHECK_NOT_NULL ( old_mem_start ) ; <nl> + DCHECK_GE ( new_size , old_size ) ; <nl> + / / If adjusting permissions fails , propagate error back to return <nl> + / / failure to grow . <nl> + if ( ! i : : SetPermissions ( GetPlatformPageAllocator ( ) , old_mem_start , new_size , <nl> + PageAllocator : : kReadWrite ) ) { <nl> + return false ; <nl> + } <nl> + reinterpret_cast < v8 : : Isolate * > ( isolate ) <nl> + - > AdjustAmountOfExternalAllocatedMemory ( new_size - old_size ) ; <nl> + } <nl> + return true ; <nl> + } <nl> <nl> + MaybeHandle < JSArrayBuffer > MemoryGrowBuffer ( Isolate * isolate , <nl> + Handle < JSArrayBuffer > old_buffer , <nl> + size_t new_size ) { <nl> + CHECK_EQ ( 0 , new_size % wasm : : kWasmPageSize ) ; <nl> + / / Reusing the backing store from externalized buffers causes problems with <nl> + / / Blink ' s array buffers . The connection between the two is lost , which can <nl> + / / lead to Blink not knowing about the other reference to the buffer and <nl> + / / freeing it too early . <nl> + if ( old_buffer - > is_external ( ) | | new_size > old_buffer - > allocation_length ( ) ) { <nl> + / / We couldn ' t reuse the old backing store , so create a new one and copy the <nl> + / / old contents in . <nl> + Handle < JSArrayBuffer > new_buffer ; <nl> + if ( ! 
wasm : : NewArrayBuffer ( isolate , new_size ) . ToHandle ( & new_buffer ) ) { <nl> + return { } ; <nl> + } <nl> + void * old_mem_start = old_buffer - > backing_store ( ) ; <nl> + size_t old_size = old_buffer - > byte_length ( ) ; <nl> + if ( old_size = = 0 ) return new_buffer ; <nl> + memcpy ( new_buffer - > backing_store ( ) , old_mem_start , old_size ) ; <nl> + DCHECK ( old_buffer . is_null ( ) | | ! old_buffer - > is_shared ( ) ) ; <nl> + constexpr bool free_memory = true ; <nl> + i : : wasm : : DetachMemoryBuffer ( isolate , old_buffer , free_memory ) ; <nl> + return new_buffer ; <nl> + } else { <nl> + if ( ! AdjustBufferPermissions ( isolate , old_buffer , new_size ) ) return { } ; <nl> + / / NOTE : We must allocate a new array buffer here because the spec <nl> + / / assumes that ArrayBuffers do not change size . <nl> + void * backing_store = old_buffer - > backing_store ( ) ; <nl> + bool is_external = old_buffer - > is_external ( ) ; <nl> + / / Disconnect buffer early so GC won ' t free it . <nl> + i : : wasm : : DetachMemoryBuffer ( isolate , old_buffer , false ) ; <nl> + Handle < JSArrayBuffer > new_buffer = <nl> + wasm : : SetupArrayBuffer ( isolate , backing_store , new_size , is_external ) ; <nl> + return new_buffer ; <nl> + } <nl> + } <nl> + <nl> + / / May GC , because SetSpecializationMemInfoFrom may GC <nl> void SetInstanceMemory ( Handle < WasmInstanceObject > instance , <nl> Handle < JSArrayBuffer > buffer ) { <nl> - bool is_wasm_module = instance - > module ( ) - > origin = = wasm : : kWasmOrigin ; <nl> - bool use_trap_handler = <nl> - instance - > module_object ( ) . native_module ( ) - > use_trap_handler ( ) ; <nl> - / / Wasm modules compiled to use the trap handler don ' t have bounds checks , <nl> - / / so they must have a memory that has guard regions . <nl> - CHECK_IMPLIES ( is_wasm_module & & use_trap_handler , <nl> - buffer - > GetBackingStore ( ) - > has_guard_regions ( ) ) ; <nl> - <nl> instance - > SetRawMemory ( reinterpret_cast < byte * > ( buffer - > backing_store ( ) ) , <nl> buffer - > byte_length ( ) ) ; <nl> # if DEBUG <nl> void SetInstanceMemory ( Handle < WasmInstanceObject > instance , <nl> } <nl> # endif <nl> } <nl> + <nl> } / / namespace <nl> <nl> Handle < WasmMemoryObject > WasmMemoryObject : : New ( <nl> Handle < WasmMemoryObject > WasmMemoryObject : : New ( <nl> uint32_t maximum ) { <nl> Handle < JSArrayBuffer > buffer ; <nl> if ( ! maybe_buffer . ToHandle ( & buffer ) ) { <nl> - / / If no buffer was provided , create a zero - length one . <nl> - auto clamped_maximum = <nl> - std : : min ( static_cast < size_t > ( maximum ) , wasm : : kV8MaxWasmMemoryPages ) ; <nl> - auto backing_store = BackingStore : : AllocateWasmMemory ( <nl> - isolate , 0 , clamped_maximum , SharedFlag : : kNotShared ) ; <nl> - buffer = isolate - > factory ( ) - > NewJSArrayBuffer ( ) ; <nl> - buffer - > Attach ( std : : move ( backing_store ) ) ; <nl> + / / If no buffer was provided , create a 0 - length one . <nl> + buffer = wasm : : SetupArrayBuffer ( isolate , nullptr , 0 , false ) ; <nl> } <nl> <nl> + / / TODO ( kschimpf ) : Do we need to add an argument that defines the <nl> + / / style of memory the user prefers ( with / without trap handling ) , so <nl> + / / that the memory will match the style of the compiled wasm module . 
<nl> + / / See issue v8 : 7143 <nl> Handle < JSFunction > memory_ctor ( <nl> isolate - > native_context ( ) - > wasm_memory_constructor ( ) , isolate ) ; <nl> <nl> - auto memory_object = Handle < WasmMemoryObject > : : cast ( <nl> + auto memory_obj = Handle < WasmMemoryObject > : : cast ( <nl> isolate - > factory ( ) - > NewJSObject ( memory_ctor , AllocationType : : kOld ) ) ; <nl> - memory_object - > set_array_buffer ( * buffer ) ; <nl> - memory_object - > set_maximum_pages ( maximum ) ; <nl> - <nl> - if ( buffer - > is_shared ( ) ) { <nl> - auto backing_store = buffer - > GetBackingStore ( ) ; <nl> - backing_store - > AttachSharedWasmMemoryObject ( isolate , memory_object ) ; <nl> - } <nl> + memory_obj - > set_array_buffer ( * buffer ) ; <nl> + memory_obj - > set_maximum_pages ( maximum ) ; <nl> <nl> - return memory_object ; <nl> + return memory_obj ; <nl> } <nl> <nl> MaybeHandle < WasmMemoryObject > WasmMemoryObject : : New ( Isolate * isolate , <nl> uint32_t initial , <nl> uint32_t maximum , <nl> - SharedFlag shared ) { <nl> - auto backing_store = <nl> - BackingStore : : AllocateWasmMemory ( isolate , initial , maximum , shared ) ; <nl> - <nl> - if ( ! backing_store ) return { } ; <nl> - <nl> - Handle < JSArrayBuffer > buffer = <nl> - ( shared = = SharedFlag : : kShared ) <nl> - ? isolate - > factory ( ) - > NewJSSharedArrayBuffer ( ) <nl> - : isolate - > factory ( ) - > NewJSArrayBuffer ( ) ; <nl> - <nl> - buffer - > Attach ( std : : move ( backing_store ) ) ; <nl> - <nl> + bool is_shared_memory ) { <nl> + Handle < JSArrayBuffer > buffer ; <nl> + size_t size = static_cast < size_t > ( i : : wasm : : kWasmPageSize ) * <nl> + static_cast < size_t > ( initial ) ; <nl> + if ( is_shared_memory ) { <nl> + size_t max_size = static_cast < size_t > ( i : : wasm : : kWasmPageSize ) * <nl> + static_cast < size_t > ( maximum ) ; <nl> + if ( ! i : : wasm : : NewSharedArrayBuffer ( isolate , size , max_size ) <nl> + . ToHandle ( & buffer ) ) { <nl> + return { } ; <nl> + } <nl> + } else { <nl> + if ( ! i : : wasm : : NewArrayBuffer ( isolate , size ) . ToHandle ( & buffer ) ) { <nl> + return { } ; <nl> + } <nl> + } <nl> return New ( isolate , buffer , maximum ) ; <nl> } <nl> <nl> int32_t WasmMemoryObject : : Grow ( Isolate * isolate , <nl> uint32_t pages ) { <nl> TRACE_EVENT0 ( TRACE_DISABLED_BY_DEFAULT ( " v8 . wasm " ) , " GrowMemory " ) ; <nl> Handle < JSArrayBuffer > old_buffer ( memory_object - > array_buffer ( ) , isolate ) ; <nl> - / / Any buffer used as an asmjs memory cannot be detached , and <nl> - / / therefore this memory cannot be grown . <nl> - if ( old_buffer - > is_asmjs_memory ( ) ) return - 1 ; <nl> + if ( old_buffer - > is_shared ( ) & & ! FLAG_wasm_grow_shared_memory ) return - 1 ; <nl> + auto * memory_tracker = isolate - > wasm_engine ( ) - > memory_tracker ( ) ; <nl> + if ( ! memory_tracker - > IsWasmMemoryGrowable ( old_buffer ) ) return - 1 ; <nl> <nl> - / / Checks for maximum memory size . <nl> + / / Checks for maximum memory size , compute new size . <nl> uint32_t maximum_pages = wasm : : max_mem_pages ( ) ; <nl> if ( memory_object - > has_maximum_pages ( ) ) { <nl> maximum_pages = std : : min ( <nl> int32_t WasmMemoryObject : : Grow ( Isolate * isolate , <nl> ( pages > wasm : : max_mem_pages ( ) - old_pages ) ) { / / exceeds limit <nl> return - 1 ; <nl> } <nl> - / / Compute new size . 
<nl> size_t new_size = <nl> static_cast < size_t > ( old_pages + pages ) * wasm : : kWasmPageSize ; <nl> <nl> - std : : shared_ptr < BackingStore > backing_store = old_buffer - > GetBackingStore ( ) ; <nl> - if ( ! backing_store ) return - 1 ; <nl> - <nl> - / / Try to handle shared memory first . <nl> + / / Memory is grown , but the memory objects and instances are not yet updated . <nl> + / / Handle this in the interrupt handler so that all the isolates that share <nl> + / / this buffer can be updated safely . <nl> + Handle < JSArrayBuffer > new_buffer ; <nl> if ( old_buffer - > is_shared ( ) ) { <nl> - if ( FLAG_wasm_grow_shared_memory ) { <nl> - / / Shared memories can only be grown in place ; no copying . <nl> - if ( backing_store - > GrowWasmMemoryInPlace ( isolate , new_size ) ) { <nl> - BackingStore : : BroadcastSharedWasmMemoryGrow ( isolate , backing_store , <nl> - new_size ) ; <nl> - / / Broadcasting the update should update this memory object too . <nl> - DCHECK_NE ( * old_buffer , memory_object - > array_buffer ( ) ) ; <nl> - DCHECK_EQ ( new_size , memory_object - > array_buffer ( ) . byte_length ( ) ) ; <nl> - return static_cast < int32_t > ( old_pages ) ; / / success <nl> - } <nl> + / / Adjust protections for the buffer . <nl> + if ( ! AdjustBufferPermissions ( isolate , old_buffer , new_size ) ) { <nl> + return - 1 ; <nl> } <nl> - return - 1 ; <nl> - } <nl> - <nl> - / / Try to grow non - shared memory in - place . <nl> - if ( backing_store - > GrowWasmMemoryInPlace ( isolate , new_size ) ) { <nl> - / / Detach old and create a new one with the grown backing store . <nl> - old_buffer - > Detach ( true ) ; <nl> - Handle < JSArrayBuffer > new_buffer = isolate - > factory ( ) - > NewJSArrayBuffer ( ) ; <nl> - new_buffer - > Attach ( backing_store ) ; <nl> + void * backing_store = old_buffer - > backing_store ( ) ; <nl> + if ( memory_tracker - > IsWasmSharedMemory ( backing_store ) ) { <nl> + / / This memory is shared between different isolates . <nl> + DCHECK ( old_buffer - > is_shared ( ) ) ; <nl> + / / Update pending grow state , and trigger a grow interrupt on all the <nl> + / / isolates that share this buffer . <nl> + memory_tracker - > SetPendingUpdateOnGrow ( old_buffer , new_size ) ; <nl> + / / Handle interrupts for this isolate so that the instances with this <nl> + / / isolate are updated . <nl> + isolate - > stack_guard ( ) - > HandleInterrupts ( ) ; <nl> + / / Failures to allocate or adjust permissions are already handled here , and <nl> + / / updates to instances are handled in the interrupt handler ; safe to return . <nl> + return static_cast < uint32_t > ( old_size / wasm : : kWasmPageSize ) ; <nl> + } <nl> + / / SharedArrayBuffer , but not shared across isolates . Set up a new buffer <nl> + / / with updated permissions and update the instances . <nl> + new_buffer = <nl> + wasm : : SetupArrayBuffer ( isolate , backing_store , new_size , <nl> + old_buffer - > is_external ( ) , SharedFlag : : kShared ) ; <nl> + memory_object - > update_instances ( isolate , new_buffer ) ; <nl> - return static_cast < int32_t > ( old_pages ) ; / / success <nl> + } else { <nl> + if ( ! MemoryGrowBuffer ( isolate , old_buffer , new_size ) <nl> + . ToHandle ( & new_buffer ) ) { <nl> + return - 1 ; <nl> + } <nl> } <nl> - / / Try allocating a new backing store and copying . <nl> - std : : unique_ptr < BackingStore > new_backing_store = <nl> - BackingStore : : CopyWasmMemory ( isolate , backing_store , new_size ) ; <nl> - if ( !
new_backing_store ) return - 1 ; <nl> - <nl> - / / Detach old and create a new one with the new backing store . <nl> - old_buffer - > Detach ( true ) ; <nl> - Handle < JSArrayBuffer > new_buffer = isolate - > factory ( ) - > NewJSArrayBuffer ( ) ; <nl> - new_buffer - > Attach ( std : : move ( new_backing_store ) ) ; <nl> + / / Update instances if any . <nl> memory_object - > update_instances ( isolate , new_buffer ) ; <nl> - return static_cast < int32_t > ( old_pages ) ; / / success <nl> + return static_cast < uint32_t > ( old_size / wasm : : kWasmPageSize ) ; <nl> } <nl> <nl> / / static <nl> MaybeHandle < WasmGlobalObject > WasmGlobalObject : : New ( <nl> global_obj - > set_tagged_buffer ( * tagged_buffer ) ; <nl> } else { <nl> DCHECK ( maybe_tagged_buffer . is_null ( ) ) ; <nl> - uint32_t type_size = wasm : : ValueTypes : : ElementSizeInBytes ( type ) ; <nl> - <nl> Handle < JSArrayBuffer > untagged_buffer ; <nl> + uint32_t type_size = wasm : : ValueTypes : : ElementSizeInBytes ( type ) ; <nl> if ( ! maybe_untagged_buffer . ToHandle ( & untagged_buffer ) ) { <nl> - MaybeHandle < JSArrayBuffer > result = <nl> - isolate - > factory ( ) - > NewJSArrayBufferAndBackingStore ( <nl> - offset + type_size , InitializedFlag : : kZeroInitialized ) ; <nl> - <nl> - if ( ! result . ToHandle ( & untagged_buffer ) ) return { } ; <nl> + / / If no buffer was provided , create one long enough for the given type . <nl> + untagged_buffer = isolate - > factory ( ) - > NewJSArrayBuffer ( <nl> + SharedFlag : : kNotShared , AllocationType : : kOld ) ; <nl> + <nl> + const bool initialize = true ; <nl> + if ( ! JSArrayBuffer : : SetupAllocatingData ( untagged_buffer , isolate , <nl> + type_size , initialize ) ) { <nl> + return { } ; <nl> + } <nl> } <nl> <nl> / / Check that the offset is in bounds . <nl> mmm a / src / wasm / wasm - objects . h <nl> ppp b / src / wasm / wasm - objects . h <nl> class WasmJSFunction ; <nl> class WasmModuleObject ; <nl> class WasmIndirectFunctionTable ; <nl> <nl> - enum class SharedFlag : uint8_t ; <nl> - <nl> template < class CppType > <nl> class Managed ; <nl> <nl> class WasmMemoryObject : public JSObject { <nl> V8_EXPORT_PRIVATE static Handle < WasmMemoryObject > New ( <nl> Isolate * isolate , MaybeHandle < JSArrayBuffer > buffer , uint32_t maximum ) ; <nl> <nl> - V8_EXPORT_PRIVATE static MaybeHandle < WasmMemoryObject > New ( Isolate * isolate , <nl> - uint32_t initial , <nl> - uint32_t maximum , <nl> - SharedFlag shared ) ; <nl> + V8_EXPORT_PRIVATE static MaybeHandle < WasmMemoryObject > New ( <nl> + Isolate * isolate , uint32_t initial , uint32_t maximum , <nl> + bool is_shared_memory ) ; <nl> <nl> void update_instances ( Isolate * isolate , Handle < JSArrayBuffer > buffer ) ; <nl> <nl> mmm a / test / cctest / BUILD . gn <nl> ppp b / test / cctest / BUILD . gn <nl> v8_source_set ( " cctest_sources " ) { <nl> " interpreter / test - source - positions . cc " , <nl> " libplatform / test - tracing . cc " , <nl> " libsampler / test - sampler . cc " , <nl> - " manually - externalized - buffer . h " , <nl> " parsing / test - parse - decision . cc " , <nl> " parsing / test - preparser . cc " , <nl> " parsing / test - scanner - streams . cc " , <nl> v8_source_set ( " cctest_sources " ) { <nl> " test - api . h " , <nl> " test - array - list . cc " , <nl> " test - atomicops . cc " , <nl> - " test - backing - store . cc " , <nl> " test - bignum - dtoa . cc " , <nl> " test - bignum . cc " , <nl> " test - bit - vector . 
cc " , <nl> v8_source_set ( " cctest_sources " ) { <nl> " unicode - helpers . cc " , <nl> " unicode - helpers . h " , <nl> " wasm / test - c - wasm - entry . cc " , <nl> - " wasm / test - grow - memory . cc " , <nl> " wasm / test - jump - table - assembler . cc " , <nl> " wasm / test - run - wasm - 64 . cc " , <nl> " wasm / test - run - wasm - asmjs . cc " , <nl> mmm a / test / cctest / cctest . status <nl> ppp b / test / cctest / cctest . status <nl> <nl> ' test - api / WasmI32AtomicWaitCallback ' : [ SKIP ] , <nl> ' test - api / WasmI64AtomicWaitCallback ' : [ SKIP ] , <nl> ' test - api / WasmStreaming * ' : [ SKIP ] , <nl> - ' test - backing - store / Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree ' : [ SKIP ] , <nl> ' test - c - wasm - entry / * ' : [ SKIP ] , <nl> ' test - jump - table - assembler / * ' : [ SKIP ] , <nl> - ' test - grow - memory / * ' : [ SKIP ] , <nl> ' test - run - wasm - 64 / * ' : [ SKIP ] , <nl> ' test - run - wasm - asmjs / * ' : [ SKIP ] , <nl> ' test - run - wasm - atomics64 / * ' : [ SKIP ] , <nl> mmm a / test / cctest / heap / test - array - buffer - tracker . cc <nl> ppp b / test / cctest / heap / test - array - buffer - tracker . cc <nl> TEST ( ArrayBuffer_UnregisterDuringSweep ) { <nl> / / barriers and proper synchronization this will trigger a data race on <nl> / / TSAN . <nl> v8 : : ArrayBuffer : : Contents contents = ab - > Externalize ( ) ; <nl> - contents . Deleter ( ) ( contents . Data ( ) , contents . ByteLength ( ) , <nl> - contents . DeleterData ( ) ) ; <nl> + heap - > isolate ( ) - > array_buffer_allocator ( ) - > Free ( contents . Data ( ) , <nl> + contents . ByteLength ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / test / cctest / heap / test - page - promotion . cc <nl> ppp b / test / cctest / heap / test - page - promotion . cc <nl> UNINITIALIZED_TEST ( PagePromotion_NewToNewJSArrayBuffer ) { <nl> heap : : FillCurrentPage ( heap - > new_space ( ) ) ; <nl> / / Allocate a buffer we would like to check against . <nl> Handle < JSArrayBuffer > buffer = <nl> - i_isolate - > factory ( ) <nl> - - > NewJSArrayBufferAndBackingStore ( 100 , <nl> - InitializedFlag : : kZeroInitialized ) <nl> - . ToHandleChecked ( ) ; <nl> + i_isolate - > factory ( ) - > NewJSArrayBuffer ( SharedFlag : : kNotShared ) ; <nl> + CHECK ( JSArrayBuffer : : SetupAllocatingData ( buffer , i_isolate , 100 ) ) ; <nl> std : : vector < Handle < FixedArray > > handles ; <nl> / / Simulate a full space , filling the interesting page with live objects . <nl> heap : : SimulateFullSpace ( heap - > new_space ( ) , & handles ) ; <nl> UNINITIALIZED_TEST ( PagePromotion_NewToOldJSArrayBuffer ) { <nl> heap : : FillCurrentPage ( heap - > new_space ( ) ) ; <nl> / / Allocate a buffer we would like to check against . <nl> Handle < JSArrayBuffer > buffer = <nl> - i_isolate - > factory ( ) <nl> - - > NewJSArrayBufferAndBackingStore ( 100 , <nl> - InitializedFlag : : kZeroInitialized ) <nl> - . ToHandleChecked ( ) ; <nl> + i_isolate - > factory ( ) - > NewJSArrayBuffer ( SharedFlag : : kNotShared ) ; <nl> + CHECK ( JSArrayBuffer : : SetupAllocatingData ( buffer , i_isolate , 100 ) ) ; <nl> std : : vector < Handle < FixedArray > > handles ; <nl> / / Simulate a full space , filling the interesting page with live objects . <nl> heap : : SimulateFullSpace ( heap - > new_space ( ) , & handles ) ; <nl> deleted file mode 100644 <nl> index b5eeed7382f . . 00000000000 <nl> mmm a / test / cctest / manually - externalized - buffer . 
h <nl> ppp / dev / null <nl> <nl> - / / Copyright 2019 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - # ifndef V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_ <nl> - # define V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_ <nl> - <nl> - # include " src / api / api - inl . h " <nl> - <nl> - namespace v8 { <nl> - namespace internal { <nl> - namespace testing { <nl> - <nl> - / / Utility to free the allocated memory for a buffer that is manually <nl> - / / externalized in a test . <nl> - struct ManuallyExternalizedBuffer { <nl> - Handle < JSArrayBuffer > buffer_ ; <nl> - v8 : : ArrayBuffer : : Contents contents_ ; <nl> - <nl> - explicit ManuallyExternalizedBuffer ( Handle < JSArrayBuffer > buffer ) <nl> - : buffer_ ( buffer ) , <nl> - contents_ ( v8 : : Utils : : ToLocal ( buffer_ ) - > Externalize ( ) ) { } <nl> - ~ ManuallyExternalizedBuffer ( ) { <nl> - contents_ . Deleter ( ) ( contents_ . Data ( ) , contents_ . ByteLength ( ) , <nl> - contents_ . DeleterData ( ) ) ; <nl> - } <nl> - void * backing_store ( ) { return contents_ . Data ( ) ; } <nl> - } ; <nl> - <nl> - } / / namespace testing <nl> - } / / namespace internal <nl> - } / / namespace v8 <nl> - <nl> - # endif / / V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_ <nl> mmm a / test / cctest / test - api - array - buffer . cc <nl> ppp b / test / cctest / test - api - array - buffer . cc <nl> class ScopedArrayBufferContents { <nl> public : <nl> explicit ScopedArrayBufferContents ( const v8 : : ArrayBuffer : : Contents & contents ) <nl> : contents_ ( contents ) { } <nl> - ~ ScopedArrayBufferContents ( ) { <nl> - contents_ . Deleter ( ) ( contents_ . Data ( ) , contents_ . ByteLength ( ) , <nl> - contents_ . DeleterData ( ) ) ; <nl> - } <nl> + ~ ScopedArrayBufferContents ( ) { free ( contents_ . AllocationBase ( ) ) ; } <nl> void * Data ( ) const { return contents_ . Data ( ) ; } <nl> size_t ByteLength ( ) const { return contents_ . ByteLength ( ) ; } <nl> <nl> class ScopedSharedArrayBufferContents { <nl> explicit ScopedSharedArrayBufferContents ( <nl> const v8 : : SharedArrayBuffer : : Contents & contents ) <nl> : contents_ ( contents ) { } <nl> - ~ ScopedSharedArrayBufferContents ( ) { <nl> - contents_ . Deleter ( ) ( contents_ . Data ( ) , contents_ . ByteLength ( ) , <nl> - contents_ . DeleterData ( ) ) ; <nl> - } <nl> + ~ ScopedSharedArrayBufferContents ( ) { free ( contents_ . AllocationBase ( ) ) ; } <nl> void * Data ( ) const { return contents_ . Data ( ) ; } <nl> size_t ByteLength ( ) const { return contents_ . ByteLength ( ) ; } <nl> <nl> deleted file mode 100644 <nl> index f8010d30319 . . 00000000000 <nl> mmm a / test / cctest / test - backing - store . cc <nl> ppp / dev / null <nl> <nl> - / / Copyright 2019 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - # include " src / api / api - inl . h " <nl> - # include " src / objects / backing - store . h " <nl> - # include " src / wasm / wasm - objects . h " <nl> - <nl> - # include " test / cctest / cctest . h " <nl> - # include " test / cctest / manually - externalized - buffer . h " <nl> - <nl> - namespace v8 { <nl> - namespace internal { <nl> - <nl> - using testing : : ManuallyExternalizedBuffer ; <nl> - <nl> - TEST ( Run_WasmModule_Buffer_Externalized_Detach ) { <nl> - { <nl> - / / Regression test for <nl> - / / https : / / bugs . 
chromium . org / p / chromium / issues / detail ? id = 731046 <nl> - Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> - MaybeHandle < JSArrayBuffer > result = <nl> - isolate - > factory ( ) - > NewJSArrayBufferAndBackingStore ( <nl> - wasm : : kWasmPageSize , InitializedFlag : : kZeroInitialized ) ; <nl> - Handle < JSArrayBuffer > buffer = result . ToHandleChecked ( ) ; <nl> - <nl> - / / Embedder requests contents . <nl> - ManuallyExternalizedBuffer external ( buffer ) ; <nl> - <nl> - buffer - > Detach ( ) ; <nl> - CHECK ( buffer - > was_detached ( ) ) ; <nl> - <nl> - / / Make sure we can write to the buffer without crashing <nl> - uint32_t * int_buffer = <nl> - reinterpret_cast < uint32_t * > ( external . backing_store ( ) ) ; <nl> - int_buffer [ 0 ] = 0 ; <nl> - / / Embedder frees contents . <nl> - } <nl> - CcTest : : CollectAllAvailableGarbage ( ) ; <nl> - } <nl> - <nl> - TEST ( Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree ) { <nl> - { <nl> - / / Regression test for https : / / crbug . com / 813876 <nl> - Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> - MaybeHandle < WasmMemoryObject > result = <nl> - WasmMemoryObject : : New ( isolate , 1 , 1 , SharedFlag : : kNotShared ) ; <nl> - Handle < WasmMemoryObject > memory_object = result . ToHandleChecked ( ) ; <nl> - Handle < JSArrayBuffer > buffer ( memory_object - > array_buffer ( ) , isolate ) ; <nl> - <nl> - { <nl> - / / Embedder requests contents . <nl> - ManuallyExternalizedBuffer external ( buffer ) ; <nl> - <nl> - / / Growing ( even by 0 ) detaches the old buffer . <nl> - WasmMemoryObject : : Grow ( isolate , memory_object , 0 ) ; <nl> - CHECK ( buffer - > was_detached ( ) ) ; <nl> - <nl> - / / Embedder frees contents . <nl> - } <nl> - <nl> - / / Make sure the memory object has a new buffer that can be written to . <nl> - uint32_t * int_buffer = reinterpret_cast < uint32_t * > ( <nl> - memory_object - > array_buffer ( ) . backing_store ( ) ) ; <nl> - int_buffer [ 0 ] = 0 ; <nl> - } <nl> - CcTest : : CollectAllAvailableGarbage ( ) ; <nl> - } <nl> - <nl> - # if V8_TARGET_ARCH_64_BIT <nl> - TEST ( BackingStore_Reclaim ) { <nl> - / / Make sure we can allocate memories without running out of address space . <nl> - Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> - for ( int i = 0 ; i < 256 ; + + i ) { <nl> - auto backing_store = <nl> - BackingStore : : AllocateWasmMemory ( isolate , 1 , 1 , SharedFlag : : kNotShared ) ; <nl> - CHECK ( backing_store ) ; <nl> - } <nl> - } <nl> - # endif <nl> - <nl> - } / / namespace internal <nl> - } / / namespace v8 <nl> mmm a / test / cctest / test - code - stub - assembler . cc <nl> ppp b / test / cctest / test - code - stub - assembler . cc <nl> TEST ( TryLookupElement ) { <nl> <nl> v8 : : ArrayBuffer : : Contents contents = buffer - > Externalize ( ) ; <nl> buffer - > Detach ( ) ; <nl> - contents . Deleter ( ) ( contents . Data ( ) , contents . ByteLength ( ) , <nl> - contents . DeleterData ( ) ) ; <nl> + isolate - > array_buffer_allocator ( ) - > Free ( contents . Data ( ) , <nl> + contents . ByteLength ( ) ) ; <nl> <nl> CHECK_ABSENT ( object , 0 ) ; <nl> CHECK_ABSENT ( object , 1 ) ; <nl> mmm a / test / cctest / test - heap - profiler . cc <nl> ppp b / test / cctest / test - heap - profiler . 
cc <nl> TEST ( ArrayBufferSharedBackingStore ) { <nl> CHECK ( ab2_data ) ; <nl> CHECK_EQ ( ab1_data , ab2_data ) ; <nl> CHECK_EQ ( 2 , GetRetainersCount ( snapshot , ab1_data ) ) ; <nl> - ab_contents . Deleter ( ) ( ab_contents . Data ( ) , ab_contents . ByteLength ( ) , <nl> - ab_contents . DeleterData ( ) ) ; <nl> + free ( data ) ; <nl> } <nl> <nl> <nl> mmm a / test / cctest / test - roots . cc <nl> ppp b / test / cctest / test - roots . cc <nl> bool IsInitiallyMutable ( Factory * factory , Address object_address ) { <nl> V ( detached_contexts ) \ <nl> V ( dirty_js_finalization_groups ) \ <nl> V ( feedback_vectors_for_profiling_tools ) \ <nl> - V ( shared_wasm_memories ) \ <nl> V ( materialized_objects ) \ <nl> V ( noscript_shared_function_infos ) \ <nl> V ( public_symbol_table ) \ <nl> deleted file mode 100644 <nl> index a188707caef . . 00000000000 <nl> mmm a / test / cctest / wasm / test - grow - memory . cc <nl> ppp / dev / null <nl> <nl> - / / Copyright 2019 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - # include " src / wasm / wasm - objects - inl . h " <nl> - # include " src / wasm / wasm - opcodes . h " <nl> - <nl> - # include " src / wasm / wasm - module - builder . h " <nl> - # include " test / cctest / cctest . h " <nl> - # include " test / cctest / manually - externalized - buffer . h " <nl> - # include " test / common / wasm / flag - utils . h " <nl> - # include " test / common / wasm / test - signatures . h " <nl> - # include " test / common / wasm / wasm - macro - gen . h " <nl> - # include " test / common / wasm / wasm - module - runner . h " <nl> - <nl> - namespace v8 { <nl> - namespace internal { <nl> - namespace wasm { <nl> - namespace test_grow_memory { <nl> - <nl> - using testing : : CompileAndInstantiateForTesting ; <nl> - using v8 : : internal : : testing : : ManuallyExternalizedBuffer ; <nl> - <nl> - namespace { <nl> - void ExportAsMain ( WasmFunctionBuilder * f ) { <nl> - f - > builder ( ) - > AddExport ( CStrVector ( " main " ) , f ) ; <nl> - } <nl> - # define EMIT_CODE_WITH_END ( f , code ) \ <nl> - do { \ <nl> - f - > EmitCode ( code , sizeof ( code ) ) ; \ <nl> - f - > Emit ( kExprEnd ) ; \ <nl> - } while ( false ) <nl> - <nl> - void Cleanup ( Isolate * isolate = CcTest : : InitIsolateOnce ( ) ) { <nl> - / / By sending a low memory notifications , we will try hard to collect all <nl> - / / garbage and will therefore also invoke all weak callbacks of actually <nl> - / / unreachable persistent handles . <nl> - reinterpret_cast < v8 : : Isolate * > ( isolate ) - > LowMemoryNotification ( ) ; <nl> - } <nl> - } / / namespace <nl> - <nl> - TEST ( GrowMemDetaches ) { <nl> - { <nl> - Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> - Handle < WasmMemoryObject > memory_object = <nl> - WasmMemoryObject : : New ( isolate , 16 , 100 , SharedFlag : : kNotShared ) <nl> - . 
ToHandleChecked ( ) ; <nl> - Handle < JSArrayBuffer > buffer ( memory_object - > array_buffer ( ) , isolate ) ; <nl> - int32_t result = WasmMemoryObject : : Grow ( isolate , memory_object , 0 ) ; <nl> - CHECK_EQ ( 16 , result ) ; <nl> - CHECK_NE ( * buffer , memory_object - > array_buffer ( ) ) ; <nl> - CHECK ( buffer - > was_detached ( ) ) ; <nl> - } <nl> - Cleanup ( ) ; <nl> - } <nl> - <nl> - TEST ( Externalized_GrowMemMemSize ) { <nl> - { <nl> - Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> - Handle < WasmMemoryObject > memory_object = <nl> - WasmMemoryObject : : New ( isolate , 16 , 100 , SharedFlag : : kNotShared ) <nl> - . ToHandleChecked ( ) ; <nl> - ManuallyExternalizedBuffer external ( <nl> - handle ( memory_object - > array_buffer ( ) , isolate ) ) ; <nl> - int32_t result = WasmMemoryObject : : Grow ( isolate , memory_object , 0 ) ; <nl> - CHECK_EQ ( 16 , result ) ; <nl> - CHECK_NE ( * external . buffer_ , memory_object - > array_buffer ( ) ) ; <nl> - CHECK ( external . buffer_ - > was_detached ( ) ) ; <nl> - } <nl> - Cleanup ( ) ; <nl> - } <nl> - <nl> - TEST ( Run_WasmModule_Buffer_Externalized_GrowMem ) { <nl> - { <nl> - Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> - TestSignatures sigs ; <nl> - v8 : : internal : : AccountingAllocator allocator ; <nl> - Zone zone ( & allocator , ZONE_NAME ) ; <nl> - <nl> - WasmModuleBuilder * builder = new ( & zone ) WasmModuleBuilder ( & zone ) ; <nl> - WasmFunctionBuilder * f = builder - > AddFunction ( sigs . i_v ( ) ) ; <nl> - ExportAsMain ( f ) ; <nl> - byte code [ ] = { WASM_GROW_MEMORY ( WASM_I32V_1 ( 6 ) ) , WASM_DROP , <nl> - WASM_MEMORY_SIZE } ; <nl> - EMIT_CODE_WITH_END ( f , code ) ; <nl> - <nl> - ZoneBuffer buffer ( & zone ) ; <nl> - builder - > WriteTo ( & buffer ) ; <nl> - testing : : SetupIsolateForWasmModule ( isolate ) ; <nl> - ErrorThrower thrower ( isolate , " Test " ) ; <nl> - const Handle < WasmInstanceObject > instance = <nl> - CompileAndInstantiateForTesting ( <nl> - isolate , & thrower , ModuleWireBytes ( buffer . begin ( ) , buffer . end ( ) ) ) <nl> - . ToHandleChecked ( ) ; <nl> - Handle < WasmMemoryObject > memory_object ( instance - > memory_object ( ) , isolate ) ; <nl> - <nl> - / / Fake the Embedder flow by externalizing the array buffer . <nl> - ManuallyExternalizedBuffer external1 ( <nl> - handle ( memory_object - > array_buffer ( ) , isolate ) ) ; <nl> - <nl> - / / Grow using the API . <nl> - uint32_t result = WasmMemoryObject : : Grow ( isolate , memory_object , 4 ) ; <nl> - CHECK_EQ ( 16 , result ) ; <nl> - CHECK ( external1 . buffer_ - > was_detached ( ) ) ; / / growing always detaches <nl> - CHECK_EQ ( 0 , external1 . buffer_ - > byte_length ( ) ) ; <nl> - <nl> - CHECK_NE ( * external1 . buffer_ , memory_object - > array_buffer ( ) ) ; <nl> - <nl> - / / Fake the Embedder flow by externalizing the array buffer . <nl> - ManuallyExternalizedBuffer external2 ( <nl> - handle ( memory_object - > array_buffer ( ) , isolate ) ) ; <nl> - <nl> - / / Grow using an internal WASM bytecode . <nl> - result = testing : : RunWasmModuleForTesting ( isolate , instance , 0 , nullptr ) ; <nl> - CHECK_EQ ( 26 , result ) ; <nl> - CHECK ( external2 . buffer_ - > was_detached ( ) ) ; / / growing always detaches <nl> - CHECK_EQ ( 0 , external2 . buffer_ - > byte_length ( ) ) ; <nl> - CHECK_NE ( * external2 . 
buffer_ , memory_object - > array_buffer ( ) ) ; <nl> - } <nl> - Cleanup ( ) ; <nl> - } <nl> - <nl> - } / / namespace test_grow_memory <nl> - } / / namespace wasm <nl> - } / / namespace internal <nl> - } / / namespace v8 <nl> - <nl> - # undef EMIT_CODE_WITH_END <nl> mmm a / test / cctest / wasm / test - run - wasm - module . cc <nl> ppp b / test / cctest / wasm / test - run - wasm - module . cc <nl> <nl> # include " src / utils / version . h " <nl> # include " src / wasm / module - decoder . h " <nl> # include " src / wasm / wasm - engine . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> # include " src / wasm / wasm - module - builder . h " <nl> # include " src / wasm / wasm - module . h " <nl> # include " src / wasm / wasm - objects - inl . h " <nl> TEST ( MemoryWithOOBEmptyDataSegment ) { <nl> Cleanup ( ) ; <nl> } <nl> <nl> + / / Utility to free the allocated memory for a buffer that is manually <nl> + / / externalized in a test . <nl> + struct ManuallyExternalizedBuffer { <nl> + Isolate * isolate_ ; <nl> + Handle < JSArrayBuffer > buffer_ ; <nl> + void * allocation_base_ ; <nl> + size_t allocation_length_ ; <nl> + bool const should_free_ ; <nl> + <nl> + ManuallyExternalizedBuffer ( JSArrayBuffer buffer , Isolate * isolate ) <nl> + : isolate_ ( isolate ) , <nl> + buffer_ ( buffer , isolate ) , <nl> + allocation_base_ ( buffer . allocation_base ( ) ) , <nl> + allocation_length_ ( buffer . allocation_length ( ) ) , <nl> + should_free_ ( ! isolate_ - > wasm_engine ( ) - > memory_tracker ( ) - > IsWasmMemory ( <nl> + buffer . backing_store ( ) ) ) { <nl> + if ( ! isolate_ - > wasm_engine ( ) - > memory_tracker ( ) - > IsWasmMemory ( <nl> + buffer . backing_store ( ) ) ) { <nl> + v8 : : Utils : : ToLocal ( buffer_ ) - > Externalize ( ) ; <nl> + } <nl> + } <nl> + ~ ManuallyExternalizedBuffer ( ) { <nl> + if ( should_free_ ) { <nl> + buffer_ - > FreeBackingStoreFromMainThread ( ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + TEST ( Run_WasmModule_Buffer_Externalized_GrowMem ) { <nl> + { <nl> + Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + TestSignatures sigs ; <nl> + v8 : : internal : : AccountingAllocator allocator ; <nl> + Zone zone ( & allocator , ZONE_NAME ) ; <nl> + <nl> + WasmModuleBuilder * builder = new ( & zone ) WasmModuleBuilder ( & zone ) ; <nl> + WasmFunctionBuilder * f = builder - > AddFunction ( sigs . i_v ( ) ) ; <nl> + ExportAsMain ( f ) ; <nl> + byte code [ ] = { WASM_GROW_MEMORY ( WASM_I32V_1 ( 6 ) ) , WASM_DROP , <nl> + WASM_MEMORY_SIZE } ; <nl> + EMIT_CODE_WITH_END ( f , code ) ; <nl> + <nl> + ZoneBuffer buffer ( & zone ) ; <nl> + builder - > WriteTo ( & buffer ) ; <nl> + testing : : SetupIsolateForWasmModule ( isolate ) ; <nl> + ErrorThrower thrower ( isolate , " Test " ) ; <nl> + const Handle < WasmInstanceObject > instance = <nl> + CompileAndInstantiateForTesting ( <nl> + isolate , & thrower , ModuleWireBytes ( buffer . begin ( ) , buffer . end ( ) ) ) <nl> + . ToHandleChecked ( ) ; <nl> + Handle < WasmMemoryObject > memory_object ( instance - > memory_object ( ) , isolate ) ; <nl> + <nl> + / / Fake the Embedder flow by externalizing the array buffer . <nl> + ManuallyExternalizedBuffer buffer1 ( memory_object - > array_buffer ( ) , isolate ) ; <nl> + <nl> + / / Grow using the API . <nl> + uint32_t result = WasmMemoryObject : : Grow ( isolate , memory_object , 4 ) ; <nl> + CHECK_EQ ( 16 , result ) ; <nl> + CHECK ( buffer1 . 
buffer_ - > was_detached ( ) ) ; / / growing always detaches <nl> + CHECK_EQ ( 0 , buffer1 . buffer_ - > byte_length ( ) ) ; <nl> + <nl> + CHECK_NE ( * buffer1 . buffer_ , memory_object - > array_buffer ( ) ) ; <nl> + <nl> + / / Fake the Embedder flow by externalizing the array buffer . <nl> + ManuallyExternalizedBuffer buffer2 ( memory_object - > array_buffer ( ) , isolate ) ; <nl> + <nl> + / / Grow using an internal WASM bytecode . <nl> + result = testing : : RunWasmModuleForTesting ( isolate , instance , 0 , nullptr ) ; <nl> + CHECK_EQ ( 26 , result ) ; <nl> + CHECK ( buffer2 . buffer_ - > was_detached ( ) ) ; / / growing always detaches <nl> + CHECK_EQ ( 0 , buffer2 . buffer_ - > byte_length ( ) ) ; <nl> + CHECK_NE ( * buffer2 . buffer_ , memory_object - > array_buffer ( ) ) ; <nl> + } <nl> + Cleanup ( ) ; <nl> + } <nl> + <nl> + TEST ( Run_WasmModule_Buffer_Externalized_GrowMemMemSize ) { <nl> + { <nl> + Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < JSArrayBuffer > buffer ; <nl> + CHECK ( wasm : : NewArrayBuffer ( isolate , 16 * kWasmPageSize ) . ToHandle ( & buffer ) ) ; <nl> + Handle < WasmMemoryObject > mem_obj = <nl> + WasmMemoryObject : : New ( isolate , buffer , 100 ) ; <nl> + auto const contents = v8 : : Utils : : ToLocal ( buffer ) - > Externalize ( ) ; <nl> + int32_t result = WasmMemoryObject : : Grow ( isolate , mem_obj , 0 ) ; <nl> + CHECK_EQ ( 16 , result ) ; <nl> + constexpr bool is_wasm_memory = true ; <nl> + const JSArrayBuffer : : Allocation allocation { contents . AllocationBase ( ) , <nl> + contents . AllocationLength ( ) , <nl> + contents . Data ( ) , is_wasm_memory } ; <nl> + JSArrayBuffer : : FreeBackingStore ( isolate , allocation ) ; <nl> + } <nl> + Cleanup ( ) ; <nl> + } <nl> + <nl> + TEST ( Run_WasmModule_Buffer_Externalized_Detach ) { <nl> + { <nl> + / / Regression test for <nl> + / / https : / / bugs . chromium . org / p / chromium / issues / detail ? id = 731046 <nl> + Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < JSArrayBuffer > buffer ; <nl> + CHECK ( wasm : : NewArrayBuffer ( isolate , 16 * kWasmPageSize ) . ToHandle ( & buffer ) ) ; <nl> + auto const contents = v8 : : Utils : : ToLocal ( buffer ) - > Externalize ( ) ; <nl> + wasm : : DetachMemoryBuffer ( isolate , buffer , true ) ; <nl> + constexpr bool is_wasm_memory = true ; <nl> + const JSArrayBuffer : : Allocation allocation { contents . AllocationBase ( ) , <nl> + contents . AllocationLength ( ) , <nl> + contents . Data ( ) , is_wasm_memory } ; <nl> + JSArrayBuffer : : FreeBackingStore ( isolate , allocation ) ; <nl> + } <nl> + Cleanup ( ) ; <nl> + } <nl> + <nl> + TEST ( Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree ) { <nl> + / / Regresion test for https : / / crbug . com / 813876 <nl> + Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < JSArrayBuffer > buffer ; <nl> + CHECK ( wasm : : NewArrayBuffer ( isolate , 16 * kWasmPageSize ) . ToHandle ( & buffer ) ) ; <nl> + Handle < WasmMemoryObject > mem = WasmMemoryObject : : New ( isolate , buffer , 128 ) ; <nl> + auto contents = v8 : : Utils : : ToLocal ( buffer ) - > Externalize ( ) ; <nl> + WasmMemoryObject : : Grow ( isolate , mem , 0 ) ; <nl> + constexpr bool is_wasm_memory = true ; <nl> + JSArrayBuffer : : FreeBackingStore ( <nl> + isolate , JSArrayBuffer : : Allocation ( contents . AllocationBase ( ) , <nl> + contents . 
AllocationLength ( ) , <nl> + contents . Data ( ) , is_wasm_memory ) ) ; <nl> + / / Make sure we can write to the buffer without crashing <nl> + uint32_t * int_buffer = <nl> + reinterpret_cast < uint32_t * > ( mem - > array_buffer ( ) . backing_store ( ) ) ; <nl> + int_buffer [ 0 ] = 0 ; <nl> + } <nl> + <nl> + # if V8_TARGET_ARCH_64_BIT <nl> + TEST ( Run_WasmModule_Reclaim_Memory ) { <nl> + / / Make sure we can allocate memories without running out of address space . <nl> + Isolate * isolate = CcTest : : InitIsolateOnce ( ) ; <nl> + Handle < JSArrayBuffer > buffer ; <nl> + for ( int i = 0 ; i < 256 ; + + i ) { <nl> + HandleScope scope ( isolate ) ; <nl> + CHECK ( NewArrayBuffer ( isolate , kWasmPageSize ) . ToHandle ( & buffer ) ) ; <nl> + } <nl> + } <nl> + # endif <nl> + <nl> TEST ( AtomicOpDisassembly ) { <nl> { <nl> EXPERIMENTAL_FLAG_SCOPE ( threads ) ; <nl> mmm a / test / cctest / wasm / test - wasm - serialization . cc <nl> ppp b / test / cctest / wasm / test - wasm - serialization . cc <nl> <nl> # include " src / utils / version . h " <nl> # include " src / wasm / module - decoder . h " <nl> # include " src / wasm / wasm - engine . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> # include " src / wasm / wasm - module - builder . h " <nl> # include " src / wasm / wasm - module . h " <nl> # include " src / wasm / wasm - objects - inl . h " <nl> mmm a / test / cctest / wasm / wasm - run - utils . cc <nl> ppp b / test / cctest / wasm / wasm - run - utils . cc <nl> <nl> # include " src / wasm / graph - builder - interface . h " <nl> # include " src / wasm / module - compiler . h " <nl> # include " src / wasm / wasm - import - wrapper - cache . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> # include " src / wasm / wasm - objects - inl . h " <nl> <nl> namespace v8 { <nl> byte * TestingModuleBuilder : : AddMemory ( uint32_t size , SharedFlag shared ) { <nl> CHECK_NULL ( mem_start_ ) ; <nl> CHECK_EQ ( 0 , mem_size_ ) ; <nl> DCHECK ( ! instance_object_ - > has_memory_object ( ) ) ; <nl> - uint32_t initial_pages = RoundUp ( size , kWasmPageSize ) / kWasmPageSize ; <nl> - uint32_t maximum_pages = ( test_module_ - > maximum_pages ! = 0 ) <nl> - ? test_module_ - > maximum_pages <nl> - : initial_pages ; <nl> + DCHECK_IMPLIES ( test_module_ - > origin = = kWasmOrigin , <nl> + size % kWasmPageSize = = 0 ) ; <nl> test_module_ - > has_memory = true ; <nl> + uint32_t max_size = <nl> + ( test_module_ - > maximum_pages ! = 0 ) ? test_module_ - > maximum_pages : size ; <nl> + uint32_t alloc_size = RoundUp ( size , kWasmPageSize ) ; <nl> + Handle < JSArrayBuffer > new_buffer ; <nl> + if ( shared = = SharedFlag : : kShared ) { <nl> + CHECK ( NewSharedArrayBuffer ( isolate_ , alloc_size , max_size ) <nl> + . ToHandle ( & new_buffer ) ) ; <nl> + } else { <nl> + CHECK ( NewArrayBuffer ( isolate_ , alloc_size ) . ToHandle ( & new_buffer ) ) ; <nl> + } <nl> + CHECK ( ! new_buffer . is_null ( ) ) ; <nl> + mem_start_ = reinterpret_cast < byte * > ( new_buffer - > backing_store ( ) ) ; <nl> + mem_size_ = size ; <nl> + CHECK ( size = = 0 | | mem_start_ ) ; <nl> + memset ( mem_start_ , 0 , size ) ; <nl> <nl> / / Create the WasmMemoryObject . <nl> Handle < WasmMemoryObject > memory_object = <nl> - WasmMemoryObject : : New ( isolate_ , initial_pages , maximum_pages , shared ) <nl> - . 
ToHandleChecked ( ) ; <nl> + WasmMemoryObject : : New ( isolate_ , new_buffer , max_size ) ; <nl> instance_object_ - > set_memory_object ( * memory_object ) ; <nl> - <nl> - mem_start_ = <nl> - reinterpret_cast < byte * > ( memory_object - > array_buffer ( ) . backing_store ( ) ) ; <nl> - mem_size_ = size ; <nl> - CHECK ( size = = 0 | | mem_start_ ) ; <nl> - <nl> WasmMemoryObject : : AddInstance ( isolate_ , memory_object , instance_object_ ) ; <nl> / / TODO ( wasm ) : Delete the following two lines when test - run - wasm will use a <nl> / / multiple of kPageSize as memory size . At the moment , the effect of these <nl> deleted file mode 100644 <nl> index 31e96f8be3f . . 00000000000 <nl> mmm a / test / mjsunit / wasm / gc - memory . js <nl> ppp / dev / null <nl> <nl> - / / Copyright 2019 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - let kPageSize = 65536 ; <nl> - <nl> - function allocMems ( count , initial , maximum ) { <nl> - print ( ` alloc $ { count } ` ) ; <nl> - let result = [ ] ; <nl> - for ( let i = 0 ; i < count ; i + + ) { <nl> - print ( ` memory # $ { i } ( initial = $ { initial } , maximum = $ { maximum } ) . . . ` ) ; <nl> - result . push ( new WebAssembly . Memory ( { initial : initial , maximum : maximum } ) ) ; <nl> - } <nl> - return result ; <nl> - } <nl> - <nl> - function check ( mems , initial ) { <nl> - for ( m of mems ) { <nl> - assertEquals ( initial * kPageSize , m . buffer . byteLength ) ; <nl> - } <nl> - } <nl> - <nl> - function test ( count , initial , maximum ) { <nl> - let mems = allocMems ( count , initial , maximum ) ; <nl> - check ( mems , initial ) ; <nl> - } <nl> - <nl> - test ( 1 , 1 , 1 ) ; <nl> - test ( 1 , 1 , 2 ) ; <nl> - test ( 1 , 1 , 3 ) ; <nl> - test ( 1 , 1 , 4 ) ; <nl> - <nl> - test ( 2 , 1 , 1 ) ; <nl> - test ( 2 , 1 , 2 ) ; <nl> - test ( 2 , 1 , 3 ) ; <nl> - test ( 2 , 1 , 4 ) ; <nl> - <nl> - test ( 1 , 1 , undefined ) ; <nl> - test ( 2 , 1 , undefined ) ; <nl> - test ( 3 , 1 , undefined ) ; <nl> - test ( 4 , 1 , undefined ) ; <nl> mmm a / test / unittests / compiler / js - typed - lowering - unittest . cc <nl> ppp b / test / unittests / compiler / js - typed - lowering - unittest . cc <nl> class JSTypedLoweringTest : public TypedGraphTest { <nl> return reducer . Reduce ( node ) ; <nl> } <nl> <nl> + Handle < JSArrayBuffer > NewArrayBuffer ( void * bytes , size_t byte_length ) { <nl> + Handle < JSArrayBuffer > buffer = <nl> + factory ( ) - > NewJSArrayBuffer ( SharedFlag : : kNotShared ) ; <nl> + JSArrayBuffer : : Setup ( buffer , isolate ( ) , true , bytes , byte_length ) ; <nl> + return buffer ; <nl> + } <nl> + <nl> JSOperatorBuilder * javascript ( ) { return & javascript_ ; } <nl> <nl> private : <nl> mmm a / test / unittests / objects / value - serializer - unittest . cc <nl> ppp b / test / unittests / objects / value - serializer - unittest . cc <nl> <nl> # include " include / v8 . h " <nl> # include " src / api / api - inl . h " <nl> # include " src / base / build_config . h " <nl> - # include " src / objects / backing - store . h " <nl> # include " src / objects / objects - inl . h " <nl> # include " src / wasm / wasm - objects . h " <nl> # include " test / unittests / test - utils . 
h " <nl> class ValueSerializerTestWithSharedArrayBufferClone <nl> ValueSerializerTestWithSharedArrayBufferClone ( ) <nl> : serializer_delegate_ ( this ) , deserializer_delegate_ ( this ) { } <nl> <nl> - void InitializeData ( const std : : vector < uint8_t > & data , bool is_wasm_memory ) { <nl> + void InitializeData ( const std : : vector < uint8_t > & data ) { <nl> data_ = data ; <nl> { <nl> Context : : Scope scope ( serialization_context ( ) ) ; <nl> input_buffer_ = <nl> - NewSharedArrayBuffer ( data_ . data ( ) , data_ . size ( ) , is_wasm_memory ) ; <nl> + SharedArrayBuffer : : New ( isolate ( ) , data_ . data ( ) , data_ . size ( ) ) ; <nl> } <nl> { <nl> Context : : Scope scope ( deserialization_context ( ) ) ; <nl> output_buffer_ = <nl> - NewSharedArrayBuffer ( data_ . data ( ) , data_ . size ( ) , is_wasm_memory ) ; <nl> + SharedArrayBuffer : : New ( isolate ( ) , data_ . data ( ) , data_ . size ( ) ) ; <nl> } <nl> } <nl> <nl> const Local < SharedArrayBuffer > & input_buffer ( ) { return input_buffer_ ; } <nl> const Local < SharedArrayBuffer > & output_buffer ( ) { return output_buffer_ ; } <nl> <nl> - Local < SharedArrayBuffer > NewSharedArrayBuffer ( void * data , size_t byte_length , <nl> - bool is_wasm_memory ) { <nl> - if ( is_wasm_memory ) { <nl> - / / TODO ( titzer ) : there is no way to create Wasm memory backing stores <nl> - / / through the API , or to create a shared array buffer whose backing <nl> - / / store is wasm memory , so use the internal API . <nl> - DCHECK_EQ ( 0 , byte_length % i : : wasm : : kWasmPageSize ) ; <nl> - auto pages = byte_length / i : : wasm : : kWasmPageSize ; <nl> - auto i_isolate = reinterpret_cast < i : : Isolate * > ( isolate ( ) ) ; <nl> - auto backing_store = i : : BackingStore : : AllocateWasmMemory ( <nl> - i_isolate , pages , pages , i : : SharedFlag : : kShared ) ; <nl> - memcpy ( backing_store - > buffer_start ( ) , data , byte_length ) ; <nl> - i : : Handle < i : : JSArrayBuffer > buffer = <nl> - i_isolate - > factory ( ) - > NewJSSharedArrayBuffer ( ) ; <nl> - buffer - > Attach ( std : : move ( backing_store ) ) ; <nl> - return Utils : : ToLocalShared ( buffer ) ; <nl> - } else { <nl> - return SharedArrayBuffer : : New ( isolate ( ) , data , byte_length ) ; <nl> - } <nl> - } <nl> - <nl> static void SetUpTestCase ( ) { <nl> flag_was_enabled_ = i : : FLAG_harmony_sharedarraybuffer ; <nl> i : : FLAG_harmony_sharedarraybuffer = true ; <nl> bool ValueSerializerTestWithSharedArrayBufferClone : : flag_was_enabled_ = false ; <nl> <nl> TEST_F ( ValueSerializerTestWithSharedArrayBufferClone , <nl> RoundTripSharedArrayBufferClone ) { <nl> - InitializeData ( { 0x00 , 0x01 , 0x80 , 0xFF } , false ) ; <nl> + InitializeData ( { 0x00 , 0x01 , 0x80 , 0xFF } ) ; <nl> <nl> EXPECT_CALL ( serializer_delegate_ , <nl> GetSharedArrayBufferId ( isolate ( ) , input_buffer ( ) ) ) <nl> TEST_F ( ValueSerializerTestWithSharedArrayBufferClone , <nl> <nl> std : : vector < uint8_t > data = { 0x00 , 0x01 , 0x80 , 0xFF } ; <nl> data . resize ( 65536 ) ; <nl> - InitializeData ( data , true ) ; <nl> + InitializeData ( data ) ; <nl> <nl> EXPECT_CALL ( serializer_delegate_ , <nl> GetSharedArrayBufferId ( isolate ( ) , input_buffer ( ) ) ) <nl> mmm a / test / unittests / wasm / trap - handler - x64 - unittest . cc <nl> ppp b / test / unittests / wasm / trap - handler - x64 - unittest . cc <nl> <nl> # include " src / codegen / assembler - inl . h " <nl> # include " src / codegen / macro - assembler - inl . h " <nl> # include " src / execution / simulator . 
h " <nl> - # include " src / objects / backing - store . h " <nl> # include " src / trap - handler / trap - handler . h " <nl> # include " src / utils / allocation . h " <nl> # include " src / utils / vector . h " <nl> # include " src / wasm / wasm - engine . h " <nl> + # include " src / wasm / wasm - memory . h " <nl> <nl> # include " test / common / assembler - tester . h " <nl> # include " test / unittests / test - utils . h " <nl> class TrapHandlerTest : public TestWithIsolate , <nl> public : : testing : : WithParamInterface < TrapHandlerStyle > { <nl> protected : <nl> void SetUp ( ) override { <nl> - backing_store_ = BackingStore : : AllocateWasmMemory ( i_isolate ( ) , 1 , 1 , <nl> - SharedFlag : : kNotShared ) ; <nl> - CHECK ( backing_store_ ) ; <nl> - CHECK ( backing_store_ - > has_guard_regions ( ) ) ; <nl> - / / The allocated backing store ends with a guard page . <nl> - crash_address_ = reinterpret_cast < Address > ( backing_store_ - > buffer_start ( ) ) + <nl> - backing_store_ - > byte_length ( ) + 32 ; <nl> + void * base = nullptr ; <nl> + size_t length = 0 ; <nl> + accessible_memory_start_ = <nl> + i_isolate ( ) <nl> + - > wasm_engine ( ) <nl> + - > memory_tracker ( ) <nl> + - > TryAllocateBackingStoreForTesting ( <nl> + i_isolate ( ) - > heap ( ) , 1 * kWasmPageSize , & base , & length ) ; <nl> + memory_buffer_ = <nl> + base : : AddressRegion ( reinterpret_cast < Address > ( base ) , length ) ; <nl> + <nl> + / / The allocated memory buffer ends with a guard page . <nl> + crash_address_ = memory_buffer_ . end ( ) - 32 ; <nl> / / Allocate a buffer for the generated code . <nl> buffer_ = AllocateAssemblerBuffer ( AssemblerBase : : kMinimalBufferSize , <nl> GetRandomMmapAddr ( ) ) ; <nl> class TrapHandlerTest : public TestWithIsolate , <nl> CHECK ( ! GetThreadInWasmFlag ( ) ) ; <nl> buffer_ . reset ( ) ; <nl> recovery_buffer_ . reset ( ) ; <nl> - backing_store_ . reset ( ) ; <nl> + <nl> + / / Free the allocated backing store . <nl> + i_isolate ( ) - > wasm_engine ( ) - > memory_tracker ( ) - > FreeBackingStoreForTesting ( <nl> + memory_buffer_ , accessible_memory_start_ ) ; <nl> <nl> / / Clean up the trap handler <nl> trap_handler : : RemoveTrapHandler ( ) ; <nl> class TrapHandlerTest : public TestWithIsolate , <nl> <nl> bool test_handler_executed ( ) { return g_test_handler_executed ; } <nl> <nl> - / / The backing store used for testing the trap handler . <nl> - std : : unique_ptr < BackingStore > backing_store_ ; <nl> - <nl> + / / Allocated memory which corresponds to wasm memory with guard regions . <nl> + base : : AddressRegion memory_buffer_ ; <nl> / / Address within the guard region of the wasm memory . Accessing this memory <nl> / / address causes a signal or exception . <nl> Address crash_address_ ; <nl> + / / The start of the accessible region in the allocated memory . This pointer is <nl> + / / needed to de - register the memory from the wasm memory tracker again . <nl> + void * accessible_memory_start_ ; <nl> <nl> / / Buffer for generated code . <nl> std : : unique_ptr < TestingAssemblerBuffer > buffer_ ; <nl> mmm a / test / unittests / wasm / wasm - code - manager - unittest . cc <nl> ppp b / test / unittests / wasm / wasm - code - manager - unittest . cc <nl> <nl> # include " src / wasm / jump - table - assembler . h " <nl> # include " src / wasm / wasm - code - manager . h " <nl> # include " src / wasm / wasm - engine . h " <nl> + # include " src / wasm / wasm - memory . 
h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> | Revert " Reland " [ arraybuffer ] Rearchitect backing store ownership " " | v8/v8 | 6e0473f343da2a5c69afab8461ef5427d3f17398 | 2019-07-18T15:56:45Z |
mmm a / include / swift / SIL / SILGlobalVariable . h <nl> ppp b / include / swift / SIL / SILGlobalVariable . h <nl> SILFunction * getCalleeOfOnceCall ( BuiltinInst * BI ) ; <nl> / / / <nl> / / / Given an addressor , AddrF , find the call to the global initializer if <nl> / / / present , otherwise return null . If an initializer is returned , then <nl> - / / / ` CallsToOnce ` is initialized to the corresponding builtin " once " call . <nl> + / / / ` CallToOnce ` is initialized to the corresponding builtin " once " call . <nl> SILFunction * findInitializer ( SILModule * Module , SILFunction * AddrF , <nl> BuiltinInst * & CallToOnce ) ; <nl> <nl> | Merge remote - tracking branch ' origin / master ' into master - next | apple/swift | e25df4000dd331a60c66afbd10589ffba40da600 | 2018-06-28T16:09:28Z |
mmm a / include / swift / Parse / Parser . h <nl> ppp b / include / swift / Parse / Parser . h <nl> class Parser { <nl> bool periodHasKeyPathBehavior , <nl> bool & hasBindOptional ) ; <nl> ParserResult < Expr > parseExprPostfix ( Diag < > ID , bool isExprBasic ) ; <nl> - ParserResult < Expr > parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) ; <nl> + ParserResult < Expr > parseExprPrimary ( Diag < > ID , bool isExprBasic ) ; <nl> ParserResult < Expr > parseExprUnary ( Diag < > ID , bool isExprBasic ) ; <nl> ParserResult < Expr > parseExprKeyPathObjC ( ) ; <nl> ParserResult < Expr > parseExprKeyPath ( ) ; <nl> mmm a / lib / Parse / ParseExpr . cpp <nl> ppp b / lib / Parse / ParseExpr . cpp <nl> Parser : : parseExprPostfixSuffix ( ParserResult < Expr > Result , bool isExprBasic , <nl> <nl> / / / parseExprPostfix <nl> / / / <nl> - / / / expr - literal : <nl> - / / / integer_literal <nl> - / / / floating_literal <nl> - / / / string_literal <nl> - / / / nil <nl> - / / / true <nl> - / / / false <nl> - / / / # file <nl> - / / / # line <nl> - / / / # column <nl> - / / / # function <nl> - / / / # dsohandle <nl> - / / / <nl> - / / / expr - primary : <nl> - / / / expr - literal <nl> - / / / expr - identifier expr - call - suffix ? <nl> - / / / expr - closure <nl> - / / / expr - anon - closure - argument <nl> - / / / expr - delayed - identifier <nl> - / / / expr - paren <nl> - / / / expr - super <nl> - / / / expr - discard <nl> - / / / expr - selector <nl> - / / / <nl> - / / / expr - delayed - identifier : <nl> - / / / ' . ' identifier <nl> - / / / <nl> - / / / expr - discard : <nl> - / / / ' _ ' <nl> - / / / <nl> / / / expr - dot : <nl> / / / expr - postfix ' . ' ' type ' <nl> / / / expr - postfix ' . ' ( identifier | keyword ) generic - args ? expr - call - suffix ? <nl> Parser : : parseExprPostfixSuffix ( ParserResult < Expr > Result , bool isExprBasic , <nl> / / / <nl> ParserResult < Expr > Parser : : parseExprPostfix ( Diag < > ID , bool isExprBasic ) { <nl> SyntaxParsingContext ExprContext ( SyntaxContext , SyntaxContextKind : : Expr ) ; <nl> - auto Result = parseExprPostfixWithoutSuffix ( ID , isExprBasic ) ; <nl> + auto Result = parseExprPrimary ( ID , isExprBasic ) ; <nl> / / If we had a parse error , don ' t attempt to parse suffixes . <nl> if ( Result . isParseError ( ) ) <nl> return Result ; <nl> ParserResult < Expr > Parser : : parseExprPostfix ( Diag < > ID , bool isExprBasic ) { <nl> return Result ; <nl> } <nl> <nl> - ParserResult < Expr > <nl> - Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> + / / / parseExprPrimary <nl> + / / / <nl> + / / / expr - literal : <nl> + / / / integer_literal <nl> + / / / floating_literal <nl> + / / / string_literal <nl> + / / / nil <nl> + / / / true <nl> + / / / false <nl> + / / / # file <nl> + / / / # line <nl> + / / / # column <nl> + / / / # function <nl> + / / / # dsohandle <nl> + / / / <nl> + / / / expr - delayed - identifier : <nl> + / / / ' . ' identifier <nl> + / / / <nl> + / / / expr - discard : <nl> + / / / ' _ ' <nl> + / / / <nl> + / / / expr - primary : <nl> + / / / expr - literal <nl> + / / / expr - identifier expr - call - suffix ? 
<nl> + / / / expr - closure <nl> + / / / expr - anon - closure - argument <nl> + / / / expr - delayed - identifier <nl> + / / / expr - paren <nl> + / / / expr - super <nl> + / / / expr - discard <nl> + / / / expr - selector <nl> + / / / <nl> + ParserResult < Expr > Parser : : parseExprPrimary ( Diag < > ID , bool isExprBasic ) { <nl> SyntaxParsingContext ExprContext ( SyntaxContext , SyntaxContextKind : : Expr ) ; <nl> - ParserResult < Expr > Result ; <nl> switch ( Tok . getKind ( ) ) { <nl> - case tok : : pound_keyPath : <nl> - return parseExprKeyPathObjC ( ) ; <nl> case tok : : integer_literal : { <nl> StringRef Text = copyAndStripUnderscores ( Context , Tok . getText ( ) ) ; <nl> SourceLoc Loc = consumeToken ( tok : : integer_literal ) ; <nl> - SyntaxContext - > setCreateSyntax ( SyntaxKind : : IntegerLiteralExpr ) ; <nl> - Result = makeParserResult ( new ( Context ) IntegerLiteralExpr ( Text , Loc , <nl> - / * Implicit = * / false ) ) ; <nl> - break ; <nl> + ExprContext . setCreateSyntax ( SyntaxKind : : IntegerLiteralExpr ) ; <nl> + return makeParserResult ( new ( Context ) <nl> + IntegerLiteralExpr ( Text , Loc , <nl> + / * Implicit = * / false ) ) ; <nl> } <nl> case tok : : floating_literal : { <nl> StringRef Text = copyAndStripUnderscores ( Context , Tok . getText ( ) ) ; <nl> SourceLoc Loc = consumeToken ( tok : : floating_literal ) ; <nl> - SyntaxContext - > setCreateSyntax ( SyntaxKind : : FloatLiteralExpr ) ; <nl> - Result = makeParserResult ( new ( Context ) FloatLiteralExpr ( Text , Loc , <nl> + ExprContext . setCreateSyntax ( SyntaxKind : : FloatLiteralExpr ) ; <nl> + return makeParserResult ( new ( Context ) FloatLiteralExpr ( Text , Loc , <nl> / * Implicit = * / false ) ) ; <nl> - break ; <nl> } <nl> case tok : : at_sign : <nl> / / Objective - C programmers habitually type @ " foo " , so recover gracefully <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> LLVM_FALLTHROUGH ; <nl> <nl> case tok : : string_literal : / / " foo " <nl> - Result = parseExprStringLiteral ( ) ; <nl> - break ; <nl> + return parseExprStringLiteral ( ) ; <nl> <nl> - case tok : : kw_nil : { <nl> - SyntaxParsingContext NilContext ( SyntaxContext , SyntaxKind : : NilLiteralExpr ) ; <nl> - Result = makeParserResult ( <nl> - new ( Context ) NilLiteralExpr ( consumeToken ( tok : : kw_nil ) ) ) ; <nl> - break ; <nl> - } <nl> + case tok : : kw_nil : <nl> + ExprContext . setCreateSyntax ( SyntaxKind : : NilLiteralExpr ) ; <nl> + return makeParserResult ( new ( Context ) <nl> + NilLiteralExpr ( consumeToken ( tok : : kw_nil ) ) ) ; <nl> <nl> case tok : : kw_true : <nl> case tok : : kw_false : { <nl> - SyntaxParsingContext BoolContext ( SyntaxContext , <nl> - SyntaxKind : : BooleanLiteralExpr ) ; <nl> + ExprContext . setCreateSyntax ( SyntaxKind : : BooleanLiteralExpr ) ; <nl> bool isTrue = Tok . is ( tok : : kw_true ) ; <nl> - Result = makeParserResult ( <nl> - new ( Context ) BooleanLiteralExpr ( isTrue , consumeToken ( ) ) ) ; <nl> - break ; <nl> + return makeParserResult ( new ( Context ) <nl> + BooleanLiteralExpr ( isTrue , consumeToken ( ) ) ) ; <nl> } <nl> <nl> case tok : : kw___FILE__ : <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> case tok : : pound_dsohandle : SKind = SyntaxKind : : PoundDsohandleExpr ; break ; <nl> default : break ; <nl> } <nl> - SyntaxParsingContext MagicIdCtx ( SyntaxContext , SKind ) ; <nl> + ExprContext . setCreateSyntax ( SKind ) ; <nl> auto Kind = getMagicIdentifierLiteralKind ( Tok . 
getKind ( ) ) ; <nl> SourceLoc Loc = consumeToken ( ) ; <nl> - Result = makeParserResult ( <nl> - new ( Context ) MagicIdentifierLiteralExpr ( Kind , Loc , / * implicit = * / false ) ) ; <nl> - break ; <nl> + return makeParserResult ( new ( Context ) MagicIdentifierLiteralExpr ( <nl> + Kind , Loc , / * implicit = * / false ) ) ; <nl> } <nl> <nl> case tok : : identifier : / / foo <nl> / / Attempt to parse for ' type ( of : < expr > ) ' . <nl> if ( canParseTypeOf ( * this ) ) { <nl> - Result = parseExprTypeOf ( ) ; <nl> - break ; <nl> + return parseExprTypeOf ( ) ; <nl> } <nl> <nl> / / If we are parsing a refutable pattern and are inside a let / var pattern , <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> SyntaxFactory : : makeUnresolvedPatternExpr ( PatternNode ) ; <nl> SyntaxContext - > addSyntax ( ExprNode ) ; <nl> } <nl> - Result = makeParserResult ( new ( Context ) UnresolvedPatternExpr ( pattern ) ) ; <nl> - break ; <nl> + return makeParserResult ( new ( Context ) UnresolvedPatternExpr ( pattern ) ) ; <nl> } <nl> <nl> LLVM_FALLTHROUGH ; <nl> case tok : : kw_self : / / self <nl> case tok : : kw_Self : / / Self <nl> - Result = makeParserResult ( parseExprIdentifier ( ) ) ; <nl> - <nl> - / / If there is an expr - call - suffix , parse it and form a call . <nl> - if ( Tok . isFollowingLParen ( ) ) { <nl> - Result = parseExprCallSuffix ( Result , isExprBasic ) ; <nl> - SyntaxContext - > createNodeInPlace ( SyntaxKind : : FunctionCallExpr ) ; <nl> - break ; <nl> - } <nl> - <nl> - break ; <nl> + return makeParserResult ( parseExprIdentifier ( ) ) ; <nl> <nl> case tok : : kw_Any : { / / Any <nl> - SyntaxParsingContext ExprContext ( SyntaxContext , SyntaxKind : : TypeExpr ) ; <nl> + ExprContext . setCreateSyntax ( SyntaxKind : : TypeExpr ) ; <nl> auto TyR = parseAnyType ( ) ; <nl> - auto expr = new ( Context ) TypeExpr ( TypeLoc ( TyR . get ( ) ) ) ; <nl> - Result = makeParserResult ( expr ) ; <nl> - break ; <nl> + return makeParserResult ( new ( Context ) TypeExpr ( TypeLoc ( TyR . get ( ) ) ) ) ; <nl> } <nl> <nl> case tok : : dollarident : / / $ 1 <nl> - Result = makeParserResult ( parseExprAnonClosureArg ( ) ) ; <nl> - break ; <nl> + return makeParserResult ( parseExprAnonClosureArg ( ) ) ; <nl> <nl> - / / If the next token is ' _ ' , parse a discard expression . <nl> - case tok : : kw__ : { <nl> - SyntaxParsingContext DAContext ( SyntaxContext , <nl> - SyntaxKind : : DiscardAssignmentExpr ) ; <nl> - Result = makeParserResult ( <nl> + case tok : : kw__ : / / _ <nl> + ExprContext . setCreateSyntax ( SyntaxKind : : DiscardAssignmentExpr ) ; <nl> + return makeParserResult ( <nl> new ( Context ) DiscardAssignmentExpr ( consumeToken ( ) , / * Implicit = * / false ) ) ; <nl> - break ; <nl> - } <nl> <nl> case tok : : pound_selector : / / expr - selector <nl> - Result = parseExprSelector ( ) ; <nl> - break ; <nl> + return parseExprSelector ( ) ; <nl> + <nl> + case tok : : pound_keyPath : <nl> + return parseExprKeyPathObjC ( ) ; <nl> <nl> case tok : : l_brace : / / expr - closure <nl> - Result = parseExprClosure ( ) ; <nl> - break ; <nl> + return parseExprClosure ( ) ; <nl> <nl> case tok : : period : / / = . foo <nl> case tok : : period_prefix : { / / . 
foo <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> FltText = copyAndStripUnderscores ( Context , FltText ) ; <nl> <nl> consumeToken ( tok : : integer_literal ) ; <nl> - Result = makeParserResult ( new ( Context ) <nl> - FloatLiteralExpr ( FltText , DotLoc , <nl> - / * Implicit = * / false ) ) ; <nl> - break ; <nl> + return makeParserResult ( new ( Context ) <nl> + FloatLiteralExpr ( FltText , DotLoc , <nl> + / * Implicit = * / false ) ) ; <nl> } <nl> <nl> DeclName Name ; <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> auto Expr = UnresolvedMemberExpr : : create ( <nl> Context , DotLoc , DeclNameLoc ( DotLoc . getAdvancedLoc ( 1 ) ) , <nl> Context . getIdentifier ( " _ " ) , / * implicit = * / false ) ; <nl> - Result = makeParserResult ( Expr ) ; <nl> + auto Result = makeParserResult ( Expr ) ; <nl> if ( CodeCompletion ) { <nl> std : : vector < StringRef > Identifiers ; <nl> <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> return nullptr ; <nl> <nl> SyntaxContext - > createNodeInPlace ( SyntaxKind : : FunctionCallExpr ) ; <nl> - Result = makeParserResult ( <nl> + return makeParserResult ( <nl> status , <nl> UnresolvedMemberExpr : : create ( Context , DotLoc , NameLoc , Name , <nl> lParenLoc , args , argLabels , <nl> argLabelLocs , rParenLoc , <nl> trailingClosure , <nl> / * implicit = * / false ) ) ; <nl> - if ( Result . hasCodeCompletion ( ) ) <nl> - return Result ; <nl> - break ; <nl> } <nl> <nl> / / Check for a trailing closure , if allowed . <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> <nl> SyntaxContext - > createNodeInPlace ( SyntaxKind : : FunctionCallExpr ) ; <nl> / / Handle . foo by just making an AST node . <nl> - Result = makeParserResult ( <nl> + return makeParserResult ( <nl> ParserStatus ( closure ) , <nl> UnresolvedMemberExpr : : create ( Context , DotLoc , NameLoc , Name , <nl> SourceLoc ( ) , { } , { } , { } , <nl> SourceLoc ( ) , closure . get ( ) , <nl> / * implicit = * / false ) ) ; <nl> - <nl> - if ( Result . hasCodeCompletion ( ) ) <nl> - return Result ; <nl> - <nl> - break ; <nl> } <nl> <nl> / / Handle . foo by just making an AST node . <nl> - Result = makeParserResult ( <nl> + return makeParserResult ( <nl> UnresolvedMemberExpr : : create ( Context , DotLoc , NameLoc , Name , <nl> / * implicit = * / false ) ) ; <nl> - break ; <nl> } <nl> <nl> - case tok : : kw_super : { / / super . foo or super [ foo ] <nl> - Result = parseExprSuper ( isExprBasic ) ; <nl> - break ; <nl> - } <nl> + case tok : : kw_super : / / super . foo or super [ foo ] <nl> + return parseExprSuper ( isExprBasic ) ; <nl> <nl> - case tok : : l_paren : { <nl> + case tok : : l_paren : <nl> / / Build a tuple expression syntax node . <nl> / / AST differentiates paren and tuple expression where the former allows <nl> / / only one element without label . However , libSyntax tree doesn ' t have this <nl> / / differentiation . A tuple expression node in libSyntax can have a single <nl> / / element without label . <nl> - SyntaxParsingContext TupleContext ( SyntaxContext , SyntaxKind : : TupleExpr ) ; <nl> - Result = parseExprList ( tok : : l_paren , tok : : r_paren , <nl> - SyntaxKind : : TupleElementList ) ; <nl> - break ; <nl> - } <nl> + ExprContext . 
setCreateSyntax ( SyntaxKind : : TupleExpr ) ; <nl> + return parseExprList ( tok : : l_paren , tok : : r_paren , <nl> + SyntaxKind : : TupleElementList ) ; <nl> + <nl> case tok : : l_square : <nl> - Result = parseExprCollection ( ) ; <nl> - break ; <nl> + return parseExprCollection ( ) ; <nl> <nl> case tok : : pound_available : { <nl> / / For better error recovery , parse but reject # available in an expr <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> return makeParserCodeCompletionStatus ( ) ; <nl> if ( res . isParseError ( ) | | res . isNull ( ) ) <nl> return nullptr ; <nl> - Result = makeParserResult ( <nl> - new ( Context ) ErrorExpr ( res . get ( ) - > getSourceRange ( ) ) ) ; <nl> - break ; <nl> + return makeParserResult ( new ( Context ) <nl> + ErrorExpr ( res . get ( ) - > getSourceRange ( ) ) ) ; <nl> } <nl> <nl> - <nl> - # define POUND_OBJECT_LITERAL ( Name , Desc , Proto ) case tok : : pound_ # # Name : \ <nl> - Result = parseExprObjectLiteral ( ObjectLiteralExpr : : Name , isExprBasic ) ; \ <nl> - break ; <nl> + # define POUND_OBJECT_LITERAL ( Name , Desc , Proto ) \ <nl> + case tok : : pound_ # # Name : \ <nl> + return parseExprObjectLiteral ( ObjectLiteralExpr : : Name , isExprBasic ) ; <nl> # include " swift / Syntax / TokenKinds . def " <nl> <nl> - case tok : : code_complete : <nl> - Result = makeParserResult ( new ( Context ) CodeCompletionExpr ( Tok . getLoc ( ) ) ) ; <nl> + case tok : : code_complete : { <nl> + auto Result = <nl> + makeParserResult ( new ( Context ) CodeCompletionExpr ( Tok . getLoc ( ) ) ) ; <nl> Result . setHasCodeCompletion ( ) ; <nl> if ( CodeCompletion & & <nl> / / We cannot code complete anything after var / let . <nl> ( ! InVarOrLetPattern | | InVarOrLetPattern = = IVOLP_InMatchingPattern ) ) <nl> - CodeCompletion - > completePostfixExprBeginning ( dyn_cast < CodeCompletionExpr > ( <nl> - Result . get ( ) ) ) ; <nl> + CodeCompletion - > completePostfixExprBeginning ( <nl> + dyn_cast < CodeCompletionExpr > ( Result . get ( ) ) ) ; <nl> consumeToken ( tok : : code_complete ) ; <nl> - break ; <nl> + return Result ; <nl> + } <nl> <nl> case tok : : pound : <nl> if ( peekToken ( ) . is ( tok : : identifier ) & & ! peekToken ( ) . isEscapedIdentifier ( ) & & <nl> Parser : : parseExprPostfixWithoutSuffix ( Diag < > ID , bool isExprBasic ) { <nl> diagnose ( Tok , ID ) ; <nl> return nullptr ; <nl> } <nl> - <nl> - return Result ; <nl> } <nl> <nl> static StringLiteralExpr * <nl> similarity index 100 % <nl> rename from test / expr / postfix / keypath / keypath - objc . swift <nl> rename to test / expr / primary / keypath / keypath - objc . swift <nl> similarity index 100 % <nl> rename from test / expr / unary / selector / Inputs / property_helper . swift <nl> rename to test / expr / primary / selector / Inputs / property_helper . swift <nl> similarity index 100 % <nl> rename from test / expr / unary / selector / fixits . swift <nl> rename to test / expr / primary / selector / fixits . swift <nl> similarity index 100 % <nl> rename from test / expr / unary / selector / property . swift <nl> rename to test / expr / primary / selector / property . swift <nl> similarity index 100 % <nl> rename from test / expr / unary / selector / selector . swift <nl> rename to test / expr / primary / selector / selector . swift <nl> | Merge pull request from rintaro / parse - exprprimary | apple/swift | ea83df4c9debcfc38d10635820f08240e529cb28 | 2018-03-17T04:16:35Z |
mmm a / include / internal / catch_ptr . hpp <nl> ppp b / include / internal / catch_ptr . hpp <nl> namespace Catch { <nl> swap ( temp ) ; <nl> return * this ; <nl> } <nl> - Ptr & operator = ( Ptr & other ) { <nl> + Ptr & operator = ( const Ptr & other ) { <nl> Ptr temp ( other ) ; <nl> swap ( temp ) ; <nl> return * this ; <nl> mmm a / single_include / catch . hpp <nl> ppp b / single_include / catch . hpp <nl> <nl> / * <nl> - * Generated : 2012 - 08 - 31 18 : 45 : 38 . 119837 <nl> + * Generated : 2012 - 08 - 31 18 : 50 : 03 . 965736 <nl> * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> * This file has been merged from multiple headers . Please don ' t edit it directly <nl> * Copyright ( c ) 2012 Two Blue Cubes Ltd . All rights reserved . <nl> namespace Catch { <nl> swap ( temp ) ; <nl> return * this ; <nl> } <nl> - Ptr & operator = ( Ptr & other ) { <nl> + Ptr & operator = ( const Ptr & other ) { <nl> Ptr temp ( other ) ; <nl> swap ( temp ) ; <nl> return * this ; <nl> | Ptr op = takes const ref ( fixes : ) | catchorg/Catch2 | deb3ced619ccf6e36c1fcdca567a870cac26819a | 2012-08-31T17:50:46Z |
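The substantive change in this entry is a single qualifier: the copy-and-swap assignment operator now takes its operand by const reference, so assignment from const objects and temporaries compiles. A minimal self-contained C++ sketch of the same idiom follows; the IntBox class is invented for illustration and is unrelated to Catch's actual Ptr.

#include <utility>

// Minimal copy-and-swap assignment: the parameter is a const reference,
// so assignment also accepts const lvalues and temporaries.
class IntBox {
 public:
  explicit IntBox(int v = 0) : value_(new int(v)) {}
  IntBox(const IntBox& other) : value_(new int(*other.value_)) {}
  ~IntBox() { delete value_; }

  IntBox& operator=(const IntBox& other) {  // const ref, as in the fix above
    IntBox temp(other);   // copy first; *this is untouched if the copy throws
    swap(temp);           // commit by swapping with the copy
    return *this;         // temp's destructor releases the old state
  }

  void swap(IntBox& other) { std::swap(value_, other.value_); }
  int get() const { return *value_; }

 private:
  int* value_;
};

int main() {
  IntBox a(1);
  const IntBox b(2);
  a = b;          // needs operator=(const IntBox&) to bind a const source
  a = IntBox(3);  // binding a temporary also requires the const reference
  return a.get();
}

With the previous non-const parameter, neither assignment in main would compile, which appears to be the point of the fix.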
mmm a / skflow / __init__ . py <nl> ppp b / skflow / __init__ . py <nl> class TensorFlowEstimator ( BaseEstimator ) : <nl> learning_rate : Learning rate for optimizer . <nl> tf_random_seed : Random seed for TensorFlow initializers . <nl> Setting this value , allows consistency between reruns . <nl> + continue_training : when continue_training is True , once initialized <nl> + model will be continuely trained on every call of fit . <nl> " " " <nl> <nl> def __init__ ( self , model_fn , n_classes , tf_master = " " , batch_size = 32 , steps = 50 , optimizer = " SGD " , <nl> - learning_rate = 0 . 1 , tf_random_seed = 42 ) : <nl> + learning_rate = 0 . 1 , tf_random_seed = 42 , continue_training = False ) : <nl> self . n_classes = n_classes <nl> self . tf_master = tf_master <nl> self . batch_size = batch_size <nl> def __init__ ( self , model_fn , n_classes , tf_master = " " , batch_size = 32 , steps = 50 , o <nl> self . learning_rate = learning_rate <nl> self . tf_random_seed = tf_random_seed <nl> self . model_fn = model_fn <nl> + self . continue_training = continue_training <nl> + self . _initialized = False <nl> <nl> - def fit ( self , X , y ) : <nl> - " " " Builds a neural network model given provided ` model_fn ` and training <nl> - data X and y . <nl> - <nl> - Args : <nl> - X : matrix or tensor of shape [ n_samples , n_features . . . ] . Can be <nl> - iterator that returns arrays of features . The training input <nl> - samples for fitting the model . <nl> - y : vector or matrix [ n_samples ] or [ n_samples , n_outputs ] . Can be <nl> - iterator that returns array of targets . The training target values <nl> - ( class labels in classification , real numbers in regression ) . <nl> - <nl> - Returns : <nl> - Returns self . <nl> + def _setup_data_feeder ( self , X , y ) : <nl> + " " " Create data feeder , to sample inputs from dataset . <nl> + If X and y are iterators , use StreamingDataFeeder . <nl> " " " <nl> + if hasattr ( X , ' next ' ) : <nl> + assert hasattr ( y , ' next ' ) <nl> + self . _data_feeder = data_feeder . StreamingDataFeeder ( X , y , <nl> + self . n_classes , self . batch_size ) <nl> + else : <nl> + self . _data_feeder = data_feeder . DataFeeder ( X , y , <nl> + self . n_classes , self . batch_size ) <nl> + <nl> + def _setup_training ( self ) : <nl> + " " " Sets up graph , model and trainer . " " " <nl> with tf . Graph ( ) . as_default ( ) as graph : <nl> tf . set_random_seed ( self . tf_random_seed ) <nl> self . _global_step = tf . Variable ( 0 , name = " global_step " , trainable = False ) <nl> <nl> - # Create data feeder , to sample inputs from dataset . <nl> - # If X and y are iterators , use StreamingDataFeeder . <nl> - if hasattr ( X , ' next ' ) : <nl> - assert hasattr ( y , ' next ' ) <nl> - self . _data_feeder = data_feeder . StreamingDataFeeder ( X , y , <nl> - self . n_classes , self . batch_size ) <nl> - else : <nl> - self . _data_feeder = data_feeder . DataFeeder ( X , y , <nl> - self . n_classes , self . batch_size ) <nl> - <nl> # Setting up input and output placeholders . <nl> input_shape = [ None ] + self . _data_feeder . input_shape [ 1 : ] <nl> output_shape = [ None ] + self . _data_feeder . output_shape [ 1 : ] <nl> def fit ( self , X , y ) : <nl> self . _global_step , self . optimizer , self . learning_rate ) <nl> self . _session = tf . Session ( self . tf_master ) <nl> <nl> - # Initialize and train model . <nl> + def fit ( self , X , y ) : <nl> + " " " Builds a neural network model given provided ` model_fn ` and training <nl> + data X and y . 
<nl> + <nl> + Args : <nl> + X : matrix or tensor of shape [ n_samples , n_features . . . ] . Can be <nl> + iterator that returns arrays of features . The training input <nl> + samples for fitting the model . <nl> + y : vector or matrix [ n_samples ] or [ n_samples , n_outputs ] . Can be <nl> + iterator that returns array of targets . The training target values <nl> + ( class labels in classification , real numbers in regression ) . <nl> + <nl> + Returns : <nl> + Returns self . <nl> + " " " <nl> + # Sets up data feeder . <nl> + self . _setup_data_feeder ( X , y ) <nl> + if not self . continue_training or not self . _initialized : <nl> + # Sets up model and trainer . <nl> + self . _setup_training ( ) <nl> + # Initialize model parameters . <nl> self . _trainer . initialize ( self . _session ) <nl> - self . _trainer . train ( self . _session , <nl> - self . _data_feeder . get_feed_dict_fn ( self . _inp , <nl> - self . _out ) , <nl> - self . steps ) <nl> - return self <nl> + self . _initialized = True <nl> + <nl> + # Train model for given number of steps . <nl> + self . _trainer . train ( self . _session , <nl> + self . _data_feeder . get_feed_dict_fn ( self . _inp , self . _out ) , <nl> + self . steps ) <nl> + return self <nl> <nl> def _predict ( self , X ) : <nl> pred = self . _session . run ( self . _model_predictions , <nl> class TensorFlowLinearRegressor ( TensorFlowEstimator , RegressorMixin ) : <nl> " " " TensorFlow Linear Regression model . " " " <nl> <nl> def __init__ ( self , n_classes = 0 , tf_master = " " , batch_size = 32 , steps = 50 , optimizer = " SGD " , <nl> - learning_rate = 0 . 1 , tf_random_seed = 42 ) : <nl> + learning_rate = 0 . 1 , tf_random_seed = 42 , continue_training = False ) : <nl> super ( TensorFlowLinearRegressor , self ) . __init__ ( <nl> model_fn = models . linear_regression , n_classes = n_classes , <nl> tf_master = tf_master , <nl> batch_size = batch_size , steps = steps , optimizer = optimizer , <nl> - learning_rate = learning_rate , tf_random_seed = tf_random_seed ) <nl> + learning_rate = learning_rate , tf_random_seed = tf_random_seed , <nl> + continue_training = continue_training ) <nl> <nl> <nl> class TensorFlowLinearClassifier ( TensorFlowEstimator , ClassifierMixin ) : <nl> " " " TensorFlow Linear Classifier model . " " " <nl> <nl> def __init__ ( self , n_classes , tf_master = " " , batch_size = 32 , steps = 50 , optimizer = " SGD " , <nl> - learning_rate = 0 . 1 , tf_random_seed = 42 ) : <nl> + learning_rate = 0 . 1 , tf_random_seed = 42 , continue_training = False ) : <nl> super ( TensorFlowLinearClassifier , self ) . __init__ ( <nl> model_fn = models . logistic_regression , n_classes = n_classes , <nl> tf_master = tf_master , <nl> batch_size = batch_size , steps = steps , optimizer = optimizer , <nl> - learning_rate = learning_rate , tf_random_seed = tf_random_seed ) <nl> + learning_rate = learning_rate , tf_random_seed = tf_random_seed , <nl> + continue_training = continue_training ) <nl> <nl> <nl> TensorFlowRegressor = TensorFlowLinearRegressor <nl> class TensorFlowDNNClassifier ( TensorFlowEstimator , ClassifierMixin ) : <nl> learning_rate : Learning rate for optimizer . <nl> tf_random_seed : Random seed for TensorFlow initializers . <nl> Setting this value , allows consistency between reruns . <nl> - " " " <nl> + continue_training : when continue_training is True , once initialized <nl> + model will be continuely trained on every call of fit . 
<nl> + " " " <nl> <nl> def __init__ ( self , hidden_units , n_classes , tf_master = " " , batch_size = 32 , <nl> - steps = 50 , optimizer = " SGD " , learning_rate = 0 . 1 , tf_random_seed = 42 ) : <nl> + steps = 50 , optimizer = " SGD " , learning_rate = 0 . 1 , <nl> + tf_random_seed = 42 , continue_training = False ) : <nl> model_fn = models . get_dnn_model ( hidden_units , <nl> models . logistic_regression ) <nl> super ( TensorFlowDNNClassifier , self ) . __init__ ( <nl> model_fn = model_fn , <nl> n_classes = n_classes , tf_master = tf_master , <nl> batch_size = batch_size , steps = steps , optimizer = optimizer , <nl> - learning_rate = learning_rate , tf_random_seed = tf_random_seed ) <nl> + learning_rate = learning_rate , tf_random_seed = tf_random_seed , <nl> + continue_training = continue_training ) <nl> <nl> <nl> class TensorFlowDNNRegressor ( TensorFlowEstimator , ClassifierMixin ) : <nl> class TensorFlowDNNRegressor ( TensorFlowEstimator , ClassifierMixin ) : <nl> learning_rate : Learning rate for optimizer . <nl> tf_random_seed : Random seed for TensorFlow initializers . <nl> Setting this value , allows consistency between reruns . <nl> + continue_training : when continue_training is True , once initialized <nl> + model will be continuely trained on every call of fit . <nl> " " " <nl> <nl> def __init__ ( self , hidden_units , n_classes = 0 , tf_master = " " , batch_size = 32 , <nl> - steps = 50 , optimizer = " SGD " , learning_rate = 0 . 1 , tf_random_seed = 42 ) : <nl> + steps = 50 , optimizer = " SGD " , learning_rate = 0 . 1 , <nl> + tf_random_seed = 42 , continue_training = False ) : <nl> model_fn = models . get_dnn_model ( hidden_units , <nl> models . linear_regression ) <nl> super ( TensorFlowDNNRegressor , self ) . __init__ ( <nl> model_fn = model_fn , <nl> n_classes = n_classes , tf_master = tf_master , <nl> batch_size = batch_size , steps = steps , optimizer = optimizer , <nl> - learning_rate = learning_rate , tf_random_seed = tf_random_seed ) <nl> + learning_rate = learning_rate , tf_random_seed = tf_random_seed , <nl> + continue_training = continue_training ) <nl> <nl> mmm a / skflow / data_feeder . py <nl> ppp b / skflow / data_feeder . py <nl> class DataFeeder ( object ) : <nl> <nl> def __init__ ( self , X , y , n_classes , batch_size ) : <nl> self . X = check_array ( X , ensure_2d = False , <nl> - allow_nd = True , dtype = None ) <nl> - self . y = check_array ( y , ensure_2d = False , dtype = None ) <nl> + allow_nd = True , dtype = [ np . float32 , np . int64 ] ) <nl> + self . y = check_array ( y , ensure_2d = False , dtype = np . float32 ) <nl> self . n_classes = n_classes <nl> self . batch_size = batch_size <nl> self . input_shape , self . output_shape = _get_in_out_shape ( <nl> - X . shape , y . shape , n_classes , batch_size ) <nl> - self . input_dtype , self . output_dtype = X . dtype , y . dtype <nl> + self . X . shape , self . y . shape , n_classes , batch_size ) <nl> + self . input_dtype , self . output_dtype = self . X . dtype , self . y . dtype <nl> <nl> def get_feed_dict_fn ( self , input_placeholder , output_placeholder ) : <nl> " " " Returns a function , that will sample data and provide it to given <nl> def get_feed_dict_fn ( self , input_placeholder , output_placeholder ) : <nl> from X and y . <nl> " " " <nl> def _feed_dict_fn ( ) : <nl> - inp = np . zeros ( self . input_shape ) <nl> - out = np . zeros ( self . output_shape ) <nl> + inp = np . zeros ( self . input_shape , dtype = self . input_dtype ) <nl> + out = np . zeros ( self . 
output_shape , dtype = self . output_dtype ) <nl> for i in xrange ( self . batch_size ) : <nl> sample = random . randint ( 0 , self . X . shape [ 0 ] - 1 ) <nl> inp [ i , : ] = self . X [ sample , : ] <nl> def __init__ ( self , X , y , n_classes , batch_size ) : <nl> self . input_shape , self . output_shape = _get_in_out_shape ( <nl> [ 1 ] + list ( X_first_el . shape ) , <nl> [ 1 ] + list ( y_first_el . shape ) , n_classes , batch_size ) <nl> - self . input_dtype , self . output_dtype = ( <nl> - X_first_el . dtype , y_first_el . dtype ) <nl> + self . input_dtype = X_first_el . dtype <nl> + # Convert float64 to float32 , as all the parameters in the model are <nl> + # floats32 and there is a lot of benefits in using it in NNs . <nl> + if self . input_dtype = = np . float64 : <nl> + self . input_dtype = np . float32 <nl> + # Output types are floats , due to both softmaxes and regression req . <nl> + self . output_dtype = np . float32 <nl> <nl> def get_feed_dict_fn ( self , input_placeholder , output_placeholder ) : <nl> " " " Returns a function , that will sample data and provide it to given <nl> def get_feed_dict_fn ( self , input_placeholder , output_placeholder ) : <nl> from X and y . <nl> " " " <nl> def _feed_dict_fn ( ) : <nl> - inp = np . zeros ( self . input_shape ) <nl> - out = np . zeros ( self . output_shape ) <nl> + inp = np . zeros ( self . input_shape , dtype = self . input_dtype ) <nl> + out = np . zeros ( self . output_shape , dtype = self . output_dtype ) <nl> for i in xrange ( self . batch_size ) : <nl> inp [ i , : ] = self . X . next ( ) <nl> y = self . y . next ( ) <nl> mmm a / skflow / tests / test_base . py <nl> ppp b / skflow / tests / test_base . py <nl> def testIris ( self ) : <nl> score = accuracy_score ( classifier . predict ( iris . data ) , iris . target ) <nl> self . assertGreater ( score , 0 . 5 , " Failed with score = { 0 } " . format ( score ) ) <nl> <nl> + def testIrisContinueTraining ( self ) : <nl> + random . seed ( 42 ) <nl> + iris = datasets . load_iris ( ) <nl> + classifier = skflow . TensorFlowLinearClassifier ( n_classes = 3 , <nl> + continue_training = True ) <nl> + classifier . fit ( iris . data , iris . target ) <nl> + score1 = accuracy_score ( classifier . predict ( iris . data ) , iris . target ) <nl> + classifier . fit ( iris . data , iris . target ) <nl> + score2 = accuracy_score ( classifier . predict ( iris . data ) , iris . target ) <nl> + self . assertGreater ( score2 , score1 , <nl> + " Failed with score = { 0 } " . format ( score2 ) ) <nl> + <nl> def testIrisStreaming ( self ) : <nl> iris = datasets . load_iris ( ) <nl> def iris_data ( ) : <nl> | Added continue_learning flag to TensorFlowEstimator to allow continues learning when called fit multiple times . Small fixes in data feeder to match expected dtypes | tensorflow/tensorflow | 7a8c372a537eabd57c3b6e9274aaa3dc92d52825 | 2015-11-25T08:13:58Z |
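The skflow commit above adds a continue_training flag: when it is True, fit() skips rebuilding the graph and re-initializing parameters once the model has been set up, so repeated fit() calls keep training the same parameters instead of starting over. A minimal usage sketch follows; it closely mirrors the commit's own testIrisContinueTraining test and assumes skflow and scikit-learn are importable as in that test, with the accuracy comparison being illustrative rather than guaranteed.

import random

import skflow
from sklearn import datasets
from sklearn.metrics import accuracy_score

random.seed(42)
iris = datasets.load_iris()

# continue_training=True keeps the initialized graph and trainer across fit() calls.
classifier = skflow.TensorFlowLinearClassifier(n_classes=3, continue_training=True)

classifier.fit(iris.data, iris.target)
score1 = accuracy_score(classifier.predict(iris.data), iris.target)

# The second fit() continues from the previously learned parameters
# instead of re-running _setup_training() and re-initializing.
classifier.fit(iris.data, iris.target)
score2 = accuracy_score(classifier.predict(iris.data), iris.target)

# With continued training, score2 is expected to be at least as good as score1.
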
mmm a / src / bootstrapper . cc <nl> ppp b / src / bootstrapper . cc <nl> void Genesis : : InitializeExperimentalGlobal ( ) { <nl> isolate ( ) - > initial_object_prototype ( ) , <nl> Builtins : : kIllegal , true , true ) ; <nl> } <nl> + { / / - - W e a k S e t <nl> + InstallFunction ( global , " WeakSet " , JS_WEAK_SET_TYPE , JSWeakSet : : kSize , <nl> + isolate ( ) - > initial_object_prototype ( ) , <nl> + Builtins : : kIllegal , true , true ) ; <nl> + } <nl> } <nl> <nl> if ( FLAG_harmony_array_buffer ) { <nl> mmm a / src / collection . js <nl> ppp b / src / collection . js <nl> <nl> var $ Set = global . Set ; <nl> var $ Map = global . Map ; <nl> var $ WeakMap = global . WeakMap ; <nl> + var $ WeakSet = global . WeakSet ; <nl> <nl> / / Global sentinel to be used instead of undefined keys , which are not <nl> / / supported internally but required for Harmony sets and maps . <nl> SetUpMap ( ) ; <nl> <nl> function WeakMapConstructor ( ) { <nl> if ( % _IsConstructCall ( ) ) { <nl> - % WeakMapInitialize ( this ) ; <nl> + % WeakCollectionInitialize ( this ) ; <nl> } else { <nl> return new $ WeakMap ( ) ; <nl> } <nl> function WeakMapGet ( key ) { <nl> if ( ! ( IS_SPEC_OBJECT ( key ) | | IS_SYMBOL ( key ) ) ) { <nl> throw % MakeTypeError ( ' invalid_weakmap_key ' , [ this , key ] ) ; <nl> } <nl> - return % WeakMapGet ( this , key ) ; <nl> + return % WeakCollectionGet ( this , key ) ; <nl> } <nl> <nl> <nl> function WeakMapSet ( key , value ) { <nl> if ( ! ( IS_SPEC_OBJECT ( key ) | | IS_SYMBOL ( key ) ) ) { <nl> throw % MakeTypeError ( ' invalid_weakmap_key ' , [ this , key ] ) ; <nl> } <nl> - return % WeakMapSet ( this , key , value ) ; <nl> + return % WeakCollectionSet ( this , key , value ) ; <nl> } <nl> <nl> <nl> function WeakMapHas ( key ) { <nl> if ( ! ( IS_SPEC_OBJECT ( key ) | | IS_SYMBOL ( key ) ) ) { <nl> throw % MakeTypeError ( ' invalid_weakmap_key ' , [ this , key ] ) ; <nl> } <nl> - return % WeakMapHas ( this , key ) ; <nl> + return % WeakCollectionHas ( this , key ) ; <nl> } <nl> <nl> <nl> function WeakMapDelete ( key ) { <nl> if ( ! ( IS_SPEC_OBJECT ( key ) | | IS_SYMBOL ( key ) ) ) { <nl> throw % MakeTypeError ( ' invalid_weakmap_key ' , [ this , key ] ) ; <nl> } <nl> - return % WeakMapDelete ( this , key ) ; <nl> + return % WeakCollectionDelete ( this , key ) ; <nl> } <nl> <nl> <nl> function WeakMapClear ( ) { <nl> [ ' WeakMap . prototype . clear ' , this ] ) ; <nl> } <nl> / / Replace the internal table with a new empty table . <nl> - % WeakMapInitialize ( this ) ; <nl> + % WeakCollectionInitialize ( this ) ; <nl> } <nl> <nl> <nl> function SetUpWeakMap ( ) { <nl> } <nl> <nl> SetUpWeakMap ( ) ; <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + / / Harmony WeakSet <nl> + <nl> + function WeakSetConstructor ( ) { <nl> + if ( % _IsConstructCall ( ) ) { <nl> + % WeakCollectionInitialize ( this ) ; <nl> + } else { <nl> + return new $ WeakSet ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + function WeakSetAdd ( value ) { <nl> + if ( ! IS_WEAKSET ( this ) ) { <nl> + throw MakeTypeError ( ' incompatible_method_receiver ' , <nl> + [ ' WeakSet . prototype . add ' , this ] ) ; <nl> + } <nl> + if ( ! ( IS_SPEC_OBJECT ( value ) | | IS_SYMBOL ( value ) ) ) { <nl> + throw % MakeTypeError ( ' invalid_weakset_value ' , [ this , value ] ) ; <nl> + } <nl> + return % WeakCollectionSet ( this , value , true ) ; <nl> + } <nl> + <nl> + <nl> + function WeakSetHas ( value ) { <nl> + if ( ! 
IS_WEAKSET ( this ) ) { <nl> + throw MakeTypeError ( ' incompatible_method_receiver ' , <nl> + [ ' WeakSet . prototype . has ' , this ] ) ; <nl> + } <nl> + if ( ! ( IS_SPEC_OBJECT ( value ) | | IS_SYMBOL ( value ) ) ) { <nl> + throw % MakeTypeError ( ' invalid_weakset_value ' , [ this , value ] ) ; <nl> + } <nl> + return % WeakCollectionHas ( this , value ) ; <nl> + } <nl> + <nl> + <nl> + function WeakSetDelete ( value ) { <nl> + if ( ! IS_WEAKSET ( this ) ) { <nl> + throw MakeTypeError ( ' incompatible_method_receiver ' , <nl> + [ ' WeakSet . prototype . delete ' , this ] ) ; <nl> + } <nl> + if ( ! ( IS_SPEC_OBJECT ( value ) | | IS_SYMBOL ( value ) ) ) { <nl> + throw % MakeTypeError ( ' invalid_weakset_value ' , [ this , value ] ) ; <nl> + } <nl> + return % WeakCollectionDelete ( this , value ) ; <nl> + } <nl> + <nl> + <nl> + function WeakSetClear ( ) { <nl> + if ( ! IS_WEAKSET ( this ) ) { <nl> + throw MakeTypeError ( ' incompatible_method_receiver ' , <nl> + [ ' WeakSet . prototype . clear ' , this ] ) ; <nl> + } <nl> + / / Replace the internal table with a new empty table . <nl> + % WeakCollectionInitialize ( this ) ; <nl> + } <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + function SetUpWeakSet ( ) { <nl> + % CheckIsBootstrapping ( ) ; <nl> + <nl> + % SetCode ( $ WeakSet , WeakSetConstructor ) ; <nl> + % FunctionSetPrototype ( $ WeakSet , new $ Object ( ) ) ; <nl> + % SetProperty ( $ WeakSet . prototype , " constructor " , $ WeakSet , DONT_ENUM ) ; <nl> + <nl> + / / Set up the non - enumerable functions on the WeakSet prototype object . <nl> + InstallFunctions ( $ WeakSet . prototype , DONT_ENUM , $ Array ( <nl> + " add " , WeakSetAdd , <nl> + " has " , WeakSetHas , <nl> + " delete " , WeakSetDelete , <nl> + " clear " , WeakSetClear <nl> + ) ) ; <nl> + } <nl> + <nl> + SetUpWeakSet ( ) ; <nl> mmm a / src / heap . cc <nl> ppp b / src / heap . cc <nl> class ScavengingVisitor : public StaticVisitorBase { <nl> & ObjectEvacuationStrategy < POINTER_OBJECT > : : <nl> Visit ) ; <nl> <nl> + table_ . Register ( kVisitJSWeakSet , <nl> + & ObjectEvacuationStrategy < POINTER_OBJECT > : : <nl> + Visit ) ; <nl> + <nl> table_ . Register ( kVisitJSArrayBuffer , <nl> & ObjectEvacuationStrategy < POINTER_OBJECT > : : <nl> Visit ) ; <nl> GCTracer : : ~ GCTracer ( ) { <nl> PrintF ( " intracompaction_ptrs = % . 1f " , <nl> scopes_ [ Scope : : MC_UPDATE_POINTERS_BETWEEN_EVACUATED ] ) ; <nl> PrintF ( " misc_compaction = % . 1f " , scopes_ [ Scope : : MC_UPDATE_MISC_POINTERS ] ) ; <nl> - PrintF ( " weakmap_process = % . 1f " , scopes_ [ Scope : : MC_WEAKMAP_PROCESS ] ) ; <nl> - PrintF ( " weakmap_clear = % . 1f " , scopes_ [ Scope : : MC_WEAKMAP_CLEAR ] ) ; <nl> + PrintF ( " weakcollection_process = % . 1f " , <nl> + scopes_ [ Scope : : MC_WEAKCOLLECTION_PROCESS ] ) ; <nl> + PrintF ( " weakcollection_clear = % . 1f " , <nl> + scopes_ [ Scope : : MC_WEAKCOLLECTION_CLEAR ] ) ; <nl> <nl> PrintF ( " total_size_before = % " V8_PTR_PREFIX " d " , start_object_size_ ) ; <nl> PrintF ( " total_size_after = % " V8_PTR_PREFIX " d " , heap_ - > SizeOfObjects ( ) ) ; <nl> mmm a / src / heap . h <nl> ppp b / src / heap . 
h <nl> class GCTracer BASE_EMBEDDED { <nl> MC_UPDATE_POINTERS_TO_EVACUATED , <nl> MC_UPDATE_POINTERS_BETWEEN_EVACUATED , <nl> MC_UPDATE_MISC_POINTERS , <nl> - MC_WEAKMAP_PROCESS , <nl> - MC_WEAKMAP_CLEAR , <nl> + MC_WEAKCOLLECTION_PROCESS , <nl> + MC_WEAKCOLLECTION_CLEAR , <nl> MC_FLUSH_CODE , <nl> kNumberOfScopes <nl> } ; <nl> mmm a / src / incremental - marking . cc <nl> ppp b / src / incremental - marking . cc <nl> class IncrementalMarkingMarkingVisitor <nl> VisitNativeContext ( map , context ) ; <nl> } <nl> <nl> - static void VisitJSWeakMap ( Map * map , HeapObject * object ) { <nl> + static void VisitWeakCollection ( Map * map , HeapObject * object ) { <nl> Heap * heap = map - > GetHeap ( ) ; <nl> VisitPointers ( heap , <nl> - HeapObject : : RawField ( object , JSWeakMap : : kPropertiesOffset ) , <nl> - HeapObject : : RawField ( object , JSWeakMap : : kSize ) ) ; <nl> + HeapObject : : RawField ( object , <nl> + JSWeakCollection : : kPropertiesOffset ) , <nl> + HeapObject : : RawField ( object , JSWeakCollection : : kSize ) ) ; <nl> } <nl> <nl> static void BeforeVisitingSharedFunctionInfo ( HeapObject * object ) { } <nl> mmm a / src / macros . py <nl> ppp b / src / macros . py <nl> <nl> macro IS_SET ( arg ) = ( % _ClassOf ( arg ) = = = ' Set ' ) ; <nl> macro IS_MAP ( arg ) = ( % _ClassOf ( arg ) = = = ' Map ' ) ; <nl> macro IS_WEAKMAP ( arg ) = ( % _ClassOf ( arg ) = = = ' WeakMap ' ) ; <nl> + macro IS_WEAKSET ( arg ) = ( % _ClassOf ( arg ) = = = ' WeakSet ' ) ; <nl> macro IS_DATE ( arg ) = ( % _ClassOf ( arg ) = = = ' Date ' ) ; <nl> macro IS_NUMBER_WRAPPER ( arg ) = ( % _ClassOf ( arg ) = = = ' Number ' ) ; <nl> macro IS_STRING_WRAPPER ( arg ) = ( % _ClassOf ( arg ) = = = ' String ' ) ; <nl> mmm a / src / mark - compact . cc <nl> ppp b / src / mark - compact . cc <nl> MarkCompactCollector : : MarkCompactCollector ( ) : / / NOLINT <nl> migration_slots_buffer_ ( NULL ) , <nl> heap_ ( NULL ) , <nl> code_flusher_ ( NULL ) , <nl> - encountered_weak_maps_ ( NULL ) { } <nl> + encountered_weak_collections_ ( NULL ) { } <nl> <nl> <nl> # ifdef VERIFY_HEAP <nl> void MarkCompactCollector : : CollectGarbage ( ) { <nl> / / Make sure that Prepare ( ) has been called . The individual steps below will <nl> / / update the state as they proceed . <nl> ASSERT ( state_ = = PREPARE_GC ) ; <nl> - ASSERT ( encountered_weak_maps_ = = Smi : : FromInt ( 0 ) ) ; <nl> + ASSERT ( encountered_weak_collections_ = = Smi : : FromInt ( 0 ) ) ; <nl> <nl> MarkLiveObjects ( ) ; <nl> ASSERT ( heap_ - > incremental_marking ( ) - > IsStopped ( ) ) ; <nl> <nl> if ( FLAG_collect_maps ) ClearNonLiveReferences ( ) ; <nl> <nl> - ClearWeakMaps ( ) ; <nl> + ClearWeakCollections ( ) ; <nl> <nl> # ifdef VERIFY_HEAP <nl> if ( FLAG_verify_heap ) { <nl> class MarkCompactMarkingVisitor <nl> shared - > BeforeVisitingPointers ( ) ; <nl> } <nl> <nl> - static void VisitJSWeakMap ( Map * map , HeapObject * object ) { <nl> + static void VisitWeakCollection ( Map * map , HeapObject * object ) { <nl> MarkCompactCollector * collector = map - > GetHeap ( ) - > mark_compact_collector ( ) ; <nl> - JSWeakMap * weak_map = reinterpret_cast < JSWeakMap * > ( object ) ; <nl> + JSWeakCollection * weak_collection = <nl> + reinterpret_cast < JSWeakCollection * > ( object ) ; <nl> <nl> / / Enqueue weak map in linked list of encountered weak maps . 
<nl> - if ( weak_map - > next ( ) = = Smi : : FromInt ( 0 ) ) { <nl> - weak_map - > set_next ( collector - > encountered_weak_maps ( ) ) ; <nl> - collector - > set_encountered_weak_maps ( weak_map ) ; <nl> + if ( weak_collection - > next ( ) = = Smi : : FromInt ( 0 ) ) { <nl> + weak_collection - > set_next ( collector - > encountered_weak_collections ( ) ) ; <nl> + collector - > set_encountered_weak_collections ( weak_collection ) ; <nl> } <nl> <nl> / / Skip visiting the backing hash table containing the mappings . <nl> - int object_size = JSWeakMap : : BodyDescriptor : : SizeOf ( map , object ) ; <nl> + int object_size = JSWeakCollection : : BodyDescriptor : : SizeOf ( map , object ) ; <nl> BodyVisitorBase < MarkCompactMarkingVisitor > : : IteratePointers ( <nl> map - > GetHeap ( ) , <nl> object , <nl> - JSWeakMap : : BodyDescriptor : : kStartOffset , <nl> - JSWeakMap : : kTableOffset ) ; <nl> + JSWeakCollection : : BodyDescriptor : : kStartOffset , <nl> + JSWeakCollection : : kTableOffset ) ; <nl> BodyVisitorBase < MarkCompactMarkingVisitor > : : IteratePointers ( <nl> map - > GetHeap ( ) , <nl> object , <nl> - JSWeakMap : : kTableOffset + kPointerSize , <nl> + JSWeakCollection : : kTableOffset + kPointerSize , <nl> object_size ) ; <nl> <nl> / / Mark the backing hash table without pushing it on the marking stack . <nl> - Object * table_object = weak_map - > table ( ) ; <nl> + Object * table_object = weak_collection - > table ( ) ; <nl> if ( ! table_object - > IsHashTable ( ) ) return ; <nl> ObjectHashTable * table = ObjectHashTable : : cast ( table_object ) ; <nl> Object * * table_slot = <nl> - HeapObject : : RawField ( weak_map , JSWeakMap : : kTableOffset ) ; <nl> + HeapObject : : RawField ( weak_collection , JSWeakCollection : : kTableOffset ) ; <nl> MarkBit table_mark = Marking : : MarkBitFrom ( table ) ; <nl> collector - > RecordSlot ( table_slot , table_slot , table ) ; <nl> if ( ! table_mark . Get ( ) ) collector - > SetMark ( table , table_mark ) ; <nl> void MarkCompactCollector : : ProcessEphemeralMarking ( ObjectVisitor * visitor ) { <nl> isolate ( ) - > global_handles ( ) - > IterateObjectGroups ( <nl> visitor , & IsUnmarkedHeapObjectWithHeap ) ; <nl> MarkImplicitRefGroups ( ) ; <nl> - ProcessWeakMaps ( ) ; <nl> + ProcessWeakCollections ( ) ; <nl> work_to_do = ! marking_deque_ . IsEmpty ( ) ; <nl> ProcessMarkingDeque ( ) ; <nl> } <nl> void MarkCompactCollector : : ClearNonLiveDependentCode ( DependentCode * entries ) { <nl> } <nl> <nl> <nl> - void MarkCompactCollector : : ProcessWeakMaps ( ) { <nl> - GCTracer : : Scope gc_scope ( tracer_ , GCTracer : : Scope : : MC_WEAKMAP_PROCESS ) ; <nl> - Object * weak_map_obj = encountered_weak_maps ( ) ; <nl> - while ( weak_map_obj ! = Smi : : FromInt ( 0 ) ) { <nl> - ASSERT ( MarkCompactCollector : : IsMarked ( HeapObject : : cast ( weak_map_obj ) ) ) ; <nl> - JSWeakMap * weak_map = reinterpret_cast < JSWeakMap * > ( weak_map_obj ) ; <nl> - ObjectHashTable * table = ObjectHashTable : : cast ( weak_map - > table ( ) ) ; <nl> + void MarkCompactCollector : : ProcessWeakCollections ( ) { <nl> + GCTracer : : Scope gc_scope ( tracer_ , GCTracer : : Scope : : MC_WEAKCOLLECTION_PROCESS ) ; <nl> + Object * weak_collection_obj = encountered_weak_collections ( ) ; <nl> + while ( weak_collection_obj ! 
= Smi : : FromInt ( 0 ) ) { <nl> + ASSERT ( MarkCompactCollector : : IsMarked ( <nl> + HeapObject : : cast ( weak_collection_obj ) ) ) ; <nl> + JSWeakCollection * weak_collection = <nl> + reinterpret_cast < JSWeakCollection * > ( weak_collection_obj ) ; <nl> + ObjectHashTable * table = ObjectHashTable : : cast ( weak_collection - > table ( ) ) ; <nl> Object * * anchor = reinterpret_cast < Object * * > ( table - > address ( ) ) ; <nl> for ( int i = 0 ; i < table - > Capacity ( ) ; i + + ) { <nl> if ( MarkCompactCollector : : IsMarked ( HeapObject : : cast ( table - > KeyAt ( i ) ) ) ) { <nl> void MarkCompactCollector : : ProcessWeakMaps ( ) { <nl> this , anchor , value_slot ) ; <nl> } <nl> } <nl> - weak_map_obj = weak_map - > next ( ) ; <nl> + weak_collection_obj = weak_collection - > next ( ) ; <nl> } <nl> } <nl> <nl> <nl> - void MarkCompactCollector : : ClearWeakMaps ( ) { <nl> - GCTracer : : Scope gc_scope ( tracer_ , GCTracer : : Scope : : MC_WEAKMAP_CLEAR ) ; <nl> - Object * weak_map_obj = encountered_weak_maps ( ) ; <nl> - while ( weak_map_obj ! = Smi : : FromInt ( 0 ) ) { <nl> - ASSERT ( MarkCompactCollector : : IsMarked ( HeapObject : : cast ( weak_map_obj ) ) ) ; <nl> - JSWeakMap * weak_map = reinterpret_cast < JSWeakMap * > ( weak_map_obj ) ; <nl> - ObjectHashTable * table = ObjectHashTable : : cast ( weak_map - > table ( ) ) ; <nl> + void MarkCompactCollector : : ClearWeakCollections ( ) { <nl> + GCTracer : : Scope gc_scope ( tracer_ , GCTracer : : Scope : : MC_WEAKCOLLECTION_CLEAR ) ; <nl> + Object * weak_collection_obj = encountered_weak_collections ( ) ; <nl> + while ( weak_collection_obj ! = Smi : : FromInt ( 0 ) ) { <nl> + ASSERT ( MarkCompactCollector : : IsMarked ( <nl> + HeapObject : : cast ( weak_collection_obj ) ) ) ; <nl> + JSWeakCollection * weak_collection = <nl> + reinterpret_cast < JSWeakCollection * > ( weak_collection_obj ) ; <nl> + ObjectHashTable * table = ObjectHashTable : : cast ( weak_collection - > table ( ) ) ; <nl> for ( int i = 0 ; i < table - > Capacity ( ) ; i + + ) { <nl> if ( ! MarkCompactCollector : : IsMarked ( HeapObject : : cast ( table - > KeyAt ( i ) ) ) ) { <nl> table - > RemoveEntry ( i ) ; <nl> } <nl> } <nl> - weak_map_obj = weak_map - > next ( ) ; <nl> - weak_map - > set_next ( Smi : : FromInt ( 0 ) ) ; <nl> + weak_collection_obj = weak_collection - > next ( ) ; <nl> + weak_collection - > set_next ( Smi : : FromInt ( 0 ) ) ; <nl> } <nl> - set_encountered_weak_maps ( Smi : : FromInt ( 0 ) ) ; <nl> + set_encountered_weak_collections ( Smi : : FromInt ( 0 ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / mark - compact . h <nl> ppp b / src / mark - compact . h <nl> class MarkCompactCollector { <nl> <nl> bool TryPromoteObject ( HeapObject * object , int object_size ) ; <nl> <nl> - inline Object * encountered_weak_maps ( ) { return encountered_weak_maps_ ; } <nl> - inline void set_encountered_weak_maps ( Object * weak_map ) { <nl> - encountered_weak_maps_ = weak_map ; <nl> + inline Object * encountered_weak_collections ( ) { <nl> + return encountered_weak_collections_ ; <nl> + } <nl> + inline void set_encountered_weak_collections ( Object * weak_collection ) { <nl> + encountered_weak_collections_ = weak_collection ; <nl> } <nl> <nl> void InvalidateCode ( Code * code ) ; <nl> class MarkCompactCollector { <nl> / / ClearNonLiveTransitions pass or by calling this function . <nl> void ReattachInitialMaps ( ) ; <nl> <nl> - / / Mark all values associated with reachable keys in weak maps encountered <nl> - / / so far . 
This might push new object or even new weak maps onto the <nl> - / / marking stack . <nl> - void ProcessWeakMaps ( ) ; <nl> + / / Mark all values associated with reachable keys in weak collections <nl> + / / encountered so far . This might push new object or even new weak maps onto <nl> + / / the marking stack . <nl> + void ProcessWeakCollections ( ) ; <nl> <nl> / / After all reachable objects have been marked those weak map entries <nl> / / with an unreachable key are removed from all encountered weak maps . <nl> / / The linked list of all encountered weak maps is destroyed . <nl> - void ClearWeakMaps ( ) ; <nl> + void ClearWeakCollections ( ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Phase 2 : Sweeping to clear mark bits and free non - live objects for <nl> class MarkCompactCollector { <nl> Heap * heap_ ; <nl> MarkingDeque marking_deque_ ; <nl> CodeFlusher * code_flusher_ ; <nl> - Object * encountered_weak_maps_ ; <nl> + Object * encountered_weak_collections_ ; <nl> <nl> List < Page * > evacuation_candidates_ ; <nl> List < Code * > invalidated_code_ ; <nl> mmm a / src / messages . js <nl> ppp b / src / messages . js <nl> var kMessages = { <nl> proxy_non_object_prop_names : [ " Trap ' " , " % 1 " , " ' returned non - object " , " % 0 " ] , <nl> proxy_repeated_prop_name : [ " Trap ' " , " % 1 " , " ' returned repeated property name ' " , " % 2 " , " ' " ] , <nl> invalid_weakmap_key : [ " Invalid value used as weak map key " ] , <nl> + invalid_weakset_value : [ " Invalid value used in weak set " ] , <nl> not_date_object : [ " this is not a Date object . " ] , <nl> observe_non_object : [ " Object . " , " % 0 " , " cannot " , " % 0 " , " non - object " ] , <nl> observe_non_function : [ " Object . " , " % 0 " , " cannot deliver to non - function " ] , <nl> mmm a / src / object - observe . js <nl> ppp b / src / object - observe . js <nl> ObservationWeakMap . prototype = { <nl> get : function ( key ) { <nl> key = % UnwrapGlobalProxy ( key ) ; <nl> if ( ! IS_SPEC_OBJECT ( key ) ) return void 0 ; <nl> - return % WeakMapGet ( this . map_ , key ) ; <nl> + return % WeakCollectionGet ( this . map_ , key ) ; <nl> } , <nl> set : function ( key , value ) { <nl> key = % UnwrapGlobalProxy ( key ) ; <nl> if ( ! IS_SPEC_OBJECT ( key ) ) return void 0 ; <nl> - % WeakMapSet ( this . map_ , key , value ) ; <nl> + % WeakCollectionSet ( this . map_ , key , value ) ; <nl> } , <nl> has : function ( key ) { <nl> return ! IS_UNDEFINED ( this . get ( key ) ) ; <nl> mmm a / src / objects - debug . cc <nl> ppp b / src / objects - debug . cc <nl> void HeapObject : : HeapObjectVerify ( ) { <nl> case JS_WEAK_MAP_TYPE : <nl> JSWeakMap : : cast ( this ) - > JSWeakMapVerify ( ) ; <nl> break ; <nl> + case JS_WEAK_SET_TYPE : <nl> + JSWeakSet : : cast ( this ) - > JSWeakSetVerify ( ) ; <nl> + break ; <nl> case JS_REGEXP_TYPE : <nl> JSRegExp : : cast ( this ) - > JSRegExpVerify ( ) ; <nl> break ; <nl> void JSWeakMap : : JSWeakMapVerify ( ) { <nl> } <nl> <nl> <nl> + void JSWeakSet : : JSWeakSetVerify ( ) { <nl> + CHECK ( IsJSWeakSet ( ) ) ; <nl> + JSObjectVerify ( ) ; <nl> + VerifyHeapPointer ( table ( ) ) ; <nl> + CHECK ( table ( ) - > IsHashTable ( ) | | table ( ) - > IsUndefined ( ) ) ; <nl> + } <nl> + <nl> + <nl> void JSRegExp : : JSRegExpVerify ( ) { <nl> JSObjectVerify ( ) ; <nl> CHECK ( data ( ) - > IsUndefined ( ) | | data ( ) - > IsFixedArray ( ) ) ; <nl> mmm a / src / objects - inl . h <nl> ppp b / src / objects - inl . 
h <nl> TYPE_CHECKER ( JSFunctionProxy , JS_FUNCTION_PROXY_TYPE ) <nl> TYPE_CHECKER ( JSSet , JS_SET_TYPE ) <nl> TYPE_CHECKER ( JSMap , JS_MAP_TYPE ) <nl> TYPE_CHECKER ( JSWeakMap , JS_WEAK_MAP_TYPE ) <nl> + TYPE_CHECKER ( JSWeakSet , JS_WEAK_SET_TYPE ) <nl> TYPE_CHECKER ( JSContextExtensionObject , JS_CONTEXT_EXTENSION_OBJECT_TYPE ) <nl> TYPE_CHECKER ( Map , MAP_TYPE ) <nl> TYPE_CHECKER ( FixedArray , FIXED_ARRAY_TYPE ) <nl> TYPE_CHECKER ( FixedDoubleArray , FIXED_DOUBLE_ARRAY_TYPE ) <nl> <nl> <nl> + bool Object : : IsJSWeakCollection ( ) { <nl> + return IsJSWeakMap ( ) | | IsJSWeakSet ( ) ; <nl> + } <nl> + <nl> + <nl> bool Object : : IsDescriptorArray ( ) { <nl> return IsFixedArray ( ) ; <nl> } <nl> int JSObject : : GetHeaderSize ( ) { <nl> return JSMap : : kSize ; <nl> case JS_WEAK_MAP_TYPE : <nl> return JSWeakMap : : kSize ; <nl> + case JS_WEAK_SET_TYPE : <nl> + return JSWeakSet : : kSize ; <nl> case JS_REGEXP_TYPE : <nl> return JSRegExp : : kSize ; <nl> case JS_CONTEXT_EXTENSION_OBJECT_TYPE : <nl> CAST_ACCESSOR ( JSFunctionProxy ) <nl> CAST_ACCESSOR ( JSSet ) <nl> CAST_ACCESSOR ( JSMap ) <nl> CAST_ACCESSOR ( JSWeakMap ) <nl> + CAST_ACCESSOR ( JSWeakSet ) <nl> CAST_ACCESSOR ( Foreign ) <nl> CAST_ACCESSOR ( ByteArray ) <nl> CAST_ACCESSOR ( FreeSpace ) <nl> void JSProxy : : InitializeBody ( int object_size , Object * value ) { <nl> <nl> ACCESSORS ( JSSet , table , Object , kTableOffset ) <nl> ACCESSORS ( JSMap , table , Object , kTableOffset ) <nl> - ACCESSORS ( JSWeakMap , table , Object , kTableOffset ) <nl> - ACCESSORS ( JSWeakMap , next , Object , kNextOffset ) <nl> + ACCESSORS ( JSWeakCollection , table , Object , kTableOffset ) <nl> + ACCESSORS ( JSWeakCollection , next , Object , kNextOffset ) <nl> <nl> <nl> Address Foreign : : foreign_address ( ) { <nl> mmm a / src / objects - printer . cc <nl> ppp b / src / objects - printer . cc <nl> void HeapObject : : HeapObjectPrint ( FILE * out ) { <nl> case JS_WEAK_MAP_TYPE : <nl> JSWeakMap : : cast ( this ) - > JSWeakMapPrint ( out ) ; <nl> break ; <nl> + case JS_WEAK_SET_TYPE : <nl> + JSWeakSet : : cast ( this ) - > JSWeakSetPrint ( out ) ; <nl> + break ; <nl> case FOREIGN_TYPE : <nl> Foreign : : cast ( this ) - > ForeignPrint ( out ) ; <nl> break ; <nl> static const char * TypeToString ( InstanceType type ) { <nl> case JS_ARRAY_TYPE : return " JS_ARRAY " ; <nl> case JS_PROXY_TYPE : return " JS_PROXY " ; <nl> case JS_WEAK_MAP_TYPE : return " JS_WEAK_MAP " ; <nl> + case JS_WEAK_SET_TYPE : return " JS_WEAK_SET " ; <nl> case JS_REGEXP_TYPE : return " JS_REGEXP " ; <nl> case JS_VALUE_TYPE : return " JS_VALUE " ; <nl> case JS_GLOBAL_OBJECT_TYPE : return " JS_GLOBAL_OBJECT " ; <nl> void JSWeakMap : : JSWeakMapPrint ( FILE * out ) { <nl> } <nl> <nl> <nl> + void JSWeakSet : : JSWeakSetPrint ( FILE * out ) { <nl> + HeapObject : : PrintHeader ( out , " JSWeakSet " ) ; <nl> + PrintF ( out , " - map = 0x % p \ n " , reinterpret_cast < void * > ( map ( ) ) ) ; <nl> + PrintF ( out , " - table = " ) ; <nl> + table ( ) - > ShortPrint ( out ) ; <nl> + PrintF ( out , " \ n " ) ; <nl> + } <nl> + <nl> + <nl> void JSArrayBuffer : : JSArrayBufferPrint ( FILE * out ) { <nl> HeapObject : : PrintHeader ( out , " JSArrayBuffer " ) ; <nl> PrintF ( out , " - map = 0x % p \ n " , reinterpret_cast < void * > ( map ( ) ) ) ; <nl> mmm a / src / objects - visiting - inl . h <nl> ppp b / src / objects - visiting - inl . h <nl> void StaticNewSpaceVisitor < StaticVisitor > : : Initialize ( ) { <nl> <nl> table_ . 
Register ( kVisitJSWeakMap , & JSObjectVisitor : : Visit ) ; <nl> <nl> + table_ . Register ( kVisitJSWeakSet , & JSObjectVisitor : : Visit ) ; <nl> + <nl> table_ . Register ( kVisitJSRegExp , & JSObjectVisitor : : Visit ) ; <nl> <nl> table_ . template RegisterSpecializations < DataObjectVisitor , <nl> void StaticMarkingVisitor < StaticVisitor > : : Initialize ( ) { <nl> <nl> table_ . Register ( kVisitSeqTwoByteString , & DataObjectVisitor : : Visit ) ; <nl> <nl> - table_ . Register ( kVisitJSWeakMap , & StaticVisitor : : VisitJSWeakMap ) ; <nl> + table_ . Register ( kVisitJSWeakMap , & StaticVisitor : : VisitWeakCollection ) ; <nl> + <nl> + table_ . Register ( kVisitJSWeakSet , & StaticVisitor : : VisitWeakCollection ) ; <nl> <nl> table_ . Register ( kVisitOddball , <nl> & FixedBodyVisitor < StaticVisitor , <nl> mmm a / src / objects - visiting . cc <nl> ppp b / src / objects - visiting . cc <nl> StaticVisitorBase : : VisitorId StaticVisitorBase : : GetVisitorId ( <nl> case JS_WEAK_MAP_TYPE : <nl> return kVisitJSWeakMap ; <nl> <nl> + case JS_WEAK_SET_TYPE : <nl> + return kVisitJSWeakSet ; <nl> + <nl> case JS_REGEXP_TYPE : <nl> return kVisitJSRegExp ; <nl> <nl> mmm a / src / objects - visiting . h <nl> ppp b / src / objects - visiting . h <nl> class StaticVisitorBase : public AllStatic { <nl> V ( SharedFunctionInfo ) \ <nl> V ( JSFunction ) \ <nl> V ( JSWeakMap ) \ <nl> + V ( JSWeakSet ) \ <nl> V ( JSArrayBuffer ) \ <nl> V ( JSTypedArray ) \ <nl> V ( JSDataView ) \ <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> void JSObject : : JSObjectShortPrint ( StringStream * accumulator ) { <nl> accumulator - > Add ( " < JS WeakMap > " ) ; <nl> break ; <nl> } <nl> + case JS_WEAK_SET_TYPE : { <nl> + accumulator - > Add ( " < JS WeakSet > " ) ; <nl> + break ; <nl> + } <nl> case JS_REGEXP_TYPE : { <nl> accumulator - > Add ( " < JS RegExp > " ) ; <nl> break ; <nl> void HeapObject : : IterateBody ( InstanceType type , int object_size , <nl> case JS_SET_TYPE : <nl> case JS_MAP_TYPE : <nl> case JS_WEAK_MAP_TYPE : <nl> + case JS_WEAK_SET_TYPE : <nl> case JS_REGEXP_TYPE : <nl> case JS_GLOBAL_PROXY_TYPE : <nl> case JS_GLOBAL_OBJECT_TYPE : <nl> mmm a / src / objects . h <nl> ppp b / src / objects . 
h <nl> <nl> / / - JSArray <nl> / / - JSArrayBuffer <nl> / / - JSArrayBufferView <nl> - / / - JSTypedArray <nl> - / / - JSDataView <nl> + / / - JSTypedArray <nl> + / / - JSDataView <nl> / / - JSSet <nl> / / - JSMap <nl> - / / - JSWeakMap <nl> + / / - JSWeakCollection <nl> + / / - JSWeakMap <nl> + / / - JSWeakSet <nl> / / - JSRegExp <nl> / / - JSFunction <nl> / / - JSGeneratorObject <nl> const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits ; <nl> V ( JS_DATA_VIEW_TYPE ) \ <nl> V ( JS_PROXY_TYPE ) \ <nl> V ( JS_WEAK_MAP_TYPE ) \ <nl> + V ( JS_WEAK_SET_TYPE ) \ <nl> V ( JS_REGEXP_TYPE ) \ <nl> \ <nl> V ( JS_FUNCTION_TYPE ) \ <nl> enum InstanceType { <nl> JS_SET_TYPE , <nl> JS_MAP_TYPE , <nl> JS_WEAK_MAP_TYPE , <nl> + JS_WEAK_SET_TYPE , <nl> <nl> JS_REGEXP_TYPE , <nl> <nl> class MaybeObject BASE_EMBEDDED { <nl> V ( JSFunctionProxy ) \ <nl> V ( JSSet ) \ <nl> V ( JSMap ) \ <nl> + V ( JSWeakCollection ) \ <nl> V ( JSWeakMap ) \ <nl> + V ( JSWeakSet ) \ <nl> V ( JSRegExp ) \ <nl> V ( HashTable ) \ <nl> V ( Dictionary ) \ <nl> class JSMap : public JSObject { <nl> } ; <nl> <nl> <nl> - / / The JSWeakMap describes EcmaScript Harmony weak maps <nl> - class JSWeakMap : public JSObject { <nl> + / / Base class for both JSWeakMap and JSWeakSet <nl> + class JSWeakCollection : public JSObject { <nl> public : <nl> / / [ table ] : the backing hash table mapping keys to values . <nl> DECL_ACCESSORS ( table , Object ) <nl> class JSWeakMap : public JSObject { <nl> / / [ next ] : linked list of encountered weak maps during GC . <nl> DECL_ACCESSORS ( next , Object ) <nl> <nl> + static const int kTableOffset = JSObject : : kHeaderSize ; <nl> + static const int kNextOffset = kTableOffset + kPointerSize ; <nl> + static const int kSize = kNextOffset + kPointerSize ; <nl> + <nl> + private : <nl> + DISALLOW_IMPLICIT_CONSTRUCTORS ( JSWeakCollection ) ; <nl> + } ; <nl> + <nl> + <nl> + / / The JSWeakMap describes EcmaScript Harmony weak maps <nl> + class JSWeakMap : public JSWeakCollection { <nl> + public : <nl> / / Casting . <nl> static inline JSWeakMap * cast ( Object * obj ) ; <nl> <nl> class JSWeakMap : public JSObject { <nl> DECLARE_PRINTER ( JSWeakMap ) <nl> DECLARE_VERIFIER ( JSWeakMap ) <nl> <nl> - static const int kTableOffset = JSObject : : kHeaderSize ; <nl> - static const int kNextOffset = kTableOffset + kPointerSize ; <nl> - static const int kSize = kNextOffset + kPointerSize ; <nl> - <nl> private : <nl> DISALLOW_IMPLICIT_CONSTRUCTORS ( JSWeakMap ) ; <nl> } ; <nl> <nl> <nl> + / / The JSWeakSet describes EcmaScript Harmony weak sets <nl> + class JSWeakSet : public JSWeakCollection { <nl> + public : <nl> + / / Casting . <nl> + static inline JSWeakSet * cast ( Object * obj ) ; <nl> + <nl> + / / Dispatched behavior . <nl> + DECLARE_PRINTER ( JSWeakSet ) <nl> + DECLARE_VERIFIER ( JSWeakSet ) <nl> + <nl> + private : <nl> + DISALLOW_IMPLICIT_CONSTRUCTORS ( JSWeakSet ) ; <nl> + } ; <nl> + <nl> + <nl> class JSArrayBuffer : public JSObject { <nl> public : <nl> / / [ backing_store ] : backing memory for this array <nl> mmm a / src / runtime . cc <nl> ppp b / src / runtime . 
cc <nl> RUNTIME_FUNCTION ( MaybeObject * , Runtime_MapGetSize ) { <nl> } <nl> <nl> <nl> - static JSWeakMap * WeakMapInitialize ( Isolate * isolate , <nl> - Handle < JSWeakMap > weakmap ) { <nl> - ASSERT ( weakmap - > map ( ) - > inobject_properties ( ) = = 0 ) ; <nl> + static JSWeakCollection * WeakCollectionInitialize ( Isolate * isolate , <nl> + Handle < JSWeakCollection > weak_collection ) { <nl> + ASSERT ( weak_collection - > map ( ) - > inobject_properties ( ) = = 0 ) ; <nl> Handle < ObjectHashTable > table = isolate - > factory ( ) - > NewObjectHashTable ( 0 ) ; <nl> - weakmap - > set_table ( * table ) ; <nl> - weakmap - > set_next ( Smi : : FromInt ( 0 ) ) ; <nl> - return * weakmap ; <nl> + weak_collection - > set_table ( * table ) ; <nl> + weak_collection - > set_next ( Smi : : FromInt ( 0 ) ) ; <nl> + return * weak_collection ; <nl> } <nl> <nl> <nl> - RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakMapInitialize ) { <nl> + RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakCollectionInitialize ) { <nl> HandleScope scope ( isolate ) ; <nl> ASSERT ( args . length ( ) = = 1 ) ; <nl> - CONVERT_ARG_HANDLE_CHECKED ( JSWeakMap , weakmap , 0 ) ; <nl> - return WeakMapInitialize ( isolate , weakmap ) ; <nl> + CONVERT_ARG_HANDLE_CHECKED ( JSWeakCollection , weak_collection , 0 ) ; <nl> + return WeakCollectionInitialize ( isolate , weak_collection ) ; <nl> } <nl> <nl> <nl> - RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakMapGet ) { <nl> + RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakCollectionGet ) { <nl> HandleScope scope ( isolate ) ; <nl> ASSERT ( args . length ( ) = = 2 ) ; <nl> - CONVERT_ARG_HANDLE_CHECKED ( JSWeakMap , weakmap , 0 ) ; <nl> + CONVERT_ARG_HANDLE_CHECKED ( JSWeakCollection , weak_collection , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , key , 1 ) ; <nl> - Handle < ObjectHashTable > table ( ObjectHashTable : : cast ( weakmap - > table ( ) ) ) ; <nl> + Handle < ObjectHashTable > table ( <nl> + ObjectHashTable : : cast ( weak_collection - > table ( ) ) ) ; <nl> Handle < Object > lookup ( table - > Lookup ( * key ) , isolate ) ; <nl> return lookup - > IsTheHole ( ) ? isolate - > heap ( ) - > undefined_value ( ) : * lookup ; <nl> } <nl> <nl> <nl> - RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakMapHas ) { <nl> + RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakCollectionHas ) { <nl> HandleScope scope ( isolate ) ; <nl> ASSERT ( args . length ( ) = = 2 ) ; <nl> - CONVERT_ARG_HANDLE_CHECKED ( JSWeakMap , weakmap , 0 ) ; <nl> + CONVERT_ARG_HANDLE_CHECKED ( JSWeakCollection , weak_collection , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , key , 1 ) ; <nl> - Handle < ObjectHashTable > table ( ObjectHashTable : : cast ( weakmap - > table ( ) ) ) ; <nl> + Handle < ObjectHashTable > table ( <nl> + ObjectHashTable : : cast ( weak_collection - > table ( ) ) ) ; <nl> Handle < Object > lookup ( table - > Lookup ( * key ) , isolate ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( ! lookup - > IsTheHole ( ) ) ; <nl> } <nl> <nl> <nl> - RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakMapDelete ) { <nl> + RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakCollectionDelete ) { <nl> HandleScope scope ( isolate ) ; <nl> ASSERT ( args . 
length ( ) = = 2 ) ; <nl> - CONVERT_ARG_HANDLE_CHECKED ( JSWeakMap , weakmap , 0 ) ; <nl> + CONVERT_ARG_HANDLE_CHECKED ( JSWeakCollection , weak_collection , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , key , 1 ) ; <nl> - Handle < ObjectHashTable > table ( ObjectHashTable : : cast ( weakmap - > table ( ) ) ) ; <nl> + Handle < ObjectHashTable > table ( ObjectHashTable : : cast ( <nl> + weak_collection - > table ( ) ) ) ; <nl> Handle < Object > lookup ( table - > Lookup ( * key ) , isolate ) ; <nl> Handle < ObjectHashTable > new_table = <nl> PutIntoObjectHashTable ( table , key , isolate - > factory ( ) - > the_hole_value ( ) ) ; <nl> - weakmap - > set_table ( * new_table ) ; <nl> + weak_collection - > set_table ( * new_table ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( ! lookup - > IsTheHole ( ) ) ; <nl> } <nl> <nl> <nl> - RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakMapSet ) { <nl> + RUNTIME_FUNCTION ( MaybeObject * , Runtime_WeakCollectionSet ) { <nl> HandleScope scope ( isolate ) ; <nl> ASSERT ( args . length ( ) = = 3 ) ; <nl> - CONVERT_ARG_HANDLE_CHECKED ( JSWeakMap , weakmap , 0 ) ; <nl> + CONVERT_ARG_HANDLE_CHECKED ( JSWeakCollection , weak_collection , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , key , 1 ) ; <nl> Handle < Object > value ( args [ 2 ] , isolate ) ; <nl> - Handle < ObjectHashTable > table ( ObjectHashTable : : cast ( weakmap - > table ( ) ) ) ; <nl> + Handle < ObjectHashTable > table ( <nl> + ObjectHashTable : : cast ( weak_collection - > table ( ) ) ) ; <nl> Handle < ObjectHashTable > new_table = PutIntoObjectHashTable ( table , key , value ) ; <nl> - weakmap - > set_table ( * new_table ) ; <nl> + weak_collection - > set_table ( * new_table ) ; <nl> return isolate - > heap ( ) - > undefined_value ( ) ; <nl> } <nl> <nl> RUNTIME_FUNCTION ( MaybeObject * , Runtime_ObservationWeakMapCreate ) { <nl> isolate - > factory ( ) - > NewMap ( JS_WEAK_MAP_TYPE , JSWeakMap : : kSize ) ; <nl> Handle < JSWeakMap > weakmap = <nl> Handle < JSWeakMap > : : cast ( isolate - > factory ( ) - > NewJSObjectFromMap ( map ) ) ; <nl> - return WeakMapInitialize ( isolate , weakmap ) ; <nl> + return WeakCollectionInitialize ( isolate , weakmap ) ; <nl> } <nl> <nl> <nl> mmm a / src / runtime . h <nl> ppp b / src / runtime . h <nl> namespace internal { <nl> F ( MapSet , 3 , 1 ) \ <nl> F ( MapGetSize , 1 , 1 ) \ <nl> \ <nl> - / * Harmony weakmaps * / \ <nl> - F ( WeakMapInitialize , 1 , 1 ) \ <nl> - F ( WeakMapGet , 2 , 1 ) \ <nl> - F ( WeakMapHas , 2 , 1 ) \ <nl> - F ( WeakMapDelete , 2 , 1 ) \ <nl> - F ( WeakMapSet , 3 , 1 ) \ <nl> + / * Harmony weak maps and sets * / \ <nl> + F ( WeakCollectionInitialize , 1 , 1 ) \ <nl> + F ( WeakCollectionGet , 2 , 1 ) \ <nl> + F ( WeakCollectionHas , 2 , 1 ) \ <nl> + F ( WeakCollectionDelete , 2 , 1 ) \ <nl> + F ( WeakCollectionSet , 3 , 1 ) \ <nl> \ <nl> / * Harmony observe * / \ <nl> F ( IsObserved , 1 , 1 ) \ <nl> mmm a / src / types . cc <nl> ppp b / src / types . cc <nl> int Type : : LubBitset ( ) { <nl> case JS_SET_TYPE : <nl> case JS_MAP_TYPE : <nl> case JS_WEAK_MAP_TYPE : <nl> + case JS_WEAK_SET_TYPE : <nl> if ( map - > is_undetectable ( ) ) return kUndetectable ; <nl> return kOtherObject ; <nl> case JS_ARRAY_TYPE : <nl> mmm a / test / cctest / cctest . gyp <nl> ppp b / test / cctest / cctest . gyp <nl> <nl> ' test - utils . cc ' , <nl> ' test - version . cc ' , <nl> ' test - weakmaps . cc ' , <nl> + ' test - weaksets . cc ' , <nl> ' test - weaktypedarrays . cc ' <nl> ] , <nl> ' conditions ' : [ <nl> mmm a / test / cctest / cctest . 
status <nl> ppp b / test / cctest / cctest . status <nl> test - log / EquivalenceOfLoggingAndTraversal : PASS | | FAIL <nl> <nl> # We do not yet shrink weak maps after they have been emptied by the GC <nl> test - weakmaps / Shrinking : FAIL <nl> + test - weaksets / WeakSet_Shrinking : FAIL <nl> <nl> # Deferred stack trace formatting is temporarily disabled . <nl> test - heap / ReleaseStackTraceData : PASS | | FAIL <nl> new file mode 100644 <nl> index 00000000000 . . 707f9032848 <nl> mmm / dev / null <nl> ppp b / test / cctest / test - weaksets . cc <nl> <nl> + / / Copyright 2011 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + # include " v8 . h " <nl> + <nl> + # include " global - handles . h " <nl> + # include " snapshot . h " <nl> + # include " cctest . h " <nl> + <nl> + using namespace v8 : : internal ; <nl> + <nl> + <nl> + static Isolate * GetIsolateFrom ( LocalContext * context ) { <nl> + return reinterpret_cast < Isolate * > ( ( * context ) - > GetIsolate ( ) ) ; <nl> + } <nl> + <nl> + <nl> + static Handle < JSWeakSet > AllocateJSWeakSet ( Isolate * isolate ) { <nl> + Factory * factory = isolate - > factory ( ) ; <nl> + Heap * heap = isolate - > heap ( ) ; <nl> + Handle < Map > map = factory - > NewMap ( JS_WEAK_SET_TYPE , JSWeakSet : : kSize ) ; <nl> + Handle < JSObject > weakset_obj = factory - > NewJSObjectFromMap ( map ) ; <nl> + Handle < JSWeakSet > weakset ( JSWeakSet : : cast ( * weakset_obj ) ) ; <nl> + / / Do not use handles for the hash table , it would make entries strong . 
<nl> + Object * table_obj = ObjectHashTable : : Allocate ( heap , 1 ) - > ToObjectChecked ( ) ; <nl> + ObjectHashTable * table = ObjectHashTable : : cast ( table_obj ) ; <nl> + weakset - > set_table ( table ) ; <nl> + weakset - > set_next ( Smi : : FromInt ( 0 ) ) ; <nl> + return weakset ; <nl> + } <nl> + <nl> + static void PutIntoWeakSet ( Handle < JSWeakSet > weakset , <nl> + Handle < JSObject > key , <nl> + Handle < Object > value ) { <nl> + Handle < ObjectHashTable > table = PutIntoObjectHashTable ( <nl> + Handle < ObjectHashTable > ( ObjectHashTable : : cast ( weakset - > table ( ) ) ) , <nl> + Handle < JSObject > ( JSObject : : cast ( * key ) ) , <nl> + value ) ; <nl> + weakset - > set_table ( * table ) ; <nl> + } <nl> + <nl> + static int NumberOfWeakCalls = 0 ; <nl> + static void WeakPointerCallback ( v8 : : Isolate * isolate , <nl> + v8 : : Persistent < v8 : : Value > * handle , <nl> + void * id ) { <nl> + ASSERT ( id = = reinterpret_cast < void * > ( 1234 ) ) ; <nl> + NumberOfWeakCalls + + ; <nl> + handle - > Dispose ( isolate ) ; <nl> + } <nl> + <nl> + <nl> + TEST ( WeakSet_Weakness ) { <nl> + FLAG_incremental_marking = false ; <nl> + LocalContext context ; <nl> + Isolate * isolate = GetIsolateFrom ( & context ) ; <nl> + Factory * factory = isolate - > factory ( ) ; <nl> + Heap * heap = isolate - > heap ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < JSWeakSet > weakset = AllocateJSWeakSet ( isolate ) ; <nl> + GlobalHandles * global_handles = isolate - > global_handles ( ) ; <nl> + <nl> + / / Keep global reference to the key . <nl> + Handle < Object > key ; <nl> + { <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < Map > map = factory - > NewMap ( JS_OBJECT_TYPE , JSObject : : kHeaderSize ) ; <nl> + Handle < JSObject > object = factory - > NewJSObjectFromMap ( map ) ; <nl> + key = global_handles - > Create ( * object ) ; <nl> + } <nl> + CHECK ( ! global_handles - > IsWeak ( key . location ( ) ) ) ; <nl> + <nl> + / / Put entry into weak set . <nl> + { <nl> + HandleScope scope ( isolate ) ; <nl> + PutIntoWeakSet ( weakset , <nl> + Handle < JSObject > ( JSObject : : cast ( * key ) ) , <nl> + Handle < Smi > ( Smi : : FromInt ( 23 ) , isolate ) ) ; <nl> + } <nl> + CHECK_EQ ( 1 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfElements ( ) ) ; <nl> + <nl> + / / Force a full GC . <nl> + heap - > CollectAllGarbage ( false ) ; <nl> + CHECK_EQ ( 0 , NumberOfWeakCalls ) ; <nl> + CHECK_EQ ( 1 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfElements ( ) ) ; <nl> + CHECK_EQ ( <nl> + 0 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfDeletedElements ( ) ) ; <nl> + <nl> + / / Make the global reference to the key weak . <nl> + { <nl> + HandleScope scope ( isolate ) ; <nl> + global_handles - > MakeWeak ( key . location ( ) , <nl> + reinterpret_cast < void * > ( 1234 ) , <nl> + & WeakPointerCallback ) ; <nl> + } <nl> + CHECK ( global_handles - > IsWeak ( key . location ( ) ) ) ; <nl> + <nl> + / / Force a full GC . <nl> + / / Perform two consecutive GCs because the first one will only clear <nl> + / / weak references whereas the second one will also clear weak sets . 
<nl> + heap - > CollectAllGarbage ( false ) ; <nl> + CHECK_EQ ( 1 , NumberOfWeakCalls ) ; <nl> + CHECK_EQ ( 1 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfElements ( ) ) ; <nl> + CHECK_EQ ( <nl> + 0 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfDeletedElements ( ) ) ; <nl> + heap - > CollectAllGarbage ( false ) ; <nl> + CHECK_EQ ( 1 , NumberOfWeakCalls ) ; <nl> + CHECK_EQ ( 0 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfElements ( ) ) ; <nl> + CHECK_EQ ( <nl> + 1 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfDeletedElements ( ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST ( WeakSet_Shrinking ) { <nl> + LocalContext context ; <nl> + Isolate * isolate = GetIsolateFrom ( & context ) ; <nl> + Factory * factory = isolate - > factory ( ) ; <nl> + Heap * heap = isolate - > heap ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < JSWeakSet > weakset = AllocateJSWeakSet ( isolate ) ; <nl> + <nl> + / / Check initial capacity . <nl> + CHECK_EQ ( 32 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > Capacity ( ) ) ; <nl> + <nl> + / / Fill up weak set to trigger capacity change . <nl> + { <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < Map > map = factory - > NewMap ( JS_OBJECT_TYPE , JSObject : : kHeaderSize ) ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + Handle < JSObject > object = factory - > NewJSObjectFromMap ( map ) ; <nl> + PutIntoWeakSet ( weakset , object , Handle < Smi > ( Smi : : FromInt ( i ) , isolate ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / Check increased capacity . <nl> + CHECK_EQ ( 128 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > Capacity ( ) ) ; <nl> + <nl> + / / Force a full GC . <nl> + CHECK_EQ ( 32 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfElements ( ) ) ; <nl> + CHECK_EQ ( <nl> + 0 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfDeletedElements ( ) ) ; <nl> + heap - > CollectAllGarbage ( false ) ; <nl> + CHECK_EQ ( 0 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfElements ( ) ) ; <nl> + CHECK_EQ ( <nl> + 32 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > NumberOfDeletedElements ( ) ) ; <nl> + <nl> + / / Check shrunk capacity . <nl> + CHECK_EQ ( 32 , ObjectHashTable : : cast ( weakset - > table ( ) ) - > Capacity ( ) ) ; <nl> + } <nl> + <nl> + <nl> + / / Test that weak set values on an evacuation candidate which are not reachable <nl> + / / by other paths are correctly recorded in the slots buffer . <nl> + TEST ( WeakSet_Regress2060a ) { <nl> + FLAG_always_compact = true ; <nl> + LocalContext context ; <nl> + Isolate * isolate = GetIsolateFrom ( & context ) ; <nl> + Factory * factory = isolate - > factory ( ) ; <nl> + Heap * heap = isolate - > heap ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < JSFunction > function = <nl> + factory - > NewFunction ( factory - > function_string ( ) , factory - > null_value ( ) ) ; <nl> + Handle < JSObject > key = factory - > NewJSObject ( function ) ; <nl> + Handle < JSWeakSet > weakset = AllocateJSWeakSet ( isolate ) ; <nl> + <nl> + / / Start second old - space page so that values land on evacuation candidate . <nl> + Page * first_page = heap - > old_pointer_space ( ) - > anchor ( ) - > next_page ( ) ; <nl> + factory - > NewFixedArray ( 900 * KB / kPointerSize , TENURED ) ; <nl> + <nl> + / / Fill up weak set with values on an evacuation candidate . 
<nl> + { <nl> + HandleScope scope ( isolate ) ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + Handle < JSObject > object = factory - > NewJSObject ( function , TENURED ) ; <nl> + CHECK ( ! heap - > InNewSpace ( object - > address ( ) ) ) ; <nl> + CHECK ( ! first_page - > Contains ( object - > address ( ) ) ) ; <nl> + PutIntoWeakSet ( weakset , key , object ) ; <nl> + } <nl> + } <nl> + <nl> + / / Force compacting garbage collection . <nl> + CHECK ( FLAG_always_compact ) ; <nl> + heap - > CollectAllGarbage ( Heap : : kNoGCFlags ) ; <nl> + } <nl> + <nl> + <nl> + / / Test that weak set keys on an evacuation candidate which are reachable by <nl> + / / other strong paths are correctly recorded in the slots buffer . <nl> + TEST ( WeakSet_Regress2060b ) { <nl> + FLAG_always_compact = true ; <nl> + # ifdef VERIFY_HEAP <nl> + FLAG_verify_heap = true ; <nl> + # endif <nl> + <nl> + LocalContext context ; <nl> + Isolate * isolate = GetIsolateFrom ( & context ) ; <nl> + Factory * factory = isolate - > factory ( ) ; <nl> + Heap * heap = isolate - > heap ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < JSFunction > function = <nl> + factory - > NewFunction ( factory - > function_string ( ) , factory - > null_value ( ) ) ; <nl> + <nl> + / / Start second old - space page so that keys land on evacuation candidate . <nl> + Page * first_page = heap - > old_pointer_space ( ) - > anchor ( ) - > next_page ( ) ; <nl> + factory - > NewFixedArray ( 900 * KB / kPointerSize , TENURED ) ; <nl> + <nl> + / / Fill up weak set with keys on an evacuation candidate . <nl> + Handle < JSObject > keys [ 32 ] ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + keys [ i ] = factory - > NewJSObject ( function , TENURED ) ; <nl> + CHECK ( ! heap - > InNewSpace ( keys [ i ] - > address ( ) ) ) ; <nl> + CHECK ( ! first_page - > Contains ( keys [ i ] - > address ( ) ) ) ; <nl> + } <nl> + Handle < JSWeakSet > weakset = AllocateJSWeakSet ( isolate ) ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + PutIntoWeakSet ( weakset , <nl> + keys [ i ] , <nl> + Handle < Smi > ( Smi : : FromInt ( i ) , isolate ) ) ; <nl> + } <nl> + <nl> + / / Force compacting garbage collection . The subsequent collections are used <nl> + / / to verify that key references were actually updated . <nl> + CHECK ( FLAG_always_compact ) ; <nl> + heap - > CollectAllGarbage ( Heap : : kNoGCFlags ) ; <nl> + heap - > CollectAllGarbage ( Heap : : kNoGCFlags ) ; <nl> + heap - > CollectAllGarbage ( Heap : : kNoGCFlags ) ; <nl> + } <nl> mmm a / test / mjsunit / harmony / collections . js <nl> ppp b / test / mjsunit / harmony / collections . js <nl> function TestValidSetCalls ( m ) { <nl> assertDoesNotThrow ( function ( ) { m . 
delete ( new Object ) } ) ; <nl> } <nl> TestValidSetCalls ( new Set ) ; <nl> + TestValidSetCalls ( new WeakSet ) ; <nl> <nl> <nl> / / Test valid getter and setter calls on Maps and WeakMaps <nl> function TestSetBehavior ( set ) { <nl> } <nl> } <nl> TestSetBehavior ( new Set ) ; <nl> + TestSet ( new WeakSet , new Object ) ; <nl> <nl> <nl> / / Test expected mapping behavior for Maps and WeakMaps <nl> function TestEnumerable ( func ) { <nl> TestEnumerable ( Set ) ; <nl> TestEnumerable ( Map ) ; <nl> TestEnumerable ( WeakMap ) ; <nl> + TestEnumerable ( WeakSet ) ; <nl> <nl> <nl> / / Test arbitrary properties on Maps and WeakMaps <nl> TestArbitrary ( new WeakMap ) ; <nl> assertTrue ( Set ( ) instanceof Set ) ; <nl> assertTrue ( Map ( ) instanceof Map ) ; <nl> assertTrue ( WeakMap ( ) instanceof WeakMap ) ; <nl> + assertTrue ( WeakSet ( ) instanceof WeakSet ) ; <nl> <nl> <nl> / / Test whether NaN values as keys are treated correctly . <nl> assertTrue ( s instanceof Set ) ; <nl> assertTrue ( Set . prototype . add instanceof Function ) <nl> assertTrue ( Set . prototype . has instanceof Function ) <nl> assertTrue ( Set . prototype . delete instanceof Function ) <nl> + assertTrue ( Set . prototype . clear instanceof Function ) <nl> <nl> <nl> / / Test some common JavaScript idioms for Maps <nl> assertTrue ( Map . prototype . set instanceof Function ) <nl> assertTrue ( Map . prototype . get instanceof Function ) <nl> assertTrue ( Map . prototype . has instanceof Function ) <nl> assertTrue ( Map . prototype . delete instanceof Function ) <nl> + assertTrue ( Map . prototype . clear instanceof Function ) <nl> <nl> <nl> / / Test some common JavaScript idioms for WeakMaps <nl> assertTrue ( WeakMap . prototype . set instanceof Function ) <nl> assertTrue ( WeakMap . prototype . get instanceof Function ) <nl> assertTrue ( WeakMap . prototype . has instanceof Function ) <nl> assertTrue ( WeakMap . prototype . delete instanceof Function ) <nl> + assertTrue ( WeakMap . prototype . clear instanceof Function ) <nl> <nl> <nl> - / / Test class of the Set , Map and WeakMap instance and prototype . <nl> + / / Test some common JavaScript idioms for WeakSets <nl> + var s = new WeakSet ; <nl> + assertTrue ( s instanceof WeakSet ) ; <nl> + assertTrue ( WeakSet . prototype . add instanceof Function ) <nl> + assertTrue ( WeakSet . prototype . has instanceof Function ) <nl> + assertTrue ( WeakSet . prototype . delete instanceof Function ) <nl> + assertTrue ( WeakSet . prototype . clear instanceof Function ) <nl> + <nl> + <nl> + / / Test class of instance and prototype . <nl> assertEquals ( " Set " , % _ClassOf ( new Set ) ) <nl> assertEquals ( " Object " , % _ClassOf ( Set . prototype ) ) <nl> assertEquals ( " Map " , % _ClassOf ( new Map ) ) <nl> assertEquals ( " Object " , % _ClassOf ( Map . prototype ) ) <nl> assertEquals ( " WeakMap " , % _ClassOf ( new WeakMap ) ) <nl> assertEquals ( " Object " , % _ClassOf ( WeakMap . prototype ) ) <nl> + assertEquals ( " WeakSet " , % _ClassOf ( new WeakSet ) ) <nl> + assertEquals ( " Object " , % _ClassOf ( WeakMap . prototype ) ) <nl> + <nl> + <nl> + / / Test name of constructor . <nl> + assertEquals ( " Set " , Set . name ) ; <nl> + assertEquals ( " Map " , Map . name ) ; <nl> + assertEquals ( " WeakMap " , WeakMap . name ) ; <nl> + assertEquals ( " WeakSet " , WeakSet . name ) ; <nl> <nl> <nl> - / / Test constructor property of the Set , Map and WeakMap prototype . <nl> + / / Test constructor property of the Set , Map , WeakMap and WeakSet prototype . 
<nl> function TestConstructor ( C ) { <nl> assertFalse ( C = = = Object . prototype . constructor ) ; <nl> assertSame ( C , C . prototype . constructor ) ; <nl> function TestConstructor ( C ) { <nl> TestConstructor ( Set ) ; <nl> TestConstructor ( Map ) ; <nl> TestConstructor ( WeakMap ) ; <nl> + TestConstructor ( WeakSet ) ; <nl> + <nl> + <nl> + function TestDescriptor ( global , C ) { <nl> + assertEquals ( { <nl> + value : C , <nl> + writable : true , <nl> + enumerable : false , <nl> + configurable : true <nl> + } , Object . getOwnPropertyDescriptor ( global , C . name ) ) ; <nl> + } <nl> + TestDescriptor ( this , Set ) ; <nl> + TestDescriptor ( this , Map ) ; <nl> + TestDescriptor ( this , WeakMap ) ; <nl> + TestDescriptor ( this , WeakSet ) ; <nl> <nl> <nl> / / Regression test for WeakMap prototype . <nl> var alwaysBogus = [ undefined , null , true , " x " , 23 , { } ] ; <nl> var bogusReceiversTestSet = [ <nl> { proto : Set . prototype , <nl> funcs : [ ' add ' , ' has ' , ' delete ' ] , <nl> - receivers : alwaysBogus . concat ( [ new Map , new WeakMap ] ) , <nl> + receivers : alwaysBogus . concat ( [ new Map , new WeakMap , new WeakSet ] ) , <nl> } , <nl> { proto : Map . prototype , <nl> funcs : [ ' get ' , ' set ' , ' has ' , ' delete ' ] , <nl> - receivers : alwaysBogus . concat ( [ new Set , new WeakMap ] ) , <nl> + receivers : alwaysBogus . concat ( [ new Set , new WeakMap , new WeakSet ] ) , <nl> } , <nl> { proto : WeakMap . prototype , <nl> funcs : [ ' get ' , ' set ' , ' has ' , ' delete ' ] , <nl> - receivers : alwaysBogus . concat ( [ new Set , new Map ] ) , <nl> + receivers : alwaysBogus . concat ( [ new Set , new Map , new WeakSet ] ) , <nl> + } , <nl> + { proto : WeakSet . prototype , <nl> + funcs : [ ' add ' , ' has ' , ' delete ' ] , <nl> + receivers : alwaysBogus . concat ( [ new Set , new Map , new WeakMap ] ) , <nl> } , <nl> ] ; <nl> function TestBogusReceivers ( testSet ) { <nl> for ( var i = 9 ; i > = 0 ; i - - ) { <nl> assertFalse ( w . has ( k ) ) ; <nl> assertEquals ( undefined , w . get ( k ) ) ; <nl> } ) ( ) ; <nl> + <nl> + <nl> + / / Test WeakSet clear <nl> + ( function ( ) { <nl> + var k = new Object ( ) ; <nl> + var w = new WeakSet ( ) ; <nl> + w . add ( k ) ; <nl> + assertTrue ( w . has ( k ) ) ; <nl> + w . clear ( ) ; <nl> + assertFalse ( w . has ( k ) ) ; <nl> + } ) ( ) ; <nl> \ No newline at end of file <nl> mmm a / tools / grokdump . py <nl> ppp b / tools / grokdump . py <nl> def FindSymbol ( self , addr ) : <nl> 180 : " JS_ARRAY_TYPE " , <nl> 171 : " JS_PROXY_TYPE " , <nl> 183 : " JS_WEAK_MAP_TYPE " , <nl> - 184 : " JS_REGEXP_TYPE " , <nl> - 185 : " JS_FUNCTION_TYPE " , <nl> + 184 : " JS_WEAK_SET_TYPE " , <nl> + 185 : " JS_REGEXP_TYPE " , <nl> + 186 : " JS_FUNCTION_TYPE " , <nl> 170 : " JS_FUNCTION_PROXY_TYPE " , <nl> 165 : " DEBUG_INFO_TYPE " , <nl> 166 : " BREAK_POINT_INFO_TYPE " , <nl> | ES6 : Implement WeakSet | v8/v8 | ce81b0d3a8c1e456779e45708e2370b9aade2fed | 2013-07-22T08:32:24Z |
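The V8 commit above exposes a Harmony WeakSet built on the shared WeakCollection runtime functions, with add, has, delete and clear installed on the prototype. A brief JavaScript usage sketch follows, assuming a build in which the experimental global is enabled (the exact harmony flag guarding InitializeExperimentalGlobal() is not visible in this excerpt); the behaviour shown follows the semantics in collection.js and the mjsunit test additions above.

// Values must be objects (or symbols); anything else throws a TypeError
// via the invalid_weakset_value message added in messages.js.
var key = {};
var ws = new WeakSet();

ws.add(key);
var hasKey = ws.has(key);        // true

ws.delete(key);                  // returns true, entry removed
var stillThere = ws.has(key);    // false

ws.add(key);
ws.clear();                      // re-initializes the backing table (WeakSetClear)
var afterClear = ws.has(key);    // false

var threw = false;
try {
  ws.add(42);                    // primitive value: rejected
} catch (e) {
  threw = e instanceof TypeError;  // true
}

// Calling WeakSet() without `new` also returns a fresh WeakSet,
// per the WeakSetConstructor fallback in collection.js.
var ws2 = WeakSet();
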
mmm a / scene / main / http_request . cpp <nl> ppp b / scene / main / http_request . cpp <nl> Error HTTPRequest : : request ( const String & p_url , const Vector < String > & p_custom_h <nl> ERR_FAIL_V ( ERR_BUSY ) ; <nl> } <nl> <nl> + if ( timeout > 0 ) { <nl> + timer - > stop ( ) ; <nl> + timer - > start ( timeout ) ; <nl> + } <nl> + <nl> method = p_method ; <nl> <nl> Error err = _parse_url ( p_url ) ; <nl> void HTTPRequest : : _thread_func ( void * p_userdata ) { <nl> <nl> void HTTPRequest : : cancel_request ( ) { <nl> <nl> + timer - > stop ( ) ; <nl> + <nl> if ( ! requesting ) <nl> return ; <nl> <nl> int HTTPRequest : : get_body_size ( ) const { <nl> return body_len ; <nl> } <nl> <nl> + void HTTPRequest : : set_timeout ( int p_timeout ) { <nl> + <nl> + ERR_FAIL_COND ( p_timeout < 0 ) ; <nl> + timeout = p_timeout ; <nl> + } <nl> + <nl> + int HTTPRequest : : get_timeout ( ) { <nl> + <nl> + return timeout ; <nl> + } <nl> + <nl> + void HTTPRequest : : _timeout ( ) { <nl> + <nl> + cancel_request ( ) ; <nl> + call_deferred ( " _request_done " , RESULT_TIMEOUT , 0 , PoolStringArray ( ) , PoolByteArray ( ) ) ; <nl> + } <nl> + <nl> void HTTPRequest : : _bind_methods ( ) { <nl> <nl> ClassDB : : bind_method ( D_METHOD ( " request " , " url " , " custom_headers " , " ssl_validate_domain " , " method " , " request_data " ) , & HTTPRequest : : request , DEFVAL ( PoolStringArray ( ) ) , DEFVAL ( true ) , DEFVAL ( HTTPClient : : METHOD_GET ) , DEFVAL ( String ( ) ) ) ; <nl> void HTTPRequest : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " _redirect_request " ) , & HTTPRequest : : _redirect_request ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " _request_done " ) , & HTTPRequest : : _request_done ) ; <nl> <nl> + ClassDB : : bind_method ( D_METHOD ( " set_timeout " , " timeout " ) , & HTTPRequest : : set_timeout ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_timeout " ) , & HTTPRequest : : get_timeout ) ; <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " _timeout " ) , & HTTPRequest : : _timeout ) ; <nl> + <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : STRING , " download_file " , PROPERTY_HINT_FILE ) , " set_download_file " , " get_download_file " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " use_threads " ) , " set_use_threads " , " is_using_threads " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : INT , " body_size_limit " , PROPERTY_HINT_RANGE , " - 1 , 2000000000 " ) , " set_body_size_limit " , " get_body_size_limit " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : INT , " max_redirects " , PROPERTY_HINT_RANGE , " - 1 , 64 " ) , " set_max_redirects " , " get_max_redirects " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : INT , " timeout " , PROPERTY_HINT_RANGE , " 0 , 86400 " ) , " set_timeout " , " get_timeout " ) ; <nl> <nl> ADD_SIGNAL ( MethodInfo ( " request_completed " , PropertyInfo ( Variant : : INT , " result " ) , PropertyInfo ( Variant : : INT , " response_code " ) , PropertyInfo ( Variant : : POOL_STRING_ARRAY , " headers " ) , PropertyInfo ( Variant : : POOL_BYTE_ARRAY , " body " ) ) ) ; <nl> <nl> void HTTPRequest : : _bind_methods ( ) { <nl> BIND_ENUM_CONSTANT ( RESULT_DOWNLOAD_FILE_CANT_OPEN ) ; <nl> BIND_ENUM_CONSTANT ( RESULT_DOWNLOAD_FILE_WRITE_ERROR ) ; <nl> BIND_ENUM_CONSTANT ( RESULT_REDIRECT_LIMIT_REACHED ) ; <nl> + BIND_ENUM_CONSTANT ( RESULT_TIMEOUT ) ; <nl> } <nl> <nl> HTTPRequest : : HTTPRequest ( ) { <nl> HTTPRequest : : HTTPRequest ( ) { <nl> downloaded = 0 ; <nl> body_size_limit = - 1 ; <nl> file = NULL ; <nl> + <nl> 
+ timer = memnew ( Timer ) ; <nl> + timer - > set_one_shot ( true ) ; <nl> + timer - > connect ( " timeout " , this , " _timeout " ) ; <nl> + add_child ( timer ) ; <nl> + timeout = 0 ; <nl> } <nl> <nl> HTTPRequest : : ~ HTTPRequest ( ) { <nl> mmm a / scene / main / http_request . h <nl> ppp b / scene / main / http_request . h <nl> <nl> # include " core / os / file_access . h " <nl> # include " core / os / thread . h " <nl> # include " node . h " <nl> + # include " scene / main / timer . h " <nl> <nl> class HTTPRequest : public Node { <nl> <nl> class HTTPRequest : public Node { <nl> RESULT_REQUEST_FAILED , <nl> RESULT_DOWNLOAD_FILE_CANT_OPEN , <nl> RESULT_DOWNLOAD_FILE_WRITE_ERROR , <nl> - RESULT_REDIRECT_LIMIT_REACHED <nl> + RESULT_REDIRECT_LIMIT_REACHED , <nl> + RESULT_TIMEOUT <nl> <nl> } ; <nl> <nl> class HTTPRequest : public Node { <nl> <nl> int max_redirects ; <nl> <nl> + int timeout ; <nl> + <nl> void _redirect_request ( const String & p_new_url ) ; <nl> <nl> bool _handle_response ( bool * ret_value ) ; <nl> class HTTPRequest : public Node { <nl> void set_max_redirects ( int p_max ) ; <nl> int get_max_redirects ( ) const ; <nl> <nl> + Timer * timer ; <nl> + <nl> + void set_timeout ( int p_timeout ) ; <nl> + int get_timeout ( ) ; <nl> + <nl> + void _timeout ( ) ; <nl> + <nl> int get_downloaded_bytes ( ) const ; <nl> int get_body_size ( ) const ; <nl> <nl> | Merge pull request from zaksnet / httprequest - add - timeout | godotengine/godot | 7f80c1dca8892cab4f4fe7a2daf6699755024f09 | 2019-07-09T12:50:12Z |
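The Godot record above wires a one-shot Timer into HTTPRequest: it is (re)started when a request begins, stopped in cancel_request(), and its timeout callback cancels the request and reports a new RESULT_TIMEOUT code. The same watchdog pattern, stripped of the engine API, can be sketched with just the C++ standard library; WatchdogRequest below is a made-up stand-in, not Godot code:

#include <chrono>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

// Minimal one-shot watchdog: start() arms a timeout, finish() disarms it.
class WatchdogRequest {
public:
    void start(std::chrono::milliseconds timeout, std::function<void()> on_timeout) {
        done_ = false;
        watchdog_ = std::thread([this, timeout, on_timeout] {
            std::unique_lock<std::mutex> lock(mutex_);
            // Wake up early if finish() is called, otherwise fire on expiry.
            if (!cv_.wait_for(lock, timeout, [this] { return done_; }))
                on_timeout();              // the timeout won the race
        });
    }

    void finish() {                        // request completed or was cancelled
        {
            std::lock_guard<std::mutex> lock(mutex_);
            done_ = true;
        }
        cv_.notify_all();
        if (watchdog_.joinable())
            watchdog_.join();
    }

private:
    std::thread watchdog_;
    std::mutex mutex_;
    std::condition_variable cv_;
    bool done_ = false;
};

int main() {
    WatchdogRequest req;
    req.start(std::chrono::milliseconds(100),
              [] { std::cout << "request timed out\n"; });
    std::this_thread::sleep_for(std::chrono::milliseconds(200));  // slow "request"
    req.finish();
    return 0;
}

Inside the engine the Timer-node variant is the better fit, since it runs on the scene's main loop and needs no extra thread; the sketch above only shows the arm/disarm logic in isolation.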
mmm a / src / runtime / ext / ext_hotprofiler . cpp <nl> ppp b / src / runtime / ext / ext_hotprofiler . cpp <nl> <nl> <nl> namespace HPHP { <nl> IMPLEMENT_DEFAULT_EXTENSION ( hotprofiler ) ; <nl> + IMPLEMENT_DEFAULT_EXTENSION ( xhprof ) ; <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / helpers <nl> <nl> mmm a / src / runtime / ext / ext_image . cpp <nl> ppp b / src / runtime / ext / ext_image . cpp <nl> <nl> / / # define IM_MEMORY_CHECK <nl> <nl> namespace HPHP { <nl> + IMPLEMENT_DEFAULT_EXTENSION ( exif ) ; <nl> IMPLEMENT_DEFAULT_EXTENSION ( gd ) ; <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> mmm a / src / runtime / ext / ext_intl . cpp <nl> ppp b / src / runtime / ext / ext_intl . cpp <nl> <nl> # include < unicode / unorm . h > <nl> <nl> namespace HPHP { <nl> + IMPLEMENT_DEFAULT_EXTENSION ( idn ) ; <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> int64 f_intl_get_error_code ( ) { <nl> mmm a / src / runtime / ext / ext_ipc . cpp <nl> ppp b / src / runtime / ext / ext_ipc . cpp <nl> using namespace std ; <nl> # endif <nl> <nl> namespace HPHP { <nl> + IMPLEMENT_DEFAULT_EXTENSION ( sysvmsg ) ; <nl> + IMPLEMENT_DEFAULT_EXTENSION ( sysvsem ) ; <nl> + IMPLEMENT_DEFAULT_EXTENSION ( sysvshm ) ; <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> int64 f_ftok ( CStrRef pathname , CStrRef proj ) { <nl> | Fix HPHP to list more of the extensions that it supports | facebook/hhvm | 05d1733d9b19a507c245089ff31cc4483fd5f1f7 | 2011-02-28T19:08:13Z |
mmm a / dbms / include / DB / Common / HashTable / HashTable . h <nl> ppp b / dbms / include / DB / Common / HashTable / HashTable . h <nl> class HashTable : <nl> <nl> bool next ( ) <nl> { <nl> + if ( ! is_initialized ) <nl> + { <nl> + Cell : : State : : read ( in ) ; <nl> + DB : : readVarUInt ( size , in ) ; <nl> + is_initialized = true ; <nl> + } <nl> + <nl> if ( read_count = = size ) <nl> { <nl> is_eof = true ; <nl> return false ; <nl> } <nl> - else if ( read_count = = 0 ) <nl> - { <nl> - Cell : : State : : read ( in ) ; <nl> - DB : : readVarUInt ( size , in ) ; <nl> - } <nl> <nl> cell . read ( in ) ; <nl> + + read_count ; <nl> class HashTable : <nl> <nl> inline const value_type & get ( ) const <nl> { <nl> - if ( ( read_count = = 0 ) | | is_eof ) <nl> + if ( ! is_initialized | | is_eof ) <nl> throw DB : : Exception ( " No available data " , DB : : ErrorCodes : : NO_AVAILABLE_DATA ) ; <nl> <nl> return cell . getValue ( ) ; <nl> } <nl> <nl> private : <nl> - DB : : ReadBuffer in ; <nl> + DB : : ReadBuffer & in ; <nl> Cell cell ; <nl> size_t read_count = 0 ; <nl> size_t size ; <nl> bool is_eof = false ; <nl> + bool is_initialized = false ; <nl> } ; <nl> <nl> class iterator <nl> mmm a / dbms / include / DB / Common / HashTable / SmallTable . h <nl> ppp b / dbms / include / DB / Common / HashTable / SmallTable . h <nl> class SmallTable : <nl> <nl> bool next ( ) <nl> { <nl> - if ( read_count = = size ) <nl> - { <nl> - is_eof = true ; <nl> - return false ; <nl> - } <nl> - else if ( read_count = = 0 ) <nl> + if ( ! is_initialized ) <nl> { <nl> Cell : : State : : read ( in ) ; <nl> DB : : readVarUInt ( size , in ) ; <nl> <nl> if ( size > capacity ) <nl> throw DB : : Exception ( " Illegal size " ) ; <nl> + <nl> + is_initialized = true ; <nl> + } <nl> + <nl> + if ( read_count = = size ) <nl> + { <nl> + is_eof = true ; <nl> + return false ; <nl> } <nl> <nl> cell . read ( in ) ; <nl> class SmallTable : <nl> <nl> inline const value_type & get ( ) const <nl> { <nl> - if ( ( read_count = = 0 ) | | is_eof ) <nl> + if ( ! is_initialized | | is_eof ) <nl> throw DB : : Exception ( " No available data " , DB : : ErrorCodes : : NO_AVAILABLE_DATA ) ; <nl> <nl> return cell . getValue ( ) ; <nl> } <nl> <nl> private : <nl> - DB : : ReadBuffer in ; <nl> + DB : : ReadBuffer & in ; <nl> Cell cell ; <nl> size_t read_count = 0 ; <nl> size_t size ; <nl> bool is_eof = false ; <nl> + bool is_initialized = false ; <nl> } ; <nl> <nl> class iterator <nl> | dbms : Server : Bug fixes in the serialization / deserialization code . [ # METR - 17276 ] | ClickHouse/ClickHouse | 437ca51ce83d0378d48ec1861cd804b6095e00d9 | 2015-09-07T19:03:14Z |
mmm a / modules / dnn / include / opencv2 / dnn / all_layers . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / all_layers . hpp <nl> <nl> # define OPENCV_DNN_DNN_ALL_LAYERS_HPP <nl> # include < opencv2 / dnn . hpp > <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn <nl> - { <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> / / ! @ addtogroup dnn <nl> / / ! @ { <nl> <nl> namespace dnn <nl> <nl> / / ! @ } <nl> / / ! @ } <nl> - <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> } <nl> } <nl> # endif <nl> mmm a / modules / dnn / include / opencv2 / dnn / dict . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / dict . hpp <nl> <nl> # include < map > <nl> # include < ostream > <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn <nl> - { <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> / / ! @ addtogroup dnn <nl> / / ! @ { <nl> <nl> class CV_EXPORTS Dict <nl> } ; <nl> <nl> / / ! @ } <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> } <nl> } <nl> <nl> mmm a / modules / dnn / include / opencv2 / dnn / dnn . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / dnn . hpp <nl> <nl> <nl> # include < vector > <nl> # include < opencv2 / core . hpp > <nl> + <nl> + # if ! defined CV_DOXYGEN & & ! defined CV_DNN_DONT_ADD_EXPERIMENTAL_NS <nl> + # define CV__DNN_EXPERIMENTAL_NS_USE using namespace experimental_dnn_v1 ; <nl> + # define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_v1 { <nl> + # define CV__DNN_EXPERIMENTAL_NS_END } <nl> + # else <nl> + # define CV__DNN_EXPERIMENTAL_NS_USE <nl> + # define CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> + # define CV__DNN_EXPERIMENTAL_NS_END <nl> + # endif <nl> + <nl> # include < opencv2 / dnn / dict . hpp > <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn / / ! This namespace is used for dnn module functionlaity . <nl> - { <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_USE <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> / / ! @ addtogroup dnn <nl> / / ! @ { <nl> <nl> namespace dnn / / ! This namespace is used for dnn module functionlaity . <nl> Size size = Size ( ) , const Scalar & mean = Scalar ( ) , bool swapRB = true ) ; <nl> <nl> / / ! @ } <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> } <nl> } <nl> <nl> mmm a / modules / dnn / include / opencv2 / dnn / dnn . inl . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / dnn . inl . hpp <nl> <nl> <nl> # include < opencv2 / dnn . hpp > <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn <nl> - { <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> <nl> template < typename TypeIter > <nl> DictValue DictValue : : arrayInt ( TypeIter begin , int size ) <nl> inline std : : ostream & operator < < ( std : : ostream & stream , const Dict & dict ) <nl> return stream ; <nl> } <nl> <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> } <nl> } <nl> <nl> mmm a / modules / dnn / include / opencv2 / dnn / layer . details . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / layer . details . hpp <nl> <nl> <nl> # include < opencv2 / dnn / layer . hpp > <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn <nl> - { <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> <nl> / * * @ brief Registers layer constructor in runtime . <nl> * @ param type string , containing type name of the layer . 
<nl> class _LayerStaticRegisterer <nl> } <nl> } ; <nl> <nl> - } } } / / namespace <nl> + } / / namespace <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> + } } / / namespace <nl> <nl> # endif <nl> mmm a / modules / dnn / include / opencv2 / dnn / layer . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / layer . hpp <nl> <nl> # define OPENCV_DNN_LAYER_HPP <nl> # include < opencv2 / dnn . hpp > <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn <nl> - { <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> / / ! @ addtogroup dnn <nl> / / ! @ { <nl> / / ! <nl> class CV_EXPORTS LayerFactory <nl> <nl> / / ! @ } <nl> / / ! @ } <nl> - <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> } <nl> } <nl> # endif <nl> mmm a / modules / dnn / include / opencv2 / dnn / shape_utils . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / shape_utils . hpp <nl> <nl> <nl> namespace cv { <nl> namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> <nl> / / Useful shortcut <nl> inline std : : ostream & operator < < ( std : : ostream & s , cv : : Range & r ) <nl> inline int clamp ( int ax , const MatShape & shape ) <nl> return clamp ( ax , ( int ) shape . size ( ) ) ; <nl> } <nl> <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> } <nl> } <nl> # endif <nl> mmm a / modules / dnn / src / caffe / caffe_importer . cpp <nl> ppp b / modules / dnn / src / caffe / caffe_importer . cpp <nl> <nl> / / M * / <nl> <nl> # include " . . / precomp . hpp " <nl> - using namespace cv ; <nl> - using namespace cv : : dnn ; <nl> <nl> # ifdef HAVE_PROTOBUF <nl> # include " caffe . pb . h " <nl> using namespace cv : : dnn ; <nl> # include < google / protobuf / text_format . h > <nl> # include < google / protobuf / io / zero_copy_stream_impl . h > <nl> # include " caffe_io . hpp " <nl> + # endif <nl> <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> + <nl> + # ifdef HAVE_PROTOBUF <nl> using : : google : : protobuf : : RepeatedField ; <nl> using : : google : : protobuf : : RepeatedPtrField ; <nl> using : : google : : protobuf : : Message ; <nl> class CaffeImporter : public Importer <nl> <nl> } <nl> <nl> - Ptr < Importer > cv : : dnn : : createCaffeImporter ( const String & prototxt , const String & caffeModel ) <nl> + Ptr < Importer > createCaffeImporter ( const String & prototxt , const String & caffeModel ) <nl> { <nl> return Ptr < Importer > ( new CaffeImporter ( prototxt . c_str ( ) , caffeModel . 
c_str ( ) ) ) ; <nl> } <nl> <nl> # else / / HAVE_PROTOBUF <nl> <nl> - Ptr < Importer > cv : : dnn : : createCaffeImporter ( const String & , const String & ) <nl> + Ptr < Importer > createCaffeImporter ( const String & , const String & ) <nl> { <nl> CV_Error ( cv : : Error : : StsNotImplemented , " libprotobuf required to import data from Caffe models " ) ; <nl> return Ptr < Importer > ( ) ; <nl> Ptr < Importer > cv : : dnn : : createCaffeImporter ( const String & , const String & ) <nl> <nl> # endif / / HAVE_PROTOBUF <nl> <nl> - Net cv : : dnn : : readNetFromCaffe ( const String & prototxt , const String & caffeModel / * = String ( ) * / ) <nl> + Net readNetFromCaffe ( const String & prototxt , const String & caffeModel / * = String ( ) * / ) <nl> { <nl> Ptr < Importer > caffeImporter = createCaffeImporter ( prototxt , caffeModel ) ; <nl> Net net ; <nl> Net cv : : dnn : : readNetFromCaffe ( const String & prototxt , const String & caffeModel / <nl> caffeImporter - > populateNet ( net ) ; <nl> return net ; <nl> } <nl> + <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> + } } / / namespace <nl> mmm a / modules / dnn / src / dnn . cpp <nl> ppp b / modules / dnn / src / dnn . cpp <nl> <nl> # include < opencv2 / dnn / shape_utils . hpp > <nl> # include < opencv2 / imgproc . hpp > <nl> <nl> - using namespace cv ; <nl> - using namespace cv : : dnn ; <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> <nl> using std : : vector ; <nl> using std : : map ; <nl> namespace <nl> } ; <nl> } <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn <nl> - { <nl> - <nl> template < typename T > <nl> static String toString ( const T & v ) <nl> { <nl> BackendWrapper : : BackendWrapper ( const Ptr < BackendWrapper > & base , const MatShape & <nl> <nl> BackendWrapper : : ~ BackendWrapper ( ) { } <nl> <nl> - } <nl> - } <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> + } } / / namespace <nl> mmm a / modules / dnn / src / init . cpp <nl> ppp b / modules / dnn / src / init . cpp <nl> <nl> # include " precomp . hpp " <nl> # include < opencv2 / dnn / layer . details . hpp > <nl> <nl> - namespace cv <nl> - { <nl> - namespace dnn <nl> - { <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> <nl> static Mutex * __initialization_mutex = NULL ; <nl> Mutex & getInitializationMutex ( ) <nl> void initializeLayerFactory ( ) <nl> CV_DNN_REGISTER_LAYER_CLASS ( Scale , ScaleLayer ) ; <nl> } <nl> <nl> - } } / / namespace <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> + } } / / namespace <nl> mmm a / modules / dnn / src / precomp . hpp <nl> ppp b / modules / dnn / src / precomp . hpp <nl> <nl> # include < opencv2 / dnn / all_layers . hpp > <nl> <nl> namespace cv { namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> Mutex & getInitializationMutex ( ) ; <nl> void initializeLayerFactory ( ) ; <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> } } / / namespace <nl> mmm a / modules / dnn / src / tensorflow / tf_importer . cpp <nl> ppp b / modules / dnn / src / tensorflow / tf_importer . cpp <nl> Implementation of Tensorflow models parser <nl> * / <nl> <nl> # include " . . / precomp . hpp " <nl> - using namespace cv ; <nl> - using namespace cv : : dnn ; <nl> <nl> # ifdef HAVE_PROTOBUF <nl> # include " graph . pb . h " <nl> using namespace cv : : dnn ; <nl> # include < google / protobuf / text_format . h > <nl> # include < google / protobuf / io / zero_copy_stream_impl . h > <nl> # include " tf_io . 
hpp " <nl> + # endif <nl> + <nl> + namespace cv { <nl> + namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> + <nl> + # if HAVE_PROTOBUF <nl> <nl> using : : google : : protobuf : : RepeatedField ; <nl> using : : google : : protobuf : : RepeatedPtrField ; <nl> void TFImporter : : populateNet ( Net dstNet ) <nl> <nl> } / / namespace <nl> <nl> - Ptr < Importer > cv : : dnn : : createTensorflowImporter ( const String & model ) <nl> + Ptr < Importer > createTensorflowImporter ( const String & model ) <nl> { <nl> return Ptr < Importer > ( new TFImporter ( model . c_str ( ) ) ) ; <nl> } <nl> <nl> # else / / HAVE_PROTOBUF <nl> <nl> - Ptr < Importer > cv : : dnn : : createTensorflowImporter ( const String & ) <nl> + Ptr < Importer > createTensorflowImporter ( const String & ) <nl> { <nl> CV_Error ( cv : : Error : : StsNotImplemented , " libprotobuf required to import data from TensorFlow models " ) ; <nl> return Ptr < Importer > ( ) ; <nl> Ptr < Importer > cv : : dnn : : createTensorflowImporter ( const String & ) <nl> <nl> # endif / / HAVE_PROTOBUF <nl> <nl> - Net cv : : dnn : : readNetFromTensorflow ( const String & model ) <nl> + Net readNetFromTensorflow ( const String & model ) <nl> { <nl> Ptr < Importer > importer = createTensorflowImporter ( model ) ; <nl> Net net ; <nl> Net cv : : dnn : : readNetFromTensorflow ( const String & model ) <nl> importer - > populateNet ( net ) ; <nl> return net ; <nl> } <nl> + <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> + } } / / namespace <nl> mmm a / modules / dnn / src / torch / torch_importer . cpp <nl> ppp b / modules / dnn / src / torch / torch_importer . cpp <nl> <nl> <nl> namespace cv { <nl> namespace dnn { <nl> + CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> + <nl> # if defined ( ENABLE_TORCH_IMPORTER ) & & ENABLE_TORCH_IMPORTER <nl> # include " THDiskFile . h " <nl> <nl> Net readNetFromTorch ( const String & model , bool isBinary ) <nl> return net ; <nl> } <nl> <nl> - } <nl> - } <nl> + CV__DNN_EXPERIMENTAL_NS_END <nl> + } } / / namespace <nl> | Merge pull request from alalek : dnn_experimental_namespace | opencv/opencv | f1a56cb4b73f75e85f8718714a150f0271f19443 | 2017-06-29T07:37:04Z |
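The OpenCV record above wraps the whole cv::dnn API in an experimental_dnn_v1 namespace through a BEGIN/END/USE macro trio, so the versioned namespace can be renamed in a later release while existing cv::dnn::... call sites keep compiling through a using-directive. A stripped-down sketch of the same pattern, with invented names (mylib, experimental_v1), looks like this:

#include <iostream>

// Macro trio mirroring the BEGIN / END / USE idea; defining
// MYLIB_DONT_ADD_EXPERIMENTAL_NS would collapse the extra namespace.
#if !defined MYLIB_DONT_ADD_EXPERIMENTAL_NS
#define MYLIB_EXPERIMENTAL_NS_BEGIN namespace experimental_v1 {
#define MYLIB_EXPERIMENTAL_NS_END }
#define MYLIB_EXPERIMENTAL_NS_USE using namespace experimental_v1;
#else
#define MYLIB_EXPERIMENTAL_NS_BEGIN
#define MYLIB_EXPERIMENTAL_NS_END
#define MYLIB_EXPERIMENTAL_NS_USE
#endif

namespace mylib {
MYLIB_EXPERIMENTAL_NS_BEGIN
inline int answer() { return 42; }   // really mylib::experimental_v1::answer
MYLIB_EXPERIMENTAL_NS_END
MYLIB_EXPERIMENTAL_NS_USE            // re-export into mylib for callers
}

int main() {
    // Qualified lookup still finds the name via the using-directive.
    std::cout << mylib::answer() << "\n";
    return 0;
}

The payoff is that a breaking API revision can ship as a new experimental_v2 namespace while the old symbols remain reachable for a transition period.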
mmm a / python / google / protobuf / __init__ . py <nl> ppp b / python / google / protobuf / __init__ . py <nl> <nl> # <nl> # Copyright 2007 Google Inc . All Rights Reserved . <nl> <nl> - __version__ = ' 2 . 6 . 1 ' <nl> + __version__ = ' 3 . 0 . 0 - pre ' <nl> | Updated __version__ | protocolbuffers/protobuf | 4f0170d465ed03a3ae8815f5c42104e5ce009326 | 2014-12-14T18:42:46Z |
mmm a / src / arm / cpu - arm . cc <nl> ppp b / src / arm / cpu - arm . cc <nl> void CpuFeatures : : FlushICache ( void * start , size_t size ) { <nl> register uint32_t end asm ( " r1 " ) = beg + size ; <nl> register uint32_t flg asm ( " r2 " ) = 0 ; <nl> <nl> + # ifdef __clang__ <nl> + / / This variant of the asm avoids a constant pool entry , which can be <nl> + / / problematic when LTO ' ing . It is also slightly shorter . <nl> + register uint32_t scno asm ( " r7 " ) = __ARM_NR_cacheflush ; <nl> + <nl> + asm volatile ( " svc 0 \ n " <nl> + : <nl> + : " r " ( beg ) , " r " ( end ) , " r " ( flg ) , " r " ( scno ) <nl> + : " memory " ) ; <nl> + # else <nl> + / / Use a different variant of the asm with GCC because some versions doesn ' t <nl> + / / support r7 as an asm input . <nl> asm volatile ( <nl> / / This assembly works for both ARM and Thumb targets . <nl> <nl> void CpuFeatures : : FlushICache ( void * start , size_t size ) { <nl> : " r " ( beg ) , " r " ( end ) , " r " ( flg ) , [ scno ] " i " ( __ARM_NR_cacheflush ) <nl> : " memory " ) ; <nl> # endif <nl> + # endif <nl> } <nl> <nl> } } / / namespace v8 : : internal <nl> | Use a different variant of CpuFeatures : : FlushICache asm with clang . | v8/v8 | 0c05bdfd09ea5e01e5d8e6de1260ef43446ab15f | 2015-03-26T11:41:09Z |
mmm a / dbms / src / Interpreters / DDLWorker . cpp <nl> ppp b / dbms / src / Interpreters / DDLWorker . cpp <nl> static bool isSupportedAlterType ( int type ) <nl> ASTAlterCommand : : DROP_PARTITION , <nl> ASTAlterCommand : : DELETE , <nl> ASTAlterCommand : : UPDATE , <nl> - <nl> ASTAlterCommand : : COMMENT_COLUMN , <nl> ASTAlterCommand : : MODIFY_ORDER_BY , <nl> ASTAlterCommand : : MODIFY_TTL , <nl> | remove line | ClickHouse/ClickHouse | d23e505414d9cdd285a4e43630c7c444b15fbe88 | 2019-06-12T10:56:01Z |
mmm a / hphp / hack / src / server / serverLocalConfig . ml <nl> ppp b / hphp / hack / src / server / serverLocalConfig . ml <nl> let load_ fn ~ silent ~ current_version overrides = <nl> match naming_sqlite_path with <nl> | Some _ - > true <nl> | None - > <nl> - bool_if_version <nl> + bool_if_min_version <nl> " enable_naming_table_fallback " <nl> ~ default : default . enable_naming_table_fallback <nl> + ~ current_version <nl> config <nl> in <nl> let symbolindex_search_provider = <nl> | Switch setting for SQLite naming table from bool_if_version to bool_if_min_version | facebook/hhvm | b9d480c7dd6adde958dfef2debda2b43d2068991 | 2019-09-19T18:26:25Z |
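The HHVM record above switches the SQLite naming-table flag from an exact-version gate (bool_if_version) to a minimum-version gate (bool_if_min_version), so, as the names suggest, the flag can apply to the configured version and anything newer rather than a single release. A generic at-least-this-version comparison, written here as an illustrative C++ helper with invented names (it is not the OCaml code being patched), could look like:

#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Split "4.2.1" into {4, 2, 1}; missing components are treated as 0.
static std::vector<long> parse_version(const std::string& v) {
    std::vector<long> parts;
    std::stringstream ss(v);
    std::string item;
    while (std::getline(ss, item, '.'))
        parts.push_back(std::strtol(item.c_str(), nullptr, 10));
    return parts;
}

// True when `current` >= `required`, comparing numeric components piecewise.
static bool at_least_version(const std::string& current, const std::string& required) {
    std::vector<long> a = parse_version(current), b = parse_version(required);
    std::size_t n = std::max(a.size(), b.size());
    for (std::size_t i = 0; i < n; ++i) {
        long x = i < a.size() ? a[i] : 0;
        long y = i < b.size() ? b[i] : 0;
        if (x != y)
            return x > y;
    }
    return true;   // equal versions satisfy the minimum
}

int main() {
    std::cout << std::boolalpha
              << at_least_version("4.3", "4.2.1") << "\n"   // true
              << at_least_version("4.1", "4.2") << "\n";    // false
    return 0;
}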
mmm a / Source / CNTKv2LibraryDll / proto / onnx / CNTKToONNX . cpp <nl> ppp b / Source / CNTKv2LibraryDll / proto / onnx / CNTKToONNX . cpp <nl> namespace CNTK <nl> static ONNXIR : : Node * InsertReshapeNodeToCNTKFunction ( const FunctionPtr & src , ONNXIR : : Node * node , const std : : vector < int > & shape , ONNXIR : : Graph * graph ) ; <nl> <nl> / / <nl> - / / Create a LSTM node . <nl> + / / methods to create a RNN / LSTM / GRU node . <nl> / / <nl> static ONNXIR : : Node * CreateLSTMNode ( const FunctionPtr & src , <nl> ONNXIR : : Graph * graph , <nl> namespace CNTK <nl> std : : unordered_map < FunctionPtr , ONNXIR : : Node * > & functionNodes , <nl> std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> const std : : unordered_map < Variable , Variable > & compositeOutputsMap ) ; <nl> + static ONNXIR : : Node * CreateRNNNode ( const FunctionPtr & src , <nl> + ONNXIR : : Graph * graph , <nl> + std : : unordered_map < FunctionPtr , ONNXIR : : Node * > & functionNodes , <nl> + std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> + const std : : unordered_map < Variable , Variable > & compositeOutputsMap ) ; <nl> <nl> static void PrepareRNNInput ( const Variable & X , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) ; <nl> static void PrepareLSTMInitialStateNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> const std : : vector < Variable > & initialVariables , int batchSize , int cellSize , <nl> const std : : string & uid , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) ; <nl> <nl> - static void PrepareGRUWeightNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> - const std : : vector < Variable > & Ws , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) ; <nl> + static void PrepareRNNWeightNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> + const std : : vector < Variable > & Ws , std : : vector < ONNXIR : : NodeArg > & nodeInputs , <nl> + std : : function < void ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> + onnx : : TensorProto & dst , const onnx : : TypeProto & inputArgType ) > weightConverter ) ; <nl> static void PrepareGRUZRHWeightNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> const std : : vector < Variable > & Rs , const std : : vector < Variable > & Rh1s , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) ; <nl> static void PrepareGRUBiasNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> const std : : vector < Variable > & Bs , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) ; <nl> <nl> + static void PrepareRNNBiasNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> + const std : : vector < Variable > & Bs , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) ; <nl> + <nl> static void PrepareLSTMWeightNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> const std : : vector < Variable > & Ws , double * stabilizerConstants , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) ; <nl> static void PrepareLSTMBiasNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> namespace CNTK <nl> onnx : : TensorProto & dst , const 
onnx : : TypeProto & inputArgType ) ; <nl> <nl> <nl> - static void CopyGRUBiasTensors ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> + static void CopyRNNBiasTensors ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> onnx : : TensorProto & dst , const onnx : : TypeProto & inputArgType ) ; <nl> <nl> static void CopyGRUWeightTensors ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> namespace CNTK <nl> const std : : vector < NDArrayViewPtr > & srcZRTensors , const std : : vector < NDArrayViewPtr > & srcHTensors , <nl> onnx : : TensorProto & dst , const onnx : : TypeProto & inputArgType ) ; <nl> <nl> + static void CopyRNNWeightTensors ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> + onnx : : TensorProto & dst , const onnx : : TypeProto & inputArgType ) ; <nl> + <nl> static void FillTensorWithScalar ( const std : : vector < NDArrayViewPtr > & src , onnx : : TensorProto & dst , const std : : vector < int > dstShape ) ; <nl> <nl> / / <nl> void AppendCNTKBiasWeightToONNXTensor ( DType * data , const NDShape & shape , onnx : : T <nl> row - = 2 * cell_size ; <nl> } <nl> <nl> - / / soruce is collmn major <nl> + / / source is collmn major <nl> int src_index = row ; <nl> if ( typeid ( DType ) = = typeid ( float ) ) <nl> * ( dst . mutable_float_data ( ) - > Add ( ) ) = ( float ) data [ src_index ] ; <nl> void AppendCNTKWeightToONNXTensor ( DType * data , const NDShape & shape , onnx : : Tenso <nl> row - = 2 * cell_size ; <nl> } <nl> <nl> - / / soruce is collum major <nl> + / / source is column major <nl> int src_index = LSTMWeightDimensionHiddenMultiplier * cell_size * col + row ; <nl> if ( typeid ( DType ) = = typeid ( float ) ) <nl> * ( dst . mutable_float_data ( ) - > Add ( ) ) = ( float ) ( data [ src_index ] * stabilizer ) ; <nl> void CNTKToONNXHelper : : CopyTensorsWithMultipliers ( const std : : vector < NDArrayViewP <nl> CopyShapeTypeProtoToTensorProto ( inputArgType , dst ) ; <nl> } <nl> <nl> - void CNTKToONNXHelper : : CopyGRUBiasTensors ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> + void CNTKToONNXHelper : : CopyRNNBiasTensors ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> onnx : : TensorProto & dst , const onnx : : TypeProto & inputArgType ) <nl> { <nl> if ( srcTensors . empty ( ) ) <nl> void CNTKToONNXHelper : : CopyGRUWeightTensors ( const std : : vector < NDArrayViewPtr > & s <nl> int row = targetIndex / input_size , <nl> col = targetIndex % input_size ; <nl> <nl> - / / soruce is collum major <nl> + / / source is column major <nl> int srcIndex = 3 * cell_size * col + row ; <nl> AddDataElementArrayViewToTensorProto ( srcTemp , srcIndex , dst ) ; <nl> } <nl> void CNTKToONNXHelper : : CopyGRUStateWeightTensors ( <nl> CopyShapeTypeProtoToTensorProto ( inputArgType , dst ) ; <nl> } <nl> <nl> + void CNTKToONNXHelper : : CopyRNNWeightTensors ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> + onnx : : TensorProto & dst , const onnx : : TypeProto & inputArgType ) <nl> + { <nl> + if ( srcTensors . empty ( ) ) <nl> + return ; <nl> + <nl> + DataType dataType = srcTensors [ 0 ] - > GetDataType ( ) ; <nl> + SetTensorType ( dst , dataType ) ; <nl> + <nl> + for ( int i = 0 ; i < srcTensors . size ( ) ; i + + ) <nl> + { <nl> + auto srcTemp = srcTensors [ i ] - > DeepClone ( ) ; <nl> + auto srcShape = srcTemp - > Shape ( ) ; <nl> + <nl> + int cell_size = srcShape [ 0 ] ; <nl> + int input_size = srcShape [ 1 ] ; <nl> + <nl> + / / This is our own copy so move it to the CPU . 
<nl> + srcTemp - > ChangeDevice ( DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + <nl> + auto totalSize = srcShape . TotalSize ( ) ; <nl> + for ( size_t targetIndex = 0 ; targetIndex < totalSize ; targetIndex + + ) <nl> + { <nl> + / / row major layout <nl> + int row = targetIndex / input_size , <nl> + col = targetIndex % input_size ; <nl> + <nl> + / / source is column major <nl> + int srcIndex = cell_size * col + row ; <nl> + AddDataElementArrayViewToTensorProto ( srcTemp , srcIndex , dst ) ; <nl> + } <nl> + } <nl> + <nl> + CopyShapeTypeProtoToTensorProto ( inputArgType , dst ) ; <nl> + } <nl> + <nl> void CNTKToONNXHelper : : CopyTensor ( const NDArrayViewPtr src , onnx : : TensorProto & dst , onnx : : TypeProto * inputArgType / * = nullptr * / ) <nl> { <nl> auto dataType = src - > GetDataType ( ) ; <nl> ONNXIR : : Node * CNTKToONNXHelper : : CreateLSTMNode ( const FunctionPtr & src , <nl> std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> const std : : unordered_map < Variable , Variable > & compositeOutputsMap ) <nl> { <nl> - / / sanity check : <nl> - std : : vector < FunctionPtr > lstms ; <nl> - if ( src - > OpName ( ) = = L " LSTM " ) <nl> - { <nl> - lstms . push_back ( src ) ; <nl> - } <nl> - else if ( src - > OpName ( ) = = L " Splice " ) / / src is a Splice op with inputs from two LSTM ops . <nl> - { <nl> - for ( auto & input : src - > Inputs ( ) ) <nl> - { <nl> - lstms . push_back ( input . Owner ( ) ) ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - LogicError ( " An LSTM op should start with an LSTM op ( single direction ) or a Splice op ( bidirectional ) . " ) ; <nl> - } <nl> - <nl> - / / For single direction LSTM , lstms . size ( ) = = 1 . For bidirectional LSTM , lstms . size ( ) = = 2 . <nl> - / / It is an error otherwise . <nl> - if ( lstms . size ( ) = = 0 | | lstms . size ( ) > 2 | | <nl> - std : : any_of ( lstms . cbegin ( ) , lstms . cend ( ) , [ ] ( const FunctionPtr & f ) { return f - > OpName ( ) ! = L " LSTM " ; } ) ) <nl> - { <nl> - LogicError ( " Invalid number of LSTM ops to construct an ONNX LSTM node . " ) ; <nl> - } <nl> + std : : vector < FunctionPtr > lstms = GetRNNBlocksFromSingleOrBidirectionalRNN ( src , " LSTM " ) ; <nl> <nl> / / order forward , backward <nl> std : : map < RNNDirection , int > directionCount ( { { RNNDirection : : Forward , 0 } , { RNNDirection : : Backward , 0 } } ) ; <nl> void CNTKToONNXHelper : : PrepareGRUBiasNode ( ONNXIR : : Graph * graph , std : : unordered_m <nl> <nl> onnx : : TensorProto dstTensor ; <nl> <nl> - CopyGRUBiasTensors ( srcTensors , dstTensor , inputArgType ) ; <nl> + CopyRNNBiasTensors ( srcTensors , dstTensor , inputArgType ) ; <nl> variableNode - > AddAttribute ( " value " , dstTensor ) ; <nl> nodeInputs . push_back ( inputArg ) ; <nl> <nl> void CNTKToONNXHelper : : PrepareGRUZRHWeightNode ( ONNXIR : : Graph * graph , std : : unorde <nl> <nl> variableNodes . 
emplace ( Rzrs [ 0 ] , variableNode ) ; <nl> } <nl> - void CNTKToONNXHelper : : PrepareGRUWeightNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> - const std : : vector < Variable > & Ws , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) <nl> + <nl> + void CNTKToONNXHelper : : PrepareRNNWeightNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> + const std : : vector < Variable > & Ws , std : : vector < ONNXIR : : NodeArg > & nodeInputs , <nl> + std : : function < void ( const std : : vector < NDArrayViewPtr > & srcTensors , <nl> + onnx : : TensorProto & dst , const onnx : : TypeProto & inputArgType ) > weightConverter ) <nl> { <nl> / / TODO : sanity check for all variables to have the same shape and data types . <nl> bool doReverseVec = false ; <nl> void CNTKToONNXHelper : : PrepareGRUWeightNode ( ONNXIR : : Graph * graph , std : : unordered <nl> <nl> onnx : : TensorProto dstTensor ; <nl> <nl> - CopyGRUWeightTensors ( srcTensors , dstTensor , inputArgType ) ; <nl> + weightConverter ( srcTensors , dstTensor , inputArgType ) ; <nl> variableNode - > AddAttribute ( " value " , dstTensor ) ; <nl> nodeInputs . push_back ( inputArg ) ; <nl> <nl> ONNXIR : : Node * CNTKToONNXHelper : : CreateGRUNode ( const FunctionPtr & src , <nl> std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> const std : : unordered_map < Variable , Variable > & compositeOutputsMap ) <nl> { <nl> - / / sanity check : <nl> - std : : vector < FunctionPtr > grus ; <nl> - if ( src - > OpName ( ) = = L " GRU " ) <nl> - { <nl> - grus . push_back ( src ) ; <nl> - } <nl> - else if ( src - > OpName ( ) = = L " Splice " ) / / src is a Splice op with inputs from two LSTM ops . <nl> - { <nl> - for ( auto & input : src - > Inputs ( ) ) <nl> - { <nl> - grus . push_back ( input . Owner ( ) ) ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - LogicError ( " An GRU op should start with an GRU op ( single direction ) or a Splice op ( bidirectional ) . " ) ; <nl> - } <nl> - <nl> - / / For single direction GRU , grus . size ( ) = = 1 . For bidirectional GRU , grus . size ( ) = = 2 . <nl> - / / It is an error otherwise . <nl> - if ( grus . size ( ) = = 0 | | grus . size ( ) > 2 | | <nl> - std : : any_of ( grus . cbegin ( ) , grus . cend ( ) , [ ] ( const FunctionPtr & f ) { return f - > OpName ( ) ! = L " GRU " ; } ) ) <nl> - { <nl> - LogicError ( " Invalid number of GRU ops to construct an ONNX GRU node . 
" ) ; <nl> - } <nl> + std : : vector < FunctionPtr > grus = GetRNNBlocksFromSingleOrBidirectionalRNN ( src , " GRU " ) ; <nl> <nl> / / order forward , backward <nl> std : : map < RNNDirection , int > directionCount ( { { RNNDirection : : Forward , 0 } , { RNNDirection : : Backward , 0 } } ) ; <nl> ONNXIR : : Node * CNTKToONNXHelper : : CreateGRUNode ( const FunctionPtr & src , <nl> <nl> initialHs [ directionIndex ] = initStateH ; <nl> <nl> - <nl> activations [ directionIndex * GRUActivationCount + GRUActivationFIndex ] = f_activation ; <nl> activations [ directionIndex * GRUActivationCount + GRUActivationGIndex ] = g_activation ; <nl> <nl> ONNXIR : : Node * CNTKToONNXHelper : : CreateGRUNode ( const FunctionPtr & src , <nl> / / inputs <nl> std : : vector < ONNXIR : : NodeArg > nodeInputs ; <nl> PrepareRNNInput ( Xs [ 0 ] , nodeInputs ) ; <nl> - PrepareGRUWeightNode ( graph , variableNodes , Ws , nodeInputs ) ; <nl> + PrepareRNNWeightNode ( graph , variableNodes , Ws , nodeInputs , CopyGRUWeightTensors ) ; <nl> PrepareGRUZRHWeightNode ( graph , variableNodes , Rzrs , Rhs , nodeInputs ) ; <nl> <nl> { <nl> ONNXIR : : Node * CNTKToONNXHelper : : CreateGRUNode ( const FunctionPtr & src , <nl> return squeezedLSTMNode ; <nl> } <nl> <nl> + void CNTKToONNXHelper : : PrepareRNNBiasNode ( ONNXIR : : Graph * graph , std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> + const std : : vector < Variable > & Bs , std : : vector < ONNXIR : : NodeArg > & nodeInputs ) <nl> + { <nl> + / / TODO : sanity check for all variables to have the same shape and data types . <nl> + bool doReverseVec = false ; <nl> + int numDirections = Bs . size ( ) ; <nl> + int hiddenSize = Bs [ 0 ] . Shape ( ) [ 0 ] ; <nl> + <nl> + std : : vector < int > shape ( { numDirections , 2 * hiddenSize } ) ; <nl> + <nl> + / / ONNX GRU spec has 2 bias , for forward and backward . <nl> + onnx : : TypeProto inputArgType = ToTypeProto ( shape , doReverseVec ) ; <nl> + UpdateONNXType ( Bs [ 0 ] . GetDataType ( ) , inputArgType ) ; <nl> + ONNXIR : : NodeArg inputArg ( ToString ( Bs [ 0 ] . Uid ( ) ) , & inputArgType ) ; <nl> + std : : vector < ONNXIR : : NodeArg > varOutputs ( { inputArg } ) ; <nl> + std : : vector < ONNXIR : : NodeArg > varInputs ; <nl> + std : : string inputName = inputArg . Name ( ) ; <nl> + ONNXIR : : Node * variableNode = graph - > AddNode ( inputName , " Constant " , " " , varInputs , varOutputs ) ; <nl> + <nl> + std : : vector < NDArrayViewPtr > srcTensors ; <nl> + for ( int i = 0 ; i < Bs . size ( ) ; i + + ) <nl> + { <nl> + const Variable & variable = Bs [ i ] ; <nl> + srcTensors . push_back ( variable . IsParameter ( ) ? Parameter ( variable ) . Value ( ) : Constant ( variable ) . Value ( ) ) ; <nl> + } <nl> + <nl> + onnx : : TensorProto dstTensor ; <nl> + <nl> + CopyRNNBiasTensors ( srcTensors , dstTensor , inputArgType ) ; <nl> + variableNode - > AddAttribute ( " value " , dstTensor ) ; <nl> + nodeInputs . push_back ( inputArg ) ; <nl> + <nl> + variableNodes . 
emplace ( Bs [ 0 ] , variableNode ) ; <nl> + } <nl> + <nl> + <nl> + ONNXIR : : Node * CNTKToONNXHelper : : CreateRNNNode ( const FunctionPtr & src , <nl> + ONNXIR : : Graph * graph , <nl> + std : : unordered_map < FunctionPtr , ONNXIR : : Node * > & functionNodes , <nl> + std : : unordered_map < Variable , ONNXIR : : Node * > & variableNodes , <nl> + const std : : unordered_map < Variable , Variable > & compositeOutputsMap ) <nl> + { <nl> + std : : vector < FunctionPtr > rnns = GetRNNBlocksFromSingleOrBidirectionalRNN ( src , " RNNStep " ) ; <nl> + <nl> + / / order forward , backward <nl> + std : : map < RNNDirection , int > directionCount ( { { RNNDirection : : Forward , 0 } , { RNNDirection : : Backward , 0 } } ) ; <nl> + <nl> + / / The following construct refers to ONNX spec : <nl> + / / https : / / github . com / onnx / onnx / blob / master / docs / Operators . md # lstm <nl> + / / specifically , for attrubute and variable dimension . <nl> + / / We use the term from the spec as possible as we can to maintain a close correlation <nl> + / / to the ONNX specification . <nl> + <nl> + int num_directions = rnns . size ( ) ; <nl> + / / A list of 3 ( or 6 if bidirectional ) activation functions for input , output , forget , cell , and hidden . <nl> + std : : vector < std : : string > activations ( num_directions ) ; <nl> + <nl> + / / TODO : <nl> + / / In principle all these variables shall be treated as either constant or op output . <nl> + / / In reality except X , all other inputs to LSTM can be treated as constant . <nl> + std : : vector < Variable > Xs ( num_directions ) , Ws ( num_directions ) , Rs ( num_directions ) , <nl> + Bs ( num_directions ) , initialHs ( num_directions ) ; <nl> + <nl> + std : : vector < Variable > Yhs ( rnns . size ( ) ) ; <nl> + <nl> + for ( std : : vector < FunctionPtr > : : const_iterator itRNNBlock = rnns . cbegin ( ) ; itRNNBlock ! = rnns . cend ( ) ; itRNNBlock + + ) <nl> + { <nl> + / / src has to be an RNN node . <nl> + const FunctionPtr & rnn = * itRNNBlock ; <nl> + std : : vector < Variable > inputs = rnn - > Inputs ( ) ; <nl> + if ( inputs . size ( ) ! = CNTKRNNInputCount ) <nl> + LogicError ( " A RNN block does not have expected input count ( % d ) . Actual input count is % d " , ( int ) CNTKRNNInputCount , ( int ) inputs . size ( ) ) ; <nl> + <nl> + string activation ; <nl> + RNNDirection direction ; <nl> + Variable initStateH ; <nl> + TraceRNNPathes ( rnn , activation , direction , initStateH ) ; <nl> + <nl> + directionCount [ direction ] + + ; <nl> + <nl> + int directionIndex = rnns . size ( ) = = 1 ? 0 : ( direction ? 1 : 0 ) ; <nl> + <nl> + initialHs [ directionIndex ] = initStateH ; <nl> + <nl> + activations [ directionIndex ] = activation ; <nl> + <nl> + Xs [ directionIndex ] = inputs [ CNTKRNNInputIndex ] ; <nl> + <nl> + Ws [ directionIndex ] = inputs [ CNTKRNNWeightIndex ] ; <nl> + <nl> + Rs [ directionIndex ] = inputs [ CNTKRNNHweightIndex ] ; <nl> + <nl> + Bs [ directionIndex ] = inputs [ CNTKRNNBiasIndex ] ; <nl> + <nl> + std : : vector < Variable > outputs = rnn - > Outputs ( ) ; <nl> + <nl> + Yhs [ directionIndex ] = outputs [ CNTKRNNOutputYhIndex ] ; <nl> + } <nl> + <nl> + SanityCheckForConstantOrParameters ( Ws ) ; <nl> + SanityCheckForConstantOrParameters ( Rs ) ; <nl> + SanityCheckForConstantOrParameters ( Bs ) ; <nl> + <nl> + / / ensure that if there is one direction , it is not backward . <nl> + / / if there two directions , they are forward and backward , and <nl> + / / that the inputs ( Xs ) are the same . 
<nl> + if ( std : : any_of ( directionCount . begin ( ) , directionCount . end ( ) , [ ] ( std : : map < RNNDirection , int > : : value_type & v ) { return v . second > 1 ; } ) ) <nl> + { <nl> + LogicError ( " RNN node is invalid because there should be no more than one path in each direction . " ) ; <nl> + } <nl> + if ( rnns . size ( ) = = 2 & & Xs [ 0 ] ! = Xs [ 1 ] ) <nl> + { <nl> + LogicError ( " Bi - directional RNN node is invalid because the two RNN nodes do not share one same input . " ) ; <nl> + } <nl> + <nl> + string direction = DeriveDirectionString ( rnns , directionCount ) ; <nl> + <nl> + / / an RNN output size is the hidden size <nl> + int hidden_size = rnns [ 0 ] - > Outputs ( ) [ 0 ] . Shape ( ) [ 0 ] ; <nl> + <nl> + / / inputs <nl> + std : : vector < ONNXIR : : NodeArg > nodeInputs ; <nl> + PrepareRNNInput ( Xs [ 0 ] , nodeInputs ) ; <nl> + PrepareRNNWeightNode ( graph , variableNodes , Ws , nodeInputs , CopyRNNWeightTensors ) ; <nl> + PrepareRNNWeightNode ( graph , variableNodes , Rs , nodeInputs , CopyRNNWeightTensors ) ; <nl> + <nl> + { <nl> + bool hasBias = std : : all_of ( Bs . begin ( ) , Bs . end ( ) , [ ] ( Variable & v ) { return v . IsInitialized ( ) ; } ) ; <nl> + if ( hasBias ) <nl> + { <nl> + PrepareRNNBiasNode ( graph , variableNodes , Bs , nodeInputs ) ; <nl> + } <nl> + else <nl> + { <nl> + AddEmptyInput ( nodeInputs ) ; <nl> + } <nl> + <nl> + { <nl> + / / sequence_lens is not supported <nl> + AddEmptyInput ( nodeInputs ) ; <nl> + } <nl> + <nl> + bool has_initial_h = std : : all_of ( initialHs . begin ( ) , initialHs . end ( ) , [ ] ( Variable & v ) { return v . IsInitialized ( ) ; } ) ; <nl> + if ( has_initial_h ) <nl> + { <nl> + std : : string hiddenUid = ToString ( Yhs [ 0 ] . Uid ( ) ) + " _initial_h " ; <nl> + PrepareLSTMInitialStateNode ( graph , variableNodes , initialHs , FreeBatchSize , hidden_size , hiddenUid , nodeInputs ) ; <nl> + } <nl> + else <nl> + { <nl> + AddEmptyInput ( nodeInputs ) ; <nl> + } <nl> + } <nl> + <nl> + const int output_sequence = RNNOutputSequence ; / / RNN in CNTK always output full sequence of output <nl> + std : : vector < ONNXIR : : NodeArg > nodeOutputs ; <nl> + { <nl> + if ( output_sequence = = 1 ) <nl> + { <nl> + std : : string nodeName ; <nl> + if ( rnns . size ( ) = = 1 ) <nl> + nodeName = ToString ( Yhs [ 0 ] . Uid ( ) ) ; <nl> + else <nl> + nodeName = ToString ( src - > Output ( ) . Uid ( ) ) ; <nl> + <nl> + auto outputArgType = ToTypeProto ( std : : vector < int > ( { FreeSequenceLen , ( int ) Yhs . size ( ) , FreeBatchSize , ( int ) Yhs [ 0 ] . Shape ( ) [ 0 ] } ) , false ) ; <nl> + UpdateONNXType ( Yhs [ 0 ] . GetDataType ( ) , outputArgType ) ; <nl> + ONNXIR : : NodeArg outputArg ( nodeName , & outputArgType ) ; <nl> + nodeOutputs . push_back ( outputArg ) ; <nl> + } <nl> + else <nl> + { <nl> + ONNXIR : : NodeArg outputArg ( " " , nullptr ) ; <nl> + nodeOutputs . push_back ( outputArg ) ; <nl> + } <nl> + <nl> + { <nl> + Variable Yh = Yhs [ 0 ] ; <nl> + std : : string nodeName = ToString ( Yh . Uid ( ) ) + " _h " ; <nl> + <nl> + const int batchSize = 1 ; <nl> + const bool doReverseVec = false ; <nl> + auto outputArgType = ToTypeProto ( std : : vector < int > ( { ( int ) Yhs . size ( ) , batchSize , ( int ) Yh . Shape ( ) [ 0 ] } ) , doReverseVec ) ; <nl> + UpdateONNXType ( Yh . GetDataType ( ) , outputArgType ) ; <nl> + ONNXIR : : NodeArg outputArg ( nodeName , & outputArgType ) ; <nl> + nodeOutputs . push_back ( outputArg ) ; <nl> + } <nl> + } <nl> + <nl> + if ( Xs [ 0 ] . Owner ( ) . get ( ) ! 
= nullptr ) <nl> + CreateNode ( Xs [ 0 ] . Owner ( ) , graph , functionNodes , variableNodes , compositeOutputsMap ) ; <nl> + <nl> + auto nodeName = src - > Name ( ) . empty ( ) ? ToString ( src - > Uid ( ) ) : ToString ( src - > Name ( ) ) ; <nl> + ONNXIR : : Node * rnnNode = graph - > AddNode ( nodeName , " RNN " , " " , nodeInputs , nodeOutputs ) ; <nl> + <nl> + rnnNode - > AddAttribute ( " activations " , activations ) ; <nl> + rnnNode - > AddAttribute ( " direction " , direction ) ; <nl> + rnnNode - > AddAttribute ( " hidden_size " , ( int64_t ) hidden_size ) ; <nl> + rnnNode - > AddAttribute ( " output_sequence " , ( int64_t ) output_sequence ) ; <nl> + <nl> + / / / / TODO : make bidirectional RNN work by figuring out output data <nl> + / / / / layout transpose in InsertReshapeNodeToCNTKFunction . <nl> + if ( rnns . size ( ) = = 2 ) <nl> + NOT_IMPLEMENTED ; <nl> + <nl> + / / / / TODO : uncomment this code once LotusRT output shape matches ONNX <nl> + / / / / squeeze direction axis out . This is safe because it is not bi - directional node . <nl> + std : : vector < int > shape ( { FreeSequenceLen , 1 , hidden_size } ) ; <nl> + ONNXIR : : Node * squeezedRNNNode = InsertReshapeNodeToCNTKFunction ( src , rnnNode , shape , graph ) ; <nl> + functionNodes . emplace ( src , squeezedRNNNode ) ; <nl> + return squeezedRNNNode ; <nl> + } <nl> + <nl> ONNXIR : : Node * CNTKToONNXHelper : : AddReshapeNode ( const ONNXIR : : NodeArg & nodeArg , const std : : vector < int > & newShape , const std : : string & outArgName , ONNXIR : : Graph * graph ) <nl> { <nl> ONNXIR : : NodeArg outputArg ( outArgName , nullptr ) ; <nl> ONNXIR : : Node * CNTKToONNXHelper : : CreateNode ( const FunctionPtr & src , <nl> / / return CreateLSTMNode ( src , graph , functionNodes , variableNodes , compositeOutputsMap ) ; <nl> / / } <nl> / / else <nl> - if ( opName = = " GRU " ) <nl> + if ( opName = = " RNNStep " ) <nl> + { <nl> + return CreateRNNNode ( src , graph , functionNodes , variableNodes , compositeOutputsMap ) ; <nl> + } <nl> + else if ( opName = = " GRU " ) <nl> { <nl> return CreateGRUNode ( src , graph , functionNodes , variableNodes , compositeOutputsMap ) ; <nl> } <nl> mmm a / Source / CNTKv2LibraryDll / proto / onnx / ONNXToCNTK . cpp <nl> ppp b / Source / CNTKv2LibraryDll / proto / onnx / ONNXToCNTK . cpp <nl> std : : vector < Variable > CreateRNNConstant ( <nl> { <nl> case LSTMInputIndexX : <nl> / / X , should not come to here <nl> - return inputs ; <nl> + CNTK : : LogicError ( " input to a recurrent node shall not be a constant " ) ; <nl> case LSTMInputIndexW : <nl> case LSTMInputIndexH : <nl> / / W , R : <nl> std : : vector < Variable > CreateRNNConstant ( <nl> <nl> for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> { <nl> - std : : string nodeName = name + ( index = = 1 ? " _W_ " : " _R_ " ) + ( char ) dir ; <nl> + std : : string nodeName = name + ( index = = 1 ? " _W_ " : " _R_ " ) + ( char ) ( ' 0 ' + dir ) ; <nl> int totalSizePerDirection = rows * cols ; <nl> <nl> / / TODO : what about double ? 
<nl> std : : vector < Variable > CreateRNNConstant ( <nl> NDShape weightShape ( { ( size_t ) ( 4 * cell_size ) } ) ; <nl> for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> { <nl> - std : : string nodeName = name + std : : string ( 1 , ( char ) dir ) + LSTMInputBiasNameHint ; <nl> + std : : string nodeName = name + std : : string ( 1 , ( char ) ( ' 0 ' + dir ) ) + LSTMInputBiasNameHint ; <nl> int totalSizePerDirection = 4 * cell_size ; <nl> float * data = new float [ totalSizePerDirection ] ; <nl> for ( size_t targetIndex = 0 ; targetIndex < totalSizePerDirection ; targetIndex + + ) <nl> std : : vector < Variable > CreateRNNConstant ( <nl> row - = 2 * cell_size ; <nl> } <nl> <nl> - / / soruce is collmn major <nl> + / / source is column major <nl> int src_index = row ; <nl> / / " fuse " <nl> data [ targetIndex ] = <nl> std : : vector < Variable > CreateRNNConstant ( <nl> NDShape weightShape ( { ( size_t ) ( cell_size ) } ) ; <nl> for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> { <nl> - std : : string nodeName = name + std : : string ( 1 , ( char ) dir ) ; <nl> + std : : string nodeName = name + std : : string ( 1 , ( char ) ( ' 0 ' + dir ) ) ; <nl> if ( index = = 5 ) <nl> nodeName + = LSTMInputInitialHNameHint ; <nl> else <nl> std : : vector < Variable > CreateRNNConstant ( <nl> for ( int i = 0 ; i < 3 ; i + + ) <nl> { <nl> std : : string nodeName = name + ( ( i = = 0 ) ? " _i " : ( ( i = = 1 ) ? " _o " : " _f " ) ) + <nl> - std : : string ( 1 , ( char ) dir ) + LSTMInputPeepholeNameHint ; <nl> + std : : string ( 1 , ( char ) ( ' 0 ' + dir ) ) + LSTMInputPeepholeNameHint ; <nl> float * data = new float [ cell_size ] ; <nl> NDShape weightShape ( { ( size_t ) ( cell_size ) } ) ; <nl> for ( size_t targetIndex = 0 ; targetIndex < cell_size ; targetIndex + + ) <nl> std : : vector < Variable > CreateRNNConstant ( <nl> } <nl> return inputs ; <nl> } <nl> - break ; <nl> default : <nl> - CNTK : : LogicError ( " CreateRNNConstant received unepxpeted index : % d " , index ) ; <nl> + CNTK : : LogicError ( " CreateRNNConstant received unexpected index : % d " , index ) ; <nl> } <nl> } <nl> else if ( parentONNXOpName = = " GRU " ) <nl> std : : vector < Variable > CreateRNNConstant ( <nl> { <nl> case GRUInputIndexX : <nl> / / X , should not come to here <nl> - return inputs ; <nl> + CNTK : : LogicError ( " input to a recurrent node shall not be a constant " ) ; <nl> case GRUInputIndexW : <nl> { <nl> / / see ONNX spec for the tensor shape <nl> std : : vector < Variable > CreateRNNConstant ( <nl> <nl> for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> { <nl> - std : : string nodeName = name + " _W_ " + ( char ) dir ; <nl> + std : : string nodeName = name + " _W_ " + ( char ) ( ' 0 ' + dir ) ; <nl> int totalSizePerDirection = rows * cols ; <nl> <nl> / / TODO : what about double ? <nl> std : : vector < Variable > CreateRNNConstant ( <nl> inputs . resize ( num_directions * 2 ) ; <nl> for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> { <nl> - std : : string hNodeName = name + " _H_ " + ( char ) dir ; <nl> - std : : string h1NodeName = name + " _H1_ " + ( char ) dir ; <nl> + std : : string hNodeName = name + " _H_ " + ( char ) ( ' 0 ' + dir ) ; <nl> + std : : string h1NodeName = name + " _H1_ " + ( char ) ( ' 0 ' + dir ) ; <nl> int totalSizePerDirection = rows * cols ; <nl> <nl> float * hData = new float [ hShape . TotalSize ( ) ] ; <nl> std : : vector < Variable > CreateRNNConstant ( <nl> / / see ONNX spec for the tensor shape <nl> int num_directions = valueProto . 
dims ( 0 ) ; <nl> int cell_size = valueProto . dims ( 1 ) / GRUBiasDimensionHiddenMultiplier ; <nl> - / / shape size is devided by 2 so that it only applies to input ( CNTK ) <nl> + / / shape size is divided by 2 so that it only applies to input ( CNTK ) <nl> / / TODO : this incompatibility needs further investigation . <nl> NDShape weightShape ( { ( size_t ) ( GRUBiasDimensionHiddenMultiplier / 2 * cell_size ) } ) ; <nl> for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> { <nl> - std : : string nodeName = name + std : : string ( 1 , ( char ) dir ) + LSTMInputBiasNameHint ; <nl> + std : : string nodeName = name + std : : string ( 1 , ' 0 ' + dir ) + LSTMInputBiasNameHint ; <nl> int totalSizePerDirection = GRUBiasDimensionHiddenMultiplier / 2 * cell_size ; <nl> float * data = new float [ totalSizePerDirection ] ; <nl> for ( size_t targetIndex = 0 ; targetIndex < totalSizePerDirection ; targetIndex + + ) <nl> { <nl> int row = targetIndex ; <nl> - / / soruce is collmn major <nl> + / / source is column major <nl> int src_index = row ; <nl> / / " fuse " <nl> data [ targetIndex ] = <nl> std : : vector < Variable > CreateRNNConstant ( <nl> NDShape weightShape ( { ( size_t ) ( cell_size ) } ) ; <nl> for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> { <nl> - std : : string nodeName = name + std : : string ( 1 , ( char ) dir ) + LSTMInputInitialHNameHint ; <nl> + std : : string nodeName = name + std : : string ( 1 , ( char ) ( ' 0 ' + dir ) ) + LSTMInputInitialHNameHint ; <nl> + <nl> + float * data = new float [ cell_size ] ; <nl> + for ( size_t targetIndex = 0 ; targetIndex < cell_size ; targetIndex + + ) <nl> + { <nl> + data [ targetIndex ] = valueProto . float_data ( ) [ dir * cell_size + targetIndex ] ; <nl> + } <nl> + <nl> + Constant constant = CreateConstantWithRawData ( data , weightShape , nodeName , computeDevice ) ; <nl> + inputs . push_back ( constant ) ; <nl> + } <nl> + return inputs ; <nl> + } <nl> + default : <nl> + CNTK : : LogicError ( " CreateRNNConstant for GRU op received unexpected index : % d " , index ) ; <nl> + } <nl> + } <nl> + else if ( parentONNXOpName = = " RNN " ) <nl> + { <nl> + / / https : / / github . com / onnx / onnx / blob / master / docs / Operators . md # inputs - 3mmm6 - 1 <nl> + switch ( index ) <nl> + { <nl> + case RNNInputIndexX : <nl> + / / X , should not come to here <nl> + CNTK : : LogicError ( " input to a recurrent node shall not be a constant " ) ; <nl> + case RNNInputIndexW : <nl> + case RNNInputIndexR : <nl> + { <nl> + / / see ONNX spec for the tensor shape <nl> + int num_directions = valueProto . dims ( 0 ) ; <nl> + size_t rows = valueProto . dims ( 1 ) ; <nl> + size_t cols = valueProto . dims ( 2 ) ; <nl> + <nl> + / / CNTK cpp requires shape : ( input_size , 3 * hidden_size ) <nl> + NDShape weightShape ( { rows , cols } ) ; <nl> + <nl> + int input_size = cols ; <nl> + int cell_size = rows ; <nl> + <nl> + for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> + { <nl> + std : : string nodeName = name + ( index = = RNNInputIndexW ? " _W_ " : " _R_ " ) + ( char ) ( ' 0 ' + dir ) ; <nl> + int totalSizePerDirection = rows * cols ; <nl> + <nl> + / / TODO : what about double ? 
<nl> + float * data = new float [ totalSizePerDirection ] ; <nl> + for ( size_t count = 0 ; count < totalSizePerDirection ; count + + ) <nl> + { <nl> + int row = count / input_size ; <nl> + int col = count % input_size ; <nl> + int sourceIndex = dir * totalSizePerDirection + count ; <nl> + int targetIndex = col * cell_size + row ; <nl> + data [ targetIndex ] = valueProto . float_data ( ) [ sourceIndex ] ; <nl> + } <nl> + <nl> + Constant constant = CreateConstantWithRawData ( & data [ 0 ] , weightShape , nodeName , computeDevice ) ; <nl> + inputs . push_back ( constant ) ; <nl> + } <nl> + return inputs ; <nl> + } <nl> + case RNNInputIndexB : <nl> + / / B <nl> + { <nl> + / / see ONNX spec for the tensor shape : <nl> + / / https : / / github . com / onnx / onnx / blob / master / docs / Operators . md # inputs - 3mmm6 - 1 <nl> + / / shape of bias is [ num_directions , 2 * hidden_size ] thus we divide dim ( 1 ) by 2 <nl> + / / to get cell_size . <nl> + int num_directions = valueProto . dims ( 0 ) ; <nl> + int cell_size = valueProto . dims ( 1 ) / 2 ; <nl> + NDShape weightShape ( { ( size_t ) ( cell_size ) } ) ; <nl> + for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> + { <nl> + std : : string nodeName = name + std : : string ( 1 , ' 0 ' + dir ) + LSTMInputBiasNameHint ; <nl> + int totalSizePerDirection = cell_size ; <nl> + float * data = new float [ totalSizePerDirection ] ; <nl> + for ( size_t targetIndex = 0 ; targetIndex < totalSizePerDirection ; targetIndex + + ) <nl> + { <nl> + int row = targetIndex ; <nl> + / / source is column major <nl> + int src_index = row ; <nl> + / / " fuse " <nl> + / / RNN only has one bias vector . It is applied after element - wise addition <nl> + / / of projected input and hidden states . Therefore we need to fuse two biases <nl> + / / in ONNX into one . <nl> + / / RNNBiasMultiplier = 2 <nl> + data [ targetIndex ] = <nl> + valueProto . float_data ( ) [ dir * RNNBiasMultiplier * totalSizePerDirection + src_index ] + <nl> + valueProto . float_data ( ) [ dir * RNNBiasMultiplier * totalSizePerDirection + totalSizePerDirection + src_index ] ; <nl> + } <nl> + <nl> + Constant constant = CreateConstantWithRawData ( data , weightShape , nodeName , computeDevice ) ; <nl> + inputs . push_back ( constant ) ; <nl> + } <nl> + return inputs ; <nl> + } <nl> + case RNNInputIndexSequenceLens : <nl> + return inputs ; <nl> + case RNNInitialH : <nl> + { <nl> + / / initial_h <nl> + int num_directions = valueProto . dims ( 0 ) ; <nl> + int cell_size = valueProto . 
dims ( 2 ) ; <nl> + NDShape weightShape ( { ( size_t ) ( cell_size ) } ) ; <nl> + for ( int dir = 0 ; dir < num_directions ; dir + + ) <nl> + { <nl> + std : : string nodeName = name + std : : string ( 1 , ( char ) ( ' 0 ' + dir ) ) + LSTMInputInitialHNameHint ; <nl> <nl> float * data = new float [ cell_size ] ; <nl> for ( size_t targetIndex = 0 ; targetIndex < cell_size ; targetIndex + + ) <nl> std : : vector < Variable > CreateRNNConstant ( <nl> } <nl> return inputs ; <nl> } <nl> - break ; <nl> - return inputs ; <nl> default : <nl> - CNTK : : LogicError ( " CreateRNNConstant for GRU op received unepxpeted index : % d " , index ) ; <nl> + CNTK : : LogicError ( " CreateRNNConstant for GRU op received unexpected index : % d " , index ) ; <nl> } <nl> } <nl> else <nl> std : : vector < Variable > ONNXToCNTKHelper : : CreateRNNLeafVariableOrConstant ( const No <nl> case GRUInitialH : <nl> NOT_IMPLEMENTED ; <nl> default : <nl> - LogicError ( " LSTM node has unexpected input " ) ; <nl> + LogicError ( " GRU node has unexpected input " ) ; <nl> + } <nl> + } <nl> + else if ( parentONNXOpName = = " RNN " ) <nl> + { <nl> + int inputIndex = CalculateNodeArgInputIndex ( nodeArg , parentNode ) ; <nl> + switch ( inputIndex ) <nl> + { <nl> + case GRUInputIndexX : <nl> + / / X : ` [ seq_length , batch_size , input_size ] ` . <nl> + { <nl> + Variable inputVariable ; <nl> + if ( constructedNodeArgVariableMap . find ( nodeArg - > Name ( ) ) = = constructedNodeArgVariableMap . end ( ) ) <nl> + { <nl> + DataType dataType = FromONNXType ( nodeArg - > ToProto ( ) . type ( ) ) ; <nl> + int input_size = shapeProto - > dim ( 2 ) . dim_value ( ) ; <nl> + NDShape shape ( { ( size_t ) ( input_size ) } ) ; <nl> + inputVariable = InputVariable ( shape , dataType , ToWString ( nodeArg - > Name ( ) ) , dynamicAxes ) ; <nl> + constructedNodeArgVariableMap . 
insert ( ONNXToCNTKVariableMap : : value_type ( nodeArg - > Name ( ) , inputVariable ) ) ; <nl> + } <nl> + return std : : vector < Variable > ( { constructedNodeArgVariableMap [ nodeArg - > Name ( ) ] } ) ; <nl> + } <nl> + / / other inputs shall be ONNX constant node and be created as CNTK Constant in CreateRNNConstant <nl> + case GRUInputIndexW : <nl> + case GRUInputIndexR : <nl> + case GRUInputIndexB : <nl> + case GRUInputIndexSequenceLens : <nl> + case GRUInitialH : <nl> + NOT_IMPLEMENTED ; <nl> + default : <nl> + LogicError ( " RNN node has unexpected input " ) ; <nl> } <nl> } <nl> else <nl> FunctionPtr ONNXToCNTKHelper : : CreateFunction ( const Node * node , const std : : vector <nl> std : : vector < string > ( { " Sigmoid " , " Tanh " } ) ) ; <nl> return CreateGRU ( node , inputs , direction , activations , activation_alpha , activation_beta ) ; <nl> } <nl> + else if ( onnxOpName = = " RNN " ) <nl> + { <nl> + const string direction = GetNamedAttributeAsString ( node , " direction " ) ; <nl> + std : : vector < float > activation_alpha = GetNamedAttributeAsFloatVec ( node , " activation_alpha " , std : : vector < float > ( ) ) ; <nl> + std : : vector < float > activation_beta = GetNamedAttributeAsFloatVec ( node , " activation_beta " , std : : vector < float > ( ) ) ; <nl> + const std : : vector < string > activations = GetNamedAttributeAsStringVec ( node , " activations " , <nl> + std : : vector < string > ( { " Tanh " } ) ) ; <nl> + return CreateRNN ( node , inputs , direction , activations , activation_alpha , activation_beta ) ; <nl> + } <nl> if ( onnxOpName = = " FC " ) <nl> { <nl> return CreateCNTKFCNode ( ToWString ( node - > Name ( ) ) , inputs ) ; <nl> mmm a / Source / CNTKv2LibraryDll / proto / onnx / Operators . cpp <nl> ppp b / Source / CNTKv2LibraryDll / proto / onnx / Operators . cpp <nl> namespace ONNX <nl> <nl> bool Operators : : IsRNNOp ( const std : : string & opName ) <nl> { <nl> - return opName = = " LSTM " | | opName = = " GRU " | | opName = = " RNN " ; <nl> + return opName = = " LSTM " | | opName = = " GRU " | | opName = = " RNN " | | opName = = " RNNStep " ; <nl> } <nl> std : : unordered_map < std : : wstring , std : : set < size_t > > Operators : : _cntkBlockOPInvalidIndices = { <nl> { L " Clip " , { 1 , 2 } } , <nl> mmm a / Source / CNTKv2LibraryDll / proto / onnx / RNNHelper . cpp <nl> ppp b / Source / CNTKv2LibraryDll / proto / onnx / RNNHelper . cpp <nl> std : : tuple < std : : function < FunctionPtr ( const Variable & ) > , std : : function < FunctionPt <nl> GetActivations ( const std : : vector < std : : string > & activations , const std : : vector < float > & activation_alpha , const std : : vector < float > & activation_beta , int direction ) <nl> { <nl> if ( activations . size ( ) < ( direction + 1 ) * LSTMActivationCount ) <nl> - CNTK : : LogicError ( " LSTM activations shall be % d or % d of strings " , LSTMActivationCount , LSTMActivationCount * 2 ) ; <nl> + CNTK : : LogicError ( " LSTM activations shall be a list of strings of size % d or % d " , LSTMActivationCount , LSTMActivationCount * 2 ) ; <nl> <nl> / / <nl> int iofActivationIndex = direction * LSTMActivationCount + LSTMActivationFIndex ; <nl> std : : tuple < std : : function < FunctionPtr ( const Variable & ) > , std : : function < FunctionPt <nl> GetGRUActivations ( const std : : vector < std : : string > & activations , const std : : vector < float > & activation_alpha , const std : : vector < float > & activation_beta , int direction ) <nl> { <nl> if ( activations . 
size ( ) < ( direction + 1 ) * GRUActivationCount ) <nl> - CNTK : : LogicError ( " LSTM activations shall be % d or % d of strings " , GRUActivationCount , GRUActivationCount * 2 ) ; <nl> + CNTK : : LogicError ( " GRU activations shall be a list of strings of size % d or % d " , GRUActivationCount , GRUActivationCount * 2 ) ; <nl> <nl> / / <nl> int fActivationIndex = direction * GRUActivationCount + GRUActivationFIndex ; <nl> GetGRUActivations ( const std : : vector < std : : string > & activations , const std : : vector <nl> return std : : make_tuple ( fActivationOp , gActivationOp ) ; <nl> } <nl> <nl> + std : : function < FunctionPtr ( const Variable & ) > <nl> + GetRNNActivations ( const std : : vector < std : : string > & activations , const std : : vector < float > & activation_alpha , const std : : vector < float > & activation_beta , int direction ) <nl> + { <nl> + if ( activations . size ( ) < ( direction + 1 ) ) <nl> + CNTK : : LogicError ( " RNN activations shall be a list of strings of size 1 or 2 " ) ; <nl> + <nl> + / / <nl> + int activationIndex = direction ; <nl> + <nl> + bool hasAlpha = activation_alpha . size ( ) = = ( direction + 1 ) ; <nl> + bool hasAlphaBeta = hasAlpha & & activation_beta . size ( ) = = ( direction + 1 ) ; <nl> + std : : function < FunctionPtr ( const Variable & ) > activationOp ; <nl> + if ( hasAlphaBeta ) <nl> + { <nl> + activationOp = ActivationMap ( activations [ activationIndex ] , activation_alpha [ activationIndex ] , activation_beta [ activationIndex ] ) ; <nl> + } <nl> + else if ( hasAlpha ) <nl> + { <nl> + activationOp = ActivationMap ( activations [ activationIndex ] , activation_alpha [ activationIndex ] ) ; <nl> + } <nl> + else <nl> + { <nl> + activationOp = ActivationMap ( activations [ activationIndex ] ) ; <nl> + } <nl> + <nl> + return activationOp ; <nl> + } <nl> + <nl> std : : pair < FunctionPtr , FunctionPtr > LSTMPCell ( Variable input , <nl> const std : : function < FunctionPtr ( const Variable & ) > & iofActivationOp , <nl> const std : : function < FunctionPtr ( const Variable & ) > & cellActivationOp , <nl> FunctionPtr GRUCell ( Variable input , <nl> return ht ; <nl> } <nl> <nl> + FunctionPtr RNNCell ( Variable input , <nl> + const std : : function < FunctionPtr ( const Variable & ) > & activationOp , <nl> + Variable prevOutput , <nl> + Constant & W , Constant & R , Constant & B ) <nl> + { <nl> + FunctionPtr proj = Times ( W , input ) + Times ( R , prevOutput ) ; ; <nl> + if ( B . IsInitialized ( ) ) <nl> + proj = B + proj ; <nl> + <nl> + FunctionPtr h = activationOp ( proj ) ; <nl> + return h ; <nl> + } <nl> + <nl> + <nl> # include " PrimitiveFunction . h " <nl> # include " BlockFunction . h " <nl> <nl> FunctionPtr GRUComponent ( Variable input , <nl> <nl> auto actualDh = recurrenceHookH ( gruCell ) ; <nl> <nl> - gruCell - > ReplacePlaceholders ( { { inputPlaceholder , input } , { dh , actualDh } } ) ; <nl> - return gruCell ; <nl> + gruCell - > ReplacePlaceholders ( { { dh , actualDh } } ) ; <nl> + <nl> + auto gruBlock = AsBlock ( std : : move ( gruCell ) , { { inputPlaceholder , input } } , L " GRU " , L " " ) ; <nl> + return gruBlock ; <nl> + } <nl> + <nl> + FunctionPtr RNNComponent ( Variable input , <nl> + const NDShape & cellShape , <nl> + const std : : function < FunctionPtr ( const Variable & ) > & activationOp , <nl> + const std : : function < FunctionPtr ( const Variable & ) > & recurrenceHookH , <nl> + Constant & W , Constant & R , Constant & B ) <nl> + { <nl> + auto dh = PlaceholderVariable ( cellShape , input . 
DynamicAxes ( ) ) ; <nl> + auto inputPlaceholder = PlaceholderVariable ( input . Shape ( ) , input . DynamicAxes ( ) ) ; <nl> + <nl> + auto rnnCell = RNNCell ( <nl> + inputPlaceholder , <nl> + activationOp , <nl> + dh , W , R , B ) ; <nl> + <nl> + auto actualDh = recurrenceHookH ( rnnCell ) ; <nl> + <nl> + rnnCell - > ReplacePlaceholders ( { { inputPlaceholder , input } , { dh , actualDh } } ) ; <nl> + return rnnCell ; <nl> } <nl> <nl> const std : : vector < Variable > FindByNameHint ( const std : : vector < Variable > & inputs , const std : : string & hint ) <nl> FunctionPtr CreateGRU ( const ONNXIR : : Node * node , const std : : vector < Variable > & inp <nl> } <nl> } <nl> <nl> + FunctionPtr CreateRNN ( const ONNXIR : : Node * node , const std : : vector < Variable > & inputs , const std : : string & direction , <nl> + const std : : vector < string > & activations , const std : : vector < float > & activation_alpha , const std : : vector < float > & activation_beta ) <nl> + { <nl> + int numDirections = direction = = RNNDirectionBidirection ? 2 : 1 ; <nl> + std : : vector < FunctionPtr > outputHs ; <nl> + for ( int dir = 0 ; dir < numDirections ; dir + + ) <nl> + { <nl> + std : : function < FunctionPtr ( const Variable & ) > activationOp = <nl> + GetRNNActivations ( activations , activation_alpha , activation_beta , dir ) ; <nl> + <nl> + / / the first a few inputs are ( in order ) : X , numDirections * W , numDirections * R , numDirections * H1 <nl> + Variable X = inputs [ 0 ] ; <nl> + Variable W = inputs [ 1 * numDirections + dir - ( ( numDirections = = 2 ) ? 1 : 0 ) ] ; <nl> + Variable R = inputs [ 2 * numDirections + dir - ( ( numDirections = = 2 ) ? 1 : 0 ) ] ; <nl> + Variable B ; <nl> + std : : vector < Variable > biasVariables = FindByNameHint ( inputs , LSTMInputBiasNameHint ) ; <nl> + if ( numDirections = = 1 & & biasVariables . size ( ) > = 1 ) <nl> + B = biasVariables [ 0 ] ; <nl> + else if ( numDirections = = 2 & & biasVariables . size ( ) = = 2 ) <nl> + B = biasVariables [ 1 ] ; <nl> + <nl> + Variable initHVariable = GetInitialStateVariable ( inputs , numDirections , GRUInputInitialHNameHint , X . GetDataType ( ) ) ; <nl> + <nl> + int hiddenDim = W . Shape ( ) [ 0 ] ; <nl> + <nl> + FunctionPtr outputH ; <nl> + <nl> + / / if it is bidirectional LSTM , the second one will be the backword one . <nl> + bool go_backwards = direction = = RNNDirectionReverse | | ( numDirections = = 2 & & dir = = 1 ) ; <nl> + <nl> + std : : function < FunctionPtr ( const Variable & ) > recurrenceHook ; <nl> + if ( go_backwards ) <nl> + recurrenceHook = [ initHVariable ] ( const Variable & x ) { return FutureValue ( x , initHVariable ) ; } ; <nl> + else <nl> + recurrenceHook = [ initHVariable ] ( const Variable & x ) { return PastValue ( x , initHVariable ) ; } ; <nl> + <nl> + outputH = RNNComponent ( <nl> + X , { ( size_t ) hiddenDim } , activationOp , <nl> + recurrenceHook , ( Constant & ) W , ( Constant & ) R , ( Constant & ) B ) ; <nl> + outputHs . push_back ( outputH ) ; <nl> + } <nl> + if ( outputHs . 
size ( ) = = 1 ) <nl> + return outputHs [ 0 ] ; <nl> + else <nl> + { <nl> + std : : vector < Variable > operands ( { outputHs [ 0 ] , outputHs [ 1 ] } ) ; <nl> + return Splice ( operands , Axis ( 0 ) , ToWString ( node - > Name ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> template < typename FunctionType > <nl> void TraverseGraphWithPrePostActions ( FunctionPtr cntkFunction , std : : unordered_set < FunctionPtr > & visitedFunctions , <nl> FunctionType preFunctor , FunctionType postFunctor ) <nl> void TraceGRUPathes ( const FunctionPtr & src , string & f_activation , string & g_acti <nl> f_activation = " Sigmoid " ; <nl> g_activation = MapActivationNameCNTKToONNX ( ToString ( gActivation - > OpName ( ) ) ) ; <nl> } <nl> + <nl> + void TraceRNNPathes ( const FunctionPtr & src , string & activation , <nl> + RNNDirection & direction , Variable & initStateH ) <nl> + { <nl> + std : : vector < Variable > inputVars = src - > Inputs ( ) ; <nl> + std : : vector < FunctionPtr > pastValueOps , futureValueOps ; <nl> + GetDelayOps ( inputVars , pastValueOps , futureValueOps ) ; <nl> + <nl> + / / indices here coresponding with CNTK python layer code . <nl> + if ( pastValueOps . size ( ) = = 1 & & futureValueOps . size ( ) = = 0 ) <nl> + { <nl> + direction = RNNDirection : : Forward ; <nl> + initStateH = pastValueOps [ 0 ] - > Inputs ( ) [ 1 ] ; <nl> + } <nl> + else if ( pastValueOps . size ( ) = = 0 & & futureValueOps . size ( ) = = 1 ) <nl> + { <nl> + direction = RNNDirection : : Backward ; <nl> + initStateH = futureValueOps [ 0 ] - > Inputs ( ) [ 1 ] ; <nl> + } <nl> + else <nl> + { <nl> + CNTK : : LogicError ( " Node % s ( % s ) is not a valid RNN node " , ToString ( src - > Name ( ) ) . c_str ( ) , ToString ( src - > Uid ( ) ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + FunctionPtr activationFunction = src - > BlockRoot ( ) ; <nl> + activation = MapActivationNameCNTKToONNX ( ToString ( activationFunction - > OpName ( ) ) ) ; <nl> + } <nl> + <nl> + std : : vector < FunctionPtr > GetRNNBlocksFromSingleOrBidirectionalRNN ( const FunctionPtr src , const std : : string & RNNStepOpName ) <nl> + { <nl> + std : : vector < FunctionPtr > rnns ; <nl> + if ( ToString ( src - > OpName ( ) ) = = RNNStepOpName ) <nl> + { <nl> + rnns . push_back ( src ) ; <nl> + } <nl> + else if ( src - > OpName ( ) = = L " Splice " ) / / src is a Splice op with inputs from two LSTM ops . <nl> + { <nl> + for ( auto & input : src - > Inputs ( ) ) <nl> + { <nl> + rnns . push_back ( input . Owner ( ) ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + CNTK : : LogicError ( " An % s op should start with an GRU op ( single direction ) or a Splice op ( bidirectional ) . " , RNNStepOpName . c_str ( ) ) ; <nl> + } <nl> + <nl> + / / For single direction RNN , rnns . size ( ) = = 1 . For bidirectional RNN , rnns . size ( ) = = 2 . <nl> + / / It is an error otherwise . <nl> + if ( rnns . size ( ) = = 0 | | rnns . size ( ) > 2 | | <nl> + std : : any_of ( rnns . cbegin ( ) , rnns . cend ( ) , [ RNNStepOpName ] ( const FunctionPtr & f ) { return ToString ( f - > OpName ( ) ) ! = RNNStepOpName ; } ) ) <nl> + { <nl> + CNTK : : LogicError ( " Invalid number of RNN ops to construct an ONNX % s node . " , RNNStepOpName . c_str ( ) ) ; <nl> + } <nl> + <nl> + return rnns ; <nl> + } <nl> mmm a / Source / CNTKv2LibraryDll / proto / onnx / RNNHelper . h <nl> ppp b / Source / CNTKv2LibraryDll / proto / onnx / RNNHelper . 
h <nl> enum <nl> GRUInitialH = 5 , <nl> } ; <nl> <nl> + enum <nl> + { <nl> + RNNInputIndexX = 0 , <nl> + RNNInputIndexW = 1 , <nl> + RNNInputIndexR = 2 , <nl> + RNNInputIndexB = 3 , <nl> + RNNInputIndexSequenceLens = 4 , <nl> + RNNInitialH = 5 , <nl> + } ; <nl> + <nl> + enum <nl> + { <nl> + CNTKRNNOutputYhIndex = 0 <nl> + } ; <nl> + <nl> / / https : / / github . com / onnx / onnx / blob / master / docs / Operators . md # inputs - 3mmm6 <nl> / / size of weight / bias matrix is a multiple of hidden size <nl> enum <nl> enum <nl> CNTKGRUInputCount = 7 <nl> } ; <nl> <nl> + enum <nl> + { <nl> + CNTKRNNWeightIndex = 0 , <nl> + CNTKRNNHweightIndex = 1 , <nl> + CNTKRNNBiasIndex = 2 , <nl> + CNTKRNNDelayIndex = 3 , <nl> + CNTKRNNInputIndex = 4 , <nl> + CNTKRNNInputCount = 5 <nl> + } ; <nl> + <nl> + enum <nl> + { <nl> + RNNBiasMultiplier = 2 <nl> + } ; <nl> <nl> const string RNNDirectionBidirection = " bidirectional " ; <nl> const string RNNDirectionReverse = " reverse " ; <nl> FunctionPtr CreateLSTM ( const ONNXIR : : Node * node , const std : : vector < Variable > & in <nl> FunctionPtr CreateGRU ( const ONNXIR : : Node * node , const std : : vector < Variable > & inputs , const std : : string & direction , <nl> const std : : vector < string > & activations , const std : : vector < float > & activation_alpha , const std : : vector < float > & activation_beta ) ; <nl> <nl> + FunctionPtr CreateRNN ( const ONNXIR : : Node * node , const std : : vector < Variable > & inputs , const std : : string & direction , <nl> + const std : : vector < string > & activations , const std : : vector < float > & activation_alpha , const std : : vector < float > & activation_beta ) ; <nl> + <nl> void TraceLSTMPathes ( const FunctionPtr & src , string & f_activation , string & g_activation , string & h_activation , <nl> RNNDirection & direction , Variable & initStateH , Variable & initStateC , Variable & peepholeCi , Variable & peepholeCo , Variable & peepholeCf , <nl> double & stabilizer_dh , double & stabilizer_dc , double & stabilizer_c ) ; <nl> void TraceLSTMPathes ( const FunctionPtr & src , string & f_activation , string & g_act <nl> void TraceGRUPathes ( const FunctionPtr & src , string & f_activation , string & g_activation , <nl> RNNDirection & direction , Variable & initStateH ) ; <nl> <nl> + void TraceRNNPathes ( const FunctionPtr & src , string & activation , <nl> + RNNDirection & direction , Variable & initStateH ) ; <nl> + <nl> std : : string MapActivationNameONNXToCNTK ( const std : : string & onnxOp ) ; <nl> - std : : string MapActivationNameCNTKToONNX ( const std : : string & cntkOp ) ; <nl> \ No newline at end of file <nl> + std : : string MapActivationNameCNTKToONNX ( const std : : string & cntkOp ) ; <nl> + <nl> + std : : vector < FunctionPtr > GetRNNBlocksFromSingleOrBidirectionalRNN ( const FunctionPtr src , const std : : string & RNNStepOpName ) ; <nl> \ No newline at end of file <nl> mmm a / bindings / python / cntk / tests / onnx_op_test . py <nl> ppp b / bindings / python / cntk / tests / onnx_op_test . py <nl> def test_Greater ( tmpdir ) : <nl> verify_no_input ( model , tmpdir , ' Greater_0 ' ) <nl> <nl> # GRU <nl> - def MakeGRUNameFromConfig ( backward , initial_state , activtion ) : <nl> - model_name = ' GRU . ' + activtion . __name__ <nl> - if ( initial_state ! = 0 ) : <nl> - model_name + = ' . initial ' <nl> - if ( backward ) : <nl> - model_name + = ' . backward ' <nl> - else : <nl> - model_name + = ' . 
forward ' <nl> - return model_name <nl> - <nl> - direction_options = [ False , True ] <nl> - activation_options = [ C . tanh ] <nl> - initial_state_options = [ 0 ] <nl> - <nl> - input_dim = 2 <nl> - cell_dim = 3 <nl> - batch_size = 1 <nl> - sequence_len = 5 <nl> - <nl> def test_GRU ( tmpdir ) : <nl> + def MakeGRUNameFromConfig ( backward , initial_state , activition ) : <nl> + model_name = ' GRU . ' + activition . __name__ <nl> + if ( initial_state ! = 0 ) : <nl> + model_name + = ' . initial ' <nl> + if ( backward ) : <nl> + model_name + = ' . backward ' <nl> + else : <nl> + model_name + = ' . forward ' <nl> + return model_name <nl> + <nl> + direction_options = [ False , True ] <nl> + activation_options = [ C . tanh ] <nl> + initial_state_options = [ 0 ] <nl> + <nl> + input_dim = 2 <nl> + cell_dim = 3 <nl> + batch_size = 1 <nl> + sequence_len = 5 <nl> + <nl> for config in list ( product ( direction_options , initial_state_options , activation_options ) ) : <nl> model_filename = MakeGRUNameFromConfig ( * config ) <nl> print ( model_filename ) <nl> def test_LRN ( tmpdir ) : <nl> verify_one_input ( model , img , tmpdir , ' LRN_1 ' ) <nl> <nl> # LSTM <nl> - def CreateLSTMModel ( activation , <nl> - peepholes , <nl> - self_stabilization , <nl> - cell_dim , <nl> - initial_state ) : <nl> - return C . layers . Sequential ( [ <nl> - C . layers . Recurrence ( C . layers . LSTM ( cell_dim , <nl> - use_peepholes = peepholes , <nl> - activation = activation , <nl> - enable_self_stabilization = self_stabilization ) , <nl> - initial_state = initial_state ) <nl> - ] ) <nl> - <nl> - # lstm attributes <nl> - use_peepholes_options = [ False ] <nl> - enable_self_stabilization_options = [ False ] <nl> - activation_options = [ C . tanh ] <nl> - <nl> - # Recurrence attributes <nl> - initial_state_options = [ 0 ] <nl> - <nl> - input_dim = 2 <nl> - cell_dim = 3 <nl> - batch_size = 1 <nl> - sequence_len = 5 <nl> - <nl> - def MakeLSTMNameFromConfig ( use_peepholes , enable_self_stabilization , initial_state , activtion ) : <nl> - model_name = ' LSTM . ' + activtion . __name__ <nl> - if ( use_peepholes ) : <nl> - model_name + = ' . peephole ' <nl> - if ( enable_self_stabilization ) : <nl> - model_name + = ' . stabilize ' <nl> - if ( initial_state ! = 0 ) : <nl> - model_name + = ' . initial ' <nl> - return model_name <nl> - <nl> def test_LSTM ( tmpdir ) : <nl> + def CreateLSTMModel ( activation , <nl> + peepholes , <nl> + self_stabilization , <nl> + cell_dim , <nl> + initial_state ) : <nl> + return C . layers . Sequential ( [ <nl> + C . layers . Recurrence ( C . layers . LSTM ( cell_dim , <nl> + use_peepholes = peepholes , <nl> + activation = activation , <nl> + enable_self_stabilization = self_stabilization ) , <nl> + initial_state = initial_state ) <nl> + ] ) <nl> + <nl> + <nl> + def MakeLSTMNameFromConfig ( use_peepholes , enable_self_stabilization , initial_state , activition ) : <nl> + model_name = ' LSTM . ' + activition . __name__ <nl> + if ( use_peepholes ) : <nl> + model_name + = ' . peephole ' <nl> + if ( enable_self_stabilization ) : <nl> + model_name + = ' . stabilize ' <nl> + if ( initial_state ! = 0 ) : <nl> + model_name + = ' . initial ' <nl> + return model_name <nl> + <nl> + # lstm attributes <nl> + use_peepholes_options = [ False ] <nl> + enable_self_stabilization_options = [ False ] <nl> + activation_options = [ C . 
tanh ] <nl> + <nl> + # Recurrence attributes <nl> + initial_state_options = [ 0 ] <nl> + <nl> + input_dim = 2 <nl> + cell_dim = 3 <nl> + batch_size = 1 <nl> + sequence_len = 5 <nl> + <nl> for config in list ( product ( use_peepholes_options , enable_self_stabilization_options , <nl> initial_state_options , activation_options ) ) : <nl> model_filename = MakeLSTMNameFromConfig ( * config ) <nl> def test_Reshape ( tmpdir ) : <nl> model = C . reshape ( i1 , ( 2 , 3 ) ) <nl> verify_one_input ( model , data , tmpdir , ' Reshape_1 ' ) <nl> <nl> + # RNN <nl> + def test_GRU ( tmpdir ) : <nl> + def CreatRNN ( cell_dim , <nl> + activation , <nl> + initial_state , <nl> + direction , <nl> + num_layers , <nl> + init = C . default_override_or ( C . glorot_uniform ( ) ) , <nl> + init_bias = C . default_override_or ( 0 ) ) : <nl> + if direction = = ' bidirectional ' : <nl> + return C . layers . Sequential ( [ <nl> + C . layers . For ( range ( num_layers ) , lambda i : [ <nl> + ( C . layers . Recurrence ( C . layers . RNNStep ( cell_dim , <nl> + activation = activation , <nl> + init = init , <nl> + init_bias = init_bias ) , <nl> + initial_state = initial_state , <nl> + return_full_state = False , go_backwards = False ) , <nl> + C . layers . Recurrence ( C . layers . RNNStep ( cell_dim , activation = activation , <nl> + init = init , <nl> + init_bias = init_bias ) , <nl> + initial_state = initial_state , <nl> + return_full_state = False , go_backwards = True ) ) , <nl> + C . splice ] ) ] ) <nl> + else : <nl> + go_backward = False if direction = = ' forward ' else True <nl> + return C . layers . Sequential ( [ <nl> + C . layers . For ( range ( num_layers ) , lambda i : [ <nl> + C . layers . Recurrence ( C . layers . RNNStep ( cell_dim , <nl> + activation = activation , <nl> + init = init , <nl> + init_bias = init_bias ) , <nl> + initial_state = initial_state , <nl> + return_full_state = False , go_backwards = go_backward ) ] ) ] ) <nl> + <nl> + def MakeRNNNameFromConfig ( direction , num_layers , initial_state , activition ) : <nl> + model_name = ' GRU . ' + direction + ' . ' <nl> + <nl> + if num_layers = = 1 : <nl> + model_name + = ' one_layer . ' <nl> + else : <nl> + assert ( num_layers = = 2 ) , " needs 1 or 2 layers ! " <nl> + model_name + = ' two_layer . ' <nl> + <nl> + if ( initial_state ! = 0 ) : <nl> + model_name + = ' initial . ' <nl> + <nl> + model_name + = activition . __name__ <nl> + return model_name <nl> + <nl> + direction_options = [ ' forward ' , ' reverse ' , ' bidirectional ' ] <nl> + num_layers_options = [ 1 , 2 ] <nl> + initial_state_options = [ 0 ] <nl> + activation_options = [ C . tanh , C . relu , C . sigmoid ] <nl> + <nl> + input_dim = 2 <nl> + hidden_dim = 3 <nl> + batch_size = 1 <nl> + sequence_len = 5 <nl> + <nl> + for config in list ( product ( direction_options , num_layers_options , initial_state_options , activation_options ) ) : <nl> + model_filename = MakeRNNNameFromConfig ( * config ) <nl> + print ( model_filename ) <nl> + direction , num_layers , initial_state , activation = config <nl> + <nl> + x = C . input_variable ( input_dim , dynamic_axes = [ C . Axis . default_batch_axis ( ) , C . Axis ( ' sequenceAxis ' ) ] ) <nl> + RNNModel = CreatRNN ( <nl> + hidden_dim , <nl> + activation , <nl> + initial_state , <nl> + direction , <nl> + num_layers ) ( x ) <nl> + data = np . random . uniform ( low = 0 . 0 , high = 1 . 0 , size = ( batch_size , sequence_len , input_dim ) ) . 
astype ( ' f ' ) <nl> + verify_one_input ( RNNModel , data , tmpdir , model_filename ) <nl> + <nl> # Selu <nl> def test_Selu ( tmpdir ) : <nl> model = C . selu ( [ [ - 1 , - 0 . 5 , 0 , 1 , 2 ] ] ) <nl> | Integrate liqun / RNNStage into master | microsoft/CNTK | aab2567d170043262837d0f78efd6dc9568805db | 2018-03-21T19:23:28Z |
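Editorial note: the microsoft/CNTK commit above imports the ONNX `RNN` operator by building a plain recurrent cell, h_t = activation(W·x_t + R·h_{t-1} + b), and — as the diff's own comment says — by fusing ONNX's two bias vectors (Wb and Rb) into the single bias CNTK adds after the projection sum. The C++ sketch below is not part of the commit or of CNTK; it only illustrates that fused-bias step on raw arrays, with tanh assumed as the activation (the default the diff registers for RNN).

```cpp
// Illustrative sketch only (not CNTK code): one step of a vanilla RNN cell with
// the ONNX biases Wb and Rb pre-fused into a single vector b, mirroring the
// "fuse two biases in ONNX into one" comment in the diff above.
#include <cmath>
#include <cstdio>
#include <vector>

// h_t = tanh(W * x_t + R * h_prev + b); W is row-major (hidden x input), R is (hidden x hidden).
std::vector<float> RnnStep(const std::vector<float>& W, const std::vector<float>& R,
                           const std::vector<float>& b,  // b = Wb + Rb (fused)
                           const std::vector<float>& x, const std::vector<float>& hPrev,
                           size_t inputSize, size_t hiddenSize) {
  std::vector<float> h(hiddenSize, 0.0f);
  for (size_t i = 0; i < hiddenSize; ++i) {
    float acc = b[i];
    for (size_t j = 0; j < inputSize; ++j)  acc += W[i * inputSize + j]  * x[j];
    for (size_t j = 0; j < hiddenSize; ++j) acc += R[i * hiddenSize + j] * hPrev[j];
    h[i] = std::tanh(acc);  // assumed default activation, as in the ONNX RNN spec
  }
  return h;
}

int main() {
  // Tiny example: input_size = 2, hidden_size = 3, zero initial state.
  std::vector<float> W = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};  // 3x2
  std::vector<float> R(9, 0.05f);                               // 3x3
  std::vector<float> Wb = {0.01f, 0.02f, 0.03f}, Rb = {0.1f, 0.1f, 0.1f};
  std::vector<float> b(3);
  for (size_t i = 0; i < 3; ++i) b[i] = Wb[i] + Rb[i];          // fuse the two ONNX biases
  std::vector<float> h = RnnStep(W, R, b, {1.0f, -1.0f}, {0.0f, 0.0f, 0.0f}, 2, 3);
  for (float v : h) std::printf("%f\n", v);
  return 0;
}
```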
mmm a / db / db_test . cc <nl> ppp b / db / db_test . cc <nl> TEST_F ( DBTest , AggregatedTableProperties ) { <nl> TEST_F ( DBTest , ReadLatencyHistogramByLevel ) { <nl> Options options = CurrentOptions ( ) ; <nl> options . write_buffer_size = 110 < < 10 ; <nl> - options . level0_file_num_compaction_trigger = 3 ; <nl> + options . level0_file_num_compaction_trigger = 6 ; <nl> options . num_levels = 4 ; <nl> options . compression = kNoCompression ; <nl> - options . max_bytes_for_level_base = 450 < < 10 ; <nl> + options . max_bytes_for_level_base = 4500 < < 10 ; <nl> options . target_file_size_base = 98 < < 10 ; <nl> options . max_write_buffer_number = 2 ; <nl> options . statistics = rocksdb : : CreateDBStatistics ( ) ; <nl> TEST_F ( DBTest , ReadLatencyHistogramByLevel ) { <nl> DestroyAndReopen ( options ) ; <nl> int key_index = 0 ; <nl> Random rnd ( 301 ) ; <nl> - for ( int num = 0 ; num < 5 ; num + + ) { <nl> + for ( int num = 0 ; num < 7 ; num + + ) { <nl> Put ( " foo " , " bar " ) ; <nl> GenerateNewFile ( & rnd , & key_index ) ; <nl> } <nl> + dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> <nl> std : : string prop ; <nl> ASSERT_TRUE ( dbfull ( ) - > GetProperty ( " rocksdb . dbstats " , & prop ) ) ; <nl> TEST_F ( DBTest , ReadLatencyHistogramByLevel ) { <nl> <nl> / / Reopen and issue Get ( ) . See thee latency tracked <nl> Reopen ( options ) ; <nl> + dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> for ( int key = 0 ; key < 500 ; key + + ) { <nl> Get ( Key ( key ) ) ; <nl> } <nl> | Make DBTest . AggregatedTableProperties more deterministic | facebook/rocksdb | e61d9c1484c32bd0028604ef65a724f7f55f2447 | 2015-10-09T16:47:56Z |
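Editorial note: the facebook/rocksdb commit above makes `ReadLatencyHistogramByLevel` deterministic by raising the L0 compaction trigger, enlarging the level-1 size target, and explicitly waiting for background compaction before the property checks. The sketch below is not from the commit and uses only public RocksDB APIs; it shows the same general pattern — flush the memtable, then run a full manual compaction — so the LSM shape is settled before anything is asserted. The path `/tmp/histo_demo` and the key/value sizes are placeholders.

```cpp
// Illustrative sketch (public API only): force a deterministic LSM state
// before inspecting DB properties, instead of racing background compaction.
#include <cassert>
#include <string>
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/histo_demo", &db);
  assert(s.ok());

  for (int i = 0; i < 500; ++i) {
    s = db->Put(rocksdb::WriteOptions(), "key" + std::to_string(i), std::string(1024, 'x'));
    assert(s.ok());
  }

  // Make the on-disk layout deterministic: persist memtables, then compact everything.
  db->Flush(rocksdb::FlushOptions());
  db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);

  std::string prop;
  if (db->GetProperty("rocksdb.dbstats", &prop)) {
    // Property reads now see a settled LSM tree, so histogram checks are stable.
  }
  delete db;
  return 0;
}
```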
mmm a / Marlin / src / inc / Version . h <nl> ppp b / Marlin / src / inc / Version . h <nl> <nl> * version was tagged . <nl> * / <nl> # ifndef STRING_DISTRIBUTION_DATE <nl> - # define STRING_DISTRIBUTION_DATE " 2020 - 08 - 01 " <nl> + # define STRING_DISTRIBUTION_DATE " 2020 - 08 - 02 " <nl> # endif <nl> <nl> / * * <nl> | [ cron ] Bump distribution date ( 2020 - 08 - 02 ) | MarlinFirmware/Marlin | 2465f2fd746169bca31ec43a956211bed15839c6 | 2020-08-02T00:10:12Z |
mmm a / cmake / OpenCVModule . cmake <nl> ppp b / cmake / OpenCVModule . cmake <nl> macro ( ocv_glob_module_sources ) <nl> if ( cl_kernels ) <nl> set ( OCL_NAME opencl_kernels_ $ { name } ) <nl> add_custom_command ( <nl> - OUTPUT " $ { CMAKE_CURRENT_BINARY_DIR } / $ { OCL_NAME } . cpp " " $ { CMAKE_CURRENT_BINARY_DIR } / $ { OCL_NAME } . hpp " <nl> + OUTPUT " $ { CMAKE_CURRENT_BINARY_DIR } / $ { OCL_NAME } . hpp " " $ { CMAKE_CURRENT_BINARY_DIR } / $ { OCL_NAME } . cpp " <nl> COMMAND $ { CMAKE_COMMAND } " - DMODULE_NAME = $ { name } " " - DCL_DIR = $ { CMAKE_CURRENT_LIST_DIR } / src / opencl " " - DOUTPUT = $ { CMAKE_CURRENT_BINARY_DIR } / $ { OCL_NAME } . cpp " - P " $ { OpenCV_SOURCE_DIR } / cmake / cl2cpp . cmake " <nl> DEPENDS $ { cl_kernels } " $ { OpenCV_SOURCE_DIR } / cmake / cl2cpp . cmake " ) <nl> ocv_source_group ( " Src \ \ opencl \ \ kernels " FILES $ { cl_kernels } ) <nl> | Merge pull request from alalek : cmake_cl2cpp_dependency_fix | opencv/opencv | 273add5d5d5acadf5ab087b699c2601316bc31c8 | 2017-09-01T18:11:15Z |
mmm a / admin / static / coffee / namespaces / shards . html <nl> ppp b / admin / static / coffee / namespaces / shards . html <nl> <nl> < script id = " shard_name_td - partial " type = " text / x - handlebars - template " > <nl> < td class = " name " > <nl> < h4 > { { name } } < / h4 > <nl> - Keys : ~ { { shard_stats . rows_approx } } <nl> + About { { shard_stats . rows_approx } } { { pluralize_noun " key " shard_stats . rows_approx } } <nl> < / td > <nl> < / script > <nl> <nl> mmm a / admin / static / coffee / util . coffee <nl> ppp b / admin / static / coffee / util . coffee <nl> Handlebars . registerHelper ' pluralize_noun ' , ( noun , num , capitalize ) - > <nl> if num is 1 <nl> result = noun <nl> else <nl> - if noun . substr ( - 1 ) is ' y ' <nl> + if noun . substr ( - 1 ) is ' y ' and ( noun isnt ' key ' ) <nl> result = noun . slice ( 0 , noun . length - 1 ) + " ies " <nl> else <nl> result = noun + " s " <nl> | Key tilde pluralization crpa | rethinkdb/rethinkdb | e3c07cb3fc8660070589c34f10cc0654787e02f5 | 2012-05-08T03:01:58Z |
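Editorial note: the rethinkdb commit above fixes the Handlebars `pluralize_noun` helper, which turned every noun ending in 'y' into "-ies" and therefore rendered "keies" on the shards panel; the fix special-cases 'key' and replaces the "Keys: ~N" label with "About N keys". The project code is CoffeeScript; purely as an illustration, the corrected rule expressed in C++ is:

```cpp
// Illustrative sketch of the pluralization rule fixed in the commit:
// nouns ending in 'y' become "-ies", except "key" -> "keys".
#include <iostream>
#include <string>

std::string PluralizeNoun(const std::string& noun, long long num) {
  if (num == 1) return noun;
  if (!noun.empty() && noun.back() == 'y' && noun != "key")
    return noun.substr(0, noun.size() - 1) + "ies";
  return noun + "s";
}

int main() {
  std::cout << PluralizeNoun("key", 3)   << "\n";  // keys (was "keies" before the fix)
  std::cout << PluralizeNoun("query", 3) << "\n";  // queries
  std::cout << PluralizeNoun("key", 1)   << "\n";  // key
  return 0;
}
```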
mmm a / lib / Sema / TypeChecker . cpp <nl> ppp b / lib / Sema / TypeChecker . cpp <nl> void swift : : performWholeModuleTypeChecking ( SourceFile & SF ) { <nl> bool swift : : performTypeLocChecking ( ASTContext & Ctx , TypeLoc & T , <nl> DeclContext * DC , <nl> bool ProduceDiagnostics ) { <nl> - return performTypeLocChecking ( Ctx , T , <nl> - / * isSILMode = * / false , <nl> - / * isSILType = * / false , <nl> - / * GenericEnv = * / nullptr , <nl> - DC , ProduceDiagnostics ) ; <nl> + return performTypeLocChecking ( <nl> + Ctx , T , <nl> + / * isSILMode = * / false , <nl> + / * isSILType = * / false , <nl> + / * GenericEnv = * / DC - > getGenericEnvironmentOfContext ( ) , <nl> + DC , ProduceDiagnostics ) ; <nl> } <nl> <nl> bool swift : : performTypeLocChecking ( ASTContext & Ctx , TypeLoc & T , <nl> | [ Type checker ] Teach performTypeLocChecking ( ) to provide a generic environment . | apple/swift | 975849935355e0a7128214503a38af2d74f390d2 | 2016-12-06T06:44:51Z |
mmm a / src / utils . h <nl> ppp b / src / utils . h <nl> class EnumSet { <nl> void Intersect ( const EnumSet & set ) { bits_ & = set . bits_ ; } <nl> T ToIntegral ( ) const { return bits_ ; } <nl> bool operator = = ( const EnumSet & set ) { return bits_ = = set . bits_ ; } <nl> + EnumSet < E , T > operator | ( const EnumSet & set ) const { <nl> + return EnumSet < E , T > ( bits_ | set . bits_ ) ; <nl> + } <nl> <nl> private : <nl> T Mask ( E element ) const { <nl> mmm a / test / cctest / cctest . cc <nl> ppp b / test / cctest / cctest . cc <nl> CcTest : : CcTest ( TestFunction * callback , const char * file , const char * name , <nl> } <nl> <nl> <nl> + v8 : : Persistent < v8 : : Context > CcTest : : context_ ; <nl> + <nl> + <nl> + void CcTest : : InitializeVM ( CcTestExtensionFlags extensions ) { <nl> + const char * extension_names [ kMaxExtensions ] ; <nl> + int extension_count = 0 ; <nl> + # define CHECK_EXTENSION_FLAG ( Name , Id ) \ <nl> + if ( extensions . Contains ( Name # # _ID ) ) extension_names [ extension_count + + ] = Id ; <nl> + EXTENSION_LIST ( CHECK_EXTENSION_FLAG ) <nl> + # undef CHECK_EXTENSION_FLAG <nl> + if ( context_ . IsEmpty ( ) ) { <nl> + v8 : : ExtensionConfiguration config ( extension_count , extension_names ) ; <nl> + context_ = v8 : : Context : : New ( & config ) ; <nl> + } <nl> + context_ - > Enter ( ) ; <nl> + } <nl> + <nl> + <nl> static void PrintTestList ( CcTest * current ) { <nl> if ( current = = NULL ) return ; <nl> PrintTestList ( current - > prev ( ) ) ; <nl> static void PrintTestList ( CcTest * current ) { <nl> <nl> v8 : : Isolate * CcTest : : default_isolate_ ; <nl> <nl> + <nl> int main ( int argc , char * argv [ ] ) { <nl> v8 : : internal : : FlagList : : SetFlagsFromCommandLine ( & argc , argv , true ) ; <nl> CcTest : : set_default_isolate ( v8 : : Isolate : : GetCurrent ( ) ) ; <nl> mmm a / test / cctest / cctest . h <nl> ppp b / test / cctest / cctest . h <nl> <nl> static void Test # # Name ( ) <nl> # endif <nl> <nl> + # define EXTENSION_LIST ( V ) \ <nl> + V ( GC_EXTENSION , " v8 / gc " ) \ <nl> + V ( PRINT_EXTENSION , " v8 / print " ) \ <nl> + V ( TRACE_EXTENSION , " v8 / trace " ) <nl> + <nl> + # define DEFINE_EXTENSION_ID ( Name , Ident ) Name # # _ID , <nl> + enum CcTestExtensionIds { <nl> + EXTENSION_LIST ( DEFINE_EXTENSION_ID ) <nl> + kMaxExtensions <nl> + } ; <nl> + # undef DEFINE_EXTENSION_ID <nl> + <nl> + typedef v8 : : internal : : EnumSet < CcTestExtensionIds > CcTestExtensionFlags ; <nl> + # define DEFINE_EXTENSION_FLAG ( Name , Ident ) \ <nl> + static const CcTestExtensionFlags Name ( 1 < < Name # # _ID ) ; <nl> + static const CcTestExtensionFlags NO_EXTENSIONS ( 0 ) ; <nl> + static const CcTestExtensionFlags ALL_EXTENSIONS ( ( 1 < < kMaxExtensions ) - 1 ) ; <nl> + EXTENSION_LIST ( DEFINE_EXTENSION_FLAG ) <nl> + # undef DEFINE_EXTENSION_FLAG <nl> + <nl> class CcTest { <nl> public : <nl> typedef void ( TestFunction ) ( ) ; <nl> class CcTest { <nl> default_isolate_ = default_isolate ; <nl> } <nl> static v8 : : Isolate * default_isolate ( ) { return default_isolate_ ; } <nl> + static v8 : : Isolate * isolate ( ) { return context_ - > GetIsolate ( ) ; } <nl> + static v8 : : Handle < v8 : : Context > env ( ) { return context_ ; } <nl> + <nl> + / / Helper function to initialize the VM . 
<nl> + static void InitializeVM ( CcTestExtensionFlags extensions = NO_EXTENSIONS ) ; <nl> <nl> private : <nl> TestFunction * callback_ ; <nl> class CcTest { <nl> const char * name_ ; <nl> const char * dependency_ ; <nl> bool enabled_ ; <nl> - static CcTest * last_ ; <nl> CcTest * prev_ ; <nl> + static CcTest * last_ ; <nl> static v8 : : Isolate * default_isolate_ ; <nl> + static v8 : : Persistent < v8 : : Context > context_ ; <nl> } ; <nl> <nl> / / Switches between all the Api tests using the threading support . <nl> mmm a / test / cctest / test - assembler - arm . cc <nl> ppp b / test / cctest / test - assembler - arm . cc <nl> typedef Object * ( * F3 ) ( void * p0 , int p1 , int p2 , int p3 , int p4 ) ; <nl> typedef Object * ( * F4 ) ( void * p0 , void * p1 , int p2 , int p3 , int p4 ) ; <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> # define __ assm . <nl> <nl> TEST ( 0 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 0 ) { <nl> <nl> <nl> TEST ( 1 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 1 ) { <nl> <nl> <nl> TEST ( 2 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 2 ) { <nl> <nl> <nl> TEST ( 3 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 3 ) { <nl> <nl> TEST ( 4 ) { <nl> / / Test the VFP floating point instructions . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 4 ) { <nl> <nl> TEST ( 5 ) { <nl> / / Test the ARMv7 bitfield instructions . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 5 ) { <nl> <nl> TEST ( 6 ) { <nl> / / Test saturating instructions . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> static void TestRoundingMode ( VCVTTypes types , <nl> double value , <nl> int expected , <nl> bool expected_exception = false ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 7 ) { <nl> <nl> TEST ( 8 ) { <nl> / / Test VFP multi load / store with ia_w . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 8 ) { <nl> <nl> TEST ( 9 ) { <nl> / / Test VFP multi load / store with ia . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 9 ) { <nl> <nl> TEST ( 10 ) { <nl> / / Test VFP multi load / store with db_w . 
<nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 10 ) { <nl> <nl> TEST ( 11 ) { <nl> / / Test instructions using the carry flag . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 11 ) { <nl> <nl> TEST ( 12 ) { <nl> / / Test chaining of label usages within instructions ( issue 1644 ) . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( 12 ) { <nl> <nl> TEST ( 13 ) { <nl> / / Test VFP instructions using registers d16 - d31 . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> mmm a / test / cctest / test - assembler - ia32 . cc <nl> ppp b / test / cctest / test - assembler - ia32 . cc <nl> typedef int ( * F1 ) ( int x ) ; <nl> typedef int ( * F2 ) ( int x , int y ) ; <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> # define __ assm . <nl> <nl> TEST ( AssemblerIa320 ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> TEST ( AssemblerIa320 ) { <nl> <nl> <nl> TEST ( AssemblerIa321 ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> TEST ( AssemblerIa321 ) { <nl> <nl> <nl> TEST ( AssemblerIa322 ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> TEST ( AssemblerIa322 ) { <nl> typedef int ( * F3 ) ( float x ) ; <nl> <nl> TEST ( AssemblerIa323 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! CpuFeatures : : IsSupported ( SSE2 ) ) return ; <nl> <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> TEST ( AssemblerIa323 ) { <nl> typedef int ( * F4 ) ( double x ) ; <nl> <nl> TEST ( AssemblerIa324 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! 
CpuFeatures : : IsSupported ( SSE2 ) ) return ; <nl> <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> TEST ( AssemblerIa324 ) { <nl> <nl> static int baz = 42 ; <nl> TEST ( AssemblerIa325 ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> TEST ( AssemblerIa325 ) { <nl> typedef double ( * F5 ) ( double x , double y ) ; <nl> <nl> TEST ( AssemblerIa326 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! CpuFeatures : : IsSupported ( SSE2 ) ) return ; <nl> <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> Assembler assm ( isolate , buffer , sizeof buffer ) ; <nl> TEST ( AssemblerIa326 ) { <nl> typedef double ( * F6 ) ( int x ) ; <nl> <nl> TEST ( AssemblerIa328 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! CpuFeatures : : IsSupported ( SSE2 ) ) return ; <nl> <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> Assembler assm ( isolate , buffer , sizeof buffer ) ; <nl> TEST ( AssemblerIa328 ) { <nl> typedef int ( * F7 ) ( double x , double y ) ; <nl> <nl> TEST ( AssemblerIa329 ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> v8 : : internal : : byte buffer [ 256 ] ; <nl> MacroAssembler assm ( isolate , buffer , sizeof buffer ) ; <nl> TEST ( AssemblerIa329 ) { <nl> <nl> TEST ( AssemblerIa3210 ) { <nl> / / Test chaining of label usages within instructions ( issue 1644 ) . <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> Assembler assm ( isolate , NULL , 0 ) ; <nl> <nl> TEST ( AssemblerIa3210 ) { <nl> <nl> <nl> TEST ( AssemblerMultiByteNop ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> v8 : : internal : : byte buffer [ 1024 ] ; <nl> Assembler assm ( isolate , buffer , sizeof ( buffer ) ) ; <nl> mmm a / test / cctest / test - assembler - mips . cc <nl> ppp b / test / cctest / test - assembler - mips . 
cc <nl> typedef Object * ( * F2 ) ( int x , int y , int p2 , int p3 , int p4 ) ; <nl> typedef Object * ( * F3 ) ( void * p , int p1 , int p2 , int p3 , int p4 ) ; <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - <nl> - static void InitializeVM ( ) { <nl> - / / Disable compilation of natives . <nl> - FLAG_disable_native_files = true ; <nl> - <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> # define __ assm . <nl> <nl> <nl> TEST ( MIPS0 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS0 ) { <nl> <nl> <nl> TEST ( MIPS1 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS1 ) { <nl> <nl> <nl> TEST ( MIPS2 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS2 ) { <nl> <nl> TEST ( MIPS3 ) { <nl> / / Test floating point instructions . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS3 ) { <nl> <nl> TEST ( MIPS4 ) { <nl> / / Test moves between floating point and integer registers . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS4 ) { <nl> <nl> TEST ( MIPS5 ) { <nl> / / Test conversions between doubles and integers . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS5 ) { <nl> <nl> TEST ( MIPS6 ) { <nl> / / Test simple memory loads and stores . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS6 ) { <nl> <nl> TEST ( MIPS7 ) { <nl> / / Test floating point compare and branch instructions . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS7 ) { <nl> <nl> TEST ( MIPS8 ) { <nl> / / Test ROTR and ROTRV instructions . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS8 ) { <nl> <nl> TEST ( MIPS9 ) { <nl> / / Test BRANCH improvements . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS9 ) { <nl> TEST ( MIPS10 ) { <nl> / / Test conversions between doubles and long integers . <nl> / / Test hos the long ints map to FP regs pairs . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS10 ) { <nl> <nl> TEST ( MIPS11 ) { <nl> / / Test LWL , LWR , SWL and SWR instructions . 
<nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS11 ) { <nl> <nl> <nl> TEST ( MIPS12 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS12 ) { <nl> <nl> TEST ( MIPS13 ) { <nl> / / Test Cvt_d_uw and Trunc_uw_d macros . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS13 ) { <nl> <nl> TEST ( MIPS14 ) { <nl> / / Test round , floor , ceil , trunc , cvt . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> TEST ( MIPS14 ) { <nl> <nl> TEST ( MIPS15 ) { <nl> / / Test chaining of label usages within instructions ( issue 1644 ) . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> Assembler assm ( isolate , NULL , 0 ) ; <nl> mmm a / test / cctest / test - assembler - x64 . cc <nl> ppp b / test / cctest / test - assembler - x64 . cc <nl> static const v8 : : internal : : Register arg2 = rsi ; <nl> # define __ assm . <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> TEST ( AssemblerX64ReturnOperation ) { <nl> OS : : SetUp ( ) ; <nl> / / Allocate an executable page of memory . <nl> TEST ( OperandRegisterDependency ) { <nl> <nl> TEST ( AssemblerX64LabelChaining ) { <nl> / / Test chaining of label usages within instructions ( issue 1644 ) . <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> Assembler assm ( Isolate : : Current ( ) , NULL , 0 ) ; <nl> <nl> Label target ; <nl> TEST ( AssemblerX64LabelChaining ) { <nl> <nl> <nl> TEST ( AssemblerMultiByteNop ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : internal : : byte buffer [ 1024 ] ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Assembler assm ( isolate , buffer , sizeof ( buffer ) ) ; <nl> mmm a / test / cctest / test - compiler . cc <nl> ppp b / test / cctest / test - compiler . cc <nl> <nl> <nl> using namespace v8 : : internal ; <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> / / mmm P r i n t E x t e n s i o n mmm <nl> <nl> class PrintExtension : public v8 : : Extension { <nl> static PrintExtension kPrintExtension ; <nl> v8 : : DeclareExtension kPrintExtensionDeclaration ( & kPrintExtension ) ; <nl> <nl> <nl> - static void InitializeVM ( ) { <nl> - if ( env . 
IsEmpty ( ) ) { <nl> - const char * extensions [ ] = { " v8 / print " , " v8 / gc " } ; <nl> - v8 : : ExtensionConfiguration config ( 2 , extensions ) ; <nl> - env = v8 : : Context : : New ( & config ) ; <nl> - } <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> - <nl> static MaybeObject * GetGlobalProperty ( const char * name ) { <nl> Handle < String > internalized_name = FACTORY - > InternalizeUtf8String ( name ) ; <nl> return Isolate : : Current ( ) - > context ( ) - > global_object ( ) - > GetProperty ( <nl> static double Inc ( int x ) { <nl> <nl> <nl> TEST ( Inc ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CHECK_EQ ( 4 . 0 , Inc ( 3 ) ) ; <nl> } <nl> <nl> static double Add ( int x , int y ) { <nl> <nl> <nl> TEST ( Add ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CHECK_EQ ( 5 . 0 , Add ( 2 , 3 ) ) ; <nl> } <nl> <nl> static double Abs ( int x ) { <nl> <nl> <nl> TEST ( Abs ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CHECK_EQ ( 3 . 0 , Abs ( - 3 ) ) ; <nl> } <nl> <nl> static double Sum ( int n ) { <nl> <nl> <nl> TEST ( Sum ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CHECK_EQ ( 5050 . 0 , Sum ( 100 ) ) ; <nl> } <nl> <nl> <nl> TEST ( Print ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( PRINT_EXTENSION ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * source = " for ( n = 0 ; n < 100 ; + + n ) print ( n , 1 , 2 ) ; " ; <nl> Handle < JSFunction > fun = Compile ( source ) ; <nl> if ( fun . is_null ( ) ) return ; <nl> TEST ( Print ) { <nl> / / The following test method stems from my coding efforts today . It <nl> / / tests all the functionality I have added to the compiler today <nl> TEST ( Stuff ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * source = <nl> " r = 0 ; \ n " <nl> " a = new Object ; \ n " <nl> TEST ( Stuff ) { <nl> <nl> <nl> TEST ( UncaughtThrow ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> const char * source = " throw 42 ; " ; <nl> Handle < JSFunction > fun = Compile ( source ) ; <nl> TEST ( UncaughtThrow ) { <nl> / / | JS | <nl> / / | C - to - JS | <nl> TEST ( C2JSFrames ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( PRINT_EXTENSION | GC_EXTENSION ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> const char * source = " function foo ( a ) { gc ( ) , print ( a ) ; } " ; <nl> <nl> TEST ( C2JSFrames ) { <nl> / / Regression 236 . Calling InitLineEnds on a Script with undefined <nl> / / source resulted in crash . 
<nl> TEST ( Regression236 ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> Handle < Script > script = FACTORY - > NewScript ( FACTORY - > empty_string ( ) ) ; <nl> script - > set_source ( HEAP - > undefined_value ( ) ) ; <nl> TEST ( Regression236 ) { <nl> <nl> <nl> TEST ( GetScriptLineNumber ) { <nl> - LocalContext env ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : ScriptOrigin origin = v8 : : ScriptOrigin ( v8 : : String : : New ( " test " ) ) ; <nl> const char function_f [ ] = " function f ( ) { } " ; <nl> const int max_rows = 1000 ; <nl> TEST ( GetScriptLineNumber ) { <nl> v8 : : Handle < v8 : : String > script_body = v8 : : String : : New ( buffer . start ( ) ) ; <nl> v8 : : Script : : Compile ( script_body , & origin ) - > Run ( ) ; <nl> v8 : : Local < v8 : : Function > f = v8 : : Local < v8 : : Function > : : Cast ( <nl> - env - > Global ( ) - > Get ( v8 : : String : : New ( " f " ) ) ) ; <nl> + CcTest : : env ( ) - > Global ( ) - > Get ( v8 : : String : : New ( " f " ) ) ) ; <nl> CHECK_EQ ( i , f - > GetScriptLineNumber ( ) ) ; <nl> } <nl> } <nl> TEST ( OptimizedCodeSharing ) { <nl> / / FastNewClosureStub that is baked into the snapshot is incorrect . <nl> if ( ! FLAG_cache_optimized_code ) return ; <nl> FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> for ( int i = 0 ; i < 10 ; i + + ) { <nl> LocalContext env ; <nl> env - > Global ( ) - > Set ( v8 : : String : : New ( " x " ) , v8 : : Integer : : New ( i ) ) ; <nl> static void CheckCodeForUnsafeLiteral ( Handle < JSFunction > f ) { <nl> <nl> <nl> TEST ( SplitConstantsInFullCompiler ) { <nl> - LocalContext env ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> CompileRun ( " function f ( ) { a = 12345678 } ; f ( ) ; " ) ; <nl> - CheckCodeForUnsafeLiteral ( GetJSFunction ( env - > Global ( ) , " f " ) ) ; <nl> + CheckCodeForUnsafeLiteral ( GetJSFunction ( CcTest : : env ( ) - > Global ( ) , " f " ) ) ; <nl> CompileRun ( " function f ( x ) { a = 12345678 + x } ; f ( 1 ) ; " ) ; <nl> - CheckCodeForUnsafeLiteral ( GetJSFunction ( env - > Global ( ) , " f " ) ) ; <nl> + CheckCodeForUnsafeLiteral ( GetJSFunction ( CcTest : : env ( ) - > Global ( ) , " f " ) ) ; <nl> CompileRun ( " function f ( x ) { var arguments = 1 ; x + = 12345678 } ; f ( 1 ) ; " ) ; <nl> - CheckCodeForUnsafeLiteral ( GetJSFunction ( env - > Global ( ) , " f " ) ) ; <nl> + CheckCodeForUnsafeLiteral ( GetJSFunction ( CcTest : : env ( ) - > Global ( ) , " f " ) ) ; <nl> CompileRun ( " function f ( x ) { var arguments = 1 ; x = 12345678 } ; f ( 1 ) ; " ) ; <nl> - CheckCodeForUnsafeLiteral ( GetJSFunction ( env - > Global ( ) , " f " ) ) ; <nl> + CheckCodeForUnsafeLiteral ( GetJSFunction ( CcTest : : env ( ) - > Global ( ) , " f " ) ) ; <nl> } <nl> # endif <nl> mmm a / test / cctest / test - cpu - profiler . cc <nl> ppp b / test / cctest / test - cpu - profiler . cc <nl> TEST ( StartStop ) { <nl> processor . 
Join ( ) ; <nl> } <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) env = v8 : : Context : : New ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> static inline i : : Address ToAddress ( int n ) { <nl> return reinterpret_cast < i : : Address > ( n ) ; <nl> } <nl> class TestSetup { <nl> } / / namespace <nl> <nl> TEST ( CodeEvents ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> i : : Isolate * isolate = i : : Isolate : : Current ( ) ; <nl> i : : Heap * heap = isolate - > heap ( ) ; <nl> i : : Factory * factory = isolate - > factory ( ) ; <nl> TEST ( TickEvents ) { <nl> / / http : / / crbug / 51594 <nl> / / This test must not crash . <nl> TEST ( CrashIfStoppingLastNonExistentProfile ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> TestSetup test_setup ; <nl> CpuProfiler * profiler = i : : Isolate : : Current ( ) - > cpu_profiler ( ) ; <nl> profiler - > StartProfiling ( " 1 " ) ; <nl> TEST ( Issue1398 ) { <nl> <nl> <nl> TEST ( DeleteAllCpuProfiles ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> TestSetup test_setup ; <nl> CpuProfiler * profiler = i : : Isolate : : Current ( ) - > cpu_profiler ( ) ; <nl> CHECK_EQ ( 0 , profiler - > GetProfilesCount ( ) ) ; <nl> mmm a / test / cctest / test - disasm - arm . cc <nl> ppp b / test / cctest / test - disasm - arm . cc <nl> <nl> using namespace v8 : : internal ; <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> bool DisassembleAndCompare ( byte * pc , const char * compare_string ) { <nl> disasm : : NameConverter converter ; <nl> disasm : : Disassembler disasm ( converter ) ; <nl> bool DisassembleAndCompare ( byte * pc , const char * compare_string ) { <nl> / / disassembler . Declare the variables and allocate the data structures used <nl> / / in the rest of the macros . <nl> # define SET_UP ( ) \ <nl> - InitializeVM ( ) ; \ <nl> + CcTest : : InitializeVM ( ) ; \ <nl> Isolate * isolate = Isolate : : Current ( ) ; \ <nl> HandleScope scope ( isolate ) ; \ <nl> byte * buffer = reinterpret_cast < byte * > ( malloc ( 4 * 1024 ) ) ; \ <nl> mmm a / test / cctest / test - disasm - ia32 . cc <nl> ppp b / test / cctest / test - disasm - ia32 . cc <nl> <nl> <nl> using namespace v8 : : internal ; <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> <nl> # define __ assm . <nl> <nl> static void DummyStaticFunction ( Object * result ) { <nl> <nl> <nl> TEST ( DisasmIa320 ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> HandleScope scope ( isolate ) ; <nl> v8 : : internal : : byte buffer [ 2048 ] ; <nl> Assembler assm ( isolate , buffer , sizeof buffer ) ; <nl> mmm a / test / cctest / test - disasm - mips . cc <nl> ppp b / test / cctest / test - disasm - mips . 
cc <nl> <nl> using namespace v8 : : internal ; <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - / / Disable compilation of natives . <nl> - FLAG_disable_native_files = true ; <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> bool DisassembleAndCompare ( byte * pc , const char * compare_string ) { <nl> disasm : : NameConverter converter ; <nl> disasm : : Disassembler disasm ( converter ) ; <nl> bool DisassembleAndCompare ( byte * pc , const char * compare_string ) { <nl> / / disassembler . Declare the variables and allocate the data structures used <nl> / / in the rest of the macros . <nl> # define SET_UP ( ) \ <nl> - InitializeVM ( ) ; \ <nl> + CcTest : : InitializeVM ( ) ; \ <nl> Isolate * isolate = Isolate : : Current ( ) ; \ <nl> HandleScope scope ( isolate ) ; \ <nl> byte * buffer = reinterpret_cast < byte * > ( malloc ( 4 * 1024 ) ) ; \ <nl> mmm a / test / cctest / test - disasm - x64 . cc <nl> ppp b / test / cctest / test - disasm - x64 . cc <nl> <nl> <nl> using namespace v8 : : internal ; <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - } <nl> - <nl> <nl> # define __ assm . <nl> <nl> static void DummyStaticFunction ( Object * result ) { <nl> <nl> <nl> TEST ( DisasmX64 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> v8 : : HandleScope scope ; <nl> v8 : : internal : : byte buffer [ 2048 ] ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , sizeof buffer ) ; <nl> mmm a / test / cctest / test - func - name - inference . cc <nl> ppp b / test / cctest / test - func - name - inference . cc <nl> using : : v8 : : internal : : SharedFunctionInfo ; <nl> using : : v8 : : internal : : String ; <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . 
IsEmpty ( ) ) { <nl> - env = v8 : : Context : : New ( ) ; <nl> - } <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> - <nl> static void CheckFunctionName ( v8 : : Handle < v8 : : Script > script , <nl> const char * func_pos_src , <nl> const char * ref_inferred_name ) { <nl> static v8 : : Handle < v8 : : Script > Compile ( const char * src ) { <nl> <nl> <nl> TEST ( GlobalProperty ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " fun1 = function ( ) { return 1 ; } \ n " <nl> TEST ( GlobalProperty ) { <nl> <nl> <nl> TEST ( GlobalVar ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " var fun1 = function ( ) { return 1 ; } \ n " <nl> TEST ( GlobalVar ) { <nl> <nl> <nl> TEST ( LocalVar ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function outer ( ) { \ n " <nl> TEST ( LocalVar ) { <nl> <nl> <nl> TEST ( InConstructor ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function MyClass ( ) { \ n " <nl> TEST ( InConstructor ) { <nl> <nl> <nl> TEST ( Factory ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function createMyObj ( ) { \ n " <nl> TEST ( Factory ) { <nl> <nl> <nl> TEST ( Static ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function MyClass ( ) { } \ n " <nl> TEST ( Static ) { <nl> <nl> <nl> TEST ( Prototype ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function MyClass ( ) { } \ n " <nl> TEST ( Prototype ) { <nl> <nl> <nl> TEST ( ObjectLiteral ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function MyClass ( ) { } \ n " <nl> TEST ( ObjectLiteral ) { <nl> <nl> <nl> TEST ( AsParameter ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function f1 ( a ) { return a ( ) ; } \ n " <nl> TEST ( AsParameter ) { <nl> <nl> <nl> TEST ( MultipleFuncsConditional 
) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " fun1 = 0 ? \ n " <nl> TEST ( MultipleFuncsConditional ) { <nl> <nl> <nl> TEST ( MultipleFuncsInLiteral ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function MyClass ( ) { } \ n " <nl> TEST ( MultipleFuncsInLiteral ) { <nl> <nl> / / See http : / / code . google . com / p / v8 / issues / detail ? id = 380 <nl> TEST ( Issue380 ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function a ( ) { \ n " <nl> TEST ( Issue380 ) { <nl> <nl> <nl> TEST ( MultipleAssignments ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " var fun1 = fun2 = function ( ) { return 1 ; } \ n " <nl> TEST ( MultipleAssignments ) { <nl> <nl> <nl> TEST ( AsConstructorParameter ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function Foo ( ) { } \ n " <nl> TEST ( AsConstructorParameter ) { <nl> <nl> <nl> TEST ( FactoryHashmap ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function createMyObj ( ) { \ n " <nl> TEST ( FactoryHashmap ) { <nl> <nl> <nl> TEST ( FactoryHashmapVariable ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function createMyObj ( ) { \ n " <nl> TEST ( FactoryHashmapVariable ) { <nl> <nl> <nl> TEST ( FactoryHashmapConditional ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " function createMyObj ( ) { \ n " <nl> TEST ( FactoryHashmapConditional ) { <nl> <nl> <nl> TEST ( GlobalAssignmentAndCall ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " var Foo = function ( ) { \ n " <nl> TEST ( GlobalAssignmentAndCall ) { <nl> <nl> <nl> TEST ( AssignmentAndCall ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : 
isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " ( function Enclosing ( ) { \ n " <nl> TEST ( AssignmentAndCall ) { <nl> <nl> <nl> TEST ( MethodAssignmentInAnonymousFunctionCall ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " ( function ( ) { \ n " <nl> TEST ( MethodAssignmentInAnonymousFunctionCall ) { <nl> <nl> <nl> TEST ( ReturnAnonymousFunction ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : Script > script = Compile ( <nl> " ( function ( ) { \ n " <nl> mmm a / test / cctest / test - heap . cc <nl> ppp b / test / cctest / test - heap . cc <nl> <nl> <nl> using namespace v8 : : internal ; <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) env = v8 : : Context : : New ( ) ; <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> <nl> / / Go through all incremental marking steps in one swoop . <nl> static void SimulateIncrementalMarking ( ) { <nl> static void CheckMap ( Map * map , int type , int instance_size ) { <nl> <nl> <nl> TEST ( HeapMaps ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> CheckMap ( HEAP - > meta_map ( ) , MAP_TYPE , Map : : kSize ) ; <nl> CheckMap ( HEAP - > heap_number_map ( ) , HEAP_NUMBER_TYPE , HeapNumber : : kSize ) ; <nl> CheckMap ( HEAP - > fixed_array_map ( ) , FIXED_ARRAY_TYPE , kVariableSizeSentinel ) ; <nl> static void CheckFindCodeObject ( Isolate * isolate ) { <nl> <nl> <nl> TEST ( HeapObjects ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> <nl> TEST ( HeapObjects ) { <nl> <nl> <nl> TEST ( Tagging ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> int request = 24 ; <nl> CHECK_EQ ( request , static_cast < int > ( OBJECT_POINTER_ALIGN ( request ) ) ) ; <nl> CHECK ( Smi : : FromInt ( 42 ) - > IsSmi ( ) ) ; <nl> TEST ( Tagging ) { <nl> <nl> <nl> TEST ( GarbageCollection ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> Factory * factory = isolate - > factory ( ) ; <nl> static void VerifyStringAllocation ( Isolate * isolate , const char * string ) { <nl> <nl> <nl> TEST ( String ) { <nl> - InitializeVM ( ) ; <nl> - Isolate * isolate = reinterpret_cast < Isolate * > ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = reinterpret_cast < Isolate * > ( CcTest : : isolate ( ) ) ; <nl> <nl> VerifyStringAllocation ( isolate , " a " ) ; <nl> VerifyStringAllocation ( isolate , " ab " ) ; <nl> TEST ( String ) { <nl> <nl> <nl> TEST ( LocalHandles ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * name = " Kasper the spunky " ; <nl> Handle < String > string = FACTORY - > NewStringFromAscii ( CStrVector ( name ) ) ; <nl> CHECK_EQ ( StrLength ( name ) , string - > length ( ) ) ; <nl> TEST ( LocalHandles ) { <nl> <nl> 
<nl> TEST ( GlobalHandles ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> Factory * factory = isolate - > factory ( ) ; <nl> static void TestWeakGlobalHandleCallback ( v8 : : Isolate * isolate , <nl> <nl> <nl> TEST ( WeakGlobalHandlesScavenge ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> Factory * factory = isolate - > factory ( ) ; <nl> TEST ( WeakGlobalHandlesScavenge ) { <nl> <nl> <nl> TEST ( WeakGlobalHandlesMark ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> Factory * factory = isolate - > factory ( ) ; <nl> TEST ( WeakGlobalHandlesMark ) { <nl> <nl> <nl> TEST ( DeleteWeakGlobalHandle ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> Factory * factory = isolate - > factory ( ) ; <nl> static void CheckInternalizedStrings ( const char * * strings ) { <nl> <nl> <nl> TEST ( StringTable ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> CheckInternalizedStrings ( not_so_random_string_table ) ; <nl> CheckInternalizedStrings ( not_so_random_string_table ) ; <nl> TEST ( StringTable ) { <nl> <nl> <nl> TEST ( FunctionAllocation ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> Handle < String > name = FACTORY - > InternalizeUtf8String ( " theFunction " ) ; <nl> Handle < JSFunction > function = <nl> FACTORY - > NewFunction ( name , FACTORY - > undefined_value ( ) ) ; <nl> TEST ( FunctionAllocation ) { <nl> <nl> <nl> TEST ( ObjectProperties ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> String * object_string = String : : cast ( HEAP - > Object_string ( ) ) ; <nl> Object * raw_object = Isolate : : Current ( ) - > context ( ) - > global_object ( ) - > <nl> GetProperty ( object_string ) - > ToObjectChecked ( ) ; <nl> TEST ( ObjectProperties ) { <nl> <nl> <nl> TEST ( JSObjectMaps ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> Handle < String > name = FACTORY - > InternalizeUtf8String ( " theFunction " ) ; <nl> Handle < JSFunction > function = <nl> FACTORY - > NewFunction ( name , FACTORY - > undefined_value ( ) ) ; <nl> TEST ( JSObjectMaps ) { <nl> <nl> <nl> TEST ( JSArray ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> Handle < String > name = FACTORY - > InternalizeUtf8String ( " Array " ) ; <nl> Object * raw_object = Isolate : : Current ( ) - > context ( ) - > global_object ( ) - > <nl> GetProperty ( * name ) - > ToObjectChecked ( ) ; <nl> TEST ( JSArray ) { <nl> <nl> <nl> TEST ( JSObjectCopy ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : 
HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> String * object_string = String : : cast ( HEAP - > Object_string ( ) ) ; <nl> Object * raw_object = Isolate : : Current ( ) - > context ( ) - > global_object ( ) - > <nl> GetProperty ( object_string ) - > ToObjectChecked ( ) ; <nl> TEST ( JSObjectCopy ) { <nl> <nl> <nl> TEST ( StringAllocation ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> const unsigned char chars [ ] = { 0xe5 , 0xa4 , 0xa7 } ; <nl> for ( int length = 0 ; length < 100 ; length + + ) { <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> char * non_ascii = NewArray < char > ( 3 * length + 1 ) ; <nl> char * ascii = NewArray < char > ( length + 1 ) ; <nl> non_ascii [ 3 * length ] = 0 ; <nl> static int ObjectsFoundInHeap ( Heap * heap , Handle < Object > objs [ ] , int size ) { <nl> <nl> <nl> TEST ( Iteration ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> / / Array of objects to scan haep for . <nl> const int objs_count = 6 ; <nl> TEST ( Iteration ) { <nl> <nl> <nl> TEST ( EmptyHandleEscapeFrom ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> Handle < JSObject > runaway ; <nl> <nl> { <nl> - v8 : : HandleScope nested ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope nested ( CcTest : : isolate ( ) ) ; <nl> Handle < JSObject > empty ; <nl> runaway = empty . EscapeFrom ( & nested ) ; <nl> } <nl> static int LenFromSize ( int size ) { <nl> <nl> TEST ( Regression39128 ) { <nl> / / Test case for crbug . com / 39128 . <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> / / Increase the chance of ' bump - the - pointer ' allocation in old space . <nl> HEAP - > CollectAllGarbage ( Heap : : kNoGCFlags ) ; <nl> <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> / / The plan : create JSObject which references objects in new space . <nl> / / Then clone this object ( forcing it to go into old space ) and check <nl> TEST ( TestCodeFlushing ) { <nl> / / If we do not flush code this test is invalid . <nl> if ( ! FLAG_flush_code ) return ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * source = " function foo ( ) { " <nl> " var x = 42 ; " <nl> " var y = 42 ; " <nl> TEST ( TestCodeFlushing ) { <nl> Handle < String > foo_name = FACTORY - > InternalizeUtf8String ( " foo " ) ; <nl> <nl> / / This compile will add the code to the compilation cache . <nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( source ) ; <nl> } <nl> <nl> TEST ( TestCodeFlushingIncremental ) { <nl> / / If we do not flush code this test is invalid . <nl> if ( ! FLAG_flush_code | | ! 
FLAG_flush_code_incrementally ) return ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * source = " function foo ( ) { " <nl> " var x = 42 ; " <nl> " var y = 42 ; " <nl> TEST ( TestCodeFlushingIncremental ) { <nl> Handle < String > foo_name = FACTORY - > InternalizeUtf8String ( " foo " ) ; <nl> <nl> / / This compile will add the code to the compilation cache . <nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( source ) ; <nl> } <nl> <nl> TEST ( TestCodeFlushingIncremental ) { <nl> CHECK ( ! function - > is_compiled ( ) | | function - > IsOptimized ( ) ) ; <nl> <nl> / / This compile will compile the function again . <nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( " foo ( ) ; " ) ; <nl> } <nl> <nl> TEST ( TestCodeFlushingIncremental ) { <nl> <nl> / / Force optimization while incremental marking is active and while <nl> / / the function is enqueued as a candidate . <nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( " % OptimizeFunctionOnNextCall ( foo ) ; foo ( ) ; " ) ; <nl> } <nl> <nl> TEST ( TestCodeFlushingIncrementalScavenge ) { <nl> / / If we do not flush code this test is invalid . <nl> if ( ! FLAG_flush_code | | ! FLAG_flush_code_incrementally ) return ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * source = " var foo = function ( ) { " <nl> " var x = 42 ; " <nl> " var y = 42 ; " <nl> TEST ( TestCodeFlushingIncrementalScavenge ) { <nl> HEAP - > CollectAllGarbage ( Heap : : kAbortIncrementalMarkingMask ) ; <nl> <nl> / / This compile will add the code to the compilation cache . <nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( source ) ; <nl> } <nl> <nl> TEST ( TestCodeFlushingIncrementalScavenge ) { <nl> CHECK ( function2 - > shared ( ) - > is_compiled ( ) ) ; <nl> <nl> / / Clear references to functions so that one of them can die . <nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( " foo = 0 ; bar = 0 ; " ) ; <nl> } <nl> <nl> TEST ( TestCodeFlushingIncrementalAbort ) { <nl> / / If we do not flush code this test is invalid . <nl> if ( ! FLAG_flush_code | | ! FLAG_flush_code_incrementally ) return ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * source = " function foo ( ) { " <nl> " var x = 42 ; " <nl> " var y = 42 ; " <nl> TEST ( TestCodeFlushingIncrementalAbort ) { <nl> Handle < String > foo_name = FACTORY - > InternalizeUtf8String ( " foo " ) ; <nl> <nl> / / This compile will add the code to the compilation cache . 
<nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( source ) ; <nl> } <nl> <nl> TEST ( TestCodeFlushingIncrementalAbort ) { <nl> # endif / / ENABLE_DEBUGGER_SUPPORT <nl> <nl> / / Force optimization now that code flushing is disabled . <nl> - { v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + { v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( " % OptimizeFunctionOnNextCall ( foo ) ; foo ( ) ; " ) ; <nl> } <nl> <nl> TEST ( TestSizeOfObjects ) { <nl> <nl> <nl> TEST ( TestSizeOfObjectsVsHeapIteratorPrecision ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> HEAP - > EnsureHeapIsIterable ( ) ; <nl> intptr_t size_of_objects_1 = HEAP - > SizeOfObjects ( ) ; <nl> HeapIterator iterator ( HEAP ) ; <nl> static void FillUpNewSpace ( NewSpace * new_space ) { <nl> <nl> <nl> TEST ( GrowAndShrinkNewSpace ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> NewSpace * new_space = HEAP - > new_space ( ) ; <nl> <nl> if ( HEAP - > ReservedSemiSpaceSize ( ) = = HEAP - > InitialSemiSpaceSize ( ) | | <nl> TEST ( GrowAndShrinkNewSpace ) { <nl> <nl> <nl> TEST ( CollectingAllAvailableGarbageShrinksNewSpace ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> if ( HEAP - > ReservedSemiSpaceSize ( ) = = HEAP - > InitialSemiSpaceSize ( ) | | <nl> HEAP - > MaxSemiSpaceSize ( ) = = HEAP - > InitialSemiSpaceSize ( ) ) { <nl> TEST ( CollectingAllAvailableGarbageShrinksNewSpace ) { <nl> return ; <nl> } <nl> <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> NewSpace * new_space = HEAP - > new_space ( ) ; <nl> intptr_t old_capacity , new_capacity ; <nl> old_capacity = new_space - > Capacity ( ) ; <nl> TEST ( InstanceOfStubWriteBarrier ) { <nl> i : : FLAG_verify_heap = true ; <nl> # endif <nl> <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! i : : V8 : : UseCrankshaft ( ) ) return ; <nl> if ( i : : FLAG_force_marking_deque_overflows ) return ; <nl> v8 : : HandleScope outer_scope ( v8 : : Isolate : : GetCurrent ( ) ) ; <nl> TEST ( InstanceOfStubWriteBarrier ) { <nl> <nl> <nl> TEST ( PrototypeTransitionClearing ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> CompileRun ( <nl> " var base = { } ; " <nl> TEST ( ResetSharedFunctionInfoCountersDuringIncrementalMarking ) { <nl> i : : FLAG_verify_heap = true ; <nl> # endif <nl> <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! i : : V8 : : UseCrankshaft ( ) ) return ; <nl> v8 : : HandleScope outer_scope ( v8 : : Isolate : : GetCurrent ( ) ) ; <nl> <nl> TEST ( ResetSharedFunctionInfoCountersDuringMarkSweep ) { <nl> i : : FLAG_verify_heap = true ; <nl> # endif <nl> <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! 
i : : V8 : : UseCrankshaft ( ) ) return ; <nl> - v8 : : HandleScope outer_scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope outer_scope ( CcTest : : isolate ( ) ) ; <nl> <nl> { <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CompileRun ( <nl> " function f ( ) { " <nl> " var s = 0 ; " <nl> TEST ( ResetSharedFunctionInfoCountersDuringMarkSweep ) { <nl> / / Test that HAllocateObject will always return an object in new - space . <nl> TEST ( OptimizedAllocationAlwaysInNewSpace ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! i : : V8 : : UseCrankshaft ( ) | | i : : FLAG_always_opt ) return ; <nl> if ( i : : FLAG_gc_global | | i : : FLAG_stress_compaction ) return ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> SimulateFullSpace ( HEAP - > new_space ( ) ) ; <nl> AlwaysAllocateScope always_allocate ; <nl> TEST ( OptimizedAllocationAlwaysInNewSpace ) { <nl> TEST ( OptimizedPretenuringArrayLiterals ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_pretenure_literals = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! i : : V8 : : UseCrankshaft ( ) | | i : : FLAG_always_opt ) return ; <nl> if ( i : : FLAG_gc_global | | i : : FLAG_stress_compaction ) return ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> AlwaysAllocateScope always_allocate ; <nl> v8 : : Local < v8 : : Value > res = CompileRun ( <nl> TEST ( OptimizedPretenuringArrayLiterals ) { <nl> / / Test regular array literals allocation . <nl> TEST ( OptimizedAllocationArrayLiterals ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> if ( ! i : : V8 : : UseCrankshaft ( ) | | i : : FLAG_always_opt ) return ; <nl> if ( i : : FLAG_gc_global | | i : : FLAG_stress_compaction ) return ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> AlwaysAllocateScope always_allocate ; <nl> v8 : : Local < v8 : : Value > res = CompileRun ( <nl> static int CountMapTransitions ( Map * map ) { <nl> TEST ( Regress1465 ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_trace_incremental_marking = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> static const int transitions_count = 256 ; <nl> <nl> { <nl> TEST ( Regress1465 ) { <nl> TEST ( Regress2143a ) { <nl> i : : FLAG_collect_maps = true ; <nl> i : : FLAG_incremental_marking = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> / / Prepare a map transition from the root object together with a yet <nl> / / untransitioned root object . 
<nl> TEST ( Regress2143b ) { <nl> i : : FLAG_collect_maps = true ; <nl> i : : FLAG_incremental_marking = true ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> / / Prepare a map transition from the root object together with a yet <nl> / / untransitioned root object . <nl> TEST ( ReleaseOverReservedPages ) { <nl> / / The optimizer can allocate stuff , messing up the test . <nl> i : : FLAG_crankshaft = false ; <nl> i : : FLAG_always_opt = false ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> static const int number_of_test_pages = 20 ; <nl> <nl> / / Prepare many pages with low live - bytes count . <nl> TEST ( ReleaseOverReservedPages ) { <nl> <nl> <nl> TEST ( Regress2237 ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> Handle < String > slice ( HEAP - > empty_string ( ) ) ; <nl> <nl> { <nl> / / Generate a parent that lives in new - space . <nl> - v8 : : HandleScope inner_scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope inner_scope ( CcTest : : isolate ( ) ) ; <nl> const char * c = " This text is long enough to trigger sliced strings . " ; <nl> Handle < String > s = FACTORY - > NewStringFromAscii ( CStrVector ( c ) ) ; <nl> CHECK ( s - > IsSeqOneByteString ( ) ) ; <nl> TEST ( Regress2237 ) { <nl> <nl> # ifdef OBJECT_PRINT <nl> TEST ( PrintSharedFunctionInfo ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> const char * source = " f = function ( ) { return 987654321 ; } \ n " <nl> " g = function ( ) { return 123456789 ; } \ n " ; <nl> CompileRun ( source ) ; <nl> TEST ( PrintSharedFunctionInfo ) { <nl> <nl> <nl> TEST ( Regress2211 ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> v8 : : Handle < v8 : : String > value = v8_str ( " val string " ) ; <nl> Smi * hash = Smi : : FromInt ( 321 ) ; <nl> TEST ( Regress2211 ) { <nl> <nl> TEST ( IncrementalMarkingClearsTypeFeedbackCells ) { <nl> if ( i : : FLAG_always_opt ) return ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : Local < v8 : : Value > fun1 , fun2 ; <nl> <nl> { <nl> static Code * FindFirstIC ( Code * code , Code : : Kind kind ) { <nl> <nl> TEST ( IncrementalMarkingPreservesMonomorhpicIC ) { <nl> if ( i : : FLAG_always_opt ) return ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> / / Prepare function f that contains a monomorphic IC for object <nl> / / originating from the same native context . 
<nl> TEST ( IncrementalMarkingPreservesMonomorhpicIC ) { <nl> <nl> TEST ( IncrementalMarkingClearsMonomorhpicIC ) { <nl> if ( i : : FLAG_always_opt ) return ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : Local < v8 : : Value > obj1 ; <nl> <nl> { <nl> TEST ( IncrementalMarkingClearsMonomorhpicIC ) { <nl> <nl> TEST ( IncrementalMarkingClearsPolymorhpicIC ) { <nl> if ( i : : FLAG_always_opt ) return ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : Local < v8 : : Value > obj1 , obj2 ; <nl> <nl> { <nl> void ReleaseStackTraceDataTest ( const char * source ) { <nl> / / after the first time the accessor is fired . We use external string <nl> / / to check whether the data is being released since the external string <nl> / / resource ' s callback is fired when the external string is GC ' ed . <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> SourceResource * resource = new SourceResource ( i : : StrDup ( source ) ) ; <nl> { <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : Handle < v8 : : String > source_string = v8 : : String : : NewExternal ( resource ) ; <nl> v8 : : Script : : Compile ( source_string ) - > Run ( ) ; <nl> CHECK ( ! resource - > IsDisposed ( ) ) ; <nl> TEST ( ReleaseStackTraceData ) { <nl> <nl> <nl> TEST ( Regression144230 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> TEST ( Regression144230 ) { <nl> TEST ( Regress159140 ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_flush_code_incrementally = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> TEST ( Regress159140 ) { <nl> TEST ( Regress165495 ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_flush_code_incrementally = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> TEST ( Regress169209 ) { <nl> i : : FLAG_stress_compaction = false ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_flush_code_incrementally = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> static inline void AllocateAllButNBytes ( v8 : : internal : : NewSpace * space , <nl> TEST ( Regress169928 ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_crankshaft = false ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> / / Some flags turn Scavenge collections into Mark - sweep collections <nl> / / and hence are incompatible with this test 
case . <nl> TEST ( Regress168801 ) { <nl> i : : FLAG_cache_optimized_code = false ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_flush_code_incrementally = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> TEST ( Regress173458 ) { <nl> i : : FLAG_cache_optimized_code = false ; <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_flush_code_incrementally = true ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> class DummyVisitor : public ObjectVisitor { <nl> <nl> <nl> TEST ( DeferredHandles ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> Heap * heap = isolate - > heap ( ) ; <nl> v8 : : HandleScope scope ; <nl> mmm a / test / cctest / test - log - stack - tracer . cc <nl> ppp b / test / cctest / test - log - stack - tracer . cc <nl> using v8 : : internal : : StackTracer ; <nl> using v8 : : internal : : TickSample ; <nl> <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - <nl> static struct { <nl> TickSample * sample ; <nl> } trace_env = { NULL } ; <nl> static TraceExtension kTraceExtension ; <nl> v8 : : DeclareExtension kTraceExtensionDeclaration ( & kTraceExtension ) ; <nl> <nl> <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - const char * extensions [ ] = { " v8 / trace " } ; <nl> - v8 : : ExtensionConfiguration config ( 1 , extensions ) ; <nl> - env = v8 : : Context : : New ( & config ) ; <nl> - } <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> - <nl> static bool IsAddressWithinFuncCode ( JSFunction * function , Address addr ) { <nl> i : : Code * code = function - > code ( ) ; <nl> return code - > contains ( addr ) ; <nl> } <nl> <nl> static bool IsAddressWithinFuncCode ( const char * func_name , Address addr ) { <nl> - v8 : : Local < v8 : : Value > func = env - > Global ( ) - > Get ( v8_str ( func_name ) ) ; <nl> + v8 : : Local < v8 : : Value > func = CcTest : : env ( ) - > Global ( ) - > Get ( v8_str ( func_name ) ) ; <nl> CHECK ( func - > IsFunction ( ) ) ; <nl> JSFunction * js_func = JSFunction : : cast ( * v8 : : Utils : : OpenHandle ( * func ) ) ; <nl> return IsAddressWithinFuncCode ( js_func , addr ) ; <nl> void CreateFramePointerGrabberConstructor ( const char * constructor_name ) { <nl> v8 : : FunctionTemplate : : New ( construct_call ) ; <nl> constructor_template - > SetClassName ( v8_str ( " FPGrabber " ) ) ; <nl> Local < Function > fun = constructor_template - > GetFunction ( ) ; <nl> - env - > Global ( ) - > Set ( v8_str ( constructor_name ) , fun ) ; <nl> + CcTest : : env ( ) - > Global ( ) - > Set ( v8_str ( constructor_name ) , fun ) ; <nl> } <nl> <nl> <nl> TEST ( CFromJSStackTrace ) { <nl> TickSample sample ; <nl> InitTraceEnv ( & sample ) ; <nl> <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( TRACE_EXTENSION ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> / / Create global function JSFuncDoTrace which calls <nl> / / extension function trace ( ) with the current frame pointer value . 
<nl> CreateTraceCallerFunction ( " JSFuncDoTrace " , " trace " ) ; <nl> TEST ( PureJSStackTrace ) { <nl> TickSample sample ; <nl> InitTraceEnv ( & sample ) ; <nl> <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( TRACE_EXTENSION ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> / / Create global function JSFuncDoTrace which calls <nl> / / extension function js_trace ( ) with the current frame pointer value . <nl> CreateTraceCallerFunction ( " JSFuncDoTrace " , " js_trace " ) ; <nl> static int CFunc ( int depth ) { <nl> TEST ( PureCStackTrace ) { <nl> TickSample sample ; <nl> InitTraceEnv ( & sample ) ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( TRACE_EXTENSION ) ; <nl> / / Check that sampler doesn ' t crash <nl> CHECK_EQ ( 10 , CFunc ( 10 ) ) ; <nl> } <nl> <nl> <nl> TEST ( JsEntrySp ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( TRACE_EXTENSION ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> CHECK_EQ ( 0 , GetJsEntrySp ( ) ) ; <nl> CompileRun ( " a = 1 ; b = a + 1 ; " ) ; <nl> CHECK_EQ ( 0 , GetJsEntrySp ( ) ) ; <nl> mmm a / test / cctest / test - mark - compact . cc <nl> ppp b / test / cctest / test - mark - compact . cc <nl> <nl> <nl> using namespace v8 : : internal ; <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) env = v8 : : Context : : New ( ) ; <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> <nl> TEST ( MarkingDeque ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> int mem_size = 20 * kPointerSize ; <nl> byte * mem = NewArray < byte > ( 20 * kPointerSize ) ; <nl> Address low = reinterpret_cast < Address > ( mem ) ; <nl> TEST ( Promotion ) { <nl> FLAG_always_compact = true ; <nl> HEAP - > ConfigureHeap ( 2 * 256 * KB , 8 * MB , 8 * MB ) ; <nl> <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> <nl> / / Allocate a fixed array in the new space . <nl> int array_size = <nl> TEST ( NoPromotion ) { <nl> <nl> / / Test the situation that some objects in new space are promoted to <nl> / / the old space <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> <nl> / / Do a mark compact GC to shrink the heap . 
<nl> HEAP - > CollectGarbage ( OLD_POINTER_SPACE ) ; <nl> TEST ( NoPromotion ) { <nl> <nl> <nl> TEST ( MarkCompactCollector ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> - v8 : : HandleScope sc ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope sc ( CcTest : : isolate ( ) ) ; <nl> / / call mark - compact when heap is empty <nl> HEAP - > CollectGarbage ( OLD_POINTER_SPACE ) ; <nl> <nl> static Handle < Map > CreateMap ( ) { <nl> <nl> TEST ( MapCompact ) { <nl> FLAG_max_map_space_pages = 16 ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> { <nl> v8 : : HandleScope sc ; <nl> static void GCEpilogueCallbackFunc ( ) { <nl> <nl> <nl> TEST ( GCCallback ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> <nl> HEAP - > SetGlobalGCPrologueCallback ( & GCPrologueCallbackFunc ) ; <nl> HEAP - > SetGlobalGCEpilogueCallback ( & GCEpilogueCallbackFunc ) ; <nl> static void WeakPointerCallback ( v8 : : Isolate * isolate , <nl> <nl> TEST ( ObjectGroups ) { <nl> FLAG_incremental_marking = false ; <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> GlobalHandles * global_handles = Isolate : : Current ( ) - > global_handles ( ) ; <nl> <nl> NumberOfWeakCalls = 0 ; <nl> - v8 : : HandleScope handle_scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope handle_scope ( CcTest : : isolate ( ) ) ; <nl> <nl> Handle < Object > g1s1 = <nl> global_handles - > Create ( HEAP - > AllocateFixedArray ( 1 ) - > ToObjectChecked ( ) ) ; <nl> class TestRetainedObjectInfo : public v8 : : RetainedObjectInfo { <nl> <nl> <nl> TEST ( EmptyObjectGroups ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> GlobalHandles * global_handles = Isolate : : Current ( ) - > global_handles ( ) ; <nl> <nl> - v8 : : HandleScope handle_scope ( env - > GetIsolate ( ) ) ; <nl> + v8 : : HandleScope handle_scope ( CcTest : : isolate ( ) ) ; <nl> <nl> Handle < Object > object = <nl> global_handles - > Create ( HEAP - > AllocateFixedArray ( 1 ) - > ToObjectChecked ( ) ) ; <nl> TEST ( BootUpMemoryUse ) { <nl> / / Only Linux has the proc filesystem and only if it is mapped . If it ' s not <nl> / / there we just skip the test . <nl> if ( initial_memory > = 0 ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> intptr_t delta = MemoryInUse ( ) - initial_memory ; <nl> printf ( " delta : % " V8_PTR_PREFIX " d kB \ n " , delta / 1024 ) ; <nl> if ( sizeof ( initial_memory ) = = 8 ) { / / 64 - bit . <nl> mmm a / test / cctest / test - strings . cc <nl> ppp b / test / cctest / test - strings . cc <nl> class RandomNumberGenerator { <nl> <nl> using namespace v8 : : internal ; <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . 
IsEmpty ( ) ) { <nl> - const char * extensions [ ] = { " v8 / print " } ; <nl> - v8 : : ExtensionConfiguration config ( 1 , extensions ) ; <nl> - env = v8 : : Context : : New ( & config ) ; <nl> - } <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> <nl> static const int DEEP_DEPTH = 8 * 1024 ; <nl> static const int SUPER_DEEP_DEPTH = 80 * 1024 ; <nl> static void TraverseFirst ( Handle < String > s1 , Handle < String > s2 , int chars ) { <nl> <nl> TEST ( Traverse ) { <nl> printf ( " TestTraverse \ n " ) ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> ZoneScope zone ( Isolate : : Current ( ) - > runtime_zone ( ) , DELETE_ON_EXIT ) ; <nl> ConsStringGenerationData data ( false ) ; <nl> Handle < String > flat = ConstructBalanced ( & data ) ; <nl> printf ( <nl> <nl> template < typename BuildString > <nl> void TestStringCharacterStream ( BuildString build , int test_cases ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope outer_scope ( isolate ) ; <nl> ZoneScope zone ( Isolate : : Current ( ) - > runtime_zone ( ) , DELETE_ON_EXIT ) ; <nl> static const int DEEP_ASCII_DEPTH = 100000 ; <nl> <nl> TEST ( DeepAscii ) { <nl> printf ( " TestDeepAscii \ n " ) ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> <nl> char * foo = NewArray < char > ( DEEP_ASCII_DEPTH ) ; <nl> for ( int i = 0 ; i < DEEP_ASCII_DEPTH ; i + + ) { <nl> TEST ( DeepAscii ) { <nl> <nl> TEST ( Utf8Conversion ) { <nl> / / Smoke test for converting strings to utf - 8 . <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope handle_scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope handle_scope ( CcTest : : isolate ( ) ) ; <nl> / / A simple ascii string <nl> const char * ascii_string = " abcdef12345 " ; <nl> int len = <nl> TEST ( Utf8Conversion ) { <nl> TEST ( ExternalShortStringAdd ) { <nl> ZoneScope zonescope ( Isolate : : Current ( ) - > runtime_zone ( ) , DELETE_ON_EXIT ) ; <nl> <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope handle_scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope handle_scope ( CcTest : : isolate ( ) ) ; <nl> Zone * zone = Isolate : : Current ( ) - > runtime_zone ( ) ; <nl> <nl> / / Make sure we cover all always - flat lengths and at least one above . <nl> TEST ( ExternalShortStringAdd ) { <nl> } <nl> <nl> / / Add the arrays with the short external strings in the global object . 
<nl> - v8 : : Handle < v8 : : Object > global = env - > Global ( ) ; <nl> + v8 : : Handle < v8 : : Object > global = CcTest : : env ( ) - > Global ( ) ; <nl> global - > Set ( v8_str ( " external_ascii " ) , ascii_external_strings ) ; <nl> global - > Set ( v8_str ( " external_non_ascii " ) , non_ascii_external_strings ) ; <nl> global - > Set ( v8_str ( " max_length " ) , v8 : : Integer : : New ( kMaxLength ) ) ; <nl> TEST ( CachedHashOverflow ) { <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> ZoneScope zone ( isolate - > runtime_zone ( ) , DELETE_ON_EXIT ) ; <nl> <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope handle_scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope handle_scope ( CcTest : : isolate ( ) ) ; <nl> / / Lines must be executed sequentially . Combining them into one script <nl> / / makes the bug go away . <nl> const char * lines [ ] = { <nl> TEST ( CachedHashOverflow ) { <nl> <nl> TEST ( SliceFromCons ) { <nl> FLAG_string_slices = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> Handle < String > string = <nl> FACTORY - > NewStringFromAscii ( CStrVector ( " parentparentparent " ) ) ; <nl> Handle < String > parent = FACTORY - > NewConsString ( string , string ) ; <nl> class AsciiVectorResource : public v8 : : String : : ExternalAsciiStringResource { <nl> <nl> TEST ( SliceFromExternal ) { <nl> FLAG_string_slices = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> AsciiVectorResource resource ( <nl> i : : Vector < const char > ( " abcdefghijklmnopqrstuvwxyz " , 26 ) ) ; <nl> Handle < String > string = FACTORY - > NewExternalStringFromAscii ( & resource ) ; <nl> TEST ( TrivialSlice ) { <nl> / / This tests whether a slice that contains the entire parent string <nl> / / actually creates a new string ( it should not ) . <nl> FLAG_string_slices = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : Local < v8 : : Value > result ; <nl> Handle < String > string ; <nl> const char * init = " var str = ' abcdefghijklmnopqrstuvwxyz ' ; " ; <nl> TEST ( SliceFromSlice ) { <nl> / / This tests whether a slice that contains the entire parent string <nl> / / actually creates a new string ( it should not ) . <nl> FLAG_string_slices = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : Local < v8 : : Value > result ; <nl> Handle < String > string ; <nl> const char * init = " var str = ' abcdefghijklmnopqrstuvwxyz ' ; " ; <nl> TEST ( RobustSubStringStub ) { <nl> / / This tests whether the SubStringStub can handle unsafe arguments . <nl> / / If not recognized , those unsafe arguments lead to out - of - bounds reads . 
<nl> FLAG_allow_natives_syntax = true ; <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> v8 : : Local < v8 : : Value > result ; <nl> Handle < String > string ; <nl> CompileRun ( " var short = ' abcdef ' ; " ) ; <nl> TEST ( RobustSubStringStub ) { <nl> <nl> TEST ( RegExpOverflow ) { <nl> / / Result string has the length 2 ^ 32 , causing a 32 - bit integer overflow . <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> LocalContext context ; <nl> v8 : : V8 : : IgnoreOutOfMemoryException ( ) ; <nl> v8 : : Local < v8 : : Value > result = CompileRun ( <nl> TEST ( RegExpOverflow ) { <nl> <nl> <nl> TEST ( StringReplaceAtomTwoByteResult ) { <nl> - InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( env - > GetIsolate ( ) ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> LocalContext context ; <nl> v8 : : Local < v8 : : Value > result = CompileRun ( <nl> " var subject = ' ascii ~ only ~ string ~ ' ; " <nl> mmm a / test / cctest / test - symbols . cc <nl> ppp b / test / cctest / test - symbols . cc <nl> <nl> <nl> using namespace v8 : : internal ; <nl> <nl> - static v8 : : Persistent < v8 : : Context > env ; <nl> - <nl> - static void InitializeVM ( ) { <nl> - if ( env . IsEmpty ( ) ) { <nl> - const char * extensions [ ] = { " v8 / print " } ; <nl> - v8 : : ExtensionConfiguration config ( 1 , extensions ) ; <nl> - env = v8 : : Context : : New ( & config ) ; <nl> - } <nl> - env - > Enter ( ) ; <nl> - } <nl> - <nl> <nl> TEST ( Create ) { <nl> - InitializeVM ( ) ; <nl> + CcTest : : InitializeVM ( ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope scope ( isolate ) ; <nl> <nl> | Unify the way cctest initalizes the VM for each test case . | v8/v8 | dd70ce29d1356cb3ab892bce35bb47b808acbaa3 | 2013-04-10T08:29:39Z |
mmm a / src / arm / builtins - arm . cc <nl> ppp b / src / arm / builtins - arm . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> mmm a / src / arm64 / builtins - arm64 . cc <nl> ppp b / src / arm64 / builtins - arm64 . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> mmm a / src / builtins . h <nl> ppp b / src / builtins . h <nl> inline bool operator & ( BuiltinExtraArguments lhs , BuiltinExtraArguments rhs ) { <nl> V ( JSConstructEntryTrampoline , BUILTIN , UNINITIALIZED , kNoExtraICState ) \ <nl> V ( ResumeGeneratorTrampoline , BUILTIN , UNINITIALIZED , kNoExtraICState ) \ <nl> V ( CompileLazy , BUILTIN , UNINITIALIZED , kNoExtraICState ) \ <nl> + V ( CompileBaseline , BUILTIN , UNINITIALIZED , kNoExtraICState ) \ <nl> V ( CompileOptimized , BUILTIN , UNINITIALIZED , kNoExtraICState ) \ <nl> V ( CompileOptimizedConcurrent , BUILTIN , UNINITIALIZED , kNoExtraICState ) \ <nl> V ( NotifyDeoptimized , BUILTIN , UNINITIALIZED , kNoExtraICState ) \ <nl> class Builtins { <nl> BuiltinExtraArguments extra_args ) ; <nl> static void Generate_ConstructedNonConstructable ( MacroAssembler * masm ) ; <nl> static void Generate_CompileLazy ( MacroAssembler * masm ) ; <nl> + static void Generate_CompileBaseline ( MacroAssembler * masm ) ; <nl> static void Generate_InOptimizationQueue ( MacroAssembler * masm ) ; <nl> static void Generate_CompileOptimized ( MacroAssembler * masm ) ; <nl> static void Generate_CompileOptimizedConcurrent ( MacroAssembler * masm ) ; <nl> mmm a / src / compiler . cc <nl> ppp b / src / compiler . cc <nl> <nl> # include " src / debug / debug . h " <nl> # include " src / debug / liveedit . h " <nl> # include " src / deoptimizer . h " <nl> + # include " src / frames - inl . h " <nl> # include " src / full - codegen / full - codegen . h " <nl> # include " src / interpreter / interpreter . h " <nl> # include " src / isolate - inl . h " <nl> MUST_USE_RESULT MaybeHandle < Code > GetUnoptimizedCode ( CompilationInfo * info ) { <nl> <nl> / / Compile either unoptimized code or bytecode for the interpreter . <nl> if ( ! CompileUnoptimizedCode ( info ) ) return MaybeHandle < Code > ( ) ; <nl> - RecordFunctionCompilation ( Logger : : LAZY_COMPILE_TAG , info ) ; <nl> <nl> / / Update the shared function info with the scope info . <nl> InstallSharedScopeInfo ( info , shared ) ; <nl> MUST_USE_RESULT MaybeHandle < Code > GetUnoptimizedCode ( CompilationInfo * info ) { <nl> / / Install compilation result on the shared function info <nl> InstallSharedCompilationResult ( info , shared ) ; <nl> <nl> + / / Record the function compilation event . 
<nl> + RecordFunctionCompilation ( Logger : : LAZY_COMPILE_TAG , info ) ; <nl> + <nl> return info - > code ( ) ; <nl> } <nl> <nl> MaybeHandle < Code > GetOptimizedCode ( Handle < JSFunction > function , <nl> return cached_code ; <nl> } <nl> <nl> - DCHECK ( AllowCompilation : : IsAllowed ( isolate ) ) ; <nl> - <nl> + / / Reset profiler ticks , function is no longer considered hot . <nl> if ( shared - > is_compiled ( ) ) { <nl> shared - > code ( ) - > set_profiler_ticks ( 0 ) ; <nl> } <nl> MaybeHandle < Code > GetOptimizedCode ( Handle < JSFunction > function , <nl> return MaybeHandle < Code > ( ) ; <nl> } <nl> <nl> + class InterpreterActivationsFinder : public ThreadVisitor { <nl> + public : <nl> + SharedFunctionInfo * shared_ ; <nl> + bool has_activations_ ; <nl> + <nl> + explicit InterpreterActivationsFinder ( SharedFunctionInfo * shared ) <nl> + : shared_ ( shared ) , has_activations_ ( false ) { } <nl> + <nl> + void VisitThread ( Isolate * isolate , ThreadLocalTop * top ) { <nl> + JavaScriptFrameIterator it ( isolate , top ) ; <nl> + for ( ; ! it . done ( ) & & ! has_activations_ ; it . Advance ( ) ) { <nl> + JavaScriptFrame * frame = it . frame ( ) ; <nl> + if ( ! frame - > is_interpreted ( ) ) continue ; <nl> + if ( frame - > function ( ) - > shared ( ) = = shared_ ) has_activations_ = true ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + bool HasInterpreterActivations ( Isolate * isolate , SharedFunctionInfo * shared ) { <nl> + InterpreterActivationsFinder activations_finder ( shared ) ; <nl> + activations_finder . VisitThread ( isolate , isolate - > thread_local_top ( ) ) ; <nl> + isolate - > thread_manager ( ) - > IterateArchivedThreads ( & activations_finder ) ; <nl> + return activations_finder . has_activations_ ; <nl> + } <nl> + <nl> + MaybeHandle < Code > GetBaselineCode ( Handle < JSFunction > function ) { <nl> + Isolate * isolate = function - > GetIsolate ( ) ; <nl> + VMState < COMPILER > state ( isolate ) ; <nl> + PostponeInterruptsScope postpone ( isolate ) ; <nl> + CompilationInfoWithZone info ( function ) ; <nl> + <nl> + / / Reset profiler ticks , function is no longer considered hot . <nl> + if ( function - > shared ( ) - > HasBytecodeArray ( ) ) { <nl> + function - > shared ( ) - > set_profiler_ticks ( 0 ) ; <nl> + } <nl> + <nl> + / / We do not switch to baseline code when the debugger might have created a <nl> + / / copy of the bytecode with break slots to be able to set break points . <nl> + if ( function - > shared ( ) - > HasDebugInfo ( ) ) { <nl> + return MaybeHandle < Code > ( ) ; <nl> + } <nl> + <nl> + / / TODO ( 4280 ) : For now we do not switch generators to baseline code because <nl> + / / there might be suspended activations stored in generator objects on the <nl> + / / heap . We could eventually go directly to TurboFan in this case . <nl> + if ( function - > shared ( ) - > is_generator ( ) ) { <nl> + return MaybeHandle < Code > ( ) ; <nl> + } <nl> + <nl> + / / TODO ( 4280 ) : For now we disable switching to baseline code in the presence <nl> + / / of interpreter activations of the given function . The reasons are : <nl> + / / 1 ) The debugger assumes each function is either full - code or bytecode . <nl> + / / 2 ) The underlying bytecode is cleared below , breaking stack unwinding . 
<nl> + if ( HasInterpreterActivations ( isolate , function - > shared ( ) ) ) { <nl> + if ( FLAG_trace_opt ) { <nl> + OFStream os ( stdout ) ; <nl> + os < < " [ unable to switch " < < Brief ( * function ) < < " due to activations ] " <nl> + < < std : : endl ; <nl> + } <nl> + return MaybeHandle < Code > ( ) ; <nl> + } <nl> + <nl> + if ( FLAG_trace_opt ) { <nl> + OFStream os ( stdout ) ; <nl> + os < < " [ switching method " < < Brief ( * function ) < < " to baseline code ] " <nl> + < < std : : endl ; <nl> + } <nl> + <nl> + / / Parse and update CompilationInfo with the results . <nl> + if ( ! Parser : : ParseStatic ( info . parse_info ( ) ) ) return MaybeHandle < Code > ( ) ; <nl> + Handle < SharedFunctionInfo > shared = info . shared_info ( ) ; <nl> + DCHECK_EQ ( shared - > language_mode ( ) , info . literal ( ) - > language_mode ( ) ) ; <nl> + <nl> + / / Compile baseline code using the full code generator . <nl> + if ( ! Compiler : : Analyze ( info . parse_info ( ) ) | | <nl> + ! FullCodeGenerator : : MakeCode ( & info ) ) { <nl> + if ( ! isolate - > has_pending_exception ( ) ) isolate - > StackOverflow ( ) ; <nl> + return MaybeHandle < Code > ( ) ; <nl> + } <nl> + <nl> + / / TODO ( 4280 ) : For now we play it safe and remove the bytecode array when we <nl> + / / switch to baseline code . We might consider keeping around the bytecode so <nl> + / / that it can be used as the " source of truth " eventually . <nl> + shared - > ClearBytecodeArray ( ) ; <nl> + <nl> + / / Update the shared function info with the scope info . <nl> + InstallSharedScopeInfo ( & info , shared ) ; <nl> + <nl> + / / Install compilation result on the shared function info <nl> + InstallSharedCompilationResult ( & info , shared ) ; <nl> + <nl> + / / Record the function compilation event . <nl> + RecordFunctionCompilation ( Logger : : LAZY_COMPILE_TAG , & info ) ; <nl> + <nl> + return info . code ( ) ; <nl> + } <nl> + <nl> MaybeHandle < Code > GetLazyCode ( Handle < JSFunction > function ) { <nl> Isolate * isolate = function - > GetIsolate ( ) ; <nl> DCHECK ( ! isolate - > has_pending_exception ( ) ) ; <nl> bool Compiler : : Compile ( Handle < JSFunction > function , ClearExceptionFlag flag ) { <nl> return true ; <nl> } <nl> <nl> + bool Compiler : : CompileBaseline ( Handle < JSFunction > function ) { <nl> + Isolate * isolate = function - > GetIsolate ( ) ; <nl> + DCHECK ( AllowCompilation : : IsAllowed ( isolate ) ) ; <nl> + <nl> + / / Start a compilation . <nl> + Handle < Code > code ; <nl> + if ( ! GetBaselineCode ( function ) . ToHandle ( & code ) ) { <nl> + / / Baseline generation failed , get unoptimized code . <nl> + DCHECK ( function - > shared ( ) - > is_compiled ( ) ) ; <nl> + code = handle ( function - > shared ( ) - > code ( ) ) ; <nl> + isolate - > clear_pending_exception ( ) ; <nl> + } <nl> + <nl> + / / Install code on closure . <nl> + function - > ReplaceCode ( * code ) ; <nl> + <nl> + / / Check postconditions on success . <nl> + DCHECK ( ! isolate - > has_pending_exception ( ) ) ; <nl> + DCHECK ( function - > shared ( ) - > is_compiled ( ) ) ; <nl> + DCHECK ( function - > is_compiled ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> bool Compiler : : CompileOptimized ( Handle < JSFunction > function , <nl> ConcurrencyMode mode ) { <nl> if ( function - > IsOptimized ( ) ) return true ; <nl> mmm a / src / compiler . h <nl> ppp b / src / compiler . h <nl> class Compiler : public AllStatic { <nl> / / given function holds ( except for live - edit , which compiles the world ) . 
<nl> <nl> static bool Compile ( Handle < JSFunction > function , ClearExceptionFlag flag ) ; <nl> + static bool CompileBaseline ( Handle < JSFunction > function ) ; <nl> static bool CompileOptimized ( Handle < JSFunction > function , ConcurrencyMode ) ; <nl> static bool CompileDebugCode ( Handle < JSFunction > function ) ; <nl> static bool CompileDebugCode ( Handle < SharedFunctionInfo > shared ) ; <nl> mmm a / src / ia32 / builtins - ia32 . cc <nl> ppp b / src / ia32 / builtins - ia32 . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> mmm a / src / mips / builtins - mips . cc <nl> ppp b / src / mips / builtins - mips . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> mmm a / src / mips64 / builtins - mips64 . cc <nl> ppp b / src / mips64 / builtins - mips64 . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> mmm a / src / objects - inl . h <nl> ppp b / src / objects - inl . h <nl> bool SharedFunctionInfo : : is_compiled ( ) { <nl> Builtins * builtins = GetIsolate ( ) - > builtins ( ) ; <nl> DCHECK ( code ( ) ! = builtins - > builtin ( Builtins : : kCompileOptimizedConcurrent ) ) ; <nl> DCHECK ( code ( ) ! = builtins - > builtin ( Builtins : : kCompileOptimized ) ) ; <nl> + DCHECK ( code ( ) ! = builtins - > builtin ( Builtins : : kCompileBaseline ) ) ; <nl> return code ( ) ! = builtins - > builtin ( Builtins : : kCompileLazy ) ; <nl> } <nl> <nl> bool JSFunction : : IsOptimized ( ) { <nl> return code ( ) - > kind ( ) = = Code : : OPTIMIZED_FUNCTION ; <nl> } <nl> <nl> + bool JSFunction : : IsMarkedForBaseline ( ) { <nl> + return code ( ) = = <nl> + GetIsolate ( ) - > builtins ( ) - > builtin ( Builtins : : kCompileBaseline ) ; <nl> + } <nl> <nl> bool JSFunction : : IsMarkedForOptimization ( ) { <nl> return code ( ) = = GetIsolate ( ) - > builtins ( ) - > builtin ( <nl> Object * JSFunction : : prototype ( ) { <nl> bool JSFunction : : is_compiled ( ) { <nl> Builtins * builtins = GetIsolate ( ) - > builtins ( ) ; <nl> return code ( ) ! = builtins - > builtin ( Builtins : : kCompileLazy ) & & <nl> + code ( ) ! = builtins - > builtin ( Builtins : : kCompileBaseline ) & & <nl> code ( ) ! = builtins - > builtin ( Builtins : : kCompileOptimized ) & & <nl> code ( ) ! = builtins - > builtin ( Builtins : : kCompileOptimizedConcurrent ) ; <nl> } <nl> mmm a / src / objects . 
cc <nl> ppp b / src / objects . cc <nl> bool JSFunction : : Inlines ( SharedFunctionInfo * candidate ) { <nl> return false ; <nl> } <nl> <nl> + void JSFunction : : MarkForBaseline ( ) { <nl> + Isolate * isolate = GetIsolate ( ) ; <nl> + set_code_no_write_barrier ( <nl> + isolate - > builtins ( ) - > builtin ( Builtins : : kCompileBaseline ) ) ; <nl> + / / No write barrier required , since the builtin is part of the root set . <nl> + } <nl> <nl> void JSFunction : : MarkForOptimization ( ) { <nl> Isolate * isolate = GetIsolate ( ) ; <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class JSFunction : public JSObject { <nl> / / Tells whether or not this function has been optimized . <nl> inline bool IsOptimized ( ) ; <nl> <nl> - / / Mark this function for lazy recompilation . The function will be <nl> - / / recompiled the next time it is executed . <nl> + / / Mark this function for lazy recompilation . The function will be recompiled <nl> + / / the next time it is executed . <nl> + void MarkForBaseline ( ) ; <nl> void MarkForOptimization ( ) ; <nl> void AttemptConcurrentOptimization ( ) ; <nl> <nl> - / / Tells whether or not the function is already marked for lazy <nl> - / / recompilation . <nl> + / / Tells whether or not the function is already marked for lazy recompilation . <nl> + inline bool IsMarkedForBaseline ( ) ; <nl> inline bool IsMarkedForOptimization ( ) ; <nl> inline bool IsMarkedForConcurrentOptimization ( ) ; <nl> <nl> mmm a / src / ppc / builtins - ppc . cc <nl> ppp b / src / ppc / builtins - ppc . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> mmm a / src / runtime - profiler . cc <nl> ppp b / src / runtime - profiler . cc <nl> void RuntimeProfiler : : Optimize ( JSFunction * function , const char * reason ) { <nl> PrintF ( " ] \ n " ) ; <nl> } <nl> <nl> - function - > AttemptConcurrentOptimization ( ) ; <nl> + if ( function - > shared ( ) - > HasBytecodeArray ( ) ) { <nl> + function - > MarkForBaseline ( ) ; <nl> + } else { <nl> + function - > AttemptConcurrentOptimization ( ) ; <nl> + } <nl> } <nl> <nl> <nl> void RuntimeProfiler : : MaybeOptimizeIgnition ( JSFunction * function , <nl> / / TODO ( rmcilroy ) : Consider whether we should optimize small functions when <nl> / / they are first seen on the stack ( e . g . , kMaxSizeEarlyOpt ) . <nl> <nl> - if ( ! frame_optimized & & ( function - > IsMarkedForOptimization ( ) | | <nl> + if ( ! frame_optimized & & ( function - > IsMarkedForBaseline ( ) | | <nl> + function - > IsMarkedForOptimization ( ) | | <nl> function - > IsMarkedForConcurrentOptimization ( ) | | <nl> function - > IsOptimized ( ) ) ) { <nl> / / TODO ( rmcilroy ) : Support OSR in these cases . <nl> void RuntimeProfiler : : MarkCandidatesForOptimization ( ) { <nl> } <nl> } <nl> <nl> - if ( FLAG_ignition ) { <nl> + if ( frame - > is_interpreted ( ) ) { <nl> MaybeOptimizeIgnition ( function , frame - > is_optimized ( ) ) ; <nl> } else { <nl> MaybeOptimizeFullCodegen ( function , frame_count , frame - > is_optimized ( ) ) ; <nl> mmm a / src / runtime - profiler . h <nl> ppp b / src / runtime - profiler . h <nl> <nl> # include " src / allocation . 
h " <nl> <nl> namespace v8 { <nl> - <nl> - namespace base { <nl> - class Semaphore ; <nl> - } <nl> - <nl> namespace internal { <nl> <nl> class Isolate ; <nl> class JSFunction ; <nl> - class Object ; <nl> <nl> class RuntimeProfiler { <nl> public : <nl> mmm a / src / runtime / runtime - compiler . cc <nl> ppp b / src / runtime / runtime - compiler . cc <nl> namespace internal { <nl> <nl> RUNTIME_FUNCTION ( Runtime_CompileLazy ) { <nl> HandleScope scope ( isolate ) ; <nl> - DCHECK ( args . length ( ) = = 1 ) ; <nl> + DCHECK_EQ ( 1 , args . length ( ) ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( JSFunction , function , 0 ) ; <nl> <nl> # ifdef DEBUG <nl> RUNTIME_FUNCTION ( Runtime_CompileLazy ) { <nl> return function - > code ( ) ; <nl> } <nl> <nl> + RUNTIME_FUNCTION ( Runtime_CompileBaseline ) { <nl> + HandleScope scope ( isolate ) ; <nl> + DCHECK_EQ ( 1 , args . length ( ) ) ; <nl> + CONVERT_ARG_HANDLE_CHECKED ( JSFunction , function , 0 ) ; <nl> + StackLimitCheck check ( isolate ) ; <nl> + if ( check . JsHasOverflowed ( 1 * KB ) ) return isolate - > StackOverflow ( ) ; <nl> + if ( ! Compiler : : CompileBaseline ( function ) ) { <nl> + return isolate - > heap ( ) - > exception ( ) ; <nl> + } <nl> + DCHECK ( function - > is_compiled ( ) ) ; <nl> + return function - > code ( ) ; <nl> + } <nl> <nl> RUNTIME_FUNCTION ( Runtime_CompileOptimized_Concurrent ) { <nl> HandleScope scope ( isolate ) ; <nl> - DCHECK ( args . length ( ) = = 1 ) ; <nl> + DCHECK_EQ ( 1 , args . length ( ) ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( JSFunction , function , 0 ) ; <nl> StackLimitCheck check ( isolate ) ; <nl> if ( check . JsHasOverflowed ( 1 * KB ) ) return isolate - > StackOverflow ( ) ; <nl> RUNTIME_FUNCTION ( Runtime_CompileOptimized_Concurrent ) { <nl> <nl> RUNTIME_FUNCTION ( Runtime_CompileOptimized_NotConcurrent ) { <nl> HandleScope scope ( isolate ) ; <nl> - DCHECK ( args . length ( ) = = 1 ) ; <nl> + DCHECK_EQ ( 1 , args . length ( ) ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( JSFunction , function , 0 ) ; <nl> StackLimitCheck check ( isolate ) ; <nl> if ( check . JsHasOverflowed ( 1 * KB ) ) return isolate - > StackOverflow ( ) ; <nl> mmm a / src / runtime / runtime . h <nl> ppp b / src / runtime / runtime . h <nl> namespace internal { <nl> <nl> # define FOR_EACH_INTRINSIC_COMPILER ( F ) \ <nl> F ( CompileLazy , 1 , 1 ) \ <nl> + F ( CompileBaseline , 1 , 1 ) \ <nl> F ( CompileOptimized_Concurrent , 1 , 1 ) \ <nl> F ( CompileOptimized_NotConcurrent , 1 , 1 ) \ <nl> F ( NotifyStubFailure , 0 , 1 ) \ <nl> mmm a / src / s390 / builtins - s390 . cc <nl> ppp b / src / s390 / builtins - s390 . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> + <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> Runtime : : kCompileOptimized_NotConcurrent ) ; <nl> mmm a / src / x64 / builtins - x64 . cc <nl> ppp b / src / x64 / builtins - x64 . 
cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> mmm a / src / x87 / builtins - x87 . cc <nl> ppp b / src / x87 / builtins - x87 . cc <nl> void Builtins : : Generate_CompileLazy ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileLazy ) ; <nl> } <nl> <nl> + void Builtins : : Generate_CompileBaseline ( MacroAssembler * masm ) { <nl> + GenerateTailCallToReturnedCode ( masm , Runtime : : kCompileBaseline ) ; <nl> + } <nl> <nl> void Builtins : : Generate_CompileOptimized ( MacroAssembler * masm ) { <nl> GenerateTailCallToReturnedCode ( masm , <nl> | [ compiler ] Add baseline tier to compilation pipeline . | v8/v8 | 3fc0224cfc65a238ed83b6a4823f2eae0060aabf | 2016-04-25T10:48:34Z |
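For illustration (not part of the patch): the tiering decision the commit adds to RuntimeProfiler::Optimize, shown as a stand-alone C++ sketch. The method names are the ones in the diff; the surrounding declaration is simplified.

    // Hot functions that still execute from bytecode are first promoted to the
    // baseline (full-codegen) tier; everything else keeps the old path.
    void Optimize(JSFunction* function) {
      if (function->shared()->HasBytecodeArray()) {
        function->MarkForBaseline();                // installs the CompileBaseline builtin as the code entry
      } else {
        function->AttemptConcurrentOptimization();  // previous behaviour for full-codegen functions
      }
    }

MarkForBaseline() mirrors MarkForOptimization(): it swaps the function's code to a builtin (kCompileBaseline) whose only job is to tail-call the Runtime_CompileBaseline runtime function, which in turn calls Compiler::CompileBaseline.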
mmm a / cocos / renderer / CCRenderer . cpp <nl> ppp b / cocos / renderer / CCRenderer . cpp <nl> void RenderQueue : : clear ( ) <nl> _queuePosZ . clear ( ) ; <nl> } <nl> <nl> + void RenderQueue : : saveRenderState ( ) <nl> + { <nl> + _isDepthEnabled = glIsEnabled ( GL_DEPTH_TEST ) ; <nl> + _isCullEnabled = glIsEnabled ( GL_CULL_FACE ) ; <nl> + glGetBooleanv ( GL_DEPTH_WRITEMASK , & _isDepthWrite ) ; <nl> + <nl> + CHECK_GL_ERROR_DEBUG ( ) ; <nl> + } <nl> + <nl> + void RenderQueue : : restoreRenderState ( ) <nl> + { <nl> + if ( _isCullEnabled ) <nl> + { <nl> + glEnable ( GL_CULL_FACE ) ; <nl> + } <nl> + else <nl> + { <nl> + glDisable ( GL_CULL_FACE ) ; <nl> + } <nl> + <nl> + <nl> + if ( _isDepthEnabled ) <nl> + { <nl> + glEnable ( GL_DEPTH_TEST ) ; <nl> + } <nl> + else <nl> + { <nl> + glDisable ( GL_DEPTH_TEST ) ; <nl> + } <nl> + <nl> + glDepthMask ( _isDepthWrite ) ; <nl> + <nl> + CHECK_GL_ERROR_DEBUG ( ) ; <nl> + } <nl> + <nl> / / <nl> / / <nl> / / <nl> void Renderer : : processRenderCommand ( RenderCommand * command ) <nl> } <nl> } <nl> <nl> - void Renderer : : visitRenderQueue ( const RenderQueue & queue ) <nl> + void Renderer : : visitRenderQueue ( RenderQueue & queue ) <nl> { <nl> ssize_t size = queue . size ( ) ; <nl> <nl> + queue . saveRenderState ( ) ; <nl> + <nl> / / Process Opaque Object <nl> const std : : vector < RenderCommand * > & opaqueQueue = queue . getOpaqueCommands ( ) ; <nl> if ( opaqueQueue . size ( ) > 0 ) <nl> void Renderer : : visitRenderQueue ( const RenderQueue & queue ) <nl> processRenderCommand ( queue [ index ] ) ; <nl> } <nl> flush ( ) ; <nl> + <nl> + queue . restoreRenderState ( ) ; <nl> } <nl> <nl> void Renderer : : render ( ) <nl> mmm a / cocos / renderer / CCRenderer . h <nl> ppp b / cocos / renderer / CCRenderer . h <nl> class RenderQueue { <nl> ssize_t getOpaqueQueueSize ( ) const { return _queue3DOpaque . size ( ) ; } <nl> const std : : vector < RenderCommand * > & getOpaqueCommands ( ) const { return _queue3DOpaque ; } <nl> <nl> + void saveRenderState ( ) ; <nl> + void restoreRenderState ( ) ; <nl> + <nl> protected : <nl> std : : vector < RenderCommand * > _queue3DOpaque ; <nl> std : : vector < RenderCommand * > _queue3DTransparent ; <nl> std : : vector < RenderCommand * > _queueNegZ ; <nl> std : : vector < RenderCommand * > _queue0 ; <nl> std : : vector < RenderCommand * > _queuePosZ ; <nl> + <nl> + / / Render State related <nl> + bool _isCullEnabled ; <nl> + bool _isDepthEnabled ; <nl> + GLboolean _isDepthWrite ; <nl> } ; <nl> <nl> struct RenderStackElement <nl> class CC_DLL Renderer <nl> void flushTriangles ( ) ; <nl> <nl> void processRenderCommand ( RenderCommand * command ) ; <nl> - void visitRenderQueue ( const RenderQueue & queue ) ; <nl> + void visitRenderQueue ( RenderQueue & queue ) ; <nl> <nl> void fillVerticesAndIndices ( const TrianglesCommand * cmd ) ; <nl> void fillQuads ( const QuadCommand * cmd ) ; <nl> | Merge pull request from darkdukey / saveRenderState | cocos2d/cocos2d-x | 2be140fcccb2130ac65ea70a73bbfb0e1cad0c10 | 2015-01-27T19:12:05Z |
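As a design note (not in the patch): the commit brackets visitRenderQueue with explicit saveRenderState()/restoreRenderState() calls. Below is a hedged C++ sketch of the same idea expressed as a scope guard, using the same GL queries the commit caches; the struct name is hypothetical.

    struct GLStateGuard {
        GLboolean cull, depth, depthWrite;
        GLStateGuard() {
            cull  = glIsEnabled(GL_CULL_FACE);
            depth = glIsEnabled(GL_DEPTH_TEST);
            glGetBooleanv(GL_DEPTH_WRITEMASK, &depthWrite);
        }
        ~GLStateGuard() {
            cull  ? glEnable(GL_CULL_FACE)  : glDisable(GL_CULL_FACE);
            depth ? glEnable(GL_DEPTH_TEST) : glDisable(GL_DEPTH_TEST);
            glDepthMask(depthWrite);  // restore the cached depth-write mask
        }
    };

    // Usage: the destructor restores the GL state even if an early return is added later.
    // GLStateGuard guard;
    // ... process opaque / transparent / 2D queues, flush() ...

The explicit save/restore pair used in the patch keeps the RenderQueue API symmetrical with its other methods, at the cost of requiring both calls on every path through visitRenderQueue.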
mmm a / emcc . py <nl> ppp b / emcc . py <nl> def check_human_readable_list ( items ) : <nl> passes + = parse_passes ( shared . Settings . BINARYEN_EXTRA_PASSES ) <nl> options . binaryen_passes = passes <nl> <nl> - # to bootstrap struct_info , we need binaryen <nl> - os . environ [ ' EMCC_WASM_BACKEND_BINARYEN ' ] = ' 1 ' <nl> - <nl> # run safe - heap as a binaryen pass in fastcomp wasm , while in the wasm backend we <nl> # run it in binaryen_passes so that it can be synchronized with the sbrk ptr <nl> if shared . Settings . SAFE_HEAP and shared . Building . is_wasm_only ( ) and not shared . Settings . WASM_BACKEND : <nl> mmm a / tools / gen_struct_info . py <nl> ppp b / tools / gen_struct_info . py <nl> def inspect_code ( headers , cpp_opts , structs , defines ) : <nl> if opt in safe_env : <nl> del safe_env [ opt ] <nl> <nl> - # Use binaryen , if necessary <nl> - binaryen = os . environ . get ( ' EMCC_WASM_BACKEND_BINARYEN ' ) <nl> - if binaryen : <nl> - cpp_opts + = [ ' - s ' , ' BINARYEN = 1 ' ] <nl> - <nl> info = [ ] <nl> # Compile the program . <nl> show ( ' Compiling generated code . . . ' ) <nl> # - Oz optimizes enough to avoid warnings on code size / num locals <nl> - cmd = [ shared . PYTHON , shared . EMCC ] + cpp_opts + [ ' - o ' , js_file [ 1 ] , src_file [ 1 ] , ' - s ' , ' BOOTSTRAPPING_STRUCT_INFO = 1 ' , ' - s ' , ' WARN_ON_UNDEFINED_SYMBOLS = 0 ' , ' - O0 ' , ' - - js - opts ' , ' 0 ' , ' - - memory - init - file ' , ' 0 ' , ' - s ' , ' SINGLE_FILE = 1 ' , ' - Wno - format ' ] <nl> + cmd = [ shared . PYTHON , shared . EMCC ] + cpp_opts + [ ' - o ' , js_file [ 1 ] , src_file [ 1 ] , <nl> + ' - O0 ' , ' - - js - opts ' , ' 0 ' , ' - - memory - init - file ' , ' 0 ' , <nl> + ' - Werror ' , ' - Wno - format ' , <nl> + ' - s ' , ' BOOTSTRAPPING_STRUCT_INFO = 1 ' , <nl> + ' - s ' , ' WARN_ON_UNDEFINED_SYMBOLS = 0 ' , <nl> + ' - s ' , ' STRICT = 1 ' , <nl> + ' - s ' , ' SINGLE_FILE = 1 ' ] <nl> if not shared . Settings . WASM_BACKEND : <nl> + # Avoid the binaryen dependency if we are only using fastcomp <nl> cmd + = [ ' - s ' , ' WASM = 0 ' ] <nl> if shared . Settings . LTO : <nl> cmd + = [ ' - flto = ' + shared . Settings . LTO ] <nl> def inspect_code ( headers , cpp_opts , structs , defines ) : <nl> show ( cmd ) <nl> try : <nl> subprocess . check_call ( cmd , env = safe_env ) <nl> - except subprocess . CalledProcessError : <nl> - sys . stderr . write ( ' FAIL : Compilation failed ! \ n ' ) <nl> + except subprocess . CalledProcessError as e : <nl> + sys . stderr . write ( ' FAIL : Compilation failed ! : % s \ n ' % e . cmd ) <nl> sys . exit ( 1 ) <nl> <nl> # Run the compiled program . <nl> | Remove EMCC_WASM_BACKEND_BINARYEN environment variable ( ) | emscripten-core/emscripten | 8bccf16ca358dfe42659c025f15635ce99b99dd5 | 2020-04-25T01:05:56Z |
mmm a / tensorflow / contrib / distributions / python / kernel_tests / bernoulli_test . py <nl> ppp b / tensorflow / contrib / distributions / python / kernel_tests / bernoulli_test . py <nl> def testPmfCorrectBroadcastDynamicShape ( self ) : <nl> p : [ 0 . 2 , 0 . 3 , 0 . 4 ] <nl> } ) , [ [ 0 . 2 , 0 . 7 , 0 . 4 ] ] ) <nl> <nl> + def testPmfInvalid ( self ) : <nl> + p = [ 0 . 1 , 0 . 2 , 0 . 7 ] <nl> + with self . test_session ( ) : <nl> + dist = bernoulli . Bernoulli ( probs = p , validate_args = True ) <nl> + with self . assertRaisesOpError ( " must be non - negative . " ) : <nl> + dist . prob ( [ 1 , 1 , - 1 ] ) . eval ( ) <nl> + with self . assertRaisesOpError ( " is not less than or equal to 1 . " ) : <nl> + dist . prob ( [ 2 , 0 , 1 ] ) . eval ( ) <nl> + <nl> def testPmfWithP ( self ) : <nl> p = [ [ 0 . 2 , 0 . 4 ] , [ 0 . 3 , 0 . 6 ] ] <nl> self . _testPmf ( probs = p ) <nl> mmm a / tensorflow / contrib / distributions / python / ops / bernoulli . py <nl> ppp b / tensorflow / contrib / distributions / python / ops / bernoulli . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_shape <nl> from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import check_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import nn <nl> def _sample_n ( self , n , seed = None ) : <nl> return math_ops . cast ( sample , self . dtype ) <nl> <nl> def _log_prob ( self , event ) : <nl> + event = self . _maybe_assert_valid_sample ( event ) <nl> # TODO ( jaana ) : The current sigmoid_cross_entropy_with_logits has <nl> # inconsistent behavior for logits = inf / - inf . <nl> event = math_ops . cast ( event , self . logits . dtype ) <nl> def _mode ( self ) : <nl> " " " Returns ` 1 ` if ` prob > 0 . 5 ` and ` 0 ` otherwise . " " " <nl> return math_ops . cast ( self . probs > 0 . 5 , self . dtype ) <nl> <nl> + def _maybe_assert_valid_sample ( self , event , check_integer = True ) : <nl> + if not self . validate_args : <nl> + return event <nl> + event = distribution_util . embed_check_nonnegative_discrete ( <nl> + event , check_integer = check_integer ) <nl> + return control_flow_ops . with_dependencies ( [ <nl> + check_ops . assert_less_equal ( <nl> + event , array_ops . ones_like ( event ) , <nl> + message = " event is not less than or equal to 1 . " ) , <nl> + ] , event ) <nl> + <nl> <nl> class BernoulliWithSigmoidProbs ( Bernoulli ) : <nl> " " " Bernoulli with ` probs = nn . sigmoid ( logits ) ` . " " " <nl> | Add check op for assert valid sample in Bernoulli for contrib . distributions ( ) | tensorflow/tensorflow | 8cabe77fabd6eb395b9d751c82c18f32b8ce492d | 2017-04-11T01:25:02Z |
mmm a / include / swift / Reflection / TypeRef . h <nl> ppp b / include / swift / Reflection / TypeRef . h <nl> class BuiltinTypeRef final : public TypeRef { <nl> <nl> class NominalTypeTrait { <nl> std : : string MangledName ; <nl> + TypeRefPointer Parent ; <nl> <nl> protected : <nl> - NominalTypeTrait ( const std : : string & MangledName ) : MangledName ( MangledName ) { } <nl> + NominalTypeTrait ( const std : : string & MangledName , TypeRefPointer Parent ) <nl> + : MangledName ( MangledName ) , Parent ( Parent ) { } <nl> <nl> public : <nl> const std : : string & getMangledName ( ) const { <nl> class NominalTypeTrait { <nl> bool isStruct ( ) const ; <nl> bool isEnum ( ) const ; <nl> bool isClass ( ) const ; <nl> - } ; <nl> - <nl> - class NominalTypeRef final : public TypeRef , public NominalTypeTrait { <nl> - TypeRefPointer Parent ; <nl> - <nl> - public : <nl> - NominalTypeRef ( const std : : string & MangledName , <nl> - TypeRefPointer Parent = nullptr ) <nl> - : TypeRef ( TypeRefKind : : Nominal ) , NominalTypeTrait ( MangledName ) , <nl> - Parent ( Parent ) { } <nl> - <nl> - static std : : shared_ptr < NominalTypeRef > <nl> - create ( const std : : string & MangledName , TypeRefPointer Parent = nullptr ) { <nl> - return std : : make_shared < NominalTypeRef > ( MangledName , Parent ) ; <nl> - } <nl> <nl> ConstTypeRefPointer getParent ( ) const { <nl> return Parent ; <nl> class NominalTypeRef final : public TypeRef , public NominalTypeTrait { <nl> } <nl> <nl> unsigned getDepth ( ) const ; <nl> + } ; <nl> + <nl> + class NominalTypeRef final : public TypeRef , public NominalTypeTrait { <nl> + public : <nl> + NominalTypeRef ( const std : : string & MangledName , <nl> + TypeRefPointer Parent = nullptr ) <nl> + : TypeRef ( TypeRefKind : : Nominal ) , NominalTypeTrait ( MangledName , Parent ) { } <nl> + <nl> + static std : : shared_ptr < NominalTypeRef > <nl> + create ( const std : : string & MangledName , TypeRefPointer Parent = nullptr ) { <nl> + return std : : make_shared < NominalTypeRef > ( MangledName , Parent ) ; <nl> + } <nl> <nl> static bool classof ( const TypeRef * TR ) { <nl> return TR - > getKind ( ) = = TypeRefKind : : Nominal ; <nl> class NominalTypeRef final : public TypeRef , public NominalTypeTrait { <nl> <nl> class BoundGenericTypeRef final : public TypeRef , public NominalTypeTrait { <nl> TypeRefVector GenericParams ; <nl> - TypeRefPointer Parent ; <nl> <nl> public : <nl> BoundGenericTypeRef ( const std : : string & MangledName , <nl> TypeRefVector GenericParams , <nl> TypeRefPointer Parent = nullptr ) <nl> : TypeRef ( TypeRefKind : : BoundGeneric ) , <nl> - NominalTypeTrait ( MangledName ) , <nl> - GenericParams ( GenericParams ) , <nl> - Parent ( Parent ) { } <nl> + NominalTypeTrait ( MangledName , Parent ) , <nl> + GenericParams ( GenericParams ) { } <nl> <nl> static std : : shared_ptr < BoundGenericTypeRef > <nl> create ( const std : : string & MangledName , TypeRefVector GenericParams , <nl> class BoundGenericTypeRef final : public TypeRef , public NominalTypeTrait { <nl> const TypeRefVector & getGenericParams ( ) const { <nl> return GenericParams ; <nl> } <nl> - ConstTypeRefPointer getParent ( ) const { <nl> - return Parent ; <nl> - } <nl> - <nl> - TypeRefPointer getParent ( ) { <nl> - return Parent ; <nl> - } <nl> - <nl> - void setParent ( TypeRefPointer P ) { <nl> - Parent = P ; <nl> - } <nl> - <nl> - unsigned getDepth ( ) const ; <nl> <nl> static bool classof ( const TypeRef * TR ) { <nl> return TR - > getKind ( ) = = TypeRefKind : : BoundGeneric ; <nl> mmm a / 
stdlib / public / Reflection / TypeRef . cpp <nl> ppp b / stdlib / public / Reflection / TypeRef . cpp <nl> bool TypeRef : : isConcrete ( ) const { <nl> return TypeRefIsConcrete ( ) . visit ( this ) ; <nl> } <nl> <nl> - static unsigned _getDepth ( TypeRef * TR ) { <nl> - switch ( TR - > getKind ( ) ) { <nl> - case TypeRefKind : : Nominal : { <nl> - auto Nom = cast < NominalTypeRef > ( TR ) ; <nl> - return Nom - > getDepth ( ) ; <nl> - break ; <nl> - } <nl> - case TypeRefKind : : BoundGeneric : { <nl> - auto BG = cast < BoundGenericTypeRef > ( TR ) ; <nl> - return BG - > getDepth ( ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - assert ( false & & " Asked for depth on non - nominal typeref " ) ; <nl> + unsigned NominalTypeTrait : : getDepth ( ) const { <nl> + if ( auto P = Parent . get ( ) ) { <nl> + switch ( P - > getKind ( ) ) { <nl> + case TypeRefKind : : Nominal : <nl> + return 1 + cast < NominalTypeRef > ( P ) - > getDepth ( ) ; <nl> + case TypeRefKind : : BoundGeneric : <nl> + return 1 + cast < BoundGenericTypeRef > ( P ) - > getDepth ( ) ; <nl> + default : <nl> + assert ( false & & " Asked for depth on non - nominal typeref " ) ; <nl> + } <nl> } <nl> - } <nl> - <nl> - unsigned NominalTypeRef : : getDepth ( ) const { <nl> - if ( auto P = Parent . get ( ) ) <nl> - return 1 + _getDepth ( P ) ; <nl> - <nl> - return 0 ; <nl> - } <nl> - <nl> - unsigned BoundGenericTypeRef : : getDepth ( ) const { <nl> - if ( auto P = Parent . get ( ) ) <nl> - return 1 + _getDepth ( P ) ; <nl> <nl> return 0 ; <nl> } <nl> | Reflection : Clean up Parent type handling a bit , NFC | apple/swift | 22bf2dcdbe3d14f81a5eaeca7ce67d3ab21f39a5 | 2016-04-13T02:15:38Z |
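For illustration (not part of the change): a minimal C++ sketch of what hoisting Parent and getDepth() into NominalTypeTrait buys, namely that NominalTypeRef and BoundGenericTypeRef now share one depth computation. The factory calls are the create() functions visible in the header; the concrete names are made up.

    auto outer  = NominalTypeRef::create("Outer");                   // no parent, depth 0
    auto middle = NominalTypeRef::create("Middle", outer);           // parent chain of length 1
    auto inner  = BoundGenericTypeRef::create("Inner", {}, middle);  // same trait, parent chain of length 2
    // inner->getDepth() == 2, counting the chain of parent type refs

Before the change, getDepth() was declared on both subclasses and dispatched through a free _getDepth() helper; after it, the recursion lives once on the shared trait.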
new file mode 100644 <nl> index 000000000000 . . e69de29bb2d1 <nl> new file mode 100644 <nl> index 000000000000 . . e69de29bb2d1 <nl> new file mode 100644 <nl> index 000000000000 . . e69de29bb2d1 <nl> new file mode 100644 <nl> index 000000000000 . . fe09834539c4 <nl> mmm / dev / null <nl> ppp b / test / remote - run / download . test - sh <nl> <nl> + RUN : % empty - directory ( % t ) <nl> + RUN : % empty - directory ( % t - REMOTE ) <nl> + RUN : % debug - remote - run - - test - dir % t touch % t / output <nl> + RUN : ls % t / | % FileCheck % s <nl> + RUN : ls % t - REMOTE / inout / | % FileCheck % s <nl> + <nl> + RUN : % empty - directory ( % t ) <nl> + RUN : % empty - directory ( % t / nested ) <nl> + RUN : % empty - directory ( % t - REMOTE ) <nl> + RUN : % debug - remote - run - - test - dir % t touch % t / nested / output <nl> + RUN : ls % t / nested / | % FileCheck % s <nl> + RUN : ls % t - REMOTE / inout / nested / | % FileCheck % s <nl> + <nl> + CHECK : { { ^ output $ } } <nl> new file mode 100644 <nl> index 000000000000 . . afc3094e8401 <nl> mmm / dev / null <nl> ppp b / test / remote - run / lit . local . cfg <nl> <nl> + # Make a local copy of the substitutions . <nl> + config . substitutions = list ( config . substitutions ) <nl> + <nl> + config . substitutions . insert ( 0 , ( ' % debug - remote - run ' , <nl> + ' % utils / remote - run - - debug - as - local - - remote - dir % t - REMOTE ' ) ) <nl> new file mode 100644 <nl> index 000000000000 . . d8cb98fbd8d0 <nl> mmm / dev / null <nl> ppp b / test / remote - run / run - only . test - sh <nl> <nl> + RUN : % debug - remote - run echo hello | % FileCheck % s <nl> + <nl> + CHECK : { { ^ hello $ } } <nl> new file mode 100644 <nl> index 000000000000 . . 431f5a1e5a40 <nl> mmm / dev / null <nl> ppp b / test / remote - run / upload - and - download . test - sh <nl> <nl> + RUN : % empty - directory ( % t ) <nl> + RUN : % empty - directory ( % t - REMOTE ) <nl> + RUN : touch % t / input % t / BAD <nl> + RUN : % debug - remote - run - - test - dir % t cp % t / input % t / output <nl> + RUN : ls % t / | % FileCheck % s <nl> + RUN : ls % t - REMOTE / inout / | % FileCheck - check - prefix CHECK - REMOTE % s <nl> + <nl> + CHECK : BAD <nl> + CHECK - NEXT : { { ^ input $ } } <nl> + CHECK - NEXT : { { ^ output $ } } <nl> + <nl> + CHECK - REMOTE - NOT : BAD <nl> + CHECK - REMOTE : { { ^ input $ } } <nl> + CHECK - REMOTE - NEXT : { { ^ output $ } } <nl> + CHECK - REMOTE - NOT : BAD <nl> + <nl> + RUN : % empty - directory ( % t ) <nl> + RUN : % empty - directory ( % t / nested ) <nl> + RUN : % empty - directory ( % t - REMOTE ) <nl> + RUN : touch % t / nested / input % t / nested / BAD <nl> + RUN : % debug - remote - run - - test - dir % t cp % t / nested / input % t / nested / output <nl> + RUN : ls % t / nested / | % FileCheck % s <nl> + RUN : ls % t - REMOTE / inout / nested / | % FileCheck - check - prefix CHECK - REMOTE % s <nl> new file mode 100644 <nl> index 000000000000 . . 6691aebc4383 <nl> mmm / dev / null <nl> ppp b / test / remote - run / upload . test - sh <nl> <nl> + RUN : % debug - remote - run - - source - dir % S / Inputs / upload / ls % S / Inputs / upload / 1 . txt % S / Inputs / upload / 2 . txt | % FileCheck - check - prefix CHECK - REMOTE % s <nl> + RUN : ls % t - REMOTE / inputs / | % FileCheck % s <nl> + <nl> + RUN : % empty - directory ( % t - REMOTE ) <nl> + RUN : % debug - remote - run - - source - dir % S / Inputs / ls % S / Inputs / upload / 1 . txt % S / Inputs / upload / 2 . 
txt | % FileCheck - check - prefix CHECK - REMOTE - NESTED % s <nl> + RUN : ls % t - REMOTE / inputs / upload / | % FileCheck % s <nl> + <nl> + CHECK - REMOTE : { { - REMOTE / inputs / 1 . txt $ } } <nl> + CHECK - REMOTE - NEXT : { { - REMOTE / inputs / 2 . txt $ } } <nl> + <nl> + CHECK - REMOTE - NESTED : { { - REMOTE / inputs / upload / 1 . txt $ } } <nl> + CHECK - REMOTE - NESTED - NEXT : { { - REMOTE / inputs / upload / 2 . txt $ } } <nl> + <nl> + CHECK - NOT : BAD <nl> + CHECK : { { ^ 1 . txt $ } } <nl> + CHECK - NEXT : { { ^ 2 . txt $ } } <nl> + CHECK - NOT : BAD <nl> new file mode 100755 <nl> index 000000000000 . . b6f4cbaf3f2f <nl> mmm / dev / null <nl> ppp b / utils / remote - run <nl> <nl> + # ! / usr / bin / env python <nl> + # remote - run - Runs a command on another machine , for testing mmm - - * - python - * - <nl> + # <nl> + # This source file is part of the Swift . org open source project <nl> + # <nl> + # Copyright ( c ) 2018 Apple Inc . and the Swift project authors <nl> + # Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + # <nl> + # See https : / / swift . org / LICENSE . txt for license information <nl> + # See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + # <nl> + # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + from __future__ import print_function <nl> + <nl> + import argparse <nl> + import os . path <nl> + import subprocess <nl> + <nl> + def quote ( arg ) : <nl> + return repr ( arg ) <nl> + <nl> + class CommandRunner ( object ) : <nl> + @ staticmethod <nl> + def _dirnames ( files ) : <nl> + return list ( set ( os . path . dirname ( f ) for f in files ) ) <nl> + <nl> + def send ( self , local_to_remote_files ) : <nl> + # Prepare the remote directory structure . <nl> + # FIXME : This could be folded into the sftp connection below . <nl> + dirs_to_make = self . _dirnames ( local_to_remote_files . viewvalues ( ) ) <nl> + self . run_remote ( [ ' / bin / mkdir ' , ' - p ' ] + dirs_to_make ) <nl> + <nl> + # Send the local files . <nl> + copy_proc = self . open_sftp ( ) <nl> + _ , _ = copy_proc . communicate ( ' \ n ' . join ( " - put { 0 } { 1 } " . format ( quote ( local_file ) , quote ( remote_file ) ) for local_file , remote_file in local_to_remote_files . viewitems ( ) ) ) <nl> + # FIXME : dump stderr at this point <nl> + assert copy_proc . returncode = = 0 , " sftp send failed " <nl> + <nl> + def fetch ( self , local_to_remote_files ) : <nl> + # Prepare the local directory structure . <nl> + dirs_to_make = self . _dirnames ( download_files . viewkeys ( ) ) <nl> + subprocess . check_call ( [ ' / bin / mkdir ' , ' - p ' ] + dirs_to_make ) <nl> + <nl> + # Fetch the remote files . <nl> + copy_proc = self . open_sftp ( ) <nl> + _ , _ = copy_proc . communicate ( ' \ n ' . join ( " - get { 0 } { 1 } " . format ( quote ( remote_file ) , quote ( local_file ) ) for local_file , remote_file in download_files . viewitems ( ) ) ) <nl> + # FIXME : dump stderr at this point <nl> + assert copy_proc . returncode = = 0 , " sftp fetch failed " <nl> + <nl> + class RemoteCommandRunner ( CommandRunner ) : <nl> + def __init__ ( self , remote_host ) : <nl> + self . remote_host = remote_host <nl> + <nl> + def run_remote ( self , command ) : <nl> + return subprocess . check_output ( [ ' / usr / bin / ssh ' , ' - n ' , self . remote_host , ' - - ' ] + <nl> + [ quote ( arg ) for arg in command ] ) <nl> + <nl> + def open_sftp ( self ) : <nl> + return subprocess . 
Popen ( [ ' / usr / bin / sftp ' , ' - b ' , ' - ' , self . remote_host ] , <nl> + stdin = subprocess . PIPE , <nl> + stdout = subprocess . PIPE , <nl> + stderr = subprocess . PIPE ) <nl> + <nl> + class LocalCommandRunner ( CommandRunner ) : <nl> + def run_remote ( self , command ) : <nl> + return subprocess . check_output ( command ) <nl> + <nl> + def open_sftp ( self ) : <nl> + return subprocess . Popen ( [ ' / usr / bin / sftp ' , ' - b ' , ' - ' , ' - D ' , ' / usr / libexec / sftp - server ' ] , <nl> + stdin = subprocess . PIPE , <nl> + stdout = subprocess . PIPE , <nl> + stderr = subprocess . PIPE ) <nl> + <nl> + <nl> + def find_transfers ( args , source_dir , dest_dir ) : <nl> + if not source_dir . endswith ( os . path . sep ) : <nl> + source_dir + = os . path . sep <nl> + result = dict ( ) <nl> + for arg in args : <nl> + if not arg . startswith ( source_dir ) : <nl> + continue <nl> + relative_path = os . path . relpath ( arg , source_dir ) <nl> + result [ arg ] = os . path . join ( dest_dir , relative_path ) <nl> + return result <nl> + <nl> + parser = argparse . ArgumentParser ( ) <nl> + parser . add_argument ( ' - - source - dir ' ) <nl> + parser . add_argument ( ' - - test - dir ' ) <nl> + parser . add_argument ( ' - - remote - dir ' , required = True ) <nl> + parser . add_argument ( ' - - debug - as - local ' , action = ' store_true ' ) <nl> + parser . add_argument ( ' host ' ) <nl> + parser . add_argument ( ' command ' , nargs = argparse . REMAINDER ) <nl> + args = parser . parse_args ( ) <nl> + <nl> + if args . debug_as_local : <nl> + runner = LocalCommandRunner ( ) <nl> + args . command . insert ( 0 , args . host ) <nl> + del args . host <nl> + else : <nl> + runner = RemoteCommandRunner ( args . host ) <nl> + <nl> + upload_files = dict ( ) <nl> + download_files = dict ( ) <nl> + if args . source_dir : <nl> + input_files = find_transfers ( args . command , args . source_dir , os . path . join ( args . remote_dir , ' inputs ' ) ) <nl> + assert not any ( upload_files . has_key ( f ) for f in input_files ) <nl> + upload_files . update ( input_files ) <nl> + if args . test_dir : <nl> + test_files = find_transfers ( args . command , args . test_dir , os . path . join ( args . remote_dir , ' inout ' ) ) <nl> + assert not any ( upload_files . has_key ( f ) for f in test_files ) <nl> + upload_files . update ( test_files ) <nl> + assert not any ( download_files . has_key ( f ) for f in test_files ) <nl> + download_files . update ( test_files ) <nl> + <nl> + if upload_files : <nl> + runner . send ( upload_files ) <nl> + <nl> + translated_command = [ upload_files . get ( arg , download_files . get ( arg , arg ) ) for arg in args . command ] <nl> + print ( runner . run_remote ( translated_command ) ) <nl> + <nl> + if download_files : <nl> + runner . fetch ( download_files ) <nl> | [ test ] Start work on ' remote - run ' , to run executable tests over SSH | apple/swift | 74da057e13205e328543b00a24b8812a041ece65 | 2018-08-09T00:42:55Z |
mmm a / xbmc / windowing / osx / WinSystemOSX . mm <nl> ppp b / xbmc / windowing / osx / WinSystemOSX . mm <nl> static void DisplayReconfigured ( CGDirectDisplayID display , <nl> <nl> / / Blank other displays if requested . <nl> if ( blankOtherDisplays ) <nl> - BlankOtherDisplays ( 0 ) ; <nl> + BlankOtherDisplays ( m_lastDisplayNr ) ; <nl> <nl> / / Hide the mouse . <nl> [ NSCursor hide ] ; <nl> | [ osx / windowing ] - fix " blank other displays " by using the actual screen idx instead of always using screen 0 | xbmc/xbmc | 0443e53951e3fabca6ec2845d2da765faa56643e | 2018-11-07T20:50:42Z |
mmm a / tensorflow / tools / compatibility / ast_edits . py <nl> ppp b / tensorflow / tools / compatibility / ast_edits . py <nl> def process_tree ( self , root_directory , output_root_directory , <nl> in_place : Allow the conversion of an entire directory in place . <nl> <nl> Returns : <nl> - A tuple of files processed , the report string ofr all files , and errors <nl> + A tuple of files processed , the report string ofr all files , and a dict <nl> + mapping filenames to errors encountered in that file . <nl> " " " <nl> <nl> if output_root_directory = = root_directory : <nl> def process_tree ( self , root_directory , output_root_directory , <nl> files_to_copy . append ( ( fullpath , fullpath_output ) ) <nl> <nl> file_count = 0 <nl> - tree_errors = [ ] <nl> + tree_errors = { } <nl> report = " " <nl> report + = ( " = " * 80 ) + " \ n " <nl> report + = " Input tree : % r \ n " % root_directory <nl> def process_tree ( self , root_directory , output_root_directory , <nl> os . makedirs ( output_directory ) <nl> file_count + = 1 <nl> _ , l_report , l_errors = self . process_file ( input_path , output_path ) <nl> - tree_errors + = l_errors <nl> + tree_errors [ input_path ] = l_errors <nl> report + = l_report <nl> for input_path , output_path in files_to_copy : <nl> output_directory = os . path . dirname ( output_path ) <nl> def process_tree_inplace ( self , root_directory ) : <nl> files_to_process + = py_files <nl> <nl> file_count = 0 <nl> - tree_errors = [ ] <nl> + tree_errors = { } <nl> report = " " <nl> report + = ( " = " * 80 ) + " \ n " <nl> report + = " Input tree : % r \ n " % root_directory <nl> def process_tree_inplace ( self , root_directory ) : <nl> for path in files_to_process : <nl> file_count + = 1 <nl> _ , l_report , l_errors = self . process_file ( path , path ) <nl> - tree_errors + = l_errors <nl> + tree_errors [ path ] = l_errors <nl> report + = l_report <nl> <nl> return file_count , report , tree_errors <nl> mmm a / tensorflow / tools / compatibility / tf_upgrade_v2_main . py <nl> ppp b / tensorflow / tools / compatibility / tf_upgrade_v2_main . py <nl> def main ( ) : <nl> else : <nl> parser . print_help ( ) <nl> if report_text : <nl> - open ( report_filename , " w " ) . write ( report_text ) <nl> - print ( " TensorFlow 2 . 0 Upgrade Script " ) <nl> - print ( " mmmmmmmmmmmmmmmmmmmmmmmmmmm - - " ) <nl> - print ( " Converted % d files \ n " % files_processed ) <nl> - print ( " Detected % d errors that require attention " % len ( errors ) ) <nl> - print ( " - " * 80 ) <nl> - print ( " \ n " . join ( errors ) ) <nl> - print ( " \ nMake sure to read the detailed log % r \ n " % report_filename ) <nl> + num_errors = 0 <nl> + report = " " <nl> + for f in errors : <nl> + if errors [ f ] : <nl> + num_errors + = len ( errors [ f ] ) <nl> + report + = " - " * 80 + " \ n " <nl> + report + = " File : % s \ n " % f <nl> + report + = " - " * 80 + " \ n " <nl> + report + = " \ n " . join ( errors [ f ] ) + " \ n " <nl> + <nl> + report = ( <nl> + " TensorFlow 2 . 0 Upgrade Script \ n " <nl> + " mmmmmmmmmmmmmmmmmmmmmmmmmmm - - \ n " <nl> + " Converted % d files \ n " % files_processed + <nl> + " Detected % d errors that require attention " % num_errors + " \ n " + <nl> + " - " * 80 + " \ n " <nl> + ) + report <nl> <nl> + with open ( report_filename , " w " ) as report_file : <nl> + report_file . write ( report ) <nl> + report_file . write ( " = " * 80 + " \ n " ) <nl> + report_file . write ( " Detailed log follows : \ n \ n " ) <nl> + report_file . 
write ( " = " * 80 + " \ n " ) <nl> + report_file . write ( report_text ) <nl> + <nl> + print ( report ) <nl> + print ( " \ nMake sure to read the detailed log % r \ n " % report_filename ) <nl> <nl> if __name__ = = " __main__ " : <nl> main ( ) <nl> | Make converter output filenames in logs . | tensorflow/tensorflow | a206216c854e23d77bd727f039fd194ad831185f | 2019-01-18T20:11:35Z |
mmm a / src / bootstrapper . cc <nl> ppp b / src / bootstrapper . cc <nl> void Genesis : : InitializeGlobal_harmony_weak_refs ( ) { <nl> <nl> SimpleInstallFunction ( isolate ( ) , weak_factory_prototype , " makeRef " , <nl> Builtins : : kWeakFactoryMakeRef , 2 , false ) ; <nl> + <nl> + SimpleInstallFunction ( isolate ( ) , weak_factory_prototype , " cleanupSome " , <nl> + Builtins : : kWeakFactoryCleanupSome , 0 , false ) ; <nl> } <nl> { <nl> / / Create % WeakCellPrototype % <nl> mmm a / src / builtins / builtins - definitions . h <nl> ppp b / src / builtins / builtins - definitions . h <nl> namespace internal { <nl> CPP ( WeakCellClear ) \ <nl> CPP ( WeakCellHoldingsGetter ) \ <nl> CPP ( WeakFactoryCleanupIteratorNext ) \ <nl> + CPP ( WeakFactoryCleanupSome ) \ <nl> CPP ( WeakFactoryConstructor ) \ <nl> CPP ( WeakFactoryMakeCell ) \ <nl> CPP ( WeakFactoryMakeRef ) \ <nl> mmm a / src / builtins / builtins - weak - refs . cc <nl> ppp b / src / builtins / builtins - weak - refs . cc <nl> BUILTIN ( WeakFactoryMakeRef ) { <nl> return * weak_ref ; <nl> } <nl> <nl> + BUILTIN ( WeakFactoryCleanupSome ) { <nl> + HandleScope scope ( isolate ) ; <nl> + const char * method_name = " WeakFactory . prototype . cleanupSome " ; <nl> + <nl> + CHECK_RECEIVER ( JSWeakFactory , weak_factory , method_name ) ; <nl> + <nl> + / / Don ' t do set_scheduled_for_cleanup ( false ) ; we still have the microtask <nl> + / / scheduled and don ' t want to schedule another one in case the user never <nl> + / / executes microtasks . <nl> + JSWeakFactory : : Cleanup ( weak_factory , isolate ) ; <nl> + return ReadOnlyRoots ( isolate ) . undefined_value ( ) ; <nl> + } <nl> + <nl> BUILTIN ( WeakFactoryCleanupIteratorNext ) { <nl> HandleScope scope ( isolate ) ; <nl> CHECK_RECEIVER ( JSWeakFactoryCleanupIterator , iterator , " next " ) ; <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> template void <nl> BaseNameDictionary < NameDictionary , NameDictionaryShape > : : CollectKeysTo ( <nl> Handle < NameDictionary > dictionary , KeyAccumulator * keys ) ; <nl> <nl> + void JSWeakFactory : : Cleanup ( Handle < JSWeakFactory > weak_factory , <nl> + Isolate * isolate ) { <nl> + / / It ' s possible that the cleared_cells list is empty , since <nl> + / / WeakCell . clear ( ) was called on all its elements before this task ran . In <nl> + / / that case , don ' t call the cleanup function . <nl> + if ( ! weak_factory - > cleared_cells ( ) - > IsUndefined ( isolate ) ) { <nl> + / / Construct the iterator . <nl> + Handle < JSWeakFactoryCleanupIterator > iterator ; <nl> + { <nl> + Handle < Map > cleanup_iterator_map ( <nl> + isolate - > native_context ( ) - > js_weak_factory_cleanup_iterator_map ( ) , <nl> + isolate ) ; <nl> + iterator = Handle < JSWeakFactoryCleanupIterator > : : cast ( <nl> + isolate - > factory ( ) - > NewJSObjectFromMap ( <nl> + cleanup_iterator_map , NOT_TENURED , <nl> + Handle < AllocationSite > : : null ( ) ) ) ; <nl> + iterator - > set_factory ( * weak_factory ) ; <nl> + } <nl> + Handle < Object > cleanup ( weak_factory - > cleanup ( ) , isolate ) ; <nl> + <nl> + v8 : : TryCatch try_catch ( reinterpret_cast < v8 : : Isolate * > ( isolate ) ) ; <nl> + v8 : : Local < v8 : : Value > result ; <nl> + MaybeHandle < Object > exception ; <nl> + Handle < Object > args [ ] = { iterator } ; <nl> + bool has_pending_exception = ! ToLocal < Value > ( <nl> + Execution : : TryCall ( <nl> + isolate , cleanup , <nl> + handle ( ReadOnlyRoots ( isolate ) . 
undefined_value ( ) , isolate ) , 1 , args , <nl> + Execution : : MessageHandling : : kReport , & exception , <nl> + Execution : : Target : : kCallable ) , <nl> + & result ) ; <nl> + / / TODO ( marja ) : ( spec ) : What if there ' s an exception ? <nl> + USE ( has_pending_exception ) ; <nl> + <nl> + / / TODO ( marja ) : ( spec ) : Should the iterator be invalidated after the <nl> + / / function returns ? <nl> + } <nl> + } <nl> + <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / objects / js - weak - refs . h <nl> ppp b / src / objects / js - weak - refs . h <nl> class JSWeakFactory : public JSObject { <nl> / / list . ( Assumes there is one . ) <nl> inline JSWeakCell * PopClearedCell ( Isolate * isolate ) ; <nl> <nl> + / / Constructs an iterator for the WeakCells in the cleared_cells list and <nl> + / / calls the user ' s cleanup function . <nl> + static void Cleanup ( Handle < JSWeakFactory > weak_factory , Isolate * isolate ) ; <nl> + <nl> static const int kNativeContextOffset = JSObject : : kHeaderSize ; <nl> static const int kCleanupOffset = kNativeContextOffset + kPointerSize ; <nl> static const int kActiveCellsOffset = kCleanupOffset + kPointerSize ; <nl> mmm a / src / runtime / runtime - weak - refs . cc <nl> ppp b / src / runtime / runtime - weak - refs . cc <nl> RUNTIME_FUNCTION ( Runtime_WeakFactoryCleanupJob ) { <nl> CONVERT_ARG_HANDLE_CHECKED ( JSWeakFactory , weak_factory , 0 ) ; <nl> weak_factory - > set_scheduled_for_cleanup ( false ) ; <nl> <nl> - / / It ' s possible that the cleared_cells list is empty , since <nl> - / / WeakCell . clear ( ) was called on all its elements before this task ran . In <nl> - / / that case , don ' t call the cleanup function . <nl> - if ( ! weak_factory - > cleared_cells ( ) - > IsUndefined ( isolate ) ) { <nl> - / / Construct the iterator . <nl> - Handle < JSWeakFactoryCleanupIterator > iterator ; <nl> - { <nl> - Handle < Map > cleanup_iterator_map ( <nl> - isolate - > native_context ( ) - > js_weak_factory_cleanup_iterator_map ( ) , <nl> - isolate ) ; <nl> - iterator = Handle < JSWeakFactoryCleanupIterator > : : cast ( <nl> - isolate - > factory ( ) - > NewJSObjectFromMap ( <nl> - cleanup_iterator_map , NOT_TENURED , <nl> - Handle < AllocationSite > : : null ( ) ) ) ; <nl> - iterator - > set_factory ( * weak_factory ) ; <nl> - } <nl> - Handle < Object > cleanup ( weak_factory - > cleanup ( ) , isolate ) ; <nl> - <nl> - v8 : : TryCatch try_catch ( reinterpret_cast < v8 : : Isolate * > ( isolate ) ) ; <nl> - v8 : : Local < v8 : : Value > result ; <nl> - MaybeHandle < Object > exception ; <nl> - Handle < Object > args [ ] = { iterator } ; <nl> - bool has_pending_exception = ! ToLocal < Value > ( <nl> - Execution : : TryCall ( <nl> - isolate , cleanup , <nl> - handle ( ReadOnlyRoots ( isolate ) . undefined_value ( ) , isolate ) , 1 , args , <nl> - Execution : : MessageHandling : : kReport , & exception , <nl> - Execution : : Target : : kCallable ) , <nl> - & result ) ; <nl> - / / TODO ( marja ) : ( spec ) : What if there ' s an exception ? <nl> - USE ( has_pending_exception ) ; <nl> - <nl> - / / TODO ( marja ) : ( spec ) : Should the iterator be invalidated after the <nl> - / / function returns ? <nl> - } <nl> + JSWeakFactory : : Cleanup ( weak_factory , isolate ) ; <nl> return ReadOnlyRoots ( isolate ) . undefined_value ( ) ; <nl> } <nl> <nl> mmm a / test / mjsunit / harmony / weakrefs / basics . js <nl> ppp b / test / mjsunit / harmony / weakrefs / basics . js <nl> <nl> WeakFactory . prototype . makeRef . 
call ( wf , { } ) ; <nl> } ) ( ) ; <nl> <nl> + ( function TestCleanupSomeWithoutWeakFactory ( ) { <nl> + assertThrows ( ( ) = > WeakFactory . prototype . cleanupSome . call ( { } ) , TypeError ) ; <nl> + / / Does not throw : <nl> + let wf = new WeakFactory ( ( ) = > { } ) ; <nl> + let rv = WeakFactory . prototype . cleanupSome . call ( wf ) ; <nl> + assertEquals ( undefined , rv ) ; <nl> + } ) ( ) ; <nl> + <nl> ( function TestDerefWithoutWeakRef ( ) { <nl> let wf = new WeakFactory ( ( ) = > { } ) ; <nl> let wc = wf . makeCell ( { } ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 631f43c0124 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / harmony / weakrefs / cleanupsome - cleared - weakcell . js <nl> <nl> + / / Copyright 2018 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - harmony - weak - refs - - expose - gc - - noincremental - marking <nl> + <nl> + let cleanup_count = 0 ; <nl> + let cleanup_cells = [ ] ; <nl> + let cleanup = function ( iter ) { <nl> + for ( wc of iter ) { <nl> + cleanup_cells . push ( wc ) ; <nl> + } <nl> + + + cleanup_count ; <nl> + } <nl> + <nl> + let wf = new WeakFactory ( cleanup ) ; <nl> + let weak_cell ; <nl> + ( function ( ) { <nl> + let o = { } ; <nl> + weak_cell = wf . makeCell ( o ) ; <nl> + <nl> + / / cleanupSome won ' t do anything since there are no dirty WeakCells . <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + } ) ( ) ; <nl> + <nl> + / / GC will detect the WeakCell as dirty . <nl> + gc ( ) ; <nl> + <nl> + / / Clear the WeakCell just before we would ' ve called cleanupSome . <nl> + weak_cell . clear ( ) ; <nl> + <nl> + wf . cleanupSome ( ) ; <nl> + <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> new file mode 100644 <nl> index 00000000000 . . a7105771b30 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / harmony / weakrefs / cleanupsome - dereffed - and - cleared - weakref . js <nl> <nl> + / / Copyright 2018 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - harmony - weak - refs - - expose - gc - - noincremental - marking - - allow - natives - syntax <nl> + <nl> + let cleanup_count = 0 ; <nl> + let cleanup_cells = [ ] ; <nl> + let cleanup = function ( iter ) { <nl> + for ( wc of iter ) { <nl> + cleanup_cells . push ( wc ) ; <nl> + } <nl> + + + cleanup_count ; <nl> + } <nl> + <nl> + let o = { } ; <nl> + let wf = new WeakFactory ( cleanup ) ; <nl> + let weak_ref ; <nl> + ( function ( ) { <nl> + weak_ref = wf . makeRef ( o ) ; <nl> + <nl> + / / cleanupSome won ' t do anything since there are no dirty WeakCells . <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + } ) ( ) ; <nl> + <nl> + / / Clear the KeepDuringJob set . <nl> + % RunMicrotasks ( ) ; <nl> + <nl> + weak_ref . deref ( ) ; <nl> + o = null ; <nl> + <nl> + / / The WeakRef is not detected as dirty , since the KeepDuringJob set keeps the <nl> + / / target object alive . <nl> + gc ( ) ; <nl> + <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + <nl> + % RunMicrotasks ( ) ; <nl> + / / Next turn . <nl> + <nl> + / / This GC detects the WeakRef as dirty . <nl> + gc ( ) ; <nl> + <nl> + / / Clear the WeakRef just before we would ' ve called cleanupSome . <nl> + weak_ref . clear ( ) ; <nl> + <nl> + wf . 
cleanupSome ( ) ; <nl> + <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> new file mode 100644 <nl> index 00000000000 . . fb113bef0d7 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / harmony / weakrefs / cleanupsome - dereffed - weakref . js <nl> <nl> + / / Copyright 2018 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - harmony - weak - refs - - expose - gc - - noincremental - marking - - allow - natives - syntax <nl> + <nl> + let cleanup_count = 0 ; <nl> + let cleanup_cells = [ ] ; <nl> + let cleanup = function ( iter ) { <nl> + for ( wc of iter ) { <nl> + cleanup_cells . push ( wc ) ; <nl> + } <nl> + + + cleanup_count ; <nl> + } <nl> + <nl> + let o = { } ; <nl> + let wf = new WeakFactory ( cleanup ) ; <nl> + let weak_ref ; <nl> + ( function ( ) { <nl> + weak_ref = wf . makeRef ( o ) ; <nl> + <nl> + / / cleanupSome won ' t do anything since there are no dirty WeakCells . <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + } ) ( ) ; <nl> + <nl> + / / Clear the KeepDuringJob set . <nl> + % RunMicrotasks ( ) ; <nl> + <nl> + weak_ref . deref ( ) ; <nl> + o = null ; <nl> + <nl> + / / The WeakRef is not detected as dirty , since the KeepDuringJob set keeps the <nl> + / / target object alive . <nl> + gc ( ) ; <nl> + <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + <nl> + % RunMicrotasks ( ) ; <nl> + / / Next turn . <nl> + <nl> + / / Now the WeakRef can be cleared . <nl> + gc ( ) ; <nl> + wf . cleanupSome ( ) ; <nl> + <nl> + assertEquals ( 1 , cleanup_count ) ; <nl> + assertEquals ( 1 , cleanup_cells . length ) ; <nl> + assertEquals ( weak_ref , cleanup_cells [ 0 ] ) ; <nl> + <nl> + / / The cleanup task is not executed again since all WeakCells have been <nl> + / / processed . <nl> + <nl> + % RunMicrotasks ( ) ; <nl> + / / Next turn . <nl> + <nl> + assertEquals ( 1 , cleanup_count ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 84a946d3902 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / harmony / weakrefs / cleanupsome - weakcell . js <nl> <nl> + / / Copyright 2018 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - harmony - weak - refs - - expose - gc - - noincremental - marking <nl> + <nl> + let cleanup_count = 0 ; <nl> + let cleanup_cells = [ ] ; <nl> + let cleanup = function ( iter ) { <nl> + for ( wc of iter ) { <nl> + cleanup_cells . push ( wc ) ; <nl> + } <nl> + + + cleanup_count ; <nl> + } <nl> + <nl> + let wf = new WeakFactory ( cleanup ) ; <nl> + let weak_cell ; <nl> + ( function ( ) { <nl> + let o = { } ; <nl> + weak_cell = wf . makeCell ( o ) ; <nl> + <nl> + / / cleanupSome won ' t do anything since there are no dirty WeakCells . <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + } ) ( ) ; <nl> + <nl> + / / GC will detect the WeakCell as dirty . <nl> + gc ( ) ; <nl> + <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 1 , cleanup_count ) ; <nl> + assertEquals ( 1 , cleanup_cells . length ) ; <nl> + assertEquals ( weak_cell , cleanup_cells [ 0 ] ) ; <nl> new file mode 100644 <nl> index 00000000000 . . ab1e7ebe193 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / harmony / weakrefs / cleanupsome - weakref . js <nl> <nl> + / / Copyright 2018 the V8 project authors . 
All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - harmony - weak - refs - - expose - gc - - noincremental - marking - - allow - natives - syntax <nl> + <nl> + let cleanup_count = 0 ; <nl> + let cleanup_cells = [ ] ; <nl> + let cleanup = function ( iter ) { <nl> + for ( wc of iter ) { <nl> + cleanup_cells . push ( wc ) ; <nl> + } <nl> + + + cleanup_count ; <nl> + } <nl> + <nl> + let wf = new WeakFactory ( cleanup ) ; <nl> + let weak_ref ; <nl> + ( function ( ) { <nl> + let o = { } ; <nl> + weak_ref = wf . makeRef ( o ) ; <nl> + <nl> + / / cleanupSome won ' t do anything since there are no dirty WeakCells . <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + } ) ( ) ; <nl> + <nl> + / / The WeakRef is not detected as dirty , since the KeepDuringJob set keeps the <nl> + / / target object alive . <nl> + gc ( ) ; <nl> + <nl> + wf . cleanupSome ( ) ; <nl> + assertEquals ( 0 , cleanup_count ) ; <nl> + <nl> + % RunMicrotasks ( ) ; <nl> + / / Next turn . <nl> + <nl> + / / Now the WeakRef can be cleared . <nl> + gc ( ) ; <nl> + wf . cleanupSome ( ) ; <nl> + <nl> + assertEquals ( 1 , cleanup_count ) ; <nl> + assertEquals ( 1 , cleanup_cells . length ) ; <nl> + assertEquals ( weak_ref , cleanup_cells [ 0 ] ) ; <nl> | [ js weak refs ] Add WeakFactory . prototype . cleanupSome | v8/v8 | 1ed5214c8cb7a40075114f21ec488022f6e6bec7 | 2018-11-13T14:56:14Z |
mmm a / src / serializer / log / metablock / metablock_manager . hpp <nl> ppp b / src / serializer / log / metablock / metablock_manager . hpp <nl> class metablock_manager_t : private iocallback_t { <nl> <nl> crc_metablock_t * mb_buffer ; <nl> bool mb_buffer_in_use ; / * ! < true : we ' re using the buffer , no one else can * / <nl> - <nl> - private : <nl> + <nl> + <nl> / * these are only used in the beginning when we want to find the metablock * / <nl> crc_metablock_t * mb_buffer_last ; / * the last metablock we read * / <nl> - int version ; / * ! < only used during boot up * / <nl> + int64_t version ; / * ! < only used during boot up * / <nl> <nl> + void swap_buffers ( ) ; <nl> + <nl> extent_manager_t * extent_manager ; <nl> <nl> std : : vector < off64_t , gnew_alloc < off64_t > > metablock_offsets ; <nl> mmm a / src / serializer / log / metablock / metablock_manager . tcc <nl> ppp b / src / serializer / log / metablock / metablock_manager . tcc <nl> bool metablock_manager_t < metablock_t > : : write_metablock ( metablock_t * mb , metabloc <nl> } else { <nl> assert ( ! mb_buffer_in_use ) ; <nl> memcpy ( & ( mb_buffer - > metablock ) , mb , sizeof ( metablock_t ) ) ; <nl> + mb_buffer - > version + + ; <nl> + <nl> mb_buffer - > set_crc ( ) ; <nl> assert ( mb_buffer - > check_crc ( ) ) ; <nl> mb_buffer_in_use = true ; <nl> <nl> dbfile - > write_async ( head . offset ( ) , DEVICE_BLOCK_SIZE , mb_buffer , this ) ; <nl> <nl> - mb_buffer - > version + + ; <nl> + / / TODO : Does mb_buffer get copied immediately by write_async ? It had better be . <nl> head + + ; <nl> <nl> state = state_writing ; <nl> void metablock_manager_t < metablock_t > : : shutdown ( ) { <nl> state = state_shut_down ; <nl> } <nl> <nl> + template < class metablock_t > <nl> + void metablock_manager_t < metablock_t > : : swap_buffers ( ) { <nl> + crc_metablock_t * tmp = mb_buffer_last ; <nl> + mb_buffer_last = mb_buffer ; <nl> + mb_buffer = tmp ; <nl> + } <nl> + <nl> template < class metablock_t > <nl> void metablock_manager_t < metablock_t > : : on_io_complete ( event_t * e ) { <nl> bool done_looking = false ; / * whether or not the value in mb_buffer_last is the real metablock * / <nl> void metablock_manager_t < metablock_t > : : on_io_complete ( event_t * e ) { <nl> head . push ( ) ; <nl> head + + ; <nl> / * mb_buffer_last = mb_buffer and give mb_buffer mb_buffer_last ' s space so no realloc * / <nl> - swap ( ( void * * ) & mb_buffer_last , ( void * * ) & mb_buffer ) ; <nl> + swap_buffers ( ) ; <nl> if ( head . wraparound ) { <nl> done_looking = true ; <nl> } else { <nl> void metablock_manager_t < metablock_t > : : on_io_complete ( event_t * e ) { <nl> <nl> } else { <nl> / * we found a metablock * / <nl> - swap ( ( void * * ) & mb_buffer_last , ( void * * ) & mb_buffer ) ; <nl> + swap_buffers ( ) ; <nl> <nl> / * set everything up * / <nl> version = - 1 ; / * version is now useless * / <nl> mmm a / src / utils . hpp <nl> ppp b / src / utils . hpp <nl> void * malloc_aligned ( size_t size , size_t alignment = 64 ) ; <nl> / / Fast string compare <nl> int sized_strcmp ( const char * str1 , int len1 , const char * str2 , int len2 ) ; <nl> <nl> - static inline void swap ( void * * x , void * * y ) { <nl> - void * tmp = * x ; <nl> - * x = * y ; <nl> - * y = tmp ; <nl> - } <nl> - <nl> / / Buffer <nl> template < int _size > <nl> struct buffer_base_t <nl> | Made metablock_manager increment the version number _before_ writing to disk . | rethinkdb/rethinkdb | 75baa280f0e927cec3911f565c4be6fcc964ec8e | 2010-10-26T23:41:20Z |
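Annotation: the record above moves the version increment before the CRC is computed and written. A rough sketch of why that ordering matters, with entirely hypothetical names and a toy checksum (not RethinkDB's actual metablock format): recovery picks the highest-version slot whose checksum validates, so a torn write of the new block simply fails its CRC and recovery falls back to the previous version.

    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <vector>

    // Hypothetical on-disk metablock slot: version plus checksum over payload.
    struct MetablockSlot {
        int64_t version = -1;
        uint32_t crc = 0;
        uint32_t payload = 0;  // stand-in for the real metablock contents

        uint32_t compute_crc() const {
            // Toy checksum; real code would use a proper CRC.
            return static_cast<uint32_t>(version) ^ payload ^ 0xDEADBEEFu;
        }
        bool valid() const { return version >= 0 && crc == compute_crc(); }
    };

    // Bump the version *before* computing the checksum and writing, so the
    // slot only wins recovery if the whole write (new version included) landed.
    void prepare_for_write(MetablockSlot& slot, uint32_t new_payload) {
        ++slot.version;
        slot.payload = new_payload;
        slot.crc = slot.compute_crc();
    }

    // Recovery: keep the valid slot with the highest version.
    std::optional<MetablockSlot> recover(const std::vector<MetablockSlot>& slots) {
        std::optional<MetablockSlot> best;
        for (const auto& s : slots) {
            if (s.valid() && (!best || s.version > best->version)) best = s;
        }
        return best;
    }

    int main() {
        std::vector<MetablockSlot> ring(2);
        prepare_for_write(ring[0], 111);   // version 0
        ring[1] = ring[0];
        prepare_for_write(ring[1], 222);   // version 1
        ring[1].payload = 999;             // simulate a torn/corrupted write
        auto best = recover(ring);
        std::printf("recovered version %lld payload %u\n",
                    static_cast<long long>(best->version), best->payload);
        // Prints version 0 / payload 111: slot 1 fails its CRC and is skipped.
    }
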
mmm a / tensorflow / core / common_runtime / process_state . cc <nl> ppp b / tensorflow / core / common_runtime / process_state . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / common_runtime / process_state . h " <nl> <nl> + # include < atomic > <nl> # include < cstring > <nl> # include < vector > <nl> <nl> namespace tensorflow { <nl> return instance ; <nl> } <nl> <nl> - ProcessState : : ProcessState ( ) : numa_enabled_ ( false ) { } <nl> + ProcessState : : ProcessState ( ) <nl> + : numa_enabled_ ( false ) , cpu_allocators_cached_ ( 0 ) { } <nl> <nl> string ProcessState : : MemDesc : : DebugString ( ) { <nl> return strings : : StrCat ( ( loc = = CPU ? " CPU " : " GPU " ) , dev_index , <nl> ProcessState : : MemDesc ProcessState : : PtrType ( const void * ptr ) { <nl> <nl> Allocator * ProcessState : : GetCPUAllocator ( int numa_node ) { <nl> if ( ! numa_enabled_ | | numa_node = = port : : kNUMANoAffinity ) numa_node = 0 ; <nl> + <nl> + / / Check if allocator for the numa node is in lock - free cache . <nl> + if ( numa_node < cpu_allocators_cached_ . load ( std : : memory_order_acquire ) ) { <nl> + return cpu_allocators_cache_ [ numa_node ] ; <nl> + } <nl> + <nl> mutex_lock lock ( mu_ ) ; <nl> while ( cpu_allocators_ . size ( ) < = static_cast < size_t > ( numa_node ) ) { <nl> / / If visitors have been defined we need an Allocator built from <nl> Allocator * ProcessState : : GetCPUAllocator ( int numa_node ) { <nl> allocator = new TrackingAllocator ( allocator , true ) ; <nl> } <nl> cpu_allocators_ . push_back ( allocator ) ; <nl> + if ( cpu_allocators_ . size ( ) < cpu_allocators_cache_ . max_size ( ) ) { <nl> + cpu_allocators_cache_ [ cpu_allocators_ . size ( ) - 1 ] = allocator ; <nl> + cpu_allocators_cached_ . fetch_add ( 1 , std : : memory_order_release ) ; <nl> + } <nl> if ( ! sub_allocator ) { <nl> DCHECK ( cpu_alloc_visitors_ . empty ( ) & & cpu_free_visitors_ . empty ( ) ) ; <nl> } <nl> mmm a / tensorflow / core / common_runtime / process_state . h <nl> ppp b / tensorflow / core / common_runtime / process_state . h <nl> class ProcessState : public ProcessStateInterface { <nl> std : : vector < SubAllocator : : Visitor > cpu_alloc_visitors_ TF_GUARDED_BY ( mu_ ) ; <nl> std : : vector < SubAllocator : : Visitor > cpu_free_visitors_ TF_GUARDED_BY ( mu_ ) ; <nl> <nl> + / / A cache of cpu allocators indexed by a numa node . Used as a fast path to <nl> + / / get CPU allocator by numa node id without locking the mutex . We can ' t use <nl> + / / ` cpu_allocators_ ` storage in the lock - free path because concurrent <nl> + / / operation can deallocate the vector storage . <nl> + std : : atomic < int > cpu_allocators_cached_ ; <nl> + std : : array < Allocator * , 8 > cpu_allocators_cache_ ; <nl> + <nl> / / Optional RecordingAllocators that wrap the corresponding <nl> / / Allocators for runtime attribute use analysis . <nl> MDMap mem_desc_map_ ; <nl> | Remove mutex lock from the ProcessState : : GetCPUAllocator . | tensorflow/tensorflow | 304bfa2fb47f117d2ef4454ba20528f9a196bf41 | 2020-07-20T01:48:15Z |
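Annotation: the record above layers a lock-free read path (atomic counter + fixed-size pointer array) over the existing mutex-protected vector, which may reallocate and is therefore unsafe to read without the lock. Below is a generic sketch of that publish-with-release / read-with-acquire pattern; Registry, Thing, and the slot count are hypothetical stand-ins, not the TensorFlow code.

    #include <array>
    #include <atomic>
    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <string>
    #include <vector>

    struct Thing { std::string name; };  // stand-in for the cached allocator

    class Registry {
     public:
      // Fast path: if slot i has been published, read it without the mutex.
      // The release increment in the slow path pairs with this acquire load,
      // so the pointer stored in cache_[i] is visible before the counter covers it.
      Thing* Get(size_t i) {
        if (i < cached_.load(std::memory_order_acquire)) return cache_[i];

        // Slow path: create missing entries under the lock, then publish.
        std::lock_guard<std::mutex> lock(mu_);
        while (things_.size() <= i) {
          things_.push_back(
              std::make_unique<Thing>(Thing{"thing-" + std::to_string(things_.size())}));
          size_t idx = things_.size() - 1;
          if (idx < cache_.size()) {
            cache_[idx] = things_.back().get();
            cached_.fetch_add(1, std::memory_order_release);
          }
        }
        return things_[i].get();
      }

     private:
      std::mutex mu_;
      std::vector<std::unique_ptr<Thing>> things_;  // authoritative storage (may reallocate)
      std::array<Thing*, 8> cache_{};               // fixed storage, safe for lock-free reads
      std::atomic<size_t> cached_{0};               // number of published cache_ slots
    };

    int main() {
      Registry r;
      std::printf("%s\n", r.Get(2)->name.c_str());  // slow path, fills slots 0..2
      std::printf("%s\n", r.Get(1)->name.c_str());  // fast path, no lock taken
    }

The design point mirrored here is that the lock-free path never touches the growable container, only the fixed array whose slots are published one at a time.
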
mmm a / aten / src / TH / generic / THBlas . c <nl> ppp b / aten / src / TH / generic / THBlas . c <nl> void THBlas_ ( gemm ) ( char transa , char transb , int64_t m , int64_t n , int64_t k , re <nl> <nl> # if defined ( USE_BLAS ) & & ( defined ( TH_REAL_IS_DOUBLE ) | | defined ( TH_REAL_IS_FLOAT ) ) <nl> if ( ( m < = INT_MAX ) & & ( n < = INT_MAX ) & & ( k < = INT_MAX ) & & <nl> - ( lda > = THMax ( 1 , ( transa_ ? m : k ) ) ) & & ( lda < = INT_MAX ) & & <nl> - ( ldb > = THMax ( 1 , ( transb_ ? k : n ) ) ) & & ( ldb < = INT_MAX ) & & <nl> + ( lda > = THMax ( 1 , ( transa_ ? k : m ) ) ) & & ( lda < = INT_MAX ) & & <nl> + ( ldb > = THMax ( 1 , ( transb_ ? n : k ) ) ) & & ( ldb < = INT_MAX ) & & <nl> ( ldc > = THMax ( 1 , n ) ) & & ( ldc < = INT_MAX ) ) <nl> { <nl> int i_m = ( int ) m ; <nl> | Fix stride checks in gemm dispatch ( ) | pytorch/pytorch | 9c3cb6e652e6033e355cef92229fde5b0baf271b | 2017-11-08T14:55:25Z |
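Annotation: the one-line fix above restores the standard column-major BLAS rule for leading dimensions, which the old checks had inverted. A leading dimension is the stride between consecutive columns as stored, so it must be at least the stored row count: A stored as m x k when not transposed needs lda >= max(1, m), but k x m when transposed needs lda >= max(1, k), and likewise for B. The helper below is hypothetical and restates only the A/B conditions (the C-output check is left out).

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Column-major GEMM computes C(m x n) = op(A)(m x k) * op(B)(k x n).
    //   A not transposed: stored m x k -> lda >= max(1, m)
    //   A transposed:     stored k x m -> lda >= max(1, k)
    //   B not transposed: stored k x n -> ldb >= max(1, k)
    //   B transposed:     stored n x k -> ldb >= max(1, n)
    bool gemm_leading_dims_ok(bool transa, bool transb,
                              int64_t m, int64_t n, int64_t k,
                              int64_t lda, int64_t ldb) {
      return lda >= std::max<int64_t>(1, transa ? k : m) &&
             ldb >= std::max<int64_t>(1, transb ? n : k);
    }

    int main() {
      // A is 3 x 5 and not transposed, so lda must be >= 3 (not >= 5).
      std::printf("%d\n", gemm_leading_dims_ok(false, false, 3, 4, 5, 3, 5));  // 1
      std::printf("%d\n", gemm_leading_dims_ok(false, false, 3, 4, 5, 2, 5));  // 0
    }
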
mmm a / hphp / runtime / vm / jit / irgen - builtin . cpp <nl> ppp b / hphp / runtime / vm / jit / irgen - builtin . cpp <nl> SSATmp * opt_type_structure_classname ( IRGS & env , const ParamPrep & params ) { <nl> SSATmp * opt_is_list_like ( IRGS & env , const ParamPrep & params ) { <nl> if ( params . size ( ) ! = 1 ) return nullptr ; <nl> auto const type = params [ 0 ] . value - > type ( ) ; <nl> - / / Type might be a Ptr here , so the maybe ( ) below will go wrong if we don ' t <nl> - / / bail out here . <nl> - if ( ! ( type < = TInitCell ) ) return nullptr ; <nl> if ( type < = TClsMeth ) { <nl> raiseClsMethToVecWarningHelper ( env , params ) ; <nl> return cns ( env , true ) ; <nl> SSATmp * opt_is_list_like ( IRGS & env , const ParamPrep & params ) { <nl> return nullptr ; <nl> } <nl> <nl> + SSATmp * opt_is_vec_or_varray ( IRGS & env , const ParamPrep & params ) { <nl> + if ( params . size ( ) ! = 1 ) return nullptr ; <nl> + auto const type = params [ 0 ] . value - > type ( ) ; <nl> + <nl> + if ( type . subtypeOfAny ( TVec , TVArr ) ) { <nl> + return cns ( env , true ) ; <nl> + } <nl> + <nl> + if ( type < = TClsMeth & & RO : : EvalIsCompatibleClsMethType ) { <nl> + if ( RO : : EvalIsVecNotices ) { <nl> + auto const msg = makeStaticString ( Strings : : CLSMETH_COMPAT_IS_VEC_OR_VARR ) ; <nl> + gen ( env , RaiseNotice , make_opt_catch ( env , params ) , cns ( env , msg ) ) ; <nl> + } <nl> + return cns ( env , true ) ; <nl> + } <nl> + <nl> + if ( ! type . maybe ( TVec ) & & ! type . maybe ( TVArr ) ) { <nl> + return cns ( env , false ) ; <nl> + } <nl> + <nl> + return nullptr ; <nl> + } <nl> + <nl> + SSATmp * opt_is_dict_or_darray ( IRGS & env , const ParamPrep & params ) { <nl> + if ( params . size ( ) ! = 1 ) return nullptr ; <nl> + auto const type = params [ 0 ] . value - > type ( ) ; <nl> + <nl> + if ( type . subtypeOfAny ( TDict , TDArr ) ) { <nl> + return cns ( env , true ) ; <nl> + } <nl> + <nl> + if ( ! type . maybe ( TDict ) & & ! type . maybe ( TDArr ) ) { <nl> + return cns ( env , false ) ; <nl> + } <nl> + <nl> + return nullptr ; <nl> + } <nl> + <nl> SSATmp * opt_foldable ( IRGS & env , <nl> const Func * func , <nl> const ParamPrep & params , <nl> const hphp_fast_string_imap < OptEmitFn > s_opt_emit_fns { <nl> { " hh \ \ type_structure " , opt_type_structure } , <nl> { " hh \ \ type_structure_classname " , opt_type_structure_classname } , <nl> { " hh \ \ is_list_like " , opt_is_list_like } , <nl> + { " HH \ \ is_dict_or_darray " , opt_is_dict_or_darray } , <nl> + { " HH \ \ is_vec_or_varray " , opt_is_vec_or_varray } , <nl> { " HH \ \ Lib \ \ _Private \ \ Native \ \ first " , opt_container_first } , <nl> { " HH \ \ Lib \ \ _Private \ \ Native \ \ last " , opt_container_last } , <nl> { " HH \ \ Lib \ \ _Private \ \ Native \ \ first_key " , opt_container_first_key } , <nl> mmm a / hphp / test / slow / hack_arr_compat / is_dict_or_darray . php <nl> ppp b / hphp / test / slow / hack_arr_compat / is_dict_or_darray . 
php <nl> function check ( mixed $ arg , string $ descr ) { <nl> <nl> < < __EntryPoint > > <nl> function main ( ) : void { <nl> - check ( null , ' null ' ) ; <nl> - check ( " abc " , ' " abc " ' ) ; <nl> + $ inputs = vec [ <nl> + tuple ( null , ' null ' ) , <nl> + tuple ( " abc " , ' " abc " ' ) , <nl> <nl> - check ( varray [ 1 , 2 , 3 ] , ' varray [ 1 , 2 , 3 ] ' ) ; <nl> - check ( vec [ 1 , 2 , 3 ] , ' vec [ 1 , 2 , 3 ] ' ) ; <nl> + tuple ( varray [ 1 , 2 , 3 ] , ' varray [ 1 , 2 , 3 ] ' ) , <nl> + tuple ( vec [ 1 , 2 , 3 ] , ' vec [ 1 , 2 , 3 ] ' ) , <nl> <nl> - check ( darray [ " a " = > " a " ] , ' darray [ " a " = > " a " ] ' ) ; <nl> - check ( dict [ " a " = > " a " ] , ' dict [ " a " = > " a " ] ' ) ; <nl> + tuple ( darray [ " a " = > " a " ] , ' darray [ " a " = > " a " ] ' ) , <nl> + tuple ( dict [ " a " = > " a " ] , ' dict [ " a " = > " a " ] ' ) , <nl> <nl> - check ( keyset [ 1 , 2 , 3 ] , ' keyset [ 1 , 2 , 3 ] ' ) ; <nl> + tuple ( keyset [ 1 , 2 , 3 ] , ' keyset [ 1 , 2 , 3 ] ' ) , <nl> <nl> - check ( Vector { 1 , 2 , 3 } , ' Vector { 1 , 2 , 3 } ' ) ; <nl> - check ( Map { " a " = > " a " } , ' Map { " a " = > " a " } ' ) ; <nl> + tuple ( Vector { 1 , 2 , 3 } , ' Vector { 1 , 2 , 3 } ' ) , <nl> + tuple ( Map { " a " = > " a " } , ' Map { " a " = > " a " } ' ) , <nl> + ] ; <nl> + <nl> + echo " = = = constant values = = = \ n " ; <nl> + foreach ( $ inputs as list ( $ arg , $ descr ) ) { <nl> + check ( $ arg , $ descr ) ; <nl> + } <nl> + <nl> + echo " = = = laundered values = = = \ n " ; <nl> + foreach ( $ inputs as list ( $ arg , $ descr ) ) { <nl> + check ( __hhvm_intrinsics \ launder_value ( $ arg ) , $ descr ) ; <nl> + } <nl> } <nl> mmm a / hphp / test / slow / hack_arr_compat / is_dict_or_darray . php . expect <nl> ppp b / hphp / test / slow / hack_arr_compat / is_dict_or_darray . php . expect <nl> <nl> + = = = constant values = = = <nl> + is_dict_or_darray ( null ) = false <nl> + is_dict_or_darray ( " abc " ) = false <nl> + is_dict_or_darray ( varray [ 1 , 2 , 3 ] ) = false <nl> + is_dict_or_darray ( vec [ 1 , 2 , 3 ] ) = false <nl> + is_dict_or_darray ( darray [ " a " = > " a " ] ) = true <nl> + is_dict_or_darray ( dict [ " a " = > " a " ] ) = true <nl> + is_dict_or_darray ( keyset [ 1 , 2 , 3 ] ) = false <nl> + is_dict_or_darray ( Vector { 1 , 2 , 3 } ) = false <nl> + is_dict_or_darray ( Map { " a " = > " a " } ) = false <nl> + = = = laundered values = = = <nl> is_dict_or_darray ( null ) = false <nl> is_dict_or_darray ( " abc " ) = false <nl> is_dict_or_darray ( varray [ 1 , 2 , 3 ] ) = false <nl> mmm a / hphp / test / slow / hack_arr_compat / is_vec_or_varray . php <nl> ppp b / hphp / test / slow / hack_arr_compat / is_vec_or_varray . php <nl> function check ( mixed $ arg , string $ descr ) { <nl> $ descr , <nl> HH \ is_vec_or_varray ( $ arg ) ? 
' true ' : ' false ' , <nl> ) ; <nl> + <nl> + <nl> } <nl> <nl> < < __EntryPoint > > <nl> function main ( ) : void { <nl> - check ( null , ' null ' ) ; <nl> - check ( " abc " , ' " abc " ' ) ; <nl> + $ inputs = vec [ <nl> + tuple ( null , ' null ' ) , <nl> + tuple ( " abc " , ' " abc " ' ) , <nl> + <nl> + tuple ( varray [ 1 , 2 , 3 ] , ' varray [ 1 , 2 , 3 ] ' ) , <nl> + tuple ( vec [ 1 , 2 , 3 ] , ' vec [ 1 , 2 , 3 ] ' ) , <nl> + <nl> + tuple ( darray [ " a " = > " a " ] , ' darray [ " a " = > " a " ] ' ) , <nl> + tuple ( dict [ " a " = > " a " ] , ' dict [ " a " = > " a " ] ' ) , <nl> <nl> - check ( varray [ 1 , 2 , 3 ] , ' varray [ 1 , 2 , 3 ] ' ) ; <nl> - check ( vec [ 1 , 2 , 3 ] , ' vec [ 1 , 2 , 3 ] ' ) ; <nl> + tuple ( keyset [ 1 , 2 , 3 ] , ' keyset [ 1 , 2 , 3 ] ' ) , <nl> <nl> - check ( darray [ " a " = > " a " ] , ' darray [ " a " = > " a " ] ' ) ; <nl> - check ( dict [ " a " = > " a " ] , ' dict [ " a " = > " a " ] ' ) ; <nl> + tuple ( Vector { 1 , 2 , 3 } , ' Vector { 1 , 2 , 3 } ' ) , <nl> + tuple ( Map { " a " = > " a " } , ' Map { " a " = > " a " } ' ) , <nl> + ] ; <nl> <nl> - check ( keyset [ 1 , 2 , 3 ] , ' keyset [ 1 , 2 , 3 ] ' ) ; <nl> + echo " = = = constant values = = = \ n " ; <nl> + foreach ( $ inputs as list ( $ arg , $ descr ) ) { <nl> + check ( $ arg , $ descr ) ; <nl> + } <nl> <nl> - check ( Vector { 1 , 2 , 3 } , ' Vector { 1 , 2 , 3 } ' ) ; <nl> - check ( Map { " a " = > " a " } , ' Map { " a " = > " a " } ' ) ; <nl> + echo " = = = laundered values = = = \ n " ; <nl> + foreach ( $ inputs as list ( $ arg , $ descr ) ) { <nl> + check ( __hhvm_intrinsics \ launder_value ( $ arg ) , $ descr ) ; <nl> + } <nl> } <nl> mmm a / hphp / test / slow / hack_arr_compat / is_vec_or_varray . php . expect <nl> ppp b / hphp / test / slow / hack_arr_compat / is_vec_or_varray . php . expect <nl> <nl> + = = = constant values = = = <nl> + is_vec_or_varray ( null ) = false <nl> + is_vec_or_varray ( " abc " ) = false <nl> + is_vec_or_varray ( varray [ 1 , 2 , 3 ] ) = true <nl> + is_vec_or_varray ( vec [ 1 , 2 , 3 ] ) = true <nl> + is_vec_or_varray ( darray [ " a " = > " a " ] ) = false <nl> + is_vec_or_varray ( dict [ " a " = > " a " ] ) = false <nl> + is_vec_or_varray ( keyset [ 1 , 2 , 3 ] ) = false <nl> + is_vec_or_varray ( Vector { 1 , 2 , 3 } ) = false <nl> + is_vec_or_varray ( Map { " a " = > " a " } ) = false <nl> + = = = laundered values = = = <nl> is_vec_or_varray ( null ) = false <nl> is_vec_or_varray ( " abc " ) = false <nl> is_vec_or_varray ( varray [ 1 , 2 , 3 ] ) = true <nl> | optimize HH \ is_vec_or_varray and HH \ is_dict_or_darray in JIT | facebook/hhvm | c925dd3a62703732e604823359ba073fd52cdd57 | 2020-07-01T01:12:39Z |
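Annotation: the optimization in the record above folds the builtin to a compile-time constant whenever the statically inferred type fully decides the answer, and bails out (returns nullptr) when it does not. The sketch below shows that three-way shape with a toy bitmask lattice; it is not HHVM's Type implementation, and the ClsMeth-notice branch from the real change is omitted.

    #include <cstdio>
    #include <optional>

    // Toy static-type lattice, standing in for the JIT's inferred Type.
    enum Type : unsigned {
      TVecLike  = 1u << 0,   // vec / varray
      TDictLike = 1u << 1,   // dict / darray
      TOther    = 1u << 2,   // everything else
    };

    // If the inferred type is entirely inside the "yes" set, fold to true;
    // if it cannot intersect the "yes" set, fold to false; otherwise give up
    // and leave the check to run at runtime.
    std::optional<bool> fold_is_vec_like(unsigned type_bits) {
      if (type_bits != 0 && (type_bits & ~TVecLike) == 0) return true;   // subtype
      if ((type_bits & TVecLike) == 0) return false;                     // disjoint
      return std::nullopt;                                               // unknown
    }

    int main() {
      auto show = [](std::optional<bool> r) {
        std::printf("%s\n", r ? (*r ? "fold true" : "fold false") : "no fold");
      };
      show(fold_is_vec_like(TVecLike));            // fold true
      show(fold_is_vec_like(TDictLike | TOther));  // fold false
      show(fold_is_vec_like(TVecLike | TOther));   // no fold
    }

The added "laundered values" halves of the tests exist precisely to exercise the no-fold path, where the value's type is unknown at compile time.
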
mmm a / file / file_util . cc <nl> ppp b / file / file_util . cc <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> <nl> / / Utility function to copy a file up to a specified length <nl> - Status CopyFile ( FileSystem * fs , const std : : string & source , <nl> - const std : : string & destination , uint64_t size , bool use_fsync ) { <nl> + IOStatus CopyFile ( FileSystem * fs , const std : : string & source , <nl> + const std : : string & destination , uint64_t size , <nl> + bool use_fsync ) { <nl> const FileOptions soptions ; <nl> - Status s ; <nl> + IOStatus io_s ; <nl> std : : unique_ptr < SequentialFileReader > src_reader ; <nl> std : : unique_ptr < WritableFileWriter > dest_writer ; <nl> <nl> { <nl> std : : unique_ptr < FSSequentialFile > srcfile ; <nl> - s = fs - > NewSequentialFile ( source , soptions , & srcfile , nullptr ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + io_s = fs - > NewSequentialFile ( source , soptions , & srcfile , nullptr ) ; <nl> + if ( ! io_s . ok ( ) ) { <nl> + return io_s ; <nl> } <nl> std : : unique_ptr < FSWritableFile > destfile ; <nl> - s = fs - > NewWritableFile ( destination , soptions , & destfile , nullptr ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + io_s = fs - > NewWritableFile ( destination , soptions , & destfile , nullptr ) ; <nl> + if ( ! io_s . ok ( ) ) { <nl> + return io_s ; <nl> } <nl> <nl> if ( size = = 0 ) { <nl> / / default argument means copy everything <nl> - s = fs - > GetFileSize ( source , IOOptions ( ) , & size , nullptr ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + io_s = fs - > GetFileSize ( source , IOOptions ( ) , & size , nullptr ) ; <nl> + if ( ! io_s . ok ( ) ) { <nl> + return io_s ; <nl> } <nl> } <nl> src_reader . reset ( new SequentialFileReader ( std : : move ( srcfile ) , source ) ) ; <nl> Status CopyFile ( FileSystem * fs , const std : : string & source , <nl> Slice slice ; <nl> while ( size > 0 ) { <nl> size_t bytes_to_read = std : : min ( sizeof ( buffer ) , static_cast < size_t > ( size ) ) ; <nl> - s = src_reader - > Read ( bytes_to_read , & slice , buffer ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + io_s = status_to_io_status ( src_reader - > Read ( bytes_to_read , & slice , buffer ) ) ; <nl> + if ( ! io_s . ok ( ) ) { <nl> + return io_s ; <nl> } <nl> if ( slice . size ( ) = = 0 ) { <nl> - return Status : : Corruption ( " file too small " ) ; <nl> + return IOStatus : : Corruption ( " file too small " ) ; <nl> } <nl> - s = dest_writer - > Append ( slice ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + io_s = dest_writer - > Append ( slice ) ; <nl> + if ( ! io_s . ok ( ) ) { <nl> + return io_s ; <nl> } <nl> size - = slice . size ( ) ; <nl> } <nl> Status CopyFile ( FileSystem * fs , const std : : string & source , <nl> } <nl> <nl> / / Utility function to create a file with the provided contents <nl> - Status CreateFile ( FileSystem * fs , const std : : string & destination , <nl> - const std : : string & contents , bool use_fsync ) { <nl> + IOStatus CreateFile ( FileSystem * fs , const std : : string & destination , <nl> + const std : : string & contents , bool use_fsync ) { <nl> const EnvOptions soptions ; <nl> - Status s ; <nl> + IOStatus io_s ; <nl> std : : unique_ptr < WritableFileWriter > dest_writer ; <nl> <nl> std : : unique_ptr < FSWritableFile > destfile ; <nl> - s = fs - > NewWritableFile ( destination , soptions , & destfile , nullptr ) ; <nl> - if ( ! s . 
ok ( ) ) { <nl> - return s ; <nl> + io_s = fs - > NewWritableFile ( destination , soptions , & destfile , nullptr ) ; <nl> + if ( ! io_s . ok ( ) ) { <nl> + return io_s ; <nl> } <nl> dest_writer . reset ( <nl> new WritableFileWriter ( std : : move ( destfile ) , destination , soptions ) ) ; <nl> - s = dest_writer - > Append ( Slice ( contents ) ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + io_s = dest_writer - > Append ( Slice ( contents ) ) ; <nl> + if ( ! io_s . ok ( ) ) { <nl> + return io_s ; <nl> } <nl> return dest_writer - > Sync ( use_fsync ) ; <nl> } <nl> mmm a / file / file_util . h <nl> ppp b / file / file_util . h <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> / / use_fsync maps to options . use_fsync , which determines the way that <nl> / / the file is synced after copying . <nl> - extern Status CopyFile ( FileSystem * fs , const std : : string & source , <nl> - const std : : string & destination , uint64_t size , <nl> - bool use_fsync ) ; <nl> + extern IOStatus CopyFile ( FileSystem * fs , const std : : string & source , <nl> + const std : : string & destination , uint64_t size , <nl> + bool use_fsync ) ; <nl> <nl> - extern Status CreateFile ( FileSystem * fs , const std : : string & destination , <nl> - const std : : string & contents , bool use_fsync ) ; <nl> + extern IOStatus CreateFile ( FileSystem * fs , const std : : string & destination , <nl> + const std : : string & contents , bool use_fsync ) ; <nl> <nl> extern Status DeleteDBFile ( const ImmutableDBOptions * db_options , <nl> const std : : string & fname , <nl> | Replace Status with IOStatus in CopyFile and CreateFile ( ) | facebook/rocksdb | 556972e964026a95edecb28cf70a6f36013dd3aa | 2020-06-02T22:05:06Z |
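Annotation: apart from the Status-to-IOStatus change, the record above shows the chunked-copy shape of CopyFile: read at most one buffer per iteration, treat a short source as corruption, and return the first error instead of continuing. A self-contained stand-in using std::ifstream/std::ofstream follows; plain strings play the role of IOStatus, and the paths in main are only illustrative.

    #include <algorithm>
    #include <cstdint>
    #include <fstream>
    #include <iostream>
    #include <string>
    #include <vector>

    // Copy exactly `size` bytes from src to dst; empty return string means OK.
    std::string copy_file_prefix(const std::string& src, const std::string& dst,
                                 std::uint64_t size) {
      std::ifstream in(src, std::ios::binary);
      if (!in) return "cannot open source";
      std::ofstream out(dst, std::ios::binary | std::ios::trunc);
      if (!out) return "cannot open destination";

      std::vector<char> buffer(4096);
      while (size > 0) {
        std::size_t want =
            static_cast<std::size_t>(std::min<std::uint64_t>(buffer.size(), size));
        in.read(buffer.data(), static_cast<std::streamsize>(want));
        std::streamsize got = in.gcount();
        if (got == 0) return "corruption: file too small";   // source ended early
        if (!out.write(buffer.data(), got)) return "write failed";
        size -= static_cast<std::uint64_t>(got);
      }
      out.flush();
      return out ? "" : "flush failed";
    }

    int main() {
      std::string s = copy_file_prefix("/etc/hostname", "/tmp/hostname.copy", 4);
      std::cout << (s.empty() ? "ok" : s) << "\n";
    }
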
mmm a / dbms / include / DB / DataTypes / DataTypeNested . h <nl> ppp b / dbms / include / DB / DataTypes / DataTypeNested . h <nl> class DataTypeNested : public IDataType <nl> throw Exception ( " Method getDefault is not supported for " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> } <nl> <nl> - const NamesAndTypesListPtr & getNestedTypes ( ) const { return nested ; } <nl> + const NamesAndTypesListPtr & getNestedTypesList ( ) const { return nested ; } <nl> const DataTypePtr & getOffsetsType ( ) const { return offsets ; } <nl> } ; <nl> <nl> mmm a / dbms / include / DB / Storages / MergeTree / MergeTreeBlockInputStream . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / MergeTreeBlockInputStream . h <nl> class MergeTreeBlockInputStream : public IProfilingBlockInputStream <nl> <nl> addStream ( name , * type_arr - > getNestedType ( ) , mark_number , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + String escaped_size_name = escaped_column_name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + streams . insert ( std : : make_pair ( size_name , new Stream ( <nl> + path + escaped_size_name , <nl> + mark_number ) ) ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addStream ( name + " . " + it - > first , * it - > second , mark_number , level + 1 ) ; <nl> + } <nl> else <nl> streams . insert ( std : : make_pair ( name , new Stream ( <nl> path + escaped_column_name , <nl> class MergeTreeBlockInputStream : public IProfilingBlockInputStream <nl> type_arr - > deserializeOffsets ( <nl> column , <nl> streams [ name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ] - > compressed , <nl> - max_rows_to_read ) ; <nl> + max_rows_to_read ) ; <nl> <nl> if ( column . size ( ) ) <nl> readData ( <nl> name , <nl> * type_arr - > getNestedType ( ) , <nl> - dynamic_cast < ColumnArray & > ( column ) . getData ( ) , <nl> - dynamic_cast < const ColumnArray & > ( column ) . getOffsets ( ) [ column . size ( ) - 1 ] , <nl> - level + 1 ) ; <nl> + dynamic_cast < ColumnArray & > ( column ) . getData ( ) , <nl> + dynamic_cast < const ColumnArray & > ( column ) . getOffsets ( ) [ column . size ( ) - 1 ] , <nl> + level + 1 ) ; <nl> + } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + type_nested - > deserializeOffsets ( <nl> + column , <nl> + streams [ name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ] - > compressed , <nl> + max_rows_to_read ) ; <nl> + <nl> + if ( column . size ( ) ) <nl> + { <nl> + ColumnNested & column_nested = dynamic_cast < ColumnNested & > ( column ) ; <nl> + <nl> + NamesAndTypesList : : const_iterator it = type_nested - > getNestedTypesList ( ) - > begin ( ) ; <nl> + for ( size_t i = 0 ; i < column_nested . getData ( ) . size ( ) ; + + i , + + it ) <nl> + { <nl> + readData ( <nl> + name + " . " + it - > first , <nl> + * it - > second , <nl> + * column_nested . getData ( ) [ i ] , <nl> + column_nested . getOffsets ( ) [ column . size ( ) - 1 ] , <nl> + level + 1 ) ; <nl> + } <nl> + } <nl> } <nl> else <nl> type . 
deserializeBinary ( column , streams [ name ] - > compressed , max_rows_to_read ) ; <nl> mmm a / dbms / include / DB / Storages / MergeTree / MergeTreeBlockOutputStream . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / MergeTreeBlockOutputStream . h <nl> class MergeTreeBlockOutputStream : public IBlockOutputStream <nl> prev_mark + = storage . index_granularity ; <nl> } <nl> } <nl> + if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = escaped_column_name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + WriteBufferFromFile plain ( path + size_name + " . bin " , DBMS_DEFAULT_BUFFER_SIZE , flags ) ; <nl> + WriteBufferFromFile marks ( path + size_name + " . mrk " , 4096 , flags ) ; <nl> + CompressedWriteBuffer compressed ( plain ) ; <nl> + <nl> + size_t prev_mark = 0 ; <nl> + while ( prev_mark < size ) <nl> + { <nl> + / / / Каждая засечка - это : ( смещение в файле до начала сжатого блока , смещение внутри блока ) <nl> + writeIntBinary ( plain . count ( ) , marks ) ; <nl> + writeIntBinary ( compressed . offset ( ) , marks ) ; <nl> + <nl> + type_nested - > serializeOffsets ( column , compressed , prev_mark , storage . index_granularity ) ; <nl> + prev_mark + = storage . index_granularity ; <nl> + } <nl> + } <nl> <nl> { <nl> WriteBufferFromFile plain ( path + escaped_column_name + " . bin " , DBMS_DEFAULT_BUFFER_SIZE , flags ) ; <nl> mmm a / dbms / include / DB / Storages / MergeTree / MergedBlockOutputStream . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / MergedBlockOutputStream . h <nl> class MergedBlockOutputStream : public IBlockOutputStream <nl> <nl> addStream ( name , * type_arr - > getNestedType ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + String escaped_size_name = escaped_column_name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + column_streams [ size_name ] = new ColumnStream ( <nl> + part_tmp_path + escaped_size_name + " . bin " , <nl> + part_tmp_path + escaped_size_name + " . mrk " ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addStream ( name + " . " + it - > first , * it - > second , level + 1 ) ; <nl> + } <nl> else <nl> column_streams [ name ] = new ColumnStream ( <nl> part_tmp_path + escaped_column_name + " . bin " , <nl> class MergedBlockOutputStream : public IBlockOutputStream <nl> prev_mark + = limit ; <nl> } <nl> } <nl> + if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + ColumnStream & stream = * column_streams [ size_name ] ; <nl> + <nl> + size_t prev_mark = 0 ; <nl> + while ( prev_mark < size ) <nl> + { <nl> + size_t limit = 0 ; <nl> + <nl> + / / / Если есть index_offset , то первая засечка идёт не сразу , а после этого количества строк . <nl> + if ( prev_mark = = 0 & & index_offset ! = 0 ) <nl> + { <nl> + limit = index_offset ; <nl> + } <nl> + else <nl> + { <nl> + limit = storage . index_granularity ; <nl> + writeIntBinary ( stream . plain . count ( ) , stream . marks ) ; <nl> + writeIntBinary ( stream . compressed . 
offset ( ) , stream . marks ) ; <nl> + } <nl> + <nl> + type_nested - > serializeOffsets ( column , stream . compressed , prev_mark , limit ) ; <nl> + prev_mark + = limit ; <nl> + } <nl> + } <nl> <nl> { <nl> ColumnStream & stream = * column_streams [ name ] ; <nl> mmm a / dbms / src / DataStreams / NativeBlockInputStream . cpp <nl> ppp b / dbms / src / DataStreams / NativeBlockInputStream . cpp <nl> <nl> # include < DB / IO / VarInt . h > <nl> <nl> # include < DB / Columns / ColumnArray . h > <nl> + # include < DB / Columns / ColumnNested . h > <nl> <nl> # include < DB / DataTypes / DataTypeArray . h > <nl> + # include < DB / DataTypes / DataTypeNested . h > <nl> <nl> # include < DB / DataStreams / NativeBlockInputStream . h > <nl> <nl> static void readData ( const IDataType & type , IColumn & column , ReadBuffer & istr <nl> istr , <nl> dynamic_cast < const ColumnArray & > ( column ) . getOffsets ( ) [ rows - 1 ] ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + ColumnNested & column_nested = dynamic_cast < ColumnNested & > ( column ) ; <nl> + IColumn & offsets_column = * column_nested . getOffsetsColumn ( ) ; <nl> + type_nested - > getOffsetsType ( ) - > deserializeBinary ( offsets_column , istr , rows ) ; <nl> + <nl> + if ( offsets_column . size ( ) ! = rows ) <nl> + throw Exception ( " Cannot read all data in NativeBlockInputStream . " , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> + <nl> + if ( rows ) <nl> + { <nl> + NamesAndTypesList : : const_iterator it = type_nested - > getNestedTypesList ( ) - > begin ( ) ; <nl> + for ( size_t i = 0 ; i < column_nested . getData ( ) . size ( ) ; + + i , + + it ) <nl> + { <nl> + readData ( <nl> + * it - > second , <nl> + * column_nested . getData ( ) [ i ] , <nl> + istr , <nl> + column_nested . getOffsets ( ) [ rows - 1 ] ) ; <nl> + } <nl> + } <nl> + } <nl> else <nl> type . deserializeBinary ( column , istr , rows ) ; <nl> <nl> mmm a / dbms / src / DataStreams / NativeBlockOutputStream . cpp <nl> ppp b / dbms / src / DataStreams / NativeBlockOutputStream . cpp <nl> <nl> <nl> # include < DB / Columns / ColumnConst . h > <nl> # include < DB / Columns / ColumnArray . h > <nl> + # include < DB / Columns / ColumnNested . h > <nl> <nl> # include < DB / DataTypes / DataTypeArray . h > <nl> + # include < DB / DataTypes / DataTypeNested . h > <nl> <nl> # include < DB / DataStreams / NativeBlockOutputStream . h > <nl> <nl> static void writeData ( const IDataType & type , const IColumn & column , WriteBuffe <nl> if ( ! dynamic_cast < const ColumnArray & > ( column ) . getData ( ) . empty ( ) ) <nl> writeData ( * type_arr - > getNestedType ( ) , dynamic_cast < const ColumnArray & > ( column ) . getData ( ) , ostr ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + const ColumnNested & column_nested = dynamic_cast < const ColumnNested & > ( column ) ; <nl> + <nl> + type_nested - > getOffsetsType ( ) - > serializeBinary ( * column_nested . getOffsetsColumn ( ) , ostr ) ; <nl> + <nl> + NamesAndTypesList : : const_iterator it = type_nested - > getNestedTypesList ( ) - > begin ( ) ; <nl> + for ( size_t i = 0 ; i < column_nested . getData ( ) . size ( ) ; + + i , + + it ) <nl> + { <nl> + if ( column_nested . getData ( ) [ i ] - > empty ( ) ) <nl> + break ; <nl> + writeData ( * it - > second , * column_nested . getData ( ) [ i ] , ostr ) ; <nl> + } <nl> + } <nl> else <nl> type . 
serializeBinary ( column , ostr ) ; <nl> } <nl> mmm a / dbms / src / DataTypes / DataTypeNested . cpp <nl> ppp b / dbms / src / DataTypes / DataTypeNested . cpp <nl> std : : string DataTypeNested : : getName ( ) const <nl> <nl> void DataTypeNested : : serializeBinary ( const Field & field , WriteBuffer & ostr ) const <nl> { <nl> - throw Exception ( " Method serializeBinary ( const Field & field , WriteBuffer & ostr ) is not supported for " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + throw Exception ( " Method serializeBinary ( const Field & , WriteBuffer & ) is not supported for " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> } <nl> <nl> <nl> void DataTypeNested : : deserializeBinary ( Field & field , ReadBuffer & istr ) const <nl> { <nl> - throw Exception ( " Method deserializeBinary ( Field & field , ReadBuffer & istr ) is not supported for " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + throw Exception ( " Method deserializeBinary ( Field & , ReadBuffer & ) is not supported for " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> } <nl> <nl> <nl> mmm a / dbms / src / Storages / StorageLog . cpp <nl> ppp b / dbms / src / Storages / StorageLog . cpp <nl> <nl> # include < DB / IO / WriteHelpers . h > <nl> <nl> # include < DB / DataTypes / DataTypeArray . h > <nl> + # include < DB / DataTypes / DataTypeNested . h > <nl> <nl> # include < DB / Columns / ColumnArray . h > <nl> + # include < DB / Columns / ColumnNested . h > <nl> <nl> # include < DB / Storages / StorageLog . h > <nl> <nl> void LogBlockInputStream : : addStream ( const String & name , const IDataType & type , <nl> <nl> addStream ( name , * type_arr - > getNestedType ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + streams . insert ( std : : make_pair ( size_name , new Stream ( <nl> + storage . files [ size_name ] . data_file . path ( ) , <nl> + mark_number <nl> + ? storage . files [ size_name ] . marks [ mark_number ] . offset <nl> + : 0 ) ) ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addStream ( name + " . " + it - > first , * it - > second , level + 1 ) ; <nl> + } <nl> else <nl> streams . insert ( std : : make_pair ( name , new Stream ( <nl> storage . files [ name ] . data_file . path ( ) , <nl> void LogBlockInputStream : : readData ( const String & name , const IDataType & type , <nl> dynamic_cast < const ColumnArray & > ( column ) . getOffsets ( ) [ column . size ( ) - 1 ] , <nl> level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + type_nested - > deserializeOffsets ( <nl> + column , <nl> + streams [ name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ] - > compressed , <nl> + max_rows_to_read ) ; <nl> + <nl> + if ( column . size ( ) ) <nl> + { <nl> + ColumnNested & column_nested = dynamic_cast < ColumnNested & > ( column ) ; <nl> + <nl> + NamesAndTypesList : : const_iterator it = type_nested - > getNestedTypesList ( ) - > begin ( ) ; <nl> + for ( size_t i = 0 ; i < column_nested . getData ( ) . size ( ) ; + + i , + + it ) <nl> + { <nl> + readData ( <nl> + name + " . " + it - > first , <nl> + * it - > second , <nl> + * column_nested . 
getData ( ) [ i ] , <nl> + column_nested . getOffsets ( ) [ column . size ( ) - 1 ] , <nl> + level + 1 ) ; <nl> + } <nl> + } <nl> + } <nl> else <nl> type . deserializeBinary ( column , streams [ name ] - > compressed , max_rows_to_read ) ; <nl> } <nl> void LogBlockOutputStream : : addStream ( const String & name , const IDataType & type <nl> <nl> addStream ( name , * type_arr - > getNestedType ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + streams . insert ( std : : make_pair ( size_name , new Stream ( <nl> + storage . files [ size_name ] . data_file . path ( ) ) ) ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addStream ( name + " . " + it - > first , * it - > second , level + 1 ) ; <nl> + } <nl> else <nl> streams . insert ( std : : make_pair ( name , new Stream ( <nl> storage . files [ name ] . data_file . path ( ) ) ) ) ; <nl> void LogBlockOutputStream : : writeData ( const String & name , const IDataType & type <nl> <nl> writeData ( name , * type_arr - > getNestedType ( ) , dynamic_cast < const ColumnArray & > ( column ) . getData ( ) , out_marks , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + Mark mark ; <nl> + mark . rows = ( storage . files [ size_name ] . marks . empty ( ) ? 0 : storage . files [ size_name ] . marks . back ( ) . rows ) + column . size ( ) ; <nl> + mark . offset = streams [ size_name ] - > plain_offset + streams [ size_name ] - > plain . count ( ) ; <nl> + <nl> + out_marks . push_back ( std : : make_pair ( storage . files [ size_name ] . column_index , mark ) ) ; <nl> + <nl> + type_nested - > serializeOffsets ( column , streams [ size_name ] - > compressed ) ; <nl> + streams [ size_name ] - > compressed . next ( ) ; <nl> + <nl> + const ColumnNested & column_nested = dynamic_cast < const ColumnNested & > ( column ) ; <nl> + <nl> + NamesAndTypesList : : const_iterator it = type_nested - > getNestedTypesList ( ) - > begin ( ) ; <nl> + for ( size_t i = 0 ; i < column_nested . getData ( ) . size ( ) ; + + i , + + it ) <nl> + { <nl> + writeData ( <nl> + name + " . " + it - > first , <nl> + * it - > second , <nl> + * column_nested . getData ( ) [ i ] , <nl> + out_marks , <nl> + level + 1 ) ; <nl> + } <nl> + } <nl> else <nl> { <nl> Mark mark ; <nl> StoragePtr StorageLog : : create ( const std : : string & path_ , const std : : string & nam <nl> void StorageLog : : addFile ( const String & column_name , const IDataType & type , size_t level ) <nl> { <nl> if ( files . end ( ) ! = files . find ( column_name ) ) <nl> - throw Exception ( " Duplicate column with name " + column_name + " in constructor of StorageTinyLog . " , <nl> + throw Exception ( " Duplicate column with name " + column_name + " in constructor of StorageLog . 
" , <nl> ErrorCodes : : DUPLICATE_COLUMN ) ; <nl> <nl> if ( const DataTypeArray * type_arr = dynamic_cast < const DataTypeArray * > ( & type ) ) <nl> void StorageLog : : addFile ( const String & column_name , const IDataType & type , siz <nl> <nl> addFile ( column_name , * type_arr - > getNestedType ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_column_suffix = ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + ColumnData & column_data = files . insert ( std : : make_pair ( column_name + size_column_suffix , ColumnData ( ) ) ) . first - > second ; <nl> + column_data . column_index = column_names . size ( ) ; <nl> + column_data . data_file = Poco : : File ( <nl> + path + escapeForFileName ( name ) + ' / ' + escapeForFileName ( column_name ) + size_column_suffix + DBMS_STORAGE_LOG_DATA_FILE_EXTENSION ) ; <nl> + <nl> + column_names . push_back ( column_name + size_column_suffix ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addFile ( column_name + " . " + it - > first , * it - > second , level + 1 ) ; <nl> + } <nl> else <nl> { <nl> ColumnData & column_data = files . insert ( std : : make_pair ( column_name , ColumnData ( ) ) ) . first - > second ; <nl> mmm a / dbms / src / Storages / StorageMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageMergeTree . cpp <nl> <nl> <nl> # include < DB / Columns / ColumnsNumber . h > <nl> # include < DB / Columns / ColumnArray . h > <nl> + # include < DB / Columns / ColumnNested . h > <nl> <nl> # include < DB / DataTypes / DataTypesNumberFixed . h > <nl> # include < DB / DataTypes / DataTypeArray . h > <nl> + # include < DB / DataTypes / DataTypeNested . h > <nl> <nl> # include < DB / DataStreams / IProfilingBlockInputStream . h > <nl> # include < DB / DataStreams / MergingSortedBlockInputStream . h > <nl> mmm a / dbms / src / Storages / StorageTinyLog . cpp <nl> ppp b / dbms / src / Storages / StorageTinyLog . cpp <nl> <nl> # include < DB / IO / WriteHelpers . h > <nl> <nl> # include < DB / DataTypes / DataTypeArray . h > <nl> + # include < DB / DataTypes / DataTypeNested . h > <nl> <nl> # include < DB / Columns / ColumnArray . h > <nl> + # include < DB / Columns / ColumnNested . h > <nl> <nl> # include < DB / Storages / StorageTinyLog . h > <nl> <nl> void TinyLogBlockInputStream : : addStream ( const String & name , const IDataType & t <nl> <nl> addStream ( name , * type_arr - > getNestedType ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + streams . insert ( std : : make_pair ( size_name , new Stream ( storage . files [ size_name ] . data_file . path ( ) ) ) ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addStream ( name + " . " + it - > first , * it - > second , level + 1 ) ; <nl> + } <nl> else <nl> streams . insert ( std : : make_pair ( name , new Stream ( storage . files [ name ] . data_file . 
path ( ) ) ) ) ; <nl> } <nl> void TinyLogBlockInputStream : : readData ( const String & name , const IDataType & ty <nl> throw Exception ( " Cannot read array data for all offsets " , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> } <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + type_nested - > deserializeOffsets ( <nl> + column , <nl> + streams [ name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ] - > compressed , <nl> + limit ) ; <nl> + <nl> + if ( column . size ( ) ) <nl> + { <nl> + ColumnNested & column_nested = dynamic_cast < ColumnNested & > ( column ) ; <nl> + <nl> + NamesAndTypesList : : const_iterator it = type_nested - > getNestedTypesList ( ) - > begin ( ) ; <nl> + for ( size_t i = 0 ; i < column_nested . getData ( ) . size ( ) ; + + i , + + it ) <nl> + { <nl> + readData ( <nl> + name + " . " + it - > first , <nl> + * it - > second , <nl> + * column_nested . getData ( ) [ i ] , <nl> + column_nested . getOffsets ( ) [ column . size ( ) - 1 ] , <nl> + level + 1 ) ; <nl> + } <nl> + } <nl> + } <nl> else <nl> type . deserializeBinary ( column , streams [ name ] - > compressed , limit ) ; <nl> } <nl> void TinyLogBlockOutputStream : : addStream ( const String & name , const IDataType & <nl> <nl> addStream ( name , * type_arr - > getNestedType ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + streams . insert ( std : : make_pair ( size_name , new Stream ( storage . files [ size_name ] . data_file . path ( ) ) ) ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addStream ( name + " . " + it - > first , * it - > second , level + 1 ) ; <nl> + } <nl> else <nl> streams . insert ( std : : make_pair ( name , new Stream ( storage . files [ name ] . data_file . path ( ) ) ) ) ; <nl> } <nl> void TinyLogBlockOutputStream : : writeData ( const String & name , const IDataType & <nl> <nl> writeData ( name , * type_arr - > getNestedType ( ) , dynamic_cast < const ColumnArray & > ( column ) . getData ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + type_nested - > serializeOffsets ( column , streams [ size_name ] - > compressed ) ; <nl> + <nl> + const ColumnNested & column_nested = dynamic_cast < const ColumnNested & > ( column ) ; <nl> + <nl> + NamesAndTypesList : : const_iterator it = type_nested - > getNestedTypesList ( ) - > begin ( ) ; <nl> + for ( size_t i = 0 ; i < column_nested . getData ( ) . size ( ) ; + + i , + + it ) <nl> + { <nl> + writeData ( <nl> + name + " . " + it - > first , <nl> + * it - > second , <nl> + * column_nested . getData ( ) [ i ] , <nl> + level + 1 ) ; <nl> + } <nl> + } <nl> else <nl> type . 
serializeBinary ( column , streams [ name ] - > compressed ) ; <nl> } <nl> void StorageTinyLog : : addFile ( const String & column_name , const IDataType & type , <nl> <nl> addFile ( column_name , * type_arr - > getNestedType ( ) , level + 1 ) ; <nl> } <nl> + else if ( const DataTypeNested * type_nested = dynamic_cast < const DataTypeNested * > ( & type ) ) <nl> + { <nl> + String size_column_suffix = ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString ( level ) ; <nl> + <nl> + ColumnData column_data ; <nl> + files . insert ( std : : make_pair ( column_name + size_column_suffix , column_data ) ) ; <nl> + files [ column_name + size_column_suffix ] . data_file = Poco : : File ( <nl> + path + escapeForFileName ( name ) + ' / ' + escapeForFileName ( column_name ) + size_column_suffix + DBMS_STORAGE_LOG_DATA_FILE_EXTENSION ) ; <nl> + <nl> + const NamesAndTypesList & columns = * type_nested - > getNestedTypesList ( ) ; <nl> + for ( NamesAndTypesList : : const_iterator it = columns . begin ( ) ; it ! = columns . end ( ) ; + + it ) <nl> + addFile ( column_name + " . " + it - > first , * it - > second , level + 1 ) ; <nl> + } <nl> else <nl> { <nl> ColumnData column_data ; <nl> | propagated [ de ] serialization to storages and ( in | out ) put streams [ # CONV - 7967 ] | ClickHouse/ClickHouse | 090aae8e7b0a52609f4d173b8441c037fef370b3 | 2013-07-12T13:35:05Z |
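Annotation: the recurring pattern in the record above is that a nested column is stored as one offsets stream plus one stream per child column named "<parent>.<child>", recursing for deeper nesting. The sketch below only enumerates those flat stream names; TypeNode and collectStreams are hypothetical, and the ".size" suffix is an assumption standing in for ARRAY_SIZES_COLUMN_NAME_SUFFIX, whose value the diff does not show.

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical schema node: a column is either a scalar or a nested group.
    struct TypeNode {
        std::string name;
        bool is_nested = false;
        std::vector<TypeNode> children;  // only used when is_nested
    };

    // Mirror of the recursive addStream/addFile shape: a nested column yields
    // one "<name>.size<level>" offsets stream plus the streams of each child
    // under the name "<parent>.<child>", one level deeper.
    void collectStreams(const TypeNode& t, size_t level, std::vector<std::string>& out) {
        if (t.is_nested) {
            out.push_back(t.name + ".size" + std::to_string(level));
            for (const auto& child : t.children) {
                TypeNode flat = child;
                flat.name = t.name + "." + child.name;
                collectStreams(flat, level + 1, out);
            }
        } else {
            out.push_back(t.name);
        }
    }

    int main() {
        TypeNode goals{"Goals", true, {{"ID"}, {"EventTime"}}};
        std::vector<std::string> streams;
        collectStreams(goals, 0, streams);
        for (const auto& s : streams) std::cout << s << "\n";
        // Prints: Goals.size0, Goals.ID, Goals.EventTime (one per line)
    }
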
mmm a / buildscripts / resmokelib / logging / buildlogger . py <nl> ppp b / buildscripts / resmokelib / logging / buildlogger . py <nl> <nl> from __future__ import absolute_import <nl> <nl> import functools <nl> + import urllib2 <nl> <nl> from . import handlers <nl> from . import loggers <nl> <nl> CREATE_TEST_ENDPOINT = " / build / % ( build_id ) s / test " <nl> APPEND_TEST_LOGS_ENDPOINT = " / build / % ( build_id ) s / test / % ( test_id ) s " <nl> <nl> + _BUILDLOGGER_REALM = " buildlogs " <nl> _BUILDLOGGER_CONFIG = " mci . buildlogger " <nl> <nl> _SEND_AFTER_LINES = 2000 <nl> def _log_on_error ( func ) : <nl> def wrapper ( * args , * * kwargs ) : <nl> try : <nl> return func ( * args , * * kwargs ) <nl> + except urllib2 . HTTPError as err : <nl> + sb = [ ] # String builder . <nl> + sb . append ( " HTTP Error % s : % s " % ( err . code , err . msg ) ) <nl> + sb . append ( " POST % s " % ( err . filename ) ) <nl> + <nl> + for name in err . hdrs : <nl> + value = err . hdrs [ name ] <nl> + sb . append ( " % s : % s " % ( name , value ) ) <nl> + <nl> + # Try to read the response back from the server . <nl> + if hasattr ( err , " read " ) : <nl> + sb . append ( err . read ( ) ) <nl> + <nl> + loggers . _BUILDLOGGER_FALLBACK . exception ( " \ n " . join ( sb ) ) <nl> except : <nl> loggers . _BUILDLOGGER_FALLBACK . exception ( " Encountered an error . " ) <nl> return None <nl> def new_build_id ( config ) : <nl> build_num = int ( config [ " build_num " ] ) <nl> <nl> handler = handlers . HTTPHandler ( <nl> + realm = _BUILDLOGGER_REALM , <nl> url_root = _config . BUILDLOGGER_URL , <nl> username = username , <nl> password = password ) <nl> def new_test_id ( build_id , build_config , test_filename , test_command ) : <nl> return None <nl> <nl> handler = handlers . HTTPHandler ( <nl> + realm = _BUILDLOGGER_REALM , <nl> url_root = _config . BUILDLOGGER_URL , <nl> username = build_config [ " username " ] , <nl> password = build_config [ " password " ] ) <nl> def __init__ ( self , <nl> username = build_config [ " username " ] <nl> password = build_config [ " password " ] <nl> <nl> - self . http_handler = handlers . HTTPHandler ( _config . BUILDLOGGER_URL , <nl> + self . http_handler = handlers . HTTPHandler ( _BUILDLOGGER_REALM , <nl> + _config . BUILDLOGGER_URL , <nl> username , <nl> password ) <nl> <nl> mmm a / buildscripts / resmokelib / logging / handlers . py <nl> ppp b / buildscripts / resmokelib / logging / handlers . py <nl> <nl> import json <nl> import logging <nl> import threading <nl> - <nl> - import requests <nl> - import requests . auth <nl> + import urllib2 <nl> <nl> from . . import utils <nl> from . . utils import timer <nl> <nl> _TIMEOUT_SECS = 10 <nl> <nl> - <nl> class BufferedHandler ( logging . Handler ) : <nl> " " " <nl> A handler class that buffers logging records in memory . Whenever <nl> class HTTPHandler ( object ) : <nl> A class which sends data to a web server using POST requests . <nl> " " " <nl> <nl> - def __init__ ( self , url_root , username , password ) : <nl> + def __init__ ( self , realm , url_root , username , password ) : <nl> " " " <nl> - Initializes the handler with the necessary authentication <nl> + Initializes the handler with the necessary authenticaton <nl> credentials . <nl> " " " <nl> <nl> - self . auth_handler = requests . auth . HTTPBasicAuth ( username , password ) <nl> + auth_handler = urllib2 . HTTPBasicAuthHandler ( ) <nl> + auth_handler . 
add_password ( <nl> + realm = realm , <nl> + uri = url_root , <nl> + user = username , <nl> + passwd = password ) <nl> <nl> self . url_root = url_root <nl> + self . url_opener = urllib2 . build_opener ( auth_handler , urllib2 . HTTPErrorProcessor ( ) ) <nl> <nl> def _make_url ( self , endpoint ) : <nl> return " % s / % s / " % ( self . url_root . rstrip ( " / " ) , endpoint . strip ( " / " ) ) <nl> def post ( self , endpoint , data = None , headers = None , timeout_secs = _TIMEOUT_SECS ) : <nl> headers [ " Content - Type " ] = " application / json ; charset = utf - 8 " <nl> <nl> url = self . _make_url ( endpoint ) <nl> + request = urllib2 . Request ( url = url , data = data , headers = headers ) <nl> <nl> - response = requests . post ( url , data = data , headers = headers , timeout = timeout_secs , <nl> - auth = self . auth_handler ) <nl> - <nl> - response . raise_for_status ( ) <nl> - <nl> - if not response . encoding : <nl> - response . encoding = " utf - 8 " <nl> - <nl> - headers = response . headers <nl> + response = self . url_opener . open ( request , timeout = timeout_secs ) <nl> + headers = response . info ( ) <nl> <nl> - if headers [ " Content - Type " ] . startswith ( " application / json " ) : <nl> - return response . json ( ) <nl> + content_type = headers . gettype ( ) <nl> + if content_type = = " application / json " : <nl> + encoding = headers . getparam ( " charset " ) or " utf - 8 " <nl> + return json . load ( response , encoding = encoding ) <nl> <nl> - return response . text <nl> + return response . read ( ) <nl> mmm a / buildscripts / resmokelib / requirements . txt <nl> ppp b / buildscripts / resmokelib / requirements . txt <nl> <nl> pymongo > = 3 . 0 <nl> pypiwin32 = = 219 ; sys_platform = = " win32 " <nl> PyYAML = = 3 . 11 <nl> - requests > = 2 . 0 . 0 <nl> subprocess32 > = 3 . 2 . 7 ; os_name = = " posix " and python_version < " 3 " <nl> | Revert " SERVER - 27627 use requests instead of urllib2 in resmoke . py " | mongodb/mongo | 973b8b9da39db84073e98d4979ec3a8d6179b217 | 2017-02-24T16:48:46Z |
mmm a / src / python / src / _framework / base / packets / _ingestion . py <nl> ppp b / src / python / src / _framework / base / packets / _ingestion . py <nl> def moar ( self , payload , complete ) : <nl> payload : A customer - significant payload object . May be None only if <nl> complete is True . <nl> complete : Whether or not the end of the payload sequence has been reached . <nl> - May be False only if payload is not None . <nl> + Must be True if payload is None . <nl> <nl> Returns : <nl> True if the wrapped consumer made progress or False if the wrapped <nl> def moar ( self , payload , complete ) : <nl> progress . <nl> " " " <nl> try : <nl> - if payload : <nl> - if complete : <nl> - self . _consumer . consume_and_terminate ( payload ) <nl> - else : <nl> - self . _consumer . consume ( payload ) <nl> - else : <nl> + if payload is None : <nl> self . _consumer . terminate ( ) <nl> + elif complete : <nl> + self . _consumer . consume_and_terminate ( payload ) <nl> + else : <nl> + self . _consumer . consume ( payload ) <nl> return True <nl> except abandonment . Abandoned : <nl> return False <nl> | Simplify _WrappedConsumer . moar . | grpc/grpc | 6edb45189ea7165cc06561d5aab56dbb743b0505 | 2015-02-15T01:06:31Z |
mmm a / libs / libcommon / src / phdr_cache . cpp <nl> ppp b / libs / libcommon / src / phdr_cache . cpp <nl> <nl> # include < assert . h > <nl> # include < vector > <nl> # include < cstddef > <nl> - # include < exception > <nl> + # include < stdexcept > <nl> <nl> namespace <nl> { <nl> | Addition to prev . revision | ClickHouse/ClickHouse | cc7fe5fb8d9dc840baf493b2875f59b1113c2309 | 2019-07-24T15:27:37Z |
mmm a / INSTALL . md <nl> ppp b / INSTALL . md <nl> Prerequisites : Linux <nl> <nl> Building is currently supported only on recent Debian / Ubuntu releases . All <nl> binaries are produced in a self - contained chroot environment for the target <nl> - distribution , so you will need to setup it up first by running <nl> + distribution , so you will need to setup it up first by running <nl> ` ` ` scripts / build . py ` ` ` . The following targets are currently supported : <nl> <nl> Target | Command for Setup <nl> CentOS 6 | ` ` ` sudo scripts / build . py setup - schroot - centos6 ` ` ` <nl> Ubuntu Precise | ` ` ` sudo scripts / build . py setup - schroot - precise ` ` ` <nl> MinGW - w64 | ` ` ` sudo scripts / build . py setup - mingw - w64 ` ` ` <nl> <nl> - The MinGW - w64 toolchain can cross - compile 32 / 64 - bit Windows binaries from <nl> + Please note that you should run the above commands while logged in as a <nl> + regular user who has ` ` ` sudo ` ` ` access . * * Do not attempt to clone the <nl> + repository or run any other command as root ! * * <nl> + <nl> + The MinGW - w64 toolchain can cross - compile 32 / 64 - bit Windows binaries from <nl> Linux - - it is useful for targetting Windows XP / Windows 2003 , which are not <nl> supported by default when compiling with MSVC 2013 . <nl> <nl> Building <nl> <nl> Switch to the checked - out folder and run the command ` ` ` scripts / build . py ` ` ` <nl> ( or ` ` ` scripts \ build . py ` ` ` if you are on Windows ) . This will present all <nl> - the targets which you can build . Select the appropriate target and the <nl> + the targets which you can build . Select the appropriate target and the <nl> output package will be generated in the ` ` ` static - build ` ` ` folder . <nl> | warn about running as root when performing the environment setup | wkhtmltopdf/wkhtmltopdf | 72c285066039bde8d8b8edefdae0f9dd9ddbe279 | 2014-05-02T10:19:19Z |
mmm a / include / internal / catch_enforce . h <nl> ppp b / include / internal / catch_enforce . h <nl> <nl> # include " catch_common . h " <nl> <nl> # include < sstream > <nl> - # include < exception > <nl> + # include < stdexcept > <nl> <nl> # define CATCH_PREPARE_EXCEPTION ( type , msg ) \ <nl> type ( static_cast < std : : ostringstream & & > ( std : : ostringstream ( ) < < msg ) . str ( ) ) <nl> | Include proper exception header in enforce . h | catchorg/Catch2 | 3882ac1a1936a31abdc110bdaea9c64aa27daae2 | 2017-08-01T22:26:52Z |
mmm a / src / citra_qt / main . cpp <nl> ppp b / src / citra_qt / main . cpp <nl> GMainWindow : : GMainWindow ( ) : emu_thread ( nullptr ) <nl> <nl> show ( ) ; <nl> <nl> - game_list - > PopulateAsync ( settings . value ( " gameListRootDir " ) . toString ( ) , settings . value ( " gameListDeepScan " ) . toBool ( ) ) ; <nl> + game_list - > PopulateAsync ( settings . value ( " gameListRootDir " , " " ) . toString ( ) , settings . value ( " gameListDeepScan " , false ) . toBool ( ) ) ; <nl> <nl> QStringList args = QApplication : : arguments ( ) ; <nl> if ( args . length ( ) > = 2 ) { <nl> | Gamelist : supply default settings for QSettings config | yuzu-emu/yuzu | e516a5bc96015c54c425523c4a1f1b89e5a421d5 | 2016-01-01T21:54:06Z |
mmm a / src / init . cpp <nl> ppp b / src / init . cpp <nl> void CleanupBlockRevFiles ( ) <nl> LogPrintf ( " Removing unusable blk ? ? ? ? ? . dat and rev ? ? ? ? ? . dat files for - reindex with - prune \ n " ) ; <nl> fs : : path blocksdir = GetDataDir ( ) / " blocks " ; <nl> for ( fs : : directory_iterator it ( blocksdir ) ; it ! = fs : : directory_iterator ( ) ; it + + ) { <nl> - if ( is_regular_file ( * it ) & & <nl> + if ( fs : : is_regular_file ( * it ) & & <nl> it - > path ( ) . filename ( ) . string ( ) . length ( ) = = 12 & & <nl> it - > path ( ) . filename ( ) . string ( ) . substr ( 8 , 4 ) = = " . dat " ) <nl> { <nl> | Merge : [ trivial ] Make namespace explicit for is_regular_file | bitcoin/bitcoin | 50d72b357081ba377dac4c0d841ad55be3ae9fad | 2017-10-18T14:35:19Z |
mmm a / Code / Sandbox / EditorQt / Terrain / Heightmap . cpp <nl> ppp b / Code / Sandbox / EditorQt / Terrain / Heightmap . cpp <nl> void CHeightmap : : UpdateEngineTerrain ( int x1 , int y1 , int areaSize , int _height , <nl> const float areaRadius = originalInputAreaSize * nHeightMapUnitSize / 2 ; <nl> <nl> GetIEditorImpl ( ) - > GetGameEngine ( ) - > OnTerrainModified ( worldModPosition , areaRadius , ( originalInputAreaSize = = m_iWidth ) , updateFlags & ( ETerrainUpdateType : : Elevation | ETerrainUpdateType : : InfoBits ) ) ; <nl> + GetIEditorImpl ( ) - > GetTerrainManager ( ) - > SetModified ( ) ; <nl> } <nl> <nl> void CHeightmap : : Serialize ( CXmlArchive & xmlAr ) <nl> | ! I integrate from / / ce / main . . . | CRYTEK/CRYENGINE | 12dd02632d9cd42fbf91bc226f43ae3f06b85f55 | 2018-12-13T12:02:06Z |
mmm a / benchmarks / operator_benchmark / README . md <nl> ppp b / benchmarks / operator_benchmark / README . md <nl> Filter and run an operator ( use add as an example ) : <nl> ` ` ` <nl> $ python - m benchmark_all_test - - operator add - - omp_num_threads 1 - - mkl_num_threads 1 <nl> ` ` ` <nl> + Note : this filter is based on the operator name rather than the file name . <nl> <nl> Run torch . add benchmark with tag ' long ' : <nl> ` ` ` <nl> | update op bench readme ( ) | pytorch/pytorch | b5a38fa98eeb5e3f8a381c6ca4dafda86ba21f82 | 2019-11-11T23:33:29Z |
mmm a / tensorflow / examples / adding_an_op / BUILD <nl> ppp b / tensorflow / examples / adding_an_op / BUILD <nl> licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> load ( " / / tensorflow : tensorflow . bzl " , " tf_custom_op_library " ) <nl> load ( " / / tensorflow : tensorflow . bzl " , " tf_cuda_tests_tags " ) <nl> load ( " / / tensorflow : tensorflow . bzl " , " tf_cc_binary " ) <nl> + load ( " / / tensorflow : tensorflow . bzl " , " tf_exec_compatible_with " ) <nl> <nl> exports_files ( [ " LICENSE " ] ) <nl> <nl> py_test ( <nl> name = " cuda_op_test " , <nl> size = " small " , <nl> srcs = [ " cuda_op_test . py " ] , <nl> + exec_compatible_with = tf_exec_compatible_with ( { " tags " : tf_cuda_tests_tags ( ) } ) , <nl> srcs_version = " PY2AND3 " , <nl> tags = tf_cuda_tests_tags ( ) + [ " notap " ] , <nl> deps = [ <nl> | Adapt exec_compatible_with for remote testing in add_an_op test . | tensorflow/tensorflow | 6dae7e3ebbfd83b9f1d318ba5f2f2baeae17b9f5 | 2019-01-14T15:45:18Z |
mmm a / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeQueue . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeQueue . h <nl> class ReplicatedMergeTreeQueue <nl> <nl> using Queue = std : : list < LogEntryPtr > ; <nl> <nl> + struct ByTime <nl> + { <nl> + bool operator ( ) ( const LogEntryPtr & lhs , const LogEntryPtr & rhs ) const <nl> + { <nl> + return std : : forward_as_tuple ( lhs . get ( ) - > create_time , lhs . get ( ) ) <nl> + < std : : forward_as_tuple ( rhs . get ( ) - > create_time , rhs . get ( ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / / Для вычисления min_unprocessed_insert_time , max_processed_insert_time , по которым вычисляется отставание реплик . <nl> + using InsertsByTime = std : : set < LogEntryPtr , ByTime > ; <nl> + <nl> + <nl> String zookeeper_path ; <nl> String replica_path ; <nl> String logger_name ; <nl> class ReplicatedMergeTreeQueue <nl> * / <nl> Queue queue ; <nl> <nl> + InsertsByTime inserts_by_time ; <nl> + time_t min_unprocessed_insert_time = 0 ; <nl> + time_t max_processed_insert_time = 0 ; <nl> + <nl> time_t last_queue_update = 0 ; <nl> <nl> / / / Куски , которые появятся в результате действий , выполняемых прямо сейчас фоновыми потоками ( этих действий нет в очереди ) . <nl> class ReplicatedMergeTreeQueue <nl> * / <nl> bool shouldExecuteLogEntry ( const LogEntry & entry , String & out_postpone_reason , MergeTreeDataMerger & merger ) ; <nl> <nl> + / / / После удаления элемента очереди , обновить времена insert - ов в оперативке . Выполняется под queue_mutex . <nl> + / / / Возвращает информацию , какие времена изменились - эту информацию можно передать в updateTimesInZooKeeper . <nl> + void updateTimesOnRemoval ( const LogEntryPtr & entry , bool & min_unprocessed_insert_time_changed , bool & max_processed_insert_time_changed ) ; <nl> + <nl> + / / / Обновить времена insert - ов в ZooKeeper . <nl> + void updateTimesInZooKeeper ( zkutil : : ZooKeeperPtr zookeeper , bool min_unprocessed_insert_time_changed , bool max_processed_insert_time_changed ) ; <nl> + <nl> public : <nl> ReplicatedMergeTreeQueue ( ) { } <nl> <nl> void initialize ( const String & zookeeper_path_ , const String & replica_path_ , const String & logger_name_ , <nl> const MergeTreeData : : DataParts & parts , zkutil : : ZooKeeperPtr zookeeper ) ; <nl> <nl> - / * * Вставить действие в конец очереди . * / <nl> - void insert ( LogEntryPtr & entry ) ; <nl> + / * * Вставить действие в конец очереди . <nl> + * Для восстановления битых кусков во время работы . <nl> + * Не вставляет само действие в ZK ( сделайте это самостоятельно ) . <nl> + * / <nl> + void insert ( zkutil : : ZooKeeperPtr zookeeper , LogEntryPtr & entry ) ; <nl> <nl> - / * * Удалить действие с указанным куском ( в качестве new_part_name ) из очереди . * / <nl> + / * * Удалить действие с указанным куском ( в качестве new_part_name ) из очереди . <nl> + * Вызывается для невыполнимых действий в очереди - старых потерянных кусков . <nl> + * / <nl> bool remove ( zkutil : : ZooKeeperPtr zookeeper , const String & part_name ) ; <nl> <nl> / * * Скопировать новые записи из общего лога в очередь этой реплики . Установить log_pointer в соответствующее значение . <nl> class ReplicatedMergeTreeQueue <nl> / / / Получить данные элементов очереди . <nl> using LogEntriesData = std : : vector < ReplicatedMergeTreeLogEntryData > ; <nl> void getEntries ( LogEntriesData & res ) ; <nl> + <nl> + / / / Получить информацию о временах insert - ов . 
<nl> + void getInsertTimes ( time_t & out_min_unprocessed_insert_time , time_t & out_max_processed_insert_time ) const ; <nl> } ; <nl> <nl> <nl> mmm a / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . h <nl> class ReplicatedMergeTreeRestartingThread <nl> wakeup ( ) ; <nl> } <nl> <nl> - void getReplicaDelays ( time_t & out_absolute_delay , time_t & out_relative_delay ) const <nl> - { <nl> - out_absolute_delay = absolute_delay . load ( std : : memory_order_relaxed ) ; <nl> - out_relative_delay = relative_delay . load ( std : : memory_order_relaxed ) ; <nl> - } <nl> - <nl> private : <nl> StorageReplicatedMergeTree & storage ; <nl> Logger * log ; <nl> class ReplicatedMergeTreeRestartingThread <nl> <nl> std : : thread thread ; <nl> <nl> - / / / Отставание реплики . <nl> - std : : atomic < time_t > absolute_delay { } ; <nl> - std : : atomic < time_t > relative_delay { } ; <nl> - <nl> - <nl> void run ( ) ; <nl> <nl> / / / Запустить или остановить фоновые потоки . Используется для частичной переинициализации при пересоздании сессии в ZooKeeper . <nl> class ReplicatedMergeTreeRestartingThread <nl> <nl> / / / Запретить запись в таблицу и завершить все фоновые потоки . <nl> void goReadOnlyPermanently ( ) ; <nl> - <nl> - / / / Получить информацию об отставании реплик . <nl> - void checkReplicationDelays ( time_t & out_absolute_delay , time_t & out_relative_delay ) ; <nl> } ; <nl> <nl> <nl> mmm a / dbms / include / DB / Storages / StorageReplicatedMergeTree . h <nl> ppp b / dbms / include / DB / Storages / StorageReplicatedMergeTree . h <nl> class StorageReplicatedMergeTree : public IStorage <nl> using LogEntriesData = std : : vector < ReplicatedMergeTreeLogEntryData > ; <nl> void getQueue ( LogEntriesData & res , String & replica_name ) ; <nl> <nl> - void getReplicaDelays ( time_t & out_absolute_delay , time_t & out_relative_delay ) const ; <nl> + void getReplicaDelays ( time_t & out_absolute_delay , time_t & out_relative_delay ) ; <nl> <nl> private : <nl> void dropUnreplicatedPartition ( const Field & partition , bool detach , const Settings & settings ) ; <nl> mmm a / dbms / src / Server / Server . cpp <nl> ppp b / dbms / src / Server / Server . cpp <nl> class ReplicasStatusHandler : public Poco : : Net : : HTTPRequestHandler <nl> { <nl> try <nl> { <nl> + HTMLForm params ( request ) ; <nl> + <nl> + / / / Даже в случае , когда отставание небольшое , выводить подробную информацию об отставании . <nl> + bool verbose = params . get ( " verbose " , " " ) = = " 1 " ; <nl> + <nl> / / / Собираем набор реплицируемых таблиц . <nl> Databases replicated_tables ; <nl> { <nl> class ReplicasStatusHandler : public Poco : : Net : : HTTPRequestHandler <nl> <nl> const MergeTreeSettings & settings = context . getMergeTreeSettings ( ) ; <nl> <nl> - bool ok = / * true * / false ; <nl> + bool ok = true ; <nl> std : : stringstream message ; <nl> <nl> for ( const auto & db : replicated_tables ) <nl> { <nl> - for ( const auto & table : db . second ) <nl> + for ( auto & table : db . second ) <nl> { <nl> time_t absolute_delay = 0 ; <nl> time_t relative_delay = 0 ; <nl> <nl> - static_cast < const StorageReplicatedMergeTree & > ( * table . second ) . getReplicaDelays ( absolute_delay , relative_delay ) ; <nl> + static_cast < StorageReplicatedMergeTree & > ( * table . second ) . getReplicaDelays ( absolute_delay , relative_delay ) ; <nl> <nl> if ( ( settings . 
min_absolute_delay_to_close & & absolute_delay > = static_cast < time_t > ( settings . min_absolute_delay_to_close ) ) <nl> | | ( settings . min_relative_delay_to_close & & relative_delay > = static_cast < time_t > ( settings . min_relative_delay_to_close ) ) ) <nl> ok = false ; <nl> <nl> message < < backQuoteIfNeed ( db . first ) < < " . " < < backQuoteIfNeed ( table . first ) <nl> - < < " \ tAbsolute delay : " < < absolute_delay < < " . Relative delay : " < < relative_delay < < " . \ n " ; <nl> + < < " : \ tAbsolute delay : " < < absolute_delay < < " . Relative delay : " < < relative_delay < < " . \ n " ; <nl> } <nl> } <nl> <nl> - if ( ok ) <nl> + if ( ok & & ! verbose ) <nl> { <nl> const char * data = " Ok . \ n " ; <nl> response . sendBuffer ( data , strlen ( data ) ) ; <nl> class ReplicasStatusHandler : public Poco : : Net : : HTTPRequestHandler <nl> } <nl> catch ( . . . ) <nl> { <nl> - / / / TODO Отправлять клиенту . <nl> tryLogCurrentException ( " ReplicasStatusHandler " ) ; <nl> + <nl> + try <nl> + { <nl> + response . setStatusAndReason ( Poco : : Net : : HTTPResponse : : HTTP_INTERNAL_SERVER_ERROR ) ; <nl> + <nl> + if ( ! response . sent ( ) ) <nl> + { <nl> + / / / Ещё ничего не отправляли , и даже не знаем , нужно ли сжимать ответ . <nl> + response . send ( ) < < getCurrentExceptionMessage ( false ) < < std : : endl ; <nl> + } <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + LOG_ERROR ( ( & Logger : : get ( " ReplicasStatusHandler " ) ) , " Cannot send exception to client " ) ; <nl> + } <nl> } <nl> } <nl> } ; <nl> class HTTPRequestHandlerFactory : public Poco : : Net : : HTTPRequestHandlerFactory <nl> { <nl> if ( uri = = " / " | | uri = = " / ping " ) <nl> return new PingRequestHandler ; <nl> - else if ( uri = = " / replicas_status " ) <nl> + else if ( 0 = = uri . compare ( 0 , strlen ( " / replicas_status " ) , " / replicas_status " ) ) <nl> return new ReplicasStatusHandler ( * server . global_context ) ; <nl> else <nl> return new NotFoundHandler ; <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQueue . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQueue . cpp <nl> void ReplicatedMergeTreeQueue : : load ( zkutil : : ZooKeeperPtr zookeeper ) <nl> insertUnlocked ( entry ) ; <nl> } <nl> <nl> + updateTimesInZooKeeper ( zookeeper , true , false ) ; <nl> + <nl> LOG_TRACE ( log , " Loaded queue " ) ; <nl> } <nl> <nl> void ReplicatedMergeTreeQueue : : insertUnlocked ( LogEntryPtr & entry ) <nl> { <nl> virtual_parts . add ( entry - > new_part_name ) ; <nl> queue . push_back ( entry ) ; <nl> + <nl> + if ( entry - > type = = LogEntry : : GET_PART ) <nl> + { <nl> + inserts_by_time . insert ( entry ) ; <nl> + <nl> + if ( entry - > create_time & & ( ! min_unprocessed_insert_time | | entry - > create_time < min_unprocessed_insert_time ) ) <nl> + min_unprocessed_insert_time = entry - > create_time ; <nl> + } <nl> } <nl> <nl> <nl> - void ReplicatedMergeTreeQueue : : insert ( LogEntryPtr & entry ) <nl> + void ReplicatedMergeTreeQueue : : insert ( zkutil : : ZooKeeperPtr zookeeper , LogEntryPtr & entry ) <nl> { <nl> - std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> - insertUnlocked ( entry ) ; <nl> + time_t prev_min_unprocessed_insert_time ; <nl> + <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + prev_min_unprocessed_insert_time = min_unprocessed_insert_time ; <nl> + insertUnlocked ( entry ) ; <nl> + } <nl> + <nl> + if ( min_unprocessed_insert_time ! 
= prev_min_unprocessed_insert_time ) <nl> + updateTimesInZooKeeper ( zookeeper , true , false ) ; <nl> + } <nl> + <nl> + <nl> + void ReplicatedMergeTreeQueue : : updateTimesOnRemoval ( <nl> + const LogEntryPtr & entry , <nl> + bool & min_unprocessed_insert_time_changed , <nl> + bool & max_processed_insert_time_changed ) <nl> + { <nl> + if ( entry - > type ! = LogEntry : : GET_PART ) <nl> + return ; <nl> + <nl> + inserts_by_time . erase ( entry ) ; <nl> + <nl> + if ( inserts_by_time . empty ( ) ) <nl> + { <nl> + min_unprocessed_insert_time = 0 ; <nl> + min_unprocessed_insert_time_changed = true ; <nl> + } <nl> + else if ( ( * inserts_by_time . begin ( ) ) - > create_time > min_unprocessed_insert_time ) <nl> + { <nl> + min_unprocessed_insert_time = ( * inserts_by_time . begin ( ) ) - > create_time ; <nl> + min_unprocessed_insert_time_changed = true ; <nl> + } <nl> + <nl> + if ( entry - > create_time > max_processed_insert_time ) <nl> + { <nl> + max_processed_insert_time = entry - > create_time ; <nl> + max_processed_insert_time_changed = true ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void ReplicatedMergeTreeQueue : : updateTimesInZooKeeper ( <nl> + zkutil : : ZooKeeperPtr zookeeper , <nl> + bool min_unprocessed_insert_time_changed , <nl> + bool max_processed_insert_time_changed ) <nl> + { <nl> + / / / Здесь может быть race condition ( при одновременном выполнении разных remove ) . <nl> + / / / Считаем его несущественным ( в течение небольшого времени , в ZK будет записано немного отличающееся значение времени ) . <nl> + / / / Также читаем значения переменных min_unprocessed_insert_time , max_processed_insert_time без синхронизации . <nl> + zkutil : : Ops ops ; <nl> + <nl> + if ( min_unprocessed_insert_time_changed ) <nl> + ops . push_back ( new zkutil : : Op : : SetData ( <nl> + replica_path + " / min_unprocessed_insert_time " , toString ( min_unprocessed_insert_time ) , - 1 ) ) ; <nl> + <nl> + if ( max_processed_insert_time_changed ) <nl> + ops . push_back ( new zkutil : : Op : : SetData ( <nl> + replica_path + " / max_processed_insert_time " , toString ( max_processed_insert_time ) , - 1 ) ) ; <nl> + <nl> + if ( ! ops . empty ( ) ) <nl> + { <nl> + auto code = zookeeper - > tryMulti ( ops ) ; <nl> + <nl> + if ( code ! = ZOK ) <nl> + LOG_ERROR ( log , " Couldn ' t set value of nodes for insert times ( " <nl> + < < replica_path < < " / min_unprocessed_insert_time , max_processed_insert_time ) " < < " : " <nl> + < < zkutil : : ZooKeeper : : error2string ( code ) + " . This shouldn ' t happen often . " ) ; <nl> + } <nl> } <nl> <nl> <nl> void ReplicatedMergeTreeQueue : : remove ( zkutil : : ZooKeeperPtr zookeeper , LogEntryPt <nl> auto code = zookeeper - > tryRemove ( replica_path + " / queue / " + entry - > znode_name ) ; <nl> <nl> if ( code ! = ZOK ) <nl> - LOG_ERROR ( log , " Couldn ' t remove " < < replica_path + " / queue / " + entry - > znode_name < < " : " <nl> - < < zkutil : : ZooKeeper : : error2string ( code ) + " . This shouldn ' t happen often . " ) ; <nl> + LOG_ERROR ( log , " Couldn ' t remove " < < replica_path < < " / queue / " < < entry - > znode_name < < " : " <nl> + < < zkutil : : ZooKeeper : : error2string ( code ) < < " . This shouldn ' t happen often . " ) ; <nl> <nl> - std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + bool min_unprocessed_insert_time_changed = false ; <nl> + bool max_processed_insert_time_changed = false ; <nl> <nl> - / / / Удалим задание из очереди в оперативке . 
<nl> - / / / Нельзя просто обратиться по заранее сохраненному итератору , потому что задание мог успеть удалить кто - то другой . <nl> - / / / Почему просматриваем очередь с конца ? <nl> - / / / - потому что задание к выполнению сначала перемещается в конец очереди , чтобы в случае неуспеха оно осталось в конце . <nl> - for ( Queue : : iterator it = queue . end ( ) ; it ! = queue . begin ( ) ; ) <nl> { <nl> - - - it ; <nl> - if ( * it = = entry ) <nl> + std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + <nl> + / / / Удалим задание из очереди в оперативке . <nl> + / / / Нельзя просто обратиться по заранее сохраненному итератору , потому что задание мог успеть удалить кто - то другой . <nl> + / / / Почему просматриваем очередь с конца ? <nl> + / / / - потому что задание к выполнению сначала перемещается в конец очереди , чтобы в случае неуспеха оно осталось в конце . <nl> + for ( Queue : : iterator it = queue . end ( ) ; it ! = queue . begin ( ) ; ) <nl> { <nl> - queue . erase ( it ) ; <nl> - break ; <nl> + - - it ; <nl> + if ( * it = = entry ) <nl> + { <nl> + queue . erase ( it ) ; <nl> + break ; <nl> + } <nl> } <nl> + <nl> + updateTimesOnRemoval ( entry , min_unprocessed_insert_time_changed , max_processed_insert_time_changed ) ; <nl> } <nl> + <nl> + updateTimesInZooKeeper ( zookeeper , min_unprocessed_insert_time_changed , max_processed_insert_time_changed ) ; <nl> } <nl> <nl> <nl> bool ReplicatedMergeTreeQueue : : remove ( zkutil : : ZooKeeperPtr zookeeper , const String & part_name ) <nl> { <nl> - std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + LogEntryPtr found ; <nl> + <nl> + bool min_unprocessed_insert_time_changed = false ; <nl> + bool max_processed_insert_time_changed = false ; <nl> <nl> - for ( Queue : : iterator it = queue . begin ( ) ; it ! = queue . end ( ) ; ) <nl> { <nl> - if ( ( * it ) - > new_part_name = = part_name ) <nl> + std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + <nl> + for ( Queue : : iterator it = queue . begin ( ) ; it ! = queue . end ( ) ; ) <nl> { <nl> - zookeeper - > tryRemove ( replica_path + " / queue / " + ( * it ) - > znode_name ) ; / / / NOTE Может быть , стоит избежать блокировки в это время . <nl> - queue . erase ( it + + ) ; <nl> - return true ; <nl> + if ( ( * it ) - > new_part_name = = part_name ) <nl> + { <nl> + found = * it ; <nl> + queue . erase ( it + + ) ; <nl> + updateTimesOnRemoval ( found , min_unprocessed_insert_time_changed , max_processed_insert_time_changed ) ; <nl> + break ; <nl> + } <nl> + else <nl> + + + it ; <nl> } <nl> - else <nl> - + + it ; <nl> } <nl> <nl> - return false ; <nl> + if ( ! found ) <nl> + return false ; <nl> + <nl> + zookeeper - > tryRemove ( replica_path + " / queue / " + found - > znode_name ) ; <nl> + updateTimesInZooKeeper ( zookeeper , min_unprocessed_insert_time_changed , max_processed_insert_time_changed ) ; <nl> + <nl> + return true ; <nl> } <nl> <nl> <nl> bool ReplicatedMergeTreeQueue : : pullLogsToQueue ( zkutil : : ZooKeeperPtr zookeeper , z <nl> std : : vector < LogEntryPtr > copied_entries ; <nl> copied_entries . reserve ( log_entries . size ( ) ) ; <nl> <nl> + bool min_unprocessed_insert_time_changed = false ; <nl> + <nl> for ( auto & future : futures ) <nl> { <nl> zkutil : : ZooKeeper : : ValueAndStat res = future . second . get ( ) ; <nl> bool ReplicatedMergeTreeQueue : : pullLogsToQueue ( zkutil : : ZooKeeperPtr zookeeper , z <nl> <nl> ops . push_back ( new zkutil : : Op : : Create ( <nl> replica_path + " / queue / queue - " , res . 
value , zookeeper - > getDefaultACL ( ) , zkutil : : CreateMode : : PersistentSequential ) ) ; <nl> + <nl> + const auto & entry = * copied_entries . back ( ) ; <nl> + if ( entry . type = = LogEntry : : GET_PART ) <nl> + { <nl> + if ( entry . create_time & & ( ! min_unprocessed_insert_time | | entry . create_time < min_unprocessed_insert_time ) ) <nl> + { <nl> + min_unprocessed_insert_time = entry . create_time ; <nl> + min_unprocessed_insert_time_changed = true ; <nl> + } <nl> + } <nl> } <nl> <nl> ops . push_back ( new zkutil : : Op : : SetData ( <nl> replica_path + " / log_pointer " , toString ( last_entry_index + 1 ) , - 1 ) ) ; <nl> <nl> + if ( min_unprocessed_insert_time_changed ) <nl> + ops . push_back ( new zkutil : : Op : : SetData ( <nl> + replica_path + " / min_unprocessed_insert_time " , toString ( min_unprocessed_insert_time ) , - 1 ) ) ; <nl> + <nl> auto results = zookeeper - > multi ( ops ) ; <nl> <nl> / / / Сейчас мы успешно обновили очередь в ZooKeeper . Обновим её в оперативке . <nl> void ReplicatedMergeTreeQueue : : removeGetsAndMergesInRange ( zkutil : : ZooKeeperPtr z <nl> { <nl> Queue to_wait ; <nl> size_t removed_entries = 0 ; <nl> + bool min_unprocessed_insert_time_changed = false ; <nl> + bool max_processed_insert_time_changed = false ; <nl> <nl> / / / Удалим из очереди операции с кусками , содержащимися в удаляемом диапазоне . <nl> std : : unique_lock < std : : mutex > lock ( mutex ) ; <nl> void ReplicatedMergeTreeQueue : : removeGetsAndMergesInRange ( zkutil : : ZooKeeperPtr z <nl> if ( code ! = ZOK ) <nl> LOG_INFO ( log , " Couldn ' t remove " < < replica_path + " / queue / " + ( * it ) - > znode_name < < " : " <nl> < < zkutil : : ZooKeeper : : error2string ( code ) ) ; <nl> + <nl> + updateTimesOnRemoval ( * it , min_unprocessed_insert_time_changed , max_processed_insert_time_changed ) ; <nl> queue . erase ( it + + ) ; <nl> + + removed_entries ; <nl> } <nl> void ReplicatedMergeTreeQueue : : removeGetsAndMergesInRange ( zkutil : : ZooKeeperPtr z <nl> + + it ; <nl> } <nl> <nl> + updateTimesInZooKeeper ( zookeeper , min_unprocessed_insert_time_changed , max_processed_insert_time_changed ) ; <nl> + <nl> LOG_DEBUG ( log , " Removed " < < removed_entries < < " entries from queue . " <nl> " Waiting for " < < to_wait . size ( ) < < " entries that are currently executing . " ) ; <nl> <nl> void ReplicatedMergeTreeQueue : : countMerges ( size_t & all_merges , size_t & big_mer <nl> } <nl> <nl> <nl> + void ReplicatedMergeTreeQueue : : getInsertTimes ( time_t & out_min_unprocessed_insert_time , time_t & out_max_processed_insert_time ) const <nl> + { <nl> + out_min_unprocessed_insert_time = min_unprocessed_insert_time ; <nl> + out_max_processed_insert_time = max_processed_insert_time ; <nl> + } <nl> + <nl> + <nl> String padIndex ( Int64 index ) <nl> { <nl> String index_str = toString ( index ) ; <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . cpp <nl> void ReplicatedMergeTreeRestartingThread : : run ( ) <nl> if ( current_time > = prev_time_of_check_delay + static_cast < time_t > ( storage . data . settings . check_delay_period ) ) <nl> { <nl> / / / Выясняем отставания реплик . <nl> - time_t new_absolute_delay = 0 ; <nl> - time_t new_relative_delay = 0 ; <nl> + time_t absolute_delay = 0 ; <nl> + time_t relative_delay = 0 ; <nl> <nl> - / / / TODO Ловить здесь исключение . 
<nl> - checkReplicationDelays ( new_absolute_delay , new_relative_delay ) ; <nl> - <nl> - absolute_delay . store ( new_absolute_delay , std : : memory_order_relaxed ) ; <nl> - relative_delay . store ( new_relative_delay , std : : memory_order_relaxed ) ; <nl> + bool error = false ; <nl> + try <nl> + { <nl> + storage . getReplicaDelays ( absolute_delay , relative_delay ) ; <nl> + LOG_TRACE ( log , " Absolute delay : " < < absolute_delay < < " . Relative delay : " < < relative_delay < < " . " ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + tryLogCurrentException ( " __PRETTY_FUNCTION__ " , " Cannot get replica delays " ) ; <nl> + error = true ; <nl> + } <nl> <nl> prev_time_of_check_delay = current_time ; <nl> <nl> / / / Уступаем лидерство , если относительное отставание больше порога . <nl> - if ( storage . is_leader_node & & new_relative_delay > static_cast < time_t > ( storage . data . settings . min_relative_delay_to_yield_leadership ) ) <nl> + if ( storage . is_leader_node <nl> + & & ( error | | relative_delay > static_cast < time_t > ( storage . data . settings . min_relative_delay_to_yield_leadership ) ) ) <nl> { <nl> - LOG_INFO ( log , " Relative replica delay ( " < < new_relative_delay < < " seconds ) is bigger than threshold ( " <nl> - < < storage . data . settings . min_relative_delay_to_yield_leadership < < " ) . Will yield leadership . " ) ; <nl> + if ( error ) <nl> + LOG_INFO ( log , " Will yield leadership . " ) ; <nl> + else <nl> + LOG_INFO ( log , " Relative replica delay ( " < < relative_delay < < " seconds ) is bigger than threshold ( " <nl> + < < storage . data . settings . min_relative_delay_to_yield_leadership < < " ) . Will yield leadership . " ) ; <nl> <nl> need_restart = true ; <nl> continue ; <nl> void ReplicatedMergeTreeRestartingThread : : goReadOnlyPermanently ( ) <nl> } <nl> <nl> <nl> - void ReplicatedMergeTreeRestartingThread : : checkReplicationDelays ( time_t & out_absolute_delay , time_t & out_relative_delay ) <nl> - { <nl> - out_absolute_delay = 0 ; <nl> - out_relative_delay = 0 ; <nl> - <nl> - auto zookeeper = storage . getZooKeeper ( ) ; <nl> - <nl> - / / TODO <nl> - <nl> - LOG_TRACE ( log , " Absolute delay : " < < out_absolute_delay < < " . Relative delay : " < < out_relative_delay < < " . " ) ; <nl> - } <nl> - <nl> - <nl> } <nl> mmm a / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> void StorageReplicatedMergeTree : : createNewZooKeeperNodes ( ) <nl> <nl> / / / Отслеживание отставания реплик . <nl> zookeeper - > createIfNotExists ( replica_path + " / min_unprocessed_insert_time " , " " ) ; <nl> + zookeeper - > createIfNotExists ( replica_path + " / max_processed_insert_time " , " " ) ; <nl> } <nl> <nl> <nl> void StorageReplicatedMergeTree : : createReplica ( ) <nl> zookeeper - > create ( replica_path + " / queue / queue - " , entry , zkutil : : CreateMode : : PersistentSequential ) ; <nl> } <nl> <nl> - / / / Далее оно будет загружено в переменную queue в методе queue . load . <nl> + / / / Далее оно будет загружено в переменную queue в методе queue . initialize . <nl> <nl> LOG_DEBUG ( log , " Copied " < < source_queue . size ( ) < < " queue entries " ) ; <nl> } <nl> void StorageReplicatedMergeTree : : checkParts ( bool skip_sanity_checks ) <nl> log_entry . new_part_name = name ; <nl> log_entry . create_time = tryGetPartCreateTime ( zookeeper , replica_path , name ) ; <nl> <nl> - / / / Полагаемся , что это происходит до загрузки очереди ( queue . load ) . 
<nl> + / / / Полагаемся , что это происходит до загрузки очереди ( queue . initialize ) . <nl> zkutil : : Ops ops ; <nl> removePartFromZooKeeper ( name , ops ) ; <nl> ops . push_back ( new zkutil : : Op : : Create ( <nl> void StorageReplicatedMergeTree : : removePartAndEnqueueFetch ( const String & part_n <nl> <nl> String path_created = dynamic_cast < zkutil : : Op : : Create & > ( ops [ 0 ] ) . getPathCreated ( ) ; <nl> log_entry - > znode_name = path_created . substr ( path_created . find_last_of ( ' / ' ) + 1 ) ; <nl> - queue . insert ( log_entry ) ; <nl> + queue . insert ( zookeeper , log_entry ) ; <nl> } <nl> <nl> <nl> void StorageReplicatedMergeTree : : getQueue ( LogEntriesData & res , String & replica <nl> } <nl> <nl> <nl> - void StorageReplicatedMergeTree : : getReplicaDelays ( time_t & out_absolute_delay , time_t & out_relative_delay ) const <nl> + void StorageReplicatedMergeTree : : getReplicaDelays ( time_t & out_absolute_delay , time_t & out_relative_delay ) <nl> { <nl> - if ( ! restarting_thread ) <nl> - throw Exception ( " Table was shutted down or is in readonly mode . " , ErrorCodes : : TABLE_IS_READ_ONLY ) ; <nl> + assertNotReadonly ( ) ; <nl> + <nl> + / * * Абсолютная задержка - задержка отставания текущей реплики от реального времени . <nl> + * / <nl> + <nl> + time_t min_unprocessed_insert_time = 0 ; <nl> + time_t max_processed_insert_time = 0 ; <nl> + queue . getInsertTimes ( min_unprocessed_insert_time , max_processed_insert_time ) ; <nl> + <nl> + time_t current_time = time ( 0 ) ; <nl> + out_absolute_delay = 0 ; <nl> + out_relative_delay = 0 ; <nl> + <nl> + if ( min_unprocessed_insert_time ) <nl> + out_absolute_delay = current_time - min_unprocessed_insert_time ; <nl> + <nl> + / * * Относительная задержка - максимальная разница абсолютной задержки от какой - либо другой реплики , <nl> + * ( если эта реплика отстаёт от какой - либо другой реплики , или ноль , иначе ) . <nl> + * Вычисляется только если абсолютная задержка достаточно большая . <nl> + * / <nl> + <nl> + if ( out_absolute_delay < static_cast < time_t > ( data . settings . min_relative_delay_to_yield_leadership ) ) <nl> + return ; <nl> + <nl> + auto zookeeper = getZooKeeper ( ) ; <nl> + <nl> + time_t max_replicas_unprocessed_insert_time = 0 ; <nl> + Strings replicas = zookeeper - > getChildren ( zookeeper_path + " / replicas " ) ; <nl> + <nl> + for ( const auto & replica : replicas ) <nl> + { <nl> + if ( replica = = replica_name ) <nl> + continue ; <nl> + <nl> + String value ; <nl> + if ( ! zookeeper - > tryGet ( zookeeper_path + " / replicas / " + replica + " / min_unprocessed_insert_time " , value ) ) <nl> + continue ; <nl> + <nl> + time_t replica_time = value . empty ( ) ? 0 : parse < time_t > ( value ) ; <nl> + if ( replica_time > max_replicas_unprocessed_insert_time ) <nl> + max_replicas_unprocessed_insert_time = replica_time ; <nl> + } <nl> <nl> - restarting_thread - > getReplicaDelays ( out_absolute_delay , out_relative_delay ) ; <nl> + if ( max_replicas_unprocessed_insert_time > min_unprocessed_insert_time ) <nl> + out_relative_delay = max_replicas_unprocessed_insert_time - min_unprocessed_insert_time ; <nl> } <nl> <nl> <nl> | dbms : tracking replicas delays [ # METR - 17573 ] . | ClickHouse/ClickHouse | 82a96d22b19a0f736d58cb71a61bc119d0141185 | 2016-01-17T13:00:42Z |
mmm a / bench / spdlog - bench - mt . cpp <nl> ppp b / bench / spdlog - bench - mt . cpp <nl> int main ( int argc , char * argv [ ] ) <nl> logger - > set_pattern ( " [ % Y - % b - % d % T . % e ] : % v " ) ; <nl> <nl> std : : atomic < int > msg_counter { 0 } ; <nl> - vector < thread > threads ; <nl> + std : : vector < thread > threads ; <nl> <nl> for ( int t = 0 ; t < thread_count ; + + t ) <nl> { <nl> | added std : : to vector decl in bench | gabime/spdlog | 3ee1bab316f880d7d7f7aedd8e1d9ce951def249 | 2016-09-02T14:24:40Z |
mmm a / src / common / swap . h <nl> ppp b / src / common / swap . h <nl> <nl> # include < cstring > <nl> # include " common / common_types . h " <nl> <nl> - / / GCC 4 . 6 + <nl> - # if __GNUC__ > = 5 | | ( __GNUC__ = = 4 & & __GNUC_MINOR__ > = 6 ) <nl> + / / GCC <nl> + # ifdef __GNUC__ <nl> <nl> # if __BYTE_ORDER__ & & ( __BYTE_ORDER__ = = __ORDER_LITTLE_ENDIAN__ ) & & ! defined ( COMMON_LITTLE_ENDIAN ) <nl> # define COMMON_LITTLE_ENDIAN 1 <nl> <nl> # endif <nl> <nl> / / LLVM / clang <nl> - # elif __clang__ <nl> + # elif defined ( __clang__ ) <nl> <nl> # if __LITTLE_ENDIAN__ & & ! defined ( COMMON_LITTLE_ENDIAN ) <nl> # define COMMON_LITTLE_ENDIAN 1 <nl> | Remove GCC version checks | yuzu-emu/yuzu | 030814b1cb330a5bee2cceeac2734462c0397353 | 2019-02-24T14:24:06Z |
mmm a / docs / tools / website . py <nl> ppp b / docs / tools / website . py <nl> <nl> import mdx_clickhouse <nl> <nl> <nl> - def copy_icons ( args ) : <nl> - logging . info ( ' Copying icons ' ) <nl> - icons_dir = os . path . join ( args . output_dir , ' images ' , ' icons ' ) <nl> - os . makedirs ( icons_dir ) <nl> - for icon in [ <nl> - ' github ' , <nl> - ' edit ' , <nl> - ' external - link ' <nl> - ] : <nl> - icon = ' % s . svg ' % icon <nl> - icon_src = os . path . join ( args . website_dir , ' images ' , ' feathericons ' , ' icons ' , icon ) <nl> - icon_dst = os . path . join ( icons_dir , icon ) <nl> - shutil . copy2 ( icon_src , icon_dst ) <nl> - <nl> - <nl> def build_website ( args ) : <nl> logging . info ( ' Building website ' ) <nl> env = jinja2 . Environment ( <nl> | Update website . py | ClickHouse/ClickHouse | b58fa8585cda86978d63a15514b755bd92897f89 | 2020-04-20T12:52:12Z |
mmm a / CHANGELOG . md <nl> ppp b / CHANGELOG . md <nl> <nl> <nl> # # [ Unreleased ] <nl> <nl> - # # [ 1 . 3 . 13 ] ( https : / / github . com / cmderdev / cmder / tree / v1 . 3 . 12 ) ( 2019 - 11 - 3 ) <nl> + # # [ 1 . 3 . 13 ] ( https : / / github . com / cmderdev / cmder / tree / v1 . 3 . 12 ) ( 2019 - 11 - 03 ) <nl> <nl> # # # Adds <nl> <nl> | README . md | cmderdev/cmder | 5a6f873d2b1180a69ccc25ec96f4bc2fdf36ca9b | 2019-11-03T16:29:18Z |
mmm a / Tests / EndToEndTests / TestDriver . py <nl> ppp b / Tests / EndToEndTests / TestDriver . py <nl> def runImpl ( self , flavor , device , args ) : <nl> if args . verbose : <nl> six . print_ ( self . fullName + " : > " + logFile ) <nl> with open ( logFile , " w " ) as output : <nl> + testScript = self . testDir + " / run - test " <nl> + st = os . stat ( testScript ) <nl> + os . chmod ( testScript , st . st_mode | stat . S_IEXEC ) <nl> cmdLine = [ " bash " , " - c " , self . testDir + " / run - test 2 > & 1 " ] <nl> process = subprocess . Popen ( cmdLine , stdout = subprocess . PIPE ) <nl> <nl> | Changing run - test to executable before test run | microsoft/CNTK | 3eebc19c2e328c3e0ddb96f07f52881be770cb97 | 2016-03-21T08:38:11Z |
mmm a / core / safe_refcount . cpp <nl> ppp b / core / safe_refcount . cpp <nl> static _ALWAYS_INLINE_ uint64_t _atomic_increment_impl ( register uint64_t * pw ) { <nl> <nl> static _ALWAYS_INLINE_ uint64_t _atomic_sub_impl ( register uint64_t * pw , register uint64_t val ) { <nl> <nl> - # if _WIN32_WINNT > = 0x0601 / / Windows 7 + <nl> + # if _WIN32_WINNT > = 0x0601 & & ! defined ( UWP_ENABLED ) / / Windows 7 + except UWP <nl> return InterlockedExchangeSubtract64 ( pw , val ) - val ; <nl> # else <nl> return InterlockedExchangeAdd64 ( ( LONGLONG volatile * ) pw , - ( int64_t ) val ) - val ; <nl> | UWP : Fix build issue caused by atomic operations | godotengine/godot | f514fabf61e122880c1c184fe57bccfe2388b57d | 2017-07-24T00:29:01Z |
mmm a / include / http2 . h <nl> ppp b / include / http2 . h <nl> static sw_inline ssize_t swHttp2_get_length ( const char * buf ) <nl> <nl> ssize_t swHttp2_get_frame_length ( swProtocol * protocol , swSocket * conn , char * buf , uint32_t length ) ; <nl> int swHttp2_send_setting_frame ( swProtocol * protocol , swSocket * conn ) ; <nl> - char * swHttp2_get_type ( int type ) ; <nl> + const char * swHttp2_get_type ( int type ) ; <nl> int swHttp2_get_type_color ( int type ) ; <nl> <nl> static sw_inline void swHttp2_init_settings ( swHttp2_settings * settings ) <nl> mmm a / include / swoole . h <nl> ppp b / include / swoole . h <nl> typedef struct _swChannel <nl> swPipe notify_fd ; <nl> } swChannel ; <nl> <nl> - swChannel * swChannel_new ( size_t size , int maxlen , int flag ) ; <nl> + swChannel * swChannel_new ( size_t size , size_t maxlen , int flag ) ; <nl> # define swChannel_empty ( ch ) ( ch - > num = = 0 ) <nl> # define swChannel_full ( ch ) ( ( ch - > head = = ch - > tail & & ch - > tail_tag ! = ch - > head_tag ) | | ( ch - > bytes + sizeof ( int ) * ch - > num = = ch - > size ) ) <nl> int swChannel_pop ( swChannel * object , void * out , int buffer_length ) ; <nl> mmm a / src / core / array . cc <nl> ppp b / src / core / array . cc <nl> int swArray_store ( swArray * array , uint32_t n , void * data ) <nl> <nl> void * swArray_alloc ( swArray * array , uint32_t n ) <nl> { <nl> - while ( n > = array - > page_num * array - > page_size ) <nl> + while ( n > = ( uint32_t ) ( array - > page_num * array - > page_size ) ) <nl> { <nl> if ( swArray_extend ( array ) < 0 ) <nl> { <nl> mmm a / src / core / base . cc <nl> ppp b / src / core / base . cc <nl> uint64_t swoole_hash_key ( char * str , int str_len ) <nl> <nl> void swoole_dump_ascii ( const char * data , size_t size ) <nl> { <nl> - int i ; <nl> - for ( i = 0 ; i < size ; i + + ) <nl> + for ( size_t i = 0 ; i < size ; i + + ) <nl> { <nl> printf ( " % d " , ( unsigned ) data [ i ] ) ; <nl> } <nl> void swoole_dump_bin ( const char * data , char type , size_t size ) <nl> <nl> void swoole_dump_hex ( const char * data , size_t outlen ) <nl> { <nl> - long i ; <nl> - for ( i = 0 ; i < outlen ; + + i ) <nl> + for ( size_t i = 0 ; i < outlen ; + + i ) <nl> { <nl> if ( ( i & 0x0fu ) = = 0 ) <nl> { <nl> int swoole_file_put_contents ( const char * filename , const char * content , size_t l <nl> return SW_ERR ; <nl> } <nl> <nl> - int n , chunk_size , written = 0 ; <nl> + size_t n , chunk_size , written = 0 ; <nl> <nl> while ( written < length ) <nl> { <nl> size_t sw_snprintf ( char * buf , size_t size , const char * format , . . . ) <nl> retval = 0 ; <nl> buf [ 0 ] = ' \ 0 ' ; <nl> } <nl> - else if ( sw_unlikely ( retval > = size ) ) <nl> + else if ( sw_unlikely ( retval > = ( int ) size ) ) <nl> { <nl> retval = size - 1 ; <nl> buf [ retval ] = ' \ 0 ' ; <nl> size_t sw_vsnprintf ( char * buf , size_t size , const char * format , va_list args ) <nl> retval = 0 ; <nl> buf [ 0 ] = ' \ 0 ' ; <nl> } <nl> - else if ( sw_unlikely ( retval > = size ) ) <nl> + else if ( sw_unlikely ( retval > = ( int ) size ) ) <nl> { <nl> retval = size - 1 ; <nl> buf [ retval ] = ' \ 0 ' ; <nl> mmm a / src / core / channel . cc <nl> ppp b / src / core / channel . 
cc <nl> typedef struct _swChannel_item <nl> char data [ 0 ] ; <nl> } swChannel_item ; <nl> <nl> - swChannel * swChannel_new ( size_t size , int maxlen , int flags ) <nl> + swChannel * swChannel_new ( size_t size , size_t maxlen , int flags ) <nl> { <nl> assert ( size > = maxlen ) ; <nl> int ret ; <nl> int swChannel_in ( swChannel * object , void * in , int data_length ) <nl> { <nl> item = ( swChannel_item * ) ( ( char * ) object - > mem + object - > tail ) ; <nl> object - > tail + = msize ; <nl> - if ( object - > tail > = object - > size ) <nl> + if ( object - > tail > = ( off_t ) object - > size ) <nl> { <nl> object - > tail = 0 ; <nl> object - > tail_tag = 1 - object - > tail_tag ; <nl> int swChannel_out ( swChannel * object , void * out , int buffer_length ) <nl> assert ( buffer_length > = item - > length ) ; <nl> memcpy ( out , item - > data , item - > length ) ; <nl> object - > head + = ( item - > length + sizeof ( item - > length ) ) ; <nl> - if ( object - > head > = object - > size ) <nl> + if ( object - > head > = ( off_t ) object - > size ) <nl> { <nl> object - > head = 0 ; <nl> object - > head_tag = 1 - object - > head_tag ; <nl> mmm a / src / core / heap . cc <nl> ppp b / src / core / heap . cc <nl> void * swHeap_peek ( swHeap * heap ) <nl> <nl> void swHeap_print ( swHeap * heap ) <nl> { <nl> - int i ; <nl> - for ( i = 1 ; i < heap - > num ; i + + ) <nl> + for ( uint32_t i = 1 ; i < heap - > num ; i + + ) <nl> { <nl> printf ( " # % d \ tpriority = % ld , data = % p \ n " , i , ( long ) heap - > nodes [ i ] - > priority , heap - > nodes [ i ] - > data ) ; <nl> } <nl> mmm a / src / memory / buffer . cc <nl> ppp b / src / memory / buffer . cc <nl> int swBuffer_free ( swBuffer * buffer ) <nl> int swBuffer_append ( swBuffer * buffer , const void * data , uint32_t size ) <nl> { <nl> uint32_t _length = size ; <nl> - const void * _pos = data ; <nl> + char * _pos = ( char * ) data ; <nl> uint32_t _n ; <nl> <nl> / / buffer enQueue <nl> mmm a / src / memory / table . cc <nl> ppp b / src / memory / table . cc <nl> int swTable_create ( swTable * table ) <nl> memory = ( char * ) memory + table - > size * sizeof ( swTableRow * ) ; <nl> memory_size - = table - > size * sizeof ( swTableRow * ) ; <nl> <nl> - int i ; <nl> - for ( i = 0 ; i < table - > size ; i + + ) <nl> + for ( size_t i = 0 ; i < table - > size ; i + + ) <nl> { <nl> table - > rows [ i ] = ( swTableRow * ) ( ( char * ) memory + ( row_memory_size * i ) ) ; <nl> memset ( table - > rows [ i ] , 0 , sizeof ( swTableRow ) ) ; <nl> void swTable_iterator_forward ( swTable * table ) <nl> } <nl> else <nl> { <nl> - int i = 0 ; <nl> + uint32_t i = 0 ; <nl> for ( ; ; i + + ) <nl> { <nl> if ( row = = NULL ) <nl> mmm a / src / network / stream . cc <nl> ppp b / src / network / stream . cc <nl> int swStream_send ( swStream * stream , char * data , size_t length ) <nl> int swStream_recv_blocking ( int fd , void * __buf , size_t __len ) <nl> { <nl> int tmp = 0 ; <nl> - int ret = swSocket_recv_blocking ( fd , & tmp , sizeof ( tmp ) , MSG_WAITALL ) ; <nl> + ssize_t ret = swSocket_recv_blocking ( fd , & tmp , sizeof ( tmp ) , MSG_WAITALL ) ; <nl> <nl> if ( ret < = 0 ) <nl> { <nl> return SW_CLOSE ; <nl> } <nl> - int length = ntohl ( tmp ) ; <nl> + int length = ( int ) ntohl ( tmp ) ; <nl> if ( length < = 0 ) <nl> { <nl> return SW_CLOSE ; <nl> } <nl> - else if ( length > __len ) <nl> + else if ( length > ( int ) __len ) <nl> { <nl> return SW_CLOSE ; <nl> } <nl> mmm a / src / protocol / http2 . cc <nl> ppp b / src / protocol / http2 . 
cc <nl> ssize_t swHttp2_get_frame_length ( swProtocol * protocol , swSocket * conn , char * buf <nl> return swHttp2_get_length ( buf ) + SW_HTTP2_FRAME_HEADER_SIZE ; <nl> } <nl> <nl> - char * swHttp2_get_type ( int type ) <nl> + const char * swHttp2_get_type ( int type ) <nl> { <nl> switch ( type ) <nl> { <nl> mmm a / src / protocol / ssl . cc <nl> ppp b / src / protocol / ssl . cc <nl> void swSSL_destroy ( ) <nl> } <nl> openssl_init = 0 ; <nl> # if OPENSSL_VERSION_NUMBER > = OPENSSL_VERSION_1_0_0 <nl> - CRYPTO_THREADID_set_callback ( NULL ) ; <nl> + ( void ) CRYPTO_THREADID_set_callback ( NULL ) ; <nl> # else <nl> CRYPTO_set_id_callback ( NULL ) ; <nl> # endif <nl> void swSSL_init_thread_safety ( ) <nl> } <nl> <nl> # if OPENSSL_VERSION_NUMBER > = OPENSSL_VERSION_1_0_0 <nl> - CRYPTO_THREADID_set_callback ( swSSL_id_callback ) ; <nl> + ( void ) CRYPTO_THREADID_set_callback ( swSSL_id_callback ) ; <nl> # else <nl> CRYPTO_set_id_callback ( swSSL_id_callback ) ; <nl> # endif <nl> static int swSSL_passwd_callback ( char * buf , int num , int verify , void * data ) <nl> swSSL_option * option = ( swSSL_option * ) data ; <nl> if ( option - > passphrase ) <nl> { <nl> - size_t len = strlen ( option - > passphrase ) ; <nl> + int len = strlen ( option - > passphrase ) ; <nl> if ( len < num - 1 ) <nl> { <nl> memcpy ( buf , option - > passphrase , len + 1 ) ; <nl> int swSSL_get_client_certificate ( SSL * ssl , char * buffer , size_t length ) <nl> } <nl> <nl> len = BIO_pending ( bio ) ; <nl> - if ( len < 0 & & len > length ) <nl> + if ( len < 0 & & len > ( long ) length ) <nl> { <nl> swWarn ( " certificate length [ % ld ] is too big " , len ) ; <nl> goto _failed ; <nl> void swSSL_close ( swSocket * conn ) <nl> <nl> static sw_inline void swSSL_connection_error ( swSocket * conn ) <nl> { <nl> - int level = SW_LOG_NOTICE ; <nl> + uint32_t level = SW_LOG_NOTICE ; <nl> int reason = ERR_GET_REASON ( ERR_peek_error ( ) ) ; <nl> <nl> # if 0 <nl> mmm a / src / reactor / poll . cc <nl> ppp b / src / reactor / poll . cc <nl> static int swReactorPoll_exist ( swReactor * reactor , int fd ) ; <nl> <nl> typedef struct _swReactorPoll <nl> { <nl> - int max_fd_num ; <nl> + uint32_t max_fd_num ; <nl> swSocket * * fds ; <nl> struct pollfd * events ; <nl> } swReactorPoll ; <nl> static int swReactorPoll_wait ( swReactor * reactor , struct timeval * timeo ) <nl> swEvent event ; <nl> swReactor_handler handler ; <nl> <nl> - int ret , i ; <nl> + int ret ; <nl> <nl> if ( reactor - > timeout_msec = = 0 ) <nl> { <nl> static int swReactorPoll_wait ( swReactor * reactor , struct timeval * timeo ) <nl> } <nl> else <nl> { <nl> - for ( i = 0 ; i < reactor - > event_num ; i + + ) <nl> + for ( uint32_t i = 0 ; i < reactor - > event_num ; i + + ) <nl> { <nl> event . socket = object - > fds [ i ] ; <nl> event . fd = object - > events [ i ] . fd ; <nl> static int swReactorPoll_wait ( swReactor * reactor , struct timeval * timeo ) <nl> static int swReactorPoll_exist ( swReactor * reactor , int fd ) <nl> { <nl> swReactorPoll * object = ( swReactorPoll * ) reactor - > object ; <nl> - int i ; <nl> - for ( i = 0 ; i < reactor - > event_num ; i + + ) <nl> + for ( uint32_t i = 0 ; i < reactor - > event_num ; i + + ) <nl> { <nl> if ( object - > events [ i ] . fd = = fd ) <nl> { <nl> | warning free | swoole/swoole-src | 3a4af54d3de6f526d439c6e686c32de853581b50 | 2020-02-18T09:11:49Z |
mmm a / hphp / compiler / type_annotation . cpp <nl> ppp b / hphp / compiler / type_annotation . cpp <nl> const StaticString <nl> s_access_list ( " access_list " ) , <nl> s_fields ( " fields " ) , <nl> s_is_cls_cns ( " is_cls_cns " ) , <nl> + s_optional_shape_field ( " optional_shape_field " ) , <nl> s_value ( " value " ) , <nl> s_typevars ( " typevars " ) <nl> ; <nl> void TypeAnnotation : : shapeFieldsToScalarArray ( Array & rep , <nl> assert ( shapeField - > m_typeArgs ) ; <nl> auto field = Array : : Create ( ) ; <nl> if ( shapeField - > isClsCnsShapeField ( ) ) field . add ( s_is_cls_cns , true_varNR ) ; <nl> + <nl> + if ( shapeField - > isOptionalShapeField ( ) ) { <nl> + field . add ( s_optional_shape_field , true_varNR ) ; <nl> + } <nl> + <nl> field . add ( s_value , Variant ( shapeField - > m_typeArgs - > getScalarArrayRep ( ) ) ) ; <nl> fields . add ( String ( shapeField - > m_name ) , Variant ( field . get ( ) ) ) ; <nl> shapeField = shapeField - > m_typeList ; <nl> mmm a / hphp / compiler / type_annotation . h <nl> ppp b / hphp / compiler / type_annotation . h <nl> struct TypeAnnotation { <nl> bool isTypeAccess ( ) const { return m_typeaccess ; } <nl> bool isShape ( ) const { return m_shape ; } <nl> bool isClsCnsShapeField ( ) const { return m_clsCnsShapeField ; } <nl> + bool isOptionalShapeField ( ) const { return m_optionalShapeField ; } <nl> <nl> / * <nl> * Return a shallow copy of this TypeAnnotation , except with <nl> mmm a / hphp / runtime / base / type - structure . cpp <nl> ppp b / hphp / runtime / base / type - structure . cpp <nl> const StaticString <nl> s_access_list ( " access_list " ) , <nl> s_fields ( " fields " ) , <nl> s_is_cls_cns ( " is_cls_cns " ) , <nl> + s_optional_shape_field ( " optional_shape_field " ) , <nl> s_value ( " value " ) , <nl> s_this ( " HH \ \ this " ) , <nl> s_self ( " self " ) , <nl> void shapeTypeName ( const Array & arr , std : : string & name ) { <nl> auto const field = fields - > getKey ( i ) ; <nl> auto value = fields - > getValue ( i ) . toCArrRef ( ) ; <nl> auto quote = " ' " ; <nl> + if ( value . exists ( s_optional_shape_field ) ) { <nl> + name + = " ? " ; <nl> + } <nl> if ( value . exists ( s_value ) ) { <nl> / / if unresolved , ignore wrapper <nl> if ( value . exists ( s_is_cls_cns ) ) quote = " " ; <nl> Array resolveShape ( const Array & arr , <nl> auto valueArr = wrapper [ s_value ] . toArray ( ) ; <nl> auto value = <nl> resolveTS ( valueArr , typeCns , typeCnsCls , generics , persistent ) ; <nl> + <nl> + if ( wrapper . exists ( s_optional_shape_field ) ) { <nl> + value . add ( s_optional_shape_field , true_varNR ) ; <nl> + } <nl> + <nl> newfields . add ( key , Variant ( value ) ) ; <nl> } <nl> <nl> mmm a / hphp / test / slow / type_annotation / shape_typedef_with_optional_field . php . expect <nl> ppp b / hphp / test / slow / type_annotation / shape_typedef_with_optional_field . php . expect <nl> <nl> - string ( 21 ) " HH \ shape ( ' a ' = > HH \ int ) " <nl> + string ( 22 ) " HH \ shape ( ? ' a ' = > HH \ int ) " <nl> array ( 2 ) { <nl> [ " kind " ] = > <nl> int ( 14 ) <nl> [ " fields " ] = > <nl> array ( 1 ) { <nl> [ " a " ] = > <nl> - array ( 1 ) { <nl> + array ( 2 ) { <nl> [ " kind " ] = > <nl> int ( 1 ) <nl> + [ " optional_shape_field " ] = > <nl> + bool ( true ) <nl> } <nl> } <nl> } <nl> | Implement HHVM type structure correctness for optional shape fields | facebook/hhvm | 793b4c52aa1b4406bc60167fe42ea57c367b19f7 | 2017-03-08T18:59:31Z |
mmm a / bindings / bindingtester / known_testers . py <nl> ppp b / bindings / bindingtester / known_testers . py <nl> def _absolute_path ( path ) : <nl> ' ruby ' : Tester ( ' ruby ' , _absolute_path ( ' ruby / tests / tester . rb ' ) , 64 , 23 , MAX_API_VERSION ) , <nl> ' java ' : Tester ( ' java ' , _java_cmd + ' StackTester ' , 2040 , 500 , MAX_API_VERSION , types = ALL_TYPES ) , <nl> ' java_async ' : Tester ( ' java ' , _java_cmd + ' AsyncStackTester ' , 2040 , 500 , MAX_API_VERSION , types = ALL_TYPES ) , <nl> - ' java_completable ' : Tester ( ' java ' , _java_completable_cmd + ' StackTester ' , 2040 , 500 , MAX_API_VERSION , types = ALL_TYPES ) , <nl> - ' java_completable_async ' : Tester ( ' java ' , _java_completable_cmd + ' AsyncStackTester ' , 2040 , 500 , MAX_API_VERSION , types = ALL_TYPES ) , <nl> + ' java_completable ' : Tester ( ' java ' , _java_completable_cmd + ' StackTester ' , 2040 , 510 , MAX_API_VERSION , types = ALL_TYPES ) , <nl> + ' java_completable_async ' : Tester ( ' java ' , _java_completable_cmd + ' AsyncStackTester ' , 2040 , 510 , MAX_API_VERSION , types = ALL_TYPES ) , <nl> ' go ' : Tester ( ' go ' , _absolute_path ( ' go / build / bin / _stacktester ' ) , 63 , 200 , MAX_API_VERSION ) , <nl> ' flow ' : Tester ( ' flow ' , _absolute_path ( ' flow / bin / fdb_flow_tester ' ) , 63 , 500 , MAX_API_VERSION ) , <nl> } <nl> new file mode 100644 <nl> index 0000000000 . . 62d6232eed <nl> mmm / dev / null <nl> ppp b / bindings / java / fdb - java - style . xml <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> + < ! - - <nl> + The style for code written within the FDB Java bindings . <nl> + Note that this style guide grew up somewhat organically from <nl> + the idiosyncracies of the committers involved . It aims to <nl> + be at least a little idiomatically Java while at the same time <nl> + trying not to look too incongruous when compared to the style <nl> + of our core products ( e . g . , fdbserver ) . It also isn ' t <nl> + borrowed from some other style guide , because that would <nl> + be too easy . <nl> + - - > <nl> + < ! DOCTYPE module PUBLIC <nl> + " - / / Puppy Crawl / / DTD Check Configuration 1 . 3 / / EN " <nl> + " http : / / www . puppycrawl . com / dtds / configuration_1_3 . dtd " > <nl> + <nl> + <nl> + < module name = " Checker " > <nl> + < module name = " SuppressionFilter " > <nl> + < property name = " file " value = " suppressions . xml " / > <nl> + < / module > <nl> + <nl> + < module name = " TreeWalker " > <nl> + < property name = " tabWidth " value = " 4 " / > <nl> + <nl> + < ! - - Blocks - - > <nl> + < module name = " AvoidNestedBlocks " / > <nl> + < module name = " EmptyBlock " / > <nl> + < module name = " EmptyCatchBlock " / > <nl> + < module name = " LeftCurly " > <nl> + < property name = " option " value = " eol " / > <nl> + < property name = " ignoreEnums " value = " false " / > <nl> + < / module > <nl> + < ! - - We have about 76 errors with value = " alone " and 27 with value = " same " . We should pick one . <nl> + < module name = " RightCurly " > <nl> + < property name = " option " value = " same " / > <nl> + < / module > <nl> + - - > <nl> + <nl> + < ! - - Design - - > <nl> + < ! - - We might get some helpful errors if we turned this on , but not right now . <nl> + < module name = " DesignForExtension " / > <nl> + - - > <nl> + < ! - - We have some classes violating this . It seems like a reasonable thing to add , but it is technically API breaking . 
<nl> + < module name = " FinalClass " / > <nl> + - - > <nl> + < module name = " HideUtilityClassConstructor " / > <nl> + < module name = " MutableException " / > <nl> + < module name = " OneTopLevelClass " / > <nl> + <nl> + < ! - - Coding - - > <nl> + < module name = " CovariantEquals " / > <nl> + < module name = " DefaultComesLast " / > <nl> + < module name = " EmptyStatement " / > <nl> + < module name = " EqualsHashCode " / > <nl> + < module name = " FallThrough " / > <nl> + < ! - - We should probably clean these up at some point , but not today . <nl> + < module name = " MagicNumber " > <nl> + < property name = " ignoreNumbers " value = " - 1 , 0 , 1 , 2 , 255 , 65535 " / > <nl> + < property name = " ignoreHashCodeMethod " value = " true " / > <nl> + < / module > <nl> + - - > <nl> + < module name = " MissingSwitchDefault " / > <nl> + < module name = " NoClone " / > <nl> + < module name = " PackageDeclaration " / > <nl> + < module name = " SimplifyBooleanExpression " / > <nl> + < module name = " SimplifyBooleanReturn " / > <nl> + < module name = " StringLiteralEquality " / > <nl> + < module name = " SuperClone " / > <nl> + < module name = " SuperFinalize " / > <nl> + <nl> + < ! - - Imports - - > <nl> + < module name = " CustomImportOrder " > <nl> + < property name = " customImportOrderRules " value = " STATIC # # # STANDARD_JAVA_PACKAGE # # # SAME_PACKAGE ( 3 ) " / > <nl> + < / module > <nl> + < module name = " AvoidStarImport " / > <nl> + < module name = " UnusedImports " / > <nl> + < module name = " RedundantImport " / > <nl> + <nl> + < ! - - Javadoc - - > <nl> + < ! - - TODO - - > <nl> + <nl> + < ! - - Miscellaneous - - > <nl> + < module name = " ArrayTypeStyle " / > <nl> + < module name = " CommentsIndentation " / > <nl> + < module name = " Indentation " / > <nl> + < module name = " OuterTypeFilename " / > <nl> + < module name = " UpperEll " / > <nl> + <nl> + < ! - - Modifiers - - > <nl> + < module name = " ModifierOrder " / > <nl> + < module name = " RedundantModifier " / > <nl> + <nl> + < ! - - Naming conventions - - > <nl> + < module name = " CatchParameterName " > <nl> + < property name = " format " value = " ^ ( e \ d * | t \ d * | ex \ d * | err \ d * ) $ " / > <nl> + < / module > <nl> + < module name = " ClassTypeParameterName " / > <nl> + < module name = " InterfaceTypeParameterName " / > <nl> + < module name = " LocalFinalVariableName " / > <nl> + < module name = " LocalVariableName " / > <nl> + < module name = " MemberName " / > <nl> + < module name = " MethodName " > <nl> + < property name = " applyToProtected " value = " false " / > <nl> + < property name = " applyToPackage " value = " false " / > <nl> + < property name = " applyToPrivate " value = " false " / > <nl> + < / module > <nl> + < module name = " MethodTypeParameterName " / > <nl> + < module name = " PackageName " / > <nl> + < module name = " ParameterName " / > <nl> + < module name = " StaticVariableName " / > <nl> + < module name = " TypeName " / > <nl> + <nl> + < ! 
- - Whitespace - - > <nl> + < module name = " EmptyForInitializerPad " / > <nl> + < module name = " EmptyForIteratorPad " / > <nl> + < module name = " GenericWhitespace " / > <nl> + < module name = " MethodParamPad " / > <nl> + < module name = " NoLineWrap " / > <nl> + < module name = " NoWhitespaceAfter " > <nl> + < property name = " tokens " value = " AT , INC , DEC , UNARY_MINUS , UNARY_PLUS , BNOT , LNOT , DOT , ARRAY_DECLARATOR , INDEX_OP , METHOD_REF " / > <nl> + < / module > <nl> + < module name = " NoWhitespaceBefore " > <nl> + < property name = " allowLineBreaks " value = " true " / > <nl> + < property name = " tokens " value = " COMMA , SEMI , POST_INC , POST_DEC , DOT , ELLIPSIS , METHOD_REF " / > <nl> + < / module > <nl> + < module name = " OperatorWrap " > <nl> + < property name = " option " value = " eol " / > <nl> + < / module > <nl> + < module name = " ParenPad " / > <nl> + < module name = " SeparatorWrap " > <nl> + < property name = " option " value = " eol " / > <nl> + < property name = " tokens " value = " COMMA " / > <nl> + < / module > <nl> + < module name = " SeparatorWrap " > <nl> + < property name = " option " value = " nl " / > <nl> + < property name = " tokens " value = " DOT " / > <nl> + < / module > <nl> + < module name = " TypecastParenPad " > <nl> + < property name = " option " value = " nospace " / > <nl> + < / module > <nl> + < module name = " WhitespaceAfter " > <nl> + < property name = " tokens " value = " SEMI " / > <nl> + < / module > <nl> + <nl> + < / module > <nl> + < / module > <nl> mmm a / bindings / java / src - completable / junit / com / apple / foundationdb / tuple / ArrayUtilTests . java <nl> ppp b / bindings / java / src - completable / junit / com / apple / foundationdb / tuple / ArrayUtilTests . java <nl> <nl> import java . util . ArrayList ; <nl> import java . util . List ; <nl> <nl> - import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> import org . junit . Test ; <nl> <nl> / * * <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / Cluster . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / Cluster . java <nl> <nl> <nl> / * * <nl> * The { @ code Cluster } represents a connection to a physical set of cooperating machines <nl> - * running FoundationDB . A { @ code Cluster } is opened with a reference to a cluster file . <nl> + * running FoundationDB . A { @ code Cluster } is opened with a reference to a cluster file . < br > <nl> + * < br > <nl> + * < b > Note : < / b > { @ code Cluster } objects must be { @ link # close closed } when no longer in use <nl> + * in order to free any associated resources . <nl> * / <nl> - public class Cluster extends DefaultDisposableImpl implements Disposable { <nl> + public class Cluster extends NativeObjectWrapper { <nl> private ClusterOptions options ; <nl> private final Executor executor ; <nl> <nl> protected Cluster ( long cPtr , Executor executor ) { <nl> * <nl> * @ return a set of cluster - specific options affecting this { @ code Cluster } <nl> * / <nl> - public ClusterOptions options ( ) { return options ; } <nl> + public ClusterOptions options ( ) { <nl> + return options ; <nl> + } <nl> <nl> @ Override <nl> protected void finalize ( ) throws Throwable { <nl> - dispose ( ) ; <nl> - super . finalize ( ) ; <nl> + try { <nl> + checkUnclosed ( " Cluster " ) ; <nl> + close ( ) ; <nl> + } <nl> + finally { <nl> + super . 
finalize ( ) ; <nl> + } <nl> } <nl> <nl> / * * <nl> public Database openDatabase ( Executor e ) throws FDBException { <nl> } <nl> <nl> @ Override <nl> - protected void disposeInternal ( long cPtr ) { <nl> + protected void closeInternal ( long cPtr ) { <nl> Cluster_dispose ( cPtr ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / Database . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / Database . java <nl> <nl> * in the { @ link TransactionContext } interface . When used on a { @ code Database } these <nl> * methods will call { @ code Transaction # commit ( ) } after user code has been <nl> * executed . These methods will not return successfully until { @ code commit ( ) } has <nl> - * returned successfully . <nl> - * <nl> + * returned successfully . < br > <nl> + * < br > <nl> + * < b > Note : < / b > { @ code Database } objects must be { @ link # close closed } when no longer <nl> + * in use in order to free any associated resources . <nl> * / <nl> - public interface Database extends Disposable , TransactionContext { <nl> + public interface Database extends AutoCloseable , TransactionContext { <nl> / * * <nl> * Creates a { @ link Transaction } that operates on this { @ code Database } . < br > <nl> * < br > <nl> default Transaction createTransaction ( ) { <nl> * / <nl> @ Override <nl> default < T > CompletableFuture < T > readAsync ( <nl> - Function < ? super ReadTransaction , CompletableFuture < T > > retryable ) { <nl> + Function < ? super ReadTransaction , ? extends CompletableFuture < T > > retryable ) { <nl> return readAsync ( retryable , getExecutor ( ) ) ; <nl> } <nl> <nl> default Transaction createTransaction ( ) { <nl> * @ see # readAsync ( Function ) <nl> * / <nl> < T > CompletableFuture < T > readAsync ( <nl> - Function < ? super ReadTransaction , CompletableFuture < T > > retryable , Executor e ) ; <nl> + Function < ? super ReadTransaction , ? extends CompletableFuture < T > > retryable , Executor e ) ; <nl> <nl> / * * <nl> * Runs a transactional function against this { @ code Database } with retry logic . <nl> default Transaction createTransaction ( ) { <nl> * / <nl> @ Override <nl> default < T > CompletableFuture < T > runAsync ( <nl> - Function < ? super Transaction , CompletableFuture < T > > retryable ) { <nl> + Function < ? super Transaction , ? extends CompletableFuture < T > > retryable ) { <nl> return runAsync ( retryable , getExecutor ( ) ) ; <nl> } <nl> <nl> default Transaction createTransaction ( ) { <nl> * @ see # run ( Function ) <nl> * / <nl> < T > CompletableFuture < T > runAsync ( <nl> - Function < ? super Transaction , CompletableFuture < T > > retryable , Executor e ) ; <nl> + Function < ? super Transaction , ? extends CompletableFuture < T > > retryable , Executor e ) ; <nl> + <nl> + / * * <nl> + * Close the { @ code Database } object and release any associated resources . This must be called at <nl> + * least once after the { @ code Database } object is no longer in use . This can be called multiple <nl> + * times , but care should be taken that it is not in use in another thread at the time of the call . <nl> + * / <nl> + @ Override <nl> + void close ( ) ; <nl> } <nl> deleted file mode 100644 <nl> index 7772065bd3 . . 0000000000 <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / Disposable . java <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Disposable . 
java <nl> - * <nl> - * This source file is part of the FoundationDB open source project <nl> - * <nl> - * Copyright 2013 - 2018 Apple Inc . and the FoundationDB project authors <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * / <nl> - <nl> - package com . apple . foundationdb ; <nl> - <nl> - / * * <nl> - * A FoundationDB object with native resources that can be freed . It is not mandatory to call <nl> - * { @ link Disposable # dispose ( ) } most of the time , as disposal will happen at finalization . <nl> - * / <nl> - public interface Disposable { <nl> - / * * <nl> - * Dispose of the object . This can be called multiple times , but care should be <nl> - * taken that an object is not in use in another thread at the time of the call . <nl> - * / <nl> - void dispose ( ) ; <nl> - } <nl> \ No newline at end of file <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FDB . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FDB . java <nl> <nl> static class DaemonThreadFactory implements ThreadFactory { <nl> private final ThreadFactory factory ; <nl> <nl> - public DaemonThreadFactory ( ThreadFactory factory ) { <nl> + DaemonThreadFactory ( ThreadFactory factory ) { <nl> this . factory = factory ; <nl> } <nl> <nl> public Thread newThread ( Runnable r ) { <nl> final int apiVersion ; <nl> private volatile boolean netStarted = false ; <nl> private volatile boolean netStopped = false ; <nl> - final private Semaphore netRunning = new Semaphore ( 1 ) ; <nl> + volatile boolean warnOnUnclosed = true ; <nl> + private final Semaphore netRunning = new Semaphore ( 1 ) ; <nl> private final NetworkOptions options ; <nl> <nl> static { <nl> public Thread newThread ( Runnable r ) { <nl> private FDB ( int apiVersion ) { <nl> this . apiVersion = apiVersion ; <nl> <nl> - options = new NetworkOptions ( new OptionConsumer ( ) { <nl> - @ Override <nl> - public void setOption ( int code , byte [ ] parameter ) { <nl> - Network_setOption ( code , parameter ) ; <nl> - } <nl> - } ) ; <nl> - <nl> - Runtime . getRuntime ( ) . addShutdownHook ( new Thread ( <nl> - new Runnable ( ) { <nl> - @ Override <nl> - public void run ( ) { <nl> - FDB . this . stopNetwork ( ) ; <nl> - } <nl> - } <nl> - ) ) ; <nl> + options = new NetworkOptions ( this : : Network_setOption ) ; <nl> + Runtime . getRuntime ( ) . addShutdownHook ( new Thread ( this : : stopNetwork ) ) ; <nl> } <nl> <nl> / * * <nl> public void run ( ) { <nl> * <nl> * @ return a set of options affecting this instance of the FoundationDB API <nl> * / <nl> - public NetworkOptions options ( ) { return options ; } <nl> + public NetworkOptions options ( ) { <nl> + return options ; <nl> + } <nl> <nl> / * * <nl> * Select the version for the client API . 
An exception will be thrown if the <nl> public void run ( ) { <nl> * <nl> * @ return the FoundationDB API object <nl> * / <nl> - public synchronized static FDB selectAPIVersion ( final int version ) throws FDBException { <nl> + public static synchronized FDB selectAPIVersion ( final int version ) throws FDBException { <nl> if ( singleton ! = null ) { <nl> if ( version ! = singleton . apiVersion ) { <nl> throw new IllegalArgumentException ( <nl> public synchronized static FDB selectAPIVersion ( final int version ) throws FDBExc <nl> } <nl> return singleton ; <nl> } <nl> - if ( version < 500 ) <nl> - throw new IllegalArgumentException ( " API version not supported ( minimum 500 ) " ) ; <nl> + if ( version < 510 ) <nl> + throw new IllegalArgumentException ( " API version not supported ( minimum 510 ) " ) ; <nl> if ( version > 510 ) <nl> throw new IllegalArgumentException ( " API version not supported ( maximum 510 ) " ) ; <nl> + <nl> Select_API_version ( version ) ; <nl> - return singleton = new FDB ( version ) ; <nl> + FDB fdb = new FDB ( version ) ; <nl> + <nl> + return singleton = fdb ; <nl> + } <nl> + <nl> + / * * <nl> + * Enables or disables the stderr warning that is printed whenever an object with FoundationDB <nl> + * native resources is garbage collected without being closed . By default , this feature is enabled . <nl> + * <nl> + * @ param warnOnUnclosed Whether the warning should be printed for unclosed objects <nl> + * / <nl> + public void setUnclosedWarning ( boolean warnOnUnclosed ) { <nl> + this . warnOnUnclosed = warnOnUnclosed ; <nl> + } <nl> + <nl> + / / Singleton is initialized to null and only set once by a call to selectAPIVersion <nl> + static FDB getInstance ( ) { <nl> + if ( singleton ! = null ) { <nl> + return singleton ; <nl> + } <nl> + <nl> + throw new IllegalStateException ( " API version has not been selected " ) ; <nl> } <nl> <nl> / * * <nl> public Database open ( String clusterFilePath , Executor e ) throws FDBException { <nl> f = new FutureCluster ( Cluster_create ( clusterFilePath ) , e ) ; <nl> } <nl> Cluster c = f . join ( ) ; <nl> - return c . openDatabase ( e ) ; <nl> + Database db = c . openDatabase ( e ) ; <nl> + c . close ( ) ; <nl> + <nl> + return db ; <nl> } <nl> <nl> / * * <nl> public synchronized void startNetwork ( Executor e ) throws FDBException , IllegalSt <nl> Network_setup ( ) ; <nl> netStarted = true ; <nl> <nl> - e . execute ( new Runnable ( ) { <nl> - @ Override <nl> - public void run ( ) { <nl> - boolean acquired = false ; <nl> - try { <nl> - while ( ! acquired ) { <nl> - try { <nl> - / / make attempt to avoid a needless deadlock <nl> - synchronized ( FDB . this ) { <nl> - if ( netStopped ) { <nl> - return ; <nl> - } <nl> + e . execute ( ( ) - > { <nl> + boolean acquired = false ; <nl> + try { <nl> + while ( ! acquired ) { <nl> + try { <nl> + / / make attempt to avoid a needless deadlock <nl> + synchronized ( FDB . this ) { <nl> + if ( netStopped ) { <nl> + return ; <nl> } <nl> + } <nl> <nl> - netRunning . acquire ( ) ; <nl> - acquired = true ; <nl> - } catch ( InterruptedException e ) { } <nl> - } <nl> - try { <nl> - Network_run ( ) ; <nl> - } catch ( Throwable t ) { <nl> - System . err . println ( " Unhandled error in FoundationDB network thread : " + t . getMessage ( ) ) ; <nl> - / / eat this error . we have nowhere to send it . <nl> - } <nl> - } finally { <nl> - if ( acquired ) { <nl> - netRunning . release ( ) ; <nl> - } <nl> - synchronized ( FDB . this ) { <nl> - netStopped = true ; <nl> + netRunning . 
acquire ( ) ; <nl> + acquired = true ; <nl> + } catch ( InterruptedException err ) { <nl> + / / Swallow thread interruption <nl> } <nl> } <nl> + try { <nl> + Network_run ( ) ; <nl> + } catch ( Throwable t ) { <nl> + System . err . println ( " Unhandled error in FoundationDB network thread : " + t . getMessage ( ) ) ; <nl> + / / eat this error . we have nowhere to send it . <nl> + } <nl> + } finally { <nl> + if ( acquired ) { <nl> + netRunning . release ( ) ; <nl> + } <nl> + synchronized ( FDB . this ) { <nl> + netStopped = true ; <nl> + } <nl> } <nl> } ) ; <nl> } <nl> public synchronized void stopNetwork ( ) throws FDBException { <nl> / / that we will never again be able to call runNetwork ( ) <nl> netRunning . acquire ( ) ; <nl> return ; <nl> - } catch ( InterruptedException e ) { } <nl> + } catch ( InterruptedException e ) { <nl> + / / If the thread is interrupted while trying to acquire <nl> + / / the semaphore , we just want to try again . <nl> + } <nl> } <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FDBDatabase . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FDBDatabase . java <nl> <nl> <nl> import com . apple . foundationdb . async . AsyncUtil ; <nl> <nl> - class FDBDatabase extends DefaultDisposableImpl implements Database , Disposable , OptionConsumer { <nl> + class FDBDatabase extends NativeObjectWrapper implements Database , OptionConsumer { <nl> private DatabaseOptions options ; <nl> private final Executor executor ; <nl> <nl> public DatabaseOptions options ( ) { <nl> } <nl> } <nl> } finally { <nl> - t . dispose ( ) ; <nl> + t . close ( ) ; <nl> } <nl> } <nl> <nl> public DatabaseOptions options ( ) { <nl> } <nl> <nl> @ Override <nl> - public < T > CompletableFuture < T > runAsync ( final Function < ? super Transaction , CompletableFuture < T > > retryable , Executor e ) { <nl> + public < T > CompletableFuture < T > runAsync ( final Function < ? super Transaction , ? extends CompletableFuture < T > > retryable , Executor e ) { <nl> final AtomicReference < Transaction > trRef = new AtomicReference < > ( createTransaction ( e ) ) ; <nl> final AtomicReference < T > returnValue = new AtomicReference < > ( ) ; <nl> return AsyncUtil . whileTrue ( ( ) - > { <nl> CompletableFuture < T > process = AsyncUtil . applySafely ( retryable , trRef . get ( ) ) ; <nl> <nl> - return process . thenComposeAsync ( returnVal - > <nl> + return AsyncUtil . composeHandleAsync ( process . thenComposeAsync ( returnVal - > <nl> trRef . get ( ) . commit ( ) . thenApply ( o - > { <nl> returnValue . set ( returnVal ) ; <nl> return false ; <nl> - } ) <nl> - , e ) . handleAsync ( ( value , t ) - > { <nl> - if ( t = = null ) <nl> - return CompletableFuture . completedFuture ( value ) ; <nl> - if ( ! ( t instanceof RuntimeException ) ) <nl> - throw new CompletionException ( t ) ; <nl> - return trRef . get ( ) . onError ( t ) . thenApply ( newTr - > { <nl> - trRef . set ( newTr ) ; <nl> - return true ; <nl> - } ) ; <nl> - } , e ) . thenCompose ( x - > x ) ; <nl> - } , e ) . thenApply ( o - > { <nl> - trRef . get ( ) . dispose ( ) ; <nl> - return returnValue . get ( ) ; <nl> - } ) ; <nl> + } ) , e ) , <nl> + ( value , t ) - > { <nl> + if ( t = = null ) <nl> + return CompletableFuture . completedFuture ( value ) ; <nl> + if ( ! ( t instanceof RuntimeException ) ) <nl> + throw new CompletionException ( t ) ; <nl> + return trRef . get ( ) . onError ( t ) . thenApply ( newTr - > { <nl> + trRef . 
set ( newTr ) ; <nl> + return true ; <nl> + } ) ; <nl> + } , e ) ; <nl> + } , e ) <nl> + . thenApply ( o - > returnValue . get ( ) ) <nl> + . whenComplete ( ( v , t ) - > trRef . get ( ) . close ( ) ) ; <nl> } <nl> <nl> @ Override <nl> public < T > CompletableFuture < T > readAsync ( <nl> - Function < ? super ReadTransaction , CompletableFuture < T > > retryable , Executor e ) { <nl> + Function < ? super ReadTransaction , ? extends CompletableFuture < T > > retryable , Executor e ) { <nl> return this . runAsync ( retryable , e ) ; <nl> } <nl> <nl> @ Override <nl> protected void finalize ( ) throws Throwable { <nl> - dispose ( ) ; <nl> - super . finalize ( ) ; <nl> + try { <nl> + checkUnclosed ( " Database " ) ; <nl> + close ( ) ; <nl> + } <nl> + finally { <nl> + super . finalize ( ) ; <nl> + } <nl> } <nl> <nl> @ Override <nl> public Transaction createTransaction ( Executor e ) { <nl> pointerReadLock . lock ( ) ; <nl> + Transaction tr = null ; <nl> try { <nl> - Transaction tr = new FDBTransaction ( Database_createTransaction ( getPtr ( ) ) , this , e ) ; <nl> + tr = new FDBTransaction ( Database_createTransaction ( getPtr ( ) ) , this , e ) ; <nl> tr . options ( ) . setUsedDuringCommitProtectionDisable ( ) ; <nl> return tr ; <nl> + } catch ( RuntimeException err ) { <nl> + if ( tr ! = null ) { <nl> + tr . close ( ) ; <nl> + } <nl> + <nl> + throw err ; <nl> } finally { <nl> pointerReadLock . unlock ( ) ; <nl> } <nl> public Executor getExecutor ( ) { <nl> } <nl> <nl> @ Override <nl> - protected void disposeInternal ( long cPtr ) { <nl> + protected void closeInternal ( long cPtr ) { <nl> Database_dispose ( cPtr ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FDBTransaction . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FDBTransaction . java <nl> <nl> import java . util . concurrent . Executor ; <nl> import java . util . function . Function ; <nl> <nl> - import com . apple . foundationdb . async . * ; <nl> + import com . apple . foundationdb . async . AsyncIterable ; <nl> + import com . apple . foundationdb . async . AsyncUtil ; <nl> import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> <nl> - class FDBTransaction extends DefaultDisposableImpl implements Disposable , Transaction , OptionConsumer { <nl> + class FDBTransaction extends NativeObjectWrapper implements Transaction , OptionConsumer { <nl> private final Database database ; <nl> private final Executor executor ; <nl> private final TransactionOptions options ; <nl> <nl> @ Override <nl> public AsyncIterable < KeyValue > getRange ( KeySelector begin , KeySelector end , <nl> int limit , boolean reverse , StreamingMode mode ) { <nl> - return RangeQuery . start ( FDBTransaction . this , true , begin , end , limit , reverse , mode ) ; <nl> + return new RangeQuery ( FDBTransaction . this , true , begin , end , limit , reverse , mode ) ; <nl> } <nl> @ Override <nl> public AsyncIterable < KeyValue > getRange ( KeySelector begin , KeySelector end , <nl> public TransactionOptions options ( ) { <nl> <nl> @ Override <nl> public < T > CompletableFuture < T > readAsync ( <nl> - Function < ? super ReadTransaction , CompletableFuture < T > > retryable ) { <nl> + Function < ? super ReadTransaction , ? extends CompletableFuture < T > > retryable ) { <nl> return AsyncUtil . 
applySafely ( retryable , this ) ; <nl> } <nl> <nl> public void setReadVersion ( long version ) { <nl> public CompletableFuture < Long > getReadVersion ( ) { <nl> pointerReadLock . lock ( ) ; <nl> try { <nl> - return new FutureVersion ( Transaction_getReadVersion ( getPtr ( ) ) , executor ) ; <nl> + return new FutureVersion ( Transaction_getReadVersion ( getPtr ( ) ) , executor ) ; <nl> } finally { <nl> pointerReadLock . unlock ( ) ; <nl> } <nl> public void setReadVersion ( long version ) { <nl> private CompletableFuture < byte [ ] > get_internal ( byte [ ] key , boolean isSnapshot ) { <nl> pointerReadLock . lock ( ) ; <nl> try { <nl> - return new FutureResult ( Transaction_get ( getPtr ( ) , key , isSnapshot ) , executor ) ; <nl> + return new FutureResult ( Transaction_get ( getPtr ( ) , key , isSnapshot ) , executor ) ; <nl> } finally { <nl> pointerReadLock . unlock ( ) ; <nl> } <nl> public void setReadVersion ( long version ) { <nl> private CompletableFuture < byte [ ] > getKey_internal ( KeySelector selector , boolean isSnapshot ) { <nl> pointerReadLock . lock ( ) ; <nl> try { <nl> - return new FutureKey ( Transaction_getKey ( getPtr ( ) , <nl> + return new FutureKey ( Transaction_getKey ( getPtr ( ) , <nl> selector . getKey ( ) , selector . orEqual ( ) , selector . getOffset ( ) , isSnapshot ) , executor ) ; <nl> } finally { <nl> pointerReadLock . unlock ( ) ; <nl> public void setReadVersion ( long version ) { <nl> @ Override <nl> public AsyncIterable < KeyValue > getRange ( KeySelector begin , KeySelector end , <nl> int limit , boolean reverse , StreamingMode mode ) { <nl> - return RangeQuery . start ( this , false , begin , end , limit , reverse , mode ) ; <nl> + return new RangeQuery ( this , false , begin , end , limit , reverse , mode ) ; <nl> } <nl> @ Override <nl> public AsyncIterable < KeyValue > getRange ( KeySelector begin , KeySelector end , <nl> public Database getDatabase ( ) { <nl> return database ; <nl> } <nl> <nl> + / / Users of this function must close the returned FutureResults when finished <nl> protected FutureResults getRange_internal ( <nl> KeySelector begin , KeySelector end , <nl> int rowLimit , int targetBytes , int streamingMode , <nl> private void addConflictRange ( byte [ ] keyBegin , byte [ ] keyEnd , <nl> <nl> @ Override <nl> public < T > CompletableFuture < T > runAsync ( <nl> - Function < ? super Transaction , CompletableFuture < T > > retryable ) { <nl> + Function < ? super Transaction , ? extends CompletableFuture < T > > retryable ) { <nl> return AsyncUtil . applySafely ( retryable , this ) ; <nl> } <nl> <nl> private void addConflictRange ( byte [ ] keyBegin , byte [ ] keyEnd , <nl> <nl> @ Override <nl> public < T > CompletableFuture < T > readAsync ( <nl> - Function < ? super ReadTransaction , CompletableFuture < T > > retryable ) { <nl> + Function < ? super ReadTransaction , ? extends CompletableFuture < T > > retryable ) { <nl> return AsyncUtil . applySafely ( retryable , this ) ; <nl> } <nl> <nl> public Long getCommittedVersion ( ) { <nl> return f . thenApply ( v - > tr ) <nl> . whenComplete ( ( v , t ) - > { <nl> if ( t ! = null ) { <nl> - tr . dispose ( ) ; <nl> + tr . close ( ) ; <nl> } <nl> } ) ; <nl> } finally { <nl> pointerReadLock . unlock ( ) ; <nl> if ( ! 
transactionOwner ) { <nl> - dispose ( ) ; <nl> + close ( ) ; <nl> } <nl> } <nl> } <nl> public void cancel ( ) { <nl> <nl> / / Must hold pointerReadLock when calling <nl> private FDBTransaction transfer ( ) { <nl> - FDBTransaction tr = new FDBTransaction ( getPtr ( ) , database , executor ) ; <nl> - tr . options ( ) . setUsedDuringCommitProtectionDisable ( ) ; <nl> - transactionOwner = false ; <nl> - return tr ; <nl> + FDBTransaction tr = null ; <nl> + try { <nl> + tr = new FDBTransaction ( getPtr ( ) , database , executor ) ; <nl> + tr . options ( ) . setUsedDuringCommitProtectionDisable ( ) ; <nl> + transactionOwner = false ; <nl> + return tr ; <nl> + } <nl> + catch ( RuntimeException err ) { <nl> + if ( tr ! = null ) { <nl> + tr . close ( ) ; <nl> + } <nl> + <nl> + throw err ; <nl> + } <nl> } <nl> <nl> @ Override <nl> protected long getPtr ( ) { <nl> <nl> @ Override <nl> protected void finalize ( ) throws Throwable { <nl> - dispose ( ) ; <nl> + try { <nl> + checkUnclosed ( " Transaction " ) ; <nl> + close ( ) ; <nl> + } <nl> + finally { <nl> + super . finalize ( ) ; <nl> + } <nl> } <nl> <nl> @ Override <nl> - protected void disposeInternal ( long cPtr ) { <nl> + protected void closeInternal ( long cPtr ) { <nl> if ( transactionOwner ) { <nl> Transaction_dispose ( cPtr ) ; <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureCluster . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureCluster . java <nl> protected FutureCluster ( long cPtr , Executor executor ) { <nl> } <nl> <nl> @ Override <nl> - public Cluster getIfDone_internal ( ) throws FDBException { <nl> + protected Cluster getIfDone_internal ( long cPtr ) throws FDBException { <nl> return new Cluster ( FutureCluster_get ( cPtr ) , executor ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureDatabase . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureDatabase . java <nl> <nl> } <nl> <nl> @ Override <nl> - public Database getIfDone_internal ( ) throws FDBException { <nl> + protected Database getIfDone_internal ( long cPtr ) throws FDBException { <nl> return new FDBDatabase ( FutureDatabase_get ( cPtr ) , executor ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureKey . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureKey . java <nl> <nl> } <nl> <nl> @ Override <nl> - public byte [ ] getIfDone_internal ( ) throws FDBException { <nl> + protected byte [ ] getIfDone_internal ( long cPtr ) throws FDBException { <nl> return FutureKey_get ( cPtr ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureResult . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureResult . java <nl> <nl> } <nl> <nl> @ Override <nl> - public byte [ ] getIfDone_internal ( ) throws FDBException { <nl> + protected byte [ ] getIfDone_internal ( long cPtr ) throws FDBException { <nl> return FutureResult_get ( cPtr ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureResults . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureResults . 
java <nl> <nl> <nl> @ Override <nl> protected void postMarshal ( ) { <nl> - / / We can ' t dispose because this class actually marshals on - demand <nl> + / / We can ' t close because this class actually marshals on - demand <nl> } <nl> <nl> @ Override <nl> - public RangeResultInfo getIfDone_internal ( ) throws FDBException { <nl> + protected RangeResultInfo getIfDone_internal ( long cPtr ) throws FDBException { <nl> FDBException err = Future_getError ( cPtr ) ; <nl> <nl> if ( ! err . isSuccess ( ) ) { <nl> public RangeResultInfo getIfDone_internal ( ) throws FDBException { <nl> } <nl> <nl> public RangeResultSummary getSummary ( ) { <nl> - return FutureResults_getSummary ( cPtr ) ; <nl> + try { <nl> + pointerReadLock . lock ( ) ; <nl> + return FutureResults_getSummary ( getPtr ( ) ) ; <nl> + } <nl> + finally { <nl> + pointerReadLock . unlock ( ) ; <nl> + } <nl> } <nl> <nl> public RangeResult getResults ( ) { <nl> - return FutureResults_get ( cPtr ) ; <nl> + try { <nl> + pointerReadLock . lock ( ) ; <nl> + return FutureResults_get ( getPtr ( ) ) ; <nl> + } <nl> + finally { <nl> + pointerReadLock . unlock ( ) ; <nl> + } <nl> } <nl> <nl> private native RangeResultSummary FutureResults_getSummary ( long ptr ) throws FDBException ; <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureStrings . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureStrings . java <nl> <nl> } <nl> <nl> @ Override <nl> - public String [ ] getIfDone_internal ( ) throws FDBException { <nl> + protected String [ ] getIfDone_internal ( long cPtr ) throws FDBException { <nl> return FutureStrings_get ( cPtr ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureVersion . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureVersion . java <nl> <nl> } <nl> <nl> @ Override <nl> - Long getIfDone_internal ( ) throws FDBException { <nl> + protected Long getIfDone_internal ( long cPtr ) throws FDBException { <nl> return FutureVersion_get ( cPtr ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / FutureVoid . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / FutureVoid . java <nl> <nl> } <nl> <nl> @ Override <nl> - public Void getIfDone_internal ( ) throws FDBException { <nl> + protected Void getIfDone_internal ( long cPtr ) throws FDBException { <nl> / / With " future - cleanup " we get rid of FutureVoid_get and replace instead <nl> / / with a get on the error and throw if the error is not success . <nl> FDBException err = Future_getError ( cPtr ) ; <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / JNIUtil . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / JNIUtil . java <nl> private static OS getRunningOS ( ) { <nl> return OS . OSX ; <nl> throw new IllegalStateException ( " Unknown or unsupported OS : " + osname ) ; <nl> } <nl> + <nl> + private JNIUtil ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / KeySelector . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / KeySelector . 
java <nl> public KeySelector ( byte [ ] key , boolean orEqual , int offset ) { <nl> * <nl> * @ return a newly created { @ code KeySelector } <nl> * / <nl> - public static KeySelector lastLessThan ( byte [ ] key ) { <nl> - return new KeySelector ( key , false , 0 ) ; <nl> + public static KeySelector lastLessThan ( byte [ ] key ) { <nl> + return new KeySelector ( key , false , 0 ) ; <nl> } <nl> <nl> / * * <nl> public static KeySelector lastLessThan ( byte [ ] key ) { <nl> * <nl> * @ return a newly created { @ code KeySelector } <nl> * / <nl> - public static KeySelector lastLessOrEqual ( byte [ ] key ) { <nl> - return new KeySelector ( key , true , 0 ) ; <nl> + public static KeySelector lastLessOrEqual ( byte [ ] key ) { <nl> + return new KeySelector ( key , true , 0 ) ; <nl> } <nl> <nl> / * * <nl> public static KeySelector lastLessOrEqual ( byte [ ] key ) { <nl> * <nl> * @ return a newly created { @ code KeySelector } <nl> * / <nl> - public static KeySelector firstGreaterThan ( byte [ ] key ) { <nl> - return new KeySelector ( key , true , + 1 ) ; <nl> + public static KeySelector firstGreaterThan ( byte [ ] key ) { <nl> + return new KeySelector ( key , true , + 1 ) ; <nl> } <nl> <nl> / * * <nl> public static KeySelector firstGreaterThan ( byte [ ] key ) { <nl> * <nl> * @ return a newly created { @ code KeySelector } <nl> * / <nl> - public static KeySelector firstGreaterOrEqual ( byte [ ] key ) { <nl> - return new KeySelector ( key , false , + 1 ) ; <nl> + public static KeySelector firstGreaterOrEqual ( byte [ ] key ) { <nl> + return new KeySelector ( key , false , + 1 ) ; <nl> } <nl> <nl> / * * <nl> public static KeySelector firstGreaterOrEqual ( byte [ ] key ) { <nl> * <nl> * @ return a newly created { @ code KeySelector } that is offset by a number of keys . <nl> * / <nl> - public KeySelector add ( int offset ) { <nl> - return new KeySelector ( getKey ( ) , orEqual ( ) , getOffset ( ) + offset ) ; <nl> + public KeySelector add ( int offset ) { <nl> + return new KeySelector ( getKey ( ) , orEqual ( ) , getOffset ( ) + offset ) ; <nl> } <nl> <nl> / * * <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / LocalityUtil . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / LocalityUtil . java <nl> <nl> <nl> import java . nio . charset . Charset ; <nl> import java . util . Arrays ; <nl> - import java . util . List ; <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . CompletionException ; <nl> + import java . util . concurrent . Executor ; <nl> import java . util . function . BiFunction ; <nl> <nl> import com . apple . foundationdb . async . AsyncIterable ; <nl> import com . apple . foundationdb . async . AsyncIterator ; <nl> import com . apple . foundationdb . async . AsyncUtil ; <nl> + import com . apple . foundationdb . async . CloseableAsyncIterator ; <nl> import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> <nl> / * * <nl> <nl> * / <nl> public class LocalityUtil { <nl> / * * <nl> - * Returns a { @ code AsyncIterable } of keys { @ code k } such that <nl> + * Returns a { @ code CloseableAsyncIterator } of keys { @ code k } such that <nl> * { @ code begin < = k < end } and { @ code k } is located at the start of a <nl> * contiguous range stored on a single server . 
< br > <nl> * < br > <nl> <nl> * <nl> * @ return an sequence of keys denoting the start of single - server ranges <nl> * / <nl> - public static AsyncIterable < byte [ ] > getBoundaryKeys ( Database db , byte [ ] begin , byte [ ] end ) { <nl> + public static CloseableAsyncIterator < byte [ ] > getBoundaryKeys ( Database db , byte [ ] begin , byte [ ] end ) { <nl> return getBoundaryKeys_internal ( db . createTransaction ( ) , begin , end ) ; <nl> } <nl> <nl> / * * <nl> - * Returns a { @ code AsyncIterable } of keys { @ code k } such that <nl> + * Returns a { @ code CloseableAsyncIterator } of keys { @ code k } such that <nl> * { @ code begin < = k < end } and { @ code k } is located at the start of a <nl> * contiguous range stored on a single server . < br > <nl> * < br > <nl> <nl> * <nl> * @ return an sequence of keys denoting the start of single - server ranges <nl> * / <nl> - public static AsyncIterable < byte [ ] > getBoundaryKeys ( Transaction tr , byte [ ] begin , byte [ ] end ) { <nl> - Transaction local = tr . getDatabase ( ) . createTransaction ( ) ; <nl> + public static CloseableAsyncIterator < byte [ ] > getBoundaryKeys ( Transaction tr , byte [ ] begin , byte [ ] end ) { <nl> + Transaction local = tr . getDatabase ( ) . createTransaction ( tr . getExecutor ( ) ) ; <nl> CompletableFuture < Long > readVersion = tr . getReadVersion ( ) ; <nl> if ( readVersion . isDone ( ) & & ! readVersion . isCompletedExceptionally ( ) ) { <nl> local . setReadVersion ( readVersion . getNow ( null ) ) ; <nl> } <nl> - return new BoundaryIterable ( local , begin , end ) ; <nl> + return new BoundaryIterator ( local , begin , end ) ; <nl> } <nl> <nl> / * * <nl> <nl> return ( ( FDBTransaction ) tr ) . getAddressesForKey ( key ) ; <nl> } <nl> <nl> - private static AsyncIterable < byte [ ] > getBoundaryKeys_internal ( Transaction tr , byte [ ] begin , byte [ ] end ) { <nl> - return new BoundaryIterable ( tr , begin , end ) ; <nl> + private static CloseableAsyncIterator < byte [ ] > getBoundaryKeys_internal ( Transaction tr , byte [ ] begin , byte [ ] end ) { <nl> + return new BoundaryIterator ( tr , begin , end ) ; <nl> } <nl> <nl> - static class BoundaryIterable implements AsyncIterable < byte [ ] > { <nl> - final Transaction tr ; <nl> - final byte [ ] begin ; <nl> + static class BoundaryIterator implements CloseableAsyncIterator < byte [ ] > { <nl> + Transaction tr ; <nl> + byte [ ] begin ; <nl> + byte [ ] lastBegin ; <nl> final byte [ ] end ; <nl> final AsyncIterable < KeyValue > firstGet ; <nl> <nl> - public BoundaryIterable ( Transaction tr , byte [ ] begin , byte [ ] end ) { <nl> + AsyncIterator < KeyValue > block ; <nl> + private CompletableFuture < Boolean > nextFuture ; <nl> + private boolean closed ; <nl> + <nl> + BoundaryIterator ( Transaction tr , byte [ ] begin , byte [ ] end ) { <nl> this . tr = tr ; <nl> this . begin = Arrays . copyOf ( begin , begin . length ) ; <nl> this . end = Arrays . copyOf ( end , end . length ) ; <nl> <nl> + lastBegin = begin ; <nl> + <nl> tr . options ( ) . setReadSystemKeys ( ) ; <nl> tr . options ( ) . setLockAware ( ) ; <nl> + <nl> firstGet = tr . getRange ( keyServersForKey ( begin ) , keyServersForKey ( end ) ) ; <nl> + block = firstGet . iterator ( ) ; <nl> + nextFuture = AsyncUtil . composeHandleAsync ( block . onHasNext ( ) , handler , tr . 
getExecutor ( ) ) ; <nl> + <nl> + closed = false ; <nl> } <nl> <nl> @ Override <nl> - public AsyncIterator < byte [ ] > iterator ( ) { <nl> - return new BoundaryIterator ( ) ; <nl> + public CompletableFuture < Boolean > onHasNext ( ) { <nl> + return nextFuture ; <nl> } <nl> <nl> - @ Override <nl> - public CompletableFuture < List < byte [ ] > > asList ( ) { <nl> - return AsyncUtil . collect ( this , tr . getExecutor ( ) ) ; <nl> - } <nl> - <nl> - class BoundaryIterator implements AsyncIterator < byte [ ] > { <nl> - AsyncIterator < KeyValue > block = BoundaryIterable . this . firstGet . iterator ( ) ; <nl> - Transaction tr = BoundaryIterable . this . tr ; <nl> - byte [ ] begin = BoundaryIterable . this . begin ; <nl> - byte [ ] lastBegin = begin ; <nl> - private CompletableFuture < Boolean > nextFuture ; <nl> + @ Override <nl> + public boolean hasNext ( ) { <nl> + return nextFuture . join ( ) ; <nl> + } <nl> <nl> - public BoundaryIterator ( ) { <nl> - nextFuture = block . onHasNext ( ) . handleAsync ( handler , tr . getExecutor ( ) ) . thenCompose ( x - > x ) ; <nl> + CompletableFuture < Boolean > restartGet ( ) { <nl> + if ( ByteArrayUtil . compareUnsigned ( begin , end ) > = 0 ) { <nl> + return AsyncUtil . READY_FALSE ; <nl> } <nl> + lastBegin = begin ; <nl> + tr . options ( ) . setReadSystemKeys ( ) ; <nl> + block = tr . getRange ( <nl> + keyServersForKey ( begin ) , <nl> + keyServersForKey ( end ) ) . iterator ( ) ; <nl> + nextFuture = AsyncUtil . composeHandleAsync ( block . onHasNext ( ) , handler , tr . getExecutor ( ) ) ; <nl> + return nextFuture ; <nl> + } <nl> <nl> + BiFunction < Boolean , Throwable , CompletableFuture < Boolean > > handler = new BiFunction < Boolean , Throwable , CompletableFuture < Boolean > > ( ) { <nl> @ Override <nl> - public CompletableFuture < Boolean > onHasNext ( ) { <nl> - return nextFuture ; <nl> - } <nl> + public CompletableFuture < Boolean > apply ( Boolean b , Throwable o ) { <nl> + if ( b ! = null ) { <nl> + return CompletableFuture . completedFuture ( b ) ; <nl> + } <nl> + if ( o instanceof FDBException ) { <nl> + FDBException err = ( FDBException ) o ; <nl> + if ( err . getCode ( ) = = 1007 & & ! Arrays . equals ( begin , lastBegin ) ) { <nl> + Executor executor = BoundaryIterator . this . tr . getExecutor ( ) ; <nl> + BoundaryIterator . this . tr . close ( ) ; <nl> + BoundaryIterator . this . tr = BoundaryIterator . this . tr . getDatabase ( ) . createTransaction ( executor ) ; <nl> + return restartGet ( ) ; <nl> + } <nl> + } <nl> <nl> - @ Override <nl> - public boolean hasNext ( ) { <nl> - return nextFuture . join ( ) ; <nl> + if ( ! ( o instanceof RuntimeException ) ) <nl> + throw new CompletionException ( o ) ; <nl> + <nl> + CompletableFuture < Transaction > onError = BoundaryIterator . this . tr . onError ( o ) ; <nl> + return onError . thenComposeAsync ( tr - > { <nl> + BoundaryIterator . this . tr = tr ; <nl> + return restartGet ( ) ; <nl> + } , tr . getExecutor ( ) ) ; <nl> } <nl> + } ; <nl> <nl> - CompletableFuture < Boolean > restartGet ( ) { <nl> - if ( ByteArrayUtil . compareUnsigned ( begin , end ) > = 0 ) { <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - } <nl> - lastBegin = begin ; <nl> - tr . options ( ) . setReadSystemKeys ( ) ; <nl> - block = tr . getRange ( <nl> - keyServersForKey ( begin ) , <nl> - keyServersForKey ( end ) ) . iterator ( ) ; <nl> - nextFuture = block . onHasNext ( ) . handleAsync ( handler , tr . getExecutor ( ) ) . 
thenCompose ( x - > x ) ; <nl> - return nextFuture ; <nl> + @ Override <nl> + public byte [ ] next ( ) { <nl> + if ( ! nextFuture . isDone ( ) ) { <nl> + throw new IllegalStateException ( " Call to next without hasNext ( ) = true " ) ; <nl> } <nl> + KeyValue o = block . next ( ) ; <nl> + byte [ ] key = o . getKey ( ) ; <nl> + byte [ ] suffix = Arrays . copyOfRange ( key , 13 , key . length ) ; <nl> + BoundaryIterator . this . begin = ByteArrayUtil . join ( suffix , new byte [ ] { ( byte ) 0 } ) ; <nl> + nextFuture = AsyncUtil . composeHandleAsync ( block . onHasNext ( ) , handler , tr . getExecutor ( ) ) ; <nl> + return suffix ; <nl> + } <nl> <nl> - BiFunction < Boolean , Throwable , CompletableFuture < Boolean > > handler = new BiFunction < Boolean , Throwable , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( Boolean b , Throwable o ) { <nl> - if ( b ! = null ) { <nl> - return CompletableFuture . completedFuture ( b ) ; <nl> - } <nl> - if ( o instanceof FDBException ) { <nl> - FDBException err = ( FDBException ) o ; <nl> - if ( err . getCode ( ) = = 1007 & & ! Arrays . equals ( begin , lastBegin ) ) { <nl> - BoundaryIterator . this . tr . dispose ( ) ; <nl> - BoundaryIterator . this . tr = <nl> - BoundaryIterator . this . tr . getDatabase ( ) . createTransaction ( ) ; <nl> - return restartGet ( ) ; <nl> - } <nl> - } <nl> + @ Override <nl> + public void remove ( ) { <nl> + throw new UnsupportedOperationException ( " Boundary keys are read - only " ) ; <nl> + } <nl> <nl> - if ( ! ( o instanceof RuntimeException ) ) <nl> - throw new CompletionException ( o ) ; <nl> + @ Override <nl> + public void close ( ) { <nl> + BoundaryIterator . this . tr . close ( ) ; <nl> + closed = true ; <nl> + } <nl> <nl> - CompletableFuture < Transaction > onError = BoundaryIterator . this . tr . onError ( ( RuntimeException ) o ) ; <nl> - return onError . thenComposeAsync ( tr - > { <nl> - BoundaryIterator . this . tr = tr ; <nl> - return restartGet ( ) ; <nl> - } , tr . getExecutor ( ) ) ; <nl> + @ Override <nl> + protected void finalize ( ) throws Throwable { <nl> + try { <nl> + if ( FDB . getInstance ( ) . warnOnUnclosed & & ! closed ) { <nl> + System . err . println ( " CloseableAsyncIterator not closed ( getBoundaryKeys ) " ) ; <nl> } <nl> - } ; <nl> - <nl> - @ Override <nl> - public byte [ ] next ( ) { <nl> - if ( ! nextFuture . isDone ( ) ) { <nl> - throw new IllegalStateException ( " Call to next without hasNext ( ) = true " ) ; <nl> + if ( ! closed ) { <nl> + close ( ) ; <nl> } <nl> - KeyValue o = block . next ( ) ; <nl> - byte [ ] key = o . getKey ( ) ; <nl> - byte [ ] suffix = Arrays . copyOfRange ( key , 13 , key . length ) ; <nl> - BoundaryIterator . this . begin = ByteArrayUtil . join ( suffix , new byte [ ] { ( byte ) 0 } ) ; <nl> - nextFuture = block . onHasNext ( ) . handleAsync ( handler , tr . getExecutor ( ) ) . thenCompose ( x - > x ) ; <nl> - return suffix ; <nl> } <nl> - <nl> - @ Override <nl> - public void remove ( ) { <nl> - throw new UnsupportedOperationException ( " Boundary keys are read - only " ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void cancel ( ) { <nl> - / / TODO Auto - generated method stub <nl> - } <nl> - <nl> - @ Override <nl> - public void dispose ( ) { <nl> - BoundaryIterator . this . tr . dispose ( ) ; <nl> + finally { <nl> + super . finalize ( ) ; <nl> } <nl> } <nl> } <nl> <nl> - private static Charset ASCII = Charset . forName ( " US - ASCII " ) ; <nl> + private static final Charset ASCII = Charset . 
forName ( " US - ASCII " ) ; <nl> static byte [ ] keyServersForKey ( byte [ ] key ) { <nl> return ByteArrayUtil . join ( new byte [ ] { ( byte ) 255 } , <nl> " / keyServers / " . getBytes ( ASCII ) , <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / NativeFuture . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / NativeFuture . java <nl> <nl> <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . Executor ; <nl> + import java . util . concurrent . locks . Lock ; <nl> + import java . util . concurrent . locks . ReentrantReadWriteLock ; <nl> <nl> - abstract class NativeFuture < T > extends CompletableFuture < T > { <nl> - protected final long cPtr ; <nl> + abstract class NativeFuture < T > extends CompletableFuture < T > implements AutoCloseable { <nl> + private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock ( ) ; <nl> + protected final Lock pointerReadLock = rwl . readLock ( ) ; <nl> + <nl> + private long cPtr ; <nl> <nl> protected NativeFuture ( long cPtr ) { <nl> this . cPtr = cPtr ; <nl> protected NativeFuture ( long cPtr ) { <nl> / / constructor of this class because a quickly completing future can <nl> / / lead to a race where the marshalWhenDone tries to run on an <nl> / / unconstructed subclass . <nl> + / / <nl> + / / Since this must be called from a constructor , we assume that close <nl> + / / cannot be called concurrently . <nl> protected void registerMarshalCallback ( Executor executor ) { <nl> - Future_registerCallback ( cPtr , ( ) - > executor . execute ( this : : marshalWhenDone ) ) ; <nl> + if ( cPtr ! = 0 ) { <nl> + Future_registerCallback ( cPtr , ( ) - > executor . execute ( this : : marshalWhenDone ) ) ; <nl> + } <nl> } <nl> <nl> private void marshalWhenDone ( ) { <nl> try { <nl> - T val = getIfDone_internal ( ) ; <nl> - postMarshal ( ) ; <nl> - complete ( val ) ; <nl> - } catch ( FDBException t ) { <nl> - assert ( t . getCode ( ) ! = 2015 ) ; / / future_not_set not possible <nl> - if ( t . getCode ( ) ! = 1102 ) { / / future_released <nl> - completeExceptionally ( t ) ; <nl> + T val = null ; <nl> + boolean shouldComplete = false ; <nl> + try { <nl> + pointerReadLock . lock ( ) ; <nl> + if ( cPtr ! = 0 ) { <nl> + val = getIfDone_internal ( cPtr ) ; <nl> + shouldComplete = true ; <nl> + } <nl> + } <nl> + finally { <nl> + pointerReadLock . unlock ( ) ; <nl> + } <nl> + <nl> + if ( shouldComplete ) { <nl> + complete ( val ) ; <nl> } <nl> + } catch ( FDBException t ) { <nl> + assert ( t . getCode ( ) ! = 1102 & & t . getCode ( ) ! = 2015 ) ; / / future_released , future_not_set not possible <nl> + completeExceptionally ( t ) ; <nl> } catch ( Throwable t ) { <nl> completeExceptionally ( t ) ; <nl> + } finally { <nl> + postMarshal ( ) ; <nl> } <nl> } <nl> <nl> protected void postMarshal ( ) { <nl> - dispose ( ) ; <nl> + close ( ) ; <nl> } <nl> <nl> - abstract T getIfDone_internal ( ) throws FDBException ; <nl> + protected abstract T getIfDone_internal ( long cPtr ) throws FDBException ; <nl> + <nl> + @ Override <nl> + public void close ( ) { <nl> + long ptr = 0 ; <nl> <nl> - public void dispose ( ) { <nl> - Future_releaseMemory ( cPtr ) ; <nl> + rwl . writeLock ( ) . lock ( ) ; <nl> + if ( cPtr ! = 0 ) { <nl> + ptr = cPtr ; <nl> + cPtr = 0 ; <nl> + } <nl> + rwl . writeLock ( ) . unlock ( ) ; <nl> + <nl> + if ( ptr ! = 0 ) { <nl> + Future_dispose ( ptr ) ; <nl> + if ( ! 
isDone ( ) ) { <nl> + completeExceptionally ( new IllegalStateException ( " Future has been closed " ) ) ; <nl> + } <nl> + } <nl> } <nl> <nl> @ Override <nl> - protected void finalize ( ) throws Throwable { <nl> - Future_dispose ( cPtr ) ; <nl> + public boolean cancel ( boolean mayInterruptIfRunning ) { <nl> + boolean result = super . cancel ( mayInterruptIfRunning ) ; <nl> + try { <nl> + rwl . readLock ( ) . lock ( ) ; <nl> + if ( cPtr ! = 0 ) { <nl> + Future_cancel ( cPtr ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + finally { <nl> + rwl . readLock ( ) . unlock ( ) ; <nl> + } <nl> } <nl> <nl> - @ Override <nl> - public T join ( ) { <nl> - Future_blockUntilReady ( cPtr ) ; <nl> - return super . join ( ) ; <nl> + protected long getPtr ( ) { <nl> + / / we must have a read lock for this function to make sense , however it <nl> + / / does not make sense to take the lock here , since the code that uses <nl> + / / the result must inherently have the read lock itself . <nl> + assert ( rwl . getReadHoldCount ( ) > 0 ) ; <nl> + <nl> + if ( cPtr = = 0 ) <nl> + throw new IllegalStateException ( " Cannot access closed object " ) ; <nl> + <nl> + return cPtr ; <nl> } <nl> <nl> private native void Future_registerCallback ( long cPtr , Runnable callback ) ; <nl> similarity index 67 % <nl> rename from bindings / java / src - completable / main / com / apple / foundationdb / DefaultDisposableImpl . java <nl> rename to bindings / java / src - completable / main / com / apple / foundationdb / NativeObjectWrapper . java <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / DefaultDisposableImpl . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / NativeObjectWrapper . java <nl> <nl> / * <nl> - * DefaultDisposableImpl . java <nl> + * NativeObjectWrapper . java <nl> * <nl> * This source file is part of the FoundationDB open source project <nl> * <nl> <nl> import java . util . concurrent . locks . Lock ; <nl> import java . util . concurrent . locks . ReentrantReadWriteLock ; <nl> <nl> - abstract class DefaultDisposableImpl implements Disposable { <nl> + abstract class NativeObjectWrapper implements AutoCloseable { <nl> private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock ( ) ; <nl> protected final Lock pointerReadLock = rwl . readLock ( ) ; <nl> <nl> - private boolean disposed = false ; <nl> + private boolean closed = false ; <nl> private long cPtr ; <nl> <nl> - public DefaultDisposableImpl ( ) { <nl> - } <nl> - <nl> - public DefaultDisposableImpl ( long cPtr ) { <nl> + NativeObjectWrapper ( long cPtr ) { <nl> this . cPtr = cPtr ; <nl> if ( this . cPtr = = 0 ) <nl> - this . disposed = true ; <nl> + this . closed = true ; <nl> } <nl> <nl> - public boolean isDisposed ( ) { <nl> + public boolean isClosed ( ) { <nl> / / we must have a read lock for this function to make sense , however it <nl> / / does not make sense to take the lock here , since the code that uses <nl> / / the result must inherently have the read lock itself . <nl> - assert ( rwl . getReadHoldCount ( ) > 0 ) ; <nl> + assert ( rwl . getReadHoldCount ( ) > 0 ) ; <nl> + <nl> + return closed ; <nl> + } <nl> <nl> - return disposed ; <nl> + public void checkUnclosed ( String context ) { <nl> + try { <nl> + if ( FDB . getInstance ( ) . warnOnUnclosed & & ! closed ) { <nl> + System . err . println ( context + " not closed " ) ; <nl> + } <nl> + } <nl> + catch ( Exception e ) { <nl> + / / Eat this error . 
This is called from the finalizer , <nl> + / / so there isn ' t much we can do . <nl> + } <nl> } <nl> <nl> @ Override <nl> - public void dispose ( ) { <nl> + public void close ( ) { <nl> rwl . writeLock ( ) . lock ( ) ; <nl> long ptr = 0 ; <nl> try { <nl> - if ( disposed ) <nl> + if ( closed ) <nl> return ; <nl> <nl> ptr = cPtr ; <nl> this . cPtr = 0 ; <nl> - disposed = true ; <nl> + closed = true ; <nl> } finally { <nl> rwl . writeLock ( ) . unlock ( ) ; <nl> } <nl> <nl> - disposeInternal ( ptr ) ; <nl> + closeInternal ( ptr ) ; <nl> } <nl> <nl> protected long getPtr ( ) { <nl> / / we must have a read lock for this function to make sense , however it <nl> / / does not make sense to take the lock here , since the code that uses <nl> / / the result must inherently have the read lock itself . <nl> - assert ( rwl . getReadHoldCount ( ) > 0 ) ; <nl> + assert ( rwl . getReadHoldCount ( ) > 0 ) ; <nl> <nl> - if ( this . disposed ) <nl> - throw new IllegalStateException ( " Cannot access disposed object " ) ; <nl> + if ( this . closed ) <nl> + throw new IllegalStateException ( " Cannot access closed object " ) ; <nl> <nl> return this . cPtr ; <nl> } <nl> <nl> - protected abstract void disposeInternal ( long cPtr ) ; <nl> + protected abstract void closeInternal ( long cPtr ) ; <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / OptionsSet . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / OptionsSet . java <nl> <nl> import java . nio . charset . Charset ; <nl> <nl> abstract class OptionsSet { <nl> - private final static Charset CHARSET_UTF8 = Charset . forName ( " UTF - 8 " ) ; <nl> + private static final Charset CHARSET_UTF8 = Charset . forName ( " UTF - 8 " ) ; <nl> OptionConsumer consumer ; <nl> <nl> - public OptionsSet ( OptionConsumer provider ) { <nl> + OptionsSet ( OptionConsumer provider ) { <nl> this . consumer = provider ; <nl> } <nl> <nl> public OptionsSet ( OptionConsumer provider ) { <nl> * <nl> * @ return target of option set calls <nl> * / <nl> - public OptionConsumer getOptionConsumer ( ) { return consumer ; } <nl> + public OptionConsumer getOptionConsumer ( ) { <nl> + return consumer ; <nl> + } <nl> <nl> protected void setOption ( int code ) { <nl> consumer . setOption ( code , null ) ; <nl> protected void setOption ( int code , long param ) { <nl> ByteBuffer b = ByteBuffer . allocate ( 8 ) ; <nl> b . order ( ByteOrder . LITTLE_ENDIAN ) ; <nl> b . putLong ( param ) ; <nl> - consumer . setOption ( code , b . array ( ) ) ; <nl> + consumer . setOption ( code , b . array ( ) ) ; <nl> } <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / Range . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / Range . java <nl> public Range ( byte [ ] begin , byte [ ] end ) { <nl> public static Range startsWith ( byte [ ] prefix ) { <nl> if ( prefix = = null ) <nl> throw new NullPointerException ( " prefix cannot be null " ) ; <nl> - return new Range ( prefix , ByteArrayUtil . strinc ( prefix ) ) ; <nl> + return new Range ( prefix , ByteArrayUtil . strinc ( prefix ) ) ; <nl> } <nl> <nl> / * * <nl> public int hashCode ( ) { <nl> * / <nl> @ Override <nl> public String toString ( ) { <nl> - return " Range ( " + ( begin = = null ? " null " : " \ " " + ByteArrayUtil . printable ( begin ) + " \ " " ) <nl> - + " , " + ( end = = null ? " null " : " \ " " + ByteArrayUtil . 
printable ( end ) + " \ " " ) + " ) " ; <nl> + return " Range ( " + ( begin = = null ? " null " : " \ " " + ByteArrayUtil . printable ( begin ) + " \ " " ) + <nl> + " , " + ( end = = null ? " null " : " \ " " + ByteArrayUtil . printable ( end ) + " \ " " ) + " ) " ; <nl> } <nl> } <nl> \ No newline at end of file <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / RangeQuery . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / RangeQuery . java <nl> <nl> import java . util . concurrent . CancellationException ; <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . function . BiConsumer ; <nl> - import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . async . AsyncIterable ; <nl> import com . apple . foundationdb . async . AsyncIterator ; <nl> <nl> private final int rowLimit ; <nl> private final boolean reverse ; <nl> private final StreamingMode streamingMode ; <nl> - private final FutureResults firstChunk ; <nl> <nl> - private RangeQuery ( FDBTransaction transaction , boolean isSnapshot , <nl> + RangeQuery ( FDBTransaction transaction , boolean isSnapshot , <nl> KeySelector begin , KeySelector end , int rowLimit , <nl> - boolean reverse , StreamingMode streamingMode , <nl> - FutureResults firstChunk ) { <nl> + boolean reverse , StreamingMode streamingMode ) { <nl> this . tr = transaction ; <nl> this . begin = begin ; <nl> this . end = end ; <nl> private RangeQuery ( FDBTransaction transaction , boolean isSnapshot , <nl> this . rowLimit = rowLimit ; <nl> this . reverse = reverse ; <nl> this . streamingMode = streamingMode ; <nl> - this . firstChunk = firstChunk ; <nl> - } <nl> - <nl> - static RangeQuery start ( FDBTransaction transaction , boolean isSnapshot , <nl> - KeySelector begin , KeySelector end , int rowLimit , <nl> - boolean reverse , StreamingMode streamingMode ) { <nl> - / / start the first fetch . . . <nl> - FutureResults firstChunk = transaction . getRange_internal ( begin , end , <nl> - rowLimit , 0 , streamingMode . code ( ) , 1 , isSnapshot , reverse ) ; <nl> - <nl> - return new RangeQuery ( transaction , isSnapshot , begin , end , rowLimit , reverse , streamingMode , firstChunk ) ; <nl> } <nl> <nl> / * * <nl> static RangeQuery start ( FDBTransaction transaction , boolean isSnapshot , <nl> <nl> / / if the streaming mode is EXACT , try and grab things as one chunk <nl> if ( mode = = StreamingMode . EXACT ) { <nl> - CompletableFuture < RangeResultInfo > range = tr . getRange_internal ( <nl> + FutureResults range = tr . getRange_internal ( <nl> this . begin , this . end , this . rowLimit , 0 , StreamingMode . EXACT . code ( ) , <nl> 1 , this . snapshot , this . reverse ) ; <nl> - return range . thenApply ( new Function < RangeResultInfo , List < KeyValue > > ( ) { <nl> - @ Override <nl> - public List < KeyValue > apply ( RangeResultInfo o ) { <nl> - return o . get ( ) . values ; <nl> - } <nl> - } ) ; <nl> + return range . thenApply ( result - > result . get ( ) . values ) <nl> + . whenComplete ( ( result , e ) - > range . close ( ) ) ; <nl> } <nl> <nl> / / If the streaming mode is not EXACT , simply collect the results of an iteration into a list <nl> return AsyncUtil . collect ( <nl> - new RangeQuery ( tr , snapshot , begin , end , rowLimit , reverse , mode , firstChunk ) , tr . getExecutor ( ) ) ; <nl> + new RangeQuery ( tr , snapshot , begin , end , rowLimit , reverse , mode ) , tr . 
getExecutor ( ) ) ; <nl> } <nl> <nl> / * * <nl> public AsyncRangeIterator iterator ( ) { <nl> / / There is the chance for parallelism in the two " chunks " for fetched data <nl> private RangeResult chunk = null ; <nl> private RangeResult nextChunk = null ; <nl> - private boolean fetchOutstanding = true ; <nl> + private boolean fetchOutstanding = false ; <nl> private byte [ ] prevKey = null ; <nl> private int index = 0 ; <nl> - / / The first request is made in the constructor for the parent Iterable , so start at 1 <nl> - private int iteration = 1 ; <nl> + private int iteration = 0 ; <nl> private KeySelector begin ; <nl> private KeySelector end ; <nl> <nl> private int rowsRemaining ; <nl> <nl> + private FutureResults fetchingChunk ; <nl> private CompletableFuture < Boolean > nextFuture ; <nl> private boolean isCancelled = false ; <nl> <nl> private AsyncRangeIterator ( int rowLimit , boolean reverse , StreamingMode streamin <nl> this . reverse = reverse ; <nl> this . streamingMode = streamingMode ; <nl> <nl> - / / Register for completion , etc . on the first chunk . Some of the fields in <nl> - / / this class were initialized with the knowledge that this fetch is active <nl> - / / at creation time . This set normally happens in startNextFetch , but <nl> - / / the first fetch has already been configured and started . <nl> - CompletableFuture < Boolean > promise = new CompletableFuture < Boolean > ( ) ; <nl> - nextFuture = promise ; <nl> - <nl> - / / FIXME : should we propagate cancellation into the first chuck fetch ? <nl> - / / This would invalidate the whole iterable , not just the iterator <nl> - / / promise . onCancelledCancel ( firstChunk ) ; <nl> - <nl> - / / FIXME : I have no idea if this will just get garbage collected away , etc . <nl> - firstChunk . whenComplete ( new FetchComplete ( firstChunk , promise ) ) ; <nl> + startNextFetch ( ) ; <nl> } <nl> <nl> private synchronized boolean mainChunkIsTheLast ( ) { <nl> private synchronized boolean mainChunkIsTheLast ( ) { <nl> final FutureResults fetchingChunk ; <nl> final CompletableFuture < Boolean > promise ; <nl> <nl> - public FetchComplete ( FutureResults fetch , CompletableFuture < Boolean > promise ) { <nl> + FetchComplete ( FutureResults fetch , CompletableFuture < Boolean > promise ) { <nl> this . fetchingChunk = fetch ; <nl> this . promise = promise ; <nl> } <nl> <nl> @ Override <nl> public void accept ( RangeResultInfo data , Throwable error ) { <nl> - final RangeResultSummary summary ; <nl> - <nl> - if ( error ! = null ) { <nl> - promise . completeExceptionally ( error ) ; <nl> - if ( error instanceof Error ) { <nl> - throw ( Error ) error ; <nl> - } <nl> - <nl> - return ; <nl> - } <nl> - <nl> - summary = data . getSummary ( ) ; <nl> - if ( summary . lastKey = = null ) { <nl> - promise . complete ( Boolean . FALSE ) ; <nl> - return ; <nl> - } <nl> + try { <nl> + final RangeResultSummary summary ; <nl> <nl> - synchronized ( AsyncRangeIterator . this ) { <nl> - fetchOutstanding = false ; <nl> + if ( error ! = null ) { <nl> + promise . completeExceptionally ( error ) ; <nl> + if ( error instanceof Error ) { <nl> + throw ( Error ) error ; <nl> + } <nl> <nl> - / / adjust the total number of rows we should ever fetch <nl> - rowsRemaining - = summary . keyCount ; <nl> + return ; <nl> + } <nl> <nl> - / / set up the next fetch <nl> - if ( reverse ) { <nl> - end = KeySelector . firstGreaterOrEqual ( summary . lastKey ) ; <nl> - } else { <nl> - begin = KeySelector . firstGreaterThan ( summary . lastKey ) ; <nl> + summary = data . 
getSummary ( ) ; <nl> + if ( summary . lastKey = = null ) { <nl> + promise . complete ( Boolean . FALSE ) ; <nl> + return ; <nl> } <nl> <nl> - / / If this is the first fetch or the main chunk is exhausted <nl> - if ( chunk = = null | | index = = chunk . values . size ( ) ) { <nl> - nextChunk = null ; <nl> - chunk = data . get ( ) ; <nl> - index = 0 ; <nl> - } else { <nl> - nextChunk = data . get ( ) ; <nl> + synchronized ( AsyncRangeIterator . this ) { <nl> + fetchOutstanding = false ; <nl> + <nl> + / / adjust the total number of rows we should ever fetch <nl> + rowsRemaining - = summary . keyCount ; <nl> + <nl> + / / set up the next fetch <nl> + if ( reverse ) { <nl> + end = KeySelector . firstGreaterOrEqual ( summary . lastKey ) ; <nl> + } <nl> + else { <nl> + begin = KeySelector . firstGreaterThan ( summary . lastKey ) ; <nl> + } <nl> + <nl> + / / If this is the first fetch or the main chunk is exhausted <nl> + if ( chunk = = null | | index = = chunk . values . size ( ) ) { <nl> + nextChunk = null ; <nl> + chunk = data . get ( ) ; <nl> + index = 0 ; <nl> + } <nl> + else { <nl> + nextChunk = data . get ( ) ; <nl> + } <nl> } <nl> - } <nl> <nl> - promise . complete ( Boolean . TRUE ) ; <nl> + promise . complete ( Boolean . TRUE ) ; <nl> + } <nl> + finally { <nl> + fetchingChunk . close ( ) ; <nl> + } <nl> } <nl> } <nl> <nl> private synchronized void startNextFetch ( ) { <nl> if ( isCancelled ) <nl> return ; <nl> <nl> - if ( mainChunkIsTheLast ( ) ) <nl> + if ( chunk ! = null & & mainChunkIsTheLast ( ) ) <nl> return ; <nl> <nl> fetchOutstanding = true ; <nl> nextChunk = null ; <nl> <nl> - FutureResults fetchingChunk = tr . getRange_internal ( begin , end , <nl> + fetchingChunk = tr . getRange_internal ( begin , end , <nl> rowsLimited ? rowsRemaining : 0 , 0 , streamingMode . code ( ) , <nl> + + iteration , snapshot , reverse ) ; <nl> <nl> - CompletableFuture < Boolean > promise = new CompletableFuture < Boolean > ( ) ; <nl> - nextFuture = promise ; <nl> - <nl> - / / FIXME : BOOOOOOOOOO ! Maybe we don ' t need this ? <nl> - / / promise . onCancelledCancel ( fetchingChunk ) ; <nl> - <nl> - / / TODO : again , I have no idea if this will get out - of - scope collected right away <nl> - fetchingChunk . whenComplete ( new FetchComplete ( fetchingChunk , promise ) ) ; <nl> + nextFuture = new CompletableFuture < > ( ) ; <nl> + fetchingChunk . whenComplete ( new FetchComplete ( fetchingChunk , nextFuture ) ) ; <nl> } <nl> <nl> @ Override <nl> private synchronized void startNextFetch ( ) { <nl> <nl> / / We have a chunk and are still working though it <nl> if ( index < chunk . values . size ( ) ) { <nl> - return CompletableFuture . completedFuture ( true ) ; <nl> + return AsyncUtil . READY_TRUE ; <nl> } <nl> <nl> / / If we are at the end of the current chunk there is either : <nl> / / - no more data - or - <nl> / / - we are already fetching the next block <nl> return mainChunkIsTheLast ( ) ? <nl> - CompletableFuture . completedFuture ( false ) : <nl> + AsyncUtil . READY_FALSE : <nl> nextFuture ; <nl> } <nl> <nl> public synchronized void remove ( ) { <nl> public synchronized void cancel ( ) { <nl> isCancelled = true ; <nl> nextFuture . cancel ( true ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void dispose ( ) { <nl> - cancel ( ) ; <nl> + fetchingChunk . cancel ( true ) ; <nl> } <nl> } <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / ReadTransactionContext . 
java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / ReadTransactionContext . java <nl> <nl> package com . apple . foundationdb ; <nl> <nl> import java . util . concurrent . CompletableFuture ; <nl> - import java . util . concurrent . ExecutionException ; <nl> import java . util . concurrent . Executor ; <nl> import java . util . function . Function ; <nl> <nl> <nl> * to { @ code retryable } <nl> * / <nl> < T > CompletableFuture < T > readAsync ( <nl> - Function < ? super ReadTransaction , CompletableFuture < T > > retryable ) ; <nl> + Function < ? super ReadTransaction , ? extends CompletableFuture < T > > retryable ) ; <nl> <nl> / * * <nl> * Retrieves the { @ link Executor } used by this { @ code TransactionContext } when running <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / Transaction . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / Transaction . java <nl> <nl> <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . function . Function ; <nl> + <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> / * * <nl> <nl> * < br > <nl> * < b > Note : < / b > Java transactions automatically set the { @ link TransactionOptions # setUsedDuringCommitProtectionDisable } <nl> * option . This is because the Java bindings disallow use of { @ code Transaction } objects after { @ link # onError } <nl> - * is called . <nl> + * is called . < br > <nl> + * < br > <nl> + * < b > Note : < / b > { @ code Transaction } objects must be { @ link # close closed } when no longer <nl> + * in use in order to free any associated resources . <nl> * / <nl> - public interface Transaction extends Disposable , ReadTransaction , TransactionContext { <nl> + public interface Transaction extends AutoCloseable , ReadTransaction , TransactionContext { <nl> <nl> / * * <nl> * Return special - purpose , read - only view of the database . Reads done through this interface are known as " snapshot reads " . <nl> <nl> * / <nl> @ Override <nl> < T > CompletableFuture < T > runAsync ( <nl> - Function < ? super Transaction , CompletableFuture < T > > retryable ) ; <nl> + Function < ? super Transaction , ? extends CompletableFuture < T > > retryable ) ; <nl> <nl> + / * * <nl> + * Close the { @ code Transaction } object and release any associated resources . This must be called at <nl> + * least once after the { @ code Transaction } object is no longer in use . This can be called multiple <nl> + * times , but care should be taken that it is not in use in another thread at the time of the call . <nl> + * / <nl> + @ Override <nl> + void close ( ) ; <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / TransactionContext . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / TransactionContext . java <nl> <nl> * @ return a { @ code CompletableFuture } that will be set to the value returned by the last call <nl> * to { @ code retryable } <nl> * / <nl> - < T > CompletableFuture < T > runAsync ( Function < ? super Transaction , CompletableFuture < T > > retryable ) ; <nl> + < T > CompletableFuture < T > runAsync ( Function < ? super Transaction , ? extends CompletableFuture < T > > retryable ) ; <nl> } <nl> \ No newline at end of file <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / async / AsyncIterator . 
java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / async / AsyncIterator . java <nl> <nl> import java . util . NoSuchElementException ; <nl> import java . util . concurrent . CompletableFuture ; <nl> <nl> - import com . apple . foundationdb . Disposable ; <nl> - <nl> / * * <nl> * A version of { @ code Iterator } that allows for non - blocking iteration over elements . <nl> * Calls to { @ link # next ( ) } will not block if { @ link # onHasNext ( ) } has been called <nl> <nl> * <nl> * @ param < T > the type of object yielded by { @ code next ( ) } <nl> * / <nl> - public interface AsyncIterator < T > extends Iterator < T > , Disposable { <nl> + public interface AsyncIterator < T > extends Iterator < T > { <nl> / * * <nl> * Returns a asynchronous signal for the presence of more elements in the sequence . <nl> - * Once the future returned by { @ link # onHasNext ( ) } is ready , the next call to <nl> + * Once the future returned by { @ code onHasNext ( ) } is ready , the next call to <nl> * { @ link # next } will not block . <nl> * <nl> * @ return a { @ code CompletableFuture } that will be set to { @ code true } if { @ code next ( ) } <nl> <nl> * Cancels any outstanding asynchronous work associated with this { @ code AsyncIterator } . <nl> * / <nl> void cancel ( ) ; <nl> - <nl> - / * * <nl> - * Cancel this { @ code AsyncIterable } and dispose of associated resources . Equivalent <nl> - * to calling { @ link AsyncIterator # cancel ( ) } . <nl> - * / <nl> - @ Override <nl> - void dispose ( ) ; <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / async / AsyncUtil . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / async / AsyncUtil . java <nl> <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . Executor ; <nl> import java . util . function . BiFunction ; <nl> + import java . util . function . Consumer ; <nl> import java . util . function . Function ; <nl> import java . util . function . Supplier ; <nl> <nl> <nl> * Provided utilities for using and manipulating { @ link CompletableFuture } s . <nl> * / <nl> public class AsyncUtil { <nl> + / * * <nl> + * A completed future of type { @ link Void } . In particular , it is completed to { @ code null } , <nl> + * but that shouldn ' t really matter for the { @ link Void } type . This can be used instead <nl> + * of creating a new future if one wants to signal that some asynchronous task has <nl> + * already been completed . <nl> + * / <nl> + public static final CompletableFuture < Void > DONE = CompletableFuture . completedFuture ( null ) ; <nl> + / * * <nl> + * A completed future of type { @ link Boolean } that is set to { @ code true } . This can be <nl> + * used instead of creating a new future if one wants to signal that some task has <nl> + * already been completed with a { @ code true } result . <nl> + * / <nl> + public static final CompletableFuture < Boolean > READY_TRUE = CompletableFuture . completedFuture ( Boolean . TRUE ) ; <nl> + / * * <nl> + * A completed future of type { @ link Boolean } that is set to { @ code false } . This can be <nl> + * used instead of creating a new future if one wants to signal that some task has <nl> + * already been completed with a { @ code false } result . <nl> + * / <nl> + public static final CompletableFuture < Boolean > READY_FALSE = CompletableFuture . completedFuture ( Boolean . 
FALSE ) ; <nl> + <nl> / * * <nl> * Run { @ code Function } { @ code func } , returning all caught exceptions as a <nl> * { @ code CompletableFuture } in an error state . <nl> <nl> * @ return the output of { @ code func } , or a { @ code CompletableFuture } carrying any exception <nl> * caught in the process . <nl> * / <nl> - public static < I , O > CompletableFuture < O > applySafely ( Function < I , CompletableFuture < O > > func , I value ) { <nl> + public static < I , O > CompletableFuture < O > applySafely ( Function < I , ? extends CompletableFuture < O > > func , I value ) { <nl> try { <nl> return func . apply ( value ) ; <nl> } catch ( RuntimeException e ) { <nl> <nl> } <nl> } <nl> <nl> + / * * <nl> + * Run the { @ code consumer } on each element of the iterable in order . The future will <nl> + * complete with either the first error encountered by either the iterable itself <nl> + * or by the consumer provided or with { @ code null } if the future completes <nl> + * successfully . Items are processed in order from the iterable , and each item <nl> + * will be processed only after the item before it has finished processing . <nl> + * <nl> + * @ param iterable the source of data over from which to consume <nl> + * @ param consumer operation to apply to each item <nl> + * @ param < V > type of the items returned by the iterable <nl> + * <nl> + * @ return a future that is ready once the asynchronous operation completes <nl> + * / <nl> + public static < V > CompletableFuture < Void > forEach ( final AsyncIterable < V > iterable , final Consumer < ? super V > consumer ) { <nl> + return forEachRemaining ( iterable . iterator ( ) , consumer ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Run the { @ code consumer } on each element of the iterable in order . The future will <nl> + * complete with either the first error encountered by either the iterable itself <nl> + * or by the consumer provided or with { @ code null } if the future completes <nl> + * successfully . Items are processed in order from the iterable , and each item <nl> + * will be processed only after the item before it has finished processing . Asynchronous <nl> + * tasks needed to complete this operation are scheduled on the provided executor . <nl> + * <nl> + * @ param iterable the source of data over from which to consume <nl> + * @ param consumer operation to apply to each item <nl> + * @ param executor executor on which to schedule asynchronous tasks <nl> + * @ param < V > type of the items returned by the iterable <nl> + * <nl> + * @ return a future that is ready once the asynchronous operation completes <nl> + * / <nl> + public static < V > CompletableFuture < Void > forEach ( final AsyncIterable < V > iterable , final Consumer < ? super V > consumer , final Executor executor ) { <nl> + return forEachRemaining ( iterable . iterator ( ) , consumer , executor ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Run the { @ code consumer } on each element remaining in the iterator in order . The future will <nl> + * complete with either the first error encountered by either the iterator itself <nl> + * or by the consumer provided or with { @ code null } if the future completes <nl> + * successfully . Items are processed in order from the iterator , and each item <nl> + * will be processed only after the item before it has finished processing . 
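// --- Usage sketch (editorial, not part of the diff above) for the forEach helper and the
// --- shared READY_TRUE / READY_FALSE / DONE futures added to AsyncUtil. The class name,
// --- the AsyncIterable parameter, and the consumer body are illustrative assumptions.
import java.util.concurrent.CompletableFuture;

import com.apple.foundationdb.async.AsyncIterable;
import com.apple.foundationdb.async.AsyncUtil;

class ForEachSketch {
    // Drain an AsyncIterable element by element without materializing a list; the returned
    // future completes once iteration (and every consumer call) has finished.
    static CompletableFuture<Void> printAll(AsyncIterable<String> keys) {
        return AsyncUtil.forEach(keys, key -> System.out.println(key));
    }

    // Work whose outcome is already known can hand back the shared constants rather than
    // allocating a fresh completed future each time.
    static CompletableFuture<Boolean> alreadyKnown(boolean value) {
        return value ? AsyncUtil.READY_TRUE : AsyncUtil.READY_FALSE;
    }
}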
<nl> + * <nl> + * @ param iterator the source of data over from which to consume <nl> + * @ param consumer operation to apply to each item <nl> + * @ param < V > type of the items returned by the iterator <nl> + * <nl> + * @ return a future that is ready once the asynchronous operation completes <nl> + * / <nl> + public static < V > CompletableFuture < Void > forEachRemaining ( final AsyncIterator < V > iterator , final Consumer < ? super V > consumer ) { <nl> + return forEachRemaining ( iterator , consumer , DEFAULT_EXECUTOR ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Run the { @ code consumer } on each element remaining if the iterator in order . The future will <nl> + * complete with either the first error encountered by either the iterator itself <nl> + * or by the consumer provided or with { @ code null } if the future completes <nl> + * successfully . Items are processed in order from the iterator , and each item <nl> + * will be processed only after the item before it has finished processing . Asynchronous <nl> + * tasks needed to complete this operation are scheduled on the provided executor . <nl> + * <nl> + * @ param iterator the source of data over from which to consume <nl> + * @ param consumer operation to apply to each item <nl> + * @ param executor executor on which to schedule asynchronous tasks <nl> + * @ param < V > type of the items returned by the iterator <nl> + * <nl> + * @ return a future that is ready once the asynchronous operation completes <nl> + * / <nl> + public static < V > CompletableFuture < Void > forEachRemaining ( final AsyncIterator < V > iterator , final Consumer < ? super V > consumer , final Executor executor ) { <nl> + return iterator . onHasNext ( ) . thenComposeAsync ( hasAny - > { <nl> + if ( hasAny ) { <nl> + return whileTrue ( ( ) - > { <nl> + consumer . accept ( iterator . next ( ) ) ; <nl> + return iterator . onHasNext ( ) ; <nl> + } , executor ) ; <nl> + } else { <nl> + return DONE ; <nl> + } <nl> + } , executor ) ; <nl> + } <nl> + <nl> / * * <nl> * Iterates over a set of items and returns the result as a list . <nl> * <nl> <nl> return collect ( iterable , DEFAULT_EXECUTOR ) ; <nl> } <nl> <nl> + / * * <nl> + * Iterates over a set of items and returns the remaining results as a list . <nl> + * <nl> + * @ param iterator the source of data over which to iterate . This function will exhaust the iterator . <nl> + * <nl> + * @ return a { @ code CompletableFuture } which will be set to the amalgamation of results <nl> + * from iteration . <nl> + * / <nl> + public static < V > CompletableFuture < List < V > > collectRemaining ( final AsyncIterator < V > iterator ) { <nl> + return collectRemaining ( iterator , DEFAULT_EXECUTOR ) ; <nl> + } <nl> + <nl> / * * <nl> * Iterates over a set of items and returns the result as a list . <nl> * <nl> <nl> * from iteration . <nl> * / <nl> public static < V > CompletableFuture < List < V > > collect ( final AsyncIterable < V > iterable , final Executor executor ) { <nl> - final AsyncIterator < V > it = iterable . iterator ( ) ; <nl> - final List < V > accumulator = new LinkedList < V > ( ) ; <nl> - <nl> - / / The condition of the while loop is simply " onHasNext ( ) " returning true <nl> - Supplier < CompletableFuture < Boolean > > condition = new Supplier < CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > get ( ) { <nl> - return it . onHasNext ( ) . 
thenApply ( new Function < Boolean , Boolean > ( ) { <nl> - @ Override <nl> - public Boolean apply ( Boolean o ) { <nl> - if ( o ) { <nl> - accumulator . add ( it . next ( ) ) ; <nl> - } <nl> - return o ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ; <nl> - <nl> - CompletableFuture < Void > complete = whileTrue ( condition , executor ) ; <nl> - CompletableFuture < List < V > > result = tag ( complete , accumulator ) ; <nl> + return collectRemaining ( iterable . iterator ( ) , executor ) ; <nl> + } <nl> <nl> - return result ; <nl> + / * * <nl> + * Iterates over a set of items and returns the remaining results as a list . <nl> + * <nl> + * @ param iterator the source of data over which to iterate . This function will exhaust the iterator . <nl> + * @ param executor the { @ link Executor } to use for asynchronous operations <nl> + * <nl> + * @ return a { @ code CompletableFuture } which will be set to the amalgamation of results <nl> + * from iteration . <nl> + * / <nl> + public static < V > CompletableFuture < List < V > > collectRemaining ( final AsyncIterator < V > iterator , final Executor executor ) { <nl> + final List < V > accumulator = new LinkedList < > ( ) ; <nl> + return tag ( forEachRemaining ( iterator , accumulator : : add , executor ) , accumulator ) ; <nl> } <nl> <nl> / * * <nl> public Boolean apply ( Boolean o ) { <nl> return new AsyncIterable < T > ( ) { <nl> @ Override <nl> public AsyncIterator < T > iterator ( ) { <nl> - final AsyncIterator < V > it = iterable . iterator ( ) ; <nl> - return new AsyncIterator < T > ( ) { <nl> + return mapIterator ( iterable . iterator ( ) , func ) ; <nl> + } <nl> <nl> - @ Override <nl> - public void remove ( ) { <nl> - it . remove ( ) ; <nl> - } <nl> + @ Override <nl> + public CompletableFuture < List < T > > asList ( ) { <nl> + final List < T > accumulator = new LinkedList < > ( ) ; <nl> + return tag ( AsyncUtil . forEach ( iterable , value - > accumulator . add ( func . apply ( value ) ) ) , accumulator ) ; <nl> + } <nl> + } ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < Boolean > onHasNext ( ) { <nl> - return it . onHasNext ( ) ; <nl> - } <nl> + / * * <nl> + * Map an { @ code AsyncIterator } into an { @ code AsyncIterator } of another type or with <nl> + * each element modified in some fashion . <nl> + * <nl> + * @ param iterator input <nl> + * @ param func mapping function applied to each element <nl> + * @ return a new iterator with each element mapped to a different value <nl> + * / <nl> + public static < V , T > AsyncIterator < T > mapIterator ( final AsyncIterator < V > iterator , <nl> + final Function < V , T > func ) { <nl> + return new AsyncIterator < T > ( ) { <nl> + @ Override <nl> + public void remove ( ) { <nl> + iterator . remove ( ) ; <nl> + } <nl> <nl> - @ Override <nl> - public boolean hasNext ( ) { <nl> - return it . hasNext ( ) ; <nl> - } <nl> + @ Override <nl> + public CompletableFuture < Boolean > onHasNext ( ) { <nl> + return iterator . onHasNext ( ) ; <nl> + } <nl> <nl> - @ Override <nl> - public T next ( ) { <nl> - return func . apply ( it . next ( ) ) ; <nl> - } <nl> + @ Override <nl> + public boolean hasNext ( ) { <nl> + return iterator . hasNext ( ) ; <nl> + } <nl> <nl> - @ Override <nl> - public void cancel ( ) { <nl> - it . cancel ( ) ; <nl> - } <nl> + @ Override <nl> + public T next ( ) { <nl> + return func . apply ( iterator . next ( ) ) ; <nl> + } <nl> <nl> - @ Override <nl> - public void dispose ( ) { <nl> - it . 
dispose ( ) ; <nl> - } <nl> - } ; <nl> + @ Override <nl> + public void cancel ( ) { <nl> + iterator . cancel ( ) ; <nl> + } <nl> + } ; <nl> + } <nl> + <nl> + / * * <nl> + * Map a { @ code CloseableAsyncIterator } into a { @ code CloseableAsyncIterator } of another type or with <nl> + * each element modified in some fashion . <nl> + * <nl> + * @ param iterator input <nl> + * @ param func mapping function applied to each element <nl> + * @ return a new iterator with each element mapped to a different value <nl> + * / <nl> + public static < V , T > CloseableAsyncIterator < T > mapIterator ( final CloseableAsyncIterator < V > iterator , <nl> + final Function < V , T > func ) { <nl> + return new CloseableAsyncIterator < T > ( ) { <nl> + @ Override <nl> + public void remove ( ) { <nl> + iterator . remove ( ) ; <nl> } <nl> <nl> @ Override <nl> - public CompletableFuture < List < T > > asList ( ) { <nl> - return iterable . asList ( ) . thenApply ( new Function < List < V > , List < T > > ( ) { <nl> - @ Override <nl> - public List < T > apply ( List < V > o ) { <nl> - ArrayList < T > out = new ArrayList < T > ( o . size ( ) ) ; <nl> - for ( V in : o ) <nl> - out . add ( func . apply ( in ) ) ; <nl> - return out ; <nl> - } <nl> - } ) ; <nl> + public CompletableFuture < Boolean > onHasNext ( ) { <nl> + return iterator . onHasNext ( ) ; <nl> + } <nl> + <nl> + @ Override <nl> + public boolean hasNext ( ) { <nl> + return iterator . hasNext ( ) ; <nl> + } <nl> + <nl> + @ Override <nl> + public T next ( ) { <nl> + return func . apply ( iterator . next ( ) ) ; <nl> + } <nl> + <nl> + @ Override <nl> + public void cancel ( ) { <nl> + iterator . cancel ( ) ; <nl> + } <nl> + <nl> + @ Override <nl> + public void close ( ) { <nl> + iterator . close ( ) ; <nl> } <nl> } ; <nl> } <nl> public Void apply ( Boolean more , Throwable error ) { <nl> * <nl> * @ param body the asynchronous operation over which to loop <nl> * <nl> - * @ return a { @ code PartialFuture } which will be set at completion of the loop . <nl> + * @ return a { @ link CompletableFuture } which will be set at completion of the loop . <nl> * @ deprecated Since version 5 . 1 . 0 . Use the version of { @ link # whileTrue ( Supplier ) whileTrue } that takes a <nl> * { @ link Supplier } instead . <nl> * / <nl> public Void apply ( Boolean more , Throwable error ) { <nl> * @ param body the asynchronous operation over which to loop <nl> * @ param executor the { @ link Executor } to use for asynchronous operations <nl> * <nl> - * @ return a { @ code PartialFuture } which will be set at completion of the loop . <nl> + * @ return a { @ link CompletableFuture } which will be set at completion of the loop . <nl> * @ deprecated Since version 5 . 1 . 0 . Use the version of { @ link # whileTrue ( Supplier , Executor ) whileTrue } that takes a <nl> * { @ link Supplier } instead . <nl> * / <nl> public Void apply ( Boolean more , Throwable error ) { <nl> * <nl> * @ param body the asynchronous operation over which to loop <nl> * <nl> - * @ return a { @ code PartialFuture } which will be set at completion of the loop . <nl> + * @ return a { @ link CompletableFuture } which will be set at completion of the loop . 
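// --- Minimal sketch (editorial, assumed names) of the Supplier-based whileTrue loop that the
// --- surrounding Javadoc describes: the loop continues while the supplied future yields true.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

import com.apple.foundationdb.async.AsyncUtil;

class WhileTrueSketch {
    // Increment a counter ten times; each iteration reports whether another pass is needed.
    static CompletableFuture<Void> countToTen() {
        AtomicInteger count = new AtomicInteger();
        return AsyncUtil.whileTrue(() ->
                CompletableFuture.completedFuture(count.incrementAndGet() < 10));
    }
}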
<nl> * / <nl> public static CompletableFuture < Void > whileTrue ( Supplier < CompletableFuture < Boolean > > body ) { <nl> return whileTrue ( body , DEFAULT_EXECUTOR ) ; <nl> public Void apply ( Boolean more , Throwable error ) { <nl> * @ param body the asynchronous operation over which to loop <nl> * @ param executor the { @ link Executor } to use for asynchronous operations <nl> * <nl> - * @ return a { @ code PartialFuture } which will be set at completion of the loop . <nl> + * @ return a { @ link CompletableFuture } which will be set at completion of the loop . <nl> * / <nl> public static CompletableFuture < Void > whileTrue ( Supplier < CompletableFuture < Boolean > > body , Executor executor ) { <nl> return new LoopPartial ( body , executor ) . run ( ) ; <nl> public Void apply ( Boolean more , Throwable error ) { <nl> * @ return a new { @ link CompletableFuture } that is set when { @ code task } is ready . <nl> * / <nl> public static < V > CompletableFuture < Void > whenReady ( CompletableFuture < V > task ) { <nl> - return task . thenApply ( o - > ( Void ) null ) <nl> - . exceptionally ( o - > null ) ; <nl> + return task . handle ( ( v , t ) - > null ) ; <nl> } <nl> <nl> public static < V > CompletableFuture < V > composeExceptionally ( CompletableFuture < V > task , Function < Throwable , CompletableFuture < V > > fn ) { <nl> public Void apply ( Boolean more , Throwable error ) { <nl> } else { <nl> return task ; <nl> } <nl> - } ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Compose a handler bi - function to the result of a future . Unlike the <nl> + * { @ link CompletableFuture # handle ( BiFunction ) CompletableFuture . handle ( ) } <nl> + * function , which requires that the handler return a regular value , this <nl> + * method requires that the handler return a { @ link CompletableFuture } . <nl> + * The returned future will then be ready with the result of the <nl> + * handler ' s future ( or an error if that future completes exceptionally ) . <nl> + * <nl> + * @ param future future to compose the handler onto <nl> + * @ param handler handler bi - function to compose onto the passed future <nl> + * @ param < V > return type of original future <nl> + * @ param < T > return type of final future <nl> + * <nl> + * @ return future with same completion properties as the future returned by the handler <nl> + * / <nl> + public static < V , T > CompletableFuture < T > composeHandle ( CompletableFuture < V > future , BiFunction < V , Throwable , ? extends CompletableFuture < T > > handler ) { <nl> + return future . handle ( handler ) . thenCompose ( Function . identity ( ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Compose a handler bi - function to the result of a future . Unlike the <nl> + * { @ link CompletableFuture # handle ( BiFunction ) CompletableFuture . handle ( ) } <nl> + * function , which requires that the handler return a regular value , this <nl> + * method requires that the handler return a { @ link CompletableFuture } . <nl> + * The returned future will then be ready with the result of the <nl> + * handler ' s future ( or an error if that future completes exceptionally ) . <nl> + * The handler will execute on the { @ link com . apple . foundationdb . FDB # DEFAULT_EXECUTOR default executor } <nl> + * used for asychronous tasks . 
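// --- Hedged sketch (editorial, assumed names) of the composeHandle helper introduced above:
// --- unlike CompletableFuture.handle(), the handler returns another future, so an error path
// --- can recover with further asynchronous work.
import java.util.concurrent.CompletableFuture;

import com.apple.foundationdb.async.AsyncUtil;

class ComposeHandleSketch {
    // Pass a successful value through; on failure, defer to an alternative asynchronous result.
    static CompletableFuture<String> withFallback(CompletableFuture<String> primary,
                                                  CompletableFuture<String> fallback) {
        return AsyncUtil.composeHandle(primary, (value, error) ->
                error == null ? CompletableFuture.completedFuture(value) : fallback);
    }
}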
<nl> + * <nl> + * @ param future future to compose the handler onto <nl> + * @ param handler handler bi - function to compose onto the passed future <nl> + * @ param < V > return type of original future <nl> + * @ param < T > return type of final future <nl> + * <nl> + * @ return future with same completion properties as the future returned by the handler <nl> + * / <nl> + public static < V , T > CompletableFuture < T > composeHandleAsync ( CompletableFuture < V > future , BiFunction < V , Throwable , ? extends CompletableFuture < T > > handler ) { <nl> + return composeHandleAsync ( future , handler , DEFAULT_EXECUTOR ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Compose a handler bi - function to the result of a future . Unlike the <nl> + * { @ link CompletableFuture # handle ( BiFunction ) CompletableFuture . handle ( ) } <nl> + * function , which requires that the handler return a regular value , this <nl> + * method requires that the handler return a { @ link CompletableFuture } . <nl> + * The returned future will then be ready with the result of the <nl> + * handler ' s future ( or an error if that future completes excpetionally ) . <nl> + * The handler will execute on the passed { @ link Executor } . <nl> + * <nl> + * @ param future future to compose the handler onto <nl> + * @ param handler handler bi - function to compose onto the passed future <nl> + * @ param executor executor on which to execute the handler function <nl> + * @ param < V > return type of original future <nl> + * @ param < T > return type of final future <nl> + * <nl> + * @ return future with same completion properties as the future returned by the handler <nl> + * / <nl> + public static < V , T > CompletableFuture < T > composeHandleAsync ( CompletableFuture < V > future , BiFunction < V , Throwable , ? extends CompletableFuture < T > > handler , Executor executor ) { <nl> + return future . handleAsync ( handler , executor ) . thenCompose ( Function . identity ( ) ) ; <nl> } <nl> <nl> / * * <nl> public Void apply ( Boolean more , Throwable error ) { <nl> * / <nl> public static < V > CompletableFuture < List < V > > getAll ( final Collection < CompletableFuture < V > > tasks ) { <nl> return whenAll ( tasks ) . thenApply ( unused - > { <nl> - List < V > result = new ArrayList < > ( ) ; <nl> + List < V > result = new ArrayList < > ( tasks . size ( ) ) ; <nl> for ( CompletableFuture < V > f : tasks ) { <nl> assert ( f . isDone ( ) ) ; <nl> result . add ( f . getNow ( null ) ) ; <nl> public Void apply ( Boolean more , Throwable error ) { <nl> } <nl> <nl> / * * <nl> - * Return a { @ code CompletableFuture } that will be set when any of the { @ code PartialFuture } <nl> + * Return a { @ code CompletableFuture } that will be set when any of the { @ link CompletableFuture } <nl> * inputs are done . A { @ code CompletableFuture } is done both on success and failure . <nl> * <nl> - * @ param input the list of { @ code PartialFuture } s to monitor . This list <nl> + * @ param input the list of { @ link CompletableFuture } s to monitor . This list <nl> * < b > must not < / b > be modified during the execution of this call . <nl> * <nl> * @ return a signal that will be set when any of the { @ code CompletableFuture } s are done <nl> * / <nl> public static < V > CompletableFuture < Void > whenAny ( final Collection < ? extends CompletableFuture < V > > input ) { <nl> - @ SuppressWarnings ( " unchecked " ) <nl> - CompletableFuture < V > [ ] array = ( CompletableFuture < V > [ ] ) input . toArray ( new CompletableFuture < ? 
> [ input . size ( ) ] ) ; <nl> + CompletableFuture < ? > [ ] array = input . toArray ( new CompletableFuture < ? > [ input . size ( ) ] ) ; <nl> CompletableFuture < Object > anyOf = CompletableFuture . anyOf ( array ) ; <nl> return success ( anyOf ) ; <nl> } <nl> <nl> / * * <nl> - * Return a { @ code CompletableFuture } that will be set when all the { @ code PartialFuture } <nl> + * Return a { @ code CompletableFuture } that will be set when all the { @ link CompletableFuture } <nl> * inputs are done . A { @ code CompletableFuture } is done both on success and failure . <nl> * <nl> - * @ param input the list of { @ code PartialFuture } s to monitor . This list <nl> + * @ param input the list of { @ link CompletableFuture } s to monitor . This list <nl> * < b > must not < / b > be modified during the execution of this call . <nl> * <nl> * @ return a signal that will be set when all of the { @ code CompletableFuture } s are done <nl> * / <nl> public static < V > CompletableFuture < Void > whenAll ( final Collection < ? extends CompletableFuture < V > > input ) { <nl> - @ SuppressWarnings ( " unchecked " ) <nl> - CompletableFuture < V > [ ] array = ( CompletableFuture < V > [ ] ) input . toArray ( new CompletableFuture < ? > [ input . size ( ) ] ) ; <nl> + CompletableFuture < ? > [ ] array = input . toArray ( new CompletableFuture < ? > [ input . size ( ) ] ) ; <nl> return CompletableFuture . allOf ( array ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / async / Cancellable . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / async / Cancellable . java <nl> <nl> * is not an error to call this method on an operation that has already completed or <nl> * already been cancelled . This method will not block or throw non - fatal exceptions . <nl> * / <nl> - public abstract void cancel ( ) ; <nl> + void cancel ( ) ; <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / async / CloneableException . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / async / CloneableException . java <nl> <nl> * <nl> * @ return a newly created { @ code Exception } . <nl> * / <nl> - public Exception retargetClone ( ) ; <nl> + Exception retargetClone ( ) ; <nl> } <nl> new file mode 100644 <nl> index 0000000000 . . ebad0155b6 <nl> mmm / dev / null <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / async / CloseableAsyncIterator . java <nl> <nl> + / * <nl> + * CloseableAsyncIterator . java <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2018 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + package com . apple . foundationdb . 
async ; <nl> + <nl> + / * * <nl> + * A version of { @ link AsyncIterator } that must be closed once no longer in use in order to free <nl> + * any associated resources . <nl> + * <nl> + * @ param < T > the type of object yielded by { @ code next ( ) } <nl> + * / <nl> + public interface CloseableAsyncIterator < T > extends AutoCloseable , AsyncIterator < T > { <nl> + / * * <nl> + * Cancels any outstanding asynchronous work , closes the iterator , and frees any associated <nl> + * resources . This must be called at least once after the object is no longer in use . This <nl> + * can be called multiple times , but care should be taken that an object is not in use <nl> + * in another thread at the time of the call . <nl> + * / <nl> + @ Override <nl> + void close ( ) ; <nl> + <nl> + / * * <nl> + * Alias for { @ link # close } . <nl> + * / <nl> + @ Override <nl> + default void cancel ( ) { <nl> + close ( ) ; <nl> + } <nl> + } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectoryLayer . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectoryLayer . java <nl> <nl> import java . util . Random ; <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . function . Function ; <nl> - import java . util . function . Supplier ; <nl> <nl> import com . apple . foundationdb . KeyValue ; <nl> import com . apple . foundationdb . MutationType ; <nl> <nl> * access to subspaces . <nl> * < / p > <nl> * / <nl> - public class DirectoryLayer implements Directory <nl> - { <nl> + public class DirectoryLayer implements Directory { <nl> private static final Charset UTF_8 = Charset . forName ( " UTF - 8 " ) ; <nl> private static final byte [ ] LITTLE_ENDIAN_LONG_ONE = { 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 } ; <nl> private static final byte [ ] HIGH_CONTENTION_KEY = " hca " . getBytes ( UTF_8 ) ; <nl> public DirectoryLayer ( Subspace nodeSubspace , Subspace contentSubspace , boolean a <nl> * Creates a new { @ code DirectoryLayer } formed with a specified node subspace and default content subspace . <nl> * Prefixes can not be specified in calls to { @ link Directory # create ( TransactionContext , List , byte [ ] , byte [ ] ) } . <nl> * <nl> - * @ param node_subspace a { @ link Subspace } used to store directory metadata <nl> - * @ return a { @ code DirectoryLayer } formed with { @ code node_subspace } and a default content subspace <nl> + * @ param nodeSubspace a { @ link Subspace } used to store directory metadata <nl> + * @ return a { @ code DirectoryLayer } formed with { @ code nodeSubspace } and a default content subspace <nl> * / <nl> - public static Directory createWithNodeSubspace ( Subspace node_subspace ) { <nl> - return new DirectoryLayer ( node_subspace , DEFAULT_CONTENT_SUBSPACE ) ; <nl> + public static Directory createWithNodeSubspace ( Subspace nodeSubspace ) { <nl> + return new DirectoryLayer ( nodeSubspace , DEFAULT_CONTENT_SUBSPACE ) ; <nl> } <nl> <nl> / * * <nl> * Creates a new { @ code DirectoryLayer } formed with a default node subspace and specified content subspace . <nl> * Prefixes can not be specified in calls to { @ link Directory # create ( TransactionContext , List , byte [ ] , byte [ ] ) } . 
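// --- Editorial sketch of the try-with-resources pattern that the CloseableAsyncIterator
// --- interface introduced above is designed for; the element type and the caller supplying
// --- the iterator are assumptions for illustration only.
import com.apple.foundationdb.async.CloseableAsyncIterator;

class CloseableIteratorSketch {
    // The iterator must be closed once no longer in use; try-with-resources releases the
    // underlying resources even if iteration throws.
    static long countRemaining(CloseableAsyncIterator<byte[]> source) {
        long n = 0;
        try (CloseableAsyncIterator<byte[]> it = source) {
            while (it.hasNext()) {
                it.next();
                n++;
            }
        }
        return n;
    }
}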
<nl> * <nl> - * @ param content_subspace a { @ link Subspace } used to store directory content <nl> - * @ return a { @ code DirectoryLayer } formed with a { @ code content_subspace } and a default node subspace <nl> + * @ param contentSubspace a { @ link Subspace } used to store directory content <nl> + * @ return a { @ code DirectoryLayer } formed with a { @ code contentSubspace } and a default node subspace <nl> * / <nl> - public static Directory createWithContentSubspace ( Subspace content_subspace ) { <nl> - return new DirectoryLayer ( DEFAULT_NODE_SUBSPACE , content_subspace ) ; <nl> + public static Directory createWithContentSubspace ( Subspace contentSubspace ) { <nl> + return new DirectoryLayer ( DEFAULT_NODE_SUBSPACE , contentSubspace ) ; <nl> } <nl> <nl> / * * <nl> public boolean equals ( Object rhs ) { <nl> } <nl> DirectoryLayer other = ( DirectoryLayer ) rhs ; <nl> <nl> - return ( path = = other . path | | path . equals ( other . path ) ) <nl> - & & nodeSubspace . equals ( other . nodeSubspace ) <nl> - & & contentSubspace . equals ( other . contentSubspace ) ; <nl> + return ( path = = other . path | | path . equals ( other . path ) ) & & <nl> + nodeSubspace . equals ( other . nodeSubspace ) & & <nl> + contentSubspace . equals ( other . contentSubspace ) ; <nl> } <nl> <nl> / * * <nl> public boolean equals ( Object rhs ) { <nl> * / <nl> @ Override <nl> public int hashCode ( ) { <nl> - return path . hashCode ( ) ^ ( nodeSubspace . hashCode ( ) * 179 ) ^ ( contentSubspace . hashCode ( ) * 937 ) ; <nl> + return path . hashCode ( ) ^ ( nodeSubspace . hashCode ( ) * 179 ) ^ ( contentSubspace . hashCode ( ) * 937 ) ; <nl> } <nl> <nl> / * * <nl> public DirectoryLayer getDirectoryLayer ( ) { <nl> * / <nl> @ Override <nl> public CompletableFuture < DirectorySubspace > createOrOpen ( TransactionContext tcx , final List < String > path , final byte [ ] layer ) { <nl> - return tcx . runAsync ( new Function < Transaction , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( Transaction tr ) { <nl> - return createOrOpenInternal ( tr , tr , path , layer , null , true , true ) ; <nl> - } <nl> - } ) ; <nl> + return tcx . runAsync ( tr - > createOrOpenInternal ( tr , tr , path , layer , null , true , true ) ) ; <nl> } <nl> <nl> / * * <nl> public DirectoryLayer getDirectoryLayer ( ) { <nl> * / <nl> @ Override <nl> public CompletableFuture < DirectorySubspace > open ( ReadTransactionContext tcx , final List < String > path , final byte [ ] layer ) { <nl> - return tcx . readAsync ( new Function < ReadTransaction , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( ReadTransaction rtr ) { <nl> - return createOrOpenInternal ( rtr , null , path , layer , null , false , true ) ; <nl> - } <nl> - } ) ; <nl> + return tcx . readAsync ( rtr - > createOrOpenInternal ( rtr , null , path , layer , null , false , true ) ) ; <nl> } <nl> <nl> / * * <nl> public DirectoryLayer getDirectoryLayer ( ) { <nl> * / <nl> @ Override <nl> public CompletableFuture < DirectorySubspace > create ( TransactionContext tcx , final List < String > path , final byte [ ] layer , final byte [ ] prefix ) { <nl> - return tcx . 
runAsync ( new Function < Transaction , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( Transaction tr ) { <nl> - return createOrOpenInternal ( tr , tr , path , layer , prefix , true , false ) ; <nl> - } <nl> - } ) ; <nl> + return tcx . runAsync ( tr - > createOrOpenInternal ( tr , tr , path , layer , prefix , true , false ) ) ; <nl> } <nl> <nl> / * * <nl> public DirectoryLayer getDirectoryLayer ( ) { <nl> * / <nl> @ Override <nl> public CompletableFuture < DirectorySubspace > moveTo ( TransactionContext tcx , List < String > newAbsolutePath ) { <nl> - CompletableFuture < DirectorySubspace > future = new CompletableFuture < DirectorySubspace > ( ) ; <nl> + CompletableFuture < DirectorySubspace > future = new CompletableFuture < > ( ) ; <nl> future . completeExceptionally ( new DirectoryMoveException ( " The root directory cannot be moved . " , path , newAbsolutePath ) ) ; <nl> return future ; <nl> } <nl> public DirectoryLayer getDirectoryLayer ( ) { <nl> * / <nl> @ Override <nl> public CompletableFuture < DirectorySubspace > move ( final TransactionContext tcx , final List < String > oldPath , final List < String > newPath ) { <nl> - final List < String > oldPathCopy = new ArrayList < String > ( oldPath ) ; <nl> - final List < String > newPathCopy = new ArrayList < String > ( newPath ) ; <nl> - <nl> - return tcx . runAsync ( new Function < Transaction , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( final Transaction tr ) { <nl> - return checkOrWriteVersion ( tr ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < List < Node > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < Node > > apply ( Void ignore ) { <nl> - if ( oldPathCopy . size ( ) < = newPathCopy . size ( ) & & oldPathCopy . equals ( newPathCopy . subList ( 0 , oldPathCopy . size ( ) ) ) ) <nl> - throw new DirectoryMoveException ( " The destination directory cannot be a subdirectory of the source directory . " , toAbsolutePath ( oldPathCopy ) , toAbsolutePath ( newPathCopy ) ) ; <nl> - <nl> - ArrayList < CompletableFuture < Node > > futures = new ArrayList < CompletableFuture < Node > > ( ) ; <nl> - futures . add ( new NodeFinder ( oldPathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ) ; <nl> - futures . add ( new NodeFinder ( newPathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ) ; <nl> - <nl> - return AsyncUtil . getAll ( futures ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) <nl> - . thenCompose ( new Function < List < Node > , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( List < Node > nodes ) { <nl> - final Node oldNode = nodes . get ( 0 ) ; <nl> - final Node newNode = nodes . get ( 1 ) ; <nl> + final List < String > oldPathCopy = new ArrayList < > ( oldPath ) ; <nl> + final List < String > newPathCopy = new ArrayList < > ( newPath ) ; <nl> <nl> - if ( ! oldNode . exists ( ) ) <nl> - throw new NoSuchDirectoryException ( toAbsolutePath ( oldPathCopy ) ) ; <nl> + return tcx . runAsync ( tr - > checkOrWriteVersion ( tr ) . thenComposeAsync ( ignore - > { <nl> + if ( oldPathCopy . size ( ) < = newPathCopy . size ( ) & & oldPathCopy . equals ( newPathCopy . subList ( 0 , oldPathCopy . 
size ( ) ) ) ) <nl> + throw new DirectoryMoveException ( " The destination directory cannot be a subdirectory of the source directory . " , toAbsolutePath ( oldPathCopy ) , toAbsolutePath ( newPathCopy ) ) ; <nl> <nl> - if ( oldNode . isInPartition ( false ) | | newNode . isInPartition ( false ) ) { <nl> - if ( ! oldNode . isInPartition ( false ) | | ! newNode . isInPartition ( false ) | | ! oldNode . path . equals ( newNode . path ) ) <nl> - throw new DirectoryMoveException ( " Cannot move between partitions . " , toAbsolutePath ( oldPathCopy ) , toAbsolutePath ( newPathCopy ) ) ; <nl> + ArrayList < CompletableFuture < Node > > futures = new ArrayList < > ( ) ; <nl> + futures . add ( new NodeFinder ( oldPathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ) ; <nl> + futures . add ( new NodeFinder ( newPathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ) ; <nl> <nl> - return newNode . getContents ( ) . move ( tr , oldNode . getPartitionSubpath ( ) , newNode . getPartitionSubpath ( ) ) ; <nl> - } <nl> + return AsyncUtil . getAll ( futures ) ; <nl> + } , tr . getExecutor ( ) ) <nl> + . thenComposeAsync ( nodes - > { <nl> + final Node oldNode = nodes . get ( 0 ) ; <nl> + final Node newNode = nodes . get ( 1 ) ; <nl> <nl> - if ( newNode . exists ( ) ) <nl> - throw new DirectoryAlreadyExistsException ( toAbsolutePath ( newPathCopy ) ) ; <nl> - <nl> - final List < String > parentPath = PathUtil . popBack ( newPathCopy ) ; <nl> - return new NodeFinder ( parentPath ) . find ( tr ) <nl> - . thenComposeAsync ( new Function < Node , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( Node parentNode ) { <nl> - if ( ! parentNode . exists ( ) ) <nl> - throw new NoSuchDirectoryException ( toAbsolutePath ( parentPath ) ) ; <nl> - <nl> - tr . set ( <nl> - parentNode . subspace . get ( SUB_DIR_KEY ) . get ( getLast ( newPathCopy ) ) . getKey ( ) , <nl> - contentsOfNode ( oldNode . subspace , EMPTY_PATH , EMPTY_BYTES ) . getKey ( ) <nl> - ) ; <nl> - <nl> - return removeFromParent ( tr , oldPathCopy ) <nl> - . thenApply ( new Function < Void , DirectorySubspace > ( ) { <nl> - @ Override <nl> - public DirectorySubspace apply ( Void ignore ) { <nl> - return contentsOfNode ( oldNode . subspace , newPathCopy , oldNode . layer ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> - } <nl> - } ) ; <nl> + if ( ! oldNode . exists ( ) ) <nl> + throw new NoSuchDirectoryException ( toAbsolutePath ( oldPathCopy ) ) ; <nl> + <nl> + if ( oldNode . isInPartition ( false ) | | newNode . isInPartition ( false ) ) { <nl> + if ( ! oldNode . isInPartition ( false ) | | ! newNode . isInPartition ( false ) | | ! oldNode . path . equals ( newNode . path ) ) <nl> + throw new DirectoryMoveException ( " Cannot move between partitions . " , toAbsolutePath ( oldPathCopy ) , toAbsolutePath ( newPathCopy ) ) ; <nl> + <nl> + return newNode . getContents ( ) . move ( tr , oldNode . getPartitionSubpath ( ) , newNode . getPartitionSubpath ( ) ) ; <nl> } <nl> - } ) ; <nl> + <nl> + if ( newNode . exists ( ) ) <nl> + throw new DirectoryAlreadyExistsException ( toAbsolutePath ( newPathCopy ) ) ; <nl> + <nl> + final List < String > parentPath = PathUtil . popBack ( newPathCopy ) ; <nl> + return new NodeFinder ( parentPath ) . find ( tr ) . thenComposeAsync ( parentNode - > { <nl> + if ( ! parentNode . 
exists ( ) ) <nl> + throw new NoSuchDirectoryException ( toAbsolutePath ( parentPath ) ) ; <nl> + <nl> + tr . set ( <nl> + parentNode . subspace . get ( SUB_DIR_KEY ) . get ( getLast ( newPathCopy ) ) . getKey ( ) , <nl> + contentsOfNode ( oldNode . subspace , EMPTY_PATH , EMPTY_BYTES ) . getKey ( ) <nl> + ) ; <nl> + <nl> + return removeFromParent ( tr , oldPathCopy ) <nl> + . thenApply ( ignore - > contentsOfNode ( oldNode . subspace , newPathCopy , oldNode . layer ) ) ; <nl> + } , tr . getExecutor ( ) ) ; <nl> + } , tr . getExecutor ( ) ) ) ; <nl> } <nl> <nl> / * * <nl> public DirectorySubspace apply ( Void ignore ) { <nl> public CompletableFuture < List < String > > list ( final ReadTransactionContext tcx , final List < String > path ) { <nl> final List < String > pathCopy = new ArrayList < String > ( path ) ; <nl> <nl> - return tcx . readAsync ( new Function < ReadTransaction , CompletableFuture < List < String > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < String > > apply ( final ReadTransaction tr ) { <nl> - return checkVersion ( tr ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < Node > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Node > apply ( Void ignore ) { <nl> - return new NodeFinder ( pathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) <nl> - . thenComposeAsync ( new Function < Node , CompletableFuture < List < String > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < String > > apply ( Node node ) { <nl> - if ( ! node . exists ( ) ) <nl> - throw new NoSuchDirectoryException ( toAbsolutePath ( pathCopy ) ) ; <nl> - <nl> - if ( node . isInPartition ( true ) ) <nl> - return node . getContents ( ) . list ( tr , node . getPartitionSubpath ( ) ) ; <nl> - <nl> - final Subspace subdir = node . subspace . get ( SUB_DIR_KEY ) ; <nl> - <nl> - return AsyncUtil . collect ( <nl> - AsyncUtil . mapIterable ( tr . getRange ( subdir . range ( ) ) , <nl> - new Function < KeyValue , String > ( ) { <nl> - @ Override <nl> - public String apply ( KeyValue o ) { <nl> - return subdir . unpack ( o . getKey ( ) ) . getString ( 0 ) ; <nl> - } <nl> - } ) , tr . getExecutor ( ) ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> - } <nl> - } ) ; <nl> + return tcx . readAsync ( tr - > checkVersion ( tr ) <nl> + . thenComposeAsync ( ignore - > <nl> + new NodeFinder ( pathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) , <nl> + tr . getExecutor ( ) ) <nl> + . thenComposeAsync ( node - > { <nl> + if ( ! node . exists ( ) ) <nl> + throw new NoSuchDirectoryException ( toAbsolutePath ( pathCopy ) ) ; <nl> + <nl> + if ( node . isInPartition ( true ) ) <nl> + return node . getContents ( ) . list ( tr , node . getPartitionSubpath ( ) ) ; <nl> + <nl> + final Subspace subdir = node . subspace . get ( SUB_DIR_KEY ) ; <nl> + <nl> + return AsyncUtil . collect ( <nl> + AsyncUtil . mapIterable ( tr . getRange ( subdir . range ( ) ) , <nl> + kv - > subdir . unpack ( kv . getKey ( ) ) . getString ( 0 ) <nl> + ) , <nl> + tr . getExecutor ( ) <nl> + ) ; <nl> + } , tr . getExecutor ( ) ) <nl> + ) ; <nl> } <nl> <nl> / * * <nl> public String apply ( KeyValue o ) { <nl> * / <nl> @ Override <nl> public CompletableFuture < Boolean > exists ( ReadTransactionContext tcx ) { <nl> - return CompletableFuture . completedFuture ( true ) ; <nl> + return AsyncUtil . 
READY_TRUE ; <nl> } <nl> <nl> / * * <nl> public String apply ( KeyValue o ) { <nl> * / <nl> @ Override <nl> public CompletableFuture < Boolean > exists ( final ReadTransactionContext tcx , final List < String > path ) { <nl> - final List < String > pathCopy = new ArrayList < String > ( path ) ; <nl> - <nl> - return tcx . readAsync ( new Function < ReadTransaction , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( final ReadTransaction tr ) { <nl> - return checkVersion ( tr ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < Node > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Node > apply ( Void ignore ) { <nl> - return new NodeFinder ( pathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ; <nl> - } ; <nl> - } , tr . getExecutor ( ) ) <nl> - . thenComposeAsync ( new Function < Node , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( Node node ) { <nl> - if ( ! node . exists ( ) ) <nl> - / / return new ReadyFuture < Boolean > ( false ) ; <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - else if ( node . isInPartition ( false ) ) <nl> - return node . getContents ( ) . exists ( tr , node . getPartitionSubpath ( ) ) ; <nl> - <nl> - / / return new ReadyFuture < Boolean > ( true ) ; <nl> - return CompletableFuture . completedFuture ( true ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> - } <nl> - } ) ; <nl> + final List < String > pathCopy = new ArrayList < > ( path ) ; <nl> + <nl> + return tcx . readAsync ( tr - > checkVersion ( tr ) <nl> + . thenComposeAsync ( ignore - > <nl> + new NodeFinder ( pathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) , <nl> + tr . getExecutor ( ) ) <nl> + . thenComposeAsync ( node - > { <nl> + if ( ! node . exists ( ) ) <nl> + return AsyncUtil . READY_FALSE ; <nl> + else if ( node . isInPartition ( false ) ) <nl> + return node . getContents ( ) . exists ( tr , node . getPartitionSubpath ( ) ) ; <nl> + <nl> + return AsyncUtil . READY_TRUE ; <nl> + } , tr . getExecutor ( ) ) ) ; <nl> } <nl> <nl> / / <nl> private Subspace nodeWithPrefix ( byte [ ] prefix ) { <nl> <nl> return tr . getRange ( nodeSubspace . range ( ) . begin , ByteArrayUtil . join ( nodeSubspace . pack ( key ) , new byte [ ] { 0x00 } ) , 1 , true ) <nl> . asList ( ) <nl> - . thenApply ( new Function < List < KeyValue > , Subspace > ( ) { <nl> - @ Override <nl> - public Subspace apply ( List < KeyValue > results ) { <nl> - if ( results . size ( ) > 0 ) { <nl> - byte [ ] resultKey = results . get ( 0 ) . getKey ( ) ; <nl> - byte [ ] prevPrefix = nodeSubspace . unpack ( resultKey ) . getBytes ( 0 ) ; <nl> - if ( ByteArrayUtil . startsWith ( key , prevPrefix ) ) { <nl> - return nodeWithPrefix ( prevPrefix ) ; <nl> - } <nl> + . thenApply ( results - > { <nl> + if ( results . size ( ) > 0 ) { <nl> + byte [ ] resultKey = results . get ( 0 ) . getKey ( ) ; <nl> + byte [ ] prevPrefix = nodeSubspace . unpack ( resultKey ) . getBytes ( 0 ) ; <nl> + if ( ByteArrayUtil . 
startsWith ( key , prevPrefix ) ) { <nl> + return nodeWithPrefix ( prevPrefix ) ; <nl> } <nl> - <nl> - return null ; <nl> } <nl> + <nl> + return null ; <nl> } ) ; <nl> } <nl> <nl> private DirectorySubspace contentsOfNode ( Subspace node , List < String > path , byte [ <nl> } <nl> <nl> private CompletableFuture < Boolean > removeInternal ( final TransactionContext tcx , final List < String > path , final boolean mustExist ) { <nl> - final List < String > pathCopy = new ArrayList < String > ( path ) ; <nl> + final List < String > pathCopy = new ArrayList < > ( path ) ; <nl> <nl> - return tcx . runAsync ( new Function < Transaction , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( final Transaction tr ) { <nl> - return checkOrWriteVersion ( tr ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < Node > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Node > apply ( Void ignore ) { <nl> - if ( pathCopy . size ( ) = = 0 ) <nl> - throw new DirectoryException ( " The root directory cannot be removed . " , toAbsolutePath ( pathCopy ) ) ; <nl> - <nl> - return new NodeFinder ( pathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) <nl> - . thenComposeAsync ( new Function < Node , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( Node node ) { <nl> - if ( ! node . exists ( ) ) { <nl> - if ( mustExist ) <nl> - throw new NoSuchDirectoryException ( toAbsolutePath ( pathCopy ) ) ; <nl> - else <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - } <nl> + return tcx . runAsync ( tr - > checkOrWriteVersion ( tr ) . thenComposeAsync ( ignore - > { <nl> + if ( pathCopy . size ( ) = = 0 ) <nl> + throw new DirectoryException ( " The root directory cannot be removed . " , toAbsolutePath ( pathCopy ) ) ; <nl> <nl> - if ( node . isInPartition ( false ) ) <nl> - return node . getContents ( ) . getDirectoryLayer ( ) . removeInternal ( tr , node . getPartitionSubpath ( ) , mustExist ) ; <nl> - else { <nl> - ArrayList < CompletableFuture < Void > > futures = new ArrayList < CompletableFuture < Void > > ( ) ; <nl> - futures . add ( removeRecursive ( tr , node . subspace ) ) ; <nl> - futures . add ( removeFromParent ( tr , pathCopy ) ) ; <nl> + return new NodeFinder ( pathCopy ) . find ( tr ) . thenComposeAsync ( new NodeMetadataLoader ( tr ) , tr . getExecutor ( ) ) ; <nl> + } , tr . getExecutor ( ) ) <nl> + . thenComposeAsync ( node - > { <nl> + if ( ! node . exists ( ) ) { <nl> + if ( mustExist ) <nl> + throw new NoSuchDirectoryException ( toAbsolutePath ( pathCopy ) ) ; <nl> + else <nl> + return AsyncUtil . READY_FALSE ; <nl> + } <nl> <nl> - return AsyncUtil . tag ( AsyncUtil . whenAll ( futures ) , true ) ; <nl> - } <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> + if ( node . isInPartition ( false ) ) <nl> + return node . getContents ( ) . getDirectoryLayer ( ) . removeInternal ( tr , node . getPartitionSubpath ( ) , mustExist ) ; <nl> + else { <nl> + ArrayList < CompletableFuture < Void > > futures = new ArrayList < > ( ) ; <nl> + futures . add ( removeRecursive ( tr , node . subspace ) ) ; <nl> + futures . add ( removeFromParent ( tr , pathCopy ) ) ; <nl> + return AsyncUtil . tag ( AsyncUtil . whenAll ( futures ) , true ) ; <nl> } <nl> - } ) ; <nl> + } , tr . 
getExecutor ( ) ) ) ; <nl> } <nl> <nl> private CompletableFuture < Void > removeFromParent ( final Transaction tr , final List < String > path ) { <nl> return new NodeFinder ( PathUtil . popBack ( path ) ) . find ( tr ) <nl> - . thenApply ( new Function < Node , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Node parent ) { <nl> - tr . clear ( parent . subspace . get ( SUB_DIR_KEY ) . get ( getLast ( path ) ) . getKey ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + . thenAccept ( parent - > tr . clear ( parent . subspace . get ( SUB_DIR_KEY ) . get ( getLast ( path ) ) . getKey ( ) ) ) ; <nl> } <nl> <nl> private CompletableFuture < Void > removeRecursive ( final Transaction tr , final Subspace node ) { <nl> public Void apply ( Node parent ) { <nl> tr . clear ( Range . startsWith ( nodeSubspace . unpack ( node . getKey ( ) ) . getBytes ( 0 ) ) ) ; <nl> tr . clear ( node . range ( ) ) ; <nl> <nl> - return AsyncUtil . whileTrue ( new Supplier < CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > get ( ) { <nl> - CompletableFuture < Void > subdirRemoveFuture ; <nl> - if ( rangeItr . onHasNext ( ) . isDone ( ) & & rangeItr . hasNext ( ) ) <nl> - subdirRemoveFuture = removeRecursive ( tr , nodeWithPrefix ( rangeItr . next ( ) . getValue ( ) ) ) ; <nl> - else <nl> - subdirRemoveFuture = CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . whileTrue ( ( ) - > { <nl> + CompletableFuture < Void > subdirRemoveFuture ; <nl> + if ( rangeItr . onHasNext ( ) . isDone ( ) & & rangeItr . hasNext ( ) ) <nl> + subdirRemoveFuture = removeRecursive ( tr , nodeWithPrefix ( rangeItr . next ( ) . getValue ( ) ) ) ; <nl> + else <nl> + subdirRemoveFuture = AsyncUtil . DONE ; <nl> <nl> - return subdirRemoveFuture <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( Void ignore ) { <nl> - return rangeItr . onHasNext ( ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> - } <nl> + return subdirRemoveFuture . thenCompose ( ignore - > rangeItr . onHasNext ( ) ) ; <nl> } , tr . getExecutor ( ) ) ; <nl> } <nl> <nl> public Void apply ( Node parent ) { <nl> / / allocated prefix ( including the root node ) . This means that it neither <nl> / / contains any other prefix nor is contained by any other prefix . <nl> if ( prefix = = null | | prefix . length = = 0 ) <nl> - / / return new ReadyFuture < Boolean > ( false ) ; <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - <nl> - return nodeContainingKey ( tr , prefix ) <nl> - . thenComposeAsync ( new Function < Subspace , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( Subspace node ) { <nl> - if ( node ! = null ) <nl> - / / return new ReadyFuture < Boolean > ( false ) ; <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - <nl> - AsyncIterator < KeyValue > it = tr . getRange ( nodeSubspace . pack ( prefix ) , nodeSubspace . pack ( ByteArrayUtil . strinc ( prefix ) ) , 1 ) . iterator ( ) ; <nl> - return it . onHasNext ( ) <nl> - . thenApply ( new Function < Boolean , Boolean > ( ) { <nl> - @ Override <nl> - public Boolean apply ( Boolean hasNext ) { <nl> - return ! hasNext ; <nl> - } <nl> - } ) ; <nl> - } <nl> + return AsyncUtil . READY_FALSE ; <nl> + <nl> + return nodeContainingKey ( tr , prefix ) . thenComposeAsync ( node - > { <nl> + if ( node ! = null ) <nl> + return AsyncUtil . 
READY_FALSE ; <nl> + <nl> + final AsyncIterator < KeyValue > it = tr . getRange ( nodeSubspace . pack ( prefix ) , nodeSubspace . pack ( ByteArrayUtil . strinc ( prefix ) ) , 1 ) . iterator ( ) ; <nl> + return it . onHasNext ( ) . thenApply ( hasNext - > ! hasNext ) ; <nl> } , tr . getExecutor ( ) ) ; <nl> } <nl> <nl> public Boolean apply ( Boolean hasNext ) { <nl> final byte [ ] layer , <nl> final byte [ ] prefix , <nl> final boolean allowCreate , <nl> - final boolean allowOpen ) <nl> - { <nl> - final List < String > pathCopy = new ArrayList < String > ( path ) ; <nl> + final boolean allowOpen ) { <nl> + final List < String > pathCopy = new ArrayList < > ( path ) ; <nl> <nl> if ( prefix ! = null & & ! allowManualPrefixes ) { <nl> String errorMessage ; <nl> public Boolean apply ( Boolean hasNext ) { <nl> else <nl> errorMessage = " Cannot specify a prefix in a partition . " ; <nl> <nl> - CompletableFuture < DirectorySubspace > future = new CompletableFuture < DirectorySubspace > ( ) ; <nl> + CompletableFuture < DirectorySubspace > future = new CompletableFuture < > ( ) ; <nl> future . completeExceptionally ( new IllegalArgumentException ( errorMessage ) ) ; <nl> return future ; <nl> } <nl> <nl> - return checkVersion ( rtr ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < Node > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Node > apply ( Void ignore ) { <nl> - / / Root directory contains node metadata and so may not be opened . <nl> - if ( pathCopy . size ( ) = = 0 ) { <nl> - throw new IllegalArgumentException ( " The root directory may not be opened . " ) ; <nl> - } <nl> + return checkVersion ( rtr ) . thenComposeAsync ( ignore - > { <nl> + / / Root directory contains node metadata and so may not be opened . <nl> + if ( pathCopy . size ( ) = = 0 ) { <nl> + throw new IllegalArgumentException ( " The root directory may not be opened . " ) ; <nl> + } <nl> <nl> - return new NodeFinder ( pathCopy ) . find ( rtr ) . thenComposeAsync ( new NodeMetadataLoader ( rtr ) , rtr . getExecutor ( ) ) ; <nl> + return new NodeFinder ( pathCopy ) . find ( rtr ) . thenComposeAsync ( new NodeMetadataLoader ( rtr ) , rtr . getExecutor ( ) ) ; <nl> + } , rtr . getExecutor ( ) ) <nl> + . thenComposeAsync ( existingNode - > { <nl> + if ( existingNode . exists ( ) ) { <nl> + if ( existingNode . isInPartition ( false ) ) { <nl> + List < String > subpath = existingNode . getPartitionSubpath ( ) ; <nl> + DirectoryLayer directoryLayer = existingNode . getContents ( ) . getDirectoryLayer ( ) ; <nl> + return directoryLayer . createOrOpenInternal ( <nl> + rtr , tr , subpath , layer , prefix , allowCreate , allowOpen ) ; <nl> } <nl> - } , rtr . getExecutor ( ) ) <nl> - . thenComposeAsync ( new Function < Node , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( final Node existingNode ) { <nl> - if ( existingNode . exists ( ) ) { <nl> - if ( existingNode . isInPartition ( false ) ) { <nl> - List < String > subpath = existingNode . getPartitionSubpath ( ) ; <nl> - DirectoryLayer directoryLayer = existingNode . getContents ( ) . getDirectoryLayer ( ) ; <nl> - return directoryLayer . createOrOpenInternal ( <nl> - rtr , tr , subpath , layer , prefix , allowCreate , allowOpen ) ; <nl> - } <nl> <nl> - DirectorySubspace opened = openInternal ( pathCopy , layer , existingNode , allowOpen ) ; <nl> - return CompletableFuture . 
completedFuture ( opened ) ; <nl> - } <nl> - else <nl> - return createInternal ( tr , pathCopy , layer , prefix , allowCreate ) ; <nl> - } <nl> - } , rtr . getExecutor ( ) ) ; <nl> + DirectorySubspace opened = openInternal ( pathCopy , layer , existingNode , allowOpen ) ; <nl> + return CompletableFuture . completedFuture ( opened ) ; <nl> + } <nl> + else <nl> + return createInternal ( tr , pathCopy , layer , prefix , allowCreate ) ; <nl> + } , rtr . getExecutor ( ) ) ; <nl> } <nl> <nl> private DirectorySubspace openInternal ( final List < String > path , <nl> final byte [ ] layer , <nl> final Node existingNode , <nl> - final boolean allowOpen ) <nl> - { <nl> + final boolean allowOpen ) { <nl> if ( ! allowOpen ) { <nl> throw new DirectoryAlreadyExistsException ( toAbsolutePath ( path ) ) ; <nl> } <nl> private DirectorySubspace openInternal ( final List < String > path , <nl> final List < String > path , <nl> final byte [ ] layer , <nl> final byte [ ] prefix , <nl> - final boolean allowCreate ) <nl> - { <nl> + final boolean allowCreate ) { <nl> if ( ! allowCreate ) { <nl> throw new NoSuchDirectoryException ( toAbsolutePath ( path ) ) ; <nl> } <nl> <nl> - return checkOrWriteVersion ( tr ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < byte [ ] > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < byte [ ] > apply ( Void ignore ) { <nl> - if ( prefix = = null ) { <nl> - return allocator . allocate ( tr ) <nl> - . thenComposeAsync ( new Function < byte [ ] , CompletableFuture < byte [ ] > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < byte [ ] > apply ( byte [ ] allocated ) { <nl> - final byte [ ] finalPrefix = ByteArrayUtil . join ( contentSubspace . getKey ( ) , allocated ) ; <nl> - return tr . getRange ( Range . startsWith ( finalPrefix ) , 1 ) <nl> - . asList ( ) <nl> - . thenApply ( new Function < List < KeyValue > , byte [ ] > ( ) { <nl> - @ Override <nl> - public byte [ ] apply ( List < KeyValue > results ) { <nl> - if ( results . size ( ) > 0 ) { <nl> - throw new IllegalStateException ( " The database has keys stored at the prefix chosen by the automatic " + <nl> - " prefix allocator : " + ByteArrayUtil . printable ( finalPrefix ) + " . " ) ; <nl> - } <nl> - <nl> - return finalPrefix ; <nl> - } <nl> - } ) ; <nl> + return checkOrWriteVersion ( tr ) . thenComposeAsync ( ignore - > { <nl> + if ( prefix = = null ) { <nl> + return allocator . allocate ( tr ) . thenComposeAsync ( allocated - > { <nl> + final byte [ ] finalPrefix = ByteArrayUtil . join ( contentSubspace . getKey ( ) , allocated ) ; <nl> + return tr . getRange ( Range . startsWith ( finalPrefix ) , 1 ) . iterator ( ) . onHasNext ( ) . thenApply ( hasAny - > { <nl> + if ( hasAny ) { <nl> + throw new IllegalStateException ( " The database has keys stored at the prefix chosen by the automatic " + <nl> + " prefix allocator : " + ByteArrayUtil . printable ( finalPrefix ) + " . " ) ; <nl> } <nl> - } , tr . getExecutor ( ) ) ; <nl> - } <nl> - else <nl> - return CompletableFuture . completedFuture ( prefix ) ; <nl> - / / return new ReadyFuture < byte [ ] > ( prefix ) ; <nl> + return finalPrefix ; <nl> + } ) ; <nl> + } , tr . getExecutor ( ) ) ; <nl> } <nl> + else <nl> + return CompletableFuture . completedFuture ( prefix ) ; <nl> } , tr . getExecutor ( ) ) <nl> - . 
thenComposeAsync ( new Function < byte [ ] , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( final byte [ ] actualPrefix ) { <nl> - return isPrefixFree ( prefix = = null ? tr . snapshot ( ) : tr , actualPrefix ) <nl> - . thenComposeAsync ( new Function < Boolean , CompletableFuture < Subspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Subspace > apply ( Boolean prefixFree ) { <nl> - if ( ! prefixFree ) { <nl> - if ( prefix = = null ) { <nl> - throw new IllegalStateException ( " The directory layer has manually allocated prefixes that conflict " + <nl> - " with the automatic prefix allocator . " ) ; <nl> - } <nl> - else <nl> - throw new IllegalArgumentException ( " Prefix already in use : " + ByteArrayUtil . printable ( actualPrefix ) + " . " ) ; <nl> - } <nl> - else if ( path . size ( ) > 1 ) { <nl> - return createOrOpen ( tr , PathUtil . popBack ( path ) ) <nl> - . thenApply ( new Function < DirectorySubspace , Subspace > ( ) { <nl> - @ Override <nl> - public Subspace apply ( DirectorySubspace dir ) { <nl> - return nodeWithPrefix ( dir . getKey ( ) ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - else <nl> - return CompletableFuture . completedFuture ( rootNode ) ; <nl> + . thenComposeAsync ( actualPrefix - > isPrefixFree ( prefix = = null ? tr . snapshot ( ) : tr , actualPrefix ) <nl> + . thenComposeAsync ( prefixFree - > { <nl> + if ( ! prefixFree ) { <nl> + if ( prefix = = null ) { <nl> + throw new IllegalStateException ( " The directory layer has manually allocated prefixes that conflict " + <nl> + " with the automatic prefix allocator . " ) ; <nl> } <nl> - } , tr . getExecutor ( ) ) <nl> - . thenApply ( new Function < Subspace , DirectorySubspace > ( ) { <nl> - @ Override <nl> - public DirectorySubspace apply ( Subspace parentNode ) { <nl> - if ( parentNode = = null ) <nl> - throw new IllegalStateException ( " The parent directory does not exist . " ) ; / / Shouldn ' t happen <nl> - Subspace node = nodeWithPrefix ( actualPrefix ) ; <nl> - tr . set ( parentNode . get ( SUB_DIR_KEY ) . get ( getLast ( path ) ) . getKey ( ) , actualPrefix ) ; <nl> - tr . set ( node . get ( LAYER_KEY ) . getKey ( ) , layer ) ; <nl> - return contentsOfNode ( node , path , layer ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> + else <nl> + throw new IllegalArgumentException ( " Prefix already in use : " + ByteArrayUtil . printable ( actualPrefix ) + " . " ) ; <nl> + } <nl> + else if ( path . size ( ) > 1 ) { <nl> + return createOrOpen ( tr , PathUtil . popBack ( path ) ) . thenApply ( dir - > nodeWithPrefix ( dir . getKey ( ) ) ) ; <nl> + } <nl> + else <nl> + return CompletableFuture . completedFuture ( rootNode ) ; <nl> + } , tr . getExecutor ( ) ) <nl> + . thenApplyAsync ( parentNode - > { <nl> + if ( parentNode = = null ) <nl> + throw new IllegalStateException ( " The parent directory does not exist . " ) ; / / Shouldn ' t happen <nl> + Subspace node = nodeWithPrefix ( actualPrefix ) ; <nl> + tr . set ( parentNode . get ( SUB_DIR_KEY ) . get ( getLast ( path ) ) . getKey ( ) , actualPrefix ) ; <nl> + tr . set ( node . get ( LAYER_KEY ) . getKey ( ) , layer ) ; <nl> + return contentsOfNode ( node , path , layer ) ; <nl> + } , tr . getExecutor ( ) ) , <nl> + tr . getExecutor ( ) ) ; <nl> } <nl> <nl> / / <nl> public Void apply ( byte [ ] versionBytes ) { <nl> ByteBuffer versionBuf = ByteBuffer . wrap ( versionBytes ) ; <nl> versionBuf . order ( ByteOrder . 
LITTLE_ENDIAN ) ; <nl> <nl> - Integer version [ ] = new Integer [ 3 ] ; <nl> + Integer [ ] version = new Integer [ 3 ] ; <nl> for ( int i = 0 ; i < version . length ; + + i ) <nl> version [ i ] = versionBuf . getInt ( ) ; <nl> <nl> protected void throwOnError ( Integer [ ] version , String dirVersion , <nl> private Node node ; <nl> private List < String > currentPath ; <nl> <nl> - public NodeFinder ( List < String > path ) { <nl> + NodeFinder ( List < String > path ) { <nl> this . path = path ; <nl> } <nl> <nl> public CompletableFuture < Node > find ( final ReadTransaction tr ) { <nl> index = 0 ; <nl> node = new Node ( rootNode , currentPath , path ) ; <nl> - currentPath = new ArrayList < String > ( ) ; <nl> - <nl> - return AsyncUtil . whileTrue ( new Supplier < CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > get ( ) { <nl> - if ( index = = path . size ( ) ) <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - <nl> - return tr . get ( node . subspace . get ( SUB_DIR_KEY ) . get ( path . get ( index ) ) . getKey ( ) ) <nl> - . thenComposeAsync ( new Function < byte [ ] , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( byte [ ] key ) { <nl> - currentPath . add ( path . get ( index ) ) ; <nl> - node = new Node ( nodeWithPrefix ( key ) , currentPath , path ) ; <nl> - <nl> - if ( ! node . exists ( ) ) <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - <nl> - return node . loadMetadata ( tr ) <nl> - . thenApply ( new Function < Node , Boolean > ( ) { <nl> - @ Override <nl> - public Boolean apply ( Node ignore ) { <nl> - + + index ; <nl> - return ! Arrays . equals ( node . layer , DirectoryLayer . PARTITION_LAYER ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> - } <nl> + currentPath = new ArrayList < > ( ) ; <nl> + <nl> + return AsyncUtil . whileTrue ( ( ) - > { <nl> + if ( index = = path . size ( ) ) <nl> + return AsyncUtil . READY_FALSE ; <nl> + <nl> + return tr . get ( node . subspace . get ( SUB_DIR_KEY ) . get ( path . get ( index ) ) . getKey ( ) ) . thenComposeAsync ( key - > { <nl> + currentPath . add ( path . get ( index ) ) ; <nl> + node = new Node ( nodeWithPrefix ( key ) , currentPath , path ) ; <nl> + <nl> + if ( ! node . exists ( ) ) <nl> + return AsyncUtil . READY_FALSE ; <nl> + <nl> + return node . loadMetadata ( tr ) . thenApply ( ignore - > { <nl> + + + index ; <nl> + return ! Arrays . equals ( node . layer , DirectoryLayer . PARTITION_LAYER ) ; <nl> + } ) ; <nl> + } , tr . getExecutor ( ) ) ; <nl> } , tr . getExecutor ( ) ) <nl> - . thenApply ( new Function < Void , Node > ( ) { <nl> - @ Override <nl> - public Node apply ( Void ignore ) { <nl> - return node ; <nl> - } <nl> - } ) ; <nl> + . thenApply ( ignore - > node ) ; <nl> } <nl> } <nl> <nl> private static class NodeMetadataLoader implements Function < Node , CompletableFuture < Node > > { <nl> private final ReadTransaction tr ; <nl> <nl> - public NodeMetadataLoader ( ReadTransaction tr ) { <nl> + NodeMetadataLoader ( ReadTransaction tr ) { <nl> this . tr = tr ; <nl> } <nl> <nl> public NodeMetadataLoader ( ReadTransaction tr ) { <nl> <nl> private boolean loadedMetadata ; <nl> <nl> - public Node ( Subspace subspace , List < String > path , List < String > targetPath ) { <nl> + Node ( Subspace subspace , List < String > path , List < String > targetPath ) { <nl> this . subspace = subspace ; <nl> this . path = path ; <nl> this . 
targetPath = targetPath ; <nl> public boolean exists ( ) { <nl> } <nl> <nl> return tr . get ( subspace . pack ( new Tuple ( ) . add ( LAYER_KEY ) ) ) <nl> - . thenApply ( new Function < byte [ ] , Node > ( ) { <nl> - @ Override <nl> - public Node apply ( byte [ ] value ) { <nl> - layer = value ; <nl> - loadedMetadata = true ; <nl> - return Node . this ; <nl> - } <nl> + . thenApply ( value - > { <nl> + layer = value ; <nl> + loadedMetadata = true ; <nl> + return Node . this ; <nl> } ) ; <nl> } <nl> <nl> public DirectorySubspace getContents ( ) { <nl> private long candidate ; <nl> private boolean restart ; <nl> <nl> - public PrefixFinder ( ) { <nl> + PrefixFinder ( ) { <nl> this . random = new Random ( ) ; <nl> this . windowStart = 0 ; <nl> } <nl> <nl> public CompletableFuture < byte [ ] > find ( final Transaction tr , final HighContentionAllocator allocator ) { <nl> - return AsyncUtil . whileTrue ( new Supplier < CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > get ( ) { <nl> - final AsyncIterator < KeyValue > rangeItr = tr . snapshot ( ) . getRange ( allocator . counters . range ( ) , 1 , true ) . iterator ( ) ; <nl> - return rangeItr . onHasNext ( ) <nl> - . thenApply ( new Function < Boolean , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Boolean hasNext ) { <nl> - if ( hasNext ) { <nl> - KeyValue kv = rangeItr . next ( ) ; <nl> - windowStart = allocator . counters . unpack ( kv . getKey ( ) ) . getLong ( 0 ) ; <nl> - } <nl> - <nl> - return null ; <nl> - } <nl> - } ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Void ignore ) { <nl> - return chooseWindow ( tr , allocator ) ; <nl> - } <nl> - } , tr . getExecutor ( ) ) <nl> - . thenComposeAsync ( new Function < Void , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( Void ignore ) { <nl> - return choosePrefix ( tr , allocator ) ; / / false exits the loop ( i . e . we have a valid prefix ) <nl> - } <nl> - } , tr . getExecutor ( ) ) ; <nl> - } <nl> + return AsyncUtil . whileTrue ( ( ) - > { <nl> + final AsyncIterator < KeyValue > rangeItr = tr . snapshot ( ) . getRange ( allocator . counters . range ( ) , 1 , true ) . iterator ( ) ; <nl> + return rangeItr . onHasNext ( ) . thenApply ( hasNext - > { <nl> + if ( hasNext ) { <nl> + KeyValue kv = rangeItr . next ( ) ; <nl> + windowStart = allocator . counters . unpack ( kv . getKey ( ) ) . getLong ( 0 ) ; <nl> + } <nl> + <nl> + return null ; <nl> + } ) <nl> + . thenComposeAsync ( ignore - > chooseWindow ( tr , allocator ) , tr . getExecutor ( ) ) <nl> + . thenComposeAsync ( ignore - > choosePrefix ( tr , allocator ) , tr . getExecutor ( ) ) ; <nl> } , tr . getExecutor ( ) ) <nl> - . thenApply ( new Function < Void , byte [ ] > ( ) { <nl> - @ Override <nl> - public byte [ ] apply ( Void ignore ) { <nl> - return Tuple . from ( candidate ) . pack ( ) ; <nl> - } <nl> - } ) ; <nl> + . thenApply ( ignore - > Tuple . from ( candidate ) . pack ( ) ) ; <nl> } <nl> <nl> public CompletableFuture < Void > chooseWindow ( final Transaction tr , final HighContentionAllocator allocator ) { <nl> final long initialWindowStart = windowStart ; <nl> - return AsyncUtil . whileTrue ( new Supplier < CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > get ( ) { <nl> - final byte [ ] counterKey = allocator . counters . get ( windowStart ) . 
getKey ( ) ; <nl> - <nl> - Range oldCounters = new Range ( allocator . counters . getKey ( ) , counterKey ) ; <nl> - Range oldAllocations = new Range ( allocator . recent . getKey ( ) , allocator . recent . get ( windowStart ) . getKey ( ) ) ; <nl> - <nl> - CompletableFuture < byte [ ] > newCountRead ; <nl> - / / SOMEDAY : synchronize on something transaction local <nl> - synchronized ( HighContentionAllocator . class ) { <nl> - if ( windowStart > initialWindowStart ) { <nl> - tr . clear ( oldCounters ) ; <nl> - tr . options ( ) . setNextWriteNoWriteConflictRange ( ) ; <nl> - tr . clear ( oldAllocations ) ; <nl> - } <nl> + return AsyncUtil . whileTrue ( ( ) - > { <nl> + final byte [ ] counterKey = allocator . counters . get ( windowStart ) . getKey ( ) ; <nl> <nl> - tr . mutate ( MutationType . ADD , counterKey , LITTLE_ENDIAN_LONG_ONE ) ; <nl> - newCountRead = tr . snapshot ( ) . get ( counterKey ) ; <nl> + Range oldCounters = new Range ( allocator . counters . getKey ( ) , counterKey ) ; <nl> + Range oldAllocations = new Range ( allocator . recent . getKey ( ) , allocator . recent . get ( windowStart ) . getKey ( ) ) ; <nl> + <nl> + CompletableFuture < byte [ ] > newCountRead ; <nl> + / / SOMEDAY : synchronize on something transaction local <nl> + synchronized ( HighContentionAllocator . class ) { <nl> + if ( windowStart > initialWindowStart ) { <nl> + tr . clear ( oldCounters ) ; <nl> + tr . options ( ) . setNextWriteNoWriteConflictRange ( ) ; <nl> + tr . clear ( oldAllocations ) ; <nl> } <nl> <nl> - return newCountRead <nl> - . thenApply ( new Function < byte [ ] , Boolean > ( ) { <nl> - @ Override <nl> - public Boolean apply ( byte [ ] newCountBytes ) { <nl> - long newCount = newCountBytes = = null ? 0 : unpackLittleEndian ( newCountBytes ) ; <nl> - windowSize = getWindowSize ( windowStart ) ; <nl> - if ( newCount * 2 > = windowSize ) { <nl> - windowStart + = windowSize ; <nl> - return true ; <nl> - } <nl> - <nl> - return false ; / / exit the loop <nl> - } <nl> - } ) ; <nl> + tr . mutate ( MutationType . ADD , counterKey , LITTLE_ENDIAN_LONG_ONE ) ; <nl> + newCountRead = tr . snapshot ( ) . get ( counterKey ) ; <nl> } <nl> + <nl> + return newCountRead . thenApply ( newCountBytes - > { <nl> + long newCount = newCountBytes = = null ? 0 : unpackLittleEndian ( newCountBytes ) ; <nl> + windowSize = getWindowSize ( windowStart ) ; <nl> + if ( newCount * 2 > = windowSize ) { <nl> + windowStart + = windowSize ; <nl> + return true ; <nl> + } <nl> + <nl> + return false ; / / exit the loop <nl> + } ) ; <nl> } , tr . getExecutor ( ) ) ; <nl> } <nl> <nl> public CompletableFuture < Boolean > choosePrefix ( final Transaction tr , final HighContentionAllocator allocator ) { <nl> restart = false ; <nl> - return AsyncUtil . whileTrue ( new Supplier < CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > get ( ) { <nl> - / / As of the snapshot being read from , the window is less than half <nl> - / / full , so this should be expected to take 2 tries . Under high <nl> - / / contention ( and when the window advances ) , there is an additional <nl> - / / subsequent risk of conflict for this transaction . <nl> - candidate = windowStart + random . nextInt ( windowSize ) ; <nl> - final byte [ ] allocationKey = allocator . recent . get ( candidate ) . getKey ( ) ; <nl> - Range countersRange = allocator . counters . 
range ( ) ; <nl> - <nl> - AsyncIterable < KeyValue > counterRange ; <nl> - CompletableFuture < byte [ ] > allocationTemp ; <nl> - / / SOMEDAY : synchronize on something transaction local <nl> - synchronized ( HighContentionAllocator . class ) { <nl> - counterRange = tr . snapshot ( ) . getRange ( countersRange , 1 , true ) ; <nl> - allocationTemp = tr . get ( allocationKey ) ; <nl> - tr . options ( ) . setNextWriteNoWriteConflictRange ( ) ; <nl> - tr . set ( allocationKey , EMPTY_BYTES ) ; <nl> + return AsyncUtil . whileTrue ( ( ) - > { <nl> + / / As of the snapshot being read from , the window is less than half <nl> + / / full , so this should be expected to take 2 tries . Under high <nl> + / / contention ( and when the window advances ) , there is an additional <nl> + / / subsequent risk of conflict for this transaction . <nl> + candidate = windowStart + random . nextInt ( windowSize ) ; <nl> + final byte [ ] allocationKey = allocator . recent . get ( candidate ) . getKey ( ) ; <nl> + Range countersRange = allocator . counters . range ( ) ; <nl> + <nl> + AsyncIterable < KeyValue > counterRange ; <nl> + CompletableFuture < byte [ ] > allocationTemp ; <nl> + / / SOMEDAY : synchronize on something transaction local <nl> + synchronized ( HighContentionAllocator . class ) { <nl> + counterRange = tr . snapshot ( ) . getRange ( countersRange , 1 , true ) ; <nl> + allocationTemp = tr . get ( allocationKey ) ; <nl> + tr . options ( ) . setNextWriteNoWriteConflictRange ( ) ; <nl> + tr . set ( allocationKey , EMPTY_BYTES ) ; <nl> + } <nl> + <nl> + final CompletableFuture < List < KeyValue > > lastCounter = counterRange . asList ( ) ; <nl> + final CompletableFuture < byte [ ] > allocation = allocationTemp ; <nl> + <nl> + return lastCounter . thenCombineAsync ( allocation , ( result , allocationValue ) - > { <nl> + long currentWindowStart = 0 ; <nl> + if ( ! result . isEmpty ( ) ) { <nl> + currentWindowStart = allocator . counters . unpack ( result . get ( 0 ) . getKey ( ) ) . getLong ( 0 ) ; <nl> } <nl> <nl> - final CompletableFuture < List < KeyValue > > lastCounter = counterRange . asList ( ) ; <nl> - final CompletableFuture < byte [ ] > allocation = allocationTemp ; <nl> - <nl> - List < CompletableFuture < Void > > futures = new ArrayList < CompletableFuture < Void > > ( ) ; <nl> - futures . add ( AsyncUtil . success ( lastCounter ) ) ; <nl> - futures . add ( AsyncUtil . success ( allocation ) ) ; <nl> - <nl> - return AsyncUtil . whenAll ( futures ) <nl> - . thenApply ( new Function < Void , Boolean > ( ) { <nl> - @ Override <nl> - public Boolean apply ( Void ignore ) { <nl> - long currentWindowStart = 0 ; <nl> - List < KeyValue > result = lastCounter . join ( ) ; <nl> - if ( ! result . isEmpty ( ) ) { <nl> - currentWindowStart = allocator . counters . unpack ( result . get ( 0 ) . getKey ( ) ) . getLong ( 0 ) ; <nl> - } <nl> - <nl> - if ( currentWindowStart > windowStart ) { <nl> - restart = true ; <nl> - return false ; / / exit the loop and rerun the allocation from the beginning <nl> - } <nl> - <nl> - if ( allocation . join ( ) = = null ) { <nl> - tr . addWriteConflictKey ( allocationKey ) ; <nl> - return false ; / / exit the loop and return this candidate <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - } ) ; <nl> - } <nl> + if ( currentWindowStart > windowStart ) { <nl> + restart = true ; <nl> + return false ; / / exit the loop and rerun the allocation from the beginning <nl> + } <nl> + <nl> + if ( allocationValue = = null ) { <nl> + tr . 
addWriteConflictKey ( allocationKey ) ; <nl> + return false ; / / exit the loop and return this candidate <nl> + } <nl> + <nl> + return true ; <nl> + } , tr . getExecutor ( ) ) ; <nl> } , tr . getExecutor ( ) ) <nl> - . thenApply ( new Function < Void , Boolean > ( ) { <nl> - @ Override <nl> - public Boolean apply ( Void ignore ) { <nl> - return restart ; <nl> - } <nl> - } ) ; <nl> + . thenApply ( ignore - > restart ) ; <nl> } <nl> <nl> private static int getWindowSize ( long start ) { <nl> private static int getWindowSize ( long start ) { <nl> public final Subspace counters ; <nl> public final Subspace recent ; <nl> <nl> - public HighContentionAllocator ( Subspace subspace ) { <nl> + HighContentionAllocator ( Subspace subspace ) { <nl> this . counters = subspace . get ( 0 ) ; <nl> this . recent = subspace . get ( 1 ) ; <nl> } <nl> public HighContentionAllocator ( Subspace subspace ) { <nl> / * * <nl> * Returns a byte string that : <nl> * < ol > <nl> - * < li > has never and will never be returned by another call to this method on the same subspace < / li > <nl> - * < li > is nearly as short as possible given the above < / li > <nl> + * < li > has never and will never be returned by another call to this method on the same subspace < / li > <nl> + * < li > is nearly as short as possible given the above < / li > <nl> * < / ol > <nl> * / <nl> public CompletableFuture < byte [ ] > allocate ( final Transaction tr ) { <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectoryPartition . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectoryPartition . java <nl> <nl> this . parentDirectoryLayer = parentDirectoryLayer ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public Subspace get ( Object o ) { <nl> throw new UnsupportedOperationException ( " Cannot open subspace in the root of a directory partition . " ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public Subspace get ( Tuple name ) { <nl> throw new UnsupportedOperationException ( " Cannot open subspace in the root of a directory partition . " ) ; <nl> public Subspace get ( Tuple name ) { <nl> throw new UnsupportedOperationException ( " Cannot get key for the root of a directory partition . " ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public byte [ ] pack ( ) { <nl> throw new UnsupportedOperationException ( " Cannot pack keys using the root of a directory partition . 
" ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public byte [ ] pack ( Object o ) { <nl> throw new UnsupportedOperationException ( " Cannot pack keys using the root of a directory partition . " ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public byte [ ] pack ( Tuple tuple ) { <nl> throw new UnsupportedOperationException ( " Cannot pack keys using the root of a directory partition . " ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public Tuple unpack ( byte [ ] key ) { <nl> throw new UnsupportedOperationException ( " Cannot unpack keys using the root of a directory partition . " ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public Range range ( ) { <nl> throw new UnsupportedOperationException ( " Cannot get range for the root of a directory partition . " ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public Range range ( Tuple tuple ) { <nl> throw new UnsupportedOperationException ( " Cannot get range for the root of a directory partition . " ) ; <nl> public boolean contains ( byte [ ] key ) { <nl> throw new UnsupportedOperationException ( " Cannot check whether a key belongs to the root of a directory partition . " ) ; <nl> } <nl> <nl> - / * * <nl> - * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> - * <nl> - * @ throws UnsupportedOperationException <nl> - * / <nl> + / * * <nl> + * Raises an exception because DirectoryPartition cannot be used as a Subspace . <nl> + * <nl> + * @ throws UnsupportedOperationException <nl> + * / <nl> @ Override <nl> public Subspace subspace ( Tuple tuple ) { <nl> throw new UnsupportedOperationException ( " Cannot open subspace in the root of a directory partition . 
" ) ; <nl> DirectoryLayer getLayerForPath ( List < String > path ) { <nl> * @ param rhs the { @ code } Object to test for equality <nl> * @ return true if this is equal to { @ code rhs } <nl> * / <nl> - @ Override <nl> - public boolean equals ( Object rhs ) { <nl> - if ( this = = rhs ) { <nl> - return true ; <nl> - } <nl> - if ( rhs = = null | | getClass ( ) ! = rhs . getClass ( ) ) { <nl> - return false ; <nl> - } <nl> - <nl> - DirectoryPartition other = ( DirectoryPartition ) rhs ; <nl> - return ( getPath ( ) = = other . getPath ( ) | | getPath ( ) = = other . getPath ( ) ) & & <nl> + @ Override <nl> + public boolean equals ( Object rhs ) { <nl> + if ( this = = rhs ) { <nl> + return true ; <nl> + } <nl> + if ( rhs = = null | | getClass ( ) ! = rhs . getClass ( ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + DirectoryPartition other = ( DirectoryPartition ) rhs ; <nl> + return ( getPath ( ) = = other . getPath ( ) | | getPath ( ) . equals ( other . getPath ( ) ) ) & & <nl> parentDirectoryLayer . equals ( other . parentDirectoryLayer ) & & <nl> super . equals ( rhs ) ; <nl> - } <nl> + } <nl> + <nl> + / * * <nl> + * Computes a hash code compatible with this class ' s { @ link # equals ( Object ) equals ( ) } <nl> + * method . In particular , it computes a hash that is based off of the <nl> + * hash of the parent { @ link DirectoryLayer } and this partition ' s <nl> + * path , layer , and subspace prefix . <nl> + * <nl> + * @ return a hash compatible with this class ' s { @ code equals ( ) } method <nl> + * / <nl> + @ Override <nl> + public int hashCode ( ) { <nl> + / / The path , layer , and subspace prefix information comes from the super <nl> + / / class ' s hash code method . <nl> + return parentDirectoryLayer . hashCode ( ) ^ ( super . hashCode ( ) * 3209 ) ; <nl> + } <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectorySubspace . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectorySubspace . java <nl> <nl> * < / p > <nl> * / <nl> public class DirectorySubspace extends Subspace implements Directory { <nl> - private final List < String > path ; <nl> - private final byte [ ] layer ; <nl> - private final DirectoryLayer directoryLayer ; <nl> + private final List < String > path ; <nl> + private final byte [ ] layer ; <nl> + private final DirectoryLayer directoryLayer ; <nl> <nl> - DirectorySubspace ( List < String > path , byte [ ] prefix , DirectoryLayer directoryLayer ) { <nl> - this ( path , prefix , directoryLayer , EMPTY_BYTES ) ; <nl> - } <nl> + DirectorySubspace ( List < String > path , byte [ ] prefix , DirectoryLayer directoryLayer ) { <nl> + this ( path , prefix , directoryLayer , EMPTY_BYTES ) ; <nl> + } <nl> <nl> - DirectorySubspace ( List < String > path , byte [ ] prefix , DirectoryLayer directoryLayer , byte [ ] layer ) { <nl> - super ( prefix ) ; <nl> - this . path = path ; <nl> - this . layer = layer ; <nl> - this . directoryLayer = directoryLayer ; <nl> - } <nl> + DirectorySubspace ( List < String > path , byte [ ] prefix , DirectoryLayer directoryLayer , byte [ ] layer ) { <nl> + super ( prefix ) ; <nl> + this . path = path ; <nl> + this . layer = layer ; <nl> + this . directoryLayer = directoryLayer ; <nl> + } <nl> <nl> / * * <nl> * @ return a printable representation of this { @ code DirectorySubspace } <nl> * / <nl> - @ Override <nl> - public String toString ( ) { <nl> - return getClass ( ) . getSimpleName ( ) + ' ( ' + DirectoryUtil . 
pathStr ( path ) + " , " + printable ( getKey ( ) ) + ' ) ' ; <nl> - } <nl> + @ Override <nl> + public String toString ( ) { <nl> + return getClass ( ) . getSimpleName ( ) + ' ( ' + DirectoryUtil . pathStr ( path ) + " , " + printable ( getKey ( ) ) + ' ) ' ; <nl> + } <nl> <nl> / * * <nl> * Returns whether this { @ code DirectorySubspace } is equal to { @ code rhs } . <nl> public String toString ( ) { <nl> * @ param rhs the { @ code } Object to test for equality <nl> * @ return true if this is equal to { @ code rhs } <nl> * / <nl> - @ Override <nl> - public boolean equals ( Object rhs ) { <nl> - if ( this = = rhs ) { <nl> - return true ; <nl> - } <nl> - if ( rhs = = null | | getClass ( ) ! = rhs . getClass ( ) ) { <nl> - return false ; <nl> - } <nl> - DirectorySubspace other = ( DirectorySubspace ) rhs ; <nl> - return ( path = = other . path | | path . equals ( other . path ) ) & & <nl> - Arrays . equals ( layer , other . layer ) & & <nl> + @ Override <nl> + public boolean equals ( Object rhs ) { <nl> + if ( this = = rhs ) { <nl> + return true ; <nl> + } <nl> + if ( rhs = = null | | getClass ( ) ! = rhs . getClass ( ) ) { <nl> + return false ; <nl> + } <nl> + DirectorySubspace other = ( DirectorySubspace ) rhs ; <nl> + return ( path = = other . path | | path . equals ( other . path ) ) & & <nl> + Arrays . equals ( layer , other . layer ) & & <nl> directoryLayer . equals ( other . directoryLayer ) & & <nl> super . equals ( rhs ) ; <nl> - } <nl> + } <nl> <nl> - @ Override <nl> - public List < String > getPath ( ) { <nl> - return Collections . unmodifiableList ( path ) ; <nl> - } <nl> + / * * <nl> + * Computes a hash code compatible with the { @ link # equals ( Object ) equals ( ) } method of <nl> + * this class . In particular , it will produce a hash code that is based off of the hashes <nl> + * of its path , its layer , and its subspace prefix . <nl> + * <nl> + * @ return a hash compatible with this class ' s { @ code equals ( ) } method <nl> + * / <nl> + @ Override <nl> + public int hashCode ( ) { <nl> + return path . hashCode ( ) ^ ( Arrays . hashCode ( layer ) * 1153 ) ^ ( directoryLayer . hashCode ( ) * 929 ) ^ ( super . hashCode ( ) * 419 ) ; <nl> + } <nl> + <nl> + @ Override <nl> + public List < String > getPath ( ) { <nl> + return Collections . unmodifiableList ( path ) ; <nl> + } <nl> <nl> - @ Override <nl> - public byte [ ] getLayer ( ) { <nl> - return Arrays . copyOf ( layer , layer . length ) ; <nl> - } <nl> + @ Override <nl> + public byte [ ] getLayer ( ) { <nl> + return Arrays . copyOf ( layer , layer . length ) ; <nl> + } <nl> <nl> @ Override <nl> public DirectoryLayer getDirectoryLayer ( ) { <nl> return directoryLayer ; <nl> } <nl> <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > createOrOpen ( TransactionContext tcx , List < String > subpath , byte [ ] otherLayer ) { <nl> - return directoryLayer . createOrOpen ( tcx , getPartitionSubpath ( subpath ) , otherLayer ) ; <nl> - } <nl> + @ Override <nl> + public CompletableFuture < DirectorySubspace > createOrOpen ( TransactionContext tcx , List < String > subpath , byte [ ] otherLayer ) { <nl> + return directoryLayer . createOrOpen ( tcx , getPartitionSubpath ( subpath ) , otherLayer ) ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > open ( ReadTransactionContext tcx , List < String > subpath , byte [ ] otherLayer ) { <nl> - return directoryLayer . 
open ( tcx , getPartitionSubpath ( subpath ) , otherLayer ) ; <nl> - } <nl> + @ Override <nl> + public CompletableFuture < DirectorySubspace > open ( ReadTransactionContext tcx , List < String > subpath , byte [ ] otherLayer ) { <nl> + return directoryLayer . open ( tcx , getPartitionSubpath ( subpath ) , otherLayer ) ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > create ( TransactionContext tcx , List < String > subpath , byte [ ] otherLayer , byte [ ] prefix ) { <nl> - return directoryLayer . create ( tcx , getPartitionSubpath ( subpath ) , otherLayer , prefix ) ; <nl> - } <nl> + @ Override <nl> + public CompletableFuture < DirectorySubspace > create ( TransactionContext tcx , List < String > subpath , byte [ ] otherLayer , byte [ ] prefix ) { <nl> + return directoryLayer . create ( tcx , getPartitionSubpath ( subpath ) , otherLayer , prefix ) ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < List < String > > list ( ReadTransactionContext tcx , List < String > subpath ) { <nl> - return directoryLayer . list ( tcx , getPartitionSubpath ( subpath ) ) ; <nl> - } <nl> + @ Override <nl> + public CompletableFuture < List < String > > list ( ReadTransactionContext tcx , List < String > subpath ) { <nl> + return directoryLayer . list ( tcx , getPartitionSubpath ( subpath ) ) ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > move ( TransactionContext tcx , List < String > oldSubpath , List < String > newSubpath ) { <nl> - return directoryLayer . move ( tcx , getPartitionSubpath ( oldSubpath ) , getPartitionSubpath ( newSubpath ) ) ; <nl> - } <nl> + @ Override <nl> + public CompletableFuture < DirectorySubspace > move ( TransactionContext tcx , List < String > oldSubpath , List < String > newSubpath ) { <nl> + return directoryLayer . move ( tcx , getPartitionSubpath ( oldSubpath ) , getPartitionSubpath ( newSubpath ) ) ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > moveTo ( TransactionContext tcx , List < String > newAbsolutePath ) { <nl> + @ Override <nl> + public CompletableFuture < DirectorySubspace > moveTo ( TransactionContext tcx , List < String > newAbsolutePath ) { <nl> DirectoryLayer dir = getLayerForPath ( EMPTY_PATH ) ; <nl> int partitionLen = dir . getPath ( ) . size ( ) ; <nl> List < String > partitionPath = newAbsolutePath . subList ( 0 , Math . min ( newAbsolutePath . size ( ) , partitionLen ) ) ; <nl> if ( ! partitionPath . equals ( dir . getPath ( ) ) ) <nl> throw new DirectoryMoveException ( " Cannot move between partitions " , path , newAbsolutePath ) ; <nl> <nl> - return dir . move ( tcx , <nl> + return dir . move ( tcx , <nl> getPartitionSubpath ( EMPTY_PATH , dir ) , <nl> newAbsolutePath . subList ( partitionLen , newAbsolutePath . size ( ) ) ) ; <nl> - } <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < Void > remove ( TransactionContext tcx , List < String > subpath ) { <nl> + @ Override <nl> + public CompletableFuture < Void > remove ( TransactionContext tcx , List < String > subpath ) { <nl> DirectoryLayer dir = getLayerForPath ( subpath ) ; <nl> - return dir . remove ( tcx , getPartitionSubpath ( subpath , dir ) ) ; <nl> - } <nl> + return dir . 
remove ( tcx , getPartitionSubpath ( subpath , dir ) ) ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < Boolean > removeIfExists ( TransactionContext tcx , List < String > subpath ) { <nl> + @ Override <nl> + public CompletableFuture < Boolean > removeIfExists ( TransactionContext tcx , List < String > subpath ) { <nl> DirectoryLayer dir = getLayerForPath ( subpath ) ; <nl> - return dir . removeIfExists ( tcx , getPartitionSubpath ( subpath , dir ) ) ; <nl> - } <nl> + return dir . removeIfExists ( tcx , getPartitionSubpath ( subpath , dir ) ) ; <nl> + } <nl> <nl> - @ Override <nl> - public CompletableFuture < Boolean > exists ( ReadTransactionContext tcx , List < String > subpath ) { <nl> + @ Override <nl> + public CompletableFuture < Boolean > exists ( ReadTransactionContext tcx , List < String > subpath ) { <nl> DirectoryLayer dir = getLayerForPath ( subpath ) ; <nl> - return dir . exists ( tcx , getPartitionSubpath ( subpath , dir ) ) ; <nl> - } <nl> + return dir . exists ( tcx , getPartitionSubpath ( subpath , dir ) ) ; <nl> + } <nl> <nl> private List < String > getPartitionSubpath ( List < String > path ) { <nl> return getPartitionSubpath ( path , directoryLayer ) ; <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectoryUtil . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / directory / DirectoryUtil . java <nl> static String pathStr ( List < String > t ) { <nl> sb . append ( ' ) ' ) ; <nl> return sb . toString ( ) ; <nl> } <nl> + <nl> + private DirectoryUtil ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / directory / MismatchedLayerException . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / directory / MismatchedLayerException . java <nl> <nl> <nl> package com . apple . foundationdb . directory ; <nl> <nl> - import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> - <nl> import java . util . List ; <nl> <nl> + import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> + <nl> / * * <nl> * A { @ link DirectoryException } that is thrown when a directory is opened with an incompatible layer . <nl> * / <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / directory / PathUtil . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / directory / PathUtil . java <nl> <nl> <nl> return new LinkedList < String > ( path . subList ( 0 , path . size ( ) - 1 ) ) ; <nl> } <nl> + <nl> + private PathUtil ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / subspace / Subspace . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / subspace / Subspace . java <nl> <nl> * As a best practice , API clients should use at least one subspace for application data . <nl> * < / p > <nl> * / <nl> - public class Subspace <nl> - { <nl> + public class Subspace { <nl> static final Tuple EMPTY_TUPLE = Tuple . from ( ) ; <nl> static final byte [ ] EMPTY_BYTES = new byte [ 0 ] ; <nl> <nl> public boolean equals ( Object rhs ) { <nl> return false ; <nl> } <nl> Subspace other = ( Subspace ) rhs ; <nl> - return Arrays . equals ( rawPrefix , other . rawPrefix ) ; <nl> + return Arrays . equals ( rawPrefix , other . 
rawPrefix ) ; <nl> } <nl> <nl> / * * <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / tuple / ByteArrayUtil . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / tuple / ByteArrayUtil . java <nl> <nl> if ( interlude = = null ) <nl> interlude = new byte [ 0 ] ; <nl> <nl> - int element_totals = 0 ; <nl> + int elementTotals = 0 ; <nl> int interludeSize = interlude . length ; <nl> for ( byte [ ] e : parts ) { <nl> - element_totals + = e . length ; <nl> + elementTotals + = e . length ; <nl> } <nl> <nl> - byte [ ] dest = new byte [ ( interludeSize * ( partCount - 1 ) ) + element_totals ] ; <nl> + byte [ ] dest = new byte [ ( interludeSize * ( partCount - 1 ) ) + elementTotals ] ; <nl> <nl> / / System . out . println ( " interlude - > " + ArrayUtils . printable ( interlude ) ) ; <nl> <nl> - int start_byte = 0 ; <nl> + int startByte = 0 ; <nl> int index = 0 ; <nl> for ( byte [ ] part : parts ) { <nl> / / System . out . println ( " section - > " + ArrayUtils . printable ( parts . get ( i ) ) ) ; <nl> int length = part . length ; <nl> if ( length > 0 ) { <nl> - System . arraycopy ( part , 0 , dest , start_byte , length ) ; <nl> - start_byte + = length ; <nl> + System . arraycopy ( part , 0 , dest , startByte , length ) ; <nl> + startByte + = length ; <nl> } <nl> if ( index < partCount - 1 & & interludeSize > 0 ) { <nl> / / If this is not the last element , append the interlude <nl> - System . arraycopy ( interlude , 0 , dest , start_byte , interludeSize ) ; <nl> - start_byte + = interludeSize ; <nl> + System . arraycopy ( interlude , 0 , dest , startByte , interludeSize ) ; <nl> + startByte + = interludeSize ; <nl> } <nl> index + + ; <nl> } <nl> <nl> * <nl> * @ return a newly created concatenation of the input <nl> * / <nl> - public static byte [ ] join ( byte [ ] . . . parts ) { <nl> + public static byte [ ] join ( byte [ ] . . . parts ) { <nl> return join ( null , Arrays . asList ( parts ) ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / tuple / Tuple . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / tuple / Tuple . java <nl> <nl> package com . apple . foundationdb . tuple ; <nl> <nl> import java . math . BigInteger ; <nl> - import java . nio . ByteBuffer ; <nl> - import java . nio . ByteOrder ; <nl> import java . util . ArrayList ; <nl> import java . util . Arrays ; <nl> - import java . util . Collection ; <nl> import java . util . Collections ; <nl> import java . util . Iterator ; <nl> import java . util . LinkedList ; <nl> public static Tuple fromStream ( Stream < ? extends Object > items ) { <nl> * <nl> * @ return a newly created { @ code Tuple } <nl> * / <nl> - public static Tuple from ( Object . . . items ) { <nl> + public static Tuple from ( Object . . . items ) { <nl> return fromList ( Arrays . asList ( items ) ) ; <nl> } <nl> <nl> static void main ( String [ ] args ) { <nl> - for ( int i : new int [ ] { 10 , 100 , 1000 , 10000 , 100000 , 1000000 } ) { <nl> + for ( int i : new int [ ] { 10 , 100 , 1000 , 10000 , 100000 , 1000000 } ) { <nl> createTuple ( i ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / main / com / apple / foundationdb / tuple / TupleUtil . java <nl> ppp b / bindings / java / src - completable / main / com / apple / foundationdb / tuple / TupleUtil . 
java <nl> static DecodeResult decode ( byte [ ] rep , int pos , int last ) { <nl> <nl> / / Convert to long if in range - - otherwise , leave as BigInteger . <nl> if ( val . compareTo ( BigInteger . valueOf ( Long . MIN_VALUE ) ) < 0 | | <nl> - val . compareTo ( BigInteger . valueOf ( Long . MAX_VALUE ) ) > 0 ) { <nl> - / / This can occur if the thing can be represented with 8 bytes but not <nl> - / / the right sign information . <nl> - return new DecodeResult ( end , val ) ; <nl> + val . compareTo ( BigInteger . valueOf ( Long . MAX_VALUE ) ) > 0 ) { <nl> + / / This can occur if the thing can be represented with 8 bytes but not <nl> + / / the right sign information . <nl> + return new DecodeResult ( end , val ) ; <nl> } <nl> return new DecodeResult ( end , val . longValue ( ) ) ; <nl> } <nl> static boolean hasIncompleteVersionstamp ( Stream < ? > items ) { <nl> <nl> public static void main ( String [ ] args ) { <nl> try { <nl> - byte [ ] bytes = pack ( Collections . singletonList ( 4 ) , null ) ; <nl> - assert 4 = = ( Integer ) ( decode ( bytes , 0 , bytes . length ) . o ) ; <nl> + byte [ ] bytes = pack ( Collections . singletonList ( 4 ) , null ) ; <nl> + assert 4 = = ( Integer ) ( decode ( bytes , 0 , bytes . length ) . o ) ; <nl> } catch ( Exception e ) { <nl> e . printStackTrace ( ) ; <nl> System . out . println ( " Error " + e . getMessage ( ) ) ; <nl> } <nl> <nl> try { <nl> - byte [ ] bytes = pack ( Collections . singletonList ( " \ u021Aest \ u0218tring " ) , null ) ; <nl> - String string = ( String ) ( decode ( bytes , 0 , bytes . length ) . o ) ; <nl> + byte [ ] bytes = pack ( Collections . singletonList ( " \ u021Aest \ u0218tring " ) , null ) ; <nl> + String string = ( String ) ( decode ( bytes , 0 , bytes . length ) . o ) ; <nl> System . out . println ( " contents - > " + string ) ; <nl> - assert " \ u021Aest \ u0218tring " = = string ; <nl> + assert " \ u021Aest \ u0218tring " . equals ( string ) ; <nl> } catch ( Exception e ) { <nl> e . printStackTrace ( ) ; <nl> System . out . println ( " Error " + e . getMessage ( ) ) ; <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / AbstractTester . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / AbstractTester . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> - import com . apple . foundationdb . Database ; <nl> - import com . apple . foundationdb . FDB ; <nl> - <nl> import java . nio . charset . Charset ; <nl> import java . util . Random ; <nl> <nl> + import com . apple . foundationdb . Database ; <nl> + import com . apple . foundationdb . FDB ; <nl> + <nl> public abstract class AbstractTester { <nl> public static final int API_VERSION = 510 ; <nl> protected static final int NUM_RUNS = 25 ; <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / AsyncDirectoryExtension . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / AsyncDirectoryExtension . java <nl> <nl> <nl> import java . util . ArrayList ; <nl> import java . util . Arrays ; <nl> + import java . util . Collections ; <nl> import java . util . List ; <nl> - <nl> import java . util . concurrent . CompletableFuture ; <nl> - import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . Range ; <nl> - import com . apple . foundationdb . Transaction ; <nl> import com . apple . foundationdb . async . AsyncUtil ; <nl> import com . apple . foundationdb . 
directory . Directory ; <nl> import com . apple . foundationdb . directory . DirectoryLayer ; <nl> <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> class AsyncDirectoryExtension { <nl> - List < Object > dirList = new ArrayList < Object > ( ) ; <nl> + List < Object > dirList = new ArrayList < > ( ) ; <nl> int dirIndex = 0 ; <nl> int errorIndex = 0 ; <nl> <nl> - public AsyncDirectoryExtension ( ) { <nl> + AsyncDirectoryExtension ( ) { <nl> dirList . add ( DirectoryLayer . getDefault ( ) ) ; <nl> } <nl> <nl> Subspace subspace ( ) { <nl> } <nl> <nl> CompletableFuture < Void > processInstruction ( final Instruction inst ) { <nl> - return executeInstruction ( inst ) <nl> - . exceptionally ( new Function < Throwable , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Throwable e ) { <nl> - DirectoryUtil . pushError ( inst , e , dirList ) ; <nl> - return null ; <nl> - } <nl> + return executeInstruction ( inst ) . exceptionally ( e - > { <nl> + DirectoryUtil . pushError ( inst , e , dirList ) ; <nl> + return null ; <nl> } ) ; <nl> } <nl> <nl> public Void apply ( Throwable e ) { <nl> <nl> if ( op = = DirectoryOperation . DIRECTORY_CREATE_SUBSPACE ) { <nl> return DirectoryUtil . popTuple ( inst ) <nl> - . thenComposeAsync ( new Function < Tuple , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final Tuple prefix ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object rawPrefix ) { <nl> - dirList . add ( new Subspace ( prefix , ( byte [ ] ) rawPrefix ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + . thenComposeAsync ( prefix - > inst . popParam ( ) <nl> + . thenAccept ( rawPrefix - > dirList . add ( new Subspace ( prefix , ( byte [ ] ) rawPrefix ) ) ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_CREATE_LAYER ) { <nl> - return inst . popParams ( 3 ) <nl> - . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > params ) { <nl> - Subspace nodeSubspace = ( Subspace ) dirList . get ( StackUtils . getInt ( params . get ( 0 ) ) ) ; <nl> - Subspace contentSubspace = ( Subspace ) dirList . get ( StackUtils . getInt ( params . get ( 1 ) ) ) ; <nl> - boolean allowManualPrefixes = StackUtils . getInt ( params . get ( 2 ) ) = = 1 ; <nl> - <nl> - if ( nodeSubspace = = null | | contentSubspace = = null ) <nl> - dirList . add ( null ) ; <nl> - else <nl> - dirList . add ( new DirectoryLayer ( nodeSubspace , contentSubspace , allowManualPrefixes ) ) ; <nl> + return inst . popParams ( 3 ) . thenAcceptAsync ( params - > { <nl> + Subspace nodeSubspace = ( Subspace ) dirList . get ( StackUtils . getInt ( params . get ( 0 ) ) ) ; <nl> + Subspace contentSubspace = ( Subspace ) dirList . get ( StackUtils . getInt ( params . get ( 1 ) ) ) ; <nl> + boolean allowManualPrefixes = StackUtils . getInt ( params . get ( 2 ) ) = = 1 ; <nl> <nl> - return null ; <nl> - } <nl> + if ( nodeSubspace = = null | | contentSubspace = = null ) <nl> + dirList . add ( null ) ; <nl> + else <nl> + dirList . add ( new DirectoryLayer ( nodeSubspace , contentSubspace , allowManualPrefixes ) ) ; <nl> } ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_CHANGE ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object index ) { <nl> - dirIndex = StackUtils . 
getInt ( index ) ; <nl> - if ( dirList . get ( dirIndex ) = = null ) <nl> - dirIndex = errorIndex ; <nl> - <nl> - return null ; <nl> - } <nl> + return inst . popParam ( ) . thenAcceptAsync ( index - > { <nl> + dirIndex = StackUtils . getInt ( index ) ; <nl> + if ( dirList . get ( dirIndex ) = = null ) <nl> + dirIndex = errorIndex ; <nl> } ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_SET_ERROR_INDEX ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object index ) { <nl> - errorIndex = StackUtils . getInt ( index ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( index - > errorIndex = StackUtils . getInt ( index ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_CREATE_OR_OPEN | | op = = DirectoryOperation . DIRECTORY_OPEN ) { <nl> return DirectoryUtil . popPath ( inst ) <nl> - . thenComposeAsync ( new Function < List < String > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final List < String > path ) { <nl> - return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Object layer ) { <nl> - CompletableFuture < DirectorySubspace > dir ; <nl> - if ( layer = = null ) { <nl> - if ( op = = DirectoryOperation . DIRECTORY_CREATE_OR_OPEN ) <nl> - dir = directory ( ) . createOrOpen ( inst . tcx , path ) ; <nl> - else <nl> - dir = directory ( ) . open ( inst . readTcx , path ) ; <nl> - } <nl> - else { <nl> - if ( op = = DirectoryOperation . DIRECTORY_CREATE_OR_OPEN ) <nl> - dir = directory ( ) . createOrOpen ( inst . tcx , path , ( byte [ ] ) layer ) ; <nl> - else <nl> - dir = directory ( ) . open ( inst . readTcx , path , ( byte [ ] ) layer ) ; <nl> - } <nl> - <nl> - return dir . thenApplyAsync ( new Function < DirectorySubspace , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( DirectorySubspace dirSubspace ) { <nl> - dirList . add ( dirSubspace ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + . thenComposeAsync ( path - > inst . popParam ( ) . thenComposeAsync ( layer - > { <nl> + CompletableFuture < DirectorySubspace > dir ; <nl> + if ( layer = = null ) { <nl> + if ( op = = DirectoryOperation . DIRECTORY_CREATE_OR_OPEN ) <nl> + dir = directory ( ) . createOrOpen ( inst . tcx , path ) ; <nl> + else <nl> + dir = directory ( ) . open ( inst . readTcx , path ) ; <nl> } <nl> - } ) ; <nl> + else { <nl> + if ( op = = DirectoryOperation . DIRECTORY_CREATE_OR_OPEN ) <nl> + dir = directory ( ) . createOrOpen ( inst . tcx , path , ( byte [ ] ) layer ) ; <nl> + else <nl> + dir = directory ( ) . open ( inst . readTcx , path , ( byte [ ] ) layer ) ; <nl> + } <nl> + <nl> + return dir . thenAccept ( dirList : : add ) ; <nl> + } ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_CREATE ) { <nl> - return DirectoryUtil . popPath ( inst ) <nl> - . thenComposeAsync ( new Function < List < String > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final List < String > path ) { <nl> - return inst . popParams ( 2 ) <nl> - . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < Object > params ) { <nl> - byte [ ] layer = ( byte [ ] ) params . 
get ( 0 ) ; <nl> - byte [ ] prefix = ( byte [ ] ) params . get ( 1 ) ; <nl> + return DirectoryUtil . popPath ( inst ) . thenComposeAsync ( path - > inst . popParams ( 2 ) . thenComposeAsync ( params - > { <nl> + byte [ ] layer = ( byte [ ] ) params . get ( 0 ) ; <nl> + byte [ ] prefix = ( byte [ ] ) params . get ( 1 ) ; <nl> <nl> - CompletableFuture < DirectorySubspace > dir ; <nl> - if ( layer = = null & & prefix = = null ) <nl> - dir = directory ( ) . create ( inst . tcx , path ) ; <nl> - else if ( prefix = = null ) <nl> - dir = directory ( ) . create ( inst . tcx , path , layer ) ; <nl> - else { <nl> - if ( layer = = null ) <nl> - layer = new byte [ 0 ] ; <nl> + CompletableFuture < DirectorySubspace > dir ; <nl> + if ( layer = = null & & prefix = = null ) <nl> + dir = directory ( ) . create ( inst . tcx , path ) ; <nl> + else if ( prefix = = null ) <nl> + dir = directory ( ) . create ( inst . tcx , path , layer ) ; <nl> + else { <nl> + if ( layer = = null ) <nl> + layer = new byte [ 0 ] ; <nl> <nl> - dir = directory ( ) . create ( inst . tcx , path , layer , prefix ) ; <nl> - } <nl> - <nl> - return dir . thenApplyAsync ( new Function < DirectorySubspace , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( DirectorySubspace dirSubspace ) { <nl> - dirList . add ( dirSubspace ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + dir = directory ( ) . create ( inst . tcx , path , layer , prefix ) ; <nl> } <nl> - } ) ; <nl> + <nl> + return dir . thenAccept ( dirList : : add ) ; <nl> + } ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_MOVE ) { <nl> return DirectoryUtil . popPaths ( inst , 2 ) <nl> - . thenComposeAsync ( new Function < List < List < String > > , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( List < List < String > > paths ) { <nl> - return directory ( ) . move ( inst . tcx , paths . get ( 0 ) , paths . get ( 1 ) ) ; <nl> - } <nl> - } ) <nl> - . thenApplyAsync ( new Function < DirectorySubspace , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( DirectorySubspace dirSubspace ) { <nl> - dirList . add ( dirSubspace ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + . thenComposeAsync ( paths - > directory ( ) . move ( inst . tcx , paths . get ( 0 ) , paths . get ( 1 ) ) ) <nl> + . thenAccept ( dirList : : add ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_MOVE_TO ) { <nl> return DirectoryUtil . popPath ( inst ) <nl> - . thenComposeAsync ( new Function < List < String > , CompletableFuture < DirectorySubspace > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < DirectorySubspace > apply ( List < String > newAbsolutePath ) { <nl> - return directory ( ) . moveTo ( inst . tcx , newAbsolutePath ) ; <nl> - } <nl> - } ) <nl> - . thenApplyAsync ( new Function < DirectorySubspace , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( DirectorySubspace dirSubspace ) { <nl> - dirList . add ( dirSubspace ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + . thenComposeAsync ( newAbsolutePath - > directory ( ) . moveTo ( inst . tcx , newAbsolutePath ) ) <nl> + . thenAccept ( dirList : : add ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_REMOVE ) { <nl> return inst . popParam ( ) <nl> - . 
thenComposeAsync ( new Function < Object , CompletableFuture < List < List < String > > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < List < String > > > apply ( Object count ) { <nl> - return DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ; <nl> - } <nl> - } ) <nl> - . thenComposeAsync ( new Function < List < List < String > > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < List < String > > path ) { <nl> - if ( path . size ( ) = = 0 ) <nl> - return directory ( ) . remove ( inst . tcx ) ; <nl> - else <nl> - return directory ( ) . remove ( inst . tcx , path . get ( 0 ) ) ; <nl> - } <nl> + . thenComposeAsync ( count - > DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ) <nl> + . thenComposeAsync ( path - > { <nl> + if ( path . size ( ) = = 0 ) <nl> + return directory ( ) . remove ( inst . tcx ) ; <nl> + else <nl> + return directory ( ) . remove ( inst . tcx , path . get ( 0 ) ) ; <nl> } ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_REMOVE_IF_EXISTS ) { <nl> return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < List < List < String > > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < List < String > > > apply ( Object count ) { <nl> - return DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ; <nl> - } <nl> - } ) <nl> - . thenComposeAsync ( new Function < List < List < String > > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < List < String > > path ) { <nl> - if ( path . size ( ) = = 0 ) <nl> - return AsyncUtil . success ( directory ( ) . removeIfExists ( inst . tcx ) ) ; <nl> - else <nl> - return AsyncUtil . success ( directory ( ) . removeIfExists ( inst . tcx , path . get ( 0 ) ) ) ; <nl> - } <nl> + . thenComposeAsync ( count - > DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ) <nl> + . thenComposeAsync ( path - > { <nl> + if ( path . size ( ) = = 0 ) <nl> + return AsyncUtil . success ( directory ( ) . removeIfExists ( inst . tcx ) ) ; <nl> + else <nl> + return AsyncUtil . success ( directory ( ) . removeIfExists ( inst . tcx , path . get ( 0 ) ) ) ; <nl> } ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_LIST ) { <nl> return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < List < List < String > > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < List < String > > > apply ( Object count ) { <nl> - return DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ; <nl> - } <nl> - } ) <nl> - . thenComposeAsync ( new Function < List < List < String > > , CompletableFuture < List < String > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < String > > apply ( List < List < String > > path ) { <nl> - if ( path . size ( ) = = 0 ) <nl> - return directory ( ) . list ( inst . readTcx ) ; <nl> - else <nl> - return directory ( ) . list ( inst . readTcx , path . get ( 0 ) ) ; <nl> - } <nl> + . thenComposeAsync ( count - > DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ) <nl> + . thenComposeAsync ( path - > { <nl> + if ( path . size ( ) = = 0 ) <nl> + return directory ( ) . list ( inst . readTcx ) ; <nl> + else <nl> + return directory ( ) . list ( inst . readTcx , path . get ( 0 ) ) ; <nl> } ) <nl> - . 
thenApplyAsync ( new Function < List < String > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < String > children ) { <nl> - inst . push ( Tuple . fromItems ( children ) . pack ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + . thenAccept ( children - > inst . push ( Tuple . fromItems ( children ) . pack ( ) ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_EXISTS ) { <nl> return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < List < List < String > > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < List < String > > > apply ( Object count ) { <nl> - return DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ; <nl> - } <nl> - } ) <nl> - . thenComposeAsync ( new Function < List < List < String > > , CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > apply ( List < List < String > > path ) { <nl> - if ( path . size ( ) = = 0 ) <nl> - return directory ( ) . exists ( inst . readTcx ) ; <nl> - else <nl> - return directory ( ) . exists ( inst . readTcx , path . get ( 0 ) ) ; <nl> - } <nl> + . thenComposeAsync ( count - > DirectoryUtil . popPaths ( inst , StackUtils . getInt ( count ) ) ) <nl> + . thenComposeAsync ( path - > { <nl> + if ( path . size ( ) = = 0 ) <nl> + return directory ( ) . exists ( inst . readTcx ) ; <nl> + else <nl> + return directory ( ) . exists ( inst . readTcx , path . get ( 0 ) ) ; <nl> } ) <nl> - . thenApplyAsync ( new Function < Boolean , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Boolean exists ) { <nl> - inst . push ( exists ? 1 : 0 ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + . thenAccept ( exists - > inst . push ( exists ? 1 : 0 ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_PACK_KEY ) { <nl> - return DirectoryUtil . popTuple ( inst ) <nl> - . thenApplyAsync ( new Function < Tuple , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Tuple keyTuple ) { <nl> - inst . push ( subspace ( ) . pack ( keyTuple ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return DirectoryUtil . popTuple ( inst ) . thenAccept ( keyTuple - > inst . push ( subspace ( ) . pack ( keyTuple ) ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_UNPACK_KEY ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object key ) { <nl> - Tuple tup = subspace ( ) . unpack ( ( byte [ ] ) key ) ; <nl> - for ( Object o : tup ) <nl> - inst . push ( o ) ; <nl> - <nl> - return null ; <nl> - } <nl> + return inst . popParam ( ) . thenAcceptAsync ( key - > { <nl> + Tuple tup = subspace ( ) . unpack ( ( byte [ ] ) key ) ; <nl> + for ( Object o : tup ) <nl> + inst . push ( o ) ; <nl> } ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_RANGE ) { <nl> - return DirectoryUtil . popTuple ( inst ) <nl> - . thenApplyAsync ( new Function < Tuple , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Tuple tup ) { <nl> - Range range = subspace ( ) . range ( tup ) ; <nl> - inst . push ( range . begin ) ; <nl> - inst . push ( range . end ) ; <nl> - return null ; <nl> - } <nl> + return DirectoryUtil . popTuple ( inst ) . thenAcceptAsync ( tup - > { <nl> + Range range = subspace ( ) . range ( tup ) ; <nl> + inst . push ( range . begin ) ; <nl> + inst . push ( range . end ) ; <nl> } ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_CONTAINS ) { <nl> - return inst . 
popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object key ) { <nl> - inst . push ( subspace ( ) . contains ( ( byte [ ] ) key ) ? 1 : 0 ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAccept ( key - > inst . push ( subspace ( ) . contains ( ( byte [ ] ) key ) ? 1 : 0 ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_OPEN_SUBSPACE ) { <nl> - return DirectoryUtil . popTuple ( inst ) <nl> - . thenApplyAsync ( new Function < Tuple , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Tuple prefix ) { <nl> - dirList . add ( subspace ( ) . subspace ( prefix ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return DirectoryUtil . popTuple ( inst ) . thenAcceptAsync ( prefix - > dirList . add ( subspace ( ) . subspace ( prefix ) ) ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_LOG_SUBSPACE ) { <nl> - return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final Object prefix ) { <nl> - return inst . tcx . runAsync ( new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Transaction tr ) { <nl> - tr . set ( Tuple . from ( dirIndex ) . pack ( ( byte [ ] ) prefix ) , subspace ( ) . getKey ( ) ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( prefix - > <nl> + inst . tcx . runAsync ( tr - > { <nl> + tr . set ( Tuple . from ( dirIndex ) . pack ( ( byte [ ] ) prefix ) , subspace ( ) . getKey ( ) ) ; <nl> + return AsyncUtil . DONE ; <nl> + } ) <nl> + ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_LOG_DIRECTORY ) { <nl> - return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Object prefix ) { <nl> - final Subspace logSubspace = new Subspace ( new Tuple ( ) . add ( dirIndex ) , ( byte [ ] ) prefix ) ; <nl> - return inst . tcx . runAsync ( new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final Transaction tr ) { <nl> - return directory ( ) . exists ( tr ) <nl> - . thenComposeAsync ( new Function < Boolean , CompletableFuture < List < String > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < String > > apply ( Boolean exists ) { <nl> - tr . set ( logSubspace . pack ( " path " ) , Tuple . fromItems ( directory ( ) . getPath ( ) ) . pack ( ) ) ; <nl> - tr . set ( logSubspace . pack ( " layer " ) , new Tuple ( ) . add ( directory ( ) . getLayer ( ) ) . pack ( ) ) ; <nl> - tr . set ( logSubspace . pack ( " exists " ) , new Tuple ( ) . add ( exists ? 1 : 0 ) . pack ( ) ) ; <nl> - if ( exists ) <nl> - return directory ( ) . list ( tr ) ; <nl> - else <nl> - return CompletableFuture . completedFuture ( new ArrayList < String > ( ) ) ; <nl> - } <nl> - } ) <nl> - . thenApplyAsync ( new Function < List < String > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < String > children ) { <nl> - tr . set ( logSubspace . pack ( " children " ) , Tuple . fromItems ( children ) . pack ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> + return inst . popParam ( ) . 
thenComposeAsync ( prefix - > { <nl> + final Subspace logSubspace = new Subspace ( new Tuple ( ) . add ( dirIndex ) , ( byte [ ] ) prefix ) ; <nl> + return inst . tcx . runAsync ( tr - > directory ( ) . exists ( tr ) <nl> + . thenComposeAsync ( exists - > { <nl> + tr . set ( logSubspace . pack ( " path " ) , Tuple . fromItems ( directory ( ) . getPath ( ) ) . pack ( ) ) ; <nl> + tr . set ( logSubspace . pack ( " layer " ) , new Tuple ( ) . add ( directory ( ) . getLayer ( ) ) . pack ( ) ) ; <nl> + tr . set ( logSubspace . pack ( " exists " ) , new Tuple ( ) . add ( exists ? 1 : 0 ) . pack ( ) ) ; <nl> + if ( exists ) <nl> + return directory ( ) . list ( tr ) ; <nl> + else <nl> + return CompletableFuture . completedFuture ( Collections . emptyList ( ) ) ; <nl> + } ) <nl> + . thenAcceptAsync ( children - > tr . set ( logSubspace . pack ( " children " ) , Tuple . fromItems ( children ) . pack ( ) ) ) <nl> + ) ; <nl> } ) ; <nl> } <nl> else if ( op = = DirectoryOperation . DIRECTORY_STRIP_PREFIX ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - byte [ ] str = ( byte [ ] ) param ; <nl> - byte [ ] rawPrefix = subspace ( ) . getKey ( ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + byte [ ] str = ( byte [ ] ) param ; <nl> + byte [ ] rawPrefix = subspace ( ) . getKey ( ) ; <nl> <nl> - if ( str . length < rawPrefix . length ) <nl> - throw new RuntimeException ( " String does not start with raw prefix " ) ; <nl> + if ( str . length < rawPrefix . length ) <nl> + throw new RuntimeException ( " String does not start with raw prefix " ) ; <nl> <nl> - for ( int i = 0 ; i < rawPrefix . length ; + + i ) <nl> - if ( str [ i ] ! = rawPrefix [ i ] ) <nl> - throw new RuntimeException ( " String does not start with raw prefix " ) ; <nl> + for ( int i = 0 ; i < rawPrefix . length ; + + i ) <nl> + if ( str [ i ] ! = rawPrefix [ i ] ) <nl> + throw new RuntimeException ( " String does not start with raw prefix " ) ; <nl> <nl> - inst . push ( Arrays . copyOfRange ( str , rawPrefix . length , str . length ) ) ; <nl> - return null ; <nl> - } <nl> + inst . push ( Arrays . copyOfRange ( str , rawPrefix . length , str . length ) ) ; <nl> } ) ; <nl> } <nl> else { <nl> deleted file mode 100644 <nl> index dcecfdeff1 . . 0000000000 <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / AsyncPerformanceTester . java <nl> ppp / dev / null <nl> <nl> - / * <nl> - * AsyncPerformanceTester . java <nl> - * <nl> - * This source file is part of the FoundationDB open source project <nl> - * <nl> - * Copyright 2013 - 2018 Apple Inc . and the FoundationDB project authors <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * / <nl> - <nl> - package com . apple . foundationdb . 
test ; <nl> - <nl> - public class AsyncPerformanceTester { <nl> - <nl> - public static void main ( String [ ] args ) { <nl> - System . out . println ( " Running Java async performance test on Java version " + System . getProperty ( " java . version " ) ) ; <nl> - } <nl> - } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / AsyncStackTester . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / AsyncStackTester . java <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> import java . math . BigInteger ; <nl> - import java . util . * ; <nl> - <nl> import java . nio . ByteBuffer ; <nl> import java . nio . ByteOrder ; <nl> + import java . util . ArrayList ; <nl> + import java . util . Arrays ; <nl> + import java . util . Collections ; <nl> + import java . util . HashMap ; <nl> + import java . util . LinkedList ; <nl> + import java . util . List ; <nl> + import java . util . Map ; <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . function . Function ; <nl> <nl> <nl> <nl> @ Override <nl> public CompletableFuture < Void > apply ( Transaction tr ) { <nl> - return tr . getRange ( Range . startsWith ( prefix ) ) . asList ( ) . thenApplyAsync ( new Function < List < KeyValue > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < KeyValue > list ) { <nl> - if ( list . size ( ) > 0 ) { <nl> - / / System . out . println ( " - Throwing new fake commit error . . . " ) ; <nl> - throw new FDBException ( " ERROR : Fake commit conflict " , 1020 ) ; <nl> - } <nl> - return null ; <nl> + return tr . getRange ( Range . startsWith ( prefix ) ) . asList ( ) . thenAcceptAsync ( list - > { <nl> + if ( list . size ( ) > 0 ) { <nl> + / / System . out . println ( " - Throwing new fake commit error . . . " ) ; <nl> + throw new FDBException ( " ERROR : Fake commit conflict " , 1020 ) ; <nl> } <nl> - } ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> } <nl> <nl> else if ( item = = null ) <nl> System . out . println ( inst . context . preStr + " - " + " Pushing null " ) ; <nl> else <nl> System . out . println ( inst . context . preStr + " - " + " Pushing item of type " + item . getClass ( ) . getName ( ) ) ; * / <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . POP ) { <nl> inst . pop ( ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . DUP ) { <nl> if ( inst . size ( ) = = 0 ) <nl> else if ( op = = StackOperation . DUP ) { <nl> StackEntry e = inst . pop ( ) ; <nl> inst . push ( e ) ; <nl> inst . push ( e ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . EMPTY_STACK ) { <nl> inst . clear ( ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . SWAP ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - int index = StackUtils . getInt ( param ) ; <nl> - if ( index > = inst . size ( ) ) <nl> - throw new IllegalArgumentException ( " Stack index not valid " ) ; <nl> - <nl> - inst . swap ( index ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . 
thenAcceptAsync ( param - > { <nl> + int index = StackUtils . getInt ( param ) ; <nl> + if ( index > = inst . size ( ) ) <nl> + throw new IllegalArgumentException ( " Stack index not valid " ) ; <nl> + <nl> + inst . swap ( index ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . WAIT_FUTURE ) { <nl> - return popAndWait ( inst ) <nl> - . thenApplyAsync ( new Function < StackEntry , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( StackEntry e ) { <nl> - inst . push ( e ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return popAndWait ( inst ) . thenAccept ( inst : : push ) ; <nl> } <nl> else if ( op = = StackOperation . WAIT_EMPTY ) { <nl> - return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Object param ) { <nl> - WaitEmpty retryable = new WaitEmpty ( ( byte [ ] ) param ) ; <nl> - return inst . context . db . runAsync ( retryable ) . thenApply ( new Function < Void , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Void o ) { <nl> - inst . push ( " WAITED_FOR_EMPTY " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( param - > { <nl> + WaitEmpty retryable = new WaitEmpty ( ( byte [ ] ) param ) ; <nl> + return inst . context . db . runAsync ( retryable ) . thenRun ( ( ) - > inst . push ( " WAITED_FOR_EMPTY " . getBytes ( ) ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . START_THREAD ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - / / System . out . println ( inst . context . preStr + " - " + " Starting new thread at prefix : " + ByteArrayUtil . printable ( ( byte [ ] ) params . get ( 0 ) ) ) ; <nl> - inst . context . addContext ( ( byte [ ] ) param ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + / / System . out . println ( inst . context . preStr + " - " + " Starting new thread at prefix : " + ByteArrayUtil . printable ( ( byte [ ] ) params . get ( 0 ) ) ) ; <nl> + inst . context . addContext ( ( byte [ ] ) param ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . NEW_TRANSACTION ) { <nl> inst . context . newTransaction ( ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . USE_TRANSACTION ) { <nl> - return inst . popParam ( ) <nl> - . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - public Void apply ( Object param ) { <nl> - inst . context . switchTransaction ( ( byte [ ] ) param ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + inst . context . switchTransaction ( ( byte [ ] ) param ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . SET ) { <nl> - return inst . popParams ( 2 ) . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final List < Object > params ) { <nl> - / * System . out . println ( inst . context . preStr + " - " + " Setting ' " + ByteArrayUtil . printable ( ( byte [ ] ) params . 
get ( 0 ) ) + <nl> - " ' to ' " + ByteArrayUtil . printable ( ( byte [ ] ) params . get ( 1 ) ) + " ' " ) ; * / <nl> - return executeMutation ( inst , new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Transaction tr ) { <nl> - tr . set ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 2 ) . thenComposeAsync ( params - > { <nl> + / * System . out . println ( inst . context . preStr + " - " + " Setting ' " + ByteArrayUtil . printable ( ( byte [ ] ) params . get ( 0 ) ) + <nl> + " ' to ' " + ByteArrayUtil . printable ( ( byte [ ] ) params . get ( 1 ) ) + " ' " ) ; * / <nl> + return executeMutation ( inst , tr - > { <nl> + tr . set ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> + return AsyncUtil . DONE ; <nl> + } ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . CLEAR ) { <nl> - return inst . popParam ( ) . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final Object param ) { <nl> - / / System . out . println ( inst . context . preStr + " - " + " Clearing : ' " + ByteArrayUtil . printable ( ( byte [ ] ) param ) + " ' " ) ; <nl> - return executeMutation ( inst , new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Transaction tr ) { <nl> - tr . clear ( ( byte [ ] ) param ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( param - > { <nl> + / / System . out . println ( inst . context . preStr + " - " + " Clearing : ' " + ByteArrayUtil . printable ( ( byte [ ] ) param ) + " ' " ) ; <nl> + return executeMutation ( inst , tr - > { <nl> + tr . clear ( ( byte [ ] ) param ) ; <nl> + return AsyncUtil . DONE ; <nl> + } ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . CLEAR_RANGE ) { <nl> - return inst . popParams ( 2 ) . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final List < Object > params ) { <nl> - return executeMutation ( inst , new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Transaction tr ) { <nl> - tr . clear ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 2 ) . thenComposeAsync ( params - > <nl> + executeMutation ( inst , tr - > { <nl> + tr . clear ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> + return AsyncUtil . DONE ; <nl> + } ) , <nl> + FDB . DEFAULT_EXECUTOR <nl> + ) ; <nl> } <nl> else if ( op = = StackOperation . CLEAR_RANGE_STARTS_WITH ) { <nl> - return inst . popParam ( ) . 
thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final Object param ) { <nl> - return executeMutation ( inst , new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Transaction tr ) { <nl> - tr . clear ( Range . startsWith ( ( byte [ ] ) param ) ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( param - > <nl> + executeMutation ( inst , tr - > { <nl> + tr . clear ( Range . startsWith ( ( byte [ ] ) param ) ) ; <nl> + return AsyncUtil . DONE ; <nl> + } ) , <nl> + FDB . DEFAULT_EXECUTOR <nl> + ) ; <nl> } <nl> else if ( op = = StackOperation . ATOMIC_OP ) { <nl> - return inst . popParams ( 3 ) . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final List < Object > params ) { <nl> - final MutationType optype = MutationType . valueOf ( ( String ) params . get ( 0 ) ) ; <nl> - return executeMutation ( inst , <nl> - new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Transaction tr ) { <nl> - tr . mutate ( optype , ( byte [ ] ) params . get ( 1 ) , ( byte [ ] ) params . get ( 2 ) ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> - } <nl> - ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 3 ) . thenComposeAsync ( params - > { <nl> + final MutationType optype = MutationType . valueOf ( ( String ) params . get ( 0 ) ) ; <nl> + return executeMutation ( inst , tr - > { <nl> + tr . mutate ( optype , ( byte [ ] ) params . get ( 1 ) , ( byte [ ] ) params . get ( 2 ) ) ; <nl> + return AsyncUtil . DONE ; <nl> + } ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . COMMIT ) { <nl> inst . push ( inst . tr . commit ( ) ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . RESET ) { <nl> inst . context . newTransaction ( ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . CANCEL ) { <nl> inst . tr . cancel ( ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . READ_CONFLICT_RANGE ) { <nl> - return inst . popParams ( 2 ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > params ) { <nl> - inst . tr . addReadConflictRange ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> - inst . push ( " SET_CONFLICT_RANGE " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 2 ) . thenAcceptAsync ( params - > { <nl> + inst . tr . addReadConflictRange ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> + inst . push ( " SET_CONFLICT_RANGE " . getBytes ( ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . WRITE_CONFLICT_RANGE ) { <nl> - return inst . popParams ( 2 ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > params ) { <nl> - inst . tr . 
addWriteConflictRange ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> - inst . push ( " SET_CONFLICT_RANGE " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 2 ) . thenAcceptAsync ( params - > { <nl> + inst . tr . addWriteConflictRange ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> + inst . push ( " SET_CONFLICT_RANGE " . getBytes ( ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . READ_CONFLICT_KEY ) { <nl> - return inst . popParam ( ) . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - inst . tr . addReadConflictKey ( ( byte [ ] ) param ) ; <nl> - inst . push ( " SET_CONFLICT_KEY " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + inst . tr . addReadConflictKey ( ( byte [ ] ) param ) ; <nl> + inst . push ( " SET_CONFLICT_KEY " . getBytes ( ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . WRITE_CONFLICT_KEY ) { <nl> - return inst . popParam ( ) . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - inst . tr . addWriteConflictKey ( ( byte [ ] ) param ) ; <nl> - inst . push ( " SET_CONFLICT_KEY " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + inst . tr . addWriteConflictKey ( ( byte [ ] ) param ) ; <nl> + inst . push ( " SET_CONFLICT_KEY " . getBytes ( ) ) ; <nl> } ) ; <nl> } <nl> else if ( op = = StackOperation . DISABLE_WRITE_CONFLICT ) { <nl> inst . tr . options ( ) . setNextWriteNoWriteConflictRange ( ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . GET ) { <nl> - return inst . popParam ( ) . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - inst . push ( inst . readTcx . readAsync ( readTr - > readTr . get ( ( byte [ ] ) param ) ) ) ; <nl> - return null ; <nl> - } <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + inst . push ( inst . readTcx . readAsync ( readTr - > readTr . get ( ( byte [ ] ) param ) ) ) ; <nl> } ) ; <nl> } <nl> else if ( op = = StackOperation . GET_RANGE ) { <nl> - return inst . popParams ( 5 ) . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < Object > params ) { <nl> - int limit = StackUtils . getInt ( params . get ( 2 ) ) ; <nl> - boolean reverse = StackUtils . getBoolean ( params . get ( 3 ) ) ; <nl> - StreamingMode mode = inst . context . streamingModeFromCode ( <nl> - StackUtils . getInt ( params . get ( 4 ) , StreamingMode . ITERATOR . code ( ) ) ) ; <nl> - <nl> - CompletableFuture < List < KeyValue > > range = inst . readTcx . readAsync ( readTr - > readTr . getRange ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) , limit , reverse , mode ) . asList ( ) ) ; <nl> - return pushRange ( inst , range ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 5 ) . thenComposeAsync ( params - > { <nl> + int limit = StackUtils . getInt ( params . get ( 2 ) ) ; <nl> + boolean reverse = StackUtils . getBoolean ( params . get ( 3 ) ) ; <nl> + StreamingMode mode = inst . 
context . streamingModeFromCode ( <nl> + StackUtils . getInt ( params . get ( 4 ) , StreamingMode . ITERATOR . code ( ) ) ) ; <nl> + <nl> + CompletableFuture < List < KeyValue > > range = inst . readTcx . readAsync ( readTr - > readTr . getRange ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) , limit , reverse , mode ) . asList ( ) ) ; <nl> + return pushRange ( inst , range ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . GET_RANGE_SELECTOR ) { <nl> - return inst . popParams ( 10 ) . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < Object > params ) { <nl> - int limit = StackUtils . getInt ( params . get ( 6 ) ) ; <nl> - boolean reverse = StackUtils . getBoolean ( params . get ( 7 ) ) ; <nl> - StreamingMode mode = inst . context . streamingModeFromCode ( <nl> - StackUtils . getInt ( params . get ( 8 ) , StreamingMode . ITERATOR . code ( ) ) ) ; <nl> - <nl> - KeySelector start = StackUtils . createSelector ( params . get ( 0 ) , params . get ( 1 ) , params . get ( 2 ) ) ; <nl> - KeySelector end = StackUtils . createSelector ( params . get ( 3 ) , params . get ( 4 ) , params . get ( 5 ) ) ; <nl> - <nl> - CompletableFuture < List < KeyValue > > range = inst . readTcx . readAsync ( readTr - > readTr . getRange ( start , end , limit , reverse , mode ) . asList ( ) ) ; <nl> - return pushRange ( inst , range , ( byte [ ] ) params . get ( 9 ) ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 10 ) . thenComposeAsync ( params - > { <nl> + int limit = StackUtils . getInt ( params . get ( 6 ) ) ; <nl> + boolean reverse = StackUtils . getBoolean ( params . get ( 7 ) ) ; <nl> + StreamingMode mode = inst . context . streamingModeFromCode ( <nl> + StackUtils . getInt ( params . get ( 8 ) , StreamingMode . ITERATOR . code ( ) ) ) ; <nl> + <nl> + KeySelector start = StackUtils . createSelector ( params . get ( 0 ) , params . get ( 1 ) , params . get ( 2 ) ) ; <nl> + KeySelector end = StackUtils . createSelector ( params . get ( 3 ) , params . get ( 4 ) , params . get ( 5 ) ) ; <nl> + <nl> + CompletableFuture < List < KeyValue > > range = inst . readTcx . readAsync ( readTr - > readTr . getRange ( start , end , limit , reverse , mode ) . asList ( ) ) ; <nl> + return pushRange ( inst , range , ( byte [ ] ) params . get ( 9 ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . GET_RANGE_STARTS_WITH ) { <nl> - return inst . popParams ( 4 ) . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < Object > params ) { <nl> - int limit = StackUtils . getInt ( params . get ( 1 ) ) ; <nl> - boolean reverse = StackUtils . getBoolean ( params . get ( 2 ) ) ; <nl> - StreamingMode mode = inst . context . streamingModeFromCode ( <nl> - StackUtils . getInt ( params . get ( 3 ) , StreamingMode . ITERATOR . code ( ) ) ) ; <nl> - <nl> - CompletableFuture < List < KeyValue > > range = inst . readTcx . readAsync ( readTr - > readTr . getRange ( Range . startsWith ( ( byte [ ] ) params . get ( 0 ) ) , limit , reverse , mode ) . asList ( ) ) ; <nl> - return pushRange ( inst , range ) ; <nl> - } <nl> + return inst . popParams ( 4 ) . thenComposeAsync ( params - > { <nl> + int limit = StackUtils . getInt ( params . get ( 1 ) ) ; <nl> + boolean reverse = StackUtils . getBoolean ( params . 
get ( 2 ) ) ; <nl> + StreamingMode mode = inst . context . streamingModeFromCode ( <nl> + StackUtils . getInt ( params . get ( 3 ) , StreamingMode . ITERATOR . code ( ) ) ) ; <nl> + <nl> + CompletableFuture < List < KeyValue > > range = inst . readTcx . readAsync ( readTr - > readTr . getRange ( Range . startsWith ( ( byte [ ] ) params . get ( 0 ) ) , limit , reverse , mode ) . asList ( ) ) ; <nl> + return pushRange ( inst , range ) ; <nl> } ) ; <nl> } <nl> else if ( op = = StackOperation . GET_KEY ) { <nl> - return inst . popParams ( 4 ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > params ) { <nl> - KeySelector start = StackUtils . createSelector ( params . get ( 0 ) , params . get ( 1 ) , params . get ( 2 ) ) ; <nl> - inst . push ( inst . readTcx . readAsync ( readTr - > executeGetKey ( readTr . getKey ( start ) , ( byte [ ] ) params . get ( 3 ) ) ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParams ( 4 ) . thenAcceptAsync ( params - > { <nl> + KeySelector start = StackUtils . createSelector ( params . get ( 0 ) , params . get ( 1 ) , params . get ( 2 ) ) ; <nl> + inst . push ( inst . readTcx . readAsync ( readTr - > executeGetKey ( readTr . getKey ( start ) , ( byte [ ] ) params . get ( 3 ) ) ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . GET_READ_VERSION ) { <nl> - return inst . readTr . getReadVersion ( ) . thenApplyAsync ( new Function < Long , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Long readVersion ) { <nl> - inst . context . lastVersion = readVersion ; <nl> - inst . push ( " GOT_READ_VERSION " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . readTr . getReadVersion ( ) . thenAcceptAsync ( readVersion - > { <nl> + inst . context . lastVersion = readVersion ; <nl> + inst . push ( " GOT_READ_VERSION " . getBytes ( ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . GET_COMMITTED_VERSION ) { <nl> try { <nl> else if ( op = = StackOperation . GET_COMMITTED_VERSION ) { <nl> StackUtils . pushError ( inst , e ) ; <nl> } <nl> <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . GET_VERSIONSTAMP ) { <nl> try { <nl> else if ( op = = StackOperation . GET_VERSIONSTAMP ) { <nl> StackUtils . pushError ( inst , e ) ; <nl> } <nl> <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . SET_READ_VERSION ) { <nl> if ( inst . context . lastVersion = = null ) <nl> throw new IllegalArgumentException ( " Read version has not been read " ) ; <nl> inst . tr . setReadVersion ( inst . context . lastVersion ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + return AsyncUtil . DONE ; <nl> } <nl> else if ( op = = StackOperation . ON_ERROR ) { <nl> - return inst . popParam ( ) . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Object param ) { <nl> - int errorCode = StackUtils . getInt ( param ) ; <nl> - <nl> - / / 1102 ( future_released ) and 2015 ( future_not_set ) are not errors to Java . <nl> - / / This is never encountered by user code , so we have to do something rather <nl> - / / messy here to get compatibility with other languages . 
<nl> - / / <nl> - / / First , try on error with a retryable error . If it fails , then the transaction is in <nl> - / / a failed state and we should rethrow the error . Otherwise , throw the original error . <nl> - boolean filteredError = errorCode = = 1102 | | errorCode = = 2015 ; <nl> - <nl> - FDBException err = new FDBException ( " Fake testing error " , filteredError ? 1020 : errorCode ) ; <nl> - final Transaction oldTr = inst . tr ; <nl> - CompletableFuture < Void > f = oldTr . onError ( err ) <nl> - . whenComplete ( ( tr , t ) - > { <nl> - if ( t ! = null ) { <nl> - inst . context . newTransaction ( oldTr ) ; / / Other bindings allow reuse of non - retryable transactions , so we need to emulate that behavior . <nl> - } <nl> - else { <nl> - inst . context . updateCurrentTransaction ( oldTr , tr ) ; <nl> - } <nl> - } ) <nl> - . thenApply ( v - > null ) ; <nl> - <nl> - if ( filteredError ) { <nl> - f . join ( ) ; <nl> - throw new FDBException ( " Fake testing error " , errorCode ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( param - > { <nl> + int errorCode = StackUtils . getInt ( param ) ; <nl> + <nl> + / / 1102 ( future_released ) and 2015 ( future_not_set ) are not errors to Java . <nl> + / / This is never encountered by user code , so we have to do something rather <nl> + / / messy here to get compatibility with other languages . <nl> + / / <nl> + / / First , try on error with a retryable error . If it fails , then the transaction is in <nl> + / / a failed state and we should rethrow the error . Otherwise , throw the original error . <nl> + boolean filteredError = errorCode = = 1102 | | errorCode = = 2015 ; <nl> + <nl> + FDBException err = new FDBException ( " Fake testing error " , filteredError ? 1020 : errorCode ) ; <nl> + final Transaction oldTr = inst . tr ; <nl> + CompletableFuture < Void > f = oldTr . onError ( err ) . whenComplete ( ( tr , t ) - > { <nl> + if ( t ! = null ) { <nl> + inst . context . newTransaction ( oldTr ) ; / / Other bindings allow reuse of non - retryable transactions , so we need to emulate that behavior . <nl> + } <nl> + else { <nl> + inst . setTransaction ( oldTr , tr ) ; <nl> } <nl> + } ) . thenApply ( v - > null ) ; <nl> <nl> - inst . push ( f ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> + if ( filteredError ) { <nl> + f . join ( ) ; <nl> + throw new FDBException ( " Fake testing error " , errorCode ) ; <nl> } <nl> - } ) ; <nl> + <nl> + inst . push ( f ) ; <nl> + return AsyncUtil . DONE ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . SUB ) { <nl> - return inst . popParams ( 2 ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > params ) { <nl> - BigInteger result = StackUtils . getBigInteger ( params . get ( 0 ) ) . subtract ( <nl> - StackUtils . getBigInteger ( params . get ( 1 ) ) <nl> - ) ; <nl> - inst . push ( result ) ; <nl> - return null ; <nl> - } <nl> + return inst . popParams ( 2 ) . thenAcceptAsync ( params - > { <nl> + BigInteger result = StackUtils . getBigInteger ( params . get ( 0 ) ) . subtract ( <nl> + StackUtils . getBigInteger ( params . get ( 1 ) ) <nl> + ) ; <nl> + inst . push ( result ) ; <nl> } ) ; <nl> } <nl> else if ( op = = StackOperation . CONCAT ) { <nl> - return inst . popParams ( 2 ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > params ) { <nl> - if ( params . 
get ( 0 ) instanceof String ) { <nl> - inst . push ( ( String ) params . get ( 0 ) + ( String ) params . get ( 1 ) ) ; <nl> - } <nl> - else { <nl> - inst . push ( ByteArrayUtil . join ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ) ; <nl> - } <nl> - <nl> - return null ; <nl> + return inst . popParams ( 2 ) . thenAcceptAsync ( params - > { <nl> + if ( params . get ( 0 ) instanceof String ) { <nl> + inst . push ( ( String ) params . get ( 0 ) + ( String ) params . get ( 1 ) ) ; <nl> } <nl> - } ) ; <nl> + else { <nl> + inst . push ( ByteArrayUtil . join ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ) ; <nl> + } <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . TUPLE_PACK ) { <nl> - return inst . popParam ( ) . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Object param ) { <nl> - int tupleSize = StackUtils . getInt ( param ) ; <nl> - / / System . out . println ( inst . context . preStr + " - " + " Packing top " + tupleSize + " items from stack " ) ; <nl> - return inst . popParams ( tupleSize ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > elements ) { <nl> - byte [ ] coded = Tuple . fromItems ( elements ) . pack ( ) ; <nl> - / / System . out . println ( inst . context . preStr + " - " + " - > result ' " + ByteArrayUtil . printable ( coded ) + " ' " ) ; <nl> - inst . push ( coded ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( param - > { <nl> + int tupleSize = StackUtils . getInt ( param ) ; <nl> + / / System . out . println ( inst . context . preStr + " - " + " Packing top " + tupleSize + " items from stack " ) ; <nl> + return inst . popParams ( tupleSize ) . thenAcceptAsync ( elements - > { <nl> + byte [ ] coded = Tuple . fromItems ( elements ) . pack ( ) ; <nl> + / / System . out . println ( inst . context . preStr + " - " + " - > result ' " + ByteArrayUtil . printable ( coded ) + " ' " ) ; <nl> + inst . push ( coded ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . TUPLE_PACK_WITH_VERSIONSTAMP ) { <nl> - return inst . popParams ( 2 ) . thenComposeAsync ( new Function < List < Object > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < Object > params ) { <nl> - byte [ ] prefix = ( byte [ ] ) params . get ( 0 ) ; <nl> - int tupleSize = StackUtils . getInt ( params . get ( 1 ) ) ; <nl> - / / System . out . println ( inst . context . preStr + " - " + " Packing top " + tupleSize + " items from stack " ) ; <nl> - return inst . popParams ( tupleSize ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > elements ) { <nl> - Tuple tuple = Tuple . fromItems ( elements ) ; <nl> - if ( ! tuple . hasIncompleteVersionstamp ( ) & & Math . random ( ) < 0 . 5 ) { <nl> - inst . push ( " ERROR : NONE " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> - try { <nl> - byte [ ] coded = tuple . packWithVersionstamp ( prefix ) ; <nl> - inst . push ( " OK " . getBytes ( ) ) ; <nl> - inst . push ( coded ) ; <nl> - } catch ( IllegalArgumentException e ) { <nl> - if ( e . getMessage ( ) . startsWith ( " No incomplete " ) ) { <nl> - inst . push ( " ERROR : NONE " . 
getBytes ( ) ) ; <nl> - } else { <nl> - inst . push ( " ERROR : MULTIPLE " . getBytes ( ) ) ; <nl> - } <nl> - } <nl> - return null ; <nl> + return inst . popParams ( 2 ) . thenComposeAsync ( params - > { <nl> + byte [ ] prefix = ( byte [ ] ) params . get ( 0 ) ; <nl> + int tupleSize = StackUtils . getInt ( params . get ( 1 ) ) ; <nl> + / / System . out . println ( inst . context . preStr + " - " + " Packing top " + tupleSize + " items from stack " ) ; <nl> + return inst . popParams ( tupleSize ) . thenAcceptAsync ( elements - > { <nl> + Tuple tuple = Tuple . fromItems ( elements ) ; <nl> + if ( ! tuple . hasIncompleteVersionstamp ( ) & & Math . random ( ) < 0 . 5 ) { <nl> + inst . push ( " ERROR : NONE " . getBytes ( ) ) ; <nl> + return ; <nl> + } <nl> + try { <nl> + byte [ ] coded = tuple . packWithVersionstamp ( prefix ) ; <nl> + inst . push ( " OK " . getBytes ( ) ) ; <nl> + inst . push ( coded ) ; <nl> + } catch ( IllegalArgumentException e ) { <nl> + if ( e . getMessage ( ) . startsWith ( " No incomplete " ) ) { <nl> + inst . push ( " ERROR : NONE " . getBytes ( ) ) ; <nl> + } else { <nl> + inst . push ( " ERROR : MULTIPLE " . getBytes ( ) ) ; <nl> } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + } <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . TUPLE_UNPACK ) { <nl> - return inst . popParam ( ) . thenApplyAsync ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - / * System . out . println ( inst . context . preStr + " - " + " Unpacking tuple code : " + <nl> - ByteArrayUtil . printable ( ( byte [ ] ) param ) ) ; * / <nl> - Tuple t = Tuple . fromBytes ( ( byte [ ] ) param ) ; <nl> - for ( Object o : t . getItems ( ) ) { <nl> - byte [ ] itemBytes = Tuple . from ( o ) . pack ( ) ; <nl> - inst . push ( itemBytes ) ; <nl> - } <nl> - return null ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + / * System . out . println ( inst . context . preStr + " - " + " Unpacking tuple code : " + <nl> + ByteArrayUtil . printable ( ( byte [ ] ) param ) ) ; * / <nl> + Tuple t = Tuple . fromBytes ( ( byte [ ] ) param ) ; <nl> + for ( Object o : t . getItems ( ) ) { <nl> + byte [ ] itemBytes = Tuple . from ( o ) . pack ( ) ; <nl> + inst . push ( itemBytes ) ; <nl> } <nl> - } ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . TUPLE_RANGE ) { <nl> - return inst . popParam ( ) . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Object param ) { <nl> - int tupleSize = StackUtils . getInt ( param ) ; <nl> - / / System . out . println ( inst . context . preStr + " - " + " Tuple range with top " + tupleSize + " items from stack " ) ; <nl> - return inst . popParams ( tupleSize ) . thenApplyAsync ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > elements ) { <nl> - Range range = Tuple . fromItems ( elements ) . range ( ) ; <nl> - inst . push ( range . begin ) ; <nl> - inst . push ( range . end ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( param - > { <nl> + int tupleSize = StackUtils . getInt ( param ) ; <nl> + / / System . out . println ( inst . context . preStr + " - " + " Tuple range with top " + tupleSize + " items from stack " ) ; <nl> + return inst . popParams ( tupleSize ) . 
thenAcceptAsync ( elements - > { <nl> + Range range = Tuple . fromItems ( elements ) . range ( ) ; <nl> + inst . push ( range . begin ) ; <nl> + inst . push ( range . end ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . TUPLE_SORT ) { <nl> - return inst . popParam ( ) . thenComposeAsync ( new Function < Object , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Object param ) { <nl> - final int listSize = StackUtils . getInt ( param ) ; <nl> - return inst . popParams ( listSize ) . thenApply ( new Function < List < Object > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( List < Object > rawElements ) { <nl> - List < Tuple > tuples = new ArrayList < Tuple > ( listSize ) ; <nl> - for ( Object o : rawElements ) { <nl> - tuples . add ( Tuple . fromBytes ( ( byte [ ] ) o ) ) ; <nl> - } <nl> - Collections . sort ( tuples ) ; <nl> - for ( Tuple t : tuples ) { <nl> - inst . push ( t . pack ( ) ) ; <nl> - } <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( param - > { <nl> + final int listSize = StackUtils . getInt ( param ) ; <nl> + return inst . popParams ( listSize ) . thenAcceptAsync ( rawElements - > { <nl> + List < Tuple > tuples = new ArrayList < > ( listSize ) ; <nl> + for ( Object o : rawElements ) { <nl> + tuples . add ( Tuple . fromBytes ( ( byte [ ] ) o ) ) ; <nl> + } <nl> + Collections . sort ( tuples ) ; <nl> + for ( Tuple t : tuples ) { <nl> + inst . push ( t . pack ( ) ) ; <nl> + } <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . ENCODE_FLOAT ) { <nl> - return inst . popParam ( ) . thenApply ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - byte [ ] fBytes = ( byte [ ] ) param ; <nl> - float value = ByteBuffer . wrap ( fBytes ) . order ( ByteOrder . BIG_ENDIAN ) . getFloat ( ) ; <nl> - inst . push ( value ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + byte [ ] fBytes = ( byte [ ] ) param ; <nl> + float value = ByteBuffer . wrap ( fBytes ) . order ( ByteOrder . BIG_ENDIAN ) . getFloat ( ) ; <nl> + inst . push ( value ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . ENCODE_DOUBLE ) { <nl> - return inst . popParam ( ) . thenApply ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - byte [ ] dBytes = ( byte [ ] ) param ; <nl> - double value = ByteBuffer . wrap ( dBytes ) . order ( ByteOrder . BIG_ENDIAN ) . getDouble ( ) ; <nl> - inst . push ( value ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + byte [ ] dBytes = ( byte [ ] ) param ; <nl> + double value = ByteBuffer . wrap ( dBytes ) . order ( ByteOrder . BIG_ENDIAN ) . getDouble ( ) ; <nl> + inst . push ( value ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . DECODE_FLOAT ) { <nl> - return inst . popParam ( ) . thenApply ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - float value = ( ( Number ) param ) . floatValue ( ) ; <nl> - inst . push ( ByteBuffer . allocate ( 4 ) . order ( ByteOrder . BIG_ENDIAN ) . putFloat ( value ) . 
array ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + float value = ( ( Number ) param ) . floatValue ( ) ; <nl> + inst . push ( ByteBuffer . allocate ( 4 ) . order ( ByteOrder . BIG_ENDIAN ) . putFloat ( value ) . array ( ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . DECODE_DOUBLE ) { <nl> - return inst . popParam ( ) . thenApply ( new Function < Object , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Object param ) { <nl> - double value = ( ( Number ) param ) . doubleValue ( ) ; <nl> - inst . push ( ByteBuffer . allocate ( 8 ) . order ( ByteOrder . BIG_ENDIAN ) . putDouble ( value ) . array ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . popParam ( ) . thenAcceptAsync ( param - > { <nl> + double value = ( ( Number ) param ) . doubleValue ( ) ; <nl> + inst . push ( ByteBuffer . allocate ( 8 ) . order ( ByteOrder . BIG_ENDIAN ) . putDouble ( value ) . array ( ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> else if ( op = = StackOperation . UNIT_TESTS ) { <nl> inst . context . db . options ( ) . setLocationCacheSize ( 100001 ) ; <nl> else if ( op = = StackOperation . UNIT_TESTS ) { <nl> } ) ; <nl> } <nl> else if ( op = = StackOperation . LOG_STACK ) { <nl> - return inst . popParam ( ) . thenComposeAsync ( prefix - > doLogStack ( inst , ( byte [ ] ) prefix ) ) ; <nl> + return inst . popParam ( ) . thenComposeAsync ( prefix - > doLogStack ( inst , ( byte [ ] ) prefix ) , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> <nl> throw new IllegalArgumentException ( " Unrecognized ( or unimplemented ) operation " ) ; <nl> else if ( op = = StackOperation . LOG_STACK ) { <nl> <nl> private static CompletableFuture < Void > executeMutation ( final Instruction inst , Function < Transaction , CompletableFuture < Void > > r ) { <nl> / / run this with a retry loop <nl> - return inst . tcx . runAsync ( r ) . thenApplyAsync ( new Function < Void , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Void a ) { <nl> - if ( inst . isDatabase ) <nl> - inst . push ( " RESULT_NOT_PRESENT " . getBytes ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + return inst . tcx . runAsync ( r ) . thenRunAsync ( ( ) - > { <nl> + if ( inst . isDatabase ) <nl> + inst . push ( " RESULT_NOT_PRESENT " . getBytes ( ) ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> <nl> private static CompletableFuture < byte [ ] > executeGetKey ( final CompletableFuture < byte [ ] > keyFuture , final byte [ ] prefixFilter ) { <nl> - return keyFuture . thenApplyAsync ( new Function < byte [ ] , byte [ ] > ( ) { <nl> - @ Override <nl> - public byte [ ] apply ( byte [ ] key ) { <nl> - if ( ByteArrayUtil . startsWith ( key , prefixFilter ) ) { <nl> - return key ; <nl> - } <nl> - else if ( ByteArrayUtil . compareUnsigned ( key , prefixFilter ) < 0 ) { <nl> - return prefixFilter ; <nl> - } <nl> - else { <nl> - return ByteArrayUtil . strinc ( prefixFilter ) ; <nl> - } <nl> + return keyFuture . thenApplyAsync ( key - > { <nl> + if ( ByteArrayUtil . startsWith ( key , prefixFilter ) ) { <nl> + return key ; <nl> } <nl> - } ) ; <nl> + else if ( ByteArrayUtil . compareUnsigned ( key , prefixFilter ) < 0 ) { <nl> + return prefixFilter ; <nl> + } <nl> + else { <nl> + return ByteArrayUtil . strinc ( prefixFilter ) ; <nl> + } <nl> + } , FDB . 
DEFAULT_EXECUTOR ) ; <nl> } <nl> <nl> private static CompletableFuture < Void > doLogStack ( final Instruction inst , final byte [ ] prefix ) { <nl> else if ( ByteArrayUtil . compareUnsigned ( key , prefixFilter ) < 0 ) { <nl> while ( inst . size ( ) > 0 ) { <nl> entries . put ( inst . size ( ) - 1 , inst . pop ( ) ) ; <nl> if ( entries . size ( ) = = 100 ) { <nl> - return logStack ( inst . context . db , entries , prefix ) . thenComposeAsync ( v - > doLogStack ( inst , prefix ) ) ; <nl> + return logStack ( inst . context . db , entries , prefix ) . thenComposeAsync ( v - > doLogStack ( inst , prefix ) , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> } <nl> <nl> else if ( ByteArrayUtil . compareUnsigned ( key , prefixFilter ) < 0 ) { <nl> tr . set ( pk , pv . length < 40000 ? pv : Arrays . copyOfRange ( pv , 0 , 40000 ) ) ; <nl> } <nl> <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } ) ; <nl> - } <nl> - private static CompletableFuture < Void > logStack ( final Instruction inst , final byte [ ] prefix , int i ) { <nl> - / / System . out . println ( " Logging stack at " + i ) ; <nl> - while ( inst . size ( ) > 0 ) { <nl> - StackEntry e = inst . pop ( ) ; <nl> - byte [ ] pk = Tuple . from ( i , e . idx ) . pack ( prefix ) ; <nl> - byte [ ] pv = Tuple . from ( StackUtils . serializeFuture ( e . value ) ) . pack ( ) ; <nl> - inst . tr . set ( pk , pv . length < 40000 ? pv : Arrays . copyOfRange ( pv , 0 , 40000 ) ) ; <nl> - i - - ; <nl> - if ( i % 100 = = 0 ) { <nl> - final int saved = i ; <nl> - return inst . tr . commit ( ) . thenComposeAsync ( new Function < Void , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Void o ) { <nl> - inst . tr = inst . context . newTransaction ( ) ; <nl> - return logStack ( inst , prefix , saved ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } <nl> - return inst . tr . commit ( ) . thenApplyAsync ( new Function < Void , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Void a ) { <nl> - inst . tr = inst . context . newTransaction ( ) ; <nl> - return null ; <nl> - } <nl> + return AsyncUtil . DONE ; <nl> } ) ; <nl> } <nl> <nl> public Void apply ( Void a ) { <nl> } <nl> @ Override <nl> public Void apply ( List < KeyValue > list ) { <nl> - List < byte [ ] > o = new LinkedList < byte [ ] > ( ) ; <nl> + List < byte [ ] > o = new LinkedList < > ( ) ; <nl> for ( KeyValue kv : list ) { <nl> if ( prefixFilter = = null | | ByteArrayUtil . startsWith ( kv . getKey ( ) , prefixFilter ) ) { <nl> o . add ( kv . getKey ( ) ) ; <nl> Context createContext ( byte [ ] prefix ) { <nl> } * / <nl> <nl> if ( inst . op . startsWith ( DIRECTORY_PREFIX ) ) <nl> - return directoryExtension . processInstruction ( inst ) ; <nl> + return directoryExtension . processInstruction ( inst ) . whenComplete ( ( x , t ) - > inst . releaseTransaction ( ) ) ; <nl> else { <nl> - return AsyncUtil . composeExceptionally ( processInstruction ( inst ) , <nl> - new Function < Throwable , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Throwable e ) { <nl> - FDBException ex = StackUtils . getRootFDBException ( e ) ; <nl> - if ( ex ! = null ) { <nl> - StackUtils . pushError ( inst , ex ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> - else { <nl> - CompletableFuture < Void > f = new CompletableFuture < Void > ( ) ; <nl> - f . completeExceptionally ( e ) ; <nl> - return f ; <nl> - } <nl> - } <nl> - } ) ; <nl> + return AsyncUtil . 
composeExceptionally ( processInstruction ( inst ) , ( e ) - > { <nl> + FDBException ex = StackUtils . getRootFDBException ( e ) ; <nl> + if ( ex ! = null ) { <nl> + StackUtils . pushError ( inst , ex ) ; <nl> + return AsyncUtil . DONE ; <nl> + } <nl> + else { <nl> + CompletableFuture < Void > f = new CompletableFuture < > ( ) ; <nl> + f . completeExceptionally ( e ) ; <nl> + return f ; <nl> + } <nl> + } ) <nl> + . whenComplete ( ( x , t ) - > inst . releaseTransaction ( ) ) ; <nl> } <nl> } <nl> <nl> Context createContext ( byte [ ] prefix ) { <nl> } <nl> <nl> CompletableFuture < Void > executeRemainingOperations ( ) { <nl> - Transaction t = db . createTransaction ( ) ; <nl> - <nl> - final Function < Void , CompletableFuture < Void > > processNext = new Function < Void , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( Void ignore ) { <nl> - instructionIndex + + ; <nl> - return executeRemainingOperations ( ) ; <nl> - } <nl> + final Function < Void , CompletableFuture < Void > > processNext = ignore - > { <nl> + instructionIndex + + ; <nl> + return executeRemainingOperations ( ) ; <nl> } ; <nl> <nl> if ( operations = = null | | + + currentOp = = operations . size ( ) ) { <nl> - return t . getRange ( nextKey , endKey , 1000 ) . asList ( ) <nl> - . thenComposeAsync ( new Function < List < KeyValue > , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( List < KeyValue > next ) { <nl> - if ( next . size ( ) < 1 ) { <nl> - / / System . out . println ( " No key found after : " + ByteArrayUtil . printable ( nextKey . getKey ( ) ) ) ; <nl> - return CompletableFuture . completedFuture ( null ) ; <nl> - } <nl> + Transaction tr = db . createTransaction ( ) ; <nl> + <nl> + return tr . getRange ( nextKey , endKey , 1000 ) . asList ( ) <nl> + . whenComplete ( ( x , t ) - > tr . close ( ) ) <nl> + . thenComposeAsync ( next - > { <nl> + if ( next . size ( ) < 1 ) { <nl> + / / System . out . println ( " No key found after : " + ByteArrayUtil . printable ( nextKey . getKey ( ) ) ) ; <nl> + return AsyncUtil . DONE ; <nl> + } <nl> <nl> - operations = next ; <nl> - currentOp = 0 ; <nl> - nextKey = KeySelector . firstGreaterThan ( next . get ( next . size ( ) - 1 ) . getKey ( ) ) ; <nl> + operations = next ; <nl> + currentOp = 0 ; <nl> + nextKey = KeySelector . firstGreaterThan ( next . get ( next . size ( ) - 1 ) . getKey ( ) ) ; <nl> <nl> - return processOp ( next . get ( 0 ) . getValue ( ) ) . thenComposeAsync ( processNext ) ; <nl> - } <nl> - } ) ; <nl> + return processOp ( next . get ( 0 ) . getValue ( ) ) . thenComposeAsync ( processNext ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> <nl> - return processOp ( operations . get ( currentOp ) . getValue ( ) ) . thenComposeAsync ( processNext ) ; <nl> + return processOp ( operations . get ( currentOp ) . getValue ( ) ) . thenComposeAsync ( processNext , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> } <nl> <nl> Context createContext ( byte [ ] prefix ) { <nl> } <nl> final int idx = entry . idx ; <nl> <nl> - @ SuppressWarnings ( " unchecked " ) <nl> - final CompletableFuture < Object > future = ( CompletableFuture < Object > ) item ; <nl> + final CompletableFuture < ? > future = ( CompletableFuture < ? > ) item ; <nl> CompletableFuture < Object > flattened = flatten ( future ) ; <nl> <nl> - return flattened . 
thenApplyAsync ( new Function < Object , StackEntry > ( ) { <nl> - @ Override <nl> - public StackEntry apply ( Object o ) { <nl> - return new StackEntry ( idx , o ) ; <nl> - } <nl> - } ) ; <nl> + return flattened . thenApplyAsync ( o - > new StackEntry ( idx , o ) ) ; <nl> } <nl> <nl> - private static CompletableFuture < Object > flatten ( final CompletableFuture < Object > future ) { <nl> - CompletableFuture < Object > f = future . thenApplyAsync ( new Function < Object , Object > ( ) { <nl> - @ Override <nl> - public Object apply ( Object o ) { <nl> - if ( o = = null ) <nl> - return " RESULT_NOT_PRESENT " . getBytes ( ) ; <nl> - return o ; <nl> - } <nl> + private static CompletableFuture < Object > flatten ( final CompletableFuture < ? > future ) { <nl> + CompletableFuture < Object > f = future . thenApply ( o - > { <nl> + if ( o = = null ) <nl> + return " RESULT_NOT_PRESENT " . getBytes ( ) ; <nl> + return o ; <nl> } ) ; <nl> <nl> - return AsyncUtil . composeExceptionally ( f , new Function < Throwable , CompletableFuture < Object > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Object > apply ( Throwable t ) { <nl> - FDBException e = StackUtils . getRootFDBException ( t ) ; <nl> - if ( e ! = null ) { <nl> - return CompletableFuture . completedFuture ( StackUtils . getErrorBytes ( e ) ) ; <nl> - } <nl> - <nl> - CompletableFuture < Object > error = new CompletableFuture < Object > ( ) ; <nl> - error . completeExceptionally ( t ) ; <nl> - return error ; <nl> + return AsyncUtil . composeExceptionally ( f , t - > { <nl> + FDBException e = StackUtils . getRootFDBException ( t ) ; <nl> + if ( e ! = null ) { <nl> + return CompletableFuture . completedFuture ( StackUtils . getErrorBytes ( e ) ) ; <nl> } <nl> + <nl> + CompletableFuture < Object > error = new CompletableFuture < > ( ) ; <nl> + error . completeExceptionally ( t ) ; <nl> + return error ; <nl> } ) ; <nl> } <nl> <nl> - <nl> / * * <nl> * Run a stack - machine based test . <nl> * / <nl> public static void main ( String [ ] args ) { <nl> byte [ ] bs = db . createTransaction ( ) . get ( key ) . get ( ) ; <nl> System . out . println ( " output of " + ByteArrayUtil . printable ( key ) + " as : " + ByteArrayUtil . printable ( bs ) ) ; * / <nl> <nl> + db . close ( ) ; <nl> + System . gc ( ) ; <nl> + <nl> / * fdb . stopNetwork ( ) ; <nl> executor . shutdown ( ) ; * / <nl> } <nl> <nl> + private AsyncStackTester ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / BlockingBenchmark . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / BlockingBenchmark . java <nl> public static void main ( String [ ] args ) throws InterruptedException { <nl> <nl> Transaction tr = database . createTransaction ( ) ; <nl> tr . setReadVersion ( 100000 ) ; <nl> - final Function < Long , Long > identity = new Function < Long , Long > ( ) { <nl> - @ Override <nl> - public Long apply ( Long o ) { <nl> - return o ; <nl> - } <nl> - } ; <nl> - <nl> <nl> System . out . println ( " readVersion ( ) . join ( ) : " ) ; <nl> - runTests ( tr , new Function < CompletableFuture < Long > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( CompletableFuture < Long > o ) { <nl> - try { <nl> - o . join ( ) ; <nl> - } catch ( Exception e ) { } <nl> - <nl> - return null ; <nl> + runTests ( tr , o - > { <nl> + try { <nl> + o . join ( ) ; <nl> + } catch ( Exception e ) { <nl> + / / Ignore <nl> } <nl> + return null ; <nl> } ) ; <nl> <nl> System . out . 
println ( " readVersion ( ) . get ( ) : " ) ; <nl> - runTests ( tr , new Function < CompletableFuture < Long > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( CompletableFuture < Long > o ) { <nl> - try { <nl> - o . get ( ) ; <nl> - } catch ( InterruptedException e ) { <nl> - / / TODO Auto - generated catch block <nl> - e . printStackTrace ( ) ; <nl> - } catch ( Exception e ) { } <nl> - <nl> - return null ; <nl> + runTests ( tr , o - > { <nl> + try { <nl> + o . get ( ) ; <nl> + } catch ( Exception e ) { <nl> + / / Ignore <nl> } <nl> + return null ; <nl> } ) ; <nl> <nl> System . out . println ( " readVersion ( ) . thenApplyAsync ( identity ) . get ( ) : " ) ; <nl> - runTests ( tr , new Function < CompletableFuture < Long > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( CompletableFuture < Long > o ) { <nl> - try { <nl> - o . thenApplyAsync ( identity ) . get ( ) ; <nl> - } catch ( Exception e ) { } <nl> - <nl> - return null ; <nl> + runTests ( tr , o - > { <nl> + try { <nl> + o . thenApplyAsync ( Function . identity ( ) , FDB . DEFAULT_EXECUTOR ) . get ( ) ; <nl> + } catch ( Exception e ) { <nl> + / / Ignore <nl> } <nl> + return null ; <nl> } ) ; <nl> <nl> System . out . println ( " readVersion ( ) . thenApplyAsync ^ 10 ( identity ) . get ( ) : " ) ; <nl> - runTests ( tr , new Function < CompletableFuture < Long > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( CompletableFuture < Long > o ) { <nl> - for ( int i = 0 ; i < 10 ; i + + ) <nl> - o = o . thenApplyAsync ( identity ) ; <nl> - try { <nl> - o . get ( ) ; <nl> - } catch ( Exception e ) { } <nl> - <nl> - return null ; <nl> + runTests ( tr , o - > { <nl> + for ( int i = 0 ; i < 10 ; i + + ) <nl> + o = o . thenApplyAsync ( Function . identity ( ) , FDB . DEFAULT_EXECUTOR ) ; <nl> + try { <nl> + o . get ( ) ; <nl> + } catch ( Exception e ) { <nl> + / / Ignore <nl> } <nl> + return null ; <nl> } ) ; <nl> <nl> System . out . println ( " readVersion ( ) . get ^ 100 ( ) : " ) ; <nl> - runTests ( tr , new Function < CompletableFuture < Long > , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( CompletableFuture < Long > o ) { <nl> - for ( int i = 0 ; i < 100 ; i + + ) { <nl> - try { <nl> - o . get ( ) ; <nl> - } catch ( Exception e ) { } <nl> + runTests ( tr , o - > { <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + try { <nl> + o . get ( ) ; <nl> + } catch ( Exception e ) { <nl> + / / Ignore <nl> } <nl> - return null ; <nl> } <nl> + return null ; <nl> } ) ; <nl> <nl> } <nl> private static void runTests ( Transaction tr , Function < CompletableFuture < Long > , V <nl> for ( int r = 0 ; r < 4 ; r + + ) { <nl> long start = System . currentTimeMillis ( ) ; <nl> for ( int i = 0 ; i < REPS ; i + + ) { <nl> - blockMethod . apply ( tr . getReadVersion ( ) ) ; <nl> + blockMethod . apply ( tr . getReadVersion ( ) ) ; <nl> } <nl> <nl> long taken = System . currentTimeMillis ( ) - start ; <nl> private static void runTests ( Transaction tr , Function < CompletableFuture < Long > , V <nl> System . out . println ( " " + REPS + " done in " + taken + " ms - > " + ( REPS / ( taken ) ) + " KHz " ) ; <nl> } <nl> } <nl> + <nl> + private BlockingBenchmark ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / ConcurrentGetSetGet . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / ConcurrentGetSetGet . java <nl> <nl> import java . nio . charset . Charset ; <nl> import java . security . 
SecureRandom ; <nl> import java . util . Random ; <nl> - import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . Semaphore ; <nl> import java . util . concurrent . atomic . AtomicInteger ; <nl> - import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . FDB ; <nl> - import com . apple . foundationdb . Transaction ; <nl> <nl> public class ConcurrentGetSetGet { <nl> public static final Charset UTF8 = Charset . forName ( " UTF - 8 " ) ; <nl> public static void main ( String [ ] args ) { <nl> new ConcurrentGetSetGet ( ) . apply ( database ) ; <nl> } <nl> <nl> - public void apply ( Database d ) { <nl> - new Thread ( new Runnable ( ) { <nl> - @ Override <nl> - public void run ( ) { <nl> - int loops = 0 ; <nl> - try { <nl> - Thread . sleep ( 5000 ) ; <nl> - System . out . println ( " Loop " + loops + + + " : " ) ; <nl> - System . out . println ( " attempts : " + attemptCount . get ( ) ) ; <nl> - System . out . println ( " gets complete : " + getCompleteCount . get ( ) ) ; <nl> - System . out . println ( " errors : " + errorCount . get ( ) ) ; <nl> - System . out . println ( " sem : " + semaphore ) ; <nl> - System . out . println ( ) ; <nl> - } catch ( InterruptedException e ) { <nl> - / / TODO Auto - generated catch block <nl> - e . printStackTrace ( ) ; <nl> - } <nl> - <nl> + public void apply ( Database db ) { <nl> + new Thread ( ( ) - > { <nl> + int loops = 0 ; <nl> + try { <nl> + Thread . sleep ( 5000 ) ; <nl> + System . out . println ( " Loop " + loops + + + " : " ) ; <nl> + System . out . println ( " attempts : " + attemptCount . get ( ) ) ; <nl> + System . out . println ( " gets complete : " + getCompleteCount . get ( ) ) ; <nl> + System . out . println ( " errors : " + errorCount . get ( ) ) ; <nl> + System . out . println ( " sem : " + semaphore ) ; <nl> + System . out . println ( ) ; <nl> + } catch ( InterruptedException e ) { <nl> + / / TODO Auto - generated catch block <nl> + e . printStackTrace ( ) ; <nl> } <nl> + <nl> } ) . start ( ) ; <nl> final Random random = new SecureRandom ( ) ; <nl> try { <nl> public void run ( ) { <nl> System . out . println ( " Waited " + wait + " ms " ) ; <nl> } <nl> current = System . currentTimeMillis ( ) ; <nl> - d . runAsync ( new Function < Transaction , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( final Transaction r ) { <nl> - attemptCount . addAndGet ( 1 ) ; <nl> - final String key = " test : " + random . nextInt ( ) ; <nl> - return r . get ( $ ( key ) ) . thenComposeAsync ( new Function < byte [ ] , CompletableFuture < Void > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Void > apply ( byte [ ] o ) { <nl> - r . set ( $ ( key ) , $ ( " value " ) ) ; <nl> - return r . get ( $ ( key ) ) . thenApplyAsync ( new Function < byte [ ] , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( byte [ ] o ) { <nl> - getCompleteCount . addAndGet ( 1 ) ; <nl> - semaphore . release ( ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } ) . exceptionally ( new Function < Throwable , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Throwable o ) { <nl> - errorCount . addAndGet ( 1 ) ; <nl> - System . err . println ( " Fail ( " + o . getMessage ( ) + " ) " ) ; <nl> - semaphore . release ( ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> - } <nl> + db . runAsync ( tr - > { <nl> + attemptCount . 
addAndGet ( 1 ) ; <nl> + final String key = " test : " + random . nextInt ( ) ; <nl> + return tr . get ( $ ( key ) ) . thenComposeAsync ( ignore - > { <nl> + tr . set ( $ ( key ) , $ ( " value " ) ) ; <nl> + return tr . get ( $ ( key ) ) . thenRunAsync ( ( ) - > { <nl> + getCompleteCount . addAndGet ( 1 ) ; <nl> + semaphore . release ( ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) . exceptionally ( t - > { <nl> + errorCount . addAndGet ( 1 ) ; <nl> + System . err . println ( " Fail ( " + t . getMessage ( ) + " ) " ) ; <nl> + semaphore . release ( ) ; <nl> + return null ; <nl> + } ) ; <nl> } ) ; <nl> } <nl> semaphore . acquire ( CONCURRENCY ) ; <nl> long diff = System . currentTimeMillis ( ) - start ; <nl> System . out . println ( " time taken ( ms ) : " + diff ) ; <nl> - System . out . println ( " tr / sec : " + COUNT * 1000l / diff ) ; <nl> + System . out . println ( " tr / sec : " + COUNT * 1000L / diff ) ; <nl> System . out . println ( " attempts : " + attemptCount . get ( ) ) ; <nl> System . out . println ( " gets complete : " + getCompleteCount . get ( ) ) ; <nl> System . out . println ( " errors : " + errorCount . get ( ) ) ; <nl> System . out . println ( ) ; <nl> / / Can be enabled in Database . java <nl> - / / System . out . println ( " db success : " + d . commitSuccessCount . get ( ) ) ; <nl> - / / System . out . println ( " db errors : " + d . commitErrorCount . get ( ) ) ; <nl> + / / System . out . println ( " db success : " + db . commitSuccessCount . get ( ) ) ; <nl> + / / System . out . println ( " db errors : " + db . commitErrorCount . get ( ) ) ; <nl> System . exit ( 0 ) ; <nl> - } catch ( Throwable throwable ) { <nl> - throwable . printStackTrace ( ) ; <nl> + } catch ( Throwable t ) { <nl> + t . printStackTrace ( ) ; <nl> System . exit ( 1 ) ; <nl> } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / Context . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / Context . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> + import java . util . ArrayList ; <nl> + import java . util . HashMap ; <nl> import java . util . LinkedList ; <nl> import java . util . List ; <nl> + import java . util . Map ; <nl> <nl> + import java . util . concurrent . atomic . AtomicInteger ; <nl> import java . util . concurrent . CompletableFuture ; <nl> - import java . util . concurrent . ConcurrentHashMap ; <nl> - import java . util . function . BiConsumer ; <nl> - import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . Database ; <nl> + import com . apple . foundationdb . FDB ; <nl> import com . apple . foundationdb . FDBException ; <nl> import com . apple . foundationdb . KeySelector ; <nl> import com . apple . foundationdb . Range ; <nl> <nl> import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> import com . apple . foundationdb . tuple . 
Tuple ; <nl> <nl> - abstract class Context implements Runnable { <nl> + abstract class Context implements Runnable , AutoCloseable { <nl> final Stack stack = new Stack ( ) ; <nl> final Database db ; <nl> final String preStr ; <nl> int instructionIndex = 0 ; <nl> - String trName ; <nl> KeySelector nextKey , endKey ; <nl> Long lastVersion = null ; <nl> - List < Thread > children = new LinkedList < Thread > ( ) ; <nl> <nl> - static ConcurrentHashMap < String , Transaction > transactionMap = new ConcurrentHashMap < > ( ) ; <nl> + private String trName ; <nl> + private List < Thread > children = new LinkedList < > ( ) ; <nl> + private static Map < String , Transaction > transactionMap = new HashMap < > ( ) ; <nl> + private static Map < Transaction , AtomicInteger > transactionRefCounts = new HashMap < > ( ) ; <nl> <nl> Context ( Database db , byte [ ] prefix ) { <nl> this . db = db ; <nl> public void run ( ) { <nl> } <nl> } <nl> <nl> + public static synchronized void addTransactionReference ( Transaction tr ) { <nl> + transactionRefCounts . computeIfAbsent ( tr , x - > new AtomicInteger ( 0 ) ) . incrementAndGet ( ) ; <nl> + } <nl> + <nl> + private static synchronized Transaction getTransaction ( String trName ) { <nl> + Transaction tr = transactionMap . get ( trName ) ; <nl> + addTransactionReference ( tr ) ; <nl> + return tr ; <nl> + } <nl> + <nl> public Transaction getCurrentTransaction ( ) { <nl> - return Context . transactionMap . get ( this . trName ) ; <nl> + return getTransaction ( trName ) ; <nl> + } <nl> + <nl> + public static synchronized void releaseTransaction ( Transaction tr ) { <nl> + if ( tr ! = null ) { <nl> + AtomicInteger count = transactionRefCounts . get ( tr ) ; <nl> + if ( count . decrementAndGet ( ) = = 0 ) { <nl> + assert ! transactionMap . containsValue ( tr ) ; <nl> + transactionRefCounts . remove ( tr ) ; <nl> + tr . close ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + private static synchronized void updateTransaction ( String trName , Transaction tr ) { <nl> + releaseTransaction ( transactionMap . put ( trName , tr ) ) ; <nl> + addTransactionReference ( tr ) ; <nl> + } <nl> + <nl> + private static synchronized boolean updateTransaction ( String trName , Transaction oldTr , Transaction newTr ) { <nl> + if ( transactionMap . replace ( trName , oldTr , newTr ) ) { <nl> + addTransactionReference ( newTr ) ; <nl> + releaseTransaction ( oldTr ) ; <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> } <nl> <nl> public void updateCurrentTransaction ( Transaction tr ) { <nl> - Context . transactionMap . put ( this . trName , tr ) ; <nl> + updateTransaction ( trName , tr ) ; <nl> } <nl> <nl> public boolean updateCurrentTransaction ( Transaction oldTr , Transaction newTr ) { <nl> - return Context . transactionMap . replace ( this . trName , oldTr , newTr ) ; <nl> + return updateTransaction ( trName , oldTr , newTr ) ; <nl> } <nl> <nl> - public Transaction newTransaction ( ) { <nl> + public void newTransaction ( ) { <nl> Transaction tr = db . createTransaction ( ) ; <nl> - Context . transactionMap . put ( this . trName , tr ) ; <nl> - return tr ; <nl> + updateCurrentTransaction ( tr ) ; <nl> } <nl> <nl> - public Transaction newTransaction ( Transaction oldTr ) { <nl> + public void newTransaction ( Transaction oldTr ) { <nl> Transaction newTr = db . createTransaction ( ) ; <nl> - boolean replaced = Context . transactionMap . replace ( this . trName , oldTr , newTr ) ; <nl> - if ( replaced ) { <nl> - return newTr ; <nl> - } <nl> - else { <nl> - newTr . 
cancel ( ) ; <nl> - return Context . transactionMap . get ( this . trName ) ; <nl> + if ( ! updateCurrentTransaction ( oldTr , newTr ) ) { <nl> + newTr . close ( ) ; <nl> } <nl> } <nl> <nl> - public void switchTransaction ( byte [ ] trName ) { <nl> - this . trName = ByteArrayUtil . printable ( trName ) ; <nl> - Transaction tr = db . createTransaction ( ) ; <nl> - Transaction previousTr = Context . transactionMap . putIfAbsent ( this . trName , tr ) ; <nl> - if ( previousTr ! = null ) { <nl> - tr . cancel ( ) ; <nl> - } <nl> + public void switchTransaction ( byte [ ] rawTrName ) { <nl> + trName = ByteArrayUtil . printable ( rawTrName ) ; <nl> + newTransaction ( null ) ; <nl> } <nl> <nl> abstract void executeOperations ( ) throws Throwable ; <nl> StreamingMode streamingModeFromCode ( int code ) { <nl> throw new IllegalArgumentException ( " Invalid code : " + code ) ; <nl> } <nl> <nl> - void popParams ( int num , final List < Object > params , final CompletableFuture < Void > done ) { <nl> + private void popParams ( int num , final List < Object > params , final CompletableFuture < Void > done ) { <nl> while ( num - - > 0 ) { <nl> Object item = stack . pop ( ) . value ; <nl> if ( item instanceof CompletableFuture ) { <nl> - @ SuppressWarnings ( " unchecked " ) <nl> - final CompletableFuture < Object > future = ( CompletableFuture < Object > ) item ; <nl> + final CompletableFuture < ? > future = ( CompletableFuture < ? > ) item ; <nl> final int nextNum = num ; <nl> - future . whenCompleteAsync ( new BiConsumer < Object , Throwable > ( ) { <nl> - @ Override <nl> - public void accept ( Object o , Throwable t ) { <nl> - if ( t ! = null ) { <nl> - Throwable root = StackUtils . getRootFDBException ( t ) ; <nl> - if ( root instanceof FDBException ) { <nl> - params . add ( StackUtils . getErrorBytes ( ( FDBException ) root ) ) ; <nl> - popParams ( nextNum , params , done ) ; <nl> - } <nl> - else { <nl> - done . completeExceptionally ( t ) ; <nl> - } <nl> + future . whenCompleteAsync ( ( o , t ) - > { <nl> + if ( t ! = null ) { <nl> + FDBException root = StackUtils . getRootFDBException ( t ) ; <nl> + if ( root ! = null ) { <nl> + params . add ( StackUtils . getErrorBytes ( root ) ) ; <nl> + popParams ( nextNum , params , done ) ; <nl> } <nl> else { <nl> - if ( o = = null ) <nl> - params . add ( " RESULT_NOT_PRESENT " . getBytes ( ) ) ; <nl> - else <nl> - params . add ( o ) ; <nl> - <nl> - popParams ( nextNum , params , done ) ; <nl> + done . completeExceptionally ( t ) ; <nl> } <nl> } <nl> - } ) ; <nl> + else { <nl> + if ( o = = null ) <nl> + params . add ( " RESULT_NOT_PRESENT " . getBytes ( ) ) ; <nl> + else <nl> + params . add ( o ) ; <nl> + <nl> + popParams ( nextNum , params , done ) ; <nl> + } <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> <nl> return ; <nl> } <nl> public void accept ( Object o , Throwable t ) { <nl> } <nl> <nl> CompletableFuture < List < Object > > popParams ( int num ) { <nl> - final List < Object > params = new LinkedList < Object > ( ) ; <nl> - CompletableFuture < Void > done = new CompletableFuture < Void > ( ) ; <nl> + final List < Object > params = new ArrayList < > ( num ) ; <nl> + CompletableFuture < Void > done = new CompletableFuture < > ( ) ; <nl> popParams ( num , params , done ) ; <nl> <nl> - return done . thenApplyAsync ( new Function < Void , List < Object > > ( ) { <nl> - @ Override <nl> - public List < Object > apply ( Void n ) { <nl> - return params ; <nl> - } <nl> - } ) ; <nl> + return done . 
thenApply ( x - > params ) ; <nl> + } <nl> + <nl> + @ Override <nl> + public void close ( ) { <nl> + for ( Transaction tr : transactionMap . values ( ) ) { <nl> + tr . close ( ) ; <nl> + } <nl> } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / ContinuousSample . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / ContinuousSample . java <nl> <nl> import java . util . List ; <nl> import java . util . Random ; <nl> <nl> - public class ContinuousSample < T extends Number & Comparable < T > > { <nl> - public ContinuousSample ( int sampleSize ) { <nl> + public class ContinuousSample < T extends Number & Comparable < T > > { <nl> + public ContinuousSample ( int sampleSize ) { <nl> this . sampleSize = sampleSize ; <nl> - this . samples = new ArrayList < T > ( sampleSize ) ; <nl> + this . samples = new ArrayList < > ( sampleSize ) ; <nl> this . populationSize = 0 ; <nl> this . sorted = true ; <nl> } <nl> <nl> public ContinuousSample < T > addSample ( T sample ) { <nl> if ( populationSize = = 0 ) <nl> - _min = _max = sample ; <nl> + min = max = sample ; <nl> populationSize + + ; <nl> sorted = false ; <nl> <nl> - if ( populationSize < = sampleSize ) { <nl> - samples . add ( sample ) ; <nl> - } else if ( random . nextDouble ( ) < ( ( double ) sampleSize / populationSize ) ) { <nl> + if ( populationSize < = sampleSize ) { <nl> + samples . add ( sample ) ; <nl> + } else if ( random . nextDouble ( ) < ( ( double ) sampleSize / populationSize ) ) { <nl> samples . add ( random . nextInt ( sampleSize ) , sample ) ; <nl> } <nl> <nl> - _max = sample . compareTo ( _max ) > 0 ? sample : _max ; <nl> - _min = sample . compareTo ( _min ) < 0 ? sample : _min ; <nl> + max = sample . compareTo ( max ) > 0 ? sample : max ; <nl> + min = sample . compareTo ( min ) < 0 ? sample : min ; <nl> return this ; <nl> } <nl> <nl> public double mean ( ) { <nl> if ( samples . size ( ) = = 0 ) return 0 ; <nl> double sum = 0 ; <nl> - for ( int c = 0 ; c < samples . size ( ) ; c + + ) { <nl> + for ( int c = 0 ; c < samples . size ( ) ; c + + ) { <nl> sum + = samples . get ( c ) . doubleValue ( ) ; <nl> } <nl> return sum / samples . size ( ) ; <nl> } <nl> <nl> public T median ( ) { <nl> - return percentile ( 0 . 5 ) ; <nl> + return percentile ( 0 . 5 ) ; <nl> } <nl> <nl> - public T percentile ( double percentile ) { <nl> - if ( samples . size ( ) = = 0 | | percentile < 0 . 0 | | percentile > 1 . 0 ) <nl> + public T percentile ( double percentile ) { <nl> + if ( samples . size ( ) = = 0 | | percentile < 0 . 0 | | percentile > 1 . 0 ) <nl> return null ; <nl> sort ( ) ; <nl> - int idx = ( int ) Math . floor ( ( samples . size ( ) - 1 ) * percentile ) ; <nl> + int idx = ( int ) Math . floor ( ( samples . size ( ) - 1 ) * percentile ) ; <nl> return samples . get ( idx ) ; <nl> } <nl> <nl> - public T min ( ) { return _min ; } <nl> - public T max ( ) { return _max ; } <nl> + public T min ( ) { <nl> + return min ; <nl> + } <nl> + <nl> + public T max ( ) { <nl> + return max ; <nl> + } <nl> <nl> @ Override <nl> public String toString ( ) { <nl> public String toString ( ) { <nl> private long populationSize ; <nl> private boolean sorted ; <nl> private List < T > samples ; <nl> - private T _min , _max ; <nl> + private T min , max ; <nl> <nl> private void sort ( ) { <nl> - if ( ! sorted & & samples . size ( ) > 1 ) <nl> + if ( ! sorted & & samples . size ( ) > 1 ) <nl> Collections . 
sort ( samples ) ; <nl> sorted = true ; <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / DirectoryExtension . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / DirectoryExtension . java <nl> <nl> int dirIndex = 0 ; <nl> int errorIndex = 0 ; <nl> <nl> - public DirectoryExtension ( ) { <nl> + DirectoryExtension ( ) { <nl> dirList . add ( DirectoryLayer . getDefault ( ) ) ; <nl> } <nl> <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / DirectoryTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / DirectoryTest . java <nl> <nl> import java . util . Arrays ; <nl> import java . util . List ; <nl> <nl> - import java . util . function . Function ; <nl> - <nl> import com . apple . foundationdb . Cluster ; <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . FDB ; <nl> - import com . apple . foundationdb . Transaction ; <nl> import com . apple . foundationdb . TransactionContext ; <nl> import com . apple . foundationdb . directory . DirectoryLayer ; <nl> import com . apple . foundationdb . directory . DirectorySubspace ; <nl> private static void runTests ( TransactionContext db ) throws Exception { <nl> final DirectoryLayer dir = new DirectoryLayer ( ) ; <nl> <nl> try { <nl> - db . run ( new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - List < String > path = new ArrayList < > ( ) ; <nl> - path . add ( " foo " ) ; <nl> - DirectorySubspace foo = dir . create ( tr , path ) . join ( ) ; / / , " partition " . getBytes ( " UTF - 8 " ) ) . get ( ) ; <nl> - System . out . println ( foo . getPath ( ) ) ; <nl> - path . add ( " bar " ) ; <nl> - DirectorySubspace bar = dir . create ( tr , path ) . join ( ) ; / / , " partition " . getBytes ( " UTF - 8 " ) ) . get ( ) ; <nl> - System . out . println ( foo . getPath ( ) ) ; <nl> - path . add ( " baz " ) ; <nl> - DirectorySubspace baz = dir . create ( tr , path ) . join ( ) ; <nl> - System . out . println ( foo . getPath ( ) ) ; <nl> - System . out . println ( " Created foo : " + foo . exists ( tr ) . join ( ) ) ; <nl> - System . out . println ( " Created bar : " + bar . exists ( tr ) . join ( ) ) ; <nl> - System . out . println ( " Created baz : " + baz . exists ( tr ) . join ( ) ) ; <nl> + db . run ( tr - > { <nl> + List < String > path = new ArrayList < > ( ) ; <nl> + path . add ( " foo " ) ; <nl> + DirectorySubspace foo = dir . create ( tr , path ) . join ( ) ; / / , " partition " . getBytes ( " UTF - 8 " ) ) . get ( ) ; <nl> + System . out . println ( foo . getPath ( ) ) ; <nl> + path . add ( " bar " ) ; <nl> + DirectorySubspace bar = dir . create ( tr , path ) . join ( ) ; / / , " partition " . getBytes ( " UTF - 8 " ) ) . get ( ) ; <nl> + System . out . println ( foo . getPath ( ) ) ; <nl> + path . add ( " baz " ) ; <nl> + DirectorySubspace baz = dir . create ( tr , path ) . join ( ) ; <nl> + System . out . println ( foo . getPath ( ) ) ; <nl> + System . out . println ( " Created foo : " + foo . exists ( tr ) . join ( ) ) ; <nl> + System . out . println ( " Created bar : " + bar . exists ( tr ) . join ( ) ) ; <nl> + System . out . println ( " Created baz : " + baz . exists ( tr ) . join ( ) ) ; <nl> <nl> - DirectorySubspace bat = baz . moveTo ( tr , Arrays . asList ( " foo " , " bar " , " bat " ) ) . join ( ) ; <nl> + DirectorySubspace bat = baz . 
moveTo ( tr , Arrays . asList ( " foo " , " bar " , " bat " ) ) . join ( ) ; <nl> <nl> - System . out . println ( " Moved baz to bat : " + bat . exists ( tr ) . join ( ) ) ; <nl> + System . out . println ( " Moved baz to bat : " + bat . exists ( tr ) . join ( ) ) ; <nl> <nl> - foo . removeIfExists ( tr ) . join ( ) ; <nl> + foo . removeIfExists ( tr ) . join ( ) ; <nl> <nl> - System . out . println ( " Removed foo : " + foo . exists ( tr ) . join ( ) ) ; <nl> + System . out . println ( " Removed foo : " + foo . exists ( tr ) . join ( ) ) ; <nl> <nl> - return null ; <nl> - } <nl> + return null ; <nl> } ) ; <nl> } catch ( Throwable e ) { <nl> e . printStackTrace ( ) ; <nl> public Void apply ( Transaction tr ) { <nl> <nl> System . exit ( 0 ) ; <nl> } <nl> + <nl> + private DirectoryTest ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / DirectoryUtil . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / DirectoryUtil . java <nl> <nl> import java . util . ArrayList ; <nl> import java . util . List ; <nl> import java . util . concurrent . CompletableFuture ; <nl> - import java . util . function . Function ; <nl> - import java . util . function . Supplier ; <nl> <nl> + import com . apple . foundationdb . FDB ; <nl> import com . apple . foundationdb . async . AsyncUtil ; <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> <nl> } <nl> <nl> CompletableFuture < List < Tuple > > pop ( ) { <nl> - return AsyncUtil . whileTrue ( new Supplier < CompletableFuture < Boolean > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < Boolean > get ( ) { <nl> - if ( num - - = = 0 ) { <nl> - return CompletableFuture . completedFuture ( false ) ; <nl> - } <nl> - return inst . popParam ( ) <nl> - . thenComposeAsync ( new Function < Object , CompletableFuture < List < Object > > > ( ) { <nl> - @ Override <nl> - public CompletableFuture < List < Object > > apply ( Object count ) { <nl> - return inst . popParams ( StackUtils . getInt ( count ) ) ; <nl> - } <nl> - } ) <nl> - . thenApplyAsync ( new Function < List < Object > , Boolean > ( ) { <nl> - @ Override <nl> - public Boolean apply ( List < Object > elements ) { <nl> + return AsyncUtil . whileTrue ( ( ) - > { <nl> + if ( num - - = = 0 ) { <nl> + return AsyncUtil . READY_FALSE ; <nl> + } <nl> + return inst . popParam ( ) <nl> + . thenComposeAsync ( count - > inst . popParams ( StackUtils . getInt ( count ) ) , FDB . DEFAULT_EXECUTOR ) <nl> + . thenApplyAsync ( elements - > { <nl> tuples . add ( Tuple . fromItems ( elements ) ) ; <nl> return num > 0 ; <nl> - } <nl> - } ) ; <nl> - } <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } ) <nl> - . thenApplyAsync ( new Function < Void , List < Tuple > > ( ) { <nl> - @ Override <nl> - public List < Tuple > apply ( Void ignore ) { <nl> - return tuples ; <nl> - } <nl> - } ) ; <nl> + . thenApply ( ignore - > tuples ) ; <nl> } <nl> } <nl> <nl> public Boolean apply ( List < Object > elements ) { <nl> } <nl> <nl> static CompletableFuture < Tuple > popTuple ( Instruction inst ) { <nl> - return popTuples ( inst , 1 ) <nl> - . thenApplyAsync ( new Function < List < Tuple > , Tuple > ( ) { <nl> - @ Override <nl> - public Tuple apply ( List < Tuple > tuples ) { <nl> - return tuples . get ( 0 ) ; <nl> - } <nl> - } ) ; <nl> + return popTuples ( inst , 1 ) . thenApply ( tuples - > tuples . 
get ( 0 ) ) ; <nl> } <nl> <nl> static CompletableFuture < List < List < String > > > popPaths ( Instruction inst , int num ) { <nl> - return popTuples ( inst , num ) <nl> - . thenApplyAsync ( new Function < List < Tuple > , List < List < String > > > ( ) { <nl> - @ Override <nl> - public List < List < String > > apply ( List < Tuple > tuples ) { <nl> - List < List < String > > paths = new ArrayList < List < String > > ( ) ; <nl> - for ( Tuple t : tuples ) { <nl> - List < String > path = new ArrayList < String > ( ) ; <nl> - for ( int i = 0 ; i < t . size ( ) ; + + i ) <nl> - path . add ( t . getString ( i ) ) ; <nl> + return popTuples ( inst , num ) . thenApplyAsync ( tuples - > { <nl> + List < List < String > > paths = new ArrayList < > ( tuples . size ( ) ) ; <nl> + for ( Tuple t : tuples ) { <nl> + List < String > path = new ArrayList < > ( t . size ( ) ) ; <nl> + for ( int i = 0 ; i < t . size ( ) ; + + i ) <nl> + path . add ( t . getString ( i ) ) ; <nl> <nl> - paths . add ( path ) ; <nl> - } <nl> - <nl> - return paths ; <nl> + paths . add ( path ) ; <nl> } <nl> - } ) ; <nl> + <nl> + return paths ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> <nl> static CompletableFuture < List < String > > popPath ( Instruction inst ) { <nl> - return popPaths ( inst , 1 ) <nl> - . thenApplyAsync ( new Function < List < List < String > > , List < String > > ( ) { <nl> - @ Override <nl> - public List < String > apply ( List < List < String > > paths ) { <nl> - return paths . get ( 0 ) ; <nl> - } <nl> - } ) ; <nl> + return popPaths ( inst , 1 ) . thenApply ( paths - > paths . get ( 0 ) ) ; <nl> } <nl> <nl> static void pushError ( Instruction inst , Throwable t , List < Object > dirList ) { <nl> static void pushError ( Instruction inst , Throwable t , List < Object > dirList ) { <nl> if ( op . createsDirectory ) <nl> dirList . add ( null ) ; <nl> } <nl> + <nl> + private DirectoryUtil ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / Example . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / Example . java <nl> <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> public class Example { <nl> - public static void main ( String [ ] args ) throws ExecutionException , InterruptedException { <nl> - FDB fdb = FDB . selectAPIVersion ( 510 ) ; <nl> - Database db = fdb . open ( ) ; <nl> + public static void main ( String [ ] args ) throws ExecutionException , InterruptedException { <nl> + FDB fdb = FDB . selectAPIVersion ( 510 ) ; <nl> + Database db = fdb . open ( ) ; <nl> <nl> - / / Run an operation on the database <nl> - db . run ( new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - tr . set ( Tuple . from ( " hello " ) . pack ( ) , Tuple . from ( " world " ) . pack ( ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + / / Run an operation on the database <nl> + db . run ( ( Function < Transaction , Void > ) tr - > { <nl> + tr . set ( Tuple . from ( " hello " ) . pack ( ) , Tuple . from ( " world " ) . pack ( ) ) ; <nl> + return null ; <nl> + } ) ; <nl> <nl> - / / Get the value of ' hello ' from the database <nl> - String hello = db . run ( new Function < Transaction , String > ( ) { <nl> - @ Override <nl> - public String apply ( Transaction tr ) { <nl> - byte [ ] result = tr . get ( Tuple . from ( " hello " ) . pack ( ) ) . join ( ) ; <nl> - return Tuple . fromBytes ( result ) . 
getString ( 0 ) ; <nl> - } <nl> - } ) ; <nl> - System . out . println ( " Hello " + hello ) ; <nl> - } <nl> + / / Get the value of ' hello ' from the database <nl> + String hello = db . run ( tr - > { <nl> + byte [ ] result = tr . get ( Tuple . from ( " hello " ) . pack ( ) ) . join ( ) ; <nl> + return Tuple . fromBytes ( result ) . getString ( 0 ) ; <nl> + } ) ; <nl> + System . out . println ( " Hello " + hello ) ; <nl> + } <nl> + <nl> + private Example ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / Instruction . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / Instruction . java <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> import java . util . concurrent . CompletableFuture ; <nl> - import java . util . function . Function ; <nl> + import java . util . List ; <nl> <nl> import com . apple . foundationdb . ReadTransaction ; <nl> import com . apple . foundationdb . ReadTransactionContext ; <nl> <nl> import com . apple . foundationdb . TransactionContext ; <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> - import java . util . List ; <nl> <nl> class Instruction extends Stack { <nl> - private final static String SUFFIX_SNAPSHOT = " _SNAPSHOT " ; <nl> - private final static String SUFFIX_DATABASE = " _DATABASE " ; <nl> - <nl> - String op ; <nl> - Tuple tokens ; <nl> - Context context ; <nl> - boolean isDatabase ; <nl> - boolean isSnapshot ; <nl> - Transaction tr ; <nl> - ReadTransaction readTr ; <nl> - TransactionContext tcx ; <nl> - ReadTransactionContext readTcx ; <nl> - <nl> - public Instruction ( Context context , Tuple tokens ) { <nl> + private static final String SUFFIX_SNAPSHOT = " _SNAPSHOT " ; <nl> + private static final String SUFFIX_DATABASE = " _DATABASE " ; <nl> + <nl> + final String op ; <nl> + final Tuple tokens ; <nl> + final Context context ; <nl> + final boolean isDatabase ; <nl> + final boolean isSnapshot ; <nl> + final Transaction tr ; <nl> + final ReadTransaction readTr ; <nl> + final TransactionContext tcx ; <nl> + final ReadTransactionContext readTcx ; <nl> + <nl> + Instruction ( Context context , Tuple tokens ) { <nl> this . context = context ; <nl> this . tokens = tokens ; <nl> <nl> - op = tokens . getString ( 0 ) ; <nl> - isDatabase = op . endsWith ( SUFFIX_DATABASE ) ; <nl> - isSnapshot = op . endsWith ( SUFFIX_SNAPSHOT ) ; <nl> + String fullOp = tokens . getString ( 0 ) ; <nl> + isDatabase = fullOp . endsWith ( SUFFIX_DATABASE ) ; <nl> + isSnapshot = fullOp . endsWith ( SUFFIX_SNAPSHOT ) ; <nl> <nl> if ( isDatabase ) { <nl> - this . tr = context . db . createTransaction ( ) ; <nl> - readTr = this . tr ; <nl> - op = op . substring ( 0 , op . length ( ) - SUFFIX_DATABASE . length ( ) ) ; <nl> + tr = null ; <nl> + readTr = null ; <nl> + op = fullOp . substring ( 0 , fullOp . length ( ) - SUFFIX_DATABASE . length ( ) ) ; <nl> } <nl> else if ( isSnapshot ) { <nl> - this . tr = context . getCurrentTransaction ( ) ; <nl> - readTr = this . tr . snapshot ( ) ; <nl> - op = op . substring ( 0 , op . length ( ) - SUFFIX_SNAPSHOT . length ( ) ) ; <nl> + tr = context . getCurrentTransaction ( ) ; <nl> + readTr = tr . snapshot ( ) ; <nl> + op = fullOp . substring ( 0 , fullOp . length ( ) - SUFFIX_SNAPSHOT . length ( ) ) ; <nl> } <nl> else { <nl> - this . tr = context . getCurrentTransaction ( ) ; <nl> - readTr = this . tr ; <nl> + tr = context . 
getCurrentTransaction ( ) ; <nl> + readTr = tr ; <nl> + op = fullOp ; <nl> } <nl> <nl> - tcx = isDatabase ? context . db : this . tr ; <nl> - readTcx = isDatabase ? context . db : this . readTr ; <nl> + tcx = isDatabase ? context . db : tr ; <nl> + readTcx = isDatabase ? context . db : readTr ; <nl> } <nl> <nl> - void setTransaction ( Transaction tr ) { <nl> - this . tr = tr ; <nl> - if ( isSnapshot ) { <nl> - readTr = this . tr . snapshot ( ) ; <nl> - } <nl> - else { <nl> - readTr = tr ; <nl> + void setTransaction ( Transaction newTr ) { <nl> + if ( ! isDatabase ) { <nl> + context . updateCurrentTransaction ( newTr ) ; <nl> } <nl> + } <nl> <nl> + void setTransaction ( Transaction oldTr , Transaction newTr ) { <nl> if ( ! isDatabase ) { <nl> - context . updateCurrentTransaction ( tr ) ; <nl> + context . updateCurrentTransaction ( oldTr , newTr ) ; <nl> } <nl> } <nl> <nl> + void releaseTransaction ( ) { <nl> + Context . releaseTransaction ( tr ) ; <nl> + } <nl> + <nl> void push ( Object o ) { <nl> + if ( o instanceof CompletableFuture & & tr ! = null ) { <nl> + CompletableFuture < ? > future = ( CompletableFuture < ? > ) o ; <nl> + Context . addTransactionReference ( tr ) ; <nl> + future . whenComplete ( ( x , t ) - > Context . releaseTransaction ( tr ) ) ; <nl> + } <nl> context . stack . push ( context . instructionIndex , o ) ; <nl> } <nl> <nl> void clear ( ) { <nl> <nl> CompletableFuture < Object > popParam ( ) { <nl> return popParams ( 1 ) <nl> - . thenApplyAsync ( new Function < List < Object > , Object > ( ) { <nl> - public Object apply ( List < Object > params ) { <nl> - return params . get ( 0 ) ; <nl> - } <nl> - } ) ; <nl> + . thenApplyAsync ( ( params ) - > params . get ( 0 ) ) ; <nl> } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / IterableTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / IterableTest . java <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> import java . util . concurrent . atomic . AtomicInteger ; <nl> - import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . Cluster ; <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . FDB ; <nl> import com . apple . foundationdb . KeyValue ; <nl> - import com . apple . foundationdb . Transaction ; <nl> import com . apple . foundationdb . TransactionContext ; <nl> <nl> public class IterableTest { <nl> private static void runTests ( final int reps , TransactionContext db ) { <nl> long start = System . currentTimeMillis ( ) ; <nl> final AtomicInteger lastcount = new AtomicInteger ( 0 ) ; <nl> try { <nl> - db . run ( new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - for ( KeyValue e : tr . getRange ( " vcount " . getBytes ( ) , " zz " . getBytes ( ) ) ) { <nl> - System . out . println ( " K : " + new String ( e . getKey ( ) ) + " , V : " + new String ( e . getValue ( ) ) ) ; <nl> - } <nl> - return null ; <nl> + db . run ( tr - > { <nl> + for ( KeyValue e : tr . getRange ( " vcount " . getBytes ( ) , " zz " . getBytes ( ) ) ) { <nl> + System . out . println ( " K : " + new String ( e . getKey ( ) ) + " , V : " + new String ( e . getValue ( ) ) ) ; <nl> } <nl> + return null ; <nl> } ) ; <nl> } catch ( Throwable e ) { <nl> e . printStackTrace ( ) ; <nl> public Void apply ( Transaction tr ) { <nl> <nl> System . 
exit ( 0 ) ; <nl> } <nl> + <nl> + private IterableTest ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / LocalityTests . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / LocalityTests . java <nl> <nl> import com . apple . foundationdb . FDB ; <nl> import com . apple . foundationdb . LocalityUtil ; <nl> import com . apple . foundationdb . Transaction ; <nl> - import com . apple . foundationdb . async . AsyncIterable ; <nl> + import com . apple . foundationdb . async . CloseableAsyncIterator ; <nl> import com . apple . foundationdb . async . AsyncUtil ; <nl> import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> <nl> <nl> public static void main ( String [ ] args ) { <nl> FDB fdb = FDB . selectAPIVersion ( 510 ) ; <nl> Database database = fdb . open ( args [ 0 ] ) ; <nl> - { <nl> - Transaction tr = database . createTransaction ( ) ; <nl> - String [ ] keyAddresses = LocalityUtil . getAddressesForKey ( tr , " a " . getBytes ( ) ) . join ( ) ; <nl> - for ( String s : keyAddresses ) { <nl> - System . out . println ( " @ " + s ) ; <nl> - } <nl> + <nl> + Transaction tr = database . createTransaction ( ) ; <nl> + String [ ] keyAddresses = LocalityUtil . getAddressesForKey ( tr , " a " . getBytes ( ) ) . join ( ) ; <nl> + for ( String s : keyAddresses ) { <nl> + System . out . println ( " @ " + s ) ; <nl> } <nl> <nl> long start = System . currentTimeMillis ( ) ; <nl> - AsyncIterable < byte [ ] > keys = LocalityUtil . getBoundaryKeys ( database , new byte [ 0 ] , new byte [ ] { ( byte ) 255 } ) ; <nl> - CompletableFuture < List < byte [ ] > > collection = AsyncUtil . collect ( keys ) ; <nl> + <nl> + CloseableAsyncIterator < byte [ ] > keys = LocalityUtil . getBoundaryKeys ( database , new byte [ 0 ] , new byte [ ] { ( byte ) 255 } ) ; <nl> + CompletableFuture < List < byte [ ] > > collection = AsyncUtil . collectRemaining ( keys ) ; <nl> List < byte [ ] > list = collection . join ( ) ; <nl> System . out . println ( " Took " + ( System . currentTimeMillis ( ) - start ) + " ms to get " + <nl> list . size ( ) + " items " ) ; <nl> + <nl> + keys . close ( ) ; <nl> + <nl> int i = 0 ; <nl> for ( byte [ ] key : collection . join ( ) ) { <nl> System . out . println ( i + + + " : " + ByteArrayUtil . printable ( key ) ) ; <nl> } <nl> } <nl> + <nl> + private LocalityTests ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / ParallelRandomScan . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / ParallelRandomScan . java <nl> <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . Semaphore ; <nl> import java . util . concurrent . atomic . AtomicInteger ; <nl> - import java . util . function . BiConsumer ; <nl> - import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . FDB ; <nl> private static void runTest ( Database database , <nl> final long launch = System . nanoTime ( ) ; <nl> <nl> final AsyncIterator < KeyValue > it = range . iterator ( ) ; <nl> - final CompletableFuture < KeyValue > f = it . onHasNext ( ) . thenApplyAsync ( <nl> - new Function < Boolean , KeyValue > ( ) { <nl> - @ Override <nl> - public KeyValue apply ( Boolean o ) { <nl> - if ( ! o ) { <nl> - return null ; <nl> - } <nl> - return it . next ( ) ; <nl> - } <nl> - } <nl> - ) ; <nl> - f . 
whenCompleteAsync ( new BiConsumer < KeyValue , Throwable > ( ) { <nl> - @ Override <nl> - public void accept ( KeyValue kv , Throwable t ) { <nl> - if ( kv ! = null ) { <nl> - readsCompleted . incrementAndGet ( ) ; <nl> - long timeTaken = System . nanoTime ( ) - launch ; <nl> - synchronized ( latencies ) { <nl> - latencies . addSample ( timeTaken ) ; <nl> - } <nl> - } <nl> - else if ( t ! = null ) { <nl> - errors . incrementAndGet ( ) ; <nl> + final CompletableFuture < KeyValue > f = it . onHasNext ( ) . thenApplyAsync ( hasFirst - > { <nl> + if ( ! hasFirst ) { <nl> + return null ; <nl> + } <nl> + return it . next ( ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> + f . whenCompleteAsync ( ( kv , t ) - > { <nl> + if ( kv ! = null ) { <nl> + readsCompleted . incrementAndGet ( ) ; <nl> + long timeTaken = System . nanoTime ( ) - launch ; <nl> + synchronized ( latencies ) { <nl> + latencies . addSample ( timeTaken ) ; <nl> } <nl> - <nl> - coordinator . release ( ) ; <nl> } <nl> - } ) ; <nl> + else if ( t ! = null ) { <nl> + errors . incrementAndGet ( ) ; <nl> + } <nl> + <nl> + coordinator . release ( ) ; <nl> + } , FDB . DEFAULT_EXECUTOR ) ; <nl> } <nl> <nl> / / Block for ALL tasks to end ! <nl> else if ( t ! = null ) { <nl> System . out . println ( String . format ( " Mean : % . 2f , Median : % d , 98 % % : % d " , <nl> latencies . mean ( ) , latencies . median ( ) , latencies . percentile ( 0 . 98 ) ) ) ; <nl> } <nl> + <nl> + private ParallelRandomScan ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / PerformanceTester . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / PerformanceTester . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> - import com . apple . foundationdb . Database ; <nl> - import com . apple . foundationdb . KeySelector ; <nl> - import com . apple . foundationdb . Transaction ; <nl> - import com . apple . foundationdb . TransactionContext ; <nl> - import com . apple . foundationdb . async . AsyncUtil ; <nl> - import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> - <nl> import java . util . ArrayList ; <nl> import java . util . Arrays ; <nl> import java . util . Collections ; <nl> <nl> import java . util . stream . IntStream ; <nl> import java . util . stream . Stream ; <nl> <nl> + import com . apple . foundationdb . Database ; <nl> + import com . apple . foundationdb . KeySelector ; <nl> + import com . apple . foundationdb . Transaction ; <nl> + import com . apple . foundationdb . TransactionContext ; <nl> + import com . apple . foundationdb . async . AsyncUtil ; <nl> + import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> + <nl> public class PerformanceTester extends AbstractTester { <nl> private final int keyCount ; <nl> private final int keySize ; <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / RYWBenchmark . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / RYWBenchmark . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> + import java . util . ArrayList ; <nl> + import java . util . Arrays ; <nl> + import java . util . Collections ; <nl> + import java . util . List ; <nl> + import java . util . function . Function ; <nl> + import java . util . stream . Collectors ; <nl> + <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . Transaction ; <nl> import com . 
apple . foundationdb . tuple . ByteArrayUtil ; <nl> <nl> - import java . util . * ; <nl> - import java . util . function . Function ; <nl> - import java . util . stream . Collectors ; <nl> - <nl> public class RYWBenchmark extends AbstractTester { <nl> private int keyCount ; <nl> <nl> public Double getManySequential ( Transaction tr , int count ) { <nl> public Double getRangeBasic ( Transaction tr , int count ) { <nl> long start = System . nanoTime ( ) ; <nl> for ( int i = 0 ; i < count ; i + + ) { <nl> - tr . getRange ( key ( 0 ) , key ( keyCount ) ) . asList ( ) . join ( ) ; <nl> + tr . getRange ( key ( 0 ) , key ( keyCount ) ) . asList ( ) . join ( ) ; <nl> } <nl> long end = System . nanoTime ( ) ; <nl> <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / RangeTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / RangeTest . java <nl> public static void main ( String [ ] args ) { <nl> Database db = fdb . open ( ) ; <nl> <nl> try { <nl> - db . run ( new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - long version = tr . getReadVersion ( ) . join ( ) ; <nl> - System . out . println ( " DB version : " + version ) ; <nl> - tr . get ( " apple1 " . getBytes ( ) ) . join ( ) ; <nl> - tr . set ( " apple1 " . getBytes ( ) , " crunchy1 " . getBytes ( ) ) ; <nl> - tr . set ( " apple2 " . getBytes ( ) , " crunchy2 " . getBytes ( ) ) ; <nl> - tr . set ( " apple3 " . getBytes ( ) , " crunchy3 " . getBytes ( ) ) ; <nl> - tr . set ( " apple4 " . getBytes ( ) , " crunchy4 " . getBytes ( ) ) ; <nl> - tr . set ( " apple5 " . getBytes ( ) , " crunchy5 " . getBytes ( ) ) ; <nl> - tr . set ( " apple6 " . getBytes ( ) , " crunchy6 " . getBytes ( ) ) ; <nl> - System . out . println ( " Attempting to commit apple / crunchy pairs . . . " ) ; <nl> - <nl> - return null ; <nl> - } <nl> + db . run ( ( Function < Transaction , Void > ) tr - > { <nl> + long version = tr . getReadVersion ( ) . join ( ) ; <nl> + System . out . println ( " DB version : " + version ) ; <nl> + tr . get ( " apple1 " . getBytes ( ) ) . join ( ) ; <nl> + tr . set ( " apple1 " . getBytes ( ) , " crunchy1 " . getBytes ( ) ) ; <nl> + tr . set ( " apple2 " . getBytes ( ) , " crunchy2 " . getBytes ( ) ) ; <nl> + tr . set ( " apple3 " . getBytes ( ) , " crunchy3 " . getBytes ( ) ) ; <nl> + tr . set ( " apple4 " . getBytes ( ) , " crunchy4 " . getBytes ( ) ) ; <nl> + tr . set ( " apple5 " . getBytes ( ) , " crunchy5 " . getBytes ( ) ) ; <nl> + tr . set ( " apple6 " . getBytes ( ) , " crunchy6 " . getBytes ( ) ) ; <nl> + System . out . println ( " Attempting to commit apple / crunchy pairs . . . " ) ; <nl> + <nl> + return null ; <nl> } ) ; <nl> } catch ( Throwable e ) { <nl> e . printStackTrace ( ) ; <nl> public Void apply ( Transaction tr ) { <nl> e . printStackTrace ( ) ; <nl> return ; <nl> } <nl> - / / db . dispose ( ) ; <nl> - / / cluster . dispose ( ) ; <nl> + / / db . close ( ) ; <nl> + / / cluster . close ( ) ; <nl> <nl> tr = db . createTransaction ( ) ; <nl> checkRange ( tr ) ; <nl> public Void apply ( Transaction tr ) { <nl> System . out . println ( " range comparisons okay " ) ; <nl> } <nl> <nl> - db . dispose ( ) ; <nl> - / / cluster . dispose ( ) ; <nl> + db . close ( ) ; <nl> + / / cluster . close ( ) ; <nl> / / fdb . stopNetwork ( ) ; <nl> System . out . println ( " Done with test program " ) ; <nl> } <nl> private static void checkRange ( Transaction tr ) { <nl> System . out . 
println ( " Value is " + <nl> ( val ! = null ? new String ( val ) : " not present " ) ) ; <nl> <nl> - AsyncIterable < KeyValue > entryList = tr . getRange ( <nl> + AsyncIterable < KeyValue > entryList = tr . getRange ( <nl> KeySelector . firstGreaterOrEqual ( " apple " . getBytes ( ) ) , <nl> KeySelector . firstGreaterOrEqual ( " banana " . getBytes ( ) ) , 4 ) ; <nl> List < KeyValue > entries = entryList . asList ( ) . join ( ) ; <nl> private static void checkRange ( Transaction tr ) { <nl> } <nl> <nl> } <nl> + <nl> + private RangeTest ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / SerialInsertion . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / SerialInsertion . java <nl> public void run ( ) { <nl> } <nl> } <nl> <nl> + private SerialInsertion ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / SerialIteration . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / SerialIteration . java <nl> private static int scanDatabase ( Database database , int rows ) { <nl> } <nl> return counter ; <nl> } <nl> + <nl> + private SerialIteration ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / SerialTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / SerialTest . java <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> import java . util . concurrent . atomic . AtomicInteger ; <nl> - import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . Cluster ; <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . FDB ; <nl> - import com . apple . foundationdb . Transaction ; <nl> import com . apple . foundationdb . TransactionContext ; <nl> <nl> public class SerialTest { <nl> private static void runTests ( int reps , TransactionContext db ) { <nl> final AtomicInteger lastcount = new AtomicInteger ( 0 ) ; <nl> for ( int i = 0 ; i < reps ; i + + ) { <nl> try { <nl> - db . run ( new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - byte [ ] val = tr . get ( " count " . getBytes ( ) ) . join ( ) ; <nl> - / / System . out . println ( " Got value " ) ; <nl> - int count = Integer . parseInt ( new String ( val ) ) ; <nl> - tr . set ( " count " . getBytes ( ) , Integer . toString ( count + 1 ) . getBytes ( ) ) ; <nl> - lastcount . set ( count ) ; <nl> + db . run ( tr - > { <nl> + byte [ ] val = tr . get ( " count " . getBytes ( ) ) . join ( ) ; <nl> + / / System . out . println ( " Got value " ) ; <nl> + int count = Integer . parseInt ( new String ( val ) ) ; <nl> + tr . set ( " count " . getBytes ( ) , Integer . toString ( count + 1 ) . getBytes ( ) ) ; <nl> + lastcount . set ( count ) ; <nl> <nl> - return null ; <nl> - } <nl> + return null ; <nl> } ) ; <nl> } catch ( Throwable e ) { <nl> e . printStackTrace ( ) ; <nl> public Void apply ( Transaction tr ) { <nl> System . exit ( 0 ) ; <nl> } <nl> <nl> + private SerialTest ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / StackEntry . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / StackEntry . 
java <nl> <nl> class StackEntry { <nl> int idx ; <nl> Object value ; <nl> - public StackEntry ( int idx , Object value ) { <nl> + StackEntry ( int idx , Object value ) { <nl> this . idx = idx ; <nl> this . value = value ; <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / StackTester . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / StackTester . java <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> import java . math . BigInteger ; <nl> - import java . util . * ; <nl> - <nl> import java . nio . ByteBuffer ; <nl> import java . nio . ByteOrder ; <nl> + import java . util . ArrayList ; <nl> + import java . util . Arrays ; <nl> + import java . util . Collections ; <nl> + import java . util . HashMap ; <nl> + import java . util . LinkedList ; <nl> + import java . util . List ; <nl> + import java . util . Map ; <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . CompletionException ; <nl> import java . util . function . Function ; <nl> <nl> import com . apple . foundationdb . LocalityUtil ; <nl> import com . apple . foundationdb . MutationType ; <nl> import com . apple . foundationdb . Range ; <nl> - import com . apple . foundationdb . ReadTransaction ; <nl> import com . apple . foundationdb . StreamingMode ; <nl> import com . apple . foundationdb . Transaction ; <nl> import com . apple . foundationdb . async . AsyncIterable ; <nl> + import com . apple . foundationdb . async . AsyncUtil ; <nl> + import com . apple . foundationdb . async . CloseableAsyncIterator ; <nl> import com . apple . foundationdb . tuple . ByteArrayUtil ; <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> else if ( op = = StackOperation . WAIT_FUTURE ) { <nl> } <nl> else if ( op = = StackOperation . WAIT_EMPTY ) { <nl> List < Object > params = inst . popParams ( 1 ) . join ( ) ; <nl> - inst . context . db . run ( new WaitEmpty ( ( byte [ ] ) params . get ( 0 ) ) ) ; <nl> + inst . context . db . run ( new WaitEmpty ( ( byte [ ] ) params . get ( 0 ) ) ) ; <nl> inst . push ( " WAITED_FOR_EMPTY " . getBytes ( ) ) ; <nl> } <nl> else if ( op = = StackOperation . START_THREAD ) { <nl> else if ( op = = StackOperation . SET ) { <nl> final List < Object > params = inst . popParams ( 2 ) . join ( ) ; <nl> / / System . out . println ( inst . context . preStr + " - " + " Setting ' " + ArrayUtils . printable ( ( byte [ ] ) params . get ( 0 ) ) + <nl> / / " ' to ' " + ArrayUtils . printable ( ( byte [ ] ) params . get ( 1 ) ) + " ' " ) ; <nl> - executeMutation ( inst , <nl> - new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - tr . set ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + executeMutation ( inst , tr - > { <nl> + tr . set ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> + return null ; <nl> + } ) ; <nl> } <nl> else if ( op = = StackOperation . CLEAR ) { <nl> final List < Object > params = inst . popParams ( 1 ) . join ( ) ; <nl> / / System . out . println ( inst . context . preStr + " - " + " Clearing : ' " + ByteArrayUtil . printable ( ( byte [ ] ) params . get ( 0 ) ) + " ' " ) ; <nl> - executeMutation ( inst , <nl> - new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - tr . clear ( ( byte [ ] ) params . 
get ( 0 ) ) ; <nl> - return null ; <nl> - } <nl> - } <nl> - ) ; <nl> + executeMutation ( inst , tr - > { <nl> + tr . clear ( ( byte [ ] ) params . get ( 0 ) ) ; <nl> + return null ; <nl> + } ) ; <nl> } <nl> else if ( op = = StackOperation . CLEAR_RANGE ) { <nl> final List < Object > params = inst . popParams ( 2 ) . join ( ) ; <nl> - executeMutation ( inst , <nl> - new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - tr . clear ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + executeMutation ( inst , tr - > { <nl> + tr . clear ( ( byte [ ] ) params . get ( 0 ) , ( byte [ ] ) params . get ( 1 ) ) ; <nl> + return null ; <nl> + } ) ; <nl> } <nl> else if ( op = = StackOperation . CLEAR_RANGE_STARTS_WITH ) { <nl> final List < Object > params = inst . popParams ( 1 ) . join ( ) ; <nl> - executeMutation ( inst , <nl> - new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - tr . clear ( Range . startsWith ( ( byte [ ] ) params . get ( 0 ) ) ) ; <nl> - return null ; <nl> - } <nl> - } ) ; <nl> + executeMutation ( inst , tr - > { <nl> + tr . clear ( Range . startsWith ( ( byte [ ] ) params . get ( 0 ) ) ) ; <nl> + return null ; <nl> + } ) ; <nl> } <nl> else if ( op = = StackOperation . ATOMIC_OP ) { <nl> final List < Object > params = inst . popParams ( 3 ) . join ( ) ; <nl> final MutationType optype = MutationType . valueOf ( ( String ) params . get ( 0 ) ) ; <nl> - executeMutation ( inst , <nl> - new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - tr . mutate ( optype , ( byte [ ] ) params . get ( 1 ) , ( byte [ ] ) params . get ( 2 ) ) ; <nl> - return null ; <nl> - } <nl> - } <nl> - ) ; <nl> + executeMutation ( inst , tr - > { <nl> + tr . mutate ( optype , ( byte [ ] ) params . get ( 1 ) , ( byte [ ] ) params . get ( 2 ) ) ; <nl> + return null ; <nl> + } ) ; <nl> } <nl> else if ( op = = StackOperation . COMMIT ) { <nl> inst . push ( inst . tr . commit ( ) ) ; <nl> void processOp ( byte [ ] operation ) { <nl> directoryExtension . processInstruction ( inst ) ; <nl> else <nl> processInstruction ( inst ) ; <nl> + <nl> + inst . releaseTransaction ( ) ; <nl> } <nl> <nl> @ Override <nl> void executeOperations ( ) { <nl> while ( true ) { <nl> Transaction t = db . createTransaction ( ) ; <nl> List < KeyValue > keyValues = t . getRange ( begin , endKey / * , 1000 * / ) . asList ( ) . join ( ) ; <nl> - if ( keyValues . size ( ) = = 0 ) <nl> + t . close ( ) ; <nl> + if ( keyValues . size ( ) = = 0 ) { <nl> break ; <nl> + } <nl> / / System . out . println ( " * Got " + keyValues . size ( ) + " instructions " ) ; <nl> <nl> for ( KeyValue next : keyValues ) { <nl> void executeOperations ( ) { <nl> instructionIndex + + ; <nl> } <nl> } <nl> + <nl> / / System . out . println ( " * Completed " + instructionIndex + " instructions " ) ; <nl> } <nl> } <nl> private static void testLocality ( Database db ) { <nl> tr . options ( ) . setTimeout ( 60 * 1000 ) ; <nl> tr . options ( ) . setReadSystemKeys ( ) ; <nl> tr . getReadVersion ( ) . join ( ) ; <nl> - AsyncIterable < byte [ ] > boundaryKeys = LocalityUtil . getBoundaryKeys ( <nl> + CloseableAsyncIterator < byte [ ] > boundaryKeys = LocalityUtil . getBoundaryKeys ( <nl> tr , new byte [ 0 ] , new byte [ ] { ( byte ) 255 , ( byte ) 255 } ) ; <nl> - List < byte [ ] > keys = boundaryKeys . asList ( ) . 
join ( ) ; <nl> - for ( int i = 0 ; i < keys . size ( ) - 1 ; i + + ) { <nl> - byte [ ] start = keys . get ( i ) ; <nl> - byte [ ] end = tr . getKey ( KeySelector . lastLessThan ( keys . get ( i + 1 ) ) ) . join ( ) ; <nl> - List < String > startAddresses = Arrays . asList ( LocalityUtil . getAddressesForKey ( tr , start ) . join ( ) ) ; <nl> - List < String > endAddresses = Arrays . asList ( LocalityUtil . getAddressesForKey ( tr , end ) . join ( ) ) ; <nl> - for ( String a : startAddresses ) { <nl> - if ( ! endAddresses . contains ( a ) ) { <nl> - throw new RuntimeException ( " Locality not internally consistent . " ) ; <nl> + try { <nl> + List < byte [ ] > keys = AsyncUtil . collectRemaining ( boundaryKeys ) . join ( ) ; <nl> + for ( int i = 0 ; i < keys . size ( ) - 1 ; i + + ) { <nl> + byte [ ] start = keys . get ( i ) ; <nl> + byte [ ] end = tr . getKey ( KeySelector . lastLessThan ( keys . get ( i + 1 ) ) ) . join ( ) ; <nl> + List < String > startAddresses = Arrays . asList ( LocalityUtil . getAddressesForKey ( tr , start ) . join ( ) ) ; <nl> + List < String > endAddresses = Arrays . asList ( LocalityUtil . getAddressesForKey ( tr , end ) . join ( ) ) ; <nl> + for ( String a : startAddresses ) { <nl> + if ( ! endAddresses . contains ( a ) ) { <nl> + throw new RuntimeException ( " Locality not internally consistent . " ) ; <nl> + } <nl> } <nl> } <nl> - } <nl> <nl> - return null ; <nl> + return null ; <nl> + } <nl> + finally { <nl> + boundaryKeys . close ( ) ; <nl> + } <nl> } ) ; <nl> } <nl> <nl> public static void main ( String [ ] args ) { <nl> / / System . out . println ( " Starting test . . . " ) ; <nl> c . run ( ) ; <nl> / / System . out . println ( " Done with test . " ) ; <nl> + db . close ( ) ; <nl> + System . gc ( ) ; <nl> } <nl> + <nl> + private StackTester ( ) { } <nl> } <nl> <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / StackUtils . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / StackUtils . java <nl> <nl> <nl> import com . apple . foundationdb . FDBException ; <nl> import com . apple . foundationdb . KeySelector ; <nl> - import com . apple . foundationdb . async . AsyncUtil ; <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> public class StackUtils { <nl> static FDBException getRootFDBException ( Throwable t ) { <nl> <nl> return ( t instanceof FDBException ) ? ( FDBException ) t : null ; <nl> } <nl> + <nl> + private StackUtils ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / TesterArgs . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / TesterArgs . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> - import com . apple . foundationdb . subspace . Subspace ; <nl> - import com . apple . foundationdb . tuple . Tuple ; <nl> - <nl> import java . util . ArrayList ; <nl> - import java . util . Arrays ; <nl> import java . util . List ; <nl> <nl> + import com . apple . foundationdb . subspace . Subspace ; <nl> + import com . apple . foundationdb . tuple . 
Tuple ; <nl> + <nl> public class TesterArgs { <nl> private String outputDirectory ; <nl> private boolean multiversionApi ; <nl> private TesterArgs ( String outputDirectory , boolean multiversionApi , boolean call <nl> } <nl> <nl> public static void printUsage ( ) { <nl> - String usage = " Arguments : [ - o / - - output - directory DIR ] [ - - disable - multiversion - api ] [ - - enable - callbacks - on - external - threads ] [ - - use - external - client ] [ - - tests - to - run TEST [ TEST . . . ] ] [ - h / - - help ] \ n " <nl> - + " \ n " <nl> - + " Arguments : \ n " <nl> - + " - o / - - output - directory DIR Directory to store JSON output . If not set , the current directory is used . \ n " <nl> - + " - - disable - multiversion - api Disables the multi - version client API \ n " <nl> - + " - - enable - callbacks - on - external - threads Allows callbacks to be called on threads created by the client library . \ n " <nl> - + " - - use - external - client Connect to the server using an external client . \ n " <nl> - + " - - tests - to - run TEST [ TEST . . . ] List of test names to run . \ n " <nl> - + " - h / - - help Print this help message and then quit . \ n " ; <nl> + String usage = " Arguments : [ - o / - - output - directory DIR ] [ - - disable - multiversion - api ] [ - - enable - callbacks - on - external - threads ] [ - - use - external - client ] [ - - tests - to - run TEST [ TEST . . . ] ] [ - h / - - help ] \ n " + <nl> + " \ n " + <nl> + " Arguments : \ n " + <nl> + " - o / - - output - directory DIR Directory to store JSON output . If not set , the current directory is used . \ n " + <nl> + " - - disable - multiversion - api Disables the multi - version client API \ n " + <nl> + " - - enable - callbacks - on - external - threads Allows callbacks to be called on threads created by the client library . \ n " + <nl> + " - - use - external - client Connect to the server using an external client . \ n " + <nl> + " - - tests - to - run TEST [ TEST . . . ] List of test names to run . \ n " + <nl> + " - h / - - help Print this help message and then quit . \ n " ; <nl> <nl> System . out . print ( usage ) ; <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / TupleTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / TupleTest . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> - import java . util . function . Function ; <nl> - <nl> import com . apple . foundationdb . Cluster ; <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . FDB ; <nl> - import com . apple . foundationdb . Transaction ; <nl> import com . apple . foundationdb . TransactionContext ; <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> <nl> private static void runTests ( final int reps , TransactionContext db ) { <nl> System . out . println ( " Running tests . . . " ) ; <nl> long start = System . currentTimeMillis ( ) ; <nl> try { <nl> - db . run ( new Function < Transaction , Void > ( ) { <nl> - @ Override <nl> - public Void apply ( Transaction tr ) { <nl> - Tuple t = new Tuple ( ) ; <nl> - t . add ( 100230045000L ) ; <nl> - t . add ( " Hello ! " ) ; <nl> - t . add ( " foo " . getBytes ( ) ) ; <nl> + db . run ( tr - > { <nl> + Tuple t = new Tuple ( ) ; <nl> + t . add ( 100230045000L ) ; <nl> + t . add ( " Hello ! " ) ; <nl> + t . add ( " foo " . getBytes ( ) ) ; <nl> <nl> - / * for ( Map . Entry < byte [ ] , byte [ ] > e : tr . 
getRange ( " vcount " . getBytes ( ) , " zz " . getBytes ( ) ) ) { <nl> - System . out . println ( " K : " + new String ( e . getKey ( ) ) + " , V : " + new String ( e . getValue ( ) ) ) ; <nl> - } * / <nl> - return null ; <nl> - } <nl> + / * for ( Map . Entry < byte [ ] , byte [ ] > e : tr . getRange ( " vcount " . getBytes ( ) , " zz " . getBytes ( ) ) ) { <nl> + System . out . println ( " K : " + new String ( e . getKey ( ) ) + " , V : " + new String ( e . getValue ( ) ) ) ; <nl> + } * / <nl> + return null ; <nl> } ) ; <nl> } catch ( Throwable e ) { <nl> e . printStackTrace ( ) ; <nl> public Void apply ( Transaction tr ) { <nl> <nl> System . exit ( 0 ) ; <nl> } <nl> + <nl> + private TupleTest ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / VersionstampSmokeTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / VersionstampSmokeTest . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> + import java . util . concurrent . CompletableFuture ; <nl> + <nl> import com . apple . foundationdb . Database ; <nl> import com . apple . foundationdb . FDB ; <nl> import com . apple . foundationdb . MutationType ; <nl> <nl> import com . apple . foundationdb . tuple . Tuple ; <nl> import com . apple . foundationdb . tuple . Versionstamp ; <nl> <nl> - import java . util . concurrent . CompletableFuture ; <nl> - <nl> public class VersionstampSmokeTest { <nl> public static void main ( String [ ] args ) { <nl> FDB fdb = FDB . selectAPIVersion ( 510 ) ; <nl> Database db = fdb . open ( ) ; <nl> <nl> db . run ( tr - > { <nl> - tr . clear ( Tuple . from ( " prefix " ) . range ( ) ) ; <nl> - return null ; <nl> + tr . clear ( Tuple . from ( " prefix " ) . range ( ) ) ; <nl> + return null ; <nl> } ) ; <nl> <nl> CompletableFuture < byte [ ] > trVersionFuture = db . run ( ( Transaction tr ) - > { <nl> - / / The incomplete Versionstamp will have tr ' s version information when committed . <nl> - Tuple t = Tuple . from ( " prefix " , Versionstamp . incomplete ( ) ) ; <nl> - tr . mutate ( MutationType . SET_VERSIONSTAMPED_KEY , t . packWithVersionstamp ( ) , new byte [ 0 ] ) ; <nl> - return tr . getVersionstamp ( ) ; <nl> + / / The incomplete Versionstamp will have tr ' s version information when committed . <nl> + Tuple t = Tuple . from ( " prefix " , Versionstamp . incomplete ( ) ) ; <nl> + tr . mutate ( MutationType . SET_VERSIONSTAMPED_KEY , t . packWithVersionstamp ( ) , new byte [ 0 ] ) ; <nl> + return tr . getVersionstamp ( ) ; <nl> } ) ; <nl> <nl> byte [ ] trVersion = trVersionFuture . join ( ) ; <nl> <nl> Versionstamp v = db . run ( ( Transaction tr ) - > { <nl> - Subspace subspace = new Subspace ( Tuple . from ( " prefix " ) ) ; <nl> - byte [ ] serialized = tr . getRange ( subspace . range ( ) , 1 ) . iterator ( ) . next ( ) . getKey ( ) ; <nl> - Tuple t = subspace . unpack ( serialized ) ; <nl> - return t . getVersionstamp ( 0 ) ; <nl> + Subspace subspace = new Subspace ( Tuple . from ( " prefix " ) ) ; <nl> + byte [ ] serialized = tr . getRange ( subspace . range ( ) , 1 ) . iterator ( ) . next ( ) . getKey ( ) ; <nl> + Tuple t = subspace . unpack ( serialized ) ; <nl> + return t . getVersionstamp ( 0 ) ; <nl> } ) ; <nl> <nl> System . out . println ( v ) ; <nl> System . out . println ( Versionstamp . complete ( trVersion ) ) ; <nl> assert v . equals ( Versionstamp . 
complete ( trVersion ) ) ; <nl> } <nl> + <nl> + private VersionstampSmokeTest ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / WatchTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / WatchTest . java <nl> public void run ( ) { <nl> a . incrementAndGet ( ) ; <nl> } <nl> } ; <nl> - Runnable get = new Runnable ( ) { <nl> - @ Override <nl> - public void run ( ) { <nl> - try { <nl> - System . err . println ( " ` f ' get ( ) . . . " ) ; <nl> - f . join ( ) ; <nl> - System . err . println ( " ` f ' changed " ) ; <nl> - } catch ( FDBException e ) { <nl> - System . err . println ( " ` f ' watch error - > " + e . getMessage ( ) ) ; <nl> - if ( e . getCode ( ) ! = 1101 ) <nl> - throw e ; <nl> - } finally { <nl> - a . incrementAndGet ( ) ; <nl> - } <nl> + Runnable get = ( ) - > { <nl> + try { <nl> + System . err . println ( " ` f ' get ( ) . . . " ) ; <nl> + f . join ( ) ; <nl> + System . err . println ( " ` f ' changed " ) ; <nl> + } catch ( FDBException e12 ) { <nl> + System . err . println ( " ` f ' watch error - > " + e12 . getMessage ( ) ) ; <nl> + if ( e12 . getCode ( ) ! = 1101 ) <nl> + throw e12 ; <nl> + } finally { <nl> + a . incrementAndGet ( ) ; <nl> } <nl> } ; <nl> if ( r . nextBoolean ( ) ) { <nl> public void run ( ) { <nl> } <nl> <nl> / / if ( i % 1000 = = 0 ) { <nl> - System . out . println ( " Done with " + i ) ; <nl> + System . out . println ( " Done with " + i ) ; <nl> / / } <nl> } <nl> } <nl> + <nl> + private WatchTest ( ) { } <nl> } <nl> mmm a / bindings / java / src - completable / test / com / apple / foundationdb / test / WhileTrueTest . java <nl> ppp b / bindings / java / src - completable / test / com / apple / foundationdb / test / WhileTrueTest . java <nl> <nl> <nl> package com . apple . foundationdb . test ; <nl> <nl> - import com . apple . foundationdb . async . AsyncUtil ; <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . atomic . AtomicInteger ; <nl> <nl> + import com . apple . foundationdb . async . AsyncUtil ; <nl> + <nl> public class WhileTrueTest { <nl> public static void main ( String [ ] args ) { <nl> / / This should cause memory issues using the old implementation but not the new one . <nl> public static void main ( String [ ] args ) { <nl> AsyncUtil . whileTrue ( ( ) - > CompletableFuture . completedFuture ( count . decrementAndGet ( ) ) . thenApplyAsync ( c - > c > 0 ) ) . join ( ) ; <nl> System . out . println ( " Final value : " + count . get ( ) ) ; <nl> } <nl> + <nl> + private WhileTrueTest ( ) { } <nl> } <nl> mmm a / bindings / java / src / main / com / apple / foundationdb / FDB . java <nl> ppp b / bindings / java / src / main / com / apple / foundationdb / FDB . java <nl> public Database open ( String clusterFilePath , Executor e ) throws FDBException { <nl> f = new FutureCluster ( Cluster_create ( clusterFilePath ) , e ) ; <nl> } <nl> Cluster c = f . get ( ) ; <nl> - return c . openDatabase ( e ) ; <nl> + Database db = c . openDatabase ( e ) ; <nl> + c . dispose ( ) ; <nl> + <nl> + return db ; <nl> } <nl> <nl> / * * <nl> mmm a / bindings / java / src / main / com / apple / foundationdb / FDBDatabase . java <nl> ppp b / bindings / java / src / main / com / apple / foundationdb / FDBDatabase . java <nl> public DatabaseOptions options ( ) { <nl> public < T > Future < T > runAsync ( final Function < ? 
super Transaction , Future < T > > retryable , Executor e ) { <nl> final AtomicReference < Transaction > trRef = new AtomicReference < Transaction > ( createTransaction ( e ) ) ; <nl> final AtomicReference < T > returnValue = new AtomicReference < T > ( ) ; <nl> - return AsyncUtil . whileTrue ( new Function < Void , Future < Boolean > > ( ) { <nl> + Future < T > result = AsyncUtil . whileTrue ( new Function < Void , Future < Boolean > > ( ) { <nl> @ Override <nl> public Future < Boolean > apply ( Void v ) { <nl> Future < T > process = AsyncUtil . applySafely ( retryable , trRef . get ( ) ) ; <nl> public Boolean apply ( final Transaction tr ) { <nl> } <nl> } ) ; <nl> } <nl> - } ) . map ( new Function < Void , T > ( ) { <nl> + } ) . map ( new Function < Void , T > ( ) { <nl> @ Override <nl> public T apply ( Void o ) { <nl> - trRef . get ( ) . dispose ( ) ; <nl> return returnValue . get ( ) ; <nl> } <nl> } ) ; <nl> + <nl> + result . onReady ( new Runnable ( ) { <nl> + @ Override <nl> + public void run ( ) { <nl> + trRef . get ( ) . dispose ( ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + return result ; <nl> } <nl> <nl> @ Override <nl> public T apply ( Void o ) { <nl> public < T > PartialFuture < T > runAsync ( final PartialFunction < ? super Transaction , ? extends PartialFuture < T > > retryable , Executor e ) { <nl> final AtomicReference < Transaction > trRef = new AtomicReference < Transaction > ( createTransaction ( ) ) ; <nl> final AtomicReference < T > returnValue = new AtomicReference < T > ( ) ; <nl> - return AsyncUtil . whileTrue ( new Function < Void , PartialFuture < Boolean > > ( ) { <nl> + PartialFuture < T > result = AsyncUtil . whileTrue ( new Function < Void , PartialFuture < Boolean > > ( ) { <nl> @ Override <nl> public PartialFuture < Boolean > apply ( Void v ) { <nl> PartialFuture < T > process = AsyncUtil . applySafely ( retryable , trRef . get ( ) ) ; <nl> public Boolean apply ( final Transaction tr ) { <nl> } <nl> } ) ; <nl> } <nl> - } ) . map ( new Function < Void , T > ( ) { <nl> + } ) . map ( new Function < Void , T > ( ) { <nl> @ Override <nl> public T apply ( Void o ) { <nl> - trRef . get ( ) . dispose ( ) ; <nl> return returnValue . get ( ) ; <nl> } <nl> } ) ; <nl> + <nl> + result . onReady ( new Runnable ( ) { <nl> + @ Override <nl> + public void run ( ) { <nl> + trRef . get ( ) . dispose ( ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + return result ; <nl> } <nl> <nl> @ Override <nl> public Transaction createTransaction ( ) { <nl> @ Override <nl> public Transaction createTransaction ( Executor e ) { <nl> pointerReadLock . lock ( ) ; <nl> + Transaction tr = null ; <nl> try { <nl> - Transaction tr = new FDBTransaction ( Database_createTransaction ( getPtr ( ) ) , this , e ) ; <nl> + tr = new FDBTransaction ( Database_createTransaction ( getPtr ( ) ) , this , e ) ; <nl> tr . options ( ) . setUsedDuringCommitProtectionDisable ( ) ; <nl> return tr ; <nl> + } catch ( RuntimeException err ) { <nl> + if ( tr ! = null ) { <nl> + tr . dispose ( ) ; <nl> + } <nl> + <nl> + throw err ; <nl> } finally { <nl> pointerReadLock . unlock ( ) ; <nl> } <nl> protected void disposeInternal ( long cPtr ) { <nl> private native long Database_createTransaction ( long cPtr ) ; <nl> private native void Database_dispose ( long cPtr ) ; <nl> private native void Database_setOption ( long cPtr , int code , byte [ ] value ) throws FDBException ; <nl> - } <nl> \ No newline at end of file <nl> + } <nl> mmm a / bindings / java / src / main / com / apple / foundationdb / FDBTransaction . 
java <nl> ppp b / bindings / java / src / main / com / apple / foundationdb / FDBTransaction . java <nl> public Executor getExecutor ( ) { <nl> <nl> / / Must hold pointerReadLock when calling <nl> private FDBTransaction transfer ( ) { <nl> - FDBTransaction tr = new FDBTransaction ( getPtr ( ) , database , executor ) ; <nl> - tr . options ( ) . setUsedDuringCommitProtectionDisable ( ) ; <nl> - transactionOwner = false ; <nl> - return tr ; <nl> + FDBTransaction tr = null ; <nl> + try { <nl> + tr = new FDBTransaction ( getPtr ( ) , database , executor ) ; <nl> + tr . options ( ) . setUsedDuringCommitProtectionDisable ( ) ; <nl> + transactionOwner = false ; <nl> + return tr ; <nl> + } <nl> + catch ( RuntimeException err ) { <nl> + if ( tr ! = null ) { <nl> + tr . dispose ( ) ; <nl> + } <nl> + <nl> + throw err ; <nl> + } <nl> } <nl> <nl> @ Override <nl> mmm a / bindings / java / src / main / com / apple / foundationdb / LocalityUtil . java <nl> ppp b / bindings / java / src / main / com / apple / foundationdb / LocalityUtil . java <nl> public boolean hasNext ( ) { <nl> } <nl> lastBegin = begin ; <nl> tr . options ( ) . setReadSystemKeys ( ) ; <nl> + block . dispose ( ) ; <nl> block = tr . getRange ( <nl> keyServersForKey ( begin ) , <nl> keyServersForKey ( end ) ) . iterator ( ) ; <nl> public boolean hasNext ( ) { <nl> FDBException err = ( FDBException ) o ; <nl> if ( err . getCode ( ) = = 1007 & & ! Arrays . equals ( begin , lastBegin ) ) { <nl> BoundaryIterator . this . tr . dispose ( ) ; <nl> - BoundaryIterator . this . tr = <nl> - BoundaryIterator . this . tr . getDatabase ( ) . createTransaction ( ) ; <nl> + BoundaryIterator . this . tr = BoundaryIterator . this . tr . getDatabase ( ) . createTransaction ( ) ; <nl> return restartGet ( ) ; <nl> } <nl> } <nl> public void cancel ( ) { <nl> @ Override <nl> public void dispose ( ) { <nl> BoundaryIterator . this . tr . dispose ( ) ; <nl> + block . dispose ( ) ; <nl> } <nl> } <nl> } <nl> mmm a / bindings / java / src / main / com / apple / foundationdb / async / AsyncUtil . java <nl> ppp b / bindings / java / src / main / com / apple / foundationdb / async / AsyncUtil . java <nl> public Boolean apply ( Boolean o ) { <nl> Future < Void > complete = whileTrue ( condition ) ; <nl> Future < List < V > > result = tag ( complete , accumulator ) ; <nl> <nl> + result . onReady ( new Runnable ( ) { <nl> + @ Override <nl> + public void run ( ) { <nl> + it . dispose ( ) ; <nl> + } <nl> + } ) ; <nl> + <nl> return result ; <nl> } <nl> <nl> mmm a / bindings / java / src / main / com / apple / foundationdb / directory / DirectoryLayer . java <nl> ppp b / bindings / java / src / main / com / apple / foundationdb / directory / DirectoryLayer . java <nl> public Void apply ( Node parent ) { <nl> tr . clear ( Range . startsWith ( nodeSubspace . unpack ( node . getKey ( ) ) . getBytes ( 0 ) ) ) ; <nl> tr . clear ( node . range ( ) ) ; <nl> <nl> - return AsyncUtil . whileTrue ( new Function < Void , Future < Boolean > > ( ) { <nl> + Future < Void > result = AsyncUtil . whileTrue ( new Function < Void , Future < Boolean > > ( ) { <nl> @ Override <nl> public Future < Boolean > apply ( Void ignore ) { <nl> Future < Void > subdirRemoveFuture ; <nl> public Void apply ( Node parent ) { <nl> } ) ; <nl> } <nl> } ) ; <nl> + <nl> + result . onReady ( new Runnable ( ) { <nl> + @ Override <nl> + public void run ( ) { <nl> + rangeItr . 
dispose ( ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + return result ; <nl> } <nl> <nl> private Future < Boolean > isPrefixFree ( final ReadTransaction tr , final byte [ ] prefix ) { <nl> public Void apply ( Node parent ) { <nl> if ( node ! = null ) <nl> return new ReadyFuture < Boolean > ( false ) ; <nl> <nl> - AsyncIterator < KeyValue > it = tr . getRange ( nodeSubspace . pack ( prefix ) , nodeSubspace . pack ( ByteArrayUtil . strinc ( prefix ) ) , 1 ) . iterator ( ) ; <nl> - return it . onHasNext ( ) <nl> + final AsyncIterator < KeyValue > it = tr . getRange ( nodeSubspace . pack ( prefix ) , nodeSubspace . pack ( ByteArrayUtil . strinc ( prefix ) ) , 1 ) . iterator ( ) ; <nl> + Future < Boolean > result = it . onHasNext ( ) <nl> . map ( new Function < Boolean , Boolean > ( ) { <nl> @ Override <nl> public Boolean apply ( Boolean hasNext ) { <nl> return ! hasNext ; <nl> } <nl> } ) ; <nl> + <nl> + result . onReady ( new Runnable ( ) { <nl> + @ Override <nl> + public void run ( ) { <nl> + it . dispose ( ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + return result ; <nl> } <nl> } ) ; <nl> } <nl> public PrefixFinder ( ) { <nl> @ Override <nl> public Future < Boolean > apply ( Void ignore ) { <nl> final AsyncIterator < KeyValue > rangeItr = tr . snapshot ( ) . getRange ( allocator . counters . range ( ) , 1 , true ) . iterator ( ) ; <nl> - return rangeItr . onHasNext ( ) <nl> + Future < Boolean > result = rangeItr . onHasNext ( ) <nl> . map ( new Function < Boolean , Void > ( ) { <nl> @ Override <nl> public Void apply ( Boolean hasNext ) { <nl> public Void apply ( Boolean hasNext ) { <nl> return choosePrefix ( tr , allocator ) ; / / false exits the loop ( i . e . we have a valid prefix ) <nl> } <nl> } ) ; <nl> + <nl> + result . onReady ( new Runnable ( ) { <nl> + @ Override <nl> + public void run ( ) { <nl> + rangeItr . dispose ( ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + return result ; <nl> } <nl> } ) <nl> . map ( new Function < Void , byte [ ] > ( ) { <nl> new file mode 100644 <nl> index 0000000000 . . 6d21d7353b <nl> mmm / dev / null <nl> ppp b / bindings / java / suppressions . xml <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> + <nl> + < ! DOCTYPE suppressions PUBLIC <nl> + " - / / Puppy Crawl / / DTD Suppressions 1 . 1 / / EN " <nl> + " http : / / checkstyle . sourceforge . net / dtds / suppressions_1_1 . dtd " > <nl> + <nl> + < suppressions > <nl> + < ! - - These files are auto generated . Ignore those files for style checks . - - > <nl> + < suppress files = " . + Options \ . java " checks = " . * " / > <nl> + < suppress files = " . + ConflictRangeType \ . java " checks = " . * " / > <nl> + < suppress files = " . + FDBException \ . java " checks = " . * " / > <nl> + < suppress files = " . + MutationType \ . java " checks = " . * " / > <nl> + < suppress files = " . + StreamingMode \ . java " checks = " . * " / > <nl> + < / suppressions > <nl> | Merge pull request from cie / java - add - missing - dispose | apple/foundationdb | 24c64394565dc01b602dfed2a6066e34e238aecb | 2017-12-14T21:38:15Z |
mmm a / libraries / fc <nl> ppp b / libraries / fc <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 4dae7e0fff05647ec06972bbce03b16e17cae4ac <nl> + Subproject commit aac546b419891ef6644e0d99dba5e8d33f70401d <nl> | update fc submodule to latest master | EOSIO/eos | 8218a70f0cddf428bb75c818c10a7d0d867fb53e | 2018-09-24T14:04:17Z |
mmm a / stdlib / public / core / Integers . swift . gyb <nl> ppp b / stdlib / public / core / Integers . swift . gyb <nl> extension FixedWidthInteger { <nl> return Self . init ( _truncatingBits : UInt ( Self . bitWidth . _value ) & - 1 ) <nl> } <nl> <nl> - public var popcount : Int { <nl> - fatalError ( ) <nl> - } <nl> - <nl> public static func doubleWidthDivide ( <nl> _ lhs : ( high : Self , low : Magnitude ) , _ rhs : Self ) <nl> - > ( quotient : Self , remainder : Self ) { <nl> public struct $ { Self } <nl> ) . _lowUWord . _value ) <nl> } <nl> <nl> + @ _transparent <nl> + public var popcount : Int { <nl> + return Int ( <nl> + $ { Self } ( <nl> + Builtin . int_ctpop_Int $ { bits } ( self . _value ) <nl> + ) . _lowUWord . _value ) <nl> + } <nl> + <nl> / / @ _transparent <nl> public func word ( at n : Int ) - > UInt { <nl> _precondition ( n > = 0 , " Negative word index " ) <nl> | implementing popcount | apple/swift | 3517b62c42f212b8b579ecca3d114783b9cae6ca | 2016-07-29T10:31:21Z |
mmm a / buildscripts / errorcodes . py <nl> ppp b / buildscripts / errorcodes . py <nl> def readErrorCodes ( callback , replaceZero = False ) : <nl> re . compile ( " ( ( fassertFailed ) ( ) ) * \ ( ( * ) ( \ d + ) " ) <nl> ] <nl> <nl> - bad = [ re . compile ( " \ sassert * \ ( " ) ] <nl> + bad = [ re . compile ( " ^ \ s * assert * \ ( " ) ] <nl> <nl> for x in utils . getAllSourceFiles ( ) : <nl> <nl> | Make " bare assert " check more selective | mongodb/mongo | 41e3d093a280680d113aa2c92fa0aebf00b9212a | 2013-04-22T12:20:35Z |
mmm a / android / build . gradle <nl> ppp b / android / build . gradle <nl> <nl> - / * <nl> - * Licensed to the Apache Software Foundation ( ASF ) under one <nl> - * or more contributor license agreements . See the NOTICE file <nl> - * distributed with this work for additional information <nl> - * regarding copyright ownership . The ASF licenses this file <nl> - * to you under the Apache License , Version 2 . 0 ( the <nl> - * " License " ) ; you may not use this file except in compliance <nl> - * with the License . You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , <nl> - * software distributed under the License is distributed on an <nl> - * " AS IS " BASIS , WITHOUT WARRANTIES OR CONDITIONS OF ANY <nl> - * KIND , either express or implied . See the License for the <nl> - * specific language governing permissions and limitations <nl> - * under the License . <nl> - * / <nl> - <nl> + <nl> buildscript { <nl> repositories { <nl> - jcenter { <nl> + mavenLocal ( ) <nl> + maven ( ) { <nl> + url ' https : / / maven . aliyun . com / repository / google ' <nl> + } <nl> + jcenter ( ) { <nl> url ' https : / / maven . aliyun . com / repository / jcenter ' <nl> } <nl> - mavenLocal ( ) <nl> } <nl> dependencies { <nl> - classpath ' com . android . tools . build : gradle : 2 . 3 . 3 ' <nl> + classpath ' com . android . tools . build : gradle : 3 . 2 . 1 ' <nl> } <nl> } <nl> <nl> subprojects { <nl> } <nl> } <nl> repositories { <nl> + mavenLocal ( ) <nl> if ( project . hasProperty ( ' external_repositories ' ) ) { <nl> maven { <nl> url external_repositories <nl> } <nl> } <nl> - jcenter { <nl> + maven ( ) { <nl> + url ' https : / / maven . aliyun . com / repository / google ' <nl> + } <nl> + jcenter ( ) { <nl> url ' https : / / maven . aliyun . com / repository / jcenter ' <nl> } <nl> - mavenCentral ( ) <nl> - mavenLocal ( ) <nl> } <nl> buildscript { <nl> dependencies { <nl> - classpath ' com . android . tools . build : gradle : 2 . 3 . 3 ' <nl> classpath ' com . jfrog . bintray . gradle : gradle - bintray - plugin : 1 . 0 ' <nl> classpath ' com . github . dcendents : android - maven - gradle - plugin : 1 . 5 ' <nl> } <nl> + repositories { <nl> + mavenLocal ( ) <nl> + maven ( ) { <nl> + url ' https : / / maven . aliyun . com / repository / google ' <nl> + } <nl> + jcenter ( ) { <nl> + url ' https : / / maven . aliyun . com / repository / jcenter ' <nl> + } <nl> + } <nl> } <nl> ext { <nl> compileSdkVersion = 26 <nl> mmm a / android / commons / build . gradle <nl> ppp b / android / commons / build . gradle <nl> apply plugin : ' com . android . library ' <nl> android { <nl> <nl> compileSdkVersion project . compileSdkVersion <nl> - buildToolsVersion project . buildToolsVersion <nl> + / / buildToolsVersion project . buildToolsVersion <nl> resourcePrefix " weexcomm " <nl> <nl> defaultConfig { <nl> android { <nl> } <nl> <nl> dependencies { <nl> - compile fileTree ( dir : ' libs ' , include : [ ' * . jar ' ] ) <nl> - compile project ( ' : weex_sdk ' ) <nl> - compile " com . android . support : support - v4 : $ { project . supportLibVersion } " <nl> - compile " com . android . support : appcompat - v7 : $ { project . supportLibVersion } " <nl> - / / compile project ( ' : weex_sdk ' ) <nl> - compile ' com . squareup . picasso : picasso : 2 . 5 . 2 ' <nl> - compile ' com . facebook . fresco : fresco : 0 . 10 . 0 ' <nl> - compile ' com . taobao . android . 
weex_inspection : protocol : 1 . 1 . 4 . 1 ' <nl> + implementation fileTree ( dir : ' libs ' , include : [ ' * . jar ' ] ) <nl> + implementation project ( ' : weex_sdk ' ) <nl> + implementation " com . android . support : support - v4 : $ { project . supportLibVersion } " <nl> + implementation " com . android . support : appcompat - v7 : $ { project . supportLibVersion } " <nl> + / / implementation project ( ' : weex_sdk ' ) <nl> + implementation ' com . squareup . picasso : picasso : 2 . 5 . 2 ' <nl> + implementation ' com . facebook . fresco : fresco : 0 . 10 . 0 ' <nl> + implementation ' com . taobao . android . weex_inspection : protocol : 1 . 1 . 4 . 1 ' <nl> <nl> - provided ' com . taobao . android : weex_analyzer : 0 . 1 . 0 . 5 ' <nl> - provided ' com . squareup . okhttp : okhttp : 2 . 3 . 0 ' <nl> - provided ' com . squareup . okhttp : okhttp - ws : 2 . 3 . 0 ' <nl> - provided " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> + compileOnly ' com . taobao . android : weex_analyzer : 0 . 1 . 0 . 5 ' <nl> + compileOnly ' com . squareup . okhttp : okhttp : 2 . 3 . 0 ' <nl> + compileOnly ' com . squareup . okhttp : okhttp - ws : 2 . 3 . 0 ' <nl> + compileOnly " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> <nl> - testCompile ' junit : junit : 4 . 12 ' <nl> - testCompile ' org . hamcrest : hamcrest - core : 1 . 3 ' <nl> - testCompile ' org . javassist : javassist : 3 . 20 . 0 - GA ' <nl> - testCompile ' org . mockito : mockito - core : 1 . 10 . 19 ' <nl> - testCompile ' org . objenesis : objenesis : 2 . 1 ' <nl> - testCompile ' org . powermock : powermock - core : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - api - mockito : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - junit4 - common : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - junit4 : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - junit4 - legacy : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - testng : 1 . 6 . 4 ' <nl> - testCompile ' org . robolectric : robolectric : 3 . 0 - rc3 ' <nl> + testImplementation ' junit : junit : 4 . 12 ' <nl> + testImplementation ' org . hamcrest : hamcrest - core : 1 . 3 ' <nl> + testImplementation ' org . javassist : javassist : 3 . 20 . 0 - GA ' <nl> + testImplementation ' org . mockito : mockito - core : 1 . 10 . 19 ' <nl> + testImplementation ' org . objenesis : objenesis : 2 . 1 ' <nl> + testImplementation ' org . powermock : powermock - core : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - api - mockito : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - junit4 - common : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - junit4 : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - junit4 - legacy : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - testng : 1 . 6 . 4 ' <nl> + testImplementation ' org . robolectric : robolectric : 3 . 0 - rc3 ' <nl> } <nl> <nl> if ( file ( ' . . / license / LICENSE ' ) . exists ( ) ) { <nl> mmm a / android / gradle / wrapper / gradle - wrapper . properties <nl> ppp b / android / gradle / wrapper / gradle - wrapper . 
properties <nl> <nl> - # Mon Apr 23 15 : 37 : 25 CST 2018 <nl> + # Tue May 28 20 : 11 : 48 CST 2019 <nl> distributionBase = GRADLE_USER_HOME <nl> distributionPath = wrapper / dists <nl> zipStoreBase = GRADLE_USER_HOME <nl> zipStorePath = wrapper / dists <nl> - distributionUrl = https \ : / / services . gradle . org / distributions / gradle - 4 . 4 - all . zip <nl> + distributionUrl = https \ : / / services . gradle . org / distributions / gradle - 4 . 6 - all . zip <nl> mmm a / android / jacoco . gradle <nl> ppp b / android / jacoco . gradle <nl> <nl> - apply plugin : ' jacoco ' <nl> + / / apply plugin : ' jacoco ' <nl> <nl> jacoco { <nl> toolVersion = " 0 . 7 . 9 + " <nl> jacoco { <nl> / / def sdk_path = ' . . / . . / sdk ' <nl> / / println ( sdk_path ) <nl> <nl> - def coverageSourceDirs = [ <nl> - ' . . / app / src / main / java ' , <nl> - ' . . / . . / . . / android / sdk / src / main / java ' ] <nl> + / / def coverageSourceDirs = [ <nl> + / / ' . . / app / src / main / java ' , <nl> + / / ' . . / . . / . . / android / sdk / src / main / java ' ] <nl> <nl> - task jacocoTestReport ( type : JacocoReport ) { <nl> - group = " Reporting " <nl> - description = " Generate Jacoco coverage reports after running tests . " <nl> - reports { <nl> - xml . enabled = true <nl> - html . enabled = true <nl> - } <nl> - classDirectories = fileTree ( / / " enter code here " <nl> - dir : ' . . / . . / sdk / build / intermediates / classes ' , <nl> - excludes : [ ' * * / R * . class ' , <nl> - ' * * / * $ InjectAdapter . class ' , <nl> - ' * * / * $ ModuleAdapter . class ' , <nl> - ' * * / * $ ViewInjector * . class ' <nl> - ] ) <nl> - sourceDirectories = files ( coverageSourceDirs ) <nl> - File configFile = file ( ' . . / . . / . . / android / sdk / src / main / java ' ) <nl> - configFile = file ( configFile . absolutePath ) <nl> - println configFile . path <nl> - <nl> - <nl> - <nl> - executionData = files ( " $ buildDir / outputs / code - coverage / connected / coverage . ec " ) <nl> - doFirst { <nl> - new File ( " $ buildDir / intermediates / classes / " ) . eachFileRecurse { file - > <nl> - if ( file . name . contains ( ' $ $ ' ) ) { <nl> - file . renameTo ( file . path . replace ( ' $ $ ' , ' $ ' ) ) <nl> - } <nl> - } <nl> - } <nl> - } <nl> \ No newline at end of file <nl> + / / task jacocoTestReport ( type : JacocoReport ) { <nl> + / / group = " Reporting " <nl> + / / description = " Generate Jacoco coverage reports after running tests . " <nl> + / / reports { <nl> + / / xml . enabled = true <nl> + / / html . enabled = true <nl> + / / } <nl> + / / classDirectories = fileTree ( / / " enter code here " <nl> + / / dir : ' . . / . . / sdk / build / intermediates / classes ' , <nl> + / / excludes : [ ' * * / R * . class ' , <nl> + / / ' * * / * $ InjectAdapter . class ' , <nl> + / / ' * * / * $ ModuleAdapter . class ' , <nl> + / / ' * * / * $ ViewInjector * . class ' <nl> + / / ] ) <nl> + / / sourceDirectories = files ( coverageSourceDirs ) <nl> + / / File configFile = file ( ' . . / . . / . . / android / sdk / src / main / java ' ) <nl> + / / configFile = file ( configFile . absolutePath ) <nl> + / / println configFile . path <nl> + / / <nl> + / / <nl> + / / <nl> + / / executionData = files ( " $ buildDir / outputs / code - coverage / connected / coverage . ec " ) <nl> + / / doFirst { <nl> + / / new File ( " $ buildDir / intermediates / classes / " ) . eachFileRecurse { file - > <nl> + / / if ( file . name . contains ( ' $ $ ' ) ) { <nl> + / / file . renameTo ( file . path . 
replace ( ' $ $ ' , ' $ ' ) ) <nl> + / / } <nl> + / / } <nl> + / / } <nl> + / / } <nl> \ No newline at end of file <nl> mmm a / android / playground / app / build . gradle <nl> ppp b / android / playground / app / build . gradle <nl> apply plugin : ' com . android . application ' <nl> <nl> android { <nl> compileSdkVersion project . compileSdkVersion <nl> - buildToolsVersion project . buildToolsVersion <nl> - <nl> defaultConfig { <nl> applicationId " com . alibaba . weex " <nl> minSdkVersion project . minSdkVersion <nl> android { <nl> testInstrumentationRunner " android . support . test . runner . AndroidJUnitRunner " <nl> multiDexEnabled true <nl> ndk { <nl> - abiFilters " armeabi - v7a " , " armeabi " , " x86 " <nl> + abiFilters " arm64 - v8a " , " armeabi - v7a " , " x86 " <nl> } <nl> } <nl> applicationVariants . all { variant - > <nl> android { <nl> <nl> <nl> dependencies { <nl> - compile fileTree ( include : [ ' * . jar ' ] , dir : ' libs ' ) <nl> - androidTestCompile " com . android . support : support - annotations : $ { project . supportLibVersion } " <nl> - androidTestCompile ' junit : junit : 4 . 12 ' <nl> - androidTestCompile ' org . awaitility : awaitility : 3 . 0 . 0 ' <nl> - androidTestCompile ' org . awaitility : awaitility - proxy : 3 . 0 . 0 ' <nl> - androidTestCompile ' com . android . support . test . espresso : espresso - core : 2 . 2 . 2 ' <nl> - androidTestCompile ( ' com . android . support . test . espresso : espresso - contrib : 2 . 2 . 2 ' , { <nl> + implementation fileTree ( include : [ ' * . jar ' ] , dir : ' libs ' ) <nl> + androidTestImplementation " com . android . support : support - annotations : $ { project . supportLibVersion } " <nl> + androidTestImplementation ' junit : junit : 4 . 12 ' <nl> + androidTestImplementation ' org . awaitility : awaitility : 3 . 0 . 0 ' <nl> + androidTestImplementation ' org . awaitility : awaitility - proxy : 3 . 0 . 0 ' <nl> + androidTestImplementation ' com . android . support . test . espresso : espresso - core : 2 . 2 . 2 ' <nl> + androidTestImplementation ( ' com . android . support . test . espresso : espresso - contrib : 2 . 2 . 2 ' , { <nl> exclude group : ' com . android . support ' , module : ' support - annotations ' <nl> exclude group : ' com . android . support ' , module : ' support - v4 ' <nl> exclude group : ' com . android . support ' , module : ' design ' <nl> exclude group : ' com . android . support ' , module : ' appcompat - v7 ' <nl> exclude group : ' com . android . support ' , module : ' recyclerview - v7 ' <nl> } ) <nl> - androidTestCompile ' com . android . support . test : runner : 0 . 5 ' <nl> - androidTestCompile ' com . squareup . picasso : picasso : 2 . 5 . 2 ' <nl> - androidTestCompile ' org . hamcrest : hamcrest - library : 1 . 3 ' <nl> - androidTestCompile ' com . android . support . test . uiautomator : uiautomator - v18 : 2 . 1 . 2 ' <nl> + androidTestImplementation ' com . android . support . test : runner : 0 . 5 ' <nl> + androidTestImplementation ' com . squareup . picasso : picasso : 2 . 5 . 2 ' <nl> + androidTestImplementation ' org . hamcrest : hamcrest - library : 1 . 3 ' <nl> + androidTestImplementation ' com . android . support . test . uiautomator : uiautomator - v18 : 2 . 1 . 2 ' <nl> / * source dependency * / <nl> <nl> - compile project ( ' : commons ' ) <nl> - compile project ( ' : weex_sdk ' ) <nl> + implementation project ( ' : commons ' ) <nl> + implementation project ( ' : weex_sdk ' ) <nl> / / https : / / github . 
com / weexteam / weex - analyzer - android <nl> / / Weex - Analyzer provides several convenient tools such as Memory Monitor <nl> / / to optimize your application . It ' s not available by default , you can <nl> / / set WXAnalyzerDelegate # DEBUG = = true to enable it <nl> - debugCompile ' com . taobao . android : weex_analyzer : 0 . 1 . 0 . 5 ' <nl> + debugImplementation ' com . taobao . android : weex_analyzer : 0 . 1 . 0 . 5 ' <nl> <nl> <nl> - compile ' com . loopj . android : android - async - http : 1 . 4 . 9 @ aar ' <nl> - compile ' com . facebook . fresco : fresco : 0 . 12 . 0 + ' <nl> - compile ' com . facebook . fresco : animated - gif : 0 . 12 . 0 ' <nl> + implementation ' com . loopj . android : android - async - http : 1 . 4 . 9 @ aar ' <nl> + implementation ' com . facebook . fresco : fresco : 0 . 12 . 0 + ' <nl> + implementation ' com . facebook . fresco : animated - gif : 0 . 12 . 0 ' <nl> <nl> - compile ' com . google . zxing : core : 3 . 3 . 3 ' <nl> + implementation ' com . google . zxing : core : 3 . 3 . 3 ' <nl> <nl> - compile ' com . squareup . okhttp : okhttp : 2 . 3 . 0 ' <nl> - compile ' com . squareup . okhttp : okhttp - ws : 2 . 3 . 0 ' <nl> - compile ' com . squareup . okio : okio : 1 . 0 . 1 ' <nl> - compile " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> - compile " com . android . support : support - v4 : $ { project . supportLibVersion } " <nl> - compile " com . android . support : appcompat - v7 : $ { project . supportLibVersion } " <nl> - compile " com . android . support : design : $ { project . supportLibVersion } " <nl> - compile " com . android . support : support - annotations : $ { project . supportLibVersion } " <nl> - compile ' com . jakewharton . scalpel : scalpel : 1 . 1 . 2 ' <nl> - compile ' com . taobao . android . weex_inspection : urlconnection_interceptor : 1 . 0 . 0 ' <nl> - compile ' com . android . support . test . espresso : espresso - idling - resource : 2 . 2 . 2 ' <nl> - compile ' com . taobao . android : weex_inspector : 0 . 24 . 2 . 4 ' <nl> - / / compile project ( " : weex_inspector " ) <nl> + implementation ' com . squareup . okhttp : okhttp : 2 . 3 . 0 ' <nl> + implementation ' com . squareup . okhttp : okhttp - ws : 2 . 3 . 0 ' <nl> + implementation ' com . squareup . okio : okio : 1 . 0 . 1 ' <nl> + implementation " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> + implementation " com . android . support : support - v4 : $ { project . supportLibVersion } " <nl> + implementation " com . android . support : appcompat - v7 : $ { project . supportLibVersion } " <nl> + implementation " com . android . support : design : $ { project . supportLibVersion } " <nl> + implementation " com . android . support : support - annotations : $ { project . supportLibVersion } " <nl> + implementation ' com . jakewharton . scalpel : scalpel : 1 . 1 . 2 ' <nl> + implementation ' com . taobao . android . weex_inspection : urlconnection_interceptor : 1 . 0 . 0 ' <nl> + implementation ' com . taobao . android . weex_inspection : protocol : 1 . 1 . 4 . 1 ' <nl> + implementation ' com . android . support . test . espresso : espresso - idling - resource : 2 . 2 . 2 ' <nl> + implementation ' com . taobao . android : weex_inspector : 0 . 24 . 2 . 4 ' <nl> + / / implementation project ( " : weex_inspector " ) <nl> <nl> / / Bind actions to effects . See https : / / alibaba . github . io / bindingx / <nl> - compile ' com . alibaba . android : bindingx - core : 1 . 1 . 1 . 2 @ aar ' <nl> - compile ' com . alibaba . 
android : bindingx_weex_plugin : 1 . 1 . 1 @ aar ' <nl> + implementation ' com . alibaba . android : bindingx - core : 1 . 1 . 1 . 2 @ aar ' <nl> + implementation ' com . alibaba . android : bindingx_weex_plugin : 1 . 1 . 1 @ aar ' <nl> } <nl> <nl> if ( file ( ' . . / . . / license / LICENSE ' ) . exists ( ) ) { <nl> mmm a / android / playground / app / src / main / java / com / alibaba / weex / WXApplication . java <nl> ppp b / android / playground / app / src / main / java / com / alibaba / weex / WXApplication . java <nl> <nl> import android . app . Activity ; <nl> import android . app . Application ; <nl> import android . os . Bundle ; <nl> + import android . support . multidex . MultiDex ; <nl> <nl> import com . alibaba . android . bindingx . plugin . weex . BindingX ; <nl> import com . alibaba . weex . commons . adapter . DefaultWebSocketAdapterFactory ; <nl> <nl> @ Override <nl> public void onCreate ( ) { <nl> super . onCreate ( ) ; <nl> - <nl> + MultiDex . install ( this ) ; <nl> / * * <nl> * Set up for fresco usage . <nl> * Set < RequestListener > requestListeners = new HashSet < > ( ) ; <nl> mmm a / android / playground / app / src / main / res / xml / preferences . xml <nl> ppp b / android / playground / app / src / main / res / xml / preferences . xml <nl> <nl> android : summary = " @ string / preferences_remember_duplicates_summary " / > <nl> < CheckBoxPreference <nl> android : key = " preferences_history " <nl> - android : defaultValue = " true " <nl> - android : title = " @ string / preferences_history_title " <nl> - android : summary = " @ string / preferences_history_summary " / > <nl> + android : defaultValue = " true " / > <nl> < CheckBoxPreference <nl> android : key = " preferences_supplemental " <nl> android : defaultValue = " true " <nl> mmm a / android / sdk / build . gradle <nl> ppp b / android / sdk / build . gradle <nl> <nl> * specific language governing permissions and limitations <nl> * under the License . <nl> * / <nl> - <nl> - buildscript { <nl> - dependencies { <nl> - if ( ! project . disableCov ) { <nl> - classpath ' com . vanniktech : gradle - android - junit - jacoco - plugin : 0 . 5 . 0 ' <nl> - } <nl> - } <nl> - } <nl> - <nl> - plugins { <nl> - id " com . github . hierynomus . license " version " 0 . 14 . 0 " <nl> - } <nl> + plugins { <nl> + id " com . github . hierynomus . license " version " 0 . 14 . 0 " <nl> + } <nl> <nl> apply plugin : ' com . android . library ' <nl> apply plugin : ' checkstyle ' <nl> apply plugin : ' com . jfrog . bintray ' <nl> apply plugin : ' com . github . dcendents . android - maven ' <nl> <nl> - if ( ! project . disableCov ) { <nl> - apply plugin : ' com . vanniktech . android . junit . jacoco ' <nl> - junitJacoco { <nl> - excludes = [ ' com / taobao / weex / dom / flex / * * ' , ' com / taobao / weex / ui / view / refresh / circlebar / * * ' ] <nl> - } <nl> - } <nl> + / / if ( ! project . disableCov ) { <nl> + / / apply plugin : ' com . vanniktech . android . junit . jacoco ' <nl> + / / junitJacoco { <nl> + / / excludes = [ ' com / taobao / weex / dom / flex / * * ' , ' com / taobao / weex / ui / view / refresh / circlebar / * * ' ] <nl> + / / } <nl> + / / } <nl> <nl> task checkstyle ( type : Checkstyle ) { <nl> configFile file ( " $ { project . rootDir } / sdk / config / quality / checkstyle . xml " ) / / Where my checkstyle config is . . . <nl> checkstyle { <nl> toolVersion = ' 6 . 9 ' <nl> } <nl> <nl> - version = project . hasProperty ( ' weexVersion ' ) ? project . getProperty ( ' weexVersion ' ) : " 0 . 24 . 0 . 
1 " <nl> + version = project . hasProperty ( ' weexVersion ' ) ? project . getProperty ( ' weexVersion ' ) : " 0 . 20 . 0 . 1 " <nl> <nl> / / Check version , the version must have 4 sections . The leading three section must be number , and the last section is odd number with or without suffix string . <nl> if ( ! project . hasProperty ( ' ignoreVersionCheck ' ) | | ! project . getProperty ( ' ignoreVersionCheck ' ) . equals ( " true " ) ) { <nl> if ( ! project . hasProperty ( ' ignoreVersionCheck ' ) | | ! project . getProperty ( ' ignoreVe <nl> <nl> android { <nl> compileSdkVersion project . compileSdkVersion <nl> - buildToolsVersion project . buildToolsVersion <nl> + / / buildToolsVersion project . buildToolsVersion <nl> resourcePrefix " weex " <nl> useLibrary ' org . apache . http . legacy ' <nl> + if ( project . hasProperty ( ' removeSharedLib ' ) & & " true " . equals ( project . getProperty ( ' removeSharedLib ' ) ) ) { <nl> + packagingOptions { <nl> + exclude ' lib / armeabi - v7a / libc + + _shared . so ' <nl> + exclude ' lib / arm64 - v8a / libc + + _shared . so ' <nl> + exclude ' lib / x86 / libc + + _shared . so ' <nl> + } <nl> + } <nl> copy { <nl> from ' . . / . . / pre - build ' <nl> into new File ( projectDir , " assets " ) <nl> android { <nl> include ' weex - rax - api . js ' <nl> } <nl> <nl> - <nl> - / / copy { <nl> - / / from ' . . / . . / pre - build ' <nl> - / / into new File ( projectDir , " assets " ) <nl> - / / include ' weex - rax - extra - api . js ' <nl> - / / } <nl> - <nl> def ndkversion = 16 ; <nl> def api_level = " " ; <nl> def compile_tool = " clang " ; <nl> - def cxx_stl = " c + + _static " ; <nl> + def cxx_stl = " c + + _shared " ; <nl> if ( ndkversion < 16 ) { <nl> api_level = " android - 14 " ; <nl> } else { <nl> - api_level = " android - 19 " ; <nl> + api_level = " android - 21 " ; <nl> } <nl> <nl> + def android_project_dir = projectDir <nl> + <nl> defaultConfig { <nl> buildConfigField " String " , " buildJavascriptFrameworkVersion " , " \ " $ { jsfmVersion } \ " " <nl> buildConfigField " String " , " buildVersion " , " \ " $ { version } \ " " <nl> android { <nl> versionCode 1 <nl> versionName project . version <nl> ndk { <nl> - abiFilters " armeabi - v7a " , " armeabi " , " x86 " <nl> + abiFilters " arm64 - v8a " , " armeabi - v7a " , " x86 " <nl> } <nl> externalNativeBuild { <nl> cmake { <nl> android { <nl> ' - DANDROID_TOOLCHAIN = ' + " $ { compile_tool } " , <nl> ' - DANDROID_STL = ' + " $ { cxx_stl } " , <nl> ' - DCMAKE_BUILD_TYPE = Release ' , <nl> + ' - DANDROID_PROJECT_DIR = ' + " $ { android_project_dir } " , <nl> ' - DENABLE_ASAN = false ' <nl> } <nl> } <nl> android { <nl> } <nl> <nl> dependencies { <nl> - compile fileTree ( include : [ ' * . jar ' ] , dir : ' libs ' ) <nl> - provided " com . android . support : recyclerview - v7 : $ { project . supportLibVersion } " <nl> - provided " com . android . support : support - v4 : $ { project . supportLibVersion } " <nl> - provided " com . android . support : appcompat - v7 : $ { project . supportLibVersion } " <nl> - provided " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> - <nl> - testCompile " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> - testCompile ' junit : junit : 4 . 12 ' <nl> - testCompile ' org . hamcrest : hamcrest - core : 1 . 3 ' <nl> - testCompile ' org . javassist : javassist : 3 . 20 . 0 - GA ' <nl> - testCompile ' org . mockito : mockito - core : 1 . 10 . 19 ' <nl> - testCompile ' org . objenesis : objenesis : 2 . 
1 ' <nl> - testCompile ' org . powermock : powermock - core : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - api - mockito : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - junit4 - common : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - junit4 : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - junit4 - legacy : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - module - testng : 1 . 6 . 4 ' <nl> - testCompile ' org . powermock : powermock - classloading - xstream : 1 . 6 . 4 ' <nl> - testCompile " org . powermock : powermock - module - junit4 - rule : 1 . 6 . 4 " <nl> - testCompile ' org . robolectric : robolectric : 3 . 3 . 2 ' <nl> - testCompile " org . robolectric : shadows - httpclient : 3 . 3 . 2 " <nl> - testCompile ' org . json : json : 20160212 ' <nl> + implementation fileTree ( include : [ ' * . jar ' ] , dir : ' libs ' ) <nl> + compileOnly " com . android . support : recyclerview - v7 : $ { project . supportLibVersion } " <nl> + compileOnly " com . android . support : support - v4 : $ { project . supportLibVersion } " <nl> + compileOnly " com . android . support : appcompat - v7 : $ { project . supportLibVersion } " <nl> + compileOnly " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> + <nl> + testImplementation " com . alibaba : fastjson : $ { project . fastjsonLibVersion } " <nl> + testImplementation ' junit : junit : 4 . 12 ' <nl> + testImplementation ' org . hamcrest : hamcrest - core : 1 . 3 ' <nl> + testImplementation ' org . javassist : javassist : 3 . 20 . 0 - GA ' <nl> + testImplementation ' org . mockito : mockito - core : 1 . 10 . 19 ' <nl> + testImplementation ' org . objenesis : objenesis : 2 . 1 ' <nl> + testImplementation ' org . powermock : powermock - core : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - api - mockito : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - junit4 - common : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - junit4 : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - junit4 - legacy : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - module - testng : 1 . 6 . 4 ' <nl> + testImplementation ' org . powermock : powermock - classloading - xstream : 1 . 6 . 4 ' <nl> + testImplementation " org . powermock : powermock - module - junit4 - rule : 1 . 6 . 4 " <nl> + testImplementation ' org . robolectric : robolectric : 3 . 3 . 2 ' <nl> + testImplementation " org . robolectric : shadows - httpclient : 3 . 3 . 2 " <nl> + testImplementation ' org . json : json : 20160212 ' <nl> } <nl> <nl> - if ( file ( ' . . / license / LICENSE ' ) . exists ( ) ) { <nl> - license { <nl> - header = file ( ' . . / license / LICENSE ' ) <nl> - mapping ( ' cpp ' , ' JAVADOC_STYLE ' ) <nl> - mapping ( ' h ' , ' JAVADOC_STYLE ' ) <nl> - excludes ( [ ' com / taobao / weex / utils / WXDataStructureUtil . java ' ] ) <nl> - } <nl> - <nl> - task weex_core_license ( type : com . hierynomus . gradle . license . tasks . LicenseFormat ) { <nl> - source = fileTree ( dir : " . . / . . / weex_core " ) . include ( [ ' * * / * . h ' , ' * * / * . cpp ' , ' * * / * . cc ' , ' * * / * . c ' ] ) . <nl> - exclude ( [ ' Source / rapidjson / * * / * . h ' , ' Source / rapidjson / * * / * . cpp ' , <nl> - ' Source / android / base / base64 / * * / * . h ' , ' Source / android / base / base64 / * * / * . 
cpp ' , <nl> - ' Source / android / jniprebuild / jniheader / * . h ' , <nl> - ' Source / base / Compatible . cpp ' , <nl> - ' Source / IPC / * * / * . h ' , ' Source / IPC / * * / * . cpp ' , ' Source / IPC / * * / * . c ' , <nl> - ' Source / android / jsengine / dependence / * * / * . h ' , <nl> - ' Source / android / jsengine / dependence / * * / * . cpp ' , <nl> - ' Source / include / wtf / Optional . h ' , <nl> - ' Source / include / wtf / Brigand . h ' , <nl> - ' Source / include / wtf / Variant . h ' ] ) <nl> - } <nl> - preBuild . dependsOn licenseFormat <nl> - } <nl> + if ( file ( ' . . / license / LICENSE ' ) . exists ( ) ) { <nl> + license { <nl> + header = file ( ' . . / license / LICENSE ' ) <nl> + mapping ( ' cpp ' , ' JAVADOC_STYLE ' ) <nl> + mapping ( ' h ' , ' JAVADOC_STYLE ' ) <nl> + excludes ( [ ' com / taobao / weex / utils / WXDataStructureUtil . java ' ] ) <nl> + } <nl> + <nl> + task weex_core_license ( type : com . hierynomus . gradle . license . tasks . LicenseFormat ) { <nl> + source = fileTree ( dir : " . . / . . / weex_core " ) . include ( [ ' * * / * . h ' , ' * * / * . cpp ' , ' * * / * . cc ' , ' * * / * . c ' ] ) . <nl> + exclude ( [ ' Source / rapidjson / * * / * . h ' , ' Source / rapidjson / * * / * . cpp ' , <nl> + ' Source / android / base / base64 / * * / * . h ' , ' Source / android / base / base64 / * * / * . cpp ' , <nl> + ' Source / android / jniprebuild / jniheader / * . h ' , <nl> + ' Source / base / Compatible . cpp ' , <nl> + ' Source / IPC / * * / * . h ' , ' Source / IPC / * * / * . cpp ' , ' Source / IPC / * * / * . c ' , <nl> + ' Source / android / jsengine / dependence / * * / * . h ' , <nl> + ' Source / android / jsengine / dependence / * * / * . cpp ' , <nl> + ' Source / include / wtf / Optional . h ' , <nl> + ' Source / include / wtf / Brigand . h ' , <nl> + ' Source / include / wtf / Variant . h ' ] ) <nl> + } <nl> + preBuild . dependsOn licenseFormat <nl> + } <nl> <nl> task checkNdkVersion ( ) { <nl> def rootDir = project . rootDir <nl> task checkNdkVersion ( ) { <nl> def errMsg = ' please set ndk . dir path in project / local . properties and ndk - 16 supported only , example : ndk . dir = / Users / xxx / Library / Android / sdk / ndk - bundle - r16 ' <nl> throw new StopActionException ( errMsg ) <nl> } <nl> - <nl> - if ( ! ndkDir . isEmpty ( ) ) { <nl> - def sourcePropertiesFile = new File ( ndkDir , " source . properties " ) <nl> - def ndkVersion = ' ' <nl> - println ( " sourcePropertiesFile " + sourcePropertiesFile . absolutePath ) <nl> - if ( sourcePropertiesFile . exists ( ) ) { <nl> - Properties properties = new Properties ( ) <nl> - sourcePropertiesFile . withInputStream { instr - > <nl> - properties . load ( instr ) <nl> - } <nl> - <nl> - ndkVersion = properties . getProperty ( ' Pkg . Revision ' ) <nl> - } <nl> - <nl> - if ( ndkVersion . length ( ) > 2 ) { <nl> - def substring = ndkVersion . substring ( 0 , 2 ) <nl> - try { <nl> - if ( substring . toInteger ( ) ! = 16 ) { <nl> - throw new StopActionException ( " Ndk Version 16 supported only " ) <nl> - } <nl> - } catch ( Exception ignored ) { <nl> - println ( " Ndk Version 16 supported only " ) <nl> - } <nl> - <nl> - } else { <nl> - println ( " Ndk Version 16 supported only " ) <nl> - } <nl> - } else { <nl> - println ( " Ndk Version 16 supported only " ) <nl> - } <nl> } <nl> <nl> preBuild . 
dependsOn checkNdkVersion <nl> bintray { <nl> } <nl> } <nl> <nl> - afterEvaluate { <nl> - transformNativeLibsWithStripDebugSymbolForRelease < < { <nl> + afterEvaluate { project - > <nl> + project . tasks . find { ( it . name . contains ( " assembleRelease " ) ) } ? . doLast { <nl> + println ( " begin_copy_so " ) <nl> copy { <nl> - from transformNativeLibsWithMergeJniLibsForRelease <nl> - into new File ( project . buildDir , " unstrippedSo " ) <nl> + from new File ( project . buildDir , " intermediates / transforms / mergeJniLibs / release / 0 / lib " ) <nl> + into new File ( project . buildDir , " outputs / aar " ) <nl> include ' * * / libweexjss . so ' , ' * * / libweexcore . so ' <nl> eachFile { <nl> - it . path = " $ { it . relativePath . segments [ 5 ] } _ $ { it . name } " <nl> + it . path = " $ { it . relativePath . segments . first ( ) } _ $ { it . name } " <nl> + println " dyy " + it . path <nl> } <nl> } <nl> } <nl> new file mode 100755 <nl> index 0000000000 . . 56307f09c7 <nl> Binary files / dev / null and b / android / sdk / libs / arm64 - v8a / libJavaScriptCore . so differ <nl> new file mode 100755 <nl> index 0000000000 . . ce8d0bc28b <nl> Binary files / dev / null and b / android / sdk / libs / arm64 - v8a / libWTF . so differ <nl> deleted file mode 100755 <nl> index afa1dcaab5 . . 0000000000 <nl> Binary files a / android / sdk / libs / armeabi - v7a / libweexjsb . so and / dev / null differ <nl> deleted file mode 100755 <nl> index 099256c3c4 . . 0000000000 <nl> Binary files a / android / sdk / libs / armeabi - v7a / libweexjst . so and / dev / null differ <nl> deleted file mode 100755 <nl> index 2d590993db . . 0000000000 <nl> Binary files a / android / sdk / libs / armeabi / libJavaScriptCore . so and / dev / null differ <nl> deleted file mode 100755 <nl> index 790bef24ae . . 0000000000 <nl> Binary files a / android / sdk / libs / armeabi / libWTF . so and / dev / null differ <nl> deleted file mode 100755 <nl> index afa1dcaab5 . . 0000000000 <nl> Binary files a / android / sdk / libs / armeabi / libweexjsb . so and / dev / null differ <nl> deleted file mode 100755 <nl> index 099256c3c4 . . 0000000000 <nl> Binary files a / android / sdk / libs / armeabi / libweexjst . so and / dev / null differ <nl> deleted file mode 100755 <nl> index 165578be9f . . 0000000000 <nl> Binary files a / android / sdk / libs / x86 / libweexjsb . so and / dev / null differ <nl> deleted file mode 100644 <nl> index 84add1ebda . . 0000000000 <nl> Binary files a / android / sdk / libs / x86 / libweexjst . so and / dev / null differ <nl> mmm a / android / sdk / src / main / AndroidManifest . xml <nl> ppp b / android / sdk / src / main / AndroidManifest . xml <nl> under the License . <nl> android : versionCode = " 1 " <nl> android : versionName = " 1 . 0 " > <nl> <nl> - < uses - sdk <nl> - android : minSdkVersion = " 14 " <nl> - android : targetSdkVersion = " 21 " / > <nl> - <nl> < application > <nl> < receiver <nl> android : name = " com . taobao . weex . WXGlobalEventReceiver " <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / WXEnvironment . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / WXEnvironment . 
java <nl> <nl> <nl> public static final String CORE_SO_NAME = " weexcore " ; <nl> public static final String CORE_JSS_SO_NAME = " weexjss " ; <nl> + public static final String CORE_JSB_SO_NAME = " weexjsb " ; <nl> + public static final String CORE_JST_SO_NAME = " weexjst " ; <nl> public static final String CORE_JSC_SO_NAME = " JavaScriptCore " ; <nl> / * * <nl> * this marked jsb . so ' s version , Change this if we want to update jsb . so <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / utils / WXSoInstallMgrSdk . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / utils / WXSoInstallMgrSdk . java <nl> <nl> import android . content . pm . ApplicationInfo ; <nl> import android . os . Build ; <nl> import android . text . TextUtils ; <nl> + import android . util . Log ; <nl> + <nl> import com . taobao . weex . IWXStatisticsListener ; <nl> import com . taobao . weex . WXEnvironment ; <nl> import com . taobao . weex . adapter . IWXSoLoaderAdapter ; <nl> public static boolean initSo ( String libName , int version , IWXUserTrackAdapter ut <nl> boolean InitSuc = false ; <nl> / / if ( checkSoIsValid ( libName , BuildConfig . ARMEABI_Size ) | | checkSoIsValid ( libName , BuildConfig . X86_Size ) ) { <nl> <nl> + <nl> + try { <nl> + / / If a library loader adapter exists , use this adapter to load library <nl> + / / instead of System . loadLibrary . <nl> + if ( mSoLoader ! = null ) { <nl> + mSoLoader . doLoadLibrary ( " c + + _shared " ) ; <nl> + } else { <nl> + System . loadLibrary ( " c + + _shared " ) ; <nl> + } <nl> + } catch ( Exception e ) { <nl> + <nl> + } <nl> + <nl> / * * <nl> * Load library with { @ link System # loadLibrary ( String ) } <nl> * / <nl> public static void copyStartUpSo ( ) { <nl> / / if android api < 16 copy libweexjst . so else copy libweexjsb . so <nl> boolean pieSupport = true ; <nl> File newfile ; <nl> + String startSoName = WXEnvironment . CORE_JSB_SO_NAME ; <nl> if ( Build . VERSION . SDK_INT < Build . VERSION_CODES . JELLY_BEAN ) { <nl> pieSupport = false ; <nl> newfile = new File ( cacheFile + STARTUPSOANDROID15 ) ; <nl> + startSoName = WXEnvironment . CORE_JST_SO_NAME ; <nl> } else { <nl> newfile = new File ( cacheFile + STARTUPSO ) ; <nl> } <nl> public static void copyStartUpSo ( ) { <nl> } <nl> <nl> File oldfile = new File ( soName ) ; <nl> + <nl> + <nl> + if ( ! oldfile . exists ( ) ) { <nl> + try { <nl> + String weexjsb = ( ( PathClassLoader ) ( WXSoInstallMgrSdk . class . getClassLoader ( ) ) ) . findLibrary ( startSoName ) ; <nl> + oldfile = new File ( weexjsb ) ; <nl> + } catch ( Throwable throwable ) { <nl> + / / do nothing <nl> + } <nl> + <nl> + } <nl> + <nl> if ( oldfile . exists ( ) ) { <nl> WXFileUtils . copyFile ( oldfile , newfile ) ; <nl> } else { <nl> mmm a / weex_core / Source / CMakeLists . txt <nl> ppp b / weex_core / Source / CMakeLists . 
txt <nl> set ( WEEXCORE_LIBRARY_NAME weexcore ) <nl> set ( WEEX_CORE_SOURCE_DIR $ { CMAKE_CURRENT_SOURCE_DIR } ) <nl> <nl> message ( " $ { ANDROID_STL } " ) <nl> - if ( " $ { ANDROID_STL } " STREQUAL " c + + _static " ) <nl> + if ( " $ { ANDROID_STL } " STREQUAL " c + + _static " OR " $ { ANDROID_STL } " STREQUAL " c + + _shared " ) <nl> set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - std = c + + 1y " ) <nl> elseif ( " $ { ANDROID_STL } " STREQUAL " gnustl_static " ) <nl> set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - std = gnu + + 1y " ) <nl> add_definitions ( - DLAYOUT_LOG = 0 ) <nl> add_definitions ( - DJSAPI_LOG = 0 ) <nl> add_definitions ( - DDOM_PARSER_LOG = 0 ) <nl> <nl> - set ( LOCAL_LIBRARIES_DIR $ { CMAKE_CURRENT_SOURCE_DIR } / libs / $ { ANDROID_ABI } ) <nl> + message ( " ANDROID_PROJECT_DIR : " $ { ANDROID_PROJECT_DIR } ) <nl> + set ( LOCAL_LIBRARIES_DIR $ { ANDROID_PROJECT_DIR } / libs / $ { ANDROID_ABI } ) <nl> <nl> if ( " $ { ANDROID_ABI } " STREQUAL " x86 " ) <nl> # todo <nl> mmm a / weex_core / Source / android / jsengine / CMakeLists . txt <nl> ppp b / weex_core / Source / android / jsengine / CMakeLists . txt <nl> if ( " $ { ANDROID_ABI } " STREQUAL " armeabi " ) <nl> elseif ( " $ { ANDROID_ABI } " STREQUAL " armeabi - v7a " ) <nl> add_definitions ( - DWTF_CPU_ARM ) <nl> set ( WTF_CPU_ARM 1 ) <nl> + elseif ( " $ { ANDROID_ABI } " STREQUAL " arm64 - v8a " ) <nl> + add_definitions ( - DWTF_CPU_ARM64 ) <nl> + set ( WTF_CPU_ARM64 1 ) <nl> else ( ) <nl> add_definitions ( - DWTF_CPU_X86 ) <nl> set ( WTF_CPU_X86_64 1 ) <nl> set ( $ { WEEXJSSERVER_NAME } _SOURCES <nl> wson / wson_jsc . cpp <nl> ) <nl> link_directories ( $ { LOCAL_LIBRARIES_DIR } ) <nl> + message ( " LOCAL_LIBRARIES_DIR : " $ { LOCAL_LIBRARIES_DIR } ) <nl> SET ( CMAKE_SHARED_LINKER_FLAGS " $ { CMAKE_SHARED_LINKER_FLAGS } " CACHE STRING " toolchain_exelinkflags " FORCE ) <nl> SET ( CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS " $ { CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS } - shared " ) <nl> <nl> mmm a / weex_core / Source / android / jsengine / object / args / exe_js_args . cpp <nl> ppp b / weex_core / Source / android / jsengine / object / args / exe_js_args . cpp <nl> <nl> / / <nl> <nl> # include " exe_js_args . h " <nl> - <nl> # include " android / jsengine / weex_jsc_utils . h " <nl> <nl> ExeJsArgs : : ExeJsArgs ( std : : vector < VALUE_WITH_TYPE * > & params ) { <nl> mmm a / weex_core / Source / android / jsengine / object / args / init_framework_args . cpp <nl> ppp b / weex_core / Source / android / jsengine / object / args / init_framework_args . cpp <nl> <nl> / / <nl> <nl> # include " init_framework_args . h " <nl> - <nl> # include " android / jsengine / weex_jsc_utils . h " <nl> <nl> InitFrameworkArgs : : InitFrameworkArgs ( std : : vector < INIT_FRAMEWORK_PARAMS * > & params ) { <nl> mmm a / weex_core / Source / android / jsengine / object / weex_console_object . cpp <nl> ppp b / weex_core / Source / android / jsengine / object / weex_console_object . cpp <nl> static EncodedJSValue consoleLogWithLevel ( ExecState * exec , MessageLevel level ) { <nl> / / return JSValue : : encode ( jsUndefined ( ) ) ; <nl> / / <nl> / / client - > logWithLevel ( exec , Inspector : : createScriptArguments ( exec , 0 ) , level ) ; <nl> + JSValue : : equalSlowCaseInline ( exec , JSC : : jsUndefined ( ) , JSC : : jsUndefined ( ) ) ; <nl> return JSValue : : encode ( jsUndefined ( ) ) ; <nl> } <nl> <nl> mmm a / weex_core / Source / android / jsengine / object / weex_global_object . 
cpp <nl> ppp b / weex_core / Source / android / jsengine / object / weex_global_object . cpp <nl> <nl> # include " base / utils / log_utils . h " <nl> # include " core / bridge / script_bridge . h " <nl> # include " include / wtf / text / Base64 . h " <nl> - <nl> + # include " android / jsengine / weex_jsc_utils . h " <nl> # define WX_GLOBAL_CONFIG_KEY " global_switch_config " <nl> / / # define GET_CHARFROM_UNIPTR ( str ) ( str ) = = nullptr ? nullptr : ( reinterpret_cast < const char * > ( ( str ) . get ( ) ) ) <nl> using namespace JSC ; <nl> mmm a / weex_core / Source / android / jsengine / object / weex_object_holder . cpp <nl> ppp b / weex_core / Source / android / jsengine / object / weex_object_holder . cpp <nl> <nl> / / Created by Darin on 10 / 04 / 2018 . <nl> / / <nl> # include " android / jsengine / object / weex_object_holder . h " <nl> - <nl> + # include " android / jsengine / weex_jsc_utils . h " <nl> # include " android / jsengine / object / weex_env . h " <nl> <nl> void WeexObjectHolder : : initFromIPCArguments ( IPCArguments * arguments , size_t startCount , bool forAppContext ) { <nl> mmm a / weex_core / Source / android / jsengine / object / weex_simple_object . cpp <nl> ppp b / weex_core / Source / android / jsengine / object / weex_simple_object . cpp <nl> <nl> / / <nl> <nl> # include " android / jsengine / object / weex_simple_object . h " <nl> - <nl> + # include " android / jsengine / weex_jsc_utils . h " <nl> <nl> const ClassInfo SimpleObject : : s_info = { " Object " , & Base : : s_info , nullptr , CREATE_METHOD_TABLE ( SimpleObject ) } ; <nl> mmm a / weex_core / Source / android / jsengine / task / weex_task . cpp <nl> ppp b / weex_core / Source / android / jsengine / task / weex_task . cpp <nl> <nl> / / <nl> <nl> # include " weex_task . h " <nl> - <nl> + # include " android / jsengine / weex_jsc_utils . h " <nl> void WeexTask : : Future : : setResult ( std : : unique_ptr < WeexJSResult > & result ) { <nl> thread_locker_ . lock ( ) ; <nl> has_result_ = true ; <nl> mmm a / weex_core / Source / android / jsengine / task / weex_task_queue . cpp <nl> ppp b / weex_core / Source / android / jsengine / task / weex_task_queue . cpp <nl> <nl> # include " android / jsengine / bridge / script / script_bridge_in_multi_process . h " <nl> # include " android / jsengine / bridge / script / core_side_in_multi_process . h " <nl> # include " android / jsengine / object / weex_env . h " <nl> - <nl> + # include " android / jsengine / weex_jsc_utils . h " <nl> void WeexTaskQueue : : run ( WeexTask * task ) { <nl> if ( task = = nullptr | | WeexEnv : : getEnv ( ) - > is_app_crashed ( ) ) { <nl> return ; <nl> mmm a / weex_core / Source / android / jsengine / weex_ipc_server . cpp <nl> ppp b / weex_core / Source / android / jsengine / weex_ipc_server . cpp <nl> <nl> # include " android / jsengine / object / weex_env . h " <nl> # include " android / jsengine / weex_runtime . h " <nl> # include " core / manager / weex_core_manager . h " <nl> - <nl> + # include " android / jsengine / weex_jsc_utils . h " <nl> using namespace JSC ; <nl> using namespace WTF ; <nl> using namespace WEEXICU ; <nl> mmm a / weex_core / Source / android / jsengine / weex_jsc_utils . h <nl> ppp b / weex_core / Source / android / jsengine / weex_jsc_utils . h <nl> <nl> # include " third_party / IPC / IPCFutexPageQueue . h " <nl> # include " third_party / IPC / IPCException . h " <nl> # include " third_party / IPC / Serializing / IPCSerializer . 
h " <nl> + # include " include / JavaScriptCore / runtime / StructureInlines . h " <nl> + # include " include / JavaScriptCore / runtime / JSCJSValueInlines . h " <nl> <nl> # if ! defined ( PATH_MAX ) <nl> # define PATH_MAX 4096 <nl> mmm a / weex_core / Source / android / jsengine / weex_runtime . cpp <nl> ppp b / weex_core / Source / android / jsengine / weex_runtime . cpp <nl> <nl> # include " android / jsengine / object / weex_env . h " <nl> # include " include / JavaScriptCore / runtime / Exception . h " <nl> # include " include / JavaScriptCore / heap / HeapSnapshotBuilder . h " <nl> + # include " android / jsengine / weex_jsc_utils . h " <nl> <nl> using namespace JSC ; <nl> using namespace WTF ; <nl> mmm a / weex_core / Source / android / jsengine / wson / wson_jsc . cpp <nl> ppp b / weex_core / Source / android / jsengine / wson / wson_jsc . cpp <nl> <nl> <nl> # include " include / JavaScriptCore / runtime / ObjectConstructor . h " <nl> # include " include / JavaScriptCore / runtime / JSONObject . h " <nl> - # include " include / JavaScriptCore / runtime / JSCJSValueInlines . h " <nl> + # include " include / JavaScriptCore / runtime / JSCJSValue . h " <nl> # include < wtf / Vector . h > <nl> # include < wtf / HashMap . h > <nl> <nl> namespace wson { <nl> wson_push_type_null ( buffer ) ; <nl> return ; <nl> } <nl> - # ifdef __ANDROID__ <nl> - LOGE ( " weex wson err value type is not handled , treat as null , json value % s % d % d " , JSONStringify ( exec , val , 0 ) . utf8 ( ) . data ( ) , val . isFunction ( ) , val . tag ( ) ) ; <nl> - for ( size_t i = 0 ; i < objectStack . size ( ) ; i + + ) { <nl> - LOGE ( " weex wson err value type is not handled , treat as null , root json value % s " , JSONStringify ( exec , objectStack [ i ] , 0 ) . utf8 ( ) . data ( ) ) ; <nl> - } <nl> - # endif <nl> + / / # ifdef __ANDROID__ <nl> + / / LOGE ( " weex wson err value type is not handled , treat as null , json value % s % d % d " , JSONStringify ( exec , val , 0 ) . utf8 ( ) . data ( ) , val . isFunction ( ) , val . tag ( ) ) ; <nl> + / / for ( size_t i = 0 ; i < objectStack . size ( ) ; i + + ) { <nl> + / / LOGE ( " weex wson err value type is not handled , treat as null , root json value % s " , JSONStringify ( exec , objectStack [ i ] , 0 ) . utf8 ( ) . data ( ) ) ; <nl> + / / } <nl> + / / # endif <nl> wson_push_type_null ( buffer ) ; <nl> } <nl> <nl> mmm a / weex_core / Source / base / CMakeLists . txt <nl> ppp b / weex_core / Source / base / CMakeLists . txt <nl> set ( $ { BASE_LIBRARY_NAME } _SOURCES <nl> <nl> if ( ANDROID ) <nl> add_definitions ( - DOS_ANDROID = 1 ) <nl> - if ( " $ { ANDROID_ABI } " STREQUAL " x86 " ) <nl> + if ( " $ { ANDROID_ABI } " STREQUAL " x86 " OR " $ { ANDROID_ABI } " STREQUAL " arm64 - v8a " ) <nl> else ( ) <nl> list ( APPEND $ { BASE_LIBRARY_NAME } _SOURCES <nl> crash / backtrace . h <nl> new file mode 100755 <nl> index 0000000000 . . 56307f09c7 <nl> Binary files / dev / null and b / weex_core / Source / libs / arm64 - v8a / libJavaScriptCore . so differ <nl> new file mode 100755 <nl> index 0000000000 . . ce8d0bc28b <nl> Binary files / dev / null and b / weex_core / Source / libs / arm64 - v8a / libWTF . so differ <nl> Binary files a / weex_core / Source / libs / armeabi - v7a / libJavaScriptCore . so and b / weex_core / Source / libs / armeabi - v7a / libJavaScriptCore . so differ <nl> Binary files a / weex_core / Source / libs / armeabi / libJavaScriptCore . so and b / weex_core / Source / libs / armeabi / libJavaScriptCore . 
so differ <nl> | * [ Android ] Support arm64 by upgrading NDK to 16 and Gradle Plugin for Android to 3 . 2 . 1 ( ) | apache/incubator-weex | 61ef2564303b9076df7bbdfee362564884aad0f6 | 2019-06-18T09:11:24Z |
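The weex entry above migrates every Gradle dependency configuration that Android Gradle Plugin 3.x removed (compile, provided, testCompile, androidTestCompile, debugCompile) to its replacement (implementation, compileOnly, testImplementation, androidTestImplementation, debugImplementation), and swaps the 32-bit-only ABI list for one that includes arm64-v8a. The sketch below is illustrative only: a hypothetical library module showing that mapping, with module settings and dependency versions chosen as placeholders rather than taken from the diff.

// Illustrative sketch of the AGP 3.x dependency-configuration migration
// applied throughout the commit above. SDK levels and versions are
// assumptions for the example, not values from the weex build files.
apply plugin: 'com.android.library'

android {
    compileSdkVersion 28            // assumed value

    defaultConfig {
        minSdkVersion 14
        targetSdkVersion 28
        ndk {
            // arm64-v8a added alongside the remaining 32-bit ABIs,
            // mirroring the abiFilters change in the diff
            abiFilters "arm64-v8a", "armeabi-v7a", "x86"
        }
    }
}

dependencies {
    // old: compile fileTree(include: ['*.jar'], dir: 'libs')
    implementation fileTree(include: ['*.jar'], dir: 'libs')

    // old: provided "com.android.support:support-v4:28.0.0"
    compileOnly "com.android.support:support-v4:28.0.0"

    // old: testCompile 'junit:junit:4.12'
    testImplementation 'junit:junit:4.12'

    // old: androidTestCompile 'com.android.support.test:runner:0.5'
    androidTestImplementation 'com.android.support.test:runner:0.5'

    // old: debugCompile 'com.example:debug-tool:1.0.0'   (hypothetical artifact)
    debugImplementation 'com.example:debug-tool:1.0.0'
}

The practical difference is that implementation dependencies are no longer leaked to consumers' compile classpaths and compileOnly artifacts are excluded from packaging, which is why the commit can keep the support libraries and fastjson as compile-time-only dependencies of the SDK module while the playground app declares its own runtime copies.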
mmm a / tensorflow / compiler / aot / codegen_test_h . golden <nl> ppp b / tensorflow / compiler / aot / codegen_test_h . golden <nl> class MyClass : public tensorflow : : XlaCompiledCpuFunction { <nl> / / Shape of the args and results . <nl> static const xla : : ProgramShape * StaticProgramShape ( ) { <nl> static const xla : : ProgramShape * kShape = [ ] ( ) { <nl> - static const char kProto [ ] = { 10 , 12 , 16 , 11 , 26 , 2 , 1 , 2 , 42 , 4 , 10 , 2 , 1 , 0 , 10 , 12 , 16 , 5 , 26 , 2 , 3 , 4 , 42 , 4 , 10 , 2 , 1 , 0 , 18 , 16 , 16 , 13 , 34 , 12 , 16 , 8 , 26 , 2 , 5 , 6 , 42 , 4 , 10 , 2 , 1 , 0 } ; <nl> - static constexpr int kProtoSize = 46 ; <nl> + static const char kProto [ ] = { 10 , 14 , 16 , 11 , 26 , 2 , 1 , 2 , 42 , 6 , 10 , 2 , 1 , 0 , 32 , 1 , 10 , 14 , 16 , 5 , 26 , 2 , 3 , 4 , 42 , 6 , 10 , 2 , 1 , 0 , 32 , 1 , 18 , 18 , 16 , 13 , 34 , 14 , 16 , 8 , 26 , 2 , 5 , 6 , 42 , 6 , 10 , 2 , 1 , 0 , 32 , 1 } ; <nl> + static constexpr int kProtoSize = 52 ; <nl> xla : : ProgramShape * shape = new xla : : ProgramShape ; <nl> shape - > ParseFromArray ( kProto , kProtoSize ) ; <nl> return shape ; <nl> mmm a / tensorflow / compiler / tests / binary_ops_test . py <nl> ppp b / tensorflow / compiler / tests / binary_ops_test . py <nl> def testFloatOps ( self ) : <nl> dtype ( 4 ) , <nl> expected = np . array ( [ [ 16 ] , [ 81 ] ] , dtype = dtype ) ) <nl> <nl> - atan2_supported = self . device = = " XLA_GPU " <nl> - if atan2_supported : <nl> - self . _testBinary ( <nl> - math_ops . atan2 , <nl> - np . array ( [ 0 , np . sqrt ( 2 ) , 1 , np . sqrt ( 2 ) , 0 ] , dtype ) , <nl> - np . array ( [ 1 , np . sqrt ( 2 ) , 0 , - np . sqrt ( 2 ) , - 1 ] , dtype ) , <nl> - expected = np . array ( <nl> - [ 0 , np . pi / 4 , np . pi / 2 , np . pi * 3 / 4 , np . pi ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . atan2 , <nl> + np . array ( [ 0 , np . sqrt ( 2 ) , 1 , np . sqrt ( 2 ) , 0 ] , dtype ) , <nl> + np . array ( [ 1 , np . sqrt ( 2 ) , 0 , - np . sqrt ( 2 ) , - 1 ] , dtype ) , <nl> + expected = np . array ( <nl> + [ 0 , np . pi / 4 , np . pi / 2 , np . pi * 3 / 4 , np . pi ] , dtype = dtype ) ) <nl> <nl> self . _testBinary ( <nl> gen_math_ops . _reciprocal_grad , <nl> def testComplexOps ( self ) : <nl> ] , <nl> dtype = dtype ) ) <nl> <nl> - atan2_supported = self . device = = " XLA_GPU " <nl> - if atan2_supported : <nl> - self . _testBinary ( <nl> - math_ops . pow , <nl> - dtype ( 3 + 2j ) , <nl> - dtype ( 4 - 5j ) , <nl> - expected = np . power ( dtype ( 3 + 2j ) , dtype ( 4 - 5j ) ) ) <nl> - self . _testBinary ( # empty rhs <nl> - math_ops . pow , <nl> - np . array ( [ 1 + 2j , 2 - 3j ] , dtype = dtype ) , <nl> - np . zeros ( shape = [ 0 , 2 ] , dtype = dtype ) , <nl> - expected = np . zeros ( shape = [ 0 , 2 ] , dtype = dtype ) ) <nl> - self . _testBinary ( # to zero power <nl> - math_ops . pow , <nl> - np . array ( [ 1 + 2j , 2 - 3j ] , dtype = dtype ) , <nl> - np . zeros ( shape = [ 1 , 2 ] , dtype = dtype ) , <nl> - expected = np . ones ( shape = [ 1 , 2 ] , dtype = dtype ) ) <nl> - lhs = np . array ( [ 1 - 2j , 4 + 3j , 2 - 3j , 3 , 2j , 1 , 4 ] , dtype = dtype ) <nl> - rhs = np . array ( [ 2 , 3j , 3 + 4j , 2 + 3j , 3 - 2j , 2 , 3 + 3j ] , dtype = dtype ) <nl> - scalar = dtype ( 2 + 2j ) <nl> - self . _testBinary ( math_ops . pow , lhs , rhs , expected = np . power ( lhs , rhs ) ) <nl> - self . _testBinary ( <nl> - math_ops . pow , scalar , rhs , expected = np . power ( scalar , rhs ) ) <nl> - self . _testBinary ( math_ops . pow , lhs , scalar , np . 
power ( lhs , scalar ) ) <nl> + self . _testBinary ( <nl> + math_ops . pow , <nl> + dtype ( 3 + 2j ) , <nl> + dtype ( 4 - 5j ) , <nl> + expected = np . power ( dtype ( 3 + 2j ) , dtype ( 4 - 5j ) ) ) <nl> + self . _testBinary ( # empty rhs <nl> + math_ops . pow , <nl> + np . array ( [ 1 + 2j , 2 - 3j ] , dtype = dtype ) , <nl> + np . zeros ( shape = [ 0 , 2 ] , dtype = dtype ) , <nl> + expected = np . zeros ( shape = [ 0 , 2 ] , dtype = dtype ) ) <nl> + self . _testBinary ( # to zero power <nl> + math_ops . pow , <nl> + np . array ( [ 1 + 2j , 2 - 3j ] , dtype = dtype ) , <nl> + np . zeros ( shape = [ 1 , 2 ] , dtype = dtype ) , <nl> + expected = np . ones ( shape = [ 1 , 2 ] , dtype = dtype ) ) <nl> + lhs = np . array ( [ 1 - 2j , 4 + 3j , 2 - 3j , 3 , 2j , 1 , 4 ] , dtype = dtype ) <nl> + rhs = np . array ( [ 2 , 3j , 3 + 4j , 2 + 3j , 3 - 2j , 2 , 3 + 3j ] , dtype = dtype ) <nl> + scalar = dtype ( 2 + 2j ) <nl> + self . _testBinary ( math_ops . pow , lhs , rhs , expected = np . power ( lhs , rhs ) ) <nl> + self . _testBinary ( <nl> + math_ops . pow , scalar , rhs , expected = np . power ( scalar , rhs ) ) <nl> + self . _testBinary ( math_ops . pow , lhs , scalar , np . power ( lhs , scalar ) ) <nl> <nl> lhs = np . array ( [ 4 + 2j , - 3 - 1j , 2j , 1 ] , dtype = dtype ) <nl> rhs = np . array ( [ 5 , - 6j , 7 - 3j , - 8j ] , dtype = dtype ) <nl> def testComplexOps ( self ) : <nl> self . _testBinary ( <nl> gen_math_ops . _sigmoid_grad , lhs , rhs , expected = rhs * lhs * ( 1 - lhs ) ) <nl> <nl> - if atan2_supported : <nl> - self . _testBinary ( <nl> - gen_math_ops . _rsqrt_grad , lhs , rhs , expected = lhs * * 3 * rhs / - 2 ) <nl> + self . _testBinary ( <nl> + gen_math_ops . _rsqrt_grad , lhs , rhs , expected = lhs * * 3 * rhs / - 2 ) <nl> <nl> self . _testBinary ( <nl> gen_math_ops . _sqrt_grad , lhs , rhs , expected = rhs / ( 2 * lhs ) ) <nl> mmm a / tensorflow / compiler / tests / unary_ops_test . py <nl> ppp b / tensorflow / compiler / tests / unary_ops_test . py <nl> def testFloatOps ( self ) : <nl> def testComplexOps ( self ) : <nl> for dtype in self . complex_types : <nl> <nl> - # TODO ( b / 65408531 ) : Wider support for log ( needs atan2 ) . <nl> - atan2_supported = self . device = = " XLA_GPU " <nl> - if atan2_supported : <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . acosh , <nl> - np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) , <nl> - expected = np . arccosh ( <nl> - np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . acosh , <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) , <nl> + expected = np . arccosh ( <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) ) ) <nl> <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . asinh , <nl> - np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) , <nl> - expected = np . arcsinh ( <nl> - np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . asinh , <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) , <nl> + expected = np . arcsinh ( <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) ) ) <nl> <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . atanh , <nl> - np . 
array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) , <nl> - expected = np . arctanh ( <nl> - np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . atanh , <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) , <nl> + expected = np . arctanh ( <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) ) ) <nl> <nl> self . _assertOpOutputMatchesExpected ( <nl> math_ops . cosh , <nl> def testComplexOps ( self ) : <nl> np . array ( [ [ 1 , 2j , 2 + 3j ] ] , dtype = dtype ) , <nl> expected = 1 . 0 / np . array ( [ [ 1 , 2j , 2 + 3j ] ] , dtype = dtype ) ) <nl> <nl> - if atan2_supported : <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . log , <nl> - np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) , <nl> - expected = np . log ( np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . log , <nl> + np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) , <nl> + expected = np . log ( np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) ) ) <nl> <nl> self . _assertOpOutputMatchesExpected ( <nl> math_ops . sin , <nl> def testComplexOps ( self ) : <nl> <nl> # TODO ( b / 34703906 ) : improve log1p implementation and make tolerance <nl> # tighter . <nl> - if atan2_supported : # TODO ( b / 34703906 ) : log support <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . log1p , <nl> - np . array ( [ [ 1e - 14 , 1e - 15j , 0 . 6 - 0 . 3j ] ] , dtype = dtype ) , <nl> - expected = np . log1p ( <nl> - np . array ( [ [ 1e - 14 , 1e - 15j , 0 . 6 - 0 . 3j ] ] , dtype = dtype ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . log1p , <nl> + np . array ( [ [ 1e - 14 , 1e - 15j , 0 . 6 - 0 . 3j ] ] , dtype = dtype ) , <nl> + expected = np . log1p ( <nl> + np . array ( [ [ 1e - 14 , 1e - 15j , 0 . 6 - 0 . 3j ] ] , dtype = dtype ) ) ) <nl> <nl> - val = np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . rsqrt , val , expected = 1 / np . sqrt ( val ) ) <nl> + val = np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . rsqrt , val , expected = 1 / np . sqrt ( val ) ) <nl> <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . sigmoid , val , expected = 1 / ( 1 + np . exp ( - val ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . sigmoid , val , expected = 1 / ( 1 + np . exp ( - val ) ) ) <nl> <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . sqrt , val , expected = np . sqrt ( val ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . sqrt , val , expected = np . sqrt ( val ) ) <nl> <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . tanh , <nl> - np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) , <nl> - expected = np . tanh ( np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . tanh , <nl> + np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) , <nl> + expected = np . tanh ( np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) ) ) <nl> <nl> self . _assertOpOutputMatchesExpected ( <nl> math_ops . tan , <nl> def testComplexOps ( self ) : <nl> np . 
array ( [ [ - 4j , 3 + 2j ] , [ 2 , - 1j ] ] , dtype = dtype ) , <nl> expected = np . array ( [ [ 1 , 1 ] , [ 1 , 1 ] ] , dtype = dtype ) ) <nl> <nl> - if atan2_supported : # TODO ( b / 34703906 ) : atan2 support <nl> - self . _assertOpOutputMatchesExpected ( <nl> - math_ops . angle , <nl> - np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) , <nl> - expected = np . angle ( <nl> - np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . angle , <nl> + np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) , <nl> + expected = np . angle ( np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) ) ) <nl> <nl> self . _assertOpOutputMatchesExpected ( <nl> math_ops . conj , <nl> mmm a / tensorflow / compiler / tf2xla / xla_compiler . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_compiler . cc <nl> Status BuildComputation ( <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - void AssignMajorToMinorLayout ( xla : : Shape * shape ) { <nl> - if ( xla : : ShapeUtil : : IsTuple ( * shape ) ) { <nl> - for ( xla : : Shape & elem_shape : * shape - > mutable_tuple_shapes ( ) ) { <nl> - AssignMajorToMinorLayout ( & elem_shape ) ; <nl> - } <nl> - } else { <nl> - auto & minor_to_major = * shape - > mutable_layout ( ) - > mutable_minor_to_major ( ) ; <nl> - minor_to_major . Resize ( xla : : ShapeUtil : : Rank ( * shape ) , 0 ) ; <nl> - std : : iota ( minor_to_major . rbegin ( ) , minor_to_major . rend ( ) , 0 ) ; <nl> - } <nl> - } <nl> - <nl> } / / namespace <nl> <nl> Status XlaCompiler : : CompileGraph ( const XlaCompiler : : CompileOptions & options , <nl> Status XlaCompiler : : CompileGraph ( const XlaCompiler : : CompileOptions & options , <nl> < < xla : : ShapeUtil : : HumanString ( result - > xla_output_shape ) ; <nl> <nl> / / Tensorflow expects a major - to - minor order of results . <nl> - AssignMajorToMinorLayout ( & result - > xla_output_shape ) ; <nl> + xla : : LayoutUtil : : SetToDefaultLayout ( & result - > xla_output_shape ) ; <nl> <nl> / / Converts the output shapes to TensorShapes . <nl> int computation_output = 0 ; <nl> mmm a / tensorflow / compiler / xla / index_util . cc <nl> ppp b / tensorflow / compiler / xla / index_util . cc <nl> namespace xla { <nl> int64 scale = 1 ; <nl> int64 linear_index = 0 ; <nl> bool first = true ; <nl> - for ( auto dimension : shape . layout ( ) . minor_to_major ( ) ) { <nl> + for ( auto dimension : LayoutUtil : : MinorToMajor ( shape ) ) { <nl> if ( first ) { <nl> / / Avoid two multiplies on the first loop iteration <nl> linear_index = multi_index [ dimension ] ; <nl> namespace xla { <nl> <nl> / / Accumulated product D { L ( 0 ) } * D { L ( 1 ) } * . . . <nl> int64 divisor = 1 ; <nl> - for ( auto dimension : shape . layout ( ) . minor_to_major ( ) ) { <nl> + for ( auto dimension : LayoutUtil : : MinorToMajor ( shape ) ) { <nl> multi_index [ dimension ] = <nl> ( linear_index / divisor ) % shape . dimensions ( dimension ) ; <nl> divisor * = shape . dimensions ( dimension ) ; <nl> namespace xla { <nl> <nl> / * static * / int64 IndexUtil : : GetDimensionStride ( const Shape & shape , <nl> int64 dimension ) { <nl> - const Layout & layout = shape . layout ( ) ; <nl> - int64 pdim_size = layout . padded_dimensions_size ( ) ; <nl> + int64 pdim_size = LayoutUtil : : PaddedDimensions ( shape ) . size ( ) ; <nl> int64 stride = 1 ; <nl> DCHECK ( pdim_size = = 0 | | pdim_size = = shape . dimensions_size ( ) ) ; <nl> - for ( auto dim : layout . 
minor_to_major ( ) ) { <nl> + for ( auto dim : LayoutUtil : : MinorToMajor ( shape ) ) { <nl> if ( dim = = dimension ) { <nl> break ; <nl> } <nl> if ( pdim_size = = 0 ) { <nl> stride * = shape . dimensions ( dim ) ; <nl> } else { <nl> - stride * = layout . padded_dimensions ( dim ) ; <nl> + stride * = LayoutUtil : : PaddedDimension ( shape , dim ) ; <nl> } <nl> } <nl> return stride ; <nl> mmm a / tensorflow / compiler / xla / layout_util . cc <nl> ppp b / tensorflow / compiler / xla / layout_util . cc <nl> void SetDefaultLayoutToContainer ( <nl> / * static * / Layout LayoutUtil : : MakeLayout ( <nl> tensorflow : : gtl : : ArraySlice < int64 > minor_to_major ) { <nl> Layout layout ; <nl> + layout . set_format ( DENSE ) ; <nl> for ( int64 dimension_number : minor_to_major ) { <nl> layout . add_minor_to_major ( dimension_number ) ; <nl> } <nl> namespace { <nl> / / Internal helper that creates a default layout for an array of the given rank . <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> Layout layout ; <nl> + layout . set_format ( DENSE ) ; <nl> tensorflow : : protobuf : : RepeatedField < tensorflow : : protobuf_int64 > * <nl> minor_to_major = layout . mutable_minor_to_major ( ) ; <nl> minor_to_major - > Resize ( rank , 0 ) ; <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> for ( auto & element_shape : * shape - > mutable_tuple_shapes ( ) ) { <nl> SetToDefaultLayout ( & element_shape ) ; <nl> } <nl> + shape - > clear_layout ( ) ; <nl> + } else if ( ShapeUtil : : IsOpaque ( * shape ) ) { <nl> + shape - > clear_layout ( ) ; <nl> } else { <nl> + shape - > mutable_layout ( ) - > set_format ( DENSE ) ; <nl> tensorflow : : protobuf : : RepeatedField < tensorflow : : protobuf_int64 > * <nl> minor_to_major = shape - > mutable_layout ( ) - > mutable_minor_to_major ( ) ; <nl> minor_to_major - > Resize ( shape - > dimensions_size ( ) , 0 ) ; <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> TF_RETURN_IF_ERROR ( ValidateLayoutInShape ( element_shape ) ) ; <nl> } <nl> return tensorflow : : Status : : OK ( ) ; <nl> - } else if ( ShapeUtil : : Rank ( shape ) = = 0 & & ! shape . has_layout ( ) ) { <nl> - / / A scalar without a layout is ok . <nl> + } else if ( ShapeUtil : : IsOpaque ( shape ) ) { <nl> + if ( shape . has_layout ( ) ) { <nl> + return InvalidArgument ( " opaque should not have a layout field " ) ; <nl> + } <nl> return tensorflow : : Status : : OK ( ) ; <nl> } else { <nl> / / Array shape . <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> return InvalidArgument ( " a single Layout is not valid for tuple shapes " ) ; <nl> } <nl> <nl> - if ( layout . minor_to_major_size ( ) ! = ShapeUtil : : Rank ( shape ) ) { <nl> + if ( ShapeUtil : : IsOpaque ( shape ) ) { <nl> + return tensorflow : : Status : : OK ( ) ; <nl> + } <nl> + <nl> + if ( layout . format ( ) = = INVALID_FORMAT ) { <nl> return InvalidArgument ( <nl> - " layout minor_to_major field contains % d elements , " <nl> - " but shape is rank % lld : { % s } ; shape : % s " , <nl> - layout . minor_to_major_size ( ) , ShapeUtil : : Rank ( shape ) , <nl> - tensorflow : : str_util : : Join ( layout . minor_to_major ( ) , " , " ) . c_str ( ) , <nl> - shape . ShortDebugString ( ) . c_str ( ) ) ; <nl> + " Layout does not have a valid format : layout { % s } , shape { % s } " , <nl> + layout . ShortDebugString ( ) . c_str ( ) , shape . ShortDebugString ( ) . 
c_str ( ) ) ; <nl> } <nl> <nl> - std : : vector < bool > dimensions_in_layout ( ShapeUtil : : Rank ( shape ) , false ) ; <nl> - for ( int64 i = 0 ; i < ShapeUtil : : Rank ( shape ) ; + + i ) { <nl> - int64 dim = layout . minor_to_major ( i ) ; <nl> - if ( dim < 0 | | dim > = ShapeUtil : : Rank ( shape ) ) { <nl> + if ( layout . format ( ) = = DENSE ) { <nl> + if ( layout . minor_to_major_size ( ) ! = ShapeUtil : : Rank ( shape ) ) { <nl> return InvalidArgument ( <nl> - " layout minor_to_major field has out - of - bounds value : % s " , <nl> - HumanString ( layout ) . c_str ( ) ) ; <nl> + " layout minor_to_major field contains % d elements , " <nl> + " but shape is rank % lld : { % s } ; shape : % s " , <nl> + layout . minor_to_major_size ( ) , ShapeUtil : : Rank ( shape ) , <nl> + tensorflow : : str_util : : Join ( layout . minor_to_major ( ) , " , " ) . c_str ( ) , <nl> + shape . ShortDebugString ( ) . c_str ( ) ) ; <nl> } <nl> - if ( dimensions_in_layout [ dim ] ) { <nl> - return InvalidArgument ( <nl> - " layout minor_to_major field has duplicate values : { % s } " , <nl> - HumanString ( layout ) . c_str ( ) ) ; <nl> - } <nl> - dimensions_in_layout [ dim ] = true ; <nl> - } <nl> <nl> - if ( layout . padded_dimensions_size ( ) > 0 ) { <nl> - if ( layout . padded_dimensions_size ( ) ! = ShapeUtil : : Rank ( shape ) ) { <nl> - return InvalidArgument ( <nl> - " layout has % d padded dimensions , but shape is rank % lld " , <nl> - layout . padded_dimensions_size ( ) , ShapeUtil : : Rank ( shape ) ) ; <nl> + std : : vector < bool > dimensions_in_layout ( ShapeUtil : : Rank ( shape ) , false ) ; <nl> + for ( int64 i = 0 ; i < ShapeUtil : : Rank ( shape ) ; + + i ) { <nl> + int64 dim = layout . minor_to_major ( i ) ; <nl> + if ( dim < 0 | | dim > = ShapeUtil : : Rank ( shape ) ) { <nl> + return InvalidArgument ( <nl> + " layout minor_to_major field has out - of - bounds value : % s " , <nl> + HumanString ( layout ) . c_str ( ) ) ; <nl> + } <nl> + if ( dimensions_in_layout [ dim ] ) { <nl> + return InvalidArgument ( <nl> + " layout minor_to_major field has duplicate values : { % s } " , <nl> + HumanString ( layout ) . c_str ( ) ) ; <nl> + } <nl> + dimensions_in_layout [ dim ] = true ; <nl> } <nl> - for ( int i = 0 ; i < layout . padded_dimensions_size ( ) ; + + i ) { <nl> - if ( layout . padded_dimensions ( i ) < shape . dimensions ( i ) ) { <nl> + <nl> + if ( layout . padded_dimensions_size ( ) > 0 ) { <nl> + if ( layout . padded_dimensions_size ( ) ! = ShapeUtil : : Rank ( shape ) ) { <nl> return InvalidArgument ( <nl> - " for dimension % d , dimension padding ( % lld ) is smaller than " <nl> - " the dimension size ( % lld ) of the shape " , <nl> - i , layout . padded_dimensions ( i ) , shape . dimensions ( i ) ) ; <nl> + " layout has % d padded dimensions , but shape is rank % lld " , <nl> + layout . padded_dimensions_size ( ) , ShapeUtil : : Rank ( shape ) ) ; <nl> + } <nl> + for ( int i = 0 ; i < layout . padded_dimensions_size ( ) ; + + i ) { <nl> + if ( layout . padded_dimensions ( i ) < shape . dimensions ( i ) ) { <nl> + return InvalidArgument ( <nl> + " for dimension % d , dimension padding ( % lld ) is smaller than " <nl> + " the dimension size ( % lld ) of the shape " , <nl> + i , layout . padded_dimensions ( i ) , shape . 
dimensions ( i ) ) ; <nl> + } <nl> } <nl> } <nl> } <nl> + <nl> return tensorflow : : Status : : OK ( ) ; <nl> } <nl> <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> LayoutUtil : : ClearLayout ( program_shape - > mutable_result ( ) ) ; <nl> } <nl> <nl> + / * static * / bool LayoutUtil : : IsDense ( const Shape & shape ) { <nl> + return ShapeUtil : : IsArray ( shape ) & & shape . has_layout ( ) & & <nl> + IsDense ( shape . layout ( ) ) ; <nl> + } <nl> + <nl> + / * static * / bool LayoutUtil : : IsDense ( const Layout & layout ) { <nl> + return layout . format ( ) = = DENSE ; <nl> + } <nl> + <nl> / * static * / bool LayoutUtil : : IsMonotonicWithDim0Minor ( const Layout & layout ) { <nl> + CHECK ( layout . format ( ) = = DENSE ) ; <nl> return std : : is_sorted ( layout . minor_to_major ( ) . begin ( ) , <nl> layout . minor_to_major ( ) . end ( ) ) ; <nl> } <nl> <nl> / * static * / bool LayoutUtil : : IsMonotonicWithDim0Major ( const Layout & layout ) { <nl> + CHECK ( layout . format ( ) = = DENSE ) ; <nl> return std : : is_sorted ( layout . minor_to_major ( ) . begin ( ) , <nl> layout . minor_to_major ( ) . end ( ) , std : : greater < int64 > ( ) ) ; <nl> } <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> shape . layout ( ) . padded_dimensions_size ( ) = = 0 ) { <nl> return false ; <nl> } <nl> + CHECK ( IsDense ( shape ) ) ; <nl> CHECK_EQ ( shape . dimensions_size ( ) , shape . layout ( ) . padded_dimensions_size ( ) ) ; <nl> for ( int64 i = 0 ; i < shape . dimensions_size ( ) ; + + i ) { <nl> if ( shape . layout ( ) . padded_dimensions ( i ) > shape . dimensions ( i ) ) { <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> return false ; <nl> } <nl> <nl> + / * static * / tensorflow : : gtl : : ArraySlice < const int64 > <nl> + LayoutUtil : : PaddedDimensions ( const Shape & shape ) { <nl> + CHECK ( IsDense ( shape ) ) ; <nl> + return AsInt64Slice ( shape . layout ( ) . padded_dimensions ( ) ) ; <nl> + } <nl> + <nl> + / * static * / int64 LayoutUtil : : PaddedDimension ( const Shape & shape , <nl> + int64 index ) { <nl> + CHECK ( IsDense ( shape ) ) ; <nl> + return shape . layout ( ) . padded_dimensions ( index ) ; <nl> + } <nl> + <nl> + / * static * / PaddingValue LayoutUtil : : GetPaddingValue ( const Shape & shape ) { <nl> + CHECK ( IsDense ( shape ) ) ; <nl> + return shape . layout ( ) . padding_value ( ) ; <nl> + } <nl> + <nl> / * static * / bool LayoutUtil : : HasLayout ( const Shape & shape ) { <nl> if ( ShapeUtil : : IsTuple ( shape ) ) { <nl> / / Tuple shape : all subshapes must have a layout . <nl> return std : : all_of ( shape . tuple_shapes ( ) . begin ( ) , shape . tuple_shapes ( ) . end ( ) , <nl> [ ] ( const Shape & s ) { return HasLayout ( s ) ; } ) ; <nl> + } else if ( ShapeUtil : : IsOpaque ( shape ) ) { <nl> + return true ; <nl> } <nl> - / / A scalar trivially always has a layout . <nl> - return ( ShapeUtil : : Rank ( shape ) = = 0 | | <nl> - ( shape . has_layout ( ) & & ( shape . layout ( ) . minor_to_major_size ( ) > 0 ) ) ) ; <nl> + return shape . has_layout ( ) & & shape . layout ( ) . format ( ) ! 
= INVALID_FORMAT ; <nl> } <nl> <nl> / * static * / bool LayoutUtil : : HasLayout ( const ProgramShape & program_shape ) { <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> return protobuf_util : : ProtobufEquals ( lhs , rhs ) ; <nl> } <nl> <nl> + / * static * / tensorflow : : gtl : : ArraySlice < int64 > LayoutUtil : : MinorToMajor ( <nl> + const Shape & shape ) { <nl> + CHECK ( IsDense ( shape ) ) ; <nl> + return AsInt64Slice ( shape . layout ( ) . minor_to_major ( ) ) ; <nl> + } <nl> + <nl> + / * static * / tensorflow : : gtl : : ArraySlice < int64 > LayoutUtil : : MinorToMajor ( <nl> + const Layout & layout ) { <nl> + CHECK ( layout . format ( ) = = DENSE ) ; <nl> + return AsInt64Slice ( layout . minor_to_major ( ) ) ; <nl> + } <nl> + <nl> / * static * / int64 LayoutUtil : : Major ( const Layout & layout , <nl> int64 physical_dimension_number ) { <nl> CHECK_LE ( 0 , physical_dimension_number ) ; <nl> Layout CreateDefaultLayoutForRank ( int64 rank ) { <nl> <nl> / * static * / int64 LayoutUtil : : Minor ( const Layout & layout , <nl> int64 physical_dimension_number ) { <nl> + CHECK_EQ ( layout . format ( ) , DENSE ) ; <nl> CHECK_LE ( 0 , physical_dimension_number ) ; <nl> CHECK_LT ( physical_dimension_number , layout . minor_to_major_size ( ) ) ; <nl> return layout . minor_to_major ( physical_dimension_number ) ; <nl> mmm a / tensorflow / compiler / xla / layout_util . h <nl> ppp b / tensorflow / compiler / xla / layout_util . h <nl> class LayoutUtil { <nl> / / Clears the layout on all Shapes within the given ProgramShape . <nl> static void ClearLayout ( ProgramShape * program_shape ) ; <nl> <nl> + / / Returns whether the given Shape is an array and has a dense format layout . <nl> + static bool IsDense ( const Shape & shape ) ; <nl> + <nl> + / / Returns whether the given Layout has a dense format . <nl> + static bool IsDense ( const Layout & layout ) ; <nl> + <nl> / / Returns whether the layout is monotonic and dim 0 is minor in the layout . <nl> / / * R0 and R1 : this is always trivially true . <nl> / / * R2 + : equivalent to column - major . Dimension 0 is the minor , dimension 1 is <nl> class LayoutUtil { <nl> / / dimension size ) . <nl> static bool IsPadded ( const Shape & shape ) ; <nl> <nl> + / / Returns the padded_dimensions array for the given Shape . Requires that the <nl> + / / shape is an array and has a dense layout . <nl> + static tensorflow : : gtl : : ArraySlice < const int64 > PaddedDimensions ( <nl> + const Shape & shape ) ; <nl> + <nl> + / / Returns the given index of the padded_dimensions array for the given Shape . <nl> + / / Requires that the shape is an array and has a dense layout . <nl> + static int64 PaddedDimension ( const Shape & shape , int64 index ) ; <nl> + <nl> + / / Returns the padding_value for the given Shape . Requires that the shape is <nl> + / / an array and has a dense layout . <nl> + static PaddingValue GetPaddingValue ( const Shape & shape ) ; <nl> + <nl> / / Returns whether the given shape has a layout . For tuple shapes , true is <nl> / / returned only if all elements have layouts . <nl> static bool HasLayout ( const Shape & shape ) ; <nl> class LayoutUtil { <nl> / / Returns whether lhs and rhs are identical . <nl> static bool Equal ( const Layout & lhs , const Layout & rhs ) ; <nl> <nl> + / / Returns the minor_to_major array for the given Shape . Requires that the <nl> + / / shape is an array and has a dense layout . 
<nl> + static tensorflow : : gtl : : ArraySlice < int64 > MinorToMajor ( const Shape & shape ) ; <nl> + static tensorflow : : gtl : : ArraySlice < int64 > MinorToMajor ( const Layout & layout ) ; <nl> + <nl> / / Major ( 0 ) is the most major logical dimension number , major ( 1 ) is the <nl> / / second - most - major logical dimension number and so on . <nl> / / <nl> mmm a / tensorflow / compiler / xla / literal_util . cc <nl> ppp b / tensorflow / compiler / xla / literal_util . cc <nl> Literal : : StrideConfig : : StrideConfig ( <nl> if ( ! dimensions . empty ( ) ) { <nl> / / Selects the shape with the largest minor dimension as the one upon <nl> / / which to run the tight stride loop . <nl> - if ( dimensions [ source_shape . layout ( ) . minor_to_major ( ) [ 0 ] ] > = <nl> - dimensions [ dest_shape . layout ( ) . minor_to_major ( ) [ 0 ] ] ) { <nl> - minor_dimension = source_shape . layout ( ) . minor_to_major ( ) [ 0 ] ; <nl> + if ( dimensions [ LayoutUtil : : Minor ( source_shape . layout ( ) , 0 ) ] > = <nl> + dimensions [ LayoutUtil : : Minor ( dest_shape . layout ( ) , 0 ) ] ) { <nl> + minor_dimension = LayoutUtil : : Minor ( source_shape . layout ( ) , 0 ) ; <nl> dest_stride = IndexUtil : : GetDimensionStride ( dest_shape , minor_dimension ) ; <nl> } else { <nl> - minor_dimension = dest_shape . layout ( ) . minor_to_major ( ) [ 0 ] ; <nl> + minor_dimension = LayoutUtil : : Minor ( dest_shape . layout ( ) , 0 ) ; <nl> source_stride = <nl> IndexUtil : : GetDimensionStride ( source_shape , minor_dimension ) ; <nl> } <nl> StatusOr < std : : unique_ptr < Literal > > Literal : : Reshape ( <nl> } <nl> std : : unique_ptr < Literal > output ; <nl> if ( ! LayoutUtil : : IsMonotonicWithDim0Major ( shape ( ) . layout ( ) ) ) { <nl> - std : : vector < int64 > minor_to_major ( ShapeUtil : : Rank ( shape ( ) ) ) ; <nl> - std : : iota ( minor_to_major . rbegin ( ) , minor_to_major . rend ( ) , <nl> - static_cast < int64 > ( 0 ) ) ; <nl> - output = Relayout ( LayoutUtil : : MakeLayout ( minor_to_major ) ) ; <nl> + output = <nl> + Relayout ( LayoutUtil : : GetDefaultLayoutForRank ( ShapeUtil : : Rank ( shape ( ) ) ) ) ; <nl> } else { <nl> output = CloneToUnique ( ) ; <nl> } <nl> std : : unique_ptr < Literal > Literal : : Transpose ( <nl> / / dimension has within the transposed array , a layout is affine if <nl> / / MinMaj ( Di ) = = TMinMaj ( T ( Di ) ) , with TMinMaj ( ) being the minor to major <nl> / / vector of the affine layout . <nl> + CHECK ( LayoutUtil : : IsDense ( permuted_shape ) ) ; <nl> Layout * layout = permuted_shape . mutable_layout ( ) ; <nl> layout - > clear_minor_to_major ( ) ; <nl> - for ( auto index : shape ( ) . layout ( ) . minor_to_major ( ) ) { <nl> + for ( auto index : LayoutUtil : : MinorToMajor ( shape ( ) ) ) { <nl> layout - > add_minor_to_major ( inverse_permutation [ index ] ) ; <nl> } <nl> std : : unique_ptr < Literal > new_literal = CreateFromShape ( permuted_shape ) ; <nl> std : : unique_ptr < Literal > Literal : : Slice ( <nl> CHECK_GT ( dimension , 0 ) ; <nl> result_dimensions . push_back ( dimension ) ; <nl> } <nl> - const auto result_shape = ShapeUtil : : MakeShapeWithLayout ( <nl> - shape ( ) . element_type ( ) , result_dimensions , <nl> - AsInt64Slice ( shape ( ) . layout ( ) . minor_to_major ( ) ) ) ; <nl> + const auto result_shape = <nl> + ShapeUtil : : MakeShapeWithLayout ( shape ( ) . 
element_type ( ) , result_dimensions , <nl> + LayoutUtil : : MinorToMajor ( shape ( ) ) ) ; <nl> <nl> auto result_literal = MakeUnique < Literal > ( ) ; <nl> * result_literal - > mutable_shape ( ) = result_shape ; <nl> mmm a / tensorflow / compiler / xla / literal_util . h <nl> ppp b / tensorflow / compiler / xla / literal_util . h <nl> void Literal : : PopulateR2WithLayout ( <nl> primitive_util : : NativeToPrimitiveType < NativeT > ( ) , <nl> { static_cast < int64 > ( values . size ( ) ) , <nl> static_cast < int64 > ( values . begin ( ) - > size ( ) ) } , <nl> - AsInt64Slice ( layout . minor_to_major ( ) ) ) ; <nl> + LayoutUtil : : MinorToMajor ( layout ) ) ; <nl> <nl> const int64 dim0_size = values . size ( ) ; <nl> const int64 dim1_size = values . begin ( ) - > size ( ) ; <nl> void Literal : : PopulateR2 ( <nl> template < typename NativeT > <nl> void Literal : : PopulateFromArrayWithLayout ( const Array < NativeT > & values , <nl> const Layout & layout ) { <nl> + CHECK_EQ ( layout . format ( ) , DENSE ) ; <nl> * mutable_shape ( ) = ShapeUtil : : MakeShapeWithLayout ( <nl> primitive_util : : NativeToPrimitiveType < NativeT > ( ) , values . dimensions ( ) , <nl> - AsInt64Slice ( layout . minor_to_major ( ) ) ) ; <nl> + LayoutUtil : : MinorToMajor ( layout ) ) ; <nl> Reserve ( values . num_elements ( ) ) ; <nl> values . Each ( [ this ] ( tensorflow : : gtl : : ArraySlice < int64 > indices , <nl> NativeT value ) { this - > Set ( indices , value ) ; } ) ; <nl> mmm a / tensorflow / compiler / xla / service / algebraic_simplifier . cc <nl> ppp b / tensorflow / compiler / xla / service / algebraic_simplifier . cc <nl> Status AlgebraicSimplifierVisitor : : HandlePower ( HloInstruction * power ) { <nl> power , HloInstruction : : CreateBinary ( power - > shape ( ) , HloOpcode : : kDivide , <nl> broadcast_one , lhs ) ) ; <nl> } <nl> + <nl> + VLOG ( 10 ) < < " trying transform [ pow ( pow ( A , X ) , Y ) = > pow ( A , X * Y ) ] : " <nl> + < < power - > ToString ( ) ; <nl> + <nl> + / / Don ' t perform this optimization if either of the exponents is complex ; this <nl> + / / identity is true only for real - valued exponents . In addition , we cowardly <nl> + / / refuse to do this transformation if the two exponents have different <nl> + / / element types . <nl> + if ( lhs - > opcode ( ) = = HloOpcode : : kPower & & <nl> + ! ShapeUtil : : ElementIsComplex ( lhs - > operand ( 1 ) - > shape ( ) ) & & <nl> + ! ShapeUtil : : ElementIsComplex ( rhs - > shape ( ) ) & & <nl> + ShapeUtil : : SameElementType ( lhs - > operand ( 1 ) - > shape ( ) , rhs - > shape ( ) ) ) { <nl> + auto exponent_product = <nl> + computation_ - > AddInstruction ( HloInstruction : : CreateBinary ( <nl> + rhs - > shape ( ) , HloOpcode : : kMultiply , lhs - > mutable_operand ( 1 ) , rhs ) ) ; <nl> + return ReplaceWithNewInstruction ( <nl> + power , HloInstruction : : CreateBinary ( power - > shape ( ) , HloOpcode : : kPower , <nl> + lhs - > mutable_operand ( 0 ) , <nl> + exponent_product ) ) ; <nl> + } <nl> + <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> StatusOr < bool > AlgebraicSimplifierVisitor : : <nl> ShapeUtil : : MakeShapeWithLayout ( <nl> user - > shape ( ) . element_type ( ) , <nl> AsInt64Slice ( operand - > shape ( ) . dimensions ( ) ) , <nl> - AsInt64Slice ( operand - > shape ( ) . layout ( ) .
minor_to_major ( ) ) ) , <nl> + LayoutUtil : : MinorToMajor ( operand - > shape ( ) ) ) , <nl> new_user_operands ) ) ; <nl> VLOG ( 4 ) < < " new user : " < < new_user - > ToString ( ) ; <nl> HloInstruction * new_reshape_or_broadcast = nullptr ; <nl> StatusOr < bool > AlgebraicSimplifierVisitor : : <nl> ShapeUtil : : MakeShapeWithLayout ( <nl> user - > shape ( ) . element_type ( ) , <nl> AsInt64Slice ( reshape_or_broadcast - > shape ( ) . dimensions ( ) ) , <nl> - AsInt64Slice ( <nl> - reshape_or_broadcast - > shape ( ) . layout ( ) . minor_to_major ( ) ) ) , <nl> + LayoutUtil : : MinorToMajor ( reshape_or_broadcast - > shape ( ) ) ) , <nl> new_user ) ) ; <nl> } else { <nl> TF_RET_CHECK ( reshape_or_broadcast - > opcode ( ) = = HloOpcode : : kBroadcast ) ; <nl> StatusOr < bool > AlgebraicSimplifierVisitor : : <nl> ShapeUtil : : MakeShapeWithLayout ( <nl> user - > shape ( ) . element_type ( ) , <nl> AsInt64Slice ( reshape_or_broadcast - > shape ( ) . dimensions ( ) ) , <nl> - AsInt64Slice ( <nl> - reshape_or_broadcast - > shape ( ) . layout ( ) . minor_to_major ( ) ) ) , <nl> + LayoutUtil : : MinorToMajor ( reshape_or_broadcast - > shape ( ) ) ) , <nl> new_user , reshape_or_broadcast - > dimensions ( ) ) ) ; <nl> } <nl> VLOG ( 4 ) < < " new reshape / broadcast : " <nl> Status AlgebraicSimplifierVisitor : : HandleConvolution ( <nl> / / still convert Conv into more efficient Matmul with operand transposition <nl> / / ( such as the transposition flags in cuBLAS SGEMM ) . <nl> if ( ! LayoutUtil : : Equal ( input_shape . layout ( ) , convolution_shape . layout ( ) ) | | <nl> - input_shape . layout ( ) . minor_to_major ( 0 ) ! = <nl> + LayoutUtil : : Minor ( input_shape . layout ( ) , 0 ) ! = <nl> dnums . input_feature_dimension ( ) | | <nl> - convolution_shape . layout ( ) . minor_to_major ( 0 ) ! = <nl> + LayoutUtil : : Minor ( convolution_shape . layout ( ) , 0 ) ! = <nl> dnums . output_feature_dimension ( ) | | <nl> / / The input feature dimension should come later in the minor - to - major <nl> / / order . <nl> - ( PositionInContainer ( filter_shape . layout ( ) . minor_to_major ( ) , <nl> + ( PositionInContainer ( LayoutUtil : : MinorToMajor ( filter_shape ) , <nl> dnums . kernel_input_feature_dimension ( ) ) < <nl> - PositionInContainer ( filter_shape . layout ( ) . minor_to_major ( ) , <nl> + PositionInContainer ( LayoutUtil : : MinorToMajor ( filter_shape ) , <nl> dnums . kernel_output_feature_dimension ( ) ) ) ) { <nl> return Status : : OK ( ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / algebraic_simplifier_test . cc <nl> ppp b / tensorflow / compiler / xla / service / algebraic_simplifier_test . cc <nl> TEST_F ( AlgebraicSimplifierTest , DivOfBroadcastingPower ) { <nl> EXPECT_EQ ( 0 , negate_shape . dimensions_size ( ) ) ; <nl> } <nl> <nl> + / / pow ( pow ( A , X ) , Y ) = > pow ( A , X * Y ) <nl> + TEST_F ( AlgebraicSimplifierTest , PowerOfPower ) { <nl> + Shape r0f32 = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> + Shape r1f32 = ShapeUtil : : MakeShape ( F32 , { 7 } ) ; <nl> + HloComputation : : Builder builder ( TestName ( ) ) ; <nl> + HloInstruction * base = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , r1f32 , " param0 " ) ) ; <nl> + HloInstruction * exp1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , r0f32 , " param1 " ) ) ; <nl> + HloInstruction * exp2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 2 , r0f32 , " param2 " ) ) ; <nl> + HloInstruction * inner_power = builder . 
AddInstruction ( <nl> + HloInstruction : : CreateBinary ( r1f32 , HloOpcode : : kPower , base , exp1 ) ) ; <nl> + builder . AddInstruction ( HloInstruction : : CreateBinary ( r1f32 , HloOpcode : : kPower , <nl> + inner_power , exp2 ) ) ; <nl> + <nl> + auto module = CreateNewModule ( ) ; <nl> + auto computation = module - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + AlgebraicSimplifier simplifier ( / * is_layout_sensitive = * / false , <nl> + non_bitcasting_callback ( ) ) ; <nl> + ASSERT_TRUE ( simplifier . Run ( module . get ( ) ) . ValueOrDie ( ) ) ; <nl> + EXPECT_THAT ( computation - > root_instruction ( ) , <nl> + op : : Power ( base , op : : Multiply ( exp1 , exp2 ) ) ) ; <nl> + } <nl> + <nl> + / / Don ' t simplify pow ( pow ( A , X ) , Y ) = > pow ( A , X * Y ) if X and Y are complex <nl> + / / numbers . <nl> + TEST_F ( AlgebraicSimplifierTest , PowerOfPowerComplex ) { <nl> + Shape r0c64 = ShapeUtil : : MakeShape ( C64 , { } ) ; <nl> + Shape r1f32 = ShapeUtil : : MakeShape ( F32 , { 7 } ) ; <nl> + HloComputation : : Builder builder ( TestName ( ) ) ; <nl> + HloInstruction * base = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , r1f32 , " param0 " ) ) ; <nl> + HloInstruction * exp1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , r0c64 , " param1 " ) ) ; <nl> + HloInstruction * exp2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 2 , r0c64 , " param2 " ) ) ; <nl> + HloInstruction * inner_power = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( r1f32 , HloOpcode : : kPower , base , exp1 ) ) ; <nl> + builder . AddInstruction ( HloInstruction : : CreateBinary ( r1f32 , HloOpcode : : kPower , <nl> + inner_power , exp2 ) ) ; <nl> + <nl> + auto module = CreateNewModule ( ) ; <nl> + module - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + AlgebraicSimplifier simplifier ( / * is_layout_sensitive = * / false , <nl> + non_bitcasting_callback ( ) ) ; <nl> + ASSERT_FALSE ( simplifier . Run ( module . get ( ) ) . ValueOrDie ( ) ) ; <nl> + } <nl> + <nl> / / Test that A / 1 is simplified to A for a scalar . <nl> TEST_F ( AlgebraicSimplifierTest , DivOneScalar ) { <nl> Shape r0f32 = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> mmm a / tensorflow / compiler / xla / service / cpu / dot_op_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / dot_op_emitter . cc <nl> DotOpEmitter : : MatMultDims DotOpEmitter : : GetMatMultDims ( ) const { <nl> return { lhs_shape . dimensions ( transpose_lhs_ ? 1 : 0 ) , <nl> lhs_shape . dimensions ( transpose_lhs_ ? 0 : 1 ) , <nl> rhs_shape . dimensions ( transpose_rhs_ ? 0 : 1 ) , <nl> - lhs_shape . layout ( ) . minor_to_major ( 0 ) = = 0 , <nl> - rhs_shape . layout ( ) . minor_to_major ( 0 ) = = 0 } ; <nl> + LayoutUtil : : Minor ( lhs_shape . layout ( ) , 0 ) = = 0 , <nl> + LayoutUtil : : Minor ( rhs_shape . layout ( ) , 0 ) = = 0 } ; <nl> } <nl> <nl> llvm_ir : : IrArray : : Index DotOpEmitter : : EmitOperandArrayLoopNest ( <nl> llvm_ir : : IrArray : : Index DotOpEmitter : : EmitOperandArrayLoopNest ( <nl> / / reduction dimension . <nl> std : : vector < int64 > dimensions ; <nl> const Shape & shape = operand_array . GetShape ( ) ; <nl> - for ( int i = shape . layout ( ) . minor_to_major_size ( ) - 1 ; i > = 0 ; - - i ) { <nl> - int64 dimension = shape . layout ( ) . minor_to_major ( i ) ; <nl> + for ( int i = LayoutUtil : : MinorToMajor ( shape ) . size ( ) - 1 ; i > = 0 ; - - i ) { <nl> + int64 dimension = LayoutUtil : : Minor ( shape . 
layout ( ) , i ) ; <nl> if ( dimension ! = reduction_dimension ) { <nl> dimensions . push_back ( dimension ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / cpu / elemental_ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / elemental_ir_emitter . cc <nl> StatusOr < llvm : : Value * > CpuElementalIrEmitter : : EmitFloatUnaryOp ( <nl> default : <nl> return Unimplemented ( " tanh " ) ; <nl> } <nl> - / / Create function type for the function . <nl> - llvm : : FunctionType * function_type = llvm : : FunctionType : : get ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( element_type , module_ ) , <nl> - llvm_ir : : PrimitiveTypeToIrType ( element_type , module_ ) , <nl> - / * isVarArg = * / false ) ; <nl> / / Create function declaration for ' tanhf ' . <nl> llvm : : Function * function = <nl> llvm : : cast < llvm : : Function > ( module_ - > getOrInsertFunction ( <nl> - llvm_ir : : AsStringRef ( function_name ) , function_type ) ) ; <nl> + llvm_ir : : AsStringRef ( function_name ) , operand_value - > getType ( ) , <nl> + operand_value - > getType ( ) ) ) ; <nl> function - > setCallingConv ( llvm : : CallingConv : : C ) ; <nl> function - > setDoesNotThrow ( ) ; <nl> function - > setDoesNotAccessMemory ( ) ; <nl> StatusOr < llvm : : Value * > CpuElementalIrEmitter : : EmitFloatUnaryOp ( <nl> } <nl> } <nl> <nl> + StatusOr < llvm : : Value * > CpuElementalIrEmitter : : EmitAtan2 ( <nl> + PrimitiveType prim_type , llvm : : Value * lhs , llvm : : Value * rhs ) const { <nl> + string function_name ; <nl> + switch ( prim_type ) { <nl> + case F32 : <nl> + function_name = " atan2f " ; <nl> + break ; <nl> + case F64 : <nl> + function_name = " atan2 " ; <nl> + break ; <nl> + default : <nl> + return Unimplemented ( " atan2 " ) ; <nl> + } <nl> + / / Create function declaration for ' atan2 ' . <nl> + llvm : : Function * function = <nl> + llvm : : cast < llvm : : Function > ( module_ - > getOrInsertFunction ( <nl> + llvm_ir : : AsStringRef ( function_name ) , lhs - > getType ( ) , lhs - > getType ( ) , <nl> + rhs - > getType ( ) ) ) ; <nl> + function - > setCallingConv ( llvm : : CallingConv : : C ) ; <nl> + function - > setDoesNotThrow ( ) ; <nl> + function - > setDoesNotAccessMemory ( ) ; <nl> + / / Create instruction to call ' atan2 ' . <nl> + return ir_builder_ - > CreateCall ( function , { lhs , rhs } ) ; <nl> + } <nl> + <nl> llvm_ir : : ElementGenerator CpuElementalIrEmitter : : MakeElementGenerator ( <nl> const HloInstruction * hlo , <nl> const HloToElementGeneratorMap & operand_to_generator ) const { <nl> mmm a / tensorflow / compiler / xla / service / cpu / elemental_ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / cpu / elemental_ir_emitter . h <nl> class CpuElementalIrEmitter : public ElementalIrEmitter { <nl> protected : <nl> StatusOr < llvm : : Value * > EmitFloatUnaryOp ( <nl> const HloInstruction * op , llvm : : Value * operand_value ) const override ; <nl> + StatusOr < llvm : : Value * > EmitAtan2 ( PrimitiveType prim_type , llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const override ; <nl> <nl> IrEmitter * ir_emitter_ ; <nl> } ; <nl> mmm a / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> StatusOr < bool > IrEmitter : : EmitVectorizedReduce ( <nl> <nl> bool is_reduction_over_minor_dimension = <nl> std : : find ( dimensions . begin ( ) , dimensions . end ( ) , <nl> - arg - > shape ( ) . layout ( ) . minor_to_major ( 0 ) ) ! = dimensions . 
end ( ) ; <nl> + LayoutUtil : : Minor ( arg - > shape ( ) . layout ( ) , 0 ) ) ! = <nl> + dimensions . end ( ) ; <nl> <nl> unsigned element_alignment = tensorflow : : MathUtil : : GCD < unsigned > ( <nl> ShapeUtil : : ByteSizeOfPrimitiveType ( reduce - > shape ( ) . element_type ( ) ) , <nl> StatusOr < bool > IrEmitter : : EmitVectorizedReduce ( <nl> <nl> llvm_ir : : ForLoopNest loop_nest ( IrName ( reduce ) , & ir_builder_ ) ; <nl> llvm_ir : : IrArray : : Index array_index ( reduce - > shape ( ) . dimensions_size ( ) ) ; <nl> - for ( int i = reduce - > shape ( ) . layout ( ) . minor_to_major_size ( ) - 1 ; i > 0 ; - - i ) { <nl> - int64 dimension = reduce - > shape ( ) . layout ( ) . minor_to_major ( i ) ; <nl> + for ( int i = LayoutUtil : : MinorToMajor ( reduce - > shape ( ) ) . size ( ) - 1 ; i > 0 ; <nl> + - - i ) { <nl> + int64 dimension = LayoutUtil : : Minor ( reduce - > shape ( ) . layout ( ) , i ) ; <nl> int64 start_index = 0 ; <nl> int64 end_index = reduce - > shape ( ) . dimensions ( dimension ) ; <nl> std : : unique_ptr < llvm_ir : : ForLoop > loop = <nl> StatusOr < bool > IrEmitter : : EmitVectorizedReduce ( <nl> array_index [ dimension ] = loop - > GetIndVarValue ( ) ; <nl> } <nl> <nl> - int64 innermost_dimension = reduce - > shape ( ) . layout ( ) . minor_to_major ( 0 ) ; <nl> + int64 innermost_dimension = LayoutUtil : : Minor ( reduce - > shape ( ) . layout ( ) , 0 ) ; <nl> int64 innermost_dimension_size = <nl> reduce - > shape ( ) . dimensions ( innermost_dimension ) ; <nl> <nl> StatusOr < bool > IrEmitter : : EmitVectorizedReduce ( <nl> target_array ) ; <nl> <nl> if ( auto exit_terminator = loop - > GetExitBasicBlock ( ) - > getTerminator ( ) ) { <nl> - CHECK_GT ( reduce - > shape ( ) . layout ( ) . minor_to_major_size ( ) , 1 ) ; <nl> + CHECK_GT ( LayoutUtil : : MinorToMajor ( reduce - > shape ( ) ) . size ( ) , 1 ) ; <nl> ir_builder_ . SetInsertPoint ( exit_terminator ) ; <nl> } else { <nl> - CHECK_EQ ( reduce - > shape ( ) . layout ( ) . minor_to_major_size ( ) , 1 ) ; <nl> + CHECK_EQ ( LayoutUtil : : MinorToMajor ( reduce - > shape ( ) ) . size ( ) , 1 ) ; <nl> ir_builder_ . SetInsertPoint ( loop - > GetExitBasicBlock ( ) ) ; <nl> } <nl> } <nl> Status IrEmitter : : HandleSlice ( HloInstruction * slice ) { <nl> / / * Implement the memcpy within the innermost loop . <nl> <nl> tensorflow : : gtl : : FlatSet < int64 > inner_dims ; <nl> - for ( int64 dim : layout . minor_to_major ( ) ) { <nl> + for ( int64 dim : LayoutUtil : : MinorToMajor ( layout ) ) { <nl> if ( operand - > shape ( ) . dimensions ( dim ) ! = slice - > shape ( ) . dimensions ( dim ) ) { <nl> break ; <nl> } <nl> Status IrEmitter : : HandleSlice ( HloInstruction * slice ) { <nl> <nl> / / memcpy_dim is the innermost ( in terms of layout ) dimension for which the <nl> / / slice does * not * just copy all the elements along the dimension . <nl> - const int64 memcpy_dim = layout . minor_to_major ( inner_dims . size ( ) ) ; <nl> + const int64 memcpy_dim = LayoutUtil : : Minor ( layout , inner_dims . size ( ) ) ; <nl> <nl> const bool memcpy_is_contiguous = slice - > slice_strides ( memcpy_dim ) = = 1 ; <nl> / / The number of logical elements that can be copied in a single call <nl> StatusOr < bool > IrEmitter : : EmitFastConcatenate ( <nl> <nl> int64 concat_dim = concatenate - > dimensions ( 0 ) ; <nl> const Layout & output_layout = output_shape . layout ( ) ; <nl> + auto output_min2maj = LayoutUtil : : MinorToMajor ( output_layout ) ; <nl> auto concat_dim_layout_itr = <nl> - std : : find ( output_layout . 
minor_to_major ( ) . begin ( ) , <nl> - output_layout . minor_to_major ( ) . end ( ) , concat_dim ) ; <nl> + std : : find ( output_min2maj . begin ( ) , output_min2maj . end ( ) , concat_dim ) ; <nl> <nl> - std : : vector < int64 > inner_dims ( output_layout . minor_to_major ( ) . begin ( ) , <nl> - concat_dim_layout_itr ) ; <nl> + std : : vector < int64 > inner_dims ( output_min2maj . begin ( ) , concat_dim_layout_itr ) ; <nl> std : : vector < int64 > outer_dims ( std : : next ( concat_dim_layout_itr ) , <nl> - output_layout . minor_to_major ( ) . end ( ) ) ; <nl> + output_min2maj . end ( ) ) ; <nl> <nl> llvm : : Type * i8_ptr_type = ir_builder_ . getInt8PtrTy ( ) ; <nl> llvm : : Type * i8_type = ir_builder_ . getInt8Ty ( ) ; <nl> mmm a / tensorflow / compiler / xla / service / cpu / parallel_loop_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / parallel_loop_emitter . cc <nl> llvm_ir : : IrArray : : Index ParallelLoopEmitter : : EmitIndexAndSetExitBasicBlock ( <nl> llvm_ir : : IrArray : : Index array_index ( num_dims ) ; <nl> <nl> / / Add loops from outer - most to inner - most dimensions . <nl> - for ( int i = shape_ . layout ( ) . minor_to_major_size ( ) - 1 ; i > = 0 ; - - i ) { <nl> - const int64 dimension = shape_ . layout ( ) . minor_to_major ( i ) ; <nl> + for ( int i = LayoutUtil : : MinorToMajor ( shape_ ) . size ( ) - 1 ; i > = 0 ; - - i ) { <nl> + const int64 dimension = LayoutUtil : : Minor ( shape_ . layout ( ) , i ) ; <nl> const int bounds_index = num_dims - 1 - i ; <nl> if ( bounds_index < dynamic_loop_bounds_ - > size ( ) ) { <nl> / / Emit dynamic loop bounds for this dimension . Dynamic loop bounds <nl> mmm a / tensorflow / compiler / xla / service / elemental_ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / elemental_ir_emitter . cc <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatUnaryOp ( <nl> primitive_util : : BitWidth ( to_type ) ) ; <nl> } <nl> case HloOpcode : : kExp : <nl> - return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : exp , { operand_value } , <nl> - { operand_value - > getType ( ) } , <nl> - ir_builder_ ) ; <nl> + return EmitExp ( op - > shape ( ) . element_type ( ) , operand_value ) ; <nl> case HloOpcode : : kLog : <nl> - return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : log , { operand_value } , <nl> - { operand_value - > getType ( ) } , <nl> - ir_builder_ ) ; <nl> + return EmitLog ( op - > shape ( ) . element_type ( ) , operand_value ) ; <nl> case HloOpcode : : kCos : <nl> - return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : cos , { operand_value } , <nl> - { operand_value - > getType ( ) } , <nl> - ir_builder_ ) ; <nl> + return EmitCos ( op - > shape ( ) . element_type ( ) , operand_value ) ; <nl> case HloOpcode : : kSin : <nl> - return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : sin , { operand_value } , <nl> - { operand_value - > getType ( ) } , <nl> - ir_builder_ ) ; <nl> + return EmitSin ( op - > shape ( ) . element_type ( ) , operand_value ) ; <nl> case HloOpcode : : kFloor : <nl> return llvm_ir : : EmitCallToIntrinsic ( <nl> llvm : : Intrinsic : : floor , { operand_value } , { operand_value - > getType ( ) } , <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatUnaryOp ( <nl> <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexUnaryOp ( <nl> const HloInstruction * op , llvm : : Value * operand_value ) const { <nl> + PrimitiveType input_type = op - > operand ( 0 ) - > shape ( ) . 
element_type ( ) ; <nl> + PrimitiveType component_type = <nl> + primitive_util : : IsComplexType ( input_type ) <nl> + ? primitive_util : : ComplexComponentType ( input_type ) <nl> + : input_type ; <nl> switch ( op - > opcode ( ) ) { <nl> - / / TODO ( b / 65209142 ) : Angle / Log require atan2 . <nl> - / / case HloOpcode : : kLog : / / log ( a + bi ) = . 5 * log ( a ^ 2 + b ^ 2 ) + i * atan2 ( b , a ) <nl> + case HloOpcode : : kLog : { <nl> + / / log ( a + bi ) = . 5 * log ( a ^ 2 + b ^ 2 ) + i * atan2 ( b , a ) <nl> + auto a = EmitExtractReal ( operand_value ) ; <nl> + auto b = EmitExtractImag ( operand_value ) ; <nl> + llvm : : Type * llvm_ty = a - > getType ( ) ; <nl> + auto sum_sq = ir_builder_ - > CreateFAdd ( ir_builder_ - > CreateFMul ( a , a ) , <nl> + ir_builder_ - > CreateFMul ( b , b ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto log_sum_sq , EmitLog ( component_type , sum_sq ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto angle , EmitAtan2 ( component_type , b , a ) ) ; <nl> + auto one_half = llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) ; <nl> + return EmitComposeComplex ( <nl> + op , ir_builder_ - > CreateFMul ( one_half , log_sum_sq ) , angle ) ; <nl> + } <nl> case HloOpcode : : kConvert : { <nl> PrimitiveType from_type = op - > operand ( 0 ) - > shape ( ) . element_type ( ) ; <nl> TF_RET_CHECK ( primitive_util : : IsComplexType ( from_type ) ) ; <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexUnaryOp ( <nl> } <nl> case HloOpcode : : kExp : { <nl> / / e ^ ( a + bi ) = e ^ a * ( cos ( b ) + sin ( b ) i ) <nl> - auto exp_a = llvm_ir : : EmitCallToIntrinsic ( <nl> - llvm : : Intrinsic : : exp , { EmitExtractReal ( operand_value ) } , <nl> - { EmitExtractReal ( operand_value ) - > getType ( ) } , ir_builder_ ) ; <nl> - auto cos_b = llvm_ir : : EmitCallToIntrinsic ( <nl> - llvm : : Intrinsic : : cos , { EmitExtractImag ( operand_value ) } , <nl> - { EmitExtractImag ( operand_value ) - > getType ( ) } , ir_builder_ ) ; <nl> - auto sin_b = llvm_ir : : EmitCallToIntrinsic ( <nl> - llvm : : Intrinsic : : sin , { EmitExtractImag ( operand_value ) } , <nl> - { EmitExtractImag ( operand_value ) - > getType ( ) } , ir_builder_ ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto exp_a , EmitExp ( component_type , EmitExtractReal ( operand_value ) ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto cos_b , EmitCos ( component_type , EmitExtractImag ( operand_value ) ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto sin_b , EmitSin ( component_type , EmitExtractImag ( operand_value ) ) ) ; <nl> return EmitComposeComplex ( op , ir_builder_ - > CreateFMul ( exp_a , cos_b ) , <nl> ir_builder_ - > CreateFMul ( exp_a , sin_b ) ) ; <nl> } <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexUnaryOp ( <nl> auto a = EmitExtractReal ( operand_value ) ; <nl> auto b = EmitExtractImag ( operand_value ) ; <nl> auto type = a - > getType ( ) ; <nl> - auto exp_b = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : exp , { b } , <nl> - { type } , ir_builder_ ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto exp_b , EmitExp ( component_type , b ) ) ; <nl> auto half_exp_b = <nl> ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( type , 0 . 5 ) , exp_b ) ; <nl> auto half_exp_neg_b = <nl> ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( type , 0 . 
5 ) , exp_b ) ; <nl> - auto cos_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : cos , { a } , <nl> - { type } , ir_builder_ ) ; <nl> - auto sin_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : sin , { a } , <nl> - { type } , ir_builder_ ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto cos_a , EmitCos ( component_type , a ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto sin_a , EmitSin ( component_type , a ) ) ; <nl> return EmitComposeComplex ( <nl> op , <nl> ir_builder_ - > CreateFMul ( <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexUnaryOp ( <nl> auto a = EmitExtractReal ( operand_value ) ; <nl> auto b = EmitExtractImag ( operand_value ) ; <nl> auto type = a - > getType ( ) ; <nl> - auto exp_b = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : exp , { b } , <nl> - { type } , ir_builder_ ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto exp_b , EmitExp ( component_type , b ) ) ; <nl> auto half_exp_b = <nl> ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( type , 0 . 5 ) , exp_b ) ; <nl> auto half_exp_neg_b = <nl> ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( type , 0 . 5 ) , exp_b ) ; <nl> - auto cos_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : cos , { a } , <nl> - { type } , ir_builder_ ) ; <nl> - auto sin_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : sin , { a } , <nl> - { type } , ir_builder_ ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto cos_a , EmitCos ( component_type , a ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto sin_a , EmitSin ( component_type , a ) ) ; <nl> return EmitComposeComplex ( <nl> op , <nl> ir_builder_ - > CreateFMul ( <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexUnaryOp ( <nl> ir_builder_ - > CreateFMul ( <nl> cos_a , ir_builder_ - > CreateFSub ( half_exp_b , half_exp_neg_b ) ) ) ; <nl> } <nl> + case HloOpcode : : kTanh : { <nl> + / * <nl> + tanh = ( exp ( x ) - exp ( - x ) ) / ( exp ( x ) + exp ( - x ) ) <nl> + e ^ ( a + bi ) = e ^ a * ( cos ( b ) + sin ( b ) i ) <nl> + so tanh = ( ( ( cos ( b ) + sin ( b ) i ) e ^ a - ( cos ( - b ) + sin ( - b ) i ) e ^ - a ) ) / <nl> + ( ( ( cos ( b ) + sin ( b ) i ) e ^ a + ( cos ( - b ) + sin ( - b ) i ) e ^ - a ) ) <nl> + cos ( b ) = cos ( - b ) , sin ( - b ) = - sin ( b ) <nl> + so tanh = ( ( ( cos ( b ) + sin ( b ) i ) e ^ a - ( cos ( b ) - sin ( b ) i ) e ^ - a ) ) / <nl> + ( ( ( cos ( b ) + sin ( b ) i ) e ^ a + ( cos ( b ) - sin ( b ) i ) e ^ - a ) ) <nl> + = ( cos ( b ) e ^ a + i * sin ( b ) e ^ a + cos ( b ) ( - e ^ - a ) + i * sin ( b ) e ^ - a ) / <nl> + ( cos ( b ) e ^ a + i * sin ( b ) e ^ a + cos ( b ) e ^ - a + i * sin ( b ) ( - e ^ - a ) ) <nl> + = ( cos ( b ) ( e ^ a - e ^ - a ) + i * sin ( b ) ( e ^ a + e ^ - a ) ) / <nl> + ( cos ( b ) ( e ^ a + e ^ - a ) + i * sin ( b ) ( e ^ a - e ^ - a ) ) <nl> + This is a complex division , so we can multiply by denom_conj / denom_conj <nl> + = ( cos ( b ) ( e ^ a - e ^ - a ) + i * sin ( b ) ( e ^ a + e ^ - a ) ) * <nl> + ( cos ( b ) ( e ^ a + e ^ - a ) - i * sin ( b ) ( e ^ a - e ^ - a ) ) / <nl> + ( ( cos ( b ) ( e ^ a + e ^ - a ) ) ^ 2 + ( sin ( b ) ( e ^ a - e ^ - a ) ) ^ 2 ) <nl> + = ( cos ( b ) ^ 2 ( e ^ ( 2a ) - e ^ ( - 2a ) ) + sin ( b ) ^ 2 ( e ^ ( 2a ) - e ^ ( - 2a ) ) + <nl> + i * ( cos ( b ) sin ( b ) ( e ^ a + e ^ - a ) ^ 2 - cos ( b ) sin ( b ) ( e ^ a - e ^ - a ) ^ 2 ) ) / <nl> + ( ( cos ( b ) ( e ^ a + e ^ - a ) ) ^ 2 + ( sin ( b ) ( e ^ a - e ^ - a ) ) ^ 2 ) <nl> + * / <nl> + auto a = EmitExtractReal ( operand_value ) ; <nl> + auto b = EmitExtractImag ( operand_value ) ; 
<nl> + TF_ASSIGN_OR_RETURN ( auto exp_a , EmitExp ( component_type , a ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto cos_b , EmitCos ( component_type , b ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto sin_b , EmitSin ( component_type , b ) ) ; <nl> + auto exp_neg_a = ir_builder_ - > CreateFDiv ( <nl> + llvm : : ConstantFP : : get ( exp_a - > getType ( ) , 1 ) , exp_a ) ; <nl> + auto exp_2a_minus_exp_neg_2a = ir_builder_ - > CreateFSub ( <nl> + ir_builder_ - > CreateFMul ( exp_a , exp_a ) , <nl> + ir_builder_ - > CreateFMul ( exp_neg_a , exp_neg_a ) ) ; <nl> + auto cos_b_sq = ir_builder_ - > CreateFMul ( cos_b , cos_b ) ; <nl> + auto sin_b_sq = ir_builder_ - > CreateFMul ( sin_b , sin_b ) ; <nl> + auto real_num = ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( cos_b_sq , exp_2a_minus_exp_neg_2a ) , <nl> + ir_builder_ - > CreateFMul ( sin_b_sq , exp_2a_minus_exp_neg_2a ) ) ; <nl> + auto cos_b_sin_b = ir_builder_ - > CreateFMul ( cos_b , sin_b ) ; <nl> + auto exp_a_plus_exp_neg_a = ir_builder_ - > CreateFAdd ( exp_a , exp_neg_a ) ; <nl> + auto exp_a_plus_exp_neg_a_sq = <nl> + ir_builder_ - > CreateFMul ( exp_a_plus_exp_neg_a , exp_a_plus_exp_neg_a ) ; <nl> + auto exp_a_minus_exp_neg_a = ir_builder_ - > CreateFSub ( exp_a , exp_neg_a ) ; <nl> + auto exp_a_minus_exp_neg_a_sq = <nl> + ir_builder_ - > CreateFMul ( exp_a_minus_exp_neg_a , exp_a_minus_exp_neg_a ) ; <nl> + auto imag_num = ir_builder_ - > CreateFMul ( <nl> + cos_b_sin_b , ir_builder_ - > CreateFSub ( exp_a_plus_exp_neg_a_sq , <nl> + exp_a_minus_exp_neg_a_sq ) ) ; <nl> + auto denom = ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( cos_b_sq , exp_a_plus_exp_neg_a_sq ) , <nl> + ir_builder_ - > CreateFMul ( sin_b_sq , exp_a_minus_exp_neg_a_sq ) ) ; <nl> + return EmitComposeComplex ( op , ir_builder_ - > CreateFDiv ( real_num , denom ) , <nl> + ir_builder_ - > CreateFDiv ( imag_num , denom ) ) ; <nl> + } <nl> case HloOpcode : : kAbs : { <nl> auto sum_sq = ir_builder_ - > CreateFAdd ( <nl> ir_builder_ - > CreateFMul ( EmitExtractReal ( operand_value ) , <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatBinaryOp ( <nl> const HloInstruction * op , llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const { <nl> switch ( op - > opcode ( ) ) { <nl> - / / case HloOpcode : : kAtan2 : / / TODO ( b / 65209142 ) : CPU atan2 support <nl> case HloOpcode : : kComplex : <nl> return EmitComposeComplex ( op , lhs_value , rhs_value ) ; <nl> case HloOpcode : : kAdd : <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatBinaryOp ( <nl> case HloOpcode : : kMinimum : <nl> return EmitFloatMin ( lhs_value , rhs_value ) ; <nl> case HloOpcode : : kPower : <nl> - return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : pow , <nl> - { lhs_value , rhs_value } , <nl> - { lhs_value - > getType ( ) } , ir_builder_ ) ; <nl> - <nl> + return EmitPow ( op - > shape ( ) . element_type ( ) , lhs_value , rhs_value ) ; <nl> + case HloOpcode : : kAtan2 : <nl> + return EmitAtan2 ( op - > shape ( ) . element_type ( ) , lhs_value , rhs_value ) ; <nl> default : <nl> return Unimplemented ( " binary floating point op ' % s ' " , <nl> HloOpcodeString ( op - > opcode ( ) ) . 
c_str ( ) ) ; <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexBinaryOp ( <nl> EmitExtractImag ( lhs_value ) , <nl> EmitExtractImag ( rhs_value ) , ir_builder_ ) ) ; <nl> <nl> - / / TODO ( b / 65209142 ) : requires arg ( z ) - > requires atan | atan2 intrinsic <nl> - / / case HloOpcode : : kPower : <nl> - / / / / ( a + bi ) ^ ( c + di ) = exp ( i ( c + di ) * arg ( a + bi ) ) * ( a * a + b * b ) ^ ( c / 2 + di / 2 ) <nl> + case HloOpcode : : kPower : { <nl> + / / ( a + bi ) ^ ( c + di ) = <nl> + / / ( a * a + b * b ) ^ ( 0 . 5c ) * exp ( - d * atan2 ( b , a ) ) * ( cos ( q ) + i * sin ( q ) ) , <nl> + / / where q = c * atan2 ( b , a ) + 0 . 5d * ln ( a * a + b * b ) <nl> + PrimitiveType component_type = <nl> + primitive_util : : ComplexComponentType ( op - > shape ( ) . element_type ( ) ) ; <nl> + auto a = EmitExtractReal ( lhs_value ) ; <nl> + auto b = EmitExtractImag ( lhs_value ) ; <nl> + auto c = EmitExtractReal ( rhs_value ) ; <nl> + auto d = EmitExtractImag ( rhs_value ) ; <nl> + auto aa_p_bb = ir_builder_ - > CreateFAdd ( ir_builder_ - > CreateFMul ( a , a ) , <nl> + ir_builder_ - > CreateFMul ( b , b ) ) ; <nl> + auto one_half = llvm : : ConstantFP : : get ( a - > getType ( ) , 0 . 5 ) ; <nl> + auto half_c = ir_builder_ - > CreateFMul ( one_half , c ) ; <nl> + <nl> + TF_ASSIGN_OR_RETURN ( auto aa_p_bb_to_half_c , <nl> + EmitPow ( component_type , aa_p_bb , half_c ) ) ; <nl> + auto neg_d = ir_builder_ - > CreateFNeg ( d ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto arg_lhs , EmitAtan2 ( component_type , b , a ) ) ; <nl> + auto neg_d_arg_lhs = ir_builder_ - > CreateFMul ( neg_d , arg_lhs ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto e_to_neg_d_arg_lhs , <nl> + EmitExp ( component_type , neg_d_arg_lhs ) ) ; <nl> + auto coeff = <nl> + ir_builder_ - > CreateFMul ( aa_p_bb_to_half_c , e_to_neg_d_arg_lhs ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto ln_aa_p_bb , EmitLog ( component_type , aa_p_bb ) ) ; <nl> + auto half_d = ir_builder_ - > CreateFMul ( one_half , d ) ; <nl> + auto q = <nl> + ir_builder_ - > CreateFAdd ( ir_builder_ - > CreateFMul ( c , arg_lhs ) , <nl> + ir_builder_ - > CreateFMul ( half_d , ln_aa_p_bb ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto cos_q , EmitCos ( component_type , q ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto sin_q , EmitSin ( component_type , q ) ) ; <nl> + return EmitComposeComplex ( op , ir_builder_ - > CreateFMul ( coeff , cos_q ) , <nl> + ir_builder_ - > CreateFMul ( coeff , sin_q ) ) ; <nl> + } <nl> default : <nl> return Unimplemented ( " binary complex op ' % s ' " , <nl> HloOpcodeString ( op - > opcode ( ) ) . 
c_str ( ) ) ; <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitErfcInv ( <nl> return EmitErfInv ( prim_type , ir_builder_ - > CreateFSub ( one , value ) ) ; <nl> } <nl> <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitLog ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const { <nl> + return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : log , { value } , <nl> + { value - > getType ( ) } , ir_builder_ ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitSin ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const { <nl> + return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : sin , { value } , <nl> + { value - > getType ( ) } , ir_builder_ ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitCos ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const { <nl> + return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : cos , { value } , <nl> + { value - > getType ( ) } , ir_builder_ ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitExp ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const { <nl> + return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : exp , { value } , <nl> + { value - > getType ( ) } , ir_builder_ ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitPow ( PrimitiveType prim_type , <nl> + llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const { <nl> + return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : pow , { lhs , rhs } , <nl> + { lhs - > getType ( ) } , ir_builder_ ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitAtan2 ( PrimitiveType prim_type , <nl> + llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const { <nl> + return Unimplemented ( " atan2 " ) ; <nl> + } <nl> + <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitReducePrecision ( <nl> const HloInstruction * hlo , llvm : : Value * x ) const { <nl> if ( hlo - > operand ( 0 ) - > shape ( ) . element_type ( ) ! = F32 ) { <nl> mmm a / tensorflow / compiler / xla / service / elemental_ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / elemental_ir_emitter . 
h <nl> class ElementalIrEmitter { <nl> module_ ( module ) , <nl> hlo_module_config_ ( hlo_module_config ) { } <nl> <nl> - virtual ~ ElementalIrEmitter ( ) { } <nl> + virtual ~ ElementalIrEmitter ( ) = default ; <nl> <nl> virtual StatusOr < llvm : : Value * > EmitUnaryOp ( const HloInstruction * op , <nl> llvm : : Value * operand_value ) const ; <nl> class ElementalIrEmitter { <nl> virtual StatusOr < llvm : : Value * > EmitErfcInv ( PrimitiveType prim_type , <nl> llvm : : Value * value ) const ; <nl> <nl> + virtual StatusOr < llvm : : Value * > EmitAtan2 ( PrimitiveType prim_type , <nl> + llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const ; <nl> + <nl> + virtual StatusOr < llvm : : Value * > EmitLog ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const ; <nl> + <nl> + virtual StatusOr < llvm : : Value * > EmitSin ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const ; <nl> + <nl> + virtual StatusOr < llvm : : Value * > EmitCos ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const ; <nl> + <nl> + virtual StatusOr < llvm : : Value * > EmitExp ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const ; <nl> + <nl> + virtual StatusOr < llvm : : Value * > EmitPow ( PrimitiveType prim_type , <nl> + llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const ; <nl> + <nl> virtual StatusOr < llvm : : Value * > EmitReducePrecision ( const HloInstruction * hlo , <nl> llvm : : Value * x ) const ; <nl> <nl> mmm a / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . cc <nl> StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitFloatBinaryOp ( <nl> PrimitiveType rhs_input_type = op - > operand ( 1 ) - > shape ( ) . element_type ( ) ; <nl> PrimitiveType output_type = op - > shape ( ) . 
element_type ( ) ; <nl> switch ( op - > opcode ( ) ) { <nl> - case HloOpcode : : kAtan2 : <nl> - return EmitLibdeviceMathCall ( " __nv_atan2 " , { lhs_value , rhs_value } , <nl> - { lhs_input_type , rhs_input_type } , <nl> - output_type ) ; <nl> case HloOpcode : : kRemainder : { <nl> return EmitLibdeviceMathCall ( " __nv_fmod " , { lhs_value , rhs_value } , <nl> { lhs_input_type , rhs_input_type } , <nl> StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitErfcInv ( <nl> return EmitLibdeviceMathCall ( " __nv_erfcinv " , { value } , { prim_type } , prim_type ) ; <nl> } <nl> <nl> + StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitLog ( <nl> + PrimitiveType prim_type , llvm : : Value * value ) const { <nl> + return EmitLibdeviceMathCall ( " __nv_log " , { value } , { prim_type } , prim_type ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitSin ( <nl> + PrimitiveType prim_type , llvm : : Value * value ) const { <nl> + return EmitLibdeviceMathCall ( " __nv_sin " , { value } , { prim_type } , prim_type ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitCos ( <nl> + PrimitiveType prim_type , llvm : : Value * value ) const { <nl> + return EmitLibdeviceMathCall ( " __nv_cos " , { value } , { prim_type } , prim_type ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitExp ( <nl> + PrimitiveType prim_type , llvm : : Value * value ) const { <nl> + return EmitLibdeviceMathCall ( " __nv_exp " , { value } , { prim_type } , prim_type ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitPow ( PrimitiveType prim_type , <nl> + llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const { <nl> + return EmitLibdeviceMathCall ( " __nv_pow " , { lhs , rhs } , { prim_type , prim_type } , <nl> + prim_type ) ; <nl> + } <nl> + <nl> + StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitAtan2 ( <nl> + PrimitiveType prim_type , llvm : : Value * lhs , llvm : : Value * rhs ) const { <nl> + return EmitLibdeviceMathCall ( " __nv_atan2 " , { lhs , rhs } , { prim_type , prim_type } , <nl> + prim_type ) ; <nl> + } <nl> + <nl> StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitFloatUnaryOp ( <nl> const HloInstruction * op , llvm : : Value * operand_value ) const { <nl> PrimitiveType input_type = op - > operand ( 0 ) - > shape ( ) . element_type ( ) ; <nl> PrimitiveType output_type = op - > shape ( ) . 
element_type ( ) ; <nl> switch ( op - > opcode ( ) ) { <nl> - case HloOpcode : : kExp : <nl> - return EmitLibdeviceMathCall ( " __nv_exp " , { operand_value } , { input_type } , <nl> - output_type ) ; <nl> case HloOpcode : : kFloor : <nl> return EmitLibdeviceMathCall ( " __nv_floor " , { operand_value } , { input_type } , <nl> output_type ) ; <nl> case HloOpcode : : kCeil : <nl> return EmitLibdeviceMathCall ( " __nv_ceil " , { operand_value } , { input_type } , <nl> output_type ) ; <nl> - case HloOpcode : : kLog : <nl> - return EmitLibdeviceMathCall ( " __nv_log " , { operand_value } , { input_type } , <nl> - output_type ) ; <nl> - case HloOpcode : : kCos : <nl> - return EmitLibdeviceMathCall ( " __nv_cos " , { operand_value } , { input_type } , <nl> - output_type ) ; <nl> - case HloOpcode : : kSin : <nl> - return EmitLibdeviceMathCall ( " __nv_sin " , { operand_value } , { input_type } , <nl> - output_type ) ; <nl> case HloOpcode : : kTanh : <nl> return EmitLibdeviceMathCall ( " __nv_tanh " , { operand_value } , { input_type } , <nl> output_type ) ; <nl> StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitFloatUnaryOp ( <nl> } <nl> } <nl> <nl> - StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitComplexBinaryOp ( <nl> - const HloInstruction * op , llvm : : Value * lhs_value , <nl> - llvm : : Value * rhs_value ) const { <nl> - PrimitiveType input_type = op - > operand ( 0 ) - > shape ( ) . element_type ( ) ; <nl> - TF_RET_CHECK ( primitive_util : : IsComplexType ( input_type ) ) ; <nl> - PrimitiveType component_type = <nl> - primitive_util : : ComplexComponentType ( input_type ) ; <nl> - switch ( op - > opcode ( ) ) { <nl> - case HloOpcode : : kPower : { <nl> - / / ( a + bi ) ^ ( c + di ) = <nl> - / / ( a * a + b * b ) ^ ( 0 . 5c ) * exp ( - d * atan2 ( b , a ) ) * ( cos ( q ) + i * sin ( q ) ) , <nl> - / / where q = c * atan2 ( b , a ) + 0 . 5d * ln ( a * a + b * b ) <nl> - auto a = EmitExtractReal ( lhs_value ) ; <nl> - auto b = EmitExtractImag ( lhs_value ) ; <nl> - auto c = EmitExtractReal ( rhs_value ) ; <nl> - auto d = EmitExtractImag ( rhs_value ) ; <nl> - auto aa_p_bb = ir_builder_ - > CreateFAdd ( ir_builder_ - > CreateFMul ( a , a ) , <nl> - ir_builder_ - > CreateFMul ( b , b ) ) ; <nl> - auto one_half = llvm : : ConstantFP : : get ( a - > getType ( ) , 0 . 
5 ) ; <nl> - auto half_c = ir_builder_ - > CreateFMul ( one_half , c ) ; <nl> - <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto aa_p_bb_to_half_c , <nl> - EmitLibdeviceMathCall ( " __nv_pow " , { aa_p_bb , half_c } , <nl> - { component_type , component_type } , <nl> - component_type ) ) ; <nl> - auto neg_d = ir_builder_ - > CreateFNeg ( d ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto arg_lhs , EmitLibdeviceMathCall ( " __nv_atan2 " , { b , a } , <nl> - { component_type , component_type } , <nl> - component_type ) ) ; <nl> - auto neg_d_arg_lhs = ir_builder_ - > CreateFMul ( neg_d , arg_lhs ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto e_to_neg_d_arg_lhs , <nl> - EmitLibdeviceMathCall ( " __nv_exp " , { neg_d_arg_lhs } , { component_type } , <nl> - component_type ) ) ; <nl> - auto coeff = <nl> - ir_builder_ - > CreateFMul ( aa_p_bb_to_half_c , e_to_neg_d_arg_lhs ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto ln_aa_p_bb , <nl> - EmitLibdeviceMathCall ( " __nv_log " , { aa_p_bb } , { component_type } , <nl> - component_type ) ) ; <nl> - auto half_d = ir_builder_ - > CreateFMul ( one_half , d ) ; <nl> - auto q = <nl> - ir_builder_ - > CreateFAdd ( ir_builder_ - > CreateFMul ( c , arg_lhs ) , <nl> - ir_builder_ - > CreateFMul ( half_d , ln_aa_p_bb ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto cos_q , EmitLibdeviceMathCall ( " __nv_cos " , { q } , { component_type } , <nl> - component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto sin_q , EmitLibdeviceMathCall ( " __nv_sin " , { q } , { component_type } , <nl> - component_type ) ) ; <nl> - return EmitComposeComplex ( op , ir_builder_ - > CreateFMul ( coeff , cos_q ) , <nl> - ir_builder_ - > CreateFMul ( coeff , sin_q ) ) ; <nl> - } <nl> - default : <nl> - return ElementalIrEmitter : : EmitComplexBinaryOp ( op , lhs_value , rhs_value ) ; <nl> - } <nl> - } <nl> - <nl> - StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitComplexUnaryOp ( <nl> - const HloInstruction * op , llvm : : Value * operand_value ) const { <nl> - PrimitiveType input_type = op - > operand ( 0 ) - > shape ( ) . element_type ( ) ; <nl> - PrimitiveType component_type = <nl> - primitive_util : : IsComplexType ( input_type ) <nl> - ? primitive_util : : ComplexComponentType ( input_type ) <nl> - : input_type ; <nl> - <nl> - switch ( op - > opcode ( ) ) { <nl> - case HloOpcode : : kLog : { <nl> - / / log ( a + bi ) = . 5 * log ( a ^ 2 + b ^ 2 ) + i * atan2 ( b , a ) <nl> - auto a = EmitExtractReal ( operand_value ) ; <nl> - auto b = EmitExtractImag ( operand_value ) ; <nl> - llvm : : Type * llvm_ty = a - > getType ( ) ; <nl> - auto sum_sq = ir_builder_ - > CreateFAdd ( ir_builder_ - > CreateFMul ( a , a ) , <nl> - ir_builder_ - > CreateFMul ( b , b ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto log_sum_sq , <nl> - EmitLibdeviceMathCall ( " __nv_log " , { sum_sq } , { component_type } , <nl> - component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto angle , EmitLibdeviceMathCall ( " __nv_atan2 " , { b , a } , <nl> - { component_type , component_type } , <nl> - component_type ) ) ; <nl> - auto one_half = llvm : : ConstantFP : : get ( llvm_ty , 0 . 
5 ) ; <nl> - return EmitComposeComplex ( <nl> - op , ir_builder_ - > CreateFMul ( one_half , log_sum_sq ) , angle ) ; <nl> - } <nl> - case HloOpcode : : kExp : { <nl> - / / e ^ ( a + bi ) = e ^ a * ( cos ( b ) + sin ( b ) i ) <nl> - auto b = EmitExtractImag ( operand_value ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto exp_a , <nl> - EmitLibdeviceMathCall ( " __nv_exp " , { EmitExtractReal ( operand_value ) } , <nl> - { component_type } , component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto cos_b , EmitLibdeviceMathCall ( " __nv_cos " , { b } , { component_type } , <nl> - component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto sin_b , EmitLibdeviceMathCall ( " __nv_sin " , { b } , { component_type } , <nl> - component_type ) ) ; <nl> - return EmitComposeComplex ( op , ir_builder_ - > CreateFMul ( exp_a , cos_b ) , <nl> - ir_builder_ - > CreateFMul ( exp_a , sin_b ) ) ; <nl> - } <nl> - case HloOpcode : : kCos : { <nl> - / / cos ( a + bi ) = . 5 ( cos ( a ) * ( e ^ - b + e ^ b ) + i * sin ( a ) * ( e ^ - b - e ^ b ) ) <nl> - auto a = EmitExtractReal ( operand_value ) ; <nl> - auto llvm_ty = a - > getType ( ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto exp_b , <nl> - EmitLibdeviceMathCall ( " __nv_exp " , { EmitExtractImag ( operand_value ) } , <nl> - { component_type } , component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto cos_a , EmitLibdeviceMathCall ( " __nv_cos " , { a } , { component_type } , <nl> - component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto sin_a , EmitLibdeviceMathCall ( " __nv_sin " , { a } , { component_type } , <nl> - component_type ) ) ; <nl> - auto half_exp_b = <nl> - ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) , exp_b ) ; <nl> - auto half_exp_neg_b = <nl> - ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) , exp_b ) ; <nl> - return EmitComposeComplex ( <nl> - op , <nl> - ir_builder_ - > CreateFMul ( <nl> - cos_a , ir_builder_ - > CreateFAdd ( half_exp_neg_b , half_exp_b ) ) , <nl> - ir_builder_ - > CreateFMul ( <nl> - sin_a , ir_builder_ - > CreateFSub ( half_exp_neg_b , half_exp_b ) ) ) ; <nl> - } <nl> - <nl> - case HloOpcode : : kSin : { <nl> - / / sin ( a + bi ) = 0 . 5 ( sin ( a ) * ( e ^ b + e ^ - b ) + i * cos ( a ) * ( e ^ b - e ^ - b ) <nl> - auto a = EmitExtractReal ( operand_value ) ; <nl> - auto llvm_ty = a - > getType ( ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto exp_b , <nl> - EmitLibdeviceMathCall ( " __nv_exp " , { EmitExtractImag ( operand_value ) } , <nl> - { component_type } , component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto cos_a , EmitLibdeviceMathCall ( " __nv_cos " , { a } , { component_type } , <nl> - component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto sin_a , EmitLibdeviceMathCall ( " __nv_sin " , { a } , { component_type } , <nl> - component_type ) ) ; <nl> - auto half_exp_b = <nl> - ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) , exp_b ) ; <nl> - auto half_exp_neg_b = <nl> - ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 
5 ) , exp_b ) ; <nl> - return EmitComposeComplex ( <nl> - op , <nl> - ir_builder_ - > CreateFMul ( <nl> - sin_a , ir_builder_ - > CreateFAdd ( half_exp_b , half_exp_neg_b ) ) , <nl> - ir_builder_ - > CreateFMul ( <nl> - cos_a , ir_builder_ - > CreateFSub ( half_exp_b , half_exp_neg_b ) ) ) ; <nl> - } <nl> - case HloOpcode : : kTanh : { <nl> - / * <nl> - tanh = ( exp ( x ) - exp ( - x ) ) / ( exp ( x ) + exp ( - x ) ) <nl> - e ^ ( a + bi ) = e ^ a * ( cos ( b ) + sin ( b ) i ) <nl> - so tanh = ( ( ( cos ( b ) + sin ( b ) i ) e ^ a - ( cos ( - b ) + sin ( - b ) i ) e ^ - a ) ) / <nl> - ( ( ( cos ( b ) + sin ( b ) i ) e ^ a + ( cos ( - b ) + sin ( - b ) i ) e ^ - a ) ) <nl> - cos ( b ) = cos ( - b ) , sin ( - b ) = - sin ( b ) <nl> - so tanh = ( ( ( cos ( b ) + sin ( b ) i ) e ^ a - ( cos ( b ) - sin ( b ) i ) e ^ - a ) ) / <nl> - ( ( ( cos ( b ) + sin ( b ) i ) e ^ a + ( cos ( b ) - sin ( b ) i ) e ^ - a ) ) <nl> - = ( cos ( b ) e ^ a + i * sin ( b ) e ^ a + cos ( b ) ( - e ^ - a ) + i * sin ( b ) e ^ - a ) / <nl> - ( cos ( b ) e ^ a + i * sin ( b ) e ^ a + cos ( b ) e ^ - a + i * sin ( b ) ( - e ^ - a ) ) <nl> - = ( cos ( b ) ( e ^ a - e ^ - a ) + i * sin ( b ) ( e ^ a + e ^ - a ) ) / <nl> - ( cos ( b ) ( e ^ a + e ^ - a ) + i * sin ( b ) ( e ^ a - e ^ - a ) ) <nl> - This is a complex division , so we can multiply by denom_conj / denom_conj <nl> - = ( cos ( b ) ( e ^ a - e ^ - a ) + i * sin ( b ) ( e ^ a + e ^ - a ) ) * <nl> - ( cos ( b ) ( e ^ a + e ^ - a ) - i * sin ( b ) ( e ^ a - e ^ - a ) ) / <nl> - ( ( cos ( b ) ( e ^ a + e ^ - a ) ) ^ 2 + ( sin ( b ) ( e ^ a - e ^ - a ) ) ^ 2 ) <nl> - = ( cos ( b ) ^ 2 ( e ^ ( 2a ) - e ^ ( - 2a ) ) + sin ( b ) ^ 2 ( e ^ ( 2a ) - e ^ ( - 2a ) ) + <nl> - i * ( cos ( b ) sin ( b ) ( e ^ a + e ^ - a ) ^ 2 - cos ( b ) sin ( b ) ( e ^ a - e ^ - a ) ^ 2 ) ) / <nl> - ( ( cos ( b ) ( e ^ a + e ^ - a ) ) ^ 2 + ( sin ( b ) ( e ^ a - e ^ - a ) ) ^ 2 ) <nl> - * / <nl> - auto a = EmitExtractReal ( operand_value ) ; <nl> - auto b = EmitExtractImag ( operand_value ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto exp_a , EmitLibdeviceMathCall ( " __nv_exp " , { a } , { component_type } , <nl> - component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto cos_b , EmitLibdeviceMathCall ( " __nv_cos " , { b } , { component_type } , <nl> - component_type ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto sin_b , EmitLibdeviceMathCall ( " __nv_sin " , { b } , { component_type } , <nl> - component_type ) ) ; <nl> - auto exp_neg_a = ir_builder_ - > CreateFDiv ( <nl> - llvm : : ConstantFP : : get ( exp_a - > getType ( ) , 1 ) , exp_a ) ; <nl> - auto exp_2a_minus_exp_neg_2a = ir_builder_ - > CreateFSub ( <nl> - ir_builder_ - > CreateFMul ( exp_a , exp_a ) , <nl> - ir_builder_ - > CreateFMul ( exp_neg_a , exp_neg_a ) ) ; <nl> - auto cos_b_sq = ir_builder_ - > CreateFMul ( cos_b , cos_b ) ; <nl> - auto sin_b_sq = ir_builder_ - > CreateFMul ( sin_b , sin_b ) ; <nl> - auto real_num = ir_builder_ - > CreateFAdd ( <nl> - ir_builder_ - > CreateFMul ( cos_b_sq , exp_2a_minus_exp_neg_2a ) , <nl> - ir_builder_ - > CreateFMul ( sin_b_sq , exp_2a_minus_exp_neg_2a ) ) ; <nl> - auto cos_b_sin_b = ir_builder_ - > CreateFMul ( cos_b , sin_b ) ; <nl> - auto exp_a_plus_exp_neg_a = ir_builder_ - > CreateFAdd ( exp_a , exp_neg_a ) ; <nl> - auto exp_a_plus_exp_neg_a_sq = <nl> - ir_builder_ - > CreateFMul ( exp_a_plus_exp_neg_a , exp_a_plus_exp_neg_a ) ; <nl> - auto exp_a_minus_exp_neg_a = ir_builder_ - > CreateFSub ( exp_a , exp_neg_a ) ; <nl> - auto exp_a_minus_exp_neg_a_sq = <nl> - ir_builder_ - > 
CreateFMul ( exp_a_minus_exp_neg_a , exp_a_minus_exp_neg_a ) ; <nl> - auto imag_num = ir_builder_ - > CreateFMul ( <nl> - cos_b_sin_b , ir_builder_ - > CreateFSub ( exp_a_plus_exp_neg_a_sq , <nl> - exp_a_minus_exp_neg_a_sq ) ) ; <nl> - auto denom = ir_builder_ - > CreateFAdd ( <nl> - ir_builder_ - > CreateFMul ( cos_b_sq , exp_a_plus_exp_neg_a_sq ) , <nl> - ir_builder_ - > CreateFMul ( sin_b_sq , exp_a_minus_exp_neg_a_sq ) ) ; <nl> - return EmitComposeComplex ( op , ir_builder_ - > CreateFDiv ( real_num , denom ) , <nl> - ir_builder_ - > CreateFDiv ( imag_num , denom ) ) ; <nl> - } <nl> - default : <nl> - return ElementalIrEmitter : : EmitComplexUnaryOp ( op , operand_value ) ; <nl> - } <nl> - } <nl> - <nl> llvm : : Value * GpuElementalIrEmitter : : EmitDeviceFunctionCall ( <nl> const string & callee_name , <nl> tensorflow : : gtl : : ArraySlice < llvm : : Value * > operands , <nl> mmm a / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . h <nl> class GpuElementalIrEmitter : public ElementalIrEmitter { <nl> StatusOr < llvm : : Value * > EmitFloatUnaryOp ( <nl> const HloInstruction * op , llvm : : Value * operand_value ) const override ; <nl> <nl> - StatusOr < llvm : : Value * > EmitComplexUnaryOp ( <nl> - const HloInstruction * op , llvm : : Value * operand_value ) const override ; <nl> - <nl> StatusOr < llvm : : Value * > EmitFloatBinaryOp ( <nl> const HloInstruction * op , llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const override ; <nl> <nl> - StatusOr < llvm : : Value * > EmitComplexBinaryOp ( <nl> - const HloInstruction * op , llvm : : Value * lhs_value , <nl> - llvm : : Value * rhs_value ) const override ; <nl> - <nl> StatusOr < llvm : : Value * > EmitErfcInv ( PrimitiveType prim_type , <nl> llvm : : Value * value ) const override ; <nl> <nl> + StatusOr < llvm : : Value * > EmitLog ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const override ; <nl> + <nl> + StatusOr < llvm : : Value * > EmitSin ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const override ; <nl> + <nl> + StatusOr < llvm : : Value * > EmitCos ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const override ; <nl> + <nl> + StatusOr < llvm : : Value * > EmitExp ( PrimitiveType prim_type , <nl> + llvm : : Value * value ) const override ; <nl> + <nl> + StatusOr < llvm : : Value * > EmitPow ( PrimitiveType prim_type , llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const override ; <nl> + <nl> + StatusOr < llvm : : Value * > EmitAtan2 ( PrimitiveType prim_type , llvm : : Value * lhs , <nl> + llvm : : Value * rhs ) const override ; <nl> + <nl> llvm : : Value * EmitThreadId ( ) const override ; <nl> <nl> private : <nl> mmm a / tensorflow / compiler / xla / service / gpu / gemm_thunk . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / gemm_thunk . cc <nl> tensorflow : : Status GemmThunk : : ExecuteOnStream ( <nl> <nl> auto make_descriptor = [ this ] ( se : : DeviceMemoryBase data , const Shape & shape , <nl> bool transpose ) - > MatrixDescriptor { <nl> - bool is_row_major = shape . layout ( ) . minor_to_major ( 0 ) ! = 0 ; <nl> - bool layout_mismatch = shape . layout ( ) . minor_to_major ( 0 ) ! = <nl> - output_shape_ . layout ( ) . minor_to_major ( 0 ) ; <nl> + bool is_row_major = LayoutUtil : : Minor ( shape . layout ( ) , 0 ) ! = 0 ; <nl> + bool layout_mismatch = LayoutUtil : : Minor ( shape . layout ( ) , 0 ) ! 
= <nl> + LayoutUtil : : Minor ( output_shape_ . layout ( ) , 0 ) ; <nl> return MatrixDescriptor ( data , transpose ^ layout_mismatch , <nl> shape . dimensions ( is_row_major ) , <nl> shape . dimensions ( ! is_row_major ) ) ; <nl> tensorflow : : Status GemmThunk : : ExecuteOnStream ( <nl> } ; <nl> <nl> bool launch_ok ; <nl> - if ( output_shape_ . layout ( ) . minor_to_major ( 0 ) = = 0 ) { <nl> + if ( LayoutUtil : : Minor ( output_shape_ . layout ( ) , 0 ) = = 0 ) { <nl> launch_ok = launch ( <nl> lhs_descriptor , rhs_descriptor , <nl> MatrixDescriptor ( output_data , false , output_num_rows , output_num_cols ) , <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter . cc <nl> llvm_ir : : IrArray : : Index IrEmitter : : EmitOperandArrayLoopNest ( <nl> / / reduction dimension . <nl> std : : vector < int64 > dimensions ; <nl> const Shape & shape = operand_array . GetShape ( ) ; <nl> - for ( int i = shape . layout ( ) . minor_to_major_size ( ) - 1 ; i > = 0 ; - - i ) { <nl> - int64 dimension = shape . layout ( ) . minor_to_major ( i ) ; <nl> + for ( int i = 0 ; i < LayoutUtil : : MinorToMajor ( shape ) . size ( ) ; + + i ) { <nl> + int64 dimension = LayoutUtil : : Major ( shape . layout ( ) , i ) ; <nl> if ( dimension ! = reduction_dimension ) { <nl> dimensions . push_back ( dimension ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter_unnested . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter_unnested . cc <nl> std : : tuple < bool , Shape , Shape > IsTranspose021 ( const Shape & a , const Shape & b ) { <nl> CHECK ( ShapeUtil : : Compatible ( a , b ) ) ; <nl> std : : vector < int64 > perm ( a . dimensions ( ) . size ( ) ) ; <nl> { <nl> - std : : vector < int64 > layout_a ( a . layout ( ) . minor_to_major ( ) . rbegin ( ) , <nl> - a . layout ( ) . minor_to_major ( ) . rend ( ) ) ; <nl> - std : : vector < int64 > layout_b ( b . layout ( ) . minor_to_major ( ) . rbegin ( ) , <nl> - b . layout ( ) . minor_to_major ( ) . rend ( ) ) ; <nl> + auto layout_a_orig = LayoutUtil : : MinorToMajor ( a ) ; <nl> + std : : vector < int64 > layout_a ( layout_a_orig . rbegin ( ) , layout_a_orig . rend ( ) ) ; <nl> + auto layout_b_orig = LayoutUtil : : MinorToMajor ( b ) ; <nl> + std : : vector < int64 > layout_b ( layout_b_orig . rbegin ( ) , layout_b_orig . rend ( ) ) ; <nl> for ( size_t i = 0 ; i < perm . size ( ) ; + + i ) { <nl> perm [ i ] = PositionInContainer ( layout_b , layout_a [ i ] ) ; <nl> } <nl> Status IrEmitterUnnested : : EmitColumnReduction ( <nl> / / normalized_input_shape to input_matrix_shape . <nl> const Shape normalized_input_shape = <nl> ShapeUtil : : NormalizeShapeToMonotonicDim0MajorLayout ( input_shape ) ; <nl> + auto input_shape_min2maj = LayoutUtil : : MinorToMajor ( input_shape ) ; <nl> const std : : vector < int64 > transpose_dimension_mapping ( <nl> - input_shape . layout ( ) . minor_to_major ( ) . rbegin ( ) , <nl> - input_shape . layout ( ) . minor_to_major ( ) . rend ( ) ) ; <nl> + input_shape_min2maj . rbegin ( ) , input_shape_min2maj . rend ( ) ) ; <nl> <nl> const Shape input_matrix_shape = <nl> ShapeUtil : : MakeShapeWithMonotonicDim0MajorLayout ( <nl> Status IrEmitterUnnested : : EmitRowReduction ( <nl> / / normalized_input_shape to input_3d_tensor_shape . 
<nl> const Shape normalized_input_shape = <nl> ShapeUtil : : NormalizeShapeToMonotonicDim0MajorLayout ( input_shape ) ; <nl> + auto input_shape_min2maj = LayoutUtil : : MinorToMajor ( input_shape ) ; <nl> const std : : vector < int64 > transpose_dimension_mapping ( <nl> - input_shape . layout ( ) . minor_to_major ( ) . rbegin ( ) , <nl> - input_shape . layout ( ) . minor_to_major ( ) . rend ( ) ) ; <nl> + input_shape_min2maj . rbegin ( ) , input_shape_min2maj . rend ( ) ) ; <nl> const Shape input_3d_tensor_shape = <nl> ShapeUtil : : MakeShapeWithMonotonicDim0MajorLayout ( <nl> input_shape . element_type ( ) , { depth , height , width } ) ; <nl> Status IrEmitterUnnested : : EmitReductionToVector ( <nl> / / whether another dimension is major or minor of them . <nl> std : : sort ( input_dims_to_keep . begin ( ) , input_dims_to_keep . end ( ) , <nl> [ & input_shape ] ( int64 dim_a , int64 dim_b ) { <nl> - return PositionInContainer ( input_shape . layout ( ) . minor_to_major ( ) , <nl> + return PositionInContainer ( LayoutUtil : : MinorToMajor ( input_shape ) , <nl> dim_a ) < <nl> - PositionInContainer ( input_shape . layout ( ) . minor_to_major ( ) , <nl> + PositionInContainer ( LayoutUtil : : MinorToMajor ( input_shape ) , <nl> dim_b ) ; <nl> } ) ; <nl> / / Now , if output rank is at least 1 , ` input_dims_to_keep . front ( ) ` is <nl> Status IrEmitterUnnested : : EmitReductionToVector ( <nl> int64 width = 1 ; <nl> for ( int64 input_dim = 0 ; input_dim < ShapeUtil : : Rank ( input_shape ) ; <nl> + + input_dim ) { <nl> - if ( PositionInContainer ( input_shape . layout ( ) . minor_to_major ( ) , <nl> + if ( PositionInContainer ( LayoutUtil : : MinorToMajor ( input_shape ) , <nl> input_dim ) > <nl> - PositionInContainer ( input_shape . layout ( ) . minor_to_major ( ) , <nl> + PositionInContainer ( LayoutUtil : : MinorToMajor ( input_shape ) , <nl> input_dims_to_keep . back ( ) ) ) { <nl> depth * = input_shape . dimensions ( input_dim ) ; <nl> - } else if ( PositionInContainer ( input_shape . layout ( ) . minor_to_major ( ) , <nl> + } else if ( PositionInContainer ( LayoutUtil : : MinorToMajor ( input_shape ) , <nl> input_dim ) < <nl> - PositionInContainer ( input_shape . layout ( ) . minor_to_major ( ) , <nl> + PositionInContainer ( LayoutUtil : : MinorToMajor ( input_shape ) , <nl> input_dims_to_keep . front ( ) ) ) { <nl> width * = input_shape . dimensions ( input_dim ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / hlo_graph_dumper . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_graph_dumper . cc <nl> string HloDotDumper : : GetInstructionNodeExtraInfo ( const HloInstruction * instr ) { <nl> instr - > shape ( ) . dimensions_size ( ) > 1 & & <nl> ! ShapeUtil : : IsTuple ( instr - > shape ( ) ) ) { <nl> StrAppend ( & instr_shape , " { " , <nl> - Join ( instr - > shape ( ) . layout ( ) . minor_to_major ( ) , " , " ) , " } " ) ; <nl> + Join ( LayoutUtil : : MinorToMajor ( instr - > shape ( ) ) , " , " ) , " } " ) ; <nl> } <nl> <nl> / / Some instructions have giant tuples as their shapes , so truncate the <nl> mmm a / tensorflow / compiler / xla / service / hlo_module . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_module . h <nl> class HloModule { <nl> return config_ . mutable_entry_computation_layout ( ) ; <nl> } <nl> <nl> + ComputationLayout entry_computation_layout ( ) const { <nl> + return config_ . 
entry_computation_layout ( ) ; <nl> + } <nl> + <nl> const VersionedComputationHandle & entry_computation_handle ( ) const { <nl> return entry_computation_handle_ ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / hlo_tfgraph_builder . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_tfgraph_builder . cc <nl> void HloTfGraphBuilder : : SetNodeAttrs ( const HloInstruction * instruction , <nl> layout_string = ShapeUtil : : HumanStringWithLayout ( instruction - > shape ( ) ) ; <nl> } else { <nl> layout_string = StrCat ( <nl> - " { " , Join ( instruction - > shape ( ) . layout ( ) . minor_to_major ( ) , " , " ) , " } " ) ; <nl> + " { " , Join ( LayoutUtil : : MinorToMajor ( instruction - > shape ( ) ) , " , " ) , " } " ) ; <nl> } <nl> attrs [ " layout " ] . set_s ( layout_string ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / layout_assignment . cc <nl> ppp b / tensorflow / compiler / xla / service / layout_assignment . cc <nl> Status CheckCallLayout ( HloInstruction * call , <nl> Status CheckCustomCallLayout ( HloInstruction * custom_call ) { <nl> for ( const HloInstruction * operand : custom_call - > operands ( ) ) { <nl> TF_RET_CHECK ( <nl> + ShapeUtil : : IsOpaque ( operand - > shape ( ) ) | | <nl> LayoutUtil : : IsMonotonicWithDim0Major ( operand - > shape ( ) . layout ( ) ) ) ; <nl> } <nl> TF_RET_CHECK ( <nl> + ShapeUtil : : IsOpaque ( custom_call - > shape ( ) ) | | <nl> LayoutUtil : : IsMonotonicWithDim0Major ( custom_call - > shape ( ) . layout ( ) ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> std : : unique_ptr < Layout > LayoutAssignment : : ChooseOperandLayoutFromOutputLayout ( <nl> int64 operand_no ) { <nl> const HloInstruction * operand = instruction - > operand ( operand_no ) ; <nl> <nl> - CHECK ( ShapeUtil : : IsArray ( instruction - > shape ( ) ) & & <nl> - ShapeUtil : : IsArray ( operand - > shape ( ) ) ) ; <nl> + CHECK ( ShapeUtil : : IsArray ( instruction - > shape ( ) ) ) ; <nl> + CHECK ( ShapeUtil : : IsArray ( operand - > shape ( ) ) ) ; <nl> <nl> if ( instruction - > IsElementwiseOnOperand ( operand_no ) & & <nl> ! ShapeUtil : : IsScalar ( operand - > shape ( ) ) & & <nl> std : : unique_ptr < Layout > LayoutAssignment : : ChooseOperandLayoutFromOutputLayout ( <nl> const Shape & output_shape = instruction - > shape ( ) ; <nl> Shape output_shape_with_layout = ShapeUtil : : MakeShapeWithLayout ( <nl> output_shape . element_type ( ) , AsInt64Slice ( output_shape . dimensions ( ) ) , <nl> - AsInt64Slice ( output_layout . minor_to_major ( ) ) ) ; <nl> + LayoutUtil : : MinorToMajor ( output_layout ) ) ; <nl> Shape operand_shape = operand - > shape ( ) ; <nl> * operand_shape . mutable_layout ( ) = <nl> LayoutUtil : : GetDefaultLayoutForShape ( operand_shape ) ; <nl> std : : unique_ptr < Layout > LayoutAssignment : : ChooseOperandLayoutFromOutputLayout ( <nl> int64 rank = ShapeUtil : : Rank ( instruction - > shape ( ) ) ; <nl> std : : vector < int64 > new_minor_to_major ( rank ) ; <nl> for ( int64 i = 0 ; i < rank ; + + i ) { <nl> - int64 output_dim = output_layout . minor_to_major ( i ) ; <nl> + int64 output_dim = LayoutUtil : : Minor ( output_layout , i ) ; <nl> int64 operand_dim = instruction - > dimensions ( output_dim ) ; <nl> new_minor_to_major [ i ] = operand_dim ; <nl> } <nl> std : : unique_ptr < Layout > LayoutAssignment : : ChooseOutputLayoutFromOperandLayout ( <nl> Shape operand_shape_with_layout = ShapeUtil : : MakeShapeWithLayout ( <nl> operand - > shape ( ) . element_type ( ) , <nl> AsInt64Slice ( operand - > shape ( ) . 
dimensions ( ) ) , <nl> - AsInt64Slice ( operand_layout . minor_to_major ( ) ) ) ; <nl> + LayoutUtil : : MinorToMajor ( operand_layout ) ) ; <nl> Shape output_shape = user - > shape ( ) ; <nl> * output_shape . mutable_layout ( ) = <nl> LayoutUtil : : GetDefaultLayoutForShape ( output_shape ) ; <nl> std : : unique_ptr < Layout > LayoutAssignment : : ChooseOutputLayoutFromOperandLayout ( <nl> std : : vector < int64 > new_minor_to_major ( rank ) ; <nl> auto inverse_dimensions = InversePermutation ( user - > dimensions ( ) ) ; <nl> for ( int64 i = 0 ; i < rank ; + + i ) { <nl> - int64 operand_dim = operand_layout . minor_to_major ( i ) ; <nl> + int64 operand_dim = LayoutUtil : : Minor ( operand_layout , i ) ; <nl> int64 user_dim = inverse_dimensions [ operand_dim ] ; <nl> new_minor_to_major [ i ] = user_dim ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / ir_array . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / ir_array . cc <nl> IrArray : : Index : : Index ( llvm : : Value * linear , const Shape & shape , <nl> < < " Shape " < < ShapeUtil : : HumanStringWithLayout ( shape ) <nl> < < " should have a layout . " ; <nl> int64 divisor = 1 ; <nl> - for ( int64 dimension : layout_ . minor_to_major ( ) ) { <nl> + for ( int64 dimension : LayoutUtil : : MinorToMajor ( layout_ ) ) { <nl> int64 size_of_current_dimension = shape . dimensions ( dimension ) ; <nl> / / Emit IR instructions that compute <nl> / / ( linear_index / divisor ) % current_dimension <nl> llvm : : Value * IrArray : : EmitArrayElementAddress ( <nl> / / <nl> / / getelementptr base_ptr_ , 0 , most major index , . . . , most minor index <nl> std : : vector < llvm : : Value * > gep_indices ( 1 , ir_builder - > getInt64 ( 0 ) ) ; <nl> - for ( int64 i = shape_ - > layout ( ) . minor_to_major_size ( ) - 1 ; i > = 0 ; - - i ) { <nl> - int64 dimension = shape_ - > layout ( ) . minor_to_major ( i ) ; <nl> + for ( int64 i = 0 ; i < LayoutUtil : : MinorToMajor ( * shape_ ) . size ( ) ; + + i ) { <nl> + int64 dimension = LayoutUtil : : Major ( shape_ - > layout ( ) , i ) ; <nl> gep_indices . push_back ( actual_index [ dimension ] ) ; <nl> } <nl> return ir_builder - > CreateInBoundsGEP ( base_ptr_ , gep_indices , <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / llvm_util . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / llvm_util . cc <nl> llvm : : Type * ShapeToIrType ( const Shape & shape , llvm : : Module * module ) { <nl> if ( ShapeUtil : : IsTuple ( shape ) ) { <nl> / / A tuple buffer is an array of pointers . <nl> result_type = llvm : : ArrayType : : get ( result_type , shape . tuple_shapes_size ( ) ) ; <nl> - } else { <nl> - for ( int64 dimension : shape . layout ( ) . minor_to_major ( ) ) { <nl> + } else if ( ShapeUtil : : IsArray ( shape ) ) { <nl> + for ( int64 dimension : LayoutUtil : : MinorToMajor ( shape ) ) { <nl> result_type = <nl> llvm : : ArrayType : : get ( result_type , shape . dimensions ( dimension ) ) ; <nl> } <nl> llvm : : Constant * LiteralToConstant ( const Literal & literal , int64 dimension_index , <nl> / / decrements with each recursive call . We want to iterate through the <nl> / / dimensions in major - to - minor order as we recurse so just index into <nl> / / minor_to_major to get the dimension number for this level of the recursion . <nl> - int64 dimension = shape . layout ( ) . minor_to_major ( dimension_index ) ; <nl> + int64 dimension = LayoutUtil : : Minor ( shape . 
layout ( ) , dimension_index ) ; <nl> <nl> / / Recursively call LiteralToConstant to construct subarrays for the <nl> / / more - minor dimensions . Gather the subarrays into a vector for bundling into <nl> llvm : : Constant * LiteralToConstant ( const Literal & literal , int64 dimension_index , <nl> if ( elements . empty ( ) ) { <nl> element_type = ir_element_type ; <nl> for ( int i = 0 ; i < dimension_index ; + + i ) { <nl> - int64 index = shape . layout ( ) . minor_to_major ( i ) ; <nl> + int64 index = LayoutUtil : : Minor ( shape . layout ( ) , i ) ; <nl> element_type = <nl> llvm : : ArrayType : : get ( element_type , shape . dimensions ( index ) ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / loop_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / loop_emitter . cc <nl> IrArray : : Index LoopEmitter : : EmitIndexAndSetExitBasicBlock ( <nl> / / dimension ( of the target shape ) . <nl> ForLoopNest loop_nest ( loop_name , ir_builder_ ) ; <nl> IrArray : : Index array_index ( shape_ . dimensions_size ( ) ) ; <nl> - for ( int i = shape_ . layout ( ) . minor_to_major_size ( ) - 1 ; i > = 0 ; - - i ) { <nl> - int64 dimension = shape_ . layout ( ) . minor_to_major ( i ) ; <nl> + for ( int i = 0 ; i < LayoutUtil : : MinorToMajor ( shape_ ) . size ( ) ; + + i ) { <nl> + int64 dimension = LayoutUtil : : Major ( shape_ . layout ( ) , i ) ; <nl> std : : unique_ptr < ForLoop > loop = loop_nest . AddLoop ( <nl> / * start_index = * / 0 , <nl> / * end_index = * / shape_ . dimensions ( dimension ) , <nl> mmm a / tensorflow / compiler / xla / service / user_computation . cc <nl> ppp b / tensorflow / compiler / xla / service / user_computation . cc <nl> StatusOr < const OperationRequest * > LookUpRequest ( <nl> return & session_computation . requests ( ) . at ( handle_value ) ; <nl> } <nl> <nl> - / / Returns the OperationRequestion corresponding to the root ( result ) of the <nl> + / / Returns the OperationRequest corresponding to the root ( result ) of the <nl> / / session computation . <nl> StatusOr < const OperationRequest * > GetRoot ( <nl> VersionedComputationHandle : : Version version , <nl> mmm a / tensorflow / compiler / xla / service / user_computation_test . cc <nl> ppp b / tensorflow / compiler / xla / service / user_computation_test . cc <nl> TEST_F ( UserComputationTest , SimpleComputation ) { <nl> <nl> OutfeedRequest outfeed_request ; <nl> * outfeed_request . mutable_operand ( ) = constant_handle ; <nl> + * outfeed_request . mutable_shape ( ) = kVectorShape ; <nl> outfeed_request . set_outfeed_config ( " abc " ) ; <nl> TF_ASSERT_OK ( computation . AddOutfeedInstruction ( outfeed_request ) ) ; <nl> <nl> mmm a / tensorflow / compiler / xla / shape_util . cc <nl> ppp b / tensorflow / compiler / xla / shape_util . cc <nl> namespace { <nl> / / the shapes are the same . If compare_layouts is true , then layouts must also <nl> / / match . <nl> bool CompareShapes ( const Shape & lhs , const Shape & rhs , bool compare_layouts ) { <nl> - if ( ShapeUtil : : IsTuple ( lhs ) ) { <nl> - return ShapeUtil : : IsTuple ( rhs ) & & <nl> + if ( ShapeUtil : : IsTuple ( lhs ) | | ShapeUtil : : IsTuple ( rhs ) ) { <nl> + return ShapeUtil : : IsTuple ( lhs ) & & ShapeUtil : : IsTuple ( rhs ) & & <nl> ContainersEqual ( lhs . tuple_shapes ( ) , rhs . 
tuple_shapes ( ) , <nl> [ = ] ( const Shape & l , const Shape & r ) { <nl> return CompareShapes ( l , r , compare_layouts ) ; <nl> } ) ; <nl> + } else if ( ShapeUtil : : IsOpaque ( lhs ) | | ShapeUtil : : IsOpaque ( rhs ) ) { <nl> + return ShapeUtil : : IsOpaque ( lhs ) & & ShapeUtil : : IsOpaque ( rhs ) ; <nl> } <nl> - / / Explicitly compare the fields rather than using MessageDifferencer because <nl> - / / we want empty layouts to be treated identically to missing layouts . <nl> + <nl> if ( compare_layouts ) { <nl> - if ( ! ContainersEqual ( lhs . layout ( ) . minor_to_major ( ) , <nl> - rhs . layout ( ) . minor_to_major ( ) ) ) { <nl> - VLOG ( 3 ) < < " CompareShapes : lhs layout ! = rhs layout " ; <nl> - return false ; <nl> - } <nl> - if ( ! ContainersEqual ( lhs . layout ( ) . padded_dimensions ( ) , <nl> - rhs . layout ( ) . padded_dimensions ( ) ) ) { <nl> - VLOG ( 3 ) <nl> - < < " CompareShapes : lhs padded_dimensions ! = rhs padded_dimensions " ; <nl> + if ( lhs . layout ( ) . format ( ) ! = rhs . layout ( ) . format ( ) ) { <nl> return false ; <nl> } <nl> - if ( lhs . layout ( ) . padding_value ( ) ! = rhs . layout ( ) . padding_value ( ) ) { <nl> - VLOG ( 3 ) < < " CompareShapes : lhs padding value ! = rhs padding_value " ; <nl> - return false ; <nl> + if ( LayoutUtil : : IsDense ( lhs ) ) { <nl> + if ( ! ContainersEqual ( LayoutUtil : : MinorToMajor ( lhs ) , <nl> + LayoutUtil : : MinorToMajor ( rhs ) ) ) { <nl> + VLOG ( 3 ) < < " CompareShapes : lhs layout ! = rhs layout " ; <nl> + return false ; <nl> + } <nl> + if ( ! ContainersEqual ( lhs . layout ( ) . padded_dimensions ( ) , <nl> + rhs . layout ( ) . padded_dimensions ( ) ) ) { <nl> + VLOG ( 3 ) <nl> + < < " CompareShapes : lhs padded_dimensions ! = rhs padded_dimensions " ; <nl> + return false ; <nl> + } <nl> + if ( lhs . layout ( ) . padding_value ( ) ! = rhs . layout ( ) . padding_value ( ) ) { <nl> + VLOG ( 3 ) < < " CompareShapes : lhs padding value ! = rhs padding_value " ; <nl> + return false ; <nl> + } <nl> } <nl> } <nl> <nl> StatusOr < Shape > MakeShapeWithLayoutInternal ( <nl> } <nl> <nl> / * static * / void ShapeUtil : : AppendMajorDimension ( int bound , Shape * shape ) { <nl> + CHECK ( LayoutUtil : : IsDense ( * shape ) ) ; <nl> shape - > mutable_layout ( ) - > add_minor_to_major ( Rank ( * shape ) ) ; <nl> shape - > add_dimensions ( bound ) ; <nl> TF_DCHECK_OK ( ValidateShape ( * shape ) ) ; <nl> Status ForEachMutableSubshapeHelper ( <nl> new_shape . add_dimensions ( dim ) ; <nl> } <nl> if ( shape . has_layout ( ) ) { <nl> + CHECK ( LayoutUtil : : IsDense ( shape ) ) ; <nl> Layout * new_layout = new_shape . mutable_layout ( ) ; <nl> + new_layout - > set_format ( DENSE ) ; <nl> new_layout - > clear_minor_to_major ( ) ; <nl> for ( auto index : Permute ( permutation , shape . layout ( ) . minor_to_major ( ) ) ) { <nl> new_layout - > add_minor_to_major ( index ) ; <nl> ShapeUtil : : DimensionsUnmodifiedByReshape ( const Shape & input_shape , <nl> shape . mutable_dimensions ( ) - > erase ( shape . dimensions ( ) . begin ( ) + dim_to_delete ) ; <nl> if ( LayoutUtil : : HasLayout ( shape ) ) { <nl> Layout * layout = shape . mutable_layout ( ) ; <nl> + layout - > set_format ( DENSE ) ; <nl> for ( size_t i = 0 ; i < layout - > minor_to_major ( ) . size ( ) ; ) { <nl> if ( layout - > minor_to_major ( i ) = = dim_to_delete ) { <nl> layout - > mutable_minor_to_major ( ) - > erase ( <nl> mmm a / tensorflow / compiler / xla / shape_util . h <nl> ppp b / tensorflow / compiler / xla / shape_util . 
h <nl> limitations under the License . <nl> # include < initializer_list > <nl> # include < string > <nl> <nl> + # include " tensorflow / compiler / xla / layout_util . h " <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> # include " tensorflow / compiler / xla / types . h " <nl> # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> class ShapeUtil { <nl> return shape . element_type ( ) = = OPAQUE ; <nl> } <nl> <nl> - / / Returns whether the shape is an array . <nl> + / / Returns whether the shape is an array . Note that scalars are considered <nl> + / / arrays . <nl> static bool IsArray ( const Shape & shape ) { <nl> return ! IsTuple ( shape ) & & ! IsOpaque ( shape ) ; <nl> } <nl> class ShapeUtil { <nl> CHECK_EQ ( Rank ( shape ) , base . size ( ) ) ; <nl> CHECK_EQ ( incr . size ( ) , base . size ( ) ) ; <nl> CHECK_EQ ( count . size ( ) , base . size ( ) ) ; <nl> - const Layout & layout = shape . layout ( ) ; <nl> - const int64 rank = layout . minor_to_major_size ( ) ; <nl> + const int64 rank = LayoutUtil : : MinorToMajor ( shape ) . size ( ) ; <nl> / / Allows handling R0 arrays , such that the visitor function will be called <nl> / / once with the proper empty indexes . <nl> int64 n = - 1 ; <nl> class ShapeUtil { <nl> while ( n < rank & & visitor_function ( indexes ) ) { <nl> / / Increments dimensions in minor to major order . <nl> for ( n = 0 ; n < rank ; + + n ) { <nl> - int64 dim = layout . minor_to_major ( n ) ; <nl> + int64 dim = LayoutUtil : : Minor ( shape . layout ( ) , n ) ; <nl> indexes [ dim ] + = incr [ dim ] ; <nl> if ( indexes [ dim ] < base [ dim ] + count [ dim ] ) { <nl> break ; <nl> mmm a / tensorflow / compiler / xla / shape_util_test . cc <nl> ppp b / tensorflow / compiler / xla / shape_util_test . cc <nl> TEST ( ShapeUtilTest , IncompatibleTuplesWithDifferentDimensions ) { <nl> EXPECT_FALSE ( ShapeUtil : : Compatible ( tuple1 , tuple2 ) ) ; <nl> } <nl> <nl> - TEST ( ShapeUtilTest , EmptyLayoutEqualsMissingLayout ) { <nl> - / / A shape with a missing layout should be equal to a shape with an empty <nl> - / / layout . <nl> - Shape scalar1 = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> - Shape scalar2 = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> - <nl> - EXPECT_TRUE ( ShapeUtil : : Equal ( scalar1 , scalar2 ) ) ; <nl> - <nl> - scalar1 . clear_layout ( ) ; / / Remove layout field . <nl> - scalar2 . mutable_layout ( ) ; / / Create empty layout field . <nl> - <nl> - EXPECT_TRUE ( ShapeUtil : : Equal ( scalar1 , scalar2 ) ) ; <nl> - } <nl> - <nl> TEST ( ShapeUtilTest , CompareShapesWithPaddedDimensionsMismatch ) { <nl> Shape shape1 = ShapeUtil : : MakeShape ( F32 , { 20 , 30 } ) ; <nl> shape1 . mutable_layout ( ) - > add_padded_dimensions ( 10 ) ; <nl> TEST ( ShapeUtilTest , CompareShapesWithPaddingValueMismatch ) { <nl> EXPECT_FALSE ( ShapeUtil : : Equal ( shape1 , shape2 ) ) ; <nl> } <nl> <nl> - TEST ( ShapeUtilTest , ScalarUnpopulatedLayoutEqualsScalarLayout ) { <nl> - Shape scalar_unpopulated = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> - scalar_unpopulated . clear_layout ( ) ; <nl> - ASSERT_FALSE ( scalar_unpopulated . has_layout ( ) ) <nl> - < < ShapeUtil : : HumanStringWithLayout ( scalar_unpopulated ) ; <nl> + TEST ( ShapeUtilTest , ScalarDefaultLayoutEqualsScalarEmptyMin2Maj ) { <nl> + Shape scalar_default_layout = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> + ASSERT_TRUE ( scalar_default_layout . 
has_layout ( ) ) <nl> + < < ShapeUtil : : HumanStringWithLayout ( scalar_default_layout ) ; <nl> <nl> - const Shape scalar_populated = ShapeUtil : : MakeShapeWithLayout ( F32 , { } , { } ) ; <nl> - ASSERT_TRUE ( scalar_populated . has_layout ( ) ) <nl> - < < ShapeUtil : : HumanStringWithLayout ( scalar_populated ) ; <nl> + const Shape scalar_empty_min2maj = <nl> + ShapeUtil : : MakeShapeWithLayout ( F32 , { } , { } ) ; <nl> + ASSERT_TRUE ( scalar_empty_min2maj . has_layout ( ) ) <nl> + < < ShapeUtil : : HumanStringWithLayout ( scalar_empty_min2maj ) ; <nl> <nl> - EXPECT_TRUE ( ShapeUtil : : Equal ( scalar_unpopulated , scalar_populated ) ) ; <nl> + EXPECT_TRUE ( ShapeUtil : : Equal ( scalar_default_layout , scalar_empty_min2maj ) ) ; <nl> } <nl> <nl> TEST ( ShapeUtilTest , ByteSizeOfWithoutPadding ) { <nl> mmm a / tensorflow / compiler / xla / tests / conditional_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / conditional_test . cc <nl> XLA_TEST_F ( ConditionalOpTest , Parameters1 ) { <nl> ComputeAndCompareR0 < float > ( & builder , 12 . 0f , { } , error_spec_ ) ; <nl> } <nl> <nl> + / / Test conditional with two different computations in the true and false cases <nl> + / / that take in different arguments . <nl> + XLA_TEST_F ( ConditionalOpTest , DiffComputationsDiffArgs ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto pred = builder . ConstantR0 < bool > ( false ) ; <nl> + auto operand1 = builder . ConstantR0 < float > ( 56 . 4f ) ; <nl> + auto operand2 = builder . ConstantR0 < float > ( 12 . 6f ) ; <nl> + auto result = <nl> + builder . Conditional ( pred , operand1 , CreateR0F32CeilComputation ( ) , <nl> + operand2 , CreateR0F32FloorComputation ( ) ) ; <nl> + <nl> + ComputeAndCompareR0 < float > ( & builder , 12 . 0f , { } , error_spec_ ) ; <nl> + } <nl> + <nl> + / / Test conditional with two different computations in the true and false cases <nl> + / / that take in the same arguments . <nl> + XLA_TEST_F ( ConditionalOpTest , DiffComputationsSameArg ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto pred = builder . ConstantR0 < bool > ( false ) ; <nl> + auto operand = builder . ConstantR0 < float > ( 12 . 6f ) ; <nl> + auto result = builder . Conditional ( pred , operand , CreateR0F32CeilComputation ( ) , <nl> + operand , CreateR0F32FloorComputation ( ) ) ; <nl> + <nl> + ComputeAndCompareR0 < float > ( & builder , 12 . 0f , { } , error_spec_ ) ; <nl> + } <nl> + <nl> + / / Test conditional with the same computation in the true and false cases but <nl> + / / take in different arguments . <nl> + XLA_TEST_F ( ConditionalOpTest , SameComputationDiffArgs ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto pred = builder . ConstantR0 < bool > ( false ) ; <nl> + auto operand1 = builder . ConstantR0 < float > ( 56 . 4f ) ; <nl> + auto operand2 = builder . ConstantR0 < float > ( 12 . 6f ) ; <nl> + auto floor = CreateR0F32FloorComputation ( ) ; <nl> + auto result = builder . Conditional ( pred , operand1 , floor , operand2 , floor ) ; <nl> + <nl> + ComputeAndCompareR0 < float > ( & builder , 12 . 0f , { } , error_spec_ ) ; <nl> + } <nl> + <nl> + / / Test conditional with the same computation in the true and false cases that <nl> + / / take in the same arguments . <nl> + XLA_TEST_F ( ConditionalOpTest , SameComputationSameArg ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto pred = builder . ConstantR0 < bool > ( false ) ; <nl> + auto operand = builder . 
ConstantR0 < float > ( 12 . 6f ) ; <nl> + auto floor = CreateR0F32FloorComputation ( ) ; <nl> + auto result = builder . Conditional ( pred , operand , floor , operand , floor ) ; <nl> + <nl> + ComputeAndCompareR0 < float > ( & builder , 12 . 0f , { } , error_spec_ ) ; <nl> + } <nl> + <nl> + / / Test conditional with different instances of the same computation in the true <nl> + / / and false cases . <nl> + XLA_TEST_F ( ConditionalOpTest , SameComputationDiffInstances ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto pred = builder . ConstantR0 < bool > ( false ) ; <nl> + auto operand1 = builder . ConstantR0 < float > ( 56 . 4f ) ; <nl> + auto operand2 = builder . ConstantR0 < float > ( 12 . 6f ) ; <nl> + auto result = <nl> + builder . Conditional ( pred , operand1 , CreateR0F32FloorComputation ( ) , <nl> + operand2 , CreateR0F32FloorComputation ( ) ) ; <nl> + <nl> + ComputeAndCompareR0 < float > ( & builder , 12 . 0f , { } , error_spec_ ) ; <nl> + } <nl> + <nl> + / / Test the case when a call invokes a computation that contains a conditional . <nl> + XLA_TEST_F ( ConditionalOpTest , ConditionalWithCall ) { <nl> + Shape r0bool = ShapeUtil : : MakeShape ( PRED , { } ) ; <nl> + ComputationBuilder inner_builder ( client_ , TestName ( ) + " . inner_conditional " ) ; <nl> + auto pred_cond = inner_builder . Parameter ( 0 , r0bool , " param0 " ) ; <nl> + auto true_operand = inner_builder . Parameter ( 1 , r0f32_ , " param1 " ) ; <nl> + auto false_operand = inner_builder . Parameter ( 2 , r0f32_ , " param2 " ) ; <nl> + inner_builder . Conditional ( pred_cond , true_operand , <nl> + CreateR0F32CeilComputation ( ) , false_operand , <nl> + CreateR0F32FloorComputation ( ) ) ; <nl> + auto inner_builder_result = inner_builder . Build ( ) ; <nl> + <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto pred = builder . ConstantR0 < bool > ( false ) ; <nl> + auto operand1 = builder . ConstantR0 < float > ( 56 . 4f ) ; <nl> + auto operand2 = builder . ConstantR0 < float > ( 12 . 6f ) ; <nl> + builder . Call ( inner_builder_result . ConsumeValueOrDie ( ) , <nl> + { pred , operand1 , operand2 } ) ; <nl> + <nl> + ComputeAndCompareR0 < float > ( & builder , 12 . 0f , { } , error_spec_ ) ; <nl> + } <nl> + <nl> / / Test true and false computations that take in 2 parameters and predicate is <nl> / / true . <nl> XLA_TEST_F ( ConditionalOpTest , Parameters2TrueBranch ) { <nl> mmm a / tensorflow / compiler / xla / tools / parser / README . md <nl> ppp b / tensorflow / compiler / xla / tools / parser / README . md <nl> hlo_module <nl> : ' HloModule ' name computations <nl> ; <nl> <nl> + / * If no computation is marked as ENTRY , the last computation will be the entry <nl> + computation of the module . * / <nl> computations <nl> : computation <nl> | computation computations <nl> computation <nl> | name instruction_list <nl> ; <nl> <nl> + / * If no instruction is marked as ROOT , the last instruction will be the root of <nl> + its computation . * / <nl> instruction_list <nl> : ' { ' instruction_list1 ' } ' <nl> ; <nl> mmm a / tensorflow / compiler / xla / tools / parser / hlo_parser . cc <nl> ppp b / tensorflow / compiler / xla / tools / parser / hlo_parser . cc <nl> class HloParser { <nl> / / ParseXXX returns false if an error occurred . 
<nl> bool ParseHloModule ( ) ; <nl> bool ParseComputations ( ) ; <nl> - bool ParseComputation ( ) ; <nl> + bool ParseComputation ( HloComputation * * entry_computation ) ; <nl> bool ParseInstructionList ( HloComputation : : Builder * builder , <nl> string * root_name ) ; <nl> bool ParseInstruction ( HloComputation : : Builder * builder , string * root_name ) ; <nl> class HloParser { <nl> <nl> HloLexer lexer_ ; <nl> std : : unique_ptr < HloModule > module_ ; <nl> + std : : vector < std : : unique_ptr < HloComputation > > computations_ ; <nl> const HloModuleConfig config_ ; <nl> std : : vector < string > error_ ; <nl> } ; <nl> bool HloParser : : ParseHloModule ( ) { <nl> <nl> / / computations : : = ( computation ) + <nl> bool HloParser : : ParseComputations ( ) { <nl> + HloComputation * entry_computation = nullptr ; <nl> do { <nl> - if ( ! ParseComputation ( ) ) { <nl> + if ( ! ParseComputation ( & entry_computation ) ) { <nl> return false ; <nl> } <nl> } while ( lexer_ . GetKind ( ) ! = TokKind : : kEof ) ; <nl> + <nl> + for ( int i = 0 ; i < computations_ . size ( ) ; i + + ) { <nl> + / / If entry_computation is not nullptr , it means the computation it pointed <nl> + / / to is marked with " ENTRY " ; otherwise , no computation is marked with <nl> + / / " ENTRY " , and we use the last computation as the entry computation . We <nl> + / / add the non - entry computations as embedded computations to the module . <nl> + if ( ( entry_computation ! = nullptr & & <nl> + computations_ [ i ] . get ( ) ! = entry_computation ) | | <nl> + ( entry_computation = = nullptr & & i ! = computations_ . size ( ) - 1 ) ) { <nl> + module_ - > AddEmbeddedComputation ( std : : move ( computations_ [ i ] ) ) ; <nl> + continue ; <nl> + } <nl> + auto computation = <nl> + module_ - > AddEntryComputation ( std : : move ( computations_ [ i ] ) ) ; <nl> + / / The parameters and result layouts were set to default layout . Here we <nl> + / / set the layouts to what the hlo text says . <nl> + for ( int p = 0 ; p < computation - > num_parameters ( ) ; p + + ) { <nl> + const Shape & param_shape = computation - > parameter_instruction ( p ) - > shape ( ) ; <nl> + if ( param_shape . has_layout ( ) ) { <nl> + module_ - > mutable_entry_computation_layout ( ) <nl> + - > mutable_parameter_layout ( p ) <nl> + - > ResetLayout ( param_shape . layout ( ) ) ; <nl> + } <nl> + } <nl> + const Shape & result_shape = computation - > root_instruction ( ) - > shape ( ) ; <nl> + if ( result_shape . has_layout ( ) ) { <nl> + module_ - > mutable_entry_computation_layout ( ) <nl> + - > mutable_result_layout ( ) <nl> + - > ResetLayout ( result_shape . layout ( ) ) ; <nl> + } <nl> + } <nl> + <nl> return true ; <nl> } <nl> <nl> / / computation : : = ( ' ENTRY ' ) ? name ( param_list_to_shape ) ? instruction_list <nl> - bool HloParser : : ParseComputation ( ) { <nl> + bool HloParser : : ParseComputation ( HloComputation * * entry_computation ) { <nl> + LocTy maybe_entry_loc = lexer_ . GetLoc ( ) ; <nl> const bool is_entry_computation = EatIfPresent ( TokKind : : kw_ENTRY ) ; <nl> + <nl> string name ; <nl> LocTy name_loc = lexer_ . GetLoc ( ) ; <nl> if ( ! ParseName ( & name ) ) { <nl> bool HloParser : : ParseComputation ( ) { <nl> / / Now root can be either an existing instruction or a nullptr . If it ' s a <nl> / / nullptr , the implementation of Builder will set the last instruction as <nl> / / root instruction . <nl> - HloComputation * computation = <nl> - is_entry_computation <nl> - ? 
module_ - > AddEntryComputation ( builder - > Build ( root ) ) <nl> - : module_ - > AddEmbeddedComputation ( builder - > Build ( root ) ) ; <nl> + computations_ . emplace_back ( builder - > Build ( root ) ) ; <nl> + HloComputation * computation = computations_ . back ( ) . get ( ) ; <nl> <nl> if ( ! root ) { <nl> root = computation - > root_instruction ( ) ; <nl> bool HloParser : : ParseComputation ( ) { <nl> root_name , " , " , ShapeUtil : : HumanString ( root - > shape ( ) ) ) ) ; <nl> } <nl> <nl> - / / The parameters and result layouts were set to default layout . Here we set <nl> - / / the layouts to what the hlo text says . <nl> if ( is_entry_computation ) { <nl> - for ( int i = 0 ; i < computation - > num_parameters ( ) ; i + + ) { <nl> - const Shape & param_shape = computation - > parameter_instruction ( i ) - > shape ( ) ; <nl> - if ( param_shape . has_layout ( ) ) { <nl> - module_ - > mutable_entry_computation_layout ( ) <nl> - - > mutable_parameter_layout ( i ) <nl> - - > ResetLayout ( param_shape . layout ( ) ) ; <nl> - } <nl> - } <nl> - const Shape & result_shape = computation - > root_instruction ( ) - > shape ( ) ; <nl> - if ( result_shape . has_layout ( ) ) { <nl> - module_ - > mutable_entry_computation_layout ( ) <nl> - - > mutable_result_layout ( ) <nl> - - > ResetLayout ( result_shape . layout ( ) ) ; <nl> + if ( * entry_computation ! = nullptr ) { <nl> + return Error ( maybe_entry_loc , " expects only one ENTRY " ) ; <nl> } <nl> + * entry_computation = computation ; <nl> } <nl> + <nl> return AddComputation ( name , computation , name_loc ) ; <nl> } <nl> <nl> bool HloParser : : ParseInstruction ( HloComputation : : Builder * builder , <nl> Shape shape ; <nl> HloOpcode opcode ; <nl> std : : vector < HloInstruction * > operands ; <nl> + <nl> + LocTy maybe_root_loc = lexer_ . GetLoc ( ) ; <nl> bool is_root = EatIfPresent ( TokKind : : kw_ROOT ) ; <nl> <nl> const LocTy name_loc = lexer_ . GetLoc ( ) ; <nl> bool HloParser : : ParseInstruction ( HloComputation : : Builder * builder , <nl> ! ParseShape ( & shape ) | | ! ParseOpcode ( & opcode ) ) { <nl> return false ; <nl> } <nl> + <nl> if ( is_root ) { <nl> + if ( ! root_name - > empty ( ) ) { <nl> + return Error ( maybe_root_loc , " one computation should have only one ROOT " ) ; <nl> + } <nl> * root_name = name ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / tools / parser / hlo_parser_test . cc <nl> ppp b / tensorflow / compiler / xla / tools / parser / hlo_parser_test . cc <nl> ENTRY % CustomCall ( ) - > f32 [ 1 ] { <nl> " with that of its root instruction foo , f32 [ 1 , 2 , 3 ] " ) ; <nl> } <nl> <nl> + TEST_F ( HloParserTest , EntryComputationWithLayout ) { <nl> + const string original = R " ( HloModule layout : <nl> + add_F32 . v3 { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT add = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY % Reduce ( input : f32 [ 8 , 16 , 256 ] ) - > f32 [ 8 , 16 ] { <nl> + input = f32 [ 8 , 16 , 256 ] { 0 , 1 , 2 } parameter ( 0 ) <nl> + constant = f32 [ ] constant ( 0 ) <nl> + ROOT reduce = f32 [ 8 , 16 ] { 0 , 1 } reduce ( input , constant ) , dimensions = { 2 } , to_apply = add_F32 . v3 <nl> + } ) " ; <nl> + <nl> + auto module = Parse ( original ) ; <nl> + TF_ASSERT_OK ( module . status ( ) ) ; <nl> + auto program_layout = module . ValueOrDie ( ) - > entry_computation_layout ( ) ; <nl> + ASSERT_EQ ( program_layout . parameter_count ( ) , 1 ) ; <nl> + auto param_layout = program_layout . parameter_layout ( 0 ) . 
layout ( ) ; <nl> + auto result_layout = program_layout . result_layout ( ) . layout ( ) ; <nl> + EXPECT_TRUE ( <nl> + LayoutUtil : : Equal ( LayoutUtil : : MakeLayout ( { 0 , 1 , 2 } ) , param_layout ) ) <nl> + < < " actual layout of parameter ( 0 ) is " <nl> + < < LayoutUtil : : HumanString ( param_layout ) ; <nl> + EXPECT_TRUE ( LayoutUtil : : Equal ( LayoutUtil : : MakeLayout ( { 0 , 1 } ) , result_layout ) ) <nl> + < < " actual layout of result is " <nl> + < < LayoutUtil : : HumanString ( result_layout ) ; <nl> + } <nl> + <nl> + TEST_F ( HloParserTest , NoEntry ) { <nl> + const string original = R " ( HloModule no_entry : <nl> + c1 { <nl> + const1 = f32 [ 1 ] { 0 } constant ( { 12345 } ) <nl> + } <nl> + c2 { <nl> + const2 = f32 [ 1 ] { 0 } constant ( { 67890 } ) <nl> + } ) " ; <nl> + auto module = Parse ( original ) ; <nl> + TF_ASSERT_OK ( module . status ( ) ) ; <nl> + EXPECT_EQ ( module . ValueOrDie ( ) - > entry_computation ( ) - > name ( ) , " c2 " ) ; <nl> + } <nl> + <nl> + TEST_F ( HloParserTest , NoRoot ) { <nl> + const string original = R " ( HloModule no_root : <nl> + ENTRY consts { <nl> + first = f32 [ 1 ] { 0 } constant ( { 12345 } ) <nl> + last = f32 [ 1 ] { 0 } constant ( { 67890 } ) <nl> + } ) " ; <nl> + auto module = Parse ( original ) ; <nl> + TF_ASSERT_OK ( module . status ( ) ) ; <nl> + EXPECT_EQ ( <nl> + module . ValueOrDie ( ) - > entry_computation ( ) - > root_instruction ( ) - > name ( ) , <nl> + " last " ) ; <nl> + } <nl> + <nl> + TEST_F ( HloParserTest , MultipleEntries ) { <nl> + const string original = R " ( HloModule multiple_entries : <nl> + ENTRY c1 { <nl> + const1 = f32 [ 1 ] { 0 } constant ( { 12345 } ) <nl> + } <nl> + ENTRY c2 { <nl> + const2 = f32 [ 1 ] { 0 } constant ( { 67890 } ) <nl> + } ) " ; <nl> + ExpectHasSubstr ( Parse ( original ) . status ( ) . error_message ( ) , <nl> + " expects only one ENTRY " ) ; <nl> + } <nl> + <nl> + TEST_F ( HloParserTest , MultipleRoots ) { <nl> + const string original = R " ( HloModule multiple_roots : <nl> + ENTRY consts { <nl> + ROOT const1 = f32 [ 1 ] { 0 } constant ( { 12345 } ) <nl> + ROOT const2 = f32 [ 1 ] { 0 } constant ( { 12345 } ) <nl> + } ) " ; <nl> + ExpectHasSubstr ( Parse ( original ) . status ( ) . error_message ( ) , <nl> + " one computation should have only one ROOT " ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace tools <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / xla_data . proto <nl> ppp b / tensorflow / compiler / xla / xla_data . proto <nl> message PaddingConfig { <nl> repeated PaddingConfigDimension dimensions = 1 ; <nl> } <nl> <nl> + / / A format specifies the method used by a layout to store an array in memory . <nl> + enum Format { <nl> + INVALID_FORMAT = 0 ; <nl> + / / The default layout , with exactly one storage location per element ( ignoring <nl> + / / padding ) . <nl> + DENSE = 1 ; <nl> + } <nl> + <nl> / / A layout describes how the array is placed in ( 1D ) memory space . This <nl> / / includes the minor - to - major ordering of dimensions within a shape , as well as <nl> / / any padding present in those dimensions . <nl> message PaddingConfig { <nl> / / <nl> / / See the XLA documentation for more information on shapes and layouts . <nl> message Layout { <nl> + / / The method used to store the data in memory . The format determines which of <nl> + / / the other fields are used by the layout . 
<nl> + Format format = 4 ; <nl> + <nl> / / Sequence of dimension numbers , from minor ( fastest varying index ) to major <nl> / / ( slowest varying index ) . This field is required . <nl> repeated int64 minor_to_major = 1 ; <nl> <nl> - / / The width to which the layout of each dimension is padded up <nl> - / / to . If present , the size of the padded_dimensions must equal the <nl> - / / rank of the shape . The padding appears at the end of a dimension , <nl> - / / not at the beginning . This kind of padding , unlike padding in <nl> - / / e . g . convolution , is not part of the shape . <nl> + / / The width to which the layout of each dimension is padded up to . If <nl> + / / present , the size of the padded_dimensions must equal the rank of the <nl> + / / shape . The padding appears at the end of a dimension , not at the <nl> + / / beginning . This kind of padding , unlike padding in e . g . convolution , is not <nl> + / / part of the shape . This field must be unset unless the format is DENSE . <nl> repeated int64 padded_dimensions = 2 ; <nl> <nl> - / / Describes the values in the padding specified by <nl> - / / padded_dimensions . <nl> + / / Describes the values in the padding specified by padded_dimensions . This <nl> + / / field must be unset unless the format is DENSE . <nl> PaddingValue padding_value = 3 ; <nl> <nl> / / Important : if any field is added , be sure to modify ShapeUtil : : Equal ( ) <nl> mmm a / tensorflow / contrib / bayesflow / BUILD <nl> ppp b / tensorflow / contrib / bayesflow / BUILD <nl> cuda_py_test ( <nl> ] , <nl> ) <nl> <nl> + cuda_py_test ( <nl> + name = " layers_conv_variational_test " , <nl> + size = " small " , <nl> + srcs = [ " python / kernel_tests / layers_conv_variational_test . py " ] , <nl> + additional_deps = [ <nl> + " : bayesflow_py " , <nl> + " / / third_party / py / numpy " , <nl> + " / / tensorflow / contrib / distributions : distributions_py " , <nl> + " / / tensorflow / python / ops / distributions " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : gradients " , <nl> + " / / tensorflow / python : linalg_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : nn_ops " , <nl> + ] , <nl> + ) <nl> + <nl> cuda_py_test ( <nl> name = " layers_dense_variational_test " , <nl> size = " small " , <nl> new file mode 100644 <nl> index 0000000000000 . . 57f44aef1a198 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / bayesflow / python / kernel_tests / layers_conv_variational_test . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . 
<nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for convolutional Bayesian layers . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . contrib . bayesflow . python . ops import layers_conv_variational as prob_layers_lib <nl> + from tensorflow . contrib . bayesflow . python . ops import layers_util as prob_layers_util <nl> + from tensorflow . contrib . distributions . python . ops import independent as independent_lib <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . framework import tensor_shape <nl> + from tensorflow . python . ops import nn <nl> + from tensorflow . python . ops import nn_ops <nl> + from tensorflow . python . ops import random_ops <nl> + from tensorflow . python . ops . distributions import normal as normal_lib <nl> + from tensorflow . python . platform import test <nl> + <nl> + <nl> + class Counter ( object ) : <nl> + " " " Helper class to manage incrementing a counting ` int ` . " " " <nl> + <nl> + def __init__ ( self ) : <nl> + self . _value = - 1 <nl> + <nl> + @ property <nl> + def value ( self ) : <nl> + return self . _value <nl> + <nl> + def __call__ ( self ) : <nl> + self . _value + = 1 <nl> + return self . _value <nl> + <nl> + <nl> + class MockDistribution ( independent_lib . Independent ) : <nl> + " " " Monitors DenseVariational calls to the underlying distribution . " " " <nl> + <nl> + def __init__ ( self , result_sample , result_log_prob , loc = None , scale = None ) : <nl> + self . result_sample = result_sample <nl> + self . result_log_prob = result_log_prob <nl> + self . result_loc = loc <nl> + self . result_scale = scale <nl> + self . result_distribution = normal_lib . Normal ( loc = 0 . 0 , scale = 1 . 0 ) <nl> + if loc is not None and scale is not None : <nl> + self . result_distribution = normal_lib . Normal ( loc = self . result_loc , <nl> + scale = self . result_scale ) <nl> + self . called_log_prob = Counter ( ) <nl> + self . called_sample = Counter ( ) <nl> + self . called_loc = Counter ( ) <nl> + self . called_scale = Counter ( ) <nl> + <nl> + def log_prob ( self , * args , * * kwargs ) : <nl> + self . called_log_prob ( ) <nl> + return self . result_log_prob <nl> + <nl> + def sample ( self , * args , * * kwargs ) : <nl> + self . called_sample ( ) <nl> + return self . result_sample <nl> + <nl> + @ property <nl> + def distribution ( self ) : # for dummy check on Independent ( Normal ) <nl> + return self . result_distribution <nl> + <nl> + @ property <nl> + def loc ( self ) : <nl> + self . called_loc ( ) <nl> + return self . result_loc <nl> + <nl> + @ property <nl> + def scale ( self ) : <nl> + self . called_scale ( ) <nl> + return self . result_scale <nl> + <nl> + <nl> + class MockKLDivergence ( object ) : <nl> + " " " Monitors layer calls to the divergence implementation . " " " <nl> + <nl> + def __init__ ( self , result ) : <nl> + self . result = result <nl> + self . args = [ ] <nl> + self . called = Counter ( ) <nl> + <nl> + def __call__ ( self , * args , * * kwargs ) : <nl> + self . called ( ) <nl> + self . args . append ( args ) <nl> + return self . result <nl> + <nl> + <nl> + class ConvVariational ( test . TestCase ) : <nl> + <nl> + def _testKLPenaltyKernel ( self , layer_class ) : <nl> + with self . 
test_session ( ) : <nl> + layer = layer_class ( filters = 2 , kernel_size = 3 ) <nl> + if layer_class = = prob_layers_lib . Conv1DVariational : <nl> + inputs = random_ops . random_uniform ( [ 2 , 3 , 1 ] , seed = 1 ) <nl> + elif layer_class = = prob_layers_lib . Conv2DVariational : <nl> + inputs = random_ops . random_uniform ( [ 2 , 3 , 3 , 1 ] , seed = 1 ) <nl> + elif layer_class = = prob_layers_lib . Conv3DVariational : <nl> + inputs = random_ops . random_uniform ( [ 2 , 3 , 3 , 3 , 1 ] , seed = 1 ) <nl> + <nl> + # No keys . <nl> + losses = ops . get_collection ( ops . GraphKeys . REGULARIZATION_LOSSES ) <nl> + self . assertEqual ( len ( losses ) , 0 ) <nl> + self . assertListEqual ( layer . losses , losses ) <nl> + <nl> + _ = layer ( inputs ) <nl> + <nl> + # Yes keys . <nl> + losses = ops . get_collection ( ops . GraphKeys . REGULARIZATION_LOSSES ) <nl> + self . assertEqual ( len ( losses ) , 1 ) <nl> + self . assertListEqual ( layer . losses , losses ) <nl> + <nl> + def _testKLPenaltyBoth ( self , layer_class ) : <nl> + def _make_normal ( dtype , * args ) : # pylint : disable = unused - argument <nl> + return normal_lib . Normal ( <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) <nl> + with self . test_session ( ) : <nl> + layer = layer_class ( <nl> + filters = 2 , <nl> + kernel_size = 3 , <nl> + bias_posterior_fn = prob_layers_util . default_mean_field_normal_fn ( ) , <nl> + bias_prior_fn = _make_normal ) <nl> + if layer_class = = prob_layers_lib . Conv1DVariational : <nl> + inputs = random_ops . random_uniform ( [ 2 , 3 , 1 ] , seed = 1 ) <nl> + elif layer_class = = prob_layers_lib . Conv2DVariational : <nl> + inputs = random_ops . random_uniform ( [ 2 , 3 , 3 , 1 ] , seed = 1 ) <nl> + elif layer_class = = prob_layers_lib . Conv3DVariational : <nl> + inputs = random_ops . random_uniform ( [ 2 , 3 , 3 , 3 , 1 ] , seed = 1 ) <nl> + <nl> + # No keys . <nl> + losses = ops . get_collection ( ops . GraphKeys . REGULARIZATION_LOSSES ) <nl> + self . assertEqual ( len ( losses ) , 0 ) <nl> + self . assertListEqual ( layer . losses , losses ) <nl> + <nl> + _ = layer ( inputs ) <nl> + <nl> + # Yes keys . <nl> + losses = ops . get_collection ( ops . GraphKeys . REGULARIZATION_LOSSES ) <nl> + self . assertEqual ( len ( losses ) , 2 ) <nl> + self . assertListEqual ( layer . losses , losses ) <nl> + <nl> + def _testConvVariational ( self , layer_class ) : <nl> + batch_size , depth , height , width , channels , filters = 2 , 4 , 4 , 4 , 3 , 5 <nl> + with self . test_session ( ) as sess : <nl> + seed = Counter ( ) <nl> + if layer_class = = prob_layers_lib . Conv1DVariational : <nl> + inputs = random_ops . random_uniform ( <nl> + [ batch_size , width , channels ] , seed = seed ( ) ) <nl> + kernel_size = ( 2 , ) <nl> + elif layer_class = = prob_layers_lib . Conv2DVariational : <nl> + inputs = random_ops . random_uniform ( <nl> + [ batch_size , height , width , channels ] , seed = seed ( ) ) <nl> + kernel_size = ( 2 , 2 ) <nl> + elif layer_class = = prob_layers_lib . Conv3DVariational : <nl> + inputs = random_ops . random_uniform ( <nl> + [ batch_size , depth , height , width , channels ] , seed = seed ( ) ) <nl> + kernel_size = ( 2 , 2 , 2 ) <nl> + <nl> + kernel_shape = kernel_size + ( channels , filters ) <nl> + kernel_posterior = MockDistribution ( <nl> + result_log_prob = random_ops . random_uniform ( kernel_shape , seed = seed ( ) ) , <nl> + result_sample = random_ops . 
random_uniform ( kernel_shape , seed = seed ( ) ) ) <nl> + kernel_prior = MockDistribution ( <nl> + result_log_prob = random_ops . random_uniform ( kernel_shape , seed = seed ( ) ) , <nl> + result_sample = random_ops . random_uniform ( kernel_shape , seed = seed ( ) ) ) <nl> + kernel_divergence = MockKLDivergence ( <nl> + result = random_ops . random_uniform ( kernel_shape , seed = seed ( ) ) ) <nl> + <nl> + bias_size = ( filters , ) <nl> + bias_posterior = MockDistribution ( <nl> + result_log_prob = random_ops . random_uniform ( bias_size , seed = seed ( ) ) , <nl> + result_sample = random_ops . random_uniform ( bias_size , seed = seed ( ) ) ) <nl> + bias_prior = MockDistribution ( <nl> + result_log_prob = random_ops . random_uniform ( bias_size , seed = seed ( ) ) , <nl> + result_sample = random_ops . random_uniform ( bias_size , seed = seed ( ) ) ) <nl> + bias_divergence = MockKLDivergence ( <nl> + result = random_ops . random_uniform ( bias_size , seed = seed ( ) ) ) <nl> + <nl> + convolution_op = nn_ops . Convolution ( <nl> + tensor_shape . TensorShape ( inputs . shape ) , <nl> + filter_shape = tensor_shape . TensorShape ( kernel_shape ) , <nl> + padding = " SAME " ) <nl> + expected_outputs = convolution_op ( inputs , kernel_posterior . result_sample ) <nl> + expected_outputs = nn . bias_add ( expected_outputs , <nl> + bias_posterior . result_sample , <nl> + data_format = " NHWC " ) <nl> + <nl> + layer = layer_class ( <nl> + filters = filters , <nl> + kernel_size = kernel_size , <nl> + padding = " SAME " , <nl> + kernel_posterior_fn = lambda * args : kernel_posterior , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( seed = 42 ) , <nl> + kernel_prior_fn = lambda * args : kernel_prior , <nl> + kernel_divergence_fn = kernel_divergence , <nl> + bias_posterior_fn = lambda * args : bias_posterior , <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( seed = 43 ) , <nl> + bias_prior_fn = lambda * args : bias_prior , <nl> + bias_divergence_fn = bias_divergence ) <nl> + <nl> + outputs = layer ( inputs ) <nl> + <nl> + kl_penalty = ops . get_collection ( ops . GraphKeys . REGULARIZATION_LOSSES ) <nl> + <nl> + [ <nl> + expected_outputs_ , actual_outputs_ , <nl> + expected_kernel_ , actual_kernel_ , <nl> + expected_kernel_divergence_ , actual_kernel_divergence_ , <nl> + expected_bias_ , actual_bias_ , <nl> + expected_bias_divergence_ , actual_bias_divergence_ , <nl> + ] = sess . run ( [ <nl> + expected_outputs , outputs , <nl> + kernel_posterior . result_sample , layer . kernel_posterior_tensor , <nl> + kernel_divergence . result , kl_penalty [ 0 ] , <nl> + bias_posterior . result_sample , layer . bias_posterior_tensor , <nl> + bias_divergence . result , kl_penalty [ 1 ] , <nl> + ] ) <nl> + <nl> + self . assertAllClose ( <nl> + expected_kernel_ , actual_kernel_ , <nl> + rtol = 1e - 6 , atol = 0 . ) <nl> + self . assertAllClose ( <nl> + expected_bias_ , actual_bias_ , <nl> + rtol = 1e - 6 , atol = 0 . ) <nl> + self . assertAllClose ( <nl> + expected_outputs_ , actual_outputs_ , <nl> + rtol = 1e - 6 , atol = 0 . ) <nl> + self . assertAllClose ( <nl> + expected_kernel_divergence_ , actual_kernel_divergence_ , <nl> + rtol = 1e - 6 , atol = 0 . ) <nl> + self . assertAllClose ( <nl> + expected_bias_divergence_ , actual_bias_divergence_ , <nl> + rtol = 1e - 6 , atol = 0 . ) <nl> + <nl> + self . assertAllEqual ( <nl> + [ [ kernel_posterior . distribution , <nl> + kernel_prior . distribution , <nl> + kernel_posterior . result_sample ] ] , <nl> + kernel_divergence . 
args ) <nl> + <nl> + self . assertAllEqual ( <nl> + [ [ bias_posterior . distribution , <nl> + bias_prior . distribution , <nl> + bias_posterior . result_sample ] ] , <nl> + bias_divergence . args ) <nl> + <nl> + def testKLPenaltyKernelConv1DVariational ( self ) : <nl> + self . _testKLPenaltyKernel ( prob_layers_lib . Conv1DVariational ) <nl> + <nl> + def testKLPenaltyKernelConv2DVariational ( self ) : <nl> + self . _testKLPenaltyKernel ( prob_layers_lib . Conv2DVariational ) <nl> + <nl> + def testKLPenaltyKernelConv3DVariational ( self ) : <nl> + self . _testKLPenaltyKernel ( prob_layers_lib . Conv3DVariational ) <nl> + <nl> + def testKLPenaltyBothConv1DVariational ( self ) : <nl> + self . _testKLPenaltyBoth ( prob_layers_lib . Conv1DVariational ) <nl> + <nl> + def testKLPenaltyBothConv2DVariational ( self ) : <nl> + self . _testKLPenaltyBoth ( prob_layers_lib . Conv2DVariational ) <nl> + <nl> + def testKLPenaltyBothConv3DVariational ( self ) : <nl> + self . _testKLPenaltyBoth ( prob_layers_lib . Conv3DVariational ) <nl> + <nl> + def testConv1DVariational ( self ) : <nl> + self . _testConvVariational ( prob_layers_lib . Conv1DVariational ) <nl> + <nl> + def testConv2DVariational ( self ) : <nl> + self . _testConvVariational ( prob_layers_lib . Conv2DVariational ) <nl> + <nl> + def testConv3DVariational ( self ) : <nl> + self . _testConvVariational ( prob_layers_lib . Conv3DVariational ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + test . main ( ) <nl> mmm a / tensorflow / contrib / bayesflow / python / kernel_tests / layers_dense_variational_test . py <nl> ppp b / tensorflow / contrib / bayesflow / python / kernel_tests / layers_dense_variational_test . py <nl> <nl> import numpy as np <nl> <nl> from tensorflow . contrib . bayesflow . python . ops import layers_dense_variational_impl as prob_layers_lib <nl> + from tensorflow . contrib . bayesflow . python . ops import layers_util as prob_layers_util <nl> from tensorflow . contrib . distributions . python . ops import independent as independent_lib <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> def _make_normal ( dtype , * args ) : # pylint : disable = unused - argument <nl> with self . test_session ( ) : <nl> layer = layer_class ( <nl> units = 2 , <nl> - bias_posterior_fn = prob_layers_lib . default_mean_field_normal_fn ( ) , <nl> + bias_posterior_fn = prob_layers_util . default_mean_field_normal_fn ( ) , <nl> bias_prior_fn = _make_normal ) <nl> inputs = random_ops . random_uniform ( [ 2 , 3 ] , seed = 1 ) <nl> <nl> def testDenseFlipout ( self ) : <nl> maxval = 2 , <nl> dtype = dtypes . int32 , <nl> seed = distribution_util . gen_new_seed ( <nl> - layer . seed , salt = " conv_variational " ) ) <nl> + layer . seed , salt = " dense_flipout " ) ) <nl> sign_output = math_ops . cast ( 2 * sign_output - 1 , inputs . dtype ) <nl> perturbed_inputs = math_ops . matmul ( <nl> inputs * sign_input , expected_kernel_posterior_affine_tensor ) <nl> mmm a / tensorflow / contrib / bayesflow / python / ops / layers . py <nl> ppp b / tensorflow / contrib / bayesflow / python / ops / layers . py <nl> <nl> <nl> # go / tf - wildcard - import <nl> # pylint : disable = wildcard - import <nl> + from tensorflow . contrib . bayesflow . python . ops . layers_conv_variational import * <nl> from tensorflow . contrib . bayesflow . python . ops . layers_dense_variational_impl import * <nl> + from tensorflow . contrib . bayesflow . python . ops . 
layers_util import * <nl> # pylint : enable = wildcard - import <nl> from tensorflow . python . util . all_util import remove_undocumented <nl> <nl> _allowed_symbols = [ <nl> + ' Convolution1DVariational ' , <nl> + ' Convolution2DVariational ' , <nl> + ' Convolution3DVariational ' , <nl> + ' Conv1DVariational ' , <nl> + ' Conv2DVariational ' , <nl> + ' Conv3DVariational ' , <nl> + ' convolution1d_variational ' , <nl> + ' convolution2d_variational ' , <nl> + ' convolution3d_variational ' , <nl> + ' conv1d_variational ' , <nl> + ' conv2d_variational ' , <nl> + ' conv3d_variational ' , <nl> ' DenseReparameterization ' , <nl> ' DenseLocalReparameterization ' , <nl> ' DenseFlipout ' , <nl> new file mode 100644 <nl> index 0000000000000 . . 6ffb55feb1ad7 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / bayesflow / python / ops / layers_conv_variational . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Convolutional variational layer classes and their functional aliases . <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . contrib . bayesflow . python . ops import layers_util <nl> + from tensorflow . contrib . distributions . python . ops import independent as independent_lib <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . framework import tensor_shape <nl> + from tensorflow . python . layers import base as layers_lib <nl> + from tensorflow . python . layers import utils <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import nn <nl> + from tensorflow . python . ops import nn_ops <nl> + from tensorflow . python . ops import standard_ops <nl> + from tensorflow . python . ops . distributions import kullback_leibler as kl_lib <nl> + from tensorflow . python . ops . distributions import normal as normal_lib <nl> + <nl> + <nl> + class _ConvVariational ( layers_lib . Layer ) : <nl> + " " " Abstract nD convolution layer ( private , used as implementation base ) . <nl> + <nl> + This layer creates a convolution kernel that is convolved <nl> + ( actually cross - correlated ) with the layer input to produce a tensor of <nl> + outputs . It may also include a bias addition and activation function <nl> + on the outputs . It assumes the ` kernel ` and / or ` bias ` are drawn from <nl> + distributions . 
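As a rough editor's sketch of what drawing the ` kernel ` from a distribution amounts to in plain TensorFlow ops (illustrative only, not the layer's implementation; the variable names ` kernel_loc ` , ` kernel_rho ` , ` bias_loc ` are invented here, and a mean-field normal posterior is assumed):

```python
import tensorflow as tf

# One Monte Carlo forward pass of a "Bayesian" 2D convolution (sketch only).
inputs = tf.random_normal([8, 32, 32, 3])
kernel_loc = tf.get_variable("kernel_loc", [5, 5, 3, 64])
kernel_scale = tf.nn.softplus(tf.get_variable("kernel_rho", [5, 5, 3, 64]))
bias_loc = tf.get_variable("bias_loc", [64])

# Draw one kernel sample from the assumed mean-field normal posterior; the bias
# is kept deterministic, mirroring the default `is_singular=True` posterior.
kernel_sample = kernel_loc + kernel_scale * tf.random_normal([5, 5, 3, 64])

outputs = tf.nn.convolution(inputs, kernel_sample, padding="SAME")
outputs = tf.nn.bias_add(outputs, bias_loc)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(outputs).shape)  # (8, 32, 32, 64)
```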
<nl> + <nl> + By default , the layer implements a stochastic forward pass via <nl> + sampling from the kernel and bias posteriors , <nl> + ` ` ` none <nl> + outputs = f ( inputs ; kernel , bias ) , kernel , bias ~ posterior <nl> + ` ` ` <nl> + where f denotes the layer ' s calculation . <nl> + <nl> + The arguments permit separate specification of the surrogate posterior <nl> + ( ` q ( W | x ) ` ) , prior ( ` p ( W ) ` ) , and divergence for both the ` kernel ` and ` bias ` <nl> + distributions . <nl> + <nl> + Arguments : <nl> + rank : An integer , the rank of the convolution , e . g . " 2 " for 2D convolution . <nl> + filters : Integer , the dimensionality of the output space ( i . e . the number <nl> + of filters in the convolution ) . <nl> + kernel_size : An integer or tuple / list of n integers , specifying the <nl> + length of the convolution window . <nl> + strides : An integer or tuple / list of n integers , <nl> + specifying the stride length of the convolution . <nl> + Specifying any stride value ! = 1 is incompatible with specifying <nl> + any ` dilation_rate ` value ! = 1 . <nl> + padding : One of ` " valid " ` or ` " same " ` ( case - insensitive ) . <nl> + data_format : A string , one of ` channels_last ` ( default ) or ` channels_first ` . <nl> + The ordering of the dimensions in the inputs . <nl> + ` channels_last ` corresponds to inputs with shape <nl> + ` ( batch , . . . , channels ) ` while ` channels_first ` corresponds to <nl> + inputs with shape ` ( batch , channels , . . . ) ` . <nl> + dilation_rate : An integer or tuple / list of n integers , specifying <nl> + the dilation rate to use for dilated convolution . <nl> + Currently , specifying any ` dilation_rate ` value ! = 1 is <nl> + incompatible with specifying any ` strides ` value ! = 1 . <nl> + activation : Activation function . Set it to None to maintain a <nl> + linear activation . <nl> + activity_regularizer : Optional regularizer function for the output . <nl> + trainable : Boolean , if ` True ` also add variables to the graph collection <nl> + ` GraphKeys . TRAINABLE_VARIABLES ` ( see ` tf . Variable ` ) . <nl> + kernel_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` kernel ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( ) ` . <nl> + kernel_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + kernel_prior_fn : Python ` callable ` which creates ` tf . distributions ` <nl> + instance . See ` default_mean_field_normal_fn ` docstring for required <nl> + parameter signature . <nl> + Default value : ` tf . distributions . Normal ( loc = 0 . , scale = 1 . ) ` . <nl> + kernel_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + bias_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` bias ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( is_singular = True ) ` ( which creates an <nl> + instance of ` tf . 
distributions . Deterministic ` ) . <nl> + bias_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + bias_prior_fn : Python ` callable ` which creates ` tf . distributions ` instance . <nl> + See ` default_mean_field_normal_fn ` docstring for required parameter <nl> + signature . Default value : ` None ` ( no prior , no variational inference ) <nl> + bias_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + name : A string , the name of the layer . <nl> + <nl> + Properties : <nl> + rank : Python integer , dimensionality of convolution . <nl> + filters : Python integer , dimensionality of the output space . <nl> + kernel_size : Size of the convolution window . <nl> + strides : Stride length of convolution . <nl> + padding : Python string describing padding approach . <nl> + data_format : Python string describing input data ' s dimensions . <nl> + dilation_rate : Dilation rate for an atrous convolution . <nl> + activation : Activation function ( ` callable ` ) . <nl> + activity_regularizer : Regularizer function for the output . <nl> + kernel_use_local_reparameterization : Python ` bool ` indicating whether <nl> + ` kernel ` calculation should employ the Local Reparameterization Trick . <nl> + kernel_posterior_fn : ` callable ` returning posterior . <nl> + kernel_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + kernel_prior_fn : ` callable ` returning prior . <nl> + kernel_divergence_fn : ` callable ` returning divergence . <nl> + bias_posterior_fn : ` callable ` returning posterior . <nl> + bias_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + bias_prior_fn : ` callable ` returning prior . <nl> + bias_divergence_fn : ` callable ` returning divergence . <nl> + " " " <nl> + <nl> + def __init__ ( <nl> + self , <nl> + rank , <nl> + filters , <nl> + kernel_size , <nl> + strides = 1 , <nl> + padding = " valid " , <nl> + data_format = " channels_last " , <nl> + dilation_rate = 1 , <nl> + activation = None , <nl> + activity_regularizer = None , <nl> + trainable = True , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> + kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + bias_prior_fn = None , <nl> + bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + name = None , <nl> + * * kwargs ) : <nl> + super ( _ConvVariational , self ) . __init__ ( <nl> + trainable = trainable , <nl> + name = name , <nl> + activity_regularizer = activity_regularizer , <nl> + * * kwargs ) <nl> + self . rank = rank <nl> + self . filters = filters <nl> + self . kernel_size = utils . 
normalize_tuple ( kernel_size , rank , " kernel_size " ) <nl> + self . strides = utils . normalize_tuple ( strides , rank , " strides " ) <nl> + self . padding = utils . normalize_padding ( padding ) <nl> + self . data_format = utils . normalize_data_format ( data_format ) <nl> + self . dilation_rate = utils . normalize_tuple ( <nl> + dilation_rate , rank , " dilation_rate " ) <nl> + self . activation = activation <nl> + self . input_spec = layers_lib . InputSpec ( ndim = self . rank + 2 ) <nl> + self . kernel_posterior_fn = kernel_posterior_fn <nl> + self . kernel_posterior_tensor_fn = kernel_posterior_tensor_fn <nl> + self . kernel_prior_fn = kernel_prior_fn <nl> + self . kernel_divergence_fn = kernel_divergence_fn <nl> + self . bias_posterior_fn = bias_posterior_fn <nl> + self . bias_posterior_tensor_fn = bias_posterior_tensor_fn <nl> + self . bias_prior_fn = bias_prior_fn <nl> + self . bias_divergence_fn = bias_divergence_fn <nl> + <nl> + def build ( self , input_shape ) : <nl> + input_shape = tensor_shape . TensorShape ( input_shape ) <nl> + if self . data_format = = " channels_first " : <nl> + channel_axis = 1 <nl> + else : <nl> + channel_axis = - 1 <nl> + if input_shape [ channel_axis ] . value is None : <nl> + raise ValueError ( " The channel dimension of the inputs " <nl> + " should be defined . Found ` None ` . " ) <nl> + input_dim = input_shape [ channel_axis ] . value <nl> + kernel_shape = self . kernel_size + ( input_dim , self . filters ) <nl> + dtype = dtypes . as_dtype ( self . dtype ) <nl> + <nl> + # Must have a posterior kernel . <nl> + self . kernel_posterior = self . kernel_posterior_fn ( <nl> + dtype , kernel_shape , " kernel_posterior " , <nl> + self . trainable , self . add_variable ) <nl> + <nl> + if self . kernel_prior_fn is None : <nl> + self . kernel_prior = None <nl> + else : <nl> + self . kernel_prior = self . kernel_prior_fn ( <nl> + dtype , kernel_shape , " kernel_prior " , <nl> + self . trainable , self . add_variable ) <nl> + self . _built_kernel_divergence = False <nl> + <nl> + if self . bias_posterior_fn is None : <nl> + self . bias_posterior = None <nl> + else : <nl> + self . bias_posterior = self . bias_posterior_fn ( <nl> + dtype , ( self . filters , ) , " bias_posterior " , <nl> + self . trainable , self . add_variable ) <nl> + <nl> + if self . bias_prior_fn is None : <nl> + self . bias_prior = None <nl> + else : <nl> + self . bias_prior = self . bias_prior_fn ( <nl> + dtype , ( self . filters , ) , " bias_prior " , <nl> + self . trainable , self . add_variable ) <nl> + self . _built_bias_divergence = False <nl> + <nl> + self . input_spec = layers_lib . InputSpec ( ndim = self . rank + 2 , <nl> + axes = { channel_axis : input_dim } ) <nl> + self . _convolution_op = nn_ops . Convolution ( <nl> + input_shape , <nl> + filter_shape = tensor_shape . TensorShape ( kernel_shape ) , <nl> + dilation_rate = self . dilation_rate , <nl> + strides = self . strides , <nl> + padding = self . padding . upper ( ) , <nl> + data_format = utils . convert_data_format ( self . data_format , <nl> + self . rank + 2 ) ) <nl> + <nl> + self . built = True <nl> + <nl> + def call ( self , inputs ) : <nl> + inputs = ops . convert_to_tensor ( inputs , dtype = self . dtype ) <nl> + <nl> + outputs = self . _apply_variational_kernel ( inputs ) <nl> + outputs = self . _apply_variational_bias ( outputs ) <nl> + if self . activation is not None : <nl> + outputs = self . activation ( outputs ) <nl> + if not self . _built_kernel_divergence : <nl> + kernel_posterior = self . 
kernel_posterior <nl> + kernel_prior = self . kernel_prior <nl> + if isinstance ( self . kernel_posterior , independent_lib . Independent ) : <nl> + kernel_posterior = kernel_posterior . distribution <nl> + if isinstance ( self . kernel_prior , independent_lib . Independent ) : <nl> + kernel_prior = kernel_prior . distribution <nl> + self . _apply_divergence ( self . kernel_divergence_fn , <nl> + kernel_posterior , <nl> + kernel_prior , <nl> + self . kernel_posterior_tensor , <nl> + name = " divergence_kernel " ) <nl> + self . _built_kernel_divergence = True <nl> + if not self . _built_bias_divergence : <nl> + bias_posterior = self . bias_posterior <nl> + bias_prior = self . bias_prior <nl> + if isinstance ( self . bias_posterior , independent_lib . Independent ) : <nl> + bias_posterior = bias_posterior . distribution <nl> + if isinstance ( self . bias_prior , independent_lib . Independent ) : <nl> + bias_prior = bias_prior . distribution <nl> + self . _apply_divergence ( self . bias_divergence_fn , <nl> + bias_posterior , <nl> + bias_prior , <nl> + self . bias_posterior_tensor , <nl> + name = " divergence_bias " ) <nl> + self . _built_bias_divergence = True <nl> + return outputs <nl> + <nl> + def _apply_variational_kernel ( self , inputs ) : <nl> + self . kernel_posterior_tensor = self . kernel_posterior_tensor_fn ( <nl> + self . kernel_posterior ) <nl> + outputs = self . _convolution_op ( inputs , self . kernel_posterior_tensor ) <nl> + return outputs <nl> + <nl> + def _apply_variational_bias ( self , inputs ) : <nl> + if self . bias_posterior is None : <nl> + self . bias_posterior_tensor = None <nl> + return inputs <nl> + self . bias_posterior_tensor = self . bias_posterior_tensor_fn ( <nl> + self . bias_posterior ) <nl> + outputs = inputs <nl> + if self . data_format = = " channels_first " : <nl> + if self . rank = = 1 : <nl> + # nn . bias_add does not accept a 1D input tensor . <nl> + bias = array_ops . reshape ( self . bias_posterior_tensor , <nl> + ( 1 , self . filters , 1 ) ) <nl> + outputs + = bias <nl> + if self . rank = = 2 : <nl> + outputs = nn . bias_add ( outputs , <nl> + self . bias_posterior_tensor , <nl> + data_format = " NCHW " ) <nl> + if self . rank = = 3 : <nl> + # As of Mar 2017 , direct addition is significantly slower than <nl> + # bias_add when computing gradients . To use bias_add , we collapse Z <nl> + # and Y into a single dimension to obtain a 4D input tensor . <nl> + outputs_shape = outputs . shape . as_list ( ) <nl> + outputs_4d = array_ops . reshape ( outputs , <nl> + [ outputs_shape [ 0 ] , outputs_shape [ 1 ] , <nl> + outputs_shape [ 2 ] * outputs_shape [ 3 ] , <nl> + outputs_shape [ 4 ] ] ) <nl> + outputs_4d = nn . bias_add ( outputs_4d , <nl> + self . bias_posterior_tensor , <nl> + data_format = " NCHW " ) <nl> + outputs = array_ops . reshape ( outputs_4d , outputs_shape ) <nl> + else : <nl> + outputs = nn . bias_add ( outputs , <nl> + self . bias_posterior_tensor , <nl> + data_format = " NHWC " ) <nl> + return outputs <nl> + <nl> + def _apply_divergence ( self , divergence_fn , posterior , prior , <nl> + posterior_tensor , name ) : <nl> + if ( divergence_fn is None or <nl> + posterior is None or <nl> + prior is None ) : <nl> + divergence = None <nl> + return <nl> + divergence = standard_ops . identity ( <nl> + divergence_fn ( <nl> + posterior , prior , posterior_tensor ) , <nl> + name = name ) <nl> + self . add_loss ( divergence ) <nl> + <nl> + def _compute_output_shape ( self , input_shape ) : <nl> + input_shape = tensor_shape . 
TensorShape ( input_shape ) . as_list ( ) <nl> + if self . data_format = = " channels_last " : <nl> + space = input_shape [ 1 : - 1 ] <nl> + new_space = [ ] <nl> + for i in range ( len ( space ) ) : <nl> + new_dim = utils . conv_output_length ( <nl> + space [ i ] , <nl> + self . kernel_size [ i ] , <nl> + padding = self . padding , <nl> + stride = self . strides [ i ] , <nl> + dilation = self . dilation_rate [ i ] ) <nl> + new_space . append ( new_dim ) <nl> + return tensor_shape . TensorShape ( [ input_shape [ 0 ] ] + new_space + <nl> + [ self . filters ] ) <nl> + else : <nl> + space = input_shape [ 2 : ] <nl> + new_space = [ ] <nl> + for i in range ( len ( space ) ) : <nl> + new_dim = utils . conv_output_length ( <nl> + space [ i ] , <nl> + self . kernel_size [ i ] , <nl> + padding = self . padding , <nl> + stride = self . strides [ i ] , <nl> + dilation = self . dilation_rate [ i ] ) <nl> + new_space . append ( new_dim ) <nl> + return tensor_shape . TensorShape ( [ input_shape [ 0 ] , self . filters ] + <nl> + new_space ) <nl> + <nl> + <nl> + class Conv1DVariational ( _ConvVariational ) : <nl> + " " " 1D convolution layer ( e . g . temporal convolution ) . <nl> + <nl> + This layer creates a convolution kernel that is convolved <nl> + ( actually cross - correlated ) with the layer input to produce a tensor of <nl> + outputs . It may also include a bias addition and activation function <nl> + on the outputs . It assumes the ` kernel ` and / or ` bias ` are drawn from <nl> + distributions . <nl> + <nl> + By default , the layer implements a stochastic forward pass via <nl> + sampling from the kernel and bias posteriors , <nl> + ` ` ` none <nl> + outputs = f ( inputs ; kernel , bias ) , kernel , bias ~ posterior <nl> + ` ` ` <nl> + where f denotes the layer ' s calculation . <nl> + <nl> + The arguments permit separate specification of the surrogate posterior <nl> + ( ` q ( W | x ) ` ) , prior ( ` p ( W ) ` ) , and divergence for both the ` kernel ` and ` bias ` <nl> + distributions . <nl> + <nl> + Arguments : <nl> + filters : Integer , the dimensionality of the output space ( i . e . the number <nl> + of filters in the convolution ) . <nl> + kernel_size : An integer or tuple / list of a single integer , specifying the <nl> + length of the 1D convolution window . <nl> + strides : An integer or tuple / list of a single integer , <nl> + specifying the stride length of the convolution . <nl> + Specifying any stride value ! = 1 is incompatible with specifying <nl> + any ` dilation_rate ` value ! = 1 . <nl> + padding : One of ` " valid " ` or ` " same " ` ( case - insensitive ) . <nl> + data_format : A string , one of ` channels_last ` ( default ) or ` channels_first ` . <nl> + The ordering of the dimensions in the inputs . <nl> + ` channels_last ` corresponds to inputs with shape <nl> + ` ( batch , length , channels ) ` while ` channels_first ` corresponds to <nl> + inputs with shape ` ( batch , channels , length ) ` . <nl> + dilation_rate : An integer or tuple / list of a single integer , specifying <nl> + the dilation rate to use for dilated convolution . <nl> + Currently , specifying any ` dilation_rate ` value ! = 1 is <nl> + incompatible with specifying any ` strides ` value ! = 1 . <nl> + activation : Activation function . Set it to None to maintain a <nl> + linear activation . <nl> + activity_regularizer : Optional regularizer function for the output . <nl> + trainable : Boolean , if ` True ` also add variables to the graph collection <nl> + ` GraphKeys . TRAINABLE_VARIABLES ` ( see ` tf . 
Variable ` ) . <nl> + kernel_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` kernel ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( ) ` . <nl> + kernel_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + kernel_prior_fn : Python ` callable ` which creates ` tf . distributions ` <nl> + instance . See ` default_mean_field_normal_fn ` docstring for required <nl> + parameter signature . <nl> + Default value : ` tf . distributions . Normal ( loc = 0 . , scale = 1 . ) ` . <nl> + kernel_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + bias_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` bias ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( is_singular = True ) ` ( which creates an <nl> + instance of ` tf . distributions . Deterministic ` ) . <nl> + bias_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + bias_prior_fn : Python ` callable ` which creates ` tf . distributions ` instance . <nl> + See ` default_mean_field_normal_fn ` docstring for required parameter <nl> + signature . Default value : ` None ` ( no prior , no variational inference ) <nl> + bias_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + name : A string , the name of the layer . <nl> + <nl> + Properties : <nl> + filters : Python integer , dimensionality of the output space . <nl> + kernel_size : Size of the convolution window . <nl> + strides : Stride length of convolution . <nl> + padding : Python string describing padding approach . <nl> + data_format : Python string describing input data ' s dimensions . <nl> + dilation_rate : Dilation rate for an atrous convolution . <nl> + activation : Activation function ( ` callable ` ) . <nl> + activity_regularizer : Regularizer function for the output . <nl> + kernel_use_local_reparameterization : Python ` bool ` indicating whether <nl> + ` kernel ` calculation should employ the Local Reparameterization Trick . <nl> + kernel_posterior_fn : ` callable ` returning posterior . <nl> + kernel_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + kernel_prior_fn : ` callable ` returning prior . <nl> + kernel_divergence_fn : ` callable ` returning divergence . <nl> + bias_posterior_fn : ` callable ` returning posterior . <nl> + bias_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + bias_prior_fn : ` callable ` returning prior . <nl> + bias_divergence_fn : ` callable ` returning divergence . 
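Before the worked example below, a brief editor's sketch of how the documented ` *_posterior_tensor_fn ` hooks can be used: swapping the default sampling for the posterior mean gives a deterministic forward pass, which can be convenient at evaluation time. This assumes the ` tfp ` alias used in the surrounding examples and that the posterior distributions expose ` mean ( ) ` (true for the default mean-field normal posterior); it is a sketch, not part of the required API.

```python
import tensorflow as tf

tfp = tf.contrib.bayesflow

features = tf.random_uniform([4, 128, 1])
# Use posterior means instead of random samples: deterministic predictions.
layer = tfp.layers.Conv1DVariational(
    16,
    kernel_size=5,
    padding="SAME",
    kernel_posterior_tensor_fn=lambda d: d.mean(),
    bias_posterior_tensor_fn=lambda d: d.mean(),
    activation=tf.nn.relu)
outputs = layer(features)  # shape: [4, 128, 16]
```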
<nl> + <nl> + # # # # Examples <nl> + <nl> + We illustrate a Bayesian neural network with [ variational inference ] ( <nl> + https : / / en . wikipedia . org / wiki / Variational_Bayesian_methods ) , <nl> + assuming a dataset of ` features ` and ` labels ` . <nl> + <nl> + ` ` ` python <nl> + tfp = tf . contrib . bayesflow <nl> + <nl> + net = tf . reshape ( features , [ - 1 , 128 , 1 ] ) <nl> + net = tfp . layers . Conv1DVariational ( 64 , <nl> + kernel_size = 5 , <nl> + padding = " SAME " , <nl> + activation = tf . nn . relu ) ( net ) <nl> + net = tf . reshape ( net , [ - 1 , 128 * 64 ] ) <nl> + logits = tfp . layers . DenseVariational ( 10 ) ( net ) <nl> + neg_log_likelihood = tf . nn . softmax_cross_entropy_with_logits ( <nl> + labels = labels , logits = logits ) <nl> + kl = sum ( tf . get_collection ( tf . GraphKeys . REGULARIZATION_LOSSES ) ) <nl> + loss = neg_log_likelihood + kl <nl> + train_op = tf . train . AdamOptimizer ( ) . minimize ( loss ) <nl> + ` ` ` <nl> + <nl> + It uses reparameterization gradients to minimize the <nl> + Kullback - Leibler divergence up to a constant , also known as the <nl> + negative Evidence Lower Bound . It consists of the sum of two terms : <nl> + the expected negative log - likelihood , which we approximate via <nl> + Monte Carlo ; and the KL divergence , which is added via regularizer <nl> + terms which are arguments to the layer . <nl> + " " " <nl> + <nl> + def __init__ ( <nl> + self , <nl> + filters , <nl> + kernel_size , <nl> + strides = 1 , <nl> + padding = " valid " , <nl> + data_format = " channels_last " , <nl> + dilation_rate = 1 , <nl> + activation = None , <nl> + activity_regularizer = None , <nl> + trainable = True , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> + kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + bias_prior_fn = None , <nl> + bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + name = None , <nl> + * * kwargs ) : <nl> + super ( Conv1DVariational , self ) . 
__init__ ( <nl> + rank = 1 , <nl> + filters = filters , <nl> + kernel_size = kernel_size , <nl> + strides = strides , <nl> + padding = padding , <nl> + data_format = data_format , <nl> + dilation_rate = dilation_rate , <nl> + activation = activation , <nl> + activity_regularizer = activity_regularizer , <nl> + trainable = trainable , <nl> + kernel_posterior_fn = kernel_posterior_fn , <nl> + kernel_posterior_tensor_fn = kernel_posterior_tensor_fn , <nl> + kernel_prior_fn = kernel_prior_fn , <nl> + kernel_divergence_fn = kernel_divergence_fn , <nl> + bias_posterior_fn = bias_posterior_fn , <nl> + bias_posterior_tensor_fn = bias_posterior_tensor_fn , <nl> + bias_prior_fn = bias_prior_fn , <nl> + bias_divergence_fn = bias_divergence_fn , <nl> + name = name , * * kwargs ) <nl> + <nl> + <nl> + def conv1d_variational ( <nl> + inputs , <nl> + filters , <nl> + kernel_size , <nl> + strides = 1 , <nl> + padding = " valid " , <nl> + data_format = " channels_last " , <nl> + dilation_rate = 1 , <nl> + activation = None , <nl> + activity_regularizer = None , <nl> + trainable = True , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> + kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + bias_prior_fn = None , <nl> + bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + name = None , <nl> + reuse = None ) : <nl> + " " " Functional interface for 1D convolution layer ( e . g . temporal convolution ) . <nl> + <nl> + This layer creates a convolution kernel that is convolved <nl> + ( actually cross - correlated ) with the layer input to produce a tensor of <nl> + outputs . It may also include a bias addition and activation function <nl> + on the outputs . It assumes the ` kernel ` and / or ` bias ` are drawn from <nl> + distributions . <nl> + <nl> + By default , the layer implements a stochastic forward pass via <nl> + sampling from the kernel and bias posteriors , <nl> + ` ` ` none <nl> + outputs = f ( inputs ; kernel , bias ) , kernel , bias ~ posterior <nl> + ` ` ` <nl> + where f denotes the layer ' s calculation . <nl> + <nl> + The arguments permit separate specification of the surrogate posterior <nl> + ( ` q ( W | x ) ` ) , prior ( ` p ( W ) ` ) , and divergence for both the ` kernel ` and ` bias ` <nl> + distributions . <nl> + <nl> + Arguments : <nl> + inputs : Tensor input . <nl> + filters : Integer , the dimensionality of the output space ( i . e . the number <nl> + of filters in the convolution ) . <nl> + kernel_size : An integer or tuple / list of a single integer , specifying the <nl> + length of the 1D convolution window . <nl> + strides : An integer or tuple / list of a single integer , <nl> + specifying the stride length of the convolution . <nl> + Specifying any stride value ! = 1 is incompatible with specifying <nl> + any ` dilation_rate ` value ! = 1 . <nl> + padding : One of ` " valid " ` or ` " same " ` ( case - insensitive ) . <nl> + data_format : A string , one of ` channels_last ` ( default ) or ` channels_first ` . 
<nl> + The ordering of the dimensions in the inputs . <nl> + ` channels_last ` corresponds to inputs with shape <nl> + ` ( batch , length , channels ) ` while ` channels_first ` corresponds to <nl> + inputs with shape ` ( batch , channels , length ) ` . <nl> + dilation_rate : An integer or tuple / list of a single integer , specifying <nl> + the dilation rate to use for dilated convolution . <nl> + Currently , specifying any ` dilation_rate ` value ! = 1 is <nl> + incompatible with specifying any ` strides ` value ! = 1 . <nl> + activation : Activation function . Set it to None to maintain a <nl> + linear activation . <nl> + activity_regularizer : Optional regularizer function for the output . <nl> + trainable : Boolean , if ` True ` also add variables to the graph collection <nl> + ` GraphKeys . TRAINABLE_VARIABLES ` ( see ` tf . Variable ` ) . <nl> + kernel_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` kernel ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( ) ` . <nl> + kernel_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + kernel_prior_fn : Python ` callable ` which creates ` tf . distributions ` <nl> + instance . See ` default_mean_field_normal_fn ` docstring for required <nl> + parameter signature . <nl> + Default value : ` tf . distributions . Normal ( loc = 0 . , scale = 1 . ) ` . <nl> + kernel_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + bias_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` bias ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( is_singular = True ) ` ( which creates an <nl> + instance of ` tf . distributions . Deterministic ` ) . <nl> + bias_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + bias_prior_fn : Python ` callable ` which creates ` tf . distributions ` instance . <nl> + See ` default_mean_field_normal_fn ` docstring for required parameter <nl> + signature . Default value : ` None ` ( no prior , no variational inference ) <nl> + bias_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + name : A string , the name of the layer . <nl> + reuse : Boolean , whether to reuse the weights of a previous layer <nl> + by the same name . <nl> + <nl> + Returns : <nl> + Output tensor . <nl> + <nl> + Raises : <nl> + ValueError : if eager execution is enabled . <nl> + <nl> + # # # # Examples <nl> + <nl> + We illustrate a Bayesian neural network with [ variational inference ] ( <nl> + https : / / en . wikipedia . 
org / wiki / Variational_Bayesian_methods ) , <nl> + assuming a dataset of ` features ` and ` labels ` . <nl> + <nl> + ` ` ` python <nl> + tfp = tf . contrib . bayesflow <nl> + <nl> + net = tf . reshape ( features , [ - 1 , 128 , 1 ] ) <nl> + net = tfp . layers . conv1d_variational ( net , <nl> + 64 , <nl> + kernel_size = 5 , <nl> + padding = " SAME " , <nl> + activation = tf . nn . relu ) <nl> + net = tf . reshape ( net , [ - 1 , 128 * 64 ] ) <nl> + logits = tfp . layers . dense_variational ( net , 10 ) <nl> + neg_log_likelihood = tf . nn . softmax_cross_entropy_with_logits ( <nl> + labels = labels , logits = logits ) <nl> + kl = sum ( tf . get_collection ( tf . GraphKeys . REGULARIZATION_LOSSES ) ) <nl> + loss = neg_log_likelihood + kl <nl> + train_op = tf . train . AdamOptimizer ( ) . minimize ( loss ) <nl> + ` ` ` <nl> + <nl> + It uses reparameterization gradients to minimize the <nl> + Kullback - Leibler divergence up to a constant , also known as the <nl> + negative Evidence Lower Bound . It consists of the sum of two terms : <nl> + the expected negative log - likelihood , which we approximate via <nl> + Monte Carlo ; and the KL divergence , which is added via regularizer <nl> + terms which are arguments to the layer . <nl> + " " " <nl> + layer = Conv1DVariational ( <nl> + filters = filters , <nl> + kernel_size = kernel_size , <nl> + strides = strides , <nl> + padding = padding , <nl> + data_format = data_format , <nl> + dilation_rate = dilation_rate , <nl> + activation = activation , <nl> + activity_regularizer = activity_regularizer , <nl> + trainable = trainable , <nl> + kernel_posterior_fn = kernel_posterior_fn , <nl> + kernel_posterior_tensor_fn = kernel_posterior_tensor_fn , <nl> + kernel_prior_fn = kernel_prior_fn , <nl> + kernel_divergence_fn = kernel_divergence_fn , <nl> + bias_posterior_fn = bias_posterior_fn , <nl> + bias_posterior_tensor_fn = bias_posterior_tensor_fn , <nl> + bias_prior_fn = bias_prior_fn , <nl> + bias_divergence_fn = bias_divergence_fn , <nl> + name = name , <nl> + dtype = inputs . dtype . base_dtype , <nl> + _scope = name , <nl> + _reuse = reuse ) <nl> + return layer . apply ( inputs ) <nl> + <nl> + <nl> + class Conv2DVariational ( _ConvVariational ) : <nl> + " " " 2D convolution layer ( e . g . spatial convolution over images ) . <nl> + <nl> + This layer creates a convolution kernel that is convolved <nl> + ( actually cross - correlated ) with the layer input to produce a tensor of <nl> + outputs . It may also include a bias addition and activation function <nl> + on the outputs . It assumes the ` kernel ` and / or ` bias ` are drawn from <nl> + distributions . <nl> + <nl> + By default , the layer implements a stochastic forward pass via <nl> + sampling from the kernel and bias posteriors , <nl> + ` ` ` none <nl> + outputs = f ( inputs ; kernel , bias ) , kernel , bias ~ posterior <nl> + ` ` ` <nl> + where f denotes the layer ' s calculation . <nl> + <nl> + The arguments permit separate specification of the surrogate posterior <nl> + ( ` q ( W | x ) ` ) , prior ( ` p ( W ) ` ) , and divergence for both the ` kernel ` and ` bias ` <nl> + distributions . <nl> + <nl> + Arguments : <nl> + filters : Integer , the dimensionality of the output space ( i . e . the number <nl> + of filters in the convolution ) . <nl> + kernel_size : An integer or tuple / list of 2 integers , specifying the <nl> + height and width of the 2D convolution window . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . 
<nl> + strides : An integer or tuple / list of 2 integers , <nl> + specifying the strides of the convolution along the height and width . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Specifying any stride value ! = 1 is incompatible with specifying <nl> + any ` dilation_rate ` value ! = 1 . <nl> + padding : One of ` " valid " ` or ` " same " ` ( case - insensitive ) . <nl> + data_format : A string , one of ` channels_last ` ( default ) or ` channels_first ` . <nl> + The ordering of the dimensions in the inputs . <nl> + ` channels_last ` corresponds to inputs with shape <nl> + ` ( batch , height , width , channels ) ` while ` channels_first ` corresponds to <nl> + inputs with shape ` ( batch , channels , height , width ) ` . <nl> + <nl> + dilation_rate : An integer or tuple / list of 2 integers , specifying <nl> + the dilation rate to use for dilated convolution . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Currently , specifying any ` dilation_rate ` value ! = 1 is <nl> + incompatible with specifying any stride value ! = 1 . <nl> + activation : Activation function . Set it to None to maintain a <nl> + linear activation . <nl> + activity_regularizer : Optional regularizer function for the output . <nl> + trainable : Boolean , if ` True ` also add variables to the graph collection <nl> + ` GraphKeys . TRAINABLE_VARIABLES ` ( see ` tf . Variable ` ) . <nl> + kernel_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` kernel ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( ) ` . <nl> + kernel_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + kernel_prior_fn : Python ` callable ` which creates ` tf . distributions ` <nl> + instance . See ` default_mean_field_normal_fn ` docstring for required <nl> + parameter signature . <nl> + Default value : ` tf . distributions . Normal ( loc = 0 . , scale = 1 . ) ` . <nl> + kernel_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + bias_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` bias ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( is_singular = True ) ` ( which creates an <nl> + instance of ` tf . distributions . Deterministic ` ) . <nl> + bias_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + bias_prior_fn : Python ` callable ` which creates ` tf . distributions ` instance . <nl> + See ` default_mean_field_normal_fn ` docstring for required parameter <nl> + signature . 
Default value : ` None ` ( no prior , no variational inference ) <nl> + bias_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + name : A string , the name of the layer . <nl> + <nl> + Properties : <nl> + filters : Python integer , dimensionality of the output space . <nl> + kernel_size : Size of the convolution window . <nl> + strides : Stride length of convolution . <nl> + padding : Python string describing padding approach . <nl> + data_format : Python string describing input data ' s dimensions . <nl> + dilation_rate : Dilation rate for an atrous convolution . <nl> + activation : Activation function ( ` callable ` ) . <nl> + activity_regularizer : Regularizer function for the output . <nl> + kernel_use_local_reparameterization : Python ` bool ` indicating whether <nl> + ` kernel ` calculation should employ the Local Reparameterization Trick . <nl> + kernel_posterior_fn : ` callable ` returning posterior . <nl> + kernel_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + kernel_prior_fn : ` callable ` returning prior . <nl> + kernel_divergence_fn : ` callable ` returning divergence . <nl> + bias_posterior_fn : ` callable ` returning posterior . <nl> + bias_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + bias_prior_fn : ` callable ` returning prior . <nl> + bias_divergence_fn : ` callable ` returning divergence . <nl> + <nl> + # # # # Examples <nl> + <nl> + We illustrate a Bayesian neural network with [ variational inference ] ( <nl> + https : / / en . wikipedia . org / wiki / Variational_Bayesian_methods ) , <nl> + assuming a dataset of ` features ` and ` labels ` . <nl> + <nl> + ` ` ` python <nl> + tfp = tf . contrib . bayesflow <nl> + <nl> + net = tf . reshape ( features , [ - 1 , 32 , 32 , 3 ] ) <nl> + net = tfp . layers . Conv2DVariational ( 64 , <nl> + kernel_size = 5 , <nl> + padding = " SAME " , <nl> + activation = tf . nn . relu ) ( net ) <nl> + net = tf . layers . MaxPooling2D ( pool_size = 2 , <nl> + strides = 2 , <nl> + padding = " SAME " ) ( net ) <nl> + net = tf . reshape ( net , [ - 1 , 16 * 16 * 64 ] ) <nl> + logits = tfp . layers . DenseVariational ( 10 ) ( net ) <nl> + neg_log_likelihood = tf . nn . softmax_cross_entropy_with_logits ( <nl> + labels = labels , logits = logits ) <nl> + kl = sum ( tf . get_collection ( tf . GraphKeys . REGULARIZATION_LOSSES ) ) <nl> + loss = neg_log_likelihood + kl <nl> + train_op = tf . train . AdamOptimizer ( ) . minimize ( loss ) <nl> + ` ` ` <nl> + <nl> + It uses reparameterization gradients to minimize the <nl> + Kullback - Leibler divergence up to a constant , also known as the <nl> + negative Evidence Lower Bound . It consists of the sum of two terms : <nl> + the expected negative log - likelihood , which we approximate via <nl> + Monte Carlo ; and the KL divergence , which is added via regularizer <nl> + terms which are arguments to the layer . <nl> + " " " <nl> + <nl> + def __init__ ( <nl> + self , <nl> + filters , <nl> + kernel_size , <nl> + strides = ( 1 , 1 ) , <nl> + padding = " valid " , <nl> + data_format = " channels_last " , <nl> + dilation_rate = ( 1 , 1 ) , <nl> + activation = None , <nl> + activity_regularizer = None , <nl> + trainable = True , <nl> + kernel_posterior_fn = layers_util . 
default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> + kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + bias_prior_fn = None , <nl> + bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + name = None , <nl> + * * kwargs ) : <nl> + super ( Conv2DVariational , self ) . __init__ ( <nl> + rank = 2 , <nl> + filters = filters , <nl> + kernel_size = kernel_size , <nl> + strides = strides , <nl> + padding = padding , <nl> + data_format = data_format , <nl> + dilation_rate = dilation_rate , <nl> + activation = activation , <nl> + activity_regularizer = activity_regularizer , <nl> + trainable = trainable , <nl> + kernel_posterior_fn = kernel_posterior_fn , <nl> + kernel_posterior_tensor_fn = kernel_posterior_tensor_fn , <nl> + kernel_prior_fn = kernel_prior_fn , <nl> + kernel_divergence_fn = kernel_divergence_fn , <nl> + bias_posterior_fn = bias_posterior_fn , <nl> + bias_posterior_tensor_fn = bias_posterior_tensor_fn , <nl> + bias_prior_fn = bias_prior_fn , <nl> + bias_divergence_fn = bias_divergence_fn , <nl> + name = name , * * kwargs ) <nl> + <nl> + <nl> + def conv2d_variational ( <nl> + inputs , <nl> + filters , <nl> + kernel_size , <nl> + strides = ( 1 , 1 ) , <nl> + padding = " valid " , <nl> + data_format = " channels_last " , <nl> + dilation_rate = ( 1 , 1 ) , <nl> + activation = None , <nl> + activity_regularizer = None , <nl> + trainable = True , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> + kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + bias_prior_fn = None , <nl> + bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + name = None , <nl> + reuse = None ) : <nl> + " " " Functional interface for the 2D convolution layer . <nl> + <nl> + This layer creates a convolution kernel that is convolved <nl> + ( actually cross - correlated ) with the layer input to produce a tensor of <nl> + outputs . It may also include a bias addition and activation function <nl> + on the outputs . It assumes the ` kernel ` and / or ` bias ` are drawn from <nl> + distributions . <nl> + <nl> + By default , the layer implements a stochastic forward pass via <nl> + sampling from the kernel and bias posteriors , <nl> + ` ` ` none <nl> + outputs = f ( inputs ; kernel , bias ) , kernel , bias ~ posterior <nl> + ` ` ` <nl> + where f denotes the layer ' s calculation . 
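As an editor's aside (a sketch, assuming the analytic KL is unavailable or too costly): the ` ( posterior , prior , sample ) ` signature of the ` *_divergence_fn ` arguments described below also admits a single-sample Monte Carlo estimate of the KL term, since the layer passes in the same kernel or bias sample it used for the forward pass.

```python
import tensorflow as tf

tfp = tf.contrib.bayesflow

def mc_kl(q, p, sample):
  # Single-sample Monte Carlo estimate of KL[q || p], reusing the sample the
  # layer already drew for its stochastic forward pass.
  return tf.reduce_sum(q.log_prob(sample) - p.log_prob(sample))

images = tf.random_uniform([8, 32, 32, 3])
net = tfp.layers.conv2d_variational(
    images, 64, kernel_size=5, padding="SAME",
    kernel_divergence_fn=mc_kl,
    activation=tf.nn.relu)
kl_penalty = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
```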
<nl> + <nl> + The arguments permit separate specification of the surrogate posterior <nl> + ( ` q ( W | x ) ` ) , prior ( ` p ( W ) ` ) , and divergence for both the ` kernel ` and ` bias ` <nl> + distributions . <nl> + <nl> + Arguments : <nl> + inputs : Tensor input . <nl> + filters : Integer , the dimensionality of the output space ( i . e . the number <nl> + of filters in the convolution ) . <nl> + kernel_size : An integer or tuple / list of 2 integers , specifying the <nl> + height and width of the 2D convolution window . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + strides : An integer or tuple / list of 2 integers , <nl> + specifying the strides of the convolution along the height and width . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Specifying any stride value ! = 1 is incompatible with specifying <nl> + any ` dilation_rate ` value ! = 1 . <nl> + padding : One of ` " valid " ` or ` " same " ` ( case - insensitive ) . <nl> + data_format : A string , one of ` channels_last ` ( default ) or ` channels_first ` . <nl> + The ordering of the dimensions in the inputs . <nl> + ` channels_last ` corresponds to inputs with shape <nl> + ` ( batch , height , width , channels ) ` while ` channels_first ` corresponds to <nl> + inputs with shape ` ( batch , channels , height , width ) ` . <nl> + <nl> + dilation_rate : An integer or tuple / list of 2 integers , specifying <nl> + the dilation rate to use for dilated convolution . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Currently , specifying any ` dilation_rate ` value ! = 1 is <nl> + incompatible with specifying any stride value ! = 1 . <nl> + activation : Activation function . Set it to None to maintain a <nl> + linear activation . <nl> + activity_regularizer : Optional regularizer function for the output . <nl> + trainable : Boolean , if ` True ` also add variables to the graph collection <nl> + ` GraphKeys . TRAINABLE_VARIABLES ` ( see ` tf . Variable ` ) . <nl> + kernel_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` kernel ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( ) ` . <nl> + kernel_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + kernel_prior_fn : Python ` callable ` which creates ` tf . distributions ` <nl> + instance . See ` default_mean_field_normal_fn ` docstring for required <nl> + parameter signature . <nl> + Default value : ` tf . distributions . Normal ( loc = 0 . , scale = 1 . ) ` . <nl> + kernel_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + bias_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` bias ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( is_singular = True ) ` ( which creates an <nl> + instance of ` tf . distributions . 
Deterministic ` ) . <nl> + bias_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + bias_prior_fn : Python ` callable ` which creates ` tf . distributions ` instance . <nl> + See ` default_mean_field_normal_fn ` docstring for required parameter <nl> + signature . Default value : ` None ` ( no prior , no variational inference ) <nl> + bias_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + name : A string , the name of the layer . <nl> + reuse : Boolean , whether to reuse the weights of a previous layer <nl> + by the same name . <nl> + <nl> + Returns : <nl> + Output tensor . <nl> + <nl> + Raises : <nl> + ValueError : if eager execution is enabled . <nl> + <nl> + # # # # Examples <nl> + <nl> + We illustrate a Bayesian neural network with [ variational inference ] ( <nl> + https : / / en . wikipedia . org / wiki / Variational_Bayesian_methods ) , <nl> + assuming a dataset of ` features ` and ` labels ` . <nl> + <nl> + ` ` ` python <nl> + tfp = tf . contrib . bayesflow <nl> + <nl> + net = tf . reshape ( features , [ - 1 , 32 , 32 , 3 ] ) <nl> + net = tfp . layers . conv2d_variational ( net , <nl> + 64 , <nl> + kernel_size = 5 , <nl> + padding = " SAME " , <nl> + activation = tf . nn . relu ) <nl> + net = tf . layers . max_pooling2d ( net , <nl> + pool_size = 2 , <nl> + strides = 2 , <nl> + padding = " SAME " ) <nl> + net = tf . reshape ( net , [ - 1 , 8 * 8 * 64 ] ) <nl> + logits = tfp . layers . dense_variational ( net , 10 ) <nl> + neg_log_likelihood = tf . nn . softmax_cross_entropy_with_logits ( <nl> + labels = labels , logits = logits ) <nl> + kl = sum ( tf . get_collection ( tf . GraphKeys . REGULARIZATION_LOSSES ) ) <nl> + loss = neg_log_likelihood + kl <nl> + train_op = tf . train . AdamOptimizer ( ) . minimize ( loss ) <nl> + ` ` ` <nl> + <nl> + It uses reparameterization gradients to minimize the <nl> + Kullback - Leibler divergence up to a constant , also known as the <nl> + negative Evidence Lower Bound . It consists of the sum of two terms : <nl> + the expected negative log - likelihood , which we approximate via <nl> + Monte Carlo ; and the KL divergence , which is added via regularizer <nl> + terms which are arguments to the layer . <nl> + " " " <nl> + layer = Conv2DVariational ( <nl> + filters = filters , <nl> + kernel_size = kernel_size , <nl> + strides = strides , <nl> + padding = padding , <nl> + data_format = data_format , <nl> + dilation_rate = dilation_rate , <nl> + activation = activation , <nl> + activity_regularizer = activity_regularizer , <nl> + trainable = trainable , <nl> + kernel_posterior_fn = kernel_posterior_fn , <nl> + kernel_posterior_tensor_fn = kernel_posterior_tensor_fn , <nl> + kernel_prior_fn = kernel_prior_fn , <nl> + kernel_divergence_fn = kernel_divergence_fn , <nl> + bias_posterior_fn = bias_posterior_fn , <nl> + bias_posterior_tensor_fn = bias_posterior_tensor_fn , <nl> + bias_prior_fn = bias_prior_fn , <nl> + bias_divergence_fn = bias_divergence_fn , <nl> + name = name , <nl> + dtype = inputs . dtype . base_dtype , <nl> + _scope = name , <nl> + _reuse = reuse ) <nl> + return layer . 
apply ( inputs ) <nl> + <nl> + <nl> + class Conv3DVariational ( _ConvVariational ) : <nl> + " " " 3D convolution layer ( e . g . spatial convolution over volumes ) . <nl> + <nl> + This layer creates a convolution kernel that is convolved <nl> + ( actually cross - correlated ) with the layer input to produce a tensor of <nl> + outputs . It may also include a bias addition and activation function <nl> + on the outputs . It assumes the ` kernel ` and / or ` bias ` are drawn from <nl> + distributions . <nl> + <nl> + By default , the layer implements a stochastic forward pass via <nl> + sampling from the kernel and bias posteriors , <nl> + ` ` ` none <nl> + outputs = f ( inputs ; kernel , bias ) , kernel , bias ~ posterior <nl> + ` ` ` <nl> + where f denotes the layer ' s calculation . <nl> + <nl> + The arguments permit separate specification of the surrogate posterior <nl> + ( ` q ( W | x ) ` ) , prior ( ` p ( W ) ` ) , and divergence for both the ` kernel ` and ` bias ` <nl> + distributions . <nl> + <nl> + Arguments : <nl> + filters : Integer , the dimensionality of the output space ( i . e . the number <nl> + of filters in the convolution ) . <nl> + kernel_size : An integer or tuple / list of 3 integers , specifying the <nl> + depth , height and width of the 3D convolution window . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + strides : An integer or tuple / list of 3 integers , <nl> + specifying the strides of the convolution along the depth , <nl> + height and width . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Specifying any stride value ! = 1 is incompatible with specifying <nl> + any ` dilation_rate ` value ! = 1 . <nl> + padding : One of ` " valid " ` or ` " same " ` ( case - insensitive ) . <nl> + data_format : A string , one of ` channels_last ` ( default ) or ` channels_first ` . <nl> + The ordering of the dimensions in the inputs . <nl> + ` channels_last ` corresponds to inputs with shape <nl> + ` ( batch , depth , height , width , channels ) ` while ` channels_first ` <nl> + corresponds to inputs with shape <nl> + ` ( batch , channels , depth , height , width ) ` . <nl> + dilation_rate : An integer or tuple / list of 3 integers , specifying <nl> + the dilation rate to use for dilated convolution . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Currently , specifying any ` dilation_rate ` value ! = 1 is <nl> + incompatible with specifying any stride value ! = 1 . <nl> + activation : Activation function . Set it to None to maintain a <nl> + linear activation . <nl> + activity_regularizer : Optional regularizer function for the output . <nl> + trainable : Boolean , if ` True ` also add variables to the graph collection <nl> + ` GraphKeys . TRAINABLE_VARIABLES ` ( see ` tf . Variable ` ) . <nl> + kernel_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` kernel ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( ) ` . <nl> + kernel_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + kernel_prior_fn : Python ` callable ` which creates ` tf . distributions ` <nl> + instance . 
See ` default_mean_field_normal_fn ` docstring for required <nl> + parameter signature . <nl> + Default value : ` tf . distributions . Normal ( loc = 0 . , scale = 1 . ) ` . <nl> + kernel_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + bias_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` bias ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( is_singular = True ) ` ( which creates an <nl> + instance of ` tf . distributions . Deterministic ` ) . <nl> + bias_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + bias_prior_fn : Python ` callable ` which creates ` tf . distributions ` instance . <nl> + See ` default_mean_field_normal_fn ` docstring for required parameter <nl> + signature . Default value : ` None ` ( no prior , no variational inference ) <nl> + bias_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + name : A string , the name of the layer . <nl> + <nl> + Properties : <nl> + filters : Python integer , dimensionality of the output space . <nl> + kernel_size : Size of the convolution window . <nl> + strides : Stride length of convolution . <nl> + padding : Python string describing padding approach . <nl> + data_format : Python string describing input data ' s dimensions . <nl> + dilation_rate : Dilation rate for an atrous convolution . <nl> + activation : Activation function ( ` callable ` ) . <nl> + activity_regularizer : Regularizer function for the output . <nl> + kernel_use_local_reparameterization : Python ` bool ` indicating whether <nl> + ` kernel ` calculation should employ the Local Reparameterization Trick . <nl> + kernel_posterior_fn : ` callable ` returning posterior . <nl> + kernel_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + kernel_prior_fn : ` callable ` returning prior . <nl> + kernel_divergence_fn : ` callable ` returning divergence . <nl> + bias_posterior_fn : ` callable ` returning posterior . <nl> + bias_posterior_tensor_fn : ` callable ` operating on posterior . <nl> + bias_prior_fn : ` callable ` returning prior . <nl> + bias_divergence_fn : ` callable ` returning divergence . <nl> + <nl> + # # # # Examples <nl> + <nl> + We illustrate a Bayesian neural network with [ variational inference ] ( <nl> + https : / / en . wikipedia . org / wiki / Variational_Bayesian_methods ) , <nl> + assuming a dataset of ` features ` and ` labels ` . <nl> + <nl> + ` ` ` python <nl> + tfp = tf . contrib . bayesflow <nl> + <nl> + net = tf . reshape ( features , [ - 1 , 256 , 32 , 32 , 3 ] ) <nl> + net = tfp . layers . Conv3DVariational ( 64 , <nl> + kernel_size = 5 , <nl> + padding = " SAME " , <nl> + activation = tf . nn . relu ) ( net ) <nl> + net = tf . layers . 
MaxPooling2D ( pool_size = 2 , <nl> + strides = 2 , <nl> + padding = " SAME " ) ( net ) <nl> + net = tf . reshape ( net , [ - 1 , 256 * 8 * 8 * 64 ] ) <nl> + logits = tfp . layers . DenseVariational ( 10 ) ( net ) <nl> + neg_log_likelihood = tf . nn . softmax_cross_entropy_with_logits ( <nl> + labels = labels , logits = logits ) <nl> + kl = sum ( tf . get_collection ( tf . GraphKeys . REGULARIZATION_LOSSES ) ) <nl> + loss = neg_log_likelihood + kl <nl> + train_op = tf . train . AdamOptimizer ( ) . minimize ( loss ) <nl> + ` ` ` <nl> + <nl> + It uses reparameterization gradients to minimize the <nl> + Kullback - Leibler divergence up to a constant , also known as the <nl> + negative Evidence Lower Bound . It consists of the sum of two terms : <nl> + the expected negative log - likelihood , which we approximate via <nl> + Monte Carlo ; and the KL divergence , which is added via regularizer <nl> + terms which are arguments to the layer . <nl> + " " " <nl> + <nl> + def __init__ ( <nl> + self , <nl> + filters , <nl> + kernel_size , <nl> + strides = ( 1 , 1 , 1 ) , <nl> + padding = " valid " , <nl> + data_format = " channels_last " , <nl> + dilation_rate = ( 1 , 1 , 1 ) , <nl> + activation = None , <nl> + activity_regularizer = None , <nl> + trainable = True , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> + kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + bias_prior_fn = None , <nl> + bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + name = None , <nl> + * * kwargs ) : <nl> + super ( Conv3DVariational , self ) . __init__ ( <nl> + rank = 3 , <nl> + filters = filters , <nl> + kernel_size = kernel_size , <nl> + strides = strides , <nl> + padding = padding , <nl> + data_format = data_format , <nl> + dilation_rate = dilation_rate , <nl> + activation = activation , <nl> + activity_regularizer = activity_regularizer , <nl> + trainable = trainable , <nl> + kernel_posterior_fn = kernel_posterior_fn , <nl> + kernel_posterior_tensor_fn = kernel_posterior_tensor_fn , <nl> + kernel_prior_fn = kernel_prior_fn , <nl> + kernel_divergence_fn = kernel_divergence_fn , <nl> + bias_posterior_fn = bias_posterior_fn , <nl> + bias_posterior_tensor_fn = bias_posterior_tensor_fn , <nl> + bias_prior_fn = bias_prior_fn , <nl> + bias_divergence_fn = bias_divergence_fn , <nl> + name = name , * * kwargs ) <nl> + <nl> + <nl> + def conv3d_variational ( <nl> + inputs , <nl> + filters , <nl> + kernel_size , <nl> + strides = ( 1 , 1 , 1 ) , <nl> + padding = " valid " , <nl> + data_format = " channels_last " , <nl> + dilation_rate = ( 1 , 1 , 1 ) , <nl> + activation = None , <nl> + activity_regularizer = None , <nl> + trainable = True , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> + loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . 
) ) , <nl> + kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> + bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> + bias_prior_fn = None , <nl> + bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> + name = None , <nl> + reuse = None ) : <nl> + " " " Functional interface for the 3D convolution layer . <nl> + <nl> + This layer creates a convolution kernel that is convolved <nl> + ( actually cross - correlated ) with the layer input to produce a tensor of <nl> + outputs . It may also include a bias addition and activation function <nl> + on the outputs . It assumes the ` kernel ` and / or ` bias ` are drawn from <nl> + distributions . <nl> + <nl> + By default , the layer implements a stochastic forward pass via <nl> + sampling from the kernel and bias posteriors , <nl> + ` ` ` none <nl> + outputs = f ( inputs ; kernel , bias ) , kernel , bias ~ posterior <nl> + ` ` ` <nl> + where f denotes the layer ' s calculation . <nl> + <nl> + The arguments permit separate specification of the surrogate posterior <nl> + ( ` q ( W | x ) ` ) , prior ( ` p ( W ) ` ) , and divergence for both the ` kernel ` and ` bias ` <nl> + distributions . <nl> + <nl> + Arguments : <nl> + inputs : Tensor input . <nl> + filters : Integer , the dimensionality of the output space ( i . e . the number <nl> + of filters in the convolution ) . <nl> + kernel_size : An integer or tuple / list of 3 integers , specifying the <nl> + depth , height and width of the 3D convolution window . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + strides : An integer or tuple / list of 3 integers , <nl> + specifying the strides of the convolution along the depth , <nl> + height and width . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Specifying any stride value ! = 1 is incompatible with specifying <nl> + any ` dilation_rate ` value ! = 1 . <nl> + padding : One of ` " valid " ` or ` " same " ` ( case - insensitive ) . <nl> + data_format : A string , one of ` channels_last ` ( default ) or ` channels_first ` . <nl> + The ordering of the dimensions in the inputs . <nl> + ` channels_last ` corresponds to inputs with shape <nl> + ` ( batch , depth , height , width , channels ) ` while ` channels_first ` <nl> + corresponds to inputs with shape <nl> + ` ( batch , channels , depth , height , width ) ` . <nl> + dilation_rate : An integer or tuple / list of 3 integers , specifying <nl> + the dilation rate to use for dilated convolution . <nl> + Can be a single integer to specify the same value for <nl> + all spatial dimensions . <nl> + Currently , specifying any ` dilation_rate ` value ! = 1 is <nl> + incompatible with specifying any stride value ! = 1 . <nl> + activation : Activation function . Set it to None to maintain a <nl> + linear activation . <nl> + activity_regularizer : Optional regularizer function for the output . <nl> + trainable : Boolean , if ` True ` also add variables to the graph collection <nl> + ` GraphKeys . TRAINABLE_VARIABLES ` ( see ` tf . Variable ` ) . <nl> + kernel_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` kernel ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( ) ` . 
<nl> + kernel_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + kernel_prior_fn : Python ` callable ` which creates ` tf . distributions ` <nl> + instance . See ` default_mean_field_normal_fn ` docstring for required <nl> + parameter signature . <nl> + Default value : ` tf . distributions . Normal ( loc = 0 . , scale = 1 . ) ` . <nl> + kernel_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + sample is a ` Tensor ` . <nl> + bias_posterior_fn : Python ` callable ` which creates <nl> + ` tf . distributions . Distribution ` instance representing the surrogate <nl> + posterior of the ` bias ` parameter . Default value : <nl> + ` default_mean_field_normal_fn ( is_singular = True ) ` ( which creates an <nl> + instance of ` tf . distributions . Deterministic ` ) . <nl> + bias_posterior_tensor_fn : Python ` callable ` which takes a <nl> + ` tf . distributions . Distribution ` instance and returns a representative <nl> + value . Default value : ` lambda d : d . sample ( ) ` . <nl> + bias_prior_fn : Python ` callable ` which creates ` tf . distributions ` instance . <nl> + See ` default_mean_field_normal_fn ` docstring for required parameter <nl> + signature . Default value : ` None ` ( no prior , no variational inference ) <nl> + bias_divergence_fn : Python ` callable ` which takes the surrogate posterior <nl> + distribution , prior distribution and random variate sample ( s ) from the <nl> + surrogate posterior and computes or approximates the KL divergence . The <nl> + distributions are ` tf . distributions . Distribution ` - like instances and the <nl> + name : A string , the name of the layer . <nl> + reuse : Boolean , whether to reuse the weights of a previous layer <nl> + by the same name . <nl> + <nl> + Returns : <nl> + Output tensor . <nl> + <nl> + Raises : <nl> + ValueError : if eager execution is enabled . <nl> + <nl> + # # # # Examples <nl> + <nl> + We illustrate a Bayesian neural network with [ variational inference ] ( <nl> + https : / / en . wikipedia . org / wiki / Variational_Bayesian_methods ) , <nl> + assuming a dataset of ` features ` and ` labels ` . <nl> + <nl> + ` ` ` python <nl> + tfp = tf . contrib . bayesflow <nl> + <nl> + net = tf . reshape ( features , [ - 1 , 256 , 32 , 32 , 3 ] ) <nl> + net = tfp . layers . conv3d_variational ( net , <nl> + 64 , <nl> + kernel_size = 5 , <nl> + padding = " SAME " , <nl> + activation = tf . nn . relu ) <nl> + net = tf . layers . max_pooling2d ( net , <nl> + pool_size = 2 , <nl> + strides = 2 , <nl> + padding = " SAME " ) <nl> + net = tf . reshape ( net , [ - 1 , 256 * 8 * 8 * 64 ] ) <nl> + logits = tfp . layers . dense_variational ( net , 10 ) <nl> + neg_log_likelihood = tf . nn . softmax_cross_entropy_with_logits ( <nl> + labels = labels , logits = logits ) <nl> + kl = sum ( tf . get_collection ( tf . GraphKeys . REGULARIZATION_LOSSES ) ) <nl> + loss = neg_log_likelihood + kl <nl> + train_op = tf . train . AdamOptimizer ( ) . 
minimize ( loss ) <nl> + ` ` ` <nl> + <nl> + It uses reparameterization gradients to minimize the <nl> + Kullback - Leibler divergence up to a constant , also known as the <nl> + negative Evidence Lower Bound . It consists of the sum of two terms : <nl> + the expected negative log - likelihood , which we approximate via <nl> + Monte Carlo ; and the KL divergence , which is added via regularizer <nl> + terms which are arguments to the layer . <nl> + " " " <nl> + layer = Conv3DVariational ( <nl> + filters = filters , <nl> + kernel_size = kernel_size , <nl> + strides = strides , <nl> + padding = padding , <nl> + data_format = data_format , <nl> + dilation_rate = dilation_rate , <nl> + activation = activation , <nl> + activity_regularizer = activity_regularizer , <nl> + trainable = trainable , <nl> + kernel_posterior_fn = kernel_posterior_fn , <nl> + kernel_posterior_tensor_fn = kernel_posterior_tensor_fn , <nl> + kernel_prior_fn = kernel_prior_fn , <nl> + kernel_divergence_fn = kernel_divergence_fn , <nl> + bias_posterior_fn = bias_posterior_fn , <nl> + bias_posterior_tensor_fn = bias_posterior_tensor_fn , <nl> + bias_prior_fn = bias_prior_fn , <nl> + bias_divergence_fn = bias_divergence_fn , <nl> + name = name , <nl> + dtype = inputs . dtype . base_dtype , <nl> + _scope = name , <nl> + _reuse = reuse ) <nl> + return layer . apply ( inputs ) <nl> + <nl> + <nl> + # Aliases <nl> + <nl> + Convolution1DVariational = Conv1DVariational <nl> + Convolution2DVariational = Conv2DVariational <nl> + Convolution3DVariational = Conv3DVariational <nl> + convolution1d_variational = conv1d_variational <nl> + convolution2d_variational = conv2d_variational <nl> + convolution3d_variational = conv3d_variational <nl> mmm a / tensorflow / contrib / bayesflow / python / ops / layers_dense_variational_impl . py <nl> ppp b / tensorflow / contrib / bayesflow / python / ops / layers_dense_variational_impl . py <nl> <nl> @ @ dense_reparameterization <nl> @ @ dense_local_reparameterization <nl> @ @ dense_flipout <nl> - <nl> - @ @ default_loc_scale_fn <nl> - @ @ default_mean_field_normal_fn <nl> " " " <nl> <nl> from __future__ import absolute_import <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - import numpy as np <nl> - <nl> - from tensorflow . contrib . distributions . python . ops import deterministic as deterministic_lib <nl> + from tensorflow . contrib . bayesflow . python . ops import layers_util <nl> from tensorflow . contrib . distributions . python . ops import independent as independent_lib <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_shape <nl> from tensorflow . python . layers import base as layers_lib <nl> from tensorflow . python . ops import array_ops <nl> - from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import nn <nl> - from tensorflow . python . ops import nn_ops <nl> from tensorflow . python . ops import random_ops <nl> from tensorflow . python . ops import standard_ops <nl> from tensorflow . python . ops . distributions import kullback_leibler as kl_lib <nl> <nl> " dense_reparameterization " , <nl> " dense_local_reparameterization " , <nl> " dense_flipout " , <nl> - " default_loc_scale_fn " , <nl> - " default_mean_field_normal_fn " , <nl> ] <nl> <nl> <nl> - def default_loc_scale_fn ( <nl> - is_singular = False , <nl> - loc_initializer = init_ops . 
random_normal_initializer ( stddev = 0 . 1 ) , <nl> - untransformed_scale_initializer = init_ops . random_normal_initializer ( <nl> - mean = - 3 . , stddev = 0 . 1 ) , <nl> - loc_regularizer = None , <nl> - untransformed_scale_regularizer = None , <nl> - loc_constraint = None , <nl> - untransformed_scale_constraint = None ) : <nl> - " " " Makes closure which creates ` loc ` , ` scale ` params from ` tf . get_variable ` . <nl> - <nl> - This function produces a closure which produces ` loc ` , ` scale ` using <nl> - ` tf . get_variable ` . The closure accepts the following arguments : <nl> - <nl> - dtype : Type of parameter ' s event . <nl> - shape : Python ` list ` - like representing the parameter ' s event shape . <nl> - name : Python ` str ` name prepended to any created ( or existing ) <nl> - ` tf . Variable ` s . <nl> - trainable : Python ` bool ` indicating all created ` tf . Variable ` s should be <nl> - added to the graph collection ` GraphKeys . TRAINABLE_VARIABLES ` . <nl> - add_variable_fn : ` tf . get_variable ` - like ` callable ` used to create ( or <nl> - access existing ) ` tf . Variable ` s . <nl> - <nl> - Args : <nl> - is_singular : Python ` bool ` indicating if ` scale is None ` . Default : ` False ` . <nl> - loc_initializer : Initializer function for the ` loc ` parameters . <nl> - The default is ` tf . random_normal_initializer ( mean = 0 . , stddev = 0 . 1 ) ` . <nl> - untransformed_scale_initializer : Initializer function for the ` scale ` <nl> - parameters . Default value : ` tf . random_normal_initializer ( mean = - 3 . , <nl> - stddev = 0 . 1 ) ` . This implies the softplus transformed result has mean <nl> - approximately ` 0 . 05 ` and std . deviation approximately ` 0 . 005 ` . <nl> - loc_regularizer : Regularizer function for the ` loc ` parameters . <nl> - The default ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> - untransformed_scale_regularizer : Regularizer function for the ` scale ` <nl> - parameters . The default ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> - loc_constraint : An optional projection function to be applied to the <nl> - loc after being updated by an ` Optimizer ` . The function must take as input <nl> - the unprojected variable and must return the projected variable ( which <nl> - must have the same shape ) . Constraints are not safe to use when doing <nl> - asynchronous distributed training . <nl> - The default ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> - untransformed_scale_constraint : An optional projection function to be <nl> - applied to the ` scale ` parameters after being updated by an ` Optimizer ` <nl> - ( e . g . used to implement norm constraints or value constraints ) . The <nl> - function must take as input the unprojected variable and must return the <nl> - projected variable ( which must have the same shape ) . Constraints are not <nl> - safe to use when doing asynchronous distributed training . The default <nl> - ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> - <nl> - Returns : <nl> - default_loc_scale_fn : Python ` callable ` which instantiates ` loc ` , ` scale ` <nl> - parameters from args : ` dtype , shape , name , trainable , add_variable_fn ` . <nl> - " " " <nl> - def _fn ( dtype , shape , name , trainable , add_variable_fn ) : <nl> - " " " Creates ` loc ` , ` scale ` parameters . 
" " " <nl> - loc = add_variable_fn ( <nl> - name = name + " _loc " , <nl> - shape = shape , <nl> - initializer = loc_initializer , <nl> - regularizer = loc_regularizer , <nl> - constraint = loc_constraint , <nl> - dtype = dtype , <nl> - trainable = trainable ) <nl> - if is_singular : <nl> - return loc , None <nl> - untransformed_scale = add_variable_fn ( <nl> - name = name + " _untransformed_scale " , <nl> - shape = shape , <nl> - initializer = untransformed_scale_initializer , <nl> - regularizer = untransformed_scale_regularizer , <nl> - constraint = untransformed_scale_constraint , <nl> - dtype = dtype , <nl> - trainable = trainable ) <nl> - scale = ( np . finfo ( dtype . as_numpy_dtype ) . eps + <nl> - nn_ops . softplus ( untransformed_scale ) ) <nl> - return loc , scale <nl> - return _fn <nl> - <nl> - <nl> - def default_mean_field_normal_fn ( <nl> - is_singular = False , <nl> - loc_initializer = None , <nl> - untransformed_scale_initializer = None , <nl> - loc_regularizer = None , <nl> - untransformed_scale_regularizer = None , <nl> - loc_constraint = None , <nl> - untransformed_scale_constraint = None ) : <nl> - " " " Creates a function to build Normal distributions with trainable params . <nl> - <nl> - This function produces a closure which produces ` tf . distributions . Normal ` <nl> - parameterized by a loc ` and ` scale ` each created using ` tf . get_variable ` . The <nl> - produced closure accepts the following arguments : <nl> - <nl> - name : Python ` str ` name prepended to any created ( or existing ) <nl> - ` tf . Variable ` s . <nl> - shape : Python ` list ` - like representing the parameter ' s event shape . <nl> - dtype : Type of parameter ' s event . <nl> - trainable : Python ` bool ` indicating all created ` tf . Variable ` s should be <nl> - added to the graph collection ` GraphKeys . TRAINABLE_VARIABLES ` . <nl> - add_variable_fn : ` tf . get_variable ` - like ` callable ` used to create ( or <nl> - access existing ) ` tf . Variable ` s . <nl> - <nl> - Args : <nl> - is_singular : Python ` bool ` if ` True ` , forces the special case limit of <nl> - ` scale - > 0 ` , i . e . , a ` Deterministic ` distribution . <nl> - loc_initializer : Initializer function for the ` loc ` parameters . <nl> - If ` None ` ( default ) , values are initialized using the default <nl> - initializer used by ` tf . get_variable ` . <nl> - untransformed_scale_initializer : Initializer function for the ` scale ` <nl> - parameters . If ` None ` ( default ) , values are initialized using the default <nl> - initializer used by ` tf . get_variable ` . <nl> - loc_regularizer : Regularizer function for the ` loc ` parameters . <nl> - untransformed_scale_regularizer : Regularizer function for the ` scale ` <nl> - parameters . <nl> - loc_constraint : An optional projection function to be applied to the <nl> - loc after being updated by an ` Optimizer ` . The function must take as input <nl> - the unprojected variable and must return the projected variable ( which <nl> - must have the same shape ) . Constraints are not safe to use when doing <nl> - asynchronous distributed training . <nl> - untransformed_scale_constraint : An optional projection function to be <nl> - applied to the ` scale ` parameters after being updated by an ` Optimizer ` <nl> - ( e . g . used to implement norm constraints or value constraints ) . The <nl> - function must take as input the unprojected variable and must return the <nl> - projected variable ( which must have the same shape ) . 
Constraints are not <nl> - safe to use when doing asynchronous distributed training . <nl> - <nl> - Returns : <nl> - make_normal_fn : Python ` callable ` which creates a ` tf . distributions . Normal ` <nl> - using from args : ` dtype , shape , name , trainable , add_variable_fn ` . <nl> - " " " <nl> - loc_scale_fn_ = default_loc_scale_fn ( <nl> - is_singular , <nl> - loc_initializer , <nl> - untransformed_scale_initializer , <nl> - loc_regularizer , <nl> - untransformed_scale_regularizer , <nl> - loc_constraint , <nl> - untransformed_scale_constraint ) <nl> - def _fn ( dtype , shape , name , trainable , add_variable_fn ) : <nl> - " " " Creates multivariate ` Deterministic ` or ` Normal ` distribution . " " " <nl> - loc , scale = loc_scale_fn_ ( dtype , shape , name , trainable , add_variable_fn ) <nl> - if scale is None : <nl> - dist = deterministic_lib . Deterministic ( loc = loc ) <nl> - else : <nl> - dist = normal_lib . Normal ( loc = loc , scale = scale ) <nl> - reinterpreted_batch_ndims = array_ops . shape ( dist . batch_shape_tensor ( ) ) [ 0 ] <nl> - return independent_lib . Independent ( <nl> - dist , reinterpreted_batch_ndims = reinterpreted_batch_ndims ) <nl> - return _fn <nl> - <nl> - <nl> class _DenseVariational ( layers_lib . Layer ) : <nl> " " " Abstract densely - connected class ( private , used as implementation base ) . <nl> <nl> def __init__ ( <nl> activation = None , <nl> activity_regularizer = None , <nl> trainable = True , <nl> - kernel_posterior_fn = default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> - bias_posterior_fn = default_mean_field_normal_fn ( is_singular = True ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> bias_prior_fn = None , <nl> bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> def __init__ ( <nl> activation = None , <nl> activity_regularizer = None , <nl> trainable = True , <nl> - kernel_posterior_fn = default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> - bias_posterior_fn = default_mean_field_normal_fn ( is_singular = True ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( <nl> + is_singular = True ) , <nl> bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> bias_prior_fn = None , <nl> bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> def dense_reparameterization ( <nl> activation = None , <nl> activity_regularizer = None , <nl> trainable = True , <nl> - kernel_posterior_fn = default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_fn = layers_util . 
default_mean_field_normal_fn ( ) , <nl> kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> - bias_posterior_fn = default_mean_field_normal_fn ( is_singular = True ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( is_singular = True ) , # pylint : disable = line - too - long <nl> bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> bias_prior_fn = None , <nl> bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> def __init__ ( <nl> activation = None , <nl> activity_regularizer = None , <nl> trainable = True , <nl> - kernel_posterior_fn = default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> - bias_posterior_fn = default_mean_field_normal_fn ( is_singular = True ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( <nl> + is_singular = True ) , <nl> bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> bias_prior_fn = None , <nl> bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> def dense_local_reparameterization ( <nl> activation = None , <nl> activity_regularizer = None , <nl> trainable = True , <nl> - kernel_posterior_fn = default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> - bias_posterior_fn = default_mean_field_normal_fn ( is_singular = True ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( <nl> + is_singular = True ) , <nl> bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> bias_prior_fn = None , <nl> bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> def __init__ ( <nl> activation = None , <nl> activity_regularizer = None , <nl> trainable = True , <nl> - kernel_posterior_fn = default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> - bias_posterior_fn = default_mean_field_normal_fn ( is_singular = True ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( <nl> + is_singular = True ) , <nl> bias_posterior_tensor_fn = lambda d : d . 
sample ( ) , <nl> bias_prior_fn = None , <nl> bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> def _apply_variational_kernel ( self , inputs ) : <nl> array_ops . expand_dims ( self . units , 0 ) ] , 0 ) , <nl> dtype = inputs . dtype , <nl> seed = distribution_util . gen_new_seed ( <nl> - self . seed , salt = " conv_variational " ) ) <nl> + self . seed , salt = " dense_flipout " ) ) <nl> perturbed_inputs = self . _matmul ( <nl> inputs * sign_input , self . kernel_posterior_affine_tensor ) * sign_output <nl> <nl> def dense_flipout ( <nl> activation = None , <nl> activity_regularizer = None , <nl> trainable = True , <nl> - kernel_posterior_fn = default_mean_field_normal_fn ( ) , <nl> + kernel_posterior_fn = layers_util . default_mean_field_normal_fn ( ) , <nl> kernel_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> kernel_prior_fn = lambda dtype , * args : normal_lib . Normal ( # pylint : disable = g - long - lambda <nl> loc = dtype . as_numpy_dtype ( 0 . ) , scale = dtype . as_numpy_dtype ( 1 . ) ) , <nl> kernel_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> - bias_posterior_fn = default_mean_field_normal_fn ( is_singular = True ) , <nl> + bias_posterior_fn = layers_util . default_mean_field_normal_fn ( <nl> + is_singular = True ) , <nl> bias_posterior_tensor_fn = lambda d : d . sample ( ) , <nl> bias_prior_fn = None , <nl> bias_divergence_fn = lambda q , p , ignore : kl_lib . kl_divergence ( q , p ) , <nl> new file mode 100644 <nl> index 0000000000000 . . 9a4fecf4e5dcb <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / bayesflow / python / ops / layers_util . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Utilities for probabilistic layers . <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import numpy as np <nl> + <nl> + from tensorflow . contrib . distributions . python . ops import deterministic as deterministic_lib <nl> + from tensorflow . contrib . distributions . python . ops import independent as independent_lib <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import init_ops <nl> + from tensorflow . python . ops import nn_ops <nl> + from tensorflow . python . ops . distributions import normal as normal_lib <nl> + <nl> + <nl> + def default_loc_scale_fn ( <nl> + is_singular = False , <nl> + loc_initializer = init_ops . random_normal_initializer ( stddev = 0 . 1 ) , <nl> + untransformed_scale_initializer = init_ops . random_normal_initializer ( <nl> + mean = - 3 . , stddev = 0 . 
1 ) , <nl> + loc_regularizer = None , <nl> + untransformed_scale_regularizer = None , <nl> + loc_constraint = None , <nl> + untransformed_scale_constraint = None ) : <nl> + " " " Makes closure which creates ` loc ` , ` scale ` params from ` tf . get_variable ` . <nl> + <nl> + This function produces a closure which produces ` loc ` , ` scale ` using <nl> + ` tf . get_variable ` . The closure accepts the following arguments : <nl> + <nl> + dtype : Type of parameter ' s event . <nl> + shape : Python ` list ` - like representing the parameter ' s event shape . <nl> + name : Python ` str ` name prepended to any created ( or existing ) <nl> + ` tf . Variable ` s . <nl> + trainable : Python ` bool ` indicating all created ` tf . Variable ` s should be <nl> + added to the graph collection ` GraphKeys . TRAINABLE_VARIABLES ` . <nl> + add_variable_fn : ` tf . get_variable ` - like ` callable ` used to create ( or <nl> + access existing ) ` tf . Variable ` s . <nl> + <nl> + Args : <nl> + is_singular : Python ` bool ` indicating if ` scale is None ` . Default : ` False ` . <nl> + loc_initializer : Initializer function for the ` loc ` parameters . <nl> + The default is ` tf . random_normal_initializer ( mean = 0 . , stddev = 0 . 1 ) ` . <nl> + untransformed_scale_initializer : Initializer function for the ` scale ` <nl> + parameters . Default value : ` tf . random_normal_initializer ( mean = - 3 . , <nl> + stddev = 0 . 1 ) ` . This implies the softplus transformed result has mean <nl> + approximately ` 0 . 05 ` and std . deviation approximately ` 0 . 005 ` . <nl> + loc_regularizer : Regularizer function for the ` loc ` parameters . <nl> + The default ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> + untransformed_scale_regularizer : Regularizer function for the ` scale ` <nl> + parameters . The default ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> + loc_constraint : An optional projection function to be applied to the <nl> + loc after being updated by an ` Optimizer ` . The function must take as input <nl> + the unprojected variable and must return the projected variable ( which <nl> + must have the same shape ) . Constraints are not safe to use when doing <nl> + asynchronous distributed training . <nl> + The default ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> + untransformed_scale_constraint : An optional projection function to be <nl> + applied to the ` scale ` parameters after being updated by an ` Optimizer ` <nl> + ( e . g . used to implement norm constraints or value constraints ) . The <nl> + function must take as input the unprojected variable and must return the <nl> + projected variable ( which must have the same shape ) . Constraints are not <nl> + safe to use when doing asynchronous distributed training . The default <nl> + ( ` None ` ) is to use the ` tf . get_variable ` default . <nl> + <nl> + Returns : <nl> + default_loc_scale_fn : Python ` callable ` which instantiates ` loc ` , ` scale ` <nl> + parameters from args : ` dtype , shape , name , trainable , add_variable_fn ` . <nl> + " " " <nl> + def _fn ( dtype , shape , name , trainable , add_variable_fn ) : <nl> + " " " Creates ` loc ` , ` scale ` parameters . 
" " " <nl> + loc = add_variable_fn ( <nl> + name = name + " _loc " , <nl> + shape = shape , <nl> + initializer = loc_initializer , <nl> + regularizer = loc_regularizer , <nl> + constraint = loc_constraint , <nl> + dtype = dtype , <nl> + trainable = trainable ) <nl> + if is_singular : <nl> + return loc , None <nl> + untransformed_scale = add_variable_fn ( <nl> + name = name + " _untransformed_scale " , <nl> + shape = shape , <nl> + initializer = untransformed_scale_initializer , <nl> + regularizer = untransformed_scale_regularizer , <nl> + constraint = untransformed_scale_constraint , <nl> + dtype = dtype , <nl> + trainable = trainable ) <nl> + scale = ( np . finfo ( dtype . as_numpy_dtype ) . eps + <nl> + nn_ops . softplus ( untransformed_scale ) ) <nl> + return loc , scale <nl> + return _fn <nl> + <nl> + <nl> + def default_mean_field_normal_fn ( <nl> + is_singular = False , <nl> + loc_initializer = None , <nl> + untransformed_scale_initializer = None , <nl> + loc_regularizer = None , <nl> + untransformed_scale_regularizer = None , <nl> + loc_constraint = None , <nl> + untransformed_scale_constraint = None ) : <nl> + " " " Creates a function to build Normal distributions with trainable params . <nl> + <nl> + This function produces a closure which produces ` tf . distributions . Normal ` <nl> + parameterized by a loc ` and ` scale ` each created using ` tf . get_variable ` . The <nl> + produced closure accepts the following arguments : <nl> + <nl> + name : Python ` str ` name prepended to any created ( or existing ) <nl> + ` tf . Variable ` s . <nl> + shape : Python ` list ` - like representing the parameter ' s event shape . <nl> + dtype : Type of parameter ' s event . <nl> + trainable : Python ` bool ` indicating all created ` tf . Variable ` s should be <nl> + added to the graph collection ` GraphKeys . TRAINABLE_VARIABLES ` . <nl> + add_variable_fn : ` tf . get_variable ` - like ` callable ` used to create ( or <nl> + access existing ) ` tf . Variable ` s . <nl> + <nl> + Args : <nl> + is_singular : Python ` bool ` if ` True ` , forces the special case limit of <nl> + ` scale - > 0 ` , i . e . , a ` Deterministic ` distribution . <nl> + loc_initializer : Initializer function for the ` loc ` parameters . <nl> + If ` None ` ( default ) , values are initialized using the default <nl> + initializer used by ` tf . get_variable ` . <nl> + untransformed_scale_initializer : Initializer function for the ` scale ` <nl> + parameters . If ` None ` ( default ) , values are initialized using the default <nl> + initializer used by ` tf . get_variable ` . <nl> + loc_regularizer : Regularizer function for the ` loc ` parameters . <nl> + untransformed_scale_regularizer : Regularizer function for the ` scale ` <nl> + parameters . <nl> + loc_constraint : An optional projection function to be applied to the <nl> + loc after being updated by an ` Optimizer ` . The function must take as input <nl> + the unprojected variable and must return the projected variable ( which <nl> + must have the same shape ) . Constraints are not safe to use when doing <nl> + asynchronous distributed training . <nl> + untransformed_scale_constraint : An optional projection function to be <nl> + applied to the ` scale ` parameters after being updated by an ` Optimizer ` <nl> + ( e . g . used to implement norm constraints or value constraints ) . The <nl> + function must take as input the unprojected variable and must return the <nl> + projected variable ( which must have the same shape ) . 
Constraints are not <nl> + safe to use when doing asynchronous distributed training . <nl> + <nl> + Returns : <nl> + make_normal_fn : Python ` callable ` which creates a ` tf . distributions . Normal ` <nl> + using from args : ` dtype , shape , name , trainable , add_variable_fn ` . <nl> + " " " <nl> + loc_scale_fn_ = default_loc_scale_fn ( <nl> + is_singular , <nl> + loc_initializer , <nl> + untransformed_scale_initializer , <nl> + loc_regularizer , <nl> + untransformed_scale_regularizer , <nl> + loc_constraint , <nl> + untransformed_scale_constraint ) <nl> + def _fn ( dtype , shape , name , trainable , add_variable_fn ) : <nl> + " " " Creates multivariate ` Deterministic ` or ` Normal ` distribution . " " " <nl> + loc , scale = loc_scale_fn_ ( dtype , shape , name , trainable , add_variable_fn ) <nl> + if scale is None : <nl> + dist = deterministic_lib . Deterministic ( loc = loc ) <nl> + else : <nl> + dist = normal_lib . Normal ( loc = loc , scale = scale ) <nl> + reinterpreted_batch_ndims = array_ops . shape ( dist . batch_shape_tensor ( ) ) [ 0 ] <nl> + return independent_lib . Independent ( <nl> + dist , reinterpreted_batch_ndims = reinterpreted_batch_ndims ) <nl> + return _fn <nl> mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> GPU_RUNTIME_HEADERS = [ <nl> " common_runtime / gpu / gpu_cudamalloc_allocator . h " , <nl> " common_runtime / gpu / gpu_debug_allocator . h " , <nl> " common_runtime / gpu / gpu_device . h " , <nl> + " common_runtime / gpu / gpu_id . h " , <nl> + " common_runtime / gpu / gpu_id_utils . h " , <nl> " common_runtime / gpu / gpu_init . h " , <nl> " common_runtime / gpu / gpu_managed_allocator . h " , <nl> " common_runtime / gpu / gpu_stream_util . h " , <nl> tf_cuda_library ( <nl> " common_runtime / gpu / gpu_debug_allocator . cc " , <nl> " common_runtime / gpu / gpu_device . cc " , <nl> " common_runtime / gpu / gpu_device_factory . cc " , <nl> + " common_runtime / gpu / gpu_id_utils . cc " , <nl> " common_runtime / gpu / gpu_managed_allocator . cc " , <nl> " common_runtime / gpu / gpu_stream_util . cc " , <nl> " common_runtime / gpu / gpu_util . cc " , <nl> tf_cc_tests_gpu ( <nl> size = " small " , <nl> srcs = glob ( [ " user_ops / * * / * _test . cc " ] ) + [ <nl> " common_runtime / gpu / gpu_bfc_allocator_test . cc " , <nl> + " common_runtime / gpu / gpu_device_test . cc " , <nl> + " common_runtime / gpu / gpu_id_utils_test . cc " , <nl> " common_runtime / gpu / gpu_event_mgr_test . cc " , <nl> " common_runtime / gpu / pool_allocator_test . cc " , <nl> ] , <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . h " <nl> <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> # include " tensorflow / core / lib / strings / strcat . 
h " <nl> <nl> namespace tensorflow { <nl> <nl> - GPUBFCAllocator : : GPUBFCAllocator ( int device_id , size_t total_memory ) <nl> - : GPUBFCAllocator ( device_id , total_memory , GPUOptions ( ) ) { } <nl> + GPUBFCAllocator : : GPUBFCAllocator ( CudaGpuId cuda_gpu_id , size_t total_memory , <nl> + const string & name ) <nl> + : GPUBFCAllocator ( cuda_gpu_id , total_memory , GPUOptions ( ) , name ) { } <nl> <nl> - GPUBFCAllocator : : GPUBFCAllocator ( int device_id , size_t total_memory , <nl> - const GPUOptions & gpu_options ) <nl> + GPUBFCAllocator : : GPUBFCAllocator ( CudaGpuId cuda_gpu_id , size_t total_memory , <nl> + const GPUOptions & gpu_options , <nl> + const string & name ) <nl> : BFCAllocator ( <nl> new GPUMemAllocator ( <nl> - GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ) , <nl> - total_memory , gpu_options . allow_growth ( ) , <nl> - strings : : StrCat ( " GPU_ " , device_id , " _bfc " ) ) { } <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ) , <nl> + total_memory , gpu_options . allow_growth ( ) , name ) { } <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . h <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / common_runtime / allocator_retry . h " <nl> # include " tensorflow / core / common_runtime / bfc_allocator . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> # include " tensorflow / core / platform / stream_executor . h " <nl> # include " tensorflow / core / platform / thread_annotations . h " <nl> # include " tensorflow / core / platform / types . h " <nl> namespace tensorflow { <nl> / / algorithm . <nl> class GPUBFCAllocator : public BFCAllocator { <nl> public : <nl> - / / ' device_id ' refers to the StreamExecutor ID of the device within <nl> + / / ' cuda_gpu_id ' refers to the ID of the GPU device within <nl> / / the process and must reference a valid ID in the process . <nl> - GPUBFCAllocator ( int device_id , size_t total_memory ) ; <nl> - GPUBFCAllocator ( int device_id , size_t total_memory , <nl> - const GPUOptions & gpu_options ) ; <nl> + GPUBFCAllocator ( CudaGpuId cuda_gpu_id , size_t total_memory , <nl> + const string & name ) ; <nl> + GPUBFCAllocator ( CudaGpuId cuda_gpu_id , size_t total_memory , <nl> + const GPUOptions & gpu_options , const string & name ) ; <nl> virtual ~ GPUBFCAllocator ( ) { } <nl> <nl> TF_DISALLOW_COPY_AND_ASSIGN ( GPUBFCAllocator ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator_test . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_bfc_allocator_test . cc <nl> limitations under the License . <nl> # include < algorithm > <nl> # include < vector > <nl> <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> # include " tensorflow / core / lib / core / threadpool . h " <nl> # include " tensorflow / core / lib / gtl / inlined_vector . 
h " <nl> static void CheckStats ( Allocator * a , int64 num_allocs , int64 bytes_in_use , <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , NoDups ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> CheckStats ( & a , 0 , 0 , 0 , 0 ) ; <nl> <nl> / / Allocate a lot of raw pointers <nl> TEST ( GPUBFCAllocatorTest , NoDups ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocations ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> / / Allocate 256 raw pointers of sizes between 100 bytes and about <nl> / / a meg <nl> random : : PhiloxRandom philox ( 123 , 17 ) ; <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocations ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , ExerciseCoalescing ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> CheckStats ( & a , 0 , 0 , 0 , 0 ) ; <nl> <nl> float * first_ptr = a . Allocate < float > ( 1024 ) ; <nl> TEST ( GPUBFCAllocatorTest , ExerciseCoalescing ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , AllocateZeroBufSize ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> float * ptr = a . Allocate < float > ( 0 ) ; <nl> EXPECT_EQ ( nullptr , ptr ) ; <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , TracksSizes ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> EXPECT_EQ ( true , a . TracksAllocationSizes ( ) ) ; <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , AllocatedVsRequested ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> float * t1 = a . Allocate < float > ( 1 ) ; <nl> EXPECT_EQ ( 4 , a . RequestedSize ( t1 ) ) ; <nl> EXPECT_EQ ( 256 , a . AllocatedSize ( t1 ) ) ; <nl> TEST ( GPUBFCAllocatorTest , AllocatedVsRequested ) { <nl> <nl> TEST ( GPUBFCAllocatorTest , TestCustomMemoryLimit ) { <nl> / / Configure a 1MiB byte limit <nl> - GPUBFCAllocator a ( 0 , 1 < < 20 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 20 , " GPU_0_bfc " ) ; <nl> <nl> float * first_ptr = a . Allocate < float > ( 1 < < 6 ) ; <nl> float * second_ptr = a . Allocate < float > ( 1 < < 20 ) ; <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocationsWithGrowth ) { <nl> options . set_allow_growth ( true ) ; <nl> <nl> / / Max of 2GiB , but starts out small . <nl> - GPUBFCAllocator a ( 0 , 1LL < < 31 , options ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1LL < < 31 , options , " GPU_0_bfc " ) ; <nl> <nl> / / Allocate 10 raw pointers of sizes between 100 bytes and about <nl> / / 64 megs . <nl> TEST ( GPUBFCAllocatorTest , AllocationsAndDeallocationsWithGrowth ) { <nl> } <nl> <nl> TEST ( GPUBFCAllocatorTest , DISABLED_AllocatorReceivesZeroMemory ) { <nl> - GPUBFCAllocator a ( 0 , 1UL < < 60 ) ; <nl> - GPUBFCAllocator b ( 0 , 1UL < < 60 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1UL < < 60 , " GPU_0_bfc " ) ; <nl> + GPUBFCAllocator b ( CudaGpuId ( 0 ) , 1UL < < 60 , " GPU_0_bfc " ) ; <nl> void * amem = a . AllocateRaw ( 1 , 1 ) ; <nl> void * bmem = b . AllocateRaw ( 1 , 1 < < 30 ) ; <nl> a . 
DeallocateRaw ( amem ) ; <nl> TEST ( GPUBFCAllocatorTest , DISABLED_AllocatorReceivesZeroMemory ) { <nl> } <nl> <nl> static void BM_Allocation ( int iters ) { <nl> - GPUBFCAllocator a ( 0 , 1uLL < < 33 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1uLL < < 33 , " GPU_0_bfc " ) ; <nl> / / Exercise a few different allocation sizes <nl> std : : vector < size_t > sizes = { 256 , 4096 , 16384 , 524288 , <nl> 512 , 1048576 , 10485760 , 104857600 , <nl> static void BM_Allocation ( int iters ) { <nl> BENCHMARK ( BM_Allocation ) ; <nl> <nl> static void BM_AllocationThreaded ( int iters , int num_threads ) { <nl> - GPUBFCAllocator a ( 0 , 1uLL < < 33 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1uLL < < 33 , " GPU_0_bfc " ) ; <nl> thread : : ThreadPool pool ( Env : : Default ( ) , " test " , num_threads ) ; <nl> std : : atomic_int_fast32_t count ( iters ) ; <nl> mutex done_lock ; <nl> BENCHMARK ( BM_AllocationThreaded ) - > Arg ( 1 ) - > Arg ( 4 ) - > Arg ( 16 ) ; <nl> / / A more complex benchmark that defers deallocation of an object for <nl> / / " delay " allocations . <nl> static void BM_AllocationDelayed ( int iters , int delay ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> / / Exercise a few different allocation sizes <nl> std : : vector < int > sizes = { 256 , 4096 , 16384 , 4096 , 512 , 1024 , 1024 } ; <nl> int size_index = 0 ; <nl> class GPUBFCAllocatorPrivateMethodsTest : public : : testing : : Test { <nl> / / only methods inside this class can access private members of BFCAllocator . <nl> <nl> void TestBinDebugInfo ( ) { <nl> - GPUBFCAllocator a ( 0 , 1 < < 30 ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 < < 30 , " GPU_0_bfc " ) ; <nl> <nl> std : : vector < void * > initial_ptrs ; <nl> std : : vector < size_t > initial_ptrs_allocated_sizes ; <nl> class GPUBFCAllocatorPrivateMethodsTest : public : : testing : : Test { <nl> } <nl> <nl> void TestLog2FloorNonZeroSlow ( ) { <nl> - GPUBFCAllocator a ( 0 / * device_id * / , 1 / * total_memory * / ) ; <nl> + GPUBFCAllocator a ( CudaGpuId ( 0 ) , 1 / * total_memory * / , " GPU_0_bfc " ) ; <nl> EXPECT_EQ ( - 1 , a . Log2FloorNonZeroSlow ( 0 ) ) ; <nl> EXPECT_EQ ( 0 , a . Log2FloorNonZeroSlow ( 1 ) ) ; <nl> EXPECT_EQ ( 1 , a . Log2FloorNonZeroSlow ( 2 ) ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . h " <nl> <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> # include " tensorflow / core / platform / stream_executor . h " <nl> <nl> namespace gpu = : : perftools : : gputools ; <nl> namespace tensorflow { <nl> <nl> GPUcudaMallocAllocator : : GPUcudaMallocAllocator ( VisitableAllocator * allocator , <nl> - int device_id ) <nl> + CudaGpuId cuda_gpu_id ) <nl> : base_allocator_ ( allocator ) { <nl> - stream_exec_ = GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + stream_exec_ = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . 
ValueOrDie ( ) ; <nl> } <nl> <nl> GPUcudaMallocAllocator : : ~ GPUcudaMallocAllocator ( ) { delete base_allocator_ ; } <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . h <nl> limitations under the License . <nl> <nl> # include < memory > <nl> <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> # include " tensorflow / core / common_runtime / visitable_allocator . h " <nl> # include " tensorflow / core / platform / macros . h " <nl> # include " tensorflow / core / platform / stream_executor . h " <nl> namespace tensorflow { <nl> / / allocated memory . <nl> class GPUcudaMallocAllocator : public VisitableAllocator { <nl> public : <nl> - explicit GPUcudaMallocAllocator ( VisitableAllocator * allocator , int device_id ) ; <nl> + explicit GPUcudaMallocAllocator ( VisitableAllocator * allocator , <nl> + CudaGpuId cuda_gpu_id ) ; <nl> ~ GPUcudaMallocAllocator ( ) override ; <nl> string Name ( ) override { return " gpu_debug " ; } <nl> void * AllocateRaw ( size_t alignment , size_t num_bytes ) override ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / gpu / gpu_debug_allocator . h " <nl> <nl> # include < vector > <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> # include " tensorflow / core / platform / stream_executor . h " <nl> <nl> void InitMask ( perftools : : gputools : : StreamExecutor * exec , void * ptr , <nl> / / GPUDebugAllocator <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> GPUDebugAllocator : : GPUDebugAllocator ( VisitableAllocator * allocator , <nl> - int device_id ) <nl> + CudaGpuId cuda_gpu_id ) <nl> : base_allocator_ ( allocator ) { <nl> - stream_exec_ = GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + stream_exec_ = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> } <nl> <nl> GPUDebugAllocator : : ~ GPUDebugAllocator ( ) { delete base_allocator_ ; } <nl> bool GPUDebugAllocator : : CheckFooter ( void * ptr ) { <nl> / / GPUNanResetAllocator <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> GPUNanResetAllocator : : GPUNanResetAllocator ( VisitableAllocator * allocator , <nl> - int device_id ) <nl> + CudaGpuId cuda_gpu_id ) <nl> : base_allocator_ ( allocator ) { <nl> - stream_exec_ = GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + stream_exec_ = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> } <nl> <nl> GPUNanResetAllocator : : ~ GPUNanResetAllocator ( ) { delete base_allocator_ ; } <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_debug_allocator . h <nl> limitations under the License . <nl> # include < string > <nl> # include < unordered_map > <nl> <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> # include " tensorflow / core / common_runtime / visitable_allocator . 
h " <nl> # include " tensorflow / core / platform / macros . h " <nl> # include " tensorflow / core / platform / stream_executor . h " <nl> namespace tensorflow { <nl> / / allocated memory . <nl> class GPUDebugAllocator : public VisitableAllocator { <nl> public : <nl> - explicit GPUDebugAllocator ( VisitableAllocator * allocator , int device_id ) ; <nl> + explicit GPUDebugAllocator ( VisitableAllocator * allocator , <nl> + CudaGpuId cuda_gpu_id ) ; <nl> ~ GPUDebugAllocator ( ) override ; <nl> string Name ( ) override { return " gpu_debug " ; } <nl> void * AllocateRaw ( size_t alignment , size_t num_bytes ) override ; <nl> class GPUDebugAllocator : public VisitableAllocator { <nl> / / user forgets to initialize the memory . <nl> class GPUNanResetAllocator : public VisitableAllocator { <nl> public : <nl> - explicit GPUNanResetAllocator ( VisitableAllocator * allocator , int device_id ) ; <nl> + explicit GPUNanResetAllocator ( VisitableAllocator * allocator , <nl> + CudaGpuId cuda_gpu_id ) ; <nl> ~ GPUNanResetAllocator ( ) override ; <nl> string Name ( ) override { return " gpu_nan_reset " ; } <nl> void * AllocateRaw ( size_t alignment , size_t num_bytes ) override ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_debug_allocator_test . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_debug_allocator_test . cc <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> # include " tensorflow / core / lib / gtl / inlined_vector . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> namespace gpu = : : perftools : : gputools ; <nl> namespace tensorflow { <nl> <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_None ) { <nl> - const int device_id = 0 ; <nl> - GPUDebugAllocator a ( new GPUBFCAllocator ( device_id , 1 < < 30 ) , device_id ) ; <nl> - auto stream_exec = <nl> - GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + GPUDebugAllocator a ( new GPUBFCAllocator ( cuda_gpu_id , 1 < < 30 , " " ) , <nl> + cuda_gpu_id ) ; <nl> + auto stream_exec = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> for ( int s : { 8 } ) { <nl> std : : vector < int64 > cpu_array ( s ) ; <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_Header ) { <nl> for ( int s : { 8 , 211 } ) { <nl> EXPECT_DEATH ( <nl> { <nl> - const int device_id = 0 ; <nl> - GPUDebugAllocator a ( new GPUBFCAllocator ( device_id , 1 < < 30 ) , <nl> - device_id ) ; <nl> + const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + GPUDebugAllocator a ( new GPUBFCAllocator ( cuda_gpu_id , 1 < < 30 , " " ) , <nl> + cuda_gpu_id ) ; <nl> auto stream_exec = <nl> - GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < int64 > cpu_array ( s ) ; <nl> memset ( & cpu_array [ 0 ] , 0 , cpu_array . 
size ( ) * sizeof ( int64 ) ) ; <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_Footer ) { <nl> for ( int s : { 8 , 22 } ) { <nl> EXPECT_DEATH ( <nl> { <nl> - const int device_id = 0 ; <nl> - GPUDebugAllocator a ( new GPUBFCAllocator ( device_id , 1 < < 30 ) , <nl> - device_id ) ; <nl> + const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + GPUDebugAllocator a ( new GPUBFCAllocator ( cuda_gpu_id , 1 < < 30 , " " ) , <nl> + cuda_gpu_id ) ; <nl> auto stream_exec = <nl> - GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < int64 > cpu_array ( s ) ; <nl> memset ( & cpu_array [ 0 ] , 0 , cpu_array . size ( ) * sizeof ( int64 ) ) ; <nl> TEST ( GPUDebugAllocatorTest , OverwriteDetection_Footer ) { <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , ResetToNan ) { <nl> - const int device_id = 0 ; <nl> - GPUNanResetAllocator a ( new GPUBFCAllocator ( device_id , 1 < < 30 ) , device_id ) ; <nl> - auto stream_exec = <nl> - GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + GPUNanResetAllocator a ( new GPUBFCAllocator ( cuda_gpu_id , 1 < < 30 , " " ) , <nl> + cuda_gpu_id ) ; <nl> + auto stream_exec = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < float > cpu_array ( 1024 ) ; <nl> std : : vector < float > cpu_array_result ( 1024 ) ; <nl> TEST ( GPUDebugAllocatorTest , ResetToNan ) { <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , ResetToNanWithHeaderFooter ) { <nl> - const int device_id = 0 ; <nl> + const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> / / NaN reset must be the outer - most allocator . <nl> GPUNanResetAllocator a ( <nl> - new GPUDebugAllocator ( new GPUBFCAllocator ( device_id , 1 < < 30 ) , device_id ) , <nl> - device_id ) ; <nl> - auto stream_exec = <nl> - GPUMachineManager ( ) - > ExecutorForDevice ( device_id ) . ValueOrDie ( ) ; <nl> + new GPUDebugAllocator ( new GPUBFCAllocator ( cuda_gpu_id , 1 < < 30 , " " ) , <nl> + cuda_gpu_id ) , <nl> + cuda_gpu_id ) ; <nl> + auto stream_exec = GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> std : : vector < float > cpu_array ( 1024 ) ; <nl> std : : vector < float > cpu_array_result ( 1024 ) ; <nl> TEST ( GPUDebugAllocatorTest , ResetToNanWithHeaderFooter ) { <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , TracksSizes ) { <nl> - GPUDebugAllocator a ( new GPUBFCAllocator ( 0 , 1 < < 30 ) , 0 ) ; <nl> + const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> + GPUDebugAllocator a ( new GPUBFCAllocator ( cuda_gpu_id , 1 < < 30 , " " ) , <nl> + cuda_gpu_id ) ; <nl> EXPECT_EQ ( true , a . TracksAllocationSizes ( ) ) ; <nl> } <nl> <nl> TEST ( GPUDebugAllocatorTest , AllocatedVsRequested ) { <nl> + const CudaGpuId cuda_gpu_id ( 0 ) ; <nl> GPUNanResetAllocator a ( <nl> - new GPUDebugAllocator ( new GPUBFCAllocator ( 0 , 1 < < 30 ) , 0 ) , 0 ) ; <nl> + new GPUDebugAllocator ( new GPUBFCAllocator ( cuda_gpu_id , 1 < < 30 , " " ) , <nl> + cuda_gpu_id ) , <nl> + cuda_gpu_id ) ; <nl> float * t1 = a . Allocate < float > ( 1 ) ; <nl> EXPECT_EQ ( 4 , a . RequestedSize ( t1 ) ) ; <nl> EXPECT_EQ ( 256 , a . AllocatedSize ( t1 ) ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device . cc <nl> limitations under the License . 
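[Editorial aside, not part of the diff: the allocator-test hunks above switch GPUBFCAllocator, GPUDebugAllocator and GPUNanResetAllocator to the new CudaGpuId-based constructors. A minimal sketch of layering them the way the updated ResetToNanWithHeaderFooter test does is shown below; the function name, the "GPU_0_bfc" label, the 1 GiB limit and the allocation size are illustrative values only.]

// Sketch only: wraps a BFC allocator with the debug and NaN-reset allocators
// using the new CudaGpuId constructors, mirroring the updated tests above.
#include "tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"

namespace tensorflow {
void ExampleLayeredAllocators() {
  const CudaGpuId cuda_gpu_id(0);
  // BFC allocator manages the raw GPU memory; the wrappers add checks.
  auto* bfc = new GPUBFCAllocator(cuda_gpu_id, 1 << 30, "GPU_0_bfc");
  // Debug allocator adds header/footer overwrite detection.
  auto* debug = new GPUDebugAllocator(bfc, cuda_gpu_id);
  // NaN reset must be the outer-most allocator (see ResetToNanWithHeaderFooter).
  GPUNanResetAllocator nan_reset(debug, cuda_gpu_id);
  float* p = nan_reset.Allocate<float>(1024);
  nan_reset.DeallocateRaw(p);
  // The wrappers delete their base allocator in their destructors.
}
}  // namespace tensorflow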
<nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_event_mgr . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_stream_util . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_util . h " <nl> class EigenCudaStreamDevice : public : : Eigen : : StreamInterface { <nl> } <nl> ~ EigenCudaStreamDevice ( ) override { } <nl> void Reinitialize ( OpKernelContext * context , const cudaStream_t * cuda_stream , <nl> - int gpu_id , : : tensorflow : : Allocator * alloc , char * scratch ) { <nl> + TfGpuId tf_gpu_id , : : tensorflow : : Allocator * alloc , <nl> + char * scratch ) { <nl> if ( LogMemory : : IsEnabled ( ) ) { <nl> operation_ = context - > op_kernel ( ) . name ( ) + " / EigenAllocator " ; <nl> step_id_ = context - > step_id ( ) ; <nl> class EigenCudaStreamDevice : public : : Eigen : : StreamInterface { <nl> reinterpret_cast < unsigned int * > ( scratch + Eigen : : kCudaScratchSize ) ; <nl> stream_ = cuda_stream ; <nl> allocator_ = alloc ; <nl> - device_prop_ = & Eigen : : m_deviceProperties [ gpu_id ] ; <nl> + const int cuda_gpu_id = GpuIdUtil : : TfToCudaGpuId ( tf_gpu_id ) . value ( ) ; <nl> + device_prop_ = & Eigen : : m_deviceProperties [ cuda_gpu_id ] ; <nl> } <nl> <nl> const cudaStream_t & stream ( ) const override { return * stream_ ; } <nl> class EigenCudaStreamDevice : public : : Eigen : : StreamInterface { <nl> class BaseGPUDevice : : StreamGroupFactory { <nl> public : <nl> / / Returns the unique stream group for use with the stream defined by <nl> - / / { gpu_id , stream_group_within_gpu } , creating it if it does not yet exist . <nl> + / / { tf_gpu_id , stream_group_within_gpu } , creating it if it does not yet <nl> + / / exist . <nl> / / This function is thread safe . <nl> - BaseGPUDevice : : StreamGroup * GetOrCreate ( int gpu_id , <nl> + BaseGPUDevice : : StreamGroup * GetOrCreate ( TfGpuId tf_gpu_id , <nl> int stream_group_within_gpu , <nl> gpu : : StreamExecutor * executor ) { <nl> mutex_lock guard ( lock_ ) ; <nl> - StreamGroup * group = & streams_ [ key_type ( gpu_id , stream_group_within_gpu ) ] ; <nl> + StreamGroup * group = <nl> + & streams_ [ key_type ( tf_gpu_id . value ( ) , stream_group_within_gpu ) ] ; <nl> if ( ! 
group - > compute ) { <nl> group - > compute = new gpu : : Stream ( executor ) ; <nl> group - > compute - > Init ( ) ; <nl> class BaseGPUDevice : : StreamGroupFactory { <nl> <nl> BaseGPUDevice : : BaseGPUDevice ( const SessionOptions & options , const string & name , <nl> Bytes memory_limit , const DeviceLocality & locality , <nl> - int gpu_id , const string & physical_device_desc , <nl> + TfGpuId tf_gpu_id , <nl> + const string & physical_device_desc , <nl> Allocator * gpu_allocator , Allocator * cpu_allocator , <nl> bool sync_every_op , int32 max_streams ) <nl> : LocalDevice ( options , Device : : BuildDeviceAttributes ( name , DEVICE_GPU , <nl> BaseGPUDevice : : BaseGPUDevice ( const SessionOptions & options , const string & name , <nl> physical_device_desc ) ) , <nl> gpu_allocator_ ( gpu_allocator ) , <nl> cpu_allocator_ ( cpu_allocator ) , <nl> - gpu_id_ ( gpu_id ) , <nl> + tf_gpu_id_ ( tf_gpu_id ) , <nl> sync_every_op_ ( sync_every_op ) , <nl> max_streams_ ( max_streams ) { <nl> ProcessState : : singleton ( ) - > EnableGPUDevice ( ) ; <nl> BaseGPUDevice : : ~ BaseGPUDevice ( ) { <nl> } <nl> <nl> Status BaseGPUDevice : : Init ( const SessionOptions & options ) { <nl> - auto executor_status = GPUMachineManager ( ) - > ExecutorForDevice ( gpu_id_ ) ; <nl> + auto executor_status = GpuIdUtil : : ExecutorForTfGpuId ( tf_gpu_id_ ) ; <nl> if ( ! executor_status . status ( ) . ok ( ) ) { <nl> return errors : : Internal ( " Failed to get StreamExecutor for device " , <nl> - gpu_id_ ) ; <nl> + tf_gpu_id_ . value ( ) ) ; <nl> } <nl> <nl> executor_ = executor_status . ValueOrDie ( ) ; <nl> Status BaseGPUDevice : : Init ( const SessionOptions & options ) { <nl> / / Create the specified number of GPU streams <nl> for ( int i = 0 ; i < max_streams_ ; i + + ) { <nl> streams_ . push_back ( <nl> - StreamGroupFactory : : Global ( ) . GetOrCreate ( gpu_id_ , i , executor_ ) ) ; <nl> + StreamGroupFactory : : Global ( ) . GetOrCreate ( tf_gpu_id_ , i , executor_ ) ) ; <nl> <nl> size_t scratch_buffer_size = Eigen : : kCudaScratchSize + sizeof ( unsigned int ) ; <nl> void * scratch_buffer = gpu_allocator_ - > AllocateRaw ( <nl> Allocator : : kAllocatorAlignment , scratch_buffer_size ) ; <nl> if ( scratch_buffer = = nullptr ) { <nl> return errors : : FailedPrecondition ( <nl> - " Failed to allocate scratch buffer for device " , gpu_id_ ) ; <nl> + " Failed to allocate scratch buffer for device " , tf_gpu_id_ . value ( ) ) ; <nl> } <nl> scratch_ . push_back ( static_cast < char * > ( scratch_buffer ) ) ; <nl> <nl> Status BaseGPUDevice : : Init ( const SessionOptions & options ) { <nl> & mem , Eigen : : kCudaScratchSize + sizeof ( unsigned int ) ) ; <nl> if ( ! ok ) { <nl> return errors : : FailedPrecondition ( <nl> - " Failed to memcopy into scratch buffer for device " , gpu_id_ ) ; <nl> + " Failed to memcopy into scratch buffer for device " , <nl> + tf_gpu_id_ . value ( ) ) ; <nl> } <nl> <nl> device_contexts_ . push_back ( new GPUDeviceContext ( <nl> Status BaseGPUDevice : : Init ( const SessionOptions & options ) { <nl> gpu_device_info_ - > stream = streams_ [ 0 ] - > compute ; <nl> gpu_device_info_ - > default_context = device_contexts_ [ 0 ] ; <nl> gpu_device_info_ - > event_mgr = em_ . get ( ) ; <nl> - gpu_device_info_ - > gpu_id = gpu_id_ ; <nl> + gpu_device_info_ - > gpu_id = GpuIdUtil : : TfToCudaGpuId ( tf_gpu_id_ ) . value ( ) ; <nl> set_tensorflow_gpu_device_info ( gpu_device_info_ ) ; <nl> <nl> / / Whether and how the GPU device uses its own threadpool . 
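[Editorial aside, not part of the diff: the gpu_device.cc changes above thread a TfGpuId (the index TensorFlow exposes as /device:GPU:N) through BaseGPUDevice and convert it to a CudaGpuId (the CUDA driver ordinal) only where a StreamExecutor or Eigen device property is needed. The real mapping lives in gpu_id_utils.h, which is not part of this hunk; the sketch below is a hypothetical illustration of the idea behind GpuIdUtil::InsertTfCudaGpuIdPair / TfToCudaGpuId, not the actual implementation.]

// Hypothetical sketch of a TfGpuId -> CudaGpuId table; names other than the
// id types are invented for illustration.
#include <unordered_map>
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {
namespace {
std::unordered_map<int, int>* TfToCudaMap() {
  static auto* m = new std::unordered_map<int, int>;
  return m;
}
}  // namespace

void ExampleInsertTfCudaGpuIdPair(TfGpuId tf_gpu_id, CudaGpuId cuda_gpu_id) {
  // Several TfGpuIds may map to the same physical CudaGpuId: one per
  // virtual device created on that GPU.
  (*TfToCudaMap())[tf_gpu_id.value()] = cuda_gpu_id.value();
}

CudaGpuId ExampleTfToCudaGpuId(TfGpuId tf_gpu_id) {
  auto it = TfToCudaMap()->find(tf_gpu_id.value());
  CHECK(it != TfToCudaMap()->end()) << "Unknown TF GPU id " << tf_gpu_id;
  return CudaGpuId(it->second);
}
}  // namespace tensorflow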
<nl> Status BaseGPUDevice : : Init ( const SessionOptions & options ) { <nl> / / setting them for each kernel . <nl> / / TODO ( zhengxq ) : pin the thread to the same socket of the target GPU . <nl> thread_pool_ . reset ( new thread : : ThreadPool ( <nl> - options . env , strings : : StrCat ( " gpu_private_ " , gpu_id_ ) , <nl> + options . env , strings : : StrCat ( " gpu_private_ " , tf_gpu_id_ . value ( ) ) , <nl> static_cast < int32 > ( gpu_thread_count ) ) ) ; <nl> set_tensorflow_device_thread_pool ( thread_pool_ . get ( ) ) ; <nl> } else if ( gpu_thread_mode = = " gpu_shared " ) { <nl> void BaseGPUDevice : : ComputeHelper ( OpKernel * op_kernel , <nl> <nl> if ( vlog_1 ) { <nl> VLOG ( 1 ) < < " GpuDevice : : Compute " < < op_kernel - > name ( ) < < " op " <nl> - < < op_kernel - > type_string ( ) < < " on GPU " < < gpu_id_ < < " stream [ " <nl> + < < op_kernel - > type_string ( ) < < " on GPU " < < tf_gpu_id_ < < " stream [ " <nl> < < stream_id < < " ] " ; <nl> } <nl> <nl> void BaseGPUDevice : : ComputeAsync ( AsyncOpKernel * op_kernel , <nl> const auto stream_id = gpu_device_context - > stream_id ( ) ; <nl> <nl> VLOG ( 1 ) < < " GpuDevice : : ComputeAsync " < < op_kernel - > name ( ) < < " op " <nl> - < < op_kernel - > type_string ( ) < < " on GPU " < < gpu_id_ < < " stream [ " <nl> + < < op_kernel - > type_string ( ) < < " on GPU " < < tf_gpu_id_ < < " stream [ " <nl> < < stream_id < < " ] " ; <nl> <nl> / / When TraceMe profiling is off ( which is the default ) , the <nl> class ConcretePerOpGpuDevice : public PerOpGpuDevice { <nl> ConcretePerOpGpuDevice ( ) : device_ ( & stream_device_ ) { } <nl> <nl> void Reinitialize ( OpKernelContext * context , const cudaStream_t * cuda_stream , <nl> - int gpu_id , Allocator * base_allocator , char * scratch ) { <nl> - stream_device_ . Reinitialize ( context , cuda_stream , gpu_id , base_allocator , <nl> + TfGpuId tf_gpu_id , Allocator * base_allocator , <nl> + char * scratch ) { <nl> + stream_device_ . Reinitialize ( context , cuda_stream , tf_gpu_id , base_allocator , <nl> scratch ) ; <nl> } <nl> <nl> class ConcretePerOpGpuDevice : public PerOpGpuDevice { <nl> Eigen : : GpuDevice device_ ; <nl> } ; <nl> <nl> + / / Parse ' visible_device_list ' into a list of CUDA GPU ids . <nl> Status ParseVisibleDeviceList ( const string & visible_device_list , <nl> - std : : vector < int > * visible_gpu_order ) { <nl> + std : : vector < CudaGpuId > * visible_gpu_order ) { <nl> visible_gpu_order - > clear ( ) ; <nl> gpu : : Platform * gpu_manager = GPUMachineManager ( ) ; <nl> <nl> Status ParseVisibleDeviceList ( const string & visible_device_list , <nl> " ' but visible device count is " , <nl> gpu_manager - > VisibleDeviceCount ( ) ) ; <nl> } <nl> - visible_gpu_order - > push_back ( cuda_gpu_id ) ; <nl> + visible_gpu_order - > push_back ( CudaGpuId ( cuda_gpu_id ) ) ; <nl> } <nl> } <nl> <nl> / / Validate no repeats . <nl> - std : : set < int > visible_device_set ( visible_gpu_order - > begin ( ) , <nl> - visible_gpu_order - > end ( ) ) ; <nl> + std : : set < CudaGpuId > visible_device_set ( visible_gpu_order - > begin ( ) , <nl> + visible_gpu_order - > end ( ) ) ; <nl> if ( visible_device_set . size ( ) ! 
= visible_gpu_order - > size ( ) ) { <nl> return errors : : InvalidArgument ( <nl> " visible_device_list contained a duplicate entry : " , <nl> Status ParseVisibleDeviceList ( const string & visible_device_list , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status VerifyVirtualDeviceSettings ( <nl> + const size_t num_gpus_to_use , const GPUOptions & gpu_options , <nl> + const std : : vector < CudaGpuId > & visible_gpu_order , <nl> + const std : : vector < CudaGpuId > & valid_cuda_gpu_ids ) { <nl> + const auto & virtual_devices = gpu_options . experimental ( ) . virtual_devices ( ) ; <nl> + CHECK ( ! virtual_devices . empty ( ) ) ; <nl> + if ( gpu_options . per_process_gpu_memory_fraction ( ) > 0 ) { <nl> + return errors : : InvalidArgument ( <nl> + " It ' s invalid to set per_process_gpu_memory_fraction when " <nl> + " virtual_devices is set . " ) ; <nl> + } <nl> + if ( num_gpus_to_use < virtual_devices . size ( ) ) { <nl> + return errors : : Unknown ( <nl> + " Not enough GPUs to create virtual devices . " <nl> + " num_gpus_to_use : " , <nl> + num_gpus_to_use , " # virtual_devices : " , virtual_devices . size ( ) ) ; <nl> + } <nl> + if ( ! gpu_options . visible_device_list ( ) . empty ( ) & & <nl> + visible_gpu_order . size ( ) ! = virtual_devices . size ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " The number of GPUs in visible_device_list doesn ' t match the number " <nl> + " of elements in the virtual_devices list . " , <nl> + " # GPUs in visible_device_list : " , visible_gpu_order . size ( ) , <nl> + " virtual_devices . size ( ) : " , virtual_devices . size ( ) ) ; <nl> + } <nl> + if ( valid_cuda_gpu_ids . size ( ) ! = virtual_devices . size ( ) ) { <nl> + return errors : : Unknown ( <nl> + " The number of valid GPUs doesn ' t match the number of elements in " <nl> + " the virtual_devices list . " , <nl> + " # valid GPUs : " , valid_cuda_gpu_ids . size ( ) , <nl> + " virtual_devices . size ( ) : " , virtual_devices . size ( ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> int64 MinSystemMemory ( int64 available_memory ) { <nl> / / We use the following heuristic for now : <nl> / / <nl> int64 MinSystemMemory ( int64 available_memory ) { <nl> # endif <nl> return min_system_memory ; <nl> } <nl> + <nl> + / / Get the memory limit for the virtual device being created on GPU with <nl> + / / ' cuda_gpu_id ' , when that virtual device is the only virtual device being <nl> + / / created on that GPU . <nl> + Status SingleVirtualDeviceMemoryLimit ( const GPUOptions & gpu_options , <nl> + CudaGpuId cuda_gpu_id , <nl> + int64 * memory_limit ) { <nl> + int64 total_memory = 0 ; <nl> + int64 available_memory = 0 ; <nl> + gpu : : StreamExecutor * se = <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( cuda_gpu_id ) . ValueOrDie ( ) ; <nl> + if ( ! se - > DeviceMemoryUsage ( & available_memory , & total_memory ) ) { <nl> + return errors : : Unknown ( " Failed to query available memory for GPU " , <nl> + cuda_gpu_id . value ( ) ) ; <nl> + } <nl> + <nl> + int64 allocated_memory = 0 ; <nl> + const double per_process_gpu_memory_fraction = <nl> + gpu_options . 
per_process_gpu_memory_fraction ( ) ; <nl> + if ( per_process_gpu_memory_fraction = = 0 ) { <nl> + allocated_memory = available_memory ; <nl> + const int64 min_system_memory = MinSystemMemory ( available_memory ) ; <nl> + if ( min_system_memory < allocated_memory ) { <nl> + allocated_memory - = min_system_memory ; <nl> + } <nl> + } else { <nl> + allocated_memory = total_memory * per_process_gpu_memory_fraction ; <nl> + } <nl> + * memory_limit = allocated_memory ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> } / / namespace <nl> <nl> void BaseGPUDevice : : ReinitializeDevice ( OpKernelContext * context , <nl> void BaseGPUDevice : : ReinitializeDevice ( OpKernelContext * context , <nl> DCHECK ( concrete_device ) ; <nl> const cudaStream_t * cuda_stream = reinterpret_cast < const cudaStream_t * > ( <nl> streams_ [ stream_id ] - > compute - > implementation ( ) - > CudaStreamMemberHack ( ) ) ; <nl> - concrete_device - > Reinitialize ( context , cuda_stream , gpu_id_ , allocator , <nl> + concrete_device - > Reinitialize ( context , cuda_stream , tf_gpu_id_ , allocator , <nl> scratch_ [ stream_id ] ) ; <nl> } <nl> <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - size_t n = INT_MAX ; <nl> + size_t num_gpus_to_use = INT_MAX ; <nl> auto iter = options . config . device_count ( ) . find ( " GPU " ) ; <nl> if ( iter ! = options . config . device_count ( ) . end ( ) ) { <nl> - n = iter - > second ; <nl> + num_gpus_to_use = iter - > second ; <nl> } <nl> const auto & gpu_options = options . config . gpu_options ( ) ; <nl> - std : : vector < int > visible_gpu_order ; <nl> + std : : vector < CudaGpuId > visible_gpu_order ; <nl> TF_RETURN_IF_ERROR ( ParseVisibleDeviceList ( gpu_options . visible_device_list ( ) , <nl> & visible_gpu_order ) ) ; <nl> - std : : vector < int > valid_gpu_ids ; <nl> - TF_RETURN_IF_ERROR ( GetValidDeviceIds ( visible_gpu_order , & valid_gpu_ids ) ) ; <nl> - if ( static_cast < size_t > ( n ) > valid_gpu_ids . size ( ) ) { <nl> - n = valid_gpu_ids . size ( ) ; <nl> + <nl> + std : : vector < CudaGpuId > valid_cuda_gpu_ids ; <nl> + TF_RETURN_IF_ERROR ( GetValidDeviceIds ( visible_gpu_order , & valid_cuda_gpu_ids ) ) ; <nl> + if ( num_gpus_to_use > valid_cuda_gpu_ids . size ( ) ) { <nl> + num_gpus_to_use = valid_cuda_gpu_ids . size ( ) ; <nl> } <nl> - if ( ! valid_gpu_ids . empty ( ) ) { <nl> + if ( ! valid_cuda_gpu_ids . empty ( ) ) { <nl> / / Save the original device . <nl> int original_device = 0 ; <nl> cudaError_t err = cudaGetDevice ( & original_device ) ; <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> } <nl> / / Force to implicitly initialize CUDA runtime on each valid GPU before <nl> / / CreateGPUDevice ( ) . <nl> - for ( int gpu_id : valid_gpu_ids ) { <nl> - err = cudaSetDevice ( gpu_id ) ; <nl> + for ( CudaGpuId cuda_gpu_id : valid_cuda_gpu_ids ) { <nl> + err = cudaSetDevice ( cuda_gpu_id . value ( ) ) ; <nl> if ( err ! = cudaSuccess ) { <nl> - return errors : : Internal ( " cudaSetDevice ( ) on GPU : " , gpu_id , <nl> + return errors : : Internal ( " cudaSetDevice ( ) on GPU : " , cuda_gpu_id . value ( ) , <nl> " failed . Status : " , cudaGetErrorString ( err ) ) ; <nl> } <nl> err = cudaFree ( nullptr ) ; <nl> if ( err ! = cudaSuccess ) { <nl> return errors : : Internal ( <nl> - " CUDA runtime implicit initialization on GPU : " , gpu_id , <nl> + " CUDA runtime implicit initialization on GPU : " , cuda_gpu_id . value ( ) , <nl> " failed . 
Status : " , cudaGetErrorString ( err ) ) ; <nl> } <nl> } <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> " failed . Status : " , cudaGetErrorString ( err ) ) ; <nl> } <nl> } <nl> - for ( int i = 0 ; i < n ; i + + ) { <nl> - BaseGPUDevice * gpu_device ; <nl> - TF_RETURN_IF_ERROR ( CreateGPUDevice ( <nl> - options , strings : : StrCat ( name_prefix , " / device : GPU : " , i ) , <nl> - valid_gpu_ids [ i ] , & gpu_device ) ) ; <nl> - TF_RETURN_IF_ERROR ( gpu_device - > Init ( options ) ) ; <nl> - devices - > push_back ( gpu_device ) ; <nl> - } <nl> <nl> + const auto & virtual_devices = gpu_options . experimental ( ) . virtual_devices ( ) ; <nl> + if ( ! virtual_devices . empty ( ) ) { <nl> + TF_RETURN_IF_ERROR ( VerifyVirtualDeviceSettings ( <nl> + num_gpus_to_use , gpu_options , visible_gpu_order , valid_cuda_gpu_ids ) ) ; <nl> + / / We ' ve verified that num_gpus_to_use > = virtual_devices . size ( ) . <nl> + num_gpus_to_use = virtual_devices . size ( ) ; <nl> + CHECK ( gpu_options . visible_device_list ( ) . empty ( ) | | <nl> + valid_cuda_gpu_ids = = visible_gpu_order ) ; <nl> + } <nl> + int next_tf_gpu_id = 0 ; <nl> + for ( int i = 0 ; i < num_gpus_to_use ; + + i ) { <nl> + const CudaGpuId cuda_gpu_id = valid_cuda_gpu_ids [ i ] ; <nl> + std : : vector < int64 > memory_limit_bytes ; <nl> + if ( virtual_devices . empty ( ) | | <nl> + virtual_devices . Get ( i ) . memory_limit_mb_size ( ) = = 0 ) { <nl> + int64 single_virtual_device_memory_limit = 0 ; <nl> + TF_RETURN_IF_ERROR ( SingleVirtualDeviceMemoryLimit ( <nl> + gpu_options , cuda_gpu_id , & single_virtual_device_memory_limit ) ) ; <nl> + memory_limit_bytes . push_back ( single_virtual_device_memory_limit ) ; <nl> + } else { <nl> + const auto & memory_limit_mb = virtual_devices . Get ( i ) . memory_limit_mb ( ) ; <nl> + std : : transform ( memory_limit_mb . begin ( ) , memory_limit_mb . end ( ) , <nl> + std : : back_inserter ( memory_limit_bytes ) , [ ] ( float mb ) { <nl> + return static_cast < int64 > ( mb ) * ( 1ll < < 20 ) ; <nl> + } ) ; <nl> + } <nl> + for ( int64 bytes : memory_limit_bytes ) { <nl> + TfGpuId tf_gpu_id ( next_tf_gpu_id ) ; <nl> + + + next_tf_gpu_id ; <nl> + GpuIdUtil : : InsertTfCudaGpuIdPair ( tf_gpu_id , cuda_gpu_id ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + CreateGPUDevice ( options , name_prefix , tf_gpu_id , bytes , devices ) ) ; <nl> + } <nl> + } <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - static string GetShortDeviceDescription ( int device_id , <nl> + static string GetShortDeviceDescription ( CudaGpuId cuda_gpu_id , <nl> const gpu : : DeviceDescription & desc ) { <nl> int cc_major ; <nl> int cc_minor ; <nl> static string GetShortDeviceDescription ( int device_id , <nl> cc_minor = 0 ; <nl> } <nl> / / LINT . IfChange <nl> - return strings : : StrCat ( " device : " , device_id , " , name : " , desc . name ( ) , <nl> + return strings : : StrCat ( " device : " , cuda_gpu_id . value ( ) , <nl> + " , name : " , desc . name ( ) , <nl> " , pci bus id : " , desc . pci_bus_id ( ) , <nl> " , compute capability : " , cc_major , " . " , cc_minor ) ; <nl> / / LINT . ThenChange ( / / tensorflow / python / platform / test . 
py ) <nl> } <nl> <nl> Status BaseGPUDeviceFactory : : CreateGPUDevice ( const SessionOptions & options , <nl> - const string & name , int gpu_id , <nl> - BaseGPUDevice * * out_device ) { <nl> - CHECK_GE ( gpu_id , 0 ) ; <nl> + const string & name_prefix , <nl> + TfGpuId tf_gpu_id , <nl> + int64 memory_limit , <nl> + std : : vector < Device * > * devices ) { <nl> + CHECK_GE ( tf_gpu_id . value ( ) , 0 ) ; <nl> + const string device_name = <nl> + strings : : StrCat ( name_prefix , " / device : GPU : " , tf_gpu_id . value ( ) ) ; <nl> <nl> / / Look up the device , to see its attributes . <nl> - gpu : : Platform * gpu_platform = GPUMachineManager ( ) ; <nl> - CHECK_LT ( gpu_id , gpu_platform - > VisibleDeviceCount ( ) ) ; <nl> + GpuIdUtil : : CheckValidTfGpuId ( tf_gpu_id ) ; <nl> gpu : : StreamExecutor * se = <nl> - gpu_platform - > ExecutorForDevice ( gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForTfGpuId ( tf_gpu_id ) . ValueOrDie ( ) ; <nl> const gpu : : DeviceDescription & desc = se - > GetDeviceDescription ( ) ; <nl> int numa_node = desc . numa_node ( ) ; <nl> if ( numa_node < 0 ) { <nl> Status BaseGPUDeviceFactory : : CreateGPUDevice ( const SessionOptions & options , <nl> / / may run into trouble later with data transfer operations . The <nl> / / trouble may manifest as slower than expected performance , or <nl> / / outright failures . <nl> - LOG ( INFO ) < < " Could not identify NUMA node of " < < name <nl> + LOG ( INFO ) < < " Could not identify NUMA node of " < < device_name <nl> < < " , defaulting to 0 . Your kernel may not have been built " <nl> < < " with NUMA support . " ; <nl> numa_node = 0 ; <nl> } <nl> - <nl> - int64 total_memory , available_memory ; <nl> - if ( ! se - > DeviceMemoryUsage ( & available_memory , & total_memory ) ) { <nl> - return errors : : Unknown ( <nl> - strings : : StrCat ( " Failed to query available memory for GPU " , gpu_id ) ) ; <nl> - } <nl> - <nl> - int64 allocated_memory ; <nl> - double config_memory_fraction = <nl> - options . config . gpu_options ( ) . per_process_gpu_memory_fraction ( ) ; <nl> - if ( config_memory_fraction = = 0 ) { <nl> - allocated_memory = available_memory ; <nl> - const int64 min_system_memory = MinSystemMemory ( available_memory ) ; <nl> - if ( min_system_memory < allocated_memory ) { <nl> - allocated_memory - = min_system_memory ; <nl> - } <nl> - } else { <nl> - allocated_memory = total_memory * config_memory_fraction ; <nl> - } <nl> - <nl> - Bytes allocated_bytes = static_cast < Bytes > ( allocated_memory ) ; <nl> + Bytes allocated_bytes = static_cast < Bytes > ( memory_limit ) ; <nl> <nl> / / Get GPU bus_id from its reported NUMA affinity . Because GPUs are <nl> / / virtualized in some environments , we can ' t just use the GPU id . <nl> / / NUMA locales are indexed from 0 , buses are indexed from 1 . <nl> DeviceLocality dev_locality ; <nl> dev_locality . set_bus_id ( numa_node + 1 ) ; <nl> - VLOG ( 1 ) < < " GPUDevice id " < < gpu_id < < " on bus " < < dev_locality . bus_id ( ) <nl> - < < " numa : " < < numa_node < < " pci : " < < desc . pci_bus_id ( ) ; <nl> - <nl> + const CudaGpuId cuda_gpu_id = GpuIdUtil : : TfToCudaGpuId ( tf_gpu_id ) ; <nl> + VLOG ( 1 ) < < " GPUDevice id " < < cuda_gpu_id < < " on bus " <nl> + < < dev_locality . bus_id ( ) < < " numa : " < < numa_node <nl> + < < " pci : " < < desc . 
pci_bus_id ( ) ; <nl> + <nl> + LOG ( INFO ) < < " Creating TensorFlow device ( " < < device_name < < " with " <nl> + < < ( memory_limit > > 20 ) < < " MB memory ) - > physical GPU ( " <nl> + < < GetShortDeviceDescription ( cuda_gpu_id , desc ) < < " ) " ; <nl> ProcessState * process_state = ProcessState : : singleton ( ) ; <nl> - * out_device = CreateGPUDevice ( <nl> - options , name , allocated_bytes , dev_locality , gpu_id , <nl> - GetShortDeviceDescription ( gpu_id , desc ) , <nl> - process_state - > GetGPUAllocator ( options . config . gpu_options ( ) , gpu_id , <nl> - allocated_memory ) , <nl> + BaseGPUDevice * gpu_device = CreateGPUDevice ( <nl> + options , device_name , allocated_bytes , dev_locality , tf_gpu_id , <nl> + GetShortDeviceDescription ( cuda_gpu_id , desc ) , <nl> + process_state - > GetGPUAllocator ( options . config . gpu_options ( ) , tf_gpu_id , <nl> + memory_limit ) , <nl> process_state - > GetCPUAllocator ( numa_node ) ) ; <nl> + TF_RETURN_IF_ERROR ( gpu_device - > Init ( options ) ) ; <nl> + devices - > push_back ( gpu_device ) ; <nl> <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> static int GetDefaultMinGPUMultiprocessorCount ( <nl> - gpu : : Platform * gpu_manager , const std : : vector < int > & visible_gpu_order ) { <nl> + gpu : : Platform * gpu_manager , <nl> + const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> static const int kDefaultMinGPUMultiprocessorCount = 8 ; <nl> <nl> / / Find the highest multi - processor count across all visible GPUs . <nl> int max_count = - 1 ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - auto exec_status = gpu_manager - > ExecutorForDevice ( visible_gpu_order [ i ] ) ; <nl> + auto exec_status = <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( gpu_manager , visible_gpu_order [ i ] ) ; <nl> if ( ! exec_status . ok ( ) ) { <nl> continue ; <nl> } <nl> static int GetDefaultMinGPUMultiprocessorCount ( <nl> } <nl> <nl> static int GetMinGPUMultiprocessorCount ( <nl> - gpu : : Platform * gpu_manager , const std : : vector < int > & visible_gpu_order ) { <nl> + gpu : : Platform * gpu_manager , <nl> + const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> const char * tf_min_gpu_core_count = getenv ( " TF_MIN_GPU_MULTIPROCESSOR_COUNT " ) ; <nl> <nl> if ( tf_min_gpu_core_count = = nullptr | | <nl> std : : vector < CudaVersion > GetSupportedCudaComputeCapabilities ( ) { <nl> } <nl> <nl> std : : unique_ptr < std : : map < std : : pair < int , int > , bool > > GetPeerAccessMap ( <nl> - gpu : : Platform * platform , const std : : vector < int > & visible_gpu_order ) { <nl> + gpu : : Platform * platform , const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> std : : unique_ptr < std : : map < std : : pair < int , int > , bool > > map ( <nl> new std : : map < std : : pair < int , int > , bool > ) ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - const int i_gpu_id = visible_gpu_order [ i ] ; <nl> + const CudaGpuId i_gpu_id = visible_gpu_order [ i ] ; <nl> for ( int j = 0 ; j < visible_gpu_order . size ( ) ; + + j ) { <nl> - const int j_gpu_id = visible_gpu_order [ j ] ; <nl> + const CudaGpuId j_gpu_id = visible_gpu_order [ j ] ; <nl> gpu : : StreamExecutor * from = <nl> - platform - > ExecutorForDevice ( i_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( platform , i_gpu_id ) . ValueOrDie ( ) ; <nl> gpu : : StreamExecutor * to = <nl> - platform - > ExecutorForDevice ( j_gpu_id ) . 
ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( platform , j_gpu_id ) . ValueOrDie ( ) ; <nl> ( * map ) [ { i , j } ] = from - > CanEnablePeerAccessTo ( to ) ; <nl> } <nl> } <nl> std : : unique_ptr < std : : map < std : : pair < int , int > , bool > > GetPeerAccessMap ( <nl> } <nl> <nl> Status EnablePeerAccess ( gpu : : Platform * platform , <nl> - const std : : vector < int > & visible_gpu_order ) { <nl> + const std : : vector < CudaGpuId > & visible_gpu_order ) { <nl> int possible_peer_count = 0 ; <nl> int enabled_peer_count = 0 ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - const int i_gpu_id = visible_gpu_order [ i ] ; <nl> + const CudaGpuId i_gpu_id = visible_gpu_order [ i ] ; <nl> for ( int j = 0 ; j < visible_gpu_order . size ( ) ; + + j ) { <nl> - const int j_gpu_id = visible_gpu_order [ j ] ; <nl> - / / We have already validated that ExecutorForDevice ( ) calls <nl> - / / return OK . <nl> + const CudaGpuId j_gpu_id = visible_gpu_order [ j ] ; <nl> + / / We have already validated that ExecutorForDevice ( ) calls return OK . <nl> gpu : : StreamExecutor * from = <nl> - platform - > ExecutorForDevice ( i_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( platform , i_gpu_id ) . ValueOrDie ( ) ; <nl> gpu : : StreamExecutor * to = <nl> - platform - > ExecutorForDevice ( j_gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( platform , j_gpu_id ) . ValueOrDie ( ) ; <nl> <nl> if ( from - > CanEnablePeerAccessTo ( to ) ) { <nl> + + possible_peer_count ; <nl> Status EnablePeerAccess ( gpu : : Platform * platform , <nl> } / / namespace <nl> <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> - const std : : vector < int > & visible_gpu_order , std : : vector < int > * ids ) { <nl> + const std : : vector < CudaGpuId > & visible_gpu_order , <nl> + std : : vector < CudaGpuId > * ids ) { <nl> gpu : : Platform * gpu_manager = GPUMachineManager ( ) ; <nl> bool new_gpu_found = false ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - int gpu_id = visible_gpu_order [ i ] ; <nl> + const CudaGpuId cuda_gpu_id = visible_gpu_order [ i ] ; <nl> <nl> - / / Only perform this once per visible gpu id . <nl> - if ( visible_gpu_initialized_ [ gpu_id ] ) { <nl> + / / Only perform this once per visible cuda gpu id . <nl> + if ( visible_gpu_initialized_ [ cuda_gpu_id . value ( ) ] ) { <nl> continue ; <nl> } <nl> <nl> - visible_gpu_initialized_ [ gpu_id ] = true ; <nl> + visible_gpu_initialized_ [ cuda_gpu_id . value ( ) ] = true ; <nl> new_gpu_found = true ; <nl> <nl> - auto executor = gpu_manager - > ExecutorForDevice ( gpu_id ) ; <nl> + auto executor = GpuIdUtil : : ExecutorForCudaGpuId ( gpu_manager , cuda_gpu_id ) ; <nl> if ( ! executor . ok ( ) ) { <nl> return StreamExecutorUtil : : ConvertStatus ( executor . status ( ) ) ; <nl> } <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> auto access_map = GetPeerAccessMap ( gpu_manager , visible_gpu_order ) ; <nl> string line_buf = " DMA : " ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - strings : : StrAppend ( & line_buf , visible_gpu_order [ i ] , " " ) ; <nl> + strings : : StrAppend ( & line_buf , visible_gpu_order [ i ] . value ( ) , " " ) ; <nl> } <nl> LOG ( INFO ) < < line_buf ; <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - line_buf = strings : : StrCat ( visible_gpu_order [ i ] , " : " ) ; <nl> + line_buf = strings : : StrCat ( visible_gpu_order [ i ] . 
value ( ) , " : " ) ; <nl> for ( int j = 0 ; j < visible_gpu_order . size ( ) ; + + j ) { <nl> if ( ( * access_map ) [ { i , j } ] ) { <nl> line_buf . append ( " Y " ) ; <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> <nl> / / Filter out devices that don ' t have the right capability or power . <nl> for ( int i = 0 ; i < visible_gpu_order . size ( ) ; + + i ) { <nl> - const int32 visible_gpu_id = visible_gpu_order [ i ] ; <nl> - auto exec_status = gpu_manager - > ExecutorForDevice ( visible_gpu_id ) ; <nl> + const CudaGpuId visible_gpu_id = visible_gpu_order [ i ] ; <nl> + auto exec_status = <nl> + GpuIdUtil : : ExecutorForCudaGpuId ( gpu_manager , visible_gpu_id ) ; <nl> if ( ! exec_status . ok ( ) ) { <nl> + LOG ( INFO ) < < " Ignoring visible gpu device " < < visible_gpu_id <nl> + < < " whose executor is in invalid state : " <nl> + < < exec_status . status ( ) . ToString ( ) ; <nl> continue ; <nl> } <nl> gpu : : StreamExecutor * se = exec_status . ValueOrDie ( ) ; <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> CudaVersion device_capability ; <nl> if ( ! desc . cuda_compute_capability ( & device_capability . major_part , <nl> & device_capability . minor_part ) ) { <nl> + LOG ( INFO ) < < " Ignoring visible gpu device " <nl> + < < " ( " < < GetShortDeviceDescription ( visible_gpu_id , desc ) <nl> + < < " ) " <nl> + < < " whose CUDA compute capability is not available . " ; <nl> continue ; <nl> } <nl> / / Only GPUs with no less than the minimum supported compute capability is <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> / / multiprocessors . If the TF_MIN_GPU_MULTIPROCESSOR_COUNT environment <nl> / / variable is set , its value will be used to filter out GPUs . <nl> if ( desc . core_count ( ) < min_gpu_core_count ) { <nl> - LOG ( INFO ) < < " Ignoring gpu device " <nl> + LOG ( INFO ) < < " Ignoring visible gpu device " <nl> < < " ( " < < GetShortDeviceDescription ( visible_gpu_id , desc ) <nl> < < " ) " <nl> < < " with Cuda multiprocessor count : " < < desc . core_count ( ) <nl> Status BaseGPUDeviceFactory : : GetValidDeviceIds ( <nl> " TF_MIN_GPU_MULTIPROCESSOR_COUNT . " ; <nl> continue ; <nl> } <nl> - <nl> - size_t new_id = ids - > size ( ) ; <nl> + LOG ( INFO ) < < " Adding visible gpu device " < < visible_gpu_id ; <nl> ids - > push_back ( visible_gpu_id ) ; <nl> - <nl> - LOG ( INFO ) < < " Creating TensorFlow device ( / device : GPU : " < < new_id < < " ) - > " <nl> - < < " ( " < < GetShortDeviceDescription ( visible_gpu_id , desc ) < < " ) " ; <nl> } <nl> <nl> return Status : : OK ( ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device . h <nl> limitations under the License . <nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_event_mgr . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> # include " tensorflow / core / common_runtime / gpu_device_context . h " <nl> # include " tensorflow / core / common_runtime / local_device . h " <nl> # include " tensorflow / core / framework / allocator . 
h " <nl> namespace tensorflow { <nl> class BaseGPUDevice : public LocalDevice { <nl> public : <nl> BaseGPUDevice ( const SessionOptions & options , const string & name , <nl> - Bytes memory_limit , const DeviceLocality & locality , int gpu_id , <nl> - const string & physical_device_desc , Allocator * gpu_allocator , <nl> - Allocator * cpu_allocator , bool sync_every_op , <nl> - int32 max_streams ) ; <nl> + Bytes memory_limit , const DeviceLocality & locality , <nl> + TfGpuId tf_gpu_id , const string & physical_device_desc , <nl> + Allocator * gpu_allocator , Allocator * cpu_allocator , <nl> + bool sync_every_op , int32 max_streams ) ; <nl> <nl> ~ BaseGPUDevice ( ) override ; <nl> <nl> class BaseGPUDevice : public LocalDevice { <nl> void ReinitializeGpuDevice ( OpKernelContext * context , PerOpGpuDevice * device , <nl> DeviceContext * dc , Allocator * allocator ) override ; <nl> <nl> - / / Returns the id of this device within the native driver system ; e . g . , for <nl> - / / CUDA this is the ordinal of the GPU within the system . <nl> - int gpu_id ( ) const { return gpu_id_ ; } <nl> + / / Returns the CUDA GPU id of this device within the native driver system ; <nl> + / / e . g . , for CUDA this is the ordinal of the GPU within the system . <nl> + int gpu_id ( ) const { return GpuIdUtil : : TfToCudaGpuId ( tf_gpu_id_ ) . value ( ) ; } <nl> <nl> / / The executor that provides control for the device ; e . g . , for CUDA this <nl> / / corresponds to the cuda context . <nl> class BaseGPUDevice : public LocalDevice { <nl> std : : vector < GPUDeviceContext * > device_contexts_ ; <nl> GpuDeviceInfo * gpu_device_info_ = nullptr ; <nl> mutex trace_mu_ ; <nl> - int gpu_id_ = - 1 ; <nl> + TfGpuId tf_gpu_id_ ; <nl> const bool sync_every_op_ = false ; <nl> const int32 max_streams_ ; <nl> std : : unique_ptr < EventMgr > em_ ; <nl> class BaseGPUDeviceFactory : public DeviceFactory { <nl> std : : vector < Device * > * devices ) override ; <nl> <nl> private : <nl> - Status CreateGPUDevice ( const SessionOptions & options , const string & name , <nl> - int gpu_id , BaseGPUDevice * * out_device ) ; <nl> + / / Creates a BaseGPUDevice associated with ' tf_gpu_id ' , allocates ( strictly ) <nl> + / / ' memory_limit ' bytes of GPU memory to it , and adds it to the ' devices ' <nl> + / / vector . <nl> + Status CreateGPUDevice ( const SessionOptions & options , <nl> + const string & name_prefix , TfGpuId tf_gpu_id , <nl> + int64 memory_limit , std : : vector < Device * > * devices ) ; <nl> <nl> virtual BaseGPUDevice * CreateGPUDevice ( const SessionOptions & options , <nl> const string & name , Bytes memory_limit , <nl> const DeviceLocality & locality , <nl> - int gpu_id , <nl> + TfGpuId tf_gpu_id , <nl> const string & physical_device_desc , <nl> Allocator * gpu_allocator , <nl> Allocator * cpu_allocator ) = 0 ; <nl> <nl> - / / Returns into ' ids ' the list of valid GPU ids , in the order that <nl> - / / they should map to logical gpu ids " / device : GPU : 0 " , " / device : GPU : 1 " , etc , <nl> + / / Returns into ' ids ' the list of valid CUDA GPU ids , in the order that <nl> + / / they should map to TF GPU ids " / device : GPU : 0 " , " / device : GPU : 1 " , etc , <nl> / / based upon ' visible_gpu_order ' which was generated by parsing <nl> - / / GPUOptions : : visible_device_list which is a comma - separated list of <nl> - / / ' visible gpu ids ' . 
<nl> - Status GetValidDeviceIds ( const std : : vector < int > & visible_gpu_order , <nl> - std : : vector < int > * ids ) ; <nl> + / / GPUOptions : : visible_device_list which is a comma - separated list of CUDA GPU <nl> + / / ids . <nl> + Status GetValidDeviceIds ( const std : : vector < CudaGpuId > & visible_gpu_order , <nl> + std : : vector < CudaGpuId > * ids ) ; <nl> <nl> - / / visible_gpu_initialized_ [ gpu_id ] is true if visible GPU gpu_id <nl> + / / visible_gpu_initialized_ [ cuda_gpu_id ] is true if visible GPU cuda_gpu_id <nl> / / has been initialized by the process . <nl> std : : unordered_map < int , bool > visible_gpu_initialized_ ; <nl> } ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device_factory . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device_factory . cc <nl> limitations under the License . <nl> # define EIGEN_USE_GPU <nl> <nl> # include " tensorflow / core / common_runtime / gpu / gpu_device . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> # include " tensorflow / core / common_runtime / gpu / process_state . h " <nl> # include " tensorflow / core / common_runtime / threadpool_device . h " <nl> <nl> namespace tensorflow { <nl> class GPUDevice : public BaseGPUDevice { <nl> public : <nl> GPUDevice ( const SessionOptions & options , const string & name , <nl> - Bytes memory_limit , const DeviceLocality & locality , int gpu_id , <nl> - const string & physical_device_desc , Allocator * gpu_allocator , <nl> - Allocator * cpu_allocator ) <nl> - : BaseGPUDevice ( options , name , memory_limit , locality , gpu_id , <nl> + Bytes memory_limit , const DeviceLocality & locality , <nl> + TfGpuId tf_gpu_id , const string & physical_device_desc , <nl> + Allocator * gpu_allocator , Allocator * cpu_allocator ) <nl> + : BaseGPUDevice ( options , name , memory_limit , locality , tf_gpu_id , <nl> physical_device_desc , gpu_allocator , cpu_allocator , <nl> false / * sync every op * / , 1 / * max_streams * / ) { <nl> if ( options . config . has_gpu_options ( ) ) { <nl> class GPUDeviceFactory : public BaseGPUDeviceFactory { <nl> private : <nl> BaseGPUDevice * CreateGPUDevice ( const SessionOptions & options , <nl> const string & name , Bytes memory_limit , <nl> - const DeviceLocality & locality , int gpu_id , <nl> + const DeviceLocality & locality , <nl> + TfGpuId tf_gpu_id , <nl> const string & physical_device_desc , <nl> Allocator * gpu_allocator , <nl> Allocator * cpu_allocator ) override { <nl> - return new GPUDevice ( options , name , memory_limit , locality , gpu_id , <nl> + return new GPUDevice ( options , name , memory_limit , locality , tf_gpu_id , <nl> physical_device_desc , gpu_allocator , cpu_allocator ) ; <nl> } <nl> } ; <nl> new file mode 100644 <nl> index 0000000000000 . . ff46be9c015ac <nl> mmm / dev / null <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device_test . cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . 
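[Editorial aside, not part of the diff: the new gpu_device_test.cc that begins here exercises the virtual-device path added to CreateDevices above, in which one physical GPU can be split into several TF devices with fixed memory limits. A minimal sketch of building such a configuration, mirroring the MakeSessionOptions helper and the MultipleVirtualDevices test, is shown below; the function name is invented and the 123/456 MB limits are the example values used by that test.]

// Sketch only: one physical GPU ("0") split into two TF virtual devices.
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"

namespace tensorflow {
SessionOptions ExampleTwoVirtualDevices() {
  SessionOptions options;
  GPUOptions* gpu_options = options.config.mutable_gpu_options();
  gpu_options->set_visible_device_list("0");
  auto* virtual_devices =
      gpu_options->mutable_experimental()->add_virtual_devices();
  virtual_devices->add_memory_limit_mb(123);
  virtual_devices->add_memory_limit_mb(456);
  // CreateDevices() would then register /device:GPU:0 and /device:GPU:1,
  // both backed by CUDA GPU 0.
  return options;
}
}  // namespace tensorflow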
<nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # if GOOGLE_CUDA <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_device . h " <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / lib / core / status_test_util . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace { <nl> + const char * kDeviceNamePrefix = " / job : localhost / replica : 0 / task : 0 " ; <nl> + <nl> + static SessionOptions MakeSessionOptions ( <nl> + const string & visible_device_list = " " , <nl> + double per_process_gpu_memory_fraction = 0 , int gpu_device_count = 1 , <nl> + const std : : vector < std : : vector < float > > & memory_limit_mb = { } ) { <nl> + SessionOptions options ; <nl> + ConfigProto * config = & options . config ; <nl> + ( * config - > mutable_device_count ( ) ) [ " GPU " ] = gpu_device_count ; <nl> + GPUOptions * gpu_options = config - > mutable_gpu_options ( ) ; <nl> + gpu_options - > set_visible_device_list ( visible_device_list ) ; <nl> + gpu_options - > set_per_process_gpu_memory_fraction ( <nl> + per_process_gpu_memory_fraction ) ; <nl> + for ( const auto & v : memory_limit_mb ) { <nl> + auto virtual_devices = <nl> + gpu_options - > mutable_experimental ( ) - > add_virtual_devices ( ) ; <nl> + for ( float mb : v ) { <nl> + virtual_devices - > add_memory_limit_mb ( mb ) ; <nl> + } <nl> + } <nl> + return options ; <nl> + } <nl> + <nl> + static bool StartsWith ( const string & lhs , const string & rhs ) { <nl> + if ( rhs . length ( ) > lhs . length ( ) ) return false ; <nl> + return lhs . substr ( 0 , rhs . length ( ) ) = = rhs ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , FailedToParseVisibleDeviceList ) { <nl> + SessionOptions opts = MakeSessionOptions ( " 0 , abc " ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + Status status = DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ; <nl> + EXPECT_EQ ( status . code ( ) , error : : INVALID_ARGUMENT ) ; <nl> + EXPECT_TRUE ( StartsWith ( status . error_message ( ) , " Could not parse entry " ) ) <nl> + < < status ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , InvalidGpuId ) { <nl> + SessionOptions opts = MakeSessionOptions ( " 100 " ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + Status status = DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ; <nl> + EXPECT_EQ ( status . code ( ) , error : : INVALID_ARGUMENT ) ; <nl> + EXPECT_TRUE ( StartsWith ( status . error_message ( ) , <nl> + " ' visible_device_list ' listed an invalid GPU id " ) ) <nl> + < < status ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , DuplicateEntryInVisibleDeviceList ) { <nl> + SessionOptions opts = MakeSessionOptions ( " 0 , 0 " ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + Status status = DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ; <nl> + EXPECT_EQ ( status . 
code ( ) , error : : INVALID_ARGUMENT ) ; <nl> + EXPECT_TRUE ( StartsWith ( status . error_message ( ) , <nl> + " visible_device_list contained a duplicate entry " ) ) <nl> + < < status ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , VirtualDeviceConfigConflictsWithMemoryFractionSettings ) { <nl> + SessionOptions opts = MakeSessionOptions ( " 0 " , 0 . 1 , 1 , { { } } ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + Status status = DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ; <nl> + EXPECT_EQ ( status . code ( ) , error : : INVALID_ARGUMENT ) ; <nl> + EXPECT_TRUE ( StartsWith ( status . error_message ( ) , <nl> + " It ' s invalid to set per_process_gpu_memory_fraction " ) ) <nl> + < < status ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , GpuDeviceCountTooSmall ) { <nl> + / / device_count is 0 , but with one entry in visible_device_list and one <nl> + / / ( empty ) VirtualDevices message . <nl> + SessionOptions opts = MakeSessionOptions ( " 0 " , 0 , 0 , { { } } ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + Status status = DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ; <nl> + EXPECT_EQ ( status . code ( ) , error : : UNKNOWN ) ; <nl> + EXPECT_TRUE ( StartsWith ( status . error_message ( ) , <nl> + " Not enough GPUs to create virtual devices . " ) ) <nl> + < < status ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , NotEnoughGpuInVisibleDeviceList ) { <nl> + / / Single entry in visible_device_list with two ( empty ) VirtualDevices <nl> + / / messages . <nl> + SessionOptions opts = MakeSessionOptions ( " 0 " , 0 , 8 , { { } , { } } ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + Status status = DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ; <nl> + EXPECT_EQ ( status . code ( ) , error : : UNKNOWN ) ; <nl> + EXPECT_TRUE ( StartsWith ( status . error_message ( ) , <nl> + " Not enough GPUs to create virtual devices . " ) ) <nl> + < < status ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , VirtualDeviceConfigConflictsWithVisibleDeviceList ) { <nl> + / / This test requires at least two visible GPUs . <nl> + if ( GPUMachineManager ( ) - > VisibleDeviceCount ( ) < 2 ) return ; <nl> + / / Two entries in visible_device_list with one ( empty ) VirtualDevices <nl> + / / message . <nl> + SessionOptions opts = MakeSessionOptions ( " 0 , 1 " , 0 , 8 , { { } } ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + Status status = DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ; <nl> + EXPECT_EQ ( status . code ( ) , error : : INVALID_ARGUMENT ) ; <nl> + EXPECT_TRUE ( StartsWith ( status . error_message ( ) , <nl> + " The number of GPUs in visible_device_list doesn ' t " <nl> + " match the number of elements in the virtual_devices " <nl> + " list . " ) ) <nl> + < < status ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , EmptyVirtualDeviceConfig ) { <nl> + / / It ' ll create a single virtual device when the virtual device config is empty . <nl> + SessionOptions opts = MakeSessionOptions ( " 0 " ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + TF_CHECK_OK ( DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ) ; <nl> + EXPECT_EQ ( 1 , devices .
size ( ) ) ; <nl> + EXPECT_GE ( devices [ 0 ] - > attributes ( ) . memory_limit ( ) , 0 ) ; <nl> + for ( auto d : devices ) delete d ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , SingleVirtualDeviceWithNoMemoryLimit ) { <nl> + / / It ' ll create a single virtual device for the GPU in question when <nl> + / / memory_limit_mb is unset . <nl> + SessionOptions opts = MakeSessionOptions ( " 0 " , 0 , 1 , { { } } ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + TF_CHECK_OK ( DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ) ; <nl> + EXPECT_EQ ( 1 , devices . size ( ) ) ; <nl> + EXPECT_GE ( devices [ 0 ] - > attributes ( ) . memory_limit ( ) , 0 ) ; <nl> + for ( auto d : devices ) delete d ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , SingleVirtualDeviceWithMemoryLimit ) { <nl> + SessionOptions opts = MakeSessionOptions ( " 0 " , 0 , 1 , { { 123 } } ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + TF_CHECK_OK ( DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ) ; <nl> + EXPECT_EQ ( 1 , devices . size ( ) ) ; <nl> + EXPECT_EQ ( 123 < < 20 , devices [ 0 ] - > attributes ( ) . memory_limit ( ) ) ; <nl> + for ( auto d : devices ) delete d ; <nl> + } <nl> + <nl> + TEST ( GPUDeviceTest , MultipleVirtualDevices ) { <nl> + SessionOptions opts = MakeSessionOptions ( " 0 " , 0 , 1 , { { 123 , 456 } } ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + TF_CHECK_OK ( DeviceFactory : : GetFactory ( " GPU " ) - > CreateDevices ( <nl> + opts , kDeviceNamePrefix , & devices ) ) ; <nl> + EXPECT_EQ ( 2 , devices . size ( ) ) ; <nl> + EXPECT_EQ ( 123 < < 20 , devices [ 0 ] - > attributes ( ) . memory_limit ( ) ) ; <nl> + EXPECT_EQ ( 456 < < 20 , devices [ 1 ] - > attributes ( ) . memory_limit ( ) ) ; <nl> + for ( auto d : devices ) delete d ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace tensorflow <nl> + <nl> + # endif <nl> new file mode 100644 <nl> index 0000000000000 . . ff81ccd4325e0 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id . h <nl> <nl> + / * Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef THIRD_PARTY_TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_ID_H_ <nl> + # define THIRD_PARTY_TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_ID_H_ <nl> + <nl> + # include " tensorflow / core / lib / gtl / int_type .
h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + / / There are three types of GPU ids : <nl> + / / - * physical * GPU id : this is the integer index of a GPU hardware in the <nl> + / / physical machine , it can be filtered by CUDA environment variable <nl> + / / CUDA_VISIBLE_DEVICES . Note that this id is not visible to Tensorflow , but <nl> + / / result after filtering by CUDA_VISIBLE_DEVICES is visible to TF and is <nl> + / / called CUDA GPU id as below . See <nl> + / / http : / / docs . nvidia . com / cuda / cuda - c - programming - guide / index . html # env - vars <nl> + / / for more details . <nl> + / / - CUDA GPU id ( also called * visible * GPU id in <nl> + / / third_party / tensorflow / core / protobuf / config . proto ) : this is the id that is <nl> + / / visible to Tensorflow after filtering by CUDA_VISIBLE_DEVICES , and is <nl> + / / generated by the CUDA GPU driver . It starts from 0 and is used for CUDA API <nl> + / / calls like cuDeviceGet ( ) . <nl> + / / - TF GPU id ( also called * virtual * GPU id in <nl> + / / third_party / tensorflow / core / protobuf / config . proto ) : this is the id that <nl> + / / Tensorflow generates and exposes to its users . It is the id in the < id > <nl> + / / field of the device name " / device : GPU : < id > " , and is also the identifier of <nl> + / / a BaseGPUDevice . Note that the configuration allows us to create multiple <nl> + / / BaseGPUDevice per GPU hardware in order to use multi CUDA streams on the <nl> + / / hardware , so the mapping between TF GPU id and CUDA GPU id is not a 1 : 1 <nl> + / / mappping , see the example below . <nl> + / / <nl> + / / For example , assuming that in the machine we have GPU device with index 0 , 1 , <nl> + / / 2 and 3 ( physical GPU id ) . Setting " CUDA_VISIBLE_DEVICES = 1 , 2 , 3 " will create <nl> + / / the following mapping between CUDA GPU id and physical GPU id : <nl> + / / <nl> + / / CUDA GPU id - > physical GPU id <nl> + / / 0 - > 1 <nl> + / / 1 - > 2 <nl> + / / 2 - > 3 <nl> + / / <nl> + / / Note that physical GPU id 0 is invisible to TF so there is no mapping entry <nl> + / / for it . <nl> + / / <nl> + / / Assuming we configure the Session to create one BaseGPUDevice per GPU <nl> + / / hardware , then setting GPUOptions : : visible_device_list to " 2 , 0 " will create <nl> + / / the following mappting between TF GPU id and CUDA GPU id : <nl> + / / <nl> + / / TF GPU id - > CUDA GPU ID <nl> + / / 0 ( i . e . / device : GPU : 0 ) - > 2 <nl> + / / 1 ( i . e . / device : GPU : 1 ) - > 0 <nl> + / / <nl> + / / Note that CUDA GPU id 1 is filtered out by GPUOptions : : visible_device_list , <nl> + / / so it won ' t be used by the TF process . <nl> + / / <nl> + / / On the other hand , if we configure it to create 2 BaseGPUDevice per GPU <nl> + / / hardware , then setting GPUOptions : : visible_device_list to " 2 , 0 " will create <nl> + / / the following mappting between TF GPU id and CUDA GPU id : <nl> + / / <nl> + / / TF GPU id - > CUDA GPU ID <nl> + / / 0 ( i . e . / device : GPU : 0 ) - > 2 <nl> + / / 1 ( i . e . / device : GPU : 1 ) - > 2 <nl> + / / 2 ( i . e . / device : GPU : 2 ) - > 0 <nl> + / / 3 ( i . e . / device : GPU : 3 ) - > 0 <nl> + / / <nl> + / / We create strong - typed integer classes for both TF GPU id and CUDA GPU id to <nl> + / / minimize programming errors and improve code readability . 
Except for the <nl> + / / StreamExecutor interface ( as we don ' t change its API ) , whenever we need a <nl> + / / TF GPU id ( or CUDA GPU id ) we should use TfGpuId ( or CudaGpuId ) instead of a <nl> + / / raw integer . <nl> + TF_LIB_GTL_DEFINE_INT_TYPE ( TfGpuId , int32 ) ; <nl> + TF_LIB_GTL_DEFINE_INT_TYPE ( CudaGpuId , int32 ) ; <nl> + <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / THIRD_PARTY_TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_ID_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 92cd19453f14c <nl> mmm / dev / null <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id_utils . cc <nl> <nl> + / * Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> + <nl> + # include < unordered_map > <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / mutex . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace { <nl> + / / Manages the map between TfGpuId and CUDA GPU id . <nl> + class GpuIdManager { <nl> + public : <nl> + static GpuIdManager * singleton ( ) { <nl> + static auto * manager = new GpuIdManager ; <nl> + return manager ; <nl> + } <nl> + <nl> + void InsertOrDie ( TfGpuId tf_gpu_id , CudaGpuId cuda_gpu_id ) <nl> + LOCKS_EXCLUDED ( mu_ ) { <nl> + std : : pair < IdMapType : : iterator , bool > result ; <nl> + { <nl> + mutex_lock lock ( mu_ ) ; <nl> + result = id_map_ . insert ( { tf_gpu_id . value ( ) , cuda_gpu_id . value ( ) } ) ; <nl> + } <nl> + if ( ! result . second ) { <nl> + CHECK_EQ ( cuda_gpu_id . value ( ) , result . first - > second ) <nl> + < < " Mapping the same TfGpuId to a different CUDA GPU id . " <nl> + < < " TfGpuId : " < < tf_gpu_id <nl> + < < " Existing mapped CUDA GPU id : " < < result . first - > second <nl> + < < " CUDA GPU id being tried to map to : " < < cuda_gpu_id ; <nl> + } <nl> + } <nl> + <nl> + int32 FindOrDie ( TfGpuId tf_gpu_id ) const LOCKS_EXCLUDED ( mu_ ) { <nl> + mutex_lock lock ( mu_ ) ; <nl> + auto result = id_map_ . find ( tf_gpu_id . value ( ) ) ; <nl> + CHECK ( result ! = id_map_ . 
end ( ) ) <nl> + < < " Could not find the mapping for TfGpuId : " < < tf_gpu_id ; <nl> + return result - > second ; <nl> + } <nl> + <nl> + private : <nl> + using IdMapType = std : : unordered_map < int32 , int32 > ; <nl> + mutable mutex mu_ ; <nl> + IdMapType id_map_ GUARDED_BY ( mu_ ) ; <nl> + } ; <nl> + } / / namespace <nl> + <nl> + void GpuIdUtil : : InsertTfCudaGpuIdPair ( TfGpuId tf_gpu_id , <nl> + CudaGpuId cuda_gpu_id ) { <nl> + GpuIdManager : : singleton ( ) - > InsertOrDie ( tf_gpu_id , cuda_gpu_id ) ; <nl> + } <nl> + <nl> + CudaGpuId GpuIdUtil : : TfToCudaGpuId ( TfGpuId tf_gpu_id ) { <nl> + return CudaGpuId ( GpuIdManager : : singleton ( ) - > FindOrDie ( tf_gpu_id ) ) ; <nl> + } <nl> + <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 78e51c84c1466 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id_utils . h <nl> <nl> + / * Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef THIRD_PARTY_TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_ID_UTILS_H_ <nl> + # define THIRD_PARTY_TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_ID_UTILS_H_ <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> + # include " tensorflow / core / lib / gtl / int_type . h " <nl> + # include " tensorflow / core / platform / stream_executor . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace gpu = : : perftools : : gputools ; <nl> + <nl> + / / Utility methods for translation between Tensorflow GPU ids and CUDA GPU ids . <nl> + class GpuIdUtil { <nl> + public : <nl> + static void InsertTfCudaGpuIdPair ( TfGpuId tf_gpu_id , CudaGpuId cuda_gpu_id ) ; <nl> + static CudaGpuId TfToCudaGpuId ( TfGpuId tf_gpu_id ) ; <nl> + <nl> + / / Convenient methods for getting the associated executor given a TfGpuId or <nl> + / / CudaGpuId . <nl> + static gpu : : port : : StatusOr < gpu : : StreamExecutor * > ExecutorForCudaGpuId ( <nl> + gpu : : Platform * gpu_manager , CudaGpuId cuda_gpu_id ) { <nl> + return gpu_manager - > ExecutorForDevice ( cuda_gpu_id . value ( ) ) ; <nl> + } <nl> + static gpu : : port : : StatusOr < gpu : : StreamExecutor * > ExecutorForCudaGpuId ( <nl> + CudaGpuId cuda_gpu_id ) { <nl> + return ExecutorForCudaGpuId ( GPUMachineManager ( ) , cuda_gpu_id ) ; <nl> + } <nl> + static gpu : : port : : StatusOr < gpu : : StreamExecutor * > ExecutorForTfGpuId ( <nl> + TfGpuId tf_gpu_id ) { <nl> + return ExecutorForCudaGpuId ( GpuIdUtil : : TfToCudaGpuId ( tf_gpu_id ) ) ; <nl> + } <nl> + <nl> + / / Verify that the cuda_gpu_id associated with a TfGpuId is legitimate . 
<nl> + static void CheckValidTfGpuId ( TfGpuId tf_gpu_id ) { <nl> + const CudaGpuId cuda_gpu_id = GpuIdUtil : : TfToCudaGpuId ( tf_gpu_id ) ; <nl> + const int visible_device_count = GPUMachineManager ( ) - > VisibleDeviceCount ( ) ; <nl> + CHECK_LT ( cuda_gpu_id . value ( ) , visible_device_count ) <nl> + < < " cuda_gpu_id is outside discovered device range . " <nl> + < < " TF GPU id : " < < tf_gpu_id < < " CUDA GPU id : " < < cuda_gpu_id <nl> + < < " visible device count : " < < visible_device_count ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / THIRD_PARTY_TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_ID_UTILS_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . bebe00a4317be <nl> mmm / dev / null <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_id_utils_test . cc <nl> <nl> + / * Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> + <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace test { <nl> + <nl> + TEST ( GpuIdTest , Basics ) { <nl> + TfGpuId key_0 ( 0 ) ; <nl> + CudaGpuId value_0 ( 0 ) ; <nl> + GpuIdUtil : : InsertTfCudaGpuIdPair ( key_0 , value_0 ) ; <nl> + EXPECT_EQ ( value_0 , GpuIdUtil : : TfToCudaGpuId ( key_0 ) ) ; <nl> + <nl> + / / Multiple calls to map the same value are ok . <nl> + GpuIdUtil : : InsertTfCudaGpuIdPair ( key_0 , value_0 ) ; <nl> + EXPECT_EQ ( value_0 , GpuIdUtil : : TfToCudaGpuId ( key_0 ) ) ; <nl> + <nl> + / / Map a different TfGpuId to a different value . <nl> + TfGpuId key_1 ( 3 ) ; <nl> + CudaGpuId value_1 ( 2 ) ; <nl> + GpuIdUtil : : InsertTfCudaGpuIdPair ( key_1 , value_1 ) ; <nl> + EXPECT_EQ ( value_1 , GpuIdUtil : : TfToCudaGpuId ( key_1 ) ) ; <nl> + <nl> + / / Mapping a different TfGpuId to the same value is ok . <nl> + TfGpuId key_2 ( 10 ) ; <nl> + GpuIdUtil : : InsertTfCudaGpuIdPair ( key_2 , value_1 ) ; <nl> + EXPECT_EQ ( value_1 , GpuIdUtil : : TfToCudaGpuId ( key_2 ) ) ; <nl> + <nl> + / / Mapping the same TfGpuId to a different value will crash the program . <nl> + ASSERT_DEATH ( GpuIdUtil : : InsertTfCudaGpuIdPair ( key_2 , value_0 ) , <nl> + " Mapping the same TfGpuId to a different CUDA GPU id " ) ; <nl> + <nl> + / / Getting a nonexistent mapping will crash the program . <nl> + ASSERT_DEATH ( GpuIdUtil : : TfToCudaGpuId ( TfGpuId ( 100 ) ) , <nl> + " Could not find the mapping for TfGpuId " ) ; <nl> + } <nl> + <nl> + } / / namespace test <nl> + } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / gpu / process_state .
cc <nl> ppp b / tensorflow / core / common_runtime / gpu / process_state . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / gpu / gpu_bfc_allocator . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_cudamalloc_allocator . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_debug_allocator . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id_utils . h " <nl> # include " tensorflow / core / common_runtime / gpu / gpu_init . h " <nl> # include " tensorflow / core / common_runtime / gpu / pool_allocator . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> ProcessState : : MemDesc ProcessState : : PtrType ( const void * ptr ) { <nl> return MemDesc ( ) ; <nl> } <nl> <nl> - Allocator * ProcessState : : GetGPUAllocator ( const GPUOptions & options , int gpu_id , <nl> + Allocator * ProcessState : : GetGPUAllocator ( const GPUOptions & options , <nl> + TfGpuId tf_gpu_id , <nl> size_t total_bytes ) { <nl> # if GOOGLE_CUDA <nl> const string & allocator_type = options . allocator_type ( ) ; <nl> mutex_lock lock ( mu_ ) ; <nl> - gpu : : Platform * gpu_platform = GPUMachineManager ( ) ; <nl> + GpuIdUtil : : CheckValidTfGpuId ( tf_gpu_id ) ; <nl> <nl> - / / Verify that gpu_id is legitimate . <nl> - CHECK_LT ( gpu_id , gpu_platform - > VisibleDeviceCount ( ) ) <nl> - < < " gpu_id is outside discovered device range " ; <nl> - <nl> - if ( gpu_id > = static_cast < int64 > ( gpu_allocators_ . size ( ) ) ) { <nl> - gpu_allocators_ . resize ( gpu_id + 1 ) ; <nl> - if ( FLAGS_brain_gpu_record_mem_types ) gpu_al_ . resize ( gpu_id + 1 ) ; <nl> + if ( tf_gpu_id . value ( ) > = static_cast < int64 > ( gpu_allocators_ . size ( ) ) ) { <nl> + gpu_allocators_ . resize ( tf_gpu_id . value ( ) + 1 ) ; <nl> + if ( FLAGS_brain_gpu_record_mem_types ) gpu_al_ . resize ( tf_gpu_id . value ( ) + 1 ) ; <nl> } <nl> <nl> - if ( gpu_allocators_ [ gpu_id ] = = nullptr ) { <nl> + if ( gpu_allocators_ [ tf_gpu_id . value ( ) ] = = nullptr ) { <nl> VisitableAllocator * gpu_allocator ; <nl> <nl> / / Validate allocator types . <nl> Allocator * ProcessState : : GetGPUAllocator ( const GPUOptions & options , int gpu_id , <nl> return nullptr ; <nl> } <nl> <nl> - gpu_allocator = new GPUBFCAllocator ( gpu_id , total_bytes , options ) ; <nl> + const CudaGpuId cuda_gpu_id = GpuIdUtil : : TfToCudaGpuId ( tf_gpu_id ) ; <nl> + gpu_allocator = <nl> + new GPUBFCAllocator ( cuda_gpu_id , total_bytes , options , <nl> + strings : : StrCat ( " GPU_ " , tf_gpu_id . value ( ) , " _bfc " ) ) ; <nl> <nl> / / If true , checks for memory overwrites by writing <nl> / / distinctive patterns on both ends of allocated memory . 
<nl> if ( useCudaMemoryGuardAllocator ( ) ) { <nl> - gpu_allocator = new GPUDebugAllocator ( gpu_allocator , gpu_id ) ; <nl> - gpu_allocator = new GPUNanResetAllocator ( gpu_allocator , gpu_id ) ; <nl> + gpu_allocator = new GPUDebugAllocator ( gpu_allocator , cuda_gpu_id ) ; <nl> + gpu_allocator = new GPUNanResetAllocator ( gpu_allocator , cuda_gpu_id ) ; <nl> } else if ( useCudaMallocAllocator ( ) ) { <nl> / / If true , passes all allocation requests through to cudaMalloc <nl> / / useful for doing memory debugging with tools like cuda - memcheck <nl> / / * * WARNING * * probably will not work in a multi - gpu scenario <nl> - gpu_allocator = new GPUcudaMallocAllocator ( gpu_allocator , gpu_id ) ; <nl> + gpu_allocator = new GPUcudaMallocAllocator ( gpu_allocator , cuda_gpu_id ) ; <nl> } <nl> - gpu_allocators_ [ gpu_id ] = gpu_allocator ; <nl> + gpu_allocators_ [ tf_gpu_id . value ( ) ] = gpu_allocator ; <nl> <nl> / / If there are any pending AllocVisitors for this bus , add <nl> / / them now . <nl> gpu : : StreamExecutor * se = <nl> - gpu_platform - > ExecutorForDevice ( gpu_id ) . ValueOrDie ( ) ; <nl> + GpuIdUtil : : ExecutorForTfGpuId ( tf_gpu_id ) . ValueOrDie ( ) ; <nl> int bus_id = se - > GetDeviceDescription ( ) . numa_node ( ) ; <nl> if ( bus_id > = 0 & & bus_id < static_cast < int64 > ( gpu_visitors_ . size ( ) ) ) { <nl> for ( const auto & v : gpu_visitors_ [ bus_id ] ) { <nl> - gpu_allocators_ [ gpu_id ] - > AddAllocVisitor ( v ) ; <nl> + gpu_allocator - > AddAllocVisitor ( v ) ; <nl> } <nl> } <nl> if ( FLAGS_brain_gpu_record_mem_types ) { <nl> MemDesc md ; <nl> md . loc = MemDesc : : GPU ; <nl> - md . dev_index = gpu_id ; <nl> + md . dev_index = cuda_gpu_id . value ( ) ; <nl> md . gpu_registered = false ; <nl> md . nic_registered = true ; <nl> - if ( static_cast < int64 > ( gpu_al_ . size ( ) ) < = gpu_id ) <nl> - gpu_al_ . resize ( gpu_id + 1 ) ; <nl> - gpu_al_ [ gpu_id ] = new internal : : RecordingAllocator ( <nl> - & mem_desc_map_ , gpu_allocators_ [ gpu_id ] , md , & mu_ ) ; <nl> + if ( static_cast < int64 > ( gpu_al_ . size ( ) ) < = tf_gpu_id . value ( ) ) { <nl> + gpu_al_ . resize ( tf_gpu_id . value ( ) + 1 ) ; <nl> + } <nl> + gpu_al_ [ tf_gpu_id . value ( ) ] = new internal : : RecordingAllocator ( <nl> + & mem_desc_map_ , gpu_allocator , md , & mu_ ) ; <nl> } <nl> } <nl> - if ( FLAGS_brain_gpu_record_mem_types ) return gpu_al_ [ gpu_id ] ; <nl> - return gpu_allocators_ [ gpu_id ] ; <nl> + if ( FLAGS_brain_gpu_record_mem_types ) return gpu_al_ [ tf_gpu_id . value ( ) ] ; <nl> + return gpu_allocators_ [ tf_gpu_id . value ( ) ] ; <nl> # else <nl> LOG ( FATAL ) < < " GPUAllocator unavailable . Not compiled with - - config = cuda . " ; <nl> return nullptr ; <nl> Allocator * ProcessState : : GetCUDAHostAllocator ( int numa_node ) { <nl> gpu : : StreamExecutor * se = nullptr ; <nl> for ( int i = 0 ; i < static_cast < int > ( gpu_allocators_ . size ( ) ) ; + + i ) { <nl> if ( gpu_allocators_ [ i ] ! = nullptr ) { <nl> - se = GPUMachineManager ( ) - > ExecutorForDevice ( i ) . ValueOrDie ( ) ; <nl> + se = GpuIdUtil : : ExecutorForTfGpuId ( TfGpuId ( i ) ) . ValueOrDie ( ) ; <nl> break ; <nl> } <nl> } <nl> Allocator * ProcessState : : GetCUDAHostAllocator ( int numa_node ) { <nl> void ProcessState : : AddGPUAllocVisitor ( int bus_id , AllocVisitor visitor ) { <nl> # if GOOGLE_CUDA <nl> mutex_lock lock ( mu_ ) ; <nl> - gpu : : Platform * gpu_platform = GPUMachineManager ( ) ; <nl> - for ( int gpu_id = 0 ; gpu_id < static_cast < int64 > ( gpu_allocators_ . 
size ( ) ) ; <nl> - + + gpu_id ) { <nl> + for ( int i = 0 ; i < static_cast < int64 > ( gpu_allocators_ . size ( ) ) ; + + i ) { <nl> gpu : : StreamExecutor * se = <nl> - gpu_platform - > ExecutorForDevice ( gpu_id ) . ValueOrDie ( ) ; <nl> - if ( gpu_allocators_ [ gpu_id ] & & <nl> + GpuIdUtil : : ExecutorForTfGpuId ( TfGpuId ( i ) ) . ValueOrDie ( ) ; <nl> + if ( gpu_allocators_ [ i ] & & <nl> ( se - > GetDeviceDescription ( ) . numa_node ( ) + 1 ) = = bus_id ) { <nl> - gpu_allocators_ [ gpu_id ] - > AddAllocVisitor ( visitor ) ; <nl> + gpu_allocators_ [ i ] - > AddAllocVisitor ( visitor ) ; <nl> } <nl> } <nl> while ( bus_id > = static_cast < int64 > ( gpu_visitors_ . size ( ) ) ) { <nl> mmm a / tensorflow / core / common_runtime / gpu / process_state . h <nl> ppp b / tensorflow / core / common_runtime / gpu / process_state . h <nl> limitations under the License . <nl> # define TENSORFLOW_COMMON_RUNTIME_GPU_PROCESS_STATE_H_ <nl> <nl> # include < functional > <nl> + # include < map > <nl> # include < unordered_map > <nl> # include < vector > <nl> <nl> + # include " tensorflow / core / common_runtime / gpu / gpu_id . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / platform / mutex . h " <nl> # include " tensorflow / core / platform / thread_annotations . h " <nl> class ProcessState { <nl> / / <nl> / / ' total_bytes ' is the total number of bytes that should be made <nl> / / available to the allocator . The first call to this function for <nl> - / / a given gpu_id creates the allocator , so only the total_bytes <nl> + / / a given tf_gpu_id creates the allocator , so only the total_bytes <nl> / / used on that first call is used . <nl> / / <nl> / / " Allocator type " describes the type of algorithm to use for the <nl> / / underlying allocator . REQUIRES : Must be a valid type ( see <nl> / / config . proto for the list of supported strings . ) . <nl> / / <nl> - / / REQUIRES : gpu_id must be a valid ordinal for a GPU available in the <nl> + / / REQUIRES : tf_gpu_id must be a valid id for a BaseGPUDevice available in the <nl> / / current system environment . Otherwise returns nullptr . <nl> - virtual Allocator * GetGPUAllocator ( const GPUOptions & options , int gpu_id , <nl> - size_t total_bytes ) ; <nl> + virtual Allocator * GetGPUAllocator ( const GPUOptions & options , <nl> + TfGpuId tf_gpu_id , size_t total_bytes ) ; <nl> <nl> virtual Allocator * GetCUDAHostAllocator ( int numa_node ) ; <nl> <nl> mmm a / tensorflow / core / grappler / op_types . cc <nl> ppp b / tensorflow / core / grappler / op_types . cc <nl> bool IsDequeueOp ( const NodeDef & node ) { <nl> <nl> bool IsDiv ( const NodeDef & node ) { return node . op ( ) = = " Div " ; } <nl> <nl> + bool IsEluGrad ( const NodeDef & node ) { return node . op ( ) = = " EluGrad " ; } <nl> + <nl> bool IsEnter ( const NodeDef & node ) { <nl> const auto & op = node . op ( ) ; <nl> return op = = " Enter " | | op = = " RefEnter " ; <nl> bool IsIdentity ( const NodeDef & node ) { <nl> return op = = " Identity " | | op = = " RefIdentity " ; <nl> } <nl> <nl> + bool IsInvGrad ( const NodeDef & node ) { return node . op ( ) = = " InvGrad " ; } <nl> + <nl> bool IsMatMul ( const NodeDef & node ) { <nl> const auto & op = node . op ( ) ; <nl> return op = = " MatMul " | | op = = " BatchMatMul " | | op = = " QuantizedMatMul " | | <nl> bool IsPlaceholder ( const NodeDef & node ) { <nl> <nl> bool IsRealDiv ( const NodeDef & node ) { return node . 
op ( ) = = " RealDiv " ; } <nl> <nl> - bool IsReluGrad ( const NodeDef & node ) { return node . op ( ) = = " ReluGrad " ; } <nl> + bool IsReciprocalGrad ( const NodeDef & node ) { <nl> + return node . op ( ) = = " ReciprocalGrad " ; <nl> + } <nl> <nl> bool IsRecv ( const NodeDef & node ) { return node . op ( ) = = " _Recv " ; } <nl> <nl> bool IsReduction ( const NodeDef & node ) { <nl> op = = " Mean " | | op = = " Any " | | op = = " All " ; <nl> } <nl> <nl> + bool IsReluGrad ( const NodeDef & node ) { return node . op ( ) = = " ReluGrad " ; } <nl> + <nl> + bool IsRelu6Grad ( const NodeDef & node ) { return node . op ( ) = = " Relu6Grad " ; } <nl> + <nl> bool IsReshape ( const NodeDef & node ) { return ( node . op ( ) = = " Reshape " ) ; } <nl> <nl> bool IsRestore ( const NodeDef & node ) { <nl> bool IsRestore ( const NodeDef & node ) { <nl> node . op ( ) = = " RestoreSlice " ) ; <nl> } <nl> <nl> + bool IsRsqrtGrad ( const NodeDef & node ) { return node . op ( ) = = " RsqrtGrad " ; } <nl> + <nl> + bool IsSeluGrad ( const NodeDef & node ) { return node . op ( ) = = " SeluGrad " ; } <nl> + <nl> bool IsSend ( const NodeDef & node ) { return node . op ( ) = = " _Send " ; } <nl> <nl> bool IsShape ( const NodeDef & node ) { return node . op ( ) = = " Shape " ; } <nl> <nl> bool IsShapeN ( const NodeDef & node ) { return node . op ( ) = = " ShapeN " ; } <nl> <nl> + bool IsSigmoidGrad ( const NodeDef & node ) { return node . op ( ) = = " SigmoidGrad " ; } <nl> + <nl> bool IsSlice ( const NodeDef & node ) { return node . op ( ) = = " Slice " ; } <nl> <nl> + bool IsSoftplusGrad ( const NodeDef & node ) { return node . op ( ) = = " SoftplusGrad " ; } <nl> + <nl> + bool IsSoftsignGrad ( const NodeDef & node ) { return node . op ( ) = = " SoftsignGrad " ; } <nl> + <nl> bool IsSplit ( const NodeDef & node ) { return node . op ( ) = = " Split " ; } <nl> <nl> + bool IsSqrtGrad ( const NodeDef & node ) { return node . op ( ) = = " SqrtGrad " ; } <nl> + <nl> bool IsSquaredDifference ( const NodeDef & node ) { <nl> return node . op ( ) = = " SquaredDifference " ; <nl> } <nl> bool IsSwitch ( const NodeDef & node ) { <nl> return op = = " Switch " | | op = = " RefSwitch " ; <nl> } <nl> <nl> + bool IsTanhGrad ( const NodeDef & node ) { return node . op ( ) = = " TanhGrad " ; } <nl> + <nl> bool IsTranspose ( const NodeDef & node ) { return node . op ( ) = = " Transpose " ; } <nl> <nl> bool IsVariable ( const NodeDef & node ) { <nl> mmm a / tensorflow / core / grappler / op_types . h <nl> ppp b / tensorflow / core / grappler / op_types . 
h <nl> bool IsDepthwiseConv2dNativeBackpropFilter ( const NodeDef & node ) ; <nl> bool IsDepthwiseConv2dNativeBackpropInput ( const NodeDef & node ) ; <nl> bool IsDequeueOp ( const NodeDef & node ) ; <nl> bool IsDiv ( const NodeDef & node ) ; <nl> + bool IsEluGrad ( const NodeDef & node ) ; <nl> bool IsEnter ( const NodeDef & node ) ; <nl> bool IsExit ( const NodeDef & node ) ; <nl> bool IsFloorMod ( const NodeDef & node ) ; <nl> bool IsFusedBatchNormGradV1 ( const NodeDef & node ) ; <nl> bool IsIdentity ( const NodeDef & node ) ; <nl> + bool IsInvGrad ( const NodeDef & node ) ; <nl> bool IsMerge ( const NodeDef & node ) ; <nl> bool IsMul ( const NodeDef & node ) ; <nl> bool IsMatMul ( const NodeDef & node ) ; <nl> bool IsPad ( const NodeDef & node ) ; <nl> bool IsNoOp ( const NodeDef & node ) ; <nl> bool IsPlaceholder ( const NodeDef & node ) ; <nl> bool IsRealDiv ( const NodeDef & node ) ; <nl> + bool IsRelu6Grad ( const NodeDef & node ) ; <nl> bool IsReluGrad ( const NodeDef & node ) ; <nl> + bool IsReciprocalGrad ( const NodeDef & node ) ; <nl> bool IsRecv ( const NodeDef & node ) ; <nl> bool IsReduction ( const NodeDef & node ) ; <nl> bool IsReshape ( const NodeDef & node ) ; <nl> bool IsRestore ( const NodeDef & node ) ; <nl> + bool IsRsqrtGrad ( const NodeDef & node ) ; <nl> + bool IsSeluGrad ( const NodeDef & node ) ; <nl> bool IsSend ( const NodeDef & node ) ; <nl> bool IsSlice ( const NodeDef & node ) ; <nl> bool IsShape ( const NodeDef & node ) ; <nl> bool IsShapeN ( const NodeDef & node ) ; <nl> + bool IsSigmoidGrad ( const NodeDef & node ) ; <nl> + bool IsSoftplusGrad ( const NodeDef & node ) ; <nl> + bool IsSoftsignGrad ( const NodeDef & node ) ; <nl> bool IsSplit ( const NodeDef & node ) ; <nl> + bool IsSqrtGrad ( const NodeDef & node ) ; <nl> bool IsSquaredDifference ( const NodeDef & node ) ; <nl> bool IsSqueeze ( const NodeDef & node ) ; <nl> bool IsStopGradient ( const NodeDef & node ) ; <nl> bool IsSub ( const NodeDef & node ) ; <nl> bool IsSum ( const NodeDef & node ) ; <nl> bool IsSwitch ( const NodeDef & node ) ; <nl> + bool IsTanhGrad ( const NodeDef & node ) ; <nl> bool IsTranspose ( const NodeDef & node ) ; <nl> bool IsVariable ( const NodeDef & node ) ; <nl> <nl> mmm a / tensorflow / core / grappler / optimizers / graph_optimizer . h <nl> ppp b / tensorflow / core / grappler / optimizers / graph_optimizer . h <nl> class GraphOptimizer { <nl> GraphDef * optimized_graph ) = 0 ; <nl> <nl> / / Method invoked by the framework so that it can provide feedback <nl> - / / on how well the " optimize_output " ( produced as * output from a <nl> + / / on how well the " optimized_graph " ( produced as * optimized_graph from a <nl> / / call to Optimize ) performed . Lower " result " scores are better . <nl> virtual void Feedback ( Cluster * cluster , const GrapplerItem & item , <nl> const GraphDef & optimized_graph , double result ) = 0 ; <nl> mmm a / tensorflow / core / grappler / optimizers / layout_optimizer . cc <nl> ppp b / tensorflow / core / grappler / optimizers / layout_optimizer . cc <nl> std : : set < string > GetOpsFormatSupported ( ) { <nl> / / TODO ( yaozhang ) : enable SumProcessor with auto - tuning . Currently disabled <nl> / / because of the worse performance in some cases . 
<nl> std : : set < string > GetOpsFormatAgnostic ( ) { <nl> - std : : set < string > ops_format_agnostic = { " Add " , <nl> + std : : set < string > ops_format_agnostic = { " Abs " , <nl> + " Add " , <nl> " AddN " , <nl> " Acos " , <nl> " Acosh " , <nl> + " Angle " , <nl> " Asin " , <nl> " Asinh " , <nl> " Atan " , <nl> " Atanh " , <nl> + " Bitcast " , <nl> + " Cast " , <nl> " Ceil " , <nl> + " CheckNumerics " , <nl> " Cos " , <nl> " Cosh " , <nl> + " ComplexAbs " , <nl> " Concat " , <nl> " ConcatV2 " , <nl> + " Conj " , <nl> " Digamma " , <nl> + " Elu " , <nl> + " EluGrad " , <nl> " Erf " , <nl> " Erfc " , <nl> " Exp " , <nl> " Expm1 " , <nl> " Floor " , <nl> + " GuaranteeConst " , <nl> " Identity " , <nl> + " Imag " , <nl> " Inv " , <nl> " InvGrad " , <nl> " IsFinite " , <nl> std : : set < string > GetOpsFormatAgnostic ( ) { <nl> " Merge " , <nl> " Mul " , <nl> " Neg " , <nl> + " OnesLike " , <nl> " Pad " , <nl> + " PreventGradient " , <nl> + " Real " , <nl> " RealDiv " , <nl> " Reciprocal " , <nl> " ReciprocalGrad " , <nl> " Relu " , <nl> " Relu6 " , <nl> + " Relu6Grad " , <nl> " ReluGrad " , <nl> " Rint " , <nl> + " Selu " , <nl> + " SeluGrad " , <nl> " Shape " , <nl> " ShapeN " , <nl> " Sigmoid " , <nl> std : : set < string > GetOpsFormatAgnostic ( ) { <nl> " Sin " , <nl> " Sinh " , <nl> " Slice " , <nl> + " Snapshot " , <nl> + " Softplus " , <nl> + " SoftplusGrad " , <nl> " Split " , <nl> " Switch " , <nl> + " RefIdentity " , <nl> " RefMerge " , <nl> " RefSwitch " , <nl> " Round " , <nl> std : : set < string > GetOpsFormatAgnostic ( ) { <nl> " Square " , <nl> " SquaredDifference " , <nl> " Squeeze " , <nl> + " StopGradient " , <nl> / * " Sum " , * / " Sub " , <nl> " Tan " , <nl> " Tanh " , <nl> - " TanhGrad " } ; <nl> + " TanhGrad " , <nl> + " ZerosLike " } ; <nl> return ops_format_agnostic ; <nl> } <nl> <nl> bool IsMaxPoolGradV1 ( const NodeDef & node ) { <nl> return op = = " MaxPoolGrad " ; <nl> } <nl> <nl> + bool IsUnaryGrad ( const NodeDef & node ) { <nl> + bool is_unary_grad = <nl> + IsEluGrad ( node ) | | IsInvGrad ( node ) | | IsReciprocalGrad ( node ) | | <nl> + IsRelu6Grad ( node ) | | IsReluGrad ( node ) | | IsRsqrtGrad ( node ) | | <nl> + IsSeluGrad ( node ) | | IsSigmoidGrad ( node ) | | IsSoftplusGrad ( node ) | | <nl> + IsSoftsignGrad ( node ) | | IsSqrtGrad ( node ) | | IsTanhGrad ( node ) ; <nl> + return is_unary_grad ; <nl> + } <nl> + <nl> class GraphProcessor { <nl> public : <nl> GraphProcessor ( const VirtualPlacer & virtual_placer , <nl> class NodeProcessor : public GraphProcessor { <nl> if ( op = = " Transpose " ) { <nl> added_node_name = AddPrefixToNodeName ( added_node_base_name , <nl> kTransposeNCHWToNHWC , " - " ) ; <nl> - TF_RETURN_IF_ERROR ( HasAttribute ( * node_ , " T " ) ) ; <nl> + DataType dtype ; <nl> + if ( op = = " Imag " | | op = = " Real " | | op = = " Angle " | | <nl> + op = = " Conj " | | op = = " ComplexAbs " ) { <nl> + TF_RETURN_IF_ERROR ( HasAttribute ( * node_ , " Tout " ) ) ; <nl> + dtype = node_ - > attr ( ) . at ( " Tout " ) . type ( ) ; <nl> + } else if ( op = = " Bitcast " ) { <nl> + TF_RETURN_IF_ERROR ( HasAttribute ( * node_ , " type " ) ) ; <nl> + dtype = node_ - > attr ( ) . at ( " type " ) . type ( ) ; <nl> + } else { <nl> + TF_RETURN_IF_ERROR ( HasAttribute ( * node_ , " T " ) ) ; <nl> + dtype = node_ - > attr ( ) . at ( " T " ) . type ( ) ; <nl> + } <nl> TF_RETURN_IF_ERROR ( HasAttribute ( * node_ , " _output_shapes " ) ) ; <nl> AddNodeTranspose ( <nl> - added_node_name , input , const_name , <nl> - node_ - > attr ( ) . 
at ( " T " ) . type ( ) , <nl> + added_node_name , input , const_name , dtype , <nl> node_ - > attr ( ) . at ( " _output_shapes " ) . list ( ) . shape ( 0 ) , false ) ; <nl> } else if ( op = = " DataFormatVecPermute " ) { <nl> added_node_name = AddPrefixToNodeName ( added_node_base_name , <nl> class SplitProcessor : public ConcatProcessor { <nl> } <nl> } ; <nl> <nl> - class ReluGradProcessor : public AgnosticNodeProcessor { <nl> + class UnaryGradProcessor : public AgnosticNodeProcessor { <nl> public : <nl> - explicit ReluGradProcessor ( const OptimizeContext & opt_cxt ) <nl> + explicit UnaryGradProcessor ( const OptimizeContext & opt_cxt ) <nl> : AgnosticNodeProcessor ( opt_cxt ) { } <nl> <nl> protected : <nl> class DataLayoutOptimizer : GraphProcessor { <nl> node_processor . reset ( new MergeProcessor ( opt_cxt ) ) ; <nl> } else if ( IsPad ( * node ) ) { <nl> node_processor . reset ( new PadProcessor ( opt_cxt ) ) ; <nl> - } else if ( IsReluGrad ( * node ) ) { <nl> - node_processor . reset ( new ReluGradProcessor ( opt_cxt ) ) ; <nl> } else if ( IsSlice ( * node ) ) { <nl> node_processor . reset ( new SliceProcessor ( opt_cxt ) ) ; <nl> } else if ( IsShape ( * node ) | | IsShapeN ( * node ) ) { <nl> class DataLayoutOptimizer : GraphProcessor { <nl> node_processor . reset ( new SumProcessor ( opt_cxt ) ) ; <nl> } else if ( IsSwitch ( * node ) ) { <nl> node_processor . reset ( new SwitchProcessor ( opt_cxt ) ) ; <nl> + } else if ( IsUnaryGrad ( * node ) ) { <nl> + node_processor . reset ( new UnaryGradProcessor ( opt_cxt ) ) ; <nl> } else { <nl> node_processor . reset ( new AgnosticNodeProcessor ( opt_cxt ) ) ; <nl> } <nl> mmm a / tensorflow / core / kernels / resize_area_op_test . cc <nl> ppp b / tensorflow / core / kernels / resize_area_op_test . cc <nl> class ResizeAreaOpTest : public OpsTestBase { <nl> bool is_ref = IsRefType ( input_types_ [ inputs_ . size ( ) ] ) ; <nl> Tensor * input = new Tensor ( device_ - > GetAllocator ( AllocatorAttributes ( ) ) , <nl> DataTypeToEnum < float > : : v ( ) , shape ) ; <nl> - input - > flat < float > ( ) . setZero ( ) ; <nl> + input - > flat < float > ( ) . setRandom ( ) ; <nl> tensors_ . push_back ( input ) ; <nl> if ( is_ref ) { <nl> CHECK_EQ ( RemoveRefType ( input_types_ [ inputs_ . size ( ) ] ) , <nl> mmm a / tensorflow / core / ops / compat / ops_history . v1 . pbtxt <nl> ppp b / tensorflow / core / ops / compat / ops_history . v1 . pbtxt <nl> op { <nl> type : DT_STRING <nl> } <nl> } <nl> + op { <nl> + name : " ShuffleAndRepeatDataset " <nl> + input_arg { <nl> + name : " input_dataset " <nl> + type : DT_VARIANT <nl> + } <nl> + input_arg { <nl> + name : " buffer_size " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " seed " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " seed2 " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " count " <nl> + type : DT_INT64 <nl> + } <nl> + output_arg { <nl> + name : " handle " <nl> + type : DT_VARIANT <nl> + } <nl> + attr { <nl> + name : " output_types " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + attr { <nl> + name : " output_shapes " <nl> + type : " list ( shape ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + } <nl> op { <nl> name : " ShuffleDataset " <nl> input_arg { <nl> mmm a / tensorflow / core / ops / ops . pbtxt <nl> ppp b / tensorflow / core / ops / ops . pbtxt <nl> op { <nl> } <nl> summary : " Generate a glob pattern matching all sharded file names . 
" <nl> } <nl> + op { <nl> + name : " ShuffleAndRepeatDataset " <nl> + input_arg { <nl> + name : " input_dataset " <nl> + type : DT_VARIANT <nl> + } <nl> + input_arg { <nl> + name : " buffer_size " <nl> + description : " The number of output elements to buffer in an iterator over \ nthis dataset . Compare with the ` min_after_dequeue ` attr when creating a \ n ` RandomShuffleQueue ` . " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " seed " <nl> + description : " A scalar seed for the random number generator . If either ` seed ` or \ n ` seed2 ` is set to be non - zero , the random number generator is seeded \ nby the given seed . Otherwise , a random seed is used . " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " seed2 " <nl> + description : " A second scalar seed to avoid seed collision . " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " count " <nl> + description : " A scalar representing the number of times the underlying dataset \ nshould be repeated . The default is ` - 1 ` , which results in infinite repetition . " <nl> + type : DT_INT64 <nl> + } <nl> + output_arg { <nl> + name : " handle " <nl> + type : DT_VARIANT <nl> + } <nl> + attr { <nl> + name : " output_types " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + attr { <nl> + name : " output_shapes " <nl> + type : " list ( shape ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + summary : " Creates a dataset that shuffles and repeats elements from ` input_dataset ` " <nl> + description : " pseudorandomly . " <nl> + } <nl> op { <nl> name : " ShuffleDataset " <nl> input_arg { <nl> op { <nl> } <nl> input_arg { <nl> name : " seed " <nl> - description : " A scalar seed for the random number generator . If either seed or \ nseed2 is set to be non - zero , the random number generator is seeded \ nby the given seed . Otherwise , a random seed is used . " <nl> + description : " A scalar seed for the random number generator . If either ` seed ` or \ n ` seed2 ` is set to be non - zero , the random number generator is seeded \ nby the given seed . Otherwise , a random seed is used . " <nl> type : DT_INT64 <nl> } <nl> input_arg { <nl> mmm a / tensorflow / core / protobuf / config . proto <nl> ppp b / tensorflow / core / protobuf / config . proto <nl> message GPUOptions { <nl> / / A comma - separated list of GPU ids that determines the ' visible ' <nl> / / to ' virtual ' mapping of GPU devices . For example , if TensorFlow <nl> / / can see 8 GPU devices in the process , and one wanted to map <nl> - / / visible GPU devices 5 and 3 as " / device : GPU : 0 " , and " / device : GPU : 1 " , then one <nl> - / / would specify this field as " 5 , 3 " . This field is similar in <nl> + / / visible GPU devices 5 and 3 as " / device : GPU : 0 " , and " / device : GPU : 1 " , <nl> + / / then one would specify this field as " 5 , 3 " . This field is similar in <nl> / / spirit to the CUDA_VISIBLE_DEVICES environment variable , except <nl> / / it applies to the visible GPU devices in the process . <nl> / / <nl> - / / NOTE : The GPU driver provides the process with the visible GPUs <nl> - / / in an order which is not guaranteed to have any correlation to <nl> - / / the * physical * GPU id in the machine . This field is used for <nl> - / / remapping " visible " to " virtual " , which means this operates only <nl> - / / after the process starts . Users are required to use vendor <nl> - / / specific mechanisms ( e . g . 
, CUDA_VISIBLE_DEVICES ) to control the <nl> - / / physical to visible device mapping prior to invoking TensorFlow . <nl> + / / NOTE : <nl> + / / 1 . The GPU driver provides the process with the visible GPUs <nl> + / / in an order which is not guaranteed to have any correlation to <nl> + / / the * physical * GPU id in the machine . This field is used for <nl> + / / remapping " visible " to " virtual " , which means this operates only <nl> + / / after the process starts . Users are required to use vendor <nl> + / / specific mechanisms ( e . g . , CUDA_VISIBLE_DEVICES ) to control the <nl> + / / physical to visible device mapping prior to invoking TensorFlow . <nl> + / / 2 . In the code , the ids in this list are also called " CUDA GPU id " s , <nl> + / / and the ' virtual ' ids of GPU devices ( i . e . the ids in the device <nl> + / / name " / device : GPU : < id > " ) are also called " TF GPU id " s . Please <nl> + / / refer to third_party / tensorflow / core / common_runtime / gpu / gpu_id . h <nl> + / / for more information . <nl> string visible_device_list = 5 ; <nl> <nl> / / In the event polling loop sleep this many microseconds between <nl> message GPUOptions { <nl> / / memory is unpageable , having too much pinned memory might negatively impact <nl> / / the overall host system performance . <nl> bool force_gpu_compatible = 8 ; <nl> + <nl> + / / Everything inside Experimental is subject to change and is not subject <nl> + / / to API stability guarantees in <nl> + / / https : / / www . tensorflow . org / programmers_guide / version_compat . <nl> + message Experimental { <nl> + / / Configuration for breaking down a visible GPU into multiple " virtual " <nl> + / / devices . <nl> + message VirtualDevices { <nl> + / / Per " virtual " device memory limit , in MB . The number of elements in <nl> + / / the list is the number of virtual devices to create on the <nl> + / / corresponding visible GPU ( see " virtual_devices " below ) . <nl> + / / If empty , it will create a single virtual device taking all available <nl> + / / memory from the device . <nl> + / / <nl> + / / For the concept of " visible " and " virtual " GPU , see the comments for <nl> + / / " visible_device_list " above for more information . <nl> + repeated float memory_limit_mb = 1 ; <nl> + } <nl> + <nl> + / / The multi virtual device settings . If empty ( not set ) , it will create <nl> + / / a single virtual device on each visible GPU , according to the settings <nl> + / / in " visible_device_list " above . Otherwise , the number of elements in the <nl> + / / list must be the same as the number of visible GPUs ( after <nl> + / / " visible_device_list " filtering if it is set ) , and the string - represented <nl> + / / device names ( e . g . / device : GPU : < id > ) will refer to the virtual <nl> + / / devices and have the < id > field assigned sequentially starting from 0 , <nl> + / / according to the order they appear in this list and the " memory_limit " <nl> + / / list inside each element . For example , <nl> + / / visible_device_list = " 1 , 0 " <nl> + / / virtual_devices { memory_limit : 1GB memory_limit : 2GB } <nl> + / / virtual_devices { } <nl> + / / will create three virtual devices as : <nl> + / / / device : GPU : 0 - > visible GPU 1 with 1GB memory <nl> + / / / device : GPU : 1 - > visible GPU 1 with 2GB memory <nl> + / / / device : GPU : 2 - > visible GPU 0 with all available memory <nl> + / / <nl> + / / NOTE : <nl> + / / 1 .
It ' s invalid to set both this and " per_process_gpu_memory_fraction " <nl> + / / at the same time . <nl> + / / 2 . Currently this setting is per - process , not per - session . Using <nl> + / / different settings in different sessions within same process will <nl> + / / result in undefined behavior . <nl> + repeated VirtualDevices virtual_devices = 1 ; <nl> + } <nl> + <nl> + Experimental experimental = 9 ; <nl> } ; <nl> <nl> / / Options passed to the graph optimizer <nl> mmm a / tensorflow / go / op / wrappers . go <nl> ppp b / tensorflow / go / op / wrappers . go <nl> func CacheDataset ( scope * Scope , input_dataset tf . Output , filename tf . Output , out <nl> return op . Output ( 0 ) <nl> } <nl> <nl> + / / Creates a dataset that shuffles and repeats elements from ` input_dataset ` <nl> + / / <nl> + / / pseudorandomly . <nl> + / / <nl> + / / Arguments : <nl> + / / <nl> + / / buffer_size : The number of output elements to buffer in an iterator over <nl> + / / this dataset . Compare with the ` min_after_dequeue ` attr when creating a <nl> + / / ` RandomShuffleQueue ` . <nl> + / / seed : A scalar seed for the random number generator . If either ` seed ` or <nl> + / / ` seed2 ` is set to be non - zero , the random number generator is seeded <nl> + / / by the given seed . Otherwise , a random seed is used . <nl> + / / seed2 : A second scalar seed to avoid seed collision . <nl> + / / count : A scalar representing the number of times the underlying dataset <nl> + / / should be repeated . The default is ` - 1 ` , which results in infinite repetition . <nl> + / / <nl> + / / <nl> + func ShuffleAndRepeatDataset ( scope * Scope , input_dataset tf . Output , buffer_size tf . Output , seed tf . Output , seed2 tf . Output , count tf . Output , output_types [ ] tf . DataType , output_shapes [ ] tf . Shape ) ( handle tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { " output_types " : output_types , " output_shapes " : output_shapes } <nl> + opspec : = tf . OpSpec { <nl> + Type : " ShuffleAndRepeatDataset " , <nl> + Input : [ ] tf . Input { <nl> + input_dataset , buffer_size , seed , seed2 , count , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> / / Creates a Dataset that returns pseudorandom numbers . <nl> / / <nl> / / Arguments : <nl> func ShuffleDatasetReshuffleEachIteration ( value bool ) ShuffleDatasetAttr { <nl> / / buffer_size : The number of output elements to buffer in an iterator over <nl> / / this dataset . Compare with the ` min_after_dequeue ` attr when creating a <nl> / / ` RandomShuffleQueue ` . <nl> - / / seed : A scalar seed for the random number generator . If either seed or <nl> - / / seed2 is set to be non - zero , the random number generator is seeded <nl> + / / seed : A scalar seed for the random number generator . If either ` seed ` or <nl> + / / ` seed2 ` is set to be non - zero , the random number generator is seeded <nl> / / by the given seed . Otherwise , a random seed is used . <nl> / / seed2 : A second scalar seed to avoid seed collision . <nl> / / <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> cuda_py_test ( <nl> ] , <nl> ) <nl> <nl> + cuda_py_test ( <nl> + name = " virtual_gpu_test " , <nl> + size = " small " , <nl> + srcs = [ " client / virtual_gpu_test . 
py " ] , <nl> + additional_deps = [ <nl> + " : client " , <nl> + " : client_testlib " , <nl> + " : framework_for_generated_wrappers " , <nl> + " : math_ops " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + ] , <nl> + ) <nl> + <nl> py_test ( <nl> name = " graph_util_test " , <nl> size = " small " , <nl> new file mode 100644 <nl> index 0000000000000 . . addf63474c9ba <nl> mmm / dev / null <nl> ppp b / tensorflow / python / client / virtual_gpu_test . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for multiple virtual GPU support . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import random <nl> + <nl> + import numpy as np <nl> + <nl> + from google . protobuf import text_format <nl> + from tensorflow . core . protobuf import config_pb2 <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . framework import test_util <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import math_ops <nl> + from tensorflow . python . ops import random_ops <nl> + from tensorflow . python . ops import variables <nl> + from tensorflow . python . platform import test <nl> + from tensorflow . python . platform import tf_logging as logging <nl> + <nl> + <nl> + class VirtualGpuTestUtil ( object ) : <nl> + <nl> + def __init__ ( self , <nl> + dim = 1000 , <nl> + num_ops = 100 , <nl> + virtual_devices_per_gpu = None , <nl> + device_probabilities = None ) : <nl> + self . _dim = dim <nl> + self . _num_ops = num_ops <nl> + if virtual_devices_per_gpu is None : <nl> + self . _virtual_devices_per_gpu = [ 3 ] <nl> + else : <nl> + self . _virtual_devices_per_gpu = virtual_devices_per_gpu <nl> + self . _visible_device_list = [ <nl> + i for i in range ( len ( self . _virtual_devices_per_gpu ) ) <nl> + ] <nl> + gpu_devices = [ <nl> + ( ' / gpu : ' + str ( i ) ) for i in range ( sum ( self . _virtual_devices_per_gpu ) ) <nl> + ] <nl> + self . devices = [ ' / cpu : 0 ' ] + gpu_devices <nl> + self . _num_devices = len ( self . devices ) <nl> + # Each virtual device gets 2GB memory . <nl> + self . _mem_limits_mb = [ <nl> + ( [ 1 < < 11 ] * i ) for i in self . _virtual_devices_per_gpu <nl> + ] <nl> + self . config = self . _GetSessionConfig ( ) <nl> + <nl> + if device_probabilities is not None : <nl> + self . _device_probabilities = list ( device_probabilities ) # Deep copy <nl> + for i in range ( 1 , self . _num_devices ) : <nl> + self . _device_probabilities [ i ] + = self . 
_device_probabilities [ i - 1 ] <nl> + else : <nl> + # Each device gets the same probability of being assigned an operation . <nl> + step = 1 . 0 / self . _num_devices <nl> + self . _device_probabilities = [ <nl> + ( x + 1 ) * step for x in range ( self . _num_devices ) <nl> + ] <nl> + # To prevent rounding errors from causing problems . <nl> + self . _device_probabilities [ self . _num_devices - 1 ] = 1 . 1 <nl> + <nl> + logging . info ( ' dim : % d ' , self . _dim ) <nl> + logging . info ( ' num_ops : % d ' , self . _num_ops ) <nl> + logging . info ( ' visible_device_list : % s ' , str ( self . _visible_device_list ) ) <nl> + logging . info ( ' virtual_devices_per_gpu : % s ' , <nl> + str ( self . _virtual_devices_per_gpu ) ) <nl> + logging . info ( ' mem_limits : % s ' , str ( self . _mem_limits_mb ) ) <nl> + logging . info ( ' devices : % s ' , str ( self . devices ) ) <nl> + logging . info ( ' config : % s ' , text_format . MessageToString ( self . config ) ) <nl> + logging . info ( ' device_probabilities : % s ' , str ( self . _device_probabilities ) ) <nl> + <nl> + # Creates virtual GPU devices <nl> + def _GetSessionConfig ( self ) : <nl> + virtual_device_gpu_options = config_pb2 . GPUOptions ( <nl> + visible_device_list = ' , ' . join ( str ( d ) for d in self . _visible_device_list ) , <nl> + experimental = config_pb2 . GPUOptions . Experimental ( virtual_devices = [ <nl> + config_pb2 . GPUOptions . Experimental . VirtualDevices ( <nl> + memory_limit_mb = i ) for i in self . _mem_limits_mb <nl> + ] ) ) <nl> + return config_pb2 . ConfigProto ( gpu_options = virtual_device_gpu_options ) <nl> + <nl> + # Generates a list of 3 - tuples , where each tuple contains the source and destination <nl> + # device indices for a binary operation like ' add ' , in the form : <nl> + # ( src_device_1 , src_device_2 , dst_device ) <nl> + def _GenerateOperationPlacement ( self ) : <nl> + result = [ ] <nl> + for unused_i in range ( self . _num_ops ) : <nl> + op_device = ( ) <nl> + for unused_j in range ( 3 ) : <nl> + random_num = random . random ( ) <nl> + for device_index in range ( self . _num_devices ) : <nl> + if self . _device_probabilities [ device_index ] > random_num : <nl> + op_device + = ( device_index , ) <nl> + break <nl> + result . append ( op_device ) <nl> + return result <nl> + <nl> + # Logs part of the matrix for debugging purposes . <nl> + def _LogMatrix ( self , mat , dim ) : <nl> + logging . info ( ' mmm - printing the first 10 * 10 submatrix mmm - ' ) <nl> + for i in range ( min ( 10 , dim ) ) : <nl> + row = ' ' <nl> + for j in range ( min ( 10 , dim ) ) : <nl> + row + = ' ' + str ( mat [ i ] [ j ] ) <nl> + logging . info ( row ) <nl> + <nl> + # Runs a list of ' add ' operations where each operation satisfies the device <nl> + # placement constraints in ` op_placement ` , and returns the result . <nl> + def _TestRandomGraphWithDevices ( self , <nl> + sess , <nl> + seed , <nl> + op_placement , <nl> + devices , <nl> + debug_mode = False ) : <nl> + data = [ ] <nl> + shape = ( self . _dim , self . _dim ) <nl> + feed_dict = { } <nl> + # Initialize the matrices <nl> + for i in range ( len ( devices ) ) : <nl> + with ops . device ( devices [ i ] ) : <nl> + var = array_ops . placeholder ( dtypes . float32 , shape = shape ) <nl> + np . random . seed ( seed + i ) <nl> + feed_dict [ var ] = np . random . uniform ( <nl> + low = 0 , high = 0 . 1 , size = shape ) . astype ( np . float32 ) <nl> + data . append ( var ) <nl> + # Run the ' add ' operations on those matrices <nl> + for op in op_placement : <nl> + with ops .
device ( devices [ op [ 2 ] ] ) : <nl> + data [ op [ 2 ] ] = math_ops . add ( data [ op [ 0 ] ] , data [ op [ 1 ] ] ) <nl> + with ops . device ( ' / cpu : 0 ' ) : <nl> + s = data [ 0 ] <nl> + for i in range ( 1 , len ( data ) ) : <nl> + s = math_ops . add ( s , data [ i ] ) <nl> + if debug_mode : <nl> + logging . info ( ops . get_default_graph ( ) . as_graph_def ( ) ) <nl> + result = sess . run ( s , feed_dict = feed_dict ) <nl> + self . _LogMatrix ( result , self . _dim ) <nl> + return result <nl> + <nl> + # Generates a random graph with ` self . _num_ops ` ' add ' operations with each <nl> + # operation placed on different virtual device , test that the result is <nl> + # identical to the result obtained by running the same graph on cpu only . <nl> + def TestRandomGraph ( self , sess , op_placement = None , random_seed = None ) : <nl> + debug_mode = False <nl> + if op_placement is None : <nl> + op_placement = self . _GenerateOperationPlacement ( ) <nl> + else : <nl> + debug_mode = True <nl> + if random_seed is None : <nl> + random_seed = random . randint ( 0 , 1 < < 31 ) <nl> + else : <nl> + debug_mode = True <nl> + logging . info ( ' Virtual gpu functional test for random graph . . . ' ) <nl> + logging . info ( ' operation placement : % s ' , str ( op_placement ) ) <nl> + logging . info ( ' random seed : % d ' , random_seed ) <nl> + <nl> + # Run with multiple virtual gpus . <nl> + result_vgd = self . _TestRandomGraphWithDevices ( <nl> + sess , random_seed , op_placement , self . devices , debug_mode = debug_mode ) <nl> + # Run with single cpu . <nl> + result_cpu = self . _TestRandomGraphWithDevices ( <nl> + sess , <nl> + random_seed , <nl> + op_placement , [ ' / cpu : 0 ' ] * self . _num_devices , <nl> + debug_mode = debug_mode ) <nl> + # Test the result <nl> + for i in range ( self . _dim ) : <nl> + for j in range ( self . _dim ) : <nl> + if result_vgd [ i ] [ j ] ! = result_cpu [ i ] [ j ] : <nl> + logging . error ( <nl> + ' Result mismatch at row % d column % d : expected % f , actual % f ' , i , <nl> + j , result_cpu [ i ] [ j ] , result_vgd [ i ] [ j ] ) <nl> + logging . error ( ' Devices : % s ' , self . devices ) <nl> + logging . error ( ' Memory limits ( in MB ) : % s ' , self . _mem_limits_mb ) <nl> + return False <nl> + return True <nl> + <nl> + <nl> + @ test_util . with_c_api <nl> + class VirtualGpuTest ( test_util . TensorFlowTestCase ) : <nl> + <nl> + def __init__ ( self , method_name ) : <nl> + super ( VirtualGpuTest , self ) . __init__ ( method_name ) <nl> + self . _util = VirtualGpuTestUtil ( ) <nl> + <nl> + def testStatsContainAllDeviceNames ( self ) : <nl> + with self . test_session ( config = self . _util . config ) as sess : <nl> + # TODO ( laigd ) : b / 70811538 . The is_gpu_available ( ) call will invoke <nl> + # DeviceFactory : : AddDevices ( ) with a default SessionOption , which prevents <nl> + # adding virtual devices in the future , thus must be called within a <nl> + # context of a session within which virtual devices are created . Same in <nl> + # the following test case . <nl> + if not test . is_gpu_available ( cuda_only = True ) : <nl> + self . skipTest ( ' No GPU available ' ) <nl> + run_options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) <nl> + run_metadata = config_pb2 . RunMetadata ( ) <nl> + <nl> + mat_shape = [ 10 , 10 ] <nl> + data = [ ] <nl> + for d in self . _util . devices : <nl> + with ops . device ( d ) : <nl> + var = variables . Variable ( random_ops . random_uniform ( mat_shape ) ) <nl> + sess . 
run ( var . initializer ) <nl> + data . append ( var ) <nl> + s = data [ 0 ] <nl> + for i in range ( 1 , len ( data ) ) : <nl> + s = math_ops . add ( s , data [ i ] ) <nl> + sess . run ( s , options = run_options , run_metadata = run_metadata ) <nl> + <nl> + self . assertTrue ( run_metadata . HasField ( ' step_stats ' ) ) <nl> + step_stats = run_metadata . step_stats <nl> + devices = [ d . device for d in step_stats . dev_stats ] <nl> + self . assertTrue ( ' / job : localhost / replica : 0 / task : 0 / device : CPU : 0 ' in devices ) <nl> + self . assertTrue ( ' / job : localhost / replica : 0 / task : 0 / device : GPU : 0 ' in devices ) <nl> + self . assertTrue ( ' / job : localhost / replica : 0 / task : 0 / device : GPU : 1 ' in devices ) <nl> + self . assertTrue ( ' / job : localhost / replica : 0 / task : 0 / device : GPU : 2 ' in devices ) <nl> + <nl> + def testLargeRandomGraph ( self ) : <nl> + with self . test_session ( config = self . _util . config ) as sess : <nl> + if not test . is_gpu_available ( cuda_only = True ) : <nl> + self . skipTest ( ' No GPU available ' ) <nl> + for _ in range ( 10 ) : <nl> + if not self . _util . TestRandomGraph ( sess ) : <nl> + return <nl> + <nl> + <nl> + if __name__ = = ' __main__ ' : <nl> + test . main ( ) <nl> mmm a / tensorflow / tools / api / golden / tensorflow . - g - p - u - options . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . - g - p - u - options . pbtxt <nl> tf_class { <nl> name : " DESCRIPTOR " <nl> mtype : " < type \ ' google . protobuf . pyext . _message . MessageDescriptor \ ' > " <nl> } <nl> + member { <nl> + name : " EXPERIMENTAL_FIELD_NUMBER " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " Experimental " <nl> + mtype : " < class \ ' google . protobuf . pyext . cpp_message . GeneratedProtocolMessageType \ ' > " <nl> + } <nl> member { <nl> name : " Extensions " <nl> mtype : " < type \ ' getset_descriptor \ ' > " <nl> mmm a / tensorflow / tools / api / tests / api_compatibility_test . py <nl> ppp b / tensorflow / tools / api / tests / api_compatibility_test . py <nl> def testAPIBackwardsCompatibility ( self ) : <nl> <nl> public_api_visitor = public_api . PublicAPIVisitor ( visitor ) <nl> public_api_visitor . do_not_descend_map [ ' tf ' ] . append ( ' contrib ' ) <nl> + public_api_visitor . do_not_descend_map [ ' tf . GPUOptions ' ] = [ ' Experimental ' ] <nl> traverse . traverse ( tf , public_api_visitor ) <nl> <nl> proto_dict = visitor . GetProtos ( ) <nl> | Merge commit for internal changes | tensorflow/tensorflow | 46ca9d6d06fc77026a4be7eef44fe4aba410aa37 | 2017-12-19T19:21:43Z |
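The record above adds virtual_gpu_test.py, whose _GetSessionConfig builds a session ConfigProto that splits one physical GPU into several virtual devices via GPUOptions.Experimental.VirtualDevices. As a rough companion to that Python helper, here is a minimal C++ sketch of the same config shape. It assumes the standard protoc-generated accessors for tensorflow::ConfigProto; the device list and the 2048 MB limits are illustrative values mirroring the test's defaults, not prescribed settings.

    // Minimal sketch, assuming standard protoc-generated accessors for
    // tensorflow::ConfigProto (values are illustrative, mirroring the test above).
    #include "tensorflow/core/protobuf/config.pb.h"

    tensorflow::ConfigProto MakeVirtualGpuConfig() {
      tensorflow::ConfigProto config;
      auto* gpu = config.mutable_gpu_options();
      // Expose only physical GPU 0 to the process.
      gpu->set_visible_device_list("0");
      // One VirtualDevices entry per physical GPU; each memory_limit_mb value
      // creates one virtual device, so this yields three 2 GB virtual GPUs,
      // matching the [1 << 11] * 3 limits used by VirtualGpuTestUtil.
      auto* vdevs = gpu->mutable_experimental()->add_virtual_devices();
      for (int i = 0; i < 3; ++i) {
        vdevs->add_memory_limit_mb(2048);
      }
      return config;
    }

A session created with this config reports GPU:0, GPU:1 and GPU:2 even on a single physical device, which is what testStatsContainAllDeviceNames checks for in the step stats.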
mmm a / tensorflow / core / data / service / BUILD <nl> ppp b / tensorflow / core / data / service / BUILD <nl> load ( <nl> " / / tensorflow / core / platform : build_config . bzl " , <nl> " tf_additional_all_protos " , <nl> " tf_proto_library " , <nl> + " tf_protos_profiler_service " , <nl> ) <nl> load ( " / / tensorflow : tensorflow . bzl " , " tf_grpc_cc_dependency " ) <nl> load ( <nl> cc_library ( <nl> " / / tensorflow / core : lib " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core : tensorflow " , <nl> + " / / tensorflow / core / profiler / rpc : profiler_service_impl " , <nl> tf_grpc_cc_dependency ( ) , <nl> ] , <nl> alwayslink = 1 , <nl> tf_cc_test ( <nl> " : test_util " , <nl> " : worker_cc_grpc_proto " , <nl> " : worker_proto_cc " , <nl> + " @ com_google_absl / / absl / strings " , <nl> " / / tensorflow / core : lib " , <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> " / / tensorflow / core / data : compression_utils " , <nl> " / / tensorflow / core / kernels / data : dataset_test_base " , <nl> - " @ com_google_absl / / absl / strings " , <nl> tf_grpc_cc_dependency ( ) , <nl> - ] , <nl> + ] + tf_protos_profiler_service ( ) , <nl> ) <nl> <nl> cc_grpc_library ( <nl> mmm a / tensorflow / core / data / service / credentials_factory . cc <nl> ppp b / tensorflow / core / data / service / credentials_factory . cc <nl> Status CredentialsFactory : : Get ( absl : : string_view protocol , <nl> } <nl> <nl> Status CredentialsFactory : : CreateServerCredentials ( <nl> - absl : : string_view protocol , std : : shared_ptr < grpc : : ServerCredentials > * out ) { <nl> + absl : : string_view protocol , <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > * out ) { <nl> CredentialsFactory * factory ; <nl> TF_RETURN_IF_ERROR ( CredentialsFactory : : Get ( protocol , & factory ) ) ; <nl> TF_RETURN_IF_ERROR ( factory - > CreateServerCredentials ( out ) ) ; <nl> Status CredentialsFactory : : CreateServerCredentials ( <nl> <nl> Status CredentialsFactory : : CreateClientCredentials ( <nl> absl : : string_view protocol , <nl> - std : : shared_ptr < grpc : : ChannelCredentials > * out ) { <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) { <nl> CredentialsFactory * factory ; <nl> TF_RETURN_IF_ERROR ( CredentialsFactory : : Get ( protocol , & factory ) ) ; <nl> TF_RETURN_IF_ERROR ( factory - > CreateClientCredentials ( out ) ) ; <nl> class InsecureCredentialsFactory : public CredentialsFactory { <nl> std : : string Protocol ( ) override { return " grpc " ; } <nl> <nl> Status CreateServerCredentials ( <nl> - std : : shared_ptr < grpc : : ServerCredentials > * out ) override { <nl> - * out = grpc : : InsecureServerCredentials ( ) ; <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > * out ) override { <nl> + * out = : : grpc : : InsecureServerCredentials ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CreateClientCredentials ( <nl> - std : : shared_ptr < grpc : : ChannelCredentials > * out ) override { <nl> - * out = grpc : : InsecureChannelCredentials ( ) ; <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) override { <nl> + * out = : : grpc : : InsecureChannelCredentials ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> } ; <nl> mmm a / tensorflow / core / data / service / credentials_factory . h <nl> ppp b / tensorflow / core / data / service / credentials_factory . h <nl> class CredentialsFactory { <nl> <nl> / / Stores server credentials to ` * out ` . 
<nl> virtual Status CreateServerCredentials ( <nl> - std : : shared_ptr < grpc : : ServerCredentials > * out ) = 0 ; <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > * out ) = 0 ; <nl> <nl> / / Stores client credentials to ` * out ` . <nl> virtual Status CreateClientCredentials ( <nl> - std : : shared_ptr < grpc : : ChannelCredentials > * out ) = 0 ; <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) = 0 ; <nl> <nl> / / Registers a credentials factory . <nl> static void Register ( CredentialsFactory * factory ) ; <nl> class CredentialsFactory { <nl> / / ` protocol ` , and stores them to ` * out ` . <nl> static Status CreateServerCredentials ( <nl> absl : : string_view protocol , <nl> - std : : shared_ptr < grpc : : ServerCredentials > * out ) ; <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > * out ) ; <nl> <nl> / / Creates client credentials using the credentials factory registered as <nl> / / ` protocol ` , and stores them to ` * out ` . <nl> static Status CreateClientCredentials ( <nl> absl : : string_view protocol , <nl> - std : : shared_ptr < grpc : : ChannelCredentials > * out ) ; <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) ; <nl> <nl> private : <nl> / / Gets the credentials factory registered via ` Register ` for the specified <nl> mmm a / tensorflow / core / data / service / grpc_dispatcher_impl . h <nl> ppp b / tensorflow / core / data / service / grpc_dispatcher_impl . h <nl> namespace data { <nl> / / <nl> class GrpcDispatcherImpl : public DispatcherService : : Service { <nl> public : <nl> - explicit GrpcDispatcherImpl ( grpc : : ServerBuilder * server_builder , <nl> + explicit GrpcDispatcherImpl ( : : grpc : : ServerBuilder * server_builder , <nl> const experimental : : DispatcherConfig & config ) ; <nl> ~ GrpcDispatcherImpl ( ) override { } <nl> <nl> Status Start ( ) ; <nl> <nl> - # define HANDLER ( method ) \ <nl> - grpc : : Status method ( grpc : : ServerContext * context , \ <nl> - const method # # Request * request , \ <nl> - method # # Response * response ) override ; <nl> + # define HANDLER ( method ) \ <nl> + : : grpc : : Status method ( : : grpc : : ServerContext * context , \ <nl> + const method # # Request * request , \ <nl> + method # # Response * response ) override ; <nl> HANDLER ( RegisterWorker ) ; <nl> HANDLER ( WorkerUpdate ) ; <nl> HANDLER ( GetOrRegisterDataset ) ; <nl> mmm a / tensorflow / core / data / service / grpc_util . cc <nl> ppp b / tensorflow / core / data / service / grpc_util . cc <nl> namespace tensorflow { <nl> namespace data { <nl> namespace grpc_util { <nl> <nl> - Status WrapError ( const std : : string & message , const grpc : : Status & status ) { <nl> + Status WrapError ( const std : : string & message , const : : grpc : : Status & status ) { <nl> if ( status . ok ( ) ) { <nl> return errors : : Internal ( " Expected a non - ok grpc status . Wrapping message : " , <nl> message ) ; <nl> mmm a / tensorflow / core / data / service / grpc_util . h <nl> ppp b / tensorflow / core / data / service / grpc_util . h <nl> namespace data { <nl> namespace grpc_util { <nl> <nl> / / Wraps a grpc : : Status in a tensorflow : : Status with the given message . <nl> - Status WrapError ( const std : : string & message , const grpc : : Status & status ) ; <nl> + Status WrapError ( const std : : string & message , const : : grpc : : Status & status ) ; <nl> <nl> / / Retries the given function if the function produces UNAVAILABLE , ABORTED , or <nl> / / CANCELLED status codes . 
We retry these codes because they can all indicate <nl> mmm a / tensorflow / core / data / service / grpc_worker_impl . cc <nl> ppp b / tensorflow / core / data / service / grpc_worker_impl . cc <nl> Status GrpcWorkerImpl : : Start ( const std : : string & worker_address ) { <nl> return impl_ . Start ( worker_address ) ; <nl> } <nl> <nl> - # define HANDLER ( method ) \ <nl> - grpc : : Status GrpcWorkerImpl : : method ( ServerContext * context , \ <nl> - const method # # Request * request , \ <nl> - method # # Response * response ) { \ <nl> - return ToGrpcStatus ( impl_ . method ( request , response ) ) ; \ <nl> + # define HANDLER ( method ) \ <nl> + : : grpc : : Status GrpcWorkerImpl : : method ( ServerContext * context , \ <nl> + const method # # Request * request , \ <nl> + method # # Response * response ) { \ <nl> + return ToGrpcStatus ( impl_ . method ( request , response ) ) ; \ <nl> } <nl> HANDLER ( ProcessTask ) ; <nl> HANDLER ( GetElement ) ; <nl> mmm a / tensorflow / core / data / service / grpc_worker_impl . h <nl> ppp b / tensorflow / core / data / service / grpc_worker_impl . h <nl> namespace data { <nl> / / <nl> class GrpcWorkerImpl : public WorkerService : : Service { <nl> public : <nl> - explicit GrpcWorkerImpl ( grpc : : ServerBuilder * server_builder , <nl> + explicit GrpcWorkerImpl ( : : grpc : : ServerBuilder * server_builder , <nl> const experimental : : WorkerConfig & config ) ; <nl> ~ GrpcWorkerImpl ( ) override { } <nl> <nl> Status Start ( const std : : string & worker_address ) ; <nl> <nl> - # define HANDLER ( method ) \ <nl> - grpc : : Status method ( grpc : : ServerContext * context , \ <nl> - const method # # Request * request , \ <nl> - method # # Response * response ) override ; <nl> + # define HANDLER ( method ) \ <nl> + : : grpc : : Status method ( : : grpc : : ServerContext * context , \ <nl> + const method # # Request * request , \ <nl> + method # # Response * response ) override ; <nl> HANDLER ( ProcessTask ) ; <nl> HANDLER ( GetElement ) ; <nl> # undef HANDLER <nl> mmm a / tensorflow / core / data / service / server_lib . cc <nl> ppp b / tensorflow / core / data / service / server_lib . cc <nl> Status GrpcDataServerBase : : Start ( ) { <nl> credentials , & bound_port_ ) ; <nl> builder . SetMaxReceiveMessageSize ( - 1 ) ; <nl> <nl> - AddServiceToBuilder ( & builder ) ; <nl> + AddDataServiceToBuilder ( & builder ) ; <nl> + AddProfilerServiceToBuilder ( & builder ) ; <nl> server_ = builder . BuildAndStart ( ) ; <nl> if ( ! server_ ) { <nl> return errors : : Internal ( " Could not start gRPC server " ) ; <nl> void GrpcDataServerBase : : Join ( ) { server_ - > Wait ( ) ; } <nl> <nl> int GrpcDataServerBase : : BoundPort ( ) { return bound_port ( ) ; } <nl> <nl> + void GrpcDataServerBase : : AddProfilerServiceToBuilder ( <nl> + : : grpc : : ServerBuilder * builder ) { <nl> + profiler_service_ = CreateProfilerService ( ) ; <nl> + builder - > RegisterService ( profiler_service_ . get ( ) ) ; <nl> + } <nl> + <nl> DispatchGrpcDataServer : : DispatchGrpcDataServer ( <nl> const experimental : : DispatcherConfig & config ) <nl> : GrpcDataServerBase ( config . port ( ) , config . 
protocol ( ) , " DispatchServer " ) , <nl> DispatchGrpcDataServer : : DispatchGrpcDataServer ( <nl> <nl> DispatchGrpcDataServer : : ~ DispatchGrpcDataServer ( ) { delete service_ ; } <nl> <nl> - void DispatchGrpcDataServer : : AddServiceToBuilder ( grpc : : ServerBuilder * builder ) { <nl> + void DispatchGrpcDataServer : : AddDataServiceToBuilder ( <nl> + : : grpc : : ServerBuilder * builder ) { <nl> service_ = absl : : make_unique < GrpcDispatcherImpl > ( builder , config_ ) . release ( ) ; <nl> } <nl> <nl> Status DispatchGrpcDataServer : : StartServiceInternal ( ) { <nl> Status DispatchGrpcDataServer : : NumWorkers ( int * num_workers ) { <nl> GetWorkersRequest req ; <nl> GetWorkersResponse resp ; <nl> - grpc : : ServerContext ctx ; <nl> - grpc : : Status s = service_ - > GetWorkers ( & ctx , & req , & resp ) ; <nl> + : : grpc : : ServerContext ctx ; <nl> + : : grpc : : Status s = service_ - > GetWorkers ( & ctx , & req , & resp ) ; <nl> if ( ! s . ok ( ) ) { <nl> return grpc_util : : WrapError ( " Failed to get workers " , s ) ; <nl> } <nl> WorkerGrpcDataServer : : WorkerGrpcDataServer ( <nl> <nl> WorkerGrpcDataServer : : ~ WorkerGrpcDataServer ( ) { delete service_ ; } <nl> <nl> - void WorkerGrpcDataServer : : AddServiceToBuilder ( grpc : : ServerBuilder * builder ) { <nl> + void WorkerGrpcDataServer : : AddDataServiceToBuilder ( <nl> + : : grpc : : ServerBuilder * builder ) { <nl> service_ = absl : : make_unique < GrpcWorkerImpl > ( builder , config_ ) . release ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / data / service / server_lib . h <nl> ppp b / tensorflow / core / data / service / server_lib . h <nl> limitations under the License . <nl> # include " grpcpp / server . h " <nl> # include " grpcpp / server_builder . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / profiler / rpc / profiler_service_impl . h " <nl> # include " tensorflow / core / protobuf / data / experimental / service_config . pb . h " <nl> <nl> namespace tensorflow { <nl> class GrpcDataServerBase { <nl> int BoundPort ( ) ; <nl> <nl> protected : <nl> - virtual void AddServiceToBuilder ( : : grpc : : ServerBuilder * builder ) = 0 ; <nl> + virtual void AddDataServiceToBuilder ( : : grpc : : ServerBuilder * builder ) = 0 ; <nl> + void AddProfilerServiceToBuilder ( : : grpc : : ServerBuilder * builder ) ; <nl> / / Starts the service . This will be called after building the service , so <nl> / / bound_port ( ) will return the actual bound port . <nl> virtual Status StartServiceInternal ( ) = 0 ; <nl> class GrpcDataServerBase { <nl> bool started_ = false ; <nl> bool stopped_ = false ; <nl> <nl> - std : : unique_ptr < grpc : : Server > server_ ; <nl> + std : : unique_ptr < : : grpc : : Server > server_ ; <nl> + / / TensorFlow profiler service implementation . 
<nl> + std : : unique_ptr < grpc : : ProfilerService : : Service > profiler_service_ = nullptr ; <nl> } ; <nl> <nl> class DispatchGrpcDataServer : public GrpcDataServerBase { <nl> class DispatchGrpcDataServer : public GrpcDataServerBase { <nl> Status NumWorkers ( int * num_workers ) ; <nl> <nl> protected : <nl> - void AddServiceToBuilder ( grpc : : ServerBuilder * builder ) override ; <nl> + void AddDataServiceToBuilder ( : : grpc : : ServerBuilder * builder ) override ; <nl> Status StartServiceInternal ( ) override ; <nl> <nl> private : <nl> class WorkerGrpcDataServer : public GrpcDataServerBase { <nl> ~ WorkerGrpcDataServer ( ) override ; <nl> <nl> protected : <nl> - void AddServiceToBuilder ( grpc : : ServerBuilder * builder ) override ; <nl> + void AddDataServiceToBuilder ( : : grpc : : ServerBuilder * builder ) override ; <nl> Status StartServiceInternal ( ) override ; <nl> <nl> private : <nl> mmm a / tensorflow / core / profiler / rpc / BUILD <nl> ppp b / tensorflow / core / profiler / rpc / BUILD <nl> cc_library ( <nl> features = [ " - layering_check " ] , <nl> visibility = tf_external_workspace_visible ( <nl> [ <nl> + " / / tensorflow / core / data / service : __pkg__ " , <nl> " / / tensorflow / core / distributed_runtime / rpc : __pkg__ " , <nl> " / / tensorflow_serving / model_servers : __pkg__ " , <nl> ] , <nl> | Enable profiler for tf . data service . | tensorflow/tensorflow | 8e7f1aeb483963aad56fee6977599d117d8ba540 | 2020-08-12T23:02:57Z |
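The record above wires a profiler service into the tf.data service servers by registering it on the same ::grpc::ServerBuilder as the dispatcher or worker service before BuildAndStart. The sketch below shows that multi-service registration pattern using only the generic gRPC C++ API; the two service pointers are placeholders for implementations such as GrpcDispatcherImpl and the object returned by CreateProfilerService(), and are not TensorFlow types themselves.

    // Generic sketch of registering several gRPC services on one server,
    // assuming only the standard grpcpp API (service arguments are placeholders).
    #include <memory>
    #include <string>
    #include "grpcpp/grpcpp.h"

    std::unique_ptr<grpc::Server> StartServer(grpc::Service* data_service,
                                              grpc::Service* profiler_service,
                                              int port) {
      grpc::ServerBuilder builder;
      int bound_port = 0;
      builder.AddListeningPort("0.0.0.0:" + std::to_string(port),
                               grpc::InsecureServerCredentials(), &bound_port);
      builder.SetMaxReceiveMessageSize(-1);       // unlimited, as in the change above
      builder.RegisterService(data_service);      // e.g. the dispatcher/worker service
      builder.RegisterService(profiler_service);  // e.g. the profiler service
      return builder.BuildAndStart();             // both services share one port
    }

Registering the profiler on the existing port means a profiling client can connect to a running dispatcher or worker without any extra endpoint, which is the point of the AddProfilerServiceToBuilder hook added above.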
mmm a / src / mongo / db / s / balancer / balancer . cpp <nl> ppp b / src / mongo / db / s / balancer / balancer . cpp <nl> void Balancer : : _splitOrMarkJumbo ( OperationContext * txn , <nl> auto scopedCM = uassertStatusOK ( ScopedChunkManager : : refreshAndGet ( txn , nss ) ) ; <nl> const auto cm = scopedCM . cm ( ) . get ( ) ; <nl> <nl> - auto chunk = cm - > findIntersectingChunkWithSimpleCollation ( txn , minKey ) ; <nl> + auto chunk = cm - > findIntersectingChunkWithSimpleCollation ( minKey ) ; <nl> <nl> try { <nl> const auto splitPoints = uassertStatusOK ( shardutil : : selectChunkSplitPoints ( <nl> mmm a / src / mongo / db / s / balancer / balancer_chunk_selection_policy_impl . cpp <nl> ppp b / src / mongo / db / s / balancer / balancer_chunk_selection_policy_impl . cpp <nl> StatusWith < SplitInfoVector > BalancerChunkSelectionPolicyImpl : : _getSplitCandidate <nl> const auto & tagRange = tagRangeEntry . second ; <nl> <nl> shared_ptr < Chunk > chunkAtZoneMin = <nl> - cm - > findIntersectingChunkWithSimpleCollation ( txn , tagRange . min ) ; <nl> + cm - > findIntersectingChunkWithSimpleCollation ( tagRange . min ) ; <nl> invariant ( chunkAtZoneMin - > getMax ( ) . woCompare ( tagRange . min ) > 0 ) ; <nl> <nl> if ( chunkAtZoneMin - > getMin ( ) . woCompare ( tagRange . min ) ) { <nl> StatusWith < SplitInfoVector > BalancerChunkSelectionPolicyImpl : : _getSplitCandidate <nl> continue ; <nl> <nl> shared_ptr < Chunk > chunkAtZoneMax = <nl> - cm - > findIntersectingChunkWithSimpleCollation ( txn , tagRange . max ) ; <nl> + cm - > findIntersectingChunkWithSimpleCollation ( tagRange . max ) ; <nl> <nl> / / We need to check that both the chunk ' s minKey does not match the zone ' s max and also that <nl> / / the max is not equal , which would only happen in the case of the zone ending in MaxKey . <nl> StatusWith < MigrateInfoVector > BalancerChunkSelectionPolicyImpl : : _getMigrateCandi <nl> const auto & tagRange = tagRangeEntry . second ; <nl> <nl> shared_ptr < Chunk > chunkAtZoneMin = <nl> - cm - > findIntersectingChunkWithSimpleCollation ( txn , tagRange . min ) ; <nl> + cm - > findIntersectingChunkWithSimpleCollation ( tagRange . min ) ; <nl> <nl> if ( chunkAtZoneMin - > getMin ( ) . woCompare ( tagRange . min ) ) { <nl> return { ErrorCodes : : IllegalOperation , <nl> StatusWith < MigrateInfoVector > BalancerChunkSelectionPolicyImpl : : _getMigrateCandi <nl> continue ; <nl> <nl> shared_ptr < Chunk > chunkAtZoneMax = <nl> - cm - > findIntersectingChunkWithSimpleCollation ( txn , tagRange . max ) ; <nl> + cm - > findIntersectingChunkWithSimpleCollation ( tagRange . max ) ; <nl> <nl> / / We need to check that both the chunk ' s minKey does not match the zone ' s max and also that <nl> / / the max is not equal , which would only happen in the case of the zone ending in MaxKey . <nl> mmm a / src / mongo / db / s / balancer / migration_manager . cpp <nl> ppp b / src / mongo / db / s / balancer / migration_manager . cpp <nl> Status MigrationManager : : executeManualMigration ( <nl> <nl> const auto & scopedCM = scopedCMStatus . getValue ( ) ; <nl> <nl> - auto chunk = scopedCM . cm ( ) - > findIntersectingChunkWithSimpleCollation ( txn , migrateInfo . minKey ) ; <nl> + auto chunk = scopedCM . cm ( ) - > findIntersectingChunkWithSimpleCollation ( migrateInfo . minKey ) ; <nl> invariant ( chunk ) ; <nl> <nl> Status commandStatus = _processRemoteCommandResponse ( <nl> void MigrationManager : : finishRecovery ( OperationContext * txn , <nl> migrateInfos . 
pop_front ( ) ; <nl> <nl> auto chunk = <nl> - scopedCM . cm ( ) - > findIntersectingChunkWithSimpleCollation ( txn , migrationInfo . minKey ) ; <nl> + scopedCM . cm ( ) - > findIntersectingChunkWithSimpleCollation ( migrationInfo . minKey ) ; <nl> invariant ( chunk ) ; <nl> <nl> if ( chunk - > getShardId ( ) ! = migrationInfo . from ) { <nl> mmm a / src / mongo / s / chunk_manager . cpp <nl> ppp b / src / mongo / s / chunk_manager . cpp <nl> <nl> # include " mongo / db / query / query_planner . h " <nl> # include " mongo / db / query / query_planner_common . h " <nl> # include " mongo / s / catalog / sharding_catalog_client . h " <nl> - # include " mongo / s / catalog_cache . h " <nl> # include " mongo / s / chunk_diff . h " <nl> # include " mongo / s / client / shard_registry . h " <nl> - # include " mongo / s / config . h " <nl> # include " mongo / s / grid . h " <nl> # include " mongo / util / log . h " <nl> # include " mongo / util / timer . h " <nl> void ChunkManager : : loadExistingRanges ( OperationContext * txn , const ChunkManager * <nl> / / TODO : Merge into diff code above , so we validate in one place <nl> if ( isChunkMapValid ( chunkMap ) ) { <nl> _chunkMap = std : : move ( chunkMap ) ; <nl> - _shardIds = std : : move ( shardIds ) ; <nl> _shardVersions = std : : move ( shardVersions ) ; <nl> _chunkRangeMap = _constructRanges ( _chunkMap ) ; <nl> <nl> bool ChunkManager : : _load ( OperationContext * txn , <nl> } <nl> } <nl> <nl> - StatusWith < shared_ptr < Chunk > > ChunkManager : : findIntersectingChunk ( OperationContext * txn , <nl> - const BSONObj & shardKey , <nl> - const BSONObj & collation ) const { <nl> + std : : shared_ptr < Chunk > ChunkManager : : findIntersectingChunk ( const BSONObj & shardKey , <nl> + const BSONObj & collation ) const { <nl> const bool hasSimpleCollation = ( collation . isEmpty ( ) & & ! _defaultCollator ) | | <nl> SimpleBSONObjComparator : : kInstance . evaluate ( collation = = CollationSpec : : kSimpleSpec ) ; <nl> if ( ! hasSimpleCollation ) { <nl> for ( BSONElement elt : shardKey ) { <nl> - if ( CollationIndexKey : : isCollatableType ( elt . type ( ) ) ) { <nl> - return Status ( ErrorCodes : : ShardKeyNotFound , <nl> - " cannot target single shard due to collation " ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - BSONObj chunkMin ; <nl> - shared_ptr < Chunk > chunk ; <nl> - { <nl> - ChunkMap : : const_iterator it = _chunkMap . upper_bound ( shardKey ) ; <nl> - if ( it ! = _chunkMap . end ( ) ) { <nl> - chunkMin = it - > first ; <nl> - chunk = it - > second ; <nl> + uassert ( ErrorCodes : : ShardKeyNotFound , <nl> + str : : stream ( ) < < " Cannot target single shard due to collation of key " <nl> + < < elt . fieldNameStringData ( ) , <nl> + ! CollationIndexKey : : isCollatableType ( elt . type ( ) ) ) ; <nl> } <nl> } <nl> <nl> - if ( ! chunk ) { <nl> - / / TODO : This should be an invariant <nl> - msgasserted ( 8070 , <nl> - str : : stream ( ) < < " couldn ' t find a chunk intersecting : " < < shardKey <nl> - < < " for ns : " <nl> - < < _nss . ns ( ) <nl> - < < " at version : " <nl> - < < _version . toString ( ) <nl> - < < " , number of chunks : " <nl> - < < _chunkMap . size ( ) ) ; <nl> - } <nl> + const auto it = _chunkMap . upper_bound ( shardKey ) ; <nl> + uassert ( ErrorCodes : : ShardKeyNotFound , <nl> + str : : stream ( ) < < " Cannot target single shard using key " < < shardKey , <nl> + it ! = _chunkMap . 
end ( ) & & it - > second - > containsKey ( shardKey ) ) ; <nl> <nl> - if ( chunk - > containsKey ( shardKey ) ) { <nl> - return chunk ; <nl> - } <nl> - <nl> - / / TODO : This should be an invariant <nl> - log ( ) < < redact ( chunkMin . toString ( ) ) ; <nl> - log ( ) < < redact ( ( * chunk ) . toString ( ) ) ; <nl> - log ( ) < < redact ( shardKey ) ; <nl> - <nl> - / / Proactively force a reload on the chunk manager in case it somehow got inconsistent <nl> - auto config = uassertStatusOK ( Grid : : get ( txn ) - > catalogCache ( ) - > getDatabase ( txn , _nss . db ( ) ) ) ; <nl> - config - > getChunkManagerIfExists ( txn , _nss . ns ( ) , true ) ; <nl> - <nl> - msgasserted ( 13141 , " Chunk map pointed to incorrect chunk " ) ; <nl> + return it - > second ; <nl> } <nl> <nl> - shared_ptr < Chunk > ChunkManager : : findIntersectingChunkWithSimpleCollation ( <nl> - OperationContext * txn , const BSONObj & shardKey ) const { <nl> - auto chunk = findIntersectingChunk ( txn , shardKey , CollationSpec : : kSimpleSpec ) ; <nl> - <nl> - / / findIntersectingChunk ( ) should succeed in targeting a single shard , since we have the simple <nl> - / / collation . <nl> - massertStatusOK ( chunk . getStatus ( ) ) ; <nl> - return chunk . getValue ( ) ; <nl> + std : : shared_ptr < Chunk > ChunkManager : : findIntersectingChunkWithSimpleCollation ( <nl> + const BSONObj & shardKey ) const { <nl> + return findIntersectingChunk ( shardKey , CollationSpec : : kSimpleSpec ) ; <nl> } <nl> <nl> void ChunkManager : : getShardIdsForQuery ( OperationContext * txn , <nl> void ChunkManager : : getShardIdsForQuery ( OperationContext * txn , <nl> / / Fast path for targeting equalities on the shard key . <nl> auto shardKeyToFind = _keyPattern . extractShardKeyFromQuery ( * cq ) ; <nl> if ( ! shardKeyToFind . isEmpty ( ) ) { <nl> - auto chunk = findIntersectingChunk ( txn , shardKeyToFind , collation ) ; <nl> - if ( chunk . isOK ( ) ) { <nl> - shardIds - > insert ( chunk . getValue ( ) - > getShardId ( ) ) ; <nl> + try { <nl> + auto chunk = findIntersectingChunk ( shardKeyToFind , collation ) ; <nl> + shardIds - > insert ( chunk - > getShardId ( ) ) ; <nl> return ; <nl> + } catch ( const DBException & ) { <nl> + / / The query uses multiple shards <nl> } <nl> } <nl> <nl> void ChunkManager : : getShardIdsForQuery ( OperationContext * txn , <nl> getShardIdsForRange ( it - > first / * min * / , it - > second / * max * / , shardIds ) ; <nl> <nl> / / once we know we need to visit all shards no need to keep looping <nl> - if ( shardIds - > size ( ) = = _shardIds . size ( ) ) { <nl> + if ( shardIds - > size ( ) = = _shardVersions . size ( ) ) { <nl> break ; <nl> } <nl> } <nl> void ChunkManager : : getShardIdsForRange ( const BSONObj & min , <nl> <nl> / / No need to iterate through the rest of the ranges , because we already know we need to use <nl> / / all shards . <nl> - if ( shardIds - > size ( ) = = _shardIds . size ( ) ) { <nl> + if ( shardIds - > size ( ) = = _shardVersions . size ( ) ) { <nl> break ; <nl> } <nl> } <nl> } <nl> <nl> void ChunkManager : : getAllShardIds ( set < ShardId > * all ) const { <nl> - all - > insert ( _shardIds . begin ( ) , _shardIds . end ( ) ) ; <nl> + std : : transform ( _shardVersions . begin ( ) , <nl> + _shardVersions . end ( ) , <nl> + std : : inserter ( * all , all - > begin ( ) ) , <nl> + [ ] ( const ShardVersionMap : : value_type & pair ) { return pair . 
first ; } ) ; <nl> } <nl> <nl> IndexBounds ChunkManager : : getIndexBoundsForQuery ( const BSONObj & key , <nl> mmm a / src / mongo / s / chunk_manager . h <nl> ppp b / src / mongo / s / chunk_manager . h <nl> class ChunkManager { <nl> / / <nl> <nl> / * * <nl> - * Given a key that has been extracted from a document , returns the <nl> - * chunk that contains that key . <nl> + * Given a shard key ( or a prefix ) that has been extracted from a document , returns the chunk <nl> + * that contains that key . <nl> * <nl> - * For instance , to locate the chunk for document { a : " foo " , b : " bar " } <nl> - * when the shard key is { a : " hashed " } , you can call <nl> - * findIntersectingChunk ( ) on { a : hash ( " foo " ) } <nl> + * Example : findIntersectingChunk ( { a : hash ( ' foo ' ) } ) locates the chunk for document <nl> + * { a : ' foo ' , b : ' bar ' } if the shard key is { a : ' hashed ' } . <nl> * <nl> * If ' collation ' is empty , we use the collection default collation for targeting . <nl> * <nl> - * Returns the error status ShardKeyNotFound if unable to target a single shard due to the <nl> - * collation . <nl> + * Throws a DBException with the ShardKeyNotFound code if unable to target a single shard due to <nl> + * collation or due to the key not matching the shard key pattern . <nl> * / <nl> - StatusWith < std : : shared_ptr < Chunk > > findIntersectingChunk ( OperationContext * txn , <nl> - const BSONObj & shardKey , <nl> - const BSONObj & collation ) const ; <nl> + std : : shared_ptr < Chunk > findIntersectingChunk ( const BSONObj & shardKey , <nl> + const BSONObj & collation ) const ; <nl> <nl> / * * <nl> - * Finds the intersecting chunk , assuming the simple collation . <nl> + * Same as findIntersectingChunk , but assumes the simple collation . <nl> * / <nl> - std : : shared_ptr < Chunk > findIntersectingChunkWithSimpleCollation ( OperationContext * txn , <nl> - const BSONObj & shardKey ) const ; <nl> + std : : shared_ptr < Chunk > findIntersectingChunkWithSimpleCollation ( const BSONObj & shardKey ) const ; <nl> <nl> / * * <nl> * Finds the shard IDs for a given filter and collation . If collation is empty , we use the <nl> class ChunkManager { <nl> / / constructed map must cover the complete space from [ MinKey , MaxKey ) . <nl> ChunkRangeMap _chunkRangeMap ; <nl> <nl> - std : : set < ShardId > _shardIds ; <nl> - <nl> / / Max known version per shard <nl> ShardVersionMap _shardVersions ; <nl> <nl> mmm a / src / mongo / s / chunk_manager_targeter . cpp <nl> ppp b / src / mongo / s / chunk_manager_targeter . cpp <nl> Status ChunkManagerTargeter : : targetInsert ( OperationContext * txn , <nl> <nl> / / Target the shard key or database primary <nl> if ( ! shardKey . isEmpty ( ) ) { <nl> - return targetShardKey ( txn , shardKey , CollationSpec : : kSimpleSpec , doc . objsize ( ) , endpoint ) ; <nl> + * endpoint = targetShardKey ( shardKey , CollationSpec : : kSimpleSpec , doc . objsize ( ) ) . release ( ) ; <nl> } else { <nl> if ( ! 
_primary ) { <nl> return Status ( ErrorCodes : : NamespaceNotFound , <nl> Status ChunkManagerTargeter : : targetInsert ( OperationContext * txn , <nl> } <nl> <nl> * endpoint = new ShardEndpoint ( _primary - > getId ( ) , ChunkVersion : : UNSHARDED ( ) ) ; <nl> - return Status : : OK ( ) ; <nl> } <nl> + <nl> + return Status : : OK ( ) ; <nl> } <nl> <nl> Status ChunkManagerTargeter : : targetUpdate ( OperationContext * txn , <nl> Status ChunkManagerTargeter : : targetUpdate ( OperationContext * txn , <nl> <nl> / / Target the shard key , query , or replacement doc <nl> if ( ! shardKey . isEmpty ( ) ) { <nl> - / / We can ' t rely on our query targeting to be exact <nl> - ShardEndpoint * endpoint = NULL ; <nl> - Status result = targetShardKey ( <nl> - txn , shardKey , collation , ( query . objsize ( ) + updateExpr . objsize ( ) ) , & endpoint ) ; <nl> - if ( result . isOK ( ) ) { <nl> - endpoints - > push_back ( endpoint ) ; <nl> - return result ; <nl> + try { <nl> + endpoints - > push_back ( <nl> + targetShardKey ( shardKey , collation , ( query . objsize ( ) + updateExpr . objsize ( ) ) ) <nl> + . release ( ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } catch ( const DBException & ) { <nl> + / / This update is potentially not constrained to a single shard <nl> } <nl> } <nl> <nl> Status ChunkManagerTargeter : : targetDelete ( OperationContext * txn , <nl> <nl> / / Target the shard key or delete query <nl> if ( ! shardKey . isEmpty ( ) ) { <nl> - / / We can ' t rely on our query targeting to be exact <nl> - ShardEndpoint * endpoint = NULL ; <nl> - Status result = targetShardKey ( txn , shardKey , collation , 0 , & endpoint ) ; <nl> - if ( result . isOK ( ) ) { <nl> - endpoints - > push_back ( endpoint ) ; <nl> - return result ; <nl> + try { <nl> + endpoints - > push_back ( targetShardKey ( shardKey , collation , 0 ) . release ( ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } catch ( const DBException & ) { <nl> + / / This delete is potentially not constrained to a single shard <nl> } <nl> } <nl> <nl> Status ChunkManagerTargeter : : targetQuery ( OperationContext * txn , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status ChunkManagerTargeter : : targetShardKey ( OperationContext * txn , <nl> - const BSONObj & shardKey , <nl> - const BSONObj & collation , <nl> - long long estDataSize , <nl> - ShardEndpoint * * endpoint ) const { <nl> - invariant ( NULL ! = _manager ) ; <nl> - <nl> - auto chunk = _manager - > findIntersectingChunk ( txn , shardKey , collation ) ; <nl> - if ( ! chunk . isOK ( ) ) { <nl> - return chunk . getStatus ( ) ; <nl> - } <nl> + std : : unique_ptr < ShardEndpoint > ChunkManagerTargeter : : targetShardKey ( const BSONObj & shardKey , <nl> + const BSONObj & collation , <nl> + long long estDataSize ) const { <nl> + auto chunk = _manager - > findIntersectingChunk ( shardKey , collation ) ; <nl> <nl> / / Track autosplit stats for sharded collections <nl> / / Note : this is only best effort accounting and is not accurate . <nl> if ( estDataSize > 0 ) { <nl> - _stats - > chunkSizeDelta [ chunk . getValue ( ) - > getMin ( ) ] + = estDataSize ; <nl> + _stats - > chunkSizeDelta [ chunk - > getMin ( ) ] + = estDataSize ; <nl> } <nl> <nl> - * endpoint = new ShardEndpoint ( chunk . getValue ( ) - > getShardId ( ) , <nl> - _manager - > getVersion ( chunk . 
getValue ( ) - > getShardId ( ) ) ) ; <nl> - <nl> - return Status : : OK ( ) ; <nl> + return stdx : : make_unique < ShardEndpoint > ( chunk - > getShardId ( ) , <nl> + _manager - > getVersion ( chunk - > getShardId ( ) ) ) ; <nl> } <nl> <nl> Status ChunkManagerTargeter : : targetCollection ( vector < ShardEndpoint * > * endpoints ) const { <nl> mmm a / src / mongo / s / chunk_manager_targeter . h <nl> ppp b / src / mongo / s / chunk_manager_targeter . h <nl> class ChunkManagerTargeter : public NSTargeter { <nl> * <nl> * If ' collation ' is empty , we use the collection default collation for targeting . <nl> * / <nl> - Status targetShardKey ( OperationContext * txn , <nl> - const BSONObj & doc , <nl> - const BSONObj & collation , <nl> - long long estDataSize , <nl> - ShardEndpoint * * endpoint ) const ; <nl> + std : : unique_ptr < ShardEndpoint > targetShardKey ( const BSONObj & doc , <nl> + const BSONObj & collation , <nl> + long long estDataSize ) const ; <nl> <nl> / / Full namespace of the collection for this targeter <nl> const NamespaceString _nss ; <nl> mmm a / src / mongo / s / commands / cluster_aggregate . cpp <nl> ppp b / src / mongo / s / commands / cluster_aggregate . cpp <nl> Status ClusterAggregate : : runAggregate ( OperationContext * txn , <nl> <nl> pipeline . getValue ( ) - > optimizePipeline ( ) ; <nl> <nl> - / / If the first $ match stage is an exact match on the shard key ( with a simple collation or <nl> - / / no string matching ) , we only have to send it to one shard , so send the command to that <nl> - / / shard . <nl> - BSONObj firstMatchQuery = pipeline . getValue ( ) - > getInitialQuery ( ) ; <nl> - BSONObj shardKeyMatches ; <nl> - shardKeyMatches = uassertStatusOK ( <nl> - chunkMgr - > getShardKeyPattern ( ) . extractShardKeyFromQuery ( txn , firstMatchQuery ) ) ; <nl> - bool singleShard = false ; <nl> - if ( ! shardKeyMatches . isEmpty ( ) ) { <nl> - auto chunk = chunkMgr - > findIntersectingChunk ( <nl> - txn , shardKeyMatches , request . getValue ( ) . getCollation ( ) ) ; <nl> - if ( chunk . isOK ( ) ) { <nl> - singleShard = true ; <nl> + / / If the first $ match stage is an exact match on the shard key ( with a simple collation or no <nl> + / / string matching ) , we only have to send it to one shard , so send the command to that shard . <nl> + const bool singleShard = [ & ] ( ) { <nl> + BSONObj firstMatchQuery = pipeline . getValue ( ) - > getInitialQuery ( ) ; <nl> + BSONObj shardKeyMatches = uassertStatusOK ( <nl> + chunkMgr - > getShardKeyPattern ( ) . extractShardKeyFromQuery ( txn , firstMatchQuery ) ) ; <nl> + <nl> + if ( shardKeyMatches . isEmpty ( ) ) { <nl> + return false ; <nl> } <nl> - } <nl> + <nl> + try { <nl> + chunkMgr - > findIntersectingChunk ( shardKeyMatches , request . getValue ( ) . getCollation ( ) ) ; <nl> + return true ; <nl> + } catch ( const DBException & ) { <nl> + return false ; <nl> + } <nl> + } ( ) ; <nl> <nl> / / Don ' t need to split pipeline if the first $ match is an exact match on shard key , unless <nl> / / there is a stage that needs to be run on the primary shard . <nl> mmm a / src / mongo / s / commands / cluster_find_and_modify_cmd . cpp <nl> ppp b / src / mongo / s / commands / cluster_find_and_modify_cmd . cpp <nl> class FindAndModifyCmd : public Command { <nl> } <nl> <nl> BSONObj shardKey = status . getValue ( ) ; <nl> - auto chunk = chunkMgr - > findIntersectingChunk ( txn , shardKey , collation ) ; <nl> + auto chunk = chunkMgr - > findIntersectingChunk ( shardKey , collation ) ; <nl> <nl> - if ( ! chunk . 
isOK ( ) ) { <nl> - uasserted ( ErrorCodes : : ShardKeyNotFound , <nl> - " findAndModify must target a single shard , but was not able to due " <nl> - " to non - simple collation " ) ; <nl> - } <nl> - <nl> - auto shardStatus = <nl> - Grid : : get ( txn ) - > shardRegistry ( ) - > getShard ( txn , chunk . getValue ( ) - > getShardId ( ) ) ; <nl> + auto shardStatus = Grid : : get ( txn ) - > shardRegistry ( ) - > getShard ( txn , chunk - > getShardId ( ) ) ; <nl> if ( ! shardStatus . isOK ( ) ) { <nl> return shardStatus . getStatus ( ) ; <nl> } <nl> class FindAndModifyCmd : public Command { <nl> } <nl> <nl> BSONObj shardKey = status . getValue ( ) ; <nl> - auto chunkStatus = chunkMgr - > findIntersectingChunk ( txn , shardKey , collation ) ; <nl> - if ( ! chunkStatus . isOK ( ) ) { <nl> - uasserted ( ErrorCodes : : ShardKeyNotFound , <nl> - " findAndModify must target a single shard , but was not able to due to " <nl> - " non - simple collation " ) ; <nl> - } <nl> - <nl> - const auto & chunk = chunkStatus . getValue ( ) ; <nl> + auto chunk = chunkMgr - > findIntersectingChunk ( shardKey , collation ) ; <nl> <nl> const bool ok = _runCommand ( txn , conf , chunkMgr , chunk - > getShardId ( ) , nss , cmdObj , result ) ; <nl> if ( ok ) { <nl> mmm a / src / mongo / s / commands / cluster_map_reduce_cmd . cpp <nl> ppp b / src / mongo / s / commands / cluster_map_reduce_cmd . cpp <nl> class MRCmd : public Command { <nl> invariant ( size < std : : numeric_limits < int > : : max ( ) ) ; <nl> <nl> / / key reported should be the chunk ' s minimum <nl> - shared_ptr < Chunk > c = cm - > findIntersectingChunkWithSimpleCollation ( txn , key ) ; <nl> + shared_ptr < Chunk > c = cm - > findIntersectingChunkWithSimpleCollation ( key ) ; <nl> if ( ! c ) { <nl> warning ( ) < < " Mongod reported " < < size < < " bytes inserted for key " < < key <nl> < < " but can ' t find chunk " ; <nl> mmm a / src / mongo / s / commands / cluster_merge_chunks_cmd . cpp <nl> ppp b / src / mongo / s / commands / cluster_merge_chunks_cmd . cpp <nl> class ClusterMergeChunksCommand : public Command { <nl> minKey = cm - > getShardKeyPattern ( ) . normalizeShardKey ( minKey ) ; <nl> maxKey = cm - > getShardKeyPattern ( ) . normalizeShardKey ( maxKey ) ; <nl> <nl> - shared_ptr < Chunk > firstChunk = cm - > findIntersectingChunkWithSimpleCollation ( txn , minKey ) ; <nl> + shared_ptr < Chunk > firstChunk = cm - > findIntersectingChunkWithSimpleCollation ( minKey ) ; <nl> <nl> BSONObjBuilder remoteCmdObjB ; <nl> remoteCmdObjB . append ( cmdObj [ ClusterMergeChunksCommand : : nsField ( ) ] ) ; <nl> mmm a / src / mongo / s / commands / cluster_move_chunk_cmd . cpp <nl> ppp b / src / mongo / s / commands / cluster_move_chunk_cmd . cpp <nl> class MoveChunkCmd : public Command { <nl> return false ; <nl> } <nl> <nl> - chunk = cm - > findIntersectingChunkWithSimpleCollation ( txn , shardKey ) ; <nl> + chunk = cm - > findIntersectingChunkWithSimpleCollation ( shardKey ) ; <nl> } else { <nl> / / bounds <nl> if ( ! cm - > getShardKeyPattern ( ) . isShardKey ( bounds [ 0 ] . Obj ( ) ) | | <nl> class MoveChunkCmd : public Command { <nl> BSONObj minKey = cm - > getShardKeyPattern ( ) . normalizeShardKey ( bounds [ 0 ] . Obj ( ) ) ; <nl> BSONObj maxKey = cm - > getShardKeyPattern ( ) . normalizeShardKey ( bounds [ 1 ] . Obj ( ) ) ; <nl> <nl> - chunk = cm - > findIntersectingChunkWithSimpleCollation ( txn , minKey ) ; <nl> + chunk = cm - > findIntersectingChunkWithSimpleCollation ( minKey ) ; <nl> <nl> if ( chunk - > getMin ( ) . woCompare ( minKey ) ! 
= 0 | | chunk - > getMax ( ) . woCompare ( maxKey ) ! = 0 ) { <nl> errmsg = str : : stream ( ) < < " no chunk found with the shard key bounds " <nl> mmm a / src / mongo / s / commands / cluster_shard_collection_cmd . cpp <nl> ppp b / src / mongo / s / commands / cluster_shard_collection_cmd . cpp <nl> class ShardCollectionCmd : public Command { <nl> / / 3 . Subdivide the big chunks by splitting at each of the points in " allSplits " <nl> / / that we haven ' t already split by . <nl> auto currentChunk = <nl> - chunkManager - > findIntersectingChunkWithSimpleCollation ( txn , allSplits [ 0 ] ) ; <nl> + chunkManager - > findIntersectingChunkWithSimpleCollation ( allSplits [ 0 ] ) ; <nl> <nl> std : : vector < BSONObj > subSplits ; <nl> for ( unsigned i = 0 ; i < = allSplits . size ( ) ; i + + ) { <nl> class ShardCollectionCmd : public Command { <nl> } <nl> <nl> if ( i < allSplits . size ( ) ) { <nl> - currentChunk = chunkManager - > findIntersectingChunkWithSimpleCollation ( <nl> - txn , allSplits [ i ] ) ; <nl> + currentChunk = <nl> + chunkManager - > findIntersectingChunkWithSimpleCollation ( allSplits [ i ] ) ; <nl> } <nl> } else { <nl> BSONObj splitPoint ( allSplits [ i ] ) ; <nl> mmm a / src / mongo / s / commands / cluster_split_cmd . cpp <nl> ppp b / src / mongo / s / commands / cluster_split_cmd . cpp <nl> class SplitCollectionCmd : public Command { <nl> return false ; <nl> } <nl> <nl> - chunk = cm - > findIntersectingChunkWithSimpleCollation ( txn , shardKey ) ; <nl> + chunk = cm - > findIntersectingChunkWithSimpleCollation ( shardKey ) ; <nl> } else if ( ! bounds . isEmpty ( ) ) { <nl> / / bounds <nl> if ( ! cm - > getShardKeyPattern ( ) . isShardKey ( bounds [ 0 ] . Obj ( ) ) | | <nl> class SplitCollectionCmd : public Command { <nl> BSONObj minKey = cm - > getShardKeyPattern ( ) . normalizeShardKey ( bounds [ 0 ] . Obj ( ) ) ; <nl> BSONObj maxKey = cm - > getShardKeyPattern ( ) . normalizeShardKey ( bounds [ 1 ] . Obj ( ) ) ; <nl> <nl> - chunk = cm - > findIntersectingChunkWithSimpleCollation ( txn , minKey ) ; <nl> + chunk = cm - > findIntersectingChunkWithSimpleCollation ( minKey ) ; <nl> <nl> if ( chunk - > getMin ( ) . woCompare ( minKey ) ! = 0 | | chunk - > getMax ( ) . woCompare ( maxKey ) ! = 0 ) { <nl> errmsg = str : : stream ( ) < < " no chunk found with the shard key bounds " <nl> class SplitCollectionCmd : public Command { <nl> / / Check shard key size when manually provided <nl> uassertStatusOK ( ShardKeyPattern : : checkShardKeySize ( middle ) ) ; <nl> <nl> - chunk = cm - > findIntersectingChunkWithSimpleCollation ( txn , middle ) ; <nl> + chunk = cm - > findIntersectingChunkWithSimpleCollation ( middle ) ; <nl> <nl> if ( chunk - > getMin ( ) . woCompare ( middle ) = = 0 | | chunk - > getMax ( ) . woCompare ( middle ) = = 0 ) { <nl> errmsg = str : : stream ( ) < < " new split key " < < middle <nl> mmm a / src / mongo / s / commands / cluster_write . cpp <nl> ppp b / src / mongo / s / commands / cluster_write . cpp <nl> void splitIfNeeded ( OperationContext * txn , const NamespaceString & nss , const Targ <nl> for ( auto it = stats . chunkSizeDelta . cbegin ( ) ; it ! = stats . chunkSizeDelta . cend ( ) ; + + it ) { <nl> std : : shared_ptr < Chunk > chunk ; <nl> try { <nl> - chunk = scopedCM . cm ( ) - > findIntersectingChunkWithSimpleCollation ( txn , it - > first ) ; <nl> + chunk = scopedCM . 
cm ( ) - > findIntersectingChunkWithSimpleCollation ( it - > first ) ; <nl> } catch ( const AssertionException & ex ) { <nl> warning ( ) < < " could not find chunk while checking for auto - split : " <nl> < < causedBy ( redact ( ex ) ) ; <nl> void updateChunkWriteStatsAndSplitIfNeeded ( OperationContext * txn , <nl> / / up - to - date view of the chunk we are about to move <nl> auto scopedCM = uassertStatusOK ( ScopedChunkManager : : refreshAndGet ( txn , nss ) ) ; <nl> auto suggestedChunk = scopedCM . cm ( ) - > findIntersectingChunkWithSimpleCollation ( <nl> - txn , suggestedMigrateChunk - > getMin ( ) ) ; <nl> + suggestedMigrateChunk - > getMin ( ) ) ; <nl> <nl> ChunkType chunkToMove ; <nl> chunkToMove . setNS ( nss . ns ( ) ) ; <nl> mmm a / src / mongo / s / config . cpp <nl> ppp b / src / mongo / s / config . cpp <nl> std : : shared_ptr < ChunkManager > DBConfig : : getChunkManagerIfExists ( OperationContext <nl> <nl> try { <nl> return getChunkManager ( txn , ns , shouldReload , forceReload ) ; <nl> - } catch ( AssertionException & e ) { <nl> - warning ( ) < < " chunk manager not found for " < < ns < < causedBy ( e ) ; <nl> + } catch ( const DBException & ) { <nl> return nullptr ; <nl> } <nl> } <nl> | SERVER - 28106 Cleanup the contract of ChunkManager : : findIntersectingChunk | mongodb/mongo | 21c2b106d16d69590d46b02cf252bae8ed055b3d | 2017-02-27T20:26:54Z |
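The record above changes ChunkManager::findIntersectingChunk from a StatusWith-returning API to one that throws (uassert) on ShardKeyNotFound, and reduces the lookup itself to a single upper_bound over the chunk map plus a containment check. Below is a small standalone sketch of that lookup contract, not MongoDB code: int keys stand in for BSON shard keys, the map is keyed by each chunk's exclusive max key, and a plain exception stands in for the uassert.

    // Simplified sketch of the lookup described above (hypothetical types, not
    // MongoDB code): chunks keyed by their exclusive max key, so upper_bound()
    // yields the only candidate and a containment check replaces the old
    // "reload and retry" fallback.
    #include <map>
    #include <stdexcept>
    #include <string>

    struct ChunkRange {
      int min;  // inclusive
      int max;  // exclusive
      bool Contains(int key) const { return key >= min && key < max; }
    };

    using ChunkMap = std::map<int, ChunkRange>;  // keyed by ChunkRange::max

    const ChunkRange& FindIntersectingChunk(const ChunkMap& chunks, int shard_key) {
      auto it = chunks.upper_bound(shard_key);
      if (it == chunks.end() || !it->second.Contains(shard_key)) {
        // Plays the role of the ShardKeyNotFound uassert in the new code path.
        throw std::out_of_range("Cannot target single shard using key " +
                                std::to_string(shard_key));
      }
      return it->second;
    }

Making the lookup throw keeps the happy path at call sites like targetShardKey and the findAndModify command short, while callers that can legitimately fall back to multi-shard targeting, such as getShardIdsForQuery, wrap the call in try/catch as shown in the diff above.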
mmm a / core / array . cpp <nl> ppp b / core / array . cpp <nl> void Array : : _assign ( const Array & p_array ) { <nl> } else if ( _p - > typed . type = = Variant : : NIL ) { / / from typed to untyped , must copy , but this is cheap anyway <nl> _p - > array = p_array . _p - > array ; <nl> } else if ( p_array . _p - > typed . type = = Variant : : NIL ) { / / from untyped to typed , must try to check if they are all valid <nl> - for ( int i = 0 ; i < p_array . _p - > array . size ( ) ; i + + ) { <nl> - if ( ! _p - > typed . validate ( p_array . _p - > array [ i ] , " assign " ) ) { <nl> - return ; <nl> + if ( _p - > typed . type = = Variant : : OBJECT ) { <nl> + / / for objects , it needs full validation , either can be converted or fail <nl> + for ( int i = 0 ; i < p_array . _p - > array . size ( ) ; i + + ) { <nl> + if ( ! _p - > typed . validate ( p_array . _p - > array [ i ] , " assign " ) ) { <nl> + return ; <nl> + } <nl> } <nl> + _p - > array = p_array . _p - > array ; / / then just copy , which is cheap anyway <nl> + <nl> + } else { <nl> + / / for non objects , we need to check if there is a valid conversion , which needs to happen one by one , so this is the worst case . <nl> + Vector < Variant > new_array ; <nl> + new_array . resize ( p_array . _p - > array . size ( ) ) ; <nl> + for ( int i = 0 ; i < p_array . _p - > array . size ( ) ; i + + ) { <nl> + Variant src_val = p_array . _p - > array [ i ] ; <nl> + if ( src_val . get_type ( ) = = _p - > typed . type ) { <nl> + new_array . write [ i ] = src_val ; <nl> + } else if ( Variant : : can_convert_strict ( src_val . get_type ( ) , _p - > typed . type ) ) { <nl> + Variant * ptr = & src_val ; <nl> + Callable : : CallError ce ; <nl> + new_array . write [ i ] = Variant : : construct ( _p - > typed . type , ( const Variant * * ) & ptr , 1 , ce , true ) ; <nl> + if ( ce . error ! = Callable : : CallError : : CALL_OK ) { <nl> + ERR_FAIL_MSG ( " Unable to convert array index " + itos ( i ) + " from ' " + Variant : : get_type_name ( src_val . get_type ( ) ) + " ' to ' " + Variant : : get_type_name ( _p - > typed . type ) + " ' . " ) ; <nl> + } <nl> + } else { <nl> + ERR_FAIL_MSG ( " Unable to convert array index " + itos ( i ) + " from ' " + Variant : : get_type_name ( src_val . get_type ( ) ) + " ' to ' " + Variant : : get_type_name ( _p - > typed . type ) + " ' . " ) ; <nl> + } <nl> + } <nl> + <nl> + _p - > array = new_array ; <nl> } <nl> - _p - > array = p_array . _p - > array ; / / then just copy , which is cheap anyway <nl> } else if ( _p - > typed . can_reference ( p_array . _p - > typed ) ) { / / same type or compatible <nl> _ref ( p_array ) ; <nl> } else { <nl> mmm a / core / typed_array . h <nl> ppp b / core / typed_array . 
h <nl> class TypedArray : public Array { <nl> _FORCE_INLINE_ void operator = ( const Array & p_array ) { <nl> _assign ( p_array ) ; <nl> } <nl> + _FORCE_INLINE_ TypedArray ( const Variant & p_variant ) : <nl> + Array ( Array ( p_variant ) , Variant : : OBJECT , T : : get_class_static ( ) , Variant ( ) ) { <nl> + } <nl> _FORCE_INLINE_ TypedArray ( const Array & p_array ) : <nl> Array ( p_array , Variant : : OBJECT , T : : get_class_static ( ) , Variant ( ) ) { <nl> } <nl> class TypedArray : public Array { <nl> <nl> / / specialization for the rest of variant types <nl> <nl> - # define MAKE_TYPED_ARRAY ( m_type , m_variant_type ) \ <nl> - template < > \ <nl> - class TypedArray < m_type > : public Array { \ <nl> - public : \ <nl> - _FORCE_INLINE_ void operator = ( const Array & p_array ) { \ <nl> - _assign ( p_array ) ; \ <nl> - } \ <nl> - _FORCE_INLINE_ TypedArray ( const Array & p_array ) : \ <nl> - Array ( p_array , m_variant_type , StringName ( ) , Variant ( ) ) { \ <nl> - } \ <nl> - _FORCE_INLINE_ TypedArray ( ) { \ <nl> - set_typed ( m_variant_type , StringName ( ) , Variant ( ) ) ; \ <nl> - } \ <nl> + # define MAKE_TYPED_ARRAY ( m_type , m_variant_type ) \ <nl> + template < > \ <nl> + class TypedArray < m_type > : public Array { \ <nl> + public : \ <nl> + _FORCE_INLINE_ void operator = ( const Array & p_array ) { \ <nl> + _assign ( p_array ) ; \ <nl> + } \ <nl> + _FORCE_INLINE_ TypedArray ( const Variant & p_variant ) : \ <nl> + Array ( Array ( p_variant ) , m_variant_type , StringName ( ) , Variant ( ) ) { \ <nl> + } \ <nl> + _FORCE_INLINE_ TypedArray ( const Array & p_array ) : \ <nl> + Array ( p_array , m_variant_type , StringName ( ) , Variant ( ) ) { \ <nl> + } \ <nl> + _FORCE_INLINE_ TypedArray ( ) { \ <nl> + set_typed ( m_variant_type , StringName ( ) , Variant ( ) ) ; \ <nl> + } \ <nl> } ; <nl> <nl> MAKE_TYPED_ARRAY ( bool , Variant : : BOOL ) <nl> mmm a / drivers / vulkan / rendering_device_vulkan . cpp <nl> ppp b / drivers / vulkan / rendering_device_vulkan . cpp <nl> RID RenderingDeviceVulkan : : vertex_buffer_create ( uint32_t p_size_bytes , const Vec <nl> } <nl> <nl> / / Internally reference counted , this ID is warranted to be unique for the same description , but needs to be freed as many times as it was allocated <nl> - RenderingDevice : : VertexFormatID RenderingDeviceVulkan : : vertex_format_create ( const Vector < VertexDescription > & p_vertex_formats ) { <nl> + RenderingDevice : : VertexFormatID RenderingDeviceVulkan : : vertex_format_create ( const Vector < VertexAttribute > & p_vertex_formats ) { <nl> <nl> _THREAD_SAFE_METHOD_ <nl> <nl> RID RenderingDeviceVulkan : : vertex_array_create ( uint32_t p_vertex_count , VertexFo <nl> <nl> / / validate with buffer <nl> { <nl> - const VertexDescription & atf = vd . vertex_formats [ i ] ; <nl> + const VertexAttribute & atf = vd . vertex_formats [ i ] ; <nl> <nl> uint32_t element_size = get_format_vertex_size ( atf . format ) ; <nl> ERR_FAIL_COND_V ( element_size = = 0 , RID ( ) ) ; / / should never happens since this was prevalidated <nl> mmm a / drivers / vulkan / rendering_device_vulkan . h <nl> ppp b / drivers / vulkan / rendering_device_vulkan . h <nl> class RenderingDeviceVulkan : public RenderingDevice { <nl> RID_Owner < Buffer , true > vertex_buffer_owner ; <nl> <nl> struct VertexDescriptionKey { <nl> - Vector < VertexDescription > vertex_formats ; <nl> + Vector < VertexAttribute > vertex_formats ; <nl> bool operator = = ( const VertexDescriptionKey & p_key ) const { <nl> int vdc = vertex_formats . 
size ( ) ; <nl> int vdck = p_key . vertex_formats . size ( ) ; <nl> class RenderingDeviceVulkan : public RenderingDevice { <nl> if ( vdc ! = vdck ) { <nl> return false ; <nl> } else { <nl> - const VertexDescription * a_ptr = vertex_formats . ptr ( ) ; <nl> - const VertexDescription * b_ptr = p_key . vertex_formats . ptr ( ) ; <nl> + const VertexAttribute * a_ptr = vertex_formats . ptr ( ) ; <nl> + const VertexAttribute * b_ptr = p_key . vertex_formats . ptr ( ) ; <nl> for ( int i = 0 ; i < vdc ; i + + ) { <nl> - const VertexDescription & a = a_ptr [ i ] ; <nl> - const VertexDescription & b = b_ptr [ i ] ; <nl> + const VertexAttribute & a = a_ptr [ i ] ; <nl> + const VertexAttribute & b = b_ptr [ i ] ; <nl> <nl> if ( a . location ! = b . location ) { <nl> return false ; <nl> class RenderingDeviceVulkan : public RenderingDevice { <nl> uint32_t hash ( ) const { <nl> int vdc = vertex_formats . size ( ) ; <nl> uint32_t h = hash_djb2_one_32 ( vdc ) ; <nl> - const VertexDescription * ptr = vertex_formats . ptr ( ) ; <nl> + const VertexAttribute * ptr = vertex_formats . ptr ( ) ; <nl> for ( int i = 0 ; i < vdc ; i + + ) { <nl> - const VertexDescription & vd = ptr [ i ] ; <nl> + const VertexAttribute & vd = ptr [ i ] ; <nl> h = hash_djb2_one_32 ( vd . location , h ) ; <nl> h = hash_djb2_one_32 ( vd . offset , h ) ; <nl> h = hash_djb2_one_32 ( vd . format , h ) ; <nl> class RenderingDeviceVulkan : public RenderingDevice { <nl> HashMap < VertexDescriptionKey , VertexFormatID , VertexDescriptionHash > vertex_format_cache ; <nl> <nl> struct VertexDescriptionCache { <nl> - Vector < VertexDescription > vertex_formats ; <nl> + Vector < VertexAttribute > vertex_formats ; <nl> VkVertexInputBindingDescription * bindings ; <nl> VkVertexInputAttributeDescription * attributes ; <nl> VkPipelineVertexInputStateCreateInfo create_info ; <nl> class RenderingDeviceVulkan : public RenderingDevice { <nl> virtual RID vertex_buffer_create ( uint32_t p_size_bytes , const Vector < uint8_t > & p_data = Vector < uint8_t > ( ) ) ; <nl> <nl> / / Internally reference counted , this ID is warranted to be unique for the same description , but needs to be freed as many times as it was allocated <nl> - virtual VertexFormatID vertex_format_create ( const Vector < VertexDescription > & p_vertex_formats ) ; <nl> + virtual VertexFormatID vertex_format_create ( const Vector < VertexAttribute > & p_vertex_formats ) ; <nl> virtual RID vertex_array_create ( uint32_t p_vertex_count , VertexFormatID p_vertex_format , const Vector < RID > & p_src_buffers ) ; <nl> <nl> virtual RID index_buffer_create ( uint32_t p_size_indices , IndexBufferFormat p_format , const Vector < uint8_t > & p_data = Vector < uint8_t > ( ) , bool p_use_restart_indices = false ) ; <nl> mmm a / editor / editor_data . cpp <nl> ppp b / editor / editor_data . cpp <nl> Array EditorSelection : : _get_transformable_selected_nodes ( ) { <nl> return ret ; <nl> } <nl> <nl> - Array EditorSelection : : get_selected_nodes ( ) { <nl> + TypedArray < Node > EditorSelection : : get_selected_nodes ( ) { <nl> <nl> - Array ret ; <nl> + TypedArray < Node > ret ; <nl> <nl> for ( Map < Node * , Object * > : : Element * E = selection . front ( ) ; E ; E = E - > next ( ) ) { <nl> <nl> mmm a / editor / editor_data . h <nl> ppp b / editor / editor_data . 
h <nl> class EditorSelection : public Object { <nl> static void _bind_methods ( ) ; <nl> <nl> public : <nl> - Array get_selected_nodes ( ) ; <nl> + TypedArray < Node > get_selected_nodes ( ) ; <nl> void add_node ( Node * p_node ) ; <nl> void remove_node ( Node * p_node ) ; <nl> bool is_selected ( Node * ) const ; <nl> mmm a / scene / 2d / area_2d . cpp <nl> ppp b / scene / 2d / area_2d . cpp <nl> bool Area2D : : is_monitorable ( ) const { <nl> return monitorable ; <nl> } <nl> <nl> - Array Area2D : : get_overlapping_bodies ( ) const { <nl> + TypedArray < Node2D > Area2D : : get_overlapping_bodies ( ) const { <nl> <nl> ERR_FAIL_COND_V_MSG ( ! monitoring , Array ( ) , " Can ' t find overlapping bodies when monitoring is off . " ) ; <nl> - Array ret ; <nl> + TypedArray < Node2D > ret ; <nl> ret . resize ( body_map . size ( ) ) ; <nl> int idx = 0 ; <nl> for ( const Map < ObjectID , BodyState > : : Element * E = body_map . front ( ) ; E ; E = E - > next ( ) ) { <nl> Array Area2D : : get_overlapping_bodies ( ) const { <nl> return ret ; <nl> } <nl> <nl> - Array Area2D : : get_overlapping_areas ( ) const { <nl> + TypedArray < Area2D > Area2D : : get_overlapping_areas ( ) const { <nl> <nl> ERR_FAIL_COND_V_MSG ( ! monitoring , Array ( ) , " Can ' t find overlapping bodies when monitoring is off . " ) ; <nl> - Array ret ; <nl> + TypedArray < Area2D > ret ; <nl> ret . resize ( area_map . size ( ) ) ; <nl> int idx = 0 ; <nl> for ( const Map < ObjectID , AreaState > : : Element * E = area_map . front ( ) ; E ; E = E - > next ( ) ) { <nl> mmm a / scene / 2d / area_2d . h <nl> ppp b / scene / 2d / area_2d . h <nl> class Area2D : public CollisionObject2D { <nl> void set_collision_layer_bit ( int p_bit , bool p_value ) ; <nl> bool get_collision_layer_bit ( int p_bit ) const ; <nl> <nl> - Array get_overlapping_bodies ( ) const ; / / function for script <nl> - Array get_overlapping_areas ( ) const ; / / function for script <nl> + TypedArray < Node2D > get_overlapping_bodies ( ) const ; / / function for script <nl> + TypedArray < Area2D > get_overlapping_areas ( ) const ; / / function for script <nl> <nl> bool overlaps_area ( Node * p_area ) const ; <nl> bool overlaps_body ( Node * p_body ) const ; <nl> mmm a / scene / 2d / navigation_region_2d . cpp <nl> ppp b / scene / 2d / navigation_region_2d . cpp <nl> Vector < Vector2 > NavigationPolygon : : get_vertices ( ) const { <nl> return vertices ; <nl> } <nl> <nl> - void NavigationPolygon : : _set_polygons ( const Array & p_array ) { <nl> + void NavigationPolygon : : _set_polygons ( const TypedArray < Vector < int32_t > > & p_array ) { <nl> <nl> { <nl> MutexLock lock ( navmesh_generation ) ; <nl> Array NavigationPolygon : : _get_polygons ( ) const { <nl> return ret ; <nl> } <nl> <nl> - void NavigationPolygon : : _set_outlines ( const Array & p_array ) { <nl> + void NavigationPolygon : : _set_outlines ( const TypedArray < Vector < int32_t > > & p_array ) { <nl> <nl> outlines . resize ( p_array . size ( ) ) ; <nl> for ( int i = 0 ; i < p_array . size ( ) ; i + + ) { <nl> mmm a / scene / 2d / navigation_region_2d . h <nl> ppp b / scene / 2d / navigation_region_2d . 
h <nl> class NavigationPolygon : public Resource { <nl> protected : <nl> static void _bind_methods ( ) ; <nl> <nl> - void _set_polygons ( const Array & p_array ) ; <nl> + void _set_polygons ( const TypedArray < Vector < int32_t > > & p_array ) ; <nl> Array _get_polygons ( ) const ; <nl> <nl> - void _set_outlines ( const Array & p_array ) ; <nl> + void _set_outlines ( const TypedArray < Vector < int32_t > > & p_array ) ; <nl> Array _get_outlines ( ) const ; <nl> <nl> public : <nl> mmm a / scene / 2d / physics_body_2d . cpp <nl> ppp b / scene / 2d / physics_body_2d . cpp <nl> PhysicsBody2D : : PhysicsBody2D ( PhysicsServer2D : : BodyMode p_mode ) : <nl> set_pickable ( false ) ; <nl> } <nl> <nl> - Array PhysicsBody2D : : get_collision_exceptions ( ) { <nl> + TypedArray < PhysicsBody2D > PhysicsBody2D : : get_collision_exceptions ( ) { <nl> List < RID > exceptions ; <nl> PhysicsServer2D : : get_singleton ( ) - > body_get_collision_exceptions ( get_rid ( ) , & exceptions ) ; <nl> Array ret ; <nl> RigidBody2D : : CCDMode RigidBody2D : : get_continuous_collision_detection_mode ( ) cons <nl> return ccd_mode ; <nl> } <nl> <nl> - Array RigidBody2D : : get_colliding_bodies ( ) const { <nl> + TypedArray < Node2D > RigidBody2D : : get_colliding_bodies ( ) const { <nl> <nl> ERR_FAIL_COND_V ( ! contact_monitor , Array ( ) ) ; <nl> <nl> - Array ret ; <nl> + TypedArray < Node2D > ret ; <nl> ret . resize ( contact_monitor - > body_map . size ( ) ) ; <nl> int idx = 0 ; <nl> for ( const Map < ObjectID , BodyState > : : Element * E = contact_monitor - > body_map . front ( ) ; E ; E = E - > next ( ) ) { <nl> mmm a / scene / 2d / physics_body_2d . h <nl> ppp b / scene / 2d / physics_body_2d . h <nl> class PhysicsBody2D : public CollisionObject2D { <nl> void set_collision_layer_bit ( int p_bit , bool p_value ) ; <nl> bool get_collision_layer_bit ( int p_bit ) const ; <nl> <nl> - Array get_collision_exceptions ( ) ; <nl> + TypedArray < PhysicsBody2D > get_collision_exceptions ( ) ; <nl> void add_collision_exception_with ( Node * p_node ) ; / / must be physicsbody <nl> void remove_collision_exception_with ( Node * p_node ) ; <nl> <nl> class RigidBody2D : public PhysicsBody2D { <nl> void add_force ( const Vector2 & p_offset , const Vector2 & p_force ) ; <nl> void add_torque ( float p_torque ) ; <nl> <nl> - Array get_colliding_bodies ( ) const ; / / function for script <nl> + TypedArray < Node2D > get_colliding_bodies ( ) const ; / / function for script <nl> <nl> virtual String get_configuration_warning ( ) const ; <nl> <nl> mmm a / scene / 2d / tile_map . cpp <nl> ppp b / scene / 2d / tile_map . cpp <nl> TypedArray < Vector2i > TileMap : : get_used_cells ( ) const { <nl> return a ; <nl> } <nl> <nl> - Array TileMap : : get_used_cells_by_id ( int p_id ) const { <nl> + TypedArray < Vector2i > TileMap : : get_used_cells_by_id ( int p_id ) const { <nl> <nl> - Array a ; <nl> + TypedArray < Vector2i > a ; <nl> for ( Map < PosKey , Cell > : : Element * E = tile_map . front ( ) ; E ; E = E - > next ( ) ) { <nl> <nl> if ( E - > value ( ) . id = = p_id ) { <nl> - Vector2 p ( E - > key ( ) . x , E - > key ( ) . y ) ; <nl> + Vector2i p ( E - > key ( ) . x , E - > key ( ) . y ) ; <nl> a . push_back ( p ) ; <nl> } <nl> } <nl> mmm a / scene / 2d / tile_map . h <nl> ppp b / scene / 2d / tile_map . 
h <nl> class TileMap : public Node2D { <nl> bool is_centered_textures_enabled ( ) const ; <nl> <nl> TypedArray < Vector2i > get_used_cells ( ) const ; <nl> - Array get_used_cells_by_id ( int p_id ) const ; <nl> + TypedArray < Vector2i > get_used_cells_by_id ( int p_id ) const ; <nl> Rect2 get_used_rect ( ) ; / / Not const because of cache <nl> <nl> void set_occluder_light_mask ( int p_mask ) ; <nl> mmm a / scene / 3d / area_3d . cpp <nl> ppp b / scene / 3d / area_3d . cpp <nl> bool Area3D : : is_monitoring ( ) const { <nl> return monitoring ; <nl> } <nl> <nl> - Array Area3D : : get_overlapping_bodies ( ) const { <nl> + TypedArray < Node3D > Area3D : : get_overlapping_bodies ( ) const { <nl> <nl> ERR_FAIL_COND_V ( ! monitoring , Array ( ) ) ; <nl> Array ret ; <nl> bool Area3D : : is_monitorable ( ) const { <nl> return monitorable ; <nl> } <nl> <nl> - Array Area3D : : get_overlapping_areas ( ) const { <nl> + TypedArray < Area3D > Area3D : : get_overlapping_areas ( ) const { <nl> <nl> ERR_FAIL_COND_V ( ! monitoring , Array ( ) ) ; <nl> Array ret ; <nl> mmm a / scene / 3d / area_3d . h <nl> ppp b / scene / 3d / area_3d . h <nl> class Area3D : public CollisionObject3D { <nl> void set_collision_layer_bit ( int p_bit , bool p_value ) ; <nl> bool get_collision_layer_bit ( int p_bit ) const ; <nl> <nl> - Array get_overlapping_bodies ( ) const ; <nl> - Array get_overlapping_areas ( ) const ; / / function for script <nl> + TypedArray < Node3D > get_overlapping_bodies ( ) const ; <nl> + TypedArray < Area3D > get_overlapping_areas ( ) const ; / / function for script <nl> <nl> bool overlaps_area ( Node * p_area ) const ; <nl> bool overlaps_body ( Node * p_body ) const ; <nl> mmm a / scene / 3d / physics_body_3d . cpp <nl> ppp b / scene / 3d / physics_body_3d . cpp <nl> bool PhysicsBody3D : : get_collision_layer_bit ( int p_bit ) const { <nl> return get_collision_layer ( ) & ( 1 < < p_bit ) ; <nl> } <nl> <nl> - Array PhysicsBody3D : : get_collision_exceptions ( ) { <nl> + TypedArray < PhysicsBody3D > PhysicsBody3D : : get_collision_exceptions ( ) { <nl> List < RID > exceptions ; <nl> PhysicsServer3D : : get_singleton ( ) - > body_get_collision_exceptions ( get_rid ( ) , & exceptions ) ; <nl> Array ret ; <nl> mmm a / scene / 3d / physics_body_3d . h <nl> ppp b / scene / 3d / physics_body_3d . h <nl> class PhysicsBody3D : public CollisionObject3D { <nl> void set_collision_mask_bit ( int p_bit , bool p_value ) ; <nl> bool get_collision_mask_bit ( int p_bit ) const ; <nl> <nl> - Array get_collision_exceptions ( ) ; <nl> + TypedArray < PhysicsBody3D > get_collision_exceptions ( ) ; <nl> void add_collision_exception_with ( Node * p_node ) ; / / must be physicsbody <nl> void remove_collision_exception_with ( Node * p_node ) ; <nl> <nl> mmm a / scene / 3d / skeleton_3d . cpp <nl> ppp b / scene / 3d / skeleton_3d . cpp <nl> <nl> # include " core / engine . h " <nl> # include " core / message_queue . h " <nl> # include " core / project_settings . h " <nl> + # include " core / type_info . h " <nl> # include " scene / 3d / physics_body_3d . h " <nl> # include " scene / resources / surface_tool . 
h " <nl> <nl> void _pb_start_simulation ( const Skeleton3D * p_skeleton , Node * p_node , const Vect <nl> } <nl> } <nl> <nl> - void Skeleton3D : : physical_bones_start_simulation_on ( const Array & p_bones ) { <nl> + void Skeleton3D : : physical_bones_start_simulation_on ( const TypedArray < StringName > & p_bones ) { <nl> set_physics_process_internal ( false ) ; <nl> <nl> Vector < int > sim_bones ; <nl> void Skeleton3D : : physical_bones_start_simulation_on ( const Array & p_bones ) { <nl> sim_bones . resize ( p_bones . size ( ) ) ; <nl> int c = 0 ; <nl> for ( int i = sim_bones . size ( ) - 1 ; 0 < = i ; - - i ) { <nl> - Variant : : Type type = p_bones . get ( i ) . get_type ( ) ; <nl> - if ( Variant : : STRING = = type | | Variant : : STRING_NAME = = type ) { <nl> - int bone_id = find_bone ( p_bones . get ( i ) ) ; <nl> - if ( bone_id ! = - 1 ) <nl> - sim_bones . write [ c + + ] = bone_id ; <nl> - } <nl> + int bone_id = find_bone ( p_bones [ i ] ) ; <nl> + if ( bone_id ! = - 1 ) <nl> + sim_bones . write [ c + + ] = bone_id ; <nl> } <nl> sim_bones . resize ( c ) ; <nl> } <nl> mmm a / scene / 3d / skeleton_3d . h <nl> ppp b / scene / 3d / skeleton_3d . h <nl> class Skeleton3D : public Node3D { <nl> <nl> public : <nl> void physical_bones_stop_simulation ( ) ; <nl> - void physical_bones_start_simulation_on ( const Array & p_bones ) ; <nl> + void physical_bones_start_simulation_on ( const TypedArray < StringName > & p_bones ) ; <nl> void physical_bones_add_collision_exception ( RID p_exception ) ; <nl> void physical_bones_remove_collision_exception ( RID p_exception ) ; <nl> # endif / / _3D_DISABLED <nl> mmm a / servers / register_server_types . cpp <nl> ppp b / servers / register_server_types . cpp <nl> void register_server_types ( ) { <nl> ClassDB : : register_class < RDTextureView > ( ) ; <nl> ClassDB : : register_class < RDAttachmentFormat > ( ) ; <nl> ClassDB : : register_class < RDSamplerState > ( ) ; <nl> - ClassDB : : register_class < RDVertexDescription > ( ) ; <nl> + ClassDB : : register_class < RDVertexAttribute > ( ) ; <nl> ClassDB : : register_class < RDUniform > ( ) ; <nl> ClassDB : : register_class < RDPipelineRasterizationState > ( ) ; <nl> ClassDB : : register_class < RDPipelineMultisampleState > ( ) ; <nl> mmm a / servers / rendering / rasterizer_rd / rasterizer_canvas_rd . cpp <nl> ppp b / servers / rendering / rasterizer_rd / rasterizer_canvas_rd . cpp <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> <nl> Vector < uint8_t > polygon_buffer ; <nl> polygon_buffer . resize ( buffer_size * sizeof ( float ) ) ; <nl> - Vector < RD : : VertexDescription > descriptions ; <nl> + Vector < RD : : VertexAttribute > descriptions ; <nl> descriptions . resize ( 4 ) ; <nl> Vector < RID > buffers ; <nl> buffers . resize ( 4 ) ; <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> uint32_t * uptr = ( uint32_t * ) r ; <nl> uint32_t base_offset = 0 ; <nl> { / / vertices <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> vd . format = RD : : DATA_FORMAT_R32G32_SFLOAT ; <nl> vd . offset = base_offset * sizeof ( float ) ; <nl> vd . location = RS : : ARRAY_VERTEX ; <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> <nl> / / colors <nl> if ( ( uint32_t ) p_colors . size ( ) = = vertex_count | | p_colors . size ( ) = = 1 ) { <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> vd . 
format = RD : : DATA_FORMAT_R32G32B32A32_SFLOAT ; <nl> vd . offset = base_offset * sizeof ( float ) ; <nl> vd . location = RS : : ARRAY_COLOR ; <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> } <nl> base_offset + = 4 ; <nl> } else { <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> vd . format = RD : : DATA_FORMAT_R32G32B32A32_SFLOAT ; <nl> vd . offset = 0 ; <nl> vd . location = RS : : ARRAY_COLOR ; <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> <nl> / / uvs <nl> if ( ( uint32_t ) p_uvs . size ( ) = = vertex_count ) { <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> vd . format = RD : : DATA_FORMAT_R32G32_SFLOAT ; <nl> vd . offset = base_offset * sizeof ( float ) ; <nl> vd . location = RS : : ARRAY_TEX_UV ; <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> } <nl> base_offset + = 2 ; <nl> } else { <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> vd . format = RD : : DATA_FORMAT_R32G32_SFLOAT ; <nl> vd . offset = 0 ; <nl> vd . location = RS : : ARRAY_TEX_UV ; <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> <nl> / / bones <nl> if ( ( uint32_t ) p_indices . size ( ) = = vertex_count * 4 & & ( uint32_t ) p_weights . size ( ) = = vertex_count * 4 ) { <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> vd . format = RD : : DATA_FORMAT_R32G32B32A32_UINT ; <nl> vd . offset = base_offset * sizeof ( float ) ; <nl> vd . location = RS : : ARRAY_BONES ; <nl> RasterizerCanvas : : PolygonID RasterizerCanvasRD : : request_polygon ( const Vector < int <nl> <nl> base_offset + = 4 ; <nl> } else { <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> vd . format = RD : : DATA_FORMAT_R32G32B32A32_UINT ; <nl> vd . offset = 0 ; <nl> vd . location = RS : : ARRAY_BONES ; <nl> RasterizerCanvasRD : : RasterizerCanvasRD ( RasterizerStorageRD * p_storage ) { <nl> } <nl> <nl> / / pipelines <nl> - Vector < RD : : VertexDescription > vf ; <nl> - RD : : VertexDescription vd ; <nl> + Vector < RD : : VertexAttribute > vf ; <nl> + RD : : VertexAttribute vd ; <nl> vd . format = RD : : DATA_FORMAT_R32G32B32_SFLOAT ; <nl> vd . location = 0 ; <nl> vd . offset = 0 ; <nl> mmm a / servers / rendering / rasterizer_rd / rasterizer_storage_rd . cpp <nl> ppp b / servers / rendering / rasterizer_rd / rasterizer_storage_rd . cpp <nl> void RasterizerStorageRD : : _mesh_surface_generate_version_for_input_mask ( Mesh : : Su <nl> <nl> Mesh : : Surface : : Version & v = s - > versions [ version ] ; <nl> <nl> - Vector < RD : : VertexDescription > attributes ; <nl> + Vector < RD : : VertexAttribute > attributes ; <nl> Vector < RID > buffers ; <nl> <nl> uint32_t stride = 0 ; <nl> <nl> for ( int i = 0 ; i < RS : : ARRAY_WEIGHTS ; i + + ) { <nl> <nl> - RD : : VertexDescription vd ; <nl> + RD : : VertexAttribute vd ; <nl> RID buffer ; <nl> vd . location = i ; <nl> <nl> mmm a / servers / rendering / rendering_device . cpp <nl> ppp b / servers / rendering / rendering_device . 
cpp <nl> Vector < uint8_t > RenderingDevice : : shader_compile_from_source ( ShaderStage p_stage , <nl> return compile_function ( p_stage , p_source_code , p_language , r_error ) ; <nl> } <nl> <nl> - RID RenderingDevice : : _texture_create ( const Ref < RDTextureFormat > & p_format , const Ref < RDTextureView > & p_view , const Array & p_data ) { <nl> + RID RenderingDevice : : _texture_create ( const Ref < RDTextureFormat > & p_format , const Ref < RDTextureView > & p_view , const TypedArray < PackedByteArray > & p_data ) { <nl> <nl> ERR_FAIL_COND_V ( p_format . is_null ( ) , RID ( ) ) ; <nl> ERR_FAIL_COND_V ( p_view . is_null ( ) , RID ( ) ) ; <nl> RID RenderingDevice : : _texture_create_shared_from_slice ( const Ref < RDTextureView > <nl> return texture_create_shared_from_slice ( p_view - > base , p_with_texture , p_layer , p_mipmap , p_slice_type ) ; <nl> } <nl> <nl> - RenderingDevice : : FramebufferFormatID RenderingDevice : : _framebuffer_format_create ( const Array & p_attachments ) { <nl> + RenderingDevice : : FramebufferFormatID RenderingDevice : : _framebuffer_format_create ( const TypedArray < RDAttachmentFormat > & p_attachments ) { <nl> <nl> Vector < AttachmentFormat > attachments ; <nl> attachments . resize ( p_attachments . size ( ) ) ; <nl> RID RenderingDevice : : _sampler_create ( const Ref < RDSamplerState > & p_state ) { <nl> return sampler_create ( p_state - > base ) ; <nl> } <nl> <nl> - RenderingDevice : : VertexFormatID RenderingDevice : : _vertex_format_create ( const Array & p_vertex_formats ) { <nl> + RenderingDevice : : VertexFormatID RenderingDevice : : _vertex_format_create ( const TypedArray < RDVertexAttribute > & p_vertex_formats ) { <nl> <nl> - Vector < VertexDescription > descriptions ; <nl> + Vector < VertexAttribute > descriptions ; <nl> descriptions . resize ( p_vertex_formats . size ( ) ) ; <nl> <nl> for ( int i = 0 ; i < p_vertex_formats . size ( ) ; i + + ) { <nl> - Ref < RDVertexDescription > af = p_vertex_formats [ i ] ; <nl> + Ref < RDVertexAttribute > af = p_vertex_formats [ i ] ; <nl> ERR_FAIL_COND_V ( af . is_null ( ) , INVALID_FORMAT_ID ) ; <nl> descriptions . write [ i ] = af - > base ; <nl> } <nl> return vertex_format_create ( descriptions ) ; <nl> } <nl> <nl> - RID RenderingDevice : : _vertex_array_create ( uint32_t p_vertex_count , VertexFormatID p_vertex_format , const Array & p_src_buffers ) { <nl> + RID RenderingDevice : : _vertex_array_create ( uint32_t p_vertex_count , VertexFormatID p_vertex_format , const TypedArray < RID > & p_src_buffers ) { <nl> <nl> Vector < RID > buffers = Variant ( p_src_buffers ) ; <nl> <nl> RID RenderingDevice : : _render_pipeline_create ( RID p_shader , FramebufferFormatID p <nl> PipelineMultisampleState multisample_state ; <nl> if ( p_multisample_state . is_valid ( ) ) { <nl> multisample_state = p_multisample_state - > base ; <nl> + for ( int i = 0 ; i < p_multisample_state - > sample_masks . size ( ) ; i + + ) { <nl> + int64_t mask = p_multisample_state - > sample_masks [ i ] ; <nl> + multisample_state . sample_mask . push_back ( mask ) ; <nl> + } <nl> } <nl> <nl> PipelineDepthStencilState depth_stencil_state ; <nl> RID RenderingDevice : : _render_pipeline_create ( RID p_shader , FramebufferFormatID p <nl> PipelineColorBlendState color_blend_state ; <nl> if ( p_blend_state . is_valid ( ) ) { <nl> color_blend_state = p_blend_state - > base ; <nl> + for ( int i = 0 ; i < p_blend_state - > attachments . 
size ( ) ; i + + ) { <nl> + Ref < RDPipelineColorBlendStateAttachment > attachment = p_blend_state - > attachments [ i ] ; <nl> + if ( attachment . is_valid ( ) ) { <nl> + color_blend_state . attachments . push_back ( attachment - > base ) ; <nl> + } <nl> + } <nl> } <nl> <nl> return render_pipeline_create ( p_shader , p_framebuffer_format , p_vertex_format , p_render_primitive , rasterization_state , multisample_state , depth_stencil_state , color_blend_state , p_dynamic_state_flags ) ; <nl> mmm a / servers / rendering / rendering_device . h <nl> ppp b / servers / rendering / rendering_device . h <nl> <nl> # define RENDERING_DEVICE_H <nl> <nl> # include " core / object . h " <nl> + # include " core / typed_array . h " <nl> # include " servers / display_server . h " <nl> <nl> class RDTextureFormat ; <nl> class RDTextureView ; <nl> - class RDAttachments ; <nl> + class RDAttachmentFormat ; <nl> class RDSamplerState ; <nl> - class RDVertexDescriptions ; <nl> + class RDVertexAttribute ; <nl> class RDShaderSource ; <nl> class RDShaderBytecode ; <nl> class RDUniforms ; <nl> class RenderingDevice : public Object { <nl> VERTEX_FREQUENCY_INSTANCE , <nl> } ; <nl> <nl> - struct VertexDescription { <nl> + struct VertexAttribute { <nl> uint32_t location ; / / shader location <nl> uint32_t offset ; <nl> DataFormat format ; <nl> uint32_t stride ; <nl> VertexFrequency frequency ; <nl> - VertexDescription ( ) { <nl> + VertexAttribute ( ) { <nl> location = 0 ; <nl> offset = 0 ; <nl> stride = 0 ; <nl> class RenderingDevice : public Object { <nl> typedef int64_t VertexFormatID ; <nl> <nl> / / This ID is warranted to be unique for the same formats , does not need to be freed <nl> - virtual VertexFormatID vertex_format_create ( const Vector < VertexDescription > & p_vertex_formats ) = 0 ; <nl> + virtual VertexFormatID vertex_format_create ( const Vector < VertexAttribute > & p_vertex_formats ) = 0 ; <nl> virtual RID vertex_array_create ( uint32_t p_vertex_count , VertexFormatID p_vertex_format , const Vector < RID > & p_src_buffers ) = 0 ; <nl> <nl> enum IndexBufferFormat { <nl> class RenderingDevice : public Object { <nl> <nl> protected : <nl> / / binders to script API <nl> - RID _texture_create ( const Ref < RDTextureFormat > & p_format , const Ref < RDTextureView > & p_view , const Array & p_data = Array ( ) ) ; <nl> + RID _texture_create ( const Ref < RDTextureFormat > & p_format , const Ref < RDTextureView > & p_view , const TypedArray < PackedByteArray > & p_data = Array ( ) ) ; <nl> RID _texture_create_shared ( const Ref < RDTextureView > & p_view , RID p_with_texture ) ; <nl> RID _texture_create_shared_from_slice ( const Ref < RDTextureView > & p_view , RID p_with_texture , uint32_t p_layer , uint32_t p_mipmap , TextureSliceType p_slice_type = TEXTURE_SLICE_2D ) ; <nl> <nl> - FramebufferFormatID _framebuffer_format_create ( const Array & p_attachments ) ; <nl> + FramebufferFormatID _framebuffer_format_create ( const TypedArray < RDAttachmentFormat > & p_attachments ) ; <nl> RID _framebuffer_create ( const Array & p_textures , FramebufferFormatID p_format_check = INVALID_ID ) ; <nl> RID _sampler_create ( const Ref < RDSamplerState > & p_state ) ; <nl> - VertexFormatID _vertex_format_create ( const Array & p_vertex_formats ) ; <nl> - RID _vertex_array_create ( uint32_t p_vertex_count , VertexFormatID p_vertex_format , const Array & p_src_buffers ) ; <nl> + VertexFormatID _vertex_format_create ( const TypedArray < RDVertexAttribute > & p_vertex_formats ) ; <nl> + RID _vertex_array_create ( uint32_t 
p_vertex_count , VertexFormatID p_vertex_format , const TypedArray < RID > & p_src_buffers ) ; <nl> <nl> Ref < RDShaderBytecode > _shader_compile_from_source ( const Ref < RDShaderSource > & p_source , bool p_allow_cache = true ) ; <nl> RID _shader_create ( const Ref < RDShaderBytecode > & p_bytecode ) ; <nl> mmm a / servers / rendering / rendering_device_binds . h <nl> ppp b / servers / rendering / rendering_device_binds . h <nl> class RDSamplerState : public Reference { <nl> } <nl> } ; <nl> <nl> - class RDVertexDescription : public Reference { <nl> - GDCLASS ( RDVertexDescription , Reference ) <nl> + class RDVertexAttribute : public Reference { <nl> + GDCLASS ( RDVertexAttribute , Reference ) <nl> friend class RenderingDevice ; <nl> - RD : : VertexDescription base ; <nl> + RD : : VertexAttribute base ; <nl> <nl> public : <nl> RD_SETGET ( uint32_t , location ) <nl> class RDVertexDescription : public Reference { <nl> <nl> protected : <nl> static void _bind_methods ( ) { <nl> - RD_BIND ( Variant : : INT , RDVertexDescription , location ) ; <nl> - RD_BIND ( Variant : : INT , RDVertexDescription , offset ) ; <nl> - RD_BIND ( Variant : : INT , RDVertexDescription , format ) ; <nl> - RD_BIND ( Variant : : INT , RDVertexDescription , stride ) ; <nl> - RD_BIND ( Variant : : INT , RDVertexDescription , frequency ) ; <nl> + RD_BIND ( Variant : : INT , RDVertexAttribute , location ) ; <nl> + RD_BIND ( Variant : : INT , RDVertexAttribute , offset ) ; <nl> + RD_BIND ( Variant : : INT , RDVertexAttribute , format ) ; <nl> + RD_BIND ( Variant : : INT , RDVertexAttribute , stride ) ; <nl> + RD_BIND ( Variant : : INT , RDVertexAttribute , frequency ) ; <nl> } <nl> } ; <nl> class RDShaderSource : public Reference { <nl> class RDPipelineMultisampleState : public Reference { <nl> friend class RenderingDevice ; <nl> <nl> RD : : PipelineMultisampleState base ; <nl> + TypedArray < int64_t > sample_masks ; <nl> <nl> public : <nl> RD_SETGET ( RD : : TextureSamples , sample_count ) <nl> class RDPipelineMultisampleState : public Reference { <nl> RD_SETGET ( bool , enable_alpha_to_coverage ) <nl> RD_SETGET ( bool , enable_alpha_to_one ) <nl> <nl> - void add_sample_mask ( uint32_t p_sample_mask ) { base . sample_mask . push_back ( p_sample_mask ) ; } <nl> - void clear_sample_masks ( ) { base . sample_mask . clear ( ) ; } <nl> - Vector < int64_t > get_sample_masks ( ) const { <nl> - Vector < int64_t > sample_masks ; <nl> - for ( int i = 0 ; i < base . sample_mask . size ( ) ; i + + ) { <nl> - sample_masks . push_back ( base . sample_mask [ i ] ) ; <nl> - } <nl> - return sample_masks ; <nl> - } <nl> + void set_sample_masks ( const TypedArray < int64_t > & p_masks ) { sample_masks = p_masks ; } <nl> + TypedArray < int64_t > get_sample_masks ( ) const { return sample_masks ; } <nl> <nl> protected : <nl> - void _set_sample_masks ( const Vector < int64_t > & p_masks ) { <nl> - base . sample_mask . clear ( ) ; <nl> - for ( int i = 0 ; i < p_masks . size ( ) ; i + + ) { <nl> - int64_t mask = p_masks [ i ] ; <nl> - base . sample_mask . 
push_back ( mask ) ; <nl> - } <nl> - } <nl> static void _bind_methods ( ) { <nl> RD_BIND ( Variant : : INT , RDPipelineMultisampleState , sample_count ) ; <nl> RD_BIND ( Variant : : BOOL , RDPipelineMultisampleState , enable_sample_shading ) ; <nl> class RDPipelineMultisampleState : public Reference { <nl> RD_BIND ( Variant : : BOOL , RDPipelineMultisampleState , enable_alpha_to_coverage ) ; <nl> RD_BIND ( Variant : : BOOL , RDPipelineMultisampleState , enable_alpha_to_one ) ; <nl> <nl> - ClassDB : : bind_method ( D_METHOD ( " add_sample_mask " , " mask " ) , & RDPipelineMultisampleState : : add_sample_mask ) ; <nl> - ClassDB : : bind_method ( D_METHOD ( " clear_sample_masks " ) , & RDPipelineMultisampleState : : clear_sample_masks ) ; <nl> - ClassDB : : bind_method ( D_METHOD ( " _set_sample_masks " , " sample_masks " ) , & RDPipelineMultisampleState : : _set_sample_masks ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " set_sample_masks " , " masks " ) , & RDPipelineMultisampleState : : set_sample_masks ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_sample_masks " ) , & RDPipelineMultisampleState : : get_sample_masks ) ; <nl> - ADD_PROPERTY ( PropertyInfo ( Variant : : PACKED_INT64_ARRAY , " _sample_masks " , PROPERTY_HINT_NONE , " " , PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_INTERNAL ) , " _set_sample_masks " , " get_sample_masks " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : ARRAY , " sample_masks " , PROPERTY_HINT_ARRAY_TYPE , " int " ) , " set_sample_masks " , " get_sample_masks " ) ; <nl> } <nl> } ; <nl> <nl> class RDPipelineDepthStencilState : public Reference { <nl> <nl> class RDPipelineColorBlendStateAttachment : public Reference { <nl> GDCLASS ( RDPipelineColorBlendStateAttachment , Reference ) <nl> + friend class RenderingDevice ; <nl> RD : : PipelineColorBlendState : : Attachment base ; <nl> <nl> public : <nl> class RDPipelineColorBlendStateAttachment : public Reference { <nl> RD_SETGET ( bool , write_b ) <nl> RD_SETGET ( bool , write_a ) <nl> <nl> - void set_as_disabled ( ) { <nl> - base = RD : : PipelineColorBlendState : : Attachment ( ) ; <nl> - } <nl> - <nl> void set_as_mix ( ) { <nl> <nl> base = RD : : PipelineColorBlendState : : Attachment ( ) ; <nl> class RDPipelineColorBlendStateAttachment : public Reference { <nl> <nl> protected : <nl> static void _bind_methods ( ) { <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " set_as_mix " ) , & RDPipelineColorBlendStateAttachment : : set_as_mix ) ; <nl> + <nl> RD_BIND ( Variant : : BOOL , RDPipelineColorBlendStateAttachment , enable_blend ) ; <nl> RD_BIND ( Variant : : INT , RDPipelineColorBlendStateAttachment , src_color_blend_factor ) ; <nl> RD_BIND ( Variant : : INT , RDPipelineColorBlendStateAttachment , dst_color_blend_factor ) ; <nl> class RDPipelineColorBlendState : public Reference { <nl> friend class RenderingDevice ; <nl> RD : : PipelineColorBlendState base ; <nl> <nl> - Vector < Ref < RDPipelineColorBlendStateAttachment > > attachments ; <nl> + TypedArray < RDPipelineColorBlendStateAttachment > attachments ; <nl> <nl> public : <nl> RD_SETGET ( bool , enable_logic_op ) <nl> RD_SETGET ( RD : : LogicOperation , logic_op ) <nl> RD_SETGET ( Color , blend_constant ) <nl> <nl> - void add_attachment ( const Ref < RDPipelineColorBlendStateAttachment > & p_attachment ) { <nl> - attachments . push_back ( p_attachment ) ; <nl> + void set_attachments ( const TypedArray < RDPipelineColorBlendStateAttachment > & p_attachments ) { <nl> + attachments . 
push_back ( p_attachments ) ; <nl> } <nl> <nl> - void add_no_blend_attachment ( ) { <nl> - Ref < RDPipelineColorBlendStateAttachment > attachment ; <nl> - attachment . instance ( ) ; <nl> - attachment - > set_as_disabled ( ) ; <nl> - add_attachment ( attachment ) ; <nl> - } <nl> - <nl> - void add_blend_mix_attachment ( ) { <nl> - Ref < RDPipelineColorBlendStateAttachment > attachment ; <nl> - attachment . instance ( ) ; <nl> - attachment - > set_as_mix ( ) ; <nl> - add_attachment ( attachment ) ; <nl> - } <nl> - <nl> - void clear_attachments ( ) { <nl> - attachments . clear ( ) ; <nl> - } <nl> - <nl> - Array get_attachments ( ) const { <nl> - Array ret ; <nl> - for ( int i = 0 ; i < attachments . size ( ) ; i + + ) { <nl> - ret . push_back ( attachments [ i ] ) ; <nl> - } <nl> - return ret ; <nl> - } <nl> - void _set_attachments ( const Array & p_attachments ) { <nl> - attachments . clear ( ) ; <nl> - for ( int i = 0 ; i < p_attachments . size ( ) ; i + + ) { <nl> - Ref < RDPipelineColorBlendStateAttachment > attachment = p_attachments [ i ] ; <nl> - ERR_FAIL_COND ( ! attachment . is_valid ( ) ) ; <nl> - attachments . push_back ( attachment ) ; <nl> - } <nl> + TypedArray < RDPipelineColorBlendStateAttachment > get_attachments ( ) const { <nl> + return attachments ; <nl> } <nl> <nl> protected : <nl> class RDPipelineColorBlendState : public Reference { <nl> RD_BIND ( Variant : : INT , RDPipelineColorBlendState , logic_op ) ; <nl> RD_BIND ( Variant : : COLOR , RDPipelineColorBlendState , blend_constant ) ; <nl> <nl> - ClassDB : : bind_method ( D_METHOD ( " add_attachment " , " atachment " ) , & RDPipelineColorBlendState : : add_attachment ) ; <nl> - ClassDB : : bind_method ( D_METHOD ( " add_no_blend_attachment " ) , & RDPipelineColorBlendState : : add_no_blend_attachment ) ; <nl> - ClassDB : : bind_method ( D_METHOD ( " add_blend_mix_attachment " ) , & RDPipelineColorBlendState : : add_blend_mix_attachment ) ; <nl> - ClassDB : : bind_method ( D_METHOD ( " clear_attachments " ) , & RDPipelineColorBlendState : : clear_attachments ) ; <nl> - ClassDB : : bind_method ( D_METHOD ( " _set_attachments " , " attachments " ) , & RDPipelineColorBlendState : : _set_attachments ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " set_attachments " , " atachments " ) , & RDPipelineColorBlendState : : set_attachments ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_attachments " ) , & RDPipelineColorBlendState : : get_attachments ) ; <nl> - ADD_PROPERTY ( PropertyInfo ( Variant : : ARRAY , " _attachments " , PROPERTY_HINT_NONE , " " , PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_INTERNAL ) , " _set_attachments " , " get_attachments " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : ARRAY , " attachments " , PROPERTY_HINT_ARRAY_TYPE , " RDPipelineColorBlendStateAttachment " ) , " set_attachments " , " get_attachments " ) ; <nl> } <nl> } ; <nl> <nl> | Add proper type to most public API uses of Array | godotengine/godot | f8ef38efed3c4f814b2762b2e054e0bbf7d911b4 | 2020-04-21T15:16:45Z |
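To make the typed-array change above concrete, here is a minimal consumer-side sketch in C++ against the Godot 4.0-era API. It is not part of the commit: report_overlaps is a hypothetical helper and the include set is an assumption; it only shows what callers gain once Area2D::get_overlapping_bodies() returns TypedArray<Node2D> instead of a plain Array, namely that the element type travels with the value and can be surfaced to scripts as a typed array.

#include "core/print_string.h"
#include "scene/2d/area_2d.h"

// Hypothetical caller, not from the commit. TypedArray<Node2D> still stores
// Variant elements, so C++ code casts on access as before; the difference is
// that the array itself now carries Variant::OBJECT / "Node2D" as its element
// type, which the binding layer can expose to scripts.
void report_overlaps(Area2D *p_area) {
	TypedArray<Node2D> bodies = p_area->get_overlapping_bodies();
	for (int i = 0; i < bodies.size(); i++) {
		Node2D *body = Object::cast_to<Node2D>(bodies[i]);
		if (body) {
			print_line(String(body->get_name()) + " overlaps " + String(p_area->get_name()));
		}
	}
}

On the C++ side the access pattern is unchanged; the benefit is that the declared element type is now visible to the scripting bindings rather than being an untyped Array.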
mmm a / tools / ctor_evaller . py <nl> ppp b / tools / ctor_evaller . py <nl> def add_func ( asm , func ) : <nl> asm = get_asm ( js ) <nl> assert len ( asm ) > 0 <nl> asm = asm . replace ( ' use asm ' , ' not asm ' ) # don ' t try to validate this <nl> + # Substitute sbrk with a failing stub : the dynamic heap memory area shouldn ' t get increased during static ctor initialization . <nl> + asm = asm . replace ( ' function _sbrk ( ' , ' function _sbrk ( increment ) { throw " no sbrk when evalling ctors ! " ; } function KILLED_sbrk ( ' , 1 ) <nl> # find all global vars , and provide only safe ones . Also add dumping for those . <nl> pre_funcs_start = asm . find ( ' ; ' ) + 1 <nl> pre_funcs_end = asm . find ( ' function ' , pre_funcs_start ) <nl> def add_func ( asm , func ) : <nl> ' HEAPF32 ' , ' HEAPF64 ' , <nl> ' Int8View ' , ' Int16View ' , ' Int32View ' , ' Uint8View ' , ' Uint16View ' , ' Uint32View ' , ' Float32View ' , ' Float64View ' , <nl> ' nan ' , ' inf ' , <nl> - ' _emscripten_memcpy_big ' , ' _sbrk ' , ' ___dso_handle ' , <nl> + ' _emscripten_memcpy_big ' , ' ___dso_handle ' , <nl> ' _atexit ' , ' ___cxa_atexit ' , <nl> ] or name . startswith ( ' Math_ ' ) : <nl> if ' new ' not in value : <nl> def add_func ( asm , func ) : <nl> var stackMax = stackTop + totalStack ; <nl> if ( stackMax > = totalMemory ) throw ' not enough room for stack ' ; <nl> <nl> - var dynamicTopPtr = stackMax ; <nl> + var dynamicTopPtr = 0 ; <nl> <nl> if ( ! Math . imul ) { <nl> Math . imul = Math . imul | | function ( a , b ) { <nl> def read_and_delete ( filename ) : <nl> out_result = read_and_delete ( out_file ) <nl> err_result = read_and_delete ( err_file ) <nl> if proc . returncode ! = 0 : <nl> - shared . logging . debug ( ' unexpected error while trying to eval ctors : \ n ' + out_result ) <nl> + shared . logging . debug ( ' unexpected error while trying to eval ctors : \ n ' + out_result + ' \ n ' + err_result ) <nl> return ( 0 , 0 , 0 , 0 ) <nl> <nl> # out contains the new mem init and other info <nl> | Restrict ctor evaller from accessing sbrk ( ) . | emscripten-core/emscripten | bb23567760d59852e585433653c709c1d8ea6816 | 2016-08-22T19:36:21Z |
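The _sbrk stub in the patch above matters because many C++ static constructors reach the allocator during static initialization. As a hedged illustration (not part of the patch; g_lookup_table is a made-up global), the translation unit below is the kind of input whose constructor the evaller will now decline to fold ahead of time: constructing the vector calls operator new, which bottoms out in malloc and then sbrk, so evaluation hits the throwing stub instead of silently bumping the heap top.

#include <vector>

// Hypothetical translation unit, not from the patch. Building this vector at
// static-initialization time calls operator new, which bottoms out in malloc
// and then sbrk, so the evaller's throwing _sbrk stub aborts that evaluation
// attempt and the constructor is left to run normally at startup.
static std::vector<int> g_lookup_table(1024, 0);

int main() {
	return g_lookup_table[0];
}

Constructors that only write into static storage without allocating never touch sbrk, so they remain eligible for ahead-of-time evaluation.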
mmm a / tensorflow / contrib / learn / BUILD <nl> ppp b / tensorflow / contrib / learn / BUILD <nl> py_test ( <nl> size = " small " , <nl> srcs = [ " python / learn / learn_io / graph_io_test . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> - tags = [ " manual " ] , # http : / / b / 29229547 <nl> deps = [ <nl> " : learn " , <nl> " / / tensorflow : tensorflow_py " , <nl> mmm a / tensorflow / contrib / learn / python / learn / learn_io / graph_io . py <nl> ppp b / tensorflow / contrib / learn / python / learn / learn_io / graph_io . py <nl> def read_keyed_batch_examples ( <nl> Raises : <nl> ValueError : for invalid inputs . <nl> " " " <nl> - # Retrive files to read . <nl> + # Retrieve files to read . <nl> if isinstance ( file_pattern , list ) : <nl> file_names = file_pattern <nl> if not file_names : <nl> mmm a / tensorflow / contrib / learn / python / learn / learn_io / graph_io_test . py <nl> ppp b / tensorflow / contrib / learn / python / learn / learn_io / graph_io_test . py <nl> def test_batch_randomized ( self ) : <nl> <nl> def _create_temp_file ( self , lines ) : <nl> tempdir = tempfile . mkdtemp ( ) <nl> - filename = os . path . join ( tempdir , " file . csv " ) <nl> + filename = os . path . join ( tempdir , " temp_file " ) <nl> gfile . Open ( filename , " w " ) . write ( lines ) <nl> return filename <nl> <nl> - def test_read_csv ( self ) : <nl> + def _create_sorted_temp_files ( self , lines_list ) : <nl> + tempdir = tempfile . mkdtemp ( ) <nl> + filenames = [ ] <nl> + for i , lines in enumerate ( lines_list ) : <nl> + filename = os . path . join ( tempdir , " temp_file % 05d " % i ) <nl> + gfile . Open ( filename , " w " ) . write ( lines ) <nl> + filenames . append ( filename ) <nl> + return filenames <nl> + <nl> + def test_read_text_lines ( self ) : <nl> gfile . Glob = self . _orig_glob <nl> filename = self . _create_temp_file ( " ABC \ nDEF \ nGHK \ n " ) <nl> <nl> def test_read_csv ( self ) : <nl> <nl> with tf . Graph ( ) . as_default ( ) as g , self . test_session ( graph = g ) as session : <nl> inputs = tf . contrib . learn . io . read_batch_examples ( <nl> - filename , batch_size , <nl> - reader = tf . TextLineReader , randomize_input = False , <nl> - num_epochs = 1 , queue_capacity = queue_capacity , name = name ) <nl> + filename , batch_size , reader = tf . TextLineReader , <nl> + randomize_input = False , num_epochs = 1 , queue_capacity = queue_capacity , <nl> + name = name ) <nl> + session . run ( tf . initialize_local_variables ( ) ) <nl> + <nl> + coord = tf . train . Coordinator ( ) <nl> + tf . train . start_queue_runners ( session , coord = coord ) <nl> + <nl> + self . assertAllEqual ( session . run ( inputs ) , [ b " ABC " ] ) <nl> + self . assertAllEqual ( session . run ( inputs ) , [ b " DEF " ] ) <nl> + self . assertAllEqual ( session . run ( inputs ) , [ b " GHK " ] ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + session . run ( inputs ) <nl> + <nl> + coord . request_stop ( ) <nl> + <nl> + def test_read_text_lines_multifile ( self ) : <nl> + gfile . Glob = self . _orig_glob <nl> + filenames = self . _create_sorted_temp_files ( [ " ABC \ n " , " DEF \ nGHK \ n " ] ) <nl> + <nl> + batch_size = 1 <nl> + queue_capacity = 5 <nl> + name = " my_batch " <nl> + <nl> + with tf . Graph ( ) . as_default ( ) as g , self . test_session ( graph = g ) as session : <nl> + inputs = tf . contrib . learn . io . read_batch_examples ( <nl> + filenames , batch_size , reader = tf . 
TextLineReader , <nl> + randomize_input = False , num_epochs = 1 , queue_capacity = queue_capacity , <nl> + name = name ) <nl> session . run ( tf . initialize_local_variables ( ) ) <nl> <nl> coord = tf . train . Coordinator ( ) <nl> def test_read_csv ( self ) : <nl> <nl> coord . request_stop ( ) <nl> <nl> - def test_batch_reader ( self ) : <nl> + def test_batch_text_lines ( self ) : <nl> gfile . Glob = self . _orig_glob <nl> filename = self . _create_temp_file ( " A \ nB \ nC \ nD \ nE \ n " ) <nl> <nl> def test_batch_reader ( self ) : <nl> <nl> coord . request_stop ( ) <nl> <nl> - def test_keyed_read_csv ( self ) : <nl> + def test_keyed_read_text_lines ( self ) : <nl> gfile . Glob = self . _orig_glob <nl> filename = self . _create_temp_file ( " ABC \ nDEF \ nGHK \ n " ) <nl> <nl> | Update tests and pydoc for dequeue_batch . | tensorflow/tensorflow | 0204fbd5fec268e2b4d4d4e9185e21725a6c248d | 2016-08-08T22:03:03Z |
mmm a / android / sdk / assets / main . js <nl> ppp b / android / sdk / assets / main . js <nl> <nl> - ( this . nativeLog | | function ( s ) { console . log ( s ) } ) ( " START JS FRAMEWORK : 0 . 14 . 5 Build 20160706 " ) ; ( function ( modules ) { var installedModules = { } ; function __webpack_require__ ( moduleId ) { if ( installedModules [ moduleId ] ) return installedModules [ moduleId ] . exports ; var module = installedModules [ moduleId ] = { exports : { } , id : moduleId , loaded : false } ; modules [ moduleId ] . call ( module . exports , module , module . exports , __webpack_require__ ) ; module . loaded = true ; return module . exports } __webpack_require__ . m = modules ; __webpack_require__ . c = installedModules ; __webpack_require__ . p = " " ; return __webpack_require__ ( 0 ) } ) ( [ function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; __webpack_require__ ( 1 ) ; var methods = __webpack_require__ ( 104 ) ; var _global = global ; var registerMethods = _global . registerMethods ; registerMethods ( methods ) } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; __webpack_require__ ( 2 ) ; var _runtime = __webpack_require__ ( 77 ) ; var _runtime2 = _interopRequireDefault ( _runtime ) ; var _package = __webpack_require__ ( 102 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } var native = _package . subversion . native ; var transformer = _package . subversion . transformer ; var _loop = function _loop ( methodName ) { global [ methodName ] = function ( ) { var ret = _runtime2 . default [ methodName ] . apply ( _runtime2 . default , arguments ) ; if ( ret instanceof Error ) { console . error ( ret . toString ( ) ) } return ret } } ; for ( var methodName in _runtime2 . default ) { _loop ( methodName ) } Object . assign ( global , { frameworkVersion : native , needTransformerVersion : transformer } ) ; var methods = __webpack_require__ ( 103 ) ; var _global = global ; var registerMethods = _global . registerMethods ; registerMethods ( methods ) } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; __webpack_require__ ( 3 ) ; __webpack_require__ ( 41 ) ; __webpack_require__ ( 42 ) ; __webpack_require__ ( 76 ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; __webpack_require__ ( 4 ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; __webpack_require__ ( 5 ) ; module . exports = __webpack_require__ ( 8 ) . Object . assign } , function ( module , exports , __webpack_require__ ) { " use strict " ; var $ export = __webpack_require__ ( 6 ) ; $ export ( $ export . S + $ export . F , " Object " , { assign : __webpack_require__ ( 24 ) } ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; var global = __webpack_require__ ( 7 ) , core = __webpack_require__ ( 8 ) , hide = __webpack_require__ ( 9 ) , redefine = __webpack_require__ ( 19 ) , ctx = __webpack_require__ ( 22 ) , PROTOTYPE = " prototype " ; var $ export = function $ export ( type , name , source ) { var IS_FORCED = type & $ export . F , IS_GLOBAL = type & $ export . G , IS_STATIC = type & $ export . S , IS_PROTO = type & $ export . P , IS_BIND = type & $ export . B , target = IS_GLOBAL ? global : IS_STATIC ? 
global [ name ] | | ( global [ name ] = { } ) : ( global [ name ] | | { } ) [ PROTOTYPE ] , exports = IS_GLOBAL ? core : core [ name ] | | ( core [ name ] = { } ) , expProto = exports [ PROTOTYPE ] | | ( exports [ PROTOTYPE ] = { } ) , key , own , out , exp ; if ( IS_GLOBAL ) source = name ; for ( key in source ) { own = ! IS_FORCED & & target & & target [ key ] ! = = undefined ; out = ( own ? target : source ) [ key ] ; exp = IS_BIND & & own ? ctx ( out , global ) : IS_PROTO & & typeof out = = " function " ? ctx ( Function . call , out ) : out ; if ( target ) redefine ( target , key , out , type & $ export . U ) ; if ( exports [ key ] ! = out ) hide ( exports , key , exp ) ; if ( IS_PROTO & & expProto [ key ] ! = out ) expProto [ key ] = out } } ; global . core = core ; $ export . F = 1 ; $ export . G = 2 ; $ export . S = 4 ; $ export . P = 8 ; $ export . B = 16 ; $ export . W = 32 ; $ export . U = 64 ; $ export . R = 128 ; module . exports = $ export } , function ( module , exports ) { " use strict " ; var global = module . exports = typeof window ! = " undefined " & & window . Math = = Math ? window : typeof self ! = " undefined " & & self . Math = = Math ? self : Function ( " return this " ) ( ) ; if ( typeof __g = = " number " ) __g = global } , function ( module , exports ) { " use strict " ; var core = module . exports = { version : " 2 . 4 . 0 " } ; if ( typeof __e = = " number " ) __e = core } , function ( module , exports , __webpack_require__ ) { " use strict " ; var dP = __webpack_require__ ( 10 ) , createDesc = __webpack_require__ ( 18 ) ; module . exports = __webpack_require__ ( 14 ) ? function ( object , key , value ) { return dP . f ( object , key , createDesc ( 1 , value ) ) } : function ( object , key , value ) { object [ key ] = value ; return object } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var anObject = __webpack_require__ ( 11 ) , IE8_DOM_DEFINE = __webpack_require__ ( 13 ) , toPrimitive = __webpack_require__ ( 17 ) , dP = Object . defineProperty ; exports . f = __webpack_require__ ( 14 ) ? Object . defineProperty : function defineProperty ( O , P , Attributes ) { anObject ( O ) ; P = toPrimitive ( P , true ) ; anObject ( Attributes ) ; if ( IE8_DOM_DEFINE ) try { return dP ( O , P , Attributes ) } catch ( e ) { } if ( " get " in Attributes | | " set " in Attributes ) throw TypeError ( " Accessors not supported ! " ) ; if ( " value " in Attributes ) O [ P ] = Attributes . value ; return O } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var isObject = __webpack_require__ ( 12 ) ; module . exports = function ( it ) { if ( ! isObject ( it ) ) throw TypeError ( it + " is not an object ! " ) ; return it } } , function ( module , exports ) { " use strict " ; var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj } ; module . exports = function ( it ) { return ( typeof it = = = " undefined " ? " undefined " : _typeof ( it ) ) = = = " object " ? it ! = = null : typeof it = = = " function " } } , function ( module , exports , __webpack_require__ ) { " use strict " ; module . exports = ! __webpack_require__ ( 14 ) & & ! __webpack_require__ ( 15 ) ( function ( ) { return Object . defineProperty ( __webpack_require__ ( 16 ) ( " div " ) , " a " , { get : function get ( ) { return 7 } } ) . a ! 
= 7 } ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; module . exports = ! __webpack_require__ ( 15 ) ( function ( ) { return Object . defineProperty ( { } , " a " , { get : function get ( ) { return 7 } } ) . a ! = 7 } ) } , function ( module , exports ) { " use strict " ; module . exports = function ( exec ) { try { return ! ! exec ( ) } catch ( e ) { return true } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var isObject = __webpack_require__ ( 12 ) , document = __webpack_require__ ( 7 ) . document , is = isObject ( document ) & & isObject ( document . createElement ) ; module . exports = function ( it ) { return is ? document . createElement ( it ) : { } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var isObject = __webpack_require__ ( 12 ) ; module . exports = function ( it , S ) { if ( ! isObject ( it ) ) return it ; var fn , val ; if ( S & & typeof ( fn = it . toString ) = = " function " & & ! isObject ( val = fn . call ( it ) ) ) return val ; if ( typeof ( fn = it . valueOf ) = = " function " & & ! isObject ( val = fn . call ( it ) ) ) return val ; if ( ! S & & typeof ( fn = it . toString ) = = " function " & & ! isObject ( val = fn . call ( it ) ) ) return val ; throw TypeError ( " Can ' t convert object to primitive value " ) } } , function ( module , exports ) { " use strict " ; module . exports = function ( bitmap , value ) { return { enumerable : ! ( bitmap & 1 ) , configurable : ! ( bitmap & 2 ) , writable : ! ( bitmap & 4 ) , value : value } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var global = __webpack_require__ ( 7 ) , hide = __webpack_require__ ( 9 ) , has = __webpack_require__ ( 20 ) , SRC = __webpack_require__ ( 21 ) ( " src " ) , TO_STRING = " toString " , $ toString = Function [ TO_STRING ] , TPL = ( " " + $ toString ) . split ( TO_STRING ) ; __webpack_require__ ( 8 ) . inspectSource = function ( it ) { return $ toString . call ( it ) } ; ( module . exports = function ( O , key , val , safe ) { var isFunction = typeof val = = " function " ; if ( isFunction ) has ( val , " name " ) | | hide ( val , " name " , key ) ; if ( O [ key ] = = = val ) return ; if ( isFunction ) has ( val , SRC ) | | hide ( val , SRC , O [ key ] ? " " + O [ key ] : TPL . join ( String ( key ) ) ) ; if ( O = = = global ) { O [ key ] = val } else { if ( ! safe ) { delete O [ key ] ; hide ( O , key , val ) } else { if ( O [ key ] ) O [ key ] = val ; else hide ( O , key , val ) } } } ) ( Function . prototype , TO_STRING , function toString ( ) { return typeof this = = " function " & & this [ SRC ] | | $ toString . call ( this ) } ) } , function ( module , exports ) { " use strict " ; var hasOwnProperty = { } . hasOwnProperty ; module . exports = function ( it , key ) { return hasOwnProperty . call ( it , key ) } } , function ( module , exports ) { " use strict " ; var id = 0 , px = Math . random ( ) ; module . exports = function ( key ) { return " Symbol ( " . concat ( key = = = undefined ? " " : key , " ) _ " , ( + + id + px ) . toString ( 36 ) ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var aFunction = __webpack_require__ ( 23 ) ; module . exports = function ( fn , that , length ) { aFunction ( fn ) ; if ( that = = = undefined ) return fn ; switch ( length ) { case 1 : return function ( a ) { return fn . call ( that , a ) } ; case 2 : return function ( a , b ) { return fn . 
call ( that , a , b ) } ; case 3 : return function ( a , b , c ) { return fn . call ( that , a , b , c ) } } return function ( ) { return fn . apply ( that , arguments ) } } } , function ( module , exports ) { " use strict " ; module . exports = function ( it ) { if ( typeof it ! = " function " ) throw TypeError ( it + " is not a function ! " ) ; return it } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var getKeys = __webpack_require__ ( 25 ) , gOPS = __webpack_require__ ( 38 ) , pIE = __webpack_require__ ( 39 ) , toObject = __webpack_require__ ( 40 ) , IObject = __webpack_require__ ( 28 ) , $ assign = Object . assign ; module . exports = ! $ assign | | __webpack_require__ ( 15 ) ( function ( ) { var A = { } , B = { } , S = Symbol ( ) , K = " abcdefghijklmnopqrst " ; A [ S ] = 7 ; K . split ( " " ) . forEach ( function ( k ) { B [ k ] = k } ) ; return $ assign ( { } , A ) [ S ] ! = 7 | | Object . keys ( $ assign ( { } , B ) ) . join ( " " ) ! = K } ) ? function assign ( target , source ) { var T = toObject ( target ) , aLen = arguments . length , index = 1 , getSymbols = gOPS . f , isEnum = pIE . f ; while ( aLen > index ) { var S = IObject ( arguments [ index + + ] ) , keys = getSymbols ? getKeys ( S ) . concat ( getSymbols ( S ) ) : getKeys ( S ) , length = keys . length , j = 0 , key ; while ( length > j ) { if ( isEnum . call ( S , key = keys [ j + + ] ) ) T [ key ] = S [ key ] } } return T } : $ assign } , function ( module , exports , __webpack_require__ ) { " use strict " ; var $ keys = __webpack_require__ ( 26 ) , enumBugKeys = __webpack_require__ ( 37 ) ; module . exports = Object . keys | | function keys ( O ) { return $ keys ( O , enumBugKeys ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var has = __webpack_require__ ( 20 ) , toIObject = __webpack_require__ ( 27 ) , arrayIndexOf = __webpack_require__ ( 31 ) ( false ) , IE_PROTO = __webpack_require__ ( 35 ) ( " IE_PROTO " ) ; module . exports = function ( object , names ) { var O = toIObject ( object ) , i = 0 , result = [ ] , key ; for ( key in O ) { if ( key ! = IE_PROTO ) has ( O , key ) & & result . push ( key ) } while ( names . length > i ) { if ( has ( O , key = names [ i + + ] ) ) { ~ arrayIndexOf ( result , key ) | | result . push ( key ) } } return result } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var IObject = __webpack_require__ ( 28 ) , defined = __webpack_require__ ( 30 ) ; module . exports = function ( it ) { return IObject ( defined ( it ) ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var cof = __webpack_require__ ( 29 ) ; module . exports = Object ( " z " ) . propertyIsEnumerable ( 0 ) ? Object : function ( it ) { return cof ( it ) = = " String " ? it . split ( " " ) : Object ( it ) } } , function ( module , exports ) { " use strict " ; var toString = { } . toString ; module . exports = function ( it ) { return toString . call ( it ) . slice ( 8 , - 1 ) } } , function ( module , exports ) { " use strict " ; module . exports = function ( it ) { if ( it = = undefined ) throw TypeError ( " Can ' t call method on " + it ) ; return it } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var toIObject = __webpack_require__ ( 27 ) , toLength = __webpack_require__ ( 32 ) , toIndex = __webpack_require__ ( 34 ) ; module . exports = function ( IS_INCLUDES ) { return function ( $ this , el , fromIndex ) { var O = toIObject ( $ this ) , length = toLength ( O . 
length ) , index = toIndex ( fromIndex , length ) , value ; if ( IS_INCLUDES & & el ! = el ) while ( length > index ) { value = O [ index + + ] ; if ( value ! = value ) return true } else for ( ; length > index ; index + + ) { if ( IS_INCLUDES | | index in O ) { if ( O [ index ] = = = el ) return IS_INCLUDES | | index | | 0 } } return ! IS_INCLUDES & & - 1 } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var toInteger = __webpack_require__ ( 33 ) , min = Math . min ; module . exports = function ( it ) { return it > 0 ? min ( toInteger ( it ) , 9007199254740991 ) : 0 } } , function ( module , exports ) { " use strict " ; var ceil = Math . ceil , floor = Math . floor ; module . exports = function ( it ) { return isNaN ( it = + it ) ? 0 : ( it > 0 ? floor : ceil ) ( it ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var toInteger = __webpack_require__ ( 33 ) , max = Math . max , min = Math . min ; module . exports = function ( index , length ) { index = toInteger ( index ) ; return index < 0 ? max ( index + length , 0 ) : min ( index , length ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var shared = __webpack_require__ ( 36 ) ( " keys " ) , uid = __webpack_require__ ( 21 ) ; module . exports = function ( key ) { return shared [ key ] | | ( shared [ key ] = uid ( key ) ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var global = __webpack_require__ ( 7 ) , SHARED = " __core - js_shared__ " , store = global [ SHARED ] | | ( global [ SHARED ] = { } ) ; module . exports = function ( key ) { return store [ key ] | | ( store [ key ] = { } ) } } , function ( module , exports ) { " use strict " ; module . exports = " constructor , hasOwnProperty , isPrototypeOf , propertyIsEnumerable , toLocaleString , toString , valueOf " . split ( " , " ) } , function ( module , exports ) { " use strict " ; exports . f = Object . getOwnPropertySymbols } , function ( module , exports ) { " use strict " ; exports . f = { } . propertyIsEnumerable } , function ( module , exports , __webpack_require__ ) { " use strict " ; var defined = __webpack_require__ ( 30 ) ; module . exports = function ( it ) { return Object ( defined ( it ) ) } } , function ( module , exports ) { ( function ( global ) { " use strict " ; var _global = global ; var setTimeout = _global . setTimeout ; var setTimeoutNative = _global . setTimeoutNative ; var MSG = ' Use " global . setTimeout " is unexpected , ' + ' please use require ( " @ weex - module " ) . setTimeout instead . ' ; if ( typeof setTimeout = = = " undefined " & & typeof setTimeoutNative = = = " function " ) { ( function ( ) { var timeoutMap = { } ; var timeoutId = 0 ; global . setTimeout = function ( cb , time ) { console . warn ( MSG ) ; timeoutMap [ + + timeoutId ] = cb ; setTimeoutNative ( timeoutId . toString ( ) , time ) } ; global . setTimeoutCallback = function ( id ) { if ( typeof timeoutMap [ id ] = = = " function " ) { timeoutMap [ id ] ( ) ; delete timeoutMap [ id ] } } } ) ( ) } } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; global . Promise = null ; __webpack_require__ ( 43 ) ; __webpack_require__ ( 46 ) ; __webpack_require__ ( 57 ) ; __webpack_require__ ( 61 ) } ) . 
call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; var classof = __webpack_require__ ( 44 ) , test = { } ; test [ __webpack_require__ ( 45 ) ( " toStringTag " ) ] = " z " ; if ( test + " " ! = " [ object z ] " ) { __webpack_require__ ( 19 ) ( Object . prototype , " toString " , function toString ( ) { return " [ object " + classof ( this ) + " ] " } , true ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var cof = __webpack_require__ ( 29 ) , TAG = __webpack_require__ ( 45 ) ( " toStringTag " ) , ARG = cof ( function ( ) { return arguments } ( ) ) = = " Arguments " ; var tryGet = function tryGet ( it , key ) { try { return it [ key ] } catch ( e ) { } } ; module . exports = function ( it ) { var O , T , B ; return it = = = undefined ? " Undefined " : it = = = null ? " Null " : typeof ( T = tryGet ( O = Object ( it ) , TAG ) ) = = " string " ? T : ARG ? cof ( O ) : ( B = cof ( O ) ) = = " Object " & & typeof O . callee = = " function " ? " Arguments " : B } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var store = __webpack_require__ ( 36 ) ( " wks " ) , uid = __webpack_require__ ( 21 ) , _Symbol = __webpack_require__ ( 7 ) . Symbol , USE_SYMBOL = typeof _Symbol = = " function " ; var $ exports = module . exports = function ( name ) { return store [ name ] | | ( store [ name ] = USE_SYMBOL & & _Symbol [ name ] | | ( USE_SYMBOL ? _Symbol : uid ) ( " Symbol . " + name ) ) } ; $ exports . store = store } , function ( module , exports , __webpack_require__ ) { " use strict " ; var $ at = __webpack_require__ ( 47 ) ( true ) ; __webpack_require__ ( 48 ) ( String , " String " , function ( iterated ) { this . _t = String ( iterated ) ; this . _i = 0 } , function ( ) { var O = this . _t , index = this . _i , point ; if ( index > = O . length ) return { value : undefined , done : true } ; point = $ at ( O , index ) ; this . _i + = point . length ; return { value : point , done : false } } ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; var toInteger = __webpack_require__ ( 33 ) , defined = __webpack_require__ ( 30 ) ; module . exports = function ( TO_STRING ) { return function ( that , pos ) { var s = String ( defined ( that ) ) , i = toInteger ( pos ) , l = s . length , a , b ; if ( i < 0 | | i > = l ) return TO_STRING ? " " : undefined ; a = s . charCodeAt ( i ) ; return a < 55296 | | a > 56319 | | i + 1 = = = l | | ( b = s . charCodeAt ( i + 1 ) ) < 56320 | | b > 57343 ? TO_STRING ? s . charAt ( i ) : a : TO_STRING ? s . slice ( i , i + 2 ) : ( a - 55296 < < 10 ) + ( b - 56320 ) + 65536 } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var LIBRARY = __webpack_require__ ( 49 ) , $ export = __webpack_require__ ( 6 ) , redefine = __webpack_require__ ( 19 ) , hide = __webpack_require__ ( 9 ) , has = __webpack_require__ ( 20 ) , Iterators = __webpack_require__ ( 50 ) , $ iterCreate = __webpack_require__ ( 51 ) , setToStringTag = __webpack_require__ ( 55 ) , getPrototypeOf = __webpack_require__ ( 56 ) , ITERATOR = __webpack_require__ ( 45 ) ( " iterator " ) , BUGGY = ! ( [ ] . keys & & " next " in [ ] . keys ( ) ) , FF_ITERATOR = " @ @ iterator " , KEYS = " keys " , VALUES = " values " ; var returnThis = function returnThis ( ) { return this } ; module . 
exports = function ( Base , NAME , Constructor , next , DEFAULT , IS_SET , FORCED ) { $ iterCreate ( Constructor , NAME , next ) ; var getMethod = function getMethod ( kind ) { if ( ! BUGGY & & kind in proto ) return proto [ kind ] ; switch ( kind ) { case KEYS : return function keys ( ) { return new Constructor ( this , kind ) } ; case VALUES : return function values ( ) { return new Constructor ( this , kind ) } } return function entries ( ) { return new Constructor ( this , kind ) } } ; var TAG = NAME + " Iterator " , DEF_VALUES = DEFAULT = = VALUES , VALUES_BUG = false , proto = Base . prototype , $ native = proto [ ITERATOR ] | | proto [ FF_ITERATOR ] | | DEFAULT & & proto [ DEFAULT ] , $ default = $ native | | getMethod ( DEFAULT ) , $ entries = DEFAULT ? ! DEF_VALUES ? $ default : getMethod ( " entries " ) : undefined , $ anyNative = NAME = = " Array " ? proto . entries | | $ native : $ native , methods , key , IteratorPrototype ; if ( $ anyNative ) { IteratorPrototype = getPrototypeOf ( $ anyNative . call ( new Base ) ) ; if ( IteratorPrototype ! = = Object . prototype ) { setToStringTag ( IteratorPrototype , TAG , true ) ; if ( ! LIBRARY & & ! has ( IteratorPrototype , ITERATOR ) ) hide ( IteratorPrototype , ITERATOR , returnThis ) } } if ( DEF_VALUES & & $ native & & $ native . name ! = = VALUES ) { VALUES_BUG = true ; $ default = function values ( ) { return $ native . call ( this ) } } if ( ( ! LIBRARY | | FORCED ) & & ( BUGGY | | VALUES_BUG | | ! proto [ ITERATOR ] ) ) { hide ( proto , ITERATOR , $ default ) } Iterators [ NAME ] = $ default ; Iterators [ TAG ] = returnThis ; if ( DEFAULT ) { methods = { values : DEF_VALUES ? $ default : getMethod ( VALUES ) , keys : IS_SET ? $ default : getMethod ( KEYS ) , entries : $ entries } ; if ( FORCED ) for ( key in methods ) { if ( ! ( key in proto ) ) redefine ( proto , key , methods [ key ] ) } else $ export ( $ export . P + $ export . F * ( BUGGY | | VALUES_BUG ) , NAME , methods ) } return methods } } , function ( module , exports ) { " use strict " ; module . exports = false } , function ( module , exports ) { " use strict " ; module . exports = { } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var create = __webpack_require__ ( 52 ) , descriptor = __webpack_require__ ( 18 ) , setToStringTag = __webpack_require__ ( 55 ) , IteratorPrototype = { } ; __webpack_require__ ( 9 ) ( IteratorPrototype , __webpack_require__ ( 45 ) ( " iterator " ) , function ( ) { return this } ) ; module . exports = function ( Constructor , NAME , next ) { Constructor . prototype = create ( IteratorPrototype , { next : descriptor ( 1 , next ) } ) ; setToStringTag ( Constructor , NAME + " Iterator " ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var anObject = __webpack_require__ ( 11 ) , dPs = __webpack_require__ ( 53 ) , enumBugKeys = __webpack_require__ ( 37 ) , IE_PROTO = __webpack_require__ ( 35 ) ( " IE_PROTO " ) , Empty = function Empty ( ) { } , PROTOTYPE = " prototype " ; var _createDict = function createDict ( ) { var iframe = __webpack_require__ ( 16 ) ( " iframe " ) , i = enumBugKeys . length , gt = " > " , iframeDocument ; iframe . style . display = " none " ; __webpack_require__ ( 54 ) . appendChild ( iframe ) ; iframe . src = " javascript : " ; iframeDocument = iframe . contentWindow . document ; iframeDocument . open ( ) ; iframeDocument . write ( " < script > document . F = Object < / script " + gt ) ; iframeDocument . close ( ) ; _createDict = iframeDocument . 
F ; while ( i - - ) { delete _createDict [ PROTOTYPE ] [ enumBugKeys [ i ] ] } return _createDict ( ) } ; module . exports = Object . create | | function create ( O , Properties ) { var result ; if ( O ! = = null ) { Empty [ PROTOTYPE ] = anObject ( O ) ; result = new Empty ; Empty [ PROTOTYPE ] = null ; result [ IE_PROTO ] = O } else result = _createDict ( ) ; return Properties = = = undefined ? result : dPs ( result , Properties ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var dP = __webpack_require__ ( 10 ) , anObject = __webpack_require__ ( 11 ) , getKeys = __webpack_require__ ( 25 ) ; module . exports = __webpack_require__ ( 14 ) ? Object . defineProperties : function defineProperties ( O , Properties ) { anObject ( O ) ; var keys = getKeys ( Properties ) , length = keys . length , i = 0 , P ; while ( length > i ) { dP . f ( O , P = keys [ i + + ] , Properties [ P ] ) } return O } } , function ( module , exports , __webpack_require__ ) { " use strict " ; module . exports = __webpack_require__ ( 7 ) . document & & document . documentElement } , function ( module , exports , __webpack_require__ ) { " use strict " ; var def = __webpack_require__ ( 10 ) . f , has = __webpack_require__ ( 20 ) , TAG = __webpack_require__ ( 45 ) ( " toStringTag " ) ; module . exports = function ( it , tag , stat ) { if ( it & & ! has ( it = stat ? it : it . prototype , TAG ) ) def ( it , TAG , { configurable : true , value : tag } ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var has = __webpack_require__ ( 20 ) , toObject = __webpack_require__ ( 40 ) , IE_PROTO = __webpack_require__ ( 35 ) ( " IE_PROTO " ) , ObjectProto = Object . prototype ; module . exports = Object . getPrototypeOf | | function ( O ) { O = toObject ( O ) ; if ( has ( O , IE_PROTO ) ) return O [ IE_PROTO ] ; if ( typeof O . constructor = = " function " & & O instanceof O . constructor ) { return O . constructor . prototype } return O instanceof Object ? ObjectProto : null } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var $ iterators = __webpack_require__ ( 58 ) , redefine = __webpack_require__ ( 19 ) , global = __webpack_require__ ( 7 ) , hide = __webpack_require__ ( 9 ) , Iterators = __webpack_require__ ( 50 ) , wks = __webpack_require__ ( 45 ) , ITERATOR = wks ( " iterator " ) , TO_STRING_TAG = wks ( " toStringTag " ) , ArrayValues = Iterators . Array ; for ( var collections = [ " NodeList " , " DOMTokenList " , " MediaList " , " StyleSheetList " , " CSSRuleList " ] , i = 0 ; i < 5 ; i + + ) { var NAME = collections [ i ] , Collection = global [ NAME ] , proto = Collection & & Collection . prototype , key ; if ( proto ) { if ( ! proto [ ITERATOR ] ) hide ( proto , ITERATOR , ArrayValues ) ; if ( ! proto [ TO_STRING_TAG ] ) hide ( proto , TO_STRING_TAG , NAME ) ; Iterators [ NAME ] = ArrayValues ; for ( key in $ iterators ) { if ( ! proto [ key ] ) redefine ( proto , key , $ iterators [ key ] , true ) } } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var addToUnscopables = __webpack_require__ ( 59 ) , step = __webpack_require__ ( 60 ) , Iterators = __webpack_require__ ( 50 ) , toIObject = __webpack_require__ ( 27 ) ; module . exports = __webpack_require__ ( 48 ) ( Array , " Array " , function ( iterated , kind ) { this . _t = toIObject ( iterated ) ; this . _i = 0 ; this . _k = kind } , function ( ) { var O = this . _t , kind = this . _k , index = this . _i + + ; if ( ! O | | index > = O . length ) { this . 
_t = undefined ; return step ( 1 ) } if ( kind = = " keys " ) return step ( 0 , index ) ; if ( kind = = " values " ) return step ( 0 , O [ index ] ) ; return step ( 0 , [ index , O [ index ] ] ) } , " values " ) ; Iterators . Arguments = Iterators . Array ; addToUnscopables ( " keys " ) ; addToUnscopables ( " values " ) ; addToUnscopables ( " entries " ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; var UNSCOPABLES = __webpack_require__ ( 45 ) ( " unscopables " ) , ArrayProto = Array . prototype ; if ( ArrayProto [ UNSCOPABLES ] = = undefined ) __webpack_require__ ( 9 ) ( ArrayProto , UNSCOPABLES , { } ) ; module . exports = function ( key ) { ArrayProto [ UNSCOPABLES ] [ key ] = true } } , function ( module , exports ) { " use strict " ; module . exports = function ( done , value ) { return { value : value , done : ! ! done } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var LIBRARY = __webpack_require__ ( 49 ) , global = __webpack_require__ ( 7 ) , ctx = __webpack_require__ ( 22 ) , classof = __webpack_require__ ( 44 ) , $ export = __webpack_require__ ( 6 ) , isObject = __webpack_require__ ( 12 ) , anObject = __webpack_require__ ( 11 ) , aFunction = __webpack_require__ ( 23 ) , anInstance = __webpack_require__ ( 62 ) , forOf = __webpack_require__ ( 63 ) , setProto = __webpack_require__ ( 67 ) . set , speciesConstructor = __webpack_require__ ( 69 ) , task = __webpack_require__ ( 70 ) . set , microtask = __webpack_require__ ( 72 ) ( ) , PROMISE = " Promise " , TypeError = global . TypeError , process = global . process , $ Promise = global [ PROMISE ] , process = global . process , isNode = classof ( process ) = = " process " , empty = function empty ( ) { } , Internal , GenericPromiseCapability , Wrapper ; var USE_NATIVE = ! ! function ( ) { try { var promise = $ Promise . resolve ( 1 ) , FakePromise = ( promise . constructor = { } ) [ __webpack_require__ ( 45 ) ( " species " ) ] = function ( exec ) { exec ( empty , empty ) } ; return ( isNode | | typeof PromiseRejectionEvent = = " function " ) & & promise . then ( empty ) instanceof FakePromise } catch ( e ) { } } ( ) ; var sameConstructor = function sameConstructor ( a , b ) { return a = = = b | | a = = = $ Promise & & b = = = Wrapper } ; var isThenable = function isThenable ( it ) { var then ; return isObject ( it ) & & typeof ( then = it . then ) = = " function " ? then : false } ; var newPromiseCapability = function newPromiseCapability ( C ) { return sameConstructor ( $ Promise , C ) ? new PromiseCapability ( C ) : new GenericPromiseCapability ( C ) } ; var PromiseCapability = GenericPromiseCapability = function GenericPromiseCapability ( C ) { var resolve , reject ; this . promise = new C ( function ( $ $ resolve , $ $ reject ) { if ( resolve ! = = undefined | | reject ! = = undefined ) throw TypeError ( " Bad Promise constructor " ) ; resolve = $ $ resolve ; reject = $ $ reject } ) ; this . resolve = aFunction ( resolve ) ; this . reject = aFunction ( reject ) } ; var perform = function perform ( exec ) { try { exec ( ) } catch ( e ) { return { error : e } } } ; var notify = function notify ( promise , isReject ) { if ( promise . _n ) return ; promise . _n = true ; var chain = promise . _c ; microtask ( function ( ) { var value = promise . _v , ok = promise . _s = = 1 , i = 0 ; var run = function run ( reaction ) { var handler = ok ? reaction . ok : reaction . fail , resolve = reaction . resolve , reject = reaction . reject , domain = reaction . 
domain , result , then ; try { if ( handler ) { if ( ! ok ) { if ( promise . _h = = 2 ) onHandleUnhandled ( promise ) ; promise . _h = 1 } if ( handler = = = true ) result = value ; else { if ( domain ) domain . enter ( ) ; result = handler ( value ) ; if ( domain ) domain . exit ( ) } if ( result = = = reaction . promise ) { reject ( TypeError ( " Promise - chain cycle " ) ) } else if ( then = isThenable ( result ) ) { then . call ( result , resolve , reject ) } else resolve ( result ) } else reject ( value ) } catch ( e ) { reject ( e ) } } ; while ( chain . length > i ) { run ( chain [ i + + ] ) } promise . _c = [ ] ; promise . _n = false ; if ( isReject & & ! promise . _h ) onUnhandled ( promise ) } ) } ; var onUnhandled = function onUnhandled ( promise ) { task . call ( global , function ( ) { var value = promise . _v , abrupt , handler , console ; if ( isUnhandled ( promise ) ) { abrupt = perform ( function ( ) { if ( isNode ) { process . emit ( " unhandledRejection " , value , promise ) } else if ( handler = global . onunhandledrejection ) { handler ( { promise : promise , reason : value } ) } else if ( ( console = global . console ) & & console . error ) { console . error ( " Unhandled promise rejection " , value ) } } ) ; promise . _h = isNode | | isUnhandled ( promise ) ? 2 : 1 } promise . _a = undefined ; if ( abrupt ) throw abrupt . error } ) } ; var isUnhandled = function isUnhandled ( promise ) { if ( promise . _h = = 1 ) return false ; var chain = promise . _a | | promise . _c , i = 0 , reaction ; while ( chain . length > i ) { reaction = chain [ i + + ] ; if ( reaction . fail | | ! isUnhandled ( reaction . promise ) ) return false } return true } ; var onHandleUnhandled = function onHandleUnhandled ( promise ) { task . call ( global , function ( ) { var handler ; if ( isNode ) { process . emit ( " rejectionHandled " , promise ) } else if ( handler = global . onrejectionhandled ) { handler ( { promise : promise , reason : promise . _v } ) } } ) } ; var $ reject = function $ reject ( value ) { var promise = this ; if ( promise . _d ) return ; promise . _d = true ; promise = promise . _w | | promise ; promise . _v = value ; promise . _s = 2 ; if ( ! promise . _a ) promise . _a = promise . _c . slice ( ) ; notify ( promise , true ) } ; var $ resolve = function $ resolve ( value ) { var promise = this , then ; if ( promise . _d ) return ; promise . _d = true ; promise = promise . _w | | promise ; try { if ( promise = = = value ) throw TypeError ( " Promise can ' t be resolved itself " ) ; if ( then = isThenable ( value ) ) { microtask ( function ( ) { var wrapper = { _w : promise , _d : false } ; try { then . call ( value , ctx ( $ resolve , wrapper , 1 ) , ctx ( $ reject , wrapper , 1 ) ) } catch ( e ) { $ reject . call ( wrapper , e ) } } ) } else { promise . _v = value ; promise . _s = 1 ; notify ( promise , false ) } } catch ( e ) { $ reject . call ( { _w : promise , _d : false } , e ) } } ; if ( ! USE_NATIVE ) { $ Promise = function Promise ( executor ) { anInstance ( this , $ Promise , PROMISE , " _h " ) ; aFunction ( executor ) ; Internal . call ( this ) ; try { executor ( ctx ( $ resolve , this , 1 ) , ctx ( $ reject , this , 1 ) ) } catch ( err ) { $ reject . call ( this , err ) } } ; Internal = function Promise ( executor ) { this . _c = [ ] ; this . _a = undefined ; this . _s = 0 ; this . _d = false ; this . _v = undefined ; this . _h = 0 ; this . _n = false } ; Internal . prototype = __webpack_require__ ( 73 ) ( $ Promise . 
prototype , { then : function then ( onFulfilled , onRejected ) { var reaction = newPromiseCapability ( speciesConstructor ( this , $ Promise ) ) ; reaction . ok = typeof onFulfilled = = " function " ? onFulfilled : true ; reaction . fail = typeof onRejected = = " function " & & onRejected ; reaction . domain = isNode ? process . domain : undefined ; this . _c . push ( reaction ) ; if ( this . _a ) this . _a . push ( reaction ) ; if ( this . _s ) notify ( this , false ) ; return reaction . promise } , " catch " : function _catch ( onRejected ) { return this . then ( undefined , onRejected ) } } ) ; PromiseCapability = function PromiseCapability ( ) { var promise = new Internal ; this . promise = promise ; this . resolve = ctx ( $ resolve , promise , 1 ) ; this . reject = ctx ( $ reject , promise , 1 ) } } $ export ( $ export . G + $ export . W + $ export . F * ! USE_NATIVE , { Promise : $ Promise } ) ; __webpack_require__ ( 55 ) ( $ Promise , PROMISE ) ; __webpack_require__ ( 74 ) ( PROMISE ) ; Wrapper = __webpack_require__ ( 8 ) [ PROMISE ] ; $ export ( $ export . S + $ export . F * ! USE_NATIVE , PROMISE , { reject : function reject ( r ) { var capability = newPromiseCapability ( this ) , $ $ reject = capability . reject ; $ $ reject ( r ) ; return capability . promise } } ) ; $ export ( $ export . S + $ export . F * ( LIBRARY | | ! USE_NATIVE ) , PROMISE , { resolve : function resolve ( x ) { if ( x instanceof $ Promise & & sameConstructor ( x . constructor , this ) ) return x ; var capability = newPromiseCapability ( this ) , $ $ resolve = capability . resolve ; $ $ resolve ( x ) ; return capability . promise } } ) ; $ export ( $ export . S + $ export . F * ! ( USE_NATIVE & & __webpack_require__ ( 75 ) ( function ( iter ) { $ Promise . all ( iter ) [ " catch " ] ( empty ) } ) ) , PROMISE , { all : function all ( iterable ) { var C = this , capability = newPromiseCapability ( C ) , resolve = capability . resolve , reject = capability . reject ; var abrupt = perform ( function ( ) { var values = [ ] , index = 0 , remaining = 1 ; forOf ( iterable , false , function ( promise ) { var $ index = index + + , alreadyCalled = false ; values . push ( undefined ) ; remaining + + ; C . resolve ( promise ) . then ( function ( value ) { if ( alreadyCalled ) return ; alreadyCalled = true ; values [ $ index ] = value ; - - remaining | | resolve ( values ) } , reject ) } ) ; - - remaining | | resolve ( values ) } ) ; if ( abrupt ) reject ( abrupt . error ) ; return capability . promise } , race : function race ( iterable ) { var C = this , capability = newPromiseCapability ( C ) , reject = capability . reject ; var abrupt = perform ( function ( ) { forOf ( iterable , false , function ( promise ) { C . resolve ( promise ) . then ( capability . resolve , reject ) } ) } ) ; if ( abrupt ) reject ( abrupt . error ) ; return capability . promise } } ) } , function ( module , exports ) { " use strict " ; module . exports = function ( it , Constructor , name , forbiddenField ) { if ( ! ( it instanceof Constructor ) | | forbiddenField ! = = undefined & & forbiddenField in it ) { throw TypeError ( name + " : incorrect invocation ! " ) } return it } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var ctx = __webpack_require__ ( 22 ) , call = __webpack_require__ ( 64 ) , isArrayIter = __webpack_require__ ( 65 ) , anObject = __webpack_require__ ( 11 ) , toLength = __webpack_require__ ( 32 ) , getIterFn = __webpack_require__ ( 66 ) , BREAK = { } , RETURN = { } ; var _exports = module . 
exports = function ( iterable , entries , fn , that , ITERATOR ) { var iterFn = ITERATOR ? function ( ) { return iterable } : getIterFn ( iterable ) , f = ctx ( fn , that , entries ? 2 : 1 ) , index = 0 , length , step , iterator , result ; if ( typeof iterFn ! = " function " ) throw TypeError ( iterable + " is not iterable ! " ) ; if ( isArrayIter ( iterFn ) ) for ( length = toLength ( iterable . length ) ; length > index ; index + + ) { result = entries ? f ( anObject ( step = iterable [ index ] ) [ 0 ] , step [ 1 ] ) : f ( iterable [ index ] ) ; if ( result = = = BREAK | | result = = = RETURN ) return result } else for ( iterator = iterFn . call ( iterable ) ; ! ( step = iterator . next ( ) ) . done ; ) { result = call ( iterator , f , step . value , entries ) ; if ( result = = = BREAK | | result = = = RETURN ) return result } } ; _exports . BREAK = BREAK ; _exports . RETURN = RETURN } , function ( module , exports , __webpack_require__ ) { " use strict " ; var anObject = __webpack_require__ ( 11 ) ; module . exports = function ( iterator , fn , value , entries ) { try { return entries ? fn ( anObject ( value ) [ 0 ] , value [ 1 ] ) : fn ( value ) } catch ( e ) { var ret = iterator [ " return " ] ; if ( ret ! = = undefined ) anObject ( ret . call ( iterator ) ) ; throw e } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var Iterators = __webpack_require__ ( 50 ) , ITERATOR = __webpack_require__ ( 45 ) ( " iterator " ) , ArrayProto = Array . prototype ; module . exports = function ( it ) { return it ! = = undefined & & ( Iterators . Array = = = it | | ArrayProto [ ITERATOR ] = = = it ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var classof = __webpack_require__ ( 44 ) , ITERATOR = __webpack_require__ ( 45 ) ( " iterator " ) , Iterators = __webpack_require__ ( 50 ) ; module . exports = __webpack_require__ ( 8 ) . getIteratorMethod = function ( it ) { if ( it ! = undefined ) return it [ ITERATOR ] | | it [ " @ @ iterator " ] | | Iterators [ classof ( it ) ] } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var isObject = __webpack_require__ ( 12 ) , anObject = __webpack_require__ ( 11 ) ; var check = function check ( O , proto ) { anObject ( O ) ; if ( ! isObject ( proto ) & & proto ! = = null ) throw TypeError ( proto + " : can ' t set as prototype ! " ) } ; module . exports = { set : Object . setPrototypeOf | | ( " __proto__ " in { } ? function ( test , buggy , set ) { try { set = __webpack_require__ ( 22 ) ( Function . call , __webpack_require__ ( 68 ) . f ( Object . prototype , " __proto__ " ) . set , 2 ) ; set ( test , [ ] ) ; buggy = ! ( test instanceof Array ) } catch ( e ) { buggy = true } return function setPrototypeOf ( O , proto ) { check ( O , proto ) ; if ( buggy ) O . __proto__ = proto ; else set ( O , proto ) ; return O } } ( { } , false ) : undefined ) , check : check } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var pIE = __webpack_require__ ( 39 ) , createDesc = __webpack_require__ ( 18 ) , toIObject = __webpack_require__ ( 27 ) , toPrimitive = __webpack_require__ ( 17 ) , has = __webpack_require__ ( 20 ) , IE8_DOM_DEFINE = __webpack_require__ ( 13 ) , gOPD = Object . getOwnPropertyDescriptor ; <nl> - exports . f = __webpack_require__ ( 14 ) ? 
gOPD : function getOwnPropertyDescriptor ( O , P ) { O = toIObject ( O ) ; P = toPrimitive ( P , true ) ; if ( IE8_DOM_DEFINE ) try { return gOPD ( O , P ) } catch ( e ) { } if ( has ( O , P ) ) return createDesc ( ! pIE . f . call ( O , P ) , O [ P ] ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var anObject = __webpack_require__ ( 11 ) , aFunction = __webpack_require__ ( 23 ) , SPECIES = __webpack_require__ ( 45 ) ( " species " ) ; module . exports = function ( O , D ) { var C = anObject ( O ) . constructor , S ; return C = = = undefined | | ( S = anObject ( C ) [ SPECIES ] ) = = undefined ? D : aFunction ( S ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var ctx = __webpack_require__ ( 22 ) , invoke = __webpack_require__ ( 71 ) , html = __webpack_require__ ( 54 ) , cel = __webpack_require__ ( 16 ) , global = __webpack_require__ ( 7 ) , process = global . process , setTask = global . setImmediate , clearTask = global . clearImmediate , MessageChannel = global . MessageChannel , counter = 0 , queue = { } , ONREADYSTATECHANGE = " onreadystatechange " , defer , channel , port ; var run = function run ( ) { var id = + this ; if ( queue . hasOwnProperty ( id ) ) { var fn = queue [ id ] ; delete queue [ id ] ; fn ( ) } } ; var listener = function listener ( event ) { run . call ( event . data ) } ; if ( ! setTask | | ! clearTask ) { setTask = function setImmediate ( fn ) { var args = [ ] , i = 1 ; while ( arguments . length > i ) { args . push ( arguments [ i + + ] ) } queue [ + + counter ] = function ( ) { invoke ( typeof fn = = " function " ? fn : Function ( fn ) , args ) } ; defer ( counter ) ; return counter } ; clearTask = function clearImmediate ( id ) { delete queue [ id ] } ; if ( __webpack_require__ ( 29 ) ( process ) = = " process " ) { defer = function defer ( id ) { process . nextTick ( ctx ( run , id , 1 ) ) } } else if ( MessageChannel ) { channel = new MessageChannel ; port = channel . port2 ; channel . port1 . onmessage = listener ; defer = ctx ( port . postMessage , port , 1 ) } else if ( global . addEventListener & & typeof postMessage = = " function " & & ! global . importScripts ) { defer = function defer ( id ) { global . postMessage ( id + " " , " * " ) } ; global . addEventListener ( " message " , listener , false ) } else if ( ONREADYSTATECHANGE in cel ( " script " ) ) { defer = function defer ( id ) { html . appendChild ( cel ( " script " ) ) [ ONREADYSTATECHANGE ] = function ( ) { html . removeChild ( this ) ; run . call ( id ) } } } else { defer = function defer ( id ) { setTimeout ( ctx ( run , id , 1 ) , 0 ) } } } module . exports = { set : setTask , clear : clearTask } } , function ( module , exports ) { " use strict " ; module . exports = function ( fn , args , that ) { var un = that = = = undefined ; switch ( args . length ) { case 0 : return un ? fn ( ) : fn . call ( that ) ; case 1 : return un ? fn ( args [ 0 ] ) : fn . call ( that , args [ 0 ] ) ; case 2 : return un ? fn ( args [ 0 ] , args [ 1 ] ) : fn . call ( that , args [ 0 ] , args [ 1 ] ) ; case 3 : return un ? fn ( args [ 0 ] , args [ 1 ] , args [ 2 ] ) : fn . call ( that , args [ 0 ] , args [ 1 ] , args [ 2 ] ) ; case 4 : return un ? fn ( args [ 0 ] , args [ 1 ] , args [ 2 ] , args [ 3 ] ) : fn . call ( that , args [ 0 ] , args [ 1 ] , args [ 2 ] , args [ 3 ] ) } return fn . 
apply ( that , args ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var global = __webpack_require__ ( 7 ) , macrotask = __webpack_require__ ( 70 ) . set , Observer = global . MutationObserver | | global . WebKitMutationObserver , process = global . process , Promise = global . Promise , isNode = __webpack_require__ ( 29 ) ( process ) = = " process " ; module . exports = function ( ) { var head , last , notify ; var flush = function flush ( ) { var parent , fn ; if ( isNode & & ( parent = process . domain ) ) parent . exit ( ) ; while ( head ) { fn = head . fn ; head = head . next ; try { fn ( ) } catch ( e ) { if ( head ) notify ( ) ; else last = undefined ; throw e } } last = undefined ; if ( parent ) parent . enter ( ) } ; if ( isNode ) { notify = function notify ( ) { process . nextTick ( flush ) } } else if ( Observer ) { var toggle = true , node = document . createTextNode ( " " ) ; new Observer ( flush ) . observe ( node , { characterData : true } ) ; notify = function notify ( ) { node . data = toggle = ! toggle } } else if ( Promise & & Promise . resolve ) { var promise = Promise . resolve ( ) ; notify = function notify ( ) { promise . then ( flush ) } } else { notify = function notify ( ) { macrotask . call ( global , flush ) } } return function ( fn ) { var task = { fn : fn , next : undefined } ; if ( last ) last . next = task ; if ( ! head ) { head = task ; notify ( ) } last = task } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var redefine = __webpack_require__ ( 19 ) ; module . exports = function ( target , src , safe ) { for ( var key in src ) { redefine ( target , key , src [ key ] , safe ) } return target } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var global = __webpack_require__ ( 7 ) , dP = __webpack_require__ ( 10 ) , DESCRIPTORS = __webpack_require__ ( 14 ) , SPECIES = __webpack_require__ ( 45 ) ( " species " ) ; module . exports = function ( KEY ) { var C = global [ KEY ] ; if ( DESCRIPTORS & & C & & ! C [ SPECIES ] ) dP . f ( C , SPECIES , { configurable : true , get : function get ( ) { return this } } ) } } , function ( module , exports , __webpack_require__ ) { " use strict " ; var ITERATOR = __webpack_require__ ( 45 ) ( " iterator " ) , SAFE_CLOSING = false ; try { var riter = [ 7 ] [ ITERATOR ] ( ) ; riter [ " return " ] = function ( ) { SAFE_CLOSING = true } ; Array . from ( riter , function ( ) { throw 2 } ) } catch ( e ) { } module . exports = function ( exec , skipClosing ) { if ( ! skipClosing & & ! SAFE_CLOSING ) return false ; var safe = false ; try { var arr = [ 7 ] , iter = arr [ ITERATOR ] ( ) ; iter . next = function ( ) { return { done : safe = true } } ; arr [ ITERATOR ] = function ( ) { return iter } ; exec ( arr ) } catch ( e ) { } return safe } } , function ( module , exports ) { ( function ( global ) { " use strict " ; function _toConsumableArray ( arr ) { if ( Array . isArray ( arr ) ) { for ( var i = 0 , arr2 = Array ( arr . length ) ; i < arr . length ; i + + ) { arr2 [ i ] = arr [ i ] } return arr2 } else { return Array . from ( arr ) } } var _global = global ; var console = _global . console ; var nativeLog = _global . nativeLog ; var LEVELS = [ " error " , " warn " , " info " , " log " , " debug " ] ; var levelMap = { } ; generateLevelMap ( ) ; if ( typeof console = = = " undefined " | | global . WXEnvironment & & global . WXEnvironment . platform = = = " iOS " ) { global . console = { debug : function debug ( ) { for ( var _len = arguments . 
length , args = Array ( _len ) , _key = 0 ; _key < _len ; _key + + ) { args [ _key ] = arguments [ _key ] } if ( checkLevel ( " debug " ) ) { nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ " __DEBUG " ] ) ) } } , log : function log ( ) { for ( var _len2 = arguments . length , args = Array ( _len2 ) , _key2 = 0 ; _key2 < _len2 ; _key2 + + ) { args [ _key2 ] = arguments [ _key2 ] } if ( checkLevel ( " log " ) ) { nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ " __LOG " ] ) ) } } , info : function info ( ) { for ( var _len3 = arguments . length , args = Array ( _len3 ) , _key3 = 0 ; _key3 < _len3 ; _key3 + + ) { args [ _key3 ] = arguments [ _key3 ] } if ( checkLevel ( " info " ) ) { nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ " __INFO " ] ) ) } } , warn : function warn ( ) { for ( var _len4 = arguments . length , args = Array ( _len4 ) , _key4 = 0 ; _key4 < _len4 ; _key4 + + ) { args [ _key4 ] = arguments [ _key4 ] } if ( checkLevel ( " warn " ) ) { nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ " __WARN " ] ) ) } } , error : function error ( ) { for ( var _len5 = arguments . length , args = Array ( _len5 ) , _key5 = 0 ; _key5 < _len5 ; _key5 + + ) { args [ _key5 ] = arguments [ _key5 ] } if ( checkLevel ( " error " ) ) { nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ " __ERROR " ] ) ) } } } } else { var debug = console . debug ; var log = console . log ; var info = console . info ; var warn = console . warn ; var error = console . error ; console . __ori__ = { debug : debug , log : log , info : info , warn : warn , error : error } ; console . debug = function ( ) { for ( var _len6 = arguments . length , args = Array ( _len6 ) , _key6 = 0 ; _key6 < _len6 ; _key6 + + ) { args [ _key6 ] = arguments [ _key6 ] } if ( checkLevel ( " debug " ) ) { console . __ori__ . debug . apply ( console , args ) } } ; console . log = function ( ) { for ( var _len7 = arguments . length , args = Array ( _len7 ) , _key7 = 0 ; _key7 < _len7 ; _key7 + + ) { args [ _key7 ] = arguments [ _key7 ] } if ( checkLevel ( " log " ) ) { console . __ori__ . log . apply ( console , args ) } } ; console . info = function ( ) { for ( var _len8 = arguments . length , args = Array ( _len8 ) , _key8 = 0 ; _key8 < _len8 ; _key8 + + ) { args [ _key8 ] = arguments [ _key8 ] } if ( checkLevel ( " info " ) ) { console . __ori__ . info . apply ( console , args ) } } ; console . warn = function ( ) { for ( var _len9 = arguments . length , args = Array ( _len9 ) , _key9 = 0 ; _key9 < _len9 ; _key9 + + ) { args [ _key9 ] = arguments [ _key9 ] } if ( checkLevel ( " warn " ) ) { console . __ori__ . warn . apply ( console , args ) } } ; console . error = function ( ) { for ( var _len10 = arguments . length , args = Array ( _len10 ) , _key10 = 0 ; _key10 < _len10 ; _key10 + + ) { args [ _key10 ] = arguments [ _key10 ] } if ( checkLevel ( " error " ) ) { console . __ori__ . error . apply ( console , args ) } } } function generateLevelMap ( ) { LEVELS . forEach ( function ( level ) { var levelIndex = LEVELS . indexOf ( level ) ; levelMap [ level ] = { } ; LEVELS . forEach ( function ( type ) { var typeIndex = LEVELS . indexOf ( type ) ; if ( typeIndex < = levelIndex ) { levelMap [ level ] [ type ] = true } } ) } ) } function normalize ( v ) { var type = Object . prototype . toString . call ( v ) ; if ( type . 
toLowerCase ( ) = = = " [ object object ] " ) { v = JSON . stringify ( v ) } else { v = String ( v ) } return v } function checkLevel ( type ) { var logLevel = global . WXEnvironment & & global . WXEnvironment . logLevel | | " log " ; return levelMap [ logLevel ] & & levelMap [ logLevel ] [ type ] } function format ( args ) { return args . map ( function ( v ) { return normalize ( v ) } ) } } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . createInstance = createInstance ; var _frameworks = __webpack_require__ ( 78 ) ; var _frameworks2 = _interopRequireDefault ( _frameworks ) ; var _vdom = __webpack_require__ ( 100 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } var config = { Document : _vdom . Document , Element : _vdom . Element , Comment : _vdom . Comment , sendTasks : function sendTasks ( ) { var _global ; return ( _global = global ) . callNative . apply ( _global , arguments ) } } ; for ( var name in _frameworks2 . default ) { var framework = _frameworks2 . default [ name ] ; framework . init ( config ) } var versionRegExp = / ^ \ / \ / * ( \ { [ ^ \ } ] * \ } ) * \ r ? \ n / ; function checkVersion ( code ) { var info = void 0 ; var result = versionRegExp . exec ( code ) ; if ( result ) { try { info = JSON . parse ( result [ 1 ] ) } catch ( e ) { } } return info } var instanceMap = { } ; function createInstance ( id , code , config , data ) { var info = instanceMap [ id ] ; if ( ! info ) { info = checkVersion ( code ) | | { } ; if ( ! _frameworks2 . default [ info . framework ] ) { info . framework = " Weex " } instanceMap [ id ] = info ; config = config | | { } ; config . bundleVersion = info . version ; return _frameworks2 . default [ info . framework ] . createInstance ( id , code , config , data ) } return new Error ( ' invalid instance id " ' + id + ' " ' ) } var methods = { createInstance : createInstance } ; function genInit ( methodName ) { methods [ methodName ] = function ( ) { for ( var _name in _frameworks2 . default ) { var _framework = _frameworks2 . default [ _name ] ; if ( _framework & & _framework [ methodName ] ) { _framework [ methodName ] . apply ( _framework , arguments ) } } } } [ " registerComponents " , " registerModules " , " registerMethods " ] . forEach ( genInit ) ; function genInstance ( methodName ) { methods [ methodName ] = function ( ) { var id = arguments . length < = 0 ? undefined : arguments [ 0 ] ; var info = instanceMap [ id ] ; if ( info & & _frameworks2 . default [ info . framework ] ) { var _frameworks $ info $ fram ; return ( _frameworks $ info $ fram = _frameworks2 . default [ info . framework ] ) [ methodName ] . apply ( _frameworks $ info $ fram , arguments ) } return new Error ( ' invalid instance id " ' + id + ' " ' ) } } [ " destroyInstance " , " refreshInstance " , " callJS " , " getRoot " ] . forEach ( genInstance ) ; methods . receiveTasks = methods . callJS ; exports . default = methods } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; var _default = __webpack_require__ ( 79 ) ; var Weex = _interopRequireWildcard ( _default ) ; function _interopRequireWildcard ( obj ) { if ( obj & & obj . 
__esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . default = obj ; return newObj } } exports . default = { Weex : Weex } } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj } ; exports . init = init ; exports . createInstance = createInstance ; exports . refreshInstance = refreshInstance ; exports . destroyInstance = destroyInstance ; exports . registerComponents = registerComponents ; exports . registerModules = registerModules ; exports . registerMethods = registerMethods ; exports . getRoot = getRoot ; exports . callJS = callJS ; var _config = __webpack_require__ ( 80 ) ; var _config2 = _interopRequireDefault ( _config ) ; var _app = __webpack_require__ ( 81 ) ; var _app2 = _interopRequireDefault ( _app ) ; var _vm = __webpack_require__ ( 86 ) ; var _vm2 = _interopRequireDefault ( _vm ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } function _toConsumableArray ( arr ) { if ( Array . isArray ( arr ) ) { for ( var i = 0 , arr2 = Array ( arr . length ) ; i < arr . length ; i + + ) { arr2 [ i ] = arr [ i ] } return arr2 } else { return Array . from ( arr ) } } var nativeComponentMap = _config2 . default . nativeComponentMap ; var instanceMap = { } ; function init ( cfg ) { _config2 . default . Document = cfg . Document ; _config2 . default . Element = cfg . Element ; _config2 . default . Comment = cfg . Comment ; _config2 . default . sendTasks = cfg . sendTasks } function createInstance ( instanceId , code , options , data ) { var instance = instanceMap [ instanceId ] ; options = options | | { } ; _config2 . default . debug = options . debug ; var result = void 0 ; if ( ! instance ) { instance = new _app2 . default ( instanceId , options ) ; instanceMap [ instanceId ] = instance ; result = instance . init ( code , data ) } else { result = new Error ( ' invalid instance id " ' + instanceId + ' " ' ) } return result } function refreshInstance ( instanceId , data ) { var instance = instanceMap [ instanceId ] ; var result = void 0 ; if ( instance ) { result = instance . refreshData ( data ) } else { result = new Error ( ' invalid instance id " ' + instanceId + ' " ' ) } return result } function destroyInstance ( instanceId ) { var instance = instanceMap [ instanceId ] ; if ( ! instance ) { return new Error ( ' invalid instance id " ' + instanceId + ' " ' ) } instance . destroy ( ) ; delete instanceMap [ instanceId ] ; return instanceMap } function registerComponents ( components ) { if ( Array . isArray ( components ) ) { components . forEach ( function register ( name ) { if ( ! name ) { return } if ( typeof name = = = " string " ) { nativeComponentMap [ name ] = true } else if ( ( typeof name = = = " undefined " ? " undefined " : _typeof ( name ) ) = = = " object " & & typeof name . type = = = " string " ) { nativeComponentMap [ name . type ] = name } } ) } } function registerModules ( modules ) { if ( ( typeof modules = = = " undefined " ? " undefined " : _typeof ( modules ) ) = = = " object " ) { _vm2 . default . 
registerModules ( modules ) } } function registerMethods ( apis ) { if ( ( typeof apis = = = " undefined " ? " undefined " : _typeof ( apis ) ) = = = " object " ) { _vm2 . default . registerMethods ( apis ) } } function getRoot ( instanceId ) { var instance = instanceMap [ instanceId ] ; var result = void 0 ; if ( instance ) { result = instance . getRootElement ( ) } else { result = new Error ( ' invalid instance id " ' + instanceId + ' " ' ) } return result } var jsHandlers = { fireEvent : function fireEvent ( instanceId , ref , type , data , domChanges ) { var instance = instanceMap [ instanceId ] ; return instance . fireEvent ( ref , type , data , domChanges ) } , callback : function callback ( instanceId , funcId , data , ifLast ) { var instance = instanceMap [ instanceId ] ; return instance . callback ( funcId , data , ifLast ) } } ; function callJS ( instanceId , tasks ) { var instance = instanceMap [ instanceId ] ; if ( instance & & Array . isArray ( tasks ) ) { var _ret = function ( ) { var results = [ ] ; tasks . forEach ( function ( task ) { var handler = jsHandlers [ task . method ] ; var args = [ ] . concat ( _toConsumableArray ( task . args ) ) ; if ( typeof handler = = = " function " ) { args . unshift ( instanceId ) ; results . push ( handler . apply ( undefined , _toConsumableArray ( args ) ) ) } } ) ; return { v : results } } ( ) ; if ( ( typeof _ret = = = " undefined " ? " undefined " : _typeof ( _ret ) ) = = = " object " ) return _ret . v } return new Error ( ' invalid instance id " ' + instanceId + ' " or tasks ' ) } } , function ( module , exports ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . default = { nativeComponentMap : { text : true , image : true , container : true , slider : { type : " slider " , append : " tree " } , cell : { type : " cell " , append : " tree " } } , customComponentMap : { } , debug : false } } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . default = AppInstance ; var _util = __webpack_require__ ( 82 ) ; var _bundle = __webpack_require__ ( 83 ) ; var bundle = _interopRequireWildcard ( _bundle ) ; var _ctrl = __webpack_require__ ( 98 ) ; var ctrl = _interopRequireWildcard ( _ctrl ) ; var _differ = __webpack_require__ ( 99 ) ; var _differ2 = _interopRequireDefault ( _differ ) ; var _config = __webpack_require__ ( 80 ) ; var _config2 = _interopRequireDefault ( _config ) ; var _register = __webpack_require__ ( 96 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . default = obj ; return newObj } } function AppInstance ( instanceId , options ) { this . id = instanceId ; this . options = options | | { } ; this . vm = null ; this . customComponentMap = { } ; this . callbacks = { } ; this . doc = new _config2 . default . Document ( instanceId , this . options . bundleUrl ) ; this . differ = new _differ2 . default ( instanceId ) ; this . uid = 0 } function normalize ( app , v ) { var type = ( 0 , _util . typof ) ( v ) ; switch ( type ) { case " undefined " : case " null " : return " " ; case " regexp " : return v . toString ( ) ; case " date " : return v . 
toISOString ( ) ; case " number " : case " string " : case " boolean " : case " array " : case " object " : if ( v instanceof _config2 . default . Element ) { return v . ref } return v ; case " function " : app . callbacks [ + + app . uid ] = v ; return app . uid . toString ( ) ; default : return JSON . stringify ( v ) } } AppInstance . prototype . callTasks = function ( tasks ) { var _this = this ; if ( ( 0 , _util . typof ) ( tasks ) ! = = " array " ) { tasks = [ tasks ] } tasks . forEach ( function ( task ) { task . args = task . args . map ( function ( arg ) { return normalize ( _this , arg ) } ) } ) ; return _config2 . default . sendTasks ( this . id , tasks , " - 1 " ) } ; ( 0 , _util . extend ) ( AppInstance . prototype , bundle , ctrl , { registerComponent : _register . registerComponent , requireComponent : _register . requireComponent , requireModule : _register . requireModule } ) } , function ( module , exports ) { ( function ( global ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj } ; exports . isReserved = isReserved ; exports . def = def ; exports . remove = remove ; exports . hasOwn = hasOwn ; exports . cached = cached ; exports . bind = bind ; exports . toArray = toArray ; exports . extend = extend ; exports . isObject = isObject ; exports . isPlainObject = isPlainObject ; exports . stringify = stringify ; exports . typof = typof ; exports . normalize = normalize ; exports . error = error ; exports . warn = warn ; exports . info = info ; exports . debug = debug ; exports . log = log ; function isReserved ( str ) { var c = ( str + " " ) . charCodeAt ( 0 ) ; return c = = = 36 | | c = = = 95 } function def ( obj , key , val , enumerable ) { Object . defineProperty ( obj , key , { value : val , enumerable : ! ! enumerable , writable : true , configurable : true } ) } var hasProto = exports . hasProto = " __proto__ " in { } ; var inBrowser = exports . inBrowser = typeof window ! = = " undefined " & & Object . prototype . toString . call ( window ) ! = = " [ object Object ] " ; var devtools = exports . devtools = inBrowser & & window . __VUE_DEVTOOLS_GLOBAL_HOOK__ ; var UA = inBrowser & & window . navigator . userAgent . toLowerCase ( ) ; var isIos = UA & & / ( iphone | ipad | ipod | ios ) / i . test ( UA ) ; var isWechat = UA & & UA . indexOf ( " micromessenger " ) > 0 ; var nextTick = exports . nextTick = function ( ) { var callbacks = [ ] ; var pending = false ; var timerFunc = void 0 ; function nextTickHandler ( ) { pending = false ; var copies = callbacks . slice ( 0 ) ; callbacks = [ ] ; for ( var i = 0 ; i < copies . length ; i + + ) { copies [ i ] ( ) } } if ( typeof MutationObserver ! = = " undefined " & & ! ( isWechat & & isIos ) ) { ( function ( ) { var counter = 1 ; var observer = new MutationObserver ( nextTickHandler ) ; var textNode = document . createTextNode ( counter ) ; observer . observe ( textNode , { characterData : true } ) ; timerFunc = function timerFunc ( ) { counter = ( counter + 1 ) % 2 ; textNode . data = counter } } ) ( ) } else { var context = inBrowser ? window : typeof global ! = = " undefined " ? global : { } ; timerFunc = context . setImmediate | | setTimeout } return function ( cb , ctx ) { var func = ctx ? function ( ) { cb . 
call ( ctx ) } : cb ; callbacks . push ( func ) ; if ( pending ) return ; pending = true ; timerFunc ( nextTickHandler , 0 ) } } ( ) ; var _Set = void 0 ; if ( typeof Set ! = = " undefined " & & Set . toString ( ) . match ( / native code / ) ) { exports . _Set = _Set = Set } else { exports . _Set = _Set = function _Set ( ) { this . set = Object . create ( null ) } ; _Set . prototype . has = function ( key ) { return this . set [ key ] ! = = undefined } ; _Set . prototype . add = function ( key ) { this . set [ key ] = 1 } ; _Set . prototype . clear = function ( ) { this . set = Object . create ( null ) } } exports . _Set = _Set ; function remove ( arr , item ) { if ( arr . length ) { var index = arr . indexOf ( item ) ; if ( index > - 1 ) { return arr . splice ( index , 1 ) } } } var hasOwnProperty = Object . prototype . hasOwnProperty ; function hasOwn ( obj , key ) { return hasOwnProperty . call ( obj , key ) } function cached ( fn ) { var cache = Object . create ( null ) ; return function cachedFn ( str ) { var hit = cache [ str ] ; return hit | | ( cache [ str ] = fn ( str ) ) } } var camelizeRE = / - ( \ w ) / g ; var camelize = exports . camelize = cached ( function ( str ) { return str . replace ( camelizeRE , toUpper ) } ) ; function toUpper ( _ , c ) { return c ? c . toUpperCase ( ) : " " } var hyphenateRE = / ( [ a - z \ d ] ) ( [ A - Z ] ) / g ; var hyphenate = exports . hyphenate = cached ( function ( str ) { return str . replace ( hyphenateRE , " $ 1 - $ 2 " ) . toLowerCase ( ) } ) ; function bind ( fn , ctx ) { return function ( a ) { var l = arguments . length ; return l ? l > 1 ? fn . apply ( ctx , arguments ) : fn . call ( ctx , a ) : fn . call ( ctx ) } } function toArray ( list , start ) { start = start | | 0 ; var i = list . length - start ; var ret = new Array ( i ) ; while ( i - - ) { ret [ i ] = list [ i + start ] } return ret } function extend ( target ) { for ( var _len = arguments . length , src = Array ( _len > 1 ? _len - 1 : 0 ) , _key = 1 ; _key < _len ; _key + + ) { src [ _key - 1 ] = arguments [ _key ] } if ( typeof Object . assign = = = " function " ) { Object . assign . apply ( Object , [ target ] . concat ( src ) ) } else { var first = src . shift ( ) ; for ( var key in first ) { target [ key ] = first [ key ] } if ( src . length ) { extend . apply ( undefined , [ target ] . concat ( src ) ) } } return target } function isObject ( obj ) { return obj ! = = null & & ( typeof obj = = = " undefined " ? " undefined " : _typeof ( obj ) ) = = = " object " } var toString = Object . prototype . toString ; var OBJECT_STRING = " [ object Object ] " ; function isPlainObject ( obj ) { return toString . call ( obj ) = = = OBJECT_STRING } var isArray = exports . isArray = Array . isArray ; function stringify ( x ) { return typeof x = = = " undefined " | | x = = = null | | typeof x = = = " function " ? " " : ( typeof x = = = " undefined " ? " undefined " : _typeof ( x ) ) = = = " object " ? x instanceof RegExp ? x . toString ( ) : x instanceof Date ? JSON . parse ( JSON . stringify ( x ) ) : JSON . stringify ( x ) : x . toString ( ) } function typof ( v ) { var s = Object . prototype . toString . call ( v ) ; return s . substring ( 8 , s . length - 1 ) . toLowerCase ( ) } function normalize ( v ) { var type = typof ( v ) ; switch ( type ) { case " undefined " : case " null " : return " " ; case " regexp " : return v . toString ( ) ; case " date " : return v . 
toISOString ( ) ; case " number " : case " string " : case " boolean " : case " array " : case " object " : case " function " : return v } } var enableLog = typeof console ! = = " undefined " & & global . IS_PRODUCT ! = = true ; function error ( ) { var _console ; for ( var _len2 = arguments . length , args = Array ( _len2 ) , _key2 = 0 ; _key2 < _len2 ; _key2 + + ) { args [ _key2 ] = arguments [ _key2 ] } enableLog & & console . error & & ( _console = console ) . error . apply ( _console , [ " [ JS Framework ] " ] . concat ( args ) ) } function warn ( ) { var _console2 ; for ( var _len3 = arguments . length , args = Array ( _len3 ) , _key3 = 0 ; _key3 < _len3 ; _key3 + + ) { args [ _key3 ] = arguments [ _key3 ] } enableLog & & console . warn & & ( _console2 = console ) . warn . apply ( _console2 , [ " [ JS Framework ] " ] . concat ( args ) ) } function info ( ) { var _console3 ; for ( var _len4 = arguments . length , args = Array ( _len4 ) , _key4 = 0 ; _key4 < _len4 ; _key4 + + ) { args [ _key4 ] = arguments [ _key4 ] } enableLog & & console . info & & ( _console3 = console ) . info . apply ( _console3 , [ " [ JS Framework ] " ] . concat ( args ) ) } function debug ( ) { var _console4 ; for ( var _len5 = arguments . length , args = Array ( _len5 ) , _key5 = 0 ; _key5 < _len5 ; _key5 + + ) { args [ _key5 ] = arguments [ _key5 ] } enableLog & & console . debug & & ( _console4 = console ) . debug . apply ( _console4 , [ " [ JS Framework ] " ] . concat ( args ) ) } function log ( ) { var _console5 ; for ( var _len6 = arguments . length , args = Array ( _len6 ) , _key6 = 0 ; _key6 < _len6 ; _key6 + + ) { args [ _key6 ] = arguments [ _key6 ] } enableLog & & console . log & & ( _console5 = console ) . log . apply ( _console5 , [ " [ JS Framework ] " ] . concat ( args ) ) } } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . require = exports . define = undefined ; exports . clearCommonModules = clearCommonModules ; exports . bootstrap = bootstrap ; exports . register = register ; exports . render = render ; var _semver = __webpack_require__ ( 84 ) ; var _semver2 = _interopRequireDefault ( _semver ) ; var _util = __webpack_require__ ( 82 ) ; var _ = _interopRequireWildcard ( _util ) ; var _vm = __webpack_require__ ( 86 ) ; var _vm2 = _interopRequireDefault ( _vm ) ; var _downgrade = __webpack_require__ ( 97 ) ; var downgrade = _interopRequireWildcard ( _downgrade ) ; function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . default = obj ; return newObj } } function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } function _defineProperty ( obj , key , value ) { if ( key in obj ) { Object . defineProperty ( obj , key , { value : value , enumerable : true , configurable : true , writable : true } ) } else { obj [ key ] = value } return obj } var WEEX_COMPONENT_REG = / ^ @ weex - component \ / / ; var WEEX_MODULE_REG = / ^ @ weex - module \ / / ; var NORMAL_MODULE_REG = / ^ \ . { 1 , 2 } \ / / ; var JS_SURFIX_REG = / \ . js $ / ; var isWeexComponent = function isWeexComponent ( name ) { return ! ! name . 
match ( WEEX_COMPONENT_REG ) } ; var isWeexModule = function isWeexModule ( name ) { return ! ! name . match ( WEEX_MODULE_REG ) } ; var isNormalModule = function isNormalModule ( name ) { return ! ! name . match ( NORMAL_MODULE_REG ) } ; var isNpmModule = function isNpmModule ( name ) { return ! isWeexComponent ( name ) & & ! isWeexModule ( name ) & & ! isNormalModule ( name ) } ; function removeWeexPrefix ( str ) { return str . replace ( WEEX_COMPONENT_REG , " " ) . replace ( WEEX_MODULE_REG , " " ) } function removeJSSurfix ( str ) { return str . replace ( JS_SURFIX_REG , " " ) } var commonModules = { } ; function clearCommonModules ( ) { commonModules = { } } var define = exports . define = function define ( name , deps , factory ) { var _this = this ; _ . debug ( " define a component " , name ) ; if ( _ . typof ( deps ) = = = " function " ) { factory = deps ; deps = [ ] } var _require = function _require ( name ) { var cleanName = void 0 ; if ( isWeexComponent ( name ) ) { cleanName = removeWeexPrefix ( name ) ; return _this . requireComponent ( cleanName ) } if ( isWeexModule ( name ) ) { cleanName = removeWeexPrefix ( name ) ; return _this . requireModule ( cleanName ) } if ( isNormalModule ( name ) ) { cleanName = removeJSSurfix ( name ) ; return commonModules [ name ] } if ( isNpmModule ( name ) ) { cleanName = removeJSSurfix ( name ) ; return commonModules [ name ] } } ; var _module = { exports : { } } ; var cleanName = void 0 ; if ( isWeexComponent ( name ) ) { cleanName = removeWeexPrefix ( name ) ; factory ( _require , _module . exports , _module ) ; this . registerComponent ( cleanName , _module . exports ) } else if ( isWeexModule ( name ) ) { cleanName = removeWeexPrefix ( name ) ; factory ( _require , _module . exports , _module ) ; _vm2 . default . registerModules ( _defineProperty ( { } , cleanName , _module . exports ) ) } else if ( isNormalModule ( name ) ) { cleanName = removeJSSurfix ( name ) ; factory ( _require , _module . exports , _module ) ; commonModules [ cleanName ] = _module . exports } else if ( isNpmModule ( name ) ) { cleanName = removeJSSurfix ( name ) ; factory ( _require , _module . exports , _module ) ; var exports = _module . exports ; if ( exports . template | | exports . style | | exports . methods ) { this . registerComponent ( cleanName , exports ) } else { commonModules [ cleanName ] = _module . exports } } } ; function bootstrap ( name , config , data ) { _ . debug ( " bootstrap for " + name ) ; var cleanName = void 0 ; if ( isWeexComponent ( name ) ) { cleanName = removeWeexPrefix ( name ) } else if ( isNpmModule ( name ) ) { cleanName = removeJSSurfix ( name ) ; if ( ! this . customComponentMap [ cleanName ] ) { return new Error ( " It ' s not a component : " + name ) } } else { return new Error ( " Wrong component name : " + name ) } config = _ . isPlainObject ( config ) ? config : { } ; if ( typeof config . transformerVersion = = = " string " & & typeof global . needTransformerVersion = = = " string " & & ! _semver2 . default . satisfies ( config . transformerVersion , global . needTransformerVersion ) ) { return new Error ( " JS Bundle version : " + config . transformerVersion + " " + ( " not compatible with " + global . needTransformerVersion ) ) } var _checkDowngrade = downgrade . check ( config . downgrade ) ; if ( _checkDowngrade . isDowngrade ) { this . callTasks ( [ { module : " instanceWrap " , method : " error " , args : [ _checkDowngrade . errorType , _checkDowngrade . code , _checkDowngrade . 
errorMessage ] } ] ) ; return new Error ( " Downgrade [ " + _checkDowngrade . code + " ] : " + _checkDowngrade . errorMessage ) } this . vm = new _vm2 . default ( cleanName , null , { _app : this } , null , data ) } function register ( type , options ) { _ . warn ( " Register is deprecated , please install lastest transformer . " ) ; this . registerComponent ( type , options ) } function render ( type , data ) { _ . warn ( " Render is deprecated , please install lastest transformer . " ) ; return this . bootstrap ( type , { } , data ) } function _require2 ( type ) { var _this2 = this ; _ . warn ( " Require is deprecated , please install lastest transformer . " ) ; return function ( data ) { return _this2 . bootstrap ( type , { } , data ) } } exports . require = _require2 } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { ( function ( process ) { " use strict " ; var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj } ; exports = module . exports = SemVer ; var debug ; if ( ( typeof process = = = " undefined " ? " undefined " : _typeof ( process ) ) = = = " object " & & process . env & & process . env . NODE_DEBUG & & / \ bsemver \ b / i . test ( process . env . NODE_DEBUG ) ) debug = function debug ( ) { var args = Array . prototype . slice . call ( arguments , 0 ) ; args . unshift ( " SEMVER " ) ; console . log . apply ( console , args ) } ; else debug = function debug ( ) { } ; exports . SEMVER_SPEC_VERSION = " 2 . 0 . 0 " ; var MAX_LENGTH = 256 ; var MAX_SAFE_INTEGER = Number . MAX_SAFE_INTEGER | | 9007199254740991 ; var re = exports . re = [ ] ; var src = exports . src = [ ] ; var R = 0 ; var NUMERICIDENTIFIER = R + + ; src [ NUMERICIDENTIFIER ] = " 0 | [ 1 - 9 ] \ \ d * " ; var NUMERICIDENTIFIERLOOSE = R + + ; src [ NUMERICIDENTIFIERLOOSE ] = " [ 0 - 9 ] + " ; var NONNUMERICIDENTIFIER = R + + ; src [ NONNUMERICIDENTIFIER ] = " \ \ d * [ a - zA - Z - ] [ a - zA - Z0 - 9 - ] * " ; var MAINVERSION = R + + ; src [ MAINVERSION ] = " ( " + src [ NUMERICIDENTIFIER ] + " ) \ \ . " + " ( " + src [ NUMERICIDENTIFIER ] + " ) \ \ . " + " ( " + src [ NUMERICIDENTIFIER ] + " ) " ; var MAINVERSIONLOOSE = R + + ; src [ MAINVERSIONLOOSE ] = " ( " + src [ NUMERICIDENTIFIERLOOSE ] + " ) \ \ . " + " ( " + src [ NUMERICIDENTIFIERLOOSE ] + " ) \ \ . " + " ( " + src [ NUMERICIDENTIFIERLOOSE ] + " ) " ; var PRERELEASEIDENTIFIER = R + + ; src [ PRERELEASEIDENTIFIER ] = " ( ? : " + src [ NUMERICIDENTIFIER ] + " | " + src [ NONNUMERICIDENTIFIER ] + " ) " ; var PRERELEASEIDENTIFIERLOOSE = R + + ; src [ PRERELEASEIDENTIFIERLOOSE ] = " ( ? : " + src [ NUMERICIDENTIFIERLOOSE ] + " | " + src [ NONNUMERICIDENTIFIER ] + " ) " ; var PRERELEASE = R + + ; src [ PRERELEASE ] = " ( ? : - ( " + src [ PRERELEASEIDENTIFIER ] + " ( ? : \ \ . " + src [ PRERELEASEIDENTIFIER ] + " ) * ) ) " ; var PRERELEASELOOSE = R + + ; src [ PRERELEASELOOSE ] = " ( ? : - ? ( " + src [ PRERELEASEIDENTIFIERLOOSE ] + " ( ? : \ \ . " + src [ PRERELEASEIDENTIFIERLOOSE ] + " ) * ) ) " ; var BUILDIDENTIFIER = R + + ; src [ BUILDIDENTIFIER ] = " [ 0 - 9A - Za - z - ] + " ; var BUILD = R + + ; src [ BUILD ] = " ( ? : \ \ + ( " + src [ BUILDIDENTIFIER ] + " ( ? : \ \ . " + src [ BUILDIDENTIFIER ] + " ) * ) ) " ; var FULL = R + + ; var FULLPLAIN = " v ? 
" + src [ MAINVERSION ] + src [ PRERELEASE ] + " ? " + src [ BUILD ] + " ? " ; src [ FULL ] = " ^ " + FULLPLAIN + " $ " ; var LOOSEPLAIN = " [ v = \ \ s ] * " + src [ MAINVERSIONLOOSE ] + src [ PRERELEASELOOSE ] + " ? " + src [ BUILD ] + " ? " ; var LOOSE = R + + ; src [ LOOSE ] = " ^ " + LOOSEPLAIN + " $ " ; var GTLT = R + + ; src [ GTLT ] = " ( ( ? : < | > ) ? = ? ) " ; var XRANGEIDENTIFIERLOOSE = R + + ; src [ XRANGEIDENTIFIERLOOSE ] = src [ NUMERICIDENTIFIERLOOSE ] + " | x | X | \ \ * " ; var XRANGEIDENTIFIER = R + + ; src [ XRANGEIDENTIFIER ] = src [ NUMERICIDENTIFIER ] + " | x | X | \ \ * " ; var XRANGEPLAIN = R + + ; src [ XRANGEPLAIN ] = " [ v = \ \ s ] * ( " + src [ XRANGEIDENTIFIER ] + " ) " + " ( ? : \ \ . ( " + src [ XRANGEIDENTIFIER ] + " ) " + " ( ? : \ \ . ( " + src [ XRANGEIDENTIFIER ] + " ) " + " ( ? : " + src [ PRERELEASE ] + " ) ? " + src [ BUILD ] + " ? " + " ) ? ) ? " ; var XRANGEPLAINLOOSE = R + + ; src [ XRANGEPLAINLOOSE ] = " [ v = \ \ s ] * ( " + src [ XRANGEIDENTIFIERLOOSE ] + " ) " + " ( ? : \ \ . ( " + src [ XRANGEIDENTIFIERLOOSE ] + " ) " + " ( ? : \ \ . ( " + src [ XRANGEIDENTIFIERLOOSE ] + " ) " + " ( ? : " + src [ PRERELEASELOOSE ] + " ) ? " + src [ BUILD ] + " ? " + " ) ? ) ? " ; var XRANGE = R + + ; src [ XRANGE ] = " ^ " + src [ GTLT ] + " \ \ s * " + src [ XRANGEPLAIN ] + " $ " ; var XRANGELOOSE = R + + ; src [ XRANGELOOSE ] = " ^ " + src [ GTLT ] + " \ \ s * " + src [ XRANGEPLAINLOOSE ] + " $ " ; var LONETILDE = R + + ; src [ LONETILDE ] = " ( ? : ~ > ? ) " ; var TILDETRIM = R + + ; src [ TILDETRIM ] = " ( \ \ s * ) " + src [ LONETILDE ] + " \ \ s + " ; re [ TILDETRIM ] = new RegExp ( src [ TILDETRIM ] , " g " ) ; var tildeTrimReplace = " $ 1 ~ " ; var TILDE = R + + ; src [ TILDE ] = " ^ " + src [ LONETILDE ] + src [ XRANGEPLAIN ] + " $ " ; var TILDELOOSE = R + + ; src [ TILDELOOSE ] = " ^ " + src [ LONETILDE ] + src [ XRANGEPLAINLOOSE ] + " $ " ; var LONECARET = R + + ; src [ LONECARET ] = " ( ? : \ \ ^ ) " ; var CARETTRIM = R + + ; src [ CARETTRIM ] = " ( \ \ s * ) " + src [ LONECARET ] + " \ \ s + " ; re [ CARETTRIM ] = new RegExp ( src [ CARETTRIM ] , " g " ) ; var caretTrimReplace = " $ 1 ^ " ; var CARET = R + + ; src [ CARET ] = " ^ " + src [ LONECARET ] + src [ XRANGEPLAIN ] + " $ " ; var CARETLOOSE = R + + ; src [ CARETLOOSE ] = " ^ " + src [ LONECARET ] + src [ XRANGEPLAINLOOSE ] + " $ " ; var COMPARATORLOOSE = R + + ; src [ COMPARATORLOOSE ] = " ^ " + src [ GTLT ] + " \ \ s * ( " + LOOSEPLAIN + " ) $ | ^ $ " ; var COMPARATOR = R + + ; src [ COMPARATOR ] = " ^ " + src [ GTLT ] + " \ \ s * ( " + FULLPLAIN + " ) $ | ^ $ " ; var COMPARATORTRIM = R + + ; src [ COMPARATORTRIM ] = " ( \ \ s * ) " + src [ GTLT ] + " \ \ s * ( " + LOOSEPLAIN + " | " + src [ XRANGEPLAIN ] + " ) " ; re [ COMPARATORTRIM ] = new RegExp ( src [ COMPARATORTRIM ] , " g " ) ; var comparatorTrimReplace = " $ 1 $ 2 $ 3 " ; var HYPHENRANGE = R + + ; src [ HYPHENRANGE ] = " ^ \ \ s * ( " + src [ XRANGEPLAIN ] + " ) " + " \ \ s + - \ \ s + " + " ( " + src [ XRANGEPLAIN ] + " ) " + " \ \ s * $ " ; var HYPHENRANGELOOSE = R + + ; src [ HYPHENRANGELOOSE ] = " ^ \ \ s * ( " + src [ XRANGEPLAINLOOSE ] + " ) " + " \ \ s + - \ \ s + " + " ( " + src [ XRANGEPLAINLOOSE ] + " ) " + " \ \ s * $ " ; <nl> - var STAR = R + + ; src [ STAR ] = " ( < | > ) ? = ? \ \ s * \ \ * " ; for ( var i = 0 ; i < R ; i + + ) { debug ( i , src [ i ] ) ; if ( ! re [ i ] ) re [ i ] = new RegExp ( src [ i ] ) } exports . 
parse = parse ; function parse ( version , loose ) { if ( version instanceof SemVer ) return version ; if ( typeof version ! = = " string " ) return null ; if ( version . length > MAX_LENGTH ) return null ; var r = loose ? re [ LOOSE ] : re [ FULL ] ; if ( ! r . test ( version ) ) return null ; try { return new SemVer ( version , loose ) } catch ( er ) { return null } } exports . valid = valid ; function valid ( version , loose ) { var v = parse ( version , loose ) ; return v ? v . version : null } exports . clean = clean ; function clean ( version , loose ) { var s = parse ( version . trim ( ) . replace ( / ^ [ = v ] + / , " " ) , loose ) ; return s ? s . version : null } exports . SemVer = SemVer ; function SemVer ( version , loose ) { if ( version instanceof SemVer ) { if ( version . loose = = = loose ) return version ; else version = version . version } else if ( typeof version ! = = " string " ) { throw new TypeError ( " Invalid Version : " + version ) } if ( version . length > MAX_LENGTH ) throw new TypeError ( " version is longer than " + MAX_LENGTH + " characters " ) ; if ( ! ( this instanceof SemVer ) ) return new SemVer ( version , loose ) ; debug ( " SemVer " , version , loose ) ; this . loose = loose ; var m = version . trim ( ) . match ( loose ? re [ LOOSE ] : re [ FULL ] ) ; if ( ! m ) throw new TypeError ( " Invalid Version : " + version ) ; this . raw = version ; this . major = + m [ 1 ] ; this . minor = + m [ 2 ] ; this . patch = + m [ 3 ] ; if ( this . major > MAX_SAFE_INTEGER | | this . major < 0 ) throw new TypeError ( " Invalid major version " ) ; if ( this . minor > MAX_SAFE_INTEGER | | this . minor < 0 ) throw new TypeError ( " Invalid minor version " ) ; if ( this . patch > MAX_SAFE_INTEGER | | this . patch < 0 ) throw new TypeError ( " Invalid patch version " ) ; if ( ! m [ 4 ] ) this . prerelease = [ ] ; else this . prerelease = m [ 4 ] . split ( " . " ) . map ( function ( id ) { if ( / ^ [ 0 - 9 ] + $ / . test ( id ) ) { var num = + id ; if ( num > = 0 & & num < MAX_SAFE_INTEGER ) return num } return id } ) ; this . build = m [ 5 ] ? m [ 5 ] . split ( " . " ) : [ ] ; this . format ( ) } SemVer . prototype . format = function ( ) { this . version = this . major + " . " + this . minor + " . " + this . patch ; if ( this . prerelease . length ) this . version + = " - " + this . prerelease . join ( " . " ) ; return this . version } ; SemVer . prototype . toString = function ( ) { return this . version } ; SemVer . prototype . compare = function ( other ) { debug ( " SemVer . compare " , this . version , this . loose , other ) ; if ( ! ( other instanceof SemVer ) ) other = new SemVer ( other , this . loose ) ; return this . compareMain ( other ) | | this . comparePre ( other ) } ; SemVer . prototype . compareMain = function ( other ) { if ( ! ( other instanceof SemVer ) ) other = new SemVer ( other , this . loose ) ; return compareIdentifiers ( this . major , other . major ) | | compareIdentifiers ( this . minor , other . minor ) | | compareIdentifiers ( this . patch , other . patch ) } ; SemVer . prototype . comparePre = function ( other ) { if ( ! ( other instanceof SemVer ) ) other = new SemVer ( other , this . loose ) ; if ( this . prerelease . length & & ! other . prerelease . length ) return - 1 ; else if ( ! this . prerelease . length & & other . prerelease . length ) return 1 ; else if ( ! this . prerelease . length & & ! other . prerelease . length ) return 0 ; var i = 0 ; do { var a = this . prerelease [ i ] ; var b = other . 
prerelease [ i ] ; debug ( " prerelease compare " , i , a , b ) ; if ( a = = = undefined & & b = = = undefined ) return 0 ; else if ( b = = = undefined ) return 1 ; else if ( a = = = undefined ) return - 1 ; else if ( a = = = b ) continue ; else return compareIdentifiers ( a , b ) } while ( + + i ) } ; SemVer . prototype . inc = function ( release , identifier ) { switch ( release ) { case " premajor " : this . prerelease . length = 0 ; this . patch = 0 ; this . minor = 0 ; this . major + + ; this . inc ( " pre " , identifier ) ; break ; case " preminor " : this . prerelease . length = 0 ; this . patch = 0 ; this . minor + + ; this . inc ( " pre " , identifier ) ; break ; case " prepatch " : this . prerelease . length = 0 ; this . inc ( " patch " , identifier ) ; this . inc ( " pre " , identifier ) ; break ; case " prerelease " : if ( this . prerelease . length = = = 0 ) this . inc ( " patch " , identifier ) ; this . inc ( " pre " , identifier ) ; break ; case " major " : if ( this . minor ! = = 0 | | this . patch ! = = 0 | | this . prerelease . length = = = 0 ) this . major + + ; this . minor = 0 ; this . patch = 0 ; this . prerelease = [ ] ; break ; case " minor " : if ( this . patch ! = = 0 | | this . prerelease . length = = = 0 ) this . minor + + ; this . patch = 0 ; this . prerelease = [ ] ; break ; case " patch " : if ( this . prerelease . length = = = 0 ) this . patch + + ; this . prerelease = [ ] ; break ; case " pre " : if ( this . prerelease . length = = = 0 ) this . prerelease = [ 0 ] ; else { var i = this . prerelease . length ; while ( - - i > = 0 ) { if ( typeof this . prerelease [ i ] = = = " number " ) { this . prerelease [ i ] + + ; i = - 2 } } if ( i = = = - 1 ) this . prerelease . push ( 0 ) } if ( identifier ) { if ( this . prerelease [ 0 ] = = = identifier ) { if ( isNaN ( this . prerelease [ 1 ] ) ) this . prerelease = [ identifier , 0 ] } else this . prerelease = [ identifier , 0 ] } break ; default : throw new Error ( " invalid increment argument : " + release ) } this . format ( ) ; this . raw = this . version ; return this } ; exports . inc = inc ; function inc ( version , release , loose , identifier ) { if ( typeof loose = = = " string " ) { identifier = loose ; loose = undefined } try { return new SemVer ( version , loose ) . inc ( release , identifier ) . version } catch ( er ) { return null } } exports . diff = diff ; function diff ( version1 , version2 ) { if ( eq ( version1 , version2 ) ) { return null } else { var v1 = parse ( version1 ) ; var v2 = parse ( version2 ) ; if ( v1 . prerelease . length | | v2 . prerelease . length ) { for ( var key in v1 ) { if ( key = = = " major " | | key = = = " minor " | | key = = = " patch " ) { if ( v1 [ key ] ! = = v2 [ key ] ) { return " pre " + key } } } return " prerelease " } for ( var key in v1 ) { if ( key = = = " major " | | key = = = " minor " | | key = = = " patch " ) { if ( v1 [ key ] ! = = v2 [ key ] ) { return key } } } } } exports . compareIdentifiers = compareIdentifiers ; var numeric = / ^ [ 0 - 9 ] + $ / ; function compareIdentifiers ( a , b ) { var anum = numeric . test ( a ) ; var bnum = numeric . test ( b ) ; if ( anum & & bnum ) { a = + a ; b = + b } return anum & & ! bnum ? - 1 : bnum & & ! anum ? 1 : a < b ? - 1 : a > b ? 1 : 0 } exports . rcompareIdentifiers = rcompareIdentifiers ; function rcompareIdentifiers ( a , b ) { return compareIdentifiers ( b , a ) } exports . major = major ; function major ( a , loose ) { return new SemVer ( a , loose ) . major } exports . 
minor = minor ; function minor ( a , loose ) { return new SemVer ( a , loose ) . minor } exports . patch = patch ; function patch ( a , loose ) { return new SemVer ( a , loose ) . patch } exports . compare = compare ; function compare ( a , b , loose ) { return new SemVer ( a , loose ) . compare ( b ) } exports . compareLoose = compareLoose ; function compareLoose ( a , b ) { return compare ( a , b , true ) } exports . rcompare = rcompare ; function rcompare ( a , b , loose ) { return compare ( b , a , loose ) } exports . sort = sort ; function sort ( list , loose ) { return list . sort ( function ( a , b ) { return exports . compare ( a , b , loose ) } ) } exports . rsort = rsort ; function rsort ( list , loose ) { return list . sort ( function ( a , b ) { return exports . rcompare ( a , b , loose ) } ) } exports . gt = gt ; function gt ( a , b , loose ) { return compare ( a , b , loose ) > 0 } exports . lt = lt ; function lt ( a , b , loose ) { return compare ( a , b , loose ) < 0 } exports . eq = eq ; function eq ( a , b , loose ) { return compare ( a , b , loose ) = = = 0 } exports . neq = neq ; function neq ( a , b , loose ) { return compare ( a , b , loose ) ! = = 0 } exports . gte = gte ; function gte ( a , b , loose ) { return compare ( a , b , loose ) > = 0 } exports . lte = lte ; function lte ( a , b , loose ) { return compare ( a , b , loose ) < = 0 } exports . cmp = cmp ; function cmp ( a , op , b , loose ) { var ret ; switch ( op ) { case " = = = " : if ( ( typeof a = = = " undefined " ? " undefined " : _typeof ( a ) ) = = = " object " ) a = a . version ; if ( ( typeof b = = = " undefined " ? " undefined " : _typeof ( b ) ) = = = " object " ) b = b . version ; ret = a = = = b ; break ; case " ! = = " : if ( ( typeof a = = = " undefined " ? " undefined " : _typeof ( a ) ) = = = " object " ) a = a . version ; if ( ( typeof b = = = " undefined " ? " undefined " : _typeof ( b ) ) = = = " object " ) b = b . version ; ret = a ! = = b ; break ; case " " : case " = " : case " = = " : ret = eq ( a , b , loose ) ; break ; case " ! = " : ret = neq ( a , b , loose ) ; break ; case " > " : ret = gt ( a , b , loose ) ; break ; case " > = " : ret = gte ( a , b , loose ) ; break ; case " < " : ret = lt ( a , b , loose ) ; break ; case " < = " : ret = lte ( a , b , loose ) ; break ; default : throw new TypeError ( " Invalid operator : " + op ) } return ret } exports . Comparator = Comparator ; function Comparator ( comp , loose ) { if ( comp instanceof Comparator ) { if ( comp . loose = = = loose ) return comp ; else comp = comp . value } if ( ! ( this instanceof Comparator ) ) return new Comparator ( comp , loose ) ; debug ( " comparator " , comp , loose ) ; this . loose = loose ; this . parse ( comp ) ; if ( this . semver = = = ANY ) this . value = " " ; else this . value = this . operator + this . semver . version ; debug ( " comp " , this ) } var ANY = { } ; Comparator . prototype . parse = function ( comp ) { var r = this . loose ? re [ COMPARATORLOOSE ] : re [ COMPARATOR ] ; var m = comp . match ( r ) ; if ( ! m ) throw new TypeError ( " Invalid comparator : " + comp ) ; this . operator = m [ 1 ] ; if ( this . operator = = = " = " ) this . operator = " " ; if ( ! m [ 2 ] ) this . semver = ANY ; else this . semver = new SemVer ( m [ 2 ] , this . loose ) } ; Comparator . prototype . toString = function ( ) { return this . value } ; Comparator . prototype . test = function ( version ) { debug ( " Comparator . test " , version , this . loose ) ; if ( this . 
semver = = = ANY ) return true ; if ( typeof version = = = " string " ) version = new SemVer ( version , this . loose ) ; return cmp ( version , this . operator , this . semver , this . loose ) } ; exports . Range = Range ; function Range ( range , loose ) { if ( range instanceof Range & & range . loose = = = loose ) return range ; if ( ! ( this instanceof Range ) ) return new Range ( range , loose ) ; this . loose = loose ; this . raw = range ; this . set = range . split ( / \ s * \ | \ | \ s * / ) . map ( function ( range ) { return this . parseRange ( range . trim ( ) ) } , this ) . filter ( function ( c ) { return c . length } ) ; if ( ! this . set . length ) { throw new TypeError ( " Invalid SemVer Range : " + range ) } this . format ( ) } Range . prototype . format = function ( ) { this . range = this . set . map ( function ( comps ) { return comps . join ( " " ) . trim ( ) } ) . join ( " | | " ) . trim ( ) ; return this . range } ; Range . prototype . toString = function ( ) { return this . range } ; Range . prototype . parseRange = function ( range ) { var loose = this . loose ; range = range . trim ( ) ; debug ( " range " , range , loose ) ; var hr = loose ? re [ HYPHENRANGELOOSE ] : re [ HYPHENRANGE ] ; range = range . replace ( hr , hyphenReplace ) ; debug ( " hyphen replace " , range ) ; range = range . replace ( re [ COMPARATORTRIM ] , comparatorTrimReplace ) ; debug ( " comparator trim " , range , re [ COMPARATORTRIM ] ) ; range = range . replace ( re [ TILDETRIM ] , tildeTrimReplace ) ; range = range . replace ( re [ CARETTRIM ] , caretTrimReplace ) ; range = range . split ( / \ s + / ) . join ( " " ) ; var compRe = loose ? re [ COMPARATORLOOSE ] : re [ COMPARATOR ] ; var set = range . split ( " " ) . map ( function ( comp ) { return parseComparator ( comp , loose ) } ) . join ( " " ) . split ( / \ s + / ) ; if ( this . loose ) { set = set . filter ( function ( comp ) { return ! ! comp . match ( compRe ) } ) } set = set . map ( function ( comp ) { return new Comparator ( comp , loose ) } ) ; return set } ; exports . toComparators = toComparators ; function toComparators ( range , loose ) { return new Range ( range , loose ) . set . map ( function ( comp ) { return comp . map ( function ( c ) { return c . value } ) . join ( " " ) . trim ( ) . split ( " " ) } ) } function parseComparator ( comp , loose ) { debug ( " comp " , comp ) ; comp = replaceCarets ( comp , loose ) ; debug ( " caret " , comp ) ; comp = replaceTildes ( comp , loose ) ; debug ( " tildes " , comp ) ; comp = replaceXRanges ( comp , loose ) ; debug ( " xrange " , comp ) ; comp = replaceStars ( comp , loose ) ; debug ( " stars " , comp ) ; return comp } function isX ( id ) { return ! id | | id . toLowerCase ( ) = = = " x " | | id = = = " * " } function replaceTildes ( comp , loose ) { return comp . trim ( ) . split ( / \ s + / ) . map ( function ( comp ) { return replaceTilde ( comp , loose ) } ) . join ( " " ) } function replaceTilde ( comp , loose ) { var r = loose ? re [ TILDELOOSE ] : re [ TILDE ] ; return comp . replace ( r , function ( _ , M , m , p , pr ) { debug ( " tilde " , comp , _ , M , m , p , pr ) ; var ret ; if ( isX ( M ) ) ret = " " ; else if ( isX ( m ) ) ret = " > = " + M + " . 0 . 0 < " + ( + M + 1 ) + " . 0 . 0 " ; else if ( isX ( p ) ) ret = " > = " + M + " . " + m + " . 0 < " + M + " . " + ( + m + 1 ) + " . 0 " ; else if ( pr ) { debug ( " replaceTilde pr " , pr ) ; if ( pr . charAt ( 0 ) ! = = " - " ) pr = " - " + pr ; ret = " > = " + M + " . " + m + " . " + p + pr + " < " + M + " . 
" + ( + m + 1 ) + " . 0 " } else ret = " > = " + M + " . " + m + " . " + p + " < " + M + " . " + ( + m + 1 ) + " . 0 " ; debug ( " tilde return " , ret ) ; return ret } ) } function replaceCarets ( comp , loose ) { return comp . trim ( ) . split ( / \ s + / ) . map ( function ( comp ) { return replaceCaret ( comp , loose ) } ) . join ( " " ) } function replaceCaret ( comp , loose ) { debug ( " caret " , comp , loose ) ; var r = loose ? re [ CARETLOOSE ] : re [ CARET ] ; return comp . replace ( r , function ( _ , M , m , p , pr ) { debug ( " caret " , comp , _ , M , m , p , pr ) ; var ret ; if ( isX ( M ) ) ret = " " ; else if ( isX ( m ) ) ret = " > = " + M + " . 0 . 0 < " + ( + M + 1 ) + " . 0 . 0 " ; else if ( isX ( p ) ) { if ( M = = = " 0 " ) ret = " > = " + M + " . " + m + " . 0 < " + M + " . " + ( + m + 1 ) + " . 0 " ; else ret = " > = " + M + " . " + m + " . 0 < " + ( + M + 1 ) + " . 0 . 0 " } else if ( pr ) { debug ( " replaceCaret pr " , pr ) ; if ( pr . charAt ( 0 ) ! = = " - " ) pr = " - " + pr ; if ( M = = = " 0 " ) { if ( m = = = " 0 " ) ret = " > = " + M + " . " + m + " . " + p + pr + " < " + M + " . " + m + " . " + ( + p + 1 ) ; else ret = " > = " + M + " . " + m + " . " + p + pr + " < " + M + " . " + ( + m + 1 ) + " . 0 " } else ret = " > = " + M + " . " + m + " . " + p + pr + " < " + ( + M + 1 ) + " . 0 . 0 " } else { debug ( " no pr " ) ; if ( M = = = " 0 " ) { if ( m = = = " 0 " ) ret = " > = " + M + " . " + m + " . " + p + " < " + M + " . " + m + " . " + ( + p + 1 ) ; else ret = " > = " + M + " . " + m + " . " + p + " < " + M + " . " + ( + m + 1 ) + " . 0 " } else ret = " > = " + M + " . " + m + " . " + p + " < " + ( + M + 1 ) + " . 0 . 0 " } debug ( " caret return " , ret ) ; return ret } ) } function replaceXRanges ( comp , loose ) { debug ( " replaceXRanges " , comp , loose ) ; return comp . split ( / \ s + / ) . map ( function ( comp ) { return replaceXRange ( comp , loose ) } ) . join ( " " ) } function replaceXRange ( comp , loose ) { comp = comp . trim ( ) ; var r = loose ? re [ XRANGELOOSE ] : re [ XRANGE ] ; return comp . replace ( r , function ( ret , gtlt , M , m , p , pr ) { debug ( " xRange " , comp , ret , gtlt , M , m , p , pr ) ; var xM = isX ( M ) ; var xm = xM | | isX ( m ) ; var xp = xm | | isX ( p ) ; var anyX = xp ; if ( gtlt = = = " = " & & anyX ) gtlt = " " ; if ( xM ) { if ( gtlt = = = " > " | | gtlt = = = " < " ) { ret = " < 0 . 0 . 0 " } else { ret = " * " } } else if ( gtlt & & anyX ) { if ( xm ) m = 0 ; if ( xp ) p = 0 ; if ( gtlt = = = " > " ) { gtlt = " > = " ; if ( xm ) { M = + M + 1 ; m = 0 ; p = 0 } else if ( xp ) { m = + m + 1 ; p = 0 } } else if ( gtlt = = = " < = " ) { gtlt = " < " ; if ( xm ) M = + M + 1 ; else m = + m + 1 } ret = gtlt + M + " . " + m + " . " + p } else if ( xm ) { ret = " > = " + M + " . 0 . 0 < " + ( + M + 1 ) + " . 0 . 0 " } else if ( xp ) { ret = " > = " + M + " . " + m + " . 0 < " + M + " . " + ( + m + 1 ) + " . 0 " } debug ( " xRange return " , ret ) ; return ret } ) } function replaceStars ( comp , loose ) { debug ( " replaceStars " , comp , loose ) ; return comp . trim ( ) . replace ( re [ STAR ] , " " ) } function hyphenReplace ( $ 0 , from , fM , fm , fp , fpr , fb , to , tM , tm , tp , tpr , tb ) { if ( isX ( fM ) ) from = " " ; else if ( isX ( fm ) ) from = " > = " + fM + " . 0 . 0 " ; else if ( isX ( fp ) ) from = " > = " + fM + " . " + fm + " . 0 " ; else from = " > = " + from ; if ( isX ( tM ) ) to = " " ; else if ( isX ( tm ) ) to = " < " + ( + tM + 1 ) + " . 0 . 
0 " ; else if ( isX ( tp ) ) to = " < " + tM + " . " + ( + tm + 1 ) + " . 0 " ; else if ( tpr ) to = " < = " + tM + " . " + tm + " . " + tp + " - " + tpr ; else to = " < = " + to ; return ( from + " " + to ) . trim ( ) } Range . prototype . test = function ( version ) { if ( ! version ) return false ; if ( typeof version = = = " string " ) version = new SemVer ( version , this . loose ) ; for ( var i = 0 ; i < this . set . length ; i + + ) { if ( testSet ( this . set [ i ] , version ) ) return true } return false } ; function testSet ( set , version ) { for ( var i = 0 ; i < set . length ; i + + ) { if ( ! set [ i ] . test ( version ) ) return false } if ( version . prerelease . length ) { for ( var i = 0 ; i < set . length ; i + + ) { debug ( set [ i ] . semver ) ; if ( set [ i ] . semver = = = ANY ) continue ; if ( set [ i ] . semver . prerelease . length > 0 ) { var allowed = set [ i ] . semver ; if ( allowed . major = = = version . major & & allowed . minor = = = version . minor & & allowed . patch = = = version . patch ) return true } } return false } return true } exports . satisfies = satisfies ; function satisfies ( version , range , loose ) { try { range = new Range ( range , loose ) } catch ( er ) { return false } return range . test ( version ) } exports . maxSatisfying = maxSatisfying ; function maxSatisfying ( versions , range , loose ) { return versions . filter ( function ( version ) { return satisfies ( version , range , loose ) } ) . sort ( function ( a , b ) { return rcompare ( a , b , loose ) } ) [ 0 ] | | null } exports . validRange = validRange ; function validRange ( range , loose ) { try { return new Range ( range , loose ) . range | | " * " } catch ( er ) { return null } } exports . ltr = ltr ; function ltr ( version , range , loose ) { return outside ( version , range , " < " , loose ) } exports . gtr = gtr ; function gtr ( version , range , loose ) { return outside ( version , range , " > " , loose ) } exports . outside = outside ; function outside ( version , range , hilo , loose ) { version = new SemVer ( version , loose ) ; range = new Range ( range , loose ) ; var gtfn , ltefn , ltfn , comp , ecomp ; switch ( hilo ) { case " > " : gtfn = gt ; ltefn = lte ; ltfn = lt ; comp = " > " ; ecomp = " > = " ; break ; case " < " : gtfn = lt ; ltefn = gte ; ltfn = gt ; comp = " < " ; ecomp = " < = " ; break ; default : throw new TypeError ( ' Must provide a hilo val of " < " or " > " ' ) } if ( satisfies ( version , range , loose ) ) { return false } for ( var i = 0 ; i < range . set . length ; + + i ) { var comparators = range . set [ i ] ; var high = null ; var low = null ; comparators . forEach ( function ( comparator ) { if ( comparator . semver = = = ANY ) { comparator = new Comparator ( " > = 0 . 0 . 0 " ) } high = high | | comparator ; low = low | | comparator ; if ( gtfn ( comparator . semver , high . semver , loose ) ) { high = comparator } else if ( ltfn ( comparator . semver , low . semver , loose ) ) { low = comparator } } ) ; if ( high . operator = = = comp | | high . operator = = = ecomp ) { return false } if ( ( ! low . operator | | low . operator = = = comp ) & & ltefn ( version , low . semver ) ) { return false } else if ( low . operator = = = ecomp & & ltfn ( version , low . semver ) ) { return false } } return true } exports . prerelease = prerelease ; function prerelease ( version , loose ) { var parsed = parse ( version , loose ) ; return parsed & & parsed . prerelease . length ? parsed . prerelease : null } } ) . 
call ( exports , __webpack_require__ ( 85 ) ) } , function ( module , exports ) { " use strict " ; var process = module . exports = { } ; var cachedSetTimeout ; var cachedClearTimeout ; ( function ( ) { try { cachedSetTimeout = setTimeout } catch ( e ) { cachedSetTimeout = function cachedSetTimeout ( ) { throw new Error ( " setTimeout is not defined " ) } } try { cachedClearTimeout = clearTimeout } catch ( e ) { cachedClearTimeout = function cachedClearTimeout ( ) { throw new Error ( " clearTimeout is not defined " ) } } } ) ( ) ; var queue = [ ] ; var draining = false ; var currentQueue ; var queueIndex = - 1 ; function cleanUpNextTick ( ) { if ( ! draining | | ! currentQueue ) { return } draining = false ; if ( currentQueue . length ) { queue = currentQueue . concat ( queue ) } else { queueIndex = - 1 } if ( queue . length ) { drainQueue ( ) } } function drainQueue ( ) { if ( draining ) { return } var timeout = cachedSetTimeout ( cleanUpNextTick ) ; draining = true ; var len = queue . length ; while ( len ) { currentQueue = queue ; queue = [ ] ; while ( + + queueIndex < len ) { if ( currentQueue ) { currentQueue [ queueIndex ] . run ( ) } } queueIndex = - 1 ; len = queue . length } currentQueue = null ; draining = false ; cachedClearTimeout ( timeout ) } process . nextTick = function ( fun ) { var args = new Array ( arguments . length - 1 ) ; if ( arguments . length > 1 ) { for ( var i = 1 ; i < arguments . length ; i + + ) { args [ i - 1 ] = arguments [ i ] } } queue . push ( new Item ( fun , args ) ) ; if ( queue . length = = = 1 & & ! draining ) { cachedSetTimeout ( drainQueue , 0 ) } } ; function Item ( fun , array ) { this . fun = fun ; this . array = array } Item . prototype . run = function ( ) { this . fun . apply ( null , this . array ) } ; process . title = " browser " ; process . browser = true ; process . env = { } ; process . argv = [ ] ; process . version = " " ; process . versions = { } ; function noop ( ) { } process . on = noop ; process . addListener = noop ; process . once = noop ; process . off = noop ; process . removeListener = noop ; process . removeAllListeners = noop ; process . emit = noop ; process . binding = function ( name ) { throw new Error ( " process . binding is not supported " ) } ; process . cwd = function ( ) { return " / " } ; process . chdir = function ( dir ) { throw new Error ( " process . chdir is not supported " ) } ; process . umask = function ( ) { return 0 } } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . default = Vm ; var _util = __webpack_require__ ( 82 ) ; var _ = _interopRequireWildcard ( _util ) ; var _state = __webpack_require__ ( 87 ) ; var state = _interopRequireWildcard ( _state ) ; var _compiler = __webpack_require__ ( 92 ) ; var compiler = _interopRequireWildcard ( _compiler ) ; var _directive = __webpack_require__ ( 93 ) ; var directive = _interopRequireWildcard ( _directive ) ; var _domHelper = __webpack_require__ ( 94 ) ; var domHelper = _interopRequireWildcard ( _domHelper ) ; var _events = __webpack_require__ ( 95 ) ; var events = _interopRequireWildcard ( _events ) ; var _register = __webpack_require__ ( 96 ) ; function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . 
default = obj ; return newObj } } function callOldReadyEntry ( vm , component ) { if ( component . methods & & component . methods . ready ) { _ . warn ( ' " exports . methods . ready " is deprecated , ' + ' please use " exports . created " instead ' ) ; component . methods . ready . call ( vm ) } } function Vm ( type , options , parentVm , parentEl , mergedData , externalEvents ) { this . _parent = parentVm . _realParent ? parentVm . _realParent : parentVm ; this . _app = parentVm . _app ; parentVm . _childrenVms & & parentVm . _childrenVms . push ( this ) ; if ( ! options ) { options = this . _app . customComponentMap [ type ] | | { } } var data = options . data | | { } ; this . _options = options ; this . _methods = options . methods | | { } ; this . _computed = options . computed | | { } ; this . _css = options . style | | { } ; this . _ids = { } ; this . _vmEvents = { } ; this . _childrenVms = [ ] ; this . _type = type ; this . _initEvents ( externalEvents ) ; _ . debug ( ' " init " lifecycle in Vm ( ' + this . _type + " ) " ) ; this . $ emit ( " hook : init " ) ; this . _inited = true ; this . _data = typeof data = = = " function " ? data ( ) : data ; if ( mergedData ) { _ . extend ( this . _data , mergedData ) } this . _initState ( ) ; _ . debug ( ' " created " lifecycle in Vm ( ' + this . _type + " ) " ) ; this . $ emit ( " hook : created " ) ; this . _created = true ; callOldReadyEntry ( this , options ) ; this . _parentEl = parentEl | | this . _app . doc . documentElement ; this . _build ( ) } _ . extend ( Vm . prototype , state , compiler , directive , domHelper , events ) ; _ . extend ( Vm , { registerModules : _register . registerModules , registerMethods : _register . registerMethods } ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . _initState = _initState ; exports . _initData = _initData ; exports . _initComputed = _initComputed ; exports . _initMethods = _initMethods ; var _watcher = __webpack_require__ ( 88 ) ; var _watcher2 = _interopRequireDefault ( _watcher ) ; var _dep = __webpack_require__ ( 89 ) ; var _dep2 = _interopRequireDefault ( _dep ) ; var _observer = __webpack_require__ ( 90 ) ; var _util = __webpack_require__ ( 82 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } function _initState ( ) { var vm = this ; vm . _watchers = [ ] ; vm . _initData ( ) ; vm . _initComputed ( ) ; vm . _initMethods ( ) } function _initData ( ) { var vm = this ; var data = vm . _data ; if ( ! ( 0 , _util . isPlainObject ) ( data ) ) { data = { } } var keys = Object . keys ( data ) ; var i = keys . length ; while ( i - - ) { ( 0 , _observer . proxy ) ( vm , keys [ i ] ) } ( 0 , _observer . observe ) ( data , vm ) } function noop ( ) { } function _initComputed ( ) { var vm = this ; var computed = vm . _computed ; if ( computed ) { for ( var key in computed ) { var userDef = computed [ key ] ; var def = { enumerable : true , configurable : true } ; if ( typeof userDef = = = " function " ) { def . get = makeComputedGetter ( userDef , vm ) ; def . set = noop } else { def . get = userDef . get ? userDef . cache ! = = false ? makeComputedGetter ( userDef . get , vm ) : ( 0 , _util . bind ) ( userDef . get , vm ) : noop ; def . set = userDef . set ? ( 0 , _util . bind ) ( userDef . set , vm ) : noop } Object . 
defineProperty ( vm , key , def ) } } } function makeComputedGetter ( getter , owner ) { var watcher = new _watcher2 . default ( owner , getter , null , { lazy : true } ) ; return function computedGetter ( ) { if ( watcher . dirty ) { watcher . evaluate ( ) } if ( _dep2 . default . target ) { watcher . depend ( ) } return watcher . value } } function _initMethods ( ) { var vm = this ; var methods = vm . _methods ; if ( methods ) { for ( var key in methods ) { vm [ key ] = ( 0 , _util . bind ) ( methods [ key ] , vm ) } } } } , function ( module , exports , __webpack_require__ ) { ( function ( process ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . default = Watcher ; var _dep = __webpack_require__ ( 89 ) ; var _dep2 = _interopRequireDefault ( _dep ) ; var _util = __webpack_require__ ( 82 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } var uid = 0 ; var prevTarget = void 0 ; function Watcher ( vm , expOrFn , cb , options ) { if ( options ) { ( 0 , _util . extend ) ( this , options ) } var isFn = typeof expOrFn = = = " function " ; this . vm = vm ; vm . _watchers . push ( this ) ; this . expression = expOrFn ; this . cb = cb ; this . id = + + uid ; this . active = true ; this . dirty = this . lazy ; this . deps = [ ] ; this . newDeps = [ ] ; this . depIds = new _util . _Set ; this . newDepIds = new _util . _Set ; if ( isFn ) { this . getter = expOrFn } else { this . getter = ( 0 , _util . parsePath ) ( expOrFn ) ; if ( ! this . getter ) { this . getter = function ( ) { } ; process . env . NODE_ENV ! = = " production " & & ( 0 , _util . warn ) ( " Failed watching path : " + expOrFn + " Watcher only accepts simple dot - delimited paths . " + " For full control , use a function instead . " , vm ) } } this . value = this . lazy ? undefined : this . get ( ) ; this . queued = this . shallow = false } Watcher . prototype . get = function ( ) { this . beforeGet ( ) ; var value = this . getter . call ( this . vm , this . vm ) ; if ( this . deep ) { traverse ( value ) } this . afterGet ( ) ; return value } ; Watcher . prototype . beforeGet = function ( ) { prevTarget = _dep2 . default . target ; _dep2 . default . target = this } ; Watcher . prototype . addDep = function ( dep ) { var id = dep . id ; if ( ! this . newDepIds . has ( id ) ) { this . newDepIds . add ( id ) ; this . newDeps . push ( dep ) ; if ( ! this . depIds . has ( id ) ) { dep . addSub ( this ) } } } ; Watcher . prototype . afterGet = function ( ) { _dep2 . default . target = prevTarget ; var i = this . deps . length ; while ( i - - ) { var dep = this . deps [ i ] ; if ( ! this . newDepIds . has ( dep . id ) ) { dep . removeSub ( this ) } } var tmp = this . depIds ; this . depIds = this . newDepIds ; this . newDepIds = tmp ; this . newDepIds . clear ( ) ; tmp = this . deps ; this . deps = this . newDeps ; this . newDeps = tmp ; this . newDeps . length = 0 } ; Watcher . prototype . update = function ( shallow ) { if ( this . lazy ) { this . dirty = true } else { this . run ( ) } } ; Watcher . prototype . run = function ( ) { if ( this . active ) { var value = this . get ( ) ; if ( value ! = = this . value | | ( ( 0 , _util . isObject ) ( value ) | | this . deep ) & & ! this . shallow ) { var oldValue = this . value ; this . value = value ; this . cb . call ( this . vm , value , oldValue ) } this . queued = this . shallow = false } } ; Watcher . prototype . evaluate = function ( ) { var current = _dep2 . default . 
target ; this . value = this . get ( ) ; this . dirty = false ; _dep2 . default . target = current } ; Watcher . prototype . depend = function ( ) { var i = this . deps . length ; while ( i - - ) { this . deps [ i ] . depend ( ) } } ; Watcher . prototype . teardown = function ( ) { if ( this . active ) { if ( ! this . vm . _isBeingDestroyed & & ! this . vm . _vForRemoving ) { ( 0 , _util . remove ) ( this . vm . _watchers , this ) } var i = this . deps . length ; while ( i - - ) { this . deps [ i ] . removeSub ( this ) } this . active = false ; this . vm = this . cb = this . value = null } } ; var seenObjects = new _util . _Set ; function traverse ( val , seen ) { var i = void 0 , keys = void 0 , isA = void 0 , isO = void 0 ; if ( ! seen ) { seen = seenObjects ; seen . clear ( ) } isA = ( 0 , _util . isArray ) ( val ) ; isO = ( 0 , _util . isObject ) ( val ) ; if ( isA | | isO ) { if ( val . __ob__ ) { var depId = val . __ob__ . dep . id ; if ( seen . has ( depId ) ) { return } else { seen . add ( depId ) } } if ( isA ) { i = val . length ; while ( i - - ) { traverse ( val [ i ] , seen ) } } else if ( isO ) { keys = Object . keys ( val ) ; i = keys . length ; while ( i - - ) { traverse ( val [ keys [ i ] ] , seen ) } } } } } ) . call ( exports , __webpack_require__ ( 85 ) ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . default = Dep ; var _util = __webpack_require__ ( 82 ) ; var uid = 0 ; function Dep ( ) { this . id = uid + + ; this . subs = [ ] } Dep . target = null ; Dep . prototype . addSub = function ( sub ) { this . subs . push ( sub ) } ; Dep . prototype . removeSub = function ( sub ) { ( 0 , _util . remove ) ( this . subs , sub ) } ; Dep . prototype . depend = function ( ) { Dep . target . addDep ( this ) } ; Dep . prototype . notify = function ( ) { var subs = this . subs . slice ( ) ; for ( var i = 0 , l = subs . length ; i < l ; i + + ) { subs [ i ] . update ( ) } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . Observer = Observer ; exports . observe = observe ; exports . defineReactive = defineReactive ; exports . set = set ; exports . del = del ; exports . proxy = proxy ; exports . unproxy = unproxy ; var _dep = __webpack_require__ ( 89 ) ; var _dep2 = _interopRequireDefault ( _dep ) ; var _array = __webpack_require__ ( 91 ) ; var _util = __webpack_require__ ( 82 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } var arrayKeys = Object . getOwnPropertyNames ( _array . arrayMethods ) ; function Observer ( value ) { this . value = value ; this . dep = new _dep2 . default ; ( 0 , _util . def ) ( value , " __ob__ " , this ) ; if ( ( 0 , _util . isArray ) ( value ) ) { var augment = _util . hasProto ? protoAugment : copyAugment ; augment ( value , _array . arrayMethods , arrayKeys ) ; this . observeArray ( value ) } else { this . walk ( value ) } } Observer . prototype . walk = function ( obj ) { for ( var key in obj ) { this . convert ( key , obj [ key ] ) } } ; Observer . prototype . observeArray = function ( items ) { for ( var i = 0 , l = items . length ; i < l ; i + + ) { observe ( items [ i ] ) } } ; Observer . prototype . convert = function ( key , val ) { defineReactive ( this . value , key , val ) } ; Observer . prototype . addVm = function ( vm ) { ( this . vms | | ( this . vms = [ ] ) ) . 
push ( vm ) } ; Observer . prototype . removeVm = function ( vm ) { ( 0 , _util . remove ) ( this . vms , vm ) } ; function protoAugment ( target , src ) { target . __proto__ = src } function copyAugment ( target , src , keys ) { for ( var i = 0 , l = keys . length ; i < l ; i + + ) { var key = keys [ i ] ; ( 0 , _util . def ) ( target , key , src [ key ] ) } } function observe ( value , vm ) { if ( ! ( 0 , _util . isObject ) ( value ) ) { return } var ob = void 0 ; if ( ( 0 , _util . hasOwn ) ( value , " __ob__ " ) & & value . __ob__ instanceof Observer ) { ob = value . __ob__ } else if ( ( ( 0 , _util . isArray ) ( value ) | | ( 0 , _util . isPlainObject ) ( value ) ) & & Object . isExtensible ( value ) & & ! value . _isVue ) { ob = new Observer ( value ) } if ( ob & & vm ) { ob . addVm ( vm ) } return ob } function defineReactive ( obj , key , val ) { var dep = new _dep2 . default ; var property = Object . getOwnPropertyDescriptor ( obj , key ) ; if ( property & & property . configurable = = = false ) { return } var getter = property & & property . get ; var setter = property & & property . set ; var childOb = observe ( val ) ; Object . defineProperty ( obj , key , { enumerable : true , configurable : true , get : function reactiveGetter ( ) { var value = getter ? getter . call ( obj ) : val ; if ( _dep2 . default . target ) { dep . depend ( ) ; if ( childOb ) { childOb . dep . depend ( ) } if ( ( 0 , _util . isArray ) ( value ) ) { for ( var e , i = 0 , l = value . length ; i < l ; i + + ) { e = value [ i ] ; e & & e . __ob__ & & e . __ob__ . dep . depend ( ) } } } return value } , set : function reactiveSetter ( newVal ) { var value = getter ? getter . call ( obj ) : val ; if ( newVal = = = value ) { return } if ( setter ) { setter . call ( obj , newVal ) } else { val = newVal } childOb = observe ( newVal ) ; dep . notify ( ) } } ) } function set ( obj , key , val ) { if ( ( 0 , _util . isArray ) ( obj ) ) { return obj . splice ( key , 1 , val ) } if ( ( 0 , _util . hasOwn ) ( obj , key ) ) { obj [ key ] = val ; return } if ( obj . _isVue ) { set ( obj . _data , key , val ) ; return } var ob = obj . __ob__ ; if ( ! ob ) { obj [ key ] = val ; return } ob . convert ( key , val ) ; ob . dep . notify ( ) ; if ( ob . vms ) { var i = ob . vms . length ; while ( i - - ) { var vm = ob . vms [ i ] ; proxy ( vm , key ) ; vm . $ forceUpdate ( ) } } return val } function del ( obj , key ) { if ( ! ( 0 , _util . hasOwn ) ( obj , key ) ) { return } delete obj [ key ] ; var ob = obj . __ob__ ; if ( ! ob ) { if ( obj . _isVue ) { delete obj . _data [ key ] ; obj . $ forceUpdate ( ) } return } ob . dep . notify ( ) ; if ( ob . vms ) { var i = ob . vms . length ; while ( i - - ) { var vm = ob . vms [ i ] ; unproxy ( vm , key ) ; vm . $ forceUpdate ( ) } } } var KEY_WORDS = [ " $ index " , " $ value " , " $ event " ] ; function proxy ( vm , key ) { if ( KEY_WORDS . indexOf ( key ) > - 1 | | ! ( 0 , _util . isReserved ) ( key ) ) { Object . defineProperty ( vm , key , { configurable : true , enumerable : true , get : function proxyGetter ( ) { return vm . _data [ key ] } , set : function proxySetter ( val ) { vm . _data [ key ] = val } } ) } } function unproxy ( vm , key ) { if ( ! ( 0 , _util . isReserved ) ( key ) ) { delete vm [ key ] } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . arrayMethods = undefined ; var _util = __webpack_require__ ( 82 ) ; var arrayProto = Array . 
prototype ; var arrayMethods = exports . arrayMethods = Object . create ( arrayProto ) ; [ " push " , " pop " , " shift " , " unshift " , " splice " , " sort " , " reverse " ] . forEach ( function ( method ) { var original = arrayProto [ method ] ; ( 0 , _util . def ) ( arrayMethods , method , function mutator ( ) { var i = arguments . length ; var args = new Array ( i ) ; while ( i - - ) { args [ i ] = arguments [ i ] } var result = original . apply ( this , args ) ; var ob = this . __ob__ ; var inserted = void 0 ; switch ( method ) { case " push " : inserted = args ; break ; case " unshift " : inserted = args ; break ; case " splice " : inserted = args . slice ( 2 ) ; break } if ( inserted ) ob . observeArray ( inserted ) ; ob . dep . notify ( ) ; return result } ) } ) ; ( 0 , _util . def ) ( arrayProto , " $ set " , function $ set ( index , val ) { if ( index > = this . length ) { this . length = index + 1 } return this . splice ( index , 1 , val ) [ 0 ] } ) ; ( 0 , _util . def ) ( arrayProto , " $ remove " , function $ remove ( index ) { if ( ! this . length ) return ; if ( typeof index ! = = " number " ) { index = this . indexOf ( index ) } if ( index > - 1 ) { this . splice ( index , 1 ) } } ) } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj } ; exports . _build = _build ; exports . _compile = _compile ; exports . _targetIsFragment = _targetIsFragment ; exports . _targetIsContent = _targetIsContent ; exports . _targetNeedCheckRepeat = _targetNeedCheckRepeat ; exports . _targetNeedCheckShown = _targetNeedCheckShown ; exports . _targetNeedCheckType = _targetNeedCheckType ; exports . _targetIsComposed = _targetIsComposed ; exports . _compileFragment = _compileFragment ; exports . _compileRepeat = _compileRepeat ; exports . _compileShown = _compileShown ; exports . _compileType = _compileType ; exports . _compileCustomComponent = _compileCustomComponent ; exports . _compileNativeComponent = _compileNativeComponent ; exports . _compileChildren = _compileChildren ; exports . _bindRepeat = _bindRepeat ; exports . _bindShown = _bindShown ; exports . _watchBlock = _watchBlock ; exports . _mergeContext = _mergeContext ; var _util = __webpack_require__ ( 82 ) ; var _ = _interopRequireWildcard ( _util ) ; function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . default = obj ; return newObj } } function _build ( ) { var opt = this . _options | | { } ; var template = opt . template | | { } ; if ( opt . replace ) { if ( template . children & & template . children . length = = = 1 ) { this . _compile ( template . children [ 0 ] , this . _parentEl ) } else { this . _compile ( template . children , this . _parentEl ) } } else { this . _compile ( template , this . _parentEl ) } _ . debug ( ' " ready " lifecycle in Vm ( ' + this . _type + " ) " ) ; this . $ emit ( " hook : ready " ) ; this . _ready = true } function _compile ( target , dest , meta ) { var app = this . _app | | { } ; if ( app . 
lastSignal = = = - 1 ) { return } var context = this ; if ( context . _targetIsFragment ( target ) ) { context . _compileFragment ( target , dest , meta ) ; return } meta = meta | | { } ; if ( context . _targetIsContent ( target ) ) { _ . debug ( ' compile " content " block by ' , target ) ; context . _content = context . _createBlock ( dest ) ; return } if ( context . _targetNeedCheckRepeat ( target , meta ) ) { _ . debug ( ' compile " repeat " logic by ' , target ) ; context . _compileRepeat ( target , dest ) ; return } if ( context . _targetNeedCheckShown ( target , meta ) ) { _ . debug ( ' compile " if " logic by ' , target ) ; context . _compileShown ( target , dest , meta ) ; <nl> - return } var typeGetter = meta . type | | target . type ; if ( context . _targetNeedCheckType ( typeGetter , meta ) ) { context . _compileType ( target , dest , typeGetter , meta ) ; return } var type = typeGetter ; var component = context . _targetIsComposed ( target , type ) ; if ( component ) { _ . debug ( " compile composed component by " , target ) ; context . _compileCustomComponent ( component , target , dest , type , meta ) ; return } _ . debug ( " compile native component by " , target ) ; context . _compileNativeComponent ( target , dest , type ) } function _targetIsFragment ( target ) { return Array . isArray ( target ) } function _targetIsContent ( target ) { return target . type = = = " content " | | target . type = = = " slot " } function _targetNeedCheckRepeat ( target , meta ) { return ! meta . hasOwnProperty ( " repeat " ) & & target . repeat } function _targetNeedCheckShown ( target , meta ) { return ! meta . hasOwnProperty ( " shown " ) & & target . shown } function _targetNeedCheckType ( typeGetter , meta ) { return typeof typeGetter = = = " function " & & ! meta . hasOwnProperty ( " type " ) } function _targetIsComposed ( target , type ) { var component = void 0 ; if ( this . _app & & this . _app . customComponentMap ) { component = this . _app . customComponentMap [ type ] } if ( this . _options & & this . _options . components ) { component = this . _options . components [ type ] } if ( target . component ) { component = component | | { } } return component } function _compileFragment ( target , dest , meta ) { var _this = this ; var fragBlock = this . _createBlock ( dest ) ; target . forEach ( function ( child ) { _this . _compile ( child , fragBlock , meta ) } ) } function _compileRepeat ( target , dest ) { var repeat = target . repeat ; var oldStyle = typeof repeat = = = " function " ; var getter = repeat . getter | | repeat . expression | | repeat ; if ( typeof getter ! = = " function " ) { getter = function getter ( ) { return [ ] } } var key = repeat . key | | " $ index " ; var value = repeat . value | | " $ value " ; var trackBy = repeat . trackBy | | target . trackBy | | target . attr & & target . attr . trackBy | | key ; var fragBlock = this . _createBlock ( dest ) ; fragBlock . children = [ ] ; fragBlock . data = [ ] ; fragBlock . vms = [ ] ; this . _bindRepeat ( target , fragBlock , { getter : getter , key : key , value : value , trackBy : trackBy , oldStyle : oldStyle } ) } function _compileShown ( target , dest , meta ) { var newMeta = { shown : true } ; var fragBlock = this . _createBlock ( dest ) ; if ( dest . element & & dest . children ) { dest . children . push ( fragBlock ) } if ( meta . repeat ) { newMeta . repeat = meta . repeat } this . 
_bindShown ( target , fragBlock , newMeta ) } function _compileType ( target , dest , typeGetter , meta ) { var _this2 = this ; var type = typeGetter . call ( this ) ; var newMeta = Object . assign ( { type : type } , meta ) ; var fragBlock = this . _createBlock ( dest ) ; if ( dest . element & & dest . children ) { dest . children . push ( fragBlock ) } this . _watch ( typeGetter , function ( value ) { var newMeta = Object . assign ( { type : value } , meta ) ; _this2 . _removeBlock ( fragBlock , true ) ; _this2 . _compile ( target , fragBlock , newMeta ) } ) ; this . _compile ( target , fragBlock , newMeta ) } function _compileCustomComponent ( component , target , dest , type , meta ) { var Vm = this . constructor ; var context = this ; var subVm = new Vm ( type , component , context , dest , undefined , { " hook : init " : function hookInit ( ) { context . _setId ( target . id , null , this ) ; this . _externalBinding = { parent : context , template : target } } , " hook : created " : function hookCreated ( ) { context . _bindSubVm ( this , target , meta . repeat ) } , " hook : ready " : function hookReady ( ) { if ( this . _content ) { context . _compileChildren ( target , this . _content ) } } } ) ; this . _bindSubVmAfterInitialized ( subVm , target ) } function _compileNativeComponent ( template , dest , type ) { this . _applyNaitveComponentOptions ( template ) ; var element = void 0 ; if ( dest . ref = = = " _documentElement " ) { _ . debug ( " compile to create body for " , type ) ; element = this . _createBody ( type ) } else { _ . debug ( " compile to create element for " , type ) ; element = this . _createElement ( type ) } if ( ! this . _rootEl ) { this . _rootEl = element ; var binding = this . _externalBinding | | { } ; var target = binding . template ; var vm = binding . parent ; if ( target & & target . events & & vm & & element ) { for ( var _type in target . events ) { var handler = vm [ target . events [ _type ] ] ; if ( handler ) { element . addEvent ( _type , _ . bind ( handler , vm ) ) } } } } this . _bindElement ( element , template ) ; if ( template . attr & & template . attr . append ) { template . append = template . attr . append } if ( template . append ) { element . attr = element . attr | | { } ; element . attr . append = template . append } var treeMode = template . append = = = " tree " ; var app = this . _app | | { } ; if ( app . lastSignal ! = = - 1 & & ! treeMode ) { _ . debug ( " compile to append single node for " , element ) ; app . lastSignal = this . _attachTarget ( element , dest ) } if ( app . lastSignal ! = = - 1 ) { this . _compileChildren ( template , element ) } if ( app . lastSignal ! = = - 1 & & treeMode ) { _ . debug ( " compile to append whole tree for " , element ) ; app . lastSignal = this . _attachTarget ( element , dest ) } } function _compileChildren ( template , dest ) { var _this3 = this ; var app = this . _app | | { } ; var children = template . children ; if ( children & & children . length ) { children . every ( function ( child ) { _this3 . _compile ( child , dest ) ; return app . lastSignal ! = = - 1 } ) } } function _bindRepeat ( target , fragBlock , info ) { var _this4 = this ; var vms = fragBlock . vms ; var children = fragBlock . children ; var getter = info . getter ; var trackBy = info . trackBy ; var oldStyle = info . oldStyle ; var keyName = info . key ; var valueName = info . 
value ; function compileItem ( item , index , context ) { var mergedData = void 0 ; if ( oldStyle ) { mergedData = item ; if ( ( typeof item = = = " undefined " ? " undefined " : _typeof ( item ) ) = = = " object " ) { mergedData [ keyName ] = index ; if ( ! mergedData . hasOwnProperty ( " INDEX " ) ) { Object . defineProperty ( mergedData , " INDEX " , { value : function value ( ) { _ . warn ( ' " INDEX " in repeat is deprecated , ' + ' please use " $ index " instead ' ) } } ) } } } else { mergedData = { } ; mergedData [ keyName ] = index ; mergedData [ valueName ] = item } context = context . _mergeContext ( mergedData ) ; vms . push ( context ) ; context . _compile ( target , fragBlock , { repeat : item } ) } var list = this . _watchBlock ( fragBlock , getter , " repeat " , function ( data ) { _ . debug ( ' the " repeat " item has changed ' , data ) ; if ( ! fragBlock ) { return } var oldChildren = children . slice ( ) ; var oldVms = vms . slice ( ) ; var oldData = fragBlock . data . slice ( ) ; var trackMap = { } ; var reusedMap = { } ; data . forEach ( function ( item , index ) { var key = trackBy ? item [ trackBy ] : index ; if ( key = = null | | key = = = " " ) { return } trackMap [ key ] = item } ) ; var reusedList = [ ] ; oldData . forEach ( function ( item , index ) { var key = trackBy ? item [ trackBy ] : index ; if ( trackMap . hasOwnProperty ( key ) ) { reusedMap [ key ] = { item : item , index : index , key : key , target : oldChildren [ index ] , vm : oldVms [ index ] } ; reusedList . push ( item ) } else { _this4 . _removeTarget ( oldChildren [ index ] ) } } ) ; children . length = 0 ; vms . length = 0 ; fragBlock . data = data . slice ( ) ; fragBlock . updateMark = fragBlock . start ; data . forEach ( function ( item , index ) { var key = trackBy ? item [ trackBy ] : index ; var reused = reusedMap [ key ] ; if ( reused ) { if ( reused . item = = = reusedList [ 0 ] ) { reusedList . shift ( ) } else { reusedList . $ remove ( reused . item ) ; _this4 . _moveTarget ( reused . target , fragBlock . updateMark , true ) } children . push ( reused . target ) ; vms . push ( reused . vm ) ; reused . vm [ keyName ] = index ; fragBlock . updateMark = reused . target } else { compileItem ( item , index , _this4 ) } } ) ; delete fragBlock . updateMark } ) ; fragBlock . data = list . slice ( 0 ) ; list . forEach ( function ( item , index ) { compileItem ( item , index , _this4 ) } ) } function _bindShown ( target , fragBlock , meta ) { var _this5 = this ; var display = this . _watchBlock ( fragBlock , target . shown , " shown " , function ( display ) { _ . debug ( ' the " if " item was changed ' , display ) ; if ( ! fragBlock | | ! ! fragBlock . display = = = ! ! display ) { return } fragBlock . display = ! ! display ; if ( display ) { _this5 . _compile ( target , fragBlock , meta ) } else { _this5 . _removeBlock ( fragBlock , true ) } } ) ; fragBlock . display = ! ! display ; if ( display ) { this . _compile ( target , fragBlock , meta ) } } function _watchBlock ( fragBlock , calc , type , handler ) { var differ = this & & this . _app & & this . _app . differ ; var config = { } ; var depth = ( fragBlock . element . depth | | 0 ) + 1 ; return this . _watch ( calc , function ( value ) { config . latestValue = value ; if ( differ & & ! config . recorded ) { differ . append ( type , depth , fragBlock . blockId , function ( ) { var latestValue = config . latestValue ; handler ( latestValue ) ; config . recorded = false ; config . latestValue = undefined } ) } config . 
recorded = true } ) } function _mergeContext ( mergedData ) { var context = Object . create ( this ) ; context . _data = mergedData ; context . _initData ( ) ; context . _initComputed ( ) ; context . _realParent = this ; return context } } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj } ; exports . _applyNaitveComponentOptions = _applyNaitveComponentOptions ; exports . _bindElement = _bindElement ; exports . _bindSubVm = _bindSubVm ; exports . _bindSubVmAfterInitialized = _bindSubVmAfterInitialized ; exports . _setId = _setId ; exports . _setAttr = _setAttr ; exports . _setClass = _setClass ; exports . _setStyle = _setStyle ; exports . _setEvent = _setEvent ; exports . _bindEvents = _bindEvents ; exports . _bindDir = _bindDir ; exports . _bindKey = _bindKey ; exports . _watch = _watch ; var _util = __webpack_require__ ( 82 ) ; var _ = _interopRequireWildcard ( _util ) ; var _watcher = __webpack_require__ ( 88 ) ; var _watcher2 = _interopRequireDefault ( _watcher ) ; var _config = __webpack_require__ ( 80 ) ; var _config2 = _interopRequireDefault ( _config ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . default = obj ; return newObj } } var nativeComponentMap = _config2 . default . nativeComponentMap ; var SETTERS = { attr : " setAttr " , style : " setStyle " , event : " addEvent " } ; function _applyNaitveComponentOptions ( template ) { var type = template . type ; var options = nativeComponentMap [ type ] ; if ( ( typeof options = = = " undefined " ? " undefined " : _typeof ( options ) ) = = = " object " ) { for ( var key in options ) { if ( template [ key ] = = null ) { template [ key ] = options [ key ] } else if ( _ . typof ( template [ key ] ) = = = " object " & & _ . typof ( options [ key ] ) = = = " object " ) { for ( var subkey in options [ key ] ) { if ( template [ key ] [ subkey ] = = null ) { template [ key ] [ subkey ] = options [ key ] [ subkey ] } } } } } } function _bindElement ( el , template ) { this . _setId ( template . id , el , this ) ; this . _setAttr ( el , template . attr ) ; this . _setClass ( el , template . classList ) ; this . _setStyle ( el , template . style ) ; this . _bindEvents ( el , template . events ) } function _bindSubVm ( subVm , template , repeatItem ) { subVm = subVm | | { } ; template = template | | { } ; var options = subVm . _options | | { } ; var props = options . props ; if ( Array . isArray ( props ) ) { props = props . reduce ( function ( result , value ) { result [ value ] = true ; return result } , { } ) } mergeProps ( repeatItem , props , this , subVm ) ; mergeProps ( template . attr , props , this , subVm ) } function _bindSubVmAfterInitialized ( subVm , template ) { mergeClassStyle ( template . classList , this , subVm ) ; mergeStyle ( template . style , this , subVm ) } function mergeProps ( target , props , vm , subVm ) { if ( ! 
target ) { return } var _loop = function _loop ( key ) { if ( ! props | | props [ key ] ) { var value = target [ key ] ; if ( typeof value = = = " function " ) { var returnValue = vm . _watch ( value , function ( v ) { subVm [ key ] = v } ) ; subVm [ key ] = returnValue } else { subVm [ key ] = value } } } ; for ( var key in target ) { _loop ( key ) } } function mergeStyle ( target , vm , subVm ) { var _loop2 = function _loop2 ( key ) { var value = target [ key ] ; if ( typeof value = = = " function " ) { var returnValue = vm . _watch ( value , function ( v ) { if ( subVm . _rootEl ) { subVm . _rootEl . setStyle ( key , v ) } } ) ; subVm . _rootEl . setStyle ( key , returnValue ) } else { if ( subVm . _rootEl ) { subVm . _rootEl . setStyle ( key , value ) } } } ; for ( var key in target ) { _loop2 ( key ) } } function mergeClassStyle ( target , vm , subVm ) { var css = vm . _options & & vm . _options . style | | { } ; if ( ! subVm . _rootEl ) { return } if ( typeof target = = = " function " ) { var _value = vm . _watch ( target , function ( v ) { setClassStyle ( subVm . _rootEl , css , v ) } ) ; setClassStyle ( subVm . _rootEl , css , _value ) } else if ( target ! = null ) { setClassStyle ( subVm . _rootEl , css , target ) } } function _setId ( id , el , vm ) { var _this = this ; var map = Object . create ( null ) ; Object . defineProperties ( map , { vm : { value : vm , writable : false , configurable : false } , el : { get : function get ( ) { return el | | vm . _rootEl } , configurable : false } } ) ; if ( typeof id = = = " function " ) { var handler = id ; id = handler . call ( this ) ; if ( id ) { this . _ids [ id ] = map } this . _watch ( handler , function ( newId ) { if ( newId ) { _this . _ids [ newId ] = map } } ) } else if ( id & & typeof id = = = " string " ) { this . _ids [ id ] = map } } function _setAttr ( el , attr ) { this . _bindDir ( el , " attr " , attr ) } function setClassStyle ( el , css , classList ) { var classStyle = { } ; var length = classList . length ; for ( var i = 0 ; i < length ; i + + ) { var style = css [ classList [ i ] ] ; if ( style ) { for ( var key in style ) { classStyle [ key ] = style [ key ] } } } el . setClassStyle ( classStyle ) } function _setClass ( el , classList ) { if ( typeof classList ! = = " function " & & ! Array . isArray ( classList ) ) { return } if ( Array . isArray ( classList ) & & ! classList . length ) { el . setClassStyle ( { } ) ; return } var style = this . _options & & this . _options . style | | { } ; if ( typeof classList = = = " function " ) { var _value2 = this . _watch ( classList , function ( v ) { setClassStyle ( el , style , v ) } ) ; setClassStyle ( el , style , _value2 ) } else { setClassStyle ( el , style , classList ) } } function _setStyle ( el , style ) { this . _bindDir ( el , " style " , style ) } function _setEvent ( el , type , handler ) { el . addEvent ( type , _ . bind ( handler , this ) ) } function _bindEvents ( el , events ) { if ( ! events ) { return } var keys = Object . keys ( events ) ; var i = keys . length ; while ( i - - ) { var key = keys [ i ] ; var handler = events [ key ] ; if ( typeof handler = = = " string " ) { handler = this [ handler ] ; if ( ! handler ) { _ . error ( ' The method " ' + handler + ' " is not defined . ' ) } } this . _setEvent ( el , key , handler ) } } function _bindDir ( el , name , data ) { if ( ! data ) { return } var keys = Object . keys ( data ) ; var i = keys . 
length ; while ( i - - ) { var key = keys [ i ] ; var _value3 = data [ key ] ; if ( typeof _value3 = = = " function " ) { this . _bindKey ( el , name , key , _value3 ) } else { el [ SETTERS [ name ] ] ( key , _value3 ) } } } function _bindKey ( el , name , key , calc ) { var _this2 = this ; var methodName = SETTERS [ name ] ; var value = this . _watch ( calc , function ( value ) { function handler ( ) { el [ methodName ] ( key , value ) } var differ = _this2 & & _this2 . _app & & _this2 . _app . differ ; if ( differ ) { differ . append ( " element " , el . depth , el . ref , handler ) } else { handler ( ) } } ) ; el [ methodName ] ( key , value ) } function _watch ( calc , callback ) { var watcher = new _watcher2 . default ( this , calc , function ( value , oldValue ) { if ( ( typeof value = = = " undefined " ? " undefined " : _typeof ( value ) ) ! = = " object " & & value = = = oldValue ) { return } callback ( value ) } ) ; return watcher . value } } , function ( module , exports ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj } ; exports . _createBody = _createBody ; exports . _createElement = _createElement ; exports . _createBlock = _createBlock ; exports . _createBlockStart = _createBlockStart ; exports . _createBlockEnd = _createBlockEnd ; exports . _attachTarget = _attachTarget ; exports . _moveTarget = _moveTarget ; exports . _moveElement = _moveElement ; exports . _moveBlock = _moveBlock ; exports . _removeTarget = _removeTarget ; exports . _removeElement = _removeElement ; exports . _removeBlock = _removeBlock ; function _createBody ( type ) { var doc = this . _app . doc ; return doc . createBody ( type ) } function _createElement ( type ) { var doc = this . _app . doc ; return doc . createElement ( type ) } function _createBlock ( element ) { var start = this . _createBlockStart ( ) ; var end = this . _createBlockEnd ( ) ; var blockId = lastestBlockId + + ; if ( element . element ) { element . element . insertBefore ( start , element . end ) ; element . element . insertBefore ( end , element . end ) ; element = element . element } else { element . appendChild ( start ) ; element . appendChild ( end ) } return { start : start , end : end , element : element , blockId : blockId } } var lastestBlockId = 1 ; function _createBlockStart ( ) { var doc = this . _app . doc ; var anchor = doc . createComment ( " start " ) ; return anchor } function _createBlockEnd ( ) { var doc = this . _app . doc ; var anchor = doc . createComment ( " end " ) ; return anchor } function _attachTarget ( target , dest ) { if ( dest . element ) { var before = dest . end ; var after = dest . updateMark ; if ( dest . children ) { dest . children . push ( target ) } if ( after ) { var signal = this . _moveTarget ( target , after ) ; dest . updateMark = target . element ? target . end : target ; return signal } else if ( target . element ) { dest . element . insertBefore ( target . start , before ) ; dest . element . insertBefore ( target . end , before ) } else { return dest . element . insertBefore ( target , before ) } } else { if ( target . element ) { dest . appendChild ( target . start ) ; dest . appendChild ( target . end ) } else { return dest . 
appendChild ( target ) } } } function _moveTarget ( target , after ) { if ( target . element ) { return this . _moveBlock ( target , after ) } return this . _moveElement ( target , after ) } function _moveElement ( element , after ) { var parent = after . parentNode ; if ( parent ) { return parent . insertAfter ( element , after ) } } function _moveBlock ( fragBlock , after ) { var parent = after . parentNode ; if ( parent ) { var _ret = function ( ) { var el = fragBlock . start ; var signal = void 0 ; var group = [ el ] ; while ( el & & el ! = = fragBlock . end ) { el = el . nextSibling ; group . push ( el ) } var temp = after ; group . every ( function ( el ) { signal = parent . insertAfter ( el , temp ) ; temp = el ; return signal ! = = - 1 } ) ; return { v : signal } } ( ) ; if ( ( typeof _ret = = = " undefined " ? " undefined " : _typeof ( _ret ) ) = = = " object " ) return _ret . v } } function _removeTarget ( target ) { if ( target . element ) { this . _removeBlock ( target ) } else { this . _removeElement ( target ) } } function _removeElement ( target ) { var parent = target . parentNode ; if ( parent ) { parent . removeChild ( target ) } } function _removeBlock ( fragBlock ) { var _this = this ; var preserveBlock = arguments . length < = 1 | | arguments [ 1 ] = = = undefined ? false : arguments [ 1 ] ; var result = [ ] ; var el = fragBlock . start . nextSibling ; while ( el & & el ! = = fragBlock . end ) { result . push ( el ) ; el = el . nextSibling } if ( ! preserveBlock ) { this . _removeElement ( fragBlock . start ) } result . forEach ( function ( el ) { _this . _removeElement ( el ) } ) ; if ( ! preserveBlock ) { this . _removeElement ( fragBlock . end ) } } } , function ( module , exports ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . $ emit = $ emit ; exports . $ dispatch = $ dispatch ; exports . $ broadcast = $ broadcast ; exports . $ on = $ on ; exports . $ off = $ off ; exports . _initEvents = _initEvents ; function Evt ( type , detail ) { if ( detail instanceof Evt ) { return detail } this . timestamp = Date . now ( ) ; this . detail = detail ; this . type = type ; var shouldStop = false ; this . stop = function ( ) { shouldStop = true } ; this . hasStopped = function ( ) { return shouldStop } } function $ emit ( type , detail ) { var _this = this ; var events = this . _vmEvents ; var handlerList = events [ type ] ; if ( handlerList ) { ( function ( ) { var evt = new Evt ( type , detail ) ; handlerList . forEach ( function ( handler ) { handler . call ( _this , evt ) } ) } ) ( ) } } function $ dispatch ( type , detail ) { var evt = new Evt ( type , detail ) ; this . $ emit ( type , evt ) ; if ( ! evt . hasStopped ( ) & & this . _parent & & this . _parent . $ dispatch ) { this . _parent . $ dispatch ( type , evt ) } } function $ broadcast ( type , detail ) { var evt = new Evt ( type , detail ) ; this . $ emit ( type , evt ) ; if ( ! evt . hasStopped ( ) & & this . _childrenVms ) { this . _childrenVms . forEach ( function ( subVm ) { subVm . $ broadcast ( type , evt ) } ) } } function $ on ( type , handler ) { if ( ! type | | typeof handler ! = = " function " ) { return } var events = this . _vmEvents ; var handlerList = events [ type ] | | [ ] ; handlerList . push ( handler ) ; events [ type ] = handlerList ; if ( type = = = " hook : ready " & & this . _ready ) { this . $ emit ( " hook : ready " ) } } function $ off ( type , handler ) { if ( ! type ) { return } var events = this . _vmEvents ; if ( ! 
handler ) { delete events [ type ] ; return } var handlerList = events [ type ] ; if ( ! handlerList ) { return } handlerList . $ remove ( handler ) } var LIFE_CYCLE_TYPES = [ " init " , " created " , " ready " ] ; function _initEvents ( externalEvents ) { var _this2 = this ; var options = this . _options | | { } ; var events = options . events | | { } ; for ( var type1 in events ) { this . $ on ( type1 , events [ type1 ] ) } for ( var type2 in externalEvents ) { this . $ on ( type2 , externalEvents [ type2 ] ) } LIFE_CYCLE_TYPES . forEach ( function ( type ) { _this2 . $ on ( " hook : " + type , options [ type ] ) } ) } } , function ( module , exports ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . clearModules = clearModules ; exports . getModule = getModule ; exports . requireModule = requireModule ; exports . registerModules = registerModules ; exports . registerMethods = registerMethods ; exports . requireComponent = requireComponent ; exports . registerComponent = registerComponent ; var nativeModules = { } ; function assignModules ( modules , ifReplace ) { var _loop = function _loop ( moduleName ) { var methods = nativeModules [ moduleName ] ; if ( ! methods ) { methods = { } ; nativeModules [ moduleName ] = methods } modules [ moduleName ] . forEach ( function ( method ) { if ( typeof method = = = " string " ) { method = { name : method } } if ( ! methods [ method . name ] | | ifReplace ) { methods [ method . name ] = method } } ) } ; for ( var moduleName in modules ) { _loop ( moduleName ) } } function assignApis ( Ctor , apis ) { var p = Ctor . prototype ; for ( var apiName in apis ) { if ( ! p . hasOwnProperty ( apiName ) ) { p [ apiName ] = apis [ apiName ] } } } function clearModules ( ) { nativeModules = { } } function getModule ( moduleName ) { return nativeModules [ moduleName ] } function requireModule ( moduleName ) { var _this = this ; var methods = nativeModules [ moduleName ] ; var target = { } ; var _loop2 = function _loop2 ( methodName ) { target [ methodName ] = function ( ) { for ( var _len = arguments . length , args = Array ( _len ) , _key = 0 ; _key < _len ; _key + + ) { args [ _key ] = arguments [ _key ] } return _this . callTasks ( { module : moduleName , method : methodName , args : args } ) } } ; for ( var methodName in methods ) { _loop2 ( methodName ) } return target } function registerModules ( modules , ifReplace ) { assignModules ( modules , ifReplace ) } function registerMethods ( apis ) { assignApis ( this , apis ) } function requireComponent ( name ) { var customComponentMap = this . customComponentMap ; return customComponentMap [ name ] } function registerComponent ( name , exports ) { var customComponentMap = this . customComponentMap ; if ( customComponentMap [ name ] ) { throw new Error ( " define a component ( " + name + " ) that already exists " ) } customComponentMap [ name ] = exports } } , function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . normalizeVersion = normalizeVersion ; exports . getError = getError ; exports . check = check ; var _semver = __webpack_require__ ( 84 ) ; var _semver2 = _interopRequireDefault ( _semver ) ; var _util = __webpack_require__ ( 82 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } function normalizeVersion ( v ) { var isValid = _semver2 . default . 
valid ( v ) ; if ( isValid ) { return v } v = typeof v = = = " string " ? v : " " ; var split = v . split ( " . " ) ; var i = 0 ; var result = [ ] ; while ( i < 3 ) { var s = typeof split [ i ] = = = " string " & & split [ i ] ? split [ i ] : " 0 " ; result . push ( s ) ; i + + } return result . join ( " . " ) } function getError ( key , val , criteria ) { var result = { isDowngrade : true , errorType : 1 , code : 1e3 } ; var getMsg = function getMsg ( key , val , criteria ) { return " Downgrade [ " + key + " ] : : deviceInfo " + val + " matched criteria " + criteria } ; var _key = key . toLowerCase ( ) ; result . errorMessage = getMsg ( key , val , criteria ) ; if ( _key . indexOf ( " osversion " ) > = 0 ) { result . code = 1001 } else if ( _key . indexOf ( " appversion " ) > = 0 ) { result . code = 1002 } else if ( _key . indexOf ( " weexversion " ) > = 0 ) { result . code = 1003 } else if ( _key . indexOf ( " devicemodel " ) > = 0 ) { result . code = 1004 } return result } function check ( config , deviceInfo ) { deviceInfo = deviceInfo | | global . WXEnvironment ; deviceInfo = ( 0 , _util . isPlainObject ) ( deviceInfo ) ? deviceInfo : { } ; var result = { isDowngrade : false } ; if ( ( 0 , _util . typof ) ( config ) = = = " function " ) { var customDowngrade = config . call ( this , deviceInfo , { semver : _semver2 . default , normalizeVersion : this . normalizeVersion } ) ; customDowngrade = ! ! customDowngrade ; result = customDowngrade ? this . getError ( " custom " , " " , " custom params " ) : result } else { config = ( 0 , _util . isPlainObject ) ( config ) ? config : { } ; var platform = deviceInfo . platform | | " unknow " ; var dPlatform = platform . toLowerCase ( ) ; var cObj = config [ dPlatform ] | | { } ; for ( var i in deviceInfo ) { var key = i ; var keyLower = key . toLowerCase ( ) ; var val = deviceInfo [ i ] ; var isVersion = keyLower . indexOf ( " version " ) > = 0 ; var isDeviceModel = keyLower . indexOf ( " devicemodel " ) > = 0 ; var criteria = cObj [ i ] ; if ( criteria & & isVersion ) { var c = this . normalizeVersion ( criteria ) ; var d = this . normalizeVersion ( deviceInfo [ i ] ) ; if ( _semver2 . default . satisfies ( d , c ) ) { result = this . getError ( key , val , criteria ) ; break } } else if ( isDeviceModel ) { var _criteria = ( 0 , _util . typof ) ( criteria ) = = = " array " ? criteria : [ criteria ] ; if ( _criteria . indexOf ( val ) > = 0 ) { result = this . getError ( key , val , criteria ) ; break } } } } return result } } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . updateActions = updateActions ; exports . init = init ; exports . destroy = destroy ; exports . getRootElement = getRootElement ; exports . fireEvent = fireEvent ; exports . callback = callback ; exports . refreshData = refreshData ; var _util = __webpack_require__ ( 82 ) ; var _ = _interopRequireWildcard ( _util ) ; function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . default = obj ; return newObj } } function _toConsumableArray ( arr ) { if ( Array . isArray ( arr ) ) { for ( var i = 0 , arr2 = Array ( arr . length ) ; i < arr . 
length ; i + + ) { arr2 [ i ] = arr [ i ] } return arr2 } else { return Array . from ( arr ) } } function updateActions ( ) { this . differ . flush ( ) ; var tasks = [ ] ; if ( this . doc & & this . doc . listener & & this . doc . listener . updates . length ) { tasks . push . apply ( tasks , _toConsumableArray ( this . doc . listener . updates ) ) ; this . doc . listener . updates = [ ] } if ( tasks . length ) { return this . callTasks ( tasks ) } } function init ( code , data ) { var _this = this ; _ . debug ( " Intialize an instance with : \ n " , code , data ) ; var result = void 0 ; var define = _ . bind ( this . define , this ) ; var bootstrap = function bootstrap ( name , config , _data ) { result = _this . bootstrap ( name , config , _data | | data ) ; _this . updateActions ( ) ; _this . doc . listener . createFinish ( ) ; _ . debug ( " After intialized an instance ( " + _this . id + " ) " ) } ; var register = _ . bind ( this . register , this ) ; var render = function render ( name , _data ) { result = _this . bootstrap ( name , { } , _data ) } ; var require = function require ( name ) { return function ( _data ) { result = _this . bootstrap ( name , { } , _data ) } } ; var document = this . doc ; var functionBody = void 0 ; if ( typeof code = = = " function " ) { functionBody = code . toString ( ) . substr ( 12 ) } else if ( code ) { functionBody = code . toString ( ) } var _global = global ; var WXEnvironment = _global . WXEnvironment ; if ( WXEnvironment & & WXEnvironment . platform ! = = " Web " ) { ( function ( ) { var timer = _this . requireModule ( " timer " ) ; var timerAPIs = { setTimeout : function setTimeout ( ) { for ( var _len = arguments . length , args = Array ( _len ) , _key = 0 ; _key < _len ; _key + + ) { args [ _key ] = arguments [ _key ] } var handler = function handler ( ) { args [ 0 ] . apply ( args , _toConsumableArray ( args . slice ( 2 ) ) ) } ; timer . setTimeout ( handler , args [ 1 ] ) ; return _this . uid . toString ( ) } , setInterval : function setInterval ( ) { for ( var _len2 = arguments . length , args = Array ( _len2 ) , _key2 = 0 ; _key2 < _len2 ; _key2 + + ) { args [ _key2 ] = arguments [ _key2 ] } var handler = function handler ( ) { args [ 0 ] . apply ( args , _toConsumableArray ( args . slice ( 2 ) ) ) } ; timer . setInterval ( handler , args [ 1 ] ) ; return _this . uid . toString ( ) } , clearTimeout : function clearTimeout ( n ) { timer . clearTimeout ( n ) } , clearInterval : function clearInterval ( n ) { timer . clearInterval ( n ) } } ; var fn = new Function ( " define " , " require " , " document " , " bootstrap " , " register " , " render " , " __weex_define__ " , " __weex_bootstrap__ " , " setTimeout " , " setInterval " , " clearTimeout " , " clearInterval " , functionBody ) ; fn ( define , require , document , bootstrap , register , render , define , bootstrap , timerAPIs . setTimeout , timerAPIs . setInterval , timerAPIs . clearTimeout , timerAPIs . clearInterval ) } ) ( ) } else { var _fn = new Function ( " define " , " require " , " document " , " bootstrap " , " register " , " render " , " __weex_define__ " , " __weex_bootstrap__ " , functionBody ) ; _fn ( define , require , document , bootstrap , register , render , define , bootstrap ) } return result } function destroy ( ) { _ . debug ( " Destory an instance ( " + this . id + " ) " ) ; this . id = " " ; this . options = null ; this . blocks = null ; this . vm = null ; this . doc = null ; this . customComponentMap = null ; this . 
callbacks = null } function getRootElement ( ) { var doc = this . doc | | { } ; var body = doc . body | | { } ; return body . toJSON ? body . toJSON ( ) : { } } function fireEvent ( ref , type , e , domChanges ) { var _this2 = this ; _ . debug ( ' Fire a " ' + type + ' " event on an element ( ' + ref + " ) in instance ( " + this . id + " ) " ) ; if ( Array . isArray ( ref ) ) { ref . some ( function ( ref ) { return _this2 . fireEvent ( ref , type , e ) ! = = false } ) ; return } var el = this . doc . getRef ( ref ) ; if ( el ) { this . doc . close ( ) ; var result = this . doc . fireEvent ( el , type , e , domChanges ) ; this . updateActions ( ) ; this . doc . listener . updateFinish ( ) ; this . doc . open ( ) ; return result } return new Error ( ' invalid element reference " ' + ref + ' " ' ) } function callback ( callbackId , data , ifKeepAlive ) { _ . debug ( " Invoke a callback ( " + callbackId + " ) with " , data , " in instance ( " + this . id + " ) " ) ; var callback = this . callbacks [ callbackId ] ; if ( typeof callback = = = " function " ) { this . doc . close ( ) ; callback ( data ) ; if ( typeof ifKeepAlive = = = " undefined " | | ifKeepAlive = = = false ) { this . callbacks [ callbackId ] = undefined } this . updateActions ( ) ; this . doc . listener . updateFinish ( ) ; this . doc . open ( ) ; return } return new Error ( ' invalid callback id " ' + callbackId + ' " ' ) } function refreshData ( data ) { _ . debug ( " Refresh with " , data , " in instance [ " + this . id + " ] " ) ; var vm = this . vm ; if ( vm & & data ) { this . doc . close ( ) ; if ( typeof vm . refreshData = = = " function " ) { vm . refreshData ( data ) } else { _ . extend ( vm , data ) } this . updateActions ( ) ; this . doc . listener . refreshFinish ( ) ; this . doc . open ( ) ; return } return new Error ( ' invalid data " ' + data + ' " ' ) } } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; var _createClass = function ( ) { function defineProperties ( target , props ) { for ( var i = 0 ; i < props . length ; i + + ) { var descriptor = props [ i ] ; descriptor . enumerable = descriptor . enumerable | | false ; descriptor . configurable = true ; if ( " value " in descriptor ) descriptor . writable = true ; Object . defineProperty ( target , descriptor . key , descriptor ) } } return function ( Constructor , protoProps , staticProps ) { if ( protoProps ) defineProperties ( Constructor . prototype , protoProps ) ; if ( staticProps ) defineProperties ( Constructor , staticProps ) ; return Constructor } } ( ) ; function _classCallCheck ( instance , Constructor ) { if ( ! ( instance instanceof Constructor ) ) { throw new TypeError ( " Cannot call a class as a function " ) } } var Differ = function ( ) { function Differ ( id ) { _classCallCheck ( this , Differ ) ; this . id = id ; this . map = [ ] ; this . hooks = [ ] } _createClass ( Differ , [ { key : " isEmpty " , value : function isEmpty ( ) { return this . map . length = = = 0 } } , { key : " append " , value : function append ( type , depth , ref , handler ) { var _this = this ; if ( ! this . hasTimer ) { this . hasTimer = true ; setTimeout ( function ( ) { _this . hasTimer = false ; _this . flush ( true ) } , 0 ) } var map = this . map ; if ( ! map [ depth ] ) { map [ depth ] = { } } var group = map [ depth ] ; if ( ! group [ type ] ) { group [ type ] = { } } if ( type = = = " element " ) { if ( ! 
group [ type ] [ ref ] ) { group [ type ] [ ref ] = [ ] } group [ type ] [ ref ] . push ( handler ) } else { group [ type ] [ ref ] = handler } } } , { key : " flush " , value : function flush ( isTimeout ) { var map = this . map . slice ( ) ; this . map . length = 0 ; map . forEach ( function ( group ) { callTypeMap ( group , " repeat " ) ; callTypeMap ( group , " shown " ) ; callTypeList ( group , " element " ) } ) ; var hooks = this . hooks . slice ( ) ; this . hooks . length = 0 ; hooks . forEach ( function ( fn ) { fn ( ) } ) ; if ( ! this . isEmpty ( ) ) { this . flush ( ) } } } , { key : " then " , value : function then ( fn ) { this . hooks . push ( fn ) } } ] ) ; return Differ } ( ) ; exports . default = Differ ; function callTypeMap ( group , type ) { var map = group [ type ] ; for ( var ref in map ) { map [ ref ] ( ) } } function callTypeList ( group , type ) { var map = group [ type ] ; for ( var ref in map ) { var list = map [ ref ] ; list . forEach ( function ( handler ) { handler ( ) } ) } } } , function ( module , exports , __webpack_require__ ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . instanceMap = undefined ; exports . Document = Document ; exports . Node = Node ; exports . Element = Element ; exports . Comment = Comment ; var _listener4 = __webpack_require__ ( 101 ) ; var _listener5 = _interopRequireDefault ( _listener4 ) ; function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { " default " : obj } } var DEFAULT_TAG_NAME = " div " ; var instanceMap = exports . instanceMap = { } ; var nextNodeRef = 1 ; function Document ( id , url , handler ) { id = id ? id . toString ( ) : " " ; this . id = id ; this . URL = url ; instanceMap [ id ] = this ; this . nodeMap = { } ; this . listener = new _listener5 . default ( id , handler | | genCallTasks ( id ) ) ; this . createDocumentElement ( ) } function genCallTasks ( id ) { return function ( tasks ) { if ( ! Array . isArray ( tasks ) ) { tasks = [ tasks ] } return callNative ( id , tasks , " - 1 " ) } } Document . prototype . destroy = function ( ) { delete this . listener ; delete this . nodeMap ; delete instanceMap [ this . id ] } ; Document . prototype . open = function ( ) { this . listener . batched = false } ; Document . prototype . close = function ( ) { this . listener . batched = true } ; Document . prototype . createDocumentElement = function ( ) { var _this = this ; if ( ! this . documentElement ) { var el = new Element ( " document " ) ; el . docId = this . id ; el . ownerDocument = this ; el . role = " documentElement " ; el . depth = 0 ; el . ref = " _documentElement " ; this . nodeMap . _documentElement = el ; this . documentElement = el ; el . appendChild = function ( node ) { appendBody ( _this , node ) } ; el . insertBefore = function ( node , before ) { appendBody ( _this , node , before ) } } return this . documentElement } ; function appendBody ( doc , node , before ) { var documentElement = doc . documentElement ; if ( documentElement . pureChildren . length > 0 | | node . parentNode ) { return } var children = documentElement . children ; var beforeIndex = children . indexOf ( before ) ; if ( beforeIndex < 0 ) { children . push ( node ) } else { children . splice ( beforeIndex , 0 , node ) } if ( node . nodeType = = = 1 ) { if ( node . role = = = " body " ) { node . docId = doc . id ; node . ownerDocument = doc ; node . parentNode = documentElement } else { node . children . forEach ( function ( child ) { child . 
parentNode = node } ) ; setBody ( doc , node ) ; node . docId = doc . id ; <nl> - node . ownerDocument = doc ; linkParent ( node , documentElement ) ; delete doc . nodeMap [ node . nodeId ] } documentElement . pureChildren . push ( node ) ; doc . listener . createBody ( node ) } else { node . parentNode = documentElement ; doc . nodeMap [ node . ref ] = node } } function setBody ( doc , el ) { el . role = " body " ; el . depth = 1 ; delete doc . nodeMap [ el . nodeId ] ; el . ref = " _root " ; doc . nodeMap . _root = el ; doc . body = el } Document . prototype . createBody = function ( type , props ) { if ( ! this . body ) { var el = new Element ( type , props ) ; setBody ( this , el ) } return this . body } ; Document . prototype . createElement = function ( tagName , props ) { return new Element ( tagName , props ) } ; Document . prototype . createComment = function ( text ) { return new Comment ( text ) } ; Document . prototype . fireEvent = function ( el , type , e , domChanges ) { if ( ! el ) { return } e = e | | { } ; e . type = type ; e . target = el ; e . timestamp = Date . now ( ) ; if ( domChanges ) { updateElement ( el , domChanges ) } return el . fireEvent ( type , e ) } ; Document . prototype . getRef = function ( ref ) { return this . nodeMap [ ref ] } ; function updateElement ( el , changes ) { var attrs = changes . attrs | | { } ; for ( var name in attrs ) { el . setAttr ( name , attrs [ name ] , true ) } var style = changes . style | | { } ; for ( var _name in style ) { el . setStyle ( _name , style [ _name ] , true ) } } function Node ( ) { this . nodeId = ( nextNodeRef + + ) . toString ( ) ; this . ref = this . nodeId ; this . children = [ ] ; this . pureChildren = [ ] ; this . parentNode = null ; this . nextSibling = null ; this . previousSibling = null } Node . prototype . destroy = function ( ) { var doc = instanceMap [ this . docId ] ; if ( doc ) { delete this . docId ; delete doc . nodeMap [ this . nodeId ] } this . children . forEach ( function ( child ) { child . destroy ( ) } ) } ; function Element ( ) { var type = arguments . length < = 0 | | arguments [ 0 ] = = = undefined ? DEFAULT_TAG_NAME : arguments [ 0 ] ; var props = arguments [ 1 ] ; props = props | | { } ; this . nodeType = 1 ; this . nodeId = ( nextNodeRef + + ) . toString ( ) ; this . ref = this . nodeId ; this . type = type ; this . attr = props . attr | | { } ; this . classStyle = props . classStyle | | { } ; this . style = props . style | | { } ; this . event = { } ; this . children = [ ] ; this . pureChildren = [ ] } Element . prototype = new Node ; Element . prototype . appendChild = function ( node ) { if ( node . parentNode & & node . parentNode ! = = this ) { return } if ( ! node . parentNode ) { linkParent ( node , this ) ; insertIndex ( node , this . children , this . children . length , true ) ; if ( this . docId ) { registerNode ( this . docId , node ) } if ( node . nodeType = = = 1 ) { insertIndex ( node , this . pureChildren , this . pureChildren . length ) ; if ( this . docId ) { var listener = instanceMap [ this . docId ] . listener ; return listener . addElement ( node , this . ref , - 1 ) } } } else { moveIndex ( node , this . children , this . children . length , true ) ; if ( node . nodeType = = = 1 ) { var index = moveIndex ( node , this . pureChildren , this . pureChildren . length ) ; if ( this . docId & & index > = 0 ) { var _listener = instanceMap [ this . docId ] . listener ; return _listener . moveElement ( node . ref , this . ref , index ) } } } } ; Element . prototype . 
insertBefore = function ( node , before ) { if ( node . parentNode & & node . parentNode ! = = this ) { return } if ( node = = = before | | node . nextSibling = = = before ) { return } if ( ! node . parentNode ) { linkParent ( node , this ) ; insertIndex ( node , this . children , this . children . indexOf ( before ) , true ) ; if ( this . docId ) { registerNode ( this . docId , node ) } if ( node . nodeType = = = 1 ) { var pureBefore = nextElement ( before ) ; var index = insertIndex ( node , this . pureChildren , pureBefore ? this . pureChildren . indexOf ( pureBefore ) : this . pureChildren . length ) ; if ( this . docId ) { var listener = instanceMap [ this . docId ] . listener ; return listener . addElement ( node , this . ref , index ) } } } else { moveIndex ( node , this . children , this . children . indexOf ( before ) , true ) ; if ( node . nodeType = = = 1 ) { var _pureBefore = nextElement ( before ) ; var _index = moveIndex ( node , this . pureChildren , _pureBefore ? this . pureChildren . indexOf ( _pureBefore ) : this . pureChildren . length ) ; if ( this . docId & & _index > = 0 ) { var _listener2 = instanceMap [ this . docId ] . listener ; return _listener2 . moveElement ( node . ref , this . ref , _index ) } } } } ; Element . prototype . insertAfter = function ( node , after ) { if ( node . parentNode & & node . parentNode ! = = this ) { return } if ( node = = = after | | node . previousSibling = = = after ) { return } if ( ! node . parentNode ) { linkParent ( node , this ) ; insertIndex ( node , this . children , this . children . indexOf ( after ) + 1 , true ) ; if ( this . docId ) { registerNode ( this . docId , node ) } if ( node . nodeType = = = 1 ) { var index = insertIndex ( node , this . pureChildren , this . pureChildren . indexOf ( previousElement ( after ) ) + 1 ) ; if ( this . docId ) { var listener = instanceMap [ this . docId ] . listener ; return listener . addElement ( node , this . ref , index ) } } } else { moveIndex ( node , this . children , this . children . indexOf ( after ) + 1 , true ) ; if ( node . nodeType = = = 1 ) { var _index2 = moveIndex ( node , this . pureChildren , this . pureChildren . indexOf ( previousElement ( after ) ) + 1 ) ; if ( this . docId & & _index2 > = 0 ) { var _listener3 = instanceMap [ this . docId ] . listener ; return _listener3 . moveElement ( node . ref , this . ref , _index2 ) } } } } ; Element . prototype . removeChild = function ( node , preserved ) { if ( node . parentNode ) { removeIndex ( node , this . children , true ) ; if ( node . nodeType = = = 1 ) { removeIndex ( node , this . pureChildren ) ; if ( this . docId ) { var listener = instanceMap [ this . docId ] . listener ; listener . removeElement ( node . ref ) } } } if ( ! preserved ) { node . destroy ( ) } } ; Element . prototype . clear = function ( ) { var _this2 = this ; if ( this . docId ) { ( function ( ) { var listener = instanceMap [ _this2 . docId ] . listener ; _this2 . pureChildren . forEach ( function ( node ) { listener . removeElement ( node . ref ) } ) } ) ( ) } this . children . forEach ( function ( node ) { node . destroy ( ) } ) ; this . children . length = 0 ; this . pureChildren . length = 0 } ; function nextElement ( node ) { while ( node ) { if ( node . nodeType = = = 1 ) { return node } node = node . nextSibling } } function previousElement ( node ) { while ( node ) { if ( node . nodeType = = = 1 ) { return node } node = node . previousSibling } } function linkParent ( node , parent ) { node . parentNode = parent ; if ( parent . 
docId ) { node . docId = parent . docId ; node . ownerDocument = parent . ownerDocument ; node . ownerDocument . nodeMap [ node . nodeId ] = node ; node . depth = parent . depth + 1 } node . children . forEach ( function ( child ) { linkParent ( child , node ) } ) } function registerNode ( docId , node ) { var doc = instanceMap [ docId ] ; doc . nodeMap [ node . nodeId ] = node } function insertIndex ( target , list , newIndex , changeSibling ) { if ( newIndex < 0 ) { newIndex = 0 } var before = list [ newIndex - 1 ] ; var after = list [ newIndex ] ; list . splice ( newIndex , 0 , target ) ; if ( changeSibling ) { before & & ( before . nextSibling = target ) ; target . previousSibling = before ; target . nextSibling = after ; after & & ( after . previousSibling = target ) } return newIndex } function moveIndex ( target , list , newIndex , changeSibling ) { var index = list . indexOf ( target ) ; if ( index < 0 ) { return - 1 } if ( changeSibling ) { var before = list [ index - 1 ] ; var after = list [ index + 1 ] ; before & & ( before . nextSibling = after ) ; after & & ( after . previousSibling = before ) } list . splice ( index , 1 ) ; var newIndexAfter = newIndex ; if ( index < = newIndex ) { newIndexAfter = newIndex - 1 } var beforeNew = list [ newIndexAfter - 1 ] ; var afterNew = list [ newIndexAfter ] ; list . splice ( newIndexAfter , 0 , target ) ; if ( changeSibling ) { beforeNew & & ( beforeNew . nextSibling = target ) ; target . previousSibling = beforeNew ; target . nextSibling = afterNew ; afterNew & & ( afterNew . previousSibling = target ) } if ( index = = = newIndexAfter ) { return - 1 } return newIndex } function removeIndex ( target , list , changeSibling ) { var index = list . indexOf ( target ) ; if ( index < 0 ) { return } if ( changeSibling ) { var before = list [ index - 1 ] ; var after = list [ index + 1 ] ; before & & ( before . nextSibling = after ) ; after & & ( after . previousSibling = before ) } list . splice ( index , 1 ) } Element . prototype . setAttr = function ( key , value , silent ) { if ( this . attr [ key ] = = = value ) { return } this . attr [ key ] = value ; if ( ! silent & & this . docId ) { var listener = instanceMap [ this . docId ] . listener ; listener . setAttr ( this . ref , key , value ) } } ; Element . prototype . setStyle = function ( key , value , silent ) { if ( this . style [ key ] = = = value ) { return } this . style [ key ] = value ; if ( ! silent & & this . docId ) { var listener = instanceMap [ this . docId ] . listener ; listener . setStyle ( this . ref , key , value ) } } ; Element . prototype . setClassStyle = function ( classStyle ) { this . classStyle = classStyle ; if ( this . docId ) { var listener = instanceMap [ this . docId ] . listener ; listener . setStyles ( this . ref , this . toStyle ( ) ) } } ; Element . prototype . addEvent = function ( type , handler ) { if ( ! this . event [ type ] ) { this . event [ type ] = handler ; if ( this . docId ) { var listener = instanceMap [ this . docId ] . listener ; listener . addEvent ( this . ref , type ) } } } ; Element . prototype . removeEvent = function ( type ) { if ( this . event [ type ] ) { delete this . event [ type ] ; if ( this . docId ) { var listener = instanceMap [ this . docId ] . listener ; listener . removeEvent ( this . ref , type ) } } } ; Element . prototype . fireEvent = function ( type , e ) { var handler = this . event [ type ] ; if ( handler ) { return handler . call ( this , e ) } } ; Element . prototype . toStyle = function ( ) { return Object . 
assign ( { } , this . classStyle , this . style ) } ; Element . prototype . toJSON = function ( ) { var result = { ref : this . ref . toString ( ) , type : this . type , attr : this . attr , style : this . toStyle ( ) } ; var event = Object . keys ( this . event ) ; if ( event . length ) { result . event = event } if ( this . pureChildren . length ) { result . children = this . pureChildren . map ( function ( child ) { return child . toJSON ( ) } ) } return result } ; Element . prototype . toString = function ( ) { return " < " + this . type + " attr = " + JSON . stringify ( this . attr ) + " style = " + JSON . stringify ( this . toStyle ( ) ) + " > " + this . pureChildren . map ( function ( child ) { return child . toString ( ) } ) . join ( " " ) + " < / " + this . type + " > " } ; function Comment ( value ) { this . nodeType = 8 ; this . nodeId = ( nextNodeRef + + ) . toString ( ) ; this . ref = this . nodeId ; this . type = " comment " ; this . value = value ; this . children = [ ] ; this . pureChildren = [ ] } Comment . prototype = new Node ; Comment . prototype . toString = function ( ) { return " < ! - - " + this . value + " - - > " } } , function ( module , exports ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . default = Listener ; exports . createAction = createAction ; function Listener ( id , handler ) { this . id = id ; this . batched = false ; this . updates = [ ] ; if ( typeof handler = = = " function " ) { this . handler = handler } } Listener . prototype . createFinish = function ( callback ) { var handler = this . handler ; return handler ( [ createAction ( " createFinish " , [ ] ) ] , callback ) } ; Listener . prototype . updateFinish = function ( callback ) { var handler = this . handler ; return handler ( [ createAction ( " updateFinish " , [ ] ) ] , callback ) } ; Listener . prototype . refreshFinish = function ( callback ) { var handler = this . handler ; return handler ( [ createAction ( " refreshFinish " , [ ] ) ] , callback ) } ; Listener . prototype . createBody = function ( element ) { var body = element . toJSON ( ) ; var children = body . children ; delete body . children ; var actions = [ createAction ( " createBody " , [ body ] ) ] ; if ( children ) { actions . push . apply ( actions , children . map ( function ( child ) { return createAction ( " addElement " , [ body . ref , child , - 1 ] ) } ) ) } return this . addActions ( actions ) } ; Listener . prototype . addElement = function ( element , ref , index ) { if ( ! ( index > = 0 ) ) { index = - 1 } return this . addActions ( createAction ( " addElement " , [ ref , element . toJSON ( ) , index ] ) ) } ; Listener . prototype . removeElement = function ( ref ) { if ( Array . isArray ( ref ) ) { var actions = ref . map ( function ( r ) { return createAction ( " removeElement " , [ r ] ) } ) ; return this . addActions ( actions ) } return this . addActions ( createAction ( " removeElement " , [ ref ] ) ) } ; Listener . prototype . moveElement = function ( targetRef , parentRef , index ) { return this . addActions ( createAction ( " moveElement " , [ targetRef , parentRef , index ] ) ) } ; Listener . prototype . setAttr = function ( ref , key , value ) { var result = { } ; result [ key ] = value ; return this . addActions ( createAction ( " updateAttrs " , [ ref , result ] ) ) } ; Listener . prototype . setStyle = function ( ref , key , value ) { var result = { } ; result [ key ] = value ; return this . 
addActions ( createAction ( " updateStyle " , [ ref , result ] ) ) } ; Listener . prototype . setStyles = function ( ref , style ) { return this . addActions ( createAction ( " updateStyle " , [ ref , style ] ) ) } ; Listener . prototype . addEvent = function ( ref , type ) { return this . addActions ( createAction ( " addEvent " , [ ref , type ] ) ) } ; Listener . prototype . removeEvent = function ( ref , type ) { return this . addActions ( createAction ( " removeEvent " , [ ref , type ] ) ) } ; Listener . prototype . handler = function ( actions , cb ) { return cb & & cb ( ) } ; Listener . prototype . addActions = function ( actions ) { var updates = this . updates ; var handler = this . handler ; if ( ! Array . isArray ( actions ) ) { actions = [ actions ] } if ( this . batched ) { updates . push . apply ( updates , actions ) } else { return handler ( actions ) } } ; function createAction ( name , args ) { return { module : " dom " , method : name , args : args } } } , function ( module , exports ) { module . exports = { name : " weex " , version : " 0 . 4 . 0 " , description : " A framework for building Mobile cross - platform UI " , license : " Apache - 2 . 0 " , repository : { type : " git " , url : " git @ github . com : alibaba / weex . git " } , homepage : " http : / / alibaba . github . io / weex / " , bugs : { url : " https : / / github . com / alibaba / weex / issues " } , " private " : " true " , keywords : [ " weex " , " hybrid " , " webcomponent " , " appframework " , " mvvm " , " javascript " , " webkit " , " v8 " , " jscore " , " html5 " , " android " , " ios " , " yunos " ] , engines : { node : " > = 4 " } , scripts : { postinstall : " bash . / bin / install - hooks . sh " , " build : browser " : " webpack - - config build / webpack . browser . config . js " , " build : native " : " webpack - - config build / webpack . native . config . js " , " build : examples " : " webpack - - config build / webpack . examples . config . js " , " build : test " : " webpack - - config build / webpack . test . config . js " , " dist : browser " : " npm run build : browser & & bash . / bin / dist - browser . sh " , dist : " npm run dist : browser " , " dev : browser " : " webpack - - watch - - config build / webpack . browser . config . js " , " dev : native " : " webpack - - watch - - config build / webpack . native . config . js " , " dev : examples " : " webpack - - watch - - config build / webpack . examples . config . js " , " dev : test " : " webpack - - watch - - config build / webpack . test . config . js " , build : " npm run build : native & & npm run build : browser & & npm run build : examples & & npm run build : test " , lint : " eslint html5 " , " test : unit " : " mocha - - compilers js : babel - core / register html5 / test / unit / * / * . js html5 / test / unit / * / * / * . js " , " test : cover " : " babel - node node_modules / isparta / bin / isparta cover - - report text node_modules / mocha / bin / _mocha - - - - reporter dot html5 / test / unit / * / * . js html5 / test / unit / * / * / * . js " , " test : e2e " : " node html5 / test / e2e / runner . js " , test : " npm run lint & & npm run test : cover & & npm run test : e2e " , serve : " serve . 
/ - p 12580 " , " clean : examples " : ' echo " \ \ 033 [ 36 ; 1m [ Clean ] \ \ 033 [ 0m \ \ 033 [ 33mexamples \ \ 033 [ 0m " & & rm - vrf examples / build / * ' , " clean : test " : ' echo " \ \ 033 [ 36 ; 1m [ Clean ] \ \ 033 [ 0m \ \ 033 [ 33mtest \ \ 033 [ 0m " & & rm - vrf test / build / * ' , clean : " npm run clean : examples & & npm run clean : test " , " copy : js " : " cp - vf . / dist / native . js . / android / sdk / assets / main . js " , " copy : examples " : " rm - rf . / android / playground / app / src / main / assets / * & & cp - vrf . / examples / build / * . / android / playground / app / src / main / assets / " , copy : " npm run copy : js & & npm run copy : examples " } , subversion : { browser : " 0 . 2 . 23 " , framework : " 0 . 14 . 5 " , transformer : " > = 0 . 1 . 5 < 0 . 4 " } , dependencies : { animationjs : " ^ 0 . 1 . 5 " , carrousel : " ^ 0 . 1 . 11 " , " core - js " : " ^ 2 . 4 . 0 " , cubicbezier : " ^ 0 . 1 . 1 " , envd : " ^ 0 . 1 . 1 " , fixedsticky : " ^ 0 . 1 . 0 " , httpurl : " ^ 0 . 1 . 1 " , kountdown : " ^ 0 . 1 . 2 " , lazyimg : " ^ 0 . 1 . 2 " , lie : " ^ 3 . 0 . 4 " , modals : " ^ 0 . 1 . 5 " , " scroll - to " : " 0 . 0 . 2 " , semver : " ^ 5 . 1 . 0 " , transitionize : " 0 . 0 . 3 " , " weex - components " : " ^ 0 . 1 . 2 " } , devDependencies : { " babel - cli " : " ~ 6 . 4 . 5 " , " babel - loader " : " ^ 6 . 2 . 4 " , " babel - preset - es2015 " : " ^ 6 . 9 . 0 " , chai : " ^ 3 . 5 . 0 " , chromedriver : " ^ 2 . 21 . 2 " , " cross - spawn " : " ^ 4 . 0 . 0 " , " css - loader " : " ^ 0 . 23 . 1 " , eslint : " ^ 2 . 11 . 1 " , " http - server " : " ^ 0 . 9 . 0 " , isparta : " ^ 4 . 0 . 0 " , istanbul : " ^ 0 . 4 . 3 " , " json - loader " : " ^ 0 . 5 . 4 " , mocha : " ^ 2 . 5 . 3 " , nightwatch : " ^ 0 . 9 . 4 " , " phantomjs - prebuilt " : " ^ 2 . 1 . 7 " , " selenium - server " : " ^ 2 . 53 . 0 " , serve : " ^ 1 . 4 . 0 " , sinon : " ^ 1 . 17 . 4 " , " sinon - chai " : " ^ 2 . 8 . 0 " , " style - loader " : " ^ 0 . 13 . 1 " , " uglify - js " : " ^ 2 . 6 . 4 " , webpack : " ^ 1 . 13 . 1 " , " weex - loader " : " ^ 0 . 2 . 0 " } } } , function ( module , exports , __webpack_require__ ) { ( function ( global ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . $ = $ ; exports . $ el = $ el ; exports . $ vm = $ vm ; exports . $ renderThen = $ renderThen ; exports . $ scrollTo = $ scrollTo ; exports . $ transition = $ transition ; exports . $ getConfig = $ getConfig ; exports . $ sendHttp = $ sendHttp ; exports . $ openURL = $ openURL ; exports . $ setTitle = $ setTitle ; exports . $ call = $ call ; var _util = __webpack_require__ ( 82 ) ; var _ = _interopRequireWildcard ( _util ) ; function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] } } newObj . default = obj ; return newObj } } function $ ( id ) { _ . warn ( " Vm # $ is deprecated , please use Vm # $ vm instead " ) ; var info = this . _ids [ id ] ; if ( info ) { return info . vm } } function $ el ( id ) { var info = this . _ids [ id ] ; if ( info ) { return info . el } } function $ vm ( id ) { var info = this . _ids [ id ] ; if ( info ) { return info . vm } } function $ renderThen ( fn ) { var app = this . _app ; var differ = app . differ ; return differ . then ( function ( ) { fn ( ) } ) } function $ scrollTo ( id , offset ) { _ . 
warn ( " Vm # $ scrollTo is deprecated , " + " please use \ " require ( ' @ weex - module / dom ' ) " + ' . scrollTo ( el , options ) " instead ' ) ; var el = this . $ el ( id ) ; if ( el ) { var dom = this . _app . requireModule ( " dom " ) ; dom . scrollToElement ( el . ref , { offset : offset } ) } } function $ transition ( id , options , callback ) { var _this = this ; var el = this . $ el ( id ) ; if ( el & & options & & options . styles ) { var animation = this . _app . requireModule ( " animation " ) ; animation . transition ( el . ref , options , function ( ) { _this . _setStyle ( el , options . styles ) ; callback & & callback . apply ( undefined , arguments ) } ) } } function $ getConfig ( callback ) { var config = _ . extend ( { env : global . WXEnvironment | | { } } , this . _app . options ) ; if ( _ . typof ( callback ) = = = " function " ) { _ . warn ( " the callback of Vm # $ getConfig ( callback ) is deprecated , " + " this api now can directly RETURN config info . " ) ; callback ( config ) } return config } function $ sendHttp ( params , callback ) { _ . warn ( " Vm # $ sendHttp is deprecated , " + " please use \ " require ( ' @ weex - module / stream ' ) " + ' . sendHttp ( params , callback ) " instead ' ) ; var stream = this . _app . requireModule ( " stream " ) ; stream . sendHttp ( params , callback ) } function $ openURL ( url ) { _ . warn ( " Vm # $ openURL is deprecated , " + " please use \ " require ( ' @ weex - module / event ' ) " + ' . openURL ( url ) " instead ' ) ; var event = this . _app . requireModule ( " event " ) ; event . openURL ( url ) } function $ setTitle ( title ) { _ . warn ( " Vm # $ setTitle is deprecated , " + " please use \ " require ( ' @ weex - module / pageInfo ' ) " + ' . setTitle ( title ) " instead ' ) ; var pageInfo = this . _app . requireModule ( " pageInfo " ) ; pageInfo . setTitle ( title ) } function $ call ( moduleName , methodName ) { _ . warn ( " Vm # $ call is deprecated , " + " please use \ " require ( ' @ weex - module / moduleName ' ) \ " instead " ) ; var module = this . _app . requireModule ( moduleName ) ; if ( module & & module [ methodName ] ) { for ( var _len = arguments . length , args = Array ( _len > 2 ? _len - 2 : 0 ) , _key = 2 ; _key < _len ; _key + + ) { args [ _key - 2 ] = arguments [ _key ] } module [ methodName ] . apply ( module , args ) } } } ) . call ( exports , function ( ) { return this } ( ) ) } , function ( module , exports ) { " use strict " ; Object . defineProperty ( exports , " __esModule " , { value : true } ) ; exports . $ userTrack = $ userTrack ; exports . $ sendMtop = $ sendMtop ; exports . $ callWindvane = $ callWindvane ; exports . $ setSpm = $ setSpm ; exports . $ getUserInfo = $ getUserInfo ; exports . $ login = $ login ; exports . $ logout = $ logout ; function $ userTrack ( type , name , comName , param ) { var userTrack = this . _app . requireModule ( " userTrack " ) ; userTrack . commit ( type , name , comName , param ) } function $ sendMtop ( params , callback ) { if ( typeof window = = = " undefined " ) { var windvane = this . _app . requireModule ( " windvane " ) ; windvane . call ( { " class " : " MtopWVPlugin " , method : " send " , data : params } , callback ) } else { var stream = this . _app . requireModule ( " stream " ) ; stream . sendMtop ( params , callback ) } } function $ callWindvane ( params , callback ) { var windvane = this . _app . requireModule ( " windvane " ) ; windvane . call ( params , callback ) } function $ setSpm ( a , b ) { var pageInfo = this . _app . 
requireModule ( " pageInfo " ) ; pageInfo . setSpm ( a , b ) } function $ getUserInfo ( callback ) { var user = this . _app . requireModule ( " user " ) ; user . getUserInfo ( callback ) } function $ login ( callback ) { var user = this . _app . requireModule ( " user " ) ; user . login ( callback ) } function $ logout ( callback ) { var user = this . _app . requireModule ( " user " ) ; user . logout ( callback ) } } ] ) ; <nl> \ No newline at end of file <nl> + ( this . nativeLog | | function ( s ) { console . log ( s ) } ) ( ' START JS FRAMEWORK : 0 . 14 . 3 Build 20160704 ' ) ; <nl> + / * * * * * * / ( function ( modules ) { / / webpackBootstrap <nl> + / * * * * * * / / / The module cache <nl> + / * * * * * * / var installedModules = { } ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / The require function <nl> + / * * * * * * / function __webpack_require__ ( moduleId ) { <nl> + / * * * * * * / <nl> + / * * * * * * / / / Check if module is in cache <nl> + / * * * * * * / if ( installedModules [ moduleId ] ) <nl> + / * * * * * * / return installedModules [ moduleId ] . exports ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / Create a new module ( and put it into the cache ) <nl> + / * * * * * * / var module = installedModules [ moduleId ] = { <nl> + / * * * * * * / exports : { } , <nl> + / * * * * * * / id : moduleId , <nl> + / * * * * * * / loaded : false <nl> + / * * * * * * / } ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / Execute the module function <nl> + / * * * * * * / modules [ moduleId ] . call ( module . exports , module , module . exports , __webpack_require__ ) ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / Flag the module as loaded <nl> + / * * * * * * / module . loaded = true ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / Return the exports of the module <nl> + / * * * * * * / return module . exports ; <nl> + / * * * * * * / } <nl> + / * * * * * * / <nl> + / * * * * * * / <nl> + / * * * * * * / / / expose the modules object ( __webpack_modules__ ) <nl> + / * * * * * * / __webpack_require__ . m = modules ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / expose the module cache <nl> + / * * * * * * / __webpack_require__ . c = installedModules ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / __webpack_public_path__ <nl> + / * * * * * * / __webpack_require__ . p = " " ; <nl> + / * * * * * * / <nl> + / * * * * * * / / / Load entry module and return exports <nl> + / * * * * * * / return __webpack_require__ ( 0 ) ; <nl> + / * * * * * * / } ) <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / * * * * * * / ( [ <nl> + / * 0 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + __webpack_require__ ( 1 ) ; <nl> + <nl> + / * * <nl> + * register methods <nl> + * / <nl> + var methods = __webpack_require__ ( 71 ) ; <nl> + var _global = global ; <nl> + var registerMethods = _global . registerMethods ; <nl> + <nl> + registerMethods ( methods ) ; <nl> + / * WEBPACK VAR INJECTION * / } . 
call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 1 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + __webpack_require__ ( 2 ) ; <nl> + <nl> + var _runtime = __webpack_require__ ( 44 ) ; <nl> + <nl> + var _runtime2 = _interopRequireDefault ( _runtime ) ; <nl> + <nl> + var _package = __webpack_require__ ( 69 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + var native = _package . subversion . native ; <nl> + var transformer = _package . subversion . transformer ; <nl> + <nl> + var _loop = function _loop ( methodName ) { <nl> + global [ methodName ] = function ( ) { <nl> + var ret = _runtime2 . default [ methodName ] . apply ( _runtime2 . default , arguments ) ; <nl> + if ( ret instanceof Error ) { <nl> + console . error ( ret . toString ( ) ) ; <nl> + } <nl> + return ret ; <nl> + } ; <nl> + } ; <nl> + <nl> + for ( var methodName in _runtime2 . default ) { <nl> + _loop ( methodName ) ; <nl> + } <nl> + <nl> + Object . assign ( global , { <nl> + frameworkVersion : native , <nl> + needTransformerVersion : transformer <nl> + } ) ; <nl> + <nl> + / * * <nl> + * register methods <nl> + * / <nl> + var methods = __webpack_require__ ( 70 ) ; <nl> + var _global = global ; <nl> + var registerMethods = _global . registerMethods ; <nl> + <nl> + registerMethods ( methods ) ; <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 2 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + __webpack_require__ ( 3 ) ; <nl> + <nl> + __webpack_require__ ( 41 ) ; <nl> + <nl> + __webpack_require__ ( 42 ) ; <nl> + <nl> + __webpack_require__ ( 43 ) ; <nl> + <nl> + / * * * / } , <nl> + / * 3 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + __webpack_require__ ( 4 ) ; <nl> + <nl> + / * * * / } , <nl> + / * 4 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + __webpack_require__ ( 5 ) ; <nl> + module . exports = __webpack_require__ ( 8 ) . Object . assign ; <nl> + <nl> + / * * * / } , <nl> + / * 5 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / 19 . 1 . 3 . 1 Object . assign ( target , source ) <nl> + var $ export = __webpack_require__ ( 6 ) ; <nl> + <nl> + $ export ( $ export . S + $ export . F , ' Object ' , { assign : __webpack_require__ ( 24 ) } ) ; <nl> + <nl> + / * * * / } , <nl> + / * 6 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var global = __webpack_require__ ( 7 ) , <nl> + core = __webpack_require__ ( 8 ) , <nl> + hide = __webpack_require__ ( 9 ) , <nl> + redefine = __webpack_require__ ( 19 ) , <nl> + ctx = __webpack_require__ ( 22 ) , <nl> + PROTOTYPE = ' prototype ' ; <nl> + <nl> + var $ export = function $ export ( type , name , source ) { <nl> + var IS_FORCED = type & $ export . F , <nl> + IS_GLOBAL = type & $ export . G , <nl> + IS_STATIC = type & $ export . S , <nl> + IS_PROTO = type & $ export . P , <nl> + IS_BIND = type & $ export . B , <nl> + target = IS_GLOBAL ? global : IS_STATIC ? 
global [ name ] | | ( global [ name ] = { } ) : ( global [ name ] | | { } ) [ PROTOTYPE ] , <nl> + exports = IS_GLOBAL ? core : core [ name ] | | ( core [ name ] = { } ) , <nl> + expProto = exports [ PROTOTYPE ] | | ( exports [ PROTOTYPE ] = { } ) , <nl> + key , <nl> + own , <nl> + out , <nl> + exp ; <nl> + if ( IS_GLOBAL ) source = name ; <nl> + for ( key in source ) { <nl> + / / contains in native <nl> + own = ! IS_FORCED & & target & & target [ key ] ! = = undefined ; <nl> + / / export native or passed <nl> + out = ( own ? target : source ) [ key ] ; <nl> + / / bind timers to global for call from export context <nl> + exp = IS_BIND & & own ? ctx ( out , global ) : IS_PROTO & & typeof out = = ' function ' ? ctx ( Function . call , out ) : out ; <nl> + / / extend global <nl> + if ( target ) redefine ( target , key , out , type & $ export . U ) ; <nl> + / / export <nl> + if ( exports [ key ] ! = out ) hide ( exports , key , exp ) ; <nl> + if ( IS_PROTO & & expProto [ key ] ! = out ) expProto [ key ] = out ; <nl> + } <nl> + } ; <nl> + global . core = core ; <nl> + / / type bitmap <nl> + $ export . F = 1 ; / / forced <nl> + $ export . G = 2 ; / / global <nl> + $ export . S = 4 ; / / static <nl> + $ export . P = 8 ; / / proto <nl> + $ export . B = 16 ; / / bind <nl> + $ export . W = 32 ; / / wrap <nl> + $ export . U = 64 ; / / safe <nl> + $ export . R = 128 ; / / real proto method for ` library ` <nl> + module . exports = $ export ; <nl> + <nl> + / * * * / } , <nl> + / * 7 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / https : / / github . com / zloirock / core - js / issues / 86 # issuecomment - 115759028 <nl> + var global = module . exports = typeof window ! = ' undefined ' & & window . Math = = Math ? window : typeof self ! = ' undefined ' & & self . Math = = Math ? self : Function ( ' return this ' ) ( ) ; <nl> + if ( typeof __g = = ' number ' ) __g = global ; / / eslint - disable - line no - undef <nl> + <nl> + / * * * / } , <nl> + / * 8 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var core = module . exports = { version : ' 2 . 4 . 0 ' } ; <nl> + if ( typeof __e = = ' number ' ) __e = core ; / / eslint - disable - line no - undef <nl> + <nl> + / * * * / } , <nl> + / * 9 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var dP = __webpack_require__ ( 10 ) , <nl> + createDesc = __webpack_require__ ( 18 ) ; <nl> + module . exports = __webpack_require__ ( 14 ) ? function ( object , key , value ) { <nl> + return dP . f ( object , key , createDesc ( 1 , value ) ) ; <nl> + } : function ( object , key , value ) { <nl> + object [ key ] = value ; <nl> + return object ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 10 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var anObject = __webpack_require__ ( 11 ) , <nl> + IE8_DOM_DEFINE = __webpack_require__ ( 13 ) , <nl> + toPrimitive = __webpack_require__ ( 17 ) , <nl> + dP = Object . defineProperty ; <nl> + <nl> + exports . f = __webpack_require__ ( 14 ) ? Object . 
defineProperty : function defineProperty ( O , P , Attributes ) { <nl> + anObject ( O ) ; <nl> + P = toPrimitive ( P , true ) ; <nl> + anObject ( Attributes ) ; <nl> + if ( IE8_DOM_DEFINE ) try { <nl> + return dP ( O , P , Attributes ) ; <nl> + } catch ( e ) { / * empty * / } <nl> + if ( ' get ' in Attributes | | ' set ' in Attributes ) throw TypeError ( ' Accessors not supported ! ' ) ; <nl> + if ( ' value ' in Attributes ) O [ P ] = Attributes . value ; <nl> + return O ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 11 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var isObject = __webpack_require__ ( 12 ) ; <nl> + module . exports = function ( it ) { <nl> + if ( ! isObject ( it ) ) throw TypeError ( it + ' is not an object ! ' ) ; <nl> + return it ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 12 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj ; } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj ; } ; <nl> + <nl> + module . exports = function ( it ) { <nl> + return ( typeof it = = = ' undefined ' ? ' undefined ' : _typeof ( it ) ) = = = ' object ' ? it ! = = null : typeof it = = = ' function ' ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 13 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + module . exports = ! __webpack_require__ ( 14 ) & & ! __webpack_require__ ( 15 ) ( function ( ) { <nl> + return Object . defineProperty ( __webpack_require__ ( 16 ) ( ' div ' ) , ' a ' , { get : function get ( ) { <nl> + return 7 ; <nl> + } } ) . a ! = 7 ; <nl> + } ) ; <nl> + <nl> + / * * * / } , <nl> + / * 14 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / Thank ' s IE8 for his funny defineProperty <nl> + module . exports = ! __webpack_require__ ( 15 ) ( function ( ) { <nl> + return Object . defineProperty ( { } , ' a ' , { get : function get ( ) { <nl> + return 7 ; <nl> + } } ) . a ! = 7 ; <nl> + } ) ; <nl> + <nl> + / * * * / } , <nl> + / * 15 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + module . exports = function ( exec ) { <nl> + try { <nl> + return ! ! exec ( ) ; <nl> + } catch ( e ) { <nl> + return true ; <nl> + } <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 16 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var isObject = __webpack_require__ ( 12 ) , <nl> + document = __webpack_require__ ( 7 ) . document <nl> + / / in old IE typeof document . createElement is ' object ' <nl> + , <nl> + is = isObject ( document ) & & isObject ( document . createElement ) ; <nl> + module . exports = function ( it ) { <nl> + return is ? document . createElement ( it ) : { } ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 17 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / 7 . 1 . 
1 ToPrimitive ( input [ , PreferredType ] ) <nl> + var isObject = __webpack_require__ ( 12 ) ; <nl> + / / instead of the ES6 spec version , we didn ' t implement @ @ toPrimitive case <nl> + / / and the second argument - flag - preferred type is a string <nl> + module . exports = function ( it , S ) { <nl> + if ( ! isObject ( it ) ) return it ; <nl> + var fn , val ; <nl> + if ( S & & typeof ( fn = it . toString ) = = ' function ' & & ! isObject ( val = fn . call ( it ) ) ) return val ; <nl> + if ( typeof ( fn = it . valueOf ) = = ' function ' & & ! isObject ( val = fn . call ( it ) ) ) return val ; <nl> + if ( ! S & & typeof ( fn = it . toString ) = = ' function ' & & ! isObject ( val = fn . call ( it ) ) ) return val ; <nl> + throw TypeError ( " Can ' t convert object to primitive value " ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 18 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + module . exports = function ( bitmap , value ) { <nl> + return { <nl> + enumerable : ! ( bitmap & 1 ) , <nl> + configurable : ! ( bitmap & 2 ) , <nl> + writable : ! ( bitmap & 4 ) , <nl> + value : value <nl> + } ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 19 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var global = __webpack_require__ ( 7 ) , <nl> + hide = __webpack_require__ ( 9 ) , <nl> + has = __webpack_require__ ( 20 ) , <nl> + SRC = __webpack_require__ ( 21 ) ( ' src ' ) , <nl> + TO_STRING = ' toString ' , <nl> + $ toString = Function [ TO_STRING ] , <nl> + TPL = ( ' ' + $ toString ) . split ( TO_STRING ) ; <nl> + <nl> + __webpack_require__ ( 8 ) . inspectSource = function ( it ) { <nl> + return $ toString . call ( it ) ; <nl> + } ; <nl> + <nl> + ( module . exports = function ( O , key , val , safe ) { <nl> + var isFunction = typeof val = = ' function ' ; <nl> + if ( isFunction ) has ( val , ' name ' ) | | hide ( val , ' name ' , key ) ; <nl> + if ( O [ key ] = = = val ) return ; <nl> + if ( isFunction ) has ( val , SRC ) | | hide ( val , SRC , O [ key ] ? ' ' + O [ key ] : TPL . join ( String ( key ) ) ) ; <nl> + if ( O = = = global ) { <nl> + O [ key ] = val ; <nl> + } else { <nl> + if ( ! safe ) { <nl> + delete O [ key ] ; <nl> + hide ( O , key , val ) ; <nl> + } else { <nl> + if ( O [ key ] ) O [ key ] = val ; else hide ( O , key , val ) ; <nl> + } <nl> + } <nl> + / / add fake Function # toString for correct work wrapped methods / constructors with methods like LoDash isNative <nl> + } ) ( Function . prototype , TO_STRING , function toString ( ) { <nl> + return typeof this = = ' function ' & & this [ SRC ] | | $ toString . call ( this ) ; <nl> + } ) ; <nl> + <nl> + / * * * / } , <nl> + / * 20 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + var hasOwnProperty = { } . hasOwnProperty ; <nl> + module . exports = function ( it , key ) { <nl> + return hasOwnProperty . call ( it , key ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 21 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var id = 0 , <nl> + px = Math . random ( ) ; <nl> + module . exports = function ( key ) { <nl> + return ' Symbol ( ' . concat ( key = = = undefined ? ' ' : key , ' ) _ ' , ( + + id + px ) . 
toString ( 36 ) ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 22 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / optional / simple context binding <nl> + var aFunction = __webpack_require__ ( 23 ) ; <nl> + module . exports = function ( fn , that , length ) { <nl> + aFunction ( fn ) ; <nl> + if ( that = = = undefined ) return fn ; <nl> + switch ( length ) { <nl> + case 1 : <nl> + return function ( a ) { <nl> + return fn . call ( that , a ) ; <nl> + } ; <nl> + case 2 : <nl> + return function ( a , b ) { <nl> + return fn . call ( that , a , b ) ; <nl> + } ; <nl> + case 3 : <nl> + return function ( a , b , c ) { <nl> + return fn . call ( that , a , b , c ) ; <nl> + } ; <nl> + } <nl> + return function ( ) / * . . . args * / { <nl> + return fn . apply ( that , arguments ) ; <nl> + } ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 23 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + module . exports = function ( it ) { <nl> + if ( typeof it ! = ' function ' ) throw TypeError ( it + ' is not a function ! ' ) ; <nl> + return it ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 24 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + / / 19 . 1 . 2 . 1 Object . assign ( target , source , . . . ) <nl> + <nl> + var getKeys = __webpack_require__ ( 25 ) , <nl> + gOPS = __webpack_require__ ( 38 ) , <nl> + pIE = __webpack_require__ ( 39 ) , <nl> + toObject = __webpack_require__ ( 40 ) , <nl> + IObject = __webpack_require__ ( 28 ) , <nl> + $ assign = Object . assign ; <nl> + <nl> + / / should work with symbols and should have deterministic property order ( V8 bug ) <nl> + module . exports = ! $ assign | | __webpack_require__ ( 15 ) ( function ( ) { <nl> + var A = { } , <nl> + B = { } , <nl> + S = Symbol ( ) , <nl> + K = ' abcdefghijklmnopqrst ' ; <nl> + A [ S ] = 7 ; <nl> + K . split ( ' ' ) . forEach ( function ( k ) { <nl> + B [ k ] = k ; <nl> + } ) ; <nl> + return $ assign ( { } , A ) [ S ] ! = 7 | | Object . keys ( $ assign ( { } , B ) ) . join ( ' ' ) ! = K ; <nl> + } ) ? function assign ( target , source ) { <nl> + / / eslint - disable - line no - unused - vars <nl> + var T = toObject ( target ) , <nl> + aLen = arguments . length , <nl> + index = 1 , <nl> + getSymbols = gOPS . f , <nl> + isEnum = pIE . f ; <nl> + while ( aLen > index ) { <nl> + var S = IObject ( arguments [ index + + ] ) , <nl> + keys = getSymbols ? getKeys ( S ) . concat ( getSymbols ( S ) ) : getKeys ( S ) , <nl> + length = keys . length , <nl> + j = 0 , <nl> + key ; <nl> + while ( length > j ) { <nl> + if ( isEnum . call ( S , key = keys [ j + + ] ) ) T [ key ] = S [ key ] ; <nl> + } <nl> + } return T ; <nl> + } : $ assign ; <nl> + <nl> + / * * * / } , <nl> + / * 25 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / 19 . 1 . 2 . 14 / 15 . 2 . 3 . 14 Object . keys ( O ) <nl> + var $ keys = __webpack_require__ ( 26 ) , <nl> + enumBugKeys = __webpack_require__ ( 37 ) ; <nl> + <nl> + module . exports = Object . 
keys | | function keys ( O ) { <nl> + return $ keys ( O , enumBugKeys ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 26 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var has = __webpack_require__ ( 20 ) , <nl> + toIObject = __webpack_require__ ( 27 ) , <nl> + arrayIndexOf = __webpack_require__ ( 31 ) ( false ) , <nl> + IE_PROTO = __webpack_require__ ( 35 ) ( ' IE_PROTO ' ) ; <nl> + <nl> + module . exports = function ( object , names ) { <nl> + var O = toIObject ( object ) , <nl> + i = 0 , <nl> + result = [ ] , <nl> + key ; <nl> + for ( key in O ) { <nl> + if ( key ! = IE_PROTO ) has ( O , key ) & & result . push ( key ) ; <nl> + } / / Don ' t enum bug & hidden keys <nl> + while ( names . length > i ) { <nl> + if ( has ( O , key = names [ i + + ] ) ) { <nl> + ~ arrayIndexOf ( result , key ) | | result . push ( key ) ; <nl> + } <nl> + } return result ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 27 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / to indexed object , toObject with fallback for non - array - like ES3 strings <nl> + var IObject = __webpack_require__ ( 28 ) , <nl> + defined = __webpack_require__ ( 30 ) ; <nl> + module . exports = function ( it ) { <nl> + return IObject ( defined ( it ) ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 28 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / fallback for non - array - like ES3 and non - enumerable old V8 strings <nl> + var cof = __webpack_require__ ( 29 ) ; <nl> + module . exports = Object ( ' z ' ) . propertyIsEnumerable ( 0 ) ? Object : function ( it ) { <nl> + return cof ( it ) = = ' String ' ? it . split ( ' ' ) : Object ( it ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 29 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + var toString = { } . toString ; <nl> + <nl> + module . exports = function ( it ) { <nl> + return toString . call ( it ) . slice ( 8 , - 1 ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 30 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + / / 7 . 2 . 1 RequireObjectCoercible ( argument ) <nl> + module . exports = function ( it ) { <nl> + if ( it = = undefined ) throw TypeError ( " Can ' t call method on " + it ) ; <nl> + return it ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 31 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / false - > Array # indexOf <nl> + / / true - > Array # includes <nl> + var toIObject = __webpack_require__ ( 27 ) , <nl> + toLength = __webpack_require__ ( 32 ) , <nl> + toIndex = __webpack_require__ ( 34 ) ; <nl> + module . exports = function ( IS_INCLUDES ) { <nl> + return function ( $ this , el , fromIndex ) { <nl> + var O = toIObject ( $ this ) , <nl> + length = toLength ( O . length ) , <nl> + index = toIndex ( fromIndex , length ) , <nl> + value ; <nl> + / / Array # includes uses SameValueZero equality algorithm <nl> + if ( IS_INCLUDES & & el ! = el ) while ( length > index ) { <nl> + value = O [ index + + ] ; <nl> + if ( value ! 
= value ) return true ; <nl> + / / Array # toIndex ignores holes , Array # includes - not <nl> + } else for ( ; length > index ; index + + ) { <nl> + if ( IS_INCLUDES | | index in O ) { <nl> + if ( O [ index ] = = = el ) return IS_INCLUDES | | index | | 0 ; <nl> + } <nl> + } return ! IS_INCLUDES & & - 1 ; <nl> + } ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 32 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / 7 . 1 . 15 ToLength <nl> + var toInteger = __webpack_require__ ( 33 ) , <nl> + min = Math . min ; <nl> + module . exports = function ( it ) { <nl> + return it > 0 ? min ( toInteger ( it ) , 0x1fffffffffffff ) : 0 ; / / pow ( 2 , 53 ) - 1 = = 9007199254740991 <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 33 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + / / 7 . 1 . 4 ToInteger <nl> + var ceil = Math . ceil , <nl> + floor = Math . floor ; <nl> + module . exports = function ( it ) { <nl> + return isNaN ( it = + it ) ? 0 : ( it > 0 ? floor : ceil ) ( it ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 34 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var toInteger = __webpack_require__ ( 33 ) , <nl> + max = Math . max , <nl> + min = Math . min ; <nl> + module . exports = function ( index , length ) { <nl> + index = toInteger ( index ) ; <nl> + return index < 0 ? max ( index + length , 0 ) : min ( index , length ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 35 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var shared = __webpack_require__ ( 36 ) ( ' keys ' ) , <nl> + uid = __webpack_require__ ( 21 ) ; <nl> + module . exports = function ( key ) { <nl> + return shared [ key ] | | ( shared [ key ] = uid ( key ) ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 36 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + var global = __webpack_require__ ( 7 ) , <nl> + SHARED = ' __core - js_shared__ ' , <nl> + store = global [ SHARED ] | | ( global [ SHARED ] = { } ) ; <nl> + module . exports = function ( key ) { <nl> + return store [ key ] | | ( store [ key ] = { } ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 37 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / IE 8 - don ' t enum bug keys <nl> + module . exports = ' constructor , hasOwnProperty , isPrototypeOf , propertyIsEnumerable , toLocaleString , toString , valueOf ' . split ( ' , ' ) ; <nl> + <nl> + / * * * / } , <nl> + / * 38 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + exports . f = Object . getOwnPropertySymbols ; <nl> + <nl> + / * * * / } , <nl> + / * 39 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + " use strict " ; <nl> + <nl> + exports . f = { } . propertyIsEnumerable ; <nl> + <nl> + / * * * / } , <nl> + / * 40 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / 7 . 1 . 13 ToObject ( argument ) <nl> + var defined = __webpack_require__ ( 30 ) ; <nl> + module . 
exports = function ( it ) { <nl> + return Object ( defined ( it ) ) ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 41 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + var _global = global ; <nl> + var setTimeout = _global . setTimeout ; <nl> + var setTimeoutNative = _global . setTimeoutNative ; <nl> + <nl> + <nl> + var MSG = ' Use " global . setTimeout " is unexpected , ' + ' please use require ( " @ weex - module " ) . setTimeout instead . ' ; <nl> + <nl> + / / fix no setTimeout on Android V8 <nl> + / * istanbul ignore if * / <nl> + if ( typeof setTimeout = = = ' undefined ' & & typeof setTimeoutNative = = = ' function ' ) { <nl> + ( function ( ) { <nl> + var timeoutMap = { } ; <nl> + var timeoutId = 0 ; <nl> + global . setTimeout = function ( cb , time ) { <nl> + console . warn ( MSG ) ; <nl> + timeoutMap [ + + timeoutId ] = cb ; <nl> + setTimeoutNative ( timeoutId . toString ( ) , time ) ; <nl> + } ; <nl> + global . setTimeoutCallback = function ( id ) { <nl> + if ( typeof timeoutMap [ id ] = = = ' function ' ) { <nl> + timeoutMap [ id ] ( ) ; <nl> + delete timeoutMap [ id ] ; <nl> + } <nl> + } ; <nl> + } ) ( ) ; <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 42 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + var OriginPromise = global . Promise | | function ( ) { } ; <nl> + var MSG = ' Using " Promise " is unexpected ' ; <nl> + <nl> + var UnexpectedPromise = function UnexpectedPromise ( ) { <nl> + console . warn ( MSG ) ; <nl> + <nl> + for ( var _len = arguments . length , args = Array ( _len ) , _key = 0 ; _key < _len ; _key + + ) { <nl> + args [ _key ] = arguments [ _key ] ; <nl> + } <nl> + <nl> + return new ( Function . prototype . bind . apply ( OriginPromise , [ null ] . concat ( args ) ) ) ( ) ; <nl> + } ; <nl> + <nl> + var fn = [ ' all ' , ' race ' , ' resolve ' , ' reject ' ] ; <nl> + fn . forEach ( function ( n ) { <nl> + UnexpectedPromise [ n ] = function ( ) { <nl> + console . warn ( MSG ) ; <nl> + return OriginPromise [ n ] & & OriginPromise [ n ] . apply ( OriginPromise , arguments ) ; <nl> + } ; <nl> + } ) ; <nl> + <nl> + global . Promise = UnexpectedPromise ; <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 43 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + function _toConsumableArray ( arr ) { if ( Array . isArray ( arr ) ) { for ( var i = 0 , arr2 = Array ( arr . length ) ; i < arr . length ; i + + ) { arr2 [ i ] = arr [ i ] ; } return arr2 ; } else { return Array . from ( arr ) ; } } <nl> + <nl> + var _global = global ; <nl> + var console = _global . console ; <nl> + var nativeLog = _global . nativeLog ; <nl> + <nl> + var LEVELS = [ ' error ' , ' warn ' , ' info ' , ' log ' , ' debug ' ] ; <nl> + var levelMap = { } ; <nl> + <nl> + generateLevelMap ( ) ; <nl> + <nl> + / * istanbul ignore if * / <nl> + if ( typeof console = = = ' undefined ' | | / / Android <nl> + global . WXEnvironment & & global . WXEnvironment . platform = = = ' iOS ' / / iOS <nl> + ) { <nl> + global . console = { <nl> + debug : function debug ( ) { <nl> + for ( var _len = arguments . 
length , args = Array ( _len ) , _key = 0 ; _key < _len ; _key + + ) { <nl> + args [ _key ] = arguments [ _key ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' debug ' ) ) { <nl> + nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ ' __DEBUG ' ] ) ) ; <nl> + } <nl> + } , <nl> + log : function log ( ) { <nl> + for ( var _len2 = arguments . length , args = Array ( _len2 ) , _key2 = 0 ; _key2 < _len2 ; _key2 + + ) { <nl> + args [ _key2 ] = arguments [ _key2 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' log ' ) ) { <nl> + nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ ' __LOG ' ] ) ) ; <nl> + } <nl> + } , <nl> + info : function info ( ) { <nl> + for ( var _len3 = arguments . length , args = Array ( _len3 ) , _key3 = 0 ; _key3 < _len3 ; _key3 + + ) { <nl> + args [ _key3 ] = arguments [ _key3 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' info ' ) ) { <nl> + nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ ' __INFO ' ] ) ) ; <nl> + } <nl> + } , <nl> + warn : function warn ( ) { <nl> + for ( var _len4 = arguments . length , args = Array ( _len4 ) , _key4 = 0 ; _key4 < _len4 ; _key4 + + ) { <nl> + args [ _key4 ] = arguments [ _key4 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' warn ' ) ) { <nl> + nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ ' __WARN ' ] ) ) ; <nl> + } <nl> + } , <nl> + error : function error ( ) { <nl> + for ( var _len5 = arguments . length , args = Array ( _len5 ) , _key5 = 0 ; _key5 < _len5 ; _key5 + + ) { <nl> + args [ _key5 ] = arguments [ _key5 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' error ' ) ) { <nl> + nativeLog . apply ( undefined , _toConsumableArray ( format ( args ) ) . concat ( [ ' __ERROR ' ] ) ) ; <nl> + } <nl> + } <nl> + } ; <nl> + } else { <nl> + / / HTML5 <nl> + var debug = console . debug ; <nl> + var log = console . log ; <nl> + var info = console . info ; <nl> + var warn = console . warn ; <nl> + var error = console . error ; <nl> + <nl> + console . __ori__ = { debug : debug , log : log , info : info , warn : warn , error : error } ; <nl> + console . debug = function ( ) { <nl> + for ( var _len6 = arguments . length , args = Array ( _len6 ) , _key6 = 0 ; _key6 < _len6 ; _key6 + + ) { <nl> + args [ _key6 ] = arguments [ _key6 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' debug ' ) ) { <nl> + console . __ori__ . debug . apply ( console , args ) ; <nl> + } <nl> + } ; <nl> + console . log = function ( ) { <nl> + for ( var _len7 = arguments . length , args = Array ( _len7 ) , _key7 = 0 ; _key7 < _len7 ; _key7 + + ) { <nl> + args [ _key7 ] = arguments [ _key7 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' log ' ) ) { <nl> + console . __ori__ . log . apply ( console , args ) ; <nl> + } <nl> + } ; <nl> + console . info = function ( ) { <nl> + for ( var _len8 = arguments . length , args = Array ( _len8 ) , _key8 = 0 ; _key8 < _len8 ; _key8 + + ) { <nl> + args [ _key8 ] = arguments [ _key8 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' info ' ) ) { <nl> + console . __ori__ . info . apply ( console , args ) ; <nl> + } <nl> + } ; <nl> + console . warn = function ( ) { <nl> + for ( var _len9 = arguments . length , args = Array ( _len9 ) , _key9 = 0 ; _key9 < _len9 ; _key9 + + ) { <nl> + args [ _key9 ] = arguments [ _key9 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' warn ' ) ) { <nl> + console . __ori__ . warn . apply ( console , args ) ; <nl> + } <nl> + } ; <nl> + console . 
error = function ( ) { <nl> + for ( var _len10 = arguments . length , args = Array ( _len10 ) , _key10 = 0 ; _key10 < _len10 ; _key10 + + ) { <nl> + args [ _key10 ] = arguments [ _key10 ] ; <nl> + } <nl> + <nl> + if ( checkLevel ( ' error ' ) ) { <nl> + console . __ori__ . error . apply ( console , args ) ; <nl> + } <nl> + } ; <nl> + } <nl> + <nl> + function generateLevelMap ( ) { <nl> + LEVELS . forEach ( function ( level ) { <nl> + var levelIndex = LEVELS . indexOf ( level ) ; <nl> + levelMap [ level ] = { } ; <nl> + LEVELS . forEach ( function ( type ) { <nl> + var typeIndex = LEVELS . indexOf ( type ) ; <nl> + if ( typeIndex < = levelIndex ) { <nl> + levelMap [ level ] [ type ] = true ; <nl> + } <nl> + } ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + function normalize ( v ) { <nl> + var type = Object . prototype . toString . call ( v ) ; <nl> + if ( type . toLowerCase ( ) = = = ' [ object object ] ' ) { <nl> + v = JSON . stringify ( v ) ; <nl> + } else { <nl> + v = String ( v ) ; <nl> + } <nl> + return v ; <nl> + } <nl> + <nl> + function checkLevel ( type ) { <nl> + var logLevel = global . WXEnvironment & & global . WXEnvironment . logLevel | | ' log ' ; <nl> + return levelMap [ logLevel ] & & levelMap [ logLevel ] [ type ] ; <nl> + } <nl> + <nl> + function format ( args ) { <nl> + return args . map ( function ( v ) { <nl> + return normalize ( v ) ; <nl> + } ) ; <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 44 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . createInstance = createInstance ; <nl> + <nl> + var _frameworks = __webpack_require__ ( 45 ) ; <nl> + <nl> + var _frameworks2 = _interopRequireDefault ( _frameworks ) ; <nl> + <nl> + var _vdom = __webpack_require__ ( 67 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + var config = { <nl> + Document : _vdom . Document , Element : _vdom . Element , Comment : _vdom . Comment , <nl> + sendTasks : function sendTasks ( ) { <nl> + var _global ; <nl> + <nl> + return ( _global = global ) . callNative . apply ( _global , arguments ) ; <nl> + } <nl> + } ; <nl> + <nl> + for ( var name in _frameworks2 . default ) { <nl> + var framework = _frameworks2 . default [ name ] ; <nl> + framework . init ( config ) ; <nl> + } <nl> + <nl> + var versionRegExp = / ^ \ / \ / * ( \ { [ ^ \ } ] * \ } ) * \ r ? \ n / ; <nl> + <nl> + function checkVersion ( code ) { <nl> + var info = void 0 ; <nl> + var result = versionRegExp . exec ( code ) ; <nl> + if ( result ) { <nl> + try { <nl> + info = JSON . parse ( result [ 1 ] ) ; <nl> + } catch ( e ) { } <nl> + } <nl> + return info ; <nl> + } <nl> + <nl> + var instanceMap = { } ; <nl> + <nl> + function createInstance ( id , code , config , data ) { <nl> + var info = instanceMap [ id ] ; <nl> + if ( ! info ) { <nl> + info = checkVersion ( code ) | | { } ; <nl> + if ( ! _frameworks2 . default [ info . framework ] ) { <nl> + info . framework = ' Weex ' ; <nl> + } <nl> + instanceMap [ id ] = info ; <nl> + config = config | | { } ; <nl> + config . bundleVersion = info . version ; <nl> + return _frameworks2 . default [ info . framework ] . 
createInstance ( id , code , config , data ) ; <nl> + } <nl> + return new Error ( ' invalid instance id " ' + id + ' " ' ) ; <nl> + } <nl> + <nl> + var methods = { <nl> + createInstance : createInstance <nl> + } ; <nl> + <nl> + function genInit ( methodName ) { <nl> + methods [ methodName ] = function ( ) { <nl> + for ( var _name in _frameworks2 . default ) { <nl> + var _framework = _frameworks2 . default [ _name ] ; <nl> + if ( _framework & & _framework [ methodName ] ) { <nl> + _framework [ methodName ] . apply ( _framework , arguments ) ; <nl> + } <nl> + } <nl> + } ; <nl> + } <nl> + <nl> + [ ' registerComponents ' , ' registerModules ' , ' registerMethods ' ] . forEach ( genInit ) ; <nl> + <nl> + function genInstance ( methodName ) { <nl> + methods [ methodName ] = function ( ) { <nl> + var id = arguments . length < = 0 ? undefined : arguments [ 0 ] ; <nl> + var info = instanceMap [ id ] ; <nl> + if ( info & & _frameworks2 . default [ info . framework ] ) { <nl> + var _frameworks $ info $ fram ; <nl> + <nl> + return ( _frameworks $ info $ fram = _frameworks2 . default [ info . framework ] ) [ methodName ] . apply ( _frameworks $ info $ fram , arguments ) ; <nl> + } <nl> + return new Error ( ' invalid instance id " ' + id + ' " ' ) ; <nl> + } ; <nl> + } <nl> + <nl> + [ ' destroyInstance ' , ' refreshInstance ' , ' callJS ' , ' getRoot ' ] . forEach ( genInstance ) ; <nl> + <nl> + methods . receiveTasks = methods . callJS ; <nl> + <nl> + exports . default = methods ; <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 45 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + <nl> + var _default = __webpack_require__ ( 46 ) ; <nl> + <nl> + var Weex = _interopRequireWildcard ( _default ) ; <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + exports . default = { <nl> + Weex : Weex <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 46 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + <nl> + var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj ; } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj ; } ; / * * <nl> + * @ fileOverview Main entry , instance manager <nl> + * <nl> + * - createInstance ( instanceId , code , options , data ) <nl> + * - refreshInstance ( instanceId , data ) <nl> + * - destroyInstance ( instanceId ) <nl> + * - registerComponents ( components ) <nl> + * - registerModules ( modules ) <nl> + * - getRoot ( instanceId ) <nl> + * - instanceMap <nl> + * - callJS ( instanceId , tasks ) <nl> + * - fireEvent ( ref , type , data ) <nl> + * - callback ( funcId , data ) <nl> + * / <nl> + <nl> + exports . init = init ; <nl> + exports . createInstance = createInstance ; <nl> + exports . refreshInstance = refreshInstance ; <nl> + exports . 
destroyInstance = destroyInstance ; <nl> + exports . registerComponents = registerComponents ; <nl> + exports . registerModules = registerModules ; <nl> + exports . registerMethods = registerMethods ; <nl> + exports . getRoot = getRoot ; <nl> + exports . callJS = callJS ; <nl> + <nl> + var _config = __webpack_require__ ( 47 ) ; <nl> + <nl> + var _config2 = _interopRequireDefault ( _config ) ; <nl> + <nl> + var _app = __webpack_require__ ( 48 ) ; <nl> + <nl> + var _app2 = _interopRequireDefault ( _app ) ; <nl> + <nl> + var _vm = __webpack_require__ ( 53 ) ; <nl> + <nl> + var _vm2 = _interopRequireDefault ( _vm ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + function _toConsumableArray ( arr ) { if ( Array . isArray ( arr ) ) { for ( var i = 0 , arr2 = Array ( arr . length ) ; i < arr . length ; i + + ) { arr2 [ i ] = arr [ i ] ; } return arr2 ; } else { return Array . from ( arr ) ; } } <nl> + <nl> + var nativeComponentMap = _config2 . default . nativeComponentMap ; <nl> + <nl> + var instanceMap = { } ; <nl> + <nl> + function init ( cfg ) { <nl> + _config2 . default . Document = cfg . Document ; <nl> + _config2 . default . Element = cfg . Element ; <nl> + _config2 . default . Comment = cfg . Comment ; <nl> + _config2 . default . sendTasks = cfg . sendTasks ; <nl> + } <nl> + <nl> + / * * <nl> + * create a Weex instance <nl> + * <nl> + * @ param { string } instanceId <nl> + * @ param { string } code <nl> + * @ param { object } [ options ] option ` HAS_LOG ` enable print log <nl> + * @ param { object } [ data ] <nl> + * / <nl> + function createInstance ( instanceId , code , options , data ) { <nl> + var instance = instanceMap [ instanceId ] ; <nl> + options = options | | { } ; <nl> + <nl> + _config2 . default . debug = options . debug ; <nl> + <nl> + var result = void 0 ; <nl> + if ( ! instance ) { <nl> + instance = new _app2 . default ( instanceId , options ) ; <nl> + instanceMap [ instanceId ] = instance ; <nl> + result = instance . init ( code , data ) ; <nl> + } else { <nl> + result = new Error ( ' invalid instance id " ' + instanceId + ' " ' ) ; <nl> + } <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + / * * <nl> + * refresh a Weex instance <nl> + * <nl> + * @ param { string } instanceId <nl> + * @ param { object } data <nl> + * / <nl> + function refreshInstance ( instanceId , data ) { <nl> + var instance = instanceMap [ instanceId ] ; <nl> + var result = void 0 ; <nl> + if ( instance ) { <nl> + result = instance . refreshData ( data ) ; <nl> + } else { <nl> + result = new Error ( ' invalid instance id " ' + instanceId + ' " ' ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + / * * <nl> + * destroy a Weex instance <nl> + * @ param { string } instanceId <nl> + * / <nl> + function destroyInstance ( instanceId ) { <nl> + var instance = instanceMap [ instanceId ] ; <nl> + if ( ! instance ) { <nl> + return new Error ( ' invalid instance id " ' + instanceId + ' " ' ) ; <nl> + } <nl> + <nl> + instance . destroy ( ) ; <nl> + delete instanceMap [ instanceId ] ; <nl> + return instanceMap ; <nl> + } <nl> + <nl> + / * * <nl> + * register the name of each native component <nl> + * @ param { array } components array of name <nl> + * / <nl> + function registerComponents ( components ) { <nl> + if ( Array . isArray ( components ) ) { <nl> + components . forEach ( function register ( name ) { <nl> + / * istanbul ignore if * / <nl> + if ( ! 
name ) { <nl> + return ; <nl> + } <nl> + if ( typeof name = = = ' string ' ) { <nl> + nativeComponentMap [ name ] = true ; <nl> + } else if ( ( typeof name = = = ' undefined ' ? ' undefined ' : _typeof ( name ) ) = = = ' object ' & & typeof name . type = = = ' string ' ) { <nl> + nativeComponentMap [ name . type ] = name ; <nl> + } <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * register the name and methods of each module <nl> + * @ param { object } modules a object of modules <nl> + * / <nl> + function registerModules ( modules ) { <nl> + if ( ( typeof modules = = = ' undefined ' ? ' undefined ' : _typeof ( modules ) ) = = = ' object ' ) { <nl> + _vm2 . default . registerModules ( modules ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * register the name and methods of each api <nl> + * @ param { object } apis a object of apis <nl> + * / <nl> + function registerMethods ( apis ) { <nl> + if ( ( typeof apis = = = ' undefined ' ? ' undefined ' : _typeof ( apis ) ) = = = ' object ' ) { <nl> + _vm2 . default . registerMethods ( apis ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * get a whole element tree of an instance <nl> + * for debugging <nl> + * @ param { string } instanceId <nl> + * @ return { object } a virtual dom tree <nl> + * / <nl> + function getRoot ( instanceId ) { <nl> + var instance = instanceMap [ instanceId ] ; <nl> + var result = void 0 ; <nl> + if ( instance ) { <nl> + result = instance . getRootElement ( ) ; <nl> + } else { <nl> + result = new Error ( ' invalid instance id " ' + instanceId + ' " ' ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + var jsHandlers = { <nl> + fireEvent : function fireEvent ( instanceId , ref , type , data , domChanges ) { <nl> + var instance = instanceMap [ instanceId ] ; <nl> + return instance . fireEvent ( ref , type , data , domChanges ) ; <nl> + } , <nl> + <nl> + callback : function callback ( instanceId , funcId , data , ifLast ) { <nl> + var instance = instanceMap [ instanceId ] ; <nl> + return instance . callback ( funcId , data , ifLast ) ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * accept calls from native ( event or callback ) <nl> + * <nl> + * @ param { string } instanceId <nl> + * @ param { array } tasks list with ` method ` and ` args ` <nl> + * / <nl> + function callJS ( instanceId , tasks ) { <nl> + var instance = instanceMap [ instanceId ] ; <nl> + if ( instance & & Array . isArray ( tasks ) ) { <nl> + var _ret = function ( ) { <nl> + var results = [ ] ; <nl> + tasks . forEach ( function ( task ) { <nl> + var handler = jsHandlers [ task . method ] ; <nl> + var args = [ ] . concat ( _toConsumableArray ( task . args ) ) ; <nl> + if ( typeof handler = = = ' function ' ) { <nl> + args . unshift ( instanceId ) ; <nl> + results . push ( handler . apply ( undefined , _toConsumableArray ( args ) ) ) ; <nl> + } <nl> + } ) ; <nl> + return { <nl> + v : results <nl> + } ; <nl> + } ( ) ; <nl> + <nl> + if ( ( typeof _ret = = = ' undefined ' ? ' undefined ' : _typeof ( _ret ) ) = = = " object " ) return _ret . v ; <nl> + } <nl> + return new Error ( ' invalid instance id " ' + instanceId + ' " or tasks ' ) ; <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 47 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . 
default = { <nl> + nativeComponentMap : { <nl> + text : true , <nl> + image : true , <nl> + container : true , <nl> + slider : { <nl> + type : ' slider ' , <nl> + append : ' tree ' <nl> + } , <nl> + cell : { <nl> + type : ' cell ' , <nl> + append : ' tree ' <nl> + } <nl> + } , <nl> + customComponentMap : { } , <nl> + debug : false <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 48 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . default = AppInstance ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var _bundle = __webpack_require__ ( 50 ) ; <nl> + <nl> + var bundle = _interopRequireWildcard ( _bundle ) ; <nl> + <nl> + var _ctrl = __webpack_require__ ( 65 ) ; <nl> + <nl> + var ctrl = _interopRequireWildcard ( _ctrl ) ; <nl> + <nl> + var _differ = __webpack_require__ ( 66 ) ; <nl> + <nl> + var _differ2 = _interopRequireDefault ( _differ ) ; <nl> + <nl> + var _config = __webpack_require__ ( 47 ) ; <nl> + <nl> + var _config2 = _interopRequireDefault ( _config ) ; <nl> + <nl> + var _register = __webpack_require__ ( 63 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + / * * <nl> + * @ fileOverview <nl> + * Weex instance constructor & definition <nl> + * / <nl> + <nl> + function AppInstance ( instanceId , options ) { <nl> + this . id = instanceId ; <nl> + this . options = options | | { } ; <nl> + this . vm = null ; <nl> + this . customComponentMap = { } ; <nl> + this . callbacks = { } ; <nl> + this . doc = new _config2 . default . Document ( instanceId , this . options . bundleUrl ) ; <nl> + this . differ = new _differ2 . default ( instanceId ) ; <nl> + this . uid = 0 ; <nl> + } <nl> + <nl> + function normalize ( app , v ) { <nl> + var type = ( 0 , _util . typof ) ( v ) ; <nl> + <nl> + switch ( type ) { <nl> + case ' undefined ' : <nl> + case ' null ' : <nl> + return ' ' ; <nl> + case ' regexp ' : <nl> + return v . toString ( ) ; <nl> + case ' date ' : <nl> + return v . toISOString ( ) ; <nl> + case ' number ' : <nl> + case ' string ' : <nl> + case ' boolean ' : <nl> + case ' array ' : <nl> + case ' object ' : <nl> + if ( v instanceof _config2 . default . Element ) { <nl> + return v . ref ; <nl> + } <nl> + return v ; <nl> + case ' function ' : <nl> + app . callbacks [ + + app . uid ] = v ; <nl> + return app . uid . toString ( ) ; <nl> + default : <nl> + return JSON . stringify ( v ) ; <nl> + } <nl> + } <nl> + <nl> + AppInstance . prototype . callTasks = function ( tasks ) { <nl> + var _this = this ; <nl> + <nl> + if ( ( 0 , _util . typof ) ( tasks ) ! = = ' array ' ) { <nl> + tasks = [ tasks ] ; <nl> + } <nl> + <nl> + tasks . forEach ( function ( task ) { <nl> + task . args = task . args . map ( function ( arg ) { <nl> + return normalize ( _this , arg ) ; <nl> + } ) ; <nl> + } ) ; <nl> + <nl> + return _config2 . default . sendTasks ( this . id , tasks , ' - 1 ' ) ; <nl> + } ; <nl> + <nl> + ( 0 , _util . extend ) ( AppInstance . prototype , bundle , ctrl , { <nl> + registerComponent : _register . 
registerComponent , <nl> + requireComponent : _register . requireComponent , <nl> + requireModule : _register . requireModule <nl> + } ) ; <nl> + <nl> + / * * * / } , <nl> + / * 49 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + <nl> + var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj ; } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj ; } ; <nl> + <nl> + exports . isReserved = isReserved ; <nl> + exports . def = def ; <nl> + exports . remove = remove ; <nl> + exports . hasOwn = hasOwn ; <nl> + exports . cached = cached ; <nl> + exports . bind = bind ; <nl> + exports . toArray = toArray ; <nl> + exports . extend = extend ; <nl> + exports . isObject = isObject ; <nl> + exports . isPlainObject = isPlainObject ; <nl> + exports . stringify = stringify ; <nl> + exports . typof = typof ; <nl> + exports . normalize = normalize ; <nl> + exports . error = error ; <nl> + exports . warn = warn ; <nl> + exports . info = info ; <nl> + exports . debug = debug ; <nl> + exports . log = log ; <nl> + / * global MutationObserver * / <nl> + <nl> + / / / lang . js <nl> + <nl> + / * * <nl> + * Check if a string starts with $ or _ <nl> + * <nl> + * @ param { String } str <nl> + * @ return { Boolean } <nl> + * / <nl> + <nl> + function isReserved ( str ) { <nl> + var c = ( str + ' ' ) . charCodeAt ( 0 ) ; <nl> + return c = = = 0x24 | | c = = = 0x5F ; <nl> + } <nl> + <nl> + / * * <nl> + * Define a property . <nl> + * <nl> + * @ param { Object } obj <nl> + * @ param { String } key <nl> + * @ param { * } val <nl> + * @ param { Boolean } [ enumerable ] <nl> + * / <nl> + <nl> + function def ( obj , key , val , enumerable ) { <nl> + Object . defineProperty ( obj , key , { <nl> + value : val , <nl> + enumerable : ! ! enumerable , <nl> + writable : true , <nl> + configurable : true <nl> + } ) ; <nl> + } <nl> + <nl> + / / / env . js <nl> + <nl> + / / can we use __proto__ ? <nl> + var hasProto = exports . hasProto = ' __proto__ ' in { } ; <nl> + <nl> + / / Browser environment sniffing <nl> + var inBrowser = exports . inBrowser = typeof window ! = = ' undefined ' & & Object . prototype . toString . call ( window ) ! = = ' [ object Object ] ' ; <nl> + <nl> + / / detect devtools <nl> + var devtools = exports . devtools = inBrowser & & window . __VUE_DEVTOOLS_GLOBAL_HOOK__ ; <nl> + <nl> + / / UA sniffing for working around browser - specific quirks <nl> + var UA = inBrowser & & window . navigator . userAgent . toLowerCase ( ) ; <nl> + var isIos = UA & & / ( iphone | ipad | ipod | ios ) / i . test ( UA ) ; <nl> + var isWechat = UA & & UA . indexOf ( ' micromessenger ' ) > 0 ; <nl> + <nl> + / * * <nl> + * Defer a task to execute it asynchronously . Ideally this <nl> + * should be executed as a microtask , so we leverage <nl> + * MutationObserver if it ' s available , and fallback to <nl> + * setTimeout ( 0 ) . <nl> + * <nl> + * @ param { Function } cb <nl> + * @ param { Object } ctx <nl> + * / <nl> + <nl> + var nextTick = exports . nextTick = function ( ) { <nl> + var callbacks = [ ] ; <nl> + var pending = false ; <nl> + var timerFunc = void 0 ; <nl> + function nextTickHandler ( ) { <nl> + pending = false ; <nl> + var copies = callbacks . 
slice ( 0 ) ; <nl> + callbacks = [ ] ; <nl> + for ( var i = 0 ; i < copies . length ; i + + ) { <nl> + copies [ i ] ( ) ; <nl> + } <nl> + } <nl> + <nl> + / * istanbul ignore if * / <nl> + if ( typeof MutationObserver ! = = ' undefined ' & & ! ( isWechat & & isIos ) ) { <nl> + ( function ( ) { <nl> + var counter = 1 ; <nl> + var observer = new MutationObserver ( nextTickHandler ) ; <nl> + var textNode = document . createTextNode ( counter ) ; <nl> + observer . observe ( textNode , { <nl> + characterData : true <nl> + } ) ; <nl> + timerFunc = function timerFunc ( ) { <nl> + counter = ( counter + 1 ) % 2 ; <nl> + textNode . data = counter ; <nl> + } ; <nl> + } ) ( ) ; <nl> + } else { <nl> + / / webpack attempts to inject a shim for setImmediate <nl> + / / if it is used as a global , so we have to work around that to <nl> + / / avoid bundling unnecessary code . <nl> + var context = inBrowser ? window : typeof global ! = = ' undefined ' ? global : { } ; <nl> + timerFunc = context . setImmediate | | setTimeout ; <nl> + } <nl> + return function ( cb , ctx ) { <nl> + var func = ctx ? function ( ) { <nl> + cb . call ( ctx ) ; <nl> + } : cb ; <nl> + callbacks . push ( func ) ; <nl> + if ( pending ) return ; <nl> + pending = true ; <nl> + timerFunc ( nextTickHandler , 0 ) ; <nl> + } ; <nl> + } ( ) ; <nl> + <nl> + var _Set = void 0 ; <nl> + / * istanbul ignore if * / <nl> + if ( typeof Set ! = = ' undefined ' & & Set . toString ( ) . match ( / native code / ) ) { <nl> + / / use native Set when available . <nl> + exports . _Set = _Set = Set ; <nl> + } else { <nl> + / / a non - standard Set polyfill that only works with primitive keys . <nl> + exports . _Set = _Set = function _Set ( ) { <nl> + this . set = Object . create ( null ) ; <nl> + } ; <nl> + _Set . prototype . has = function ( key ) { <nl> + return this . set [ key ] ! = = undefined ; <nl> + } ; <nl> + _Set . prototype . add = function ( key ) { <nl> + this . set [ key ] = 1 ; <nl> + } ; <nl> + _Set . prototype . clear = function ( ) { <nl> + this . set = Object . create ( null ) ; <nl> + } ; <nl> + } <nl> + <nl> + exports . _Set = _Set ; <nl> + <nl> + / / / shared <nl> + <nl> + / * * <nl> + * Remove an item from an array <nl> + * <nl> + * @ param { Array } arr <nl> + * @ param { * } item <nl> + * / <nl> + <nl> + function remove ( arr , item ) { <nl> + if ( arr . length ) { <nl> + var index = arr . indexOf ( item ) ; <nl> + if ( index > - 1 ) { <nl> + return arr . splice ( index , 1 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Check whether the object has the property . <nl> + * <nl> + * @ param { Object } obj <nl> + * @ param { String } key <nl> + * @ return { Boolean } <nl> + * / <nl> + var hasOwnProperty = Object . prototype . hasOwnProperty ; <nl> + function hasOwn ( obj , key ) { <nl> + return hasOwnProperty . call ( obj , key ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Create a cached version of a pure function . <nl> + * <nl> + * @ param { Function } fn <nl> + * @ return { Function } <nl> + * / <nl> + <nl> + function cached ( fn ) { <nl> + var cache = Object . create ( null ) ; <nl> + return function cachedFn ( str ) { <nl> + var hit = cache [ str ] ; <nl> + return hit | | ( cache [ str ] = fn ( str ) ) ; <nl> + } ; <nl> + } <nl> + <nl> + / * * <nl> + * Camelize a hyphen - delmited string . <nl> + * <nl> + * @ param { String } str <nl> + * @ return { String } <nl> + * / <nl> + <nl> + var camelizeRE = / - ( \ w ) / g ; <nl> + var camelize = exports . camelize = cached ( function ( str ) { <nl> + return str . 
replace ( camelizeRE , toUpper ) ; <nl> + } ) ; <nl> + <nl> + function toUpper ( _ , c ) { <nl> + return c ? c . toUpperCase ( ) : ' ' ; <nl> + } <nl> + <nl> + / * * <nl> + * Hyphenate a camelCase string . <nl> + * <nl> + * @ param { String } str <nl> + * @ return { String } <nl> + * / <nl> + <nl> + var hyphenateRE = / ( [ a - z \ d ] ) ( [ A - Z ] ) / g ; <nl> + var hyphenate = exports . hyphenate = cached ( function ( str ) { <nl> + return str . replace ( hyphenateRE , ' $ 1 - $ 2 ' ) . toLowerCase ( ) ; <nl> + } ) ; <nl> + <nl> + / * * <nl> + * Simple bind , faster than native <nl> + * <nl> + * @ param { Function } fn <nl> + * @ param { Object } ctx <nl> + * @ return { Function } <nl> + * / <nl> + <nl> + function bind ( fn , ctx ) { <nl> + return function ( a ) { <nl> + var l = arguments . length ; <nl> + return l ? l > 1 ? fn . apply ( ctx , arguments ) : fn . call ( ctx , a ) : fn . call ( ctx ) ; <nl> + } ; <nl> + } <nl> + <nl> + / * * <nl> + * Convert an Array - like object to a real Array . <nl> + * <nl> + * @ param { Array - like } list <nl> + * @ param { Number } [ start ] - start index <nl> + * @ return { Array } <nl> + * / <nl> + <nl> + function toArray ( list , start ) { <nl> + start = start | | 0 ; <nl> + var i = list . length - start ; <nl> + var ret = new Array ( i ) ; <nl> + while ( i - - ) { <nl> + ret [ i ] = list [ i + start ] ; <nl> + } <nl> + return ret ; <nl> + } <nl> + <nl> + / * * <nl> + * Mix properties into target object . <nl> + * <nl> + * @ param { Object } to <nl> + * @ param { Object } from <nl> + * / <nl> + <nl> + function extend ( target ) { <nl> + for ( var _len = arguments . length , src = Array ( _len > 1 ? _len - 1 : 0 ) , _key = 1 ; _key < _len ; _key + + ) { <nl> + src [ _key - 1 ] = arguments [ _key ] ; <nl> + } <nl> + <nl> + if ( typeof Object . assign = = = ' function ' ) { <nl> + Object . assign . apply ( Object , [ target ] . concat ( src ) ) ; <nl> + } else { <nl> + var first = src . shift ( ) ; <nl> + for ( var key in first ) { <nl> + target [ key ] = first [ key ] ; <nl> + } <nl> + if ( src . length ) { <nl> + extend . apply ( undefined , [ target ] . concat ( src ) ) ; <nl> + } <nl> + } <nl> + return target ; <nl> + } <nl> + <nl> + / * * <nl> + * Quick object check - this is primarily used to tell <nl> + * Objects from primitive values when we know the value <nl> + * is a JSON - compliant type . <nl> + * <nl> + * @ param { * } obj <nl> + * @ return { Boolean } <nl> + * / <nl> + <nl> + function isObject ( obj ) { <nl> + return obj ! = = null & & ( typeof obj = = = ' undefined ' ? ' undefined ' : _typeof ( obj ) ) = = = ' object ' ; <nl> + } <nl> + <nl> + / * * <nl> + * Strict object type check . Only returns true <nl> + * for plain JavaScript objects . <nl> + * <nl> + * @ param { * } obj <nl> + * @ return { Boolean } <nl> + * / <nl> + <nl> + var toString = Object . prototype . toString ; <nl> + var OBJECT_STRING = ' [ object Object ] ' ; <nl> + function isPlainObject ( obj ) { <nl> + return toString . call ( obj ) = = = OBJECT_STRING ; <nl> + } <nl> + <nl> + / * * <nl> + * Array type check . <nl> + * <nl> + * @ param { * } obj <nl> + * @ return { Boolean } <nl> + * / <nl> + <nl> + var isArray = exports . isArray = Array . isArray ; <nl> + <nl> + / / / other <nl> + <nl> + function stringify ( x ) { <nl> + return typeof x = = = ' undefined ' | | x = = = null | | typeof x = = = ' function ' ? ' ' : ( typeof x = = = ' undefined ' ? ' undefined ' : _typeof ( x ) ) = = = ' object ' ? x instanceof RegExp ? x . toString ( ) : x instanceof Date ? 
JSON . parse ( JSON . stringify ( x ) ) : JSON . stringify ( x ) : x . toString ( ) ; <nl> + } <nl> + <nl> + function typof ( v ) { <nl> + var s = Object . prototype . toString . call ( v ) ; <nl> + return s . substring ( 8 , s . length - 1 ) . toLowerCase ( ) ; <nl> + } <nl> + <nl> + function normalize ( v ) { <nl> + var type = typof ( v ) ; <nl> + <nl> + switch ( type ) { <nl> + case ' undefined ' : <nl> + case ' null ' : <nl> + return ' ' ; <nl> + case ' regexp ' : <nl> + return v . toString ( ) ; <nl> + case ' date ' : <nl> + return v . toISOString ( ) ; <nl> + case ' number ' : <nl> + case ' string ' : <nl> + case ' boolean ' : <nl> + case ' array ' : <nl> + case ' object ' : <nl> + case ' function ' : <nl> + return v ; <nl> + } <nl> + } <nl> + <nl> + var enableLog = typeof console ! = = ' undefined ' & & global . IS_PRODUCT ! = = true ; <nl> + <nl> + / * * <nl> + * @ param { String } msg <nl> + * / <nl> + function error ( ) { <nl> + var _console ; <nl> + <nl> + for ( var _len2 = arguments . length , args = Array ( _len2 ) , _key2 = 0 ; _key2 < _len2 ; _key2 + + ) { <nl> + args [ _key2 ] = arguments [ _key2 ] ; <nl> + } <nl> + <nl> + enableLog & & console . error & & ( _console = console ) . error . apply ( _console , [ ' [ JS Framework ] ' ] . concat ( args ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ param { String } msg <nl> + * / <nl> + function warn ( ) { <nl> + var _console2 ; <nl> + <nl> + for ( var _len3 = arguments . length , args = Array ( _len3 ) , _key3 = 0 ; _key3 < _len3 ; _key3 + + ) { <nl> + args [ _key3 ] = arguments [ _key3 ] ; <nl> + } <nl> + <nl> + enableLog & & console . warn & & ( _console2 = console ) . warn . apply ( _console2 , [ ' [ JS Framework ] ' ] . concat ( args ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ param { String } msg <nl> + * / <nl> + function info ( ) { <nl> + var _console3 ; <nl> + <nl> + for ( var _len4 = arguments . length , args = Array ( _len4 ) , _key4 = 0 ; _key4 < _len4 ; _key4 + + ) { <nl> + args [ _key4 ] = arguments [ _key4 ] ; <nl> + } <nl> + <nl> + enableLog & & console . info & & ( _console3 = console ) . info . apply ( _console3 , [ ' [ JS Framework ] ' ] . concat ( args ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ param { String } msg <nl> + * / <nl> + function debug ( ) { <nl> + var _console4 ; <nl> + <nl> + for ( var _len5 = arguments . length , args = Array ( _len5 ) , _key5 = 0 ; _key5 < _len5 ; _key5 + + ) { <nl> + args [ _key5 ] = arguments [ _key5 ] ; <nl> + } <nl> + <nl> + enableLog & & console . debug & & ( _console4 = console ) . debug . apply ( _console4 , [ ' [ JS Framework ] ' ] . concat ( args ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ param { String } msg <nl> + * / <nl> + function log ( ) { <nl> + var _console5 ; <nl> + <nl> + for ( var _len6 = arguments . length , args = Array ( _len6 ) , _key6 = 0 ; _key6 < _len6 ; _key6 + + ) { <nl> + args [ _key6 ] = arguments [ _key6 ] ; <nl> + } <nl> + <nl> + enableLog & & console . log & & ( _console5 = console ) . log . apply ( _console5 , [ ' [ JS Framework ] ' ] . concat ( args ) ) ; <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 50 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . require = exports . define = undefined ; <nl> + exports . 
clearCommonModules = clearCommonModules ; <nl> + exports . bootstrap = bootstrap ; <nl> + exports . register = register ; <nl> + exports . render = render ; <nl> + <nl> + var _semver = __webpack_require__ ( 51 ) ; <nl> + <nl> + var _semver2 = _interopRequireDefault ( _semver ) ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var _ = _interopRequireWildcard ( _util ) ; <nl> + <nl> + var _vm = __webpack_require__ ( 53 ) ; <nl> + <nl> + var _vm2 = _interopRequireDefault ( _vm ) ; <nl> + <nl> + var _downgrade = __webpack_require__ ( 64 ) ; <nl> + <nl> + var downgrade = _interopRequireWildcard ( _downgrade ) ; <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + function _defineProperty ( obj , key , value ) { if ( key in obj ) { Object . defineProperty ( obj , key , { value : value , enumerable : true , configurable : true , writable : true } ) ; } else { obj [ key ] = value ; } return obj ; } / * * <nl> + * @ fileOverview <nl> + * api that invoked by js bundle code <nl> + * <nl> + * - define ( name , factory ) : define a new composed component type <nl> + * - bootstrap ( type , config , data ) : require a certain type & <nl> + * render with ( optional ) data <nl> + * <nl> + * deprecated : <nl> + * - register ( type , options ) : register a new composed component type <nl> + * - render ( type , data ) : render by a certain type with ( optional ) data <nl> + * - require ( type ) ( data ) : require a type then render with data <nl> + * / <nl> + <nl> + var WEEX_COMPONENT_REG = / ^ @ weex - component \ / / ; <nl> + var WEEX_MODULE_REG = / ^ @ weex - module \ / / ; <nl> + var NORMAL_MODULE_REG = / ^ \ . { 1 , 2 } \ / / ; <nl> + var JS_SURFIX_REG = / \ . js $ / ; <nl> + <nl> + var isWeexComponent = function isWeexComponent ( name ) { <nl> + return ! ! name . match ( WEEX_COMPONENT_REG ) ; <nl> + } ; <nl> + var isWeexModule = function isWeexModule ( name ) { <nl> + return ! ! name . match ( WEEX_MODULE_REG ) ; <nl> + } ; <nl> + var isNormalModule = function isNormalModule ( name ) { <nl> + return ! ! name . match ( NORMAL_MODULE_REG ) ; <nl> + } ; <nl> + var isNpmModule = function isNpmModule ( name ) { <nl> + return ! isWeexComponent ( name ) & & ! isWeexModule ( name ) & & ! isNormalModule ( name ) ; <nl> + } ; <nl> + <nl> + function removeWeexPrefix ( str ) { <nl> + return str . replace ( WEEX_COMPONENT_REG , ' ' ) . replace ( WEEX_MODULE_REG , ' ' ) ; <nl> + } <nl> + <nl> + function removeJSSurfix ( str ) { <nl> + return str . replace ( JS_SURFIX_REG , ' ' ) ; <nl> + } <nl> + <nl> + var commonModules = { } ; <nl> + <nl> + function clearCommonModules ( ) { <nl> + commonModules = { } ; <nl> + } <nl> + <nl> + / / define ( name , factory ) for primary usage <nl> + / / or <nl> + / / define ( name , deps , factory ) for compatibility <nl> + / / Notice : DO NOT use function define ( ) { } , <nl> + / / it will cause error after builded by webpack <nl> + var define = exports . define = function define ( name , deps , factory ) { <nl> + var _this = this ; <nl> + <nl> + _ . debug ( ' define a component ' , name ) ; <nl> + <nl> + if ( _ . 
typof ( deps ) = = = ' function ' ) { <nl> + factory = deps ; <nl> + deps = [ ] ; <nl> + } <nl> + <nl> + var _require = function _require ( name ) { <nl> + var cleanName = void 0 ; <nl> + <nl> + if ( isWeexComponent ( name ) ) { <nl> + cleanName = removeWeexPrefix ( name ) ; <nl> + return _this . requireComponent ( cleanName ) ; <nl> + } <nl> + if ( isWeexModule ( name ) ) { <nl> + cleanName = removeWeexPrefix ( name ) ; <nl> + return _this . requireModule ( cleanName ) ; <nl> + } <nl> + if ( isNormalModule ( name ) ) { <nl> + cleanName = removeJSSurfix ( name ) ; <nl> + return commonModules [ name ] ; <nl> + } <nl> + if ( isNpmModule ( name ) ) { <nl> + cleanName = removeJSSurfix ( name ) ; <nl> + return commonModules [ name ] ; <nl> + } <nl> + } ; <nl> + var _module = { exports : { } } ; <nl> + <nl> + var cleanName = void 0 ; <nl> + if ( isWeexComponent ( name ) ) { <nl> + cleanName = removeWeexPrefix ( name ) ; <nl> + <nl> + factory ( _require , _module . exports , _module ) ; <nl> + <nl> + this . registerComponent ( cleanName , _module . exports ) ; <nl> + } else if ( isWeexModule ( name ) ) { <nl> + cleanName = removeWeexPrefix ( name ) ; <nl> + <nl> + factory ( _require , _module . exports , _module ) ; <nl> + <nl> + _vm2 . default . registerModules ( _defineProperty ( { } , cleanName , _module . exports ) ) ; <nl> + } else if ( isNormalModule ( name ) ) { <nl> + cleanName = removeJSSurfix ( name ) ; <nl> + <nl> + factory ( _require , _module . exports , _module ) ; <nl> + <nl> + commonModules [ cleanName ] = _module . exports ; <nl> + } else if ( isNpmModule ( name ) ) { <nl> + cleanName = removeJSSurfix ( name ) ; <nl> + <nl> + factory ( _require , _module . exports , _module ) ; <nl> + <nl> + var exports = _module . exports ; <nl> + if ( exports . template | | exports . style | | exports . methods ) { <nl> + / / downgrade to old define method ( define ( ' componentName ' , factory ) ) <nl> + / / the exports contain one key of template , style or methods <nl> + / / but it has risk ! ! ! <nl> + this . registerComponent ( cleanName , exports ) ; <nl> + } else { <nl> + commonModules [ cleanName ] = _module . exports ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + function bootstrap ( name , config , data ) { <nl> + _ . debug ( ' bootstrap for ' + name ) ; <nl> + <nl> + var cleanName = void 0 ; <nl> + <nl> + if ( isWeexComponent ( name ) ) { <nl> + cleanName = removeWeexPrefix ( name ) ; <nl> + } else if ( isNpmModule ( name ) ) { <nl> + cleanName = removeJSSurfix ( name ) ; <nl> + / / check if define by old ' define ' method <nl> + / * istanbul ignore if * / <nl> + if ( ! this . customComponentMap [ cleanName ] ) { <nl> + return new Error ( ' It \ ' s not a component : ' + name ) ; <nl> + } <nl> + } else { <nl> + return new Error ( ' Wrong component name : ' + name ) ; <nl> + } <nl> + <nl> + config = _ . isPlainObject ( config ) ? config : { } ; <nl> + <nl> + if ( typeof config . transformerVersion = = = ' string ' & & typeof global . needTransformerVersion = = = ' string ' & & ! _semver2 . default . satisfies ( config . transformerVersion , global . needTransformerVersion ) ) { <nl> + return new Error ( ' JS Bundle version : ' + config . transformerVersion + ' ' + ( ' not compatible with ' + global . needTransformerVersion ) ) ; <nl> + } <nl> + <nl> + var _checkDowngrade = downgrade . check ( config . downgrade ) ; <nl> + / * istanbul ignore if * / <nl> + if ( _checkDowngrade . isDowngrade ) { <nl> + this . 
callTasks ( [ { <nl> + module : ' instanceWrap ' , <nl> + method : ' error ' , <nl> + args : [ _checkDowngrade . errorType , _checkDowngrade . code , _checkDowngrade . errorMessage ] <nl> + } ] ) ; <nl> + return new Error ( ' Downgrade [ ' + _checkDowngrade . code + ' ] : ' + _checkDowngrade . errorMessage ) ; <nl> + } <nl> + <nl> + this . vm = new _vm2 . default ( cleanName , null , { _app : this } , null , data ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ deprecated <nl> + * / <nl> + function register ( type , options ) { <nl> + _ . warn ( ' Register is deprecated , please install lastest transformer . ' ) ; <nl> + this . registerComponent ( type , options ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ deprecated <nl> + * / <nl> + function render ( type , data ) { <nl> + _ . warn ( ' Render is deprecated , please install lastest transformer . ' ) ; <nl> + return this . bootstrap ( type , { } , data ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ deprecated <nl> + * / <nl> + function _require2 ( type ) { <nl> + var _this2 = this ; <nl> + <nl> + _ . warn ( ' Require is deprecated , please install lastest transformer . ' ) ; <nl> + return function ( data ) { <nl> + return _this2 . bootstrap ( type , { } , data ) ; <nl> + } ; <nl> + } <nl> + exports . require = _require2 ; <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 51 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( process ) { ' use strict ' ; <nl> + <nl> + var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj ; } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj ; } ; <nl> + <nl> + exports = module . exports = SemVer ; <nl> + <nl> + / / The debug function is excluded entirely from the minified version . <nl> + / * nomin * / var debug ; <nl> + / * nomin * / if ( ( typeof process = = = ' undefined ' ? ' undefined ' : _typeof ( process ) ) = = = ' object ' & & <nl> + / * nomin * / process . env & & <nl> + / * nomin * / process . env . NODE_DEBUG & & <nl> + / * nomin * / / \ bsemver \ b / i . test ( process . env . NODE_DEBUG ) ) <nl> + / * nomin * / debug = function debug ( ) { <nl> + / * nomin * / var args = Array . prototype . slice . call ( arguments , 0 ) ; <nl> + / * nomin * / args . unshift ( ' SEMVER ' ) ; <nl> + / * nomin * / console . log . apply ( console , args ) ; <nl> + / * nomin * / <nl> + } ; <nl> + / * nomin * / else <nl> + / * nomin * / debug = function debug ( ) { } ; <nl> + <nl> + / / Note : this is the semver . org version of the spec that it implements <nl> + / / Not necessarily the package version of this code . <nl> + exports . SEMVER_SPEC_VERSION = ' 2 . 0 . 0 ' ; <nl> + <nl> + var MAX_LENGTH = 256 ; <nl> + var MAX_SAFE_INTEGER = Number . MAX_SAFE_INTEGER | | 9007199254740991 ; <nl> + <nl> + / / The actual regexps go on exports . re <nl> + var re = exports . re = [ ] ; <nl> + var src = exports . src = [ ] ; <nl> + var R = 0 ; <nl> + <nl> + / / The following Regular Expressions can be used for tokenizing , <nl> + / / validating , and parsing SemVer version strings . <nl> + <nl> + / / # # Numeric Identifier <nl> + / / A single ` 0 ` , or a non - zero digit followed by zero or more digits . 
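// (Editorial sketch, not part of the upstream semver source.) The src[...] fragments
// defined below are string pieces that are concatenated and compiled into the exported
// regexps; once composed, the strict FULL pattern is roughly equivalent to the literal
// below, with capture groups lining up with major/minor/patch/prerelease/build as
// consumed by the SemVer constructor:
//
//   var FULL_RE = /^v?(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][a-zA-Z0-9-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][a-zA-Z0-9-]*))*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?$/;
//   FULL_RE.exec('1.2.3-beta.4+build.5');
//   // -> ['1.2.3-beta.4+build.5', '1', '2', '3', 'beta.4', 'build.5']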
<nl> + <nl> + var NUMERICIDENTIFIER = R + + ; <nl> + src [ NUMERICIDENTIFIER ] = ' 0 | [ 1 - 9 ] \ \ d * ' ; <nl> + var NUMERICIDENTIFIERLOOSE = R + + ; <nl> + src [ NUMERICIDENTIFIERLOOSE ] = ' [ 0 - 9 ] + ' ; <nl> + <nl> + / / # # Non - numeric Identifier <nl> + / / Zero or more digits , followed by a letter or hyphen , and then zero or <nl> + / / more letters , digits , or hyphens . <nl> + <nl> + var NONNUMERICIDENTIFIER = R + + ; <nl> + src [ NONNUMERICIDENTIFIER ] = ' \ \ d * [ a - zA - Z - ] [ a - zA - Z0 - 9 - ] * ' ; <nl> + <nl> + / / # # Main Version <nl> + / / Three dot - separated numeric identifiers . <nl> + <nl> + var MAINVERSION = R + + ; <nl> + src [ MAINVERSION ] = ' ( ' + src [ NUMERICIDENTIFIER ] + ' ) \ \ . ' + ' ( ' + src [ NUMERICIDENTIFIER ] + ' ) \ \ . ' + ' ( ' + src [ NUMERICIDENTIFIER ] + ' ) ' ; <nl> + <nl> + var MAINVERSIONLOOSE = R + + ; <nl> + src [ MAINVERSIONLOOSE ] = ' ( ' + src [ NUMERICIDENTIFIERLOOSE ] + ' ) \ \ . ' + ' ( ' + src [ NUMERICIDENTIFIERLOOSE ] + ' ) \ \ . ' + ' ( ' + src [ NUMERICIDENTIFIERLOOSE ] + ' ) ' ; <nl> + <nl> + / / # # Pre - release Version Identifier <nl> + / / A numeric identifier , or a non - numeric identifier . <nl> + <nl> + var PRERELEASEIDENTIFIER = R + + ; <nl> + src [ PRERELEASEIDENTIFIER ] = ' ( ? : ' + src [ NUMERICIDENTIFIER ] + ' | ' + src [ NONNUMERICIDENTIFIER ] + ' ) ' ; <nl> + <nl> + var PRERELEASEIDENTIFIERLOOSE = R + + ; <nl> + src [ PRERELEASEIDENTIFIERLOOSE ] = ' ( ? : ' + src [ NUMERICIDENTIFIERLOOSE ] + ' | ' + src [ NONNUMERICIDENTIFIER ] + ' ) ' ; <nl> + <nl> + / / # # Pre - release Version <nl> + / / Hyphen , followed by one or more dot - separated pre - release version <nl> + / / identifiers . <nl> + <nl> + var PRERELEASE = R + + ; <nl> + src [ PRERELEASE ] = ' ( ? : - ( ' + src [ PRERELEASEIDENTIFIER ] + ' ( ? : \ \ . ' + src [ PRERELEASEIDENTIFIER ] + ' ) * ) ) ' ; <nl> + <nl> + var PRERELEASELOOSE = R + + ; <nl> + src [ PRERELEASELOOSE ] = ' ( ? : - ? ( ' + src [ PRERELEASEIDENTIFIERLOOSE ] + ' ( ? : \ \ . ' + src [ PRERELEASEIDENTIFIERLOOSE ] + ' ) * ) ) ' ; <nl> + <nl> + / / # # Build Metadata Identifier <nl> + / / Any combination of digits , letters , or hyphens . <nl> + <nl> + var BUILDIDENTIFIER = R + + ; <nl> + src [ BUILDIDENTIFIER ] = ' [ 0 - 9A - Za - z - ] + ' ; <nl> + <nl> + / / # # Build Metadata <nl> + / / Plus sign , followed by one or more period - separated build metadata <nl> + / / identifiers . <nl> + <nl> + var BUILD = R + + ; <nl> + src [ BUILD ] = ' ( ? : \ \ + ( ' + src [ BUILDIDENTIFIER ] + ' ( ? : \ \ . ' + src [ BUILDIDENTIFIER ] + ' ) * ) ) ' ; <nl> + <nl> + / / # # Full Version String <nl> + / / A main version , followed optionally by a pre - release version and <nl> + / / build metadata . <nl> + <nl> + / / Note that the only major , minor , patch , and pre - release sections of <nl> + / / the version string are capturing groups . The build metadata is not a <nl> + / / capturing group , because it should not ever be used in version <nl> + / / comparison . <nl> + <nl> + var FULL = R + + ; <nl> + var FULLPLAIN = ' v ? ' + src [ MAINVERSION ] + src [ PRERELEASE ] + ' ? ' + src [ BUILD ] + ' ? ' ; <nl> + <nl> + src [ FULL ] = ' ^ ' + FULLPLAIN + ' $ ' ; <nl> + <nl> + / / like full , but allows v1 . 2 . 3 and = 1 . 2 . 3 , which people do sometimes . <nl> + / / also , 1 . 0 . 0alpha1 ( prerelease without the hyphen ) which is pretty <nl> + / / common in the npm registry . <nl> + var LOOSEPLAIN = ' [ v = \ \ s ] * ' + src [ MAINVERSIONLOOSE ] + src [ PRERELEASELOOSE ] + ' ? 
' + src [ BUILD ] + ' ? ' ; <nl> + <nl> + var LOOSE = R + + ; <nl> + src [ LOOSE ] = ' ^ ' + LOOSEPLAIN + ' $ ' ; <nl> + <nl> + var GTLT = R + + ; <nl> + src [ GTLT ] = ' ( ( ? : < | > ) ? = ? ) ' ; <nl> + <nl> + / / Something like " 2 . * " or " 1 . 2 . x " . <nl> + / / Note that " x . x " is a valid xRange identifer , meaning " any version " <nl> + / / Only the first item is strictly required . <nl> + var XRANGEIDENTIFIERLOOSE = R + + ; <nl> + src [ XRANGEIDENTIFIERLOOSE ] = src [ NUMERICIDENTIFIERLOOSE ] + ' | x | X | \ \ * ' ; <nl> + var XRANGEIDENTIFIER = R + + ; <nl> + src [ XRANGEIDENTIFIER ] = src [ NUMERICIDENTIFIER ] + ' | x | X | \ \ * ' ; <nl> + <nl> + var XRANGEPLAIN = R + + ; <nl> + src [ XRANGEPLAIN ] = ' [ v = \ \ s ] * ( ' + src [ XRANGEIDENTIFIER ] + ' ) ' + ' ( ? : \ \ . ( ' + src [ XRANGEIDENTIFIER ] + ' ) ' + ' ( ? : \ \ . ( ' + src [ XRANGEIDENTIFIER ] + ' ) ' + ' ( ? : ' + src [ PRERELEASE ] + ' ) ? ' + src [ BUILD ] + ' ? ' + ' ) ? ) ? ' ; <nl> + <nl> + var XRANGEPLAINLOOSE = R + + ; <nl> + src [ XRANGEPLAINLOOSE ] = ' [ v = \ \ s ] * ( ' + src [ XRANGEIDENTIFIERLOOSE ] + ' ) ' + ' ( ? : \ \ . ( ' + src [ XRANGEIDENTIFIERLOOSE ] + ' ) ' + ' ( ? : \ \ . ( ' + src [ XRANGEIDENTIFIERLOOSE ] + ' ) ' + ' ( ? : ' + src [ PRERELEASELOOSE ] + ' ) ? ' + src [ BUILD ] + ' ? ' + ' ) ? ) ? ' ; <nl> + <nl> + var XRANGE = R + + ; <nl> + src [ XRANGE ] = ' ^ ' + src [ GTLT ] + ' \ \ s * ' + src [ XRANGEPLAIN ] + ' $ ' ; <nl> + var XRANGELOOSE = R + + ; <nl> + src [ XRANGELOOSE ] = ' ^ ' + src [ GTLT ] + ' \ \ s * ' + src [ XRANGEPLAINLOOSE ] + ' $ ' ; <nl> + <nl> + / / Tilde ranges . <nl> + / / Meaning is " reasonably at or greater than " <nl> + var LONETILDE = R + + ; <nl> + src [ LONETILDE ] = ' ( ? : ~ > ? ) ' ; <nl> + <nl> + var TILDETRIM = R + + ; <nl> + src [ TILDETRIM ] = ' ( \ \ s * ) ' + src [ LONETILDE ] + ' \ \ s + ' ; <nl> + re [ TILDETRIM ] = new RegExp ( src [ TILDETRIM ] , ' g ' ) ; <nl> + var tildeTrimReplace = ' $ 1 ~ ' ; <nl> + <nl> + var TILDE = R + + ; <nl> + src [ TILDE ] = ' ^ ' + src [ LONETILDE ] + src [ XRANGEPLAIN ] + ' $ ' ; <nl> + var TILDELOOSE = R + + ; <nl> + src [ TILDELOOSE ] = ' ^ ' + src [ LONETILDE ] + src [ XRANGEPLAINLOOSE ] + ' $ ' ; <nl> + <nl> + / / Caret ranges . <nl> + / / Meaning is " at least and backwards compatible with " <nl> + var LONECARET = R + + ; <nl> + src [ LONECARET ] = ' ( ? : \ \ ^ ) ' ; <nl> + <nl> + var CARETTRIM = R + + ; <nl> + src [ CARETTRIM ] = ' ( \ \ s * ) ' + src [ LONECARET ] + ' \ \ s + ' ; <nl> + re [ CARETTRIM ] = new RegExp ( src [ CARETTRIM ] , ' g ' ) ; <nl> + var caretTrimReplace = ' $ 1 ^ ' ; <nl> + <nl> + var CARET = R + + ; <nl> + src [ CARET ] = ' ^ ' + src [ LONECARET ] + src [ XRANGEPLAIN ] + ' $ ' ; <nl> + var CARETLOOSE = R + + ; <nl> + src [ CARETLOOSE ] = ' ^ ' + src [ LONECARET ] + src [ XRANGEPLAINLOOSE ] + ' $ ' ; <nl> + <nl> + / / A simple gt / lt / eq thing , or just " " to indicate " any version " <nl> + var COMPARATORLOOSE = R + + ; <nl> + src [ COMPARATORLOOSE ] = ' ^ ' + src [ GTLT ] + ' \ \ s * ( ' + LOOSEPLAIN + ' ) $ | ^ $ ' ; <nl> + var COMPARATOR = R + + ; <nl> + src [ COMPARATOR ] = ' ^ ' + src [ GTLT ] + ' \ \ s * ( ' + FULLPLAIN + ' ) $ | ^ $ ' ; <nl> + <nl> + / / An expression to strip any whitespace between the gtlt and the thing <nl> + / / it modifies , so that ` > 1 . 2 . 3 ` = = > ` > 1 . 2 . 
3 ` <nl> + var COMPARATORTRIM = R + + ; <nl> + src [ COMPARATORTRIM ] = ' ( \ \ s * ) ' + src [ GTLT ] + ' \ \ s * ( ' + LOOSEPLAIN + ' | ' + src [ XRANGEPLAIN ] + ' ) ' ; <nl> + <nl> + / / this one has to use the / g flag <nl> + re [ COMPARATORTRIM ] = new RegExp ( src [ COMPARATORTRIM ] , ' g ' ) ; <nl> + var comparatorTrimReplace = ' $ 1 $ 2 $ 3 ' ; <nl> + <nl> + / / Something like ` 1 . 2 . 3 - 1 . 2 . 4 ` <nl> + / / Note that these all use the loose form , because they ' ll be <nl> + / / checked against either the strict or loose comparator form <nl> + / / later . <nl> + var HYPHENRANGE = R + + ; <nl> + src [ HYPHENRANGE ] = ' ^ \ \ s * ( ' + src [ XRANGEPLAIN ] + ' ) ' + ' \ \ s + - \ \ s + ' + ' ( ' + src [ XRANGEPLAIN ] + ' ) ' + ' \ \ s * $ ' ; <nl> + <nl> + var HYPHENRANGELOOSE = R + + ; <nl> + src [ HYPHENRANGELOOSE ] = ' ^ \ \ s * ( ' + src [ XRANGEPLAINLOOSE ] + ' ) ' + ' \ \ s + - \ \ s + ' + ' ( ' + src [ XRANGEPLAINLOOSE ] + ' ) ' + ' \ \ s * $ ' ; <nl> + <nl> + / / Star ranges basically just allow anything at all . <nl> + var STAR = R + + ; <nl> + src [ STAR ] = ' ( < | > ) ? = ? \ \ s * \ \ * ' ; <nl> + <nl> + / / Compile to actual regexp objects . <nl> + / / All are flag - free , unless they were created above with a flag . <nl> + for ( var i = 0 ; i < R ; i + + ) { <nl> + debug ( i , src [ i ] ) ; <nl> + if ( ! re [ i ] ) re [ i ] = new RegExp ( src [ i ] ) ; <nl> + } <nl> + <nl> + exports . parse = parse ; <nl> + function parse ( version , loose ) { <nl> + if ( version instanceof SemVer ) return version ; <nl> + <nl> + if ( typeof version ! = = ' string ' ) return null ; <nl> + <nl> + if ( version . length > MAX_LENGTH ) return null ; <nl> + <nl> + var r = loose ? re [ LOOSE ] : re [ FULL ] ; <nl> + if ( ! r . test ( version ) ) return null ; <nl> + <nl> + try { <nl> + return new SemVer ( version , loose ) ; <nl> + } catch ( er ) { <nl> + return null ; <nl> + } <nl> + } <nl> + <nl> + exports . valid = valid ; <nl> + function valid ( version , loose ) { <nl> + var v = parse ( version , loose ) ; <nl> + return v ? v . version : null ; <nl> + } <nl> + <nl> + exports . clean = clean ; <nl> + function clean ( version , loose ) { <nl> + var s = parse ( version . trim ( ) . replace ( / ^ [ = v ] + / , ' ' ) , loose ) ; <nl> + return s ? s . version : null ; <nl> + } <nl> + <nl> + exports . SemVer = SemVer ; <nl> + <nl> + function SemVer ( version , loose ) { <nl> + if ( version instanceof SemVer ) { <nl> + if ( version . loose = = = loose ) return version ; else version = version . version ; <nl> + } else if ( typeof version ! = = ' string ' ) { <nl> + throw new TypeError ( ' Invalid Version : ' + version ) ; <nl> + } <nl> + <nl> + if ( version . length > MAX_LENGTH ) throw new TypeError ( ' version is longer than ' + MAX_LENGTH + ' characters ' ) ; <nl> + <nl> + if ( ! ( this instanceof SemVer ) ) return new SemVer ( version , loose ) ; <nl> + <nl> + debug ( ' SemVer ' , version , loose ) ; <nl> + this . loose = loose ; <nl> + var m = version . trim ( ) . match ( loose ? re [ LOOSE ] : re [ FULL ] ) ; <nl> + <nl> + if ( ! m ) throw new TypeError ( ' Invalid Version : ' + version ) ; <nl> + <nl> + this . raw = version ; <nl> + <nl> + / / these are actually numbers <nl> + this . major = + m [ 1 ] ; <nl> + this . minor = + m [ 2 ] ; <nl> + this . patch = + m [ 3 ] ; <nl> + <nl> + if ( this . major > MAX_SAFE_INTEGER | | this . major < 0 ) throw new TypeError ( ' Invalid major version ' ) ; <nl> + <nl> + if ( this . minor > MAX_SAFE_INTEGER | | this . 
minor < 0 ) throw new TypeError ( ' Invalid minor version ' ) ; <nl> + <nl> + if ( this . patch > MAX_SAFE_INTEGER | | this . patch < 0 ) throw new TypeError ( ' Invalid patch version ' ) ; <nl> + <nl> + / / numberify any prerelease numeric ids <nl> + if ( ! m [ 4 ] ) this . prerelease = [ ] ; else this . prerelease = m [ 4 ] . split ( ' . ' ) . map ( function ( id ) { <nl> + if ( / ^ [ 0 - 9 ] + $ / . test ( id ) ) { <nl> + var num = + id ; <nl> + if ( num > = 0 & & num < MAX_SAFE_INTEGER ) return num ; <nl> + } <nl> + return id ; <nl> + } ) ; <nl> + <nl> + this . build = m [ 5 ] ? m [ 5 ] . split ( ' . ' ) : [ ] ; <nl> + this . format ( ) ; <nl> + } <nl> + <nl> + SemVer . prototype . format = function ( ) { <nl> + this . version = this . major + ' . ' + this . minor + ' . ' + this . patch ; <nl> + if ( this . prerelease . length ) this . version + = ' - ' + this . prerelease . join ( ' . ' ) ; <nl> + return this . version ; <nl> + } ; <nl> + <nl> + SemVer . prototype . toString = function ( ) { <nl> + return this . version ; <nl> + } ; <nl> + <nl> + SemVer . prototype . compare = function ( other ) { <nl> + debug ( ' SemVer . compare ' , this . version , this . loose , other ) ; <nl> + if ( ! ( other instanceof SemVer ) ) other = new SemVer ( other , this . loose ) ; <nl> + <nl> + return this . compareMain ( other ) | | this . comparePre ( other ) ; <nl> + } ; <nl> + <nl> + SemVer . prototype . compareMain = function ( other ) { <nl> + if ( ! ( other instanceof SemVer ) ) other = new SemVer ( other , this . loose ) ; <nl> + <nl> + return compareIdentifiers ( this . major , other . major ) | | compareIdentifiers ( this . minor , other . minor ) | | compareIdentifiers ( this . patch , other . patch ) ; <nl> + } ; <nl> + <nl> + SemVer . prototype . comparePre = function ( other ) { <nl> + if ( ! ( other instanceof SemVer ) ) other = new SemVer ( other , this . loose ) ; <nl> + <nl> + / / NOT having a prerelease is > having one <nl> + if ( this . prerelease . length & & ! other . prerelease . length ) return - 1 ; else if ( ! this . prerelease . length & & other . prerelease . length ) return 1 ; else if ( ! this . prerelease . length & & ! other . prerelease . length ) return 0 ; <nl> + <nl> + var i = 0 ; <nl> + do { <nl> + var a = this . prerelease [ i ] ; <nl> + var b = other . prerelease [ i ] ; <nl> + debug ( ' prerelease compare ' , i , a , b ) ; <nl> + if ( a = = = undefined & & b = = = undefined ) return 0 ; else if ( b = = = undefined ) return 1 ; else if ( a = = = undefined ) return - 1 ; else if ( a = = = b ) continue ; else return compareIdentifiers ( a , b ) ; <nl> + } while ( + + i ) ; <nl> + } ; <nl> + <nl> + / / preminor will bump the version up to the next minor release , and immediately <nl> + / / down to pre - release . premajor and prepatch work the same way . <nl> + SemVer . prototype . inc = function ( release , identifier ) { <nl> + switch ( release ) { <nl> + case ' premajor ' : <nl> + this . prerelease . length = 0 ; <nl> + this . patch = 0 ; <nl> + this . minor = 0 ; <nl> + this . major + + ; <nl> + this . inc ( ' pre ' , identifier ) ; <nl> + break ; <nl> + case ' preminor ' : <nl> + this . prerelease . length = 0 ; <nl> + this . patch = 0 ; <nl> + this . minor + + ; <nl> + this . inc ( ' pre ' , identifier ) ; <nl> + break ; <nl> + case ' prepatch ' : <nl> + / / If this is already a prerelease , it will bump to the next version <nl> + / / drop any prereleases that might already exist , since they are not <nl> + / / relevant at this point . <nl> + this . 
prerelease . length = 0 ; <nl> + this . inc ( ' patch ' , identifier ) ; <nl> + this . inc ( ' pre ' , identifier ) ; <nl> + break ; <nl> + / / If the input is a non - prerelease version , this acts the same as <nl> + / / prepatch . <nl> + case ' prerelease ' : <nl> + if ( this . prerelease . length = = = 0 ) this . inc ( ' patch ' , identifier ) ; <nl> + this . inc ( ' pre ' , identifier ) ; <nl> + break ; <nl> + <nl> + case ' major ' : <nl> + / / If this is a pre - major version , bump up to the same major version . <nl> + / / Otherwise increment major . <nl> + / / 1 . 0 . 0 - 5 bumps to 1 . 0 . 0 <nl> + / / 1 . 1 . 0 bumps to 2 . 0 . 0 <nl> + if ( this . minor ! = = 0 | | this . patch ! = = 0 | | this . prerelease . length = = = 0 ) this . major + + ; <nl> + this . minor = 0 ; <nl> + this . patch = 0 ; <nl> + this . prerelease = [ ] ; <nl> + break ; <nl> + case ' minor ' : <nl> + / / If this is a pre - minor version , bump up to the same minor version . <nl> + / / Otherwise increment minor . <nl> + / / 1 . 2 . 0 - 5 bumps to 1 . 2 . 0 <nl> + / / 1 . 2 . 1 bumps to 1 . 3 . 0 <nl> + if ( this . patch ! = = 0 | | this . prerelease . length = = = 0 ) this . minor + + ; <nl> + this . patch = 0 ; <nl> + this . prerelease = [ ] ; <nl> + break ; <nl> + case ' patch ' : <nl> + / / If this is not a pre - release version , it will increment the patch . <nl> + / / If it is a pre - release it will bump up to the same patch version . <nl> + / / 1 . 2 . 0 - 5 patches to 1 . 2 . 0 <nl> + / / 1 . 2 . 0 patches to 1 . 2 . 1 <nl> + if ( this . prerelease . length = = = 0 ) this . patch + + ; <nl> + this . prerelease = [ ] ; <nl> + break ; <nl> + / / This probably shouldn ' t be used publicly . <nl> + / / 1 . 0 . 0 " pre " would become 1 . 0 . 0 - 0 which is the wrong direction . <nl> + case ' pre ' : <nl> + if ( this . prerelease . length = = = 0 ) this . prerelease = [ 0 ] ; else { <nl> + var i = this . prerelease . length ; <nl> + while ( - - i > = 0 ) { <nl> + if ( typeof this . prerelease [ i ] = = = ' number ' ) { <nl> + this . prerelease [ i ] + + ; <nl> + i = - 2 ; <nl> + } <nl> + } <nl> + if ( i = = = - 1 ) / / didn ' t increment anything <nl> + this . prerelease . push ( 0 ) ; <nl> + } <nl> + if ( identifier ) { <nl> + / / 1 . 2 . 0 - beta . 1 bumps to 1 . 2 . 0 - beta . 2 , <nl> + / / 1 . 2 . 0 - beta . fooblz or 1 . 2 . 0 - beta bumps to 1 . 2 . 0 - beta . 0 <nl> + if ( this . prerelease [ 0 ] = = = identifier ) { <nl> + if ( isNaN ( this . prerelease [ 1 ] ) ) this . prerelease = [ identifier , 0 ] ; <nl> + } else this . prerelease = [ identifier , 0 ] ; <nl> + } <nl> + break ; <nl> + <nl> + default : <nl> + throw new Error ( ' invalid increment argument : ' + release ) ; <nl> + } <nl> + this . format ( ) ; <nl> + this . raw = this . version ; <nl> + return this ; <nl> + } ; <nl> + <nl> + exports . inc = inc ; <nl> + function inc ( version , release , loose , identifier ) { <nl> + if ( typeof loose = = = ' string ' ) { <nl> + identifier = loose ; <nl> + loose = undefined ; <nl> + } <nl> + <nl> + try { <nl> + return new SemVer ( version , loose ) . inc ( release , identifier ) . version ; <nl> + } catch ( er ) { <nl> + return null ; <nl> + } <nl> + } <nl> + <nl> + exports . diff = diff ; <nl> + function diff ( version1 , version2 ) { <nl> + if ( eq ( version1 , version2 ) ) { <nl> + return null ; <nl> + } else { <nl> + var v1 = parse ( version1 ) ; <nl> + var v2 = parse ( version2 ) ; <nl> + if ( v1 . prerelease . length | | v2 . prerelease . 
length ) { <nl> + for ( var key in v1 ) { <nl> + if ( key = = = ' major ' | | key = = = ' minor ' | | key = = = ' patch ' ) { <nl> + if ( v1 [ key ] ! = = v2 [ key ] ) { <nl> + return ' pre ' + key ; <nl> + } <nl> + } <nl> + } <nl> + return ' prerelease ' ; <nl> + } <nl> + for ( var key in v1 ) { <nl> + if ( key = = = ' major ' | | key = = = ' minor ' | | key = = = ' patch ' ) { <nl> + if ( v1 [ key ] ! = = v2 [ key ] ) { <nl> + return key ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + exports . compareIdentifiers = compareIdentifiers ; <nl> + <nl> + var numeric = / ^ [ 0 - 9 ] + $ / ; <nl> + function compareIdentifiers ( a , b ) { <nl> + var anum = numeric . test ( a ) ; <nl> + var bnum = numeric . test ( b ) ; <nl> + <nl> + if ( anum & & bnum ) { <nl> + a = + a ; <nl> + b = + b ; <nl> + } <nl> + <nl> + return anum & & ! bnum ? - 1 : bnum & & ! anum ? 1 : a < b ? - 1 : a > b ? 1 : 0 ; <nl> + } <nl> + <nl> + exports . rcompareIdentifiers = rcompareIdentifiers ; <nl> + function rcompareIdentifiers ( a , b ) { <nl> + return compareIdentifiers ( b , a ) ; <nl> + } <nl> + <nl> + exports . major = major ; <nl> + function major ( a , loose ) { <nl> + return new SemVer ( a , loose ) . major ; <nl> + } <nl> + <nl> + exports . minor = minor ; <nl> + function minor ( a , loose ) { <nl> + return new SemVer ( a , loose ) . minor ; <nl> + } <nl> + <nl> + exports . patch = patch ; <nl> + function patch ( a , loose ) { <nl> + return new SemVer ( a , loose ) . patch ; <nl> + } <nl> + <nl> + exports . compare = compare ; <nl> + function compare ( a , b , loose ) { <nl> + return new SemVer ( a , loose ) . compare ( b ) ; <nl> + } <nl> + <nl> + exports . compareLoose = compareLoose ; <nl> + function compareLoose ( a , b ) { <nl> + return compare ( a , b , true ) ; <nl> + } <nl> + <nl> + exports . rcompare = rcompare ; <nl> + function rcompare ( a , b , loose ) { <nl> + return compare ( b , a , loose ) ; <nl> + } <nl> + <nl> + exports . sort = sort ; <nl> + function sort ( list , loose ) { <nl> + return list . sort ( function ( a , b ) { <nl> + return exports . compare ( a , b , loose ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + exports . rsort = rsort ; <nl> + function rsort ( list , loose ) { <nl> + return list . sort ( function ( a , b ) { <nl> + return exports . rcompare ( a , b , loose ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + exports . gt = gt ; <nl> + function gt ( a , b , loose ) { <nl> + return compare ( a , b , loose ) > 0 ; <nl> + } <nl> + <nl> + exports . lt = lt ; <nl> + function lt ( a , b , loose ) { <nl> + return compare ( a , b , loose ) < 0 ; <nl> + } <nl> + <nl> + exports . eq = eq ; <nl> + function eq ( a , b , loose ) { <nl> + return compare ( a , b , loose ) = = = 0 ; <nl> + } <nl> + <nl> + exports . neq = neq ; <nl> + function neq ( a , b , loose ) { <nl> + return compare ( a , b , loose ) ! = = 0 ; <nl> + } <nl> + <nl> + exports . gte = gte ; <nl> + function gte ( a , b , loose ) { <nl> + return compare ( a , b , loose ) > = 0 ; <nl> + } <nl> + <nl> + exports . lte = lte ; <nl> + function lte ( a , b , loose ) { <nl> + return compare ( a , b , loose ) < = 0 ; <nl> + } <nl> + <nl> + exports . cmp = cmp ; <nl> + function cmp ( a , op , b , loose ) { <nl> + var ret ; <nl> + switch ( op ) { <nl> + case ' = = = ' : <nl> + if ( ( typeof a = = = ' undefined ' ? ' undefined ' : _typeof ( a ) ) = = = ' object ' ) a = a . version ; <nl> + if ( ( typeof b = = = ' undefined ' ? ' undefined ' : _typeof ( b ) ) = = = ' object ' ) b = b . 
version ; <nl> + ret = a = = = b ; <nl> + break ; <nl> + case ' ! = = ' : <nl> + if ( ( typeof a = = = ' undefined ' ? ' undefined ' : _typeof ( a ) ) = = = ' object ' ) a = a . version ; <nl> + if ( ( typeof b = = = ' undefined ' ? ' undefined ' : _typeof ( b ) ) = = = ' object ' ) b = b . version ; <nl> + ret = a ! = = b ; <nl> + break ; <nl> + case ' ' : case ' = ' : case ' = = ' : <nl> + ret = eq ( a , b , loose ) ; break ; <nl> + case ' ! = ' : <nl> + ret = neq ( a , b , loose ) ; break ; <nl> + case ' > ' : <nl> + ret = gt ( a , b , loose ) ; break ; <nl> + case ' > = ' : <nl> + ret = gte ( a , b , loose ) ; break ; <nl> + case ' < ' : <nl> + ret = lt ( a , b , loose ) ; break ; <nl> + case ' < = ' : <nl> + ret = lte ( a , b , loose ) ; break ; <nl> + default : <nl> + throw new TypeError ( ' Invalid operator : ' + op ) ; <nl> + } <nl> + return ret ; <nl> + } <nl> + <nl> + exports . Comparator = Comparator ; <nl> + function Comparator ( comp , loose ) { <nl> + if ( comp instanceof Comparator ) { <nl> + if ( comp . loose = = = loose ) return comp ; else comp = comp . value ; <nl> + } <nl> + <nl> + if ( ! ( this instanceof Comparator ) ) return new Comparator ( comp , loose ) ; <nl> + <nl> + debug ( ' comparator ' , comp , loose ) ; <nl> + this . loose = loose ; <nl> + this . parse ( comp ) ; <nl> + <nl> + if ( this . semver = = = ANY ) this . value = ' ' ; else this . value = this . operator + this . semver . version ; <nl> + <nl> + debug ( ' comp ' , this ) ; <nl> + } <nl> + <nl> + var ANY = { } ; <nl> + Comparator . prototype . parse = function ( comp ) { <nl> + var r = this . loose ? re [ COMPARATORLOOSE ] : re [ COMPARATOR ] ; <nl> + var m = comp . match ( r ) ; <nl> + <nl> + if ( ! m ) throw new TypeError ( ' Invalid comparator : ' + comp ) ; <nl> + <nl> + this . operator = m [ 1 ] ; <nl> + if ( this . operator = = = ' = ' ) this . operator = ' ' ; <nl> + <nl> + / / if it literally is just ' > ' or ' ' then allow anything . <nl> + if ( ! m [ 2 ] ) this . semver = ANY ; else this . semver = new SemVer ( m [ 2 ] , this . loose ) ; <nl> + } ; <nl> + <nl> + Comparator . prototype . toString = function ( ) { <nl> + return this . value ; <nl> + } ; <nl> + <nl> + Comparator . prototype . test = function ( version ) { <nl> + debug ( ' Comparator . test ' , version , this . loose ) ; <nl> + <nl> + if ( this . semver = = = ANY ) return true ; <nl> + <nl> + if ( typeof version = = = ' string ' ) version = new SemVer ( version , this . loose ) ; <nl> + <nl> + return cmp ( version , this . operator , this . semver , this . loose ) ; <nl> + } ; <nl> + <nl> + exports . Range = Range ; <nl> + function Range ( range , loose ) { <nl> + if ( range instanceof Range & & range . loose = = = loose ) return range ; <nl> + <nl> + if ( ! ( this instanceof Range ) ) return new Range ( range , loose ) ; <nl> + <nl> + this . loose = loose ; <nl> + <nl> + / / First , split based on boolean or | | <nl> + this . raw = range ; <nl> + this . set = range . split ( / \ s * \ | \ | \ s * / ) . map ( function ( range ) { <nl> + return this . parseRange ( range . trim ( ) ) ; <nl> + } , this ) . filter ( function ( c ) { <nl> + / / throw out any that are not relevant for whatever reason <nl> + return c . length ; <nl> + } ) ; <nl> + <nl> + if ( ! this . set . length ) { <nl> + throw new TypeError ( ' Invalid SemVer Range : ' + range ) ; <nl> + } <nl> + <nl> + this . format ( ) ; <nl> + } <nl> + <nl> + Range . prototype . format = function ( ) { <nl> + this . range = this . set . 
map ( function ( comps ) { <nl> + return comps . join ( ' ' ) . trim ( ) ; <nl> + } ) . join ( ' | | ' ) . trim ( ) ; <nl> + return this . range ; <nl> + } ; <nl> + <nl> + Range . prototype . toString = function ( ) { <nl> + return this . range ; <nl> + } ; <nl> + <nl> + Range . prototype . parseRange = function ( range ) { <nl> + var loose = this . loose ; <nl> + range = range . trim ( ) ; <nl> + debug ( ' range ' , range , loose ) ; <nl> + / / ` 1 . 2 . 3 - 1 . 2 . 4 ` = > ` > = 1 . 2 . 3 < = 1 . 2 . 4 ` <nl> + var hr = loose ? re [ HYPHENRANGELOOSE ] : re [ HYPHENRANGE ] ; <nl> + range = range . replace ( hr , hyphenReplace ) ; <nl> + debug ( ' hyphen replace ' , range ) ; <nl> + / / ` > 1 . 2 . 3 < 1 . 2 . 5 ` = > ` > 1 . 2 . 3 < 1 . 2 . 5 ` <nl> + range = range . replace ( re [ COMPARATORTRIM ] , comparatorTrimReplace ) ; <nl> + debug ( ' comparator trim ' , range , re [ COMPARATORTRIM ] ) ; <nl> + <nl> + / / ` ~ 1 . 2 . 3 ` = > ` ~ 1 . 2 . 3 ` <nl> + range = range . replace ( re [ TILDETRIM ] , tildeTrimReplace ) ; <nl> + <nl> + / / ` ^ 1 . 2 . 3 ` = > ` ^ 1 . 2 . 3 ` <nl> + range = range . replace ( re [ CARETTRIM ] , caretTrimReplace ) ; <nl> + <nl> + / / normalize spaces <nl> + range = range . split ( / \ s + / ) . join ( ' ' ) ; <nl> + <nl> + / / At this point , the range is completely trimmed and <nl> + / / ready to be split into comparators . <nl> + <nl> + var compRe = loose ? re [ COMPARATORLOOSE ] : re [ COMPARATOR ] ; <nl> + var set = range . split ( ' ' ) . map ( function ( comp ) { <nl> + return parseComparator ( comp , loose ) ; <nl> + } ) . join ( ' ' ) . split ( / \ s + / ) ; <nl> + if ( this . loose ) { <nl> + / / in loose mode , throw out any that are not valid comparators <nl> + set = set . filter ( function ( comp ) { <nl> + return ! ! comp . match ( compRe ) ; <nl> + } ) ; <nl> + } <nl> + set = set . map ( function ( comp ) { <nl> + return new Comparator ( comp , loose ) ; <nl> + } ) ; <nl> + <nl> + return set ; <nl> + } ; <nl> + <nl> + / / Mostly just for testing and legacy API reasons <nl> + exports . toComparators = toComparators ; <nl> + function toComparators ( range , loose ) { <nl> + return new Range ( range , loose ) . set . map ( function ( comp ) { <nl> + return comp . map ( function ( c ) { <nl> + return c . value ; <nl> + } ) . join ( ' ' ) . trim ( ) . split ( ' ' ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + / / comprised of xranges , tildes , stars , and gtlt ' s at this point . <nl> + / / already replaced the hyphen ranges <nl> + / / turn into a set of JUST comparators . <nl> + function parseComparator ( comp , loose ) { <nl> + debug ( ' comp ' , comp ) ; <nl> + comp = replaceCarets ( comp , loose ) ; <nl> + debug ( ' caret ' , comp ) ; <nl> + comp = replaceTildes ( comp , loose ) ; <nl> + debug ( ' tildes ' , comp ) ; <nl> + comp = replaceXRanges ( comp , loose ) ; <nl> + debug ( ' xrange ' , comp ) ; <nl> + comp = replaceStars ( comp , loose ) ; <nl> + debug ( ' stars ' , comp ) ; <nl> + return comp ; <nl> + } <nl> + <nl> + function isX ( id ) { <nl> + return ! id | | id . toLowerCase ( ) = = = ' x ' | | id = = = ' * ' ; <nl> + } <nl> + <nl> + / / ~ , ~ > - - > * ( any , kinda silly ) <nl> + / / ~ 2 , ~ 2 . x , ~ 2 . x . x , ~ > 2 , ~ > 2 . x ~ > 2 . x . x - - > > = 2 . 0 . 0 < 3 . 0 . 0 <nl> + / / ~ 2 . 0 , ~ 2 . 0 . x , ~ > 2 . 0 , ~ > 2 . 0 . x - - > > = 2 . 0 . 0 < 2 . 1 . 0 <nl> + / / ~ 1 . 2 , ~ 1 . 2 . x , ~ > 1 . 2 , ~ > 1 . 2 . x - - > > = 1 . 2 . 0 < 1 . 3 . 0 <nl> + / / ~ 1 . 2 . 3 , ~ > 1 . 2 . 3 - - > > = 1 . 2 . 3 < 1 . 3 . 
0 <nl> + / / ~ 1 . 2 . 0 , ~ > 1 . 2 . 0 - - > > = 1 . 2 . 0 < 1 . 3 . 0 <nl> + function replaceTildes ( comp , loose ) { <nl> + return comp . trim ( ) . split ( / \ s + / ) . map ( function ( comp ) { <nl> + return replaceTilde ( comp , loose ) ; <nl> + } ) . join ( ' ' ) ; <nl> + } <nl> + <nl> + function replaceTilde ( comp , loose ) { <nl> + var r = loose ? re [ TILDELOOSE ] : re [ TILDE ] ; <nl> + return comp . replace ( r , function ( _ , M , m , p , pr ) { <nl> + debug ( ' tilde ' , comp , _ , M , m , p , pr ) ; <nl> + var ret ; <nl> + <nl> + if ( isX ( M ) ) ret = ' ' ; else if ( isX ( m ) ) ret = ' > = ' + M + ' . 0 . 0 < ' + ( + M + 1 ) + ' . 0 . 0 ' ; else if ( isX ( p ) ) <nl> + / / ~ 1 . 2 = = > = 1 . 2 . 0 < 1 . 3 . 0 <nl> + ret = ' > = ' + M + ' . ' + m + ' . 0 < ' + M + ' . ' + ( + m + 1 ) + ' . 0 ' ; else if ( pr ) { <nl> + debug ( ' replaceTilde pr ' , pr ) ; <nl> + if ( pr . charAt ( 0 ) ! = = ' - ' ) pr = ' - ' + pr ; <nl> + ret = ' > = ' + M + ' . ' + m + ' . ' + p + pr + ' < ' + M + ' . ' + ( + m + 1 ) + ' . 0 ' ; <nl> + } else <nl> + / / ~ 1 . 2 . 3 = = > = 1 . 2 . 3 < 1 . 3 . 0 <nl> + ret = ' > = ' + M + ' . ' + m + ' . ' + p + ' < ' + M + ' . ' + ( + m + 1 ) + ' . 0 ' ; <nl> + <nl> + debug ( ' tilde return ' , ret ) ; <nl> + return ret ; <nl> + } ) ; <nl> + } <nl> + <nl> + / / ^ - - > * ( any , kinda silly ) <nl> + / / ^ 2 , ^ 2 . x , ^ 2 . x . x - - > > = 2 . 0 . 0 < 3 . 0 . 0 <nl> + / / ^ 2 . 0 , ^ 2 . 0 . x - - > > = 2 . 0 . 0 < 3 . 0 . 0 <nl> + / / ^ 1 . 2 , ^ 1 . 2 . x - - > > = 1 . 2 . 0 < 2 . 0 . 0 <nl> + / / ^ 1 . 2 . 3 - - > > = 1 . 2 . 3 < 2 . 0 . 0 <nl> + / / ^ 1 . 2 . 0 - - > > = 1 . 2 . 0 < 2 . 0 . 0 <nl> + function replaceCarets ( comp , loose ) { <nl> + return comp . trim ( ) . split ( / \ s + / ) . map ( function ( comp ) { <nl> + return replaceCaret ( comp , loose ) ; <nl> + } ) . join ( ' ' ) ; <nl> + } <nl> + <nl> + function replaceCaret ( comp , loose ) { <nl> + debug ( ' caret ' , comp , loose ) ; <nl> + var r = loose ? re [ CARETLOOSE ] : re [ CARET ] ; <nl> + return comp . replace ( r , function ( _ , M , m , p , pr ) { <nl> + debug ( ' caret ' , comp , _ , M , m , p , pr ) ; <nl> + var ret ; <nl> + <nl> + if ( isX ( M ) ) ret = ' ' ; else if ( isX ( m ) ) ret = ' > = ' + M + ' . 0 . 0 < ' + ( + M + 1 ) + ' . 0 . 0 ' ; else if ( isX ( p ) ) { <nl> + if ( M = = = ' 0 ' ) ret = ' > = ' + M + ' . ' + m + ' . 0 < ' + M + ' . ' + ( + m + 1 ) + ' . 0 ' ; else ret = ' > = ' + M + ' . ' + m + ' . 0 < ' + ( + M + 1 ) + ' . 0 . 0 ' ; <nl> + } else if ( pr ) { <nl> + debug ( ' replaceCaret pr ' , pr ) ; <nl> + if ( pr . charAt ( 0 ) ! = = ' - ' ) pr = ' - ' + pr ; <nl> + if ( M = = = ' 0 ' ) { <nl> + if ( m = = = ' 0 ' ) ret = ' > = ' + M + ' . ' + m + ' . ' + p + pr + ' < ' + M + ' . ' + m + ' . ' + ( + p + 1 ) ; else ret = ' > = ' + M + ' . ' + m + ' . ' + p + pr + ' < ' + M + ' . ' + ( + m + 1 ) + ' . 0 ' ; <nl> + } else ret = ' > = ' + M + ' . ' + m + ' . ' + p + pr + ' < ' + ( + M + 1 ) + ' . 0 . 0 ' ; <nl> + } else { <nl> + debug ( ' no pr ' ) ; <nl> + if ( M = = = ' 0 ' ) { <nl> + if ( m = = = ' 0 ' ) ret = ' > = ' + M + ' . ' + m + ' . ' + p + ' < ' + M + ' . ' + m + ' . ' + ( + p + 1 ) ; else ret = ' > = ' + M + ' . ' + m + ' . ' + p + ' < ' + M + ' . ' + ( + m + 1 ) + ' . 0 ' ; <nl> + } else ret = ' > = ' + M + ' . ' + m + ' . ' + p + ' < ' + ( + M + 1 ) + ' . 0 . 
0 ' ; <nl> + } <nl> + <nl> + debug ( ' caret return ' , ret ) ; <nl> + return ret ; <nl> + } ) ; <nl> + } <nl> + <nl> + function replaceXRanges ( comp , loose ) { <nl> + debug ( ' replaceXRanges ' , comp , loose ) ; <nl> + return comp . split ( / \ s + / ) . map ( function ( comp ) { <nl> + return replaceXRange ( comp , loose ) ; <nl> + } ) . join ( ' ' ) ; <nl> + } <nl> + <nl> + function replaceXRange ( comp , loose ) { <nl> + comp = comp . trim ( ) ; <nl> + var r = loose ? re [ XRANGELOOSE ] : re [ XRANGE ] ; <nl> + return comp . replace ( r , function ( ret , gtlt , M , m , p , pr ) { <nl> + debug ( ' xRange ' , comp , ret , gtlt , M , m , p , pr ) ; <nl> + var xM = isX ( M ) ; <nl> + var xm = xM | | isX ( m ) ; <nl> + var xp = xm | | isX ( p ) ; <nl> + var anyX = xp ; <nl> + <nl> + if ( gtlt = = = ' = ' & & anyX ) gtlt = ' ' ; <nl> + <nl> + if ( xM ) { <nl> + if ( gtlt = = = ' > ' | | gtlt = = = ' < ' ) { <nl> + / / nothing is allowed <nl> + ret = ' < 0 . 0 . 0 ' ; <nl> + } else { <nl> + / / nothing is forbidden <nl> + ret = ' * ' ; <nl> + } <nl> + } else if ( gtlt & & anyX ) { <nl> + / / replace X with 0 <nl> + if ( xm ) m = 0 ; <nl> + if ( xp ) p = 0 ; <nl> + <nl> + if ( gtlt = = = ' > ' ) { <nl> + / / > 1 = > > = 2 . 0 . 0 <nl> + / / > 1 . 2 = > > = 1 . 3 . 0 <nl> + / / > 1 . 2 . 3 = > > = 1 . 2 . 4 <nl> + gtlt = ' > = ' ; <nl> + if ( xm ) { <nl> + M = + M + 1 ; <nl> + m = 0 ; <nl> + p = 0 ; <nl> + } else if ( xp ) { <nl> + m = + m + 1 ; <nl> + p = 0 ; <nl> + } <nl> + } else if ( gtlt = = = ' < = ' ) { <nl> + / / < = 0 . 7 . x is actually < 0 . 8 . 0 , since any 0 . 7 . x should <nl> + / / pass . Similarly , < = 7 . x is actually < 8 . 0 . 0 , etc . <nl> + gtlt = ' < ' ; <nl> + if ( xm ) M = + M + 1 ; else m = + m + 1 ; <nl> + } <nl> + <nl> + ret = gtlt + M + ' . ' + m + ' . ' + p ; <nl> + } else if ( xm ) { <nl> + ret = ' > = ' + M + ' . 0 . 0 < ' + ( + M + 1 ) + ' . 0 . 0 ' ; <nl> + } else if ( xp ) { <nl> + ret = ' > = ' + M + ' . ' + m + ' . 0 < ' + M + ' . ' + ( + m + 1 ) + ' . 0 ' ; <nl> + } <nl> + <nl> + debug ( ' xRange return ' , ret ) ; <nl> + <nl> + return ret ; <nl> + } ) ; <nl> + } <nl> + <nl> + / / Because * is AND - ed with everything else in the comparator , <nl> + / / and ' ' means " any version " , just remove the * s entirely . <nl> + function replaceStars ( comp , loose ) { <nl> + debug ( ' replaceStars ' , comp , loose ) ; <nl> + / / Looseness is ignored here . star is always as loose as it gets ! <nl> + return comp . trim ( ) . replace ( re [ STAR ] , ' ' ) ; <nl> + } <nl> + <nl> + / / This function is passed to string . replace ( re [ HYPHENRANGE ] ) <nl> + / / M , m , patch , prerelease , build <nl> + / / 1 . 2 - 3 . 4 . 5 = > > = 1 . 2 . 0 < = 3 . 4 . 5 <nl> + / / 1 . 2 . 3 - 3 . 4 = > > = 1 . 2 . 0 < 3 . 5 . 0 Any 3 . 4 . x will do <nl> + / / 1 . 2 - 3 . 4 = > > = 1 . 2 . 0 < 3 . 5 . 0 <nl> + function hyphenReplace ( $ 0 , from , fM , fm , fp , fpr , fb , to , tM , tm , tp , tpr , tb ) { <nl> + <nl> + if ( isX ( fM ) ) from = ' ' ; else if ( isX ( fm ) ) from = ' > = ' + fM + ' . 0 . 0 ' ; else if ( isX ( fp ) ) from = ' > = ' + fM + ' . ' + fm + ' . 0 ' ; else from = ' > = ' + from ; <nl> + <nl> + if ( isX ( tM ) ) to = ' ' ; else if ( isX ( tm ) ) to = ' < ' + ( + tM + 1 ) + ' . 0 . 0 ' ; else if ( isX ( tp ) ) to = ' < ' + tM + ' . ' + ( + tm + 1 ) + ' . 0 ' ; else if ( tpr ) to = ' < = ' + tM + ' . ' + tm + ' . ' + tp + ' - ' + tpr ; else to = ' < = ' + to ; <nl> + <nl> + return ( from + ' ' + to ) . 
trim ( ) ; <nl> + } <nl> + <nl> + / / if ANY of the sets match ALL of its comparators , then pass <nl> + Range . prototype . test = function ( version ) { <nl> + if ( ! version ) return false ; <nl> + <nl> + if ( typeof version = = = ' string ' ) version = new SemVer ( version , this . loose ) ; <nl> + <nl> + for ( var i = 0 ; i < this . set . length ; i + + ) { <nl> + if ( testSet ( this . set [ i ] , version ) ) return true ; <nl> + } <nl> + return false ; <nl> + } ; <nl> + <nl> + function testSet ( set , version ) { <nl> + for ( var i = 0 ; i < set . length ; i + + ) { <nl> + if ( ! set [ i ] . test ( version ) ) return false ; <nl> + } <nl> + <nl> + if ( version . prerelease . length ) { <nl> + / / Find the set of versions that are allowed to have prereleases <nl> + / / For example , ^ 1 . 2 . 3 - pr . 1 desugars to > = 1 . 2 . 3 - pr . 1 < 2 . 0 . 0 <nl> + / / That should allow ` 1 . 2 . 3 - pr . 2 ` to pass . <nl> + / / However , ` 1 . 2 . 4 - alpha . notready ` should NOT be allowed , <nl> + / / even though it ' s within the range set by the comparators . <nl> + for ( var i = 0 ; i < set . length ; i + + ) { <nl> + debug ( set [ i ] . semver ) ; <nl> + if ( set [ i ] . semver = = = ANY ) continue ; <nl> + <nl> + if ( set [ i ] . semver . prerelease . length > 0 ) { <nl> + var allowed = set [ i ] . semver ; <nl> + if ( allowed . major = = = version . major & & allowed . minor = = = version . minor & & allowed . patch = = = version . patch ) return true ; <nl> + } <nl> + } <nl> + <nl> + / / Version has a - pre , but it ' s not one of the ones we like . <nl> + return false ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + exports . satisfies = satisfies ; <nl> + function satisfies ( version , range , loose ) { <nl> + try { <nl> + range = new Range ( range , loose ) ; <nl> + } catch ( er ) { <nl> + return false ; <nl> + } <nl> + return range . test ( version ) ; <nl> + } <nl> + <nl> + exports . maxSatisfying = maxSatisfying ; <nl> + function maxSatisfying ( versions , range , loose ) { <nl> + return versions . filter ( function ( version ) { <nl> + return satisfies ( version , range , loose ) ; <nl> + } ) . sort ( function ( a , b ) { <nl> + return rcompare ( a , b , loose ) ; <nl> + } ) [ 0 ] | | null ; <nl> + } <nl> + <nl> + exports . validRange = validRange ; <nl> + function validRange ( range , loose ) { <nl> + try { <nl> + / / Return ' * ' instead of ' ' so that truthiness works . <nl> + / / This will throw if it ' s invalid anyway <nl> + return new Range ( range , loose ) . range | | ' * ' ; <nl> + } catch ( er ) { <nl> + return null ; <nl> + } <nl> + } <nl> + <nl> + / / Determine if version is less than all the versions possible in the range <nl> + exports . ltr = ltr ; <nl> + function ltr ( version , range , loose ) { <nl> + return outside ( version , range , ' < ' , loose ) ; <nl> + } <nl> + <nl> + / / Determine if version is greater than all the versions possible in the range . <nl> + exports . gtr = gtr ; <nl> + function gtr ( version , range , loose ) { <nl> + return outside ( version , range , ' > ' , loose ) ; <nl> + } <nl> + <nl> + exports . 
outside = outside ; <nl> + function outside ( version , range , hilo , loose ) { <nl> + version = new SemVer ( version , loose ) ; <nl> + range = new Range ( range , loose ) ; <nl> + <nl> + var gtfn , ltefn , ltfn , comp , ecomp ; <nl> + switch ( hilo ) { <nl> + case ' > ' : <nl> + gtfn = gt ; <nl> + ltefn = lte ; <nl> + ltfn = lt ; <nl> + comp = ' > ' ; <nl> + ecomp = ' > = ' ; <nl> + break ; <nl> + case ' < ' : <nl> + gtfn = lt ; <nl> + ltefn = gte ; <nl> + ltfn = gt ; <nl> + comp = ' < ' ; <nl> + ecomp = ' < = ' ; <nl> + break ; <nl> + default : <nl> + throw new TypeError ( ' Must provide a hilo val of " < " or " > " ' ) ; <nl> + } <nl> + <nl> + / / If it satisifes the range it is not outside <nl> + if ( satisfies ( version , range , loose ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + / / From now on , variable terms are as if we ' re in " gtr " mode . <nl> + / / but note that everything is flipped for the " ltr " function . <nl> + <nl> + for ( var i = 0 ; i < range . set . length ; + + i ) { <nl> + var comparators = range . set [ i ] ; <nl> + <nl> + var high = null ; <nl> + var low = null ; <nl> + <nl> + comparators . forEach ( function ( comparator ) { <nl> + if ( comparator . semver = = = ANY ) { <nl> + comparator = new Comparator ( ' > = 0 . 0 . 0 ' ) ; <nl> + } <nl> + high = high | | comparator ; <nl> + low = low | | comparator ; <nl> + if ( gtfn ( comparator . semver , high . semver , loose ) ) { <nl> + high = comparator ; <nl> + } else if ( ltfn ( comparator . semver , low . semver , loose ) ) { <nl> + low = comparator ; <nl> + } <nl> + } ) ; <nl> + <nl> + / / If the edge version comparator has a operator then our version <nl> + / / isn ' t outside it <nl> + if ( high . operator = = = comp | | high . operator = = = ecomp ) { <nl> + return false ; <nl> + } <nl> + <nl> + / / If the lowest version comparator has an operator and our version <nl> + / / is less than it then it isn ' t higher than the range <nl> + if ( ( ! low . operator | | low . operator = = = comp ) & & ltefn ( version , low . semver ) ) { <nl> + return false ; <nl> + } else if ( low . operator = = = ecomp & & ltfn ( version , low . semver ) ) { <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + exports . prerelease = prerelease ; <nl> + function prerelease ( version , loose ) { <nl> + var parsed = parse ( version , loose ) ; <nl> + return parsed & & parsed . prerelease . length ? parsed . prerelease : null ; <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , __webpack_require__ ( 52 ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 52 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + / / shim for using process in browser <nl> + <nl> + var process = module . exports = { } ; <nl> + <nl> + / / cached from whatever global is present so that test runners that stub it <nl> + / / don ' t break things . But we need to wrap it in a try catch in case it is <nl> + / / wrapped in strict mode code which doesn ' t define any globals . It ' s inside a <nl> + / / function because try / catches deoptimize in certain engines . 
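// (Editorial sketch, not part of the upstream process shim.) The comment above explains
// why setTimeout/clearTimeout are cached at load time: test runners sometimes stub the
// globals after this module has been evaluated. A minimal illustration of the situation
// being guarded against (names are illustrative only):
//
//   var cachedSetTimeout = setTimeout;                  // captured before any stubbing
//   setTimeout = function () { throw new Error('stubbed by the test runner'); };
//   cachedSetTimeout(function () {
//     console.log('queued work still runs via the cached original');
//   }, 0);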
<nl> + <nl> + var cachedSetTimeout ; <nl> + var cachedClearTimeout ; <nl> + <nl> + ( function ( ) { <nl> + try { <nl> + cachedSetTimeout = setTimeout ; <nl> + } catch ( e ) { <nl> + cachedSetTimeout = function cachedSetTimeout ( ) { <nl> + throw new Error ( ' setTimeout is not defined ' ) ; <nl> + } ; <nl> + } <nl> + try { <nl> + cachedClearTimeout = clearTimeout ; <nl> + } catch ( e ) { <nl> + cachedClearTimeout = function cachedClearTimeout ( ) { <nl> + throw new Error ( ' clearTimeout is not defined ' ) ; <nl> + } ; <nl> + } <nl> + } ) ( ) ; <nl> + var queue = [ ] ; <nl> + var draining = false ; <nl> + var currentQueue ; <nl> + var queueIndex = - 1 ; <nl> + <nl> + function cleanUpNextTick ( ) { <nl> + if ( ! draining | | ! currentQueue ) { <nl> + return ; <nl> + } <nl> + draining = false ; <nl> + if ( currentQueue . length ) { <nl> + queue = currentQueue . concat ( queue ) ; <nl> + } else { <nl> + queueIndex = - 1 ; <nl> + } <nl> + if ( queue . length ) { <nl> + drainQueue ( ) ; <nl> + } <nl> + } <nl> + <nl> + function drainQueue ( ) { <nl> + if ( draining ) { <nl> + return ; <nl> + } <nl> + var timeout = cachedSetTimeout ( cleanUpNextTick ) ; <nl> + draining = true ; <nl> + <nl> + var len = queue . length ; <nl> + while ( len ) { <nl> + currentQueue = queue ; <nl> + queue = [ ] ; <nl> + while ( + + queueIndex < len ) { <nl> + if ( currentQueue ) { <nl> + currentQueue [ queueIndex ] . run ( ) ; <nl> + } <nl> + } <nl> + queueIndex = - 1 ; <nl> + len = queue . length ; <nl> + } <nl> + currentQueue = null ; <nl> + draining = false ; <nl> + cachedClearTimeout ( timeout ) ; <nl> + } <nl> + <nl> + process . nextTick = function ( fun ) { <nl> + var args = new Array ( arguments . length - 1 ) ; <nl> + if ( arguments . length > 1 ) { <nl> + for ( var i = 1 ; i < arguments . length ; i + + ) { <nl> + args [ i - 1 ] = arguments [ i ] ; <nl> + } <nl> + } <nl> + queue . push ( new Item ( fun , args ) ) ; <nl> + if ( queue . length = = = 1 & & ! draining ) { <nl> + cachedSetTimeout ( drainQueue , 0 ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / v8 likes predictible objects <nl> + function Item ( fun , array ) { <nl> + this . fun = fun ; <nl> + this . array = array ; <nl> + } <nl> + Item . prototype . run = function ( ) { <nl> + this . fun . apply ( null , this . array ) ; <nl> + } ; <nl> + process . title = ' browser ' ; <nl> + process . browser = true ; <nl> + process . env = { } ; <nl> + process . argv = [ ] ; <nl> + process . version = ' ' ; / / empty string to avoid regexp issues <nl> + process . versions = { } ; <nl> + <nl> + function noop ( ) { } <nl> + <nl> + process . on = noop ; <nl> + process . addListener = noop ; <nl> + process . once = noop ; <nl> + process . off = noop ; <nl> + process . removeListener = noop ; <nl> + process . removeAllListeners = noop ; <nl> + process . emit = noop ; <nl> + <nl> + process . binding = function ( name ) { <nl> + throw new Error ( ' process . binding is not supported ' ) ; <nl> + } ; <nl> + <nl> + process . cwd = function ( ) { <nl> + return ' / ' ; <nl> + } ; <nl> + process . chdir = function ( dir ) { <nl> + throw new Error ( ' process . chdir is not supported ' ) ; <nl> + } ; <nl> + process . umask = function ( ) { <nl> + return 0 ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 53 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . 
default = Vm ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var _ = _interopRequireWildcard ( _util ) ; <nl> + <nl> + var _state = __webpack_require__ ( 54 ) ; <nl> + <nl> + var state = _interopRequireWildcard ( _state ) ; <nl> + <nl> + var _compiler = __webpack_require__ ( 59 ) ; <nl> + <nl> + var compiler = _interopRequireWildcard ( _compiler ) ; <nl> + <nl> + var _directive = __webpack_require__ ( 60 ) ; <nl> + <nl> + var directive = _interopRequireWildcard ( _directive ) ; <nl> + <nl> + var _domHelper = __webpack_require__ ( 61 ) ; <nl> + <nl> + var domHelper = _interopRequireWildcard ( _domHelper ) ; <nl> + <nl> + var _events = __webpack_require__ ( 62 ) ; <nl> + <nl> + var events = _interopRequireWildcard ( _events ) ; <nl> + <nl> + var _register = __webpack_require__ ( 63 ) ; <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + function callOldReadyEntry ( vm , component ) { <nl> + if ( component . methods & & component . methods . ready ) { <nl> + _ . warn ( ' " exports . methods . ready " is deprecated , ' + ' please use " exports . created " instead ' ) ; <nl> + component . methods . ready . call ( vm ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * ViewModel constructor <nl> + * <nl> + * @ param { string } type <nl> + * @ param { object } options component options <nl> + * @ param { object } parentVm which contains _app <nl> + * @ param { object } parentEl root element or frag block <nl> + * @ param { object } mergedData external data <nl> + * @ param { object } externalEvents external events <nl> + * / <nl> + / * * <nl> + * @ fileOverview <nl> + * ViewModel Constructor & definition <nl> + * / <nl> + <nl> + function Vm ( type , options , parentVm , parentEl , mergedData , externalEvents ) { <nl> + this . _parent = parentVm . _realParent ? parentVm . _realParent : parentVm ; <nl> + this . _app = parentVm . _app ; <nl> + parentVm . _childrenVms & & parentVm . _childrenVms . push ( this ) ; <nl> + <nl> + if ( ! options ) { <nl> + options = this . _app . customComponentMap [ type ] | | { } ; <nl> + } <nl> + var data = options . data | | { } ; <nl> + <nl> + this . _options = options ; <nl> + this . _methods = options . methods | | { } ; <nl> + this . _computed = options . computed | | { } ; <nl> + this . _css = options . style | | { } ; <nl> + this . _ids = { } ; <nl> + this . _vmEvents = { } ; <nl> + this . _childrenVms = [ ] ; <nl> + this . _type = type ; <nl> + <nl> + / / bind events and lifecycles <nl> + this . _initEvents ( externalEvents ) ; <nl> + <nl> + _ . debug ( ' " init " lifecycle in Vm ( ' + this . _type + ' ) ' ) ; <nl> + this . $ emit ( ' hook : init ' ) ; <nl> + this . _inited = true ; <nl> + / / proxy data and methods <nl> + / / observe data and add this to vms <nl> + this . _data = typeof data = = = ' function ' ? data ( ) : data ; <nl> + if ( mergedData ) { <nl> + _ . extend ( this . _data , mergedData ) ; <nl> + } <nl> + this . _initState ( ) ; <nl> + <nl> + _ . debug ( ' " created " lifecycle in Vm ( ' + this . _type + ' ) ' ) ; <nl> + this . $ emit ( ' hook : created ' ) ; <nl> + this . 
_created = true ; <nl> + / / backward old ready entry <nl> + callOldReadyEntry ( this , options ) ; <nl> + <nl> + / / if no parentElement then specify the documentElement <nl> + this . _parentEl = parentEl | | this . _app . doc . documentElement ; <nl> + this . _build ( ) ; <nl> + } <nl> + <nl> + _ . extend ( Vm . prototype , state , compiler , directive , domHelper , events ) ; <nl> + _ . extend ( Vm , { <nl> + registerModules : _register . registerModules , <nl> + registerMethods : _register . registerMethods <nl> + } ) ; <nl> + <nl> + / * * * / } , <nl> + / * 54 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . _initState = _initState ; <nl> + exports . _initData = _initData ; <nl> + exports . _initComputed = _initComputed ; <nl> + exports . _initMethods = _initMethods ; <nl> + <nl> + var _watcher = __webpack_require__ ( 55 ) ; <nl> + <nl> + var _watcher2 = _interopRequireDefault ( _watcher ) ; <nl> + <nl> + var _dep = __webpack_require__ ( 56 ) ; <nl> + <nl> + var _dep2 = _interopRequireDefault ( _dep ) ; <nl> + <nl> + var _observer = __webpack_require__ ( 57 ) ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + / * eslint - disable * / <nl> + <nl> + function _initState ( ) { <nl> + var vm = this ; <nl> + vm . _watchers = [ ] ; <nl> + vm . _initData ( ) ; <nl> + vm . _initComputed ( ) ; <nl> + vm . _initMethods ( ) ; <nl> + } <nl> + <nl> + function _initData ( ) { <nl> + var vm = this ; <nl> + var data = vm . _data ; <nl> + <nl> + if ( ! ( 0 , _util . isPlainObject ) ( data ) ) { <nl> + data = { } ; <nl> + } <nl> + / / proxy data on instance <nl> + var keys = Object . keys ( data ) ; <nl> + var i = keys . length ; <nl> + while ( i - - ) { <nl> + ( 0 , _observer . proxy ) ( vm , keys [ i ] ) ; <nl> + } <nl> + / / observe data <nl> + ( 0 , _observer . observe ) ( data , vm ) ; <nl> + } <nl> + <nl> + function noop ( ) { } <nl> + <nl> + function _initComputed ( ) { <nl> + var vm = this ; <nl> + var computed = vm . _computed ; <nl> + if ( computed ) { <nl> + for ( var key in computed ) { <nl> + var userDef = computed [ key ] ; <nl> + var def = { <nl> + enumerable : true , <nl> + configurable : true <nl> + } ; <nl> + if ( typeof userDef = = = ' function ' ) { <nl> + def . get = makeComputedGetter ( userDef , vm ) ; <nl> + def . set = noop ; <nl> + } else { <nl> + def . get = userDef . get ? userDef . cache ! = = false ? makeComputedGetter ( userDef . get , vm ) : ( 0 , _util . bind ) ( userDef . get , vm ) : noop ; <nl> + def . set = userDef . set ? ( 0 , _util . bind ) ( userDef . set , vm ) : noop ; <nl> + } <nl> + Object . defineProperty ( vm , key , def ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + function makeComputedGetter ( getter , owner ) { <nl> + var watcher = new _watcher2 . default ( owner , getter , null , { <nl> + lazy : true <nl> + } ) ; <nl> + return function computedGetter ( ) { <nl> + if ( watcher . dirty ) { <nl> + watcher . evaluate ( ) ; <nl> + } <nl> + if ( _dep2 . default . target ) { <nl> + watcher . depend ( ) ; <nl> + } <nl> + return watcher . value ; <nl> + } ; <nl> + } <nl> + <nl> + function _initMethods ( ) { <nl> + var vm = this ; <nl> + var methods = vm . 
_methods ; <nl> + if ( methods ) { <nl> + for ( var key in methods ) { <nl> + vm [ key ] = ( 0 , _util . bind ) ( methods [ key ] , vm ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 55 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( process ) { ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . default = Watcher ; <nl> + <nl> + var _dep = __webpack_require__ ( 56 ) ; <nl> + <nl> + var _dep2 = _interopRequireDefault ( _dep ) ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + / * eslint - disable * / <nl> + <nl> + var uid = 0 ; <nl> + / / import { pushWatcher } from ' . / batcher ' <nl> + <nl> + var prevTarget = void 0 ; <nl> + <nl> + / * * <nl> + * A watcher parses an expression , collects dependencies , <nl> + * and fires callback when the expression value changes . <nl> + * This is used for both the $ watch ( ) api and directives . <nl> + * <nl> + * @ param { Vue } vm <nl> + * @ param { String | Function } expOrFn <nl> + * @ param { Function } cb <nl> + * @ param { Object } options <nl> + * - { Array } filters <nl> + * - { Boolean } twoWay <nl> + * - { Boolean } deep <nl> + * - { Boolean } user <nl> + * - { Boolean } sync <nl> + * - { Boolean } lazy <nl> + * - { Function } [ preProcess ] <nl> + * - { Function } [ postProcess ] <nl> + * @ constructor <nl> + * / <nl> + <nl> + function Watcher ( vm , expOrFn , cb , options ) { <nl> + / / mix in options <nl> + if ( options ) { <nl> + ( 0 , _util . extend ) ( this , options ) ; <nl> + } <nl> + var isFn = typeof expOrFn = = = ' function ' ; <nl> + this . vm = vm ; <nl> + vm . _watchers . push ( this ) ; <nl> + this . expression = expOrFn ; <nl> + this . cb = cb ; <nl> + this . id = + + uid ; / / uid for batching <nl> + this . active = true ; <nl> + this . dirty = this . lazy ; / / for lazy watchers <nl> + this . deps = [ ] ; <nl> + this . newDeps = [ ] ; <nl> + this . depIds = new _util . _Set ( ) ; <nl> + this . newDepIds = new _util . _Set ( ) ; <nl> + / / parse expression for getter <nl> + if ( isFn ) { <nl> + this . getter = expOrFn ; <nl> + } else { <nl> + this . getter = ( 0 , _util . parsePath ) ( expOrFn ) ; <nl> + if ( ! this . getter ) { <nl> + this . getter = function ( ) { } ; <nl> + process . env . NODE_ENV ! = = ' production ' & & ( 0 , _util . warn ) ( ' Failed watching path : ' + expOrFn + ' Watcher only accepts simple dot - delimited paths . ' + ' For full control , use a function instead . ' , vm ) ; <nl> + } <nl> + } <nl> + this . value = this . lazy ? undefined : this . get ( ) ; <nl> + / / state for avoiding false triggers for deep and Array <nl> + / / watchers during vm . _digest ( ) <nl> + this . queued = this . shallow = false ; <nl> + } <nl> + <nl> + / * * <nl> + * Evaluate the getter , and re - collect dependencies . <nl> + * / <nl> + <nl> + Watcher . prototype . get = function ( ) { <nl> + this . beforeGet ( ) ; <nl> + var value = this . getter . call ( this . vm , this . vm ) ; <nl> + / / " touch " every property so they are all tracked as <nl> + / / dependencies for deep watching <nl> + if ( this . deep ) { <nl> + traverse ( value ) ; <nl> + } <nl> + this . afterGet ( ) ; <nl> + return value ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Prepare for dependency collection . 
<nl> + * / <nl> + <nl> + Watcher . prototype . beforeGet = function ( ) { <nl> + prevTarget = _dep2 . default . target ; <nl> + _dep2 . default . target = this ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Add a dependency to this directive . <nl> + * <nl> + * @ param { Dep } dep <nl> + * / <nl> + <nl> + Watcher . prototype . addDep = function ( dep ) { <nl> + var id = dep . id ; <nl> + if ( ! this . newDepIds . has ( id ) ) { <nl> + this . newDepIds . add ( id ) ; <nl> + this . newDeps . push ( dep ) ; <nl> + if ( ! this . depIds . has ( id ) ) { <nl> + dep . addSub ( this ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * Clean up for dependency collection . <nl> + * / <nl> + <nl> + Watcher . prototype . afterGet = function ( ) { <nl> + _dep2 . default . target = prevTarget ; <nl> + var i = this . deps . length ; <nl> + while ( i - - ) { <nl> + var dep = this . deps [ i ] ; <nl> + if ( ! this . newDepIds . has ( dep . id ) ) { <nl> + dep . removeSub ( this ) ; <nl> + } <nl> + } <nl> + var tmp = this . depIds ; <nl> + this . depIds = this . newDepIds ; <nl> + this . newDepIds = tmp ; <nl> + this . newDepIds . clear ( ) ; <nl> + tmp = this . deps ; <nl> + this . deps = this . newDeps ; <nl> + this . newDeps = tmp ; <nl> + this . newDeps . length = 0 ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Subscriber interface . <nl> + * Will be called when a dependency changes . <nl> + * <nl> + * @ param { Boolean } shallow <nl> + * / <nl> + <nl> + Watcher . prototype . update = function ( shallow ) { <nl> + if ( this . lazy ) { <nl> + this . dirty = true ; <nl> + } else { <nl> + this . run ( ) ; <nl> + } <nl> + / / } else if ( this . sync ) { <nl> + / / this . run ( ) <nl> + / / } else { <nl> + / / / / if queued , only overwrite shallow with non - shallow , <nl> + / / / / but not the other way around . <nl> + / / this . shallow = this . queued <nl> + / / ? shallow <nl> + / / ? this . shallow <nl> + / / : false <nl> + / / : ! ! shallow <nl> + / / this . queued = true <nl> + / / pushWatcher ( this ) <nl> + / / } <nl> + } ; <nl> + <nl> + / * * <nl> + * Batcher job interface . <nl> + * Will be called by the batcher . <nl> + * / <nl> + <nl> + Watcher . prototype . run = function ( ) { <nl> + if ( this . active ) { <nl> + var value = this . get ( ) ; <nl> + if ( value ! = = this . value | | <nl> + / / Deep watchers and watchers on Object / Arrays should fire even <nl> + / / when the value is the same , because the value may <nl> + / / have mutated ; but only do so if this is a <nl> + / / non - shallow update ( caused by a vm digest ) . <nl> + ( ( 0 , _util . isObject ) ( value ) | | this . deep ) & & ! this . shallow ) { <nl> + / / set new value <nl> + var oldValue = this . value ; <nl> + this . value = value ; <nl> + this . cb . call ( this . vm , value , oldValue ) ; <nl> + } <nl> + this . queued = this . shallow = false ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * Evaluate the value of the watcher . <nl> + * This only gets called for lazy watchers . <nl> + * / <nl> + <nl> + Watcher . prototype . evaluate = function ( ) { <nl> + / / avoid overwriting another watcher that is being <nl> + / / collected . <nl> + var current = _dep2 . default . target ; <nl> + this . value = this . get ( ) ; <nl> + this . dirty = false ; <nl> + _dep2 . default . target = current ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Depend on all deps collected by this watcher . <nl> + * / <nl> + <nl> + Watcher . prototype . depend = function ( ) { <nl> + var i = this . deps . length ; <nl> + while ( i - - ) { <nl> + this . 
deps [ i ] . depend ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * Remove self from all dependencies ' subcriber list . <nl> + * / <nl> + <nl> + Watcher . prototype . teardown = function ( ) { <nl> + if ( this . active ) { <nl> + / / remove self from vm ' s watcher list <nl> + / / this is a somewhat expensive operation so we skip it <nl> + / / if the vm is being destroyed or is performing a v - for <nl> + / / re - render ( the watcher list is then filtered by v - for ) . <nl> + if ( ! this . vm . _isBeingDestroyed & & ! this . vm . _vForRemoving ) { <nl> + ( 0 , _util . remove ) ( this . vm . _watchers , this ) ; <nl> + } <nl> + var i = this . deps . length ; <nl> + while ( i - - ) { <nl> + this . deps [ i ] . removeSub ( this ) ; <nl> + } <nl> + this . active = false ; <nl> + this . vm = this . cb = this . value = null ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * Recrusively traverse an object to evoke all converted <nl> + * getters , so that every nested property inside the object <nl> + * is collected as a " deep " dependency . <nl> + * <nl> + * @ param { * } val <nl> + * @ param { Set } seen <nl> + * / <nl> + <nl> + var seenObjects = new _util . _Set ( ) ; <nl> + function traverse ( val , seen ) { <nl> + var i = void 0 , <nl> + keys = void 0 , <nl> + isA = void 0 , <nl> + isO = void 0 ; <nl> + if ( ! seen ) { <nl> + seen = seenObjects ; <nl> + seen . clear ( ) ; <nl> + } <nl> + isA = ( 0 , _util . isArray ) ( val ) ; <nl> + isO = ( 0 , _util . isObject ) ( val ) ; <nl> + if ( isA | | isO ) { <nl> + if ( val . __ob__ ) { <nl> + var depId = val . __ob__ . dep . id ; <nl> + if ( seen . has ( depId ) ) { <nl> + return ; <nl> + } else { <nl> + seen . add ( depId ) ; <nl> + } <nl> + } <nl> + if ( isA ) { <nl> + i = val . length ; <nl> + while ( i - - ) { <nl> + traverse ( val [ i ] , seen ) ; <nl> + } <nl> + } else if ( isO ) { <nl> + keys = Object . keys ( val ) ; <nl> + i = keys . length ; <nl> + while ( i - - ) { <nl> + traverse ( val [ keys [ i ] ] , seen ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , __webpack_require__ ( 52 ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 56 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . default = Dep ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var uid = 0 ; <nl> + <nl> + / * * <nl> + * A dep is an observable that can have multiple <nl> + * directives subscribing to it . <nl> + * <nl> + * @ constructor <nl> + * / <nl> + <nl> + / * eslint - disable * / <nl> + <nl> + function Dep ( ) { <nl> + this . id = uid + + ; <nl> + this . subs = [ ] ; <nl> + } <nl> + <nl> + / / the current target watcher being evaluated . <nl> + / / this is globally unique because there could be only one <nl> + / / watcher being evaluated at any time . <nl> + Dep . target = null ; <nl> + <nl> + / * * <nl> + * Add a directive subscriber . <nl> + * <nl> + * @ param { Directive } sub <nl> + * / <nl> + <nl> + Dep . prototype . addSub = function ( sub ) { <nl> + this . subs . push ( sub ) ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Remove a directive subscriber . <nl> + * <nl> + * @ param { Directive } sub <nl> + * / <nl> + <nl> + Dep . prototype . removeSub = function ( sub ) { <nl> + ( 0 , _util . remove ) ( this . subs , sub ) ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Add self as a dependency to the target watcher . 
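 *
 * A minimal sketch of the collection handshake for a Dep instance `dep`
 * (the `watcher` variable is illustrative; see Watcher#beforeGet/#addDep above):
 *
 *   Dep.target = watcher      // set while the watcher's getter runs
 *   dep.depend()              // -> watcher.addDep(dep) -> dep.addSub(watcher)
 *   dep.notify()              // later: every subscriber's update() is called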
<nl> + * / <nl> + <nl> + Dep . prototype . depend = function ( ) { <nl> + Dep . target . addDep ( this ) ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Notify all subscribers of a new value . <nl> + * / <nl> + <nl> + Dep . prototype . notify = function ( ) { <nl> + / / stablize the subscriber list first <nl> + var subs = this . subs . slice ( ) ; <nl> + for ( var i = 0 , l = subs . length ; i < l ; i + + ) { <nl> + subs [ i ] . update ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 57 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . Observer = Observer ; <nl> + exports . observe = observe ; <nl> + exports . defineReactive = defineReactive ; <nl> + exports . set = set ; <nl> + exports . del = del ; <nl> + exports . proxy = proxy ; <nl> + exports . unproxy = unproxy ; <nl> + <nl> + var _dep = __webpack_require__ ( 56 ) ; <nl> + <nl> + var _dep2 = _interopRequireDefault ( _dep ) ; <nl> + <nl> + var _array = __webpack_require__ ( 58 ) ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + var arrayKeys = Object . getOwnPropertyNames ( _array . arrayMethods ) ; <nl> + <nl> + / * * <nl> + * Observer class that are attached to each observed <nl> + * object . Once attached , the observer converts target <nl> + * object ' s property keys into getter / setters that <nl> + * collect dependencies and dispatches updates . <nl> + * <nl> + * @ param { Array | Object } value <nl> + * @ constructor <nl> + * / <nl> + <nl> + / * eslint - disable * / <nl> + <nl> + function Observer ( value ) { <nl> + this . value = value ; <nl> + this . dep = new _dep2 . default ( ) ; <nl> + ( 0 , _util . def ) ( value , ' __ob__ ' , this ) ; <nl> + if ( ( 0 , _util . isArray ) ( value ) ) { <nl> + var augment = _util . hasProto ? protoAugment : copyAugment ; <nl> + augment ( value , _array . arrayMethods , arrayKeys ) ; <nl> + this . observeArray ( value ) ; <nl> + } else { <nl> + this . walk ( value ) ; <nl> + } <nl> + } <nl> + <nl> + / / Instance methods <nl> + <nl> + / * * <nl> + * Walk through each property and convert them into <nl> + * getter / setters . This method should only be called when <nl> + * value type is Object . <nl> + * <nl> + * @ param { Object } obj <nl> + * / <nl> + <nl> + Observer . prototype . walk = function ( obj ) { <nl> + for ( var key in obj ) { <nl> + this . convert ( key , obj [ key ] ) ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * Observe a list of Array items . <nl> + * <nl> + * @ param { Array } items <nl> + * / <nl> + <nl> + Observer . prototype . observeArray = function ( items ) { <nl> + for ( var i = 0 , l = items . length ; i < l ; i + + ) { <nl> + observe ( items [ i ] ) ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * Convert a property into getter / setter so we can emit <nl> + * the events when the property is accessed / changed . <nl> + * <nl> + * @ param { String } key <nl> + * @ param { * } val <nl> + * / <nl> + <nl> + Observer . prototype . convert = function ( key , val ) { <nl> + defineReactive ( this . value , key , val ) ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Add an owner vm , so that when $ set / $ delete mutations <nl> + * happen we can notify owner vms to proxy the keys and <nl> + * digest the watchers . 
This is only called when the object <nl> + * is observed as an instance ' s root $ data . <nl> + * <nl> + * @ param { Vue } vm <nl> + * / <nl> + <nl> + Observer . prototype . addVm = function ( vm ) { <nl> + ( this . vms | | ( this . vms = [ ] ) ) . push ( vm ) ; <nl> + } ; <nl> + <nl> + / * * <nl> + * Remove an owner vm . This is called when the object is <nl> + * swapped out as an instance ' s $ data object . <nl> + * <nl> + * @ param { Vue } vm <nl> + * / <nl> + <nl> + Observer . prototype . removeVm = function ( vm ) { <nl> + ( 0 , _util . remove ) ( this . vms , vm ) ; <nl> + } ; <nl> + <nl> + / / helpers <nl> + <nl> + / * * <nl> + * Augment an target Object or Array by intercepting <nl> + * the prototype chain using __proto__ <nl> + * <nl> + * @ param { Object | Array } target <nl> + * @ param { Object } src <nl> + * / <nl> + <nl> + function protoAugment ( target , src ) { <nl> + / * eslint - disable no - proto * / <nl> + target . __proto__ = src ; <nl> + / * eslint - enable no - proto * / <nl> + } <nl> + <nl> + / * * <nl> + * Augment an target Object or Array by defining <nl> + * hidden properties . <nl> + * <nl> + * @ param { Object | Array } target <nl> + * @ param { Object } proto <nl> + * / <nl> + <nl> + function copyAugment ( target , src , keys ) { <nl> + for ( var i = 0 , l = keys . length ; i < l ; i + + ) { <nl> + var key = keys [ i ] ; <nl> + ( 0 , _util . def ) ( target , key , src [ key ] ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Attempt to create an observer instance for a value , <nl> + * returns the new observer if successfully observed , <nl> + * or the existing observer if the value already has one . <nl> + * <nl> + * @ param { * } value <nl> + * @ param { Vue } [ vm ] <nl> + * @ return { Observer | undefined } <nl> + * @ static <nl> + * / <nl> + <nl> + function observe ( value , vm ) { <nl> + if ( ! ( 0 , _util . isObject ) ( value ) ) { <nl> + return ; <nl> + } <nl> + var ob = void 0 ; <nl> + if ( ( 0 , _util . hasOwn ) ( value , ' __ob__ ' ) & & value . __ob__ instanceof Observer ) { <nl> + ob = value . __ob__ ; <nl> + } else if ( ( ( 0 , _util . isArray ) ( value ) | | ( 0 , _util . isPlainObject ) ( value ) ) & & Object . isExtensible ( value ) & & ! value . _isVue ) { <nl> + ob = new Observer ( value ) ; <nl> + } <nl> + if ( ob & & vm ) { <nl> + ob . addVm ( vm ) ; <nl> + } <nl> + return ob ; <nl> + } <nl> + <nl> + / * * <nl> + * Define a reactive property on an Object . <nl> + * <nl> + * @ param { Object } obj <nl> + * @ param { String } key <nl> + * @ param { * } val <nl> + * / <nl> + <nl> + function defineReactive ( obj , key , val ) { <nl> + var dep = new _dep2 . default ( ) ; <nl> + <nl> + var property = Object . getOwnPropertyDescriptor ( obj , key ) ; <nl> + if ( property & & property . configurable = = = false ) { <nl> + return ; <nl> + } <nl> + <nl> + / / cater for pre - defined getter / setters <nl> + var getter = property & & property . get ; <nl> + var setter = property & & property . set ; <nl> + <nl> + var childOb = observe ( val ) ; <nl> + Object . defineProperty ( obj , key , { <nl> + enumerable : true , <nl> + configurable : true , <nl> + get : function reactiveGetter ( ) { <nl> + var value = getter ? getter . call ( obj ) : val ; <nl> + if ( _dep2 . default . target ) { <nl> + dep . depend ( ) ; <nl> + if ( childOb ) { <nl> + childOb . dep . depend ( ) ; <nl> + } <nl> + if ( ( 0 , _util . isArray ) ( value ) ) { <nl> + for ( var e , i = 0 , l = value . length ; i < l ; i + + ) { <nl> + e = value [ i ] ; <nl> + e & & e . 
__ob__ & & e . __ob__ . dep . depend ( ) ; <nl> + } <nl> + } <nl> + } <nl> + return value ; <nl> + } , <nl> + set : function reactiveSetter ( newVal ) { <nl> + var value = getter ? getter . call ( obj ) : val ; <nl> + if ( newVal = = = value ) { <nl> + return ; <nl> + } <nl> + if ( setter ) { <nl> + setter . call ( obj , newVal ) ; <nl> + } else { <nl> + val = newVal ; <nl> + } <nl> + childOb = observe ( newVal ) ; <nl> + dep . notify ( ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Set a property on an object . Adds the new property and <nl> + * triggers change notification if the property doesn ' t <nl> + * already exist . <nl> + * <nl> + * @ param { Object } obj <nl> + * @ param { String } key <nl> + * @ param { * } val <nl> + * @ public <nl> + * / <nl> + <nl> + function set ( obj , key , val ) { <nl> + if ( ( 0 , _util . isArray ) ( obj ) ) { <nl> + return obj . splice ( key , 1 , val ) ; <nl> + } <nl> + if ( ( 0 , _util . hasOwn ) ( obj , key ) ) { <nl> + obj [ key ] = val ; <nl> + return ; <nl> + } <nl> + if ( obj . _isVue ) { <nl> + set ( obj . _data , key , val ) ; <nl> + return ; <nl> + } <nl> + var ob = obj . __ob__ ; <nl> + if ( ! ob ) { <nl> + obj [ key ] = val ; <nl> + return ; <nl> + } <nl> + ob . convert ( key , val ) ; <nl> + ob . dep . notify ( ) ; <nl> + if ( ob . vms ) { <nl> + var i = ob . vms . length ; <nl> + while ( i - - ) { <nl> + var vm = ob . vms [ i ] ; <nl> + proxy ( vm , key ) ; <nl> + vm . $ forceUpdate ( ) ; <nl> + } <nl> + } <nl> + return val ; <nl> + } <nl> + <nl> + / * * <nl> + * Delete a property and trigger change if necessary . <nl> + * <nl> + * @ param { Object } obj <nl> + * @ param { String } key <nl> + * / <nl> + <nl> + function del ( obj , key ) { <nl> + if ( ! ( 0 , _util . hasOwn ) ( obj , key ) ) { <nl> + return ; <nl> + } <nl> + delete obj [ key ] ; <nl> + var ob = obj . __ob__ ; <nl> + <nl> + if ( ! ob ) { <nl> + if ( obj . _isVue ) { <nl> + delete obj . _data [ key ] ; <nl> + obj . $ forceUpdate ( ) ; <nl> + } <nl> + return ; <nl> + } <nl> + ob . dep . notify ( ) ; <nl> + if ( ob . vms ) { <nl> + var i = ob . vms . length ; <nl> + while ( i - - ) { <nl> + var vm = ob . vms [ i ] ; <nl> + unproxy ( vm , key ) ; <nl> + vm . $ forceUpdate ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + var KEY_WORDS = [ ' $ index ' , ' $ value ' , ' $ event ' ] ; <nl> + function proxy ( vm , key ) { <nl> + if ( KEY_WORDS . indexOf ( key ) > - 1 | | ! ( 0 , _util . isReserved ) ( key ) ) { <nl> + Object . defineProperty ( vm , key , { <nl> + configurable : true , <nl> + enumerable : true , <nl> + get : function proxyGetter ( ) { <nl> + return vm . _data [ key ] ; <nl> + } , <nl> + set : function proxySetter ( val ) { <nl> + vm . _data [ key ] = val ; <nl> + } <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + function unproxy ( vm , key ) { <nl> + if ( ! ( 0 , _util . isReserved ) ( key ) ) { <nl> + delete vm [ key ] ; <nl> + } <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 58 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . arrayMethods = undefined ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var arrayProto = Array . prototype ; / * eslint - disable * / <nl> + <nl> + var arrayMethods = exports . arrayMethods = Object . 
create ( arrayProto ) <nl> + <nl> + / * * <nl> + * Intercept mutating methods and emit events <nl> + * / <nl> + <nl> + ; [ ' push ' , ' pop ' , ' shift ' , ' unshift ' , ' splice ' , ' sort ' , ' reverse ' ] . forEach ( function ( method ) { <nl> + / / cache original method <nl> + var original = arrayProto [ method ] ; <nl> + ( 0 , _util . def ) ( arrayMethods , method , function mutator ( ) { <nl> + / / avoid leaking arguments : <nl> + / / http : / / jsperf . com / closure - with - arguments <nl> + var i = arguments . length ; <nl> + var args = new Array ( i ) ; <nl> + while ( i - - ) { <nl> + args [ i ] = arguments [ i ] ; <nl> + } <nl> + var result = original . apply ( this , args ) ; <nl> + var ob = this . __ob__ ; <nl> + var inserted = void 0 ; <nl> + switch ( method ) { <nl> + case ' push ' : <nl> + inserted = args ; <nl> + break ; <nl> + case ' unshift ' : <nl> + inserted = args ; <nl> + break ; <nl> + case ' splice ' : <nl> + inserted = args . slice ( 2 ) ; <nl> + break ; <nl> + } <nl> + if ( inserted ) ob . observeArray ( inserted ) ; <nl> + / / notify change <nl> + ob . dep . notify ( ) ; <nl> + return result ; <nl> + } ) ; <nl> + } ) ; <nl> + <nl> + / * * <nl> + * Swap the element at the given index with a new value <nl> + * and emits corresponding event . <nl> + * <nl> + * @ param { Number } index <nl> + * @ param { * } val <nl> + * @ return { * } - replaced element <nl> + * / <nl> + <nl> + ( 0 , _util . def ) ( arrayProto , ' $ set ' , function $ set ( index , val ) { <nl> + if ( index > = this . length ) { <nl> + this . length = index + 1 ; <nl> + } <nl> + return this . splice ( index , 1 , val ) [ 0 ] ; <nl> + } ) ; <nl> + <nl> + / * * <nl> + * Convenience method to remove the element at given index . <nl> + * <nl> + * @ param { Number } index <nl> + * @ param { * } val <nl> + * / <nl> + <nl> + ( 0 , _util . def ) ( arrayProto , ' $ remove ' , function $ remove ( index ) { <nl> + / * istanbul ignore if * / <nl> + if ( ! this . length ) return ; <nl> + if ( typeof index ! = = ' number ' ) { <nl> + index = this . indexOf ( index ) ; <nl> + } <nl> + if ( index > - 1 ) { <nl> + this . splice ( index , 1 ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + / * * * / } , <nl> + / * 59 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + <nl> + var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj ; } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj ; } ; / * * <nl> + * @ fileOverview <nl> + * ViewModel template parser & data - binding process <nl> + * <nl> + * required : <nl> + * index . js : Vm <nl> + * dom - helper . js : _createElement , _createBlock <nl> + * dom - helper . js : _attachTarget , _moveTarget , _removeTarget <nl> + * directive . js : _bindElement , _bindSubVm , _watch <nl> + * events . js : $ on <nl> + * / <nl> + <nl> + exports . _build = _build ; <nl> + exports . _compile = _compile ; <nl> + exports . _targetIsFragment = _targetIsFragment ; <nl> + exports . _targetIsContent = _targetIsContent ; <nl> + exports . _targetNeedCheckRepeat = _targetNeedCheckRepeat ; <nl> + exports . _targetNeedCheckShown = _targetNeedCheckShown ; <nl> + exports . _targetNeedCheckType = _targetNeedCheckType ; <nl> + exports . _targetIsComposed = _targetIsComposed ; <nl> + exports . 
_compileFragment = _compileFragment ; <nl> + exports . _compileRepeat = _compileRepeat ; <nl> + exports . _compileShown = _compileShown ; <nl> + exports . _compileType = _compileType ; <nl> + exports . _compileCustomComponent = _compileCustomComponent ; <nl> + exports . _compileNativeComponent = _compileNativeComponent ; <nl> + exports . _compileChildren = _compileChildren ; <nl> + exports . _bindRepeat = _bindRepeat ; <nl> + exports . _bindShown = _bindShown ; <nl> + exports . _watchBlock = _watchBlock ; <nl> + exports . _mergeContext = _mergeContext ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var _ = _interopRequireWildcard ( _util ) ; <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + / * * <nl> + * build ( externalDirs ) <nl> + * createVm ( ) <nl> + * merge ( externalDirs , dirs ) <nl> + * compile ( template , parentNode ) <nl> + * if ( type is content ) create contentNode <nl> + * else if ( dirs have v - for ) foreach - > create context <nl> + * - > compile ( templateWithoutFor , parentNode ) : diff ( list ) onchange <nl> + * else if ( dirs have v - if ) assert <nl> + * - > compile ( templateWithoutIf , parentNode ) : toggle ( shown ) onchange <nl> + * else if ( type is native ) <nl> + * set ( dirs ) : update ( id / attr / style / class ) onchange <nl> + * append ( template , parentNode ) <nl> + * foreach childNodes - > compile ( childNode , template ) <nl> + * else if ( type is custom ) <nl> + * addChildVm ( vm , parentVm ) <nl> + * build ( externalDirs ) <nl> + * foreach childNodes - > compile ( childNode , template ) <nl> + * / <nl> + function _build ( ) { <nl> + var opt = this . _options | | { } ; <nl> + var template = opt . template | | { } ; <nl> + <nl> + if ( opt . replace ) { <nl> + if ( template . children & & template . children . length = = = 1 ) { <nl> + this . _compile ( template . children [ 0 ] , this . _parentEl ) ; <nl> + } else { <nl> + this . _compile ( template . children , this . _parentEl ) ; <nl> + } <nl> + } else { <nl> + this . _compile ( template , this . _parentEl ) ; <nl> + } <nl> + <nl> + _ . debug ( ' " ready " lifecycle in Vm ( ' + this . _type + ' ) ' ) ; <nl> + this . $ emit ( ' hook : ready ' ) ; <nl> + this . _ready = true ; <nl> + } <nl> + <nl> + / * * <nl> + * Generate elements by child or children and append to parent elements . <nl> + * Root element info would be merged if has . The first argument may be an array <nl> + * if the root element with options . replace has not only one child . <nl> + * <nl> + * @ param { object | array } target <nl> + * @ param { object } dest <nl> + * @ param { object } meta <nl> + * / <nl> + function _compile ( target , dest , meta ) { <nl> + var app = this . _app | | { } ; <nl> + <nl> + if ( app . lastSignal = = = - 1 ) { <nl> + return ; <nl> + } <nl> + <nl> + var context = this ; <nl> + if ( context . _targetIsFragment ( target ) ) { <nl> + context . _compileFragment ( target , dest , meta ) ; <nl> + return ; <nl> + } <nl> + meta = meta | | { } ; <nl> + if ( context . _targetIsContent ( target ) ) { <nl> + _ . debug ( ' compile " content " block by ' , target ) ; <nl> + context . _content = context . _createBlock ( dest ) ; <nl> + return ; <nl> + } <nl> + <nl> + if ( context . 
_targetNeedCheckRepeat ( target , meta ) ) { <nl> + _ . debug ( ' compile " repeat " logic by ' , target ) ; <nl> + context . _compileRepeat ( target , dest ) ; <nl> + return ; <nl> + } <nl> + if ( context . _targetNeedCheckShown ( target , meta ) ) { <nl> + _ . debug ( ' compile " if " logic by ' , target ) ; <nl> + context . _compileShown ( target , dest , meta ) ; <nl> + return ; <nl> + } <nl> + var typeGetter = meta . type | | target . type ; <nl> + if ( context . _targetNeedCheckType ( typeGetter , meta ) ) { <nl> + context . _compileType ( target , dest , typeGetter , meta ) ; <nl> + return ; <nl> + } <nl> + var type = typeGetter ; <nl> + var component = context . _targetIsComposed ( target , type ) ; <nl> + if ( component ) { <nl> + _ . debug ( ' compile composed component by ' , target ) ; <nl> + context . _compileCustomComponent ( component , target , dest , type , meta ) ; <nl> + return ; <nl> + } <nl> + _ . debug ( ' compile native component by ' , target ) ; <nl> + context . _compileNativeComponent ( target , dest , type ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Check if target is a fragment ( an array ) . <nl> + * <nl> + * @ param { object } target <nl> + * @ return { boolean } <nl> + * / <nl> + function _targetIsFragment ( target ) { <nl> + return Array . isArray ( target ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Check if target type is content / slot . <nl> + * <nl> + * @ param { object } target <nl> + * @ return { boolean } <nl> + * / <nl> + function _targetIsContent ( target ) { <nl> + return target . type = = = ' content ' | | target . type = = = ' slot ' ; <nl> + } <nl> + <nl> + / * * <nl> + * Check if target need to compile by a list . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } meta <nl> + * @ return { boolean } <nl> + * / <nl> + function _targetNeedCheckRepeat ( target , meta ) { <nl> + return ! meta . hasOwnProperty ( ' repeat ' ) & & target . repeat ; <nl> + } <nl> + <nl> + / * * <nl> + * Check if target need to compile by a boolean value . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } meta <nl> + * @ return { boolean } <nl> + * / <nl> + function _targetNeedCheckShown ( target , meta ) { <nl> + return ! meta . hasOwnProperty ( ' shown ' ) & & target . shown ; <nl> + } <nl> + <nl> + / * * <nl> + * Check if target need to compile by a dynamic type . <nl> + * <nl> + * @ param { string | function } typeGetter <nl> + * @ param { object } meta <nl> + * @ return { boolean } <nl> + * / <nl> + function _targetNeedCheckType ( typeGetter , meta ) { <nl> + return typeof typeGetter = = = ' function ' & & ! meta . hasOwnProperty ( ' type ' ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Check if this kind of component is composed . <nl> + * <nl> + * @ param { string } type <nl> + * @ return { boolean } <nl> + * / <nl> + function _targetIsComposed ( target , type ) { <nl> + var component = void 0 ; <nl> + if ( this . _app & & this . _app . customComponentMap ) { <nl> + component = this . _app . customComponentMap [ type ] ; <nl> + } <nl> + if ( this . _options & & this . _options . components ) { <nl> + component = this . _options . components [ type ] ; <nl> + } <nl> + if ( target . component ) { <nl> + component = component | | { } ; <nl> + } <nl> + return component ; <nl> + } <nl> + <nl> + / * * <nl> + * Compile a list of targets . 
<nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } dest <nl> + * @ param { object } meta <nl> + * / <nl> + function _compileFragment ( target , dest , meta ) { <nl> + var _this = this ; <nl> + <nl> + var fragBlock = this . _createBlock ( dest ) ; <nl> + target . forEach ( function ( child ) { <nl> + _this . _compile ( child , fragBlock , meta ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Compile a target with repeat directive . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } dest <nl> + * / <nl> + function _compileRepeat ( target , dest ) { <nl> + var repeat = target . repeat ; <nl> + var oldStyle = typeof repeat = = = ' function ' ; <nl> + var getter = repeat . getter | | repeat . expression | | repeat ; <nl> + if ( typeof getter ! = = ' function ' ) { <nl> + getter = function getter ( ) { <nl> + return [ ] ; <nl> + } ; <nl> + } <nl> + var key = repeat . key | | ' $ index ' ; <nl> + var value = repeat . value | | ' $ value ' ; <nl> + var trackBy = repeat . trackBy | | target . trackBy | | target . attr & & target . attr . trackBy | | key ; <nl> + <nl> + var fragBlock = this . _createBlock ( dest ) ; <nl> + fragBlock . children = [ ] ; <nl> + fragBlock . data = [ ] ; <nl> + fragBlock . vms = [ ] ; <nl> + <nl> + this . _bindRepeat ( target , fragBlock , { getter : getter , key : key , value : value , trackBy : trackBy , oldStyle : oldStyle } ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Compile a target with if directive . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } dest <nl> + * @ param { object } meta <nl> + * / <nl> + function _compileShown ( target , dest , meta ) { <nl> + var newMeta = { shown : true } ; <nl> + var fragBlock = this . _createBlock ( dest ) ; <nl> + <nl> + if ( dest . element & & dest . children ) { <nl> + dest . children . push ( fragBlock ) ; <nl> + } <nl> + <nl> + if ( meta . repeat ) { <nl> + newMeta . repeat = meta . repeat ; <nl> + } <nl> + <nl> + this . _bindShown ( target , fragBlock , newMeta ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Compile a target with dynamic component type . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } dest <nl> + * @ param { function } typeGetter <nl> + * / <nl> + function _compileType ( target , dest , typeGetter , meta ) { <nl> + var _this2 = this ; <nl> + <nl> + var type = typeGetter . call ( this ) ; <nl> + var newMeta = Object . assign ( { type : type } , meta ) ; <nl> + var fragBlock = this . _createBlock ( dest ) ; <nl> + <nl> + if ( dest . element & & dest . children ) { <nl> + dest . children . push ( fragBlock ) ; <nl> + } <nl> + <nl> + this . _watch ( typeGetter , function ( value ) { <nl> + var newMeta = Object . assign ( { type : value } , meta ) ; <nl> + _this2 . _removeBlock ( fragBlock , true ) ; <nl> + _this2 . _compile ( target , fragBlock , newMeta ) ; <nl> + } ) ; <nl> + <nl> + this . _compile ( target , fragBlock , newMeta ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Compile a composed component . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } dest <nl> + * @ param { string } type <nl> + * / <nl> + function _compileCustomComponent ( component , target , dest , type , meta ) { <nl> + var Vm = this . constructor ; <nl> + var context = this ; <nl> + var subVm = new Vm ( type , component , context , dest , undefined , { <nl> + ' hook : init ' : function hookInit ( ) { <nl> + context . _setId ( target . id , null , this ) ; <nl> + / / bind template earlier because of lifecycle issues <nl> + this . 
_externalBinding = { <nl> + parent : context , <nl> + template : target <nl> + } ; <nl> + } , <nl> + ' hook : created ' : function hookCreated ( ) { <nl> + context . _bindSubVm ( this , target , meta . repeat ) ; <nl> + } , <nl> + ' hook : ready ' : function hookReady ( ) { <nl> + if ( this . _content ) { <nl> + context . _compileChildren ( target , this . _content ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + this . _bindSubVmAfterInitialized ( subVm , target ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Generate element from template and attach to the dest if needed . <nl> + * The time to attach depends on whether the mode status is node or tree . <nl> + * <nl> + * @ param { object } template <nl> + * @ param { object } dest <nl> + * @ param { string } type <nl> + * / <nl> + function _compileNativeComponent ( template , dest , type ) { <nl> + this . _applyNaitveComponentOptions ( template ) ; <nl> + <nl> + var element = void 0 ; <nl> + if ( dest . ref = = = ' _documentElement ' ) { <nl> + / / if its parent is documentElement then it ' s a body <nl> + _ . debug ( ' compile to create body for ' , type ) ; <nl> + element = this . _createBody ( type ) ; <nl> + } else { <nl> + _ . debug ( ' compile to create element for ' , type ) ; <nl> + element = this . _createElement ( type ) ; <nl> + } <nl> + <nl> + if ( ! this . _rootEl ) { <nl> + this . _rootEl = element ; <nl> + / / bind event earlier because of lifecycle issues <nl> + var binding = this . _externalBinding | | { } ; <nl> + var target = binding . template ; <nl> + var vm = binding . parent ; <nl> + if ( target & & target . events & & vm & & element ) { <nl> + for ( var _type in target . events ) { <nl> + var handler = vm [ target . events [ _type ] ] ; <nl> + if ( handler ) { <nl> + element . addEvent ( _type , _ . bind ( handler , vm ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + this . _bindElement ( element , template ) ; <nl> + <nl> + if ( template . attr & & template . attr . append ) { <nl> + / / backward , append prop in attr <nl> + template . append = template . attr . append ; <nl> + } <nl> + <nl> + if ( template . append ) { <nl> + / / give the append attribute for ios adaptation <nl> + element . attr = element . attr | | { } ; <nl> + element . attr . append = template . append ; <nl> + } <nl> + <nl> + var treeMode = template . append = = = ' tree ' ; <nl> + var app = this . _app | | { } ; <nl> + if ( app . lastSignal ! = = - 1 & & ! treeMode ) { <nl> + _ . debug ( ' compile to append single node for ' , element ) ; <nl> + app . lastSignal = this . _attachTarget ( element , dest ) ; <nl> + } <nl> + if ( app . lastSignal ! = = - 1 ) { <nl> + this . _compileChildren ( template , element ) ; <nl> + } <nl> + if ( app . lastSignal ! = = - 1 & & treeMode ) { <nl> + _ . debug ( ' compile to append whole tree for ' , element ) ; <nl> + app . lastSignal = this . _attachTarget ( element , dest ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Set all children to a certain parent element . <nl> + * <nl> + * @ param { object } template <nl> + * @ param { object } dest <nl> + * / <nl> + function _compileChildren ( template , dest ) { <nl> + var _this3 = this ; <nl> + <nl> + var app = this . _app | | { } ; <nl> + var children = template . children ; <nl> + if ( children & & children . length ) { <nl> + children . every ( function ( child ) { <nl> + _this3 . _compile ( child , dest ) ; <nl> + return app . lastSignal ! = = - 1 ; <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Watch the list update and refresh the changes . 
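 *
 * For example, a repeat target produced by the template transformer looks
 * roughly like this (shape is illustrative, not an exact transformer contract):
 *
 *   target.repeat = {
 *     expression: function () { return this.list },   // list getter
 *     key: '$index', value: '$value', trackBy: 'id'
 *   }
 *
 * Each item gets its own merged context via _mergeContext, and later updates
 * are diffed against the old children using the trackBy keys.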
<nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } fragBlock { vms , data , children } <nl> + * @ param { object } info { getter , key , value , trackBy , oldStyle } <nl> + * / <nl> + function _bindRepeat ( target , fragBlock , info ) { <nl> + var _this4 = this ; <nl> + <nl> + var vms = fragBlock . vms ; <nl> + var children = fragBlock . children ; <nl> + var getter = info . getter ; <nl> + var trackBy = info . trackBy ; <nl> + var oldStyle = info . oldStyle ; <nl> + <nl> + var keyName = info . key ; <nl> + var valueName = info . value ; <nl> + <nl> + function compileItem ( item , index , context ) { <nl> + var mergedData = void 0 ; <nl> + if ( oldStyle ) { <nl> + mergedData = item ; <nl> + if ( ( typeof item = = = ' undefined ' ? ' undefined ' : _typeof ( item ) ) = = = ' object ' ) { <nl> + mergedData [ keyName ] = index ; <nl> + if ( ! mergedData . hasOwnProperty ( ' INDEX ' ) ) { <nl> + Object . defineProperty ( mergedData , ' INDEX ' , { <nl> + value : function value ( ) { <nl> + _ . warn ( ' " INDEX " in repeat is deprecated , ' + ' please use " $ index " instead ' ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + } <nl> + } else { <nl> + mergedData = { } ; <nl> + mergedData [ keyName ] = index ; <nl> + mergedData [ valueName ] = item ; <nl> + } <nl> + context = context . _mergeContext ( mergedData ) ; <nl> + vms . push ( context ) ; <nl> + context . _compile ( target , fragBlock , { repeat : item } ) ; <nl> + } <nl> + <nl> + var list = this . _watchBlock ( fragBlock , getter , ' repeat ' , function ( data ) { <nl> + _ . debug ( ' the " repeat " item has changed ' , data ) ; <nl> + <nl> + if ( ! fragBlock ) { <nl> + return ; <nl> + } <nl> + <nl> + var oldChildren = children . slice ( ) ; <nl> + var oldVms = vms . slice ( ) ; <nl> + var oldData = fragBlock . data . slice ( ) ; <nl> + / / 1 . collect all new refs track by <nl> + var trackMap = { } ; <nl> + var reusedMap = { } ; <nl> + data . forEach ( function ( item , index ) { <nl> + var key = trackBy ? item [ trackBy ] : index ; <nl> + / * istanbul ignore if * / <nl> + if ( key = = null | | key = = = ' ' ) { <nl> + return ; <nl> + } <nl> + trackMap [ key ] = item ; <nl> + } ) ; <nl> + <nl> + / / 2 . remove unused element foreach old item <nl> + var reusedList = [ ] ; <nl> + oldData . forEach ( function ( item , index ) { <nl> + var key = trackBy ? item [ trackBy ] : index ; <nl> + if ( trackMap . hasOwnProperty ( key ) ) { <nl> + reusedMap [ key ] = { <nl> + item : item , index : index , key : key , <nl> + target : oldChildren [ index ] , <nl> + vm : oldVms [ index ] <nl> + } ; <nl> + reusedList . push ( item ) ; <nl> + } else { <nl> + _this4 . _removeTarget ( oldChildren [ index ] ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + / / 3 . create new element foreach new item <nl> + children . length = 0 ; <nl> + vms . length = 0 ; <nl> + fragBlock . data = data . slice ( ) ; <nl> + fragBlock . updateMark = fragBlock . start ; <nl> + <nl> + data . forEach ( function ( item , index ) { <nl> + var key = trackBy ? item [ trackBy ] : index ; <nl> + var reused = reusedMap [ key ] ; <nl> + if ( reused ) { <nl> + if ( reused . item = = = reusedList [ 0 ] ) { <nl> + reusedList . shift ( ) ; <nl> + } else { <nl> + reusedList . $ remove ( reused . item ) ; <nl> + _this4 . _moveTarget ( reused . target , fragBlock . updateMark , true ) ; <nl> + } <nl> + children . push ( reused . target ) ; <nl> + vms . push ( reused . vm ) ; <nl> + reused . vm [ keyName ] = index ; <nl> + fragBlock . updateMark = reused . 
target ; <nl> + } else { <nl> + compileItem ( item , index , _this4 ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + delete fragBlock . updateMark ; <nl> + } ) ; <nl> + <nl> + fragBlock . data = list . slice ( 0 ) ; <nl> + list . forEach ( function ( item , index ) { <nl> + compileItem ( item , index , _this4 ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Watch the display update and add / remove the element . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } fragBlock <nl> + * @ param { object } context <nl> + * / <nl> + function _bindShown ( target , fragBlock , meta ) { <nl> + var _this5 = this ; <nl> + <nl> + var display = this . _watchBlock ( fragBlock , target . shown , ' shown ' , function ( display ) { <nl> + _ . debug ( ' the " if " item was changed ' , display ) ; <nl> + <nl> + if ( ! fragBlock | | ! ! fragBlock . display = = = ! ! display ) { <nl> + return ; <nl> + } <nl> + fragBlock . display = ! ! display ; <nl> + if ( display ) { <nl> + _this5 . _compile ( target , fragBlock , meta ) ; <nl> + } else { <nl> + _this5 . _removeBlock ( fragBlock , true ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + fragBlock . display = ! ! display ; <nl> + if ( display ) { <nl> + this . _compile ( target , fragBlock , meta ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Watch calc value changes and append certain type action to differ . <nl> + * It is used for if or repeat data - binding generator . <nl> + * <nl> + * @ param { object } fragBlock <nl> + * @ param { function } calc <nl> + * @ param { string } type <nl> + * @ param { function } handler <nl> + * @ return { any } init value of calc <nl> + * / <nl> + function _watchBlock ( fragBlock , calc , type , handler ) { <nl> + var differ = this & & this . _app & & this . _app . differ ; <nl> + var config = { } ; <nl> + var depth = ( fragBlock . element . depth | | 0 ) + 1 ; <nl> + <nl> + return this . _watch ( calc , function ( value ) { <nl> + config . latestValue = value ; <nl> + if ( differ & & ! config . recorded ) { <nl> + differ . append ( type , depth , fragBlock . blockId , function ( ) { <nl> + var latestValue = config . latestValue ; <nl> + handler ( latestValue ) ; <nl> + config . recorded = false ; <nl> + config . latestValue = undefined ; <nl> + } ) ; <nl> + } <nl> + config . recorded = true ; <nl> + } ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Clone a context and merge certain data . <nl> + * <nl> + * @ param { object } mergedData <nl> + * @ return { object } <nl> + * / <nl> + function _mergeContext ( mergedData ) { <nl> + var context = Object . create ( this ) ; <nl> + context . _data = mergedData ; <nl> + context . _initData ( ) ; <nl> + context . _initComputed ( ) ; <nl> + context . _realParent = this ; <nl> + return context ; <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 60 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + <nl> + var _typeof = typeof Symbol = = = " function " & & typeof Symbol . iterator = = = " symbol " ? function ( obj ) { return typeof obj ; } : function ( obj ) { return obj & & typeof Symbol = = = " function " & & obj . constructor = = = Symbol ? " symbol " : typeof obj ; } ; / * * <nl> + * @ fileOverview <nl> + * Directive Parser <nl> + * / <nl> + <nl> + exports . _applyNaitveComponentOptions = _applyNaitveComponentOptions ; <nl> + exports . _bindElement = _bindElement ; <nl> + exports . 
_bindSubVm = _bindSubVm ; <nl> + exports . _bindSubVmAfterInitialized = _bindSubVmAfterInitialized ; <nl> + exports . _setId = _setId ; <nl> + exports . _setAttr = _setAttr ; <nl> + exports . _setClass = _setClass ; <nl> + exports . _setStyle = _setStyle ; <nl> + exports . _setEvent = _setEvent ; <nl> + exports . _bindEvents = _bindEvents ; <nl> + exports . _bindDir = _bindDir ; <nl> + exports . _bindKey = _bindKey ; <nl> + exports . _watch = _watch ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var _ = _interopRequireWildcard ( _util ) ; <nl> + <nl> + var _watcher = __webpack_require__ ( 55 ) ; <nl> + <nl> + var _watcher2 = _interopRequireDefault ( _watcher ) ; <nl> + <nl> + var _config = __webpack_require__ ( 47 ) ; <nl> + <nl> + var _config2 = _interopRequireDefault ( _config ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + var nativeComponentMap = _config2 . default . nativeComponentMap ; <nl> + <nl> + <nl> + var SETTERS = { <nl> + attr : ' setAttr ' , <nl> + style : ' setStyle ' , <nl> + event : ' addEvent ' <nl> + } ; <nl> + <nl> + / * * <nl> + * apply the native component ' s options ( specified by template . type ) <nl> + * to the template <nl> + * / <nl> + function _applyNaitveComponentOptions ( template ) { <nl> + var type = template . type ; <nl> + <nl> + var options = nativeComponentMap [ type ] ; <nl> + <nl> + if ( ( typeof options = = = ' undefined ' ? ' undefined ' : _typeof ( options ) ) = = = ' object ' ) { <nl> + for ( var key in options ) { <nl> + if ( template [ key ] = = null ) { <nl> + template [ key ] = options [ key ] ; <nl> + } else if ( _ . typof ( template [ key ] ) = = = ' object ' & & _ . typof ( options [ key ] ) = = = ' object ' ) { <nl> + for ( var subkey in options [ key ] ) { <nl> + if ( template [ key ] [ subkey ] = = null ) { <nl> + template [ key ] [ subkey ] = options [ key ] [ subkey ] ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * bind all id , attr , classnames , style , events to an element <nl> + * / <nl> + function _bindElement ( el , template ) { <nl> + this . _setId ( template . id , el , this ) ; <nl> + this . _setAttr ( el , template . attr ) ; <nl> + this . _setClass ( el , template . classList ) ; <nl> + this . _setStyle ( el , template . style ) ; <nl> + this . _bindEvents ( el , template . events ) ; <nl> + } <nl> + <nl> + / * * <nl> + * bind all props to sub vm and bind all style , events to the root element <nl> + * of the sub vm if it doesn ' t have a replaced multi - node fragment <nl> + * / <nl> + function _bindSubVm ( subVm , template , repeatItem ) { <nl> + subVm = subVm | | { } ; <nl> + template = template | | { } ; <nl> + <nl> + var options = subVm . _options | | { } ; <nl> + <nl> + / / bind props <nl> + var props = options . props ; <nl> + <nl> + if ( Array . isArray ( props ) ) { <nl> + props = props . reduce ( function ( result , value ) { <nl> + result [ value ] = true ; <nl> + return result ; <nl> + } , { } ) ; <nl> + } <nl> + <nl> + mergeProps ( repeatItem , props , this , subVm ) ; <nl> + mergeProps ( template . 
attr , props , this , subVm ) ; <nl> + } <nl> + <nl> + function _bindSubVmAfterInitialized ( subVm , template ) { <nl> + mergeClassStyle ( template . classList , this , subVm ) ; <nl> + mergeStyle ( template . style , this , subVm ) ; <nl> + } <nl> + <nl> + function mergeProps ( target , props , vm , subVm ) { <nl> + if ( ! target ) { <nl> + return ; <nl> + } <nl> + <nl> + var _loop = function _loop ( key ) { <nl> + if ( ! props | | props [ key ] ) { <nl> + var value = target [ key ] ; <nl> + if ( typeof value = = = ' function ' ) { <nl> + var returnValue = vm . _watch ( value , function ( v ) { <nl> + subVm [ key ] = v ; <nl> + } ) ; <nl> + subVm [ key ] = returnValue ; <nl> + } else { <nl> + subVm [ key ] = value ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + for ( var key in target ) { <nl> + _loop ( key ) ; <nl> + } <nl> + } <nl> + <nl> + function mergeStyle ( target , vm , subVm ) { <nl> + var _loop2 = function _loop2 ( key ) { <nl> + var value = target [ key ] ; <nl> + if ( typeof value = = = ' function ' ) { <nl> + var returnValue = vm . _watch ( value , function ( v ) { <nl> + if ( subVm . _rootEl ) { <nl> + subVm . _rootEl . setStyle ( key , v ) ; <nl> + } <nl> + } ) ; <nl> + subVm . _rootEl . setStyle ( key , returnValue ) ; <nl> + } else { <nl> + if ( subVm . _rootEl ) { <nl> + subVm . _rootEl . setStyle ( key , value ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + for ( var key in target ) { <nl> + _loop2 ( key ) ; <nl> + } <nl> + } <nl> + <nl> + function mergeClassStyle ( target , vm , subVm ) { <nl> + var css = vm . _options & & vm . _options . style | | { } ; <nl> + <nl> + / * istanbul ignore if * / <nl> + if ( ! subVm . _rootEl ) { <nl> + return ; <nl> + } <nl> + <nl> + if ( typeof target = = = ' function ' ) { <nl> + var _value = vm . _watch ( target , function ( v ) { <nl> + setClassStyle ( subVm . _rootEl , css , v ) ; <nl> + } ) ; <nl> + setClassStyle ( subVm . _rootEl , css , _value ) ; <nl> + } else if ( target ! = null ) { <nl> + setClassStyle ( subVm . _rootEl , css , target ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * bind id to an element <nl> + * each id is unique in a whole vm <nl> + * / <nl> + function _setId ( id , el , vm ) { <nl> + var _this = this ; <nl> + <nl> + var map = Object . create ( null ) ; <nl> + <nl> + Object . defineProperties ( map , { <nl> + vm : { <nl> + value : vm , <nl> + writable : false , <nl> + configurable : false <nl> + } , <nl> + el : { <nl> + get : function get ( ) { <nl> + return el | | vm . _rootEl ; <nl> + } , <nl> + configurable : false <nl> + } <nl> + } ) ; <nl> + <nl> + if ( typeof id = = = ' function ' ) { <nl> + var handler = id ; <nl> + id = handler . call ( this ) ; <nl> + if ( id ) { <nl> + this . _ids [ id ] = map ; <nl> + } <nl> + this . _watch ( handler , function ( newId ) { <nl> + if ( newId ) { <nl> + _this . _ids [ newId ] = map ; <nl> + } <nl> + } ) ; <nl> + } else if ( id & & typeof id = = = ' string ' ) { <nl> + this . _ids [ id ] = map ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * bind attr to an element <nl> + * / <nl> + function _setAttr ( el , attr ) { <nl> + this . _bindDir ( el , ' attr ' , attr ) ; <nl> + } <nl> + <nl> + function setClassStyle ( el , css , classList ) { <nl> + var classStyle = { } ; <nl> + var length = classList . length ; <nl> + <nl> + for ( var i = 0 ; i < length ; i + + ) { <nl> + var style = css [ classList [ i ] ] ; <nl> + if ( style ) { <nl> + for ( var key in style ) { <nl> + classStyle [ key ] = style [ key ] ; <nl> + } <nl> + } <nl> + } <nl> + el . 
setClassStyle ( classStyle ) ; <nl> + } <nl> + <nl> + / * * <nl> + * bind classnames to an element <nl> + * / <nl> + function _setClass ( el , classList ) { <nl> + if ( typeof classList ! = = ' function ' & & ! Array . isArray ( classList ) ) { <nl> + return ; <nl> + } <nl> + if ( Array . isArray ( classList ) & & ! classList . length ) { <nl> + el . setClassStyle ( { } ) ; <nl> + return ; <nl> + } <nl> + <nl> + var style = this . _options & & this . _options . style | | { } ; <nl> + if ( typeof classList = = = ' function ' ) { <nl> + var _value2 = this . _watch ( classList , function ( v ) { <nl> + setClassStyle ( el , style , v ) ; <nl> + } ) ; <nl> + setClassStyle ( el , style , _value2 ) ; <nl> + } else { <nl> + setClassStyle ( el , style , classList ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * bind style to an element <nl> + * / <nl> + function _setStyle ( el , style ) { <nl> + this . _bindDir ( el , ' style ' , style ) ; <nl> + } <nl> + <nl> + / * * <nl> + * add an event type and handler to an element and generate a dom update <nl> + * / <nl> + function _setEvent ( el , type , handler ) { <nl> + el . addEvent ( type , _ . bind ( handler , this ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * add all events of an element <nl> + * / <nl> + function _bindEvents ( el , events ) { <nl> + if ( ! events ) { <nl> + return ; <nl> + } <nl> + var keys = Object . keys ( events ) ; <nl> + var i = keys . length ; <nl> + while ( i - - ) { <nl> + var key = keys [ i ] ; <nl> + var handler = events [ key ] ; <nl> + if ( typeof handler = = = ' string ' ) { <nl> + handler = this [ handler ] ; <nl> + / * istanbul ignore if * / <nl> + if ( ! handler ) { <nl> + _ . error ( ' The method " ' + handler + ' " is not defined . ' ) ; <nl> + } <nl> + } <nl> + this . _setEvent ( el , key , handler ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * set a series of members as a kind of an element <nl> + * for example : style , attr , . . . <nl> + * if the value is a function then bind the data changes <nl> + * / <nl> + function _bindDir ( el , name , data ) { <nl> + if ( ! data ) { <nl> + return ; <nl> + } <nl> + var keys = Object . keys ( data ) ; <nl> + var i = keys . length ; <nl> + while ( i - - ) { <nl> + var key = keys [ i ] ; <nl> + var _value3 = data [ key ] ; <nl> + if ( typeof _value3 = = = ' function ' ) { <nl> + this . _bindKey ( el , name , key , _value3 ) ; <nl> + } else { <nl> + el [ SETTERS [ name ] ] ( key , _value3 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * bind data changes to a certain key to a name series in an element <nl> + * / <nl> + function _bindKey ( el , name , key , calc ) { <nl> + var _this2 = this ; <nl> + <nl> + var methodName = SETTERS [ name ] ; <nl> + / / watch the calc , and returns a value by calc . call ( ) <nl> + var value = this . _watch ( calc , function ( value ) { <nl> + function handler ( ) { <nl> + el [ methodName ] ( key , value ) ; <nl> + } <nl> + var differ = _this2 & & _this2 . _app & & _this2 . _app . differ ; <nl> + if ( differ ) { <nl> + differ . append ( ' element ' , el . depth , el . ref , handler ) ; <nl> + } else { <nl> + handler ( ) ; <nl> + } <nl> + } ) ; <nl> + <nl> + el [ methodName ] ( key , value ) ; <nl> + } <nl> + <nl> + / * * <nl> + * watch a calc function and callback if the calc value changes <nl> + * / <nl> + function _watch ( calc , callback ) { <nl> + var watcher = new _watcher2 . default ( this , calc , function ( value , oldValue ) { <nl> + / * istanbul ignore if * / <nl> + if ( ( typeof value = = = ' undefined ' ? 
' undefined ' : _typeof ( value ) ) ! = = ' object ' & & value = = = oldValue ) { <nl> + return ; <nl> + } <nl> + callback ( value ) ; <nl> + } ) ; <nl> + <nl> + return watcher . value ; <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 61 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . _createBody = _createBody ; <nl> + exports . _createElement = _createElement ; <nl> + exports . _createBlock = _createBlock ; <nl> + exports . _createBlockStart = _createBlockStart ; <nl> + exports . _createBlockEnd = _createBlockEnd ; <nl> + exports . _attachTarget = _attachTarget ; <nl> + exports . _moveTarget = _moveTarget ; <nl> + exports . _moveElement = _moveElement ; <nl> + exports . _moveBlock = _moveBlock ; <nl> + exports . _removeTarget = _removeTarget ; <nl> + exports . _removeElement = _removeElement ; <nl> + exports . _removeBlock = _removeBlock ; <nl> + / * * <nl> + * @ fileOverview Document & Element Helpers . <nl> + * <nl> + * required : <nl> + * Document # : createElement , createComment , getRef <nl> + * Element # : appendChild , insertBefore , removeChild , nextSibling <nl> + * / <nl> + <nl> + / * * <nl> + * Create a body by type <nl> + * Using this . _app . doc <nl> + * <nl> + * @ param { string } type <nl> + * / <nl> + function _createBody ( type ) { <nl> + var doc = this . _app . doc ; <nl> + return doc . createBody ( type ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Create an element by type <nl> + * Using this . _app . doc <nl> + * <nl> + * @ param { string } type <nl> + * / <nl> + function _createElement ( type ) { <nl> + var doc = this . _app . doc ; <nl> + return doc . createElement ( type ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Create and return a frag block for an element . <nl> + * The frag block has a starter , ender and the element itself . <nl> + * <nl> + * @ param { object } element <nl> + * / <nl> + function _createBlock ( element ) { <nl> + var start = this . _createBlockStart ( ) ; <nl> + var end = this . _createBlockEnd ( ) ; <nl> + var blockId = lastestBlockId + + ; <nl> + if ( element . element ) { <nl> + element . element . insertBefore ( start , element . end ) ; <nl> + element . element . insertBefore ( end , element . end ) ; <nl> + element = element . element ; <nl> + } else { <nl> + element . appendChild ( start ) ; <nl> + element . appendChild ( end ) ; <nl> + } <nl> + return { start : start , end : end , element : element , blockId : blockId } ; <nl> + } <nl> + <nl> + var lastestBlockId = 1 ; <nl> + <nl> + / * * <nl> + * Create and return a block starter . <nl> + * Using this . _app . doc <nl> + * / <nl> + function _createBlockStart ( ) { <nl> + var doc = this . _app . doc ; <nl> + var anchor = doc . createComment ( ' start ' ) ; <nl> + return anchor ; <nl> + } <nl> + <nl> + / * * <nl> + * Create and return a block ender . <nl> + * Using this . _app . doc <nl> + * / <nl> + function _createBlockEnd ( ) { <nl> + var doc = this . _app . doc ; <nl> + var anchor = doc . createComment ( ' end ' ) ; <nl> + return anchor ; <nl> + } <nl> + <nl> + / * * <nl> + * Attach target to a certain dest using appendChild by default . <nl> + * If the dest is a frag block then insert before the ender . <nl> + * If the target is a frag block then attach the starter and ender in order . 
<nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } dest <nl> + * / <nl> + function _attachTarget ( target , dest ) { <nl> + if ( dest . element ) { <nl> + var before = dest . end ; <nl> + var after = dest . updateMark ; <nl> + / / push new target for watch list update later <nl> + if ( dest . children ) { <nl> + dest . children . push ( target ) ; <nl> + } <nl> + / / for check repeat case <nl> + if ( after ) { <nl> + this . _moveTarget ( target , after ) ; <nl> + dest . updateMark = target . element ? target . end : target ; <nl> + } else if ( target . element ) { <nl> + dest . element . insertBefore ( target . start , before ) ; <nl> + dest . element . insertBefore ( target . end , before ) ; <nl> + } else { <nl> + return dest . element . insertBefore ( target , before ) ; <nl> + } <nl> + } else { <nl> + if ( target . element ) { <nl> + dest . appendChild ( target . start ) ; <nl> + dest . appendChild ( target . end ) ; <nl> + } else { <nl> + return dest . appendChild ( target ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Move target before a certain element . The target maybe block or element . <nl> + * <nl> + * @ param { object } target <nl> + * @ param { object } before <nl> + * / <nl> + function _moveTarget ( target , after ) { <nl> + if ( target . element ) { <nl> + this . _moveBlock ( target , after ) ; <nl> + } else { <nl> + this . _moveElement ( target , after ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Move element before a certain element . <nl> + * <nl> + * @ param { object } element <nl> + * @ param { object } before <nl> + * / <nl> + function _moveElement ( element , after ) { <nl> + var parent = after . parentNode ; <nl> + if ( parent ) { <nl> + parent . insertAfter ( element , after ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Move all elements of the block before a certain element . <nl> + * <nl> + * @ param { object } fragBlock <nl> + * @ param { object } before <nl> + * / <nl> + function _moveBlock ( fragBlock , after ) { <nl> + var parent = after . parentNode ; <nl> + <nl> + if ( parent ) { <nl> + ( function ( ) { <nl> + var el = fragBlock . start ; <nl> + var group = [ el ] ; <nl> + <nl> + while ( el & & el ! = = fragBlock . end ) { <nl> + el = el . nextSibling ; <nl> + group . push ( el ) ; <nl> + } <nl> + <nl> + var temp = after ; <nl> + group . forEach ( function ( el ) { <nl> + parent . insertAfter ( el , temp ) ; <nl> + temp = el ; <nl> + } ) ; <nl> + } ) ( ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Remove target from DOM tree . <nl> + * If the target is a frag block then call _removeBlock <nl> + * <nl> + * @ param { object } target <nl> + * / <nl> + function _removeTarget ( target ) { <nl> + if ( target . element ) { <nl> + this . _removeBlock ( target ) ; <nl> + } else { <nl> + this . _removeElement ( target ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Remove a certain element . <nl> + * Using this . _app . doc <nl> + * <nl> + * @ param { object } target <nl> + * / <nl> + function _removeElement ( target ) { <nl> + var parent = target . parentNode ; <nl> + <nl> + if ( parent ) { <nl> + parent . removeChild ( target ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Remove a frag block . <nl> + * The second param decides whether the block self should be removed too . <nl> + * <nl> + * @ param { object } fragBlock <nl> + * @ param { Boolean } preserveBlock = false <nl> + * / <nl> + function _removeBlock ( fragBlock ) { <nl> + var _this = this ; <nl> + <nl> + var preserveBlock = arguments . 
length < = 1 | | arguments [ 1 ] = = = undefined ? false : arguments [ 1 ] ; <nl> + <nl> + var result = [ ] ; <nl> + var el = fragBlock . start . nextSibling ; <nl> + <nl> + while ( el & & el ! = = fragBlock . end ) { <nl> + result . push ( el ) ; <nl> + el = el . nextSibling ; <nl> + } <nl> + <nl> + if ( ! preserveBlock ) { <nl> + this . _removeElement ( fragBlock . start ) ; <nl> + } <nl> + result . forEach ( function ( el ) { <nl> + _this . _removeElement ( el ) ; <nl> + } ) ; <nl> + if ( ! preserveBlock ) { <nl> + this . _removeElement ( fragBlock . end ) ; <nl> + } <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 62 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . $ emit = $ emit ; <nl> + exports . $ dispatch = $ dispatch ; <nl> + exports . $ broadcast = $ broadcast ; <nl> + exports . $ on = $ on ; <nl> + exports . $ off = $ off ; <nl> + exports . _initEvents = _initEvents ; <nl> + function Evt ( type , detail ) { <nl> + if ( detail instanceof Evt ) { <nl> + return detail ; <nl> + } <nl> + <nl> + this . timestamp = Date . now ( ) ; <nl> + this . detail = detail ; <nl> + this . type = type ; <nl> + <nl> + var shouldStop = false ; <nl> + this . stop = function ( ) { <nl> + shouldStop = true ; <nl> + } ; <nl> + this . hasStopped = function ( ) { <nl> + return shouldStop ; <nl> + } ; <nl> + } <nl> + <nl> + function $ emit ( type , detail ) { <nl> + var _this = this ; <nl> + <nl> + var events = this . _vmEvents ; <nl> + var handlerList = events [ type ] ; <nl> + if ( handlerList ) { <nl> + ( function ( ) { <nl> + var evt = new Evt ( type , detail ) ; <nl> + handlerList . forEach ( function ( handler ) { <nl> + handler . call ( _this , evt ) ; <nl> + } ) ; <nl> + } ) ( ) ; <nl> + } <nl> + } <nl> + <nl> + function $ dispatch ( type , detail ) { <nl> + var evt = new Evt ( type , detail ) ; <nl> + this . $ emit ( type , evt ) ; <nl> + <nl> + if ( ! evt . hasStopped ( ) & & this . _parent & & this . _parent . $ dispatch ) { <nl> + this . _parent . $ dispatch ( type , evt ) ; <nl> + } <nl> + } <nl> + <nl> + function $ broadcast ( type , detail ) { <nl> + var evt = new Evt ( type , detail ) ; <nl> + this . $ emit ( type , evt ) ; <nl> + <nl> + if ( ! evt . hasStopped ( ) & & this . _childrenVms ) { <nl> + this . _childrenVms . forEach ( function ( subVm ) { <nl> + subVm . $ broadcast ( type , evt ) ; <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + function $ on ( type , handler ) { <nl> + if ( ! type | | typeof handler ! = = ' function ' ) { <nl> + return ; <nl> + } <nl> + var events = this . _vmEvents ; <nl> + var handlerList = events [ type ] | | [ ] ; <nl> + handlerList . push ( handler ) ; <nl> + events [ type ] = handlerList ; <nl> + <nl> + / / fixed old version lifecycle design <nl> + if ( type = = = ' hook : ready ' & & this . _ready ) { <nl> + this . $ emit ( ' hook : ready ' ) ; <nl> + } <nl> + } <nl> + <nl> + function $ off ( type , handler ) { <nl> + if ( ! type ) { <nl> + return ; <nl> + } <nl> + var events = this . _vmEvents ; <nl> + if ( ! handler ) { <nl> + delete events [ type ] ; <nl> + return ; <nl> + } <nl> + var handlerList = events [ type ] ; <nl> + if ( ! handlerList ) { <nl> + return ; <nl> + } <nl> + handlerList . 
$ remove ( handler ) ; <nl> + } <nl> + <nl> + var LIFE_CYCLE_TYPES = [ ' init ' , ' created ' , ' ready ' ] ; <nl> + <nl> + function _initEvents ( externalEvents ) { <nl> + var _this2 = this ; <nl> + <nl> + var options = this . _options | | { } ; <nl> + var events = options . events | | { } ; <nl> + for ( var type1 in events ) { <nl> + this . $ on ( type1 , events [ type1 ] ) ; <nl> + } <nl> + for ( var type2 in externalEvents ) { <nl> + this . $ on ( type2 , externalEvents [ type2 ] ) ; <nl> + } <nl> + LIFE_CYCLE_TYPES . forEach ( function ( type ) { <nl> + _this2 . $ on ( ' hook : ' + type , options [ type ] ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 63 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . clearModules = clearModules ; <nl> + exports . getModule = getModule ; <nl> + exports . requireModule = requireModule ; <nl> + exports . registerModules = registerModules ; <nl> + exports . registerMethods = registerMethods ; <nl> + exports . requireComponent = requireComponent ; <nl> + exports . registerComponent = registerComponent ; <nl> + var nativeModules = { } ; <nl> + <nl> + function assignModules ( modules , ifReplace ) { <nl> + var _loop = function _loop ( moduleName ) { <nl> + / / init ` modules [ moduleName ] [ ] ` <nl> + var methods = nativeModules [ moduleName ] ; <nl> + if ( ! methods ) { <nl> + methods = { } ; <nl> + nativeModules [ moduleName ] = methods ; <nl> + } <nl> + <nl> + / / push each non - existed new method <nl> + modules [ moduleName ] . forEach ( function ( method ) { <nl> + if ( typeof method = = = ' string ' ) { <nl> + method = { <nl> + name : method <nl> + } ; <nl> + } <nl> + <nl> + if ( ! methods [ method . name ] | | ifReplace ) { <nl> + methods [ method . name ] = method ; <nl> + } <nl> + } ) ; <nl> + } ; <nl> + <nl> + for ( var moduleName in modules ) { <nl> + _loop ( moduleName ) ; <nl> + } <nl> + } <nl> + <nl> + function assignApis ( Ctor , apis ) { <nl> + var p = Ctor . prototype ; <nl> + <nl> + for ( var apiName in apis ) { <nl> + if ( ! p . hasOwnProperty ( apiName ) ) { <nl> + p [ apiName ] = apis [ apiName ] ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + function clearModules ( ) { <nl> + nativeModules = { } ; <nl> + } <nl> + <nl> + function getModule ( moduleName ) { <nl> + return nativeModules [ moduleName ] ; <nl> + } <nl> + <nl> + / * * <nl> + * @ context a instance of AppInstance <nl> + * / <nl> + function requireModule ( moduleName ) { <nl> + var _this = this ; <nl> + <nl> + var methods = nativeModules [ moduleName ] ; <nl> + var target = { } ; <nl> + <nl> + var _loop2 = function _loop2 ( methodName ) { <nl> + target [ methodName ] = function ( ) { <nl> + for ( var _len = arguments . length , args = Array ( _len ) , _key = 0 ; _key < _len ; _key + + ) { <nl> + args [ _key ] = arguments [ _key ] ; <nl> + } <nl> + <nl> + return _this . 
callTasks ( { <nl> + module : moduleName , <nl> + method : methodName , <nl> + args : args <nl> + } ) ; <nl> + } ; <nl> + } ; <nl> + <nl> + for ( var methodName in methods ) { <nl> + _loop2 ( methodName ) ; <nl> + } <nl> + <nl> + return target ; <nl> + } <nl> + <nl> + / * * <nl> + * @ context Vm <nl> + * / <nl> + function registerModules ( modules , ifReplace ) { <nl> + assignModules ( modules , ifReplace ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ context Vm <nl> + * / <nl> + function registerMethods ( apis ) { <nl> + assignApis ( this , apis ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ context a instance of AppInstance <nl> + * / <nl> + function requireComponent ( name ) { <nl> + var customComponentMap = this . customComponentMap ; <nl> + <nl> + return customComponentMap [ name ] ; <nl> + } <nl> + <nl> + / * * <nl> + * @ context a instance of AppInstance <nl> + * / <nl> + function registerComponent ( name , exports ) { <nl> + var customComponentMap = this . customComponentMap ; <nl> + <nl> + <nl> + if ( customComponentMap [ name ] ) { <nl> + throw new Error ( ' define a component ( ' + name + ' ) that already exists ' ) ; <nl> + } <nl> + <nl> + customComponentMap [ name ] = exports ; <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 64 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . normalizeVersion = normalizeVersion ; <nl> + exports . getError = getError ; <nl> + exports . check = check ; <nl> + <nl> + var _semver = __webpack_require__ ( 51 ) ; <nl> + <nl> + var _semver2 = _interopRequireDefault ( _semver ) ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + / * * <nl> + * [ normalizeVersion description ] <nl> + * @ param { String } Version . ie : 1 , 1 . 0 , 1 . 0 . 0 <nl> + * @ return { String } Version <nl> + * / <nl> + function normalizeVersion ( v ) { <nl> + var isValid = _semver2 . default . valid ( v ) ; <nl> + if ( isValid ) { <nl> + return v ; <nl> + } <nl> + <nl> + v = typeof v = = = ' string ' ? v : ' ' ; <nl> + var split = v . split ( ' . ' ) ; <nl> + var i = 0 ; <nl> + var result = [ ] ; <nl> + <nl> + while ( i < 3 ) { <nl> + var s = typeof split [ i ] = = = ' string ' & & split [ i ] ? split [ i ] : ' 0 ' ; <nl> + result . push ( s ) ; <nl> + i + + ; <nl> + } <nl> + <nl> + return result . join ( ' . ' ) ; <nl> + } <nl> + <nl> + function getError ( key , val , criteria ) { <nl> + var result = { <nl> + isDowngrade : true , <nl> + errorType : 1 , <nl> + code : 1000 <nl> + } ; <nl> + var getMsg = function getMsg ( key , val , criteria ) { <nl> + return ' Downgrade [ ' + key + ' ] : : deviceInfo ' + val + ' matched criteria ' + criteria ; <nl> + } ; <nl> + var _key = key . toLowerCase ( ) ; <nl> + <nl> + result . errorMessage = getMsg ( key , val , criteria ) ; <nl> + <nl> + if ( _key . indexOf ( ' osversion ' ) > = 0 ) { <nl> + result . code = 1001 ; <nl> + } else if ( _key . indexOf ( ' appversion ' ) > = 0 ) { <nl> + result . code = 1002 ; <nl> + } else if ( _key . indexOf ( ' weexversion ' ) > = 0 ) { <nl> + result . code = 1003 ; <nl> + } else if ( _key . indexOf ( ' devicemodel ' ) > = 0 ) { <nl> + result . 
code = 1004 ; <nl> + } <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + / * * <nl> + * WEEX framework input ( deviceInfo ) <nl> + * { <nl> + * platform : ' iOS ' or ' android ' <nl> + * osVersion : ' 1 . 0 . 0 ' or ' 1 . 0 ' or ' 1 ' <nl> + * appVersion : ' 1 . 0 . 0 ' or ' 1 . 0 ' or ' 1 ' <nl> + * weexVersion : ' 1 . 0 . 0 ' or ' 1 . 0 ' or ' 1 ' <nl> + * dDeviceModel : ' MODEL_NAME ' <nl> + * } <nl> + * <nl> + * downgrade config ( config ) <nl> + * { <nl> + * ios : { <nl> + * osVersion : ' > 1 . 0 . 0 ' or ' > = 1 . 0 . 0 ' or ' < 1 . 0 . 0 ' or ' < = 1 . 0 . 0 ' or ' 1 . 0 . 0 ' <nl> + * appVersion : ' > 1 . 0 . 0 ' or ' > = 1 . 0 . 0 ' or ' < 1 . 0 . 0 ' or ' < = 1 . 0 . 0 ' or ' 1 . 0 . 0 ' <nl> + * weexVersion : ' > 1 . 0 . 0 ' or ' > = 1 . 0 . 0 ' or ' < 1 . 0 . 0 ' or ' < = 1 . 0 . 0 ' or ' 1 . 0 . 0 ' <nl> + * deviceModel : [ ' modelA ' , ' modelB ' , . . . ] <nl> + * } , <nl> + * android : { <nl> + * osVersion : ' > 1 . 0 . 0 ' or ' > = 1 . 0 . 0 ' or ' < 1 . 0 . 0 ' or ' < = 1 . 0 . 0 ' or ' 1 . 0 . 0 ' <nl> + * appVersion : ' > 1 . 0 . 0 ' or ' > = 1 . 0 . 0 ' or ' < 1 . 0 . 0 ' or ' < = 1 . 0 . 0 ' or ' 1 . 0 . 0 ' <nl> + * weexVersion : ' > 1 . 0 . 0 ' or ' > = 1 . 0 . 0 ' or ' < 1 . 0 . 0 ' or ' < = 1 . 0 . 0 ' or ' 1 . 0 . 0 ' <nl> + * deviceModel : [ ' modelA ' , ' modelB ' , . . . ] <nl> + * } <nl> + * } <nl> + * <nl> + * <nl> + * @ param { object } deviceInfo Weex SDK framework input <nl> + * @ param { object } config user input <nl> + * @ return { Object } { isDowngrade : true / false , errorMessage . . . } <nl> + * / <nl> + function check ( config , deviceInfo ) { <nl> + deviceInfo = deviceInfo | | global . WXEnvironment ; <nl> + deviceInfo = ( 0 , _util . isPlainObject ) ( deviceInfo ) ? deviceInfo : { } ; <nl> + <nl> + var result = { <nl> + isDowngrade : false / / defautl is pass <nl> + } ; <nl> + <nl> + if ( ( 0 , _util . typof ) ( config ) = = = ' function ' ) { <nl> + var customDowngrade = config . call ( this , deviceInfo , { <nl> + semver : _semver2 . default , <nl> + normalizeVersion : this . normalizeVersion <nl> + } ) ; <nl> + <nl> + customDowngrade = ! ! customDowngrade ; <nl> + <nl> + result = customDowngrade ? this . getError ( ' custom ' , ' ' , ' custom params ' ) : result ; <nl> + } else { <nl> + config = ( 0 , _util . isPlainObject ) ( config ) ? config : { } ; <nl> + <nl> + var platform = deviceInfo . platform | | ' unknow ' ; <nl> + var dPlatform = platform . toLowerCase ( ) ; <nl> + var cObj = config [ dPlatform ] | | { } ; <nl> + <nl> + for ( var i in deviceInfo ) { <nl> + var key = i ; <nl> + var keyLower = key . toLowerCase ( ) ; <nl> + var val = deviceInfo [ i ] ; <nl> + var isVersion = keyLower . indexOf ( ' version ' ) > = 0 ; <nl> + var isDeviceModel = keyLower . indexOf ( ' devicemodel ' ) > = 0 ; <nl> + var criteria = cObj [ i ] ; <nl> + <nl> + if ( criteria & & isVersion ) { <nl> + var c = this . normalizeVersion ( criteria ) ; <nl> + var d = this . normalizeVersion ( deviceInfo [ i ] ) ; <nl> + <nl> + if ( _semver2 . default . satisfies ( d , c ) ) { <nl> + result = this . getError ( key , val , criteria ) ; <nl> + break ; <nl> + } <nl> + } else if ( isDeviceModel ) { <nl> + var _criteria = ( 0 , _util . typof ) ( criteria ) = = = ' array ' ? criteria : [ criteria ] ; <nl> + if ( _criteria . indexOf ( val ) > = 0 ) { <nl> + result = this . getError ( key , val , criteria ) ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + return result ; <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . 
call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 65 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . updateActions = updateActions ; <nl> + exports . init = init ; <nl> + exports . destroy = destroy ; <nl> + exports . getRootElement = getRootElement ; <nl> + exports . fireEvent = fireEvent ; <nl> + exports . callback = callback ; <nl> + exports . refreshData = refreshData ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var _ = _interopRequireWildcard ( _util ) ; <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + function _toConsumableArray ( arr ) { if ( Array . isArray ( arr ) ) { for ( var i = 0 , arr2 = Array ( arr . length ) ; i < arr . length ; i + + ) { arr2 [ i ] = arr [ i ] ; } return arr2 ; } else { return Array . from ( arr ) ; } } / * * <nl> + * @ fileOverview <nl> + * instance controls from native <nl> + * <nl> + * - init bundle <nl> + * - fire event <nl> + * - callback <nl> + * - destroy <nl> + * <nl> + * corresponded with the API of instance manager ( framework . js ) <nl> + * / <nl> + <nl> + function updateActions ( ) { <nl> + this . differ . flush ( ) ; <nl> + var tasks = [ ] ; <nl> + if ( this . doc & & this . doc . listener & & this . doc . listener . updates . length ) { <nl> + tasks . push . apply ( tasks , _toConsumableArray ( this . doc . listener . updates ) ) ; <nl> + this . doc . listener . updates = [ ] ; <nl> + } <nl> + if ( tasks . length ) { <nl> + return this . callTasks ( tasks ) ; <nl> + } <nl> + } <nl> + <nl> + function init ( code , data ) { <nl> + var _this = this ; <nl> + <nl> + _ . debug ( ' Intialize an instance with : \ n ' , code , data ) ; <nl> + <nl> + var result = void 0 ; <nl> + / / @ see : lib / app / bundle . js <nl> + var define = _ . bind ( this . define , this ) ; <nl> + var bootstrap = function bootstrap ( name , config , _data ) { <nl> + result = _this . bootstrap ( name , config , _data | | data ) ; <nl> + _this . updateActions ( ) ; <nl> + _this . doc . listener . createFinish ( ) ; <nl> + _ . debug ( ' After intialized an instance ( ' + _this . id + ' ) ' ) ; <nl> + } ; <nl> + <nl> + / / backward ( register / render ) <nl> + var register = _ . bind ( this . register , this ) ; <nl> + var render = function render ( name , _data ) { <nl> + result = _this . bootstrap ( name , { } , _data ) ; <nl> + } ; <nl> + <nl> + var require = function require ( name ) { <nl> + return function ( _data ) { <nl> + result = _this . bootstrap ( name , { } , _data ) ; <nl> + } ; <nl> + } ; <nl> + <nl> + var document = this . doc ; <nl> + <nl> + var functionBody = void 0 ; <nl> + / * istanbul ignore if * / <nl> + if ( typeof code = = = ' function ' ) { <nl> + / / ` function ( ) { . . . } ` - > ` { . . . } ` <nl> + / / not very strict <nl> + functionBody = code . toString ( ) . substr ( 12 ) ; <nl> + } else if ( code ) { <nl> + functionBody = code . toString ( ) ; <nl> + } <nl> + <nl> + var _global = global ; <nl> + var WXEnvironment = _global . 
WXEnvironment ; <nl> + <nl> + if ( WXEnvironment & & WXEnvironment . platform ! = = ' Web ' ) { <nl> + ( function ( ) { <nl> + var timer = _this . requireModule ( ' timer ' ) ; <nl> + var timerAPIs = { <nl> + setTimeout : function setTimeout ( ) { <nl> + for ( var _len = arguments . length , args = Array ( _len ) , _key = 0 ; _key < _len ; _key + + ) { <nl> + args [ _key ] = arguments [ _key ] ; <nl> + } <nl> + <nl> + var handler = function handler ( ) { <nl> + args [ 0 ] . apply ( args , _toConsumableArray ( args . slice ( 2 ) ) ) ; <nl> + } ; <nl> + timer . setTimeout ( handler , args [ 1 ] ) ; <nl> + return _this . uid . toString ( ) ; <nl> + } , <nl> + setInterval : function setInterval ( ) { <nl> + for ( var _len2 = arguments . length , args = Array ( _len2 ) , _key2 = 0 ; _key2 < _len2 ; _key2 + + ) { <nl> + args [ _key2 ] = arguments [ _key2 ] ; <nl> + } <nl> + <nl> + var handler = function handler ( ) { <nl> + args [ 0 ] . apply ( args , _toConsumableArray ( args . slice ( 2 ) ) ) ; <nl> + } ; <nl> + timer . setInterval ( handler , args [ 1 ] ) ; <nl> + return _this . uid . toString ( ) ; <nl> + } , <nl> + clearTimeout : function clearTimeout ( n ) { <nl> + timer . clearTimeout ( n ) ; <nl> + } , <nl> + clearInterval : function clearInterval ( n ) { <nl> + timer . clearInterval ( n ) ; <nl> + } <nl> + } ; <nl> + <nl> + var fn = new Function ( ' define ' , ' require ' , ' document ' , ' bootstrap ' , ' register ' , ' render ' , ' __weex_define__ ' , / / alias for define <nl> + ' __weex_bootstrap__ ' , / / alias for bootstrap <nl> + ' setTimeout ' , ' setInterval ' , ' clearTimeout ' , ' clearInterval ' , functionBody ) ; <nl> + <nl> + fn ( define , require , document , bootstrap , register , render , define , bootstrap , timerAPIs . setTimeout , timerAPIs . setInterval , timerAPIs . clearTimeout , timerAPIs . clearInterval ) ; <nl> + } ) ( ) ; <nl> + } else { <nl> + var _fn = new Function ( ' define ' , ' require ' , ' document ' , ' bootstrap ' , ' register ' , ' render ' , ' __weex_define__ ' , / / alias for define <nl> + ' __weex_bootstrap__ ' , / / alias for bootstrap <nl> + functionBody ) ; <nl> + <nl> + _fn ( define , require , document , bootstrap , register , render , define , bootstrap ) ; <nl> + } <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + function destroy ( ) { <nl> + _ . debug ( ' Destory an instance ( ' + this . id + ' ) ' ) ; <nl> + <nl> + this . id = ' ' ; <nl> + this . options = null ; <nl> + this . blocks = null ; <nl> + this . vm = null ; <nl> + this . doc = null ; <nl> + this . customComponentMap = null ; <nl> + this . callbacks = null ; <nl> + } <nl> + <nl> + function getRootElement ( ) { <nl> + var doc = this . doc | | { } ; <nl> + var body = doc . body | | { } ; <nl> + return body . toJSON ? body . toJSON ( ) : { } ; <nl> + } <nl> + <nl> + function fireEvent ( ref , type , e , domChanges ) { <nl> + var _this2 = this ; <nl> + <nl> + _ . debug ( ' Fire a " ' + type + ' " event on an element ( ' + ref + ' ) in instance ( ' + this . id + ' ) ' ) ; <nl> + if ( Array . isArray ( ref ) ) { <nl> + ref . some ( function ( ref ) { <nl> + return _this2 . fireEvent ( ref , type , e ) ! = = false ; <nl> + } ) ; <nl> + return ; <nl> + } <nl> + <nl> + var el = this . doc . getRef ( ref ) ; <nl> + <nl> + if ( el ) { <nl> + this . doc . close ( ) ; <nl> + var result = this . doc . fireEvent ( el , type , e , domChanges ) ; <nl> + this . updateActions ( ) ; <nl> + this . doc . listener . updateFinish ( ) ; <nl> + this . doc . 
open ( ) ; <nl> + return result ; <nl> + } <nl> + <nl> + return new Error ( ' invalid element reference " ' + ref + ' " ' ) ; <nl> + } <nl> + <nl> + function callback ( callbackId , data , ifKeepAlive ) { <nl> + _ . debug ( ' Invoke a callback ( ' + callbackId + ' ) with ' , data , ' in instance ( ' + this . id + ' ) ' ) ; <nl> + <nl> + var callback = this . callbacks [ callbackId ] ; <nl> + <nl> + if ( typeof callback = = = ' function ' ) { <nl> + this . doc . close ( ) ; <nl> + callback ( data ) ; / / data is already a object , @ see : lib / runtime / index . js <nl> + <nl> + if ( typeof ifKeepAlive = = = ' undefined ' | | ifKeepAlive = = = false ) { <nl> + this . callbacks [ callbackId ] = undefined ; <nl> + } <nl> + <nl> + this . updateActions ( ) ; <nl> + this . doc . listener . updateFinish ( ) ; <nl> + this . doc . open ( ) ; <nl> + return ; <nl> + } <nl> + <nl> + return new Error ( ' invalid callback id " ' + callbackId + ' " ' ) ; <nl> + } <nl> + <nl> + function refreshData ( data ) { <nl> + _ . debug ( ' Refresh with ' , data , ' in instance [ ' + this . id + ' ] ' ) ; <nl> + <nl> + var vm = this . vm ; <nl> + <nl> + if ( vm & & data ) { <nl> + this . doc . close ( ) ; <nl> + if ( typeof vm . refreshData = = = ' function ' ) { <nl> + vm . refreshData ( data ) ; <nl> + } else { <nl> + _ . extend ( vm , data ) ; <nl> + } <nl> + this . updateActions ( ) ; <nl> + this . doc . listener . refreshFinish ( ) ; <nl> + this . doc . open ( ) ; <nl> + return ; <nl> + } <nl> + <nl> + return new Error ( ' invalid data " ' + data + ' " ' ) ; <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 66 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + <nl> + var _createClass = function ( ) { function defineProperties ( target , props ) { for ( var i = 0 ; i < props . length ; i + + ) { var descriptor = props [ i ] ; descriptor . enumerable = descriptor . enumerable | | false ; descriptor . configurable = true ; if ( " value " in descriptor ) descriptor . writable = true ; Object . defineProperty ( target , descriptor . key , descriptor ) ; } } return function ( Constructor , protoProps , staticProps ) { if ( protoProps ) defineProperties ( Constructor . prototype , protoProps ) ; if ( staticProps ) defineProperties ( Constructor , staticProps ) ; return Constructor ; } ; } ( ) ; <nl> + <nl> + function _classCallCheck ( instance , Constructor ) { if ( ! ( instance instanceof Constructor ) ) { throw new TypeError ( " Cannot call a class as a function " ) ; } } <nl> + <nl> + var Differ = function ( ) { <nl> + function Differ ( id ) { <nl> + _classCallCheck ( this , Differ ) ; <nl> + <nl> + this . id = id ; <nl> + this . map = [ ] ; <nl> + this . hooks = [ ] ; <nl> + } <nl> + <nl> + _createClass ( Differ , [ { <nl> + key : ' isEmpty ' , <nl> + value : function isEmpty ( ) { <nl> + return this . map . length = = = 0 ; <nl> + } <nl> + } , { <nl> + key : ' append ' , <nl> + value : function append ( type , depth , ref , handler ) { <nl> + var _this = this ; <nl> + <nl> + if ( ! this . hasTimer ) { <nl> + this . hasTimer = true ; <nl> + setTimeout ( function ( ) { <nl> + _this . hasTimer = false ; <nl> + _this . flush ( true ) ; <nl> + } , 0 ) ; <nl> + } <nl> + var map = this . map ; <nl> + if ( ! 
map [ depth ] ) { <nl> + map [ depth ] = { } ; <nl> + } <nl> + var group = map [ depth ] ; <nl> + if ( ! group [ type ] ) { <nl> + group [ type ] = { } ; <nl> + } <nl> + if ( type = = = ' element ' ) { <nl> + if ( ! group [ type ] [ ref ] ) { <nl> + group [ type ] [ ref ] = [ ] ; <nl> + } <nl> + group [ type ] [ ref ] . push ( handler ) ; <nl> + } else { <nl> + group [ type ] [ ref ] = handler ; <nl> + } <nl> + } <nl> + } , { <nl> + key : ' flush ' , <nl> + value : function flush ( isTimeout ) { <nl> + var map = this . map . slice ( ) ; <nl> + this . map . length = 0 ; <nl> + map . forEach ( function ( group ) { <nl> + callTypeMap ( group , ' repeat ' ) ; <nl> + callTypeMap ( group , ' shown ' ) ; <nl> + callTypeList ( group , ' element ' ) ; <nl> + } ) ; <nl> + <nl> + var hooks = this . hooks . slice ( ) ; <nl> + this . hooks . length = 0 ; <nl> + hooks . forEach ( function ( fn ) { <nl> + fn ( ) ; <nl> + } ) ; <nl> + <nl> + if ( ! this . isEmpty ( ) ) { <nl> + this . flush ( ) ; <nl> + } <nl> + } <nl> + } , { <nl> + key : ' then ' , <nl> + value : function then ( fn ) { <nl> + this . hooks . push ( fn ) ; <nl> + } <nl> + } ] ) ; <nl> + <nl> + return Differ ; <nl> + } ( ) ; <nl> + <nl> + exports . default = Differ ; <nl> + <nl> + <nl> + function callTypeMap ( group , type ) { <nl> + var map = group [ type ] ; <nl> + for ( var ref in map ) { <nl> + map [ ref ] ( ) ; <nl> + } <nl> + } <nl> + <nl> + function callTypeList ( group , type ) { <nl> + var map = group [ type ] ; <nl> + for ( var ref in map ) { <nl> + var list = map [ ref ] ; <nl> + list . forEach ( function ( handler ) { <nl> + handler ( ) ; <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 67 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . instanceMap = undefined ; <nl> + exports . Document = Document ; <nl> + exports . Node = Node ; <nl> + exports . Element = Element ; <nl> + exports . Comment = Comment ; <nl> + <nl> + var _listener4 = __webpack_require__ ( 68 ) ; <nl> + <nl> + var _listener5 = _interopRequireDefault ( _listener4 ) ; <nl> + <nl> + function _interopRequireDefault ( obj ) { return obj & & obj . __esModule ? obj : { default : obj } ; } <nl> + <nl> + var DEFAULT_TAG_NAME = ' div ' ; / * * <nl> + * @ fileOverview <nl> + * A simple virtual dom implementation <nl> + * / <nl> + <nl> + var instanceMap = exports . instanceMap = { } ; <nl> + var nextNodeRef = 1 ; <nl> + <nl> + function Document ( id , url , handler ) { <nl> + id = id ? id . toString ( ) : ' ' ; <nl> + this . id = id ; <nl> + this . URL = url ; <nl> + <nl> + instanceMap [ id ] = this ; <nl> + this . nodeMap = { } ; <nl> + this . listener = new _listener5 . default ( id , handler | | genCallTasks ( id ) ) ; <nl> + this . createDocumentElement ( ) ; <nl> + } <nl> + <nl> + function genCallTasks ( id ) { <nl> + return function ( tasks ) { <nl> + if ( ! Array . isArray ( tasks ) ) { <nl> + tasks = [ tasks ] ; <nl> + } <nl> + return callNative ( id , tasks , ' - 1 ' ) ; <nl> + } ; <nl> + } <nl> + <nl> + Document . prototype . destroy = function ( ) { <nl> + delete this . listener ; <nl> + delete this . nodeMap ; <nl> + delete instanceMap [ this . id ] ; <nl> + } ; <nl> + <nl> + Document . prototype . open = function ( ) { <nl> + this . listener . batched = false ; <nl> + } ; <nl> + <nl> + Document . prototype . close = function ( ) { <nl> + this . listener . 
batched = true ; <nl> + } ; <nl> + <nl> + Document . prototype . createDocumentElement = function ( ) { <nl> + var _this = this ; <nl> + <nl> + if ( ! this . documentElement ) { <nl> + var el = new Element ( ' document ' ) ; <nl> + el . docId = this . id ; <nl> + el . ownerDocument = this ; <nl> + el . role = ' documentElement ' ; <nl> + el . depth = 0 ; <nl> + el . ref = ' _documentElement ' ; <nl> + this . nodeMap . _documentElement = el ; <nl> + this . documentElement = el ; <nl> + el . appendChild = function ( node ) { <nl> + appendBody ( _this , node ) ; <nl> + } ; <nl> + el . insertBefore = function ( node , before ) { <nl> + appendBody ( _this , node , before ) ; <nl> + } ; <nl> + } <nl> + <nl> + return this . documentElement ; <nl> + } ; <nl> + <nl> + function appendBody ( doc , node , before ) { <nl> + var documentElement = doc . documentElement ; <nl> + <nl> + <nl> + if ( documentElement . pureChildren . length > 0 | | node . parentNode ) { <nl> + return ; <nl> + } <nl> + var children = documentElement . children ; <nl> + var beforeIndex = children . indexOf ( before ) ; <nl> + if ( beforeIndex < 0 ) { <nl> + children . push ( node ) ; <nl> + } else { <nl> + children . splice ( beforeIndex , 0 , node ) ; <nl> + } <nl> + <nl> + if ( node . nodeType = = = 1 ) { <nl> + if ( node . role = = = ' body ' ) { <nl> + node . docId = doc . id ; <nl> + node . ownerDocument = doc ; <nl> + node . parentNode = documentElement ; <nl> + } else { <nl> + node . children . forEach ( function ( child ) { <nl> + child . parentNode = node ; <nl> + } ) ; <nl> + setBody ( doc , node ) ; <nl> + node . docId = doc . id ; <nl> + node . ownerDocument = doc ; <nl> + linkParent ( node , documentElement ) ; <nl> + delete doc . nodeMap [ node . nodeId ] ; <nl> + } <nl> + documentElement . pureChildren . push ( node ) ; <nl> + doc . listener . createBody ( node ) ; <nl> + } else { <nl> + node . parentNode = documentElement ; <nl> + doc . nodeMap [ node . ref ] = node ; <nl> + } <nl> + } <nl> + <nl> + function setBody ( doc , el ) { <nl> + el . role = ' body ' ; <nl> + el . depth = 1 ; <nl> + delete doc . nodeMap [ el . nodeId ] ; <nl> + el . ref = ' _root ' ; <nl> + doc . nodeMap . _root = el ; <nl> + doc . body = el ; <nl> + } <nl> + <nl> + Document . prototype . createBody = function ( type , props ) { <nl> + if ( ! this . body ) { <nl> + var el = new Element ( type , props ) ; <nl> + setBody ( this , el ) ; <nl> + } <nl> + <nl> + return this . body ; <nl> + } ; <nl> + <nl> + Document . prototype . createElement = function ( tagName , props ) { <nl> + return new Element ( tagName , props ) ; <nl> + } ; <nl> + <nl> + Document . prototype . createComment = function ( text ) { <nl> + return new Comment ( text ) ; <nl> + } ; <nl> + <nl> + Document . prototype . fireEvent = function ( el , type , e , domChanges ) { <nl> + if ( ! el ) { <nl> + return ; <nl> + } <nl> + e = e | | { } ; <nl> + e . type = type ; <nl> + e . target = el ; <nl> + e . timestamp = Date . now ( ) ; <nl> + if ( domChanges ) { <nl> + updateElement ( el , domChanges ) ; <nl> + } <nl> + return el . fireEvent ( type , e ) ; <nl> + } ; <nl> + <nl> + Document . prototype . getRef = function ( ref ) { <nl> + return this . nodeMap [ ref ] ; <nl> + } ; <nl> + <nl> + function updateElement ( el , changes ) { <nl> + var attrs = changes . attrs | | { } ; <nl> + for ( var name in attrs ) { <nl> + el . setAttr ( name , attrs [ name ] , true ) ; <nl> + } <nl> + var style = changes . style | | { } ; <nl> + for ( var _name in style ) { <nl> + el . 
setStyle ( _name , style [ _name ] , true ) ; <nl> + } <nl> + } <nl> + <nl> + function Node ( ) { <nl> + this . nodeId = ( nextNodeRef + + ) . toString ( ) ; <nl> + this . ref = this . nodeId ; <nl> + this . children = [ ] ; <nl> + this . pureChildren = [ ] ; <nl> + this . parentNode = null ; <nl> + this . nextSibling = null ; <nl> + this . previousSibling = null ; <nl> + } <nl> + <nl> + Node . prototype . destroy = function ( ) { <nl> + var doc = instanceMap [ this . docId ] ; <nl> + if ( doc ) { <nl> + delete this . docId ; <nl> + delete doc . nodeMap [ this . nodeId ] ; <nl> + } <nl> + this . children . forEach ( function ( child ) { <nl> + child . destroy ( ) ; <nl> + } ) ; <nl> + } ; <nl> + <nl> + function Element ( ) { <nl> + var type = arguments . length < = 0 | | arguments [ 0 ] = = = undefined ? DEFAULT_TAG_NAME : arguments [ 0 ] ; <nl> + var props = arguments [ 1 ] ; <nl> + <nl> + props = props | | { } ; <nl> + this . nodeType = 1 ; <nl> + this . nodeId = ( nextNodeRef + + ) . toString ( ) ; <nl> + this . ref = this . nodeId ; <nl> + this . type = type ; <nl> + this . attr = props . attr | | { } ; <nl> + this . classStyle = props . classStyle | | { } ; <nl> + this . style = props . style | | { } ; <nl> + this . event = { } ; <nl> + this . children = [ ] ; <nl> + this . pureChildren = [ ] ; <nl> + } <nl> + <nl> + Element . prototype = new Node ( ) ; <nl> + <nl> + Element . prototype . appendChild = function ( node ) { <nl> + if ( node . parentNode & & node . parentNode ! = = this ) { <nl> + return ; <nl> + } <nl> + if ( ! node . parentNode ) { <nl> + linkParent ( node , this ) ; <nl> + insertIndex ( node , this . children , this . children . length , true ) ; <nl> + if ( this . docId ) { <nl> + registerNode ( this . docId , node ) ; <nl> + } <nl> + if ( node . nodeType = = = 1 ) { <nl> + insertIndex ( node , this . pureChildren , this . pureChildren . length ) ; <nl> + if ( this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + return listener . addElement ( node , this . ref , - 1 ) ; <nl> + } <nl> + } <nl> + } else { <nl> + moveIndex ( node , this . children , this . children . length , true ) ; <nl> + if ( node . nodeType = = = 1 ) { <nl> + var index = moveIndex ( node , this . pureChildren , this . pureChildren . length ) ; <nl> + if ( this . docId & & index > = 0 ) { <nl> + var _listener = instanceMap [ this . docId ] . listener ; <nl> + return _listener . moveElement ( node . ref , this . ref , index ) ; <nl> + } <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . insertBefore = function ( node , before ) { <nl> + if ( node . parentNode & & node . parentNode ! = = this ) { <nl> + return ; <nl> + } <nl> + if ( node = = = before | | node . nextSibling = = = before ) { <nl> + return ; <nl> + } <nl> + if ( ! node . parentNode ) { <nl> + linkParent ( node , this ) ; <nl> + insertIndex ( node , this . children , this . children . indexOf ( before ) , true ) ; <nl> + if ( this . docId ) { <nl> + registerNode ( this . docId , node ) ; <nl> + } <nl> + if ( node . nodeType = = = 1 ) { <nl> + var pureBefore = nextElement ( before ) ; <nl> + var index = insertIndex ( node , this . pureChildren , pureBefore ? this . pureChildren . indexOf ( pureBefore ) : this . pureChildren . length ) ; <nl> + if ( this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + return listener . addElement ( node , this . ref , index ) ; <nl> + } <nl> + } <nl> + } else { <nl> + moveIndex ( node , this . children , this . children . 
indexOf ( before ) , true ) ; <nl> + if ( node . nodeType = = = 1 ) { <nl> + var _pureBefore = nextElement ( before ) ; <nl> + var _index = moveIndex ( node , this . pureChildren , _pureBefore ? this . pureChildren . indexOf ( _pureBefore ) : this . pureChildren . length ) ; <nl> + if ( this . docId & & _index > = 0 ) { <nl> + var _listener2 = instanceMap [ this . docId ] . listener ; <nl> + return _listener2 . moveElement ( node . ref , this . ref , _index ) ; <nl> + } <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . insertAfter = function ( node , after ) { <nl> + if ( node . parentNode & & node . parentNode ! = = this ) { <nl> + return ; <nl> + } <nl> + if ( node = = = after | | node . previousSibling = = = after ) { <nl> + return ; <nl> + } <nl> + if ( ! node . parentNode ) { <nl> + linkParent ( node , this ) ; <nl> + insertIndex ( node , this . children , this . children . indexOf ( after ) + 1 , true ) ; <nl> + if ( this . docId ) { <nl> + registerNode ( this . docId , node ) ; <nl> + } <nl> + if ( node . nodeType = = = 1 ) { <nl> + var index = insertIndex ( node , this . pureChildren , this . pureChildren . indexOf ( previousElement ( after ) ) + 1 ) ; <nl> + if ( this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + return listener . addElement ( node , this . ref , index ) ; <nl> + } <nl> + } <nl> + } else { <nl> + moveIndex ( node , this . children , this . children . indexOf ( after ) + 1 , true ) ; <nl> + if ( node . nodeType = = = 1 ) { <nl> + var _index2 = moveIndex ( node , this . pureChildren , this . pureChildren . indexOf ( previousElement ( after ) ) + 1 ) ; <nl> + if ( this . docId & & _index2 > = 0 ) { <nl> + var _listener3 = instanceMap [ this . docId ] . listener ; <nl> + return _listener3 . moveElement ( node . ref , this . ref , _index2 ) ; <nl> + } <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . removeChild = function ( node , preserved ) { <nl> + if ( node . parentNode ) { <nl> + removeIndex ( node , this . children , true ) ; <nl> + if ( node . nodeType = = = 1 ) { <nl> + removeIndex ( node , this . pureChildren ) ; <nl> + if ( this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + listener . removeElement ( node . ref ) ; <nl> + } <nl> + } <nl> + } <nl> + if ( ! preserved ) { <nl> + node . destroy ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . clear = function ( ) { <nl> + var _this2 = this ; <nl> + <nl> + if ( this . docId ) { <nl> + ( function ( ) { <nl> + var listener = instanceMap [ _this2 . docId ] . listener ; <nl> + _this2 . pureChildren . forEach ( function ( node ) { <nl> + listener . removeElement ( node . ref ) ; <nl> + } ) ; <nl> + } ) ( ) ; <nl> + } <nl> + this . children . forEach ( function ( node ) { <nl> + node . destroy ( ) ; <nl> + } ) ; <nl> + this . children . length = 0 ; <nl> + this . pureChildren . length = 0 ; <nl> + } ; <nl> + <nl> + function nextElement ( node ) { <nl> + while ( node ) { <nl> + if ( node . nodeType = = = 1 ) { <nl> + return node ; <nl> + } <nl> + node = node . nextSibling ; <nl> + } <nl> + } <nl> + <nl> + function previousElement ( node ) { <nl> + while ( node ) { <nl> + if ( node . nodeType = = = 1 ) { <nl> + return node ; <nl> + } <nl> + node = node . previousSibling ; <nl> + } <nl> + } <nl> + <nl> + function linkParent ( node , parent ) { <nl> + node . parentNode = parent ; <nl> + if ( parent . docId ) { <nl> + node . docId = parent . docId ; <nl> + node . ownerDocument = parent . 
ownerDocument ; <nl> + node . ownerDocument . nodeMap [ node . nodeId ] = node ; <nl> + node . depth = parent . depth + 1 ; <nl> + } <nl> + node . children . forEach ( function ( child ) { <nl> + linkParent ( child , node ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + function registerNode ( docId , node ) { <nl> + var doc = instanceMap [ docId ] ; <nl> + doc . nodeMap [ node . nodeId ] = node ; <nl> + } <nl> + <nl> + function insertIndex ( target , list , newIndex , changeSibling ) { <nl> + if ( newIndex < 0 ) { <nl> + newIndex = 0 ; <nl> + } <nl> + var before = list [ newIndex - 1 ] ; <nl> + var after = list [ newIndex ] ; <nl> + list . splice ( newIndex , 0 , target ) ; <nl> + if ( changeSibling ) { <nl> + before & & ( before . nextSibling = target ) ; <nl> + target . previousSibling = before ; <nl> + target . nextSibling = after ; <nl> + after & & ( after . previousSibling = target ) ; <nl> + } <nl> + return newIndex ; <nl> + } <nl> + <nl> + function moveIndex ( target , list , newIndex , changeSibling ) { <nl> + var index = list . indexOf ( target ) ; <nl> + if ( index < 0 ) { <nl> + return - 1 ; <nl> + } <nl> + if ( changeSibling ) { <nl> + var before = list [ index - 1 ] ; <nl> + var after = list [ index + 1 ] ; <nl> + before & & ( before . nextSibling = after ) ; <nl> + after & & ( after . previousSibling = before ) ; <nl> + } <nl> + list . splice ( index , 1 ) ; <nl> + var newIndexAfter = newIndex ; <nl> + if ( index < = newIndex ) { <nl> + newIndexAfter = newIndex - 1 ; <nl> + } <nl> + var beforeNew = list [ newIndexAfter - 1 ] ; <nl> + var afterNew = list [ newIndexAfter ] ; <nl> + list . splice ( newIndexAfter , 0 , target ) ; <nl> + if ( changeSibling ) { <nl> + beforeNew & & ( beforeNew . nextSibling = target ) ; <nl> + target . previousSibling = beforeNew ; <nl> + target . nextSibling = afterNew ; <nl> + afterNew & & ( afterNew . previousSibling = target ) ; <nl> + } <nl> + if ( index = = = newIndexAfter ) { <nl> + return - 1 ; <nl> + } <nl> + return newIndex ; <nl> + } <nl> + <nl> + function removeIndex ( target , list , changeSibling ) { <nl> + var index = list . indexOf ( target ) ; <nl> + if ( index < 0 ) { <nl> + return ; <nl> + } <nl> + if ( changeSibling ) { <nl> + var before = list [ index - 1 ] ; <nl> + var after = list [ index + 1 ] ; <nl> + before & & ( before . nextSibling = after ) ; <nl> + after & & ( after . previousSibling = before ) ; <nl> + } <nl> + list . splice ( index , 1 ) ; <nl> + } <nl> + <nl> + Element . prototype . setAttr = function ( key , value , silent ) { <nl> + if ( this . attr [ key ] = = = value ) { <nl> + return ; <nl> + } <nl> + this . attr [ key ] = value ; <nl> + if ( ! silent & & this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + listener . setAttr ( this . ref , key , value ) ; <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . setStyle = function ( key , value , silent ) { <nl> + if ( this . style [ key ] = = = value ) { <nl> + return ; <nl> + } <nl> + this . style [ key ] = value ; <nl> + if ( ! silent & & this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + listener . setStyle ( this . ref , key , value ) ; <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . setClassStyle = function ( classStyle ) { <nl> + this . classStyle = classStyle ; <nl> + if ( this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + listener . setStyles ( this . ref , this . toStyle ( ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . 
addEvent = function ( type , handler ) { <nl> + if ( ! this . event [ type ] ) { <nl> + this . event [ type ] = handler ; <nl> + if ( this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + listener . addEvent ( this . ref , type ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . removeEvent = function ( type ) { <nl> + if ( this . event [ type ] ) { <nl> + delete this . event [ type ] ; <nl> + if ( this . docId ) { <nl> + var listener = instanceMap [ this . docId ] . listener ; <nl> + listener . removeEvent ( this . ref , type ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . fireEvent = function ( type , e ) { <nl> + var handler = this . event [ type ] ; <nl> + if ( handler ) { <nl> + return handler . call ( this , e ) ; <nl> + } <nl> + } ; <nl> + <nl> + Element . prototype . toStyle = function ( ) { <nl> + return Object . assign ( { } , this . classStyle , this . style ) ; <nl> + } ; <nl> + <nl> + Element . prototype . toJSON = function ( ) { <nl> + var result = { <nl> + ref : this . ref . toString ( ) , <nl> + type : this . type , <nl> + attr : this . attr , <nl> + style : this . toStyle ( ) <nl> + } ; <nl> + var event = Object . keys ( this . event ) ; <nl> + if ( event . length ) { <nl> + result . event = event ; <nl> + } <nl> + if ( this . pureChildren . length ) { <nl> + result . children = this . pureChildren . map ( function ( child ) { <nl> + return child . toJSON ( ) ; <nl> + } ) ; <nl> + } <nl> + return result ; <nl> + } ; <nl> + <nl> + Element . prototype . toString = function ( ) { <nl> + return ' < ' + this . type + ' attr = ' + JSON . stringify ( this . attr ) + ' style = ' + JSON . stringify ( this . toStyle ( ) ) + ' > ' + this . pureChildren . map ( function ( child ) { <nl> + return child . toString ( ) ; <nl> + } ) . join ( ' ' ) + ' < / ' + this . type + ' > ' ; <nl> + } ; <nl> + <nl> + function Comment ( value ) { <nl> + this . nodeType = 8 ; <nl> + this . nodeId = ( nextNodeRef + + ) . toString ( ) ; <nl> + this . ref = this . nodeId ; <nl> + this . type = ' comment ' ; <nl> + this . value = value ; <nl> + this . children = [ ] ; <nl> + this . pureChildren = [ ] ; <nl> + } <nl> + <nl> + Comment . prototype = new Node ( ) ; <nl> + <nl> + Comment . prototype . toString = function ( ) { <nl> + return ' < ! - - ' + this . value + ' - - > ' ; <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 68 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . default = Listener ; <nl> + exports . createAction = createAction ; <nl> + function Listener ( id , handler ) { <nl> + this . id = id ; <nl> + this . batched = false ; <nl> + this . updates = [ ] ; <nl> + if ( typeof handler = = = ' function ' ) { <nl> + this . handler = handler ; <nl> + } <nl> + } <nl> + <nl> + Listener . prototype . createFinish = function ( callback ) { <nl> + var handler = this . handler ; <nl> + return handler ( [ createAction ( ' createFinish ' , [ ] ) ] , callback ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . updateFinish = function ( callback ) { <nl> + var handler = this . handler ; <nl> + return handler ( [ createAction ( ' updateFinish ' , [ ] ) ] , callback ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . refreshFinish = function ( callback ) { <nl> + var handler = this . 
handler ; <nl> + return handler ( [ createAction ( ' refreshFinish ' , [ ] ) ] , callback ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . createBody = function ( element ) { <nl> + var body = element . toJSON ( ) ; <nl> + var children = body . children ; <nl> + delete body . children ; <nl> + var actions = [ createAction ( ' createBody ' , [ body ] ) ] ; <nl> + if ( children ) { <nl> + actions . push . apply ( actions , children . map ( function ( child ) { <nl> + return createAction ( ' addElement ' , [ body . ref , child , - 1 ] ) ; <nl> + } ) ) ; <nl> + } <nl> + return this . addActions ( actions ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . addElement = function ( element , ref , index ) { <nl> + if ( ! ( index > = 0 ) ) { <nl> + index = - 1 ; <nl> + } <nl> + return this . addActions ( createAction ( ' addElement ' , [ ref , element . toJSON ( ) , index ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . removeElement = function ( ref ) { <nl> + if ( Array . isArray ( ref ) ) { <nl> + var actions = ref . map ( function ( r ) { <nl> + return createAction ( ' removeElement ' , [ r ] ) ; <nl> + } ) ; <nl> + return this . addActions ( actions ) ; <nl> + } <nl> + return this . addActions ( createAction ( ' removeElement ' , [ ref ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . moveElement = function ( targetRef , parentRef , index ) { <nl> + return this . addActions ( createAction ( ' moveElement ' , [ targetRef , parentRef , index ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . setAttr = function ( ref , key , value ) { <nl> + var result = { } ; <nl> + result [ key ] = value ; <nl> + return this . addActions ( createAction ( ' updateAttrs ' , [ ref , result ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . setStyle = function ( ref , key , value ) { <nl> + var result = { } ; <nl> + result [ key ] = value ; <nl> + return this . addActions ( createAction ( ' updateStyle ' , [ ref , result ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . setStyles = function ( ref , style ) { <nl> + return this . addActions ( createAction ( ' updateStyle ' , [ ref , style ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . addEvent = function ( ref , type ) { <nl> + return this . addActions ( createAction ( ' addEvent ' , [ ref , type ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . removeEvent = function ( ref , type ) { <nl> + return this . addActions ( createAction ( ' removeEvent ' , [ ref , type ] ) ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . handler = function ( actions , cb ) { <nl> + return cb & & cb ( ) ; <nl> + } ; <nl> + <nl> + Listener . prototype . addActions = function ( actions ) { <nl> + var updates = this . updates ; <nl> + var handler = this . handler ; <nl> + <nl> + if ( ! Array . isArray ( actions ) ) { <nl> + actions = [ actions ] ; <nl> + } <nl> + <nl> + if ( this . batched ) { <nl> + updates . push . apply ( updates , actions ) ; <nl> + } else { <nl> + return handler ( actions ) ; <nl> + } <nl> + } ; <nl> + <nl> + function createAction ( name , args ) { <nl> + return { module : ' dom ' , method : name , args : args } ; <nl> + } <nl> + <nl> + / * * * / } , <nl> + / * 69 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + module . exports = { <nl> + " name " : " weex " , <nl> + " version " : " 0 . 4 . 0 " , <nl> + " description " : " A framework for building Mobile cross - platform UI " , <nl> + " license " : " Apache - 2 . 
0 " , <nl> + " repository " : { <nl> + " type " : " git " , <nl> + " url " : " git @ github . com : alibaba / weex . git " <nl> + } , <nl> + " homepage " : " http : / / alibaba . github . io / weex / " , <nl> + " bugs " : { <nl> + " url " : " https : / / github . com / alibaba / weex / issues " <nl> + } , <nl> + " private " : " true " , <nl> + " keywords " : [ <nl> + " weex " , <nl> + " hybrid " , <nl> + " webcomponent " , <nl> + " appframework " , <nl> + " mvvm " , <nl> + " javascript " , <nl> + " webkit " , <nl> + " v8 " , <nl> + " jscore " , <nl> + " html5 " , <nl> + " android " , <nl> + " ios " , <nl> + " yunos " <nl> + ] , <nl> + " engines " : { <nl> + " node " : " > = 4 " <nl> + } , <nl> + " scripts " : { <nl> + " postinstall " : " bash . / bin / install - hooks . sh " , <nl> + " build : browser " : " webpack - - config build / webpack . browser . config . js " , <nl> + " build : native " : " webpack - - config build / webpack . native . config . js " , <nl> + " build : examples " : " webpack - - config build / webpack . examples . config . js " , <nl> + " build : test " : " webpack - - config build / webpack . test . config . js " , <nl> + " dist : browser " : " npm run build : browser & & bash . / bin / dist - browser . sh " , <nl> + " dist " : " npm run dist : browser " , <nl> + " dev : browser " : " webpack - - watch - - config build / webpack . browser . config . js " , <nl> + " dev : native " : " webpack - - watch - - config build / webpack . native . config . js " , <nl> + " dev : examples " : " webpack - - watch - - config build / webpack . examples . config . js " , <nl> + " dev : test " : " webpack - - watch - - config build / webpack . test . config . js " , <nl> + " build " : " npm run build : native & & npm run build : browser & & npm run build : examples & & npm run build : test " , <nl> + " lint " : " eslint html5 " , <nl> + " test : unit " : " mocha - - compilers js : babel - core / register html5 / test / unit / * / * . js html5 / test / unit / * / * / * . js " , <nl> + " test : cover " : " babel - node node_modules / isparta / bin / isparta cover - - report text node_modules / mocha / bin / _mocha - - - - reporter dot html5 / test / unit / * / * . js html5 / test / unit / * / * / * . js " , <nl> + " test : e2e " : " node html5 / test / e2e / runner . js " , <nl> + " test " : " npm run lint & & npm run test : cover & & npm run test : e2e " , <nl> + " serve " : " serve . / - p 12580 " , <nl> + " clean : examples " : " echo \ " \ \ 033 [ 36 ; 1m [ Clean ] \ \ 033 [ 0m \ \ 033 [ 33mexamples \ \ 033 [ 0m \ " & & rm - vrf examples / build / * " , <nl> + " clean : test " : " echo \ " \ \ 033 [ 36 ; 1m [ Clean ] \ \ 033 [ 0m \ \ 033 [ 33mtest \ \ 033 [ 0m \ " & & rm - vrf test / build / * " , <nl> + " clean " : " npm run clean : examples & & npm run clean : test " , <nl> + " copy : js " : " cp - vf . / dist / native . js . / android / sdk / assets / main . js " , <nl> + " copy : examples " : " rm - rf . / android / playground / app / src / main / assets / * & & cp - vrf . / examples / build / * . / android / playground / app / src / main / assets / " , <nl> + " copy " : " npm run copy : js & & npm run copy : examples " <nl> + } , <nl> + " subversion " : { <nl> + " browser " : " 0 . 2 . 23 " , <nl> + " framework " : " 0 . 10 . 13 " , <nl> + " transformer " : " > = 0 . 1 . 5 < 0 . 4 " <nl> + } , <nl> + " dependencies " : { <nl> + " animationjs " : " ^ 0 . 1 . 5 " , <nl> + " carrousel " : " ^ 0 . 1 . 11 " , <nl> + " core - js " : " ^ 2 . 4 . 
0 " , <nl> + " cubicbezier " : " ^ 0 . 1 . 1 " , <nl> + " envd " : " ^ 0 . 1 . 1 " , <nl> + " fixedsticky " : " ^ 0 . 1 . 0 " , <nl> + " httpurl " : " ^ 0 . 1 . 1 " , <nl> + " kountdown " : " ^ 0 . 1 . 2 " , <nl> + " lazyimg " : " ^ 0 . 1 . 2 " , <nl> + " lie " : " ^ 3 . 0 . 4 " , <nl> + " modals " : " ^ 0 . 1 . 5 " , <nl> + " scroll - to " : " 0 . 0 . 2 " , <nl> + " semver " : " ^ 5 . 1 . 0 " , <nl> + " transitionize " : " 0 . 0 . 3 " , <nl> + " weex - components " : " ^ 0 . 1 . 2 " <nl> + } , <nl> + " devDependencies " : { <nl> + " babel - cli " : " ~ 6 . 4 . 5 " , <nl> + " babel - loader " : " ^ 6 . 2 . 4 " , <nl> + " babel - preset - es2015 " : " ^ 6 . 9 . 0 " , <nl> + " chai " : " ^ 3 . 5 . 0 " , <nl> + " chromedriver " : " ^ 2 . 21 . 2 " , <nl> + " cross - spawn " : " ^ 4 . 0 . 0 " , <nl> + " css - loader " : " ^ 0 . 23 . 1 " , <nl> + " eslint " : " ^ 2 . 11 . 1 " , <nl> + " http - server " : " ^ 0 . 9 . 0 " , <nl> + " isparta " : " ^ 4 . 0 . 0 " , <nl> + " istanbul " : " ^ 0 . 4 . 3 " , <nl> + " json - loader " : " ^ 0 . 5 . 4 " , <nl> + " mocha " : " ^ 2 . 5 . 3 " , <nl> + " nightwatch " : " ^ 0 . 9 . 4 " , <nl> + " phantomjs - prebuilt " : " ^ 2 . 1 . 7 " , <nl> + " selenium - server " : " ^ 2 . 53 . 0 " , <nl> + " serve " : " ^ 1 . 4 . 0 " , <nl> + " sinon " : " ^ 1 . 17 . 4 " , <nl> + " sinon - chai " : " ^ 2 . 8 . 0 " , <nl> + " style - loader " : " ^ 0 . 13 . 1 " , <nl> + " uglify - js " : " ^ 2 . 6 . 4 " , <nl> + " webpack " : " ^ 1 . 13 . 1 " , <nl> + " weex - loader " : " ^ 0 . 2 . 0 " <nl> + } <nl> + } ; <nl> + <nl> + / * * * / } , <nl> + / * 70 * / <nl> + / * * * / function ( module , exports , __webpack_require__ ) { <nl> + <nl> + / * WEBPACK VAR INJECTION * / ( function ( global ) { ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . $ = $ ; <nl> + exports . $ el = $ el ; <nl> + exports . $ vm = $ vm ; <nl> + exports . $ renderThen = $ renderThen ; <nl> + exports . $ scrollTo = $ scrollTo ; <nl> + exports . $ transition = $ transition ; <nl> + exports . $ getConfig = $ getConfig ; <nl> + exports . $ sendHttp = $ sendHttp ; <nl> + exports . $ openURL = $ openURL ; <nl> + exports . $ setTitle = $ setTitle ; <nl> + exports . $ call = $ call ; <nl> + <nl> + var _util = __webpack_require__ ( 49 ) ; <nl> + <nl> + var _ = _interopRequireWildcard ( _util ) ; <nl> + <nl> + function _interopRequireWildcard ( obj ) { if ( obj & & obj . __esModule ) { return obj ; } else { var newObj = { } ; if ( obj ! = null ) { for ( var key in obj ) { if ( Object . prototype . hasOwnProperty . call ( obj , key ) ) newObj [ key ] = obj [ key ] ; } } newObj . default = obj ; return newObj ; } } <nl> + <nl> + / * * <nl> + * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + * common <nl> + * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + * / <nl> + <nl> + / * * <nl> + * @ deprecated use $ vm instead <nl> + * find the vm by id <nl> + * Note : there is only one id in whole component <nl> + * @ param { string } id <nl> + * @ return { Vm } <nl> + * / <nl> + function $ ( id ) { <nl> + _ . warn ( ' Vm # $ is deprecated , please use Vm # $ vm instead ' ) ; <nl> + var info = this . _ids [ id ] ; <nl> + if ( info ) { <nl> + return info . 
vm ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * find the element by id <nl> + * Note : there is only one id in whole component <nl> + * @ param { string } id <nl> + * @ return { Element } <nl> + * / <nl> + / * * <nl> + * @ fileOverview The api for invoking with " $ " prefix <nl> + * / <nl> + function $ el ( id ) { <nl> + var info = this . _ids [ id ] ; <nl> + if ( info ) { <nl> + return info . el ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * find the vm of the custom component by id <nl> + * Note : there is only one id in whole component <nl> + * @ param { string } id <nl> + * @ return { Vm } <nl> + * / <nl> + function $ vm ( id ) { <nl> + var info = this . _ids [ id ] ; <nl> + if ( info ) { <nl> + return info . vm ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Fire when differ rendering finished <nl> + * <nl> + * @ param { Function } fn <nl> + * / <nl> + function $ renderThen ( fn ) { <nl> + var app = this . _app ; <nl> + var differ = app . differ ; <nl> + return differ . then ( function ( ) { <nl> + fn ( ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + / * * <nl> + * scroll an element specified by id into view , <nl> + * moreover specify a number of offset optionally <nl> + * @ param { string } id <nl> + * @ param { number } offset <nl> + * / <nl> + function $ scrollTo ( id , offset ) { <nl> + _ . warn ( ' Vm # $ scrollTo is deprecated , ' + ' please use " require ( \ ' @ weex - module / dom \ ' ) ' + ' . scrollTo ( el , options ) " instead ' ) ; <nl> + var el = this . $ el ( id ) ; <nl> + if ( el ) { <nl> + var dom = this . _app . requireModule ( ' dom ' ) ; <nl> + dom . scrollToElement ( el . ref , { offset : offset } ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * perform transition animation on an element specified by id <nl> + * @ param { string } id <nl> + * @ param { object } options <nl> + * @ param { object } options . styles <nl> + * @ param { object } options . duration ( ms ) <nl> + * @ param { object } [ options . timingFunction ] <nl> + * @ param { object } [ options . delay = 0 ( ms ) ] <nl> + * @ param { Function } callback <nl> + * / <nl> + function $ transition ( id , options , callback ) { <nl> + var _this = this ; <nl> + <nl> + var el = this . $ el ( id ) ; <nl> + if ( el & & options & & options . styles ) { <nl> + var animation = this . _app . requireModule ( ' animation ' ) ; <nl> + animation . transition ( el . ref , options , function ( ) { <nl> + _this . _setStyle ( el , options . styles ) ; <nl> + callback & & callback . apply ( undefined , arguments ) ; <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * get some config <nl> + * @ return { object } some config for app instance <nl> + * @ property { string } bundleUrl <nl> + * @ property { boolean } debug <nl> + * @ property { object } env <nl> + * @ property { string } env . weexVersion ( ex . 1 . 0 . 0 ) <nl> + * @ property { string } env . appName ( ex . TB / TM ) <nl> + * @ property { string } env . appVersion ( ex . 5 . 0 . 0 ) <nl> + * @ property { string } env . platform ( ex . iOS / Android ) <nl> + * @ property { string } env . osVersion ( ex . 7 . 0 . 0 ) <nl> + * @ property { string } env . deviceModel * * native only * * <nl> + * @ property { number } env . [ deviceWidth = 750 ] <nl> + * @ property { number } env . deviceHeight <nl> + * / <nl> + function $ getConfig ( callback ) { <nl> + var config = _ . extend ( { <nl> + env : global . WXEnvironment | | { } <nl> + } , this . _app . options ) ; <nl> + if ( _ . typof ( callback ) = = = ' function ' ) { <nl> + _ . 
warn ( ' the callback of Vm # $ getConfig ( callback ) is deprecated , ' + ' this api now can directly RETURN config info . ' ) ; <nl> + callback ( config ) ; <nl> + } <nl> + return config ; <nl> + } <nl> + <nl> + / * * <nl> + * @ deprecated <nl> + * request network via http protocol <nl> + * @ param { object } params <nl> + * @ param { Function } callback <nl> + * / <nl> + function $ sendHttp ( params , callback ) { <nl> + _ . warn ( ' Vm # $ sendHttp is deprecated , ' + ' please use " require ( \ ' @ weex - module / stream \ ' ) ' + ' . sendHttp ( params , callback ) " instead ' ) ; <nl> + var stream = this . _app . requireModule ( ' stream ' ) ; <nl> + stream . sendHttp ( params , callback ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ deprecated <nl> + * open a url <nl> + * @ param { string } url <nl> + * / <nl> + function $ openURL ( url ) { <nl> + _ . warn ( ' Vm # $ openURL is deprecated , ' + ' please use " require ( \ ' @ weex - module / event \ ' ) ' + ' . openURL ( url ) " instead ' ) ; <nl> + var event = this . _app . requireModule ( ' event ' ) ; <nl> + event . openURL ( url ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ deprecated <nl> + * set a title for page <nl> + * @ param { string } title <nl> + * / <nl> + function $ setTitle ( title ) { <nl> + _ . warn ( ' Vm # $ setTitle is deprecated , ' + ' please use " require ( \ ' @ weex - module / pageInfo \ ' ) ' + ' . setTitle ( title ) " instead ' ) ; <nl> + var pageInfo = this . _app . requireModule ( ' pageInfo ' ) ; <nl> + pageInfo . setTitle ( title ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ deprecated use " require ( ' @ weex - module / moduleName ' ) instead " <nl> + * invoke a native method by specifing the name of module and method <nl> + * @ param { string } moduleName <nl> + * @ param { string } methodName <nl> + * @ param { . . . * } the rest arguments <nl> + * / <nl> + function $ call ( moduleName , methodName ) { <nl> + _ . warn ( ' Vm # $ call is deprecated , ' + ' please use " require ( \ ' @ weex - module / moduleName \ ' ) " instead ' ) ; <nl> + var module = this . _app . requireModule ( moduleName ) ; <nl> + if ( module & & module [ methodName ] ) { <nl> + for ( var _len = arguments . length , args = Array ( _len > 2 ? _len - 2 : 0 ) , _key = 2 ; _key < _len ; _key + + ) { <nl> + args [ _key - 2 ] = arguments [ _key ] ; <nl> + } <nl> + <nl> + module [ methodName ] . apply ( module , args ) ; <nl> + } <nl> + } <nl> + / * WEBPACK VAR INJECTION * / } . call ( exports , ( function ( ) { return this ; } ( ) ) ) ) <nl> + <nl> + / * * * / } , <nl> + / * 71 * / <nl> + / * * * / function ( module , exports ) { <nl> + <nl> + ' use strict ' ; <nl> + <nl> + Object . defineProperty ( exports , " __esModule " , { <nl> + value : true <nl> + } ) ; <nl> + exports . $ userTrack = $ userTrack ; <nl> + exports . $ sendMtop = $ sendMtop ; <nl> + exports . $ callWindvane = $ callWindvane ; <nl> + exports . $ setSpm = $ setSpm ; <nl> + exports . $ getUserInfo = $ getUserInfo ; <nl> + exports . $ login = $ login ; <nl> + exports . 
$ logout = $ logout ; <nl> + / * * <nl> + * @ fileOverview The api for invoking with " $ " prefix <nl> + * / <nl> + <nl> + / * * <nl> + * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + * private for ali <nl> + * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + * / <nl> + <nl> + / * * <nl> + * invoke user - track on Taobao Moblie <nl> + * @ param { string } type : enter , click , expose <nl> + * @ param { string } name <nl> + * @ param { string } comName <nl> + * @ param { object } param <nl> + * / <nl> + function $ userTrack ( type , name , comName , param ) { <nl> + var userTrack = this . _app . requireModule ( ' userTrack ' ) ; <nl> + userTrack . commit ( type , name , comName , param ) ; <nl> + } <nl> + <nl> + / * * <nl> + * request a restfull api via the mtop gateway <nl> + * @ param { object } params <nl> + * @ param { Function } callback <nl> + * / <nl> + function $ sendMtop ( params , callback ) { <nl> + / * istanbul ignore else * / <nl> + if ( typeof window = = = ' undefined ' ) { <nl> + / / in native , use windvane <nl> + var windvane = this . _app . requireModule ( ' windvane ' ) ; <nl> + windvane . call ( { <nl> + class : ' MtopWVPlugin ' , <nl> + method : ' send ' , <nl> + data : params <nl> + } , callback ) ; <nl> + } else { <nl> + / / in web brwoser , use stream . sendMtop <nl> + var stream = this . _app . requireModule ( ' stream ' ) ; <nl> + stream . sendMtop ( params , callback ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * request a native api via windvane protocol <nl> + * @ param { object } params <nl> + * @ param { Function } callback <nl> + * / <nl> + function $ callWindvane ( params , callback ) { <nl> + var windvane = this . _app . requireModule ( ' windvane ' ) ; <nl> + windvane . call ( params , callback ) ; <nl> + } <nl> + <nl> + / * * <nl> + * set spm for the page <nl> + * @ param { string } a <nl> + * @ param { string } b <nl> + * / <nl> + function $ setSpm ( a , b ) { <nl> + var pageInfo = this . _app . requireModule ( ' pageInfo ' ) ; <nl> + pageInfo . setSpm ( a , b ) ; <nl> + } <nl> + <nl> + / * * <nl> + * get the information of the current logined user <nl> + * @ param { Function } callback <nl> + * / <nl> + function $ getUserInfo ( callback ) { <nl> + var user = this . _app . requireModule ( ' user ' ) ; <nl> + user . getUserInfo ( callback ) ; <nl> + } <nl> + <nl> + / * * <nl> + * perform login <nl> + * @ param { Function } callback <nl> + * / <nl> + function $ login ( callback ) { <nl> + var user = this . _app . requireModule ( ' user ' ) ; <nl> + user . login ( callback ) ; <nl> + } <nl> + <nl> + / * * <nl> + * perform logout <nl> + * @ param { Function } callback <nl> + * / <nl> + function $ logout ( callback ) { <nl> + var user = this . _app . requireModule ( ' user ' ) ; <nl> + user . 
logout ( callback ) ; <nl> + } <nl> + <nl> + / * * * / } <nl> + / * * * * * * / ] ) ; <nl> + / / # sourceMappingURL = data : application / json ; base64 , ...
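The Listener in module 68 reduces every DOM mutation to a plain { module: 'dom', method, args } task: addActions() forwards the task array to the handler right away, or queues it in updates while batched is true. A minimal sketch of that flow, assuming the Listener constructor exported above is in scope and using a hypothetical recording handler that is not part of the bundle:

// Hypothetical handler that just records the task objects it receives.
var received = [];
function recordingHandler(actions, callback) {
  received.push.apply(received, actions);
  if (callback) { callback(); }
}

var listener = new Listener('1', recordingHandler);

// Un-batched: each helper reaches the handler straight away.
listener.setAttr('_root', 'title', 'hello');
// received[0] -> { module: 'dom', method: 'updateAttrs', args: ['_root', { title: 'hello' }] }
listener.addEvent('_root', 'click');
// received[1] -> { module: 'dom', method: 'addEvent', args: ['_root', 'click'] }

// Batched: tasks pile up in listener.updates until a caller flushes them itself.
listener.batched = true;
listener.setStyle('_root', 'color', '#ff0000');
// listener.updates.length === 1 and the handler has not been called again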
sU0FBSSxJQUFJLE1BQVIsRUFBZ0I7QUFDZCxnQ0FBTyxNQUFQLFNBQWtCLEdBQWxCO0FBQ0Q7QUFDRjtBQUNELFVBQU8sTUFBUDtBQUNEOzs7Ozs7Ozs7OztBQVdNLFVBQVMsUUFBVCxDQUFtQixHQUFuQixFQUF3QjtBQUM3QixVQUFPLFFBQVEsSUFBUixJQUFnQixRQUFPLEdBQVAseUNBQU8sR0FBUCxPQUFlLFFBQXRDO0FBQ0Q7Ozs7Ozs7Ozs7QUFVRCxLQUFNLFdBQVcsT0FBTyxTQUFQLENBQWlCLFFBQWxDO0FBQ0EsS0FBTSxnQkFBZ0IsaUJBQXRCO0FBQ08sVUFBUyxhQUFULENBQXdCLEdBQXhCLEVBQTZCO0FBQ2xDLFVBQU8sU0FBUyxJQUFULENBQWMsR0FBZCxNQUF1QixhQUE5QjtBQUNEOzs7Ozs7Ozs7QUFTTSxLQUFNLDRCQUFVLE1BQU0sT0FBdEI7Ozs7QUFJQSxVQUFTLFNBQVQsQ0FBb0IsQ0FBcEIsRUFBdUI7QUFDNUIsVUFBTyxPQUFPLENBQVAsS0FBYSxXQUFiLElBQTRCLE1BQU0sSUFBbEMsSUFBMEMsT0FBUSxDQUFSLEtBQWUsVUFBekQsR0FDSCxFQURHLEdBRUgsUUFBTyxDQUFQLHlDQUFPLENBQVAsT0FBYSxRQUFiLEdBQ0UsYUFBYSxNQUFiLEdBQ0UsRUFBRSxRQUFGLEVBREYsR0FFRSxhQUFhLElBQWIsR0FDRSxLQUFLLEtBQUwsQ0FBVyxLQUFLLFNBQUwsQ0FBZSxDQUFmLENBQVgsQ0FERixHQUVFLEtBQUssU0FBTCxDQUFlLENBQWYsQ0FMTixHQU1FLEVBQUUsUUFBRixFQVJOO0FBU0Q7O0FBRU0sVUFBUyxLQUFULENBQWdCLENBQWhCLEVBQW1CO0FBQ3hCLE9BQU0sSUFBSSxPQUFPLFNBQVAsQ0FBaUIsUUFBakIsQ0FBMEIsSUFBMUIsQ0FBK0IsQ0FBL0IsQ0FBVjtBQUNBLFVBQU8sRUFBRSxTQUFGLENBQVksQ0FBWixFQUFlLEVBQUUsTUFBRixHQUFXLENBQTFCLEVBQTZCLFdBQTdCLEVBQVA7QUFDRDs7QUFFTSxVQUFTLFNBQVQsQ0FBb0IsQ0FBcEIsRUFBdUI7QUFDNUIsT0FBTSxPQUFPLE1BQU0sQ0FBTixDQUFiOztBQUVBLFdBQVEsSUFBUjtBQUNFLFVBQUssV0FBTDtBQUNBLFVBQUssTUFBTDtBQUNFLGNBQU8sRUFBUDtBQUNGLFVBQUssUUFBTDtBQUNFLGNBQU8sRUFBRSxRQUFGLEVBQVA7QUFDRixVQUFLLE1BQUw7QUFDRSxjQUFPLEVBQUUsV0FBRixFQUFQO0FBQ0YsVUFBSyxRQUFMO0FBQ0EsVUFBSyxRQUFMO0FBQ0EsVUFBSyxTQUFMO0FBQ0EsVUFBSyxPQUFMO0FBQ0EsVUFBSyxRQUFMO0FBQ0EsVUFBSyxVQUFMO0FBQ0UsY0FBTyxDQUFQO0FBZEo7QUFnQkQ7O0FBRUQsS0FBTSxZQUFZLE9BQU8sT0FBUCxLQUFtQixXQUFuQixJQUFrQyxPQUFPLFVBQVAsS0FBc0IsSUFBMUU7Ozs7O0FBS08sVUFBUyxLQUFULEdBQXlCO0FBQUE7O0FBQUEsc0NBQU4sSUFBTTtBQUFOLFNBQU07QUFBQTs7QUFDOUIsZ0JBQWEsUUFBUSxLQUFyQixJQUE4QixxQkFBUSxLQUFSLGtCQUFjLGdCQUFkLFNBQW1DLElBQW5DLEVBQTlCO0FBQ0Q7Ozs7O0FBS00sVUFBUyxJQUFULEdBQXdCO0FBQUE7O0FBQUEsc0NBQU4sSUFBTTtBQUFOLFNBQU07QUFBQTs7QUFDN0IsZ0JBQWEsUUFBUSxJQUFyQixJQUE2QixzQkFBUSxJQUFSLG1CQUFhLGdCQUFiLFNBQWtDLElBQWxDLEVBQTdCO0FBQ0Q7Ozs7O0FBS00sVUFBUyxJQUFULEdBQXdCO0FBQUE7O0FBQUEsc0NBQU4sSUFBTTtBQUFOLFNBQU07QUFBQTs7QUFDN0IsZ0JBQWEsUUFBUSxJQUFyQixJQUE2QixzQkFBUSxJQUFSLG1CQUFhLGdCQUFiLFNBQWtDLElBQWxDLEVBQTdCO0FBQ0Q7Ozs7O0FBS00sVUFBUyxLQUFULEdBQXlCO0FBQUE7O0FBQUEsc0NBQU4sSUFBTTtBQUFOLFNBQU07QUFBQTs7QUFDOUIsZ0JBQWEsUUFBUSxLQUFyQixJQUE4QixzQkFBUSxLQUFSLG1CQUFjLGdCQUFkLFNBQW1DLElBQW5DLEVBQTlCO0FBQ0Q7Ozs7O0FBS00sVUFBUyxHQUFULEdBQXVCO0FBQUE7O0FBQUEsc0NBQU4sSUFBTTtBQUFOLFNBQU07QUFBQTs7QUFDNUIsZ0JBQWEsUUFBUSxHQUFyQixJQUE0QixzQkFBUSxHQUFSLG1CQUFZLGdCQUFaLFNBQWlDLElBQWpDLEVBQTVCO0FBQ0QsRTs7Ozs7Ozs7Ozs7OztTQ2hWZSxrQixHQUFBLGtCO1NBbUZBLFMsR0FBQSxTO1NBbURBLFEsR0FBQSxRO1NBUUEsTSxHQUFBLE07O0FBMUtoQjs7OztBQUNBOztLQUFZLEM7O0FBQ1o7Ozs7QUFDQTs7S0FBWSxTOzs7Ozs7Ozs7Ozs7Ozs7Ozs7OztBQUVaLEtBQU0scUJBQXFCLG9CQUEzQjtBQUNBLEtBQU0sa0JBQWtCLGlCQUF4QjtBQUNBLEtBQU0sb0JBQW9CLFlBQTFCO0FBQ0EsS0FBTSxnQkFBZ0IsT0FBdEI7O0FBRUEsS0FBTSxrQkFBa0IsU0FBbEIsZUFBa0I7QUFBQSxVQUFRLENBQUMsQ0FBQyxLQUFLLEtBQUwsQ0FBVyxrQkFBWCxDQUFWO0FBQUEsRUFBeEI7QUFDQSxLQUFNLGVBQWUsU0FBZixZQUFlO0FBQUEsVUFBUSxDQUFDLENBQUMsS0FBSyxLQUFMLENBQVcsZUFBWCxDQUFWO0FBQUEsRUFBckI7QUFDQSxLQUFNLGlCQUFpQixTQUFqQixjQUFpQjtBQUFBLFVBQVEsQ0FBQyxDQUFDLEtBQUssS0FBTCxDQUFXLGlCQUFYLENBQVY7QUFBQSxFQUF2QjtBQUNBLEtBQU0sY0FBYyxTQUFkLFdBQWM7QUFBQSxVQUFRLENBQUMsZ0JBQWdCLElBQWhCLENBQUQsSUFDRSxDQUFDLGFBQWEsSUFBYixDQURILElBRUUsQ0FBQyxlQUFlLElBQWYsQ0FGWDtBQUFBLEVBQXBCOztBQUlBLFVBQVMsZ0JBQVQsQ0FBMkIsR0FBM0IsRUFBZ0M7QUFDOUIsVUFBTyxJQUFJLE9BQUosQ0FBWSxrQkFBWixFQUFnQyxFQUFoQyxFQUNFLE9BREYsQ0FDVSxlQURWLEVBQzJCLEVBRDNCLENBQVA7QU
FFRDs7QUFFRCxVQUFTLGNBQVQsQ0FBeUIsR0FBekIsRUFBOEI7QUFDNUIsVUFBTyxJQUFJLE9BQUosQ0FBWSxhQUFaLEVBQTJCLEVBQTNCLENBQVA7QUFDRDs7QUFFRCxLQUFJLGdCQUFnQixFQUFwQjs7QUFFTyxVQUFTLGtCQUFULEdBQStCO0FBQ3BDLG1CQUFnQixFQUFoQjtBQUNEOzs7Ozs7O0FBT00sS0FBTSwwQkFBUyxTQUFULE1BQVMsQ0FBVSxJQUFWLEVBQWdCLElBQWhCLEVBQXNCLE9BQXRCLEVBQStCO0FBQUE7O0FBQ25ELEtBQUUsS0FBRixDQUFRLG9CQUFSLEVBQThCLElBQTlCOztBQUVBLE9BQUksRUFBRSxLQUFGLENBQVEsSUFBUixNQUFrQixVQUF0QixFQUFrQztBQUNoQyxlQUFVLElBQVY7QUFDQSxZQUFPLEVBQVA7QUFDRDs7QUFFRCxPQUFNLFdBQVcsU0FBWCxRQUFXLENBQUMsSUFBRCxFQUFVO0FBQ3pCLFNBQUksa0JBQUo7O0FBRUEsU0FBSSxnQkFBZ0IsSUFBaEIsQ0FBSixFQUEyQjtBQUN6QixtQkFBWSxpQkFBaUIsSUFBakIsQ0FBWjtBQUNBLGNBQU8sTUFBSyxnQkFBTCxDQUFzQixTQUF0QixDQUFQO0FBQ0Q7QUFDRCxTQUFJLGFBQWEsSUFBYixDQUFKLEVBQXdCO0FBQ3RCLG1CQUFZLGlCQUFpQixJQUFqQixDQUFaO0FBQ0EsY0FBTyxNQUFLLGFBQUwsQ0FBbUIsU0FBbkIsQ0FBUDtBQUNEO0FBQ0QsU0FBSSxlQUFlLElBQWYsQ0FBSixFQUEwQjtBQUN4QixtQkFBWSxlQUFlLElBQWYsQ0FBWjtBQUNBLGNBQU8sY0FBYyxJQUFkLENBQVA7QUFDRDtBQUNELFNBQUksWUFBWSxJQUFaLENBQUosRUFBdUI7QUFDckIsbUJBQVksZUFBZSxJQUFmLENBQVo7QUFDQSxjQUFPLGNBQWMsSUFBZCxDQUFQO0FBQ0Q7QUFDRixJQW5CRDtBQW9CQSxPQUFNLFVBQVUsRUFBRSxTQUFTLEVBQVgsRUFBaEI7O0FBRUEsT0FBSSxrQkFBSjtBQUNBLE9BQUksZ0JBQWdCLElBQWhCLENBQUosRUFBMkI7QUFDekIsaUJBQVksaUJBQWlCLElBQWpCLENBQVo7O0FBRUEsYUFBUSxRQUFSLEVBQWtCLFFBQVEsT0FBMUIsRUFBbUMsT0FBbkM7O0FBRUEsVUFBSyxpQkFBTCxDQUF1QixTQUF2QixFQUFrQyxRQUFRLE9BQTFDO0FBQ0QsSUFORCxNQU9LLElBQUksYUFBYSxJQUFiLENBQUosRUFBd0I7QUFDM0IsaUJBQVksaUJBQWlCLElBQWpCLENBQVo7O0FBRUEsYUFBUSxRQUFSLEVBQWtCLFFBQVEsT0FBMUIsRUFBbUMsT0FBbkM7O0FBRUEsa0JBQUcsZUFBSCxxQkFDRyxTQURILEVBQ2UsUUFBUSxPQUR2QjtBQUdELElBUkksTUFTQSxJQUFJLGVBQWUsSUFBZixDQUFKLEVBQTBCO0FBQzdCLGlCQUFZLGVBQWUsSUFBZixDQUFaOztBQUVBLGFBQVEsUUFBUixFQUFrQixRQUFRLE9BQTFCLEVBQW1DLE9BQW5DOztBQUVBLG1CQUFjLFNBQWQsSUFBMkIsUUFBUSxPQUFuQztBQUNELElBTkksTUFPQSxJQUFJLFlBQVksSUFBWixDQUFKLEVBQXVCO0FBQzFCLGlCQUFZLGVBQWUsSUFBZixDQUFaOztBQUVBLGFBQVEsUUFBUixFQUFrQixRQUFRLE9BQTFCLEVBQW1DLE9BQW5DOztBQUVBLFNBQU0sVUFBVSxRQUFRLE9BQXhCO0FBQ0EsU0FBSSxRQUFRLFFBQVIsSUFDQSxRQUFRLEtBRFIsSUFFQSxRQUFRLE9BRlosRUFFcUI7Ozs7QUFJbkIsWUFBSyxpQkFBTCxDQUF1QixTQUF2QixFQUFrQyxPQUFsQztBQUNELE1BUEQsTUFRSztBQUNILHFCQUFjLFNBQWQsSUFBMkIsUUFBUSxPQUFuQztBQUNEO0FBQ0Y7QUFDRixFQXhFTTs7QUEwRUEsVUFBUyxTQUFULENBQW9CLElBQXBCLEVBQTBCLE1BQTFCLEVBQWtDLElBQWxDLEVBQXdDO0FBQzdDLEtBQUUsS0FBRixvQkFBeUIsSUFBekI7O0FBRUEsT0FBSSxrQkFBSjs7QUFFQSxPQUFJLGdCQUFnQixJQUFoQixDQUFKLEVBQTJCO0FBQ3pCLGlCQUFZLGlCQUFpQixJQUFqQixDQUFaO0FBQ0QsSUFGRCxNQUdLLElBQUksWUFBWSxJQUFaLENBQUosRUFBdUI7QUFDMUIsaUJBQVksZUFBZSxJQUFmLENBQVo7OztBQUdBLFNBQUksQ0FBQyxLQUFLLGtCQUFMLENBQXdCLFNBQXhCLENBQUwsRUFBeUM7QUFDdkMsY0FBTyxJQUFJLEtBQUosNkJBQW1DLElBQW5DLENBQVA7QUFDRDtBQUNGLElBUEksTUFRQTtBQUNILFlBQU8sSUFBSSxLQUFKLDRCQUFtQyxJQUFuQyxDQUFQO0FBQ0Q7O0FBRUQsWUFBUyxFQUFFLGFBQUYsQ0FBZ0IsTUFBaEIsSUFBMEIsTUFBMUIsR0FBbUMsRUFBNUM7O0FBRUEsT0FBSSxPQUFPLE9BQU8sa0JBQWQsS0FBcUMsUUFBckMsSUFDRixPQUFPLE9BQU8sc0JBQWQsS0FBeUMsUUFEdkMsSUFFRixDQUFDLGlCQUFPLFNBQVAsQ0FBaUIsT0FBTyxrQkFBeEIsRUFDQyxPQUFPLHNCQURSLENBRkgsRUFHb0M7QUFDbEMsWUFBTyxJQUFJLEtBQUosQ0FBVSx3QkFBc0IsT0FBTyxrQkFBN0IsbUNBQ1EsT0FBTyxzQkFEZixDQUFWLENBQVA7QUFFRDs7QUFFRCxPQUFNLGtCQUFrQixVQUFVLEtBQVYsQ0FBZ0IsT0FBTyxTQUF2QixDQUF4Qjs7QUFFQSxPQUFJLGdCQUFnQixXQUFwQixFQUFpQztBQUMvQixVQUFLLFNBQUwsQ0FBZSxDQUFDO0FBQ2QsZUFBUSxjQURNO0FBRWQsZUFBUSxPQUZNO0FBR2QsYUFBTSxDQUNKLGdCQUFnQixTQURaLEVBRUosZ0JBQWdCLElBRlosRUFHSixnQkFBZ0IsWUFIWjtBQUhRLE1BQUQsQ0FBZjtBQVNBLFlBQU8sSUFBSSxLQUFKLGdCQUF1QixnQkFBZ0IsSUFBdkMsV0FBaUQsZ0JBQWdCLFlBQWpFLENBQVA7QUFDRDs7QUFFRCxRQUFLLEVBQUwsR0FBVSxpQkFBTyxTQUFQLEVBQWtCLElBQWxCLEVBQXdCLEVBQUUsTUFBTSxJQUFSLEVBQXhCLEVBQXdDL
ElBQXhDLEVBQThDLElBQTlDLENBQVY7QUFDRDs7Ozs7QUFLTSxVQUFTLFFBQVQsQ0FBbUIsSUFBbkIsRUFBeUIsT0FBekIsRUFBa0M7QUFDdkMsS0FBRSxJQUFGLENBQU8sNkRBQVA7QUFDQSxRQUFLLGlCQUFMLENBQXVCLElBQXZCLEVBQTZCLE9BQTdCO0FBQ0Q7Ozs7O0FBS00sVUFBUyxNQUFULENBQWlCLElBQWpCLEVBQXVCLElBQXZCLEVBQTZCO0FBQ2xDLEtBQUUsSUFBRixDQUFPLDJEQUFQO0FBQ0EsVUFBTyxLQUFLLFNBQUwsQ0FBZSxJQUFmLEVBQXFCLEVBQXJCLEVBQXlCLElBQXpCLENBQVA7QUFDRDs7Ozs7QUFLTSxVQUFTLFNBQVQsQ0FBa0IsSUFBbEIsRUFBd0I7QUFBQTs7QUFDN0IsS0FBRSxJQUFGLENBQU8sNERBQVA7QUFDQSxVQUFPLFVBQUMsSUFBRCxFQUFVO0FBQ2YsWUFBTyxPQUFLLFNBQUwsQ0FBZSxJQUFmLEVBQXFCLEVBQXJCLEVBQXlCLElBQXpCLENBQVA7QUFDRCxJQUZEO0FBR0Q7Ozs7Ozs7Ozs7OztBQ3JNRCxXQUFVLE9BQU8sT0FBUCxHQUFpQixNQUEzQjs7O1lBR1ksSUFBSSxLQUFKO1lBQ0EsSUFBSSxRQUFPLE9BQVAseUNBQU8sT0FBUCxPQUFtQixRQUFuQjtZQUNBLFFBQVEsR0FEUjtZQUVBLFFBQVEsR0FBUixDQUFZLFVBRlo7WUFHQSxjQUFjLElBQWQsQ0FBbUIsUUFBUSxHQUFSLENBQVksVUFBL0IsQ0FISjtjQUlFLFFBQVEsaUJBQVc7Z0JBQ2pCLElBQUksT0FBTyxNQUFNLFNBQU4sQ0FBZ0IsS0FBaEIsQ0FBc0IsSUFBdEIsQ0FBMkIsU0FBM0IsRUFBc0MsQ0FBdEMsQ0FBWDtnQkFDQSxLQUFLLE9BQUwsQ0FBYSxRQUFiO2dCQUNBLFFBQVEsR0FBUixDQUFZLEtBQVosQ0FBa0IsT0FBbEIsRUFBMkIsSUFBM0I7O0FBQ0MsSUFKSDtjQUpGO2NBVUUsUUFBUSxpQkFBVyxDQUFFLENBQXJCOzs7O0FBSWQsU0FBUSxtQkFBUixHQUE4QixPQUE5Qjs7QUFFQSxLQUFJLGFBQWEsR0FBakI7QUFDQSxLQUFJLG1CQUFtQixPQUFPLGdCQUFQLElBQTJCLGdCQUFsRDs7O0FBR0EsS0FBSSxLQUFLLFFBQVEsRUFBUixHQUFhLEVBQXRCO0FBQ0EsS0FBSSxNQUFNLFFBQVEsR0FBUixHQUFjLEVBQXhCO0FBQ0EsS0FBSSxJQUFJLENBQVI7Ozs7Ozs7O0FBUUEsS0FBSSxvQkFBb0IsR0FBeEI7QUFDQSxLQUFJLGlCQUFKLElBQXlCLGFBQXpCO0FBQ0EsS0FBSSx5QkFBeUIsR0FBN0I7QUFDQSxLQUFJLHNCQUFKLElBQThCLFFBQTlCOzs7Ozs7QUFPQSxLQUFJLHVCQUF1QixHQUEzQjtBQUNBLEtBQUksb0JBQUosSUFBNEIsNEJBQTVCOzs7OztBQU1BLEtBQUksY0FBYyxHQUFsQjtBQUNBLEtBQUksV0FBSixJQUFtQixNQUFNLElBQUksaUJBQUosQ0FBTixHQUErQixNQUEvQixHQUNBLEdBREEsR0FDTSxJQUFJLGlCQUFKLENBRE4sR0FDK0IsTUFEL0IsR0FFQSxHQUZBLEdBRU0sSUFBSSxpQkFBSixDQUZOLEdBRStCLEdBRmxEOztBQUlBLEtBQUksbUJBQW1CLEdBQXZCO0FBQ0EsS0FBSSxnQkFBSixJQUF3QixNQUFNLElBQUksc0JBQUosQ0FBTixHQUFvQyxNQUFwQyxHQUNBLEdBREEsR0FDTSxJQUFJLHNCQUFKLENBRE4sR0FDb0MsTUFEcEMsR0FFQSxHQUZBLEdBRU0sSUFBSSxzQkFBSixDQUZOLEdBRW9DLEdBRjVEOzs7OztBQU9BLEtBQUksdUJBQXVCLEdBQTNCO0FBQ0EsS0FBSSxvQkFBSixJQUE0QixRQUFRLElBQUksaUJBQUosQ0FBUixHQUNBLEdBREEsR0FDTSxJQUFJLG9CQUFKLENBRE4sR0FDa0MsR0FEOUQ7O0FBR0EsS0FBSSw0QkFBNEIsR0FBaEM7QUFDQSxLQUFJLHlCQUFKLElBQWlDLFFBQVEsSUFBSSxzQkFBSixDQUFSLEdBQ0EsR0FEQSxHQUNNLElBQUksb0JBQUosQ0FETixHQUNrQyxHQURuRTs7Ozs7O0FBUUEsS0FBSSxhQUFhLEdBQWpCO0FBQ0EsS0FBSSxVQUFKLElBQWtCLFVBQVUsSUFBSSxvQkFBSixDQUFWLEdBQ0EsUUFEQSxHQUNXLElBQUksb0JBQUosQ0FEWCxHQUN1QyxNQUR6RDs7QUFHQSxLQUFJLGtCQUFrQixHQUF0QjtBQUNBLEtBQUksZUFBSixJQUF1QixXQUFXLElBQUkseUJBQUosQ0FBWCxHQUNBLFFBREEsR0FDVyxJQUFJLHlCQUFKLENBRFgsR0FDNEMsTUFEbkU7Ozs7O0FBTUEsS0FBSSxrQkFBa0IsR0FBdEI7QUFDQSxLQUFJLGVBQUosSUFBdUIsZUFBdkI7Ozs7OztBQU1BLEtBQUksUUFBUSxHQUFaO0FBQ0EsS0FBSSxLQUFKLElBQWEsWUFBWSxJQUFJLGVBQUosQ0FBWixHQUNBLFFBREEsR0FDVyxJQUFJLGVBQUosQ0FEWCxHQUNrQyxNQUQvQzs7Ozs7Ozs7Ozs7QUFhQSxLQUFJLE9BQU8sR0FBWDtBQUNBLEtBQUksWUFBWSxPQUFPLElBQUksV0FBSixDQUFQLEdBQ0EsSUFBSSxVQUFKLENBREEsR0FDa0IsR0FEbEIsR0FFQSxJQUFJLEtBQUosQ0FGQSxHQUVhLEdBRjdCOztBQUlBLEtBQUksSUFBSixJQUFZLE1BQU0sU0FBTixHQUFrQixHQUE5Qjs7Ozs7QUFLQSxLQUFJLGFBQWEsYUFBYSxJQUFJLGdCQUFKLENBQWIsR0FDQSxJQUFJLGVBQUosQ0FEQSxHQUN1QixHQUR2QixHQUVBLElBQUksS0FBSixDQUZBLEdBRWEsR0FGOUI7O0FBSUEsS0FBSSxRQUFRLEdBQVo7QUFDQSxLQUFJLEtBQUosSUFBYSxNQUFNLFVBQU4sR0FBbUIsR0FBaEM7O0FBRUEsS0FBSSxPQUFPLEdBQVg7QUFDQSxLQUFJLElBQUosSUFBWSxjQUFaOzs7OztBQUtBLEtBQUksd0JBQXdCLEdBQTVCO0FBQ0EsS0FBSSxxQkFBSixJQUE2QixJQUFJLHNCQUFKLElBQThCLFVBQTNEO0FBQ0EsS0FBSSxtQkFBbUIsR0FBdkI7QUFDQSxLQUFJLGdCQUFKLElBQXdCLElBQUksaUJBQUosSUFB
eUIsVUFBakQ7O0FBRUEsS0FBSSxjQUFjLEdBQWxCO0FBQ0EsS0FBSSxXQUFKLElBQW1CLGNBQWMsSUFBSSxnQkFBSixDQUFkLEdBQXNDLEdBQXRDLEdBQ0EsU0FEQSxHQUNZLElBQUksZ0JBQUosQ0FEWixHQUNvQyxHQURwQyxHQUVBLFNBRkEsR0FFWSxJQUFJLGdCQUFKLENBRlosR0FFb0MsR0FGcEMsR0FHQSxLQUhBLEdBR1EsSUFBSSxVQUFKLENBSFIsR0FHMEIsSUFIMUIsR0FJQSxJQUFJLEtBQUosQ0FKQSxHQUlhLEdBSmIsR0FLQSxNQUxuQjs7QUFPQSxLQUFJLG1CQUFtQixHQUF2QjtBQUNBLEtBQUksZ0JBQUosSUFBd0IsY0FBYyxJQUFJLHFCQUFKLENBQWQsR0FBMkMsR0FBM0MsR0FDQSxTQURBLEdBQ1ksSUFBSSxxQkFBSixDQURaLEdBQ3lDLEdBRHpDLEdBRUEsU0FGQSxHQUVZLElBQUkscUJBQUosQ0FGWixHQUV5QyxHQUZ6QyxHQUdBLEtBSEEsR0FHUSxJQUFJLGVBQUosQ0FIUixHQUcrQixJQUgvQixHQUlBLElBQUksS0FBSixDQUpBLEdBSWEsR0FKYixHQUtBLE1BTHhCOztBQU9BLEtBQUksU0FBUyxHQUFiO0FBQ0EsS0FBSSxNQUFKLElBQWMsTUFBTSxJQUFJLElBQUosQ0FBTixHQUFrQixNQUFsQixHQUEyQixJQUFJLFdBQUosQ0FBM0IsR0FBOEMsR0FBNUQ7QUFDQSxLQUFJLGNBQWMsR0FBbEI7QUFDQSxLQUFJLFdBQUosSUFBbUIsTUFBTSxJQUFJLElBQUosQ0FBTixHQUFrQixNQUFsQixHQUEyQixJQUFJLGdCQUFKLENBQTNCLEdBQW1ELEdBQXRFOzs7O0FBSUEsS0FBSSxZQUFZLEdBQWhCO0FBQ0EsS0FBSSxTQUFKLElBQWlCLFNBQWpCOztBQUVBLEtBQUksWUFBWSxHQUFoQjtBQUNBLEtBQUksU0FBSixJQUFpQixXQUFXLElBQUksU0FBSixDQUFYLEdBQTRCLE1BQTdDO0FBQ0EsSUFBRyxTQUFILElBQWdCLElBQUksTUFBSixDQUFXLElBQUksU0FBSixDQUFYLEVBQTJCLEdBQTNCLENBQWhCO0FBQ0EsS0FBSSxtQkFBbUIsS0FBdkI7O0FBRUEsS0FBSSxRQUFRLEdBQVo7QUFDQSxLQUFJLEtBQUosSUFBYSxNQUFNLElBQUksU0FBSixDQUFOLEdBQXVCLElBQUksV0FBSixDQUF2QixHQUEwQyxHQUF2RDtBQUNBLEtBQUksYUFBYSxHQUFqQjtBQUNBLEtBQUksVUFBSixJQUFrQixNQUFNLElBQUksU0FBSixDQUFOLEdBQXVCLElBQUksZ0JBQUosQ0FBdkIsR0FBK0MsR0FBakU7Ozs7QUFJQSxLQUFJLFlBQVksR0FBaEI7QUFDQSxLQUFJLFNBQUosSUFBaUIsU0FBakI7O0FBRUEsS0FBSSxZQUFZLEdBQWhCO0FBQ0EsS0FBSSxTQUFKLElBQWlCLFdBQVcsSUFBSSxTQUFKLENBQVgsR0FBNEIsTUFBN0M7QUFDQSxJQUFHLFNBQUgsSUFBZ0IsSUFBSSxNQUFKLENBQVcsSUFBSSxTQUFKLENBQVgsRUFBMkIsR0FBM0IsQ0FBaEI7QUFDQSxLQUFJLG1CQUFtQixLQUF2Qjs7QUFFQSxLQUFJLFFBQVEsR0FBWjtBQUNBLEtBQUksS0FBSixJQUFhLE1BQU0sSUFBSSxTQUFKLENBQU4sR0FBdUIsSUFBSSxXQUFKLENBQXZCLEdBQTBDLEdBQXZEO0FBQ0EsS0FBSSxhQUFhLEdBQWpCO0FBQ0EsS0FBSSxVQUFKLElBQWtCLE1BQU0sSUFBSSxTQUFKLENBQU4sR0FBdUIsSUFBSSxnQkFBSixDQUF2QixHQUErQyxHQUFqRTs7O0FBR0EsS0FBSSxrQkFBa0IsR0FBdEI7QUFDQSxLQUFJLGVBQUosSUFBdUIsTUFBTSxJQUFJLElBQUosQ0FBTixHQUFrQixPQUFsQixHQUE0QixVQUE1QixHQUF5QyxPQUFoRTtBQUNBLEtBQUksYUFBYSxHQUFqQjtBQUNBLEtBQUksVUFBSixJQUFrQixNQUFNLElBQUksSUFBSixDQUFOLEdBQWtCLE9BQWxCLEdBQTRCLFNBQTVCLEdBQXdDLE9BQTFEOzs7O0FBS0EsS0FBSSxpQkFBaUIsR0FBckI7QUFDQSxLQUFJLGNBQUosSUFBc0IsV0FBVyxJQUFJLElBQUosQ0FBWCxHQUNBLE9BREEsR0FDVSxVQURWLEdBQ3VCLEdBRHZCLEdBQzZCLElBQUksV0FBSixDQUQ3QixHQUNnRCxHQUR0RTs7O0FBSUEsSUFBRyxjQUFILElBQXFCLElBQUksTUFBSixDQUFXLElBQUksY0FBSixDQUFYLEVBQWdDLEdBQWhDLENBQXJCO0FBQ0EsS0FBSSx3QkFBd0IsUUFBNUI7Ozs7OztBQU9BLEtBQUksY0FBYyxHQUFsQjtBQUNBLEtBQUksV0FBSixJQUFtQixXQUFXLElBQUksV0FBSixDQUFYLEdBQThCLEdBQTlCLEdBQ0EsV0FEQSxHQUVBLEdBRkEsR0FFTSxJQUFJLFdBQUosQ0FGTixHQUV5QixHQUZ6QixHQUdBLE9BSG5COztBQUtBLEtBQUksbUJBQW1CLEdBQXZCO0FBQ0EsS0FBSSxnQkFBSixJQUF3QixXQUFXLElBQUksZ0JBQUosQ0FBWCxHQUFtQyxHQUFuQyxHQUNBLFdBREEsR0FFQSxHQUZBLEdBRU0sSUFBSSxnQkFBSixDQUZOLEdBRThCLEdBRjlCLEdBR0EsT0FIeEI7OztBQU1BLEtBQUksT0FBTyxHQUFYO0FBQ0EsS0FBSSxJQUFKLElBQVksaUJBQVo7Ozs7QUFJQSxNQUFLLElBQUksSUFBSSxDQUFiLEVBQWdCLElBQUksQ0FBcEIsRUFBdUIsR0FBdkIsRUFBNEI7QUFDMUIsU0FBTSxDQUFOLEVBQVMsSUFBSSxDQUFKLENBQVQ7QUFDQSxPQUFJLENBQUMsR0FBRyxDQUFILENBQUwsRUFDRSxHQUFHLENBQUgsSUFBUSxJQUFJLE1BQUosQ0FBVyxJQUFJLENBQUosQ0FBWCxDQUFSO0FBQ0g7O0FBRUQsU0FBUSxLQUFSLEdBQWdCLEtBQWhCO0FBQ0EsVUFBUyxLQUFULENBQWUsT0FBZixFQUF3QixLQUF4QixFQUErQjtBQUM3QixPQUFJLG1CQUFtQixNQUF2QixFQUNFLE9BQU8sT0FBUDs7QUFFRixPQUFJLE9BQU8sT0FBUCxLQUFtQixRQUF2QixFQUNFLE9BQU8sSUFBUDs7QUFFRixPQUFJLFFBQVEsTUFBUix
HQUFpQixVQUFyQixFQUNFLE9BQU8sSUFBUDs7QUFFRixPQUFJLElBQUksUUFBUSxHQUFHLEtBQUgsQ0FBUixHQUFvQixHQUFHLElBQUgsQ0FBNUI7QUFDQSxPQUFJLENBQUMsRUFBRSxJQUFGLENBQU8sT0FBUCxDQUFMLEVBQ0UsT0FBTyxJQUFQOztBQUVGLE9BQUk7QUFDRixZQUFPLElBQUksTUFBSixDQUFXLE9BQVgsRUFBb0IsS0FBcEIsQ0FBUDtBQUNELElBRkQsQ0FFRSxPQUFPLEVBQVAsRUFBVztBQUNYLFlBQU8sSUFBUDtBQUNEO0FBQ0Y7O0FBRUQsU0FBUSxLQUFSLEdBQWdCLEtBQWhCO0FBQ0EsVUFBUyxLQUFULENBQWUsT0FBZixFQUF3QixLQUF4QixFQUErQjtBQUM3QixPQUFJLElBQUksTUFBTSxPQUFOLEVBQWUsS0FBZixDQUFSO0FBQ0EsVUFBTyxJQUFJLEVBQUUsT0FBTixHQUFnQixJQUF2QjtBQUNEOztBQUdELFNBQVEsS0FBUixHQUFnQixLQUFoQjtBQUNBLFVBQVMsS0FBVCxDQUFlLE9BQWYsRUFBd0IsS0FBeEIsRUFBK0I7QUFDN0IsT0FBSSxJQUFJLE1BQU0sUUFBUSxJQUFSLEdBQWUsT0FBZixDQUF1QixRQUF2QixFQUFpQyxFQUFqQyxDQUFOLEVBQTRDLEtBQTVDLENBQVI7QUFDQSxVQUFPLElBQUksRUFBRSxPQUFOLEdBQWdCLElBQXZCO0FBQ0Q7O0FBRUQsU0FBUSxNQUFSLEdBQWlCLE1BQWpCOztBQUVBLFVBQVMsTUFBVCxDQUFnQixPQUFoQixFQUF5QixLQUF6QixFQUFnQztBQUM5QixPQUFJLG1CQUFtQixNQUF2QixFQUErQjtBQUM3QixTQUFJLFFBQVEsS0FBUixLQUFrQixLQUF0QixFQUNFLE9BQU8sT0FBUCxDQURGLEtBR0UsVUFBVSxRQUFRLE9BQWxCO0FBQ0gsSUFMRCxNQUtPLElBQUksT0FBTyxPQUFQLEtBQW1CLFFBQXZCLEVBQWlDO0FBQ3RDLFdBQU0sSUFBSSxTQUFKLENBQWMsc0JBQXNCLE9BQXBDLENBQU47QUFDRDs7QUFFRCxPQUFJLFFBQVEsTUFBUixHQUFpQixVQUFyQixFQUNFLE1BQU0sSUFBSSxTQUFKLENBQWMsNEJBQTRCLFVBQTVCLEdBQXlDLGFBQXZELENBQU47O0FBRUYsT0FBSSxFQUFFLGdCQUFnQixNQUFsQixDQUFKLEVBQ0UsT0FBTyxJQUFJLE1BQUosQ0FBVyxPQUFYLEVBQW9CLEtBQXBCLENBQVA7O0FBRUYsU0FBTSxRQUFOLEVBQWdCLE9BQWhCLEVBQXlCLEtBQXpCO0FBQ0EsUUFBSyxLQUFMLEdBQWEsS0FBYjtBQUNBLE9BQUksSUFBSSxRQUFRLElBQVIsR0FBZSxLQUFmLENBQXFCLFFBQVEsR0FBRyxLQUFILENBQVIsR0FBb0IsR0FBRyxJQUFILENBQXpDLENBQVI7O0FBRUEsT0FBSSxDQUFDLENBQUwsRUFDRSxNQUFNLElBQUksU0FBSixDQUFjLHNCQUFzQixPQUFwQyxDQUFOOztBQUVGLFFBQUssR0FBTCxHQUFXLE9BQVg7OztBQUdBLFFBQUssS0FBTCxHQUFhLENBQUMsRUFBRSxDQUFGLENBQWQ7QUFDQSxRQUFLLEtBQUwsR0FBYSxDQUFDLEVBQUUsQ0FBRixDQUFkO0FBQ0EsUUFBSyxLQUFMLEdBQWEsQ0FBQyxFQUFFLENBQUYsQ0FBZDs7QUFFQSxPQUFJLEtBQUssS0FBTCxHQUFhLGdCQUFiLElBQWlDLEtBQUssS0FBTCxHQUFhLENBQWxELEVBQ0UsTUFBTSxJQUFJLFNBQUosQ0FBYyx1QkFBZCxDQUFOOztBQUVGLE9BQUksS0FBSyxLQUFMLEdBQWEsZ0JBQWIsSUFBaUMsS0FBSyxLQUFMLEdBQWEsQ0FBbEQsRUFDRSxNQUFNLElBQUksU0FBSixDQUFjLHVCQUFkLENBQU47O0FBRUYsT0FBSSxLQUFLLEtBQUwsR0FBYSxnQkFBYixJQUFpQyxLQUFLLEtBQUwsR0FBYSxDQUFsRCxFQUNFLE1BQU0sSUFBSSxTQUFKLENBQWMsdUJBQWQsQ0FBTjs7O0FBR0YsT0FBSSxDQUFDLEVBQUUsQ0FBRixDQUFMLEVBQ0UsS0FBSyxVQUFMLEdBQWtCLEVBQWxCLENBREYsS0FHRSxLQUFLLFVBQUwsR0FBa0IsRUFBRSxDQUFGLEVBQUssS0FBTCxDQUFXLEdBQVgsRUFBZ0IsR0FBaEIsQ0FBb0IsVUFBUyxFQUFULEVBQWE7QUFDakQsU0FBSSxXQUFXLElBQVgsQ0FBZ0IsRUFBaEIsQ0FBSixFQUF5QjtBQUN2QixXQUFJLE1BQU0sQ0FBQyxFQUFYO0FBQ0EsV0FBSSxPQUFPLENBQVAsSUFBWSxNQUFNLGdCQUF0QixFQUNFLE9BQU8sR0FBUDtBQUNIO0FBQ0QsWUFBTyxFQUFQO0FBQ0QsSUFQaUIsQ0FBbEI7O0FBU0YsUUFBSyxLQUFMLEdBQWEsRUFBRSxDQUFGLElBQU8sRUFBRSxDQUFGLEVBQUssS0FBTCxDQUFXLEdBQVgsQ0FBUCxHQUF5QixFQUF0QztBQUNBLFFBQUssTUFBTDtBQUNEOztBQUVELFFBQU8sU0FBUCxDQUFpQixNQUFqQixHQUEwQixZQUFXO0FBQ25DLFFBQUssT0FBTCxHQUFlLEtBQUssS0FBTCxHQUFhLEdBQWIsR0FBbUIsS0FBSyxLQUF4QixHQUFnQyxHQUFoQyxHQUFzQyxLQUFLLEtBQTFEO0FBQ0EsT0FBSSxLQUFLLFVBQUwsQ0FBZ0IsTUFBcEIsRUFDRSxLQUFLLE9BQUwsSUFBZ0IsTUFBTSxLQUFLLFVBQUwsQ0FBZ0IsSUFBaEIsQ0FBcUIsR0FBckIsQ0FBdEI7QUFDRixVQUFPLEtBQUssT0FBWjtBQUNELEVBTEQ7O0FBT0EsUUFBTyxTQUFQLENBQWlCLFFBQWpCLEdBQTRCLFlBQVc7QUFDckMsVUFBTyxLQUFLLE9BQVo7QUFDRCxFQUZEOztBQUlBLFFBQU8sU0FBUCxDQUFpQixPQUFqQixHQUEyQixVQUFTLEtBQVQsRUFBZ0I7QUFDekMsU0FBTSxnQkFBTixFQUF3QixLQUFLLE9BQTdCLEVBQXNDLEtBQUssS0FBM0MsRUFBa0QsS0FBbEQ7QUFDQSxPQUFJLEVBQUUsaUJBQWlCLE1BQW5CLENBQUosRUFDRSxRQUFRLElBQUksTUFBSixDQUFXLEtBQVgsRUFBa0IsS0FBSyxLQUF2QixDQUFSOztBQUVGLFVBQU8sS0FBSyxXQUFMLENBQWlCLEtBQWpCLE
tBQTJCLEtBQUssVUFBTCxDQUFnQixLQUFoQixDQUFsQztBQUNELEVBTkQ7O0FBUUEsUUFBTyxTQUFQLENBQWlCLFdBQWpCLEdBQStCLFVBQVMsS0FBVCxFQUFnQjtBQUM3QyxPQUFJLEVBQUUsaUJBQWlCLE1BQW5CLENBQUosRUFDRSxRQUFRLElBQUksTUFBSixDQUFXLEtBQVgsRUFBa0IsS0FBSyxLQUF2QixDQUFSOztBQUVGLFVBQU8sbUJBQW1CLEtBQUssS0FBeEIsRUFBK0IsTUFBTSxLQUFyQyxLQUNBLG1CQUFtQixLQUFLLEtBQXhCLEVBQStCLE1BQU0sS0FBckMsQ0FEQSxJQUVBLG1CQUFtQixLQUFLLEtBQXhCLEVBQStCLE1BQU0sS0FBckMsQ0FGUDtBQUdELEVBUEQ7O0FBU0EsUUFBTyxTQUFQLENBQWlCLFVBQWpCLEdBQThCLFVBQVMsS0FBVCxFQUFnQjtBQUM1QyxPQUFJLEVBQUUsaUJBQWlCLE1BQW5CLENBQUosRUFDRSxRQUFRLElBQUksTUFBSixDQUFXLEtBQVgsRUFBa0IsS0FBSyxLQUF2QixDQUFSOzs7QUFHRixPQUFJLEtBQUssVUFBTCxDQUFnQixNQUFoQixJQUEwQixDQUFDLE1BQU0sVUFBTixDQUFpQixNQUFoRCxFQUNFLE9BQU8sQ0FBQyxDQUFSLENBREYsS0FFSyxJQUFJLENBQUMsS0FBSyxVQUFMLENBQWdCLE1BQWpCLElBQTJCLE1BQU0sVUFBTixDQUFpQixNQUFoRCxFQUNILE9BQU8sQ0FBUCxDQURHLEtBRUEsSUFBSSxDQUFDLEtBQUssVUFBTCxDQUFnQixNQUFqQixJQUEyQixDQUFDLE1BQU0sVUFBTixDQUFpQixNQUFqRCxFQUNILE9BQU8sQ0FBUDs7QUFFRixPQUFJLElBQUksQ0FBUjtBQUNBLE1BQUc7QUFDRCxTQUFJLElBQUksS0FBSyxVQUFMLENBQWdCLENBQWhCLENBQVI7QUFDQSxTQUFJLElBQUksTUFBTSxVQUFOLENBQWlCLENBQWpCLENBQVI7QUFDQSxXQUFNLG9CQUFOLEVBQTRCLENBQTVCLEVBQStCLENBQS9CLEVBQWtDLENBQWxDO0FBQ0EsU0FBSSxNQUFNLFNBQU4sSUFBbUIsTUFBTSxTQUE3QixFQUNFLE9BQU8sQ0FBUCxDQURGLEtBRUssSUFBSSxNQUFNLFNBQVYsRUFDSCxPQUFPLENBQVAsQ0FERyxLQUVBLElBQUksTUFBTSxTQUFWLEVBQ0gsT0FBTyxDQUFDLENBQVIsQ0FERyxLQUVBLElBQUksTUFBTSxDQUFWLEVBQ0gsU0FERyxLQUdILE9BQU8sbUJBQW1CLENBQW5CLEVBQXNCLENBQXRCLENBQVA7QUFDSCxJQWRELFFBY1MsRUFBRSxDQWRYO0FBZUQsRUE1QkQ7Ozs7QUFnQ0EsUUFBTyxTQUFQLENBQWlCLEdBQWpCLEdBQXVCLFVBQVMsT0FBVCxFQUFrQixVQUFsQixFQUE4QjtBQUNuRCxXQUFRLE9BQVI7QUFDRSxVQUFLLFVBQUw7QUFDRSxZQUFLLFVBQUwsQ0FBZ0IsTUFBaEIsR0FBeUIsQ0FBekI7QUFDQSxZQUFLLEtBQUwsR0FBYSxDQUFiO0FBQ0EsWUFBSyxLQUFMLEdBQWEsQ0FBYjtBQUNBLFlBQUssS0FBTDtBQUNBLFlBQUssR0FBTCxDQUFTLEtBQVQsRUFBZ0IsVUFBaEI7QUFDQTtBQUNGLFVBQUssVUFBTDtBQUNFLFlBQUssVUFBTCxDQUFnQixNQUFoQixHQUF5QixDQUF6QjtBQUNBLFlBQUssS0FBTCxHQUFhLENBQWI7QUFDQSxZQUFLLEtBQUw7QUFDQSxZQUFLLEdBQUwsQ0FBUyxLQUFULEVBQWdCLFVBQWhCO0FBQ0E7QUFDRixVQUFLLFVBQUw7Ozs7QUFJRSxZQUFLLFVBQUwsQ0FBZ0IsTUFBaEIsR0FBeUIsQ0FBekI7QUFDQSxZQUFLLEdBQUwsQ0FBUyxPQUFULEVBQWtCLFVBQWxCO0FBQ0EsWUFBSyxHQUFMLENBQVMsS0FBVCxFQUFnQixVQUFoQjtBQUNBOzs7QUFHRixVQUFLLFlBQUw7QUFDRSxXQUFJLEtBQUssVUFBTCxDQUFnQixNQUFoQixLQUEyQixDQUEvQixFQUNFLEtBQUssR0FBTCxDQUFTLE9BQVQsRUFBa0IsVUFBbEI7QUFDRixZQUFLLEdBQUwsQ0FBUyxLQUFULEVBQWdCLFVBQWhCO0FBQ0E7O0FBRUYsVUFBSyxPQUFMOzs7OztBQUtFLFdBQUksS0FBSyxLQUFMLEtBQWUsQ0FBZixJQUFvQixLQUFLLEtBQUwsS0FBZSxDQUFuQyxJQUF3QyxLQUFLLFVBQUwsQ0FBZ0IsTUFBaEIsS0FBMkIsQ0FBdkUsRUFDRSxLQUFLLEtBQUw7QUFDRixZQUFLLEtBQUwsR0FBYSxDQUFiO0FBQ0EsWUFBSyxLQUFMLEdBQWEsQ0FBYjtBQUNBLFlBQUssVUFBTCxHQUFrQixFQUFsQjtBQUNBO0FBQ0YsVUFBSyxPQUFMOzs7OztBQUtFLFdBQUksS0FBSyxLQUFMLEtBQWUsQ0FBZixJQUFvQixLQUFLLFVBQUwsQ0FBZ0IsTUFBaEIsS0FBMkIsQ0FBbkQsRUFDRSxLQUFLLEtBQUw7QUFDRixZQUFLLEtBQUwsR0FBYSxDQUFiO0FBQ0EsWUFBSyxVQUFMLEdBQWtCLEVBQWxCO0FBQ0E7QUFDRixVQUFLLE9BQUw7Ozs7O0FBS0UsV0FBSSxLQUFLLFVBQUwsQ0FBZ0IsTUFBaEIsS0FBMkIsQ0FBL0IsRUFDRSxLQUFLLEtBQUw7QUFDRixZQUFLLFVBQUwsR0FBa0IsRUFBbEI7QUFDQTs7O0FBR0YsVUFBSyxLQUFMO0FBQ0UsV0FBSSxLQUFLLFVBQUwsQ0FBZ0IsTUFBaEIsS0FBMkIsQ0FBL0IsRUFDRSxLQUFLLFVBQUwsR0FBa0IsQ0FBQyxDQUFELENBQWxCLENBREYsS0FFSztBQUNILGFBQUksSUFBSSxLQUFLLFVBQUwsQ0FBZ0IsTUFBeEI7QUFDQSxnQkFBTyxFQUFFLENBQUYsSUFBTyxDQUFkLEVBQWlCO0FBQ2YsZUFBSSxPQUFPLEtBQUssVUFBTCxDQUFnQixDQUFoQixDQUFQLEtBQThCLFFBQWxDLEVBQTRDO0FBQzFDLGtCQUFLLFVBQUwsQ0FBZ0IsQ0FBaEI7QUFDQSxpQkFBSSxDQUFDLENBQUw7QUFDRDtBQUNGO0FBQ0QsYUFBSSxNQUFNLENBQUMsQ0FBWCxFO0FBQ0UsZ0JBQUssVUFBTCxDQUFnQixJQUFoQixDQUFxQixDQUFyQjtBQUNIO0FBQ0QsV0FBSSxVQUFKL
EVBQWdCOzs7QUFHZCxhQUFJLEtBQUssVUFBTCxDQUFnQixDQUFoQixNQUF1QixVQUEzQixFQUF1QztBQUNyQyxlQUFJLE1BQU0sS0FBSyxVQUFMLENBQWdCLENBQWhCLENBQU4sQ0FBSixFQUNFLEtBQUssVUFBTCxHQUFrQixDQUFDLFVBQUQsRUFBYSxDQUFiLENBQWxCO0FBQ0gsVUFIRCxNQUlFLEtBQUssVUFBTCxHQUFrQixDQUFDLFVBQUQsRUFBYSxDQUFiLENBQWxCO0FBQ0g7QUFDRDs7QUFFRjtBQUNFLGFBQU0sSUFBSSxLQUFKLENBQVUsaUNBQWlDLE9BQTNDLENBQU47QUF4Rko7QUEwRkEsUUFBSyxNQUFMO0FBQ0EsUUFBSyxHQUFMLEdBQVcsS0FBSyxPQUFoQjtBQUNBLFVBQU8sSUFBUDtBQUNELEVBOUZEOztBQWdHQSxTQUFRLEdBQVIsR0FBYyxHQUFkO0FBQ0EsVUFBUyxHQUFULENBQWEsT0FBYixFQUFzQixPQUF0QixFQUErQixLQUEvQixFQUFzQyxVQUF0QyxFQUFrRDtBQUNoRCxPQUFJLE9BQU8sS0FBUCxLQUFrQixRQUF0QixFQUFnQztBQUM5QixrQkFBYSxLQUFiO0FBQ0EsYUFBUSxTQUFSO0FBQ0Q7O0FBRUQsT0FBSTtBQUNGLFlBQU8sSUFBSSxNQUFKLENBQVcsT0FBWCxFQUFvQixLQUFwQixFQUEyQixHQUEzQixDQUErQixPQUEvQixFQUF3QyxVQUF4QyxFQUFvRCxPQUEzRDtBQUNELElBRkQsQ0FFRSxPQUFPLEVBQVAsRUFBVztBQUNYLFlBQU8sSUFBUDtBQUNEO0FBQ0Y7O0FBRUQsU0FBUSxJQUFSLEdBQWUsSUFBZjtBQUNBLFVBQVMsSUFBVCxDQUFjLFFBQWQsRUFBd0IsUUFBeEIsRUFBa0M7QUFDaEMsT0FBSSxHQUFHLFFBQUgsRUFBYSxRQUFiLENBQUosRUFBNEI7QUFDMUIsWUFBTyxJQUFQO0FBQ0QsSUFGRCxNQUVPO0FBQ0wsU0FBSSxLQUFLLE1BQU0sUUFBTixDQUFUO0FBQ0EsU0FBSSxLQUFLLE1BQU0sUUFBTixDQUFUO0FBQ0EsU0FBSSxHQUFHLFVBQUgsQ0FBYyxNQUFkLElBQXdCLEdBQUcsVUFBSCxDQUFjLE1BQTFDLEVBQWtEO0FBQ2hELFlBQUssSUFBSSxHQUFULElBQWdCLEVBQWhCLEVBQW9CO0FBQ2xCLGFBQUksUUFBUSxPQUFSLElBQW1CLFFBQVEsT0FBM0IsSUFBc0MsUUFBUSxPQUFsRCxFQUEyRDtBQUN6RCxlQUFJLEdBQUcsR0FBSCxNQUFZLEdBQUcsR0FBSCxDQUFoQixFQUF5QjtBQUN2QixvQkFBTyxRQUFNLEdBQWI7QUFDRDtBQUNGO0FBQ0Y7QUFDRCxjQUFPLFlBQVA7QUFDRDtBQUNELFVBQUssSUFBSSxHQUFULElBQWdCLEVBQWhCLEVBQW9CO0FBQ2xCLFdBQUksUUFBUSxPQUFSLElBQW1CLFFBQVEsT0FBM0IsSUFBc0MsUUFBUSxPQUFsRCxFQUEyRDtBQUN6RCxhQUFJLEdBQUcsR0FBSCxNQUFZLEdBQUcsR0FBSCxDQUFoQixFQUF5QjtBQUN2QixrQkFBTyxHQUFQO0FBQ0Q7QUFDRjtBQUNGO0FBQ0Y7QUFDRjs7QUFFRCxTQUFRLGtCQUFSLEdBQTZCLGtCQUE3Qjs7QUFFQSxLQUFJLFVBQVUsVUFBZDtBQUNBLFVBQVMsa0JBQVQsQ0FBNEIsQ0FBNUIsRUFBK0IsQ0FBL0IsRUFBa0M7QUFDaEMsT0FBSSxPQUFPLFFBQVEsSUFBUixDQUFhLENBQWIsQ0FBWDtBQUNBLE9BQUksT0FBTyxRQUFRLElBQVIsQ0FBYSxDQUFiLENBQVg7O0FBRUEsT0FBSSxRQUFRLElBQVosRUFBa0I7QUFDaEIsU0FBSSxDQUFDLENBQUw7QUFDQSxTQUFJLENBQUMsQ0FBTDtBQUNEOztBQUVELFVBQVEsUUFBUSxDQUFDLElBQVYsR0FBa0IsQ0FBQyxDQUFuQixHQUNDLFFBQVEsQ0FBQyxJQUFWLEdBQWtCLENBQWxCLEdBQ0EsSUFBSSxDQUFKLEdBQVEsQ0FBQyxDQUFULEdBQ0EsSUFBSSxDQUFKLEdBQVEsQ0FBUixHQUNBLENBSlA7QUFLRDs7QUFFRCxTQUFRLG1CQUFSLEdBQThCLG1CQUE5QjtBQUNBLFVBQVMsbUJBQVQsQ0FBNkIsQ0FBN0IsRUFBZ0MsQ0FBaEMsRUFBbUM7QUFDakMsVUFBTyxtQkFBbUIsQ0FBbkIsRUFBc0IsQ0FBdEIsQ0FBUDtBQUNEOztBQUVELFNBQVEsS0FBUixHQUFnQixLQUFoQjtBQUNBLFVBQVMsS0FBVCxDQUFlLENBQWYsRUFBa0IsS0FBbEIsRUFBeUI7QUFDdkIsVUFBTyxJQUFJLE1BQUosQ0FBVyxDQUFYLEVBQWMsS0FBZCxFQUFxQixLQUE1QjtBQUNEOztBQUVELFNBQVEsS0FBUixHQUFnQixLQUFoQjtBQUNBLFVBQVMsS0FBVCxDQUFlLENBQWYsRUFBa0IsS0FBbEIsRUFBeUI7QUFDdkIsVUFBTyxJQUFJLE1BQUosQ0FBVyxDQUFYLEVBQWMsS0FBZCxFQUFxQixLQUE1QjtBQUNEOztBQUVELFNBQVEsS0FBUixHQUFnQixLQUFoQjtBQUNBLFVBQVMsS0FBVCxDQUFlLENBQWYsRUFBa0IsS0FBbEIsRUFBeUI7QUFDdkIsVUFBTyxJQUFJLE1BQUosQ0FBVyxDQUFYLEVBQWMsS0FBZCxFQUFxQixLQUE1QjtBQUNEOztBQUVELFNBQVEsT0FBUixHQUFrQixPQUFsQjtBQUNBLFVBQVMsT0FBVCxDQUFpQixDQUFqQixFQUFvQixDQUFwQixFQUF1QixLQUF2QixFQUE4QjtBQUM1QixVQUFPLElBQUksTUFBSixDQUFXLENBQVgsRUFBYyxLQUFkLEVBQXFCLE9BQXJCLENBQTZCLENBQTdCLENBQVA7QUFDRDs7QUFFRCxTQUFRLFlBQVIsR0FBdUIsWUFBdkI7QUFDQSxVQUFTLFlBQVQsQ0FBc0IsQ0FBdEIsRUFBeUIsQ0FBekIsRUFBNEI7QUFDMUIsVUFBTyxRQUFRLENBQVIsRUFBVyxDQUFYLEVBQWMsSUFBZCxDQUFQO0FBQ0Q7O0FBRUQsU0FBUSxRQUFSLEdBQW1CLFFBQW5CO0FBQ0EsVUFBUyxRQUFULENBQWtCLENBQWxCLEVBQXFCLENBQXJCLEVBQXdCLEtBQXhCLEVBQStCO0FBQzdCLFVBQU8sUUFBUSxDQUFSLEVBQVcsQ0FBWCxFQUFjLEtBQWQsQ0FBUDtBQUNEOztBQUVELFNB
QVEsSUFBUixHQUFlLElBQWY7QUFDQSxVQUFTLElBQVQsQ0FBYyxJQUFkLEVBQW9CLEtBQXBCLEVBQTJCO0FBQ3pCLFVBQU8sS0FBSyxJQUFMLENBQVUsVUFBUyxDQUFULEVBQVksQ0FBWixFQUFlO0FBQzlCLFlBQU8sUUFBUSxPQUFSLENBQWdCLENBQWhCLEVBQW1CLENBQW5CLEVBQXNCLEtBQXRCLENBQVA7QUFDRCxJQUZNLENBQVA7QUFHRDs7QUFFRCxTQUFRLEtBQVIsR0FBZ0IsS0FBaEI7QUFDQSxVQUFTLEtBQVQsQ0FBZSxJQUFmLEVBQXFCLEtBQXJCLEVBQTRCO0FBQzFCLFVBQU8sS0FBSyxJQUFMLENBQVUsVUFBUyxDQUFULEVBQVksQ0FBWixFQUFlO0FBQzlCLFlBQU8sUUFBUSxRQUFSLENBQWlCLENBQWpCLEVBQW9CLENBQXBCLEVBQXVCLEtBQXZCLENBQVA7QUFDRCxJQUZNLENBQVA7QUFHRDs7QUFFRCxTQUFRLEVBQVIsR0FBYSxFQUFiO0FBQ0EsVUFBUyxFQUFULENBQVksQ0FBWixFQUFlLENBQWYsRUFBa0IsS0FBbEIsRUFBeUI7QUFDdkIsVUFBTyxRQUFRLENBQVIsRUFBVyxDQUFYLEVBQWMsS0FBZCxJQUF1QixDQUE5QjtBQUNEOztBQUVELFNBQVEsRUFBUixHQUFhLEVBQWI7QUFDQSxVQUFTLEVBQVQsQ0FBWSxDQUFaLEVBQWUsQ0FBZixFQUFrQixLQUFsQixFQUF5QjtBQUN2QixVQUFPLFFBQVEsQ0FBUixFQUFXLENBQVgsRUFBYyxLQUFkLElBQXVCLENBQTlCO0FBQ0Q7O0FBRUQsU0FBUSxFQUFSLEdBQWEsRUFBYjtBQUNBLFVBQVMsRUFBVCxDQUFZLENBQVosRUFBZSxDQUFmLEVBQWtCLEtBQWxCLEVBQXlCO0FBQ3ZCLFVBQU8sUUFBUSxDQUFSLEVBQVcsQ0FBWCxFQUFjLEtBQWQsTUFBeUIsQ0FBaEM7QUFDRDs7QUFFRCxTQUFRLEdBQVIsR0FBYyxHQUFkO0FBQ0EsVUFBUyxHQUFULENBQWEsQ0FBYixFQUFnQixDQUFoQixFQUFtQixLQUFuQixFQUEwQjtBQUN4QixVQUFPLFFBQVEsQ0FBUixFQUFXLENBQVgsRUFBYyxLQUFkLE1BQXlCLENBQWhDO0FBQ0Q7O0FBRUQsU0FBUSxHQUFSLEdBQWMsR0FBZDtBQUNBLFVBQVMsR0FBVCxDQUFhLENBQWIsRUFBZ0IsQ0FBaEIsRUFBbUIsS0FBbkIsRUFBMEI7QUFDeEIsVUFBTyxRQUFRLENBQVIsRUFBVyxDQUFYLEVBQWMsS0FBZCxLQUF3QixDQUEvQjtBQUNEOztBQUVELFNBQVEsR0FBUixHQUFjLEdBQWQ7QUFDQSxVQUFTLEdBQVQsQ0FBYSxDQUFiLEVBQWdCLENBQWhCLEVBQW1CLEtBQW5CLEVBQTBCO0FBQ3hCLFVBQU8sUUFBUSxDQUFSLEVBQVcsQ0FBWCxFQUFjLEtBQWQsS0FBd0IsQ0FBL0I7QUFDRDs7QUFFRCxTQUFRLEdBQVIsR0FBYyxHQUFkO0FBQ0EsVUFBUyxHQUFULENBQWEsQ0FBYixFQUFnQixFQUFoQixFQUFvQixDQUFwQixFQUF1QixLQUF2QixFQUE4QjtBQUM1QixPQUFJLEdBQUo7QUFDQSxXQUFRLEVBQVI7QUFDRSxVQUFLLEtBQUw7QUFDRSxXQUFJLFFBQU8sQ0FBUCx5Q0FBTyxDQUFQLE9BQWEsUUFBakIsRUFBMkIsSUFBSSxFQUFFLE9BQU47QUFDM0IsV0FBSSxRQUFPLENBQVAseUNBQU8sQ0FBUCxPQUFhLFFBQWpCLEVBQTJCLElBQUksRUFBRSxPQUFOO0FBQzNCLGFBQU0sTUFBTSxDQUFaO0FBQ0E7QUFDRixVQUFLLEtBQUw7QUFDRSxXQUFJLFFBQU8sQ0FBUCx5Q0FBTyxDQUFQLE9BQWEsUUFBakIsRUFBMkIsSUFBSSxFQUFFLE9BQU47QUFDM0IsV0FBSSxRQUFPLENBQVAseUNBQU8sQ0FBUCxPQUFhLFFBQWpCLEVBQTJCLElBQUksRUFBRSxPQUFOO0FBQzNCLGFBQU0sTUFBTSxDQUFaO0FBQ0E7QUFDRixVQUFLLEVBQUwsQ0FBUyxLQUFLLEdBQUwsQ0FBVSxLQUFLLElBQUw7QUFBVyxhQUFNLEdBQUcsQ0FBSCxFQUFNLENBQU4sRUFBUyxLQUFULENBQU4sQ0FBdUI7QUFDckQsVUFBSyxJQUFMO0FBQVcsYUFBTSxJQUFJLENBQUosRUFBTyxDQUFQLEVBQVUsS0FBVixDQUFOLENBQXdCO0FBQ25DLFVBQUssR0FBTDtBQUFVLGFBQU0sR0FBRyxDQUFILEVBQU0sQ0FBTixFQUFTLEtBQVQsQ0FBTixDQUF1QjtBQUNqQyxVQUFLLElBQUw7QUFBVyxhQUFNLElBQUksQ0FBSixFQUFPLENBQVAsRUFBVSxLQUFWLENBQU4sQ0FBd0I7QUFDbkMsVUFBSyxHQUFMO0FBQVUsYUFBTSxHQUFHLENBQUgsRUFBTSxDQUFOLEVBQVMsS0FBVCxDQUFOLENBQXVCO0FBQ2pDLFVBQUssSUFBTDtBQUFXLGFBQU0sSUFBSSxDQUFKLEVBQU8sQ0FBUCxFQUFVLEtBQVYsQ0FBTixDQUF3QjtBQUNuQztBQUFTLGFBQU0sSUFBSSxTQUFKLENBQWMsdUJBQXVCLEVBQXJDLENBQU47QUFqQlg7QUFtQkEsVUFBTyxHQUFQO0FBQ0Q7O0FBRUQsU0FBUSxVQUFSLEdBQXFCLFVBQXJCO0FBQ0EsVUFBUyxVQUFULENBQW9CLElBQXBCLEVBQTBCLEtBQTFCLEVBQWlDO0FBQy9CLE9BQUksZ0JBQWdCLFVBQXBCLEVBQWdDO0FBQzlCLFNBQUksS0FBSyxLQUFMLEtBQWUsS0FBbkIsRUFDRSxPQUFPLElBQVAsQ0FERixLQUdFLE9BQU8sS0FBSyxLQUFaO0FBQ0g7O0FBRUQsT0FBSSxFQUFFLGdCQUFnQixVQUFsQixDQUFKLEVBQ0UsT0FBTyxJQUFJLFVBQUosQ0FBZSxJQUFmLEVBQXFCLEtBQXJCLENBQVA7O0FBRUYsU0FBTSxZQUFOLEVBQW9CLElBQXBCLEVBQTBCLEtBQTFCO0FBQ0EsUUFBSyxLQUFMLEdBQWEsS0FBYjtBQUNBLFFBQUssS0FBTCxDQUFXLElBQVg7O0FBRUEsT0FBSSxLQUFLLE1BQUwsS0FBZ0IsR0FBcEIsRUFDRSxLQUFLLEtBQUwsR0FBYSxFQUFiLENBREYsS0FHRSxLQUFLLEtBQUwsR0FBYSxLQUFLLFFBQUwsR0FBZ0IsS0FBSyxNQUFMLENBQVksT0FBekM7O0F
BRUYsU0FBTSxNQUFOLEVBQWMsSUFBZDtBQUNEOztBQUVELEtBQUksTUFBTSxFQUFWO0FBQ0EsWUFBVyxTQUFYLENBQXFCLEtBQXJCLEdBQTZCLFVBQVMsSUFBVCxFQUFlO0FBQzFDLE9BQUksSUFBSSxLQUFLLEtBQUwsR0FBYSxHQUFHLGVBQUgsQ0FBYixHQUFtQyxHQUFHLFVBQUgsQ0FBM0M7QUFDQSxPQUFJLElBQUksS0FBSyxLQUFMLENBQVcsQ0FBWCxDQUFSOztBQUVBLE9BQUksQ0FBQyxDQUFMLEVBQ0UsTUFBTSxJQUFJLFNBQUosQ0FBYyx5QkFBeUIsSUFBdkMsQ0FBTjs7QUFFRixRQUFLLFFBQUwsR0FBZ0IsRUFBRSxDQUFGLENBQWhCO0FBQ0EsT0FBSSxLQUFLLFFBQUwsS0FBa0IsR0FBdEIsRUFDRSxLQUFLLFFBQUwsR0FBZ0IsRUFBaEI7OztBQUdGLE9BQUksQ0FBQyxFQUFFLENBQUYsQ0FBTCxFQUNFLEtBQUssTUFBTCxHQUFjLEdBQWQsQ0FERixLQUdFLEtBQUssTUFBTCxHQUFjLElBQUksTUFBSixDQUFXLEVBQUUsQ0FBRixDQUFYLEVBQWlCLEtBQUssS0FBdEIsQ0FBZDtBQUNILEVBaEJEOztBQWtCQSxZQUFXLFNBQVgsQ0FBcUIsUUFBckIsR0FBZ0MsWUFBVztBQUN6QyxVQUFPLEtBQUssS0FBWjtBQUNELEVBRkQ7O0FBSUEsWUFBVyxTQUFYLENBQXFCLElBQXJCLEdBQTRCLFVBQVMsT0FBVCxFQUFrQjtBQUM1QyxTQUFNLGlCQUFOLEVBQXlCLE9BQXpCLEVBQWtDLEtBQUssS0FBdkM7O0FBRUEsT0FBSSxLQUFLLE1BQUwsS0FBZ0IsR0FBcEIsRUFDRSxPQUFPLElBQVA7O0FBRUYsT0FBSSxPQUFPLE9BQVAsS0FBbUIsUUFBdkIsRUFDRSxVQUFVLElBQUksTUFBSixDQUFXLE9BQVgsRUFBb0IsS0FBSyxLQUF6QixDQUFWOztBQUVGLFVBQU8sSUFBSSxPQUFKLEVBQWEsS0FBSyxRQUFsQixFQUE0QixLQUFLLE1BQWpDLEVBQXlDLEtBQUssS0FBOUMsQ0FBUDtBQUNELEVBVkQ7O0FBYUEsU0FBUSxLQUFSLEdBQWdCLEtBQWhCO0FBQ0EsVUFBUyxLQUFULENBQWUsS0FBZixFQUFzQixLQUF0QixFQUE2QjtBQUMzQixPQUFLLGlCQUFpQixLQUFsQixJQUE0QixNQUFNLEtBQU4sS0FBZ0IsS0FBaEQsRUFDRSxPQUFPLEtBQVA7O0FBRUYsT0FBSSxFQUFFLGdCQUFnQixLQUFsQixDQUFKLEVBQ0UsT0FBTyxJQUFJLEtBQUosQ0FBVSxLQUFWLEVBQWlCLEtBQWpCLENBQVA7O0FBRUYsUUFBSyxLQUFMLEdBQWEsS0FBYjs7O0FBR0EsUUFBSyxHQUFMLEdBQVcsS0FBWDtBQUNBLFFBQUssR0FBTCxHQUFXLE1BQU0sS0FBTixDQUFZLFlBQVosRUFBMEIsR0FBMUIsQ0FBOEIsVUFBUyxLQUFULEVBQWdCO0FBQ3ZELFlBQU8sS0FBSyxVQUFMLENBQWdCLE1BQU0sSUFBTixFQUFoQixDQUFQO0FBQ0QsSUFGVSxFQUVSLElBRlEsRUFFRixNQUZFLENBRUssVUFBUyxDQUFULEVBQVk7O0FBRTFCLFlBQU8sRUFBRSxNQUFUO0FBQ0QsSUFMVSxDQUFYOztBQU9BLE9BQUksQ0FBQyxLQUFLLEdBQUwsQ0FBUyxNQUFkLEVBQXNCO0FBQ3BCLFdBQU0sSUFBSSxTQUFKLENBQWMsMkJBQTJCLEtBQXpDLENBQU47QUFDRDs7QUFFRCxRQUFLLE1BQUw7QUFDRDs7QUFFRCxPQUFNLFNBQU4sQ0FBZ0IsTUFBaEIsR0FBeUIsWUFBVztBQUNsQyxRQUFLLEtBQUwsR0FBYSxLQUFLLEdBQUwsQ0FBUyxHQUFULENBQWEsVUFBUyxLQUFULEVBQWdCO0FBQ3hDLFlBQU8sTUFBTSxJQUFOLENBQVcsR0FBWCxFQUFnQixJQUFoQixFQUFQO0FBQ0QsSUFGWSxFQUVWLElBRlUsQ0FFTCxJQUZLLEVBRUMsSUFGRCxFQUFiO0FBR0EsVUFBTyxLQUFLLEtBQVo7QUFDRCxFQUxEOztBQU9BLE9BQU0sU0FBTixDQUFnQixRQUFoQixHQUEyQixZQUFXO0FBQ3BDLFVBQU8sS0FBSyxLQUFaO0FBQ0QsRUFGRDs7QUFJQSxPQUFNLFNBQU4sQ0FBZ0IsVUFBaEIsR0FBNkIsVUFBUyxLQUFULEVBQWdCO0FBQzNDLE9BQUksUUFBUSxLQUFLLEtBQWpCO0FBQ0EsV0FBUSxNQUFNLElBQU4sRUFBUjtBQUNBLFNBQU0sT0FBTixFQUFlLEtBQWYsRUFBc0IsS0FBdEI7O0FBRUEsT0FBSSxLQUFLLFFBQVEsR0FBRyxnQkFBSCxDQUFSLEdBQStCLEdBQUcsV0FBSCxDQUF4QztBQUNBLFdBQVEsTUFBTSxPQUFOLENBQWMsRUFBZCxFQUFrQixhQUFsQixDQUFSO0FBQ0EsU0FBTSxnQkFBTixFQUF3QixLQUF4Qjs7QUFFQSxXQUFRLE1BQU0sT0FBTixDQUFjLEdBQUcsY0FBSCxDQUFkLEVBQWtDLHFCQUFsQyxDQUFSO0FBQ0EsU0FBTSxpQkFBTixFQUF5QixLQUF6QixFQUFnQyxHQUFHLGNBQUgsQ0FBaEM7OztBQUdBLFdBQVEsTUFBTSxPQUFOLENBQWMsR0FBRyxTQUFILENBQWQsRUFBNkIsZ0JBQTdCLENBQVI7OztBQUdBLFdBQVEsTUFBTSxPQUFOLENBQWMsR0FBRyxTQUFILENBQWQsRUFBNkIsZ0JBQTdCLENBQVI7OztBQUdBLFdBQVEsTUFBTSxLQUFOLENBQVksS0FBWixFQUFtQixJQUFuQixDQUF3QixHQUF4QixDQUFSOzs7OztBQUtBLE9BQUksU0FBUyxRQUFRLEdBQUcsZUFBSCxDQUFSLEdBQThCLEdBQUcsVUFBSCxDQUEzQztBQUNBLE9BQUksTUFBTSxNQUFNLEtBQU4sQ0FBWSxHQUFaLEVBQWlCLEdBQWpCLENBQXFCLFVBQVMsSUFBVCxFQUFlO0FBQzVDLFlBQU8sZ0JBQWdCLElBQWhCLEVBQXNCLEtBQXRCLENBQVA7QUFDRCxJQUZTLEVBRVAsSUFGTyxDQUVGLEdBRkUsRUFFRyxLQUZILENBRVMsS0FGVCxDQUFWO0FBR0EsT0FBSSxLQUFLLEtBQVQsRUFBZ0I7O0FBRWQsV0FBTSxJQUFJLE1BQUosQ0FBVyxVQUFTLElBQVQsRUFBZTtBQUM5QixjQUFPLENBQUMsQ0FBQyxLQUFLLE
tBQUwsQ0FBVyxNQUFYLENBQVQ7QUFDRCxNQUZLLENBQU47QUFHRDtBQUNELFNBQU0sSUFBSSxHQUFKLENBQVEsVUFBUyxJQUFULEVBQWU7QUFDM0IsWUFBTyxJQUFJLFVBQUosQ0FBZSxJQUFmLEVBQXFCLEtBQXJCLENBQVA7QUFDRCxJQUZLLENBQU47O0FBSUEsVUFBTyxHQUFQO0FBQ0QsRUF2Q0Q7OztBQTBDQSxTQUFRLGFBQVIsR0FBd0IsYUFBeEI7QUFDQSxVQUFTLGFBQVQsQ0FBdUIsS0FBdkIsRUFBOEIsS0FBOUIsRUFBcUM7QUFDbkMsVUFBTyxJQUFJLEtBQUosQ0FBVSxLQUFWLEVBQWlCLEtBQWpCLEVBQXdCLEdBQXhCLENBQTRCLEdBQTVCLENBQWdDLFVBQVMsSUFBVCxFQUFlO0FBQ3BELFlBQU8sS0FBSyxHQUFMLENBQVMsVUFBUyxDQUFULEVBQVk7QUFDMUIsY0FBTyxFQUFFLEtBQVQ7QUFDRCxNQUZNLEVBRUosSUFGSSxDQUVDLEdBRkQsRUFFTSxJQUZOLEdBRWEsS0FGYixDQUVtQixHQUZuQixDQUFQO0FBR0QsSUFKTSxDQUFQO0FBS0Q7Ozs7O0FBS0QsVUFBUyxlQUFULENBQXlCLElBQXpCLEVBQStCLEtBQS9CLEVBQXNDO0FBQ3BDLFNBQU0sTUFBTixFQUFjLElBQWQ7QUFDQSxVQUFPLGNBQWMsSUFBZCxFQUFvQixLQUFwQixDQUFQO0FBQ0EsU0FBTSxPQUFOLEVBQWUsSUFBZjtBQUNBLFVBQU8sY0FBYyxJQUFkLEVBQW9CLEtBQXBCLENBQVA7QUFDQSxTQUFNLFFBQU4sRUFBZ0IsSUFBaEI7QUFDQSxVQUFPLGVBQWUsSUFBZixFQUFxQixLQUFyQixDQUFQO0FBQ0EsU0FBTSxRQUFOLEVBQWdCLElBQWhCO0FBQ0EsVUFBTyxhQUFhLElBQWIsRUFBbUIsS0FBbkIsQ0FBUDtBQUNBLFNBQU0sT0FBTixFQUFlLElBQWY7QUFDQSxVQUFPLElBQVA7QUFDRDs7QUFFRCxVQUFTLEdBQVQsQ0FBYSxFQUFiLEVBQWlCO0FBQ2YsVUFBTyxDQUFDLEVBQUQsSUFBTyxHQUFHLFdBQUgsT0FBcUIsR0FBNUIsSUFBbUMsT0FBTyxHQUFqRDtBQUNEOzs7Ozs7OztBQVFELFVBQVMsYUFBVCxDQUF1QixJQUF2QixFQUE2QixLQUE3QixFQUFvQztBQUNsQyxVQUFPLEtBQUssSUFBTCxHQUFZLEtBQVosQ0FBa0IsS0FBbEIsRUFBeUIsR0FBekIsQ0FBNkIsVUFBUyxJQUFULEVBQWU7QUFDakQsWUFBTyxhQUFhLElBQWIsRUFBbUIsS0FBbkIsQ0FBUDtBQUNELElBRk0sRUFFSixJQUZJLENBRUMsR0FGRCxDQUFQO0FBR0Q7O0FBRUQsVUFBUyxZQUFULENBQXNCLElBQXRCLEVBQTRCLEtBQTVCLEVBQW1DO0FBQ2pDLE9BQUksSUFBSSxRQUFRLEdBQUcsVUFBSCxDQUFSLEdBQXlCLEdBQUcsS0FBSCxDQUFqQztBQUNBLFVBQU8sS0FBSyxPQUFMLENBQWEsQ0FBYixFQUFnQixVQUFTLENBQVQsRUFBWSxDQUFaLEVBQWUsQ0FBZixFQUFrQixDQUFsQixFQUFxQixFQUFyQixFQUF5QjtBQUM5QyxXQUFNLE9BQU4sRUFBZSxJQUFmLEVBQXFCLENBQXJCLEVBQXdCLENBQXhCLEVBQTJCLENBQTNCLEVBQThCLENBQTlCLEVBQWlDLEVBQWpDO0FBQ0EsU0FBSSxHQUFKOztBQUVBLFNBQUksSUFBSSxDQUFKLENBQUosRUFDRSxNQUFNLEVBQU4sQ0FERixLQUVLLElBQUksSUFBSSxDQUFKLENBQUosRUFDSCxNQUFNLE9BQU8sQ0FBUCxHQUFXLFFBQVgsSUFBdUIsQ0FBQyxDQUFELEdBQUssQ0FBNUIsSUFBaUMsTUFBdkMsQ0FERyxLQUVBLElBQUksSUFBSSxDQUFKLENBQUo7O0FBRUgsYUFBTSxPQUFPLENBQVAsR0FBVyxHQUFYLEdBQWlCLENBQWpCLEdBQXFCLE1BQXJCLEdBQThCLENBQTlCLEdBQWtDLEdBQWxDLElBQXlDLENBQUMsQ0FBRCxHQUFLLENBQTlDLElBQW1ELElBQXpELENBRkcsS0FHQSxJQUFJLEVBQUosRUFBUTtBQUNYLGFBQU0saUJBQU4sRUFBeUIsRUFBekI7QUFDQSxXQUFJLEdBQUcsTUFBSCxDQUFVLENBQVYsTUFBaUIsR0FBckIsRUFDRSxLQUFLLE1BQU0sRUFBWDtBQUNGLGFBQU0sT0FBTyxDQUFQLEdBQVcsR0FBWCxHQUFpQixDQUFqQixHQUFxQixHQUFyQixHQUEyQixDQUEzQixHQUErQixFQUEvQixHQUNBLElBREEsR0FDTyxDQURQLEdBQ1csR0FEWCxJQUNrQixDQUFDLENBQUQsR0FBSyxDQUR2QixJQUM0QixJQURsQztBQUVELE1BTkk7O0FBUUgsYUFBTSxPQUFPLENBQVAsR0FBVyxHQUFYLEdBQWlCLENBQWpCLEdBQXFCLEdBQXJCLEdBQTJCLENBQTNCLEdBQ0EsSUFEQSxHQUNPLENBRFAsR0FDVyxHQURYLElBQ2tCLENBQUMsQ0FBRCxHQUFLLENBRHZCLElBQzRCLElBRGxDOztBQUdGLFdBQU0sY0FBTixFQUFzQixHQUF0QjtBQUNBLFlBQU8sR0FBUDtBQUNELElBeEJNLENBQVA7QUF5QkQ7Ozs7Ozs7O0FBUUQsVUFBUyxhQUFULENBQXVCLElBQXZCLEVBQTZCLEtBQTdCLEVBQW9DO0FBQ2xDLFVBQU8sS0FBSyxJQUFMLEdBQVksS0FBWixDQUFrQixLQUFsQixFQUF5QixHQUF6QixDQUE2QixVQUFTLElBQVQsRUFBZTtBQUNqRCxZQUFPLGFBQWEsSUFBYixFQUFtQixLQUFuQixDQUFQO0FBQ0QsSUFGTSxFQUVKLElBRkksQ0FFQyxHQUZELENBQVA7QUFHRDs7QUFFRCxVQUFTLFlBQVQsQ0FBc0IsSUFBdEIsRUFBNEIsS0FBNUIsRUFBbUM7QUFDakMsU0FBTSxPQUFOLEVBQWUsSUFBZixFQUFxQixLQUFyQjtBQUNBLE9BQUksSUFBSSxRQUFRLEdBQUcsVUFBSCxDQUFSLEdBQXlCLEdBQUcsS0FBSCxDQUFqQztBQUNBLFVBQU8sS0FBSyxPQUFMLENBQWEsQ0FBYixFQUFnQixVQUFTLENBQVQsRUFBWSxDQUFaLEVBQWUsQ0FBZixFQUFrQixDQUFsQixFQUFxQixFQUFyQixFQUF5QjtBQUM5QyxXQUFNLE9BQU4sRUFBZSxJQUFmL
EVBQXFCLENBQXJCLEVBQXdCLENBQXhCLEVBQTJCLENBQTNCLEVBQThCLENBQTlCLEVBQWlDLEVBQWpDO0FBQ0EsU0FBSSxHQUFKOztBQUVBLFNBQUksSUFBSSxDQUFKLENBQUosRUFDRSxNQUFNLEVBQU4sQ0FERixLQUVLLElBQUksSUFBSSxDQUFKLENBQUosRUFDSCxNQUFNLE9BQU8sQ0FBUCxHQUFXLFFBQVgsSUFBdUIsQ0FBQyxDQUFELEdBQUssQ0FBNUIsSUFBaUMsTUFBdkMsQ0FERyxLQUVBLElBQUksSUFBSSxDQUFKLENBQUosRUFBWTtBQUNmLFdBQUksTUFBTSxHQUFWLEVBQ0UsTUFBTSxPQUFPLENBQVAsR0FBVyxHQUFYLEdBQWlCLENBQWpCLEdBQXFCLE1BQXJCLEdBQThCLENBQTlCLEdBQWtDLEdBQWxDLElBQXlDLENBQUMsQ0FBRCxHQUFLLENBQTlDLElBQW1ELElBQXpELENBREYsS0FHRSxNQUFNLE9BQU8sQ0FBUCxHQUFXLEdBQVgsR0FBaUIsQ0FBakIsR0FBcUIsTUFBckIsSUFBK0IsQ0FBQyxDQUFELEdBQUssQ0FBcEMsSUFBeUMsTUFBL0M7QUFDSCxNQUxJLE1BS0UsSUFBSSxFQUFKLEVBQVE7QUFDYixhQUFNLGlCQUFOLEVBQXlCLEVBQXpCO0FBQ0EsV0FBSSxHQUFHLE1BQUgsQ0FBVSxDQUFWLE1BQWlCLEdBQXJCLEVBQ0UsS0FBSyxNQUFNLEVBQVg7QUFDRixXQUFJLE1BQU0sR0FBVixFQUFlO0FBQ2IsYUFBSSxNQUFNLEdBQVYsRUFDRSxNQUFNLE9BQU8sQ0FBUCxHQUFXLEdBQVgsR0FBaUIsQ0FBakIsR0FBcUIsR0FBckIsR0FBMkIsQ0FBM0IsR0FBK0IsRUFBL0IsR0FDQSxJQURBLEdBQ08sQ0FEUCxHQUNXLEdBRFgsR0FDaUIsQ0FEakIsR0FDcUIsR0FEckIsSUFDNEIsQ0FBQyxDQUFELEdBQUssQ0FEakMsQ0FBTixDQURGLEtBSUUsTUFBTSxPQUFPLENBQVAsR0FBVyxHQUFYLEdBQWlCLENBQWpCLEdBQXFCLEdBQXJCLEdBQTJCLENBQTNCLEdBQStCLEVBQS9CLEdBQ0EsSUFEQSxHQUNPLENBRFAsR0FDVyxHQURYLElBQ2tCLENBQUMsQ0FBRCxHQUFLLENBRHZCLElBQzRCLElBRGxDO0FBRUgsUUFQRCxNQVFFLE1BQU0sT0FBTyxDQUFQLEdBQVcsR0FBWCxHQUFpQixDQUFqQixHQUFxQixHQUFyQixHQUEyQixDQUEzQixHQUErQixFQUEvQixHQUNBLElBREEsSUFDUSxDQUFDLENBQUQsR0FBSyxDQURiLElBQ2tCLE1BRHhCO0FBRUgsTUFkTSxNQWNBO0FBQ0wsYUFBTSxPQUFOO0FBQ0EsV0FBSSxNQUFNLEdBQVYsRUFBZTtBQUNiLGFBQUksTUFBTSxHQUFWLEVBQ0UsTUFBTSxPQUFPLENBQVAsR0FBVyxHQUFYLEdBQWlCLENBQWpCLEdBQXFCLEdBQXJCLEdBQTJCLENBQTNCLEdBQ0EsSUFEQSxHQUNPLENBRFAsR0FDVyxHQURYLEdBQ2lCLENBRGpCLEdBQ3FCLEdBRHJCLElBQzRCLENBQUMsQ0FBRCxHQUFLLENBRGpDLENBQU4sQ0FERixLQUlFLE1BQU0sT0FBTyxDQUFQLEdBQVcsR0FBWCxHQUFpQixDQUFqQixHQUFxQixHQUFyQixHQUEyQixDQUEzQixHQUNBLElBREEsR0FDTyxDQURQLEdBQ1csR0FEWCxJQUNrQixDQUFDLENBQUQsR0FBSyxDQUR2QixJQUM0QixJQURsQztBQUVILFFBUEQsTUFRRSxNQUFNLE9BQU8sQ0FBUCxHQUFXLEdBQVgsR0FBaUIsQ0FBakIsR0FBcUIsR0FBckIsR0FBMkIsQ0FBM0IsR0FDQSxJQURBLElBQ1EsQ0FBQyxDQUFELEdBQUssQ0FEYixJQUNrQixNQUR4QjtBQUVIOztBQUVELFdBQU0sY0FBTixFQUFzQixHQUF0QjtBQUNBLFlBQU8sR0FBUDtBQUNELElBM0NNLENBQVA7QUE0Q0Q7O0FBRUQsVUFBUyxjQUFULENBQXdCLElBQXhCLEVBQThCLEtBQTlCLEVBQXFDO0FBQ25DLFNBQU0sZ0JBQU4sRUFBd0IsSUFBeEIsRUFBOEIsS0FBOUI7QUFDQSxVQUFPLEtBQUssS0FBTCxDQUFXLEtBQVgsRUFBa0IsR0FBbEIsQ0FBc0IsVUFBUyxJQUFULEVBQWU7QUFDMUMsWUFBTyxjQUFjLElBQWQsRUFBb0IsS0FBcEIsQ0FBUDtBQUNELElBRk0sRUFFSixJQUZJLENBRUMsR0FGRCxDQUFQO0FBR0Q7O0FBRUQsVUFBUyxhQUFULENBQXVCLElBQXZCLEVBQTZCLEtBQTdCLEVBQW9DO0FBQ2xDLFVBQU8sS0FBSyxJQUFMLEVBQVA7QUFDQSxPQUFJLElBQUksUUFBUSxHQUFHLFdBQUgsQ0FBUixHQUEwQixHQUFHLE1BQUgsQ0FBbEM7QUFDQSxVQUFPLEtBQUssT0FBTCxDQUFhLENBQWIsRUFBZ0IsVUFBUyxHQUFULEVBQWMsSUFBZCxFQUFvQixDQUFwQixFQUF1QixDQUF2QixFQUEwQixDQUExQixFQUE2QixFQUE3QixFQUFpQztBQUN0RCxXQUFNLFFBQU4sRUFBZ0IsSUFBaEIsRUFBc0IsR0FBdEIsRUFBMkIsSUFBM0IsRUFBaUMsQ0FBakMsRUFBb0MsQ0FBcEMsRUFBdUMsQ0FBdkMsRUFBMEMsRUFBMUM7QUFDQSxTQUFJLEtBQUssSUFBSSxDQUFKLENBQVQ7QUFDQSxTQUFJLEtBQUssTUFBTSxJQUFJLENBQUosQ0FBZjtBQUNBLFNBQUksS0FBSyxNQUFNLElBQUksQ0FBSixDQUFmO0FBQ0EsU0FBSSxPQUFPLEVBQVg7O0FBRUEsU0FBSSxTQUFTLEdBQVQsSUFBZ0IsSUFBcEIsRUFDRSxPQUFPLEVBQVA7O0FBRUYsU0FBSSxFQUFKLEVBQVE7QUFDTixXQUFJLFNBQVMsR0FBVCxJQUFnQixTQUFTLEdBQTdCLEVBQWtDOztBQUVoQyxlQUFNLFFBQU47QUFDRCxRQUhELE1BR087O0FBRUwsZUFBTSxHQUFOO0FBQ0Q7QUFDRixNQVJELE1BUU8sSUFBSSxRQUFRLElBQVosRUFBa0I7O0FBRXZCLFdBQUksRUFBSixFQUNFLElBQUksQ0FBSjtBQUNGLFdBQUksRUFBSixFQUNFLElBQUksQ0FBSjs7QUFFRixXQUFJLFNBQVMsR0FBYixFQUFrQjs7OztBQUloQixnQkFBTyxJQUFQO0FBQ0EsYUFB
SSxFQUFKLEVBQVE7QUFDTixlQUFJLENBQUMsQ0FBRCxHQUFLLENBQVQ7QUFDQSxlQUFJLENBQUo7QUFDQSxlQUFJLENBQUo7QUFDRCxVQUpELE1BSU8sSUFBSSxFQUFKLEVBQVE7QUFDYixlQUFJLENBQUMsQ0FBRCxHQUFLLENBQVQ7QUFDQSxlQUFJLENBQUo7QUFDRDtBQUNGLFFBYkQsTUFhTyxJQUFJLFNBQVMsSUFBYixFQUFtQjs7O0FBR3hCLGdCQUFPLEdBQVA7QUFDQSxhQUFJLEVBQUosRUFDRSxJQUFJLENBQUMsQ0FBRCxHQUFLLENBQVQsQ0FERixLQUdFLElBQUksQ0FBQyxDQUFELEdBQUssQ0FBVDtBQUNIOztBQUVELGFBQU0sT0FBTyxDQUFQLEdBQVcsR0FBWCxHQUFpQixDQUFqQixHQUFxQixHQUFyQixHQUEyQixDQUFqQztBQUNELE1BL0JNLE1BK0JBLElBQUksRUFBSixFQUFRO0FBQ2IsYUFBTSxPQUFPLENBQVAsR0FBVyxRQUFYLElBQXVCLENBQUMsQ0FBRCxHQUFLLENBQTVCLElBQWlDLE1BQXZDO0FBQ0QsTUFGTSxNQUVBLElBQUksRUFBSixFQUFRO0FBQ2IsYUFBTSxPQUFPLENBQVAsR0FBVyxHQUFYLEdBQWlCLENBQWpCLEdBQXFCLE1BQXJCLEdBQThCLENBQTlCLEdBQWtDLEdBQWxDLElBQXlDLENBQUMsQ0FBRCxHQUFLLENBQTlDLElBQW1ELElBQXpEO0FBQ0Q7O0FBRUQsV0FBTSxlQUFOLEVBQXVCLEdBQXZCOztBQUVBLFlBQU8sR0FBUDtBQUNELElBMURNLENBQVA7QUEyREQ7Ozs7QUFJRCxVQUFTLFlBQVQsQ0FBc0IsSUFBdEIsRUFBNEIsS0FBNUIsRUFBbUM7QUFDakMsU0FBTSxjQUFOLEVBQXNCLElBQXRCLEVBQTRCLEtBQTVCOztBQUVBLFVBQU8sS0FBSyxJQUFMLEdBQVksT0FBWixDQUFvQixHQUFHLElBQUgsQ0FBcEIsRUFBOEIsRUFBOUIsQ0FBUDtBQUNEOzs7Ozs7O0FBT0QsVUFBUyxhQUFULENBQXVCLEVBQXZCLEVBQ3VCLElBRHZCLEVBQzZCLEVBRDdCLEVBQ2lDLEVBRGpDLEVBQ3FDLEVBRHJDLEVBQ3lDLEdBRHpDLEVBQzhDLEVBRDlDLEVBRXVCLEVBRnZCLEVBRTJCLEVBRjNCLEVBRStCLEVBRi9CLEVBRW1DLEVBRm5DLEVBRXVDLEdBRnZDLEVBRTRDLEVBRjVDLEVBRWdEOztBQUU5QyxPQUFJLElBQUksRUFBSixDQUFKLEVBQ0UsT0FBTyxFQUFQLENBREYsS0FFSyxJQUFJLElBQUksRUFBSixDQUFKLEVBQ0gsT0FBTyxPQUFPLEVBQVAsR0FBWSxNQUFuQixDQURHLEtBRUEsSUFBSSxJQUFJLEVBQUosQ0FBSixFQUNILE9BQU8sT0FBTyxFQUFQLEdBQVksR0FBWixHQUFrQixFQUFsQixHQUF1QixJQUE5QixDQURHLEtBR0gsT0FBTyxPQUFPLElBQWQ7O0FBRUYsT0FBSSxJQUFJLEVBQUosQ0FBSixFQUNFLEtBQUssRUFBTCxDQURGLEtBRUssSUFBSSxJQUFJLEVBQUosQ0FBSixFQUNILEtBQUssT0FBTyxDQUFDLEVBQUQsR0FBTSxDQUFiLElBQWtCLE1BQXZCLENBREcsS0FFQSxJQUFJLElBQUksRUFBSixDQUFKLEVBQ0gsS0FBSyxNQUFNLEVBQU4sR0FBVyxHQUFYLElBQWtCLENBQUMsRUFBRCxHQUFNLENBQXhCLElBQTZCLElBQWxDLENBREcsS0FFQSxJQUFJLEdBQUosRUFDSCxLQUFLLE9BQU8sRUFBUCxHQUFZLEdBQVosR0FBa0IsRUFBbEIsR0FBdUIsR0FBdkIsR0FBNkIsRUFBN0IsR0FBa0MsR0FBbEMsR0FBd0MsR0FBN0MsQ0FERyxLQUdILEtBQUssT0FBTyxFQUFaOztBQUVGLFVBQU8sQ0FBQyxPQUFPLEdBQVAsR0FBYSxFQUFkLEVBQWtCLElBQWxCLEVBQVA7QUFDRDs7O0FBSUQsT0FBTSxTQUFOLENBQWdCLElBQWhCLEdBQXVCLFVBQVMsT0FBVCxFQUFrQjtBQUN2QyxPQUFJLENBQUMsT0FBTCxFQUNFLE9BQU8sS0FBUDs7QUFFRixPQUFJLE9BQU8sT0FBUCxLQUFtQixRQUF2QixFQUNFLFVBQVUsSUFBSSxNQUFKLENBQVcsT0FBWCxFQUFvQixLQUFLLEtBQXpCLENBQVY7O0FBRUYsUUFBSyxJQUFJLElBQUksQ0FBYixFQUFnQixJQUFJLEtBQUssR0FBTCxDQUFTLE1BQTdCLEVBQXFDLEdBQXJDLEVBQTBDO0FBQ3hDLFNBQUksUUFBUSxLQUFLLEdBQUwsQ0FBUyxDQUFULENBQVIsRUFBcUIsT0FBckIsQ0FBSixFQUNFLE9BQU8sSUFBUDtBQUNIO0FBQ0QsVUFBTyxLQUFQO0FBQ0QsRUFaRDs7QUFjQSxVQUFTLE9BQVQsQ0FBaUIsR0FBakIsRUFBc0IsT0FBdEIsRUFBK0I7QUFDN0IsUUFBSyxJQUFJLElBQUksQ0FBYixFQUFnQixJQUFJLElBQUksTUFBeEIsRUFBZ0MsR0FBaEMsRUFBcUM7QUFDbkMsU0FBSSxDQUFDLElBQUksQ0FBSixFQUFPLElBQVAsQ0FBWSxPQUFaLENBQUwsRUFDRSxPQUFPLEtBQVA7QUFDSDs7QUFFRCxPQUFJLFFBQVEsVUFBUixDQUFtQixNQUF2QixFQUErQjs7Ozs7O0FBTTdCLFVBQUssSUFBSSxJQUFJLENBQWIsRUFBZ0IsSUFBSSxJQUFJLE1BQXhCLEVBQWdDLEdBQWhDLEVBQXFDO0FBQ25DLGFBQU0sSUFBSSxDQUFKLEVBQU8sTUFBYjtBQUNBLFdBQUksSUFBSSxDQUFKLEVBQU8sTUFBUCxLQUFrQixHQUF0QixFQUNFOztBQUVGLFdBQUksSUFBSSxDQUFKLEVBQU8sTUFBUCxDQUFjLFVBQWQsQ0FBeUIsTUFBekIsR0FBa0MsQ0FBdEMsRUFBeUM7QUFDdkMsYUFBSSxVQUFVLElBQUksQ0FBSixFQUFPLE1BQXJCO0FBQ0EsYUFBSSxRQUFRLEtBQVIsS0FBa0IsUUFBUSxLQUExQixJQUNBLFFBQVEsS0FBUixLQUFrQixRQUFRLEtBRDFCLElBRUEsUUFBUSxLQUFSLEtBQWtCLFFBQVEsS0FGOUIsRUFHRSxPQUFPLElBQVA7QUFDSDtBQUNGOzs7QUFHRCxZQUFPLEtBQVA7QUFDRDs7QUFFRCxVQUFPLElBQVA7QUFDRDs7QUFFRCxTQUFRLFNBQVIsR0FBb0I
sU0FBcEI7QUFDQSxVQUFTLFNBQVQsQ0FBbUIsT0FBbkIsRUFBNEIsS0FBNUIsRUFBbUMsS0FBbkMsRUFBMEM7QUFDeEMsT0FBSTtBQUNGLGFBQVEsSUFBSSxLQUFKLENBQVUsS0FBVixFQUFpQixLQUFqQixDQUFSO0FBQ0QsSUFGRCxDQUVFLE9BQU8sRUFBUCxFQUFXO0FBQ1gsWUFBTyxLQUFQO0FBQ0Q7QUFDRCxVQUFPLE1BQU0sSUFBTixDQUFXLE9BQVgsQ0FBUDtBQUNEOztBQUVELFNBQVEsYUFBUixHQUF3QixhQUF4QjtBQUNBLFVBQVMsYUFBVCxDQUF1QixRQUF2QixFQUFpQyxLQUFqQyxFQUF3QyxLQUF4QyxFQUErQztBQUM3QyxVQUFPLFNBQVMsTUFBVCxDQUFnQixVQUFTLE9BQVQsRUFBa0I7QUFDdkMsWUFBTyxVQUFVLE9BQVYsRUFBbUIsS0FBbkIsRUFBMEIsS0FBMUIsQ0FBUDtBQUNELElBRk0sRUFFSixJQUZJLENBRUMsVUFBUyxDQUFULEVBQVksQ0FBWixFQUFlO0FBQ3JCLFlBQU8sU0FBUyxDQUFULEVBQVksQ0FBWixFQUFlLEtBQWYsQ0FBUDtBQUNELElBSk0sRUFJSixDQUpJLEtBSUUsSUFKVDtBQUtEOztBQUVELFNBQVEsVUFBUixHQUFxQixVQUFyQjtBQUNBLFVBQVMsVUFBVCxDQUFvQixLQUFwQixFQUEyQixLQUEzQixFQUFrQztBQUNoQyxPQUFJOzs7QUFHRixZQUFPLElBQUksS0FBSixDQUFVLEtBQVYsRUFBaUIsS0FBakIsRUFBd0IsS0FBeEIsSUFBaUMsR0FBeEM7QUFDRCxJQUpELENBSUUsT0FBTyxFQUFQLEVBQVc7QUFDWCxZQUFPLElBQVA7QUFDRDtBQUNGOzs7QUFHRCxTQUFRLEdBQVIsR0FBYyxHQUFkO0FBQ0EsVUFBUyxHQUFULENBQWEsT0FBYixFQUFzQixLQUF0QixFQUE2QixLQUE3QixFQUFvQztBQUNsQyxVQUFPLFFBQVEsT0FBUixFQUFpQixLQUFqQixFQUF3QixHQUF4QixFQUE2QixLQUE3QixDQUFQO0FBQ0Q7OztBQUdELFNBQVEsR0FBUixHQUFjLEdBQWQ7QUFDQSxVQUFTLEdBQVQsQ0FBYSxPQUFiLEVBQXNCLEtBQXRCLEVBQTZCLEtBQTdCLEVBQW9DO0FBQ2xDLFVBQU8sUUFBUSxPQUFSLEVBQWlCLEtBQWpCLEVBQXdCLEdBQXhCLEVBQTZCLEtBQTdCLENBQVA7QUFDRDs7QUFFRCxTQUFRLE9BQVIsR0FBa0IsT0FBbEI7QUFDQSxVQUFTLE9BQVQsQ0FBaUIsT0FBakIsRUFBMEIsS0FBMUIsRUFBaUMsSUFBakMsRUFBdUMsS0FBdkMsRUFBOEM7QUFDNUMsYUFBVSxJQUFJLE1BQUosQ0FBVyxPQUFYLEVBQW9CLEtBQXBCLENBQVY7QUFDQSxXQUFRLElBQUksS0FBSixDQUFVLEtBQVYsRUFBaUIsS0FBakIsQ0FBUjs7QUFFQSxPQUFJLElBQUosRUFBVSxLQUFWLEVBQWlCLElBQWpCLEVBQXVCLElBQXZCLEVBQTZCLEtBQTdCO0FBQ0EsV0FBUSxJQUFSO0FBQ0UsVUFBSyxHQUFMO0FBQ0UsY0FBTyxFQUFQO0FBQ0EsZUFBUSxHQUFSO0FBQ0EsY0FBTyxFQUFQO0FBQ0EsY0FBTyxHQUFQO0FBQ0EsZUFBUSxJQUFSO0FBQ0E7QUFDRixVQUFLLEdBQUw7QUFDRSxjQUFPLEVBQVA7QUFDQSxlQUFRLEdBQVI7QUFDQSxjQUFPLEVBQVA7QUFDQSxjQUFPLEdBQVA7QUFDQSxlQUFRLElBQVI7QUFDQTtBQUNGO0FBQ0UsYUFBTSxJQUFJLFNBQUosQ0FBYyx1Q0FBZCxDQUFOO0FBaEJKOzs7QUFvQkEsT0FBSSxVQUFVLE9BQVYsRUFBbUIsS0FBbkIsRUFBMEIsS0FBMUIsQ0FBSixFQUFzQztBQUNwQyxZQUFPLEtBQVA7QUFDRDs7Ozs7QUFLRCxRQUFLLElBQUksSUFBSSxDQUFiLEVBQWdCLElBQUksTUFBTSxHQUFOLENBQVUsTUFBOUIsRUFBc0MsRUFBRSxDQUF4QyxFQUEyQztBQUN6QyxTQUFJLGNBQWMsTUFBTSxHQUFOLENBQVUsQ0FBVixDQUFsQjs7QUFFQSxTQUFJLE9BQU8sSUFBWDtBQUNBLFNBQUksTUFBTSxJQUFWOztBQUVBLGlCQUFZLE9BQVosQ0FBb0IsVUFBUyxVQUFULEVBQXFCO0FBQ3ZDLFdBQUksV0FBVyxNQUFYLEtBQXNCLEdBQTFCLEVBQStCO0FBQzdCLHNCQUFhLElBQUksVUFBSixDQUFlLFNBQWYsQ0FBYjtBQUNEO0FBQ0QsY0FBTyxRQUFRLFVBQWY7QUFDQSxhQUFNLE9BQU8sVUFBYjtBQUNBLFdBQUksS0FBSyxXQUFXLE1BQWhCLEVBQXdCLEtBQUssTUFBN0IsRUFBcUMsS0FBckMsQ0FBSixFQUFpRDtBQUMvQyxnQkFBTyxVQUFQO0FBQ0QsUUFGRCxNQUVPLElBQUksS0FBSyxXQUFXLE1BQWhCLEVBQXdCLElBQUksTUFBNUIsRUFBb0MsS0FBcEMsQ0FBSixFQUFnRDtBQUNyRCxlQUFNLFVBQU47QUFDRDtBQUNGLE1BWEQ7Ozs7QUFlQSxTQUFJLEtBQUssUUFBTCxLQUFrQixJQUFsQixJQUEwQixLQUFLLFFBQUwsS0FBa0IsS0FBaEQsRUFBdUQ7QUFDckQsY0FBTyxLQUFQO0FBQ0Q7Ozs7QUFJRCxTQUFJLENBQUMsQ0FBQyxJQUFJLFFBQUwsSUFBaUIsSUFBSSxRQUFKLEtBQWlCLElBQW5DLEtBQ0EsTUFBTSxPQUFOLEVBQWUsSUFBSSxNQUFuQixDQURKLEVBQ2dDO0FBQzlCLGNBQU8sS0FBUDtBQUNELE1BSEQsTUFHTyxJQUFJLElBQUksUUFBSixLQUFpQixLQUFqQixJQUEwQixLQUFLLE9BQUwsRUFBYyxJQUFJLE1BQWxCLENBQTlCLEVBQXlEO0FBQzlELGNBQU8sS0FBUDtBQUNEO0FBQ0Y7QUFDRCxVQUFPLElBQVA7QUFDRDs7QUFFRCxTQUFRLFVBQVIsR0FBcUIsVUFBckI7QUFDQSxVQUFTLFVBQVQsQ0FBb0IsT0FBcEIsRUFBNkIsS0FBN0IsRUFBb0M7QUFDbEMsT0FBSSxTQUFTLE1BQU0sT0FBTixFQUFlLEtBQWYsQ0FBYjtBQUNBLFVBQVEsVUFBVSxPQUFPLFVBQVAsQ0FBa0IsTUFBN0IsR0FBdUMsT0FBTyxVQUE5QyxHQUEyRCxJQUFsRTtBQUNELEU7Ozs7Ozs7Ozs7O0
FDdnFDRCxLQUFJLFVBQVUsT0FBTyxPQUFQLEdBQWlCLEVBQS9COzs7Ozs7O0FBT0EsS0FBSSxnQkFBSjtBQUNBLEtBQUksa0JBQUo7O0FBRUMsY0FBWTtBQUNYLFNBQUk7QUFDRiw0QkFBbUIsVUFBbkI7QUFDRCxNQUZELENBRUUsT0FBTyxDQUFQLEVBQVU7QUFDViw0QkFBbUIsNEJBQVk7QUFDN0IsbUJBQU0sSUFBSSxLQUFKLENBQVUsMkJBQVYsQ0FBTjtBQUNELFVBRkQ7QUFHRDtBQUNELFNBQUk7QUFDRiw4QkFBcUIsWUFBckI7QUFDRCxNQUZELENBRUUsT0FBTyxDQUFQLEVBQVU7QUFDViw4QkFBcUIsOEJBQVk7QUFDL0IsbUJBQU0sSUFBSSxLQUFKLENBQVUsNkJBQVYsQ0FBTjtBQUNELFVBRkQ7QUFHRDtBQUNGLEVBZkEsR0FBRDtBQWdCQSxLQUFJLFFBQVEsRUFBWjtBQUNBLEtBQUksV0FBVyxLQUFmO0FBQ0EsS0FBSSxZQUFKO0FBQ0EsS0FBSSxhQUFhLENBQUMsQ0FBbEI7O0FBRUEsVUFBUyxlQUFULEdBQTJCO0FBQ3ZCLFNBQUksQ0FBQyxRQUFELElBQWEsQ0FBQyxZQUFsQixFQUFnQztBQUM1QjtBQUNIO0FBQ0QsZ0JBQVcsS0FBWDtBQUNBLFNBQUksYUFBYSxNQUFqQixFQUF5QjtBQUNyQixpQkFBUSxhQUFhLE1BQWIsQ0FBb0IsS0FBcEIsQ0FBUjtBQUNILE1BRkQsTUFFTztBQUNILHNCQUFhLENBQUMsQ0FBZDtBQUNIO0FBQ0QsU0FBSSxNQUFNLE1BQVYsRUFBa0I7QUFDZDtBQUNIO0FBQ0o7O0FBRUQsVUFBUyxVQUFULEdBQXNCO0FBQ2xCLFNBQUksUUFBSixFQUFjO0FBQ1Y7QUFDSDtBQUNELFNBQUksVUFBVSxpQkFBaUIsZUFBakIsQ0FBZDtBQUNBLGdCQUFXLElBQVg7O0FBRUEsU0FBSSxNQUFNLE1BQU0sTUFBaEI7QUFDQSxZQUFNLEdBQU4sRUFBVztBQUNQLHdCQUFlLEtBQWY7QUFDQSxpQkFBUSxFQUFSO0FBQ0EsZ0JBQU8sRUFBRSxVQUFGLEdBQWUsR0FBdEIsRUFBMkI7QUFDdkIsaUJBQUksWUFBSixFQUFrQjtBQUNkLDhCQUFhLFVBQWIsRUFBeUIsR0FBekI7QUFDSDtBQUNKO0FBQ0Qsc0JBQWEsQ0FBQyxDQUFkO0FBQ0EsZUFBTSxNQUFNLE1BQVo7QUFDSDtBQUNELG9CQUFlLElBQWY7QUFDQSxnQkFBVyxLQUFYO0FBQ0Esd0JBQW1CLE9BQW5CO0FBQ0g7O0FBRUQsU0FBUSxRQUFSLEdBQW1CLFVBQVUsR0FBVixFQUFlO0FBQzlCLFNBQUksT0FBTyxJQUFJLEtBQUosQ0FBVSxVQUFVLE1BQVYsR0FBbUIsQ0FBN0IsQ0FBWDtBQUNBLFNBQUksVUFBVSxNQUFWLEdBQW1CLENBQXZCLEVBQTBCO0FBQ3RCLGNBQUssSUFBSSxJQUFJLENBQWIsRUFBZ0IsSUFBSSxVQUFVLE1BQTlCLEVBQXNDLEdBQXRDLEVBQTJDO0FBQ3ZDLGtCQUFLLElBQUksQ0FBVCxJQUFjLFVBQVUsQ0FBVixDQUFkO0FBQ0g7QUFDSjtBQUNELFdBQU0sSUFBTixDQUFXLElBQUksSUFBSixDQUFTLEdBQVQsRUFBYyxJQUFkLENBQVg7QUFDQSxTQUFJLE1BQU0sTUFBTixLQUFpQixDQUFqQixJQUFzQixDQUFDLFFBQTNCLEVBQXFDO0FBQ2pDLDBCQUFpQixVQUFqQixFQUE2QixDQUE3QjtBQUNIO0FBQ0osRUFYRDs7O0FBY0EsVUFBUyxJQUFULENBQWMsR0FBZCxFQUFtQixLQUFuQixFQUEwQjtBQUN0QixVQUFLLEdBQUwsR0FBVyxHQUFYO0FBQ0EsVUFBSyxLQUFMLEdBQWEsS0FBYjtBQUNIO0FBQ0QsTUFBSyxTQUFMLENBQWUsR0FBZixHQUFxQixZQUFZO0FBQzdCLFVBQUssR0FBTCxDQUFTLEtBQVQsQ0FBZSxJQUFmLEVBQXFCLEtBQUssS0FBMUI7QUFDSCxFQUZEO0FBR0EsU0FBUSxLQUFSLEdBQWdCLFNBQWhCO0FBQ0EsU0FBUSxPQUFSLEdBQWtCLElBQWxCO0FBQ0EsU0FBUSxHQUFSLEdBQWMsRUFBZDtBQUNBLFNBQVEsSUFBUixHQUFlLEVBQWY7QUFDQSxTQUFRLE9BQVIsR0FBa0IsRUFBbEIsQztBQUNBLFNBQVEsUUFBUixHQUFtQixFQUFuQjs7QUFFQSxVQUFTLElBQVQsR0FBZ0IsQ0FBRTs7QUFFbEIsU0FBUSxFQUFSLEdBQWEsSUFBYjtBQUNBLFNBQVEsV0FBUixHQUFzQixJQUF0QjtBQUNBLFNBQVEsSUFBUixHQUFlLElBQWY7QUFDQSxTQUFRLEdBQVIsR0FBYyxJQUFkO0FBQ0EsU0FBUSxjQUFSLEdBQXlCLElBQXpCO0FBQ0EsU0FBUSxrQkFBUixHQUE2QixJQUE3QjtBQUNBLFNBQVEsSUFBUixHQUFlLElBQWY7O0FBRUEsU0FBUSxPQUFSLEdBQWtCLFVBQVUsSUFBVixFQUFnQjtBQUM5QixXQUFNLElBQUksS0FBSixDQUFVLGtDQUFWLENBQU47QUFDSCxFQUZEOztBQUlBLFNBQVEsR0FBUixHQUFjLFlBQVk7QUFBRSxZQUFPLEdBQVA7QUFBWSxFQUF4QztBQUNBLFNBQVEsS0FBUixHQUFnQixVQUFVLEdBQVYsRUFBZTtBQUMzQixXQUFNLElBQUksS0FBSixDQUFVLGdDQUFWLENBQU47QUFDSCxFQUZEO0FBR0EsU0FBUSxLQUFSLEdBQWdCLFlBQVc7QUFBRSxZQUFPLENBQVA7QUFBVyxFQUF4QyxDOzs7Ozs7Ozs7OzttQkNyRndCLEU7O0FBNUJ4Qjs7S0FBWSxDOztBQUNaOztLQUFZLEs7O0FBQ1o7O0tBQVksUTs7QUFDWjs7S0FBWSxTOztBQUNaOztLQUFZLFM7O0FBQ1o7O0tBQVksTTs7QUFFWjs7OztBQUVBLFVBQVMsaUJBQVQsQ0FBNEIsRUFBNUIsRUFBZ0MsU0FBaEMsRUFBMkM7QUFDekMsT0FBSSxVQUFVLE9BQVYsSUFDQSxVQUFVLE9BQVYsQ0FBa0IsS0FEdEIsRUFDNkI7QUFDM0IsT0FBRSxJQUFGLENBQU8sNENBQ0wsc0NBREY7QUFFQSxlQUFVLE9BQVYsQ0FBa0IsS0FBbEIsQ0FBd0IsSUFBeEIsQ0FBNkIsRUFBN0I7QUFDRDtBQUNGOzs7Ozs7Ozs7Ozs7Ozs7OztBQVljLFVBQVMsRUFBVCxDQUNiLElBRGEsR
MLEdBQVUsRUFBVjtBQUNBLFFBQUssR0FBTCxHQUFXLEdBQVg7O0FBRUEsZUFBWSxFQUFaLElBQWtCLElBQWxCO0FBQ0EsUUFBSyxPQUFMLEdBQWUsRUFBZjtBQUNBLFFBQUssUUFBTCxHQUFnQix1QkFBYSxFQUFiLEVBQWlCLFdBQVcsYUFBYSxFQUFiLENBQTVCLENBQWhCO0FBQ0EsUUFBSyxxQkFBTDtBQUNEOztBQUVELFVBQVMsWUFBVCxDQUF1QixFQUF2QixFQUEyQjtBQUN6QixVQUFPLFVBQUMsS0FBRCxFQUFXO0FBQ2hCLFNBQUksQ0FBQyxNQUFNLE9BQU4sQ0FBYyxLQUFkLENBQUwsRUFBMkI7QUFDekIsZUFBUSxDQUFDLEtBQUQsQ0FBUjtBQUNEO0FBQ0QsWUFBTyxXQUFXLEVBQVgsRUFBZSxLQUFmLEVBQXNCLElBQXRCLENBQVA7QUFDRCxJQUxEO0FBTUQ7O0FBRUQsVUFBUyxTQUFULENBQW1CLE9BQW5CLEdBQTZCLFlBQVk7QUFDdkMsVUFBTyxLQUFLLFFBQVo7QUFDQSxVQUFPLEtBQUssT0FBWjtBQUNBLFVBQU8sWUFBWSxLQUFLLEVBQWpCLENBQVA7QUFDRCxFQUpEOztBQU1BLFVBQVMsU0FBVCxDQUFtQixJQUFuQixHQUEwQixZQUFZO0FBQ3BDLFFBQUssUUFBTCxDQUFjLE9BQWQsR0FBd0IsS0FBeEI7QUFDRCxFQUZEOztBQUlBLFVBQVMsU0FBVCxDQUFtQixLQUFuQixHQUEyQixZQUFZO0FBQ3JDLFFBQUssUUFBTCxDQUFjLE9BQWQsR0FBd0IsSUFBeEI7QUFDRCxFQUZEOztBQUlBLFVBQVMsU0FBVCxDQUFtQixxQkFBbkIsR0FBMkMsWUFBWTtBQUFBOztBQUNyRCxPQUFJLENBQUMsS0FBSyxlQUFWLEVBQTJCO0FBQ3pCLFNBQU0sS0FBSyxJQUFJLE9BQUosQ0FBWSxVQUFaLENBQVg7QUFDQSxRQUFHLEtBQUgsR0FBVyxLQUFLLEVBQWhCO0FBQ0EsUUFBRyxhQUFILEdBQW1CLElBQW5CO0FBQ0EsUUFBRyxJQUFILEdBQVUsaUJBQVY7QUFDQSxRQUFHLEtBQUgsR0FBVyxDQUFYO0FBQ0EsUUFBRyxHQUFILEdBQVMsa0JBQVQ7QUFDQSxVQUFLLE9BQUwsQ0FBYSxnQkFBYixHQUFnQyxFQUFoQztBQUNBLFVBQUssZUFBTCxHQUF1QixFQUF2QjtBQUNBLFFBQUcsV0FBSCxHQUFpQixVQUFDLElBQUQsRUFBVTtBQUN6Qix5QkFBaUIsSUFBakI7QUFDRCxNQUZEO0FBR0EsUUFBRyxZQUFILEdBQWtCLFVBQUMsSUFBRCxFQUFPLE1BQVAsRUFBa0I7QUFDbEMseUJBQWlCLElBQWpCLEVBQXVCLE1BQXZCO0FBQ0QsTUFGRDtBQUdEOztBQUVELFVBQU8sS0FBSyxlQUFaO0FBQ0QsRUFuQkQ7O0FBcUJBLFVBQVMsVUFBVCxDQUFxQixHQUFyQixFQUEwQixJQUExQixFQUFnQyxNQUFoQyxFQUF3QztBQUFBLE9BQzlCLGVBRDhCLEdBQ1YsR0FEVSxDQUM5QixlQUQ4Qjs7O0FBR3RDLE9BQUksZ0JBQWdCLFlBQWhCLENBQTZCLE1BQTdCLEdBQXNDLENBQXRDLElBQTJDLEtBQUssVUFBcEQsRUFBZ0U7QUFDOUQ7QUFDRDtBQUNELE9BQU0sV0FBVyxnQkFBZ0IsUUFBakM7QUFDQSxPQUFNLGNBQWMsU0FBUyxPQUFULENBQWlCLE1BQWpCLENBQXBCO0FBQ0EsT0FBSSxjQUFjLENBQWxCLEVBQXFCO0FBQ25CLGNBQVMsSUFBVCxDQUFjLElBQWQ7QUFDRCxJQUZELE1BR0s7QUFDSCxjQUFTLE1BQVQsQ0FBZ0IsV0FBaEIsRUFBNkIsQ0FBN0IsRUFBZ0MsSUFBaEM7QUFDRDs7QUFFRCxPQUFJLEtBQUssUUFBTCxLQUFrQixDQUF0QixFQUF5QjtBQUN2QixTQUFJLEtBQUssSUFBTCxLQUFjLE1BQWxCLEVBQTBCO0FBQ3hCLFlBQUssS0FBTCxHQUFhLElBQUksRUFBakI7QUFDQSxZQUFLLGFBQUwsR0FBcUIsR0FBckI7QUFDQSxZQUFLLFVBQUwsR0FBa0IsZUFBbEI7QUFDRCxNQUpELE1BS0s7QUFDSCxZQUFLLFFBQUwsQ0FBYyxPQUFkLENBQXNCLGlCQUFTO0FBQzdCLGVBQU0sVUFBTixHQUFtQixJQUFuQjtBQUNELFFBRkQ7QUFHQSxlQUFRLEdBQVIsRUFBYSxJQUFiO0FBQ0EsWUFBSyxLQUFMLEdBQWEsSUFBSSxFQUFqQjtBQUNBLFlBQUssYUFBTCxHQUFxQixHQUFyQjtBQUNBLGtCQUFXLElBQVgsRUFBaUIsZUFBakI7QUFDQSxjQUFPLElBQUksT0FBSixDQUFZLEtBQUssTUFBakIsQ0FBUDtBQUNEO0FBQ0QscUJBQWdCLFlBQWhCLENBQTZCLElBQTdCLENBQWtDLElBQWxDO0FBQ0EsU0FBSSxRQUFKLENBQWEsVUFBYixDQUF3QixJQUF4QjtBQUNELElBbEJELE1BbUJLO0FBQ0gsVUFBSyxVQUFMLEdBQWtCLGVBQWxCO0FBQ0EsU0FBSSxPQUFKLENBQVksS0FBSyxHQUFqQixJQUF3QixJQUF4QjtBQUNEO0FBQ0Y7O0FBRUQsVUFBUyxPQUFULENBQWtCLEdBQWxCLEVBQXVCLEVBQXZCLEVBQTJCO0FBQ3pCLE1BQUcsSUFBSCxHQUFVLE1BQVY7QUFDQSxNQUFHLEtBQUgsR0FBVyxDQUFYO0FBQ0EsVUFBTyxJQUFJLE9BQUosQ0FBWSxHQUFHLE1BQWYsQ0FBUDtBQUNBLE1BQUcsR0FBSCxHQUFTLE9BQVQ7QUFDQSxPQUFJLE9BQUosQ0FBWSxLQUFaLEdBQW9CLEVBQXBCO0FBQ0EsT0FBSSxJQUFKLEdBQVcsRUFBWDtBQUNEOztBQUVELFVBQVMsU0FBVCxDQUFtQixVQUFuQixHQUFnQyxVQUFVLElBQVYsRUFBZ0IsS0FBaEIsRUFBdUI7QUFDckQsT0FBSSxDQUFDLEtBQUssSUFBVixFQUFnQjtBQUNkLFNBQU0sS0FBSyxJQUFJLE9BQUosQ0FBWSxJQUFaLEVBQWtCLEtBQWxCLENBQVg7QUFDQSxhQUFRLElBQVIsRUFBYyxFQUFkO0FBQ0Q7O0FBRUQsVUFBTyxLQUFLLElBQVo7QUFDRCxFQVBEOztBQVNBLFVBQVMsU0FBVCxDQUFtQixhQUFuQixHQUFtQyxVQUFVLE9BQVYsRUFBbUIsS0FBbkIsRUFBMEI7QUFDM0QsVUFBTyxJQUFJLE9BQUosQ0FBWS
xPQUFaLEVBQXFCLEtBQXJCLENBQVA7QUFDRCxFQUZEOztBQUlBLFVBQVMsU0FBVCxDQUFtQixhQUFuQixHQUFtQyxVQUFVLElBQVYsRUFBZ0I7QUFDakQsVUFBTyxJQUFJLE9BQUosQ0FBWSxJQUFaLENBQVA7QUFDRCxFQUZEOztBQUlBLFVBQVMsU0FBVCxDQUFtQixTQUFuQixHQUErQixVQUFVLEVBQVYsRUFBYyxJQUFkLEVBQW9CLENBQXBCLEVBQXVCLFVBQXZCLEVBQW1DO0FBQ2hFLE9BQUksQ0FBQyxFQUFMLEVBQVM7QUFDUDtBQUNEO0FBQ0QsT0FBSSxLQUFLLEVBQVQ7QUFDQSxLQUFFLElBQUYsR0FBUyxJQUFUO0FBQ0EsS0FBRSxNQUFGLEdBQVcsRUFBWDtBQUNBLEtBQUUsU0FBRixHQUFjLEtBQUssR0FBTCxFQUFkO0FBQ0EsT0FBSSxVQUFKLEVBQWdCO0FBQ2QsbUJBQWMsRUFBZCxFQUFrQixVQUFsQjtBQUNEO0FBQ0QsVUFBTyxHQUFHLFNBQUgsQ0FBYSxJQUFiLEVBQW1CLENBQW5CLENBQVA7QUFDRCxFQVpEOztBQWNBLFVBQVMsU0FBVCxDQUFtQixNQUFuQixHQUE0QixVQUFVLEdBQVYsRUFBZTtBQUN6QyxVQUFPLEtBQUssT0FBTCxDQUFhLEdBQWIsQ0FBUDtBQUNELEVBRkQ7O0FBSUEsVUFBUyxhQUFULENBQXdCLEVBQXhCLEVBQTRCLE9BQTVCLEVBQXFDO0FBQ25DLE9BQU0sUUFBUSxRQUFRLEtBQVIsSUFBaUIsRUFBL0I7QUFDQSxRQUFLLElBQU0sSUFBWCxJQUFtQixLQUFuQixFQUEwQjtBQUN4QixRQUFHLE9BQUgsQ0FBVyxJQUFYLEVBQWlCLE1BQU0sSUFBTixDQUFqQixFQUE4QixJQUE5QjtBQUNEO0FBQ0QsT0FBTSxRQUFRLFFBQVEsS0FBUixJQUFpQixFQUEvQjtBQUNBLFFBQUssSUFBTSxLQUFYLElBQW1CLEtBQW5CLEVBQTBCO0FBQ3hCLFFBQUcsUUFBSCxDQUFZLEtBQVosRUFBa0IsTUFBTSxLQUFOLENBQWxCLEVBQStCLElBQS9CO0FBQ0Q7QUFDRjs7QUFFTSxVQUFTLElBQVQsR0FBaUI7QUFDdEIsUUFBSyxNQUFMLEdBQWMsQ0FBQyxhQUFELEVBQWdCLFFBQWhCLEVBQWQ7QUFDQSxRQUFLLEdBQUwsR0FBVyxLQUFLLE1BQWhCO0FBQ0EsUUFBSyxRQUFMLEdBQWdCLEVBQWhCO0FBQ0EsUUFBSyxZQUFMLEdBQW9CLEVBQXBCO0FBQ0EsUUFBSyxVQUFMLEdBQWtCLElBQWxCO0FBQ0EsUUFBSyxXQUFMLEdBQW1CLElBQW5CO0FBQ0EsUUFBSyxlQUFMLEdBQXVCLElBQXZCO0FBQ0Q7O0FBRUQsTUFBSyxTQUFMLENBQWUsT0FBZixHQUF5QixZQUFZO0FBQ25DLE9BQU0sTUFBTSxZQUFZLEtBQUssS0FBakIsQ0FBWjtBQUNBLE9BQUksR0FBSixFQUFTO0FBQ1AsWUFBTyxLQUFLLEtBQVo7QUFDQSxZQUFPLElBQUksT0FBSixDQUFZLEtBQUssTUFBakIsQ0FBUDtBQUNEO0FBQ0QsUUFBSyxRQUFMLENBQWMsT0FBZCxDQUFzQixpQkFBUztBQUM3QixXQUFNLE9BQU47QUFDRCxJQUZEO0FBR0QsRUFURDs7QUFXTyxVQUFTLE9BQVQsR0FBa0Q7QUFBQSxPQUFoQyxJQUFnQyx5REFBekIsZ0JBQXlCO0FBQUEsT0FBUCxLQUFPOztBQUN2RCxXQUFRLFNBQVMsRUFBakI7QUFDQSxRQUFLLFFBQUwsR0FBZ0IsQ0FBaEI7QUFDQSxRQUFLLE1BQUwsR0FBYyxDQUFDLGFBQUQsRUFBZ0IsUUFBaEIsRUFBZDtBQUNBLFFBQUssR0FBTCxHQUFXLEtBQUssTUFBaEI7QUFDQSxRQUFLLElBQUwsR0FBWSxJQUFaO0FBQ0EsUUFBSyxJQUFMLEdBQVksTUFBTSxJQUFOLElBQWMsRUFBMUI7QUFDQSxRQUFLLFVBQUwsR0FBa0IsTUFBTSxVQUFOLElBQW9CLEVBQXRDO0FBQ0EsUUFBSyxLQUFMLEdBQWEsTUFBTSxLQUFOLElBQWUsRUFBNUI7QUFDQSxRQUFLLEtBQUwsR0FBYSxFQUFiO0FBQ0EsUUFBSyxRQUFMLEdBQWdCLEVBQWhCO0FBQ0EsUUFBSyxZQUFMLEdBQW9CLEVBQXBCO0FBQ0Q7O0FBRUQsU0FBUSxTQUFSLEdBQW9CLElBQUksSUFBSixFQUFwQjs7QUFFQSxTQUFRLFNBQVIsQ0FBa0IsV0FBbEIsR0FBZ0MsVUFBVSxJQUFWLEVBQWdCO0FBQzlDLE9BQUksS0FBSyxVQUFMLElBQW1CLEtBQUssVUFBTCxLQUFvQixJQUEzQyxFQUFpRDtBQUMvQztBQUNEO0FBQ0QsT0FBSSxDQUFDLEtBQUssVUFBVixFQUFzQjtBQUNwQixnQkFBVyxJQUFYLEVBQWlCLElBQWpCO0FBQ0EsaUJBQVksSUFBWixFQUFrQixLQUFLLFFBQXZCLEVBQWlDLEtBQUssUUFBTCxDQUFjLE1BQS9DLEVBQXVELElBQXZEO0FBQ0EsU0FBSSxLQUFLLEtBQVQsRUFBZ0I7QUFDZCxvQkFBYSxLQUFLLEtBQWxCLEVBQXlCLElBQXpCO0FBQ0Q7QUFDRCxTQUFJLEtBQUssUUFBTCxLQUFrQixDQUF0QixFQUF5QjtBQUN2QixtQkFBWSxJQUFaLEVBQWtCLEtBQUssWUFBdkIsRUFBcUMsS0FBSyxZQUFMLENBQWtCLE1BQXZEO0FBQ0EsV0FBSSxLQUFLLEtBQVQsRUFBZ0I7QUFDZCxhQUFNLFdBQVcsWUFBWSxLQUFLLEtBQWpCLEVBQXdCLFFBQXpDO0FBQ0EsZ0JBQU8sU0FBUyxVQUFULENBQW9CLElBQXBCLEVBQTBCLEtBQUssR0FBL0IsRUFBb0MsQ0FBQyxDQUFyQyxDQUFQO0FBQ0Q7QUFDRjtBQUNGLElBYkQsTUFjSztBQUNILGVBQVUsSUFBVixFQUFnQixLQUFLLFFBQXJCLEVBQStCLEtBQUssUUFBTCxDQUFjLE1BQTdDLEVBQXFELElBQXJEO0FBQ0EsU0FBSSxLQUFLLFFBQUwsS0FBa0IsQ0FBdEIsRUFBeUI7QUFDdkIsV0FBTSxRQUFRLFVBQVUsSUFBVixFQUFnQixLQUFLLFlBQXJCLEVBQW1DLEtBQUssWUFBTCxDQUFrQixNQUFyRCxDQUFkO0FBQ0EsV0FBSSxLQUFLLEtBQUwsSUFBYyxTQUFTLENBQTNCLEVBQThCO0FBQzVCLGFBQU0sWUFBVyxZQUFZLEtBQUssS
0FBakIsRUFBd0IsUUFBekM7QUFDQSxnQkFBTyxVQUFTLFdBQVQsQ0FBcUIsS0FBSyxHQUExQixFQUErQixLQUFLLEdBQXBDLEVBQXlDLEtBQXpDLENBQVA7QUFDRDtBQUNGO0FBQ0Y7QUFDRixFQTVCRDs7QUE4QkEsU0FBUSxTQUFSLENBQWtCLFlBQWxCLEdBQWlDLFVBQVUsSUFBVixFQUFnQixNQUFoQixFQUF3QjtBQUN2RCxPQUFJLEtBQUssVUFBTCxJQUFtQixLQUFLLFVBQUwsS0FBb0IsSUFBM0MsRUFBaUQ7QUFDL0M7QUFDRDtBQUNELE9BQUksU0FBUyxNQUFULElBQW1CLEtBQUssV0FBTCxLQUFxQixNQUE1QyxFQUFvRDtBQUNsRDtBQUNEO0FBQ0QsT0FBSSxDQUFDLEtBQUssVUFBVixFQUFzQjtBQUNwQixnQkFBVyxJQUFYLEVBQWlCLElBQWpCO0FBQ0EsaUJBQVksSUFBWixFQUFrQixLQUFLLFFBQXZCLEVBQWlDLEtBQUssUUFBTCxDQUFjLE9BQWQsQ0FBc0IsTUFBdEIsQ0FBakMsRUFBZ0UsSUFBaEU7QUFDQSxTQUFJLEtBQUssS0FBVCxFQUFnQjtBQUNkLG9CQUFhLEtBQUssS0FBbEIsRUFBeUIsSUFBekI7QUFDRDtBQUNELFNBQUksS0FBSyxRQUFMLEtBQWtCLENBQXRCLEVBQXlCO0FBQ3ZCLFdBQU0sYUFBYSxZQUFZLE1BQVosQ0FBbkI7QUFDQSxXQUFNLFFBQVEsWUFDWixJQURZLEVBRVosS0FBSyxZQUZPLEVBR1osYUFDSSxLQUFLLFlBQUwsQ0FBa0IsT0FBbEIsQ0FBMEIsVUFBMUIsQ0FESixHQUVJLEtBQUssWUFBTCxDQUFrQixNQUxWLENBQWQ7QUFPQSxXQUFJLEtBQUssS0FBVCxFQUFnQjtBQUNkLGFBQU0sV0FBVyxZQUFZLEtBQUssS0FBakIsRUFBd0IsUUFBekM7QUFDQSxnQkFBTyxTQUFTLFVBQVQsQ0FBb0IsSUFBcEIsRUFBMEIsS0FBSyxHQUEvQixFQUFvQyxLQUFwQyxDQUFQO0FBQ0Q7QUFDRjtBQUNGLElBcEJELE1BcUJLO0FBQ0gsZUFBVSxJQUFWLEVBQWdCLEtBQUssUUFBckIsRUFBK0IsS0FBSyxRQUFMLENBQWMsT0FBZCxDQUFzQixNQUF0QixDQUEvQixFQUE4RCxJQUE5RDtBQUNBLFNBQUksS0FBSyxRQUFMLEtBQWtCLENBQXRCLEVBQXlCO0FBQ3ZCLFdBQU0sY0FBYSxZQUFZLE1BQVosQ0FBbkI7QUFDQSxXQUFNLFNBQVEsVUFDWixJQURZLEVBRVosS0FBSyxZQUZPLEVBR1osY0FDSSxLQUFLLFlBQUwsQ0FBa0IsT0FBbEIsQ0FBMEIsV0FBMUIsQ0FESixHQUVJLEtBQUssWUFBTCxDQUFrQixNQUxWLENBQWQ7QUFPQSxXQUFJLEtBQUssS0FBTCxJQUFjLFVBQVMsQ0FBM0IsRUFBOEI7QUFDNUIsYUFBTSxhQUFXLFlBQVksS0FBSyxLQUFqQixFQUF3QixRQUF6QztBQUNBLGdCQUFPLFdBQVMsV0FBVCxDQUFxQixLQUFLLEdBQTFCLEVBQStCLEtBQUssR0FBcEMsRUFBeUMsTUFBekMsQ0FBUDtBQUNEO0FBQ0Y7QUFDRjtBQUNGLEVBN0NEOztBQStDQSxTQUFRLFNBQVIsQ0FBa0IsV0FBbEIsR0FBZ0MsVUFBVSxJQUFWLEVBQWdCLEtBQWhCLEVBQXVCO0FBQ3JELE9BQUksS0FBSyxVQUFMLElBQW1CLEtBQUssVUFBTCxLQUFvQixJQUEzQyxFQUFpRDtBQUMvQztBQUNEO0FBQ0QsT0FBSSxTQUFTLEtBQVQsSUFBa0IsS0FBSyxlQUFMLEtBQXlCLEtBQS9DLEVBQXNEO0FBQ3BEO0FBQ0Q7QUFDRCxPQUFJLENBQUMsS0FBSyxVQUFWLEVBQXNCO0FBQ3BCLGdCQUFXLElBQVgsRUFBaUIsSUFBakI7QUFDQSxpQkFBWSxJQUFaLEVBQWtCLEtBQUssUUFBdkIsRUFBaUMsS0FBSyxRQUFMLENBQWMsT0FBZCxDQUFzQixLQUF0QixJQUErQixDQUFoRSxFQUFtRSxJQUFuRTtBQUNBLFNBQUksS0FBSyxLQUFULEVBQWdCO0FBQ2Qsb0JBQWEsS0FBSyxLQUFsQixFQUF5QixJQUF6QjtBQUNEO0FBQ0QsU0FBSSxLQUFLLFFBQUwsS0FBa0IsQ0FBdEIsRUFBeUI7QUFDdkIsV0FBTSxRQUFRLFlBQ1osSUFEWSxFQUVaLEtBQUssWUFGTyxFQUdaLEtBQUssWUFBTCxDQUFrQixPQUFsQixDQUEwQixnQkFBZ0IsS0FBaEIsQ0FBMUIsSUFBb0QsQ0FIeEMsQ0FBZDtBQUtBLFdBQUksS0FBSyxLQUFULEVBQWdCO0FBQ2QsYUFBTSxXQUFXLFlBQVksS0FBSyxLQUFqQixFQUF3QixRQUF6QztBQUNBLGdCQUFPLFNBQVMsVUFBVCxDQUFvQixJQUFwQixFQUEwQixLQUFLLEdBQS9CLEVBQW9DLEtBQXBDLENBQVA7QUFDRDtBQUNGO0FBQ0YsSUFqQkQsTUFrQks7QUFDSCxlQUFVLElBQVYsRUFBZ0IsS0FBSyxRQUFyQixFQUErQixLQUFLLFFBQUwsQ0FBYyxPQUFkLENBQXNCLEtBQXRCLElBQStCLENBQTlELEVBQWlFLElBQWpFO0FBQ0EsU0FBSSxLQUFLLFFBQUwsS0FBa0IsQ0FBdEIsRUFBeUI7QUFDdkIsV0FBTSxVQUFRLFVBQ1osSUFEWSxFQUVaLEtBQUssWUFGTyxFQUdaLEtBQUssWUFBTCxDQUFrQixPQUFsQixDQUEwQixnQkFBZ0IsS0FBaEIsQ0FBMUIsSUFBb0QsQ0FIeEMsQ0FBZDtBQUtBLFdBQUksS0FBSyxLQUFMLElBQWMsV0FBUyxDQUEzQixFQUE4QjtBQUM1QixhQUFNLGFBQVcsWUFBWSxLQUFLLEtBQWpCLEVBQXdCLFFBQXpDO0FBQ0EsZ0JBQU8sV0FBUyxXQUFULENBQXFCLEtBQUssR0FBMUIsRUFBK0IsS0FBSyxHQUFwQyxFQUF5QyxPQUF6QyxDQUFQO0FBQ0Q7QUFDRjtBQUNGO0FBQ0YsRUF2Q0Q7O0FBeUNBLFNBQVEsU0FBUixDQUFrQixXQUFsQixHQUFnQyxVQUFVLElBQVYsRUFBZ0IsU0FBaEIsRUFBMkI7QUFDekQsT0FBSSxLQUFLLFVBQVQsRUFBcUI7QUFDbkIsaUJBQVksSUFBWixFQUFrQixLQUFLLFFBQXZCLEVBQWlDLElBQWpDO0FBQ0EsU0FBSSxLQUFLLFFBQUwsS0FBa0IsQ0FBdEIsRUFBeUI7QUFD
dkIsbUJBQVksSUFBWixFQUFrQixLQUFLLFlBQXZCO0FBQ0EsV0FBSSxLQUFLLEtBQVQsRUFBZ0I7QUFDZCxhQUFNLFdBQVcsWUFBWSxLQUFLLEtBQWpCLEVBQXdCLFFBQXpDO0FBQ0Esa0JBQVMsYUFBVCxDQUF1QixLQUFLLEdBQTVCO0FBQ0Q7QUFDRjtBQUNGO0FBQ0QsT0FBSSxDQUFDLFNBQUwsRUFBZ0I7QUFDZCxVQUFLLE9BQUw7QUFDRDtBQUNGLEVBZEQ7O0FBZ0JBLFNBQVEsU0FBUixDQUFrQixLQUFsQixHQUEwQixZQUFZO0FBQUE7O0FBQ3BDLE9BQUksS0FBSyxLQUFULEVBQWdCO0FBQUE7QUFDZCxXQUFNLFdBQVcsWUFBWSxPQUFLLEtBQWpCLEVBQXdCLFFBQXpDO0FBQ0EsY0FBSyxZQUFMLENBQWtCLE9BQWxCLENBQTBCLGdCQUFRO0FBQ2hDLGtCQUFTLGFBQVQsQ0FBdUIsS0FBSyxHQUE1QjtBQUNELFFBRkQ7QUFGYztBQUtmO0FBQ0QsUUFBSyxRQUFMLENBQWMsT0FBZCxDQUFzQixnQkFBUTtBQUM1QixVQUFLLE9BQUw7QUFDRCxJQUZEO0FBR0EsUUFBSyxRQUFMLENBQWMsTUFBZCxHQUF1QixDQUF2QjtBQUNBLFFBQUssWUFBTCxDQUFrQixNQUFsQixHQUEyQixDQUEzQjtBQUNELEVBWkQ7O0FBY0EsVUFBUyxXQUFULENBQXNCLElBQXRCLEVBQTRCO0FBQzFCLFVBQU8sSUFBUCxFQUFhO0FBQ1gsU0FBSSxLQUFLLFFBQUwsS0FBa0IsQ0FBdEIsRUFBeUI7QUFDdkIsY0FBTyxJQUFQO0FBQ0Q7QUFDRCxZQUFPLEtBQUssV0FBWjtBQUNEO0FBQ0Y7O0FBRUQsVUFBUyxlQUFULENBQTBCLElBQTFCLEVBQWdDO0FBQzlCLFVBQU8sSUFBUCxFQUFhO0FBQ1gsU0FBSSxLQUFLLFFBQUwsS0FBa0IsQ0FBdEIsRUFBeUI7QUFDdkIsY0FBTyxJQUFQO0FBQ0Q7QUFDRCxZQUFPLEtBQUssZUFBWjtBQUNEO0FBQ0Y7O0FBRUQsVUFBUyxVQUFULENBQXFCLElBQXJCLEVBQTJCLE1BQTNCLEVBQW1DO0FBQ2pDLFFBQUssVUFBTCxHQUFrQixNQUFsQjtBQUNBLE9BQUksT0FBTyxLQUFYLEVBQWtCO0FBQ2hCLFVBQUssS0FBTCxHQUFhLE9BQU8sS0FBcEI7QUFDQSxVQUFLLGFBQUwsR0FBcUIsT0FBTyxhQUE1QjtBQUNBLFVBQUssYUFBTCxDQUFtQixPQUFuQixDQUEyQixLQUFLLE1BQWhDLElBQTBDLElBQTFDO0FBQ0EsVUFBSyxLQUFMLEdBQWEsT0FBTyxLQUFQLEdBQWUsQ0FBNUI7QUFDRDtBQUNELFFBQUssUUFBTCxDQUFjLE9BQWQsQ0FBc0IsaUJBQVM7QUFDN0IsZ0JBQVcsS0FBWCxFQUFrQixJQUFsQjtBQUNELElBRkQ7QUFHRDs7QUFFRCxVQUFTLFlBQVQsQ0FBdUIsS0FBdkIsRUFBOEIsSUFBOUIsRUFBb0M7QUFDbEMsT0FBTSxNQUFNLFlBQVksS0FBWixDQUFaO0FBQ0EsT0FBSSxPQUFKLENBQVksS0FBSyxNQUFqQixJQUEyQixJQUEzQjtBQUNEOztBQUVELFVBQVMsV0FBVCxDQUFzQixNQUF0QixFQUE4QixJQUE5QixFQUFvQyxRQUFwQyxFQUE4QyxhQUE5QyxFQUE2RDtBQUMzRCxPQUFJLFdBQVcsQ0FBZixFQUFrQjtBQUNoQixnQkFBVyxDQUFYO0FBQ0Q7QUFDRCxPQUFNLFNBQVMsS0FBSyxXQUFXLENBQWhCLENBQWY7QUFDQSxPQUFNLFFBQVEsS0FBSyxRQUFMLENBQWQ7QUFDQSxRQUFLLE1BQUwsQ0FBWSxRQUFaLEVBQXNCLENBQXRCLEVBQXlCLE1BQXpCO0FBQ0EsT0FBSSxhQUFKLEVBQW1CO0FBQ2pCLGdCQUFXLE9BQU8sV0FBUCxHQUFxQixNQUFoQztBQUNBLFlBQU8sZUFBUCxHQUF5QixNQUF6QjtBQUNBLFlBQU8sV0FBUCxHQUFxQixLQUFyQjtBQUNBLGVBQVUsTUFBTSxlQUFOLEdBQXdCLE1BQWxDO0FBQ0Q7QUFDRCxVQUFPLFFBQVA7QUFDRDs7QUFFRCxVQUFTLFNBQVQsQ0FBb0IsTUFBcEIsRUFBNEIsSUFBNUIsRUFBa0MsUUFBbEMsRUFBNEMsYUFBNUMsRUFBMkQ7QUFDekQsT0FBTSxRQUFRLEtBQUssT0FBTCxDQUFhLE1BQWIsQ0FBZDtBQUNBLE9BQUksUUFBUSxDQUFaLEVBQWU7QUFDYixZQUFPLENBQUMsQ0FBUjtBQUNEO0FBQ0QsT0FBSSxhQUFKLEVBQW1CO0FBQ2pCLFNBQU0sU0FBUyxLQUFLLFFBQVEsQ0FBYixDQUFmO0FBQ0EsU0FBTSxRQUFRLEtBQUssUUFBUSxDQUFiLENBQWQ7QUFDQSxnQkFBVyxPQUFPLFdBQVAsR0FBcUIsS0FBaEM7QUFDQSxlQUFVLE1BQU0sZUFBTixHQUF3QixNQUFsQztBQUNEO0FBQ0QsUUFBSyxNQUFMLENBQVksS0FBWixFQUFtQixDQUFuQjtBQUNBLE9BQUksZ0JBQWdCLFFBQXBCO0FBQ0EsT0FBSSxTQUFTLFFBQWIsRUFBdUI7QUFDckIscUJBQWdCLFdBQVcsQ0FBM0I7QUFDRDtBQUNELE9BQU0sWUFBWSxLQUFLLGdCQUFnQixDQUFyQixDQUFsQjtBQUNBLE9BQU0sV0FBVyxLQUFLLGFBQUwsQ0FBakI7QUFDQSxRQUFLLE1BQUwsQ0FBWSxhQUFaLEVBQTJCLENBQTNCLEVBQThCLE1BQTlCO0FBQ0EsT0FBSSxhQUFKLEVBQW1CO0FBQ2pCLG1CQUFjLFVBQVUsV0FBVixHQUF3QixNQUF0QztBQUNBLFlBQU8sZUFBUCxHQUF5QixTQUF6QjtBQUNBLFlBQU8sV0FBUCxHQUFxQixRQUFyQjtBQUNBLGtCQUFhLFNBQVMsZUFBVCxHQUEyQixNQUF4QztBQUNEO0FBQ0QsT0FBSSxVQUFVLGFBQWQsRUFBNkI7QUFDM0IsWUFBTyxDQUFDLENBQVI7QUFDRDtBQUNELFVBQU8sUUFBUDtBQUNEOztBQUVELFVBQVMsV0FBVCxDQUFzQixNQUF0QixFQUE4QixJQUE5QixFQUFvQyxhQUFwQyxFQUFtRDtBQUNqRCxPQUFNLFFBQVEsS0FBSyxPQUFMLENBQWEsTUFBYixDQUFkO0FBQ0EsT0FBSSxRQUFRLENBQVosRUFBZTtBQUNiO0FBQ0Q7QUFDRCxPQUFJLGFBQUosRUFBbUI7QUFDakIsU0F
BTSxTQUFTLEtBQUssUUFBUSxDQUFiLENBQWY7QUFDQSxTQUFNLFFBQVEsS0FBSyxRQUFRLENBQWIsQ0FBZDtBQUNBLGdCQUFXLE9BQU8sV0FBUCxHQUFxQixLQUFoQztBQUNBLGVBQVUsTUFBTSxlQUFOLEdBQXdCLE1BQWxDO0FBQ0Q7QUFDRCxRQUFLLE1BQUwsQ0FBWSxLQUFaLEVBQW1CLENBQW5CO0FBQ0Q7O0FBRUQsU0FBUSxTQUFSLENBQWtCLE9BQWxCLEdBQTRCLFVBQVUsR0FBVixFQUFlLEtBQWYsRUFBc0IsTUFBdEIsRUFBOEI7QUFDeEQsT0FBSSxLQUFLLElBQUwsQ0FBVSxHQUFWLE1BQW1CLEtBQXZCLEVBQThCO0FBQzVCO0FBQ0Q7QUFDRCxRQUFLLElBQUwsQ0FBVSxHQUFWLElBQWlCLEtBQWpCO0FBQ0EsT0FBSSxDQUFDLE1BQUQsSUFBVyxLQUFLLEtBQXBCLEVBQTJCO0FBQ3pCLFNBQU0sV0FBVyxZQUFZLEtBQUssS0FBakIsRUFBd0IsUUFBekM7QUFDQSxjQUFTLE9BQVQsQ0FBaUIsS0FBSyxHQUF0QixFQUEyQixHQUEzQixFQUFnQyxLQUFoQztBQUNEO0FBQ0YsRUFURDs7QUFXQSxTQUFRLFNBQVIsQ0FBa0IsUUFBbEIsR0FBNkIsVUFBVSxHQUFWLEVBQWUsS0FBZixFQUFzQixNQUF0QixFQUE4QjtBQUN6RCxPQUFJLEtBQUssS0FBTCxDQUFXLEdBQVgsTUFBb0IsS0FBeEIsRUFBK0I7QUFDN0I7QUFDRDtBQUNELFFBQUssS0FBTCxDQUFXLEdBQVgsSUFBa0IsS0FBbEI7QUFDQSxPQUFJLENBQUMsTUFBRCxJQUFXLEtBQUssS0FBcEIsRUFBMkI7QUFDekIsU0FBTSxXQUFXLFlBQVksS0FBSyxLQUFqQixFQUF3QixRQUF6QztBQUNBLGNBQVMsUUFBVCxDQUFrQixLQUFLLEdBQXZCLEVBQTRCLEdBQTVCLEVBQWlDLEtBQWpDO0FBQ0Q7QUFDRixFQVREOztBQVdBLFNBQVEsU0FBUixDQUFrQixhQUFsQixHQUFrQyxVQUFVLFVBQVYsRUFBc0I7QUFDdEQsUUFBSyxVQUFMLEdBQWtCLFVBQWxCO0FBQ0EsT0FBSSxLQUFLLEtBQVQsRUFBZ0I7QUFDZCxTQUFNLFdBQVcsWUFBWSxLQUFLLEtBQWpCLEVBQXdCLFFBQXpDO0FBQ0EsY0FBUyxTQUFULENBQW1CLEtBQUssR0FBeEIsRUFBNkIsS0FBSyxPQUFMLEVBQTdCO0FBQ0Q7QUFDRixFQU5EOztBQVFBLFNBQVEsU0FBUixDQUFrQixRQUFsQixHQUE2QixVQUFVLElBQVYsRUFBZ0IsT0FBaEIsRUFBeUI7QUFDcEQsT0FBSSxDQUFDLEtBQUssS0FBTCxDQUFXLElBQVgsQ0FBTCxFQUF1QjtBQUNyQixVQUFLLEtBQUwsQ0FBVyxJQUFYLElBQW1CLE9BQW5CO0FBQ0EsU0FBSSxLQUFLLEtBQVQsRUFBZ0I7QUFDZCxXQUFNLFdBQVcsWUFBWSxLQUFLLEtBQWpCLEVBQXdCLFFBQXpDO0FBQ0EsZ0JBQVMsUUFBVCxDQUFrQixLQUFLLEdBQXZCLEVBQTRCLElBQTVCO0FBQ0Q7QUFDRjtBQUNGLEVBUkQ7O0FBVUEsU0FBUSxTQUFSLENBQWtCLFdBQWxCLEdBQWdDLFVBQVUsSUFBVixFQUFnQjtBQUM5QyxPQUFJLEtBQUssS0FBTCxDQUFXLElBQVgsQ0FBSixFQUFzQjtBQUNwQixZQUFPLEtBQUssS0FBTCxDQUFXLElBQVgsQ0FBUDtBQUNBLFNBQUksS0FBSyxLQUFULEVBQWdCO0FBQ2QsV0FBTSxXQUFXLFlBQVksS0FBSyxLQUFqQixFQUF3QixRQUF6QztBQUNBLGdCQUFTLFdBQVQsQ0FBcUIsS0FBSyxHQUExQixFQUErQixJQUEvQjtBQUNEO0FBQ0Y7QUFDRixFQVJEOztBQVVBLFNBQVEsU0FBUixDQUFrQixTQUFsQixHQUE4QixVQUFVLElBQVYsRUFBZ0IsQ0FBaEIsRUFBbUI7QUFDL0MsT0FBTSxVQUFVLEtBQUssS0FBTCxDQUFXLElBQVgsQ0FBaEI7QUFDQSxPQUFJLE9BQUosRUFBYTtBQUNYLFlBQU8sUUFBUSxJQUFSLENBQWEsSUFBYixFQUFtQixDQUFuQixDQUFQO0FBQ0Q7QUFDRixFQUxEOztBQU9BLFNBQVEsU0FBUixDQUFrQixPQUFsQixHQUE0QixZQUFZO0FBQ3RDLFVBQU8sT0FBTyxNQUFQLENBQWMsRUFBZCxFQUFrQixLQUFLLFVBQXZCLEVBQW1DLEtBQUssS0FBeEMsQ0FBUDtBQUNELEVBRkQ7O0FBSUEsU0FBUSxTQUFSLENBQWtCLE1BQWxCLEdBQTJCLFlBQVk7QUFDckMsT0FBTSxTQUFTO0FBQ2IsVUFBSyxLQUFLLEdBQUwsQ0FBUyxRQUFULEVBRFE7QUFFYixXQUFNLEtBQUssSUFGRTtBQUdiLFdBQU0sS0FBSyxJQUhFO0FBSWIsWUFBTyxLQUFLLE9BQUw7QUFKTSxJQUFmO0FBTUEsT0FBTSxRQUFRLE9BQU8sSUFBUCxDQUFZLEtBQUssS0FBakIsQ0FBZDtBQUNBLE9BQUksTUFBTSxNQUFWLEVBQWtCO0FBQ2hCLFlBQU8sS0FBUCxHQUFlLEtBQWY7QUFDRDtBQUNELE9BQUksS0FBSyxZQUFMLENBQWtCLE1BQXRCLEVBQThCO0FBQzVCLFlBQU8sUUFBUCxHQUFrQixLQUFLLFlBQUwsQ0FBa0IsR0FBbEIsQ0FBc0IsVUFBQyxLQUFEO0FBQUEsY0FBVyxNQUFNLE1BQU4sRUFBWDtBQUFBLE1BQXRCLENBQWxCO0FBQ0Q7QUFDRCxVQUFPLE1BQVA7QUFDRCxFQWZEOztBQWlCQSxTQUFRLFNBQVIsQ0FBa0IsUUFBbEIsR0FBNkIsWUFBWTtBQUN2QyxVQUFPLE1BQU0sS0FBSyxJQUFYLEdBQ0wsUUFESyxHQUNNLEtBQUssU0FBTCxDQUFlLEtBQUssSUFBcEIsQ0FETixHQUVMLFNBRkssR0FFTyxLQUFLLFNBQUwsQ0FBZSxLQUFLLE9BQUwsRUFBZixDQUZQLEdBRXdDLEdBRnhDLEdBR0wsS0FBSyxZQUFMLENBQWtCLEdBQWxCLENBQXNCLFVBQUMsS0FBRDtBQUFBLFlBQVcsTUFBTSxRQUFOLEVBQVg7QUFBQSxJQUF0QixFQUFtRCxJQUFuRCxDQUF3RCxFQUF4RCxDQUhLLEdBSUwsSUFKSyxHQUlFLEtBQUssSUFKUCxHQUljLEdBSnJCO0FBS0QsRUFORDs7QUFRTyxVQUFTLE9BQV
QsQ0FBa0IsS0FBbEIsRUFBeUI7QUFDOUIsUUFBSyxRQUFMLEdBQWdCLENBQWhCO0FBQ0EsUUFBSyxNQUFMLEdBQWMsQ0FBQyxhQUFELEVBQWdCLFFBQWhCLEVBQWQ7QUFDQSxRQUFLLEdBQUwsR0FBVyxLQUFLLE1BQWhCO0FBQ0EsUUFBSyxJQUFMLEdBQVksU0FBWjtBQUNBLFFBQUssS0FBTCxHQUFhLEtBQWI7QUFDQSxRQUFLLFFBQUwsR0FBZ0IsRUFBaEI7QUFDQSxRQUFLLFlBQUwsR0FBb0IsRUFBcEI7QUFDRDs7QUFFRCxTQUFRLFNBQVIsR0FBb0IsSUFBSSxJQUFKLEVBQXBCOztBQUVBLFNBQVEsU0FBUixDQUFrQixRQUFsQixHQUE2QixZQUFZO0FBQ3ZDLFVBQU8sVUFBVSxLQUFLLEtBQWYsR0FBdUIsTUFBOUI7QUFDRCxFQUZELEM7Ozs7Ozs7Ozs7O21CQzloQndCLFE7U0FvR1IsWSxHQUFBLFk7QUFwR0QsVUFBUyxRQUFULENBQW1CLEVBQW5CLEVBQXVCLE9BQXZCLEVBQWdDO0FBQzdDLFFBQUssRUFBTCxHQUFVLEVBQVY7QUFDQSxRQUFLLE9BQUwsR0FBZSxLQUFmO0FBQ0EsUUFBSyxPQUFMLEdBQWUsRUFBZjtBQUNBLE9BQUksT0FBTyxPQUFQLEtBQW1CLFVBQXZCLEVBQW1DO0FBQ2pDLFVBQUssT0FBTCxHQUFlLE9BQWY7QUFDRDtBQUNGOztBQUVELFVBQVMsU0FBVCxDQUFtQixZQUFuQixHQUFrQyxVQUFVLFFBQVYsRUFBb0I7QUFDcEQsT0FBTSxVQUFVLEtBQUssT0FBckI7QUFDQSxVQUFPLFFBQVEsQ0FBQyxhQUFhLGNBQWIsRUFBNkIsRUFBN0IsQ0FBRCxDQUFSLEVBQTRDLFFBQTVDLENBQVA7QUFDRCxFQUhEOztBQUtBLFVBQVMsU0FBVCxDQUFtQixZQUFuQixHQUFrQyxVQUFVLFFBQVYsRUFBb0I7QUFDcEQsT0FBTSxVQUFVLEtBQUssT0FBckI7QUFDQSxVQUFPLFFBQVEsQ0FBQyxhQUFhLGNBQWIsRUFBNkIsRUFBN0IsQ0FBRCxDQUFSLEVBQTRDLFFBQTVDLENBQVA7QUFDRCxFQUhEOztBQUtBLFVBQVMsU0FBVCxDQUFtQixhQUFuQixHQUFtQyxVQUFVLFFBQVYsRUFBb0I7QUFDckQsT0FBTSxVQUFVLEtBQUssT0FBckI7QUFDQSxVQUFPLFFBQVEsQ0FBQyxhQUFhLGVBQWIsRUFBOEIsRUFBOUIsQ0FBRCxDQUFSLEVBQTZDLFFBQTdDLENBQVA7QUFDRCxFQUhEOztBQUtBLFVBQVMsU0FBVCxDQUFtQixVQUFuQixHQUFnQyxVQUFVLE9BQVYsRUFBbUI7QUFDakQsT0FBTSxPQUFPLFFBQVEsTUFBUixFQUFiO0FBQ0EsT0FBTSxXQUFXLEtBQUssUUFBdEI7QUFDQSxVQUFPLEtBQUssUUFBWjtBQUNBLE9BQU0sVUFBVSxDQUFDLGFBQWEsWUFBYixFQUEyQixDQUFDLElBQUQsQ0FBM0IsQ0FBRCxDQUFoQjtBQUNBLE9BQUksUUFBSixFQUFjO0FBQ1osYUFBUSxJQUFSLENBQWEsS0FBYixDQUFtQixPQUFuQixFQUE0QixTQUFTLEdBQVQsQ0FBYSxpQkFBUztBQUNoRCxjQUFPLGFBQWEsWUFBYixFQUEyQixDQUFDLEtBQUssR0FBTixFQUFXLEtBQVgsRUFBa0IsQ0FBQyxDQUFuQixDQUEzQixDQUFQO0FBQ0QsTUFGMkIsQ0FBNUI7QUFHRDtBQUNELFVBQU8sS0FBSyxVQUFMLENBQWdCLE9BQWhCLENBQVA7QUFDRCxFQVhEOztBQWFBLFVBQVMsU0FBVCxDQUFtQixVQUFuQixHQUFnQyxVQUFVLE9BQVYsRUFBbUIsR0FBbkIsRUFBd0IsS0FBeEIsRUFBK0I7QUFDN0QsT0FBSSxFQUFFLFNBQVMsQ0FBWCxDQUFKLEVBQW1CO0FBQ2pCLGFBQVEsQ0FBQyxDQUFUO0FBQ0Q7QUFDRCxVQUFPLEtBQUssVUFBTCxDQUFnQixhQUFhLFlBQWIsRUFBMkIsQ0FBQyxHQUFELEVBQU0sUUFBUSxNQUFSLEVBQU4sRUFBd0IsS0FBeEIsQ0FBM0IsQ0FBaEIsQ0FBUDtBQUNELEVBTEQ7O0FBT0EsVUFBUyxTQUFULENBQW1CLGFBQW5CLEdBQW1DLFVBQVUsR0FBVixFQUFlO0FBQ2hELE9BQUksTUFBTSxPQUFOLENBQWMsR0FBZCxDQUFKLEVBQXdCO0FBQ3RCLFNBQU0sVUFBVSxJQUFJLEdBQUosQ0FBUSxVQUFDLENBQUQ7QUFBQSxjQUFPLGFBQWEsZUFBYixFQUE4QixDQUFDLENBQUQsQ0FBOUIsQ0FBUDtBQUFBLE1BQVIsQ0FBaEI7QUFDQSxZQUFPLEtBQUssVUFBTCxDQUFnQixPQUFoQixDQUFQO0FBQ0Q7QUFDRCxVQUFPLEtBQUssVUFBTCxDQUFnQixhQUFhLGVBQWIsRUFBOEIsQ0FBQyxHQUFELENBQTlCLENBQWhCLENBQVA7QUFDRCxFQU5EOztBQVFBLFVBQVMsU0FBVCxDQUFtQixXQUFuQixHQUFpQyxVQUFVLFNBQVYsRUFBcUIsU0FBckIsRUFBZ0MsS0FBaEMsRUFBdUM7QUFDdEUsVUFBTyxLQUFLLFVBQUwsQ0FBZ0IsYUFBYSxhQUFiLEVBQTRCLENBQUMsU0FBRCxFQUFZLFNBQVosRUFBdUIsS0FBdkIsQ0FBNUIsQ0FBaEIsQ0FBUDtBQUNELEVBRkQ7O0FBSUEsVUFBUyxTQUFULENBQW1CLE9BQW5CLEdBQTZCLFVBQVUsR0FBVixFQUFlLEdBQWYsRUFBb0IsS0FBcEIsRUFBMkI7QUFDdEQsT0FBTSxTQUFTLEVBQWY7QUFDQSxVQUFPLEdBQVAsSUFBYyxLQUFkO0FBQ0EsVUFBTyxLQUFLLFVBQUwsQ0FBZ0IsYUFBYSxhQUFiLEVBQTRCLENBQUMsR0FBRCxFQUFNLE1BQU4sQ0FBNUIsQ0FBaEIsQ0FBUDtBQUNELEVBSkQ7O0FBTUEsVUFBUyxTQUFULENBQW1CLFFBQW5CLEdBQThCLFVBQVUsR0FBVixFQUFlLEdBQWYsRUFBb0IsS0FBcEIsRUFBMkI7QUFDdkQsT0FBTSxTQUFTLEVBQWY7QUFDQSxVQUFPLEdBQVAsSUFBYyxLQUFkO0FBQ0EsVUFBTyxLQUFLLFVBQUwsQ0FBZ0IsYUFBYSxhQUFiLEVBQTRCLENBQUMsR0FBRCxFQUFNLE1BQU4sQ0FBNUIsQ0FBaEIsQ0FBUDtBQUNELEVBSkQ7O0FBTUEsVUFBUyxTQUFULENBQW1CLFNBQW5CL
EdBQStCLFVBQVUsR0FBVixFQUFlLEtBQWYsRUFBc0I7QUFDbkQsVUFBTyxLQUFLLFVBQUwsQ0FBZ0IsYUFBYSxhQUFiLEVBQTRCLENBQUMsR0FBRCxFQUFNLEtBQU4sQ0FBNUIsQ0FBaEIsQ0FBUDtBQUNELEVBRkQ7O0FBSUEsVUFBUyxTQUFULENBQW1CLFFBQW5CLEdBQThCLFVBQVUsR0FBVixFQUFlLElBQWYsRUFBcUI7QUFDakQsVUFBTyxLQUFLLFVBQUwsQ0FBZ0IsYUFBYSxVQUFiLEVBQXlCLENBQUMsR0FBRCxFQUFNLElBQU4sQ0FBekIsQ0FBaEIsQ0FBUDtBQUNELEVBRkQ7O0FBSUEsVUFBUyxTQUFULENBQW1CLFdBQW5CLEdBQWlDLFVBQVUsR0FBVixFQUFlLElBQWYsRUFBcUI7QUFDcEQsVUFBTyxLQUFLLFVBQUwsQ0FBZ0IsYUFBYSxhQUFiLEVBQTRCLENBQUMsR0FBRCxFQUFNLElBQU4sQ0FBNUIsQ0FBaEIsQ0FBUDtBQUNELEVBRkQ7O0FBSUEsVUFBUyxTQUFULENBQW1CLE9BQW5CLEdBQTZCLFVBQVUsT0FBVixFQUFtQixFQUFuQixFQUF1QjtBQUNsRCxVQUFPLE1BQU0sSUFBYjtBQUNELEVBRkQ7O0FBSUEsVUFBUyxTQUFULENBQW1CLFVBQW5CLEdBQWdDLFVBQVUsT0FBVixFQUFtQjtBQUNqRCxPQUFNLFVBQVUsS0FBSyxPQUFyQjtBQUNBLE9BQU0sVUFBVSxLQUFLLE9BQXJCOztBQUVBLE9BQUksQ0FBQyxNQUFNLE9BQU4sQ0FBYyxPQUFkLENBQUwsRUFBNkI7QUFDM0IsZUFBVSxDQUFDLE9BQUQsQ0FBVjtBQUNEOztBQUVELE9BQUksS0FBSyxPQUFULEVBQWtCO0FBQ2hCLGFBQVEsSUFBUixDQUFhLEtBQWIsQ0FBbUIsT0FBbkIsRUFBNEIsT0FBNUI7QUFDRCxJQUZELE1BR0s7QUFDSCxZQUFPLFFBQVEsT0FBUixDQUFQO0FBQ0Q7QUFDRixFQWREOztBQWdCTyxVQUFTLFlBQVQsQ0FBdUIsSUFBdkIsRUFBNkIsSUFBN0IsRUFBbUM7QUFDeEMsVUFBTyxFQUFFLFFBQVEsS0FBVixFQUFpQixRQUFRLElBQXpCLEVBQStCLE1BQU0sSUFBckMsRUFBUDtBQUNELEU7Ozs7OztBQ3RHRDtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0EsR0FBRTtBQUNGO0FBQ0E7QUFDQTtBQUNBLEdBQUU7QUFDRjtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQSxHQUFFO0FBQ0Y7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQSxzQ0FBcUM7QUFDckMsa0NBQWlDO0FBQ2pDO0FBQ0E7QUFDQTtBQUNBO0FBQ0EsR0FBRTtBQUNGO0FBQ0E7QUFDQTtBQUNBO0FBQ0EsR0FBRTtBQUNGO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0EsR0FBRTtBQUNGO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0EsRzs7Ozs7Ozs7Ozs7U0N2RmdCLEMsR0FBQSxDO1NBY0EsRyxHQUFBLEc7U0FhQSxHLEdBQUEsRztTQVlBLFcsR0FBQSxXO1NBY0EsUyxHQUFBLFM7U0FxQkEsVyxHQUFBLFc7U0EwQkEsVSxHQUFBLFU7U0FrQkEsUyxHQUFBLFM7U0FhQSxRLEdBQUEsUTtTQWFBLFMsR0FBQSxTO1NBZUEsSyxHQUFBLEs7O0FBOUtoQjs7S0FBWSxDOzs7Ozs7Ozs7Ozs7Ozs7OztBQWVMLFVBQVMsQ0FBVCxDQUFZLEVBQVosRUFBZ0I7QUFDckIsS0FBRSxJQUFGLENBQU8sK0NBQVA7QUFDQSxPQUFNLE9BQU8sS0FBSyxJQUFMLENBQVUsRUFBVixDQUFiO0FBQ0EsT0FBSSxJQUFKLEVBQVU7QUFDUixZQUFPLEtBQUssRUFBWjtBQUNEO0FBQ0Y7Ozs7Ozs7Ozs7O0FBUU0sVUFBUyxHQUFULENBQWMsRUFBZCxFQUFrQjtBQUN2QixPQUFNLE9BQU8sS0FBSyxJQUFMLENBQVUsRUFBVixDQUFiO0FBQ0EsT0FBSSxJQUFKLEVBQVU7QUFDUixZQUFPLEtBQUssRUFBWjtBQUNEO0FBQ0Y7Ozs7Ozs7O0FBUU0sVUFBUyxHQUFULENBQWMsRUFBZCxFQUFrQjtBQUN2QixPQUFNLE9BQU8sS0FBSyxJQUFMLENBQVUsRUFBVixDQUFiO0FBQ0EsT0FBSSxJQUFKLEVBQVU7QUFDUixZQUFPLEtBQUssRUFBWjtBQUNEO0FBQ0Y7Ozs7Ozs7QUFPTSxVQUFTLFdBQVQsQ0FBc0IsRUFBdEIsRUFBMEI7QUFDL0IsT0FBTSxNQUFNLEtBQUssSUFBakI7QUFDQSxPQUFNLFNBQVMsSUFBSSxNQUFuQjtBQUNBLFVBQU8sT0FBTyxJQUFQLENBQVksWUFBTTtBQUN2QjtBQUNELElBRk0sQ0FBUDtBQUdEOzs7Ozs7OztBQVFNLFVBQVMsU0FBVCxDQUFvQixFQUFwQixFQUF3QixNQUF4QixFQUFnQztBQUNyQyxLQUFFLElBQUYsQ0FBTyxpQ0FDQywyQ0FERCxHQUVDLGlDQUZSO0FBR0EsT0FBTSxLQUFLLEtBQUssR0FBTCxDQUFTLEVBQVQsQ0FBWDtBQUNBLE9BQUksRUFBSixFQUFRO0FBQ04sU0FBTSxNQUFNLEtBQUssSUFBTCxDQUFVLGFBQVYsQ0FBd0IsS0FBeEIsQ0FBWjtBQUNBLFNBQUksZUFBSixDQUFvQixHQUFHLEdBQXZCLEVBQTRCLEVBQUUsUUFBUSxNQUFWLEVBQTVCO0FBQ0Q7QUFDRjs7Ozs7Ozs7Ozs7O0FBWU0sVUFBUyxXQUFULENBQXNCLEVBQXRCLEVBQTBCLE9BQTFCLEVBQW1DLFFBQW5DLEVBQTZDO0FBQUE7O0FBQ2xELE9BQU0sS0FBSyxLQUFLLEdBQUws
Q0FBUyxFQUFULENBQVg7QUFDQSxPQUFJLE1BQU0sT0FBTixJQUFpQixRQUFRLE1BQTdCLEVBQXFDO0FBQ25DLFNBQU0sWUFBWSxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLFdBQXhCLENBQWxCO0FBQ0EsZUFBVSxVQUFWLENBQXFCLEdBQUcsR0FBeEIsRUFBNkIsT0FBN0IsRUFBc0MsWUFBYTtBQUNqRCxhQUFLLFNBQUwsQ0FBZSxFQUFmLEVBQW1CLFFBQVEsTUFBM0I7QUFDQSxtQkFBWSxvQ0FBWjtBQUNELE1BSEQ7QUFJRDtBQUNGOzs7Ozs7Ozs7Ozs7Ozs7OztBQWlCTSxVQUFTLFVBQVQsQ0FBcUIsUUFBckIsRUFBK0I7QUFDcEMsT0FBTSxTQUFTLEVBQUUsTUFBRixDQUFTO0FBQ3RCLFVBQUssT0FBTyxhQUFQLElBQXdCO0FBRFAsSUFBVCxFQUVaLEtBQUssSUFBTCxDQUFVLE9BRkUsQ0FBZjtBQUdBLE9BQUksRUFBRSxLQUFGLENBQVEsUUFBUixNQUFzQixVQUExQixFQUFzQztBQUNwQyxPQUFFLElBQUYsQ0FBTyw0REFDTCwrQ0FERjtBQUVBLGNBQVMsTUFBVDtBQUNEO0FBQ0QsVUFBTyxNQUFQO0FBQ0Q7Ozs7Ozs7O0FBUU0sVUFBUyxTQUFULENBQW9CLE1BQXBCLEVBQTRCLFFBQTVCLEVBQXNDO0FBQzNDLEtBQUUsSUFBRixDQUFPLGlDQUNDLDhDQURELEdBRUMsc0NBRlI7QUFHQSxPQUFNLFNBQVMsS0FBSyxJQUFMLENBQVUsYUFBVixDQUF3QixRQUF4QixDQUFmO0FBQ0EsVUFBTyxRQUFQLENBQWdCLE1BQWhCLEVBQXdCLFFBQXhCO0FBQ0Q7Ozs7Ozs7QUFPTSxVQUFTLFFBQVQsQ0FBbUIsR0FBbkIsRUFBd0I7QUFDN0IsS0FBRSxJQUFGLENBQU8sZ0NBQ0MsNkNBREQsR0FFQyx3QkFGUjtBQUdBLE9BQU0sUUFBUSxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLE9BQXhCLENBQWQ7QUFDQSxTQUFNLE9BQU4sQ0FBYyxHQUFkO0FBQ0Q7Ozs7Ozs7QUFPTSxVQUFTLFNBQVQsQ0FBb0IsS0FBcEIsRUFBMkI7QUFDaEMsS0FBRSxJQUFGLENBQU8saUNBQ0MsZ0RBREQsR0FFQywyQkFGUjtBQUdBLE9BQU0sV0FBVyxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLFVBQXhCLENBQWpCO0FBQ0EsWUFBUyxRQUFULENBQWtCLEtBQWxCO0FBQ0Q7Ozs7Ozs7OztBQVNNLFVBQVMsS0FBVCxDQUFnQixVQUFoQixFQUE0QixVQUE1QixFQUFpRDtBQUN0RCxLQUFFLElBQUYsQ0FBTyw2QkFDTCwyREFERjtBQUVBLE9BQU0sU0FBUyxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLFVBQXhCLENBQWY7QUFDQSxPQUFJLFVBQVUsT0FBTyxVQUFQLENBQWQsRUFBa0M7QUFBQSx1Q0FKYyxJQUlkO0FBSmMsV0FJZDtBQUFBOztBQUNoQyxZQUFPLFVBQVAsZ0JBQXNCLElBQXRCO0FBQ0Q7QUFDRixFOzs7Ozs7Ozs7Ozs7U0N2S2UsVSxHQUFBLFU7U0FVQSxTLEdBQUEsUztTQXNCQSxhLEdBQUEsYTtTQVVBLE8sR0FBQSxPO1NBU0EsWSxHQUFBLFk7U0FTQSxNLEdBQUEsTTtTQVNBLE8sR0FBQSxPOzs7Ozs7Ozs7Ozs7Ozs7Ozs7QUFyRVQsVUFBUyxVQUFULENBQW9CLElBQXBCLEVBQTBCLElBQTFCLEVBQWdDLE9BQWhDLEVBQXlDLEtBQXpDLEVBQWdEO0FBQ3JELE9BQU0sWUFBWSxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLFdBQXhCLENBQWxCO0FBQ0EsYUFBVSxNQUFWLENBQWlCLElBQWpCLEVBQXVCLElBQXZCLEVBQTZCLE9BQTdCLEVBQXNDLEtBQXRDO0FBQ0Q7Ozs7Ozs7QUFPTSxVQUFTLFNBQVQsQ0FBbUIsTUFBbkIsRUFBMkIsUUFBM0IsRUFBcUM7O0FBRTFDLE9BQUksT0FBTyxNQUFQLEtBQWtCLFdBQXRCLEVBQW1DOztBQUVqQyxTQUFNLFdBQVcsS0FBSyxJQUFMLENBQVUsYUFBVixDQUF3QixVQUF4QixDQUFqQjtBQUNBLGNBQVMsSUFBVCxDQUFjO0FBQ1osY0FBTyxjQURLO0FBRVosZUFBUSxNQUZJO0FBR1osYUFBTTtBQUhNLE1BQWQsRUFJRyxRQUpIO0FBS0QsSUFSRCxNQVFPOztBQUVMLFNBQU0sU0FBUyxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLFFBQXhCLENBQWY7QUFDQSxZQUFPLFFBQVAsQ0FBZ0IsTUFBaEIsRUFBd0IsUUFBeEI7QUFDRDtBQUNGOzs7Ozs7O0FBT00sVUFBUyxhQUFULENBQXVCLE1BQXZCLEVBQStCLFFBQS9CLEVBQXlDO0FBQzlDLE9BQU0sV0FBVyxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLFVBQXhCLENBQWpCO0FBQ0EsWUFBUyxJQUFULENBQWMsTUFBZCxFQUFzQixRQUF0QjtBQUNEOzs7Ozs7O0FBT00sVUFBUyxPQUFULENBQWlCLENBQWpCLEVBQW9CLENBQXBCLEVBQXVCO0FBQzVCLE9BQU0sV0FBVyxLQUFLLElBQUwsQ0FBVSxhQUFWLENBQXdCLFVBQXhCLENBQWpCO0FBQ0EsWUFBUyxNQUFULENBQWdCLENBQWhCLEVBQW1CLENBQW5CO0FBQ0Q7Ozs7OztBQU1NLFVBQVMsWUFBVCxDQUFzQixRQUF0QixFQUFnQztBQUNyQyxPQUFNLE9BQU8sS0FBSyxJQUFMLENBQVUsYUFBVixDQUF3QixNQUF4QixDQUFiO0FBQ0EsUUFBSyxXQUFMLENBQWlCLFFBQWpCO0FBQ0Q7Ozs7OztBQU1NLFVBQVMsTUFBVCxDQUFnQixRQUFoQixFQUEwQjtBQUMvQixPQUFNLE9BQU8sS0FBSyxJQUFMLENBQVUsYUFBVixDQUF3QixNQUF4QixDQUFiO0FBQ0EsUUFBSyxLQUFMLENBQVcsUUFBWDtBQUNEOzs7Ozs7QUFNTSxVQUFTLE9BQVQsQ0FBaUIsUUFBakIsRUFBMkI7QUFDaEMsT0FBTSxPQUFPLEtBQUssSUFBTCxDQUFVLGFBQVYsQ0FBd0IsTUFBeEIsQ0FBYjtBQUNBLFFBQUssTUFBTCxDQUFZLFFBQVo7QUFDRCxFIiwiZmlsZSI6ImluZGV4LmpzIiwic291cmNlc0NvbnRlbnQiOlsiIFx0Ly8gVGhlIG1vZHV
sZSBjYWNoZVxuIFx0dmFyIGluc3RhbGxlZE1vZHVsZXMgPSB7fTtcblxuIFx0Ly8gVGhlIHJlcXVpcmUgZnVuY3Rpb25cbiBcdGZ1bmN0aW9uIF9fd2VicGFja19yZXF1aXJlX18obW9kdWxlSWQpIHtcblxuIFx0XHQvLyBDaGVjayBpZiBtb2R1bGUgaXMgaW4gY2FjaGVcbiBcdFx0aWYoaW5zdGFsbGVkTW9kdWxlc1ttb2R1bGVJZF0pXG4gXHRcdFx0cmV0dXJuIGluc3RhbGxlZE1vZHVsZXNbbW9kdWxlSWRdLmV4cG9ydHM7XG5cbiBcdFx0Ly8gQ3JlYXRlIGEgbmV3IG1vZHVsZSAoYW5kIHB1dCBpdCBpbnRvIHRoZSBjYWNoZSlcbiBcdFx0dmFyIG1vZHVsZSA9IGluc3RhbGxlZE1vZHVsZXNbbW9kdWxlSWRdID0ge1xuIFx0XHRcdGV4cG9ydHM6IHt9LFxuIFx0XHRcdGlkOiBtb2R1bGVJZCxcbiBcdFx0XHRsb2FkZWQ6IGZhbHNlXG4gXHRcdH07XG5cbiBcdFx0Ly8gRXhlY3V0ZSB0aGUgbW9kdWxlIGZ1bmN0aW9uXG4gXHRcdG1vZHVsZXNbbW9kdWxlSWRdLmNhbGwobW9kdWxlLmV4cG9ydHMsIG1vZHVsZSwgbW9kdWxlLmV4cG9ydHMsIF9fd2VicGFja19yZXF1aXJlX18pO1xuXG4gXHRcdC8vIEZsYWcgdGhlIG1vZHVsZSBhcyBsb2FkZWRcbiBcdFx0bW9kdWxlLmxvYWRlZCA9IHRydWU7XG5cbiBcdFx0Ly8gUmV0dXJuIHRoZSBleHBvcnRzIG9mIHRoZSBtb2R1bGVcbiBcdFx0cmV0dXJuIG1vZHVsZS5leHBvcnRzO1xuIFx0fVxuXG5cbiBcdC8vIGV4cG9zZSB0aGUgbW9kdWxlcyBvYmplY3QgKF9fd2VicGFja19tb2R1bGVzX18pXG4gXHRfX3dlYnBhY2tfcmVxdWlyZV9fLm0gPSBtb2R1bGVzO1xuXG4gXHQvLyBleHBvc2UgdGhlIG1vZHVsZSBjYWNoZVxuIFx0X193ZWJwYWNrX3JlcXVpcmVfXy5jID0gaW5zdGFsbGVkTW9kdWxlcztcblxuIFx0Ly8gX193ZWJwYWNrX3B1YmxpY19wYXRoX19cbiBcdF9fd2VicGFja19yZXF1aXJlX18ucCA9IFwiXCI7XG5cbiBcdC8vIExvYWQgZW50cnkgbW9kdWxlIGFuZCByZXR1cm4gZXhwb3J0c1xuIFx0cmV0dXJuIF9fd2VicGFja19yZXF1aXJlX18oMCk7XG5cblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiB3ZWJwYWNrL2Jvb3RzdHJhcCAwMGFiOGVmNjY4M2NmZGYyNTM1MlxuICoqLyIsImltcG9ydCAnd2VleC9odG1sNS9uYXRpdmUnXG5cbi8qKlxuICogcmVnaXN0ZXIgbWV0aG9kc1xuICovXG5jb25zdCBtZXRob2RzID0gcmVxdWlyZSgnLi9saWIvYXBpL21ldGhvZHMnKVxuY29uc3Qge3JlZ2lzdGVyTWV0aG9kc30gPSBnbG9iYWxcbnJlZ2lzdGVyTWV0aG9kcyhtZXRob2RzKVxuXG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogLi9pbmRleC5qc1xuICoqLyIsImltcG9ydCAnLi4vc2hhcmVkJ1xuaW1wb3J0IHJ1bnRpbWUgZnJvbSAnLi4vcnVudGltZSdcbmltcG9ydCB7IHN1YnZlcnNpb24gfSBmcm9tICcuLi8uLi9wYWNrYWdlLmpzb24nXG5cbmNvbnN0IHsgbmF0aXZlLCB0cmFuc2Zvcm1lciB9ID0gc3VidmVyc2lvblxuXG5mb3IgKGNvbnN0IG1ldGhvZE5hbWUgaW4gcnVudGltZSkge1xuICBnbG9iYWxbbWV0aG9kTmFtZV0gPSBmdW5jdGlvbiAoLi4uYXJncykge1xuICAgIGNvbnN0IHJldCA9IHJ1bnRpbWVbbWV0aG9kTmFtZV0oLi4uYXJncylcbiAgICBpZiAocmV0IGluc3RhbmNlb2YgRXJyb3IpIHtcbiAgICAgIGNvbnNvbGUuZXJyb3IocmV0LnRvU3RyaW5nKCkpXG4gICAgfVxuICAgIHJldHVybiByZXRcbiAgfVxufVxuXG5PYmplY3QuYXNzaWduKGdsb2JhbCwge1xuICBmcmFtZXdvcmtWZXJzaW9uOiBuYXRpdmUsXG4gIG5lZWRUcmFuc2Zvcm1lclZlcnNpb246IHRyYW5zZm9ybWVyXG59KVxuXG4vKipcbiAqIHJlZ2lzdGVyIG1ldGhvZHNcbiAqL1xuY29uc3QgbWV0aG9kcyA9IHJlcXVpcmUoJy4uL2RlZmF1bHQvYXBpL21ldGhvZHMnKVxuY29uc3QgeyByZWdpc3Rlck1ldGhvZHMgfSA9IGdsb2JhbFxucmVnaXN0ZXJNZXRob2RzKG1ldGhvZHMpXG5cblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L2h0bWw1L25hdGl2ZS9pbmRleC5qc1xuICoqLyIsImltcG9ydCAnLi9vYmplY3RBc3NpZ24nXG5pbXBvcnQgJy4vc2V0VGltZW91dCdcbmltcG9ydCAnLi9wcm9taXNlJ1xuaW1wb3J0ICcuL2NvbnNvbGUnXG5cblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L2h0bWw1L3NoYXJlZC9pbmRleC5qc1xuICoqLyIsImltcG9ydCAnY29yZS1qcy9mbi9vYmplY3QvYXNzaWduJ1xuXG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogL1VzZXJzL3poYW9qaW5qaWFuZy9TaXRlcy9wdWJsaWMvd2VleC9odG1sNS9zaGFyZWQvb2JqZWN0QXNzaWduLmpzXG4gKiovIiwicmVxdWlyZSgnLi4vLi4vbW9kdWxlcy9lczYub2JqZWN0LmFzc2lnbicpO1xubW9kdWxlLmV4cG9ydHMgPSByZXF1aXJlKCcuLi8uLi9tb2R1bGVzL19jb3JlJykuT2JqZWN0LmFzc2lnbjtcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL2ZuL29iamVjdC9hc3NpZ24uanNcbiAqKi8iLCIvLyAxOS4xLjMuMSBPYmplY3QuYXNzaWduKHRhcmdldCwgc291cmNlKVxudm
FyICRleHBvcnQgPSByZXF1aXJlKCcuL19leHBvcnQnKTtcblxuJGV4cG9ydCgkZXhwb3J0LlMgKyAkZXhwb3J0LkYsICdPYmplY3QnLCB7YXNzaWduOiByZXF1aXJlKCcuL19vYmplY3QtYXNzaWduJyl9KTtcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvZXM2Lm9iamVjdC5hc3NpZ24uanNcbiAqKi8iLCJ2YXIgZ2xvYmFsICAgID0gcmVxdWlyZSgnLi9fZ2xvYmFsJylcbiAgLCBjb3JlICAgICAgPSByZXF1aXJlKCcuL19jb3JlJylcbiAgLCBoaWRlICAgICAgPSByZXF1aXJlKCcuL19oaWRlJylcbiAgLCByZWRlZmluZSAgPSByZXF1aXJlKCcuL19yZWRlZmluZScpXG4gICwgY3R4ICAgICAgID0gcmVxdWlyZSgnLi9fY3R4JylcbiAgLCBQUk9UT1RZUEUgPSAncHJvdG90eXBlJztcblxudmFyICRleHBvcnQgPSBmdW5jdGlvbih0eXBlLCBuYW1lLCBzb3VyY2Upe1xuICB2YXIgSVNfRk9SQ0VEID0gdHlwZSAmICRleHBvcnQuRlxuICAgICwgSVNfR0xPQkFMID0gdHlwZSAmICRleHBvcnQuR1xuICAgICwgSVNfU1RBVElDID0gdHlwZSAmICRleHBvcnQuU1xuICAgICwgSVNfUFJPVE8gID0gdHlwZSAmICRleHBvcnQuUFxuICAgICwgSVNfQklORCAgID0gdHlwZSAmICRleHBvcnQuQlxuICAgICwgdGFyZ2V0ICAgID0gSVNfR0xPQkFMID8gZ2xvYmFsIDogSVNfU1RBVElDID8gZ2xvYmFsW25hbWVdIHx8IChnbG9iYWxbbmFtZV0gPSB7fSkgOiAoZ2xvYmFsW25hbWVdIHx8IHt9KVtQUk9UT1RZUEVdXG4gICAgLCBleHBvcnRzICAgPSBJU19HTE9CQUwgPyBjb3JlIDogY29yZVtuYW1lXSB8fCAoY29yZVtuYW1lXSA9IHt9KVxuICAgICwgZXhwUHJvdG8gID0gZXhwb3J0c1tQUk9UT1RZUEVdIHx8IChleHBvcnRzW1BST1RPVFlQRV0gPSB7fSlcbiAgICAsIGtleSwgb3duLCBvdXQsIGV4cDtcbiAgaWYoSVNfR0xPQkFMKXNvdXJjZSA9IG5hbWU7XG4gIGZvcihrZXkgaW4gc291cmNlKXtcbiAgICAvLyBjb250YWlucyBpbiBuYXRpdmVcbiAgICBvd24gPSAhSVNfRk9SQ0VEICYmIHRhcmdldCAmJiB0YXJnZXRba2V5XSAhPT0gdW5kZWZpbmVkO1xuICAgIC8vIGV4cG9ydCBuYXRpdmUgb3IgcGFzc2VkXG4gICAgb3V0ID0gKG93biA / IHRhcmdldCA6IHNvdXJjZSlba2V5XTtcbiAgICAvLyBiaW5kIHRpbWVycyB0byBnbG9iYWwgZm9yIGNhbGwgZnJvbSBleHBvcnQgY29udGV4dFxuICAgIGV4cCA9IElTX0JJTkQgJiYgb3duID8gY3R4KG91dCwgZ2xvYmFsKSA6IElTX1BST1RPICYmIHR5cGVvZiBvdXQgPT0gJ2Z1bmN0aW9uJyA / IGN0eChGdW5jdGlvbi5jYWxsLCBvdXQpIDogb3V0O1xuICAgIC8vIGV4dGVuZCBnbG9iYWxcbiAgICBpZih0YXJnZXQpcmVkZWZpbmUodGFyZ2V0LCBrZXksIG91dCwgdHlwZSAmICRleHBvcnQuVSk7XG4gICAgLy8gZXhwb3J0XG4gICAgaWYoZXhwb3J0c1trZXldICE9IG91dCloaWRlKGV4cG9ydHMsIGtleSwgZXhwKTtcbiAgICBpZihJU19QUk9UTyAmJiBleHBQcm90b1trZXldICE9IG91dClleHBQcm90b1trZXldID0gb3V0O1xuICB9XG59O1xuZ2xvYmFsLmNvcmUgPSBjb3JlO1xuLy8gdHlwZSBiaXRtYXBcbiRleHBvcnQuRiA9IDE7ICAgLy8gZm9yY2VkXG4kZXhwb3J0LkcgPSAyOyAgIC8vIGdsb2JhbFxuJGV4cG9ydC5TID0gNDsgICAvLyBzdGF0aWNcbiRleHBvcnQuUCA9IDg7ICAgLy8gcHJvdG9cbiRleHBvcnQuQiA9IDE2OyAgLy8gYmluZFxuJGV4cG9ydC5XID0gMzI7ICAvLyB3cmFwXG4kZXhwb3J0LlUgPSA2NDsgIC8vIHNhZmVcbiRleHBvcnQuUiA9IDEyODsgLy8gcmVhbCBwcm90byBtZXRob2QgZm9yIGBsaWJyYXJ5YCBcbm1vZHVsZS5leHBvcnRzID0gJGV4cG9ydDtcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvX2V4cG9ydC5qc1xuICoqLyIsIi8vIGh0dHBzOi8vZ2l0aHViLmNvbS96bG9pcm9jay9jb3JlLWpzL2lzc3Vlcy84NiNpc3N1ZWNvbW1lbnQtMTE1NzU5MDI4XG52YXIgZ2xvYmFsID0gbW9kdWxlLmV4cG9ydHMgPSB0eXBlb2Ygd2luZG93ICE9ICd1bmRlZmluZWQnICYmIHdpbmRvdy5NYXRoID09IE1hdGhcbiAgPyB3aW5kb3cgOiB0eXBlb2Ygc2VsZiAhPSAndW5kZWZpbmVkJyAmJiBzZWxmLk1hdGggPT0gTWF0aCA / 
IHNlbGYgOiBGdW5jdGlvbigncmV0dXJuIHRoaXMnKSgpO1xuaWYodHlwZW9mIF9fZyA9PSAnbnVtYmVyJylfX2cgPSBnbG9iYWw7IC8vIGVzbGludC1kaXNhYmxlLWxpbmUgbm8tdW5kZWZcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvX2dsb2JhbC5qc1xuICoqLyIsInZhciBjb3JlID0gbW9kdWxlLmV4cG9ydHMgPSB7dmVyc2lvbjogJzIuNC4wJ307XG5pZih0eXBlb2YgX19lID09ICdudW1iZXInKV9fZSA9IGNvcmU7IC8vIGVzbGludC1kaXNhYmxlLWxpbmUgbm8tdW5kZWZcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvX2NvcmUuanNcbiAqKi8iLCJ2YXIgZFAgICAgICAgICA9IHJlcXVpcmUoJy4vX29iamVjdC1kcCcpXG4gICwgY3JlYXRlRGVzYyA9IHJlcXVpcmUoJy4vX3Byb3BlcnR5LWRlc2MnKTtcbm1vZHVsZS5leHBvcnRzID0gcmVxdWlyZSgnLi9fZGVzY3JpcHRvcnMnKSA / IGZ1bmN0aW9uKG9iamVjdCwga2V5LCB2YWx1ZSl7XG4gIHJldHVybiBkUC5mKG9iamVjdCwga2V5LCBjcmVhdGVEZXNjKDEsIHZhbHVlKSk7XG59IDogZnVuY3Rpb24ob2JqZWN0LCBrZXksIHZhbHVlKXtcbiAgb2JqZWN0W2tleV0gPSB2YWx1ZTtcbiAgcmV0dXJuIG9iamVjdDtcbn07XG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogL1VzZXJzL3poYW9qaW5qaWFuZy9TaXRlcy9wdWJsaWMvd2VleC9 + Ly5ucG1pbnN0YWxsL2NvcmUtanMvMi40LjAvY29yZS1qcy9tb2R1bGVzL19oaWRlLmpzXG4gKiovIiwidmFyIGFuT2JqZWN0ICAgICAgID0gcmVxdWlyZSgnLi9fYW4tb2JqZWN0JylcbiAgLCBJRThfRE9NX0RFRklORSA9IHJlcXVpcmUoJy4vX2llOC1kb20tZGVmaW5lJylcbiAgLCB0b1ByaW1pdGl2ZSAgICA9IHJlcXVpcmUoJy4vX3RvLXByaW1pdGl2ZScpXG4gICwgZFAgICAgICAgICAgICAgPSBPYmplY3QuZGVmaW5lUHJvcGVydHk7XG5cbmV4cG9ydHMuZiA9IHJlcXVpcmUoJy4vX2Rlc2NyaXB0b3JzJykgPyBPYmplY3QuZGVmaW5lUHJvcGVydHkgOiBmdW5jdGlvbiBkZWZpbmVQcm9wZXJ0eShPLCBQLCBBdHRyaWJ1dGVzKXtcbiAgYW5PYmplY3QoTyk7XG4gIFAgPSB0b1ByaW1pdGl2ZShQLCB0cnVlKTtcbiAgYW5PYmplY3QoQXR0cmlidXRlcyk7XG4gIGlmKElFOF9ET01fREVGSU5FKXRyeSB7XG4gICAgcmV0dXJuIGRQKE8sIFAsIEF0dHJpYnV0ZXMpO1xuICB9IGNhdGNoKGUpeyAvKiBlbXB0eSAqLyB9XG4gIGlmKCdnZXQnIGluIEF0dHJpYnV0ZXMgfHwgJ3NldCcgaW4gQXR0cmlidXRlcyl0aHJvdyBUeXBlRXJyb3IoJ0FjY2Vzc29ycyBub3Qgc3VwcG9ydGVkIScpO1xuICBpZigndmFsdWUnIGluIEF0dHJpYnV0ZXMpT1tQXSA9IEF0dHJpYnV0ZXMudmFsdWU7XG4gIHJldHVybiBPO1xufTtcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvX29iamVjdC1kcC5qc1xuICoqLyIsInZhciBpc09iamVjdCA9IHJlcXVpcmUoJy4vX2lzLW9iamVjdCcpO1xubW9kdWxlLmV4cG9ydHMgPSBmdW5jdGlvbihpdCl7XG4gIGlmKCFpc09iamVjdChpdCkpdGhyb3cgVHlwZUVycm9yKGl0ICsgJyBpcyBub3QgYW4gb2JqZWN0IScpO1xuICByZXR1cm4gaXQ7XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fYW4tb2JqZWN0LmpzXG4gKiovIiwibW9kdWxlLmV4cG9ydHMgPSBmdW5jdGlvbihpdCl7XG4gIHJldHVybiB0eXBlb2YgaXQgPT09ICdvYmplY3QnID8gaXQgIT09IG51bGwgOiB0eXBlb2YgaXQgPT09ICdmdW5jdGlvbic7XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9faXMtb2JqZWN0LmpzXG4gKiovIiwibW9kdWxlLmV4cG9ydHMgPSAhcmVxdWlyZSgnLi9fZGVzY3JpcHRvcnMnKSAmJiAhcmVxdWlyZSgnLi9fZmFpbHMnKShmdW5jdGlvbigpe1xyXG4gIHJldHVybiBPYmplY3QuZGVmaW5lUHJvcGVydHkocmVxdWlyZSgnLi9fZG9tLWNyZWF0ZScpKCdkaXYnKSwgJ2EnLCB7Z2V0OiBmdW5jdGlvbigpeyByZXR1cm4gNzsgfX0pLmEgIT0gNztcclxufSk7XG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogL1VzZXJzL3poYW9qaW5qaWFuZy9TaXRlcy9wdWJsaWMvd2VleC9 + 
Ly5ucG1pbnN0YWxsL2NvcmUtanMvMi40LjAvY29yZS1qcy9tb2R1bGVzL19pZTgtZG9tLWRlZmluZS5qc1xuICoqLyIsIi8vIFRoYW5rJ3MgSUU4IGZvciBoaXMgZnVubnkgZGVmaW5lUHJvcGVydHlcbm1vZHVsZS5leHBvcnRzID0gIXJlcXVpcmUoJy4vX2ZhaWxzJykoZnVuY3Rpb24oKXtcbiAgcmV0dXJuIE9iamVjdC5kZWZpbmVQcm9wZXJ0eSh7fSwgJ2EnLCB7Z2V0OiBmdW5jdGlvbigpeyByZXR1cm4gNzsgfX0pLmEgIT0gNztcbn0pO1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fZGVzY3JpcHRvcnMuanNcbiAqKi8iLCJtb2R1bGUuZXhwb3J0cyA9IGZ1bmN0aW9uKGV4ZWMpe1xuICB0cnkge1xuICAgIHJldHVybiAhIWV4ZWMoKTtcbiAgfSBjYXRjaChlKXtcbiAgICByZXR1cm4gdHJ1ZTtcbiAgfVxufTtcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvX2ZhaWxzLmpzXG4gKiovIiwidmFyIGlzT2JqZWN0ID0gcmVxdWlyZSgnLi9faXMtb2JqZWN0JylcbiAgLCBkb2N1bWVudCA9IHJlcXVpcmUoJy4vX2dsb2JhbCcpLmRvY3VtZW50XG4gIC8vIGluIG9sZCBJRSB0eXBlb2YgZG9jdW1lbnQuY3JlYXRlRWxlbWVudCBpcyAnb2JqZWN0J1xuICAsIGlzID0gaXNPYmplY3QoZG9jdW1lbnQpICYmIGlzT2JqZWN0KGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQpO1xubW9kdWxlLmV4cG9ydHMgPSBmdW5jdGlvbihpdCl7XG4gIHJldHVybiBpcyA / IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoaXQpIDoge307XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fZG9tLWNyZWF0ZS5qc1xuICoqLyIsIi8vIDcuMS4xIFRvUHJpbWl0aXZlKGlucHV0IFssIFByZWZlcnJlZFR5cGVdKVxudmFyIGlzT2JqZWN0ID0gcmVxdWlyZSgnLi9faXMtb2JqZWN0Jyk7XG4vLyBpbnN0ZWFkIG9mIHRoZSBFUzYgc3BlYyB2ZXJzaW9uLCB3ZSBkaWRuJ3QgaW1wbGVtZW50IEBAdG9QcmltaXRpdmUgY2FzZVxuLy8gYW5kIHRoZSBzZWNvbmQgYXJndW1lbnQgLSBmbGFnIC0gcHJlZmVycmVkIHR5cGUgaXMgYSBzdHJpbmdcbm1vZHVsZS5leHBvcnRzID0gZnVuY3Rpb24oaXQsIFMpe1xuICBpZighaXNPYmplY3QoaXQpKXJldHVybiBpdDtcbiAgdmFyIGZuLCB2YWw7XG4gIGlmKFMgJiYgdHlwZW9mIChmbiA9IGl0LnRvU3RyaW5nKSA9PSAnZnVuY3Rpb24nICYmICFpc09iamVjdCh2YWwgPSBmbi5jYWxsKGl0KSkpcmV0dXJuIHZhbDtcbiAgaWYodHlwZW9mIChmbiA9IGl0LnZhbHVlT2YpID09ICdmdW5jdGlvbicgJiYgIWlzT2JqZWN0KHZhbCA9IGZuLmNhbGwoaXQpKSlyZXR1cm4gdmFsO1xuICBpZighUyAmJiB0eXBlb2YgKGZuID0gaXQudG9TdHJpbmcpID09ICdmdW5jdGlvbicgJiYgIWlzT2JqZWN0KHZhbCA9IGZuLmNhbGwoaXQpKSlyZXR1cm4gdmFsO1xuICB0aHJvdyBUeXBlRXJyb3IoXCJDYW4ndCBjb252ZXJ0IG9iamVjdCB0byBwcmltaXRpdmUgdmFsdWVcIik7XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fdG8tcHJpbWl0aXZlLmpzXG4gKiovIiwibW9kdWxlLmV4cG9ydHMgPSBmdW5jdGlvbihiaXRtYXAsIHZhbHVlKXtcbiAgcmV0dXJuIHtcbiAgICBlbnVtZXJhYmxlICA6ICEoYml0bWFwICYgMSksXG4gICAgY29uZmlndXJhYmxlOiAhKGJpdG1hcCAmIDIpLFxuICAgIHdyaXRhYmxlICAgIDogIShiaXRtYXAgJiA0KSxcbiAgICB2YWx1ZSAgICAgICA6IHZhbHVlXG4gIH07XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fcHJvcGVydHktZGVzYy5qc1xuICoqLyIsInZhciBnbG9iYWwgICAgPSByZXF1aXJlKCcuL19nbG9iYWwnKVxuICAsIGhpZGUgICAgICA9IHJlcXVpcmUoJy4vX2hpZGUnKVxuICAsIGhhcyAgICAgICA9IHJlcXVpcmUoJy4vX2hhcycpXG4gICwgU1JDICAgICAgID0gcmVxdWlyZSgnLi9fdWlkJykoJ3NyYycpXG4gICwgVE9fU1RSSU5HID0gJ3RvU3RyaW5nJ1xuICAsICR0b1N0cmluZyA9IEZ1bmN0aW9uW1RPX1NUUklOR11cbiAgLCBUUEwgICAgICAgPSAoJycgKyAkdG9TdHJpbmcpLnNwbGl0KFRPX1NUUklORyk7XG5cbnJlcXVpcmUoJy4vX2NvcmUnKS5pbnNwZWN0U291cmNlID0gZnVuY3Rpb24oaXQpe1xuICByZXR1cm4gJHRvU3RyaW5nLmNhbGwoaXQpO1xufTtcblxuKG1vZHVsZS5leHBvcnRzID0gZnVuY3Rpb24oTywga2V5LCB2YWwsIHNhZmUpe1xuICB2YXIgaXNGdW5jdGlvbiA9IHR5cGVvZiB2YWwgPT0gJ2Z1bmN0aW9uJztcbiAgaWYoaXNGdW5jdGlvbiloYXModmFsLCAnbmFtZ
ScpIHx8IGhpZGUodmFsLCAnbmFtZScsIGtleSk7XG4gIGlmKE9ba2V5XSA9PT0gdmFsKXJldHVybjtcbiAgaWYoaXNGdW5jdGlvbiloYXModmFsLCBTUkMpIHx8IGhpZGUodmFsLCBTUkMsIE9ba2V5XSA / ICcnICsgT1trZXldIDogVFBMLmpvaW4oU3RyaW5nKGtleSkpKTtcbiAgaWYoTyA9PT0gZ2xvYmFsKXtcbiAgICBPW2tleV0gPSB2YWw7XG4gIH0gZWxzZSB7XG4gICAgaWYoIXNhZmUpe1xuICAgICAgZGVsZXRlIE9ba2V5XTtcbiAgICAgIGhpZGUoTywga2V5LCB2YWwpO1xuICAgIH0gZWxzZSB7XG4gICAgICBpZihPW2tleV0pT1trZXldID0gdmFsO1xuICAgICAgZWxzZSBoaWRlKE8sIGtleSwgdmFsKTtcbiAgICB9XG4gIH1cbi8vIGFkZCBmYWtlIEZ1bmN0aW9uI3RvU3RyaW5nIGZvciBjb3JyZWN0IHdvcmsgd3JhcHBlZCBtZXRob2RzIC8gY29uc3RydWN0b3JzIHdpdGggbWV0aG9kcyBsaWtlIExvRGFzaCBpc05hdGl2ZVxufSkoRnVuY3Rpb24ucHJvdG90eXBlLCBUT19TVFJJTkcsIGZ1bmN0aW9uIHRvU3RyaW5nKCl7XG4gIHJldHVybiB0eXBlb2YgdGhpcyA9PSAnZnVuY3Rpb24nICYmIHRoaXNbU1JDXSB8fCAkdG9TdHJpbmcuY2FsbCh0aGlzKTtcbn0pO1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fcmVkZWZpbmUuanNcbiAqKi8iLCJ2YXIgaGFzT3duUHJvcGVydHkgPSB7fS5oYXNPd25Qcm9wZXJ0eTtcbm1vZHVsZS5leHBvcnRzID0gZnVuY3Rpb24oaXQsIGtleSl7XG4gIHJldHVybiBoYXNPd25Qcm9wZXJ0eS5jYWxsKGl0LCBrZXkpO1xufTtcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvX2hhcy5qc1xuICoqLyIsInZhciBpZCA9IDBcbiAgLCBweCA9IE1hdGgucmFuZG9tKCk7XG5tb2R1bGUuZXhwb3J0cyA9IGZ1bmN0aW9uKGtleSl7XG4gIHJldHVybiAnU3ltYm9sKCcuY29uY2F0KGtleSA9PT0gdW5kZWZpbmVkID8gJycgOiBrZXksICcpXycsICgrK2lkICsgcHgpLnRvU3RyaW5nKDM2KSk7XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fdWlkLmpzXG4gKiovIiwiLy8gb3B0aW9uYWwgLyBzaW1wbGUgY29udGV4dCBiaW5kaW5nXG52YXIgYUZ1bmN0aW9uID0gcmVxdWlyZSgnLi9fYS1mdW5jdGlvbicpO1xubW9kdWxlLmV4cG9ydHMgPSBmdW5jdGlvbihmbiwgdGhhdCwgbGVuZ3RoKXtcbiAgYUZ1bmN0aW9uKGZuKTtcbiAgaWYodGhhdCA9PT0gdW5kZWZpbmVkKXJldHVybiBmbjtcbiAgc3dpdGNoKGxlbmd0aCl7XG4gICAgY2FzZSAxOiByZXR1cm4gZnVuY3Rpb24oYSl7XG4gICAgICByZXR1cm4gZm4uY2FsbCh0aGF0LCBhKTtcbiAgICB9O1xuICAgIGNhc2UgMjogcmV0dXJuIGZ1bmN0aW9uKGEsIGIpe1xuICAgICAgcmV0dXJuIGZuLmNhbGwodGhhdCwgYSwgYik7XG4gICAgfTtcbiAgICBjYXNlIDM6IHJldHVybiBmdW5jdGlvbihhLCBiLCBjKXtcbiAgICAgIHJldHVybiBmbi5jYWxsKHRoYXQsIGEsIGIsIGMpO1xuICAgIH07XG4gIH1cbiAgcmV0dXJuIGZ1bmN0aW9uKC8qIC4uLmFyZ3MgKi8pe1xuICAgIHJldHVybiBmbi5hcHBseSh0aGF0LCBhcmd1bWVudHMpO1xuICB9O1xufTtcblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L34vLm5wbWluc3RhbGwvY29yZS1qcy8yLjQuMC9jb3JlLWpzL21vZHVsZXMvX2N0eC5qc1xuICoqLyIsIm1vZHVsZS5leHBvcnRzID0gZnVuY3Rpb24oaXQpe1xuICBpZih0eXBlb2YgaXQgIT0gJ2Z1bmN0aW9uJyl0aHJvdyBUeXBlRXJyb3IoaXQgKyAnIGlzIG5vdCBhIGZ1bmN0aW9uIScpO1xuICByZXR1cm4gaXQ7XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fYS1mdW5jdGlvbi5qc1xuICoqLyIsIid1c2Ugc3RyaWN0Jztcbi8vIDE5LjEuMi4xIE9iamVjdC5hc3NpZ24odGFyZ2V0LCBzb3VyY2UsIC4uLilcbnZhciBnZXRLZXlzICA9IHJlcXVpcmUoJy4vX29iamVjdC1rZXlzJylcbiAgLCBnT1BTICAgICA9IHJlcXVpcmUoJy4vX29iamVjdC1nb3BzJylcbiAgLCBwSUUgICAgICA9IHJlcXVpcmUoJy4vX29iamVjdC1waWUnKVxuICAsIHRvT2JqZWN0ID0gcmVxdWlyZSgnLi9fdG8tb2JqZWN0JylcbiAgLCBJT2JqZWN0ICA9IHJlcXVpcmUoJy4vX2lvYmplY3QnKVxuICAsICRhc3NpZ24gID0gT2JqZWN0LmFzc2lnbjtcblxuLy8gc2hvdWxkIHdvcmsgd2l0aCBzeW1ib2xzIGFuZCBzaG91bGQgaGF2ZSBkZXRlcm1pbmlzdGljIHByb3BlcnR5IG9yZGVyIChWOCBidWcpXG5tb2R1bGUuZXhwb3J0cyA9ICEkYXNzaWduIHx8IHJlcXVpcmUoJy4vX2ZhaWxzJykoZnVuY3Rpb24oKXtcbiAgdmFyIEEgPSB7fVxuICAgIC
wgQiA9IHt9XG4gICAgLCBTID0gU3ltYm9sKClcbiAgICAsIEsgPSAnYWJjZGVmZ2hpamtsbW5vcHFyc3QnO1xuICBBW1NdID0gNztcbiAgSy5zcGxpdCgnJykuZm9yRWFjaChmdW5jdGlvbihrKXsgQltrXSA9IGs7IH0pO1xuICByZXR1cm4gJGFzc2lnbih7fSwgQSlbU10gIT0gNyB8fCBPYmplY3Qua2V5cygkYXNzaWduKHt9LCBCKSkuam9pbignJykgIT0gSztcbn0pID8gZnVuY3Rpb24gYXNzaWduKHRhcmdldCwgc291cmNlKXsgLy8gZXNsaW50LWRpc2FibGUtbGluZSBuby11bnVzZWQtdmFyc1xuICB2YXIgVCAgICAgPSB0b09iamVjdCh0YXJnZXQpXG4gICAgLCBhTGVuICA9IGFyZ3VtZW50cy5sZW5ndGhcbiAgICAsIGluZGV4ID0gMVxuICAgICwgZ2V0U3ltYm9scyA9IGdPUFMuZlxuICAgICwgaXNFbnVtICAgICA9IHBJRS5mO1xuICB3aGlsZShhTGVuID4gaW5kZXgpe1xuICAgIHZhciBTICAgICAgPSBJT2JqZWN0KGFyZ3VtZW50c1tpbmRleCsrXSlcbiAgICAgICwga2V5cyAgID0gZ2V0U3ltYm9scyA / IGdldEtleXMoUykuY29uY2F0KGdldFN5bWJvbHMoUykpIDogZ2V0S2V5cyhTKVxuICAgICAgLCBsZW5ndGggPSBrZXlzLmxlbmd0aFxuICAgICAgLCBqICAgICAgPSAwXG4gICAgICAsIGtleTtcbiAgICB3aGlsZShsZW5ndGggPiBqKWlmKGlzRW51bS5jYWxsKFMsIGtleSA9IGtleXNbaisrXSkpVFtrZXldID0gU1trZXldO1xuICB9IHJldHVybiBUO1xufSA6ICRhc3NpZ247XG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogL1VzZXJzL3poYW9qaW5qaWFuZy9TaXRlcy9wdWJsaWMvd2VleC9 + Ly5ucG1pbnN0YWxsL2NvcmUtanMvMi40LjAvY29yZS1qcy9tb2R1bGVzL19vYmplY3QtYXNzaWduLmpzXG4gKiovIiwiLy8gMTkuMS4yLjE0IC8gMTUuMi4zLjE0IE9iamVjdC5rZXlzKE8pXHJcbnZhciAka2V5cyAgICAgICA9IHJlcXVpcmUoJy4vX29iamVjdC1rZXlzLWludGVybmFsJylcclxuICAsIGVudW1CdWdLZXlzID0gcmVxdWlyZSgnLi9fZW51bS1idWcta2V5cycpO1xyXG5cclxubW9kdWxlLmV4cG9ydHMgPSBPYmplY3Qua2V5cyB8fCBmdW5jdGlvbiBrZXlzKE8pe1xyXG4gIHJldHVybiAka2V5cyhPLCBlbnVtQnVnS2V5cyk7XHJcbn07XG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogL1VzZXJzL3poYW9qaW5qaWFuZy9TaXRlcy9wdWJsaWMvd2VleC9 + Ly5ucG1pbnN0YWxsL2NvcmUtanMvMi40LjAvY29yZS1qcy9tb2R1bGVzL19vYmplY3Qta2V5cy5qc1xuICoqLyIsInZhciBoYXMgICAgICAgICAgPSByZXF1aXJlKCcuL19oYXMnKVxyXG4gICwgdG9JT2JqZWN0ICAgID0gcmVxdWlyZSgnLi9fdG8taW9iamVjdCcpXHJcbiAgLCBhcnJheUluZGV4T2YgPSByZXF1aXJlKCcuL19hcnJheS1pbmNsdWRlcycpKGZhbHNlKVxyXG4gICwgSUVfUFJPVE8gICAgID0gcmVxdWlyZSgnLi9fc2hhcmVkLWtleScpKCdJRV9QUk9UTycpO1xyXG5cclxubW9kdWxlLmV4cG9ydHMgPSBmdW5jdGlvbihvYmplY3QsIG5hbWVzKXtcclxuICB2YXIgTyAgICAgID0gdG9JT2JqZWN0KG9iamVjdClcclxuICAgICwgaSAgICAgID0gMFxyXG4gICAgLCByZXN1bHQgPSBbXVxyXG4gICAgLCBrZXk7XHJcbiAgZm9yKGtleSBpbiBPKWlmKGtleSAhPSBJRV9QUk9UTyloYXMoTywga2V5KSAmJiByZXN1bHQucHVzaChrZXkpO1xyXG4gIC8vIERvbid0IGVudW0gYnVnICYgaGlkZGVuIGtleXNcclxuICB3aGlsZShuYW1lcy5sZW5ndGggPiBpKWlmKGhhcyhPLCBrZXkgPSBuYW1lc1tpKytdKSl7XHJcbiAgICB + YXJyYXlJbmRleE9mKHJlc3VsdCwga2V5KSB8fCByZXN1bHQucHVzaChrZXkpO1xyXG4gIH1cclxuICByZXR1cm4gcmVzdWx0O1xyXG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fb2JqZWN0LWtleXMtaW50ZXJuYWwuanNcbiAqKi8iLCIvLyB0byBpbmRleGVkIG9iamVjdCwgdG9PYmplY3Qgd2l0aCBmYWxsYmFjayBmb3Igbm9uLWFycmF5LWxpa2UgRVMzIHN0cmluZ3NcbnZhciBJT2JqZWN0ID0gcmVxdWlyZSgnLi9faW9iamVjdCcpXG4gICwgZGVmaW5lZCA9IHJlcXVpcmUoJy4vX2RlZmluZWQnKTtcbm1vZHVsZS5leHBvcnRzID0gZnVuY3Rpb24oaXQpe1xuICByZXR1cm4gSU9iamVjdChkZWZpbmVkKGl0KSk7XG59O1xuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9jb3JlLWpzLzIuNC4wL2NvcmUtanMvbW9kdWxlcy9fdG8taW9iamVjdC5qc1xuICoqLyIsIi8vIGZhbGxiYWNrIGZvciBub24tYXJyYXktbGlrZSBFUzMgYW5kIG5vbi1lbnVtZXJhYmxlIG9sZCBWOCBzdHJpbmdzXG52YXIgY29mID0gcmVxdWlyZSgnLi9fY29mJyk7XG5tb2R1bGUuZXhwb3J0cyA9IE9iamVjdCgneicpLnByb3BlcnR5SXNFbnVtZXJhYmxlKDApID8gT2JqZWN0IDogZnVuY3Rpb24oaXQpe1xuICByZXR1cm4gY29mKGl0KSA9PSAnU3RyaW5nJyA / 
IHJlW0hZUEhFTlJBTkdFTE9PU0VdIDogcmVbSFlQSEVOUkFOR0VdO1xuICByYW5nZSA9IHJhbmdlLnJlcGxhY2UoaHIsIGh5cGhlblJlcGxhY2UpO1xuICBkZWJ1ZygnaHlwaGVuIHJlcGxhY2UnLCByYW5nZSk7XG4gIC8vIGA + IDEuMi4zIDwgMS4yLjVgID0 + IGA + MS4yLjMgPDEuMi41YFxuICByYW5nZSA9IHJhbmdlLnJlcGxhY2UocmVbQ09NUEFSQVRPUlRSSU1dLCBjb21wYXJhdG9yVHJpbVJlcGxhY2UpO1xuICBkZWJ1ZygnY29tcGFyYXRvciB0cmltJywgcmFuZ2UsIHJlW0NPTVBBUkFUT1JUUklNXSk7XG5cbiAgLy8gYH4gMS4yLjNgID0 + IGB + MS4yLjNgXG4gIHJhbmdlID0gcmFuZ2UucmVwbGFjZShyZVtUSUxERVRSSU1dLCB0aWxkZVRyaW1SZXBsYWNlKTtcblxuICAvLyBgXiAxLjIuM2AgPT4gYF4xLjIuM2BcbiAgcmFuZ2UgPSByYW5nZS5yZXBsYWNlKHJlW0NBUkVUVFJJTV0sIGNhcmV0VHJpbVJlcGxhY2UpO1xuXG4gIC8vIG5vcm1hbGl6ZSBzcGFjZXNcbiAgcmFuZ2UgPSByYW5nZS5zcGxpdCgvXFxzKy8pLmpvaW4oJyAnKTtcblxuICAvLyBBdCB0aGlzIHBvaW50LCB0aGUgcmFuZ2UgaXMgY29tcGxldGVseSB0cmltbWVkIGFuZFxuICAvLyByZWFkeSB0byBiZSBzcGxpdCBpbnRvIGNvbXBhcmF0b3JzLlxuXG4gIHZhciBjb21wUmUgPSBsb29zZSA / IHJlW0NPTVBBUkFUT1JMT09TRV0gOiByZVtDT01QQVJBVE9SXTtcbiAgdmFyIHNldCA9IHJhbmdlLnNwbGl0KCcgJykubWFwKGZ1bmN0aW9uKGNvbXApIHtcbiAgICByZXR1cm4gcGFyc2VDb21wYXJhdG9yKGNvbXAsIGxvb3NlKTtcbiAgfSkuam9pbignICcpLnNwbGl0KC9cXHMrLyk7XG4gIGlmICh0aGlzLmxvb3NlKSB7XG4gICAgLy8gaW4gbG9vc2UgbW9kZSwgdGhyb3cgb3V0IGFueSB0aGF0IGFyZSBub3QgdmFsaWQgY29tcGFyYXRvcnNcbiAgICBzZXQgPSBzZXQuZmlsdGVyKGZ1bmN0aW9uKGNvbXApIHtcbiAgICAgIHJldHVybiAhIWNvbXAubWF0Y2goY29tcFJlKTtcbiAgICB9KTtcbiAgfVxuICBzZXQgPSBzZXQubWFwKGZ1bmN0aW9uKGNvbXApIHtcbiAgICByZXR1cm4gbmV3IENvbXBhcmF0b3IoY29tcCwgbG9vc2UpO1xuICB9KTtcblxuICByZXR1cm4gc2V0O1xufTtcblxuLy8gTW9zdGx5IGp1c3QgZm9yIHRlc3RpbmcgYW5kIGxlZ2FjeSBBUEkgcmVhc29uc1xuZXhwb3J0cy50b0NvbXBhcmF0b3JzID0gdG9Db21wYXJhdG9ycztcbmZ1bmN0aW9uIHRvQ29tcGFyYXRvcnMocmFuZ2UsIGxvb3NlKSB7XG4gIHJldHVybiBuZXcgUmFuZ2UocmFuZ2UsIGxvb3NlKS5zZXQubWFwKGZ1bmN0aW9uKGNvbXApIHtcbiAgICByZXR1cm4gY29tcC5tYXAoZnVuY3Rpb24oYykge1xuICAgICAgcmV0dXJuIGMudmFsdWU7XG4gICAgfSkuam9pbignICcpLnRyaW0oKS5zcGxpdCgnICcpO1xuICB9KTtcbn1cblxuLy8gY29tcHJpc2VkIG9mIHhyYW5nZXMsIHRpbGRlcywgc3RhcnMsIGFuZCBndGx0J3MgYXQgdGhpcyBwb2ludC5cbi8vIGFscmVhZHkgcmVwbGFjZWQgdGhlIGh5cGhlbiByYW5nZXNcbi8vIHR1cm4gaW50byBhIHNldCBvZiBKVVNUIGNvbXBhcmF0b3JzLlxuZnVuY3Rpb24gcGFyc2VDb21wYXJhdG9yKGNvbXAsIGxvb3NlKSB7XG4gIGRlYnVnKCdjb21wJywgY29tcCk7XG4gIGNvbXAgPSByZXBsYWNlQ2FyZXRzKGNvbXAsIGxvb3NlKTtcbiAgZGVidWcoJ2NhcmV0JywgY29tcCk7XG4gIGNvbXAgPSByZXBsYWNlVGlsZGVzKGNvbXAsIGxvb3NlKTtcbiAgZGVidWcoJ3RpbGRlcycsIGNvbXApO1xuICBjb21wID0gcmVwbGFjZVhSYW5nZXMoY29tcCwgbG9vc2UpO1xuICBkZWJ1ZygneHJhbmdlJywgY29tcCk7XG4gIGNvbXAgPSByZXBsYWNlU3RhcnMoY29tcCwgbG9vc2UpO1xuICBkZWJ1Zygnc3RhcnMnLCBjb21wKTtcbiAgcmV0dXJuIGNvbXA7XG59XG5cbmZ1bmN0aW9uIGlzWChpZCkge1xuICByZXR1cm4gIWlkIHx8IGlkLnRvTG93ZXJDYXNlKCkgPT09ICd4JyB8fCBpZCA9PT0gJyonO1xufVxuXG4vLyB + LCB + PiAtLT4gKiAoYW55LCBraW5kYSBzaWxseSlcbi8vIH4yLCB + Mi54LCB + Mi54LngsIH4 + Miwgfj4yLnggfj4yLngueCAtLT4gPj0yLjAuMCA8My4wLjBcbi8vIH4yLjAsIH4yLjAueCwgfj4yLjAsIH4 + Mi4wLnggLS0 + ID49Mi4wLjAgPDIuMS4wXG4vLyB + MS4yLCB + MS4yLngsIH4 + MS4yLCB + PjEuMi54IC0tPiA + PTEuMi4wIDwxLjMuMFxuLy8gfjEuMi4zLCB + PjEuMi4zIC0tPiA + PTEuMi4zIDwxLjMuMFxuLy8gfjEuMi4wLCB + PjEuMi4wIC0tPiA + 
PTEuMi4wIDwxLjMuMFxuZnVuY3Rpb24gcmVwbGFjZVRpbGRlcyhjb21wLCBsb29zZSkge1xuICByZXR1cm4gY29tcC50cmltKCkuc3BsaXQoL1xccysvKS5tYXAoZnVuY3Rpb24oY29tcCkge1xuICAgIHJldHVybiByZXBsYWNlVGlsZGUoY29tcCwgbG9vc2UpO1xuICB9KS5qb2luKCcgJyk7XG59XG5cbmZ1bmN0aW9uIHJlcGxhY2VUaWxkZShjb21wLCBsb29zZSkge1xuICB2YXIgciA9IGxvb3NlID8gcmVbVElMREVMT09TRV0gOiByZVtUSUxERV07XG4gIHJldHVybiBjb21wLnJlcGxhY2UociwgZnVuY3Rpb24oXywgTSwgbSwgcCwgcHIpIHtcbiAgICBkZWJ1ZygndGlsZGUnLCBjb21wLCBfLCBNLCBtLCBwLCBwcik7XG4gICAgdmFyIHJldDtcblxuICAgIGlmIChpc1goTSkpXG4gICAgICByZXQgPSAnJztcbiAgICBlbHNlIGlmIChpc1gobSkpXG4gICAgICByZXQgPSAnPj0nICsgTSArICcuMC4wIDwnICsgKCtNICsgMSkgKyAnLjAuMCc7XG4gICAgZWxzZSBpZiAoaXNYKHApKVxuICAgICAgLy8gfjEuMiA9PSA + PTEuMi4wIDwxLjMuMFxuICAgICAgcmV0ID0gJz49JyArIE0gKyAnLicgKyBtICsgJy4wIDwnICsgTSArICcuJyArICgrbSArIDEpICsgJy4wJztcbiAgICBlbHNlIGlmIChwcikge1xuICAgICAgZGVidWcoJ3JlcGxhY2VUaWxkZSBwcicsIHByKTtcbiAgICAgIGlmIChwci5jaGFyQXQoMCkgIT09ICctJylcbiAgICAgICAgcHIgPSAnLScgKyBwcjtcbiAgICAgIHJldCA9ICc + PScgKyBNICsgJy4nICsgbSArICcuJyArIHAgKyBwciArXG4gICAgICAgICAgICAnIDwnICsgTSArICcuJyArICgrbSArIDEpICsgJy4wJztcbiAgICB9IGVsc2VcbiAgICAgIC8vIH4xLjIuMyA9PSA + PTEuMi4zIDwxLjMuMFxuICAgICAgcmV0ID0gJz49JyArIE0gKyAnLicgKyBtICsgJy4nICsgcCArXG4gICAgICAgICAgICAnIDwnICsgTSArICcuJyArICgrbSArIDEpICsgJy4wJztcblxuICAgIGRlYnVnKCd0aWxkZSByZXR1cm4nLCByZXQpO1xuICAgIHJldHVybiByZXQ7XG4gIH0pO1xufVxuXG4vLyBeIC0tPiAqIChhbnksIGtpbmRhIHNpbGx5KVxuLy8gXjIsIF4yLngsIF4yLngueCAtLT4gPj0yLjAuMCA8My4wLjBcbi8vIF4yLjAsIF4yLjAueCAtLT4gPj0yLjAuMCA8My4wLjBcbi8vIF4xLjIsIF4xLjIueCAtLT4gPj0xLjIuMCA8Mi4wLjBcbi8vIF4xLjIuMyAtLT4gPj0xLjIuMyA8Mi4wLjBcbi8vIF4xLjIuMCAtLT4gPj0xLjIuMCA8Mi4wLjBcbmZ1bmN0aW9uIHJlcGxhY2VDYXJldHMoY29tcCwgbG9vc2UpIHtcbiAgcmV0dXJuIGNvbXAudHJpbSgpLnNwbGl0KC9cXHMrLykubWFwKGZ1bmN0aW9uKGNvbXApIHtcbiAgICByZXR1cm4gcmVwbGFjZUNhcmV0KGNvbXAsIGxvb3NlKTtcbiAgfSkuam9pbignICcpO1xufVxuXG5mdW5jdGlvbiByZXBsYWNlQ2FyZXQoY29tcCwgbG9vc2UpIHtcbiAgZGVidWcoJ2NhcmV0JywgY29tcCwgbG9vc2UpO1xuICB2YXIgciA9IGxvb3NlID8gcmVbQ0FSRVRMT09TRV0gOiByZVtDQVJFVF07XG4gIHJldHVybiBjb21wLnJlcGxhY2UociwgZnVuY3Rpb24oXywgTSwgbSwgcCwgcHIpIHtcbiAgICBkZWJ1ZygnY2FyZXQnLCBjb21wLCBfLCBNLCBtLCBwLCBwcik7XG4gICAgdmFyIHJldDtcblxuICAgIGlmIChpc1goTSkpXG4gICAgICByZXQgPSAnJztcbiAgICBlbHNlIGlmIChpc1gobSkpXG4gICAgICByZXQgPSAnPj0nICsgTSArICcuMC4wIDwnICsgKCtNICsgMSkgKyAnLjAuMCc7XG4gICAgZWxzZSBpZiAoaXNYKHApKSB7XG4gICAgICBpZiAoTSA9PT0gJzAnKVxuICAgICAgICByZXQgPSAnPj0nICsgTSArICcuJyArIG0gKyAnLjAgPCcgKyBNICsgJy4nICsgKCttICsgMSkgKyAnLjAnO1xuICAgICAgZWxzZVxuICAgICAgICByZXQgPSAnPj0nICsgTSArICcuJyArIG0gKyAnLjAgPCcgKyAoK00gKyAxKSArICcuMC4wJztcbiAgICB9IGVsc2UgaWYgKHByKSB7XG4gICAgICBkZWJ1ZygncmVwbGFjZUNhcmV0IHByJywgcHIpO1xuICAgICAgaWYgKHByLmNoYXJBdCgwKSAhPT0gJy0nKVxuICAgICAgICBwciA9ICctJyArIHByO1xuICAgICAgaWYgKE0gPT09ICcwJykge1xuICAgICAgICBpZiAobSA9PT0gJzAnKVxuICAgICAgICAgIHJldCA9ICc + PScgKyBNICsgJy4nICsgbSArICcuJyArIHAgKyBwciArXG4gICAgICAgICAgICAgICAgJyA8JyArIE0gKyAnLicgKyBtICsgJy4nICsgKCtwICsgMSk7XG4gICAgICAgIGVsc2VcbiAgICAgICAgICByZXQgPSAnPj0nICsgTSArICcuJyArIG0gKyAnLicgKyBwICsgcHIgK1xuICAgICAgICAgICAgICAgICcgPCcgKyBNICsgJy4nICsgKCttICsgMSkgKyAnLjAnO1xuICAgICAgfSBlbHNlXG4gICAgICAgIHJldCA9ICc + 
PScgKyBNICsgJy4nICsgbSArICcuJyArIHAgKyBwciArXG4gICAgICAgICAgICAgICcgPCcgKyAoK00gKyAxKSArICcuMC4wJztcbiAgICB9IGVsc2Uge1xuICAgICAgZGVidWcoJ25vIHByJyk7XG4gICAgICBpZiAoTSA9PT0gJzAnKSB7XG4gICAgICAgIGlmIChtID09PSAnMCcpXG4gICAgICAgICAgcmV0ID0gJz49JyArIE0gKyAnLicgKyBtICsgJy4nICsgcCArXG4gICAgICAgICAgICAgICAgJyA8JyArIE0gKyAnLicgKyBtICsgJy4nICsgKCtwICsgMSk7XG4gICAgICAgIGVsc2VcbiAgICAgICAgICByZXQgPSAnPj0nICsgTSArICcuJyArIG0gKyAnLicgKyBwICtcbiAgICAgICAgICAgICAgICAnIDwnICsgTSArICcuJyArICgrbSArIDEpICsgJy4wJztcbiAgICAgIH0gZWxzZVxuICAgICAgICByZXQgPSAnPj0nICsgTSArICcuJyArIG0gKyAnLicgKyBwICtcbiAgICAgICAgICAgICAgJyA8JyArICgrTSArIDEpICsgJy4wLjAnO1xuICAgIH1cblxuICAgIGRlYnVnKCdjYXJldCByZXR1cm4nLCByZXQpO1xuICAgIHJldHVybiByZXQ7XG4gIH0pO1xufVxuXG5mdW5jdGlvbiByZXBsYWNlWFJhbmdlcyhjb21wLCBsb29zZSkge1xuICBkZWJ1ZygncmVwbGFjZVhSYW5nZXMnLCBjb21wLCBsb29zZSk7XG4gIHJldHVybiBjb21wLnNwbGl0KC9cXHMrLykubWFwKGZ1bmN0aW9uKGNvbXApIHtcbiAgICByZXR1cm4gcmVwbGFjZVhSYW5nZShjb21wLCBsb29zZSk7XG4gIH0pLmpvaW4oJyAnKTtcbn1cblxuZnVuY3Rpb24gcmVwbGFjZVhSYW5nZShjb21wLCBsb29zZSkge1xuICBjb21wID0gY29tcC50cmltKCk7XG4gIHZhciByID0gbG9vc2UgPyByZVtYUkFOR0VMT09TRV0gOiByZVtYUkFOR0VdO1xuICByZXR1cm4gY29tcC5yZXBsYWNlKHIsIGZ1bmN0aW9uKHJldCwgZ3RsdCwgTSwgbSwgcCwgcHIpIHtcbiAgICBkZWJ1ZygneFJhbmdlJywgY29tcCwgcmV0LCBndGx0LCBNLCBtLCBwLCBwcik7XG4gICAgdmFyIHhNID0gaXNYKE0pO1xuICAgIHZhciB4bSA9IHhNIHx8IGlzWChtKTtcbiAgICB2YXIgeHAgPSB4bSB8fCBpc1gocCk7XG4gICAgdmFyIGFueVggPSB4cDtcblxuICAgIGlmIChndGx0ID09PSAnPScgJiYgYW55WClcbiAgICAgIGd0bHQgPSAnJztcblxuICAgIGlmICh4TSkge1xuICAgICAgaWYgKGd0bHQgPT09ICc + JyB8fCBndGx0ID09PSAnPCcpIHtcbiAgICAgICAgLy8gbm90aGluZyBpcyBhbGxvd2VkXG4gICAgICAgIHJldCA9ICc8MC4wLjAnO1xuICAgICAgfSBlbHNlIHtcbiAgICAgICAgLy8gbm90aGluZyBpcyBmb3JiaWRkZW5cbiAgICAgICAgcmV0ID0gJyonO1xuICAgICAgfVxuICAgIH0gZWxzZSBpZiAoZ3RsdCAmJiBhbnlYKSB7XG4gICAgICAvLyByZXBsYWNlIFggd2l0aCAwXG4gICAgICBpZiAoeG0pXG4gICAgICAgIG0gPSAwO1xuICAgICAgaWYgKHhwKVxuICAgICAgICBwID0gMDtcblxuICAgICAgaWYgKGd0bHQgPT09ICc + Jykge1xuICAgICAgICAvLyA + MSA9PiA + PTIuMC4wXG4gICAgICAgIC8vID4xLjIgPT4gPj0xLjMuMFxuICAgICAgICAvLyA + MS4yLjMgPT4gPj0gMS4yLjRcbiAgICAgICAgZ3RsdCA9ICc + 
PSc7XG4gICAgICAgIGlmICh4bSkge1xuICAgICAgICAgIE0gPSArTSArIDE7XG4gICAgICAgICAgbSA9IDA7XG4gICAgICAgICAgcCA9IDA7XG4gICAgICAgIH0gZWxzZSBpZiAoeHApIHtcbiAgICAgICAgICBtID0gK20gKyAxO1xuICAgICAgICAgIHAgPSAwO1xuICAgICAgICB9XG4gICAgICB9IGVsc2UgaWYgKGd0bHQgPT09ICc8PScpIHtcbiAgICAgICAgLy8gPD0wLjcueCBpcyBhY3R1YWxseSA8MC44LjAsIHNpbmNlIGFueSAwLjcueCBzaG91bGRcbiAgICAgICAgLy8gcGFzcy4gIFNpbWlsYXJseSwgPD03LnggaXMgYWN0dWFsbHkgPDguMC4wLCBldGMuXG4gICAgICAgIGd0bHQgPSAnPCdcbiAgICAgICAgaWYgKHhtKVxuICAgICAgICAgIE0gPSArTSArIDFcbiAgICAgICAgZWxzZVxuICAgICAgICAgIG0gPSArbSArIDFcbiAgICAgIH1cblxuICAgICAgcmV0ID0gZ3RsdCArIE0gKyAnLicgKyBtICsgJy4nICsgcDtcbiAgICB9IGVsc2UgaWYgKHhtKSB7XG4gICAgICByZXQgPSAnPj0nICsgTSArICcuMC4wIDwnICsgKCtNICsgMSkgKyAnLjAuMCc7XG4gICAgfSBlbHNlIGlmICh4cCkge1xuICAgICAgcmV0ID0gJz49JyArIE0gKyAnLicgKyBtICsgJy4wIDwnICsgTSArICcuJyArICgrbSArIDEpICsgJy4wJztcbiAgICB9XG5cbiAgICBkZWJ1ZygneFJhbmdlIHJldHVybicsIHJldCk7XG5cbiAgICByZXR1cm4gcmV0O1xuICB9KTtcbn1cblxuLy8gQmVjYXVzZSAqIGlzIEFORC1lZCB3aXRoIGV2ZXJ5dGhpbmcgZWxzZSBpbiB0aGUgY29tcGFyYXRvcixcbi8vIGFuZCAnJyBtZWFucyBcImFueSB2ZXJzaW9uXCIsIGp1c3QgcmVtb3ZlIHRoZSAqcyBlbnRpcmVseS5cbmZ1bmN0aW9uIHJlcGxhY2VTdGFycyhjb21wLCBsb29zZSkge1xuICBkZWJ1ZygncmVwbGFjZVN0YXJzJywgY29tcCwgbG9vc2UpO1xuICAvLyBMb29zZW5lc3MgaXMgaWdub3JlZCBoZXJlLiAgc3RhciBpcyBhbHdheXMgYXMgbG9vc2UgYXMgaXQgZ2V0cyFcbiAgcmV0dXJuIGNvbXAudHJpbSgpLnJlcGxhY2UocmVbU1RBUl0sICcnKTtcbn1cblxuLy8gVGhpcyBmdW5jdGlvbiBpcyBwYXNzZWQgdG8gc3RyaW5nLnJlcGxhY2UocmVbSFlQSEVOUkFOR0VdKVxuLy8gTSwgbSwgcGF0Y2gsIHByZXJlbGVhc2UsIGJ1aWxkXG4vLyAxLjIgLSAzLjQuNSA9PiA + PTEuMi4wIDw9My40LjVcbi8vIDEuMi4zIC0gMy40ID0 + ID49MS4yLjAgPDMuNS4wIEFueSAzLjQueCB3aWxsIGRvXG4vLyAxLjIgLSAzLjQgPT4gPj0xLjIuMCA8My41LjBcbmZ1bmN0aW9uIGh5cGhlblJlcGxhY2UoJDAsXG4gICAgICAgICAgICAgICAgICAgICAgIGZyb20sIGZNLCBmbSwgZnAsIGZwciwgZmIsXG4gICAgICAgICAgICAgICAgICAgICAgIHRvLCB0TSwgdG0sIHRwLCB0cHIsIHRiKSB7XG5cbiAgaWYgKGlzWChmTSkpXG4gICAgZnJvbSA9ICcnO1xuICBlbHNlIGlmIChpc1goZm0pKVxuICAgIGZyb20gPSAnPj0nICsgZk0gKyAnLjAuMCc7XG4gIGVsc2UgaWYgKGlzWChmcCkpXG4gICAgZnJvbSA9ICc + 
PScgKyBmTSArICcuJyArIGZtICsgJy4wJztcbiAgZWxzZVxuICAgIGZyb20gPSAnPj0nICsgZnJvbTtcblxuICBpZiAoaXNYKHRNKSlcbiAgICB0byA9ICcnO1xuICBlbHNlIGlmIChpc1godG0pKVxuICAgIHRvID0gJzwnICsgKCt0TSArIDEpICsgJy4wLjAnO1xuICBlbHNlIGlmIChpc1godHApKVxuICAgIHRvID0gJzwnICsgdE0gKyAnLicgKyAoK3RtICsgMSkgKyAnLjAnO1xuICBlbHNlIGlmICh0cHIpXG4gICAgdG8gPSAnPD0nICsgdE0gKyAnLicgKyB0bSArICcuJyArIHRwICsgJy0nICsgdHByO1xuICBlbHNlXG4gICAgdG8gPSAnPD0nICsgdG87XG5cbiAgcmV0dXJuIChmcm9tICsgJyAnICsgdG8pLnRyaW0oKTtcbn1cblxuXG4vLyBpZiBBTlkgb2YgdGhlIHNldHMgbWF0Y2ggQUxMIG9mIGl0cyBjb21wYXJhdG9ycywgdGhlbiBwYXNzXG5SYW5nZS5wcm90b3R5cGUudGVzdCA9IGZ1bmN0aW9uKHZlcnNpb24pIHtcbiAgaWYgKCF2ZXJzaW9uKVxuICAgIHJldHVybiBmYWxzZTtcblxuICBpZiAodHlwZW9mIHZlcnNpb24gPT09ICdzdHJpbmcnKVxuICAgIHZlcnNpb24gPSBuZXcgU2VtVmVyKHZlcnNpb24sIHRoaXMubG9vc2UpO1xuXG4gIGZvciAodmFyIGkgPSAwOyBpIDwgdGhpcy5zZXQubGVuZ3RoOyBpKyspIHtcbiAgICBpZiAodGVzdFNldCh0aGlzLnNldFtpXSwgdmVyc2lvbikpXG4gICAgICByZXR1cm4gdHJ1ZTtcbiAgfVxuICByZXR1cm4gZmFsc2U7XG59O1xuXG5mdW5jdGlvbiB0ZXN0U2V0KHNldCwgdmVyc2lvbikge1xuICBmb3IgKHZhciBpID0gMDsgaSA8IHNldC5sZW5ndGg7IGkrKykge1xuICAgIGlmICghc2V0W2ldLnRlc3QodmVyc2lvbikpXG4gICAgICByZXR1cm4gZmFsc2U7XG4gIH1cblxuICBpZiAodmVyc2lvbi5wcmVyZWxlYXNlLmxlbmd0aCkge1xuICAgIC8vIEZpbmQgdGhlIHNldCBvZiB2ZXJzaW9ucyB0aGF0IGFyZSBhbGxvd2VkIHRvIGhhdmUgcHJlcmVsZWFzZXNcbiAgICAvLyBGb3IgZXhhbXBsZSwgXjEuMi4zLXByLjEgZGVzdWdhcnMgdG8gPj0xLjIuMy1wci4xIDwyLjAuMFxuICAgIC8vIFRoYXQgc2hvdWxkIGFsbG93IGAxLjIuMy1wci4yYCB0byBwYXNzLlxuICAgIC8vIEhvd2V2ZXIsIGAxLjIuNC1hbHBoYS5ub3RyZWFkeWAgc2hvdWxkIE5PVCBiZSBhbGxvd2VkLFxuICAgIC8vIGV2ZW4gdGhvdWdoIGl0J3Mgd2l0aGluIHRoZSByYW5nZSBzZXQgYnkgdGhlIGNvbXBhcmF0b3JzLlxuICAgIGZvciAodmFyIGkgPSAwOyBpIDwgc2V0Lmxlbmd0aDsgaSsrKSB7XG4gICAgICBkZWJ1ZyhzZXRbaV0uc2VtdmVyKTtcbiAgICAgIGlmIChzZXRbaV0uc2VtdmVyID09PSBBTlkpXG4gICAgICAgIGNvbnRpbnVlO1xuXG4gICAgICBpZiAoc2V0W2ldLnNlbXZlci5wcmVyZWxlYXNlLmxlbmd0aCA + 
IDApIHtcbiAgICAgICAgdmFyIGFsbG93ZWQgPSBzZXRbaV0uc2VtdmVyO1xuICAgICAgICBpZiAoYWxsb3dlZC5tYWpvciA9PT0gdmVyc2lvbi5tYWpvciAmJlxuICAgICAgICAgICAgYWxsb3dlZC5taW5vciA9PT0gdmVyc2lvbi5taW5vciAmJlxuICAgICAgICAgICAgYWxsb3dlZC5wYXRjaCA9PT0gdmVyc2lvbi5wYXRjaClcbiAgICAgICAgICByZXR1cm4gdHJ1ZTtcbiAgICAgIH1cbiAgICB9XG5cbiAgICAvLyBWZXJzaW9uIGhhcyBhIC1wcmUsIGJ1dCBpdCdzIG5vdCBvbmUgb2YgdGhlIG9uZXMgd2UgbGlrZS5cbiAgICByZXR1cm4gZmFsc2U7XG4gIH1cblxuICByZXR1cm4gdHJ1ZTtcbn1cblxuZXhwb3J0cy5zYXRpc2ZpZXMgPSBzYXRpc2ZpZXM7XG5mdW5jdGlvbiBzYXRpc2ZpZXModmVyc2lvbiwgcmFuZ2UsIGxvb3NlKSB7XG4gIHRyeSB7XG4gICAgcmFuZ2UgPSBuZXcgUmFuZ2UocmFuZ2UsIGxvb3NlKTtcbiAgfSBjYXRjaCAoZXIpIHtcbiAgICByZXR1cm4gZmFsc2U7XG4gIH1cbiAgcmV0dXJuIHJhbmdlLnRlc3QodmVyc2lvbik7XG59XG5cbmV4cG9ydHMubWF4U2F0aXNmeWluZyA9IG1heFNhdGlzZnlpbmc7XG5mdW5jdGlvbiBtYXhTYXRpc2Z5aW5nKHZlcnNpb25zLCByYW5nZSwgbG9vc2UpIHtcbiAgcmV0dXJuIHZlcnNpb25zLmZpbHRlcihmdW5jdGlvbih2ZXJzaW9uKSB7XG4gICAgcmV0dXJuIHNhdGlzZmllcyh2ZXJzaW9uLCByYW5nZSwgbG9vc2UpO1xuICB9KS5zb3J0KGZ1bmN0aW9uKGEsIGIpIHtcbiAgICByZXR1cm4gcmNvbXBhcmUoYSwgYiwgbG9vc2UpO1xuICB9KVswXSB8fCBudWxsO1xufVxuXG5leHBvcnRzLnZhbGlkUmFuZ2UgPSB2YWxpZFJhbmdlO1xuZnVuY3Rpb24gdmFsaWRSYW5nZShyYW5nZSwgbG9vc2UpIHtcbiAgdHJ5IHtcbiAgICAvLyBSZXR1cm4gJyonIGluc3RlYWQgb2YgJycgc28gdGhhdCB0cnV0aGluZXNzIHdvcmtzLlxuICAgIC8vIFRoaXMgd2lsbCB0aHJvdyBpZiBpdCdzIGludmFsaWQgYW55d2F5XG4gICAgcmV0dXJuIG5ldyBSYW5nZShyYW5nZSwgbG9vc2UpLnJhbmdlIHx8ICcqJztcbiAgfSBjYXRjaCAoZXIpIHtcbiAgICByZXR1cm4gbnVsbDtcbiAgfVxufVxuXG4vLyBEZXRlcm1pbmUgaWYgdmVyc2lvbiBpcyBsZXNzIHRoYW4gYWxsIHRoZSB2ZXJzaW9ucyBwb3NzaWJsZSBpbiB0aGUgcmFuZ2VcbmV4cG9ydHMubHRyID0gbHRyO1xuZnVuY3Rpb24gbHRyKHZlcnNpb24sIHJhbmdlLCBsb29zZSkge1xuICByZXR1cm4gb3V0c2lkZSh2ZXJzaW9uLCByYW5nZSwgJzwnLCBsb29zZSk7XG59XG5cbi8vIERldGVybWluZSBpZiB2ZXJzaW9uIGlzIGdyZWF0ZXIgdGhhbiBhbGwgdGhlIHZlcnNpb25zIHBvc3NpYmxlIGluIHRoZSByYW5nZS5cbmV4cG9ydHMuZ3RyID0gZ3RyO1xuZnVuY3Rpb24gZ3RyKHZlcnNpb24sIHJhbmdlLCBsb29zZSkge1xuICByZXR1cm4gb3V0c2lkZSh2ZXJzaW9uLCByYW5nZSwgJz4nLCBsb29zZSk7XG59XG5cbmV4cG9ydHMub3V0c2lkZSA9IG91dHNpZGU7XG5mdW5jdGlvbiBvdXRzaWRlKHZlcnNpb24sIHJhbmdlLCBoaWxvLCBsb29zZSkge1xuICB2ZXJzaW9uID0gbmV3IFNlbVZlcih2ZXJzaW9uLCBsb29zZSk7XG4gIHJhbmdlID0gbmV3IFJhbmdlKHJhbmdlLCBsb29zZSk7XG5cbiAgdmFyIGd0Zm4sIGx0ZWZuLCBsdGZuLCBjb21wLCBlY29tcDtcbiAgc3dpdGNoIChoaWxvKSB7XG4gICAgY2FzZSAnPic6XG4gICAgICBndGZuID0gZ3Q7XG4gICAgICBsdGVmbiA9IGx0ZTtcbiAgICAgIGx0Zm4gPSBsdDtcbiAgICAgIGNvbXAgPSAnPic7XG4gICAgICBlY29tcCA9ICc + 
PSc7XG4gICAgICBicmVhaztcbiAgICBjYXNlICc8JzpcbiAgICAgIGd0Zm4gPSBsdDtcbiAgICAgIGx0ZWZuID0gZ3RlO1xuICAgICAgbHRmbiA9IGd0O1xuICAgICAgY29tcCA9ICc8JztcbiAgICAgIGVjb21wID0gJzw9JztcbiAgICAgIGJyZWFrO1xuICAgIGRlZmF1bHQ6XG4gICAgICB0aHJvdyBuZXcgVHlwZUVycm9yKCdNdXN0IHByb3ZpZGUgYSBoaWxvIHZhbCBvZiBcIjxcIiBvciBcIj5cIicpO1xuICB9XG5cbiAgLy8gSWYgaXQgc2F0aXNpZmVzIHRoZSByYW5nZSBpdCBpcyBub3Qgb3V0c2lkZVxuICBpZiAoc2F0aXNmaWVzKHZlcnNpb24sIHJhbmdlLCBsb29zZSkpIHtcbiAgICByZXR1cm4gZmFsc2U7XG4gIH1cblxuICAvLyBGcm9tIG5vdyBvbiwgdmFyaWFibGUgdGVybXMgYXJlIGFzIGlmIHdlJ3JlIGluIFwiZ3RyXCIgbW9kZS5cbiAgLy8gYnV0IG5vdGUgdGhhdCBldmVyeXRoaW5nIGlzIGZsaXBwZWQgZm9yIHRoZSBcImx0clwiIGZ1bmN0aW9uLlxuXG4gIGZvciAodmFyIGkgPSAwOyBpIDwgcmFuZ2Uuc2V0Lmxlbmd0aDsgKytpKSB7XG4gICAgdmFyIGNvbXBhcmF0b3JzID0gcmFuZ2Uuc2V0W2ldO1xuXG4gICAgdmFyIGhpZ2ggPSBudWxsO1xuICAgIHZhciBsb3cgPSBudWxsO1xuXG4gICAgY29tcGFyYXRvcnMuZm9yRWFjaChmdW5jdGlvbihjb21wYXJhdG9yKSB7XG4gICAgICBpZiAoY29tcGFyYXRvci5zZW12ZXIgPT09IEFOWSkge1xuICAgICAgICBjb21wYXJhdG9yID0gbmV3IENvbXBhcmF0b3IoJz49MC4wLjAnKVxuICAgICAgfVxuICAgICAgaGlnaCA9IGhpZ2ggfHwgY29tcGFyYXRvcjtcbiAgICAgIGxvdyA9IGxvdyB8fCBjb21wYXJhdG9yO1xuICAgICAgaWYgKGd0Zm4oY29tcGFyYXRvci5zZW12ZXIsIGhpZ2guc2VtdmVyLCBsb29zZSkpIHtcbiAgICAgICAgaGlnaCA9IGNvbXBhcmF0b3I7XG4gICAgICB9IGVsc2UgaWYgKGx0Zm4oY29tcGFyYXRvci5zZW12ZXIsIGxvdy5zZW12ZXIsIGxvb3NlKSkge1xuICAgICAgICBsb3cgPSBjb21wYXJhdG9yO1xuICAgICAgfVxuICAgIH0pO1xuXG4gICAgLy8gSWYgdGhlIGVkZ2UgdmVyc2lvbiBjb21wYXJhdG9yIGhhcyBhIG9wZXJhdG9yIHRoZW4gb3VyIHZlcnNpb25cbiAgICAvLyBpc24ndCBvdXRzaWRlIGl0XG4gICAgaWYgKGhpZ2gub3BlcmF0b3IgPT09IGNvbXAgfHwgaGlnaC5vcGVyYXRvciA9PT0gZWNvbXApIHtcbiAgICAgIHJldHVybiBmYWxzZTtcbiAgICB9XG5cbiAgICAvLyBJZiB0aGUgbG93ZXN0IHZlcnNpb24gY29tcGFyYXRvciBoYXMgYW4gb3BlcmF0b3IgYW5kIG91ciB2ZXJzaW9uXG4gICAgLy8gaXMgbGVzcyB0aGFuIGl0IHRoZW4gaXQgaXNuJ3QgaGlnaGVyIHRoYW4gdGhlIHJhbmdlXG4gICAgaWYgKCghbG93Lm9wZXJhdG9yIHx8IGxvdy5vcGVyYXRvciA9PT0gY29tcCkgJiZcbiAgICAgICAgbHRlZm4odmVyc2lvbiwgbG93LnNlbXZlcikpIHtcbiAgICAgIHJldHVybiBmYWxzZTtcbiAgICB9IGVsc2UgaWYgKGxvdy5vcGVyYXRvciA9PT0gZWNvbXAgJiYgbHRmbih2ZXJzaW9uLCBsb3cuc2VtdmVyKSkge1xuICAgICAgcmV0dXJuIGZhbHNlO1xuICAgIH1cbiAgfVxuICByZXR1cm4gdHJ1ZTtcbn1cblxuZXhwb3J0cy5wcmVyZWxlYXNlID0gcHJlcmVsZWFzZTtcbmZ1bmN0aW9uIHByZXJlbGVhc2UodmVyc2lvbiwgbG9vc2UpIHtcbiAgdmFyIHBhcnNlZCA9IHBhcnNlKHZlcnNpb24sIGxvb3NlKTtcbiAgcmV0dXJuIChwYXJzZWQgJiYgcGFyc2VkLnByZXJlbGVhc2UubGVuZ3RoKSA / 
IHBhcnNlZC5wcmVyZWxlYXNlIDogbnVsbDtcbn1cblxuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvfi8ubnBtaW5zdGFsbC9zZW12ZXIvNS4yLjAvc2VtdmVyL3NlbXZlci5qc1xuICoqLyIsIi8vIHNoaW0gZm9yIHVzaW5nIHByb2Nlc3MgaW4gYnJvd3NlclxuXG52YXIgcHJvY2VzcyA9IG1vZHVsZS5leHBvcnRzID0ge307XG5cbi8vIGNhY2hlZCBmcm9tIHdoYXRldmVyIGdsb2JhbCBpcyBwcmVzZW50IHNvIHRoYXQgdGVzdCBydW5uZXJzIHRoYXQgc3R1YiBpdFxuLy8gZG9uJ3QgYnJlYWsgdGhpbmdzLiAgQnV0IHdlIG5lZWQgdG8gd3JhcCBpdCBpbiBhIHRyeSBjYXRjaCBpbiBjYXNlIGl0IGlzXG4vLyB3cmFwcGVkIGluIHN0cmljdCBtb2RlIGNvZGUgd2hpY2ggZG9lc24ndCBkZWZpbmUgYW55IGdsb2JhbHMuICBJdCdzIGluc2lkZSBhXG4vLyBmdW5jdGlvbiBiZWNhdXNlIHRyeS9jYXRjaGVzIGRlb3B0aW1pemUgaW4gY2VydGFpbiBlbmdpbmVzLlxuXG52YXIgY2FjaGVkU2V0VGltZW91dDtcbnZhciBjYWNoZWRDbGVhclRpbWVvdXQ7XG5cbihmdW5jdGlvbiAoKSB7XG4gIHRyeSB7XG4gICAgY2FjaGVkU2V0VGltZW91dCA9IHNldFRpbWVvdXQ7XG4gIH0gY2F0Y2ggKGUpIHtcbiAgICBjYWNoZWRTZXRUaW1lb3V0ID0gZnVuY3Rpb24gKCkge1xuICAgICAgdGhyb3cgbmV3IEVycm9yKCdzZXRUaW1lb3V0IGlzIG5vdCBkZWZpbmVkJyk7XG4gICAgfVxuICB9XG4gIHRyeSB7XG4gICAgY2FjaGVkQ2xlYXJUaW1lb3V0ID0gY2xlYXJUaW1lb3V0O1xuICB9IGNhdGNoIChlKSB7XG4gICAgY2FjaGVkQ2xlYXJUaW1lb3V0ID0gZnVuY3Rpb24gKCkge1xuICAgICAgdGhyb3cgbmV3IEVycm9yKCdjbGVhclRpbWVvdXQgaXMgbm90IGRlZmluZWQnKTtcbiAgICB9XG4gIH1cbn0gKCkpXG52YXIgcXVldWUgPSBbXTtcbnZhciBkcmFpbmluZyA9IGZhbHNlO1xudmFyIGN1cnJlbnRRdWV1ZTtcbnZhciBxdWV1ZUluZGV4ID0gLTE7XG5cbmZ1bmN0aW9uIGNsZWFuVXBOZXh0VGljaygpIHtcbiAgICBpZiAoIWRyYWluaW5nIHx8ICFjdXJyZW50UXVldWUpIHtcbiAgICAgICAgcmV0dXJuO1xuICAgIH1cbiAgICBkcmFpbmluZyA9IGZhbHNlO1xuICAgIGlmIChjdXJyZW50UXVldWUubGVuZ3RoKSB7XG4gICAgICAgIHF1ZXVlID0gY3VycmVudFF1ZXVlLmNvbmNhdChxdWV1ZSk7XG4gICAgfSBlbHNlIHtcbiAgICAgICAgcXVldWVJbmRleCA9IC0xO1xuICAgIH1cbiAgICBpZiAocXVldWUubGVuZ3RoKSB7XG4gICAgICAgIGRyYWluUXVldWUoKTtcbiAgICB9XG59XG5cbmZ1bmN0aW9uIGRyYWluUXVldWUoKSB7XG4gICAgaWYgKGRyYWluaW5nKSB7XG4gICAgICAgIHJldHVybjtcbiAgICB9XG4gICAgdmFyIHRpbWVvdXQgPSBjYWNoZWRTZXRUaW1lb3V0KGNsZWFuVXBOZXh0VGljayk7XG4gICAgZHJhaW5pbmcgPSB0cnVlO1xuXG4gICAgdmFyIGxlbiA9IHF1ZXVlLmxlbmd0aDtcbiAgICB3aGlsZShsZW4pIHtcbiAgICAgICAgY3VycmVudFF1ZXVlID0gcXVldWU7XG4gICAgICAgIHF1ZXVlID0gW107XG4gICAgICAgIHdoaWxlICgrK3F1ZXVlSW5kZXggPCBsZW4pIHtcbiAgICAgICAgICAgIGlmIChjdXJyZW50UXVldWUpIHtcbiAgICAgICAgICAgICAgICBjdXJyZW50UXVldWVbcXVldWVJbmRleF0ucnVuKCk7XG4gICAgICAgICAgICB9XG4gICAgICAgIH1cbiAgICAgICAgcXVldWVJbmRleCA9IC0xO1xuICAgICAgICBsZW4gPSBxdWV1ZS5sZW5ndGg7XG4gICAgfVxuICAgIGN1cnJlbnRRdWV1ZSA9IG51bGw7XG4gICAgZHJhaW5pbmcgPSBmYWxzZTtcbiAgICBjYWNoZWRDbGVhclRpbWVvdXQodGltZW91dCk7XG59XG5cbnByb2Nlc3MubmV4dFRpY2sgPSBmdW5jdGlvbiAoZnVuKSB7XG4gICAgdmFyIGFyZ3MgPSBuZXcgQXJyYXkoYXJndW1lbnRzLmxlbmd0aCAtIDEpO1xuICAgIGlmIChhcmd1bWVudHMubGVuZ3RoID4gMSkge1xuICAgICAgICBmb3IgKHZhciBpID0gMTsgaSA8IGFyZ3VtZW50cy5sZW5ndGg7IGkrKykge1xuICAgICAgICAgICAgYXJnc1tpIC0gMV0gPSBhcmd1bWVudHNbaV07XG4gICAgICAgIH1cbiAgICB9XG4gICAgcXVldWUucHVzaChuZXcgSXRlbShmdW4sIGFyZ3MpKTtcbiAgICBpZiAocXVldWUubGVuZ3RoID09PSAxICYmICFkcmFpbmluZykge1xuICAgICAgICBjYWNoZWRTZXRUaW1lb3V0KGRyYWluUXVldWUsIDApO1xuICAgIH1cbn07XG5cbi8vIHY4IGxpa2VzIHByZWRpY3RpYmxlIG9iamVjdHNcbmZ1bmN0aW9uIEl0ZW0oZnVuLCBhcnJheSkge1xuICAgIHRoaXMuZnVuID0gZnVuO1xuICAgIHRoaXMuYXJyYXkgPSBhcnJheTtcbn1cbkl0ZW0ucHJvdG90eXBlLnJ1biA9IGZ1bmN0aW9uICgpIHtcbiAgICB0aGlzLmZ1bi5hcHBseShudWxsLCB0aGlzLmFycmF5KTtcbn07XG5wcm9jZXNzLnRpdGxlID0gJ2Jyb3dzZXInO1xucHJvY2Vzcy5icm93c2VyID0gdHJ1ZTtcbnByb2Nlc3MuZW52ID0ge307XG5wcm9jZXNzLmFyZ3YgPSBbXTtcbnByb2Nlc3MudmVyc2lvbiA9ICcnOyAvLyBlbXB0eSBzdHJpbmcgdG8gYXZvaWQgcmVnZXhwIGlzc3Vlc1xucHJvY2Vzcy52ZXJzaW9ucyA9IHt9O1xuXG5mdW5jdGlvbiBub29wKCkge31cblxucHJvY2Vzcy5vbiA9IG5vb3A7XG5wcm9jZXNzLmFkZExpc3R
lbmVyID0gbm9vcDtcbnByb2Nlc3Mub25jZSA9IG5vb3A7XG5wcm9jZXNzLm9mZiA9IG5vb3A7XG5wcm9jZXNzLnJlbW92ZUxpc3RlbmVyID0gbm9vcDtcbnByb2Nlc3MucmVtb3ZlQWxsTGlzdGVuZXJzID0gbm9vcDtcbnByb2Nlc3MuZW1pdCA9IG5vb3A7XG5cbnByb2Nlc3MuYmluZGluZyA9IGZ1bmN0aW9uIChuYW1lKSB7XG4gICAgdGhyb3cgbmV3IEVycm9yKCdwcm9jZXNzLmJpbmRpbmcgaXMgbm90IHN1cHBvcnRlZCcpO1xufTtcblxucHJvY2Vzcy5jd2QgPSBmdW5jdGlvbiAoKSB7IHJldHVybiAnLycgfTtcbnByb2Nlc3MuY2hkaXIgPSBmdW5jdGlvbiAoZGlyKSB7XG4gICAgdGhyb3cgbmV3IEVycm9yKCdwcm9jZXNzLmNoZGlyIGlzIG5vdCBzdXBwb3J0ZWQnKTtcbn07XG5wcm9jZXNzLnVtYXNrID0gZnVuY3Rpb24oKSB7IHJldHVybiAwOyB9O1xuXG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogLi9 + Ly5ucG1pbnN0YWxsL3Byb2Nlc3MvMC4xMS41L3Byb2Nlc3MvYnJvd3Nlci5qc1xuICoqLyIsIi8qKlxuICogQGZpbGVPdmVydmlld1xuICogVmlld01vZGVsIENvbnN0cnVjdG9yICYgZGVmaW5pdGlvblxuICovXG5cbmltcG9ydCAqIGFzIF8gZnJvbSAnLi4vdXRpbCdcbmltcG9ydCAqIGFzIHN0YXRlIGZyb20gJy4uL2NvcmUvc3RhdGUnXG5pbXBvcnQgKiBhcyBjb21waWxlciBmcm9tICcuL2NvbXBpbGVyJ1xuaW1wb3J0ICogYXMgZGlyZWN0aXZlIGZyb20gJy4vZGlyZWN0aXZlJ1xuaW1wb3J0ICogYXMgZG9tSGVscGVyIGZyb20gJy4vZG9tLWhlbHBlcidcbmltcG9ydCAqIGFzIGV2ZW50cyBmcm9tICcuL2V2ZW50cydcblxuaW1wb3J0IHsgcmVnaXN0ZXJNb2R1bGVzLCByZWdpc3Rlck1ldGhvZHMgfSBmcm9tICcuLi9hcHAvcmVnaXN0ZXInXG5cbmZ1bmN0aW9uIGNhbGxPbGRSZWFkeUVudHJ5ICh2bSwgY29tcG9uZW50KSB7XG4gIGlmIChjb21wb25lbnQubWV0aG9kcyAmJlxuICAgICAgY29tcG9uZW50Lm1ldGhvZHMucmVhZHkpIHtcbiAgICBfLndhcm4oJ1wiZXhwb3J0cy5tZXRob2RzLnJlYWR5XCIgaXMgZGVwcmVjYXRlZCwgJyArXG4gICAgICAncGxlYXNlIHVzZSBcImV4cG9ydHMuY3JlYXRlZFwiIGluc3RlYWQnKVxuICAgIGNvbXBvbmVudC5tZXRob2RzLnJlYWR5LmNhbGwodm0pXG4gIH1cbn1cblxuLyoqXG4gKiBWaWV3TW9kZWwgY29uc3RydWN0b3JcbiAqXG4gKiBAcGFyYW0ge3N0cmluZ30gdHlwZVxuICogQHBhcmFtIHtvYmplY3R9IG9wdGlvbnMgICAgY29tcG9uZW50IG9wdGlvbnNcbiAqIEBwYXJhbSB7b2JqZWN0fSBwYXJlbnRWbSAgIHdoaWNoIGNvbnRhaW5zIF9hcHBcbiAqIEBwYXJhbSB7b2JqZWN0fSBwYXJlbnRFbCAgIHJvb3QgZWxlbWVudCBvciBmcmFnIGJsb2NrXG4gKiBAcGFyYW0ge29iamVjdH0gbWVyZ2VkRGF0YSBleHRlcm5hbCBkYXRhXG4gKiBAcGFyYW0ge29iamVjdH0gZXh0ZXJuYWxFdmVudHMgZXh0ZXJuYWwgZXZlbnRzXG4gKi9cbmV4cG9ydCBkZWZhdWx0IGZ1bmN0aW9uIFZtIChcbiAgdHlwZSxcbiAgb3B0aW9ucyxcbiAgcGFyZW50Vm0sXG4gIHBhcmVudEVsLFxuICBtZXJnZWREYXRhLFxuICBleHRlcm5hbEV2ZW50c1xuKSB7XG4gIHRoaXMuX3BhcmVudCA9IHBhcmVudFZtLl9yZWFsUGFyZW50ID8gcGFyZW50Vm0uX3JlYWxQYXJlbnQgOiBwYXJlbnRWbVxuICB0aGlzLl9hcHAgPSBwYXJlbnRWbS5fYXBwXG4gIHBhcmVudFZtLl9jaGlsZHJlblZtcyAmJiBwYXJlbnRWbS5fY2hpbGRyZW5WbXMucHVzaCh0aGlzKVxuXG4gIGlmICghb3B0aW9ucykge1xuICAgIG9wdGlvbnMgPSB0aGlzLl9hcHAuY3VzdG9tQ29tcG9uZW50TWFwW3R5cGVdIHx8IHt9XG4gIH1cbiAgY29uc3QgZGF0YSA9IG9wdGlvbnMuZGF0YSB8fCB7fVxuXG4gIHRoaXMuX29wdGlvbnMgPSBvcHRpb25zXG4gIHRoaXMuX21ldGhvZHMgPSBvcHRpb25zLm1ldGhvZHMgfHwge31cbiAgdGhpcy5fY29tcHV0ZWQgPSBvcHRpb25zLmNvbXB1dGVkIHx8IHt9XG4gIHRoaXMuX2NzcyA9IG9wdGlvbnMuc3R5bGUgfHwge31cbiAgdGhpcy5faWRzID0ge31cbiAgdGhpcy5fdm1FdmVudHMgPSB7fVxuICB0aGlzLl9jaGlsZHJlblZtcyA9IFtdXG4gIHRoaXMuX3R5cGUgPSB0eXBlXG5cbiAgLy8gYmluZCBldmVudHMgYW5kIGxpZmVjeWNsZXNcbiAgdGhpcy5faW5pdEV2ZW50cyhleHRlcm5hbEV2ZW50cylcblxuICBfLmRlYnVnKGBcImluaXRcIiBsaWZlY3ljbGUgaW4gVm0oJHt0aGlzLl90eXBlfSlgKVxuICB0aGlzLiRlbWl0KCdob29rOmluaXQnKVxuICB0aGlzLl9pbml0ZWQgPSB0cnVlXG4gIC8vIHByb3h5IGRhdGEgYW5kIG1ldGhvZHNcbiAgLy8gb2JzZXJ2ZSBkYXRhIGFuZCBhZGQgdGhpcyB0byB2bXNcbiAgdGhpcy5fZGF0YSA9IHR5cGVvZiBkYXRhID09PSAnZnVuY3Rpb24nID8gZGF0YSgpIDogZGF0YVxuICBpZiAobWVyZ2VkRGF0YSkge1xuICAgIF8uZXh0ZW5kKHRoaXMuX2RhdGEsIG1lcmdlZERhdGEpXG4gIH1cbiAgdGhpcy5faW5pdFN0YXRlKClcblxuICBfLmRlYnVnKGBcImNyZWF0ZWRcIiBsaWZlY3ljbGUgaW4gVm0oJHt0aGlzLl90eXBlfSlgKVxuICB0aGlzLiRlbWl0KCdob29rOmNyZWF0ZWQnKVxuICB0aGlzLl9jcmVhdGVkID0gdHJ1ZVxuICAvLyBiYWNrd2FyZCBvbGQgcmVhZHkgZW50cnlcbiAgY2FsbE9sZFJlYWR5RW50cnkodGhpcywg
b3B0aW9ucylcblxuICAvLyBpZiBubyBwYXJlbnRFbGVtZW50IHRoZW4gc3BlY2lmeSB0aGUgZG9jdW1lbnRFbGVtZW50XG4gIHRoaXMuX3BhcmVudEVsID0gcGFyZW50RWwgfHwgdGhpcy5fYXBwLmRvYy5kb2N1bWVudEVsZW1lbnRcbiAgdGhpcy5fYnVpbGQoKVxufVxuXG5fLmV4dGVuZChWbS5wcm90b3R5cGUsIHN0YXRlLCBjb21waWxlciwgZGlyZWN0aXZlLCBkb21IZWxwZXIsIGV2ZW50cylcbl8uZXh0ZW5kKFZtLCB7XG4gIHJlZ2lzdGVyTW9kdWxlcyxcbiAgcmVnaXN0ZXJNZXRob2RzXG59KVxuXG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogL1VzZXJzL3poYW9qaW5qaWFuZy9TaXRlcy9wdWJsaWMvd2VleC9odG1sNS9kZWZhdWx0L3ZtL2luZGV4LmpzXG4gKiovIiwiLyogZXNsaW50LWRpc2FibGUgKi9cblxuaW1wb3J0IFdhdGNoZXIgZnJvbSAnLi93YXRjaGVyJ1xuaW1wb3J0IERlcCBmcm9tICcuL2RlcCdcbmltcG9ydCB7XG4gIG9ic2VydmUsXG4gIHByb3h5LFxuICB1bnByb3h5XG59IGZyb20gJy4vb2JzZXJ2ZXInXG5pbXBvcnQge1xuICBpc0FycmF5LFxuICBpc1BsYWluT2JqZWN0LFxuICBiaW5kXG59IGZyb20gJy4uL3V0aWwnXG5cbmV4cG9ydCBmdW5jdGlvbiBfaW5pdFN0YXRlICgpIHtcbiAgY29uc3Qgdm0gPSB0aGlzXG4gIHZtLl93YXRjaGVycyA9IFtdXG4gIHZtLl9pbml0RGF0YSgpXG4gIHZtLl9pbml0Q29tcHV0ZWQoKVxuICB2bS5faW5pdE1ldGhvZHMoKVxufVxuXG5leHBvcnQgZnVuY3Rpb24gX2luaXREYXRhICgpIHtcbiAgY29uc3Qgdm0gPSB0aGlzXG4gIGxldCBkYXRhID0gdm0uX2RhdGFcblxuICBpZiAoIWlzUGxhaW5PYmplY3QoZGF0YSkpIHtcbiAgICBkYXRhID0ge31cbiAgfVxuICAvLyBwcm94eSBkYXRhIG9uIGluc3RhbmNlXG4gIGNvbnN0IGtleXMgPSBPYmplY3Qua2V5cyhkYXRhKVxuICBsZXQgaSA9IGtleXMubGVuZ3RoXG4gIHdoaWxlIChpLS0pIHtcbiAgICBwcm94eSh2bSwga2V5c1tpXSlcbiAgfVxuICAvLyBvYnNlcnZlIGRhdGFcbiAgb2JzZXJ2ZShkYXRhLCB2bSlcbn1cblxuZnVuY3Rpb24gbm9vcCAoKSB7XG59XG5cbmV4cG9ydCBmdW5jdGlvbiBfaW5pdENvbXB1dGVkICgpIHtcbiAgY29uc3Qgdm0gPSB0aGlzXG4gIGNvbnN0IGNvbXB1dGVkID0gdm0uX2NvbXB1dGVkXG4gIGlmIChjb21wdXRlZCkge1xuICAgIGZvciAobGV0IGtleSBpbiBjb21wdXRlZCkge1xuICAgICAgY29uc3QgdXNlckRlZiA9IGNvbXB1dGVkW2tleV1cbiAgICAgIGNvbnN0IGRlZiA9IHtcbiAgICAgICAgZW51bWVyYWJsZTogdHJ1ZSxcbiAgICAgICAgY29uZmlndXJhYmxlOiB0cnVlXG4gICAgICB9XG4gICAgICBpZiAodHlwZW9mIHVzZXJEZWYgPT09ICdmdW5jdGlvbicpIHtcbiAgICAgICAgZGVmLmdldCA9IG1ha2VDb21wdXRlZEdldHRlcih1c2VyRGVmLCB2bSlcbiAgICAgICAgZGVmLnNldCA9IG5vb3BcbiAgICAgIH0gZWxzZSB7XG4gICAgICAgIGRlZi5nZXQgPSB1c2VyRGVmLmdldFxuICAgICAgICAgID8gdXNlckRlZi5jYWNoZSAhPT0gZmFsc2VcbiAgICAgICAgICAgID8gbWFrZUNvbXB1dGVkR2V0dGVyKHVzZXJEZWYuZ2V0LCB2bSlcbiAgICAgICAgICAgIDogYmluZCh1c2VyRGVmLmdldCwgdm0pXG4gICAgICAgICAgOiBub29wXG4gICAgICAgIGRlZi5zZXQgPSB1c2VyRGVmLnNldFxuICAgICAgICAgID8gYmluZCh1c2VyRGVmLnNldCwgdm0pXG4gICAgICAgICAgOiBub29wXG4gICAgICB9XG4gICAgICBPYmplY3QuZGVmaW5lUHJvcGVydHkodm0sIGtleSwgZGVmKVxuICAgIH1cbiAgfVxufVxuXG5mdW5jdGlvbiBtYWtlQ29tcHV0ZWRHZXR0ZXIgKGdldHRlciwgb3duZXIpIHtcbiAgY29uc3Qgd2F0Y2hlciA9IG5ldyBXYXRjaGVyKG93bmVyLCBnZXR0ZXIsIG51bGwsIHtcbiAgICBsYXp5OiB0cnVlXG4gIH0pXG4gIHJldHVybiBmdW5jdGlvbiBjb21wdXRlZEdldHRlciAoKSB7XG4gICAgaWYgKHdhdGNoZXIuZGlydHkpIHtcbiAgICAgIHdhdGNoZXIuZXZhbHVhdGUoKVxuICAgIH1cbiAgICBpZiAoRGVwLnRhcmdldCkge1xuICAgICAgd2F0Y2hlci5kZXBlbmQoKVxuICAgIH1cbiAgICByZXR1cm4gd2F0Y2hlci52YWx1ZVxuICB9XG59XG5cbmV4cG9ydCBmdW5jdGlvbiBfaW5pdE1ldGhvZHMgKCkge1xuICBjb25zdCB2bSA9IHRoaXNcbiAgY29uc3QgbWV0aG9kcyA9IHZtLl9tZXRob2RzXG4gIGlmIChtZXRob2RzKSB7XG4gICAgZm9yIChsZXQga2V5IGluIG1ldGhvZHMpIHtcbiAgICAgIHZtW2tleV0gPSBiaW5kKG1ldGhvZHNba2V5XSwgdm0pXG4gICAgfVxuICB9XG59XG5cblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L2h0bWw1L2RlZmF1bHQvY29yZS9zdGF0ZS5qc1xuICoqLyIsIi8qIGVzbGludC1kaXNhYmxlICovXG5cbmltcG9ydCBEZXAgZnJvbSAnLi9kZXAnXG4vLyBpbXBvcnQgeyBwdXNoV2F0Y2hlciB9IGZyb20gJy4vYmF0Y2hlcidcbmltcG9ydCB7XG4gIHdhcm4sXG4gIHJlbW92ZSxcbiAgZXh0ZW5kLFxuICBpc0FycmF5LFxuICBpc09iamVjdCxcbiAgcGFyc2VQYXRoLFxuICBfU2V0IGFzIFNldFxufSBmcm9tICcuLi91dGlsJ1xuXG5sZXQgdWlkID0gMFxubGV0IHByZXZUYXJnZXRcblxuLyoqXG4gKiBBIHdhdGNoZXIgcGFyc2V
zIGFuIGV4cHJlc3Npb24sIGNvbGxlY3RzIGRlcGVuZGVuY2llcyxcbiAqIGFuZCBmaXJlcyBjYWxsYmFjayB3aGVuIHRoZSBleHByZXNzaW9uIHZhbHVlIGNoYW5nZXMuXG4gKiBUaGlzIGlzIHVzZWQgZm9yIGJvdGggdGhlICR3YXRjaCgpIGFwaSBhbmQgZGlyZWN0aXZlcy5cbiAqXG4gKiBAcGFyYW0ge1Z1ZX0gdm1cbiAqIEBwYXJhbSB7U3RyaW5nfEZ1bmN0aW9ufSBleHBPckZuXG4gKiBAcGFyYW0ge0Z1bmN0aW9ufSBjYlxuICogQHBhcmFtIHtPYmplY3R9IG9wdGlvbnNcbiAqICAgICAgICAgICAgICAgICAtIHtBcnJheX0gZmlsdGVyc1xuICogICAgICAgICAgICAgICAgIC0ge0Jvb2xlYW59IHR3b1dheVxuICogICAgICAgICAgICAgICAgIC0ge0Jvb2xlYW59IGRlZXBcbiAqICAgICAgICAgICAgICAgICAtIHtCb29sZWFufSB1c2VyXG4gKiAgICAgICAgICAgICAgICAgLSB7Qm9vbGVhbn0gc3luY1xuICogICAgICAgICAgICAgICAgIC0ge0Jvb2xlYW59IGxhenlcbiAqICAgICAgICAgICAgICAgICAtIHtGdW5jdGlvbn0gW3ByZVByb2Nlc3NdXG4gKiAgICAgICAgICAgICAgICAgLSB7RnVuY3Rpb259IFtwb3N0UHJvY2Vzc11cbiAqIEBjb25zdHJ1Y3RvclxuICovXG5cbmV4cG9ydCBkZWZhdWx0IGZ1bmN0aW9uIFdhdGNoZXIgKHZtLCBleHBPckZuLCBjYiwgb3B0aW9ucykge1xuICAvLyBtaXggaW4gb3B0aW9uc1xuICBpZiAob3B0aW9ucykge1xuICAgIGV4dGVuZCh0aGlzLCBvcHRpb25zKVxuICB9XG4gIGNvbnN0IGlzRm4gPSB0eXBlb2YgZXhwT3JGbiA9PT0gJ2Z1bmN0aW9uJ1xuICB0aGlzLnZtID0gdm1cbiAgdm0uX3dhdGNoZXJzLnB1c2godGhpcylcbiAgdGhpcy5leHByZXNzaW9uID0gZXhwT3JGblxuICB0aGlzLmNiID0gY2JcbiAgdGhpcy5pZCA9ICsrdWlkIC8vIHVpZCBmb3IgYmF0Y2hpbmdcbiAgdGhpcy5hY3RpdmUgPSB0cnVlXG4gIHRoaXMuZGlydHkgPSB0aGlzLmxhenkgLy8gZm9yIGxhenkgd2F0Y2hlcnNcbiAgdGhpcy5kZXBzID0gW11cbiAgdGhpcy5uZXdEZXBzID0gW11cbiAgdGhpcy5kZXBJZHMgPSBuZXcgU2V0KClcbiAgdGhpcy5uZXdEZXBJZHMgPSBuZXcgU2V0KClcbiAgLy8gcGFyc2UgZXhwcmVzc2lvbiBmb3IgZ2V0dGVyXG4gIGlmIChpc0ZuKSB7XG4gICAgdGhpcy5nZXR0ZXIgPSBleHBPckZuXG4gIH0gZWxzZSB7XG4gICAgdGhpcy5nZXR0ZXIgPSBwYXJzZVBhdGgoZXhwT3JGbilcbiAgICBpZiAoIXRoaXMuZ2V0dGVyKSB7XG4gICAgICB0aGlzLmdldHRlciA9IGZ1bmN0aW9uICgpIHt9XG4gICAgICBwcm9jZXNzLmVudi5OT0RFX0VOViAhPT0gJ3Byb2R1Y3Rpb24nICYmIHdhcm4oXG4gICAgICAgICdGYWlsZWQgd2F0Y2hpbmcgcGF0aDogJyArIGV4cE9yRm4gK1xuICAgICAgICAnV2F0Y2hlciBvbmx5IGFjY2VwdHMgc2ltcGxlIGRvdC1kZWxpbWl0ZWQgcGF0aHMuICcgK1xuICAgICAgICAnRm9yIGZ1bGwgY29udHJvbCwgdXNlIGEgZnVuY3Rpb24gaW5zdGVhZC4nLFxuICAgICAgICB2bVxuICAgICAgKVxuICAgIH1cbiAgfVxuICB0aGlzLnZhbHVlID0gdGhpcy5sYXp5XG4gICAgPyB1bmRlZmluZWRcbiAgICA6IHRoaXMuZ2V0KClcbiAgLy8gc3RhdGUgZm9yIGF2b2lkaW5nIGZhbHNlIHRyaWdnZXJzIGZvciBkZWVwIGFuZCBBcnJheVxuICAvLyB3YXRjaGVycyBkdXJpbmcgdm0uX2RpZ2VzdCgpXG4gIHRoaXMucXVldWVkID0gdGhpcy5zaGFsbG93ID0gZmFsc2Vcbn1cblxuLyoqXG4gKiBFdmFsdWF0ZSB0aGUgZ2V0dGVyLCBhbmQgcmUtY29sbGVjdCBkZXBlbmRlbmNpZXMuXG4gKi9cblxuV2F0Y2hlci5wcm90b3R5cGUuZ2V0ID0gZnVuY3Rpb24gKCkge1xuICB0aGlzLmJlZm9yZUdldCgpXG4gIGNvbnN0IHZhbHVlID0gdGhpcy5nZXR0ZXIuY2FsbCh0aGlzLnZtLCB0aGlzLnZtKVxuICAvLyBcInRvdWNoXCIgZXZlcnkgcHJvcGVydHkgc28gdGhleSBhcmUgYWxsIHRyYWNrZWQgYXNcbiAgLy8gZGVwZW5kZW5jaWVzIGZvciBkZWVwIHdhdGNoaW5nXG4gIGlmICh0aGlzLmRlZXApIHtcbiAgICB0cmF2ZXJzZSh2YWx1ZSlcbiAgfVxuICB0aGlzLmFmdGVyR2V0KClcbiAgcmV0dXJuIHZhbHVlXG59XG5cbi8qKlxuICogUHJlcGFyZSBmb3IgZGVwZW5kZW5jeSBjb2xsZWN0aW9uLlxuICovXG5cbldhdGNoZXIucHJvdG90eXBlLmJlZm9yZUdldCA9IGZ1bmN0aW9uICgpIHtcbiAgcHJldlRhcmdldCA9IERlcC50YXJnZXRcbiAgRGVwLnRhcmdldCA9IHRoaXNcbn1cblxuLyoqXG4gKiBBZGQgYSBkZXBlbmRlbmN5IHRvIHRoaXMgZGlyZWN0aXZlLlxuICpcbiAqIEBwYXJhbSB7RGVwfSBkZXBcbiAqL1xuXG5XYXRjaGVyLnByb3RvdHlwZS5hZGREZXAgPSBmdW5jdGlvbiAoZGVwKSB7XG4gIGNvbnN0IGlkID0gZGVwLmlkXG4gIGlmICghdGhpcy5uZXdEZXBJZHMuaGFzKGlkKSkge1xuICAgIHRoaXMubmV3RGVwSWRzLmFkZChpZClcbiAgICB0aGlzLm5ld0RlcHMucHVzaChkZXApXG4gICAgaWYgKCF0aGlzLmRlcElkcy5oYXMoaWQpKSB7XG4gICAgICBkZXAuYWRkU3ViKHRoaXMpXG4gICAgfVxuICB9XG59XG5cbi8qKlxuICogQ2xlYW4gdXAgZm9yIGRlcGVuZGVuY3kgY29sbGVjdGlvbi5cbiAqL1xuXG5XYXRjaGVyLnByb3RvdHlwZS5hZnRlckdldCA9IGZ1bmN0aW9uICgpIHtcbiAgRGVwLnRhcmdldCA9IHByZXZUYXJnZXRcbiAgbGV0IGkgPS
B0aGlzLmRlcHMubGVuZ3RoXG4gIHdoaWxlIChpLS0pIHtcbiAgICBjb25zdCBkZXAgPSB0aGlzLmRlcHNbaV1cbiAgICBpZiAoIXRoaXMubmV3RGVwSWRzLmhhcyhkZXAuaWQpKSB7XG4gICAgICBkZXAucmVtb3ZlU3ViKHRoaXMpXG4gICAgfVxuICB9XG4gIGxldCB0bXAgPSB0aGlzLmRlcElkc1xuICB0aGlzLmRlcElkcyA9IHRoaXMubmV3RGVwSWRzXG4gIHRoaXMubmV3RGVwSWRzID0gdG1wXG4gIHRoaXMubmV3RGVwSWRzLmNsZWFyKClcbiAgdG1wID0gdGhpcy5kZXBzXG4gIHRoaXMuZGVwcyA9IHRoaXMubmV3RGVwc1xuICB0aGlzLm5ld0RlcHMgPSB0bXBcbiAgdGhpcy5uZXdEZXBzLmxlbmd0aCA9IDBcbn1cblxuLyoqXG4gKiBTdWJzY3JpYmVyIGludGVyZmFjZS5cbiAqIFdpbGwgYmUgY2FsbGVkIHdoZW4gYSBkZXBlbmRlbmN5IGNoYW5nZXMuXG4gKlxuICogQHBhcmFtIHtCb29sZWFufSBzaGFsbG93XG4gKi9cblxuV2F0Y2hlci5wcm90b3R5cGUudXBkYXRlID0gZnVuY3Rpb24gKHNoYWxsb3cpIHtcbiAgaWYgKHRoaXMubGF6eSkge1xuICAgIHRoaXMuZGlydHkgPSB0cnVlXG4gIH0gZWxzZSB7XG4gICAgdGhpcy5ydW4oKVxuICB9XG4gIC8vIH0gZWxzZSBpZiAodGhpcy5zeW5jKSB7XG4gIC8vICAgdGhpcy5ydW4oKVxuICAvLyB9IGVsc2Uge1xuICAvLyAgIC8vIGlmIHF1ZXVlZCwgb25seSBvdmVyd3JpdGUgc2hhbGxvdyB3aXRoIG5vbi1zaGFsbG93LFxuICAvLyAgIC8vIGJ1dCBub3QgdGhlIG90aGVyIHdheSBhcm91bmQuXG4gIC8vICAgdGhpcy5zaGFsbG93ID0gdGhpcy5xdWV1ZWRcbiAgLy8gICAgID8gc2hhbGxvd1xuICAvLyAgICAgICA / IHRoaXMuc2hhbGxvd1xuICAvLyAgICAgICA6IGZhbHNlXG4gIC8vICAgICA6ICEhc2hhbGxvd1xuICAvLyAgIHRoaXMucXVldWVkID0gdHJ1ZVxuICAvLyAgIHB1c2hXYXRjaGVyKHRoaXMpXG4gIC8vIH1cbn1cblxuLyoqXG4gKiBCYXRjaGVyIGpvYiBpbnRlcmZhY2UuXG4gKiBXaWxsIGJlIGNhbGxlZCBieSB0aGUgYmF0Y2hlci5cbiAqL1xuXG5XYXRjaGVyLnByb3RvdHlwZS5ydW4gPSBmdW5jdGlvbiAoKSB7XG4gIGlmICh0aGlzLmFjdGl2ZSkge1xuICAgIGNvbnN0IHZhbHVlID0gdGhpcy5nZXQoKVxuICAgIGlmIChcbiAgICAgIHZhbHVlICE9PSB0aGlzLnZhbHVlIHx8XG4gICAgICAvLyBEZWVwIHdhdGNoZXJzIGFuZCB3YXRjaGVycyBvbiBPYmplY3QvQXJyYXlzIHNob3VsZCBmaXJlIGV2ZW5cbiAgICAgIC8vIHdoZW4gdGhlIHZhbHVlIGlzIHRoZSBzYW1lLCBiZWNhdXNlIHRoZSB2YWx1ZSBtYXlcbiAgICAgIC8vIGhhdmUgbXV0YXRlZDsgYnV0IG9ubHkgZG8gc28gaWYgdGhpcyBpcyBhXG4gICAgICAvLyBub24tc2hhbGxvdyB1cGRhdGUgKGNhdXNlZCBieSBhIHZtIGRpZ2VzdCkuXG4gICAgICAoKGlzT2JqZWN0KHZhbHVlKSB8fCB0aGlzLmRlZXApICYmICF0aGlzLnNoYWxsb3cpXG4gICAgKSB7XG4gICAgICAvLyBzZXQgbmV3IHZhbHVlXG4gICAgICBjb25zdCBvbGRWYWx1ZSA9IHRoaXMudmFsdWVcbiAgICAgIHRoaXMudmFsdWUgPSB2YWx1ZVxuICAgICAgdGhpcy5jYi5jYWxsKHRoaXMudm0sIHZhbHVlLCBvbGRWYWx1ZSlcbiAgICB9XG4gICAgdGhpcy5xdWV1ZWQgPSB0aGlzLnNoYWxsb3cgPSBmYWxzZVxuICB9XG59XG5cbi8qKlxuICogRXZhbHVhdGUgdGhlIHZhbHVlIG9mIHRoZSB3YXRjaGVyLlxuICogVGhpcyBvbmx5IGdldHMgY2FsbGVkIGZvciBsYXp5IHdhdGNoZXJzLlxuICovXG5cbldhdGNoZXIucHJvdG90eXBlLmV2YWx1YXRlID0gZnVuY3Rpb24gKCkge1xuICAvLyBhdm9pZCBvdmVyd3JpdGluZyBhbm90aGVyIHdhdGNoZXIgdGhhdCBpcyBiZWluZ1xuICAvLyBjb2xsZWN0ZWQuXG4gIGNvbnN0IGN1cnJlbnQgPSBEZXAudGFyZ2V0XG4gIHRoaXMudmFsdWUgPSB0aGlzLmdldCgpXG4gIHRoaXMuZGlydHkgPSBmYWxzZVxuICBEZXAudGFyZ2V0ID0gY3VycmVudFxufVxuXG4vKipcbiAqIERlcGVuZCBvbiBhbGwgZGVwcyBjb2xsZWN0ZWQgYnkgdGhpcyB3YXRjaGVyLlxuICovXG5cbldhdGNoZXIucHJvdG90eXBlLmRlcGVuZCA9IGZ1bmN0aW9uICgpIHtcbiAgbGV0IGkgPSB0aGlzLmRlcHMubGVuZ3RoXG4gIHdoaWxlIChpLS0pIHtcbiAgICB0aGlzLmRlcHNbaV0uZGVwZW5kKClcbiAgfVxufVxuXG4vKipcbiAqIFJlbW92ZSBzZWxmIGZyb20gYWxsIGRlcGVuZGVuY2llcycgc3ViY3JpYmVyIGxpc3QuXG4gKi9cblxuV2F0Y2hlci5wcm90b3R5cGUudGVhcmRvd24gPSBmdW5jdGlvbiAoKSB7XG4gIGlmICh0aGlzLmFjdGl2ZSkge1xuICAgIC8vIHJlbW92ZSBzZWxmIGZyb20gdm0ncyB3YXRjaGVyIGxpc3RcbiAgICAvLyB0aGlzIGlzIGEgc29tZXdoYXQgZXhwZW5zaXZlIG9wZXJhdGlvbiBzbyB3ZSBza2lwIGl0XG4gICAgLy8gaWYgdGhlIHZtIGlzIGJlaW5nIGRlc3Ryb3llZCBvciBpcyBwZXJmb3JtaW5nIGEgdi1mb3JcbiAgICAvLyByZS1yZW5kZXIgKHRoZSB3YXRjaGVyIGxpc3QgaXMgdGhlbiBmaWx0ZXJlZCBieSB2LWZvcikuXG4gICAgaWYgKCF0aGlzLnZtLl9pc0JlaW5nRGVzdHJveWVkICYmICF0aGlzLnZtLl92Rm9yUmVtb3ZpbmcpIHtcbiAgICAgIHJlbW92ZSh0aGlzLnZtLl93YXRjaGVycywgdGhpcylcbiAgICB9XG4gICAgbGV0IGkgPSB0aGlzLmRlcHMubGVuZ3RoXG4gICAgd2h
pbGUgKGktLSkge1xuICAgICAgdGhpcy5kZXBzW2ldLnJlbW92ZVN1Yih0aGlzKVxuICAgIH1cbiAgICB0aGlzLmFjdGl2ZSA9IGZhbHNlXG4gICAgdGhpcy52bSA9IHRoaXMuY2IgPSB0aGlzLnZhbHVlID0gbnVsbFxuICB9XG59XG5cbi8qKlxuICogUmVjcnVzaXZlbHkgdHJhdmVyc2UgYW4gb2JqZWN0IHRvIGV2b2tlIGFsbCBjb252ZXJ0ZWRcbiAqIGdldHRlcnMsIHNvIHRoYXQgZXZlcnkgbmVzdGVkIHByb3BlcnR5IGluc2lkZSB0aGUgb2JqZWN0XG4gKiBpcyBjb2xsZWN0ZWQgYXMgYSBcImRlZXBcIiBkZXBlbmRlbmN5LlxuICpcbiAqIEBwYXJhbSB7Kn0gdmFsXG4gKiBAcGFyYW0ge1NldH0gc2VlblxuICovXG5cbmNvbnN0IHNlZW5PYmplY3RzID0gbmV3IFNldCgpXG5mdW5jdGlvbiB0cmF2ZXJzZSAodmFsLCBzZWVuKSB7XG4gIGxldCBpLCBrZXlzLCBpc0EsIGlzT1xuICBpZiAoIXNlZW4pIHtcbiAgICBzZWVuID0gc2Vlbk9iamVjdHNcbiAgICBzZWVuLmNsZWFyKClcbiAgfVxuICBpc0EgPSBpc0FycmF5KHZhbClcbiAgaXNPID0gaXNPYmplY3QodmFsKVxuICBpZiAoaXNBIHx8IGlzTykge1xuICAgIGlmICh2YWwuX19vYl9fKSB7XG4gICAgICBjb25zdCBkZXBJZCA9IHZhbC5fX29iX18uZGVwLmlkXG4gICAgICBpZiAoc2Vlbi5oYXMoZGVwSWQpKSB7XG4gICAgICAgIHJldHVyblxuICAgICAgfSBlbHNlIHtcbiAgICAgICAgc2Vlbi5hZGQoZGVwSWQpXG4gICAgICB9XG4gICAgfVxuICAgIGlmIChpc0EpIHtcbiAgICAgIGkgPSB2YWwubGVuZ3RoXG4gICAgICB3aGlsZSAoaS0tKSB0cmF2ZXJzZSh2YWxbaV0sIHNlZW4pXG4gICAgfSBlbHNlIGlmIChpc08pIHtcbiAgICAgIGtleXMgPSBPYmplY3Qua2V5cyh2YWwpXG4gICAgICBpID0ga2V5cy5sZW5ndGhcbiAgICAgIHdoaWxlIChpLS0pIHRyYXZlcnNlKHZhbFtrZXlzW2ldXSwgc2VlbilcbiAgICB9XG4gIH1cbn1cblxuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvaHRtbDUvZGVmYXVsdC9jb3JlL3dhdGNoZXIuanNcbiAqKi8iLCIvKiBlc2xpbnQtZGlzYWJsZSAqL1xuXG5pbXBvcnQgeyByZW1vdmUgfSBmcm9tICcuLi91dGlsJ1xuXG5sZXQgdWlkID0gMFxuXG4vKipcbiAqIEEgZGVwIGlzIGFuIG9ic2VydmFibGUgdGhhdCBjYW4gaGF2ZSBtdWx0aXBsZVxuICogZGlyZWN0aXZlcyBzdWJzY3JpYmluZyB0byBpdC5cbiAqXG4gKiBAY29uc3RydWN0b3JcbiAqL1xuXG5leHBvcnQgZGVmYXVsdCBmdW5jdGlvbiBEZXAgKCkge1xuICB0aGlzLmlkID0gdWlkKytcbiAgdGhpcy5zdWJzID0gW11cbn1cblxuLy8gdGhlIGN1cnJlbnQgdGFyZ2V0IHdhdGNoZXIgYmVpbmcgZXZhbHVhdGVkLlxuLy8gdGhpcyBpcyBnbG9iYWxseSB1bmlxdWUgYmVjYXVzZSB0aGVyZSBjb3VsZCBiZSBvbmx5IG9uZVxuLy8gd2F0Y2hlciBiZWluZyBldmFsdWF0ZWQgYXQgYW55IHRpbWUuXG5EZXAudGFyZ2V0ID0gbnVsbFxuXG4vKipcbiAqIEFkZCBhIGRpcmVjdGl2ZSBzdWJzY3JpYmVyLlxuICpcbiAqIEBwYXJhbSB7RGlyZWN0aXZlfSBzdWJcbiAqL1xuXG5EZXAucHJvdG90eXBlLmFkZFN1YiA9IGZ1bmN0aW9uIChzdWIpIHtcbiAgdGhpcy5zdWJzLnB1c2goc3ViKVxufVxuXG4vKipcbiAqIFJlbW92ZSBhIGRpcmVjdGl2ZSBzdWJzY3JpYmVyLlxuICpcbiAqIEBwYXJhbSB7RGlyZWN0aXZlfSBzdWJcbiAqL1xuXG5EZXAucHJvdG90eXBlLnJlbW92ZVN1YiA9IGZ1bmN0aW9uIChzdWIpIHtcbiAgcmVtb3ZlKHRoaXMuc3Vicywgc3ViKVxufVxuXG4vKipcbiAqIEFkZCBzZWxmIGFzIGEgZGVwZW5kZW5jeSB0byB0aGUgdGFyZ2V0IHdhdGNoZXIuXG4gKi9cblxuRGVwLnByb3RvdHlwZS5kZXBlbmQgPSBmdW5jdGlvbiAoKSB7XG4gIERlcC50YXJnZXQuYWRkRGVwKHRoaXMpXG59XG5cbi8qKlxuICogTm90aWZ5IGFsbCBzdWJzY3JpYmVycyBvZiBhIG5ldyB2YWx1ZS5cbiAqL1xuXG5EZXAucHJvdG90eXBlLm5vdGlmeSA9IGZ1bmN0aW9uICgpIHtcbiAgLy8gc3RhYmxpemUgdGhlIHN1YnNjcmliZXIgbGlzdCBmaXJzdFxuICBjb25zdCBzdWJzID0gdGhpcy5zdWJzLnNsaWNlKClcbiAgZm9yIChsZXQgaSA9IDAsIGwgPSBzdWJzLmxlbmd0aDsgaSA8IGw7IGkrKykge1xuICAgIHN1YnNbaV0udXBkYXRlKClcbiAgfVxufVxuXG5cblxuLyoqIFdFQlBBQ0sgRk9PVEVSICoqXG4gKiogL1VzZXJzL3poYW9qaW5qaWFuZy9TaXRlcy9wdWJsaWMvd2VleC9odG1sNS9kZWZhdWx0L2NvcmUvZGVwLmpzXG4gKiovIiwiLyogZXNsaW50LWRpc2FibGUgKi9cblxuaW1wb3J0IERlcCBmcm9tICcuL2RlcCdcbmltcG9ydCB7IGFycmF5TWV0aG9kcyB9IGZyb20gJy4vYXJyYXknXG5pbXBvcnQge1xuICBkZWYsXG4gIHJlbW92ZSxcbiAgaXNBcnJheSxcbiAgaXNPYmplY3QsXG4gIGlzUGxhaW5PYmplY3QsXG4gIGhhc1Byb3RvLFxuICBoYXNPd24sXG4gIGlzUmVzZXJ2ZWRcbn0gZnJvbSAnLi4vdXRpbCdcblxuY29uc3QgYXJyYXlLZXlzID0gT2JqZWN0LmdldE93blByb3BlcnR5TmFtZXMoYXJyYXlNZXRob2RzKVxuXG4vKipcbiAqIE9ic2VydmVyIGNsYXNzIHRoYXQgYXJlIGF0dGFjaGVkIHRvIGVhY2ggb2JzZXJ2ZWRcbiAqIG9iamVjdC4gT25jZSBhdHRhY2hlZCwgdGhlIG9ic2VydmVyIG
NvbnZlcnRzIHRhcmdldFxuICogb2JqZWN0J3MgcHJvcGVydHkga2V5cyBpbnRvIGdldHRlci9zZXR0ZXJzIHRoYXRcbiAqIGNvbGxlY3QgZGVwZW5kZW5jaWVzIGFuZCBkaXNwYXRjaGVzIHVwZGF0ZXMuXG4gKlxuICogQHBhcmFtIHtBcnJheXxPYmplY3R9IHZhbHVlXG4gKiBAY29uc3RydWN0b3JcbiAqL1xuXG5leHBvcnQgZnVuY3Rpb24gT2JzZXJ2ZXIgKHZhbHVlKSB7XG4gIHRoaXMudmFsdWUgPSB2YWx1ZVxuICB0aGlzLmRlcCA9IG5ldyBEZXAoKVxuICBkZWYodmFsdWUsICdfX29iX18nLCB0aGlzKVxuICBpZiAoaXNBcnJheSh2YWx1ZSkpIHtcbiAgICBjb25zdCBhdWdtZW50ID0gaGFzUHJvdG9cbiAgICAgID8gcHJvdG9BdWdtZW50XG4gICAgICA6IGNvcHlBdWdtZW50XG4gICAgYXVnbWVudCh2YWx1ZSwgYXJyYXlNZXRob2RzLCBhcnJheUtleXMpXG4gICAgdGhpcy5vYnNlcnZlQXJyYXkodmFsdWUpXG4gIH0gZWxzZSB7XG4gICAgdGhpcy53YWxrKHZhbHVlKVxuICB9XG59XG5cbi8vIEluc3RhbmNlIG1ldGhvZHNcblxuLyoqXG4gKiBXYWxrIHRocm91Z2ggZWFjaCBwcm9wZXJ0eSBhbmQgY29udmVydCB0aGVtIGludG9cbiAqIGdldHRlci9zZXR0ZXJzLiBUaGlzIG1ldGhvZCBzaG91bGQgb25seSBiZSBjYWxsZWQgd2hlblxuICogdmFsdWUgdHlwZSBpcyBPYmplY3QuXG4gKlxuICogQHBhcmFtIHtPYmplY3R9IG9ialxuICovXG5cbk9ic2VydmVyLnByb3RvdHlwZS53YWxrID0gZnVuY3Rpb24gKG9iaikge1xuICBmb3IgKGxldCBrZXkgaW4gb2JqKSB7XG4gICAgdGhpcy5jb252ZXJ0KGtleSwgb2JqW2tleV0pXG4gIH1cbn1cblxuLyoqXG4gKiBPYnNlcnZlIGEgbGlzdCBvZiBBcnJheSBpdGVtcy5cbiAqXG4gKiBAcGFyYW0ge0FycmF5fSBpdGVtc1xuICovXG5cbk9ic2VydmVyLnByb3RvdHlwZS5vYnNlcnZlQXJyYXkgPSBmdW5jdGlvbiAoaXRlbXMpIHtcbiAgZm9yIChsZXQgaSA9IDAsIGwgPSBpdGVtcy5sZW5ndGg7IGkgPCBsOyBpKyspIHtcbiAgICBvYnNlcnZlKGl0ZW1zW2ldKVxuICB9XG59XG5cbi8qKlxuICogQ29udmVydCBhIHByb3BlcnR5IGludG8gZ2V0dGVyL3NldHRlciBzbyB3ZSBjYW4gZW1pdFxuICogdGhlIGV2ZW50cyB3aGVuIHRoZSBwcm9wZXJ0eSBpcyBhY2Nlc3NlZC9jaGFuZ2VkLlxuICpcbiAqIEBwYXJhbSB7U3RyaW5nfSBrZXlcbiAqIEBwYXJhbSB7Kn0gdmFsXG4gKi9cblxuT2JzZXJ2ZXIucHJvdG90eXBlLmNvbnZlcnQgPSBmdW5jdGlvbiAoa2V5LCB2YWwpIHtcbiAgZGVmaW5lUmVhY3RpdmUodGhpcy52YWx1ZSwga2V5LCB2YWwpXG59XG5cbi8qKlxuICogQWRkIGFuIG93bmVyIHZtLCBzbyB0aGF0IHdoZW4gJHNldC8kZGVsZXRlIG11dGF0aW9uc1xuICogaGFwcGVuIHdlIGNhbiBub3RpZnkgb3duZXIgdm1zIHRvIHByb3h5IHRoZSBrZXlzIGFuZFxuICogZGlnZXN0IHRoZSB3YXRjaGVycy4gVGhpcyBpcyBvbmx5IGNhbGxlZCB3aGVuIHRoZSBvYmplY3RcbiAqIGlzIG9ic2VydmVkIGFzIGFuIGluc3RhbmNlJ3Mgcm9vdCAkZGF0YS5cbiAqXG4gKiBAcGFyYW0ge1Z1ZX0gdm1cbiAqL1xuXG5PYnNlcnZlci5wcm90b3R5cGUuYWRkVm0gPSBmdW5jdGlvbiAodm0pIHtcbiAgKHRoaXMudm1zIHx8ICh0aGlzLnZtcyA9IFtdKSkucHVzaCh2bSlcbn1cblxuLyoqXG4gKiBSZW1vdmUgYW4gb3duZXIgdm0uIFRoaXMgaXMgY2FsbGVkIHdoZW4gdGhlIG9iamVjdCBpc1xuICogc3dhcHBlZCBvdXQgYXMgYW4gaW5zdGFuY2UncyAkZGF0YSBvYmplY3QuXG4gKlxuICogQHBhcmFtIHtWdWV9IHZtXG4gKi9cblxuT2JzZXJ2ZXIucHJvdG90eXBlLnJlbW92ZVZtID0gZnVuY3Rpb24gKHZtKSB7XG4gIHJlbW92ZSh0aGlzLnZtcywgdm0pXG59XG5cbi8vIGhlbHBlcnNcblxuLyoqXG4gKiBBdWdtZW50IGFuIHRhcmdldCBPYmplY3Qgb3IgQXJyYXkgYnkgaW50ZXJjZXB0aW5nXG4gKiB0aGUgcHJvdG90eXBlIGNoYWluIHVzaW5nIF9fcHJvdG9fX1xuICpcbiAqIEBwYXJhbSB7T2JqZWN0fEFycmF5fSB0YXJnZXRcbiAqIEBwYXJhbSB7T2JqZWN0fSBzcmNcbiAqL1xuXG5mdW5jdGlvbiBwcm90b0F1Z21lbnQgKHRhcmdldCwgc3JjKSB7XG4gIC8qIGVzbGludC1kaXNhYmxlIG5vLXByb3RvICovXG4gIHRhcmdldC5fX3Byb3RvX18gPSBzcmNcbiAgLyogZXNsaW50LWVuYWJsZSBuby1wcm90byAqL1xufVxuXG4vKipcbiAqIEF1Z21lbnQgYW4gdGFyZ2V0IE9iamVjdCBvciBBcnJheSBieSBkZWZpbmluZ1xuICogaGlkZGVuIHByb3BlcnRpZXMuXG4gKlxuICogQHBhcmFtIHtPYmplY3R8QXJyYXl9IHRhcmdldFxuICogQHBhcmFtIHtPYmplY3R9IHByb3RvXG4gKi9cblxuZnVuY3Rpb24gY29weUF1Z21lbnQgKHRhcmdldCwgc3JjLCBrZXlzKSB7XG4gIGZvciAobGV0IGkgPSAwLCBsID0ga2V5cy5sZW5ndGg7IGkgPCBsOyBpKyspIHtcbiAgICBjb25zdCBrZXkgPSBrZXlzW2ldXG4gICAgZGVmKHRhcmdldCwga2V5LCBzcmNba2V5XSlcbiAgfVxufVxuXG4vKipcbiAqIEF0dGVtcHQgdG8gY3JlYXRlIGFuIG9ic2VydmVyIGluc3RhbmNlIGZvciBhIHZhbHVlLFxuICogcmV0dXJucyB0aGUgbmV3IG9ic2VydmVyIGlmIHN1Y2Nlc3NmdWxseSBvYnNlcnZlZCxcbiAqIG9yIHRoZSBleGlzdGluZyBvYnNlcnZlciBpZiB0aGUgdmFsdWUgYWxyZWFkeSBoYXMgb25lLlxuI
CpcbiAqIEBwYXJhbSB7Kn0gdmFsdWVcbiAqIEBwYXJhbSB7VnVlfSBbdm1dXG4gKiBAcmV0dXJuIHtPYnNlcnZlcnx1bmRlZmluZWR9XG4gKiBAc3RhdGljXG4gKi9cblxuZXhwb3J0IGZ1bmN0aW9uIG9ic2VydmUgKHZhbHVlLCB2bSkge1xuICBpZiAoIWlzT2JqZWN0KHZhbHVlKSkge1xuICAgIHJldHVyblxuICB9XG4gIGxldCBvYlxuICBpZiAoaGFzT3duKHZhbHVlLCAnX19vYl9fJykgJiYgdmFsdWUuX19vYl9fIGluc3RhbmNlb2YgT2JzZXJ2ZXIpIHtcbiAgICBvYiA9IHZhbHVlLl9fb2JfX1xuICB9IGVsc2UgaWYgKFxuICAgIChpc0FycmF5KHZhbHVlKSB8fCBpc1BsYWluT2JqZWN0KHZhbHVlKSkgJiZcbiAgICBPYmplY3QuaXNFeHRlbnNpYmxlKHZhbHVlKSAmJlxuICAgICF2YWx1ZS5faXNWdWVcbiAgKSB7XG4gICAgb2IgPSBuZXcgT2JzZXJ2ZXIodmFsdWUpXG4gIH1cbiAgaWYgKG9iICYmIHZtKSB7XG4gICAgb2IuYWRkVm0odm0pXG4gIH1cbiAgcmV0dXJuIG9iXG59XG5cbi8qKlxuICogRGVmaW5lIGEgcmVhY3RpdmUgcHJvcGVydHkgb24gYW4gT2JqZWN0LlxuICpcbiAqIEBwYXJhbSB7T2JqZWN0fSBvYmpcbiAqIEBwYXJhbSB7U3RyaW5nfSBrZXlcbiAqIEBwYXJhbSB7Kn0gdmFsXG4gKi9cblxuZXhwb3J0IGZ1bmN0aW9uIGRlZmluZVJlYWN0aXZlIChvYmosIGtleSwgdmFsKSB7XG4gIGNvbnN0IGRlcCA9IG5ldyBEZXAoKVxuXG4gIGNvbnN0IHByb3BlcnR5ID0gT2JqZWN0LmdldE93blByb3BlcnR5RGVzY3JpcHRvcihvYmosIGtleSlcbiAgaWYgKHByb3BlcnR5ICYmIHByb3BlcnR5LmNvbmZpZ3VyYWJsZSA9PT0gZmFsc2UpIHtcbiAgICByZXR1cm5cbiAgfVxuXG4gIC8vIGNhdGVyIGZvciBwcmUtZGVmaW5lZCBnZXR0ZXIvc2V0dGVyc1xuICBjb25zdCBnZXR0ZXIgPSBwcm9wZXJ0eSAmJiBwcm9wZXJ0eS5nZXRcbiAgY29uc3Qgc2V0dGVyID0gcHJvcGVydHkgJiYgcHJvcGVydHkuc2V0XG5cbiAgbGV0IGNoaWxkT2IgPSBvYnNlcnZlKHZhbClcbiAgT2JqZWN0LmRlZmluZVByb3BlcnR5KG9iaiwga2V5LCB7XG4gICAgZW51bWVyYWJsZTogdHJ1ZSxcbiAgICBjb25maWd1cmFibGU6IHRydWUsXG4gICAgZ2V0OiBmdW5jdGlvbiByZWFjdGl2ZUdldHRlciAoKSB7XG4gICAgICBjb25zdCB2YWx1ZSA9IGdldHRlciA / IGdldHRlci5jYWxsKG9iaikgOiB2YWxcbiAgICAgIGlmIChEZXAudGFyZ2V0KSB7XG4gICAgICAgIGRlcC5kZXBlbmQoKVxuICAgICAgICBpZiAoY2hpbGRPYikge1xuICAgICAgICAgIGNoaWxkT2IuZGVwLmRlcGVuZCgpXG4gICAgICAgIH1cbiAgICAgICAgaWYgKGlzQXJyYXkodmFsdWUpKSB7XG4gICAgICAgICAgZm9yIChsZXQgZSwgaSA9IDAsIGwgPSB2YWx1ZS5sZW5ndGg7IGkgPCBsOyBpKyspIHtcbiAgICAgICAgICAgIGUgPSB2YWx1ZVtpXVxuICAgICAgICAgICAgZSAmJiBlLl9fb2JfXyAmJiBlLl9fb2JfXy5kZXAuZGVwZW5kKClcbiAgICAgICAgICB9XG4gICAgICAgIH1cbiAgICAgIH1cbiAgICAgIHJldHVybiB2YWx1ZVxuICAgIH0sXG4gICAgc2V0OiBmdW5jdGlvbiByZWFjdGl2ZVNldHRlciAobmV3VmFsKSB7XG4gICAgICBjb25zdCB2YWx1ZSA9IGdldHRlciA / 
gdGhpcy50eXBlID0gdHlwZVxuICB0aGlzLmF0dHIgPSBwcm9wcy5hdHRyIHx8IHt9XG4gIHRoaXMuY2xhc3NTdHlsZSA9IHByb3BzLmNsYXNzU3R5bGUgfHwge31cbiAgdGhpcy5zdHlsZSA9IHByb3BzLnN0eWxlIHx8IHt9XG4gIHRoaXMuZXZlbnQgPSB7fVxuICB0aGlzLmNoaWxkcmVuID0gW11cbiAgdGhpcy5wdXJlQ2hpbGRyZW4gPSBbXVxufVxuXG5FbGVtZW50LnByb3RvdHlwZSA9IG5ldyBOb2RlKClcblxuRWxlbWVudC5wcm90b3R5cGUuYXBwZW5kQ2hpbGQgPSBmdW5jdGlvbiAobm9kZSkge1xuICBpZiAobm9kZS5wYXJlbnROb2RlICYmIG5vZGUucGFyZW50Tm9kZSAhPT0gdGhpcykge1xuICAgIHJldHVyblxuICB9XG4gIGlmICghbm9kZS5wYXJlbnROb2RlKSB7XG4gICAgbGlua1BhcmVudChub2RlLCB0aGlzKVxuICAgIGluc2VydEluZGV4KG5vZGUsIHRoaXMuY2hpbGRyZW4sIHRoaXMuY2hpbGRyZW4ubGVuZ3RoLCB0cnVlKVxuICAgIGlmICh0aGlzLmRvY0lkKSB7XG4gICAgICByZWdpc3Rlck5vZGUodGhpcy5kb2NJZCwgbm9kZSlcbiAgICB9XG4gICAgaWYgKG5vZGUubm9kZVR5cGUgPT09IDEpIHtcbiAgICAgIGluc2VydEluZGV4KG5vZGUsIHRoaXMucHVyZUNoaWxkcmVuLCB0aGlzLnB1cmVDaGlsZHJlbi5sZW5ndGgpXG4gICAgICBpZiAodGhpcy5kb2NJZCkge1xuICAgICAgICBjb25zdCBsaXN0ZW5lciA9IGluc3RhbmNlTWFwW3RoaXMuZG9jSWRdLmxpc3RlbmVyXG4gICAgICAgIHJldHVybiBsaXN0ZW5lci5hZGRFbGVtZW50KG5vZGUsIHRoaXMucmVmLCAtMSlcbiAgICAgIH1cbiAgICB9XG4gIH1cbiAgZWxzZSB7XG4gICAgbW92ZUluZGV4KG5vZGUsIHRoaXMuY2hpbGRyZW4sIHRoaXMuY2hpbGRyZW4ubGVuZ3RoLCB0cnVlKVxuICAgIGlmIChub2RlLm5vZGVUeXBlID09PSAxKSB7XG4gICAgICBjb25zdCBpbmRleCA9IG1vdmVJbmRleChub2RlLCB0aGlzLnB1cmVDaGlsZHJlbiwgdGhpcy5wdXJlQ2hpbGRyZW4ubGVuZ3RoKVxuICAgICAgaWYgKHRoaXMuZG9jSWQgJiYgaW5kZXggPj0gMCkge1xuICAgICAgICBjb25zdCBsaXN0ZW5lciA9IGluc3RhbmNlTWFwW3RoaXMuZG9jSWRdLmxpc3RlbmVyXG4gICAgICAgIHJldHVybiBsaXN0ZW5lci5tb3ZlRWxlbWVudChub2RlLnJlZiwgdGhpcy5yZWYsIGluZGV4KVxuICAgICAgfVxuICAgIH1cbiAgfVxufVxuXG5FbGVtZW50LnByb3RvdHlwZS5pbnNlcnRCZWZvcmUgPSBmdW5jdGlvbiAobm9kZSwgYmVmb3JlKSB7XG4gIGlmIChub2RlLnBhcmVudE5vZGUgJiYgbm9kZS5wYXJlbnROb2RlICE9PSB0aGlzKSB7XG4gICAgcmV0dXJuXG4gIH1cbiAgaWYgKG5vZGUgPT09IGJlZm9yZSB8fCBub2RlLm5leHRTaWJsaW5nID09PSBiZWZvcmUpIHtcbiAgICByZXR1cm5cbiAgfVxuICBpZiAoIW5vZGUucGFyZW50Tm9kZSkge1xuICAgIGxpbmtQYXJlbnQobm9kZSwgdGhpcylcbiAgICBpbnNlcnRJbmRleChub2RlLCB0aGlzLmNoaWxkcmVuLCB0aGlzLmNoaWxkcmVuLmluZGV4T2YoYmVmb3JlKSwgdHJ1ZSlcbiAgICBpZiAodGhpcy5kb2NJZCkge1xuICAgICAgcmVnaXN0ZXJOb2RlKHRoaXMuZG9jSWQsIG5vZGUpXG4gICAgfVxuICAgIGlmIChub2RlLm5vZGVUeXBlID09PSAxKSB7XG4gICAgICBjb25zdCBwdXJlQmVmb3JlID0gbmV4dEVsZW1lbnQoYmVmb3JlKVxuICAgICAgY29uc3QgaW5kZXggPSBpbnNlcnRJbmRleChcbiAgICAgICAgbm9kZSxcbiAgICAgICAgdGhpcy5wdXJlQ2hpbGRyZW4sXG4gICAgICAgIHB1cmVCZWZvcmVcbiAgICAgICAgICA / IHRoaXMucHVyZUNoaWxkcmVuLmluZGV4T2YocHVyZUJlZm9yZSlcbiAgICAgICAgICA6IHRoaXMucHVyZUNoaWxkcmVuLmxlbmd0aFxuICAgICAgKVxuICAgICAgaWYgKHRoaXMuZG9jSWQpIHtcbiAgICAgICAgY29uc3QgbGlzdGVuZXIgPSBpbnN0YW5jZU1hcFt0aGlzLmRvY0lkXS5saXN0ZW5lclxuICAgICAgICByZXR1cm4gbGlzdGVuZXIuYWRkRWxlbWVudChub2RlLCB0aGlzLnJlZiwgaW5kZXgpXG4gICAgICB9XG4gICAgfVxuICB9XG4gIGVsc2Uge1xuICAgIG1vdmVJbmRleChub2RlLCB0aGlzLmNoaWxkcmVuLCB0aGlzLmNoaWxkcmVuLmluZGV4T2YoYmVmb3JlKSwgdHJ1ZSlcbiAgICBpZiAobm9kZS5ub2RlVHlwZSA9PT0gMSkge1xuICAgICAgY29uc3QgcHVyZUJlZm9yZSA9IG5leHRFbGVtZW50KGJlZm9yZSlcbiAgICAgIGNvbnN0IGluZGV4ID0gbW92ZUluZGV4KFxuICAgICAgICBub2RlLFxuICAgICAgICB0aGlzLnB1cmVDaGlsZHJlbixcbiAgICAgICAgcHVyZUJlZm9yZVxuICAgICAgICAgID8gdGhpcy5wdXJlQ2hpbGRyZW4uaW5kZXhPZihwdXJlQmVmb3JlKVxuICAgICAgICAgIDogdGhpcy5wdXJlQ2hpbGRyZW4ubGVuZ3RoXG4gICAgICApXG4gICAgICBpZiAodGhpcy5kb2NJZCAmJiBpbmRleCA + 
PSAwKSB7XG4gICAgICAgIGNvbnN0IGxpc3RlbmVyID0gaW5zdGFuY2VNYXBbdGhpcy5kb2NJZF0ubGlzdGVuZXJcbiAgICAgICAgcmV0dXJuIGxpc3RlbmVyLm1vdmVFbGVtZW50KG5vZGUucmVmLCB0aGlzLnJlZiwgaW5kZXgpXG4gICAgICB9XG4gICAgfVxuICB9XG59XG5cbkVsZW1lbnQucHJvdG90eXBlLmluc2VydEFmdGVyID0gZnVuY3Rpb24gKG5vZGUsIGFmdGVyKSB7XG4gIGlmIChub2RlLnBhcmVudE5vZGUgJiYgbm9kZS5wYXJlbnROb2RlICE9PSB0aGlzKSB7XG4gICAgcmV0dXJuXG4gIH1cbiAgaWYgKG5vZGUgPT09IGFmdGVyIHx8IG5vZGUucHJldmlvdXNTaWJsaW5nID09PSBhZnRlcikge1xuICAgIHJldHVyblxuICB9XG4gIGlmICghbm9kZS5wYXJlbnROb2RlKSB7XG4gICAgbGlua1BhcmVudChub2RlLCB0aGlzKVxuICAgIGluc2VydEluZGV4KG5vZGUsIHRoaXMuY2hpbGRyZW4sIHRoaXMuY2hpbGRyZW4uaW5kZXhPZihhZnRlcikgKyAxLCB0cnVlKVxuICAgIGlmICh0aGlzLmRvY0lkKSB7XG4gICAgICByZWdpc3Rlck5vZGUodGhpcy5kb2NJZCwgbm9kZSlcbiAgICB9XG4gICAgaWYgKG5vZGUubm9kZVR5cGUgPT09IDEpIHtcbiAgICAgIGNvbnN0IGluZGV4ID0gaW5zZXJ0SW5kZXgoXG4gICAgICAgIG5vZGUsXG4gICAgICAgIHRoaXMucHVyZUNoaWxkcmVuLFxuICAgICAgICB0aGlzLnB1cmVDaGlsZHJlbi5pbmRleE9mKHByZXZpb3VzRWxlbWVudChhZnRlcikpICsgMVxuICAgICAgKVxuICAgICAgaWYgKHRoaXMuZG9jSWQpIHtcbiAgICAgICAgY29uc3QgbGlzdGVuZXIgPSBpbnN0YW5jZU1hcFt0aGlzLmRvY0lkXS5saXN0ZW5lclxuICAgICAgICByZXR1cm4gbGlzdGVuZXIuYWRkRWxlbWVudChub2RlLCB0aGlzLnJlZiwgaW5kZXgpXG4gICAgICB9XG4gICAgfVxuICB9XG4gIGVsc2Uge1xuICAgIG1vdmVJbmRleChub2RlLCB0aGlzLmNoaWxkcmVuLCB0aGlzLmNoaWxkcmVuLmluZGV4T2YoYWZ0ZXIpICsgMSwgdHJ1ZSlcbiAgICBpZiAobm9kZS5ub2RlVHlwZSA9PT0gMSkge1xuICAgICAgY29uc3QgaW5kZXggPSBtb3ZlSW5kZXgoXG4gICAgICAgIG5vZGUsXG4gICAgICAgIHRoaXMucHVyZUNoaWxkcmVuLFxuICAgICAgICB0aGlzLnB1cmVDaGlsZHJlbi5pbmRleE9mKHByZXZpb3VzRWxlbWVudChhZnRlcikpICsgMVxuICAgICAgKVxuICAgICAgaWYgKHRoaXMuZG9jSWQgJiYgaW5kZXggPj0gMCkge1xuICAgICAgICBjb25zdCBsaXN0ZW5lciA9IGluc3RhbmNlTWFwW3RoaXMuZG9jSWRdLmxpc3RlbmVyXG4gICAgICAgIHJldHVybiBsaXN0ZW5lci5tb3ZlRWxlbWVudChub2RlLnJlZiwgdGhpcy5yZWYsIGluZGV4KVxuICAgICAgfVxuICAgIH1cbiAgfVxufVxuXG5FbGVtZW50LnByb3RvdHlwZS5yZW1vdmVDaGlsZCA9IGZ1bmN0aW9uIChub2RlLCBwcmVzZXJ2ZWQpIHtcbiAgaWYgKG5vZGUucGFyZW50Tm9kZSkge1xuICAgIHJlbW92ZUluZGV4KG5vZGUsIHRoaXMuY2hpbGRyZW4sIHRydWUpXG4gICAgaWYgKG5vZGUubm9kZVR5cGUgPT09IDEpIHtcbiAgICAgIHJlbW92ZUluZGV4KG5vZGUsIHRoaXMucHVyZUNoaWxkcmVuKVxuICAgICAgaWYgKHRoaXMuZG9jSWQpIHtcbiAgICAgICAgY29uc3QgbGlzdGVuZXIgPSBpbnN0YW5jZU1hcFt0aGlzLmRvY0lkXS5saXN0ZW5lclxuICAgICAgICBsaXN0ZW5lci5yZW1vdmVFbGVtZW50KG5vZGUucmVmKVxuICAgICAgfVxuICAgIH1cbiAgfVxuICBpZiAoIXByZXNlcnZlZCkge1xuICAgIG5vZGUuZGVzdHJveSgpXG4gIH1cbn1cblxuRWxlbWVudC5wcm90b3R5cGUuY2xlYXIgPSBmdW5jdGlvbiAoKSB7XG4gIGlmICh0aGlzLmRvY0lkKSB7XG4gICAgY29uc3QgbGlzdGVuZXIgPSBpbnN0YW5jZU1hcFt0aGlzLmRvY0lkXS5saXN0ZW5lclxuICAgIHRoaXMucHVyZUNoaWxkcmVuLmZvckVhY2gobm9kZSA9PiB7XG4gICAgICBsaXN0ZW5lci5yZW1vdmVFbGVtZW50KG5vZGUucmVmKVxuICAgIH0pXG4gIH1cbiAgdGhpcy5jaGlsZHJlbi5mb3JFYWNoKG5vZGUgPT4ge1xuICAgIG5vZGUuZGVzdHJveSgpXG4gIH0pXG4gIHRoaXMuY2hpbGRyZW4ubGVuZ3RoID0gMFxuICB0aGlzLnB1cmVDaGlsZHJlbi5sZW5ndGggPSAwXG59XG5cbmZ1bmN0aW9uIG5leHRFbGVtZW50IChub2RlKSB7XG4gIHdoaWxlIChub2RlKSB7XG4gICAgaWYgKG5vZGUubm9kZVR5cGUgPT09IDEpIHtcbiAgICAgIHJldHVybiBub2RlXG4gICAgfVxuICAgIG5vZGUgPSBub2RlLm5leHRTaWJsaW5nXG4gIH1cbn1cblxuZnVuY3Rpb24gcHJldmlvdXNFbGVtZW50IChub2RlKSB7XG4gIHdoaWxlIChub2RlKSB7XG4gICAgaWYgKG5vZGUubm9kZVR5cGUgPT09IDEpIHtcbiAgICAgIHJldHVybiBub2RlXG4gICAgfVxuICAgIG5vZGUgPSBub2RlLnByZXZpb3VzU2libGluZ1xuICB9XG59XG5cbmZ1bmN0aW9uIGxpbmtQYXJlbnQgKG5vZGUsIHBhcmVudCkge1xuICBub2RlLnBhcmVudE5vZGUgPSBwYXJlbnRcbiAgaWYgKHBhcmVudC5kb2NJZCkge1xuICAgIG5vZGUuZG9jSWQgPSBwYXJlbnQuZG9jSWRcbiAgICBub2RlLm93bmVyRG9jdW1lbnQgPSBwYXJlbnQub3duZXJEb2N1bWVudFxuICAgIG5vZGUub3duZXJEb2N1bWVudC5ub2RlTWFwW25vZGUubm9kZUlkXSA9IG5vZGVcbiAgICBub2RlLmRlcHRoID0gcGFyZW50LmRlcHRoICsgMVxuICB9XG4gIG5
vZGUuY2hpbGRyZW4uZm9yRWFjaChjaGlsZCA9PiB7XG4gICAgbGlua1BhcmVudChjaGlsZCwgbm9kZSlcbiAgfSlcbn1cblxuZnVuY3Rpb24gcmVnaXN0ZXJOb2RlIChkb2NJZCwgbm9kZSkge1xuICBjb25zdCBkb2MgPSBpbnN0YW5jZU1hcFtkb2NJZF1cbiAgZG9jLm5vZGVNYXBbbm9kZS5ub2RlSWRdID0gbm9kZVxufVxuXG5mdW5jdGlvbiBpbnNlcnRJbmRleCAodGFyZ2V0LCBsaXN0LCBuZXdJbmRleCwgY2hhbmdlU2libGluZykge1xuICBpZiAobmV3SW5kZXggPCAwKSB7XG4gICAgbmV3SW5kZXggPSAwXG4gIH1cbiAgY29uc3QgYmVmb3JlID0gbGlzdFtuZXdJbmRleCAtIDFdXG4gIGNvbnN0IGFmdGVyID0gbGlzdFtuZXdJbmRleF1cbiAgbGlzdC5zcGxpY2UobmV3SW5kZXgsIDAsIHRhcmdldClcbiAgaWYgKGNoYW5nZVNpYmxpbmcpIHtcbiAgICBiZWZvcmUgJiYgKGJlZm9yZS5uZXh0U2libGluZyA9IHRhcmdldClcbiAgICB0YXJnZXQucHJldmlvdXNTaWJsaW5nID0gYmVmb3JlXG4gICAgdGFyZ2V0Lm5leHRTaWJsaW5nID0gYWZ0ZXJcbiAgICBhZnRlciAmJiAoYWZ0ZXIucHJldmlvdXNTaWJsaW5nID0gdGFyZ2V0KVxuICB9XG4gIHJldHVybiBuZXdJbmRleFxufVxuXG5mdW5jdGlvbiBtb3ZlSW5kZXggKHRhcmdldCwgbGlzdCwgbmV3SW5kZXgsIGNoYW5nZVNpYmxpbmcpIHtcbiAgY29uc3QgaW5kZXggPSBsaXN0LmluZGV4T2YodGFyZ2V0KVxuICBpZiAoaW5kZXggPCAwKSB7XG4gICAgcmV0dXJuIC0xXG4gIH1cbiAgaWYgKGNoYW5nZVNpYmxpbmcpIHtcbiAgICBjb25zdCBiZWZvcmUgPSBsaXN0W2luZGV4IC0gMV1cbiAgICBjb25zdCBhZnRlciA9IGxpc3RbaW5kZXggKyAxXVxuICAgIGJlZm9yZSAmJiAoYmVmb3JlLm5leHRTaWJsaW5nID0gYWZ0ZXIpXG4gICAgYWZ0ZXIgJiYgKGFmdGVyLnByZXZpb3VzU2libGluZyA9IGJlZm9yZSlcbiAgfVxuICBsaXN0LnNwbGljZShpbmRleCwgMSlcbiAgbGV0IG5ld0luZGV4QWZ0ZXIgPSBuZXdJbmRleFxuICBpZiAoaW5kZXggPD0gbmV3SW5kZXgpIHtcbiAgICBuZXdJbmRleEFmdGVyID0gbmV3SW5kZXggLSAxXG4gIH1cbiAgY29uc3QgYmVmb3JlTmV3ID0gbGlzdFtuZXdJbmRleEFmdGVyIC0gMV1cbiAgY29uc3QgYWZ0ZXJOZXcgPSBsaXN0W25ld0luZGV4QWZ0ZXJdXG4gIGxpc3Quc3BsaWNlKG5ld0luZGV4QWZ0ZXIsIDAsIHRhcmdldClcbiAgaWYgKGNoYW5nZVNpYmxpbmcpIHtcbiAgICBiZWZvcmVOZXcgJiYgKGJlZm9yZU5ldy5uZXh0U2libGluZyA9IHRhcmdldClcbiAgICB0YXJnZXQucHJldmlvdXNTaWJsaW5nID0gYmVmb3JlTmV3XG4gICAgdGFyZ2V0Lm5leHRTaWJsaW5nID0gYWZ0ZXJOZXdcbiAgICBhZnRlck5ldyAmJiAoYWZ0ZXJOZXcucHJldmlvdXNTaWJsaW5nID0gdGFyZ2V0KVxuICB9XG4gIGlmIChpbmRleCA9PT0gbmV3SW5kZXhBZnRlcikge1xuICAgIHJldHVybiAtMVxuICB9XG4gIHJldHVybiBuZXdJbmRleFxufVxuXG5mdW5jdGlvbiByZW1vdmVJbmRleCAodGFyZ2V0LCBsaXN0LCBjaGFuZ2VTaWJsaW5nKSB7XG4gIGNvbnN0IGluZGV4ID0gbGlzdC5pbmRleE9mKHRhcmdldClcbiAgaWYgKGluZGV4IDwgMCkge1xuICAgIHJldHVyblxuICB9XG4gIGlmIChjaGFuZ2VTaWJsaW5nKSB7XG4gICAgY29uc3QgYmVmb3JlID0gbGlzdFtpbmRleCAtIDFdXG4gICAgY29uc3QgYWZ0ZXIgPSBsaXN0W2luZGV4ICsgMV1cbiAgICBiZWZvcmUgJiYgKGJlZm9yZS5uZXh0U2libGluZyA9IGFmdGVyKVxuICAgIGFmdGVyICYmIChhZnRlci5wcmV2aW91c1NpYmxpbmcgPSBiZWZvcmUpXG4gIH1cbiAgbGlzdC5zcGxpY2UoaW5kZXgsIDEpXG59XG5cbkVsZW1lbnQucHJvdG90eXBlLnNldEF0dHIgPSBmdW5jdGlvbiAoa2V5LCB2YWx1ZSwgc2lsZW50KSB7XG4gIGlmICh0aGlzLmF0dHJba2V5XSA9PT0gdmFsdWUpIHtcbiAgICByZXR1cm5cbiAgfVxuICB0aGlzLmF0dHJba2V5XSA9IHZhbHVlXG4gIGlmICghc2lsZW50ICYmIHRoaXMuZG9jSWQpIHtcbiAgICBjb25zdCBsaXN0ZW5lciA9IGluc3RhbmNlTWFwW3RoaXMuZG9jSWRdLmxpc3RlbmVyXG4gICAgbGlzdGVuZXIuc2V0QXR0cih0aGlzLnJlZiwga2V5LCB2YWx1ZSlcbiAgfVxufVxuXG5FbGVtZW50LnByb3RvdHlwZS5zZXRTdHlsZSA9IGZ1bmN0aW9uIChrZXksIHZhbHVlLCBzaWxlbnQpIHtcbiAgaWYgKHRoaXMuc3R5bGVba2V5XSA9PT0gdmFsdWUpIHtcbiAgICByZXR1cm5cbiAgfVxuICB0aGlzLnN0eWxlW2tleV0gPSB2YWx1ZVxuICBpZiAoIXNpbGVudCAmJiB0aGlzLmRvY0lkKSB7XG4gICAgY29uc3QgbGlzdGVuZXIgPSBpbnN0YW5jZU1hcFt0aGlzLmRvY0lkXS5saXN0ZW5lclxuICAgIGxpc3RlbmVyLnNldFN0eWxlKHRoaXMucmVmLCBrZXksIHZhbHVlKVxuICB9XG59XG5cbkVsZW1lbnQucHJvdG90eXBlLnNldENsYXNzU3R5bGUgPSBmdW5jdGlvbiAoY2xhc3NTdHlsZSkge1xuICB0aGlzLmNsYXNzU3R5bGUgPSBjbGFzc1N0eWxlXG4gIGlmICh0aGlzLmRvY0lkKSB7XG4gICAgY29uc3QgbGlzdGVuZXIgPSBpbnN0YW5jZU1hcFt0aGlzLmRvY0lkXS5saXN0ZW5lclxuICAgIGxpc3RlbmVyLnNldFN0eWxlcyh0aGlzLnJlZiwgdGhpcy50b1N0eWxlKCkpXG4gIH1cbn1cblxuRWxlbWVudC5wcm90b3R5cGUuYWRkRXZlbnQgPSBmdW5jdGlvbiAodHlwZS
wgaGFuZGxlcikge1xuICBpZiAoIXRoaXMuZXZlbnRbdHlwZV0pIHtcbiAgICB0aGlzLmV2ZW50W3R5cGVdID0gaGFuZGxlclxuICAgIGlmICh0aGlzLmRvY0lkKSB7XG4gICAgICBjb25zdCBsaXN0ZW5lciA9IGluc3RhbmNlTWFwW3RoaXMuZG9jSWRdLmxpc3RlbmVyXG4gICAgICBsaXN0ZW5lci5hZGRFdmVudCh0aGlzLnJlZiwgdHlwZSlcbiAgICB9XG4gIH1cbn1cblxuRWxlbWVudC5wcm90b3R5cGUucmVtb3ZlRXZlbnQgPSBmdW5jdGlvbiAodHlwZSkge1xuICBpZiAodGhpcy5ldmVudFt0eXBlXSkge1xuICAgIGRlbGV0ZSB0aGlzLmV2ZW50W3R5cGVdXG4gICAgaWYgKHRoaXMuZG9jSWQpIHtcbiAgICAgIGNvbnN0IGxpc3RlbmVyID0gaW5zdGFuY2VNYXBbdGhpcy5kb2NJZF0ubGlzdGVuZXJcbiAgICAgIGxpc3RlbmVyLnJlbW92ZUV2ZW50KHRoaXMucmVmLCB0eXBlKVxuICAgIH1cbiAgfVxufVxuXG5FbGVtZW50LnByb3RvdHlwZS5maXJlRXZlbnQgPSBmdW5jdGlvbiAodHlwZSwgZSkge1xuICBjb25zdCBoYW5kbGVyID0gdGhpcy5ldmVudFt0eXBlXVxuICBpZiAoaGFuZGxlcikge1xuICAgIHJldHVybiBoYW5kbGVyLmNhbGwodGhpcywgZSlcbiAgfVxufVxuXG5FbGVtZW50LnByb3RvdHlwZS50b1N0eWxlID0gZnVuY3Rpb24gKCkge1xuICByZXR1cm4gT2JqZWN0LmFzc2lnbih7fSwgdGhpcy5jbGFzc1N0eWxlLCB0aGlzLnN0eWxlKVxufVxuXG5FbGVtZW50LnByb3RvdHlwZS50b0pTT04gPSBmdW5jdGlvbiAoKSB7XG4gIGNvbnN0IHJlc3VsdCA9IHtcbiAgICByZWY6IHRoaXMucmVmLnRvU3RyaW5nKCksXG4gICAgdHlwZTogdGhpcy50eXBlLFxuICAgIGF0dHI6IHRoaXMuYXR0cixcbiAgICBzdHlsZTogdGhpcy50b1N0eWxlKClcbiAgfVxuICBjb25zdCBldmVudCA9IE9iamVjdC5rZXlzKHRoaXMuZXZlbnQpXG4gIGlmIChldmVudC5sZW5ndGgpIHtcbiAgICByZXN1bHQuZXZlbnQgPSBldmVudFxuICB9XG4gIGlmICh0aGlzLnB1cmVDaGlsZHJlbi5sZW5ndGgpIHtcbiAgICByZXN1bHQuY2hpbGRyZW4gPSB0aGlzLnB1cmVDaGlsZHJlbi5tYXAoKGNoaWxkKSA9PiBjaGlsZC50b0pTT04oKSlcbiAgfVxuICByZXR1cm4gcmVzdWx0XG59XG5cbkVsZW1lbnQucHJvdG90eXBlLnRvU3RyaW5nID0gZnVuY3Rpb24gKCkge1xuICByZXR1cm4gJzwnICsgdGhpcy50eXBlICtcbiAgICAnIGF0dHI9JyArIEpTT04uc3RyaW5naWZ5KHRoaXMuYXR0cikgK1xuICAgICcgc3R5bGU9JyArIEpTT04uc3RyaW5naWZ5KHRoaXMudG9TdHlsZSgpKSArICc + JyArXG4gICAgdGhpcy5wdXJlQ2hpbGRyZW4ubWFwKChjaGlsZCkgPT4gY2hpbGQudG9TdHJpbmcoKSkuam9pbignJykgK1xuICAgICc8LycgKyB0aGlzLnR5cGUgKyAnPidcbn1cblxuZXhwb3J0IGZ1bmN0aW9uIENvbW1lbnQgKHZhbHVlKSB7XG4gIHRoaXMubm9kZVR5cGUgPSA4XG4gIHRoaXMubm9kZUlkID0gKG5leHROb2RlUmVmKyspLnRvU3RyaW5nKClcbiAgdGhpcy5yZWYgPSB0aGlzLm5vZGVJZFxuICB0aGlzLnR5cGUgPSAnY29tbWVudCdcbiAgdGhpcy52YWx1ZSA9IHZhbHVlXG4gIHRoaXMuY2hpbGRyZW4gPSBbXVxuICB0aGlzLnB1cmVDaGlsZHJlbiA9IFtdXG59XG5cbkNvbW1lbnQucHJvdG90eXBlID0gbmV3IE5vZGUoKVxuXG5Db21tZW50LnByb3RvdHlwZS50b1N0cmluZyA9IGZ1bmN0aW9uICgpIHtcbiAgcmV0dXJuICc8IS0tICcgKyB0aGlzLnZhbHVlICsgJyAtLT4nXG59XG5cblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L2h0bWw1L3Zkb20vaW5kZXguanNcbiAqKi8iLCJleHBvcnQgZGVmYXVsdCBmdW5jdGlvbiBMaXN0ZW5lciAoaWQsIGhhbmRsZXIpIHtcbiAgdGhpcy5pZCA9IGlkXG4gIHRoaXMuYmF0Y2hlZCA9IGZhbHNlXG4gIHRoaXMudXBkYXRlcyA9IFtdXG4gIGlmICh0eXBlb2YgaGFuZGxlciA9PT0gJ2Z1bmN0aW9uJykge1xuICAgIHRoaXMuaGFuZGxlciA9IGhhbmRsZXJcbiAgfVxufVxuXG5MaXN0ZW5lci5wcm90b3R5cGUuY3JlYXRlRmluaXNoID0gZnVuY3Rpb24gKGNhbGxiYWNrKSB7XG4gIGNvbnN0IGhhbmRsZXIgPSB0aGlzLmhhbmRsZXJcbiAgcmV0dXJuIGhhbmRsZXIoW2NyZWF0ZUFjdGlvbignY3JlYXRlRmluaXNoJywgW10pXSwgY2FsbGJhY2spXG59XG5cbkxpc3RlbmVyLnByb3RvdHlwZS51cGRhdGVGaW5pc2ggPSBmdW5jdGlvbiAoY2FsbGJhY2spIHtcbiAgY29uc3QgaGFuZGxlciA9IHRoaXMuaGFuZGxlclxuICByZXR1cm4gaGFuZGxlcihbY3JlYXRlQWN0aW9uKCd1cGRhdGVGaW5pc2gnLCBbXSldLCBjYWxsYmFjaylcbn1cblxuTGlzdGVuZXIucHJvdG90eXBlLnJlZnJlc2hGaW5pc2ggPSBmdW5jdGlvbiAoY2FsbGJhY2spIHtcbiAgY29uc3QgaGFuZGxlciA9IHRoaXMuaGFuZGxlclxuICByZXR1cm4gaGFuZGxlcihbY3JlYXRlQWN0aW9uKCdyZWZyZXNoRmluaXNoJywgW10pXSwgY2FsbGJhY2spXG59XG5cbkxpc3RlbmVyLnByb3RvdHlwZS5jcmVhdGVCb2R5ID0gZnVuY3Rpb24gKGVsZW1lbnQpIHtcbiAgY29uc3QgYm9keSA9IGVsZW1lbnQudG9KU09OKClcbiAgY29uc3QgY2hpbGRyZW4gPSBib2R5LmNoaWxkcmVuXG4gIGRlbGV0ZSBib2R5LmNoaWxkcmVuXG4gIGNvbnN0IGFjdGlvbnMgPSBbY3JlYXR
lQWN0aW9uKCdjcmVhdGVCb2R5JywgW2JvZHldKV1cbiAgaWYgKGNoaWxkcmVuKSB7XG4gICAgYWN0aW9ucy5wdXNoLmFwcGx5KGFjdGlvbnMsIGNoaWxkcmVuLm1hcChjaGlsZCA9PiB7XG4gICAgICByZXR1cm4gY3JlYXRlQWN0aW9uKCdhZGRFbGVtZW50JywgW2JvZHkucmVmLCBjaGlsZCwgLTFdKVxuICAgIH0pKVxuICB9XG4gIHJldHVybiB0aGlzLmFkZEFjdGlvbnMoYWN0aW9ucylcbn1cblxuTGlzdGVuZXIucHJvdG90eXBlLmFkZEVsZW1lbnQgPSBmdW5jdGlvbiAoZWxlbWVudCwgcmVmLCBpbmRleCkge1xuICBpZiAoIShpbmRleCA + PSAwKSkge1xuICAgIGluZGV4ID0gLTFcbiAgfVxuICByZXR1cm4gdGhpcy5hZGRBY3Rpb25zKGNyZWF0ZUFjdGlvbignYWRkRWxlbWVudCcsIFtyZWYsIGVsZW1lbnQudG9KU09OKCksIGluZGV4XSkpXG59XG5cbkxpc3RlbmVyLnByb3RvdHlwZS5yZW1vdmVFbGVtZW50ID0gZnVuY3Rpb24gKHJlZikge1xuICBpZiAoQXJyYXkuaXNBcnJheShyZWYpKSB7XG4gICAgY29uc3QgYWN0aW9ucyA9IHJlZi5tYXAoKHIpID0 + IGNyZWF0ZUFjdGlvbigncmVtb3ZlRWxlbWVudCcsIFtyXSkpXG4gICAgcmV0dXJuIHRoaXMuYWRkQWN0aW9ucyhhY3Rpb25zKVxuICB9XG4gIHJldHVybiB0aGlzLmFkZEFjdGlvbnMoY3JlYXRlQWN0aW9uKCdyZW1vdmVFbGVtZW50JywgW3JlZl0pKVxufVxuXG5MaXN0ZW5lci5wcm90b3R5cGUubW92ZUVsZW1lbnQgPSBmdW5jdGlvbiAodGFyZ2V0UmVmLCBwYXJlbnRSZWYsIGluZGV4KSB7XG4gIHJldHVybiB0aGlzLmFkZEFjdGlvbnMoY3JlYXRlQWN0aW9uKCdtb3ZlRWxlbWVudCcsIFt0YXJnZXRSZWYsIHBhcmVudFJlZiwgaW5kZXhdKSlcbn1cblxuTGlzdGVuZXIucHJvdG90eXBlLnNldEF0dHIgPSBmdW5jdGlvbiAocmVmLCBrZXksIHZhbHVlKSB7XG4gIGNvbnN0IHJlc3VsdCA9IHt9XG4gIHJlc3VsdFtrZXldID0gdmFsdWVcbiAgcmV0dXJuIHRoaXMuYWRkQWN0aW9ucyhjcmVhdGVBY3Rpb24oJ3VwZGF0ZUF0dHJzJywgW3JlZiwgcmVzdWx0XSkpXG59XG5cbkxpc3RlbmVyLnByb3RvdHlwZS5zZXRTdHlsZSA9IGZ1bmN0aW9uIChyZWYsIGtleSwgdmFsdWUpIHtcbiAgY29uc3QgcmVzdWx0ID0ge31cbiAgcmVzdWx0W2tleV0gPSB2YWx1ZVxuICByZXR1cm4gdGhpcy5hZGRBY3Rpb25zKGNyZWF0ZUFjdGlvbigndXBkYXRlU3R5bGUnLCBbcmVmLCByZXN1bHRdKSlcbn1cblxuTGlzdGVuZXIucHJvdG90eXBlLnNldFN0eWxlcyA9IGZ1bmN0aW9uIChyZWYsIHN0eWxlKSB7XG4gIHJldHVybiB0aGlzLmFkZEFjdGlvbnMoY3JlYXRlQWN0aW9uKCd1cGRhdGVTdHlsZScsIFtyZWYsIHN0eWxlXSkpXG59XG5cbkxpc3RlbmVyLnByb3RvdHlwZS5hZGRFdmVudCA9IGZ1bmN0aW9uIChyZWYsIHR5cGUpIHtcbiAgcmV0dXJuIHRoaXMuYWRkQWN0aW9ucyhjcmVhdGVBY3Rpb24oJ2FkZEV2ZW50JywgW3JlZiwgdHlwZV0pKVxufVxuXG5MaXN0ZW5lci5wcm90b3R5cGUucmVtb3ZlRXZlbnQgPSBmdW5jdGlvbiAocmVmLCB0eXBlKSB7XG4gIHJldHVybiB0aGlzLmFkZEFjdGlvbnMoY3JlYXRlQWN0aW9uKCdyZW1vdmVFdmVudCcsIFtyZWYsIHR5cGVdKSlcbn1cblxuTGlzdGVuZXIucHJvdG90eXBlLmhhbmRsZXIgPSBmdW5jdGlvbiAoYWN0aW9ucywgY2IpIHtcbiAgcmV0dXJuIGNiICYmIGNiKClcbn1cblxuTGlzdGVuZXIucHJvdG90eXBlLmFkZEFjdGlvbnMgPSBmdW5jdGlvbiAoYWN0aW9ucykge1xuICBjb25zdCB1cGRhdGVzID0gdGhpcy51cGRhdGVzXG4gIGNvbnN0IGhhbmRsZXIgPSB0aGlzLmhhbmRsZXJcblxuICBpZiAoIUFycmF5LmlzQXJyYXkoYWN0aW9ucykpIHtcbiAgICBhY3Rpb25zID0gW2FjdGlvbnNdXG4gIH1cblxuICBpZiAodGhpcy5iYXRjaGVkKSB7XG4gICAgdXBkYXRlcy5wdXNoLmFwcGx5KHVwZGF0ZXMsIGFjdGlvbnMpXG4gIH1cbiAgZWxzZSB7XG4gICAgcmV0dXJuIGhhbmRsZXIoYWN0aW9ucylcbiAgfVxufVxuXG5leHBvcnQgZnVuY3Rpb24gY3JlYXRlQWN0aW9uIChuYW1lLCBhcmdzKSB7XG4gIHJldHVybiB7IG1vZHVsZTogJ2RvbScsIG1ldGhvZDogbmFtZSwgYXJnczogYXJncyB9XG59XG5cblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L2h0bWw1L3Zkb20vbGlzdGVuZXIuanNcbiAqKi8iLCJtb2R1bGUuZXhwb3J0cyA9IHtcblx0XCJuYW1lXCI6IFwid2VleFwiLFxuXHRcInZlcnNpb25cIjogXCIwLjQuMFwiLFxuXHRcImRlc2NyaXB0aW9uXCI6IFwiQSBmcmFtZXdvcmsgZm9yIGJ1aWxkaW5nIE1vYmlsZSBjcm9zcy1wbGF0Zm9ybSBVSVwiLFxuXHRcImxpY2Vuc2VcIjogXCJBcGFjaGUtMi4wXCIsXG5cdFwicmVwb3NpdG9yeVwiOiB7XG5cdFx0XCJ0eXBlXCI6IFwiZ2l0XCIsXG5cdFx0XCJ1cmxcIjogXCJnaXRAZ2l0aHViLmNvbTphbGliYWJhL3dlZXguZ2l0XCJcblx0fSxcblx0XCJob21lcGFnZVwiOiBcImh0dHA6Ly9hbGliYWJhLmdpdGh1Yi5pby93ZWV4L1wiLFxuXHRcImJ1Z3NcIjoge1xuXHRcdFwidXJsXCI6IFwiaHR0cHM6Ly9naXRodWIuY29tL2FsaWJhYmEvd2VleC9pc3N1ZXNcIlxuXHR9LFxuXHRcInByaXZhdGVcIjogXCJ0cnVlXCIsXG5cdFwia2V5d29yZHNcIjogW1xuXHRcdFwid2VleFwiLFxuXHRcdF
wiaHlicmlkXCIsXG5cdFx0XCJ3ZWJjb21wb25lbnRcIixcblx0XHRcImFwcGZyYW1ld29ya1wiLFxuXHRcdFwibXZ2bVwiLFxuXHRcdFwiamF2YXNjcmlwdFwiLFxuXHRcdFwid2Via2l0XCIsXG5cdFx0XCJ2OFwiLFxuXHRcdFwianNjb3JlXCIsXG5cdFx0XCJodG1sNVwiLFxuXHRcdFwiYW5kcm9pZFwiLFxuXHRcdFwiaW9zXCIsXG5cdFx0XCJ5dW5vc1wiXG5cdF0sXG5cdFwiZW5naW5lc1wiOiB7XG5cdFx0XCJub2RlXCI6IFwiPj00XCJcblx0fSxcblx0XCJzY3JpcHRzXCI6IHtcblx0XHRcInBvc3RpbnN0YWxsXCI6IFwiYmFzaCAuL2Jpbi9pbnN0YWxsLWhvb2tzLnNoXCIsXG5cdFx0XCJidWlsZDpicm93c2VyXCI6IFwid2VicGFjayAtLWNvbmZpZyBidWlsZC93ZWJwYWNrLmJyb3dzZXIuY29uZmlnLmpzXCIsXG5cdFx0XCJidWlsZDpuYXRpdmVcIjogXCJ3ZWJwYWNrIC0tY29uZmlnIGJ1aWxkL3dlYnBhY2submF0aXZlLmNvbmZpZy5qc1wiLFxuXHRcdFwiYnVpbGQ6ZXhhbXBsZXNcIjogXCJ3ZWJwYWNrIC0tY29uZmlnIGJ1aWxkL3dlYnBhY2suZXhhbXBsZXMuY29uZmlnLmpzXCIsXG5cdFx0XCJidWlsZDp0ZXN0XCI6IFwid2VicGFjayAtLWNvbmZpZyBidWlsZC93ZWJwYWNrLnRlc3QuY29uZmlnLmpzXCIsXG5cdFx0XCJkaXN0OmJyb3dzZXJcIjogXCJucG0gcnVuIGJ1aWxkOmJyb3dzZXIgJiYgYmFzaCAuL2Jpbi9kaXN0LWJyb3dzZXIuc2hcIixcblx0XHRcImRpc3RcIjogXCJucG0gcnVuIGRpc3Q6YnJvd3NlclwiLFxuXHRcdFwiZGV2OmJyb3dzZXJcIjogXCJ3ZWJwYWNrIC0td2F0Y2ggLS1jb25maWcgYnVpbGQvd2VicGFjay5icm93c2VyLmNvbmZpZy5qc1wiLFxuXHRcdFwiZGV2Om5hdGl2ZVwiOiBcIndlYnBhY2sgLS13YXRjaCAtLWNvbmZpZyBidWlsZC93ZWJwYWNrLm5hdGl2ZS5jb25maWcuanNcIixcblx0XHRcImRldjpleGFtcGxlc1wiOiBcIndlYnBhY2sgLS13YXRjaCAtLWNvbmZpZyBidWlsZC93ZWJwYWNrLmV4YW1wbGVzLmNvbmZpZy5qc1wiLFxuXHRcdFwiZGV2OnRlc3RcIjogXCJ3ZWJwYWNrIC0td2F0Y2ggLS1jb25maWcgYnVpbGQvd2VicGFjay50ZXN0LmNvbmZpZy5qc1wiLFxuXHRcdFwiYnVpbGRcIjogXCJucG0gcnVuIGJ1aWxkOm5hdGl2ZSAmJiBucG0gcnVuIGJ1aWxkOmJyb3dzZXIgJiYgbnBtIHJ1biBidWlsZDpleGFtcGxlcyAmJiBucG0gcnVuIGJ1aWxkOnRlc3RcIixcblx0XHRcImxpbnRcIjogXCJlc2xpbnQgaHRtbDVcIixcblx0XHRcInRlc3Q6dW5pdFwiOiBcIm1vY2hhIC0tY29tcGlsZXJzIGpzOmJhYmVsLWNvcmUvcmVnaXN0ZXIgaHRtbDUvdGVzdC91bml0LyovKi5qcyBodG1sNS90ZXN0L3VuaXQvKi8qLyouanNcIixcblx0XHRcInRlc3Q6Y292ZXJcIjogXCJiYWJlbC1ub2RlIG5vZGVfbW9kdWxlcy9pc3BhcnRhL2Jpbi9pc3BhcnRhIGNvdmVyIC0tcmVwb3J0IHRleHQgbm9kZV9tb2R1bGVzL21vY2hhL2Jpbi9fbW9jaGEgLS0gLS1yZXBvcnRlciBkb3QgaHRtbDUvdGVzdC91bml0LyovKi5qcyBodG1sNS90ZXN0L3VuaXQvKi8qLyouanNcIixcblx0XHRcInRlc3Q6ZTJlXCI6IFwibm9kZSBodG1sNS90ZXN0L2UyZS9ydW5uZXIuanNcIixcblx0XHRcInRlc3RcIjogXCJucG0gcnVuIGxpbnQgJiYgbnBtIHJ1biB0ZXN0OmNvdmVyICYmIG5wbSBydW4gdGVzdDplMmVcIixcblx0XHRcInNlcnZlXCI6IFwic2VydmUgLi8gLXAgMTI1ODBcIixcblx0XHRcImNsZWFuOmV4YW1wbGVzXCI6IFwiZWNobyBcXFwiXFxcXDAzM1szNjsxbVtDbGVhbl1cXFxcMDMzWzBtIFxcXFwwMzNbMzNtZXhhbXBsZXNcXFxcMDMzWzBtXFxcIiAmJiBybSAtdnJmIGV4YW1wbGVzL2J1aWxkLypcIixcblx0XHRcImNsZWFuOnRlc3RcIjogXCJlY2hvIFxcXCJcXFxcMDMzWzM2OzFtW0NsZWFuXVxcXFwwMzNbMG0gXFxcXDAzM1szM210ZXN0XFxcXDAzM1swbVxcXCIgJiYgcm0gLXZyZiB0ZXN0L2J1aWxkLypcIixcblx0XHRcImNsZWFuXCI6IFwibnBtIHJ1biBjbGVhbjpleGFtcGxlcyAmJiBucG0gcnVuIGNsZWFuOnRlc3RcIixcblx0XHRcImNvcHk6anNcIjogXCJjcCAtdmYgLi9kaXN0L25hdGl2ZS5qcyAuL2FuZHJvaWQvc2RrL2Fzc2V0cy9tYWluLmpzXCIsXG5cdFx0XCJjb3B5OmV4YW1wbGVzXCI6IFwicm0gLXJmIC4vYW5kcm9pZC9wbGF5Z3JvdW5kL2FwcC9zcmMvbWFpbi9hc3NldHMvKiAmJiBjcCAtdnJmIC4vZXhhbXBsZXMvYnVpbGQvKiAuL2FuZHJvaWQvcGxheWdyb3VuZC9hcHAvc3JjL21haW4vYXNzZXRzL1wiLFxuXHRcdFwiY29weVwiOiBcIm5wbSBydW4gY29weTpqcyAmJiBucG0gcnVuIGNvcHk6ZXhhbXBsZXNcIlxuXHR9LFxuXHRcInN1YnZlcnNpb25cIjoge1xuXHRcdFwiYnJvd3NlclwiOiBcIjAuMi4yM1wiLFxuXHRcdFwiZnJhbWV3b3JrXCI6IFwiMC4xMC4xM1wiLFxuXHRcdFwidHJhbnNmb3JtZXJcIjogXCI + 
PTAuMS41IDwwLjRcIlxuXHR9LFxuXHRcImRlcGVuZGVuY2llc1wiOiB7XG5cdFx0XCJhbmltYXRpb25qc1wiOiBcIl4wLjEuNVwiLFxuXHRcdFwiY2Fycm91c2VsXCI6IFwiXjAuMS4xMVwiLFxuXHRcdFwiY29yZS1qc1wiOiBcIl4yLjQuMFwiLFxuXHRcdFwiY3ViaWNiZXppZXJcIjogXCJeMC4xLjFcIixcblx0XHRcImVudmRcIjogXCJeMC4xLjFcIixcblx0XHRcImZpeGVkc3RpY2t5XCI6IFwiXjAuMS4wXCIsXG5cdFx0XCJodHRwdXJsXCI6IFwiXjAuMS4xXCIsXG5cdFx0XCJrb3VudGRvd25cIjogXCJeMC4xLjJcIixcblx0XHRcImxhenlpbWdcIjogXCJeMC4xLjJcIixcblx0XHRcImxpZVwiOiBcIl4zLjAuNFwiLFxuXHRcdFwibW9kYWxzXCI6IFwiXjAuMS41XCIsXG5cdFx0XCJzY3JvbGwtdG9cIjogXCIwLjAuMlwiLFxuXHRcdFwic2VtdmVyXCI6IFwiXjUuMS4wXCIsXG5cdFx0XCJ0cmFuc2l0aW9uaXplXCI6IFwiMC4wLjNcIixcblx0XHRcIndlZXgtY29tcG9uZW50c1wiOiBcIl4wLjEuMlwiXG5cdH0sXG5cdFwiZGV2RGVwZW5kZW5jaWVzXCI6IHtcblx0XHRcImJhYmVsLWNsaVwiOiBcIn42LjQuNVwiLFxuXHRcdFwiYmFiZWwtbG9hZGVyXCI6IFwiXjYuMi40XCIsXG5cdFx0XCJiYWJlbC1wcmVzZXQtZXMyMDE1XCI6IFwiXjYuOS4wXCIsXG5cdFx0XCJjaGFpXCI6IFwiXjMuNS4wXCIsXG5cdFx0XCJjaHJvbWVkcml2ZXJcIjogXCJeMi4yMS4yXCIsXG5cdFx0XCJjcm9zcy1zcGF3blwiOiBcIl40LjAuMFwiLFxuXHRcdFwiY3NzLWxvYWRlclwiOiBcIl4wLjIzLjFcIixcblx0XHRcImVzbGludFwiOiBcIl4yLjExLjFcIixcblx0XHRcImh0dHAtc2VydmVyXCI6IFwiXjAuOS4wXCIsXG5cdFx0XCJpc3BhcnRhXCI6IFwiXjQuMC4wXCIsXG5cdFx0XCJpc3RhbmJ1bFwiOiBcIl4wLjQuM1wiLFxuXHRcdFwianNvbi1sb2FkZXJcIjogXCJeMC41LjRcIixcblx0XHRcIm1vY2hhXCI6IFwiXjIuNS4zXCIsXG5cdFx0XCJuaWdodHdhdGNoXCI6IFwiXjAuOS40XCIsXG5cdFx0XCJwaGFudG9tanMtcHJlYnVpbHRcIjogXCJeMi4xLjdcIixcblx0XHRcInNlbGVuaXVtLXNlcnZlclwiOiBcIl4yLjUzLjBcIixcblx0XHRcInNlcnZlXCI6IFwiXjEuNC4wXCIsXG5cdFx0XCJzaW5vblwiOiBcIl4xLjE3LjRcIixcblx0XHRcInNpbm9uLWNoYWlcIjogXCJeMi44LjBcIixcblx0XHRcInN0eWxlLWxvYWRlclwiOiBcIl4wLjEzLjFcIixcblx0XHRcInVnbGlmeS1qc1wiOiBcIl4yLjYuNFwiLFxuXHRcdFwid2VicGFja1wiOiBcIl4xLjEzLjFcIixcblx0XHRcIndlZXgtbG9hZGVyXCI6IFwiXjAuMi4wXCJcblx0fVxufTtcblxuXG4vKioqKioqKioqKioqKioqKipcbiAqKiBXRUJQQUNLIEZPT1RFUlxuICoqIC9Vc2Vycy96aGFvamluamlhbmcvU2l0ZXMvcHVibGljL3dlZXgvcGFja2FnZS5qc29uXG4gKiogbW9kdWxlIGlkID0gNjlcbiAqKiBtb2R1bGUgY2h1bmtzID0gMFxuICoqLyIsIi8qKlxuICogQGZpbGVPdmVydmlldyBUaGUgYXBpIGZvciBpbnZva2luZyB3aXRoIFwiJFwiIHByZWZpeFxuICovXG5pbXBvcnQgKiBhcyBfIGZyb20gJy4uL3V0aWwnXG5cbi8qKlxuICogPT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PVxuICogY29tbW9uXG4gKiA9PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XG4gKi9cblxuLyoqXG4gKiBAZGVwcmVjYXRlZCB1c2UgJHZtIGluc3RlYWRcbiAqIGZpbmQgdGhlIHZtIGJ5IGlkXG4gKiBOb3RlOiB0aGVyZSBpcyBvbmx5IG9uZSBpZCBpbiB3aG9sZSBjb21wb25lbnRcbiAqIEBwYXJhbSAge3N0cmluZ30gaWRcbiAqIEByZXR1cm4ge1ZtfVxuICovXG5leHBvcnQgZnVuY3Rpb24gJCAoaWQpIHtcbiAgXy53YXJuKCdWbSMkIGlzIGRlcHJlY2F0ZWQsIHBsZWFzZSB1c2UgVm0jJHZtIGluc3RlYWQnKVxuICBjb25zdCBpbmZvID0gdGhpcy5faWRzW2lkXVxuICBpZiAoaW5mbykge1xuICAgIHJldHVybiBpbmZvLnZtXG4gIH1cbn1cblxuLyoqXG4gKiBmaW5kIHRoZSBlbGVtZW50IGJ5IGlkXG4gKiBOb3RlOiB0aGVyZSBpcyBvbmx5IG9uZSBpZCBpbiB3aG9sZSBjb21wb25lbnRcbiAqIEBwYXJhbSAge3N0cmluZ30gaWRcbiAqIEByZXR1cm4ge0VsZW1lbnR9XG4gKi9cbmV4cG9ydCBmdW5jdGlvbiAkZWwgKGlkKSB7XG4gIGNvbnN0IGluZm8gPSB0aGlzLl9pZHNbaWRdXG4gIGlmIChpbmZvKSB7XG4gICAgcmV0dXJuIGluZm8uZWxcbiAgfVxufVxuXG4vKipcbiAqIGZpbmQgdGhlIHZtIG9mIHRoZSBjdXN0b20gY29tcG9uZW50IGJ5IGlkXG4gKiBOb3RlOiB0aGVyZSBpcyBvbmx5IG9uZSBpZCBpbiB3aG9sZSBjb21wb25lbnRcbiAqIEBwYXJhbSAge3N0cmluZ30gaWRcbiAqIEByZXR1cm4ge1ZtfVxuICovXG5leHBvcnQgZnVuY3Rpb24gJHZtIChpZCkge1xuICBjb25zdCBpbmZvID0gdGhpcy5faWRzW2lkXVxuICBpZiAoaW5mbykge1xuICAgIHJldHVybiBpbmZvLnZtXG4gIH1cbn1cblxuLyoqXG4gKiBGaXJlIHdoZW4gZGlmZmVyIHJlbmRlcmluZyBmaW5pc2hlZFxuICpcbiAqIEBwYXJhbSAge0Z1bmN0aW9ufSBmblxuICovXG5leHBvcnQgZnVuY3Rpb24gJHJlbmRlclRoZW4gKGZuKSB7XG4gIGNvbnN0IGFwcCA9IHRoaXMuX2FwcFxuICBjb25zdCB
kaWZmZXIgPSBhcHAuZGlmZmVyXG4gIHJldHVybiBkaWZmZXIudGhlbigoKSA9PiB7XG4gICAgZm4oKVxuICB9KVxufVxuXG4vKipcbiAqIHNjcm9sbCBhbiBlbGVtZW50IHNwZWNpZmllZCBieSBpZCBpbnRvIHZpZXcsXG4gKiBtb3Jlb3ZlciBzcGVjaWZ5IGEgbnVtYmVyIG9mIG9mZnNldCBvcHRpb25hbGx5XG4gKiBAcGFyYW0gIHtzdHJpbmd9IGlkXG4gKiBAcGFyYW0gIHtudW1iZXJ9IG9mZnNldFxuICovXG5leHBvcnQgZnVuY3Rpb24gJHNjcm9sbFRvIChpZCwgb2Zmc2V0KSB7XG4gIF8ud2FybignVm0jJHNjcm9sbFRvIGlzIGRlcHJlY2F0ZWQsICcgK1xuICAgICAgICAgICdwbGVhc2UgdXNlIFwicmVxdWlyZShcXCdAd2VleC1tb2R1bGUvZG9tXFwnKScgK1xuICAgICAgICAgICcuc2Nyb2xsVG8oZWwsIG9wdGlvbnMpXCIgaW5zdGVhZCcpXG4gIGNvbnN0IGVsID0gdGhpcy4kZWwoaWQpXG4gIGlmIChlbCkge1xuICAgIGNvbnN0IGRvbSA9IHRoaXMuX2FwcC5yZXF1aXJlTW9kdWxlKCdkb20nKVxuICAgIGRvbS5zY3JvbGxUb0VsZW1lbnQoZWwucmVmLCB7IG9mZnNldDogb2Zmc2V0IH0pXG4gIH1cbn1cblxuLyoqXG4gKiBwZXJmb3JtIHRyYW5zaXRpb24gYW5pbWF0aW9uIG9uIGFuIGVsZW1lbnQgc3BlY2lmaWVkIGJ5IGlkXG4gKiBAcGFyYW0gIHtzdHJpbmd9ICAgaWRcbiAqIEBwYXJhbSAge29iamVjdH0gICBvcHRpb25zXG4gKiBAcGFyYW0gIHtvYmplY3R9ICAgb3B0aW9ucy5zdHlsZXNcbiAqIEBwYXJhbSAge29iamVjdH0gICBvcHRpb25zLmR1cmF0aW9uKG1zKVxuICogQHBhcmFtICB7b2JqZWN0fSAgIFtvcHRpb25zLnRpbWluZ0Z1bmN0aW9uXVxuICogQHBhcmFtICB7b2JqZWN0fSAgIFtvcHRpb25zLmRlbGF5PTAobXMpXVxuICogQHBhcmFtICB7RnVuY3Rpb259IGNhbGxiYWNrXG4gKi9cbmV4cG9ydCBmdW5jdGlvbiAkdHJhbnNpdGlvbiAoaWQsIG9wdGlvbnMsIGNhbGxiYWNrKSB7XG4gIGNvbnN0IGVsID0gdGhpcy4kZWwoaWQpXG4gIGlmIChlbCAmJiBvcHRpb25zICYmIG9wdGlvbnMuc3R5bGVzKSB7XG4gICAgY29uc3QgYW5pbWF0aW9uID0gdGhpcy5fYXBwLnJlcXVpcmVNb2R1bGUoJ2FuaW1hdGlvbicpXG4gICAgYW5pbWF0aW9uLnRyYW5zaXRpb24oZWwucmVmLCBvcHRpb25zLCAoLi4uYXJncykgPT4ge1xuICAgICAgdGhpcy5fc2V0U3R5bGUoZWwsIG9wdGlvbnMuc3R5bGVzKVxuICAgICAgY2FsbGJhY2sgJiYgY2FsbGJhY2soLi4uYXJncylcbiAgICB9KVxuICB9XG59XG5cbi8qKlxuICogZ2V0IHNvbWUgY29uZmlnXG4gKiBAcmV0dXJuIHtvYmplY3R9IHNvbWUgY29uZmlnIGZvciBhcHAgaW5zdGFuY2VcbiAqIEBwcm9wZXJ0eSB7c3RyaW5nfSBidW5kbGVVcmxcbiAqIEBwcm9wZXJ0eSB7Ym9vbGVhbn0gZGVidWdcbiAqIEBwcm9wZXJ0eSB7b2JqZWN0fSBlbnZcbiAqIEBwcm9wZXJ0eSB7c3RyaW5nfSBlbnYud2VleFZlcnNpb24oZXguIDEuMC4wKVxuICogQHByb3BlcnR5IHtzdHJpbmd9IGVudi5hcHBOYW1lKGV4LiBUQi9UTSlcbiAqIEBwcm9wZXJ0eSB7c3RyaW5nfSBlbnYuYXBwVmVyc2lvbihleC4gNS4wLjApXG4gKiBAcHJvcGVydHkge3N0cmluZ30gZW52LnBsYXRmb3JtKGV4LiBpT1MvQW5kcm9pZClcbiAqIEBwcm9wZXJ0eSB7c3RyaW5nfSBlbnYub3NWZXJzaW9uKGV4LiA3LjAuMClcbiAqIEBwcm9wZXJ0eSB7c3RyaW5nfSBlbnYuZGV2aWNlTW9kZWwgKipuYXRpdmUgb25seSoqXG4gKiBAcHJvcGVydHkge251bWJlcn0gZW52LltkZXZpY2VXaWR0aD03NTBdXG4gKiBAcHJvcGVydHkge251bWJlcn0gZW52LmRldmljZUhlaWdodFxuICovXG5leHBvcnQgZnVuY3Rpb24gJGdldENvbmZpZyAoY2FsbGJhY2spIHtcbiAgY29uc3QgY29uZmlnID0gXy5leHRlbmQoe1xuICAgIGVudjogZ2xvYmFsLldYRW52aXJvbm1lbnQgfHwge31cbiAgfSwgdGhpcy5fYXBwLm9wdGlvbnMpXG4gIGlmIChfLnR5cG9mKGNhbGxiYWNrKSA9PT0gJ2Z1bmN0aW9uJykge1xuICAgIF8ud2FybigndGhlIGNhbGxiYWNrIG9mIFZtIyRnZXRDb25maWcoY2FsbGJhY2spIGlzIGRlcHJlY2F0ZWQsICcgK1xuICAgICAgJ3RoaXMgYXBpIG5vdyBjYW4gZGlyZWN0bHkgUkVUVVJOIGNvbmZpZyBpbmZvLicpXG4gICAgY2FsbGJhY2soY29uZmlnKVxuICB9XG4gIHJldHVybiBjb25maWdcbn1cblxuLyoqXG4gKiBAZGVwcmVjYXRlZFxuICogcmVxdWVzdCBuZXR3b3JrIHZpYSBodHRwIHByb3RvY29sXG4gKiBAcGFyYW0gIHtvYmplY3R9ICAgcGFyYW1zXG4gKiBAcGFyYW0gIHtGdW5jdGlvbn0gY2FsbGJhY2tcbiAqL1xuZXhwb3J0IGZ1bmN0aW9uICRzZW5kSHR0cCAocGFyYW1zLCBjYWxsYmFjaykge1xuICBfLndhcm4oJ1ZtIyRzZW5kSHR0cCBpcyBkZXByZWNhdGVkLCAnICtcbiAgICAgICAgICAncGxlYXNlIHVzZSBcInJlcXVpcmUoXFwnQHdlZXgtbW9kdWxlL3N0cmVhbVxcJyknICtcbiAgICAgICAgICAnLnNlbmRIdHRwKHBhcmFtcywgY2FsbGJhY2spXCIgaW5zdGVhZCcpXG4gIGNvbnN0IHN0cmVhbSA9IHRoaXMuX2FwcC5yZXF1aXJlTW9kdWxlKCdzdHJlYW0nKVxuICBzdHJlYW0uc2VuZEh0dHAocGFyYW1zLCBjYWxsYmFjaylcbn1cblxuLyoqXG4gKiBAZGVwcmVjYXRlZFxuICogb3BlbiBhIHVybFxuICogQHBhcmFtICB7c3RyaW5nfSB1cmxcbiAqL1xuZXhwb3J0IGZ1bmN0aW
9uICRvcGVuVVJMICh1cmwpIHtcbiAgXy53YXJuKCdWbSMkb3BlblVSTCBpcyBkZXByZWNhdGVkLCAnICtcbiAgICAgICAgICAncGxlYXNlIHVzZSBcInJlcXVpcmUoXFwnQHdlZXgtbW9kdWxlL2V2ZW50XFwnKScgK1xuICAgICAgICAgICcub3BlblVSTCh1cmwpXCIgaW5zdGVhZCcpXG4gIGNvbnN0IGV2ZW50ID0gdGhpcy5fYXBwLnJlcXVpcmVNb2R1bGUoJ2V2ZW50JylcbiAgZXZlbnQub3BlblVSTCh1cmwpXG59XG5cbi8qKlxuICogQGRlcHJlY2F0ZWRcbiAqIHNldCBhIHRpdGxlIGZvciBwYWdlXG4gKiBAcGFyYW0gIHtzdHJpbmd9IHRpdGxlXG4gKi9cbmV4cG9ydCBmdW5jdGlvbiAkc2V0VGl0bGUgKHRpdGxlKSB7XG4gIF8ud2FybignVm0jJHNldFRpdGxlIGlzIGRlcHJlY2F0ZWQsICcgK1xuICAgICAgICAgICdwbGVhc2UgdXNlIFwicmVxdWlyZShcXCdAd2VleC1tb2R1bGUvcGFnZUluZm9cXCcpJyArXG4gICAgICAgICAgJy5zZXRUaXRsZSh0aXRsZSlcIiBpbnN0ZWFkJylcbiAgY29uc3QgcGFnZUluZm8gPSB0aGlzLl9hcHAucmVxdWlyZU1vZHVsZSgncGFnZUluZm8nKVxuICBwYWdlSW5mby5zZXRUaXRsZSh0aXRsZSlcbn1cblxuLyoqXG4gKiBAZGVwcmVjYXRlZCB1c2UgXCJyZXF1aXJlKCdAd2VleC1tb2R1bGUvbW9kdWxlTmFtZScpIGluc3RlYWRcIlxuICogaW52b2tlIGEgbmF0aXZlIG1ldGhvZCBieSBzcGVjaWZpbmcgdGhlIG5hbWUgb2YgbW9kdWxlIGFuZCBtZXRob2RcbiAqIEBwYXJhbSAge3N0cmluZ30gbW9kdWxlTmFtZVxuICogQHBhcmFtICB7c3RyaW5nfSBtZXRob2ROYW1lXG4gKiBAcGFyYW0gIHsuLi4qfSB0aGUgcmVzdCBhcmd1bWVudHNcbiAqL1xuZXhwb3J0IGZ1bmN0aW9uICRjYWxsIChtb2R1bGVOYW1lLCBtZXRob2ROYW1lLCAuLi5hcmdzKSB7XG4gIF8ud2FybignVm0jJGNhbGwgaXMgZGVwcmVjYXRlZCwgJyArXG4gICAgJ3BsZWFzZSB1c2UgXCJyZXF1aXJlKFxcJ0B3ZWV4LW1vZHVsZS9tb2R1bGVOYW1lXFwnKVwiIGluc3RlYWQnKVxuICBjb25zdCBtb2R1bGUgPSB0aGlzLl9hcHAucmVxdWlyZU1vZHVsZShtb2R1bGVOYW1lKVxuICBpZiAobW9kdWxlICYmIG1vZHVsZVttZXRob2ROYW1lXSkge1xuICAgIG1vZHVsZVttZXRob2ROYW1lXSguLi5hcmdzKVxuICB9XG59XG5cblxuXG4vKiogV0VCUEFDSyBGT09URVIgKipcbiAqKiAvVXNlcnMvemhhb2ppbmppYW5nL1NpdGVzL3B1YmxpYy93ZWV4L2h0bWw1L2RlZmF1bHQvYXBpL21ldGhvZHMuanNcbiAqKi8iLCIvKipcbiAqIEBmaWxlT3ZlcnZpZXcgVGhlIGFwaSBmb3IgaW52b2tpbmcgd2l0aCBcIiRcIiBwcmVmaXhcbiAqL1xuXG4vKipcbiAqID09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT1cbiAqIHByaXZhdGUgZm9yIGFsaVxuICogPT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PVxuICovXG5cbi8qKlxuICogaW52b2tlIHVzZXItdHJhY2sgb24gVGFvYmFvIE1vYmxpZVxuICogQHBhcmFtIHtzdHJpbmd9IHR5cGXvvJplbnRlciwgY2xpY2ssIGV4cG9zZVxuICogQHBhcmFtIHtzdHJpbmd9IG5hbWVcbiAqIEBwYXJhbSB7c3RyaW5nfSBjb21OYW1lXG4gKiBAcGFyYW0ge29iamVjdH0gcGFyYW1cbiovXG5leHBvcnQgZnVuY3Rpb24gJHVzZXJUcmFjayh0eXBlLCBuYW1lLCBjb21OYW1lLCBwYXJhbSkge1xuICBjb25zdCB1c2VyVHJhY2sgPSB0aGlzLl9hcHAucmVxdWlyZU1vZHVsZSgndXNlclRyYWNrJylcbiAgdXNlclRyYWNrLmNvbW1pdCh0eXBlLCBuYW1lLCBjb21OYW1lLCBwYXJhbSlcbn1cblxuLyoqXG4gKiByZXF1ZXN0IGEgcmVzdGZ1bGwgYXBpIHZpYSB0aGUgbXRvcCBnYXRld2F5XG4gKiBAcGFyYW0gIHtvYmplY3R9ICAgcGFyYW1zXG4gKiBAcGFyYW0gIHtGdW5jdGlvbn0gY2FsbGJhY2tcbiAqL1xuZXhwb3J0IGZ1bmN0aW9uICRzZW5kTXRvcChwYXJhbXMsIGNhbGxiYWNrKSB7XG4gIC8qIGlzdGFuYnVsIGlnbm9yZSBlbHNlICovXG4gIGlmICh0eXBlb2Ygd2luZG93ID09PSAndW5kZWZpbmVkJykge1xuICAgIC8vIGluIG5hdGl2Ze + 
8jHVzZSB3aW5kdmFuZVxuICAgIGNvbnN0IHdpbmR2YW5lID0gdGhpcy5fYXBwLnJlcXVpcmVNb2R1bGUoJ3dpbmR2YW5lJylcbiAgICB3aW5kdmFuZS5jYWxsKHtcbiAgICAgIGNsYXNzOiAnTXRvcFdWUGx1Z2luJyxcbiAgICAgIG1ldGhvZDogJ3NlbmQnLFxuICAgICAgZGF0YTogcGFyYW1zXG4gICAgfSwgY2FsbGJhY2spXG4gIH0gZWxzZSB7XG4gICAgLy8gaW4gd2ViIGJyd29zZXLvvIx1c2Ugc3RyZWFtLnNlbmRNdG9wXG4gICAgY29uc3Qgc3RyZWFtID0gdGhpcy5fYXBwLnJlcXVpcmVNb2R1bGUoJ3N0cmVhbScpXG4gICAgc3RyZWFtLnNlbmRNdG9wKHBhcmFtcywgY2FsbGJhY2spXG4gIH1cbn1cblxuLyoqXG4gKiByZXF1ZXN0IGEgbmF0aXZlIGFwaSB2aWEgd2luZHZhbmUgcHJvdG9jb2xcbiAqIEBwYXJhbSAge29iamVjdH0gICBwYXJhbXNcbiAqIEBwYXJhbSAge0Z1bmN0aW9ufSBjYWxsYmFja1xuICovXG5leHBvcnQgZnVuY3Rpb24gJGNhbGxXaW5kdmFuZShwYXJhbXMsIGNhbGxiYWNrKSB7XG4gIGNvbnN0IHdpbmR2YW5lID0gdGhpcy5fYXBwLnJlcXVpcmVNb2R1bGUoJ3dpbmR2YW5lJylcbiAgd2luZHZhbmUuY2FsbChwYXJhbXMsIGNhbGxiYWNrKVxufVxuXG4vKipcbiAqIHNldCBzcG0gZm9yIHRoZSBwYWdlXG4gKiBAcGFyYW0gIHtzdHJpbmd9IGFcbiAqIEBwYXJhbSAge3N0cmluZ30gYlxuICovXG5leHBvcnQgZnVuY3Rpb24gJHNldFNwbShhLCBiKSB7XG4gIGNvbnN0IHBhZ2VJbmZvID0gdGhpcy5fYXBwLnJlcXVpcmVNb2R1bGUoJ3BhZ2VJbmZvJylcbiAgcGFnZUluZm8uc2V0U3BtKGEsIGIpXG59XG5cbi8qKlxuICogZ2V0IHRoZSBpbmZvcm1hdGlvbiBvZiB0aGUgY3VycmVudCBsb2dpbmVkIHVzZXJcbiAqIEBwYXJhbSAge0Z1bmN0aW9ufSBjYWxsYmFja1xuICovXG5leHBvcnQgZnVuY3Rpb24gJGdldFVzZXJJbmZvKGNhbGxiYWNrKSB7XG4gIGNvbnN0IHVzZXIgPSB0aGlzLl9hcHAucmVxdWlyZU1vZHVsZSgndXNlcicpXG4gIHVzZXIuZ2V0VXNlckluZm8oY2FsbGJhY2spXG59XG5cbi8qKlxuICogcGVyZm9ybSBsb2dpblxuICogQHBhcmFtICB7RnVuY3Rpb259IGNhbGxiYWNrXG4gKi9cbmV4cG9ydCBmdW5jdGlvbiAkbG9naW4oY2FsbGJhY2spIHtcbiAgY29uc3QgdXNlciA9IHRoaXMuX2FwcC5yZXF1aXJlTW9kdWxlKCd1c2VyJylcbiAgdXNlci5sb2dpbihjYWxsYmFjaylcbn1cblxuLyoqXG4gKiBwZXJmb3JtIGxvZ291dFxuICogQHBhcmFtICB7RnVuY3Rpb259IGNhbGxiYWNrXG4gKi9cbmV4cG9ydCBmdW5jdGlvbiAkbG9nb3V0KGNhbGxiYWNrKSB7XG4gIGNvbnN0IHVzZXIgPSB0aGlzLl9hcHAucmVxdWlyZU1vZHVsZSgndXNlcicpXG4gIHVzZXIubG9nb3V0KGNhbGxiYWNrKVxufVxuXG5cblxuXG5cbi8qKiBXRUJQQUNLIEZPT1RFUiAqKlxuICoqIC4vbGliL2FwaS9tZXRob2RzLmpzXG4gKiovIl0sInNvdXJjZVJvb3QiOiIifQ = = <nl> \ No newline at end of file <nl> | * [ android ] update main . js | apache/incubator-weex | 5ab3bbb6a9362efcf8073761c2fd86d7e3d1941a | 2016-07-07T11:47:19Z |
mmm a / test / rql_test / connections / connection . js <nl> ppp b / test / rql_test / connections / connection . js <nl> var assertNoError = function ( err ) { <nl> } <nl> } ; <nl> <nl> - var r = require ( ' . . / . . / . . / drivers / javascript2 / build / rethinkdb ' ) ; <nl> + var r = require ( ' . . / . . / . . / drivers / javascript / build / rethinkdb ' ) ; <nl> <nl> var actions = [ <nl> <nl> mmm a / test / rql_test / connections / connection . py <nl> ppp b / test / rql_test / connections / connection . py <nl> <nl> from subprocess import Popen <nl> from time import sleep <nl> from sys import path <nl> - path . append ( " . . / . . / . . / drivers / python2 " ) <nl> + path . append ( " . . / . . / . . / drivers / python " ) <nl> <nl> import rethinkdb as r <nl> <nl> mmm a / test / rql_test / connections / cursor . js <nl> ppp b / test / rql_test / connections / cursor . js <nl> process . on ( ' uncaughtException ' , function ( err ) { <nl> console . log ( err . toString ( ) + err . stack . toString ( ) ) ; <nl> } ) ; <nl> <nl> - var r = require ( ' . . / . . / . . / drivers / javascript2 / build / rethinkdb ' ) ; <nl> + var r = require ( ' . . / . . / . . / drivers / javascript / build / rethinkdb ' ) ; <nl> <nl> var assertNoError = function ( err ) { <nl> if ( err ) { <nl> mmm a / test / rql_test / connections / cursor . py <nl> ppp b / test / rql_test / connections / cursor . py <nl> <nl> <nl> from os import getenv <nl> from sys import path , argv <nl> - path . append ( " . . / . . / . . / drivers / python2 " ) <nl> + path . append ( " . . / . . / . . / drivers / python " ) <nl> <nl> import rethinkdb as r <nl> <nl> mmm a / test / rql_test / connections / cursor_test . py <nl> ppp b / test / rql_test / connections / cursor_test . py <nl> <nl> from random import randint <nl> from os import putenv <nl> from sys import path <nl> - path . append ( " . . / . . / . . / drivers / python2 " ) <nl> + path . append ( " . . / . . / . . / drivers / python " ) <nl> <nl> import rethinkdb as r <nl> <nl> mmm a / test / rql_test / drivers / driver . js <nl> ppp b / test / rql_test / drivers / driver . js <nl> <nl> - var r = require ( ' . / . . / . . / . . / drivers / javascript2 / build / rethinkdb . js ' ) ; <nl> + var r = require ( ' . / . . / . . / . . / drivers / javascript / build / rethinkdb . js ' ) ; <nl> <nl> var JSPORT = process . argv [ 2 ] <nl> var CPPPORT = process . argv [ 3 ] <nl> mmm a / test / rql_test / drivers / driver . py <nl> ppp b / test / rql_test / drivers / driver . py <nl> <nl> import collections <nl> import types <nl> import re <nl> - path . insert ( 0 , " . . / . . / drivers / python2 " ) <nl> + path . insert ( 0 , " . . / . . / drivers / python " ) <nl> <nl> from os import environ <nl> import rethinkdb as r <nl> | changed driver paths | rethinkdb/rethinkdb | db4b4fba2aeb9c6705290f899246eee714e6f847 | 2013-03-09T05:56:07Z |
mmm a / Source / CNTKv2LibraryDll / Utils . cpp <nl> ppp b / Source / CNTKv2LibraryDll / Utils . cpp <nl> namespace CNTK <nl> <nl> bool metricAggregatorUpdated = false ; <nl> bool anyUpdatesPerformed = false ; <nl> + size_t metricAggregatorIndex = 0 ; <nl> for ( size_t i = 0 ; i < m_learners . size ( ) ; i + + ) <nl> { <nl> auto l = m_learners [ i ] ; <nl> namespace CNTK <nl> { <nl> mbInfoPerLearner [ i ] = minibatch ; <nl> metricAggregatorUpdated = true ; <nl> + metricAggregatorIndex = i ; <nl> } <nl> else <nl> { <nl> namespace CNTK <nl> RuntimeError ( " Update failed : Metric aggregation did not happen , none of the learners was marked as metric aggregator . " ) ; <nl> <nl> / / In a single trainer , the number of samples should be same for each learner . <nl> - / / We use the learner marked as MetricAggregator to set the number of samples . <nl> - for ( size_t i = 0 ; i < m_learners . size ( ) ; i + + ) <nl> - { <nl> - mbInfoPerLearner [ i ] . numberOfSamples = minibatch . numberOfSamples ; <nl> - } <nl> + / / Assign the minibatch to the information from the matrix aggregating learner . <nl> + minibatch = mbInfoPerLearner [ metricAggregatorIndex ] ; <nl> return anyUpdatesPerformed ; <nl> } <nl> <nl> mmm a / bindings / python / cntk / learners / tests / bmuf_metrics_aggregation_test . py <nl> ppp b / bindings / python / cntk / learners / tests / bmuf_metrics_aggregation_test . py <nl> class SimpleBMUFTrainer ( ) : <nl> def __init__ ( self , frame_mode = False ) : <nl> self . create_model ( frame_mode ) <nl> self . create_trainer ( ) <nl> - <nl> + <nl> def create_model ( self , frame_mode = False ) : <nl> if frame_mode : <nl> self . feat = cntk . input_variable ( shape = ( feat_dim , ) ) <nl> def create_model ( self , frame_mode = False ) : <nl> <nl> net = cntk . layers . Sequential ( [ cntk . layers . Dense ( cell_dim ) , cntk . layers . Dense ( label_dim ) ] ) <nl> self . output = net ( self . feat ) <nl> - else : <nl> + else : <nl> # sequence mode <nl> self . feat = cntk . sequence . input_variable ( shape = ( feat_dim , ) ) <nl> self . label = cntk . sequence . input_variable ( ( label_dim , ) ) <nl> def create_model ( self , frame_mode = False ) : <nl> <nl> self . ce = cntk . cross_entropy_with_softmax ( self . output , self . label ) <nl> self . err = cntk . classification_error ( self . output , self . label ) <nl> - <nl> + <nl> def create_trainer ( self ) : <nl> try : <nl> learner = cntk . block_momentum_distributed_learner ( cntk . momentum_sgd ( self . output . parameters , cntk . learning_parameter_schedule ( 0 . 0001 ) , cntk . momentum_as_time_constant_schedule ( 1000 ) ) , <nl> block_size = 1000 , block_learning_rate = 0 . 01 , block_momentum_as_time_constant = 1000 ) <nl> - <nl> + <nl> comm_rank = cntk . distributed . Communicator . rank ( ) <nl> self . trainer = cntk . Trainer ( self . output , ( self . ce , self . err ) , [ learner ] , [ cntk . logging . ProgressPrinter ( freq = progress_freq , tag = " Training " , rank = comm_rank ) ] ) <nl> except RuntimeError : <nl> self . trainer = None <nl> return <nl> - <nl> - def get_minibatch ( bmuf , working_dir , mb_source ) : <nl> + <nl> + def get_minibatch ( bmuf , working_dir , mb_source , num_data_partitions = 1 , partition_index = 0 ) : <nl> from cntk . io import MinibatchSource , CTFDeserializer , StreamDef , StreamDefs <nl> - <nl> + <nl> if mb_source = = " numpy " : <nl> + assert ( num_data_partitions = = 1 ) # numpy option does not support more than one partition in this impl . 
<nl> + assert ( partition_index = = 0 ) <nl> for i in range ( num_batches ) : <nl> features = [ ] <nl> labels = [ ] <nl> def get_minibatch ( bmuf , working_dir , mb_source ) : <nl> features . append ( x ) <nl> labels . append ( y ) <nl> yield { bmuf . feat : features , bmuf . label : labels } <nl> - <nl> + <nl> if mb_source in ( " ctf_utterance " , " ctf_frame " , " ctf_bptt " ) : <nl> if mb_source = = " ctf_frame " : <nl> # frame mode data without sequence ids . <nl> def get_minibatch ( bmuf , working_dir , mb_source ) : <nl> ctf_file = os . path . join ( working_dir , ' 2seqtest . txt ' ) <nl> with open ( ctf_file , ' w ' ) as f : <nl> f . write ( ctf_data ) <nl> - <nl> + <nl> # ctf_utterance model <nl> frame_mode = False <nl> truncation_length = 0 <nl> - <nl> + <nl> if mb_source = = " ctf_frame " : <nl> frame_mode = True <nl> elif mb_source = = " ctf_bptt " : <nl> truncation_length = 2 <nl> - <nl> + <nl> mbs = MinibatchSource ( CTFDeserializer ( ctf_file , StreamDefs ( <nl> features = StreamDef ( field = ' S0 ' , shape = feat_dim , is_sparse = False ) , <nl> labels = StreamDef ( field = ' S1 ' , shape = label_dim , is_sparse = False ) <nl> ) ) , randomize = False , max_samples = batch_size * num_batches , <nl> frame_mode = frame_mode , truncation_length = truncation_length ) <nl> - <nl> + <nl> for i in range ( num_batches ) : <nl> - minibatch = mbs . next_minibatch ( batch_size , { bmuf . feat : mbs . streams . features , bmuf . label : mbs . streams . labels } ) <nl> + minibatch = mbs . next_minibatch ( <nl> + minibatch_size_in_samples = batch_size , <nl> + input_map = { bmuf . feat : mbs . streams . features , bmuf . label : mbs . streams . labels } , <nl> + num_data_partitions = num_data_partitions , <nl> + partition_index = partition_index <nl> + ) <nl> if not minibatch : <nl> break <nl> yield minibatch <nl> - <nl> + <nl> def mpi_worker ( working_dir , mb_source , gpu ) : <nl> comm_rank = cntk . distributed . Communicator . rank ( ) <nl> np . random . seed ( comm_rank ) <nl> - <nl> + <nl> if gpu : <nl> # test with only one GPU <nl> cntk . try_set_default_device ( cntk . gpu ( 0 ) ) <nl> - <nl> + <nl> frame_mode = ( mb_source = = " ctf_frame " ) <nl> bmuf = SimpleBMUFTrainer ( frame_mode ) <nl> for i , data in enumerate ( get_minibatch ( bmuf , working_dir , mb_source ) ) : <nl> bmuf . trainer . train_minibatch ( data ) <nl> if i % 50 = = 0 : <nl> - bmuf . trainer . summarize_training_progress ( ) <nl> + bmuf . trainer . summarize_training_progress ( ) <nl> + print ( " SAMPLES % d " % ( bmuf . trainer . total_number_of_samples_seen ) ) <nl> <nl> MB_SOURCES = [ " numpy " , " ctf_utterance " , " ctf_frame " , " ctf_bptt " ] <nl> # MB_SOURCES = [ " numpy " ] <nl> def mpi_worker ( working_dir , mb_source , gpu ) : <nl> def test_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_source ) : <nl> if platform . system ( ) = = ' Linux ' : <nl> pytest . skip ( ' test only runs on Windows due to mpiexec - l option ' ) <nl> - <nl> + <nl> # check whether trainer can be initialized or not <nl> bmuf = SimpleBMUFTrainer ( ) <nl> if not bmuf . trainer : <nl> pytest . 
skip ( ' BMUF not available on this build ' ) <nl> - <nl> + <nl> launch_args = [ ] <nl> if device_id > = 0 : <nl> launch_args + = [ ' - - gpu ' ] <nl> - <nl> + <nl> launch_args + = [ " - - outputdir " , str ( tmpdir ) ] <nl> launch_args + = [ " - - mb_source " , mb_source ] <nl> - <nl> + <nl> ret_str = mpiexec_execute ( __file__ , [ ' - n ' , str ( NUM_WORKERS ) , ' - l ' ] , launch_args ) <nl> # print ( ret_str ) <nl> - <nl> + <nl> # [ 0 ] Finished Epoch [ 1 ] : [ Training ] loss = 1 . 663636 * 10 , metric = 52 . 40 % * 10 0 . 890s ( 11 . 2 samples / s ) ; <nl> regex_pattern = r " \ [ ( ? P < worker_rank > \ d ) \ ] . * ? Epoch \ [ ( ? P < epoch > \ d + ) \ ] . * ? loss = ( ? P < loss > \ d + \ . \ d + ) . * ? metric = ( ? P < metric > \ d + \ . \ d + ) " <nl> loss_perepoch_perworker = { i : { } for i in range ( NUM_WORKERS ) } <nl> def test_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_source ) : <nl> loss = match . groupdict ( ) [ " loss " ] <nl> metric = match . groupdict ( ) [ " metric " ] <nl> loss_perepoch_perworker [ rank ] . update ( { epoch : ( loss , metric ) } ) <nl> - <nl> + <nl> num_epochs_per_worker = list ( map ( len , loss_perepoch_perworker . values ( ) ) ) <nl> - <nl> + <nl> # assert that data exists <nl> assert len ( num_epochs_per_worker ) ! = 0 <nl> - <nl> + <nl> # assert that number of epochs isn ' t zero for 1st worker . <nl> assert num_epochs_per_worker [ 0 ] ! = 0 <nl> - <nl> + <nl> # assert all workers have same number of epochs <nl> assert min ( num_epochs_per_worker ) = = max ( num_epochs_per_worker ) <nl> - <nl> + <nl> # assert all workers have same loss and metric values <nl> loss_per_worker = loss_perepoch_perworker . values ( ) <nl> loss_per_worker_epochsort = [ ] <nl> for epoch_losses in loss_per_worker : <nl> loss_per_worker_epochsort . append ( [ epoch_losses [ i ] for i in sorted ( epoch_losses ) ] ) <nl> - <nl> + <nl> assert all ( [ loss_per_worker_epochsort [ 0 ] = = i for i in loss_per_worker_epochsort ] ) <nl> - <nl> <nl> if __name__ = = " __main__ " : <nl> parser = argparse . ArgumentParser ( ) <nl> def test_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_source ) : <nl> parser . add_argument ( ' - mb_source ' , ' - - mb_source ' ) <nl> parser . add_argument ( ' - gpu ' , ' - - gpu ' , action = ' store_true ' ) <nl> args = vars ( parser . parse_args ( ) ) <nl> - <nl> + <nl> mpi_worker ( args [ " outputdir " ] , args [ " mb_source " ] , args [ " gpu " ] ) <nl> cntk . distributed . Communicator . finalize ( ) <nl> mmm a / bindings / python / cntk / learners / tests / distributed_multi_learner_test . py <nl> ppp b / bindings / python / cntk / learners / tests / distributed_multi_learner_test . py <nl> <nl> import cntk <nl> import numpy as np <nl> import sys , os <nl> - sys . path . append ( os . path . dirname ( __file__ ) ) <nl> - from bmuf_metrics_aggregation_test import SimpleBMUFTrainer <nl> - from bmuf_metrics_aggregation_test import get_minibatch <nl> - from bmuf_metrics_aggregation_test import mpi_worker <nl> - from distributed_learner_test import mpiexec_execute <nl> - <nl> import argparse <nl> import re <nl> import platform <nl> - <nl> + sys . path . append ( os . path . dirname ( __file__ ) ) <nl> cntk . cntk_py . set_fixed_random_seed ( 1 ) <nl> - # cntk . logging . set_trace_level ( cntk . logging . TraceLevel . 
Info ) <nl> + from distributed_learner_test import mpiexec_execute <nl> + from bmuf_metrics_aggregation_test import get_minibatch <nl> <nl> feat_dim = 5 <nl> label_dim = 3 <nl> cell_dim = 5 <nl> seq_len = 20 <nl> num_batches = 101 <nl> - batch_size = 10 <nl> progress_freq = 10 <nl> - NUM_WORKERS = 4 <nl> <nl> - class MultiLearnerMUFTrainer ( SimpleBMUFTrainer ) : <nl> + class SingleDataParallelTrainer ( ) : <nl> + def __init__ ( self , frame_mode = False ) : <nl> + self . create_model ( frame_mode ) <nl> + self . create_trainer ( ) <nl> + <nl> + def create_model ( self , frame_mode = False ) : <nl> + if frame_mode : <nl> + self . feat = cntk . input_variable ( shape = ( feat_dim , ) ) <nl> + self . label = cntk . input_variable ( ( label_dim , ) ) <nl> + <nl> + net = cntk . layers . Sequential ( [ cntk . layers . Dense ( cell_dim ) , cntk . layers . Dense ( label_dim ) ] ) <nl> + self . output = net ( self . feat ) <nl> + else : <nl> + # sequence mode <nl> + self . feat = cntk . sequence . input_variable ( shape = ( feat_dim , ) ) <nl> + self . label = cntk . sequence . input_variable ( ( label_dim , ) ) <nl> + <nl> + net = cntk . layers . Sequential ( [ cntk . layers . Recurrence ( cntk . layers . LSTM ( shape = label_dim , cell_shape = ( cell_dim , ) ) ) ] ) <nl> + self . output = net ( self . feat ) <nl> + <nl> + self . ce = cntk . cross_entropy_with_softmax ( self . output , self . label ) <nl> + self . err = cntk . classification_error ( self . output , self . label ) <nl> + <nl> + def create_trainer ( self ) : <nl> + try : <nl> + lr_per_sample = cntk . learning_parameter_schedule_per_sample ( 0 . 007 ) <nl> + learner = cntk . data_parallel_distributed_learner ( cntk . sgd ( self . output . parameters , lr_per_sample ) ) <nl> + <nl> + comm_rank = cntk . distributed . Communicator . rank ( ) <nl> + self . trainer = cntk . Trainer ( self . output , ( self . ce , self . err ) , [ learner ] , [ cntk . logging . ProgressPrinter ( freq = progress_freq , tag = " Training " , rank = comm_rank ) ] ) <nl> + except RuntimeError : <nl> + self . trainer = None <nl> + return <nl> + <nl> + class TwoDataParallelTrainer ( SingleDataParallelTrainer ) : <nl> def __init__ ( self , frame_mode = False ) : <nl> - SimpleBMUFTrainer . __init__ ( self , frame_mode ) <nl> + SingleDataParallelTrainer . __init__ ( self , frame_mode ) <nl> + <nl> + def create_trainer ( self ) : <nl> + try : <nl> + lr_per_sample = cntk . learning_parameter_schedule_per_sample ( 0 . 007 ) <nl> + p = self . output . parameters <nl> + # Three of four parameters are learned by first data_parallel_distributed_learner . <nl> + learner1 = cntk . data_parallel_distributed_learner ( cntk . sgd ( [ p [ 0 ] , p [ 1 ] , p [ 2 ] ] , lr_per_sample ) ) <nl> + <nl> + # New API to mark which learner is to use for metric aggregaion . <nl> + learner1 . set_as_metric_aggregator ( ) <nl> + <nl> + # The last parameter is learned by another data_parallel_distributed_learner . <nl> + learner2 = cntk . data_parallel_distributed_learner ( cntk . sgd ( [ p [ 3 ] ] , lr_per_sample ) ) <nl> + <nl> + comm_rank = cntk . distributed . Communicator . rank ( ) <nl> + self . trainer = cntk . Trainer ( self . output , ( self . ce , self . err ) , [ learner1 , learner2 ] , [ cntk . logging . ProgressPrinter ( freq = progress_freq , tag = " Training " , rank = comm_rank ) ] ) <nl> + except RuntimeError : <nl> + self . 
trainer = None <nl> + return <nl> + <nl> + class MultiLearnerTrainer ( SingleDataParallelTrainer ) : <nl> + def __init__ ( self , frame_mode = False ) : <nl> + SingleDataParallelTrainer . __init__ ( self , frame_mode ) <nl> <nl> def create_trainer ( self ) : <nl> try : <nl> def create_trainer ( self ) : <nl> self . trainer = None <nl> return <nl> <nl> - <nl> - def mpi_worker_multi_learner ( working_dir , checkpoint_dir , mb_source , gpu ) : <nl> + def mpi_worker_multi_learner ( trainer , working_dir , checkpoint_dir , mb_source , gpu ) : <nl> comm_rank = cntk . distributed . Communicator . rank ( ) <nl> np . random . seed ( comm_rank ) <nl> - <nl> + <nl> if gpu : <nl> # test with only one GPU <nl> cntk . try_set_default_device ( cntk . gpu ( 0 ) ) <nl> <nl> - frame_mode = ( mb_source = = " ctf_frame " ) <nl> - bmuf = MultiLearnerMUFTrainer ( frame_mode ) <nl> - <nl> + num_paritions = cntk . Communicator . num_workers ( ) ; <nl> + partition_index = cntk . Communicator . rank ( ) ; <nl> checkpoint_performed = False <nl> - for i , data in enumerate ( get_minibatch ( bmuf , working_dir , mb_source ) ) : <nl> - bmuf . trainer . train_minibatch ( data ) <nl> + for i , data in enumerate ( get_minibatch ( trainer , working_dir , mb_source , num_paritions , partition_index ) ) : <nl> + trainer . trainer . train_minibatch ( data ) <nl> if i % 50 = = 0 : <nl> - bmuf . trainer . summarize_training_progress ( ) <nl> + trainer . trainer . summarize_training_progress ( ) <nl> if not checkpoint_performed and not checkpoint_dir = = " " : <nl> - bmuf . trainer . save_checkpoint ( checkpoint_dir ) <nl> - bmuf . trainer . restore_from_checkpoint ( checkpoint_dir ) <nl> + trainer . trainer . save_checkpoint ( checkpoint_dir ) <nl> + trainer . trainer . restore_from_checkpoint ( checkpoint_dir ) <nl> checkpoint_performed = True <nl> <nl> + def get_loss_perepoch_perworker ( log_line , num_workers ) : <nl> + # [ 0 ] Finished Epoch [ 1 ] : [ Training ] loss = 1 . 663636 * 10 , metric = 52 . 40 % * 10 0 . 890s ( 11 . 2 samples / s ) ; <nl> + regex_pattern = r " \ [ ( ? P < worker_rank > \ d ) \ ] . * ? Epoch \ [ ( ? P < epoch > \ d + ) \ ] . * ? loss = ( ? P < loss > \ d + \ . \ d + ) \ * ( ? P < samples > \ d + ) . * ? metric = ( ? P < metric > \ d + \ . \ d + ) " <nl> + loss_perepoch_perworker = { i : { } for i in range ( num_workers ) } <nl> + for match in re . finditer ( regex_pattern , log_line ) : <nl> + rank = int ( match . groupdict ( ) [ " worker_rank " ] ) <nl> + epoch = int ( match . groupdict ( ) [ " epoch " ] ) <nl> + loss = match . groupdict ( ) [ " loss " ] <nl> + metric = match . groupdict ( ) [ " metric " ] <nl> + samples = int ( match . groupdict ( ) [ " samples " ] ) <nl> + loss_perepoch_perworker [ rank ] . update ( { epoch : ( loss , metric , samples ) } ) <nl> + return loss_perepoch_perworker <nl> + <nl> + MB_SOURCES = [ " ctf_frame " ] <nl> + @ pytest . mark . parametrize ( " mb_source " , MB_SOURCES ) <nl> + def test_single_data_parallel_learner_vs_two_data_parallel_learners ( tmpdir , device_id , mb_source ) : <nl> + if platform . system ( ) = = ' Linux ' : <nl> + pytest . skip ( ' test only runs on Windows due to mpiexec - l option ' ) <nl> + <nl> + launch_args = [ ] <nl> + if device_id > = 0 : <nl> + launch_args + = [ ' - - gpu ' ] <nl> + <nl> + launch_args + = [ " - - outputdir " , str ( tmpdir ) ] <nl> + launch_args + = [ " - - mb_source " , mb_source ] <nl> + launch_args + = [ " - - trainer_type " , " single " ] <nl> + <nl> + num_workers = 1 # use a single worker . 
<nl> + ret_str = mpiexec_execute ( __file__ , [ ' - n ' , str ( num_workers ) , ' - l ' ] , launch_args ) <nl> + print ( ret_str ) <nl> + loss_perepoch_perworker = get_loss_perepoch_perworker ( ret_str , num_workers ) <nl> + <nl> + loss_per_worker = loss_perepoch_perworker . values ( ) <nl> + single_learner_loss_per_worker_epochsort = [ ] <nl> + for epoch_losses in loss_per_worker : <nl> + single_learner_loss_per_worker_epochsort . append ( [ epoch_losses [ i ] for i in sorted ( epoch_losses ) ] ) <nl> + <nl> + # We don ' t add the - - gpu argument , <nl> + # because it is already set during the previous mpi call . <nl> + launch_args + = [ " - - outputdir " , str ( tmpdir ) ] <nl> + launch_args + = [ " - - mb_source " , mb_source ] <nl> + launch_args + = [ " - - trainer_type " , " two " ] <nl> + <nl> + num_workers = 2 # now run in distributed workers . <nl> + ret_str = mpiexec_execute ( __file__ , [ ' - n ' , str ( num_workers ) , ' - l ' ] , launch_args ) <nl> + print ( ret_str ) <nl> + loss_perepoch_perworker = get_loss_perepoch_perworker ( ret_str , num_workers ) <nl> + <nl> + loss_per_worker = loss_perepoch_perworker . values ( ) <nl> + multi_learner_loss_per_worker_epochsort = [ ] <nl> + for epoch_losses in loss_per_worker : <nl> + multi_learner_loss_per_worker_epochsort . append ( [ epoch_losses [ i ] for i in sorted ( epoch_losses ) ] ) <nl> <nl> - # MB_SOURCES = [ " numpy " ] <nl> - MB_SOURCES = [ " ctf_utterance " ] <nl> + assert all ( [ single_learner_loss_per_worker_epochsort [ 0 ] = = i for i in multi_learner_loss_per_worker_epochsort ] ) <nl> + <nl> + MB_SOURCES = [ " ctf_frame " ] <nl> @ pytest . mark . parametrize ( " mb_source " , MB_SOURCES ) <nl> def test_multi_learner_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_source ) : <nl> if platform . system ( ) = = ' Linux ' : <nl> pytest . skip ( ' test only runs on Windows due to mpiexec - l option ' ) <nl> <nl> + num_workers = 2 <nl> # check whether trainer can be initialized or not <nl> - bmuf = MultiLearnerMUFTrainer ( ) <nl> + bmuf = MultiLearnerTrainer ( ) <nl> if not bmuf . trainer : <nl> pytest . skip ( ' BMUF not available on this build ' ) <nl> <nl> def test_multi_learner_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_sour <nl> <nl> launch_args + = [ " - - outputdir " , str ( tmpdir ) ] <nl> launch_args + = [ " - - mb_source " , mb_source ] <nl> + launch_args + = [ " - - trainer_type " , " multi " ] <nl> <nl> - ret_str = mpiexec_execute ( __file__ , [ ' - n ' , str ( NUM_WORKERS ) , ' - l ' ] , launch_args ) <nl> - # print ( ret_str ) <nl> - <nl> - # [ 0 ] Finished Epoch [ 1 ] : [ Training ] loss = 1 . 663636 * 10 , metric = 52 . 40 % * 10 0 . 890s ( 11 . 2 samples / s ) ; <nl> - regex_pattern = r " \ [ ( ? P < worker_rank > \ d ) \ ] . * ? Epoch \ [ ( ? P < epoch > \ d + ) \ ] . * ? loss = ( ? P < loss > \ d + \ . \ d + ) . * ? metric = ( ? P < metric > \ d + \ . \ d + ) " <nl> - loss_perepoch_perworker = { i : { } for i in range ( NUM_WORKERS ) } <nl> - for match in re . finditer ( regex_pattern , ret_str ) : <nl> - rank = int ( match . groupdict ( ) [ " worker_rank " ] ) <nl> - epoch = int ( match . groupdict ( ) [ " epoch " ] ) <nl> - loss = match . groupdict ( ) [ " loss " ] <nl> - metric = match . groupdict ( ) [ " metric " ] <nl> - loss_perepoch_perworker [ rank ] . 
update ( { epoch : ( loss , metric ) } ) <nl> + ret_str = mpiexec_execute ( __file__ , [ ' - n ' , str ( num_workers ) , ' - l ' ] , launch_args ) <nl> + print ( ret_str ) <nl> + loss_perepoch_perworker = get_loss_perepoch_perworker ( ret_str , num_workers ) <nl> <nl> num_epochs_per_worker = list ( map ( len , loss_perepoch_perworker . values ( ) ) ) <nl> <nl> def test_multi_learner_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_sour <nl> <nl> # assert that number of epochs isn ' t zero for 1st worker . <nl> assert num_epochs_per_worker [ 0 ] ! = 0 <nl> - <nl> + <nl> # assert all workers have same number of epochs <nl> assert min ( num_epochs_per_worker ) = = max ( num_epochs_per_worker ) <nl> - <nl> + <nl> # assert all workers have same loss and metric values <nl> loss_per_worker = loss_perepoch_perworker . values ( ) <nl> loss_per_worker_epochsort = [ ] <nl> for epoch_losses in loss_per_worker : <nl> loss_per_worker_epochsort . append ( [ epoch_losses [ i ] for i in sorted ( epoch_losses ) ] ) <nl> - <nl> + <nl> assert all ( [ loss_per_worker_epochsort [ 0 ] = = i for i in loss_per_worker_epochsort ] ) <nl> <nl> # Do the same test with checkpoint and compare the results . <nl> launch_args + = [ " - - checkpointdir " , str ( tmpdir . join ( ' checkpoint ' ) ) ] <nl> <nl> - ret_str = mpiexec_execute ( __file__ , [ ' - n ' , str ( NUM_WORKERS ) , ' - l ' ] , launch_args ) <nl> - # print ( ret_str ) <nl> + ret_str = mpiexec_execute ( __file__ , [ ' - n ' , str ( num_workers ) , ' - l ' ] , launch_args ) <nl> + print ( ret_str ) <nl> <nl> - loss_perepoch_perworker = { i : { } for i in range ( NUM_WORKERS ) } <nl> - for match in re . finditer ( regex_pattern , ret_str ) : <nl> - rank = int ( match . groupdict ( ) [ " worker_rank " ] ) <nl> - epoch = int ( match . groupdict ( ) [ " epoch " ] ) <nl> - loss = match . groupdict ( ) [ " loss " ] <nl> - metric = match . groupdict ( ) [ " metric " ] <nl> - loss_perepoch_perworker [ rank ] . update ( { epoch : ( loss , metric ) } ) <nl> + loss_perepoch_perworker = get_loss_perepoch_perworker ( ret_str , num_workers ) <nl> <nl> num_epochs_per_worker = list ( map ( len , loss_perepoch_perworker . values ( ) ) ) <nl> <nl> def test_multi_learner_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_sour <nl> for epoch_losses in loss_per_worker : <nl> multi_learner_loss_per_worker_epochsort . append ( [ epoch_losses [ i ] for i in sorted ( epoch_losses ) ] ) <nl> <nl> - # Compare no checkpoint loss values to checkpoint loss values . <nl> - assert all ( [ loss_per_worker_epochsort [ 0 ] = = i for i in multi_learner_loss_per_worker_epochsort ] ) <nl> - <nl> + # Compare no checkpoint loss , matric , and num samples , to checkpoint loss values . <nl> + for i in multi_learner_loss_per_worker_epochsort : <nl> + for n in range ( 3 ) : <nl> + for m in range ( 3 ) : <nl> + assert np . allclose ( float ( loss_per_worker_epochsort [ 0 ] [ n ] [ m ] ) , float ( i [ n ] [ m ] ) ) <nl> <nl> if __name__ = = " __main__ " : <nl> parser = argparse . ArgumentParser ( ) <nl> def test_multi_learner_bmuf_correct_metrics_averaging ( tmpdir , device_id , mb_sour <nl> parser . add_argument ( ' - checkpointdir ' , ' - - checkpointdir ' ) <nl> parser . add_argument ( ' - mb_source ' , ' - - mb_source ' ) <nl> parser . add_argument ( ' - gpu ' , ' - - gpu ' , action = ' store_true ' ) <nl> + parser . add_argument ( " - trainer_type " , " - - trainer_type " ) <nl> args = vars ( parser . 
parse_args ( ) ) <nl> <nl> - if args [ " checkpointdir " ] : <nl> - mpi_worker_multi_learner ( args [ " outputdir " ] , args [ " checkpointdir " ] , args [ " mb_source " ] , args [ " gpu " ] ) <nl> - else : <nl> - mpi_worker_multi_learner ( args [ " outputdir " ] , " " , args [ " mb_source " ] , args [ " gpu " ] ) <nl> + frame_mode = ( args [ " mb_source " ] = = " ctf_frame " ) <nl> + <nl> + if args [ " trainer_type " ] = = " multi " : <nl> + trainer = MultiLearnerTrainer ( frame_mode ) <nl> + if args [ " checkpointdir " ] : <nl> + <nl> + mpi_worker_multi_learner ( trainer , args [ " outputdir " ] , args [ " checkpointdir " ] , args [ " mb_source " ] , args [ " gpu " ] ) <nl> + else : <nl> + mpi_worker_multi_learner ( trainer , args [ " outputdir " ] , " " , args [ " mb_source " ] , args [ " gpu " ] ) <nl> + <nl> + elif args [ " trainer_type " ] = = " two " : <nl> + trainer = TwoDataParallelTrainer ( frame_mode ) <nl> + mpi_worker_multi_learner ( trainer , args [ " outputdir " ] , " " , args [ " mb_source " ] , args [ " gpu " ] ) <nl> + <nl> + elif args [ " trainer_type " ] = = " single " : <nl> + print ( " Coming to a single learner " ) <nl> + trainer = SingleDataParallelTrainer ( frame_mode ) <nl> + mpi_worker_multi_learner ( trainer , args [ " outputdir " ] , " " , args [ " mb_source " ] , args [ " gpu " ] ) <nl> + <nl> cntk . distributed . Communicator . finalize ( ) <nl> | Fixing the distributed learner sample count issue | microsoft/CNTK | 82b56d121f6631c68612a60bee1c1124b7f3104a | 2018-03-12T08:00:49Z |
mmm a / tensorflow / python / keras / layers / preprocessing / text_vectorization . py <nl> ppp b / tensorflow / python / keras / layers / preprocessing / text_vectorization . py <nl> def _preprocess ( self , inputs ) : <nl> return inputs <nl> <nl> def call ( self , inputs ) : <nl> + if inputs . shape . rank = = 1 : <nl> + inputs = array_ops . expand_dims ( inputs , axis = - 1 ) <nl> + <nl> self . _called = True <nl> inputs = self . _preprocess ( inputs ) <nl> <nl> mmm a / tensorflow / python / keras / layers / preprocessing / text_vectorization_test . py <nl> ppp b / tensorflow / python / keras / layers / preprocessing / text_vectorization_test . py <nl> def test_tfidf_appending_with_oov_replacement ( self ) : <nl> output_dataset = model . predict ( input_array ) <nl> self . assertAllClose ( expected_output , output_dataset ) <nl> <nl> + def test_accept_1D_input ( self ) : <nl> + input_array = np . array ( [ " earth wind and fire " , <nl> + " fire and earth michigan " ] ) <nl> + layer = get_layer_class ( ) ( <nl> + standardize = None , <nl> + split = None , <nl> + output_mode = " int " ) <nl> + layer . adapt ( input_array ) <nl> + _ = layer ( input_array ) <nl> + <nl> <nl> @ keras_parameterized . run_all_keras_modes <nl> class TextVectorizationModelBuildingTest ( <nl> | Enable TextVectorization to be used with 1D batches of strings ( so users don ' t | tensorflow/tensorflow | e449843cc3d3d27e1071542a2cd955a7e3b05b72 | 2020-04-20T16:11:33Z |
mmm a / File . lua <nl> ppp b / File . lua <nl> function torch . load ( filename , mode ) <nl> return object <nl> end <nl> <nl> + - - simple helpers to serialize / deserialize arbitrary objects / tables <nl> + function torch . serialize ( object ) <nl> + local f = torch . MemoryFile ( ) <nl> + f : writeObject ( object ) <nl> + local s = f : storage ( ) : string ( ) <nl> + f : close ( ) <nl> + return s <nl> + end <nl> + <nl> + function torch . deserialize ( str ) <nl> + local x = torch . CharStorage ( ) : string ( str ) <nl> + local tx = torch . CharTensor ( x ) <nl> + local xp = torch . CharStorage ( x : size ( 1 ) + 1 ) <nl> + local txp = torch . CharTensor ( xp ) <nl> + txp : narrow ( 1 , 1 , tx : size ( 1 ) ) : copy ( tx ) <nl> + txp [ tx : size ( 1 ) + 1 ] = 0 <nl> + local f = torch . MemoryFile ( xp ) <nl> + local object = f : readObject ( ) <nl> + f : close ( ) <nl> + return object <nl> + end <nl> + <nl> - - public API ( saveobj / loadobj are safe for global import ) <nl> torch . saveobj = torch . save <nl> torch . loadobj = torch . load <nl> | Added standard Serialization functions ( torch . [ de ] serialize ( ) ) | pytorch/pytorch | 3dbc05d8efc2c872fab6b506ffff52dff5bc13b1 | 2012-09-23T03:50:37Z |
mmm a / Jenkinsfile <nl> ppp b / Jenkinsfile <nl> def buildPlatformCmake ( buildName , conf , nodeReq , dockerTarget ) { <nl> } <nl> def test_suite = conf [ " withGpu " ] ? ( conf [ " multiGpu " ] ? " mgpu " : " gpu " ) : " cpu " <nl> / / Build node - this is returned result <nl> - retry ( 3 ) { <nl> + retry ( 1 ) { <nl> node ( nodeReq ) { <nl> unstash name : ' srcs ' <nl> echo " " " <nl> mmm a / Jenkinsfile - restricted <nl> ppp b / Jenkinsfile - restricted <nl> pipeline { <nl> stage ( ' Jenkins : Build doc ' ) { <nl> steps { <nl> script { <nl> - retry ( 3 ) { <nl> + retry ( 1 ) { <nl> node ( ' linux & & cpu & & restricted ' ) { <nl> unstash name : ' srcs ' <nl> echo ' Building doc . . . ' <nl> def buildPlatformCmake ( buildName , conf , nodeReq , dockerTarget ) { <nl> dockerArgs = " - - build - arg CUDA_VERSION = " + conf [ " cudaVersion " ] <nl> } <nl> / / Build node - this is returned result <nl> - retry ( 3 ) { <nl> + retry ( 1 ) { <nl> node ( nodeReq ) { <nl> unstash name : ' srcs ' <nl> echo " " " <nl> | Disable retries in Jenkins CI , since we ' re now using On - Demand instances instead of Spot ( ) | dmlc/xgboost | c5f92df4754b91c6c5871621d3ecd5f3a704df45 | 2018-11-28T22:57:09Z |
mmm a / plugins / net_plugin / net_plugin . cpp <nl> ppp b / plugins / net_plugin / net_plugin . cpp <nl> namespace eosio { <nl> close ( ) ; <nl> return false ; <nl> } else { <nl> + fc_dlog ( logger , " connected to $ { peer } " , ( " peer " , peer_name ( ) ) ) ; <nl> socket_open = true ; <nl> start_read_message ( ) ; <nl> return true ; <nl> namespace eosio { <nl> * Used to trigger a new connection from RPC API <nl> * / <nl> string net_plugin : : connect ( const string & host ) { <nl> + std : : unique_lock < std : : shared_timed_mutex > g ( my - > connections_mtx ) ; <nl> if ( my - > find_connection ( host ) ) <nl> return " already connected " ; <nl> <nl> connection_ptr c = std : : make_shared < connection > ( host ) ; <nl> - fc_dlog ( logger , " calling active connector " ) ; <nl> + fc_dlog ( logger , " calling active connector : $ { h } " , ( " h " , host ) ) ; <nl> if ( c - > resolve_and_connect ( ) ) { <nl> - fc_dlog ( logger , " adding new connection to the list " ) ; <nl> - std : : unique_lock < std : : shared_timed_mutex > g ( my - > connections_mtx ) ; <nl> + fc_dlog ( logger , " adding new connection to the list : $ { c } " , ( " c " , c - > peer_name ( ) ) ) ; <nl> my - > connections . insert ( c ) ; <nl> } <nl> return " added connection " ; <nl> namespace eosio { <nl> } <nl> <nl> optional < connection_status > net_plugin : : status ( const string & host ) const { <nl> + std : : shared_lock < std : : shared_timed_mutex > g ( my - > connections_mtx ) ; <nl> auto con = my - > find_connection ( host ) ; <nl> if ( con ) <nl> return con - > get_status ( ) ; <nl> namespace eosio { <nl> } <nl> return result ; <nl> } <nl> - connection_ptr net_plugin_impl : : find_connection ( const string & host ) const { <nl> - std : : shared_lock < std : : shared_timed_mutex > g ( connections_mtx ) ; <nl> + <nl> + / / call with connections_mtx <nl> + connection_ptr net_plugin_impl : : find_connection ( const string & host ) const { <nl> for ( const auto & c : connections ) <nl> if ( c - > peer_address ( ) = = host ) return c ; <nl> return connection_ptr ( ) ; <nl> | Fix connect race condition | EOSIO/eos | 93276047dbfe92a8cd783f7f0d6fc90520c827c5 | 2019-04-03T19:20:39Z |
mmm a / src / api . cc <nl> ppp b / src / api . cc <nl> namespace v8 { <nl> PREPARE_FOR_EXECUTION_WITH_CONTEXT ( context , class_name , function_name , \ <nl> false , i : : HandleScope , false ) <nl> <nl> + # define ENTER_V8_FOR_NEW_CONTEXT ( isolate ) \ <nl> + i : : VMState < v8 : : OTHER > __state__ ( ( isolate ) ) ; \ <nl> + i : : DisallowExceptions __no_exceptions__ ( ( isolate ) ) <nl> + <nl> # define EXCEPTION_BAILOUT_CHECK_SCOPED ( isolate , value ) \ <nl> do { \ <nl> if ( has_pending_exception ) { \ <nl> static i : : Handle < ObjectType > CreateEnvironment ( <nl> v8 : : DeserializeInternalFieldsCallback internal_fields_deserializer ) { <nl> i : : Handle < ObjectType > result ; <nl> <nl> - / / Enter V8 via an ENTER_V8 scope . <nl> { <nl> - ENTER_V8 ( isolate ) ; <nl> + ENTER_V8_FOR_NEW_CONTEXT ( isolate ) ; <nl> v8 : : Local < ObjectTemplate > proxy_template ; <nl> i : : Handle < i : : FunctionTemplateInfo > proxy_constructor ; <nl> i : : Handle < i : : FunctionTemplateInfo > global_constructor ; <nl> + i : : Handle < i : : Object > named_interceptor ( <nl> + isolate - > factory ( ) - > undefined_value ( ) ) ; <nl> + i : : Handle < i : : Object > indexed_interceptor ( <nl> + isolate - > factory ( ) - > undefined_value ( ) ) ; <nl> <nl> if ( ! maybe_global_template . IsEmpty ( ) ) { <nl> v8 : : Local < v8 : : ObjectTemplate > global_template = <nl> static i : : Handle < ObjectType > CreateEnvironment ( <nl> global_constructor - > set_access_check_info ( <nl> isolate - > heap ( ) - > undefined_value ( ) ) ; <nl> } <nl> + <nl> + / / Same for other interceptors . If the global constructor has <nl> + / / interceptors , we need to replace them temporarily with noop <nl> + / / interceptors , so the map is correctly marked as having interceptors , <nl> + / / but we don ' t invoke any . <nl> + if ( ! global_constructor - > named_property_handler ( ) - > IsUndefined ( isolate ) ) { <nl> + named_interceptor = <nl> + handle ( global_constructor - > named_property_handler ( ) , isolate ) ; <nl> + global_constructor - > set_named_property_handler ( <nl> + isolate - > heap ( ) - > noop_interceptor_info ( ) ) ; <nl> + } <nl> + if ( ! global_constructor - > indexed_property_handler ( ) - > IsUndefined ( <nl> + isolate ) ) { <nl> + indexed_interceptor = <nl> + handle ( global_constructor - > indexed_property_handler ( ) , isolate ) ; <nl> + global_constructor - > set_indexed_property_handler ( <nl> + isolate - > heap ( ) - > noop_interceptor_info ( ) ) ; <nl> + } <nl> } <nl> <nl> i : : MaybeHandle < i : : JSGlobalProxy > maybe_proxy ; <nl> static i : : Handle < ObjectType > CreateEnvironment ( <nl> invoke . Invoke ( isolate , maybe_proxy , proxy_template , extensions , <nl> context_snapshot_index , internal_fields_deserializer ) ; <nl> <nl> - / / Restore the access check info on the global template . <nl> + / / Restore the access check info and interceptors on the global template . <nl> if ( ! maybe_global_template . IsEmpty ( ) ) { <nl> DCHECK ( ! global_constructor . is_null ( ) ) ; <nl> DCHECK ( ! proxy_constructor . is_null ( ) ) ; <nl> static i : : Handle < ObjectType > CreateEnvironment ( <nl> proxy_constructor - > access_check_info ( ) ) ; <nl> global_constructor - > set_needs_access_check ( <nl> proxy_constructor - > needs_access_check ( ) ) ; <nl> + global_constructor - > set_named_property_handler ( * named_interceptor ) ; <nl> + global_constructor - > set_indexed_property_handler ( * indexed_interceptor ) ; <nl> } <nl> } <nl> / / Leave V8 . <nl> mmm a / src / assert - scope . 
cc <nl> ppp b / src / assert - scope . cc <nl> template class PerIsolateAssertScope < DEOPTIMIZATION_ASSERT , false > ; <nl> template class PerIsolateAssertScope < DEOPTIMIZATION_ASSERT , true > ; <nl> template class PerIsolateAssertScope < COMPILATION_ASSERT , false > ; <nl> template class PerIsolateAssertScope < COMPILATION_ASSERT , true > ; <nl> + template class PerIsolateAssertScope < NO_EXCEPTION_ASSERT , false > ; <nl> + template class PerIsolateAssertScope < NO_EXCEPTION_ASSERT , true > ; <nl> <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / assert - scope . h <nl> ppp b / src / assert - scope . h <nl> enum PerThreadAssertType { <nl> LAST_PER_THREAD_ASSERT_TYPE <nl> } ; <nl> <nl> - <nl> enum PerIsolateAssertType { <nl> JAVASCRIPT_EXECUTION_ASSERT , <nl> JAVASCRIPT_EXECUTION_THROWS , <nl> DEOPTIMIZATION_ASSERT , <nl> - COMPILATION_ASSERT <nl> + COMPILATION_ASSERT , <nl> + NO_EXCEPTION_ASSERT <nl> } ; <nl> <nl> template < PerThreadAssertType kType , bool kAllow > <nl> typedef PerIsolateAssertScope < JAVASCRIPT_EXECUTION_ASSERT , false > <nl> typedef PerIsolateAssertScope < JAVASCRIPT_EXECUTION_ASSERT , true > <nl> AllowJavascriptExecution ; <nl> <nl> + / / Scope to document where we do not expect javascript execution ( debug only ) <nl> + typedef PerIsolateAssertScopeDebugOnly < JAVASCRIPT_EXECUTION_ASSERT , false > <nl> + DisallowJavascriptExecutionDebugOnly ; <nl> + <nl> + / / Scope to introduce an exception to DisallowJavascriptExecutionDebugOnly . <nl> + typedef PerIsolateAssertScopeDebugOnly < JAVASCRIPT_EXECUTION_ASSERT , true > <nl> + AllowJavascriptExecutionDebugOnly ; <nl> + <nl> / / Scope in which javascript execution leads to exception being thrown . <nl> typedef PerIsolateAssertScope < JAVASCRIPT_EXECUTION_THROWS , false > <nl> ThrowOnJavascriptExecution ; <nl> typedef PerIsolateAssertScopeDebugOnly < COMPILATION_ASSERT , false > <nl> / / Scope to introduce an exception to DisallowDeoptimization . <nl> typedef PerIsolateAssertScopeDebugOnly < COMPILATION_ASSERT , true > <nl> AllowCompilation ; <nl> + <nl> + / / Scope to document where we do not expect exceptions . <nl> + typedef PerIsolateAssertScopeDebugOnly < NO_EXCEPTION_ASSERT , false > <nl> + DisallowExceptions ; <nl> + <nl> + / / Scope to introduce an exception to DisallowExceptions . <nl> + typedef PerIsolateAssertScopeDebugOnly < NO_EXCEPTION_ASSERT , true > <nl> + AllowExceptions ; <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / bootstrapper . cc <nl> ppp b / src / bootstrapper . cc <nl> bool Bootstrapper : : CompileNative ( Isolate * isolate , Vector < const char > name , <nl> <nl> / / For non - extension scripts , run script to get the function wrapper . <nl> Handle < Object > wrapper ; <nl> - if ( ! Execution : : Call ( isolate , fun , receiver , 0 , NULL ) . ToHandle ( & wrapper ) ) { <nl> + if ( ! Execution : : TryCall ( isolate , fun , receiver , 0 , nullptr , <nl> + Execution : : MessageHandling : : kKeepPending , nullptr ) <nl> + . ToHandle ( & wrapper ) ) { <nl> return false ; <nl> } <nl> / / Then run the function wrapper . <nl> - return ! Execution : : Call ( isolate , Handle < JSFunction > : : cast ( wrapper ) , receiver , <nl> - argc , argv ) . is_null ( ) ; <nl> + return ! Execution : : TryCall ( isolate , Handle < JSFunction > : : cast ( wrapper ) , <nl> + receiver , argc , argv , <nl> + Execution : : MessageHandling : : kKeepPending , nullptr ) <nl> + . 
is_null ( ) ; <nl> } <nl> <nl> <nl> bool Genesis : : CallUtilsFunction ( Isolate * isolate , const char * name ) { <nl> Handle < Object > fun = JSObject : : GetDataProperty ( utils , name_string ) ; <nl> Handle < Object > receiver = isolate - > factory ( ) - > undefined_value ( ) ; <nl> Handle < Object > args [ ] = { utils } ; <nl> - return ! Execution : : Call ( isolate , fun , receiver , 1 , args ) . is_null ( ) ; <nl> + return ! Execution : : TryCall ( isolate , fun , receiver , 1 , args , <nl> + Execution : : MessageHandling : : kKeepPending , nullptr ) <nl> + . is_null ( ) ; <nl> } <nl> <nl> <nl> bool Genesis : : CompileExtension ( Isolate * isolate , v8 : : Extension * extension ) { <nl> / / Call function using either the runtime object or the global <nl> / / object as the receiver . Provide no parameters . <nl> Handle < Object > receiver = isolate - > global_object ( ) ; <nl> - return ! Execution : : Call ( isolate , fun , receiver , 0 , NULL ) . is_null ( ) ; <nl> + return ! Execution : : TryCall ( isolate , fun , receiver , 0 , nullptr , <nl> + Execution : : MessageHandling : : kKeepPending , nullptr ) <nl> + . is_null ( ) ; <nl> } <nl> <nl> <nl> mmm a / src / compiler . cc <nl> ppp b / src / compiler . cc <nl> Handle < SharedFunctionInfo > Compiler : : GetSharedFunctionInfoForScript ( <nl> } <nl> <nl> if ( result . is_null ( ) ) { <nl> - isolate - > ReportPendingMessages ( ) ; <nl> + if ( natives ! = EXTENSION_CODE ) isolate - > ReportPendingMessages ( ) ; <nl> } else { <nl> isolate - > debug ( ) - > OnAfterCompile ( script ) ; <nl> } <nl> mmm a / src / debug / debug . cc <nl> ppp b / src / debug / debug . cc <nl> MaybeHandle < Object > Debug : : CallFunction ( const char * name , int argc , <nl> Handle < JSFunction > fun = Handle < JSFunction > : : cast ( <nl> JSReceiver : : GetProperty ( isolate_ , holder , name ) . ToHandleChecked ( ) ) ; <nl> Handle < Object > undefined = isolate_ - > factory ( ) - > undefined_value ( ) ; <nl> - return Execution : : TryCall ( isolate_ , fun , undefined , argc , args ) ; <nl> + MaybeHandle < Object > maybe_exception ; <nl> + return Execution : : TryCall ( isolate_ , fun , undefined , argc , args , <nl> + Execution : : MessageHandling : : kReport , <nl> + & maybe_exception ) ; <nl> } <nl> <nl> <nl> void Debug : : NotifyMessageHandler ( v8 : : DebugEvent event , <nl> Handle < Object > answer_value ; <nl> Handle < String > answer ; <nl> MaybeHandle < Object > maybe_exception ; <nl> - MaybeHandle < Object > maybe_result = <nl> - Execution : : TryCall ( isolate_ , process_debug_request , cmd_processor , 1 , <nl> - request_args , & maybe_exception ) ; <nl> + MaybeHandle < Object > maybe_result = Execution : : TryCall ( <nl> + isolate_ , process_debug_request , cmd_processor , 1 , request_args , <nl> + Execution : : MessageHandling : : kReport , & maybe_exception ) ; <nl> <nl> if ( maybe_result . ToHandle ( & answer_value ) ) { <nl> if ( answer_value - > IsUndefined ( isolate_ ) ) { <nl> v8 : : Local < v8 : : String > MessageImpl : : GetJSON ( ) const { <nl> return v8 : : Local < v8 : : String > ( ) ; <nl> } <nl> <nl> - MaybeHandle < Object > maybe_json = <nl> - Execution : : TryCall ( isolate , fun , event_data_ , 0 , NULL ) ; <nl> + MaybeHandle < Object > maybe_exception ; <nl> + MaybeHandle < Object > maybe_json = Execution : : TryCall ( <nl> + isolate , fun , event_data_ , 0 , nullptr , <nl> + Execution : : MessageHandling : : kReport , & maybe_exception ) ; <nl> Handle < Object > json ; <nl> if ( ! maybe_json . ToHandle ( & json ) | | ! 
json - > IsString ( ) ) { <nl> return v8 : : Local < v8 : : String > ( ) ; <nl> mmm a / src / execution . cc <nl> ppp b / src / execution . cc <nl> static void PrintDeserializedCodeInfo ( Handle < JSFunction > function ) { <nl> <nl> namespace { <nl> <nl> - MUST_USE_RESULT MaybeHandle < Object > Invoke ( Isolate * isolate , bool is_construct , <nl> - Handle < Object > target , <nl> - Handle < Object > receiver , int argc , <nl> - Handle < Object > args [ ] , <nl> - Handle < Object > new_target ) { <nl> + MUST_USE_RESULT MaybeHandle < Object > Invoke ( <nl> + Isolate * isolate , bool is_construct , Handle < Object > target , <nl> + Handle < Object > receiver , int argc , Handle < Object > args [ ] , <nl> + Handle < Object > new_target , Execution : : MessageHandling message_handling ) { <nl> DCHECK ( ! receiver - > IsJSGlobalObject ( ) ) ; <nl> <nl> # ifdef USE_SIMULATOR <nl> MUST_USE_RESULT MaybeHandle < Object > Invoke ( Isolate * isolate , bool is_construct , <nl> StackLimitCheck check ( isolate ) ; <nl> if ( check . HasOverflowed ( ) ) { <nl> isolate - > StackOverflow ( ) ; <nl> - isolate - > ReportPendingMessages ( ) ; <nl> + if ( message_handling = = Execution : : MessageHandling : : kReport ) { <nl> + isolate - > ReportPendingMessages ( ) ; <nl> + } <nl> return MaybeHandle < Object > ( ) ; <nl> } <nl> # endif <nl> MUST_USE_RESULT MaybeHandle < Object > Invoke ( Isolate * isolate , bool is_construct , <nl> bool has_exception = value . is_null ( ) ; <nl> DCHECK ( has_exception = = isolate - > has_pending_exception ( ) ) ; <nl> if ( has_exception ) { <nl> - isolate - > ReportPendingMessages ( ) ; <nl> + if ( message_handling = = Execution : : MessageHandling : : kReport ) { <nl> + isolate - > ReportPendingMessages ( ) ; <nl> + } <nl> return MaybeHandle < Object > ( ) ; <nl> } else { <nl> isolate - > clear_pending_message ( ) ; <nl> MUST_USE_RESULT MaybeHandle < Object > Invoke ( Isolate * isolate , bool is_construct , <nl> CHECK ( AllowJavascriptExecution : : IsAllowed ( isolate ) ) ; <nl> if ( ! 
ThrowOnJavascriptExecution : : IsAllowed ( isolate ) ) { <nl> isolate - > ThrowIllegalOperation ( ) ; <nl> - isolate - > ReportPendingMessages ( ) ; <nl> + if ( message_handling = = Execution : : MessageHandling : : kReport ) { <nl> + isolate - > ReportPendingMessages ( ) ; <nl> + } <nl> return MaybeHandle < Object > ( ) ; <nl> } <nl> <nl> MUST_USE_RESULT MaybeHandle < Object > Invoke ( Isolate * isolate , bool is_construct , <nl> bool has_exception = value - > IsException ( isolate ) ; <nl> DCHECK ( has_exception = = isolate - > has_pending_exception ( ) ) ; <nl> if ( has_exception ) { <nl> - isolate - > ReportPendingMessages ( ) ; <nl> + if ( message_handling = = Execution : : MessageHandling : : kReport ) { <nl> + isolate - > ReportPendingMessages ( ) ; <nl> + } <nl> return MaybeHandle < Object > ( ) ; <nl> } else { <nl> isolate - > clear_pending_message ( ) ; <nl> MUST_USE_RESULT MaybeHandle < Object > Invoke ( Isolate * isolate , bool is_construct , <nl> return Handle < Object > ( value , isolate ) ; <nl> } <nl> <nl> - } / / namespace <nl> - <nl> - <nl> - / / static <nl> - MaybeHandle < Object > Execution : : Call ( Isolate * isolate , Handle < Object > callable , <nl> - Handle < Object > receiver , int argc , <nl> - Handle < Object > argv [ ] ) { <nl> + MaybeHandle < Object > CallInternal ( Isolate * isolate , Handle < Object > callable , <nl> + Handle < Object > receiver , int argc , <nl> + Handle < Object > argv [ ] , <nl> + Execution : : MessageHandling message_handling ) { <nl> / / Convert calls on global objects to be calls on the global <nl> / / receiver instead to avoid having a ' this ' pointer which refers <nl> / / directly to a global object . <nl> MaybeHandle < Object > Execution : : Call ( Isolate * isolate , Handle < Object > callable , <nl> handle ( Handle < JSGlobalObject > : : cast ( receiver ) - > global_proxy ( ) , isolate ) ; <nl> } <nl> return Invoke ( isolate , false , callable , receiver , argc , argv , <nl> - isolate - > factory ( ) - > undefined_value ( ) ) ; <nl> + isolate - > factory ( ) - > undefined_value ( ) , message_handling ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + / / static <nl> + MaybeHandle < Object > Execution : : Call ( Isolate * isolate , Handle < Object > callable , <nl> + Handle < Object > receiver , int argc , <nl> + Handle < Object > argv [ ] ) { <nl> + return CallInternal ( isolate , callable , receiver , argc , argv , <nl> + MessageHandling : : kReport ) ; <nl> } <nl> <nl> <nl> MaybeHandle < Object > Execution : : New ( Isolate * isolate , Handle < Object > constructor , <nl> Handle < Object > new_target , int argc , <nl> Handle < Object > argv [ ] ) { <nl> return Invoke ( isolate , true , constructor , <nl> - isolate - > factory ( ) - > undefined_value ( ) , argc , argv , new_target ) ; <nl> + isolate - > factory ( ) - > undefined_value ( ) , argc , argv , new_target , <nl> + MessageHandling : : kReport ) ; <nl> } <nl> <nl> - <nl> MaybeHandle < Object > Execution : : TryCall ( Isolate * isolate , <nl> Handle < Object > callable , <nl> Handle < Object > receiver , int argc , <nl> Handle < Object > args [ ] , <nl> + MessageHandling message_handling , <nl> MaybeHandle < Object > * exception_out ) { <nl> bool is_termination = false ; <nl> MaybeHandle < Object > maybe_result ; <nl> if ( exception_out ! 
= NULL ) * exception_out = MaybeHandle < Object > ( ) ; <nl> + DCHECK_IMPLIES ( message_handling = = MessageHandling : : kKeepPending , <nl> + exception_out = = nullptr ) ; <nl> / / Enter a try - block while executing the JavaScript code . To avoid <nl> / / duplicate error printing it must be non - verbose . Also , to avoid <nl> / / creating message objects during stack overflow we shouldn ' t <nl> MaybeHandle < Object > Execution : : TryCall ( Isolate * isolate , <nl> catcher . SetVerbose ( false ) ; <nl> catcher . SetCaptureMessage ( false ) ; <nl> <nl> - maybe_result = Call ( isolate , callable , receiver , argc , args ) ; <nl> + maybe_result = <nl> + CallInternal ( isolate , callable , receiver , argc , args , message_handling ) ; <nl> <nl> if ( maybe_result . is_null ( ) ) { <nl> - DCHECK ( catcher . HasCaught ( ) ) ; <nl> DCHECK ( isolate - > has_pending_exception ( ) ) ; <nl> - DCHECK ( isolate - > external_caught_exception ( ) ) ; <nl> if ( isolate - > pending_exception ( ) = = <nl> isolate - > heap ( ) - > termination_exception ( ) ) { <nl> is_termination = true ; <nl> } else { <nl> - if ( exception_out ! = NULL ) { <nl> + if ( exception_out ! = nullptr ) { <nl> + DCHECK ( catcher . HasCaught ( ) ) ; <nl> + DCHECK ( isolate - > external_caught_exception ( ) ) ; <nl> * exception_out = v8 : : Utils : : OpenHandle ( * catcher . Exception ( ) ) ; <nl> } <nl> } <nl> - isolate - > OptionalRescheduleException ( true ) ; <nl> + if ( message_handling = = MessageHandling : : kReport ) { <nl> + isolate - > OptionalRescheduleException ( true ) ; <nl> + } <nl> } <nl> - <nl> - DCHECK ( ! isolate - > has_pending_exception ( ) ) ; <nl> } <nl> <nl> / / Re - request terminate execution interrupt to trigger later . <nl> mmm a / src / execution . h <nl> ppp b / src / execution . h <nl> namespace internal { <nl> <nl> class Execution final : public AllStatic { <nl> public : <nl> + / / Whether to report pending messages , or keep them pending on the isolate . <nl> + enum class MessageHandling { kReport , kKeepPending } ; <nl> + <nl> / / Call a function , the caller supplies a receiver and an array <nl> / / of arguments . <nl> / / <nl> class Execution final : public AllStatic { <nl> int argc , <nl> Handle < Object > argv [ ] ) ; <nl> <nl> - / / Call a function , just like Call ( ) , but make sure to silently catch <nl> - / / any thrown exceptions . The return value is either the result of <nl> - / / calling the function ( if caught exception is false ) or the exception <nl> - / / that occurred ( if caught exception is true ) . <nl> - / / In the exception case , exception_out holds the caught exceptions , unless <nl> - / / it is a termination exception . <nl> + / / Call a function , just like Call ( ) , but handle don ' t report exceptions <nl> + / / externally . <nl> + / / The return value is either the result of calling the function ( if no <nl> + / / exception occurred ) , or an empty handle . <nl> + / / If message_handling is MessageHandling : : kReport , exceptions ( except for <nl> + / / termination exceptions ) will be stored in exception_out ( if not a <nl> + / / nullptr ) . <nl> static MaybeHandle < Object > TryCall ( Isolate * isolate , Handle < Object > callable , <nl> Handle < Object > receiver , int argc , <nl> Handle < Object > argv [ ] , <nl> - MaybeHandle < Object > * exception_out = NULL ) ; <nl> + MessageHandling message_handling , <nl> + MaybeHandle < Object > * exception_out ) ; <nl> } ; <nl> <nl> <nl> mmm a / src / heap / heap . cc <nl> ppp b / src / heap / heap . 
cc <nl> AllocationResult Heap : : AllocateTransitionArray ( int capacity ) { <nl> return array ; <nl> } <nl> <nl> - <nl> - void Heap : : CreateApiObjects ( ) { <nl> + bool Heap : : CreateApiObjects ( ) { <nl> HandleScope scope ( isolate ( ) ) ; <nl> set_message_listeners ( * TemplateList : : New ( isolate ( ) , 2 ) ) ; <nl> + HeapObject * obj = nullptr ; <nl> + { <nl> + AllocationResult allocation = AllocateStruct ( INTERCEPTOR_INFO_TYPE ) ; <nl> + if ( ! allocation . To ( & obj ) ) return false ; <nl> + } <nl> + InterceptorInfo * info = InterceptorInfo : : cast ( obj ) ; <nl> + info - > set_flags ( 0 ) ; <nl> + set_noop_interceptor_info ( info ) ; <nl> + return true ; <nl> } <nl> <nl> <nl> bool Heap : : SetUp ( ) { <nl> bool Heap : : CreateHeapObjects ( ) { <nl> / / Create initial maps . <nl> if ( ! CreateInitialMaps ( ) ) return false ; <nl> - CreateApiObjects ( ) ; <nl> + if ( ! CreateApiObjects ( ) ) return false ; <nl> <nl> / / Create initial objects <nl> CreateInitialObjects ( ) ; <nl> mmm a / src / heap / heap . h <nl> ppp b / src / heap / heap . h <nl> using v8 : : MemoryPressureLevel ; <nl> V ( FixedArray , serialized_global_proxy_sizes , SerializedGlobalProxySizes ) \ <nl> / * Configured values * / \ <nl> V ( TemplateList , message_listeners , MessageListeners ) \ <nl> + V ( InterceptorInfo , noop_interceptor_info , NoOpInterceptorInfo ) \ <nl> V ( Code , js_entry_code , JsEntryCode ) \ <nl> V ( Code , js_construct_entry_code , JsConstructEntryCode ) \ <nl> / * Oddball maps * / \ <nl> class Heap { <nl> / / Support for the API . <nl> / / <nl> <nl> - void CreateApiObjects ( ) ; <nl> + bool CreateApiObjects ( ) ; <nl> <nl> / / Implements the corresponding V8 API function . <nl> bool IdleNotification ( double deadline_in_seconds ) ; <nl> mmm a / src / isolate . cc <nl> ppp b / src / isolate . cc <nl> bool Isolate : : IsExternalHandlerOnTop ( Object * exception ) { <nl> <nl> <nl> void Isolate : : ReportPendingMessages ( ) { <nl> + DCHECK ( AllowExceptions : : IsAllowed ( this ) ) ; <nl> + <nl> Object * exception = pending_exception ( ) ; <nl> <nl> / / Try to propagate the exception to an external v8 : : TryCatch handler . If <nl> void Isolate : : PromiseReactionJob ( Handle < PromiseReactionJobInfo > info , <nl> handle ( deferred_promise_arr - > get ( i ) , this ) , <nl> handle ( deferred_on_resolve_arr - > get ( i ) , this ) , <nl> handle ( deferred_on_reject_arr - > get ( i ) , this ) } ; <nl> - * result = Execution : : TryCall ( this , promise_handle_fn , undefined , <nl> - arraysize ( argv ) , argv , maybe_exception ) ; <nl> + * result = Execution : : TryCall ( <nl> + this , promise_handle_fn , undefined , arraysize ( argv ) , argv , <nl> + Execution : : MessageHandling : : kReport , maybe_exception ) ; <nl> / / If execution is terminating , just bail out . 
<nl> if ( result - > is_null ( ) & & maybe_exception - > is_null ( ) ) { <nl> return ; <nl> void Isolate : : PromiseReactionJob ( Handle < PromiseReactionJobInfo > info , <nl> Handle < Object > argv [ ] = { value , tasks , deferred_promise , <nl> handle ( info - > deferred_on_resolve ( ) , this ) , <nl> handle ( info - > deferred_on_reject ( ) , this ) } ; <nl> - * result = Execution : : TryCall ( this , promise_handle_fn , undefined , <nl> - arraysize ( argv ) , argv , maybe_exception ) ; <nl> + * result = Execution : : TryCall ( <nl> + this , promise_handle_fn , undefined , arraysize ( argv ) , argv , <nl> + Execution : : MessageHandling : : kReport , maybe_exception ) ; <nl> } <nl> } <nl> <nl> void Isolate : : PromiseResolveThenableJob ( <nl> Handle < JSFunction > reject ( info - > reject ( ) , this ) ; <nl> Handle < JSReceiver > then ( info - > then ( ) , this ) ; <nl> Handle < Object > argv [ ] = { resolve , reject } ; <nl> - * result = Execution : : TryCall ( this , then , thenable , arraysize ( argv ) , argv , <nl> - maybe_exception ) ; <nl> + * result = <nl> + Execution : : TryCall ( this , then , thenable , arraysize ( argv ) , argv , <nl> + Execution : : MessageHandling : : kReport , maybe_exception ) ; <nl> <nl> Handle < Object > reason ; <nl> if ( maybe_exception - > ToHandle ( & reason ) ) { <nl> DCHECK ( result - > is_null ( ) ) ; <nl> Handle < Object > reason_arg [ ] = { reason } ; <nl> - * result = <nl> - Execution : : TryCall ( this , reject , factory ( ) - > undefined_value ( ) , <nl> - arraysize ( reason_arg ) , reason_arg , maybe_exception ) ; <nl> + * result = Execution : : TryCall ( <nl> + this , reject , factory ( ) - > undefined_value ( ) , arraysize ( reason_arg ) , <nl> + reason_arg , Execution : : MessageHandling : : kReport , maybe_exception ) ; <nl> } <nl> } <nl> <nl> void Isolate : : RunMicrotasksInternal ( ) { <nl> if ( microtask - > IsJSFunction ( ) ) { <nl> Handle < JSFunction > microtask_function = <nl> Handle < JSFunction > : : cast ( microtask ) ; <nl> - result = Execution : : TryCall ( this , microtask_function , <nl> - factory ( ) - > undefined_value ( ) , 0 , NULL , <nl> - & maybe_exception ) ; <nl> + result = Execution : : TryCall ( <nl> + this , microtask_function , factory ( ) - > undefined_value ( ) , 0 , <nl> + nullptr , Execution : : MessageHandling : : kReport , & maybe_exception ) ; <nl> } else if ( microtask - > IsPromiseResolveThenableJobInfo ( ) ) { <nl> PromiseResolveThenableJob ( <nl> Handle < PromiseResolveThenableJobInfo > : : cast ( microtask ) , & result , <nl> mmm a / test / cctest / test - thread - termination . cc <nl> ppp b / test / cctest / test - thread - termination . cc <nl> void InnerTryCallTerminate ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> v8 : : Local < v8 : : Function > loop = v8 : : Local < v8 : : Function > : : Cast ( <nl> global - > Get ( CcTest : : isolate ( ) - > GetCurrentContext ( ) , v8_str ( " loop " ) ) <nl> . ToLocalChecked ( ) ) ; <nl> + i : : MaybeHandle < i : : Object > exception ; <nl> i : : MaybeHandle < i : : Object > result = <nl> i : : Execution : : TryCall ( CcTest : : i_isolate ( ) , v8 : : Utils : : OpenHandle ( ( * loop ) ) , <nl> - v8 : : Utils : : OpenHandle ( ( * global ) ) , 0 , NULL , NULL ) ; <nl> + v8 : : Utils : : OpenHandle ( ( * global ) ) , 0 , nullptr , <nl> + i : : Execution : : MessageHandling : : kReport , & exception ) ; <nl> CHECK ( result . is_null ( ) ) ; <nl> / / TryCall ignores terminate execution , but rerequests the interrupt . <nl> CHECK ( ! args . 
GetIsolate ( ) - > IsExecutionTerminating ( ) ) ; <nl> mmm a / test / cctest / wasm / test - wasm - stack . cc <nl> ppp b / test / cctest / wasm / test - wasm - stack . cc <nl> TEST ( CollectDetailedWasmStack_ExplicitThrowFromJs ) { <nl> MaybeHandle < Object > maybe_exc ; <nl> Handle < Object > args [ ] = { js_wasm_wrapper } ; <nl> MaybeHandle < Object > returnObjMaybe = <nl> - Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , & maybe_exc ) ; <nl> + Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , <nl> + Execution : : MessageHandling : : kReport , & maybe_exc ) ; <nl> CHECK ( returnObjMaybe . is_null ( ) ) ; <nl> <nl> / / Line and column are 1 - based , so add 1 for the expected wasm output . <nl> TEST ( CollectDetailedWasmStack_WasmError ) { <nl> MaybeHandle < Object > maybe_exc ; <nl> Handle < Object > args [ ] = { js_wasm_wrapper } ; <nl> MaybeHandle < Object > maybe_return_obj = <nl> - Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , & maybe_exc ) ; <nl> + Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , <nl> + Execution : : MessageHandling : : kReport , & maybe_exc ) ; <nl> CHECK ( maybe_return_obj . is_null ( ) ) ; <nl> <nl> / / Line and column are 1 - based , so add 1 for the expected wasm output . <nl> mmm a / test / cctest / wasm / test - wasm - trap - position . cc <nl> ppp b / test / cctest / wasm / test - wasm - trap - position . cc <nl> TEST ( Unreachable ) { <nl> MaybeHandle < Object > maybe_exc ; <nl> Handle < Object > args [ ] = { js_wasm_wrapper } ; <nl> MaybeHandle < Object > returnObjMaybe = <nl> - Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , & maybe_exc ) ; <nl> + Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , <nl> + Execution : : MessageHandling : : kReport , & maybe_exc ) ; <nl> CHECK ( returnObjMaybe . is_null ( ) ) ; <nl> <nl> / / Line and column are 1 - based , so add 1 for the expected wasm output . <nl> TEST ( IllegalLoad ) { <nl> MaybeHandle < Object > maybe_exc ; <nl> Handle < Object > args [ ] = { js_wasm_wrapper } ; <nl> MaybeHandle < Object > returnObjMaybe = <nl> - Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , & maybe_exc ) ; <nl> + Execution : : TryCall ( isolate , js_trampoline , global , 1 , args , <nl> + Execution : : MessageHandling : : kReport , & maybe_exc ) ; <nl> CHECK ( returnObjMaybe . is_null ( ) ) ; <nl> <nl> / / Line and column are 1 - based , so add 1 for the expected wasm output . <nl> | Assert that context creation doesn ' t throw | v8/v8 | 0389df514d48c6394f606bf4e5b9b0b0cb134d1b | 2017-01-17T13:01:03Z |
mmm a / src / arm64 / macro - assembler - arm64 - inl . h <nl> ppp b / src / arm64 / macro - assembler - arm64 - inl . h <nl> void TurboAssembler : : SmiUntag ( Register dst , const MemOperand & src ) { <nl> DCHECK ( dst . Is64Bits ( ) ) ; <nl> if ( SmiValuesAre32Bits ( ) ) { <nl> if ( src . IsImmediateOffset ( ) & & src . shift_amount ( ) = = 0 ) { <nl> - if ( FLAG_enable_slow_asserts ) { <nl> - Ldr ( dst , src ) ; <nl> - AssertSmi ( dst ) ; <nl> - } <nl> / / Load value directly from the upper half - word . <nl> / / Assumes that Smis are shifted by 32 bits and little endianness . <nl> DCHECK_EQ ( kSmiShift , 32 ) ; <nl> | Remove invalid slow assert in arm64 ' s SmiUntag | v8/v8 | 9498d609a045f644a7af378ebec407ddd8cbdb6e | 2018-06-21T08:32:38Z |
mmm a / etc / evergreen . yml <nl> ppp b / etc / evergreen . yml <nl> functions : <nl> script : | <nl> set - o verbose <nl> # Jepsen system failure if file exists . <nl> - if [ - f jepsen_systemfailure_ $ { task_name } _ $ { execution } ] ; then <nl> + if [ - f jepsen_system_failure_ $ { task_name } _ $ { execution } ] ; then <nl> exit $ ( cat jepsen_system_failure_ $ { task_name } _ $ { execution } ) <nl> fi <nl> <nl> | SERVER - 26633 Jpesen tests - Fix typo for system failure file name | mongodb/mongo | 910ad1d9ea6462b847f1432f041bbf68b2cffa8d | 2016-10-28T17:27:21Z |
mmm a / torch / csrc / api / include / torch / serialize . h <nl> ppp b / torch / csrc / api / include / torch / serialize . h <nl> namespace torch { <nl> / / / \ endrst <nl> template < typename Value , typename . . . SaveToArgs > <nl> void save ( const Value & value , SaveToArgs & & . . . args ) { <nl> - serialize : : OutputArchive archive ; <nl> + serialize : : OutputArchive archive ( <nl> + std : : make_shared < jit : : script : : CompilationUnit > ( ) ) ; <nl> archive < < value ; <nl> archive . save_to ( std : : forward < SaveToArgs > ( args ) . . . ) ; <nl> } <nl> void save ( const Value & value , SaveToArgs & & . . . args ) { <nl> / / / \ endrst <nl> template < typename . . . SaveToArgs > <nl> void save ( const std : : vector < torch : : Tensor > & tensor_vec , SaveToArgs & & . . . args ) { <nl> - serialize : : OutputArchive archive ; <nl> + serialize : : OutputArchive archive ( <nl> + std : : make_shared < jit : : script : : CompilationUnit > ( ) ) ; <nl> for ( size_t i = 0 ; i < tensor_vec . size ( ) ; i + + ) { <nl> auto & value = tensor_vec [ i ] ; <nl> archive . write ( std : : to_string ( i ) , value ) ; <nl> mmm a / torch / csrc / api / include / torch / serialize / output - archive . h <nl> ppp b / torch / csrc / api / include / torch / serialize / output - archive . h <nl> namespace torch { <nl> namespace serialize { <nl> class TORCH_API OutputArchive final { <nl> public : <nl> - / / / Default - constructs the ` OutputArchive ` . <nl> - OutputArchive ( ) ; <nl> + explicit OutputArchive ( std : : shared_ptr < jit : : script : : CompilationUnit > cu ) ; <nl> <nl> / / Move is allowed . <nl> OutputArchive ( OutputArchive & & ) = default ; <nl> class TORCH_API OutputArchive final { <nl> OutputArchive ( OutputArchive & ) = delete ; <nl> OutputArchive & operator = ( OutputArchive & ) = delete ; <nl> <nl> + std : : shared_ptr < jit : : script : : CompilationUnit > compilation_unit ( ) const { <nl> + return cu_ ; <nl> + } <nl> + <nl> / / / Writes a ` ( key , tensor ) ` pair to the ` OutputArchive ` , and marks it as <nl> / / / being or not being a buffer ( non - differentiable tensor ) . <nl> void write ( <nl> class TORCH_API OutputArchive final { <nl> } <nl> <nl> private : <nl> + std : : shared_ptr < jit : : script : : CompilationUnit > cu_ ; <nl> jit : : script : : Module module_ ; <nl> } ; <nl> } / / namespace serialize <nl> mmm a / torch / csrc / api / src / nn / module . cpp <nl> ppp b / torch / csrc / api / src / nn / module . cpp <nl> void Module : : save ( serialize : : OutputArchive & archive ) const { <nl> } <nl> for ( const auto & child : children_ ) { <nl> if ( child . value ( ) - > is_serializable ( ) ) { <nl> - serialize : : OutputArchive child_archive ; <nl> + serialize : : OutputArchive child_archive ( archive . compilation_unit ( ) ) ; <nl> child . value ( ) - > save ( child_archive ) ; <nl> archive . write ( child . key ( ) , child_archive ) ; <nl> } <nl> mmm a / torch / csrc / api / src / serialize / output - archive . cpp <nl> ppp b / torch / csrc / api / src / serialize / output - archive . cpp <nl> <nl> <nl> namespace torch { <nl> namespace serialize { <nl> - OutputArchive : : OutputArchive ( ) <nl> - : module_ ( " __main__ " ) { } <nl> + OutputArchive : : OutputArchive ( std : : shared_ptr < jit : : script : : CompilationUnit > cu ) <nl> + : cu_ ( std : : move ( cu ) ) , <nl> + module_ ( " __torch__ . 
Module " , cu_ , / * shouldMangle = * / true ) { } <nl> <nl> void OutputArchive : : write ( <nl> const std : : string & key , <nl> | serialize all c + + frontend modules to a single CU . ( ) | pytorch/pytorch | 3d23c04a1c2773797d959f7d95786fef838dbccf | 2019-08-09T07:52:07Z |
mmm a / ios / sdk / WeexSDK / Sources / Model / WXComponent . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Model / WXComponent . m <nl> - ( void ) _updateStylesOnComponentThread : ( NSDictionary * ) styles resetStyles : ( NSMut <nl> } <nl> if ( isUpdateStyles ) { <nl> [ self _modifyStyles : styles ] ; <nl> + if ( [ self needsLayout ] ) { <nl> + / / call update style may take effect on layout , maybe the component <nl> + / / displaylink has been paused , so we need to restart the component task , and it will auto - pause when task queue is empty . <nl> + [ self . weexInstance . componentManager startComponentTasks ] ; <nl> + } <nl> } <nl> } <nl> <nl> | * [ iOS ] restart component display - link when updating style action takes effect on layout | apache/incubator-weex | 9d11a5da045819b74c95797806ac8d1a08f845eb | 2018-03-01T02:51:44Z |
mmm a / src / core / lib / channel / message_size_filter . c <nl> ppp b / src / core / lib / channel / message_size_filter . c <nl> static void recv_message_ready ( grpc_exec_ctx * exec_ctx , void * user_data , <nl> if ( * calld - > recv_message ! = NULL & & <nl> ( * calld - > recv_message ) - > length > chand - > max_recv_size ) { <nl> char * message_string ; <nl> - gpr_asprintf ( & message_string , " Received message larger than max ( % lu ) " , <nl> + gpr_asprintf ( & message_string , <nl> + " Received message larger than max ( % lu vs . % lu ) " , <nl> + ( unsigned long ) ( * calld - > recv_message ) - > length , <nl> ( unsigned long ) chand - > max_recv_size ) ; <nl> gpr_slice message = gpr_slice_from_copied_string ( message_string ) ; <nl> gpr_free ( message_string ) ; <nl> static void start_transport_stream_op ( grpc_exec_ctx * exec_ctx , <nl> if ( op - > send_message ! = NULL & & <nl> op - > send_message - > length > chand - > max_send_size ) { <nl> char * message_string ; <nl> - gpr_asprintf ( & message_string , " Sent message larger than max ( % lu ) " , <nl> + gpr_asprintf ( & message_string , " Sent message larger than max ( % lu vs . % lu ) " , <nl> + ( unsigned long ) op - > send_message - > length , <nl> ( unsigned long ) chand - > max_send_size ) ; <nl> gpr_slice message = gpr_slice_from_copied_string ( message_string ) ; <nl> gpr_free ( message_string ) ; <nl> mmm a / test / core / end2end / tests / max_message_length . c <nl> ppp b / test / core / end2end / tests / max_message_length . c <nl> static void test_max_message_length ( grpc_end2end_test_config config , <nl> <nl> done : <nl> GPR_ASSERT ( status = = GRPC_STATUS_INVALID_ARGUMENT ) ; <nl> + GPR_ASSERT ( strcmp ( details , <nl> + send_limit <nl> + ? " Sent message larger than max ( 11 vs . 5 ) " <nl> + : " Received message larger than max ( 11 vs . 5 ) " ) = = 0 ) ; <nl> <nl> gpr_free ( details ) ; <nl> grpc_metadata_array_destroy ( & initial_metadata_recv ) ; <nl> | Change error messages to include actual values and limits . | grpc/grpc | ed4605b71c8ed5ea1b4d4ff27dbea230a458fb44 | 2016-08-31T20:05:46Z |
mmm a / src / json . cc <nl> ppp b / src / json . cc <nl> decode <nl> return decodeNull ( first , last ) ; <nl> } else { <nl> throw DL_ABORT_EX2 ( " JSON decoding failed : " <nl> - " Unexpected EOF in term context . " , <nl> + " Unexpected character in term context . " , <nl> error_code : : JSON_PARSE_ERROR ) ; <nl> } <nl> } <nl> | Fixed error message in json : : decode ( ) | aria2/aria2 | bc7ac15d7e71b83c501b6863111f9cafa7f83028 | 2011-03-09T15:54:07Z |